diff --git a/modneflib/modnef/modnef_torch/__init__.py b/modneflib/modnef/modnef_torch/__init__.py
index c9c0455efc06f85a76d1001e1d7bd9c47b7d7c79..d61cdd572b826b94bdeff33516e608ee915a622c 100644
--- a/modneflib/modnef/modnef_torch/__init__.py
+++ b/modneflib/modnef/modnef_torch/__init__.py
@@ -1,4 +1,5 @@
 from .modnef_neurons import *
+from .model_builder import ModNEFModelBuilder
 from .model import ModNEFModel
 from .trainer import ModNEFTrainer
 from .executor import ModNEFExecutor
\ No newline at end of file
diff --git a/modneflib/modnef/modnef_torch/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/modnef_torch/__pycache__/__init__.cpython-310.pyc
index 9f4f447a3ea926f1f09a8bb926113c80fe26a5ed..fb5f930237a51ae64b29d117c9107ba84b67fbc0 100644
Binary files a/modneflib/modnef/modnef_torch/__pycache__/__init__.cpython-310.pyc and b/modneflib/modnef/modnef_torch/__pycache__/__init__.cpython-310.pyc differ
diff --git a/modneflib/modnef/modnef_torch/__pycache__/model.cpython-310.pyc b/modneflib/modnef/modnef_torch/__pycache__/model.cpython-310.pyc
index 94eb020d8c00c284144907451213f356d271c90b..980e74db9a47de5f660f7f5c0d7efaab3cda51b7 100644
Binary files a/modneflib/modnef/modnef_torch/__pycache__/model.cpython-310.pyc and b/modneflib/modnef/modnef_torch/__pycache__/model.cpython-310.pyc differ
diff --git a/modneflib/modnef/modnef_torch/__pycache__/trainer.cpython-310.pyc b/modneflib/modnef/modnef_torch/__pycache__/trainer.cpython-310.pyc
index 244dc9429332508edd71083bd2fc0446d3427800..18e25f81bcf3e35df0c64461da7b3606c3a31ae7 100644
Binary files a/modneflib/modnef/modnef_torch/__pycache__/trainer.cpython-310.pyc and b/modneflib/modnef/modnef_torch/__pycache__/trainer.cpython-310.pyc differ
diff --git a/modneflib/modnef/modnef_torch/model.py b/modneflib/modnef/modnef_torch/model.py
index 71d346cd97db8553fda711d6698f559f63f3328d..8eaf453a4ca3cddbd464f9301c1ba8b37cd6e40f 100644
--- a/modneflib/modnef/modnef_torch/model.py
+++ b/modneflib/modnef/modnef_torch/model.py
@@ -13,17 +13,7 @@ import torch.nn as nn
 import torch
 from modnef.arch_builder import *
 from modnef.modnef_driver import load_driver_from_yaml
-from math import ceil, log
-
-neuron_model = {
-  "blif" : mn.BLIF,
-  "rblif" : mn.RBLIF,
-  "slif" : mn.SLIF,
-  "rslif" : mn.RSLIF,
-  "shiftlif" : mn.ShiftLIF,
-  "rshiftlif" : mn.RShiftLIF
-}
-
+from modnef.modnef_torch.modnef_neurons import ModNEFNeuron
 
 class ModNEFModel(nn.Module):
   """
@@ -70,10 +60,7 @@ class ModNEFModel(nn.Module):
     Generate VHDL file of model
   """
 
-  def __init__(self, 
-               configuration, 
-               spike_grad
-               ):
+  def __init__(self):
     """
     Initialize class
 
@@ -87,107 +74,38 @@ class ModNEFModel(nn.Module):
 
     super().__init__()
 
-    self.name = "MySNN"
-
-    self.layers = nn.ModuleDict()
-
-    self.__hardware_estimation_flag = False
-
-    self.__fpga_eval_flag = False
+    self.hardware_estimation_flag = False
 
-    self.mem = {}
-    self.spk = {}
-
-    self.__input_size = -1
-    self.__num_class = -1
-
-    self.configuration = configuration
+    self.fpga_eval_flag = False
 
     self.driver = None
 
-    self.hardware_description = {
-      "clock_name" : "clock",
-      "clock_freq" : 125_000_000,
-      "baud_rate" : 921_600,
-      "txd" : "uart_txd",
-      "rxd" : "uart_rxd",
-      "queue_read_depth" : 4096,
-      "queue_write_depth" : 1024
-    }
-
-    self.build(configuration=configuration, spike_grad=spike_grad)
-
-  def build(self, configuration, spike_grad):
-    """
-    Build layers of model (call during initialization)
-
-    Parameters
-    ----------
-    configuration : dict
-      model configuraiton dictionnary
-    spike_grade : function
-      surrogate gradient
-    """
-
-    for key in configuration:
-
-      if key == "name":
-        self.name = configuration[key]
-      elif key == "input":
-        self.__input_size = configuration[key]
-      elif key=="num_class":
-        self.__num_class = configuration[key]
-      elif key=="hardware":
-        for k in configuration[key]:
-          self.hardware_description[k] = configuration[key][k]
-      else:
-        layer_name = key
-
-        self.mem[layer_name] = None
-        self.spk[layer_name] = None
-
-        layer_config = configuration[key]
-
-        layer_model = layer_config["model"].lower()
-
-        if layer_model in neuron_model.keys():
-          self.layers[layer_name] = neuron_model[layer_model].from_dict(layer_config, spike_grad)
-        else:
-          print(f"{layer_model} is unknow")
-
+  def __train(self, mode, quant, hardware, fpga):
 
-  def hardware_estimation(self):
-    """
-    Set hardware estimation flag
-    """
+    self.hardware_estimation_flag = hardware
+    self.fpga_eval_flag = fpga
     
-    self.eval()
-    self.__hardware_estimation_flag = True
-    for layer in self.layers:
-      self.layers[layer].hardware_estimation_flag = True
-      self.layers[layer].quantize()
+    for m in self.modules():
+      if isinstance(m, ModNEFNeuron):
+        m.hardware_estimation(hardware)
+        m.set_quant(quant)
 
-  def train(self, mode : bool = True):
+    return super().train(mode=mode)
+
+  def train(self, mode : bool = True, quant : bool = True):
     """
     Set neuron model for trainning
     """
-    
-    self.__hardware_estimation_flag = False
-    self.__fpga_eval_flag = False
-    for layer in self.layers:
-      self.layers[layer].hardware_estimation_flag = False
-    return super().train(mode=mode)
 
-  def eval(self):
+    return self.__train(mode=mode, quant=quant, hardware=False, fpga=False)
+  
+
+  def eval(self,  quant : bool = False):
     """
     Set neuron model for evaluation
     """
 
-    self.__hardware_estimation_flag = False
-    self.__fpga_eval_flag = False
-    for layer in self.layers:
-      self.layers[layer].hardware_estimation_flag = False
-    return super().eval()
+    return self.__train(mode=False, quant=quant, hardware=False, fpga=False)
   
   def fpga_eval(self, board_path, driver_config = "./driver.yml"):
     """
@@ -196,17 +114,15 @@ class ModNEFModel(nn.Module):
 
     if self.driver==None:
       self.driver = load_driver_from_yaml(driver_config, board_path)
-    self.eval()
-    self.__fpga_eval_flag=True
-
-
-  def init_mem(self):
+    return self.__train(mode=False, quant=False, hardware=False, fpga=True)
+  
+  def hardware_estimation(self):
     """
-    Initialize layers memory
+    Set hardware estimation flag
     """
+    
+    return self.__train(mode=False, quant=True, hardware=True, fpga=False)
 
-    for layer in self.layers:
-      self.spk[layer], self.mem[layer] = self.layers[layer].reset_mem()
 
   def close_driver(self):
     """
@@ -231,12 +147,12 @@ class ModNEFModel(nn.Module):
       output_spike, output_mem
     """
 
-    if self.__fpga_eval_flag:
-      return self.__fpga_forward(input_spikes)
+    if self.fpga_eval_flag:
+      return self._fpga_forward(input_spikes)
     else:
-      return self.__computer_forward(input_spikes)
+      return self.software_forward(input_spikes)
   
-  def __fpga_forward(self, input_spikes):
+  def _fpga_forward(self, input_spikes):
     """
     Transmit input spike to FPGA
 
@@ -273,7 +189,7 @@ class ModNEFModel(nn.Module):
     return torch.tensor(batch_result).permute(1, 0, 2), None
       
 
-  def __computer_forward(self, input_spikes):
+  def software_forward(self, input_spikes):
     """
     Run layers upate
 
@@ -288,51 +204,8 @@ class ModNEFModel(nn.Module):
       output_spike, output_mem
     """
 
-    self.init_mem()
-
-    batch_size = input_spikes.shape[0]
-    n_steps = input_spikes.shape[1]
-
-    spk_rec = []
-    mem_rec = []
-
-    if self.__hardware_estimation_flag:
-      spike_count = torch.zeros((batch_size), device=input_spikes.device)
-
-    for step in range(n_steps):
-      last_layer = None
-      x = input_spikes[:,step].reshape(batch_size, -1)
-
-      if self.__hardware_estimation_flag:
-        spike_count += torch.sum(x, dim=1)
-
-      for layer in self.layers:
-        if last_layer==None:
-          self.spk[layer], self.mem[layer] = self.layers[layer](input_=x, spk=self.spk[layer], mem=self.mem[layer])
-        else:
-          self.spk[layer], self.mem[layer] = self.layers[layer](input_=self.spk[last_layer], spk=self.spk[layer], mem=self.mem[layer])
-        last_layer = layer
-
-      spk_rec.append(self.spk[last_layer])
-      mem_rec.append(self.mem[last_layer])
-
-    if self.__hardware_estimation_flag:
-      self.hardware_description["queue_read_depth"] = int(max(self.hardware_description["queue_read_depth"], torch.max(spike_count).item()))
-      self.hardware_description["queue_write_depth"] = int(max(self.hardware_description["queue_write_depth"], (n_steps+len(self.layers))*self.__num_class))
+    raise NotImplementedError()
 
-    return torch.stack(spk_rec, dim=0), torch.stack(mem_rec, dim=0)
-  
-  def set_layer_hardware(self, config):
-    """
-    Set hardware description to all layers
-
-    Parameters
-    ----------
-    config: dict
-      hardware configuration
-    """
-    for k in config:
-      self.layers[k].set_hardware(config[k])
   
   def to_vhdl(self, file_name=None, output_path = ".", driver_config_path = "./driver.yml"):
     """
@@ -349,42 +222,4 @@ class ModNEFModel(nn.Module):
       driver configuration file
     """
     
-    if file_name==None:
-      file_name = f"{output_path}/{self.name}.vhd"
-
-    builder = ModNEFBuilder(self.name, self.__input_size, self.__num_class)
-
-    self.hardware_description["queue_read_depth"] += ceil(log(self.hardware_description["queue_read_depth"])/log(256))
-    self.hardware_description["queue_write_depth"] += ceil(log(self.hardware_description["queue_write_depth"])/log(256))
-
-    self.hardware_description["queue_read_depth"] = 2**(ceil(log(self.hardware_description["queue_read_depth"])/log(2)))
-    self.hardware_description["queue_write_depth"] = 2**(ceil(log(self.hardware_description["queue_write_depth"])/log(2)))
-
-
-    uart = Uart_XStep(
-      name="uart",
-      input_layer_size=self.__input_size,
-      output_layer_size=self.__num_class,
-      clk_freq=self.hardware_description["clock_freq"],
-      baud_rate=self.hardware_description["baud_rate"],
-      queue_read_depth=self.hardware_description["queue_read_depth"],
-      queue_write_depth=self.hardware_description["queue_write_depth"],
-      tx_name=self.hardware_description["txd"],
-      rx_name=self.hardware_description["rxd"]
-    )
-
-    builder.add_module(uart)
-    builder.set_io(uart)
-
-    last_module = uart
-
-    for layer in self.layers:
-      module = self.layers[layer].get_builder_module(layer, output_path)
-      builder.add_module(module)
-      builder.add_link(last_module, module)
-      last_module = module
-
-    builder.add_link(last_module, uart)
-
-    builder.get_driver_yaml(f"{output_path}/{driver_config_path}")
-    builder.to_vhdl(file_name, self.hardware_description["clock_name"])
\ No newline at end of file
+    raise NotImplementedError()
\ No newline at end of file
diff --git a/modneflib/modnef/modnef_torch/model_builder.py b/modneflib/modnef/modnef_torch/model_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e50060e1e3f6cae2f09c8c6cc1e5af3b92e6b33
--- /dev/null
+++ b/modneflib/modnef/modnef_torch/model_builder.py
@@ -0,0 +1,327 @@
+"""
+File name: model_builder
+Author: Aurélie Saulquin  
+Version: 0.1.0
+License: GPL-3.0-or-later
+Contact: aurelie.saulquin@univ-lille.fr
+Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron
+Descriptions: ModNEF SNN Model
+"""
+
+import modnef.modnef_torch.modnef_neurons as mn
+import torch.nn as nn
+import torch
+from modnef.arch_builder import *
+from modnef.modnef_driver import load_driver_from_yaml
+from math import ceil, log
+from snntorch.surrogate import fast_sigmoid
+import json
+from modnef.modnef_torch.model import ModNEFModel
+
+neuron_model = {
+  "blif" : mn.BLIF,
+  "rblif" : mn.RBLIF,
+  "slif" : mn.SLIF,
+  "rslif" : mn.RSLIF,
+  "shiftlif" : mn.ShiftLIF,
+  "rshiftlif" : mn.RShiftLIF
+}
+
+
+class ModNEFModelBuilder(ModNEFModel):
+  """
+  ModNEF snntorch model
+
+  Attributes
+  ----------
+  name : str
+    name of model
+  layers : torch.nn.ModuleDict
+    layers of model
+  mem : dict
+    membrane voltage memory of all layers
+  spk : dict
+    output spikes of all layers
+  configuration : dict
+    model configuration dictionary
+  driver : ModNEFDriver
+    ModNEF FPGA Driver
+  hardware_description : Dict
+    hardware description for VHDL generation
+
+  Methods
+  -------
+  build(configuration, spike_grad)
+    Build layers of model (call during initialization)
+  hardware_estimation()
+    Set hardware estimation flag
+  train()
+    Set neuron model for training
+  eval()
+    Set neuron model for evaluating
+  fpga_eval()
+    Set neuron model FPGA evaluation
+  init_mem()
+    Initialize layers memory
+  close_driver()
+    Close FPGA driver
+  forward(input_spikes)
+    Run layers update
+  set_layer_hardware(config)
+    Set hardware description to all layers
+  to_vhdl()
+    Generate VHDL file of model
+  """
+
+  def __init__(self, 
+               configuration, 
+               spike_grad = fast_sigmoid(slope=25)
+               ):
+    """
+    Initialize class
+
+    Parameters
+    ----------
+    configuration : dict
+      model configuration dictionary
+    spike_grad : function
+      surrogate gradient
+    """
+
+    super().__init__()
+
+    self.name = "MySNN"
+
+    self.layers = nn.ModuleDict()
+
+    self.mem = {}
+    self.spk = {}
+
+    self.__input_size = -1
+    self.__num_class = -1
+
+    if type(configuration) != dict:
+      configuration = json.load(open(configuration, 'r'))
+
+    self.configuration = configuration
+
+
+    self.hardware_description = {
+      "clock_name" : "clock",
+      "clock_freq" : 125_000_000,
+      "baud_rate" : 921_600,
+      "txd" : "uart_txd",
+      "rxd" : "uart_rxd",
+      "queue_read_depth" : 4096,
+      "queue_write_depth" : 1024
+    }
+
+    self.build(configuration=self.configuration, spike_grad=spike_grad)
+
+  def build(self, configuration, spike_grad):
+    """
+    Build layers of model (call during initialization)
+
+    Parameters
+    ----------
+    configuration : dict
+      model configuration dictionary
+    spike_grad : function
+      surrogate gradient
+    """
+
+    for key in configuration:
+
+      if key == "name":
+        self.name = configuration[key]
+      elif key == "input":
+        self.__input_size = configuration[key]
+      elif key=="num_class":
+        self.__num_class = configuration[key]
+      elif key=="hardware":
+        for k in configuration[key]:
+          self.hardware_description[k] = configuration[key][k]
+      else:
+        layer_name = key
+
+        self.mem[layer_name] = None
+        self.spk[layer_name] = None
+
+        layer_config = configuration[key]
+
+        layer_model = layer_config["model"].lower()
+
+        if layer_model in neuron_model.keys():
+          self.layers[layer_name] = neuron_model[layer_model].from_dict(layer_config, spike_grad)
+        else:
+          print(f"{layer_model} is unknow")
+
+
+  def init_mem(self):
+    """
+    Initialize layers memory
+    """
+
+    for layer in self.layers:
+      self.spk[layer], self.mem[layer] = self.layers[layer].reset_mem()
+
+  def close_driver(self):
+    """
+    Close FPGA driver
+    """
+
+    if self.driver != None:
+      self.driver.close()
+  
+  def fpga_forward(self, input_spikes):
+    """
+    Transmit input spike to FPGA
+
+    Parameters
+    ----------
+    input_spikes : Tensor
+      input spikes
+
+    Returns
+    -------
+    tuple of tensor
+      output_spike, None
+    """
+
+    def to_aer(input):
+      input = input.reshape(-1).to(torch.int32)
+
+      aer = []
+      for i in range(input.shape[0]):
+        for _ in range(input[i]):
+          aer.append(i)
+
+      return aer
+    
+    if self.driver == None:
+      raise Exception("please open fpga driver before")
+    
+    batch_result = []
+
+    for sample in input_spikes:
+      sample_res = self.driver.run_sample(sample, to_aer, True, len(self.layers))
+      batch_result.append([sample_res])
+
+    return torch.tensor(batch_result).permute(1, 0, 2), None
+      
+
+  def software_forward(self, input_spikes):
+    """
+    Run layers update
+
+    Parameters
+    ----------
+    input_spikes : Tensor
+      input spikes
+
+    Returns
+    -------
+    tuple of tensor
+      output_spike, output_mem
+    """
+
+    self.init_mem()
+
+    batch_size = input_spikes.shape[0]
+    n_steps = input_spikes.shape[1]
+
+    spk_rec = []
+    mem_rec = []
+
+    if self.hardware_estimation_flag:
+      spike_count = torch.zeros((batch_size), device=input_spikes.device)
+
+    for step in range(n_steps):
+      last_layer = None
+      x = input_spikes[:,step].reshape(batch_size, -1)
+
+      if self.hardware_estimation_flag:
+        spike_count += torch.sum(x, dim=1)
+
+      for layer in self.layers:
+        if last_layer==None:
+          self.spk[layer], self.mem[layer] = self.layers[layer](input_=x, spk=self.spk[layer], mem=self.mem[layer])
+        else:
+          self.spk[layer], self.mem[layer] = self.layers[layer](input_=self.spk[last_layer], spk=self.spk[layer], mem=self.mem[layer])
+        last_layer = layer
+
+      spk_rec.append(self.spk[last_layer])
+      mem_rec.append(self.mem[last_layer])
+
+    if self.hardware_estimation_flag:
+      self.hardware_description["queue_read_depth"] = int(max(self.hardware_description["queue_read_depth"], torch.max(spike_count).item()))
+      self.hardware_description["queue_write_depth"] = int(max(self.hardware_description["queue_write_depth"], (n_steps+len(self.layers))*self.__num_class))
+
+    return torch.stack(spk_rec, dim=0), torch.stack(mem_rec, dim=0)
+  
+  def set_layer_hardware(self, config):
+    """
+    Set hardware description to all layers
+
+    Parameters
+    ----------
+    config: dict
+      hardware configuration
+    """
+    for k in config:
+      self.layers[k].set_hardware(config[k])
+  
+  def to_vhdl(self, file_name=None, output_path = ".", driver_config_path = "./driver.yml"):
+    """
+    Generate VHDL file of model
+
+    Parameters
+    ----------
+    file_name = None : str
+      VHDL file name
+      if default, file name is model name
+    output_path = "." : str
+      output file path
+    driver_config_path = "./driver.yml" : str
+      driver configuration file
+    """
+    
+    if file_name==None:
+      file_name = f"{output_path}/{self.name}.vhd"
+
+    builder = ModNEFBuilder(self.name, self.__input_size, self.__num_class)
+
+    self.hardware_description["queue_read_depth"] += ceil(log(self.hardware_description["queue_read_depth"])/log(256))
+    self.hardware_description["queue_write_depth"] += ceil(log(self.hardware_description["queue_write_depth"])/log(256))
+
+    self.hardware_description["queue_read_depth"] = 2**(ceil(log(self.hardware_description["queue_read_depth"])/log(2)))
+    self.hardware_description["queue_write_depth"] = 2**(ceil(log(self.hardware_description["queue_write_depth"])/log(2)))
+
+
+    uart = Uart_XStep(
+      name="uart",
+      input_layer_size=self.__input_size,
+      output_layer_size=self.__num_class,
+      clk_freq=self.hardware_description["clock_freq"],
+      baud_rate=self.hardware_description["baud_rate"],
+      queue_read_depth=self.hardware_description["queue_read_depth"],
+      queue_write_depth=self.hardware_description["queue_write_depth"],
+      tx_name=self.hardware_description["txd"],
+      rx_name=self.hardware_description["rxd"]
+    )
+
+    builder.add_module(uart)
+    builder.set_io(uart)
+
+    last_module = uart
+
+    for layer in self.layers:
+      module = self.layers[layer].get_builder_module(layer, output_path)
+      builder.add_module(module)
+      builder.add_link(last_module, module)
+      last_module = module
+
+    builder.add_link(last_module, uart)
+
+    builder.get_driver_yaml(f"{output_path}/{driver_config_path}")
+    builder.to_vhdl(file_name, self.hardware_description["clock_name"])
\ No newline at end of file
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/__pycache__/modnef_torch_neuron.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/__pycache__/modnef_torch_neuron.cpython-310.pyc
index c3dbe4e4212a44871a1e6d5e960688f70786b897..895a5aacc1efbab760c5e04db68a67bb7652872b 100644
Binary files a/modneflib/modnef/modnef_torch/modnef_neurons/__pycache__/modnef_torch_neuron.cpython-310.pyc and b/modneflib/modnef/modnef_torch/modnef_neurons/__pycache__/modnef_torch_neuron.cpython-310.pyc differ
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/blif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/blif.cpython-310.pyc
index db3f256ec621749ba0f90484169a3275e89add61..0da4515400f65ec51a49a88e508075b209f54f51 100644
Binary files a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/blif.cpython-310.pyc and b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/blif.cpython-310.pyc differ
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/rblif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/rblif.cpython-310.pyc
index 54b42d98c6799fa9d9b1117e5317ff4c885c47a4..ce3b0d27934dac2dfcd67a029307b2f42181b0e5 100644
Binary files a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/rblif.cpython-310.pyc and b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/rblif.cpython-310.pyc differ
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
index 5398751de2e6adafbe022ae23eac1b87281ddb01..653e417b03a6ddeb49b1d216aacea3b420b600d3 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
@@ -239,7 +239,7 @@ class BLIF(Leaky, ModNEFNeuron):
     if not self.mem.shape == input_.shape:
       self.mem = torch.zeros_like(input_, device=self.mem.device)
 
-    if self.hardware_estimation_flag:
+    if self.quantization_flag:
       input_.data = self.quantizer(input_.data, True)
       self.mem.data = self.quantizer(self.mem.data, True)
 
@@ -335,7 +335,7 @@ class BLIF(Leaky, ModNEFNeuron):
     self.threshold.data = self.quantizer(self.threshold.data, True, dtype)
     self.beta.data = self.quantizer(self.beta.data, True, dtype)
 
-  def quantize(self, dtype=torch.int32):
+  def quantize(self, force_init=False, dtype=torch.int32):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
@@ -345,8 +345,12 @@ class BLIF(Leaky, ModNEFNeuron):
       type use during quantization
     """
     
+    if force_init or not self.quantizer.is_initialize:
+      self.quantizer.init_from_weight(self.fc.weight)
+
     self.quantize_weight(dtype)
     self.quantize_parameters(dtype)
+    self.quantization_flag = True
 
   @classmethod
   def detach_hidden(cls):
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
index 96c150cd73c5dd18af5ea542c2e38ced3376789b..44763561942f8c0a4f1ae057807e4882a4025eb2 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
@@ -253,7 +253,7 @@ class RBLIF(Leaky, ModNEFNeuron):
 
     rec = self.reccurent(self.spk)
 
-    if self.hardware_estimation_flag:
+    if self.quantization_flag:
       self.mem.data = self.quantizer(self.mem.data, True)
       input_.data = self.quantizer(input_.data, True)
       rec.data = self.quantizer(rec.data, True)
@@ -350,7 +350,7 @@ class RBLIF(Leaky, ModNEFNeuron):
     self.threshold.data = self.quantizer(self.threshold.data, True, dtype)
     self.beta.data = self.quantizer(self.beta.data, True, dtype)
 
-  def quantize(self, dtype=torch.int32):
+  def quantize(self, force_init=False, dtype=torch.int32):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
@@ -359,9 +359,13 @@ class RBLIF(Leaky, ModNEFNeuron):
     dtype = torch.int32 : dtype
       type use during quantization
     """
+    
+    if force_init or not self.quantizer.is_initialize:
+      self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
 
     self.quantize_weight(dtype)
     self.quantize_parameters(dtype)
+    self.quantization_flag = True
 
   @classmethod
   def detach_hidden(cls):
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py b/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
index c3942719cc5f0cd2d537c2d34899ddbb7abf4c4a..dbfb48aebf08ec0f94598443326c3fc9ab412ce8 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
@@ -44,6 +44,7 @@ class ModNEFNeuron():
 
   def __init__(self, quantizer : Quantizer):
     self.hardware_estimation_flag = False
+    self.quantization_flag = False
 
     self.val_min = torch.as_tensor(torch.finfo(torch.float32).max)
     self.val_max = torch.as_tensor(torch.finfo(torch.float32).min)
@@ -78,12 +79,33 @@ class ModNEFNeuron():
     
     raise NotImplementedError()
   
-  def hardware_estimation(self):
+  def quantize(self, force_init=False, dtype=torch.int32):
+    """
+    Quantize synaptic weight and neuron hyper-parameters
+
+    Parameters
+    ----------
+    dtype = torch.int32 : dtype
+      type use during quantization
+    """
+    
+    raise NotImplementedError()
+  
+  def set_quant(self, mode=False):
+    self.quantization_flag = mode
+
+    # if mode:
+    #   self.quantize(False)
+  
+  def hardware_estimation(self, mode = False):
     """
     Toggle hardware estimation calculation
     """
     
-    self.hardware_estimation_flag = not self.hardware_estimation_flag
+    self.hardware_estimation_flag = mode
+
+    if mode:
+      self.set_quant(mode)
   
   def get_builder_module(self, module_name : str, output_path : str = "."):
     """
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/rslif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/rslif.cpython-310.pyc
index 1c2bb1928e74bd03a4756bfe3f1dd202bdc63a1c..c689156476790470b9b7d39d1f684d72cbc014cc 100644
Binary files a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/rslif.cpython-310.pyc and b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/rslif.cpython-310.pyc differ
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/slif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/slif.cpython-310.pyc
index b2b7a8f662dd25e520f374167f6a5c09dda32334..56a8d34cf4cbbbb57ba5f63a00f4e82af30ca69b 100644
Binary files a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/slif.cpython-310.pyc and b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/slif.cpython-310.pyc differ
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
index cdbbc04f9201726ad2dd3288366c8f43ae15abc6..fd5b994f04c87ffe86c3c142a6ea65da232236f7 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
@@ -267,7 +267,7 @@ class RSLIF(LIF, ModNEFNeuron):
 
     rec_input = self.reccurent(self.spk)
 
-    if self.hardware_estimation_flag:
+    if self.quantization_flag:
       input_.data = self.quantizer(input_.data, True)
       rec_input.data = self.quantizer(rec_input.data, True)
       self.mem = self.quantizer(self.mem.data, True)
@@ -370,7 +370,7 @@ class RSLIF(LIF, ModNEFNeuron):
     self.v_rest.data = self.quantizer(self.v_rest.data, True, dtype)
     self.threshold.data = self.quantizer(self.threshold, True, dtype)
 
-  def quantize(self, dtype=torch.int32):
+  def quantize(self, force_init=False, dtype=torch.int32):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
@@ -379,9 +379,13 @@ class RSLIF(LIF, ModNEFNeuron):
     dtype = torch.int32 : dtype
       type use during quantization
     """
+    
+    if force_init or not self.quantizer.is_initialize:
+      self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
 
     self.quantize_weight(dtype)
     self.quantize_parameters(dtype)
+    self.quantization_flag = True
   
   @classmethod
   def detach_hidden(cls):
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
index 71e3ac10592c3535e71d70278a07eecf96d53e60..a7b89ebc3012f273badcc203dd67c4806b19d4fc 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
@@ -256,7 +256,7 @@ class SLIF(LIF, ModNEFNeuron):
     if not self.mem.shape == input_.shape:
       self.mem = torch.ones_like(input_)*self.v_rest
 
-    if self.hardware_estimation_flag:
+    if self.quantization_flag:
       input_.data = self.quantizer(input_.data, True)
       self.mem.data = self.quantizer(self.mem.data, True)
 
@@ -358,7 +358,7 @@ class SLIF(LIF, ModNEFNeuron):
     self.v_rest.data = self.quantizer(self.v_rest.data, True, dtype)
     self.threshold.data = self.quantizer(self.threshold, True, dtype)
 
-  def quantize(self, dtype=torch.int32):
+  def quantize(self, force_init=False, dtype=torch.int32):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
@@ -368,8 +368,12 @@ class SLIF(LIF, ModNEFNeuron):
       type use during quantization
     """
     
+    if force_init or not self.quantizer.is_initialize:
+      self.quantizer.init_from_weight(self.fc.weight)
+
     self.quantize_weight(dtype)
     self.quantize_parameters(dtype)
+    self.quantization_flag = True
 
   
   @classmethod
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/rshiftlif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/rshiftlif.cpython-310.pyc
index b895a0cb0c7fb3bd699e0fddcc66c8e3c6011f4b..fc3997c992141b7289ae5141eb50036c7311aa12 100644
Binary files a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/rshiftlif.cpython-310.pyc and b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/rshiftlif.cpython-310.pyc differ
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/shiftlif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/shiftlif.cpython-310.pyc
index 51207de58951388697859e3db0354711ffcf5ca0..70eb665a7242039cc59cb7aeb23a76d5ccffe1f7 100644
Binary files a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/shiftlif.cpython-310.pyc and b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/shiftlif.cpython-310.pyc differ
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
index 85eba375181d41eade9e03837a6f435195b18e6d..1def1cd4edf2e3c0946e72e3158cfa120dd52098 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
@@ -268,7 +268,7 @@ class RShiftLIF(LIF, ModNEFNeuron):
 
     rec_input = self.reccurent(self.spk)
 
-    if self.hardware_estimation_flag:
+    if self.quantization_flag:
       self.mem.data = self.quantizer(self.mem.data, True)
       input_.data = self.quantizer(input_.data, True)
       rec_input.data = self.quantizer(rec_input.data, True)
@@ -368,7 +368,7 @@ class RShiftLIF(LIF, ModNEFNeuron):
     self.threshold.data = self.quantizer(self.threshold.data, True, dtype)
     self.beta.data = self.quantizer(self.beta.data, True, dtype)
 
-  def quantize(self, dtype=torch.int32):
+  def quantize(self, force_init=False, dtype=torch.int32):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
@@ -377,9 +377,13 @@ class RShiftLIF(LIF, ModNEFNeuron):
     dtype = torch.int32 : dtype
       type use during quantization
     """
+    
+    if force_init or not self.quantizer.is_initialize:
+      self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
 
     self.quantize_weight(dtype)
     self.quantize_parameters(dtype)
+    self.quantization_flag = True
 
   @classmethod
   def detach_hidden(cls):
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py
index 621a3609a0c6b6bbb15b523b8ee48eeec33c7284..c798c99c7af0bf1c7c11dc28092a68d6db0466b6 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py
@@ -256,7 +256,7 @@ class ShiftLIF(LIF, ModNEFNeuron):
 
     self.reset = self.mem_reset(self.mem)
 
-    if self.hardware_estimation_flag:
+    if self.quantization_flag:
       self.mem.data = self.quantizer(self.mem.data, True)
       input_.data = self.quantizer(input_.data, True)
 
@@ -352,7 +352,7 @@ class ShiftLIF(LIF, ModNEFNeuron):
     self.threshold.data = self.quantizer(self.threshold.data, True, dtype)
     self.beta.data = self.quantizer(self.beta.data, True, dtype)
     
-  def quantize(self, dtype=torch.int32):
+  def quantize(self, force_init=False, dtype=torch.int32):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
@@ -361,9 +361,13 @@ class ShiftLIF(LIF, ModNEFNeuron):
     dtype = torch.int32 : dtype
       type use during quantization
     """
+    
+    if force_init or not self.quantizer.is_initialize:
+      self.quantizer.init_from_weight(self.fc.weight)
 
     self.quantize_weight(dtype)
     self.quantize_parameters(dtype)
+    self.quantization_flag = True
 
   @classmethod
   def detach_hidden(cls):
diff --git a/modneflib/modnef/modnef_torch/trainer.py b/modneflib/modnef/modnef_torch/trainer.py
index 244b6bdc9aacbe32f3f4d730ea75b7d490cc88c9..7fb4704a21cad4f2b757088dc0de31b8d0787124 100644
--- a/modneflib/modnef/modnef_torch/trainer.py
+++ b/modneflib/modnef/modnef_torch/trainer.py
@@ -13,6 +13,7 @@ from snntorch import surrogate
 import torch
 spike_grad = surrogate.fast_sigmoid(slope=25)
 from tqdm import tqdm
+from .model_builder import ModNEFModelBuilder
 from .model import ModNEFModel
 import numpy as np
 
@@ -77,7 +78,7 @@ class ModNEFTrainer():
     """
     
     if type(model) != ModNEFModel:
-      self.model = ModNEFModel(model)
+      self.model = ModNEFModelBuilder(model)
     else:
       self.model = model
 
@@ -262,6 +263,11 @@ class ModNEFTrainer():
       if self.verbose:
         loader.set_postfix_str(f"Accuracy : {np.mean(correct/total*100):0<3.2f}")
 
+      del data
+      del target
+      del spk_rec
+      del mem_rec
+
     y_true = torch.stack(y_true).reshape(-1)
     y_pred = torch.stack(y_pred).reshape(-1)
     
diff --git a/modneflib/modnef/quantizer/quantizer.py b/modneflib/modnef/quantizer/quantizer.py
index 275ca29e6222111b8d579bd492c2d7134995545c..898ec98786a87b5db117847f4a2ceb3148e77554 100644
--- a/modneflib/modnef/quantizer/quantizer.py
+++ b/modneflib/modnef/quantizer/quantizer.py
@@ -86,7 +86,7 @@ class Quantizer():
     
     raise NotImplementedError()
 
-  def __call__(self, data, unscale=False, dtype=torch.int32):
+  def __call__(self, data, unscale=False, dtype=torch.int16):
     """
     Call quantization function
 
diff --git a/modneflib/modnef/templates/evaluation.py b/modneflib/modnef/templates/evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..40d71473ba39069a367c214a2ae76e0c3b2da933
--- /dev/null
+++ b/modneflib/modnef/templates/evaluation.py
@@ -0,0 +1,98 @@
+import tonic
+from torch.utils.data import DataLoader
+from modnef.modnef_torch import ModNEFModelBuilder
+import os
+from snntorch.surrogate import fast_sigmoid
+from run_lib import *
+import sys
+
+if __name__ == "__main__":
+
+  """Experience name"""
+  exp_name = "Evaluation"
+
+  """Model definition"""
+  model_path = "model_template.json"
+  best_model_name = "best_model"
+
+  model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+
+  model.load_state_dict(torch.load(best_model_name))
+
+
+  """Kind of run
+  eval : full precision evaluation
+  qeval : quantized evaluation
+  heval : hardware estimation evaluation
+  feval : FPGA eval
+  """
+  
+  kind = sys.argv[1] if len(sys.argv) > 1 else sys.exit("usage: evaluation.py [eval|qeval|heval|feval]")
+
+  """Device definition"""
+  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
+  """Evaluation variable definition"""
+  verbose = True
+  # (save_conf_matrix is defined once in the output variable section below)
+  output_path = "."
+
+  """Output variable definition"""
+  acc = 0.0
+  y_true = None
+  y_pred = None
+  save_conf_matrix = False
+  conf_matrix_file = "confusion_matrix.png"
+  conf_matrix_classes = [str(i) for i in range(10)]
+  
+  """DataSet Definition"""
+  dataset_path = f"{os.environ['HOME']}/datasets"
+
+  # data set definition, change to your dataset
+  sensor_size = tonic.datasets.NMNIST.sensor_size
+  frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=10)
+
+  test_set = tonic.datasets.NMNIST(save_to=dataset_path, train=False, transform=frame_transform)
+
+  # batch loader
+  batch_size = 64
+  
+  testLoader = DataLoader(test_set, batch_size=batch_size, shuffle=True, drop_last = True, collate_fn = tonic.collation.PadTensors(batch_first=True))
+
+  if kind == "eval":
+    acc, y_pred, y_true = evaluation(
+      model=model, 
+      testLoader=testLoader, 
+      name=exp_name, 
+      device=device, 
+      verbose=verbose
+      )
+  elif kind == "qeval":
+    # TODO
+    raise NotImplementedError()
+  elif kind == "heval":
+    acc, y_pred, y_true = hardware_estimation(
+      model=model, 
+      testLoader=testLoader, 
+      name=exp_name, 
+      device=device, 
+      verbose=verbose
+      )
+  elif kind == "feval":
+    acc, y_pred, y_true = fpga_evaluation(
+      model=model, 
+      testLoader=testLoader, 
+      name=exp_name, 
+      verbose=verbose
+      )
+  else:
+    print(f"{kind} not supported")
+    exit(-1)
+
+  if save_conf_matrix:
+    confusion_matrix(
+      y_true=y_true,
+      y_pred=y_pred,
+      file_name=conf_matrix_file,
+      classes=conf_matrix_classes
+      )
\ No newline at end of file
diff --git a/modneflib/modnef/templates/main.py b/modneflib/modnef/templates/main.py
deleted file mode 100644
index 88f281233b36d51f30aa272d81cb305c129e71db..0000000000000000000000000000000000000000
--- a/modneflib/modnef/templates/main.py
+++ /dev/null
@@ -1,29 +0,0 @@
-
-import tonic
-from torch.utils.data import DataLoader
-import sys
-import os
-from modnef.modnef_torch import ModNEFExecutor
-
-if __name__ == "__main__":
-
-  # dataset path
-  dataset_path = f"{os.environ['HOME']}/datasets"
-
-  # data set definition, change to your dataset
-  sensor_size = tonic.datasets.NMNIST.sensor_size
-  frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=10)
-
-  train_set = tonic.datasets.NMNIST(save_to=dataset_path, train=True, transform=frame_transform)
-  test_set = tonic.datasets.NMNIST(save_to=dataset_path, train=False, transform=frame_transform)
-
-  # batch loader
-  batch_size = 64
-  trainloader = DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last = True, collate_fn = tonic.collation.PadTensors(batch_first=True))
-  testloader = DataLoader(test_set, batch_size=batch_size, shuffle=True, drop_last = True, collate_fn = tonic.collation.PadTensors(batch_first=True))
-
-  # execution variable
-  model_path = sys.argv[1]
-  exe_command = sys.argv[2:]
-
-  ModNEFExecutor(model_path, exe_command, testloader, trainloader)
\ No newline at end of file
diff --git a/modneflib/modnef/templates/model_template.json b/modneflib/modnef/templates/model_template.json
new file mode 100644
index 0000000000000000000000000000000000000000..31fd483bafa2366531f188f883e75fa120e46398
--- /dev/null
+++ b/modneflib/modnef/templates/model_template.json
@@ -0,0 +1,81 @@
+{
+  "name" : "blif_model",
+
+  "input" : 2312,
+  "num_class" : 10,
+
+  "slif_layer" : {
+    "model" : "slif",
+    "in_features" : 2312,
+    "neurons" : 128,
+    "threshold" : 0.8,
+    "leak" : 0.015,
+    "min" : 0.0,
+    "rest" : 0.0,
+    "quantizer" : {
+      "kind" : "MinMaxQuantizer",
+      "parameters" : {
+        "bitwidth" : 8,
+        "signed" : null
+      }
+    },
+    "hardware" : {
+      "strategy" : "Sequential",
+      "mem_init_file" : null,
+      "variable_size" : -1
+    }
+  },
+
+  "shiftlif_layer" : {
+    "model" : "shiftlif",
+    "in_features" : 128,
+    "neurons" : 64,
+    "beta" : 0.875,
+    "threshold" : 0.8,
+    "reset_mechanism" : "subtract",
+    "quantizer" : {
+      "kind" : "DynamicScaleFactorQuantizer",
+      "parameters" : {
+        "bitwidth" : 8,
+        "signed" : null
+      }
+    },
+    "hardware" : {
+      "strategy" : "Parallel",
+      "mem_init_file" : null,
+      "variable_size" : -1
+    }
+  },
+
+  "blif_layer" : {
+    "model" : "blif",
+    "in_features" : 64,
+    "neurons" : 10,
+    "beta" : 0.9,
+    "threshold" : 0.8,
+    "reset_mechanism" : "subtract",
+    "quantizer" : {
+      "kind" : "FixedPointQuantizer",
+      "parameters" : {
+        "bitwidth" : 8,
+        "fixed_point" : -1,
+        "signed" : null
+      }
+    },
+    "hardware" : {
+      "strategy" : "Parallel",
+      "mem_init_file" : null,
+      "variable_size" : -1
+    }
+  },
+
+  "hardware" : {
+    "clock_name" : "clock",
+    "clock_freq" : 125000000,
+    "baud_rate" : 921600,
+    "txd" : "uart_txd",
+    "rxd" : "uart_rxd",
+    "queue_read_depth" : 4096,
+    "queue_write_depth" : 1024
+  }
+}
diff --git a/modneflib/modnef/templates/run_lib.py b/modneflib/modnef/templates/run_lib.py
new file mode 100644
index 0000000000000000000000000000000000000000..38bec42008e9bdeed92c9077ebc456d73502f5ea
--- /dev/null
+++ b/modneflib/modnef/templates/run_lib.py
@@ -0,0 +1,211 @@
+import torch.nn as nn
+from snntorch import surrogate
+import torch
+spike_grad = surrogate.fast_sigmoid(slope=25)
+from tqdm import tqdm
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+from sklearn.metrics import confusion_matrix
+import seaborn as sns
+
+
+def train_1_epoch(model, trainLoader, optimizer, loss, device, verbose):
+  epoch_loss = []
+
+  if verbose:
+    bar_format = "{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]"
+    loader = tqdm(trainLoader, desc="Train", bar_format=bar_format)
+  else:
+    loader = trainLoader
+
+  for _, (data, target) in enumerate(loader):
+    model.train()
+    data = data.to(device)
+    data = data.squeeze(0)
+    target = target.to(device)
+    
+    spk_rec, mem_rec = model(data)
+
+    loss_val = torch.zeros((1), dtype=torch.float, device=device)
+
+    for step in range(data.shape[1]):
+      loss_val += loss(mem_rec[step], target)
+
+    epoch_loss.append(loss_val.item())
+
+    model.zero_grad()
+    loss_val.backward()
+    optimizer.step()
+
+    if verbose:
+      loader.set_postfix_str(f"Loss : {np.mean(epoch_loss):0<3.2f}")
+
+
+  return np.mean(epoch_loss)
+
+def train(model, trainLoader, testLoader, optimizer, loss, device=torch.device("cpu"), validationLoader=None, n_epoch=10, best_model_name="best_model", verbose=True, save_plot=False, save_history=False, output_path="."):
+  avg_loss_history = []
+  acc_test_history = []
+  acc_val_history = []
+
+
+  best_acc = 0
+
+  model = model.to(device)
+
+  for epoch in range(n_epoch):
+    if verbose:
+      print(f"---------- Epoch : {epoch} ----------")
+    
+    epoch_loss = train_1_epoch(model=model, trainLoader=trainLoader, optimizer=optimizer, loss=loss, device=device, verbose=verbose)
+    avg_loss_history.append(epoch_loss)
+
+    if validationLoader!=None:
+      acc_val, _, _ = evaluation(model=model, testLoader=validationLoader, name="Validation", verbose=verbose, device=device)
+      acc_val_history.append(acc_val)
+
+    acc_test, _, _ = evaluation(model=model, testLoader=testLoader, name="Test", verbose=verbose, device=device)
+    acc_test_history.append(acc_test)
+
+    if best_model_name!="" and acc_test>best_acc:
+      torch.save(model.state_dict(), best_model_name)
+      best_acc = acc_test
+
+  if save_history:
+    np.save(f"{output_path}/loss.npy", np.array(avg_loss_history))
+    np.save(f"{output_path}/acc_test.npy", np.array(acc_test_history))
+    
+    if len(acc_val_history)!=0:
+      np.save(f"{output_path}/acc_validation.npy", np.array(acc_val_history))
+
+  if save_plot:
+    plt.figure()  # Create a new figure
+    plt.plot([i for i in range(n_epoch)], avg_loss_history)
+    plt.title('Average Loss')
+    plt.xlabel("Epoch")
+    plt.ylabel("Loss")
+    plt.savefig(f"{output_path}/loss.png")
+    
+    plt.figure()
+    if len(acc_val_history)!=0:
+      plt.plot([i for i in range(n_epoch)], acc_val_history, label="Validation")
+    plt.plot([i for i in range(n_epoch)], acc_test_history, label="Test")
+    if best_acc in acc_test_history: plt.scatter([acc_test_history.index(best_acc)], [best_acc], label="Best Accuracy")  # guard: best_acc stays 0 (and may be absent) when best_model_name==""
+    plt.legend()
+    plt.title("Accuracy")
+    plt.xlabel("Epoch")
+    plt.ylabel("Accuracy")
+    plt.savefig(f"{output_path}/accuracy.png")
+
+  return avg_loss_history, acc_val_history, acc_test_history, best_acc
+
+def __run_accuracy(model, testLoader, name, verbose, device):
+    """
+    Run inference
+
+    Parameters
+    ----------
+    testLoader
+      test dataset loader
+    name : str
+      name of inference
+
+    Returns
+    -------
+    (float, list, list)
+      accuracy
+      predicted class
+      true class
+    """
+
+    y_true = []
+    y_pred = []
+    correct = 0
+    total = 0
+
+    if verbose:
+      bar_format = "{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]"
+      loader = tqdm(testLoader, desc=name, bar_format=bar_format)
+    else:
+      loader = testLoader
+
+
+    for _, (data, target) in enumerate(loader):
+      
+      data = data.to(device)
+      target = target.to(device)
+
+      y_true.append(target)
+
+      spk_rec, mem_rec = model(data)
+
+      output = (spk_rec.sum(dim=0))/data.shape[1]
+      predicted = output.argmax(dim=1).to(device)
+      correct += predicted.eq(target.view_as(predicted)).sum().item()
+      y_pred.append(predicted)
+      total += target.size(0)
+
+      if verbose:
+        loader.set_postfix_str(f"Accuracy : {np.mean(correct/total*100):0<3.2f}")
+
+      del data
+      del target
+      del spk_rec
+      del mem_rec
+
+    y_true = torch.stack(y_true).reshape(-1)
+    y_pred = torch.stack(y_pred).reshape(-1)
+    
+      
+    return (correct/total), y_pred, y_true
+
+def evaluation(model, testLoader, name="Evaluation", device=torch.device("cpu"), verbose=False):
+  accuracy = 0
+  y_pred = []
+  y_true = []
+
+  model = model.to(device)
+
+  model.eval()
+
+  accuracy, y_pred, y_true = __run_accuracy(model=model, testLoader=testLoader, name=name, verbose=verbose, device=device)
+
+  return accuracy, y_pred, y_true   
+
+def hardware_estimation(model, testLoader, name="Hardware Estimation", device=torch.device("cpu"), verbose=False):
+  accuracy = 0
+  y_pred = []
+  y_true = []
+
+  model = model.to(device)
+
+  model.hardware_estimation()
+
+  accuracy, y_pred, y_true = __run_accuracy(model=model, testLoader=testLoader, name=name, verbose=verbose, device=device)
+
+  return accuracy, y_pred, y_true
+
+
+def fpga_evaluation(model, testLoader, name="FPGA Evaluation", verbose=False):
+  accuracy = 0
+  y_pred = []
+  y_true = []
+
+  device = torch.device("cpu")
+
+  model = model.to(device)
+
+  model.hardware_estimation()  # NOTE(review): FPGA evaluation calls hardware_estimation(), identical to hardware_estimation() above — confirm a real FPGA-run mode isn't intended here
+
+  accuracy, y_pred, y_true = __run_accuracy(model=model, testLoader=testLoader, name=name, verbose=verbose, device=device)
+
+  return accuracy, y_pred, y_true   
+
+def confusion_matrix(y_true, y_pred, file_name, classes):
+  from sklearn.metrics import confusion_matrix as sk_confusion_matrix; cm = sk_confusion_matrix(y_true, y_pred)  # fix: function name shadows the sklearn import, so the original call recursed into itself
+  df_cm = pd.DataFrame(cm / np.sum(cm, axis=1)[:, None], index = [i for i in classes], columns = [i for i in classes])
+  plt.figure()
+  sns.heatmap(df_cm, annot=True)
+  plt.title(f"Evaluation Confusion Matrix")
+  plt.savefig(file_name)
\ No newline at end of file
diff --git a/modneflib/modnef/templates/template_model.json b/modneflib/modnef/templates/template_model.json
deleted file mode 100644
index 83681fcc9dff55e34794273480a3640f0cddc024..0000000000000000000000000000000000000000
--- a/modneflib/modnef/templates/template_model.json
+++ /dev/null
@@ -1,134 +0,0 @@
-{
-  "exp_name" : "template", 
-
-  "model": {
-    "name" : "blif_model",
-
-    "input" : 2312,
-    "num_class" : 10,
-
-    "slif_layer" : {
-      "model" : "slif",
-      "in_features" : 2312,
-      "neurons" : 128,
-      "threshold" : 0.8,
-      "leak" : 0.015,
-      "min" : 0.0,
-      "rest" : 0.0,
-      "quantizer" : {
-        "kind" : "MinMaxQuantizer",
-        "parameters" : {
-          "bitwidth" : 8,
-          "signed" : null
-        }
-      },
-      "hardware" : {
-        "strategy" : "Sequential",
-        "mem_init_file" : null,
-        "variable_size" : -1
-      }
-    },
-
-    "shiftlif_layer" : {
-      "model" : "shiftlif",
-      "in_features" : 128,
-      "neurons" : 64,
-      "beta" : 0.875,
-      "threshold" : 0.8,
-      "reset_mechanism" : "subtract",
-      "quantizer" : {
-        "kind" : "DynamicScaleFactorQuantizer",
-        "parameters" : {
-          "bitwidth" : 8,
-          "signed" : null
-        }
-      },
-      "hardware" : {
-        "strategy" : "Parallel",
-        "mem_init_file" : null,
-        "variable_size" : -1
-      }
-    },
-
-    "blif_layer" : {
-      "model" : "blif",
-      "in_features" : 64,
-      "neurons" : 10,
-      "beta" : 0.9,
-      "threshold" : 0.8,
-      "reset_mechanism" : "subtract",
-      "quantizer" : {
-        "kind" : "FixedPointQuantizer",
-        "parameters" : {
-          "bitwidth" : 8,
-          "fixed_point" : -1,
-          "signed" : null
-        }
-      },
-      "hardware" : {
-        "strategy" : "Parallel",
-        "mem_init_file" : null,
-        "variable_size" : -1
-      }
-    },
-
-    "hardware" : {
-      "clock_name" : "clock",
-      "clock_freq" : 125000000,
-      "baud_rate" : 921600,
-      "txd" : "uart_txd",
-      "rxd" : "uart_rxd",
-      "queue_read_depth" : 4096,
-      "queue_write_depth" : 1024
-    }
-  },
-
-  "optimizer" : {
-    "lr" : 1e-4,
-    "betas" : [0.9, 0.99]
-  },
-
-  "best_model" : "best_my_model",
-
-  "verbose" : true,
-
-  "device" : "auto",
-
-  "train" : {
-    "name" : "Train",
-    "n_epoch" : 5,
-    "save_best_model" : true,
-    "save_history" : true,
-    "plot" : true
-  },
-
-  "eval" : {
-    "name" : "Evaluation",
-    "use_best_model" : true,
-    "conf_matrix" : true
-  },
-
-  "hardware_estimation" : {
-    "name" : "Hardware Estimation",
-    "use_best_model" : true,
-    "conf_matrix" : true
-  },
-
-  "vhdl" : {
-    "use_best_model" : false,
-    "driver_config" : "driver.yml",
-    "file_name" : null
-  },
-
-  "fpga" : {
-    "name" : "FPGA Evaluation",
-    "driver_config" : "driver.yml",
-    "board_path" : "board path here",
-    "conf_matrix" : true
-  },
-
-  "confusion_matrix" : {
-    "class" : ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
-  }
-
-}
\ No newline at end of file
diff --git a/modneflib/modnef/templates/train.py b/modneflib/modnef/templates/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a5bc0cf37461314aadec4d85cd580c28ddab691
--- /dev/null
+++ b/modneflib/modnef/templates/train.py
@@ -0,0 +1,62 @@
+import tonic
+from torch.utils.data import DataLoader
+from modnef.modnef_torch import ModNEFModelBuilder
+import os
+from snntorch.surrogate import fast_sigmoid
+from run_lib import *
+
+if __name__ == "__main__":
+
+  """Model definition"""
+  model_path = "model_template.json"
+  model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+
+  """Optimizer"""
+  optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999))
+
+  """Loss"""
+  loss = torch.nn.CrossEntropyLoss()
+
+  """Device definition"""
+  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
+  """Train variable definition"""
+  n_epoch = 10
+  best_model_name = "best_model"
+  verbose = True
+  save_plot = False
+  save_history = False
+  output_path = "."
+  
+  """DataSet Definition"""
+  dataset_path = f"{os.environ['HOME']}/datasets"
+
+  # data set definition, change to your dataset
+  sensor_size = tonic.datasets.NMNIST.sensor_size
+  frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=10)
+
+  train_set = tonic.datasets.NMNIST(save_to=dataset_path, train=True, transform=frame_transform)
+  test_set = tonic.datasets.NMNIST(save_to=dataset_path, train=False, transform=frame_transform)
+
+  # batch loader
+  batch_size = 64
+  
+  trainLoader = DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last = True, collate_fn = tonic.collation.PadTensors(batch_first=True))
+  testLoader = DataLoader(test_set, batch_size=batch_size, shuffle=True, drop_last = True, collate_fn = tonic.collation.PadTensors(batch_first=True))
+  validationLoader = None
+
+  train(
+    model=model, 
+    trainLoader=trainLoader, 
+    testLoader=testLoader, 
+    validationLoader=validationLoader,
+    optimizer=optimizer, 
+    loss=loss, 
+    device=device,  
+    n_epoch=n_epoch, 
+    best_model_name=best_model_name, 
+    verbose=verbose, 
+    save_plot=save_plot, 
+    save_history=save_history,
+    output_path=output_path
+    )
\ No newline at end of file