diff --git a/.gitignore b/.gitignore
index ad85eaeaea8d06dbf5e7d5fdd25a6041979a1eb0..fb20a1faf02095795fa73362119c99f0589684bc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,3 +16,4 @@ examples/*/*.vhd
 examples/debugger_example/*.txt
 examples/*/__pycache__
 
+__pycache__/
diff --git a/JSONConfiguration.md b/JSONConfiguration.md
new file mode 100644
index 0000000000000000000000000000000000000000..838606a737a81408079071ee55df3d9e59b36c1c
--- /dev/null
+++ b/JSONConfiguration.md
@@ -0,0 +1,247 @@
+# JSON File Configuration Explanation
+
+This file describes all the different JSON properties.
+A template file can be found inside **modneflib/modnef/template**. Additional commentary, including non-required properties, is provided in this file.
+
+## ModNEF Network Description
+
+ModNEF network model properties
+
+```json
+{
+  "name" : "model_name", // name of network model
+
+  "input" : 2312,   // number of input features
+  "num_class" : 10, // number of output class 
+
+  // insert layer description
+  // layer descriptions are given in the next section; a complete example follows the neuron model descriptions
+
+  "hardware" : {                // ModNEF VHDL hardware description 
+    "clock_name" : "clock",     // name of clock signal inside VHDL file
+    "clock_freq" : 125000000,   // clock frequency of FPGA internal clock
+    "baud_rate" : 921600,       // UART baud rate
+    "txd" : "uart_txd",         // UART txd signal name
+    "rxd" : "uart_rxd",         // UART rxd signal name
+    "queue_read_depth" : 4096,  /*Optional*/ // UART component queue read depth, can be ignored and will be computed during hardware estimation
+    "queue_write_depth" : 4096  /*Optional*/ // UART component queue write depth, can be ignored and will be computed during hardware estimation
+  }
+}
+```
+
+### Neuron Description
+
+In this section, we describe the neuron models supported by ModNEF and their layer description properties.
+
+#### Beta LIF Neuron Model Description
+
+Feed forward layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "blif",               // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emeulated neurons
+    "beta" : 0.9,                   // membrane decay value
+    "threshold" : 0.8,              // threshold value
+    "reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
+    "hardware" : {              /*Optional*/ // hardware description
+      "strategy" : "Parallel",  /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,        /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_fp" : -1,         /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
+      "weight_type" : null,     /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "compute_fp" : -1,        /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
+      "mem_init_file" : null,   /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
+      "variable_size" : -1      /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
+Recurrent layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "rblif",              // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emeulated neurons
+    "beta" : 0.9,                   // membrane decay value
+    "threshold" : 0.8,              // threshold value
+    "reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
+    "hardware" : {                /*Optional*/ // hardware description
+      "strategy" : "Parallel",    /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,          /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_fp" : -1,           /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
+      "weight_type" : null,       /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "compute_fp" : -1,          /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
+      "mem_init_file" : null,     /*Optional*/ // vivado memory file for feed forward weight, null by default. If null, file = layer_name_mem.mem
+      "mem_init_file_rec" : null, /*Optional*/ // vivado memory file for recurrent weight, null by default. If null, file = layer_name_mem_rec.mem
+      "variable_size" : -1        /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
+#### Shift Register based LIF Neuron Model Description
+
+Feed forward layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "shiftlif",           // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emeulated neurons
+    "beta" : 0.9,                   // membrane decay value
+    "threshold" : 0.8,              // threshold value
+    "reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
+    "hardware" : {              /*Optional*/ // hardware description
+      "strategy" : "Parallel",  /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,        /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_fp" : -1,         /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
+      "weight_type" : null,     /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "compute_fp" : -1,        /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
+      "mem_init_file" : null,   /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
+      "variable_size" : -1      /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
+Recurrent layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "rshiftlif",          // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emeulated neurons
+    "beta" : 0.9,                   // membrane decay value
+    "threshold" : 0.8,              // threshold value
+    "reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
+    "hardware" : {                /*Optional*/ // hardware description
+      "strategy" : "Parallel",    /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,          /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_fp" : -1,           /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
+      "weight_type" : null,       /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "compute_fp" : -1,          /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
+      "mem_init_file" : null,     /*Optional*/ // vivado memory file for feed forward weight, null by default. If null, file = layer_name_mem.mem
+      "mem_init_file_rec" : null, /*Optional*/ // vivado memory file for recurrent weight, null by default. If null, file = layer_name_mem_rec.mem
+      "variable_size" : -1        /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
+#### Simplified LIF Neuron Model Description
+
+Feed forward layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "slif",               // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emeulated neurons
+    "threshold" : 0.8,              // threshold value
+    "leak" : 0.015,                 // memory leakage value
+    "min" : 0.0,                    // minimal memory voltage value
+    "rest" : 0.0,                   // resting memory potential
+    "hardware" : {              /*Optional*/ // hardware description
+      "strategy" : "Parallel",  /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,        /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_type" : null,     /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "mem_init_file" : null,   /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
+      "variable_size" : -1      /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
+Recurrent layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "rslif",              // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emeulated neurons
+    "threshold" : 0.8,              // threshold value
+    "leak" : 0.015,                 // memory leakage value
+    "min" : 0.0,                    // minimal memory voltage value
+    "rest" : 0.0,                   // resting memory potential
+    "hardware" : {                /*Optional*/ // hardware description
+      "strategy" : "Parallel",    /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,          /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_type" : null,       /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "mem_init_file" : null,     /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
+      "mem_init_file_rec" : null, /*Optional*/ // vivado memory file for recurrent weight, null by default. If null, file = layer_name_mem_rec.mem
+      "variable_size" : -1        /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
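+### Complete Minimal Example
+
+To show how a layer description slots into the network description, here is a minimal sketch of a full model with a single `blif` layer (the layer name and values are illustrative):
+
+```json
+{
+  "name" : "my_model",
+  "input" : 64,
+  "num_class" : 10,
+
+  "fc1" : {
+    "model" : "blif",
+    "in_features" : 64,
+    "neurons" : 10,
+    "beta" : 0.9,
+    "threshold" : 0.8,
+    "reset_mechanism" : "subtract"
+  },
+
+  "hardware" : {
+    "clock_name" : "clock",
+    "clock_freq" : 125000000,
+    "baud_rate" : 921600,
+    "txd" : "uart_txd",
+    "rxd" : "uart_rxd"
+  }
+}
+```
+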
+## ModNEF Project Description
+
+```json
+{
+  "exp_name" : "template", 
+
+  "model": {
+    // describe your model here
+  },
+
+  "optimizer" : {         /*Optional*/ // Adam optimizer properties, optional. If no optimizer properties, default optimizer is configured with followed values
+    "lr" : 1e-4,          // learning rate
+    "betas" : [0.9, 0.99] // betas values
+  },
+
+  "best_model" : "best_my_model", /*Optional*/ // best model file name, optional. By default best_{model.name}
+
+  "verbose" : true, /*Optional*/ // verbose mode, by default true
+
+  "device" : "auto",  /*Optional*/ // troch device, by default auto. If auto, device is automatically detected
+
+  "train" : {                 // Train configuration. Optional if you don't want to train with ModNEF Executor.
+    "name" : "Train",         // name printed during trian progression
+    "n_epoch" : 5,            // number of epoch
+    "save_best_model" : true, /*Optional*/ // save best accuracy model, by default true
+    "save_history" : true,    /*Optional*/ // save average loss, test accuracy and validation accuracy through epoch, by default true
+    "plot" : true             /*Optional*/ // save matplotlib plot, by default true
+  },
+
+  "eval" : {                  // Software evaluation configuration, Optional if you don't want to run software evaluation
+    "name" : "Evaluation",    // name printed during evaluation progression
+    "use_best_model" : true,  /* Optional*/ // use best model for evaluation, by default true
+    "conf_matrix" : true      /* Optional*/ // generate matplotlib confusion matrix, by default true
+  },
+
+  "hardware_estimation" : {         // Hardware estimation configuration, Optional if you don't want to run hardware estimation
+    "name" : "Hardware Estimation", // name printed during evaluation progression
+    "use_best_model" : true,        /* Optional*/ // use best model for evaluation, by default true
+    "conf_matrix" : true            /* Optional*/ // generate matplotlib confusion matrix, by default true
+  },
+
+  "vhdl" : {                        // VHDL generation configuration, Optional if you don't want to run vhl generation
+    "use_best_model" : false,       /* Optional*/ // use best model for evaluation, by default true
+    "driver_config" : "driver.yml", /* Optional*/ // driver configuration file, by default driver.yml
+    "file_name" : null              /* Optional*/ // VHDL file name, if null, model.name.vhd
+  },
+
+  "fpga" : {                          // FPGA evaluation configuration, Optional if you don't want to run FPGA evaluation
+    "name" : "FPGA Evaluation",       // name printed during evaluation progression
+    "driver_file" : "driver.yml",     // driver configuration file
+    "board_path" : "board path here", // path to UART board 
+    "conf_matrix" : true              /* Optional*/ // generate matplotlib confusion matrix, by default true
+  },
+
+  "confusion_matrix" : { // Necessary if you want to generate configuration matrix
+    "class" : ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]  // Name of classes
+  }
+
+}
+```
\ No newline at end of file
diff --git a/modneflib/modnef/__init__.py b/modneflib/modnef/__init__.py
index b3d7654003558f20a66f83ed41ef0eec16a8111b..e622685c6cd83192707566d1fec7984a522b90db 100644
--- a/modneflib/modnef/__init__.py
+++ b/modneflib/modnef/__init__.py
@@ -1,4 +1,5 @@
 from .arch_builder import *
 from .modnef_driver import *
 from .tools import *
-from .modnef_torch import *
\ No newline at end of file
+from .modnef_torch import *
+from .quantizer import *
\ No newline at end of file
diff --git a/modneflib/modnef/arch_builder/modules/BLIF/blif.py b/modneflib/modnef/arch_builder/modules/BLIF/blif.py
index 0f10b9e4a0c7052e42fc1f8c99600d4239abb926..daf1c7681cab813436a564545fab1b79b7bb4a4c 100644
--- a/modneflib/modnef/arch_builder/modules/BLIF/blif.py
+++ b/modneflib/modnef/arch_builder/modules/BLIF/blif.py
@@ -12,6 +12,7 @@ from ..modnef_arch_mod import ModNEFArchMod
 from ..utilities import *
 from math import log, ceil
 from .blif_debugger import BLif_Debugger
+from modnef.quantizer import *
 
 _BLIF_DEFINITION = """
   component BLif_{0} is
@@ -110,7 +111,8 @@ class BLif(ModNEFArchMod):
                weight_fp : int = -1, 
                mem_init_file : str = None,
                strategy : str = "Parallel",
-               variable_size : int = 16  
+               variable_size : int = 16,
+               quantizer = FixedPointQuantizer(8)
               ):
     """
     Init attributes
@@ -155,14 +157,7 @@ class BLif(ModNEFArchMod):
     self.beta = beta
     self.reset = reset
 
-    if compute_fp == -1:
-      self.compute_fp = weight_size
-    else:
-      self.compute_fp = compute_fp
-
-    self.weight_size = weight_size
-    self.weight_type = weight_type
-    self.weight_fp = weight_fp
+    self.quantizer = quantizer
 
     if mem_init_file == None:
       self.mem_init_file = f"{self.name}_weight.mem"
@@ -170,7 +165,9 @@ class BLif(ModNEFArchMod):
       self.mem_init_file = mem_init_file
 
     self._strategy = strategy
     self.variable_size = variable_size
     
   def vhdl_component_name(self):
     """
@@ -209,62 +206,39 @@ class BLif(ModNEFArchMod):
     output_path : str = "."
       memory file output path
     """
-    
-    if self.weight_type != "w_signed" and self.weight_type != "w_unsigned" and self.weight_type != None:
-      print(f"{self.weight_type} is not supported")
-      return 
 
     weights = weight_extraction(weights, self.input_neuron, self.output_neuron)
 
-    if self.weight_type == None or self.weight_fp == -1:
-
-      w_min = min(min(weights))
-      w_max = max(max(weights))
+    if not self.quantizer.is_initialiaze:
+      self.quantizer.init_from_weight(weights)
 
-      if w_min < 0:
-        self.weight_type = "w_signed"
-      else:
-        self.weight_type = "w_unsigned"
-
-      if self.weight_fp == -1:
-
-        int_part = int(max(abs(w_min), abs(w_max)))
-
-        if int_part==0:
-          fp = 0
-        elif int_part==1:
-          fp = 1
-        else:
-          fp = ceil(log(int_part)/log(2))
-
-        if self.weight_type == "w_signed":
-          self.weight_fp = self.weight_size-fp-1
-        else:
-          self.weight_fp = self.weight_size-fp
+    bw = self.quantizer.bitwidth
 
     mem_file = open(f"{output_path}/{self.mem_init_file}", 'w')
     
-    if self.weight_type == "w_signed":
+    if self.quantizer.signed:
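+      # pack one memory line per input neuron: each output weight is quantized,
+      # two's-complement encoded on `bw` bits, then concatenated and written as hex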
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.weight_size) + two_comp(to_fixed_point(weights[i][j], self.weight_fp), self.weight_size)
+          w_line = (w_line<<bw) + two_comp(self.quantizer(weights[i][j]), bw)
 
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
-      self.v_threshold = two_comp(to_fixed_point(self.v_threshold, self.compute_fp), self.variable_size)
-      self.beta = two_comp(to_fixed_point(self.beta, self.compute_fp), self.variable_size)
+      self.v_threshold = two_comp(self.quantizer(self.v_threshold), self.variable_size)
+      self.beta = two_comp(self.quantizer(self.beta), self.variable_size)
 
-    elif self.weight_type == "w_unsigned":
+    else:
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.weight_size) + to_fixed_point(weights[i][j], self.weight_fp)
+          w_line = (w_line<<self.weight_size) + self.quantizer(weights[i][j])
         
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
-      self.v_threshold = to_fixed_point(self.v_threshold, self.compute_fp)
-      self.beta = to_fixed_point(self.beta, self.compute_fp)
+      self.v_threshold = self.quantizer(self.v_threshold)
+      self.beta = self.quantizer(self.beta)
     
     mem_file.close()
 
@@ -284,10 +258,10 @@ class BLif(ModNEFArchMod):
       v_threshold = self.v_threshold,
       beta = self.beta,
       reset = self.reset,
-      compute_fp = self.compute_fp,
-      weight_size = self.weight_size,
-      weight_type = self.weight_type,
-      weight_fp = self.weight_fp,
+      compute_fp = self.quantizer.fixed_point,
+      weight_size = self.quantizer.bitwidth,
+      weight_type = "w_signed" if self.quantizer.signed else "w_unsigned",
+      weight_fp = self.quantizer.fixed_point,
       mem_init_file = self.mem_init_file,
       output_file = output_file,
       variable_size=self.variable_size
@@ -312,6 +286,8 @@ class BLif(ModNEFArchMod):
     if type(self.v_threshold) != int or type(self.beta) != int:
       print("neuron hyper parameters are not int. If you set hyper parameters as float, pleasse run weight_convert before calling to_vhdl")
       return
+    
+    wt = "w_signed" if self.quantizer.signed else "w_unsigned"
 
     vhdl_file.write(f"\t{self.name} : BLif_{self._strategy} generic map(\n")
     vhdl_file.write(f"\t\tinput_neuron => {self.input_neuron},\n")
@@ -320,10 +296,10 @@ class BLif(ModNEFArchMod):
     vhdl_file.write(f"\t\tv_threshold => x\"{self._to_hex(self.v_threshold, self.variable_size)}\",\n")
     vhdl_file.write(f"\t\tbeta => x\"{self._to_hex(self.beta, self.variable_size)}\",\n")
     vhdl_file.write(f"\t\treset => \"{self.reset}\",\n")
-    vhdl_file.write(f"\t\tcompute_fp => {self.compute_fp},\n")
-    vhdl_file.write(f"\t\tweight_size => {self.weight_size},\n")
-    vhdl_file.write(f"\t\tweight_type => \"{self.weight_type}\",\n")
-    vhdl_file.write(f"\t\tweight_fp => {self.weight_fp},\n")
+    vhdl_file.write(f"\t\tcompute_fp => {self.quantizer.fixed_point},\n")
+    vhdl_file.write(f"\t\tweight_size => {self.quantizer.bitwidth},\n")
+    vhdl_file.write(f"\t\tweight_type => \"{wt}\",\n")
+    vhdl_file.write(f"\t\tweight_fp => {self.quantizer.fixed_point},\n")
     vhdl_file.write(f"\t\tmem_init_file => \"{self.mem_init_file}\"\n")
     vhdl_file.write("\t) port map(\n")
     vhdl_file.write(f"\t\ti_clk => {clock_name},\n")
diff --git a/modneflib/modnef/modnef_torch/executor.py b/modneflib/modnef/modnef_torch/executor.py
index a1234211143c63f99f726ab7efafa4580323e437..4a75d39ee0ccc539f93069b8865fde07838d1048 100644
--- a/modneflib/modnef/modnef_torch/executor.py
+++ b/modneflib/modnef/modnef_torch/executor.py
@@ -159,7 +159,6 @@ class ModNEFExecutor():
 
     train_config = {
       "n_epoch" : 5,
-      "step_save" : -1,
       "save_best_model" : True,    
       "save_history" : True,  
       "plot" : True,
@@ -174,7 +173,6 @@ class ModNEFExecutor():
       testLoader=self.testLoader,
       validationLoader=self.validationLoader,
       n_epoch=train_config["n_epoch"],
-      step_save=train_config["step_save"],
       save_best_model=train_config["save_best_model"],
       best_model_name=self.best_model_name
     )
@@ -235,8 +233,7 @@ class ModNEFExecutor():
 
     accuracy, y_pred, y_true = self.runner.hardware_estimation(
       testLoader=self.testLoader,
-      name=he_config["name"],
-      conf_matrix=he_config["conf_matrix_class"]
+      name=he_config["name"]
     )
 
     if he_config["conf_matrix"]:
@@ -271,8 +268,7 @@ class ModNEFExecutor():
 
     accuracy, y_pred, y_true = self.runner.accuracy(
       testLoader=self.testLoader,
-      name=eval_config["name"],
-      conf_matrix=eval_config["conf_matrix_class"]
+      name=eval_config["name"]
     )
 
     if eval_config["conf_matrix"]:
@@ -315,7 +311,7 @@ class ModNEFExecutor():
 
     fpga_config = {
       "name" : "FPGA Eval",
-      "driver_file" : "driver.yml",
+      "driver_config" : "",
       "board_path" : "",
       "conf_matrix" : False,
       "conf_matrix_class" : []
@@ -331,8 +327,7 @@ class ModNEFExecutor():
       name=fpga_config["name"],
       testLoader=self.testLoader,
       board_path=fpga_config["board_path"],
-      driver_path=f"{self.output_path}/{fpga_config['driver_file']}",
-      conf_matrix=fpga_config["conf_matrix_class"]
+      driver_path=f"{self.output_path}/{fpga_config['driver_config']}"
     )
 
     if fpga_config["conf_matrix"]:
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
index 9033906f2bbd904860627dd25573214d30b8516f..959bc20527bfe981e8f941d0f3fc9e690940cc26 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
@@ -16,6 +16,7 @@ from snntorch import Leaky
 import modnef.arch_builder as builder
 from modnef.arch_builder.modules.utilities import *
 from ..modnef_torch_neuron import ModNEFNeuron
+from modnef.quantizer import *
 
 class BLIF(Leaky, ModNEFNeuron):
   """
@@ -94,7 +95,8 @@ class BLIF(Leaky, ModNEFNeuron):
                beta,
                threshold=1.0,
                spike_grad=None,
-               reset_mechanism="subtract"
+               reset_mechanism="subtract",
+               quantizer=FixedPointQuantizer(8)
             ):
     """
     Initialize class
@@ -138,6 +140,8 @@ class BLIF(Leaky, ModNEFNeuron):
 
     self._init_mem()
 
+    self.quantizer = quantizer
+
     self.hardware_description = {
       "strategy" : "Parallel",
       "weight_size" : 8,
@@ -191,7 +195,7 @@ class BLIF(Leaky, ModNEFNeuron):
 
     Returns
     -------
     tuple of Tensor
       spk, mem
     """
     
@@ -242,8 +246,8 @@ class BLIF(Leaky, ModNEFNeuron):
       self.mem = torch.zeros_like(input_, device=self.mem.device)
 
     if self.hardware_estimation_flag:
-      input_.data = self.__quant(input_.data, self.hardware_description["compute_fp"])
-      self.mem.data = self.__quant(self.mem.data, self.hardware_description["compute_fp"])
+      input_.data = self.quantizer(input_.data, True)
+      self.mem.data = self.quantizer(self.mem.data, True)
 
     self.reset = self.mem_reset(self.mem)
 
@@ -300,7 +304,8 @@ class BLIF(Leaky, ModNEFNeuron):
         compute_fp=self.hardware_description["compute_fp"],
         mem_init_file=self.hardware_description["mem_init_file"],
         strategy=self.hardware_description["strategy"],
-        variable_size=self.hardware_description["variable_size"]
+        variable_size=self.hardware_description["variable_size"],
+        quantizer=self.quantizer
       )
 
     module.weight_convert(
@@ -310,30 +315,7 @@ class BLIF(Leaky, ModNEFNeuron):
     )
     return module
   
-  def __quant(self, data, fp, dtype = torch.int32):
-    """
-    Internal quantization function
-
-    Parameters
-    ----------
-    data : Tensor
-      input tensor to quantize
-    fp : int
-      fixed point position
-    type = torch.int32 : dtype
-      type use during quantization
-
-    Returns
-    -------
-    Tensor
-    """
-    
-    scale_factor = 2**fp
-    scaled_data = (data*scale_factor).to(dtype)
-    unscaled_data = scaled_data.to(torch.float32)/scale_factor
-    return unscaled_data
-  
-  def quantize_weight(self, size=-1, fp=-1, dtype=torch.int32):
+  def quantize_weight(self, dtype=torch.int32):
     """
     Quantize synaptic weight
 
@@ -348,37 +330,11 @@ class BLIF(Leaky, ModNEFNeuron):
     dtype = torch.int32 : dtype
       type used during quantization
     """
+    self.quantizer.init_from_weight(self.fc.weight)
 
-    if self.hardware_description["weight_type"]==None:
-      if self.fc.weight.min().item() < 0.0:
-        self.hardware_description["weight_type"] = "w_signed"
-      else:
-        self.hardware_description["weight_type"] = "w_unsigned"
-    
-    if size != -1:
-      self.hardware_description["weight_size"] = size
-
-    if fp==-1:
-      fp=self.hardware_description["weight_fp"]
-
-    if fp==-1:
-      w_min = self.fc.weight.min().item()
-      w_max = self.fc.weight.max().item()
-      int_part = int(max(abs(w_max), abs(w_min)))
-      
-      if int_part > 1:
-        int_part = ceil(log(int_part)/log(2))
-    
-      if self.hardware_description["weight_type"] == "w_signed":
-        self.hardware_description["weight_fp"] = self.hardware_description["weight_size"]-int_part-1
-      else:
-        self.hardware_description["weight_fp"] = self.hardware_description["weight_size"]-int_part
-    else:
-      self.hardware_description["weight_fp"]=fp
-
-    self.fc.weight.data = self.__quant(self.fc.weight.data, self.hardware_description["weight_fp"], dtype)
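+    # second argument unscale=True: quantize/de-quantize round trip, keeping float weights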
+    self.fc.weight.data = self.quantizer(self.fc.weight.data, True, dtype)
 
-  def quantize_parameters(self, fp=-1, dtype=torch.int32):
+  def quantize_parameters(self, dtype=torch.int32):
     """
     Quantize neuron hyper-parameters
 
@@ -391,19 +347,10 @@ class BLIF(Leaky, ModNEFNeuron):
       type used during quantization
     """
 
-    if fp ==-1:
-      if self.hardware_description["compute_fp"]==-1:
-        if self.hardware_description["weight_size"]==-1:
-          raise Exception("Impossible to fix quantization value, please fix the point position")
-        else:
-          self.hardware_description["compute_fp"] = self.hardware_description["weight_size"]
-    else:
-      self.hardware_description["compute_fp"]=fp
-
-    self.threshold.data = self.__quant(self.threshold.data, self.hardware_description["compute_fp"], dtype)
-    self.beta.data = self.__quant(self.beta.data, self.hardware_description["compute_fp"], dtype)
+    self.threshold.data = self.quantizer(self.threshold.data, True, dtype)
+    self.beta.data = self.quantizer(self.beta.data, True, dtype)
 
-  def quantize(self, weight_size=-1, weight_fp=-1, compute_fp=-1, dtype=torch.int32):
+  def quantize(self, dtype=torch.int32):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
@@ -422,8 +369,8 @@ class BLIF(Leaky, ModNEFNeuron):
       type used during quantization
     """
     
-    self.quantize_weight(weight_size, weight_fp, dtype)
-    self.quantize_parameters(compute_fp, dtype)
+    self.quantize_weight(dtype)
+    self.quantize_parameters(dtype)
 
   @classmethod
   def detach_hidden(cls):
diff --git a/modneflib/modnef/modnef_torch/trainer.py b/modneflib/modnef/modnef_torch/trainer.py
index de8639dc3d9fdeafd84727b2f878424b0b0a2018..9bc8b764def6a89e8cce32b9a737ae70758ce156 100644
--- a/modneflib/modnef/modnef_torch/trainer.py
+++ b/modneflib/modnef/modnef_torch/trainer.py
@@ -19,8 +19,63 @@ import numpy as np
 
 
 class ModNEFTrainer():
-
-  def __init__(self, model, optimizer=None, loss=None, device=None, verbose=False, output_path="."):
+  """
+  ModNEF Model Trainer
+
+  Attributes
+  ----------
+  model : ModNEFModel
+    modnef network model
+  optimizer :
+    trainer optimizer
+  loss : 
+    loss function 
+  device : 
+    torch device
+  verbose : bool
+    verbose mode
+  output_path : str
+    output file path (step save or best model)
+
+  Methods
+  -------
+  train_1_epoch(trainLoader)
+    Train network for one epoch
+  train(trainLoader, testLoader, validationLoader, n_epoch, save_best_model, best_model_name)
+    Train network
+  accuracy(testLoader, name)
+    Run software inference
+  hardware_estimation(testLoader, name)
+    Run hardware estimation inference
+  fpga_accuracy(testLoader, board_path, driver_path, name)
+    Run FPGA inference
+  """
+
+  def __init__(self, 
+               model, 
+               optimizer=None, 
+               loss=None, 
+               device=None, 
+               verbose=False, 
+               output_path="."
+               ):
+    """
+    model : ModNEFModel | dict
+      ModNEF network model or dictionnary description of model
+    optimizer = None
+      trainer optimizer
+      If None, setup Adam Optimizer
+    loss = None
+      loss function
+      If None, CrossEntropyLoss is used
+    device = None
+      torch device
+      If None, detected automatically depending on whether CUDA is available
+    verbose = False : bool
+      verbose mode
+    output_path = "." : str
+      output file path
+    """
     
     if type(model) != ModNEFModel:
       self.model = ModNEFModel(model)
@@ -46,33 +101,20 @@ class ModNEFTrainer():
 
     self.output_path=output_path
 
-    self.y_true = []
-    self.y_pred = []
-
-  @classmethod
-  def load_config(cls, config):
-
-    model = ModNEFModel(config["model"])
-    optimizer = torch.optim.Adam(model.parameters(), lr=(config["optimizer"]["lr"]), betas=config["optimizer"]["betas"])
-    loss = nn.CrossEntropyLoss()
-    verbose = config["verbose"]
-
-    return cls(
-      model=model,
-      optimizer=optimizer,
-      loss=loss,
-      verbose=verbose
-    )
-
-  def load_checkpoint(self, path):
-
-    checkpoint = torch.load(path)
+  def train_1_epoch(self, trainLoader):
+    """
+    Train network for one epoch
 
-    self.model.load_state_dict(checkpoint["model"])
-    self.optimizer.load_state_dict(checkpoint["optimizer"])
-    self.loss.load_state_dict(checkpoint["loss"])
+    Parameters
+    ----------
+    trainLoader
+      train dataset loader
 
-  def train_1_epoch(self, trainLoader):
+    Returns
+    -------
+    float
+      Average epoch loss
+    """
 
     epoch_loss = []
 
@@ -108,8 +150,36 @@ class ModNEFTrainer():
     return np.mean(epoch_loss)
       
     
-  def train(self, trainLoader, testLoader, n_epoch=50, validationLoader=None, step_save=-1, save_best_model=True, best_model_name = ""):
-
+  def train(self, trainLoader, testLoader, validationLoader=None, n_epoch=50, save_best_model=True, best_model_name = ""):
+    """
+    Train network
+
+    Parameters
+    ----------
+    trainLoader
+      train dataset loader
+    testLoader
+      test dataset loader
+    validationLoader=None
+      validation dataset loader
+      If None, validation is ignored
+    n_epoch=50 : int
+      number of epoch
+    save_best_model=True : bool
+      set to True if the best-accuracy model must be saved
+    best_model_name="" : str
+      best model file name
+      if default, name = best_{model.name}
+
+    Returns
+    -------
+    (list, list, list, float)
+      average loss per epoch
+      validation accuracy per epoch
+      test accuracy per epoch
+      best accuracy
+    """
+    
     avg_loss_history = []
     acc_test_history = []
     acc_val_history = []
@@ -135,24 +205,33 @@ class ModNEFTrainer():
         acc_val, _, _ = self.accuracy(testLoader=validationLoader, name="Validation")
         acc_val_history.append(acc_val)
 
-      acc_test, self.y_pred, self.y_true = self.accuracy(testLoader=testLoader, name="Test")
+      acc_test, _, _ = self.accuracy(testLoader=testLoader, name="Test")
       acc_test_history.append(acc_test)
 
       if save_best_model and acc_test>best_acc:
         torch.save(self.model.state_dict(), f"{self.output_path}/best_{self.model.name}")
         best_acc = acc_test
 
-      if step_save!=-1 and epoch%step_save==0:
-        checkpoint = {
-          "model" : self.model.state_dict(),
-          "loss" : self.loss.state_dict(),
-          "optimizer" : self.optimizer.state_dict()
-        }
-        torch.save(checkpoint, f"{self.output_path}/{self.model.name}_step_save")
-
     return avg_loss_history, acc_val_history, acc_test_history, best_acc
   
-  def __run_accuracy(self, testLoader, name, conf_matrix=[]):
+  def __run_accuracy(self, testLoader, name):
+    """
+    Run inference
+
+    Parameters
+    ----------
+    testLoader
+      test dataset loader
+    name : str
+      name of inference
+
+    Returns
+    -------
+    (float, list, list)
+      accuracy
+      predicted class
+      true class
+    """
 
     y_true = []
     y_pred = []
@@ -191,7 +270,24 @@ class ModNEFTrainer():
     return (correct/total), y_pred, y_true
 
 
-  def accuracy(self, testLoader, name="Test", conf_matrix=[]):
+  def accuracy(self, testLoader, name="Test"):
+    """
+    Run software inference
+
+    Parameters
+    ----------
+    testLoader
+      test dataset loader
+    name="Test" : str
+      name of inference
+
+    Returns
+    -------
+    (float, list, list)
+      accuracy
+      predicted class
+      true class
+    """
 
     accuracy = 0
     y_pred = []
@@ -199,22 +295,62 @@ class ModNEFTrainer():
 
     self.model.eval()
 
-    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name, conf_matrix=conf_matrix)
+    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name)
 
     return accuracy, y_pred, y_true    
   
-  def hardware_estimation(self, testLoader, name="Hardware Estimation", conf_matrix=[]):
+  def hardware_estimation(self, testLoader, name="Hardware Estimation"):
+    """
+    Run hardware estimation inference
+
+    Parameters
+    ----------
+    testLoader
+      test dataset loader
+    name="Hardware Estimation" : str
+      name of inference
+
+    Returns
+    -------
+    (float, list, list)
+      accuracy
+      predicted class
+      true class
+    """
+
     accuracy = 0
     y_pred = []
     y_true = []
 
     self.model.hardware_estimation()
 
-    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name, conf_matrix=conf_matrix)
+    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name)
 
     return accuracy, y_pred, y_true    
   
-  def fpga_accuracy(self, testLoader, board_path, driver_path = "./driver.yml", name="FPGA eval", conf_matrix=[]):
+  def fpga_accuracy(self, testLoader, board_path, driver_path="./driver.yml", name="FPGA eval"):
+    """
+    Run FPGA inference
+
+    Parameters
+    ----------
+    testLoader
+      test dataset loader
+    board_path : str
+      path to the FPGA UART device
+    driver_path = "./driver.yml" : str
+      path to the driver configuration file
+    name = "FPGA eval" : str
+      name of the inference run
+
+    Returns
+    -------
+    (float, list, list)
+      accuracy
+      predicted class
+      true class
+    """
+
     accuracy = 0
     y_pred = []
     y_true = []
@@ -225,7 +361,7 @@ class ModNEFTrainer():
 
     self.device = torch.device("cpu")
 
-    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name, conf_matrix=conf_matrix)
+    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name)
 
     self.device = last_device
 
diff --git a/modneflib/modnef/quantizer/__init__.py b/modneflib/modnef/quantizer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..71f898cdcd275c7ee455c6fb7063a7de5ee7fa88
--- /dev/null
+++ b/modneflib/modnef/quantizer/__init__.py
@@ -0,0 +1,5 @@
+
+from .quantizer import Quantizer
+from .fixed_point_quantizer import FixedPointQuantizer
+from .min_max_quantizer import MinMaxQuantizer
+from .dynamic_scale_quantizer import DynamicScaleFactorQuantizer
\ No newline at end of file
diff --git a/modneflib/modnef/quantizer/dynamic_scale_quantizer.py b/modneflib/modnef/quantizer/dynamic_scale_quantizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5e54030e06804c1ab20e8b6ac878727a32ae208
--- /dev/null
+++ b/modneflib/modnef/quantizer/dynamic_scale_quantizer.py
@@ -0,0 +1,37 @@
+import torch
+from .quantizer import Quantizer
+
+class DynamicScaleFactorQuantizer(Quantizer):
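+  """
+  Quantizer using a dynamic scale factor: weights are divided by
+  max(|w|)/2**(bitwidth-1) and clamped to the representable integer range.
+  """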
+
+  def __init__(self, bitwidth, signed=None, is_initialize=False):
+
+    super().__init__(
+      bitwidth=bitwidth,
+      signed=signed,
+      is_initialize=is_initialize
+    )
+    
+    self.scale_factor = 0
+
+  def init_from_weight(self, weight, rec_weight=torch.zeros((1))):
+
+    self.is_initialiaze = True
+
+    if not torch.is_tensor(weight):
+      weight = torch.Tensor(weight)
+
+    if not torch.is_tensor(rec_weight):
+      rec_weight = torch.Tensor(rec_weight)
+
+    if self.signed is None:
+      self.signed = torch.min(weight.min(), rec_weight.min())<0.0
+
+    # scale factor from the largest magnitude over both weight matrices
+    self.scale_factor = torch.max(torch.abs(weight).max(), torch.abs(rec_weight).max())/2**(self.bitwidth-1)
+
+
+  def _quant(self, data, unscale, dtype) -> torch.Tensor:
+
+    # representable integer range for the target bitwidth
+    bound_min = -int(self.signed)*2**(self.bitwidth-1)
+    bound_max = 2**(self.bitwidth-int(self.signed))-1
+
+    # round before casting so values are not simply truncated
+    scaled = torch.clamp(torch.round(data/self.scale_factor), min=bound_min, max=bound_max).to(dtype)
+
+    if unscale:
+      return scaled*self.scale_factor
+    else:
+      return scaled
\ No newline at end of file
diff --git a/modneflib/modnef/quantizer/fixed_point_quantizer.py b/modneflib/modnef/quantizer/fixed_point_quantizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..8471872a166b11de44a9423b95565bc42811edde
--- /dev/null
+++ b/modneflib/modnef/quantizer/fixed_point_quantizer.py
@@ -0,0 +1,56 @@
+import torch
+from math import ceil, log
+from .quantizer import Quantizer
+
+class FixedPointQuantizer(Quantizer):
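+  """
+  Fixed-point quantizer: weights are multiplied by 2**fixed_point and
+  rounded. A fixed_point or bitwidth set to -1 is derived from the
+  weight range in init_from_weight.
+  """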
+
+  def __init__(self, bitwidth, fixed_point=-1, signed=None, is_initialize=False):
+
+    if bitwidth==-1 and fixed_point==-1:
+      raise ValueError("You must fix at least one value to compute the other one")
+    
+    super().__init__(
+      bitwidth=bitwidth,
+      signed=signed,
+      is_initialize=is_initialize
+    )
+
+    self.fixed_point = fixed_point
+    self.scale_factor = 2**fixed_point
+
+  def init_from_weight(self, weight, rec_weight=torch.zeros((1))):
+
+    self.is_initialiaze = True
+
+    if not torch.is_tensor(weight):
+      weight = torch.Tensor(weight)
+
+    if not torch.is_tensor(rec_weight):
+      rec_weight = torch.Tensor(rec_weight)
+
+    if self.signed is None:
+      self.signed = torch.min(weight.min(), rec_weight.min())<0.0
+
+    if self.fixed_point==-1 or self.bitwidth==-1:
+      int_max = int(torch.max(torch.abs(weight).max(), torch.abs(rec_weight).max()))
+
+      # bits needed for the integer part, including the sign bit if signed
+      if int_max>1:
+        int_part_size = ceil(log(int_max)/log(2))+int(self.signed)
+      else:
+        int_part_size = int_max+int(self.signed)
+
+      if self.bitwidth==-1:
+        self.bitwidth = int_part_size+self.fixed_point
+      elif self.fixed_point==-1:
+        self.fixed_point = self.bitwidth-int_part_size
+        self.scale_factor = 2**self.fixed_point
+
+
+  def _quant(self, data, unscale, dtype) -> torch.Tensor:
+
+    scaled = torch.round(data*self.scale_factor).to(dtype)
+    
+    if unscale:
+      return (scaled.to(torch.float32))/self.scale_factor
+    else:
+      return scaled
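+
+# Minimal usage sketch (illustrative only; `w` stands for any layer weight
+# tensor and is not part of the ModNEF API):
+#
+#   quantizer = FixedPointQuantizer(bitwidth=8)
+#   quantizer.init_from_weight(w)
+#   w_int = quantizer(w)                   # quantized integer tensor
+#   w_approx = quantizer(w, unscale=True)  # dequantized approximation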
\ No newline at end of file
diff --git a/modneflib/modnef/quantizer/min_max_quantizer.py b/modneflib/modnef/quantizer/min_max_quantizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc02ff3fd4ad336bacf9e7cf702fb009b69fcc31
--- /dev/null
+++ b/modneflib/modnef/quantizer/min_max_quantizer.py
@@ -0,0 +1,40 @@
+import torch
+from .quantizer import Quantizer
+
+class MinMaxQuantizer(Quantizer):
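+  """
+  Min-max quantizer: linearly maps the symmetric range [-max|w|, max|w|]
+  onto the integer range [b_min, b_max] of the target bitwidth.
+  """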
+
+  def __init__(self, bitwidth, signed=None, is_initialize=False):
+
+    super().__init__(
+      bitwidth=bitwidth,
+      signed=signed,
+      is_initialize=is_initialize
+    )
+    
+    self.x_min = 0
+    self.x_max = 0
+    self.b_min = 0
+    self.b_max = 0
+
+  def init_from_weight(self, weight, rec_weight=torch.zeros((1))):
+
+    self.is_initialiaze = True
+
+    if self.signed is None:
+      self.signed = torch.min(weight.min(), rec_weight.min())<0.0
+
+    self.x_max = torch.max(torch.abs(weight).max(), torch.abs(rec_weight).max())
+    self.x_min = -self.x_max
+
+    self.b_max = 2**(self.bitwidth-int(self.signed))-1
+    self.b_min = -int(self.signed)*self.b_max
+
+  def _quant(self, data, unscale, dtype) -> torch.Tensor:
+
+    # map [x_min, x_max] linearly onto [b_min, b_max], rounding before the cast
+    scaled = torch.round((data-self.x_min)/(self.x_max-self.x_min)*(self.b_max-self.b_min)+self.b_min).to(dtype)
+    
+    if unscale:
+      return (scaled-self.b_min)/(self.b_max-self.b_min)*(self.x_max-self.x_min)+self.x_min
+    else:
+      return scaled
\ No newline at end of file
diff --git a/modneflib/modnef/quantizer/quantizer.py b/modneflib/modnef/quantizer/quantizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8c6ae8e95870d8da99283999f7edab2d32a6d51
--- /dev/null
+++ b/modneflib/modnef/quantizer/quantizer.py
@@ -0,0 +1,29 @@
+import torch
+import numpy as np
+
+class Quantizer():
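+  """
+  Abstract base class for weight quantizers. Subclasses implement
+  init_from_weight and _quant; __call__ accepts scalars, lists, numpy
+  arrays or tensors and returns quantized data of the same type.
+  """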
+  
+  def __init__(self, bitwidth, signed=None, is_initialize=False):
+    self.bitwidth = bitwidth
+    self.is_initialiaze = is_initialize
+    self.signed = signed
+
+  def init_from_weight(self, weight, rec_weight=torch.zeros((1))):
+    raise NotImplementedError()
+
+  def __call__(self, data, unscale=False, dtype=torch.int32):
+    
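+    # Convert scalars, lists and numpy arrays to tensors before quantizing,
+    # then return the result in the caller's original container type.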
+    if isinstance(data, (int, float)):
+      return self._quant(data=torch.tensor(data), unscale=unscale, dtype=dtype).item()
+    elif isinstance(data, list):
+      return self._quant(data=torch.tensor(data), unscale=unscale, dtype=dtype).tolist()
+    elif isinstance(data, np.ndarray):
+      return self._quant(data=torch.tensor(data), unscale=unscale, dtype=dtype).numpy()
+    elif torch.is_tensor(data):
+      return self._quant(data=data, unscale=unscale, dtype=dtype).detach()
+    else:
+      raise TypeError("Unsupported data type")
+
+  def _quant(self, data, unscale, dtype) -> torch.Tensor:
+    raise NotImplementedError()
\ No newline at end of file
diff --git a/modneflib/modnef/templates/template_model.json b/modneflib/modnef/templates/template_model.json
index ff70dadea9b322c001f6bce578a3980bdd5e10e1..4b91fca4c13f9d15560e661aadd996ef38475e9f 100644
--- a/modneflib/modnef/templates/template_model.json
+++ b/modneflib/modnef/templates/template_model.json
@@ -85,7 +85,6 @@
   "train" : {
     "name" : "Train",
     "n_epoch" : 5,
-    "step_save" : -1,
     "save_best_model" : true,
     "save_history" : true,
     "plot" : true
@@ -104,7 +103,7 @@
   },
 
   "vhdl" : {
-    "best_model" : false,
+    "use_best_model" : false,
     "driver_config" : "driver.yml",
     "file_name" : null
   },
@@ -112,7 +111,7 @@
   "fpga" : {
     "name" : "FPGA Evaluation",
     "driver_file" : "driver.yml",
-    "board_path" : "/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_A10OOCRD-if00-port0",
+    "board_path" : "board path here",
     "conf_matrix" : true
   },
 
diff --git a/modneflib/setup.py b/modneflib/setup.py
index 5e185d791e71a62e13b16943218eafb006deee69..23a3b1f460c1803b328cd84643cfa39546ab8ab4 100644
--- a/modneflib/setup.py
+++ b/modneflib/setup.py
@@ -6,7 +6,7 @@ setup(
         version = "1.0.0",
         description="ModNEF python librairy",
         author="Aurelie Saulquin",
-        install_requires=["networkx", "matplotlib", "pyyaml"],
+        install_requires=["networkx", "matplotlib", "pyyaml", "torch", "snntorch"],
         include_package_data=True,
         entry_points={
           "console_scripts": [