diff --git a/ModNEF_Sources/modules/bias.vhd b/ModNEF_Sources/modules/bias.vhd
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/ModNEF_Sources/modules/neurons/BLIF/blif_parallel.vhd b/ModNEF_Sources/modules/neurons/BLIF/blif_parallel.vhd
index 261ab777c14df5f2064313aaf7f02826909e069d..6564a366f137591f58d099f726cf2dbaec4f64bb 100644
--- a/ModNEF_Sources/modules/neurons/BLIF/blif_parallel.vhd
+++ b/ModNEF_Sources/modules/neurons/BLIF/blif_parallel.vhd
@@ -130,7 +130,7 @@ architecture Behavioral of BLif_Parallel is
 
   -- type definition
   type reception_state_t    is (idle, request, get_data);
-  type transmission_state_t is (idle, voltage_update, check_arbitration, request, accept, wait_arbitration, arbitration_finish);
+  type transmission_state_t is (idle, voltage_update, check_arbitration, request, accept, wait_arbitration);
 
   -- ram signals
   signal data_read  : std_logic_vector((output_neuron*weight_size)-1 downto 0);
@@ -249,8 +249,9 @@ begin
           when check_arbitration =>
             
             if spikes = no_spike then
-              transmission_state <= arbitration_finish;
               o_emu_busy <= '0';
+              transmission_state <= idle;
+              tr_fsm_en := '0';
             else
               transmission_state <= request;
               arb_spikes <= spikes;
@@ -278,15 +279,12 @@ begin
           when wait_arbitration =>
             start_arb <= '0';
             if arb_busy = '0' then
-              transmission_state <= arbitration_finish;
+              transmission_state <= idle;
+              o_emu_busy <= '0';
+              tr_fsm_en := '0';
             else
               transmission_state <= wait_arbitration;
             end if;  
-              
-          when arbitration_finish =>
-            transmission_state <= idle;
-            o_emu_busy <= '0';
-            tr_fsm_en := '0';
         end case;
       end if;
     end if;
diff --git a/ModNEF_Sources/modules/neurons/BLIF/blif_sequential.vhd b/ModNEF_Sources/modules/neurons/BLIF/blif_sequential.vhd
index 00dd851f1bd502d223f72fb5fc0f16b827974522..6cc760a32b972fb134be7b245c68ab851d931694 100644
--- a/ModNEF_Sources/modules/neurons/BLIF/blif_sequential.vhd
+++ b/ModNEF_Sources/modules/neurons/BLIF/blif_sequential.vhd
@@ -88,7 +88,7 @@ architecture Behavioral of BLif_Sequential is
   -- type definition
   type array_t              is array(output_neuron-1 downto 0) of std_logic_vector(variable_size-1 downto 0);
   type transmission_state_t is (idle, request, accept, get_voltage, emulate, set_voltage, emulate_finish);
-  type reception_state_t    is (idle, request, wait_data, get_data, update_current);
+  type reception_state_t    is (idle, request, get_data);
   
   -- ram signals
   signal data_read  : std_logic_vector((output_neuron*weight_size)-1 downto 0);
@@ -151,13 +151,6 @@ begin
               reception_state <= request;
             end if;
 
-          when wait_data =>
-            if i_emu_busy = '1' then
-              reception_state <= get_data;
-            else
-              reception_state <= wait_data;
-            end if;
-
           when get_data =>
             spike_flag <= i_spike_flag;
             if i_emu_busy='0' and spike_flag = '0' then
@@ -169,10 +162,6 @@ begin
               current_en <= '1';
               reception_state <= get_data;
             end if;
-
-          when update_current =>
-            reception_state <= idle;
-            rec_fsm_en := '0';
         end case;
       end if;
     end if;
@@ -256,7 +245,6 @@ begin
           when accept =>
             if i_ack <= '0' then
               transmission_state <= get_voltage;
-              o_emu_busy <= '1';
             else
               transmission_state <= accept;
               o_req <= '0';
diff --git a/ModNEF_Sources/modules/neurons/BLIF/rblif_parallel.vhd b/ModNEF_Sources/modules/neurons/BLIF/rblif_parallel.vhd
index 16f9528ab45ea3f75c77da2d565725076ed70007..4fdb9c13587989385207042fa7b9404062d3e456 100644
--- a/ModNEF_Sources/modules/neurons/BLIF/rblif_parallel.vhd
+++ b/ModNEF_Sources/modules/neurons/BLIF/rblif_parallel.vhd
@@ -7,7 +7,7 @@
 -- Authors : Aurelie Saulquin
 -- Email : aurelie.saulquin@univ-lille.fr
 --
--- Version : 1.1.0
+-- Version : 1.1.1
 -- Version comment : stable version 
 --
 -- Licenses : cern-ohl-s-2.0
@@ -251,7 +251,6 @@ begin
 
     if i_start_emu = '1' then
       tr_fsm_en := '1';
-      transmission_neuron_en <= '1';
     end if;
     
     if rising_edge(i_clk) then
@@ -260,8 +259,8 @@ begin
         start_calc <= '0';
         o_emu_busy <= '0';
         o_req <= '0';
-        rec_ram_en <= '1';
-        rec_neuron_en <= '1';
+        rec_ram_en <= '0';
+        rec_neuron_en <= '0';
         rec_spike_flag <= '0';
       else
         case transmission_state is
@@ -283,7 +282,7 @@ begin
             end if;
 
           when voltage_update =>
-            transmission_neuron_en <= '0';
+            transmission_neuron_en <= '1';
             start_calc <= '0';
             transmission_state <= check_arbitration;
 
@@ -311,6 +310,7 @@ begin
               transmission_state <= wait_arbitration;
               start_arb <= '1';
               rec_ram_en <= '1';
+              rec_neuron_en <= '1';
               
               rec_spike_flag <= arb_spike_flag;
             else
@@ -331,6 +331,7 @@ begin
             transmission_state <= idle;
             o_emu_busy <= '0';
             rec_neuron_en <= '0';
+            rec_ram_en <= '0';
             tr_fsm_en := '0';
         end case;
       end if;
diff --git a/ModNEF_Sources/modules/neurons/BLIF/rblif_sequential.vhd b/ModNEF_Sources/modules/neurons/BLIF/rblif_sequential.vhd
index 11b43b5285e66126a4b6477f85ebcb8862569a1f..745760a8d81f52b31415b706c15b616f02e1a734 100644
--- a/ModNEF_Sources/modules/neurons/BLIF/rblif_sequential.vhd
+++ b/ModNEF_Sources/modules/neurons/BLIF/rblif_sequential.vhd
@@ -88,7 +88,7 @@ architecture Behavioral of RBLif_Sequential is
 
   -- type definition
   type array_t is array(output_neuron-1 downto 0) of std_logic_vector(variable_size-1 downto 0);
-  type reception_state_t is (idle, request, wait_data, get_data, update_current);
+  type reception_state_t is (idle, request, get_data);
   type transmission_state_t is (idle, request, accept, get_voltage, emulate, set_voltage, emulate_finish);
 
   -- output signals
@@ -161,13 +161,6 @@ begin
               reception_state <= request;
             end if;
 
-          when wait_data =>
-            if i_emu_busy = '1' then
-              reception_state <= get_data;
-            else
-              reception_state <= wait_data;
-            end if;
-
           when get_data =>
             spike_flag <= i_spike_flag;
             if i_emu_busy='0' and spike_flag = '0' then
@@ -179,10 +172,6 @@ begin
               current_en <= '1';
               reception_state <= get_data;
             end if;
-
-          when update_current =>
-            reception_state <= idle;
-            rec_fsm_en := '0';
         end case;
       end if;
     end if;
@@ -305,7 +294,6 @@ begin
           when accept =>
             if i_ack <= '0' then
               transmission_state <= get_voltage;
-              o_emu_busy <= '1';
               rec_ram_en <= '1';
               rec_current_en <= '1';
             else
diff --git a/ModNEF_Sources/modules/neurons/SLIF/rslif_parallel.vhd b/ModNEF_Sources/modules/neurons/SLIF/rslif_parallel.vhd
index 4e181835525b78a2f39c9cf9fed7fa1dc8c5b3d8..894bed2072a5453db321baf8ad6d1490976d80c1 100644
--- a/ModNEF_Sources/modules/neurons/SLIF/rslif_parallel.vhd
+++ b/ModNEF_Sources/modules/neurons/SLIF/rslif_parallel.vhd
@@ -7,7 +7,7 @@
 -- Authors : Aurelie Saulquin
 -- Email : aurelie.saulquin@univ-lille.fr
 --
--- Version : 1.1.0
+-- Version : 1.1.1
 -- Version comment : stable version 
 --
 -- Licenses : cern-ohl-s-2.0
@@ -240,8 +240,8 @@ begin
         start_calc <= '0';
         o_emu_busy <= '0';
         o_req <= '0';
-        rec_ram_en <= '1';
-        rec_neuron_en <= '1';
+        rec_ram_en <= '0';
+        rec_neuron_en <= '0';
         rec_spike_flag <= '0';
       else
         case transmission_state is
@@ -263,7 +263,7 @@ begin
             end if;
 
           when voltage_update =>
-            transmission_neuron_en <= '0';
+            transmission_neuron_en <= '1';
             start_calc <= '0';
             transmission_state <= check_arbitration;
 
@@ -291,6 +291,7 @@ begin
               transmission_state <= wait_arbitration;
               start_arb <= '1';
               rec_ram_en <= '1';
+              rec_neuron_en <= '1';
               
               rec_spike_flag <= arb_spike_flag;
             else
@@ -311,6 +312,7 @@ begin
             transmission_state <= idle;
             o_emu_busy <= '0';
             rec_neuron_en <= '0';
+            rec_ram_en <= '0';
             tr_fsm_en := '0';
         end case;
       end if;
@@ -338,7 +340,7 @@ begin
     mem_init_file => mem_init_file_rec
   ) port map (
     i_clk => i_clk,
-    i_en => '1',
+    i_en => rec_ram_en,
     i_addr => output_aer,
     o_data => rec_data_read
   );
diff --git a/ModNEF_Sources/modules/neurons/SLIF/rslif_sequential.vhd b/ModNEF_Sources/modules/neurons/SLIF/rslif_sequential.vhd
index d975f60b74bf2c96d96b90b0badec180b1358667..cc01a68a48500e5cf7503511c0ee54662336fb81 100644
--- a/ModNEF_Sources/modules/neurons/SLIF/rslif_sequential.vhd
+++ b/ModNEF_Sources/modules/neurons/SLIF/rslif_sequential.vhd
@@ -88,7 +88,7 @@ architecture Behavioral of RSLif_Sequential is
   
   -- type definition
   type array_t              is array(output_neuron-1 downto 0) of std_logic_vector(variable_size-1 downto 0);
-  type reception_state_t    is (idle, request, wait_data, get_data, update_current);
+  type reception_state_t    is (idle, request, get_data);
   type transmission_state_t is (idle, request, accept, get_voltage, emulate, set_voltage, emulate_finish);
 
   -- output signals
@@ -161,13 +161,6 @@ begin
               reception_state <= request;
             end if;
 
-          when wait_data =>
-            if i_emu_busy = '1' then
-              reception_state <= get_data;
-            else
-              reception_state <= wait_data;
-            end if;
-
           when get_data =>
             spike_flag <= i_spike_flag;
             if i_emu_busy='0' and spike_flag = '0' then
@@ -179,10 +172,6 @@ begin
               current_en <= '1';
               reception_state <= get_data;
             end if;
-
-          when update_current =>
-            reception_state <= idle;
-            rec_fsm_en := '0';
         end case;
       end if;
     end if;
@@ -305,7 +294,6 @@ begin
           when accept =>
             if i_ack <= '0' then
               transmission_state <= get_voltage;
-              o_emu_busy <= '1';
               rec_ram_en <= '1';
               rec_current_en <= '1';
             else
diff --git a/ModNEF_Sources/modules/neurons/SLIF/simplified_lif.vhd b/ModNEF_Sources/modules/neurons/SLIF/simplified_lif.vhd
index 892bcdb717be626b5addded26c3f64f4791eba9c..793a13bfd95c3740f58a3a44a11cc8b728d4e1b5 100644
--- a/ModNEF_Sources/modules/neurons/SLIF/simplified_lif.vhd
+++ b/ModNEF_Sources/modules/neurons/SLIF/simplified_lif.vhd
@@ -7,7 +7,7 @@
 -- Authors : Aurelie Saulquin
 -- Email : aurelie.saulquin@univ-lille.fr
 --
--- Version : 1.2.0
+-- Version : 1.3.0
 -- Version comment : stable version 
 --
 -- Licenses : cern-ohl-s-2.0
@@ -70,6 +70,8 @@ begin
   o_spike <= spike;
 
   process(i_clk, i_inc_I, i_calc, i_en)
+    variable I : std_logic_vector(weight_size-1 downto 0);
+    variable I_rec : std_logic_vector(weight_size-1 downto 0);
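+    -- I and I_rec hold the weight contributions for the current cycle, gated by
+    -- i_inc_I / i_inc_I_rec (zero when the corresponding flag is low); they replace
+    -- the registered spike_flag / weight pair used by the previous version.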
   begin
     if rising_edge(i_clk) then
       if i_reset = '1' then
@@ -78,14 +80,21 @@ begin
       
       if i_en = '1' then
         if weight_signed then
-          if spike_flag = '1' or spike_flag_rec = '1' then
-            if spike_flag = '1' and spike_flag_rec = '0' then
-              V <= std_logic_vector(signed(V)+signed(weight));
-            elsif spike_flag = '0' and spike_flag_rec = '1' then
-              V <= std_logic_vector(signed(V)+signed(weight_rec));
+          if i_inc_I = '1' or i_inc_I_rec = '1' then
+            
+            if i_inc_I = '1' then
+              I := std_logic_vector(signed(i_w));
             else
-              V <= std_logic_vector(signed(V)+signed(weight)+signed(weight_rec));
-            end if; 
+              I := (others=>'0');
+            end if;
+
+            if i_inc_I_rec = '1' then
+              I_rec := std_logic_vector(signed(i_w_rec));
+            else
+              I_rec := (others=>'0');
+            end if;
+
+            V <= std_logic_vector(signed(V) + signed(I) + signed(I_rec));
           elsif i_calc = '1' then
             if signed(V) >= signed(v_threshold+v_leak) then
               spike <= '1';
@@ -99,15 +108,24 @@ begin
             end if;
           end if;
         else
-          if spike_flag = '1' or spike_flag_rec = '1' then
-            if spike_flag = '1' and spike_flag_rec = '0' then
-              V <= std_logic_vector(unsigned(V)+unsigned(weight));
-            elsif spike_flag = '0' and spike_flag_rec = '1' then
-              V <= std_logic_vector(unsigned(V)+unsigned(weight_rec));
-            else
-              V <= std_logic_vector(unsigned(V)+unsigned(weight)+unsigned(weight_rec));
-            end if; 
-          elsif i_calc = '1' then
+          if i_inc_I = '1' or i_inc_I_rec = '1' then
+
+            if i_inc_I = '1' then
+              I := std_logic_vector(unsigned(i_w));
+            else
+              I := (others=>'0');
+            end if;
+
+            if i_inc_I_rec = '1' then
+              I_rec := std_logic_vector(unsigned(i_w_rec));
+            else
+              I_rec := (others=>'0');
+            end if;
+
+            V <= std_logic_vector(unsigned(V) + unsigned(I) + unsigned(I_rec));
+
+          elsif i_calc = '1' then
             if unsigned(V) >= unsigned(v_threshold+v_leak) then
               spike <= '1';
               V <= V_rest;
@@ -121,10 +139,10 @@ begin
           end if;
         end if;
 
-        spike_flag <= i_inc_I;
-        weight <= i_w;
-        spike_flag_rec <= i_inc_I_rec;
-        weight_rec <= i_w_rec;
+        -- spike_flag <= i_inc_I;
+        -- weight <= i_w;
+        -- spike_flag_rec <= i_inc_I_rec;
+        -- weight_rec <= i_w_rec;
 
       end if;
     end if;
diff --git a/ModNEF_Sources/modules/neurons/SLIF/slif_parallel.vhd b/ModNEF_Sources/modules/neurons/SLIF/slif_parallel.vhd
index 5a50090fb96cc39e42ff9e7d7a7eed3466601172..8c4b605ffa4f26a0b6f51955599f9ad9414c04ad 100644
--- a/ModNEF_Sources/modules/neurons/SLIF/slif_parallel.vhd
+++ b/ModNEF_Sources/modules/neurons/SLIF/slif_parallel.vhd
@@ -131,7 +131,7 @@ architecture Behavioral of SLif_Parallel is
 
   -- type definition
   type reception_state_t    is (idle, request, get_data);
-  type transmission_state_t is (idle, voltage_update, check_arbitration, request, accept, wait_arbitration, arbitration_finish);
+  type transmission_state_t is (idle, voltage_update, check_arbitration, request, accept, wait_arbitration);
 
   -- ram signals
   signal data_read  : std_logic_vector((output_neuron*weight_size)-1 downto 0);
@@ -253,8 +253,9 @@ begin
 
           when check_arbitration =>
             if spikes = no_spike then
-              transmission_state <= arbitration_finish;
+              transmission_state <= idle;
               o_emu_busy <= '0';
+              tr_fsm_en := '0';
             else
               transmission_state <= request;
               arb_spikes <= spikes;
@@ -281,15 +282,12 @@ begin
           when wait_arbitration =>
             start_arb <= '0';
             if arb_busy = '0' then
-              transmission_state <= arbitration_finish;
+              transmission_state <= idle;
+              o_emu_busy <= '0';
+              tr_fsm_en := '0';
             else
               transmission_state <= wait_arbitration;
             end if;  
-              
-          when arbitration_finish =>
-            transmission_state <= idle;
-            o_emu_busy <= '0';
-            tr_fsm_en := '0';
         end case;
       end if;
     end if;
diff --git a/ModNEF_Sources/modules/neurons/SLIF/slif_sequential.vhd b/ModNEF_Sources/modules/neurons/SLIF/slif_sequential.vhd
index 391eaa6fb463493dfd5be29910427856ac309f98..362d9829eab1ec718b882841ff1ebc0af2cbcfa3 100644
--- a/ModNEF_Sources/modules/neurons/SLIF/slif_sequential.vhd
+++ b/ModNEF_Sources/modules/neurons/SLIF/slif_sequential.vhd
@@ -89,7 +89,7 @@ architecture Behavioral of SLif_Sequential is
   -- type definition
   type array_t              is array(output_neuron-1 downto 0) of std_logic_vector(variable_size-1 downto 0);
   type transmission_state_t is (idle, request, accept, get_voltage, emulate, set_voltage, emulate_finish);
-  type reception_state_t    is (idle, request, wait_data, get_data, update_current);
+  type reception_state_t    is (idle, request, get_data);
 
   -- ram signals
   signal data_read  : std_logic_vector((output_neuron*weight_size)-1 downto 0);
@@ -149,13 +149,6 @@ begin
               reception_state <= request;
             end if;
 
-          when wait_data =>
-            if i_emu_busy = '1' then
-              reception_state <= get_data;
-            else
-              reception_state <= wait_data;
-            end if;
-
           when get_data =>
             spike_flag <= i_spike_flag;
             if i_emu_busy='0' and spike_flag = '0' then
@@ -167,10 +160,6 @@ begin
               current_en <= '1';
               reception_state <= get_data;
             end if;
-
-          when update_current =>
-            reception_state <= idle;
-            rec_fsm_en := '0';
         end case;
       end if;
     end if;
@@ -259,7 +248,6 @@ begin
           when accept =>
             if i_ack <= '0' then
               transmission_state <= get_voltage;
-              o_emu_busy <= '1';
             else
               transmission_state <= accept;
               o_req <= '0';
diff --git a/ModNEF_Sources/modules/neurons/ShiftLif/rshiftlif_parallel.vhd b/ModNEF_Sources/modules/neurons/ShiftLif/rshiftlif_parallel.vhd
index 1af3b8a8e0c779c114cdb0e08196d8d2ecdf3323..8d3d6adc1cbdc7b3ce296e84588644fca8e5e398 100644
--- a/ModNEF_Sources/modules/neurons/ShiftLif/rshiftlif_parallel.vhd
+++ b/ModNEF_Sources/modules/neurons/ShiftLif/rshiftlif_parallel.vhd
@@ -7,7 +7,7 @@
 -- Authors : Aurelie Saulquin
 -- Email : aurelie.saulquin@univ-lille.fr
 --
--- Version : 1.1.0
+-- Version : 1.1.1
 -- Version comment : stable version 
 --
 -- Licenses : cern-ohl-s-2.0
@@ -237,8 +237,8 @@ begin
         start_calc <= '0';
         o_emu_busy <= '0';
         o_req <= '0';
-        rec_ram_en <= '1';
-        rec_neuron_en <= '1';
+        rec_ram_en <= '0';
+        rec_neuron_en <= '0';
         rec_spike_flag <= '0';
       else
         case transmission_state is
@@ -260,7 +260,7 @@ begin
             end if;
 
           when voltage_update =>
-            transmission_neuron_en <= '0';
+            transmission_neuron_en <= '1';
             start_calc <= '0';
             transmission_state <= check_arbitration;
 
@@ -288,6 +288,7 @@ begin
               transmission_state <= wait_arbitration;
               start_arb <= '1';
               rec_ram_en <= '1';
+              rec_neuron_en <= '1';
               
               rec_spike_flag <= arb_spike_flag;
             else
@@ -308,6 +309,7 @@ begin
             transmission_state <= idle;
             o_emu_busy <= '0';
             rec_neuron_en <= '0';
+            rec_ram_en <= '0';
             tr_fsm_en := '0';
         end case;
       end if;
@@ -335,7 +337,7 @@ begin
     mem_init_file => mem_init_file_rec
   ) port map (
     i_clk => i_clk,
-    i_en => '1',
+    i_en => rec_ram_en,
     i_addr => output_aer,
     o_data => rec_data_read
   );
diff --git a/ModNEF_Sources/modules/neurons/ShiftLif/rshiftlif_sequential.vhd b/ModNEF_Sources/modules/neurons/ShiftLif/rshiftlif_sequential.vhd
index fa3c0cb267fb8f7124fa9c6932ea4135e6cbc1b1..9854bca3ccc5d7d3abca4c02f7abbabce6066530 100644
--- a/ModNEF_Sources/modules/neurons/ShiftLif/rshiftlif_sequential.vhd
+++ b/ModNEF_Sources/modules/neurons/ShiftLif/rshiftlif_sequential.vhd
@@ -85,7 +85,7 @@ architecture Behavioral of RShiftLif_Sequential is
 
   -- type definition
   type array_t              is array(output_neuron-1 downto 0) of std_logic_vector(variable_size-1 downto 0);
-  type reception_state_t    is (idle, request, wait_data, get_data, update_current);
+  type reception_state_t    is (idle, request, get_data);
   type transmission_state_t is (idle, request, accept, get_voltage, emulate, set_voltage, emulate_finish);
 
   -- output signals
@@ -158,13 +158,6 @@ begin
               reception_state <= request;
             end if;
 
-          when wait_data =>
-            if i_emu_busy = '1' then
-              reception_state <= get_data;
-            else
-              reception_state <= wait_data;
-            end if;
-
           when get_data =>
             spike_flag <= i_spike_flag;
             if i_emu_busy='0' and spike_flag = '0' then
@@ -176,10 +169,6 @@ begin
               current_en <= '1';
               reception_state <= get_data;
             end if;
-
-          when update_current =>
-            reception_state <= idle;
-            rec_fsm_en := '0';
         end case;
       end if;
     end if;
@@ -302,7 +291,6 @@ begin
           when accept =>
             if i_ack <= '0' then
               transmission_state <= get_voltage;
-              o_emu_busy <= '1';
               rec_ram_en <= '1';
               rec_current_en <= '1';
             else
diff --git a/ModNEF_Sources/modules/neurons/ShiftLif/shift_lif.vhd b/ModNEF_Sources/modules/neurons/ShiftLif/shift_lif.vhd
index 84e97fa113973069f2746b271a066ee365640a92..9f6b46094e4d795df63e85559d716f5a1980e758 100644
--- a/ModNEF_Sources/modules/neurons/ShiftLif/shift_lif.vhd
+++ b/ModNEF_Sources/modules/neurons/ShiftLif/shift_lif.vhd
@@ -7,7 +7,7 @@
 -- Authors : Aurelie Saulquin
 -- Email : aurelie.saulquin@univ-lille.fr
 --
--- Version : 1.1.0
+-- Version : 1.2.0
 -- Version comment : stable version 
 --
 -- Licenses : cern-ohl-s-2.0
@@ -69,6 +69,9 @@ begin
   o_spike <= spike;
 
   process(i_clk, i_inc_I, i_calc, i_en)
+    variable I : std_logic_vector(weight_size-1 downto 0);
+    variable I_rec : std_logic_vector(weight_size-1 downto 0);
+    variable V_buff : std_logic_vector(variable_size-1 downto 0);
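+    -- I / I_rec gate the feed-forward and recurrent weights the same way as in
+    -- simplified_lif; V_buff holds the leaked potential V - (V >> shift) so the
+    -- leak is now applied before the threshold comparison.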
   begin
     if rising_edge(i_clk) then
       if i_reset = '1' then
@@ -77,46 +80,62 @@ begin
 
       if i_en = '1' then
         if weight_signed then
-          if spike_flag = '1' or spike_flag_rec = '1' then
-            if spike_flag = '1' and spike_flag_rec = '0' then
-              V <= std_logic_vector(signed(V) + signed(weight));
-            elsif spike_flag = '0' and spike_flag_rec = '1' then
-              V <= std_logic_vector(signed(V) + signed(weight_rec));
+          if i_inc_I = '1' or i_inc_I_rec = '1' then
+            
+            if i_inc_I = '1' then
+              I := std_logic_vector(signed(i_w));
             else
-              V <= std_logic_vector(signed(V) + signed(weight) + signed(weight_rec));
+              I := (others=>'0');
             end if;
+
+            if i_inc_I_rec = '1' then
+              I_rec := std_logic_vector(signed(i_w_rec));
+            else
+              I_rec := (others=>'0');
+            end if;
+
+            V <= std_logic_vector(signed(V) + signed(I) + signed(I_rec));
           elsif i_calc='1' then
-            if signed(V) >= signed(v_threshold) then
+            V_buff := std_logic_vector(signed(V)-signed(shift_right(signed(V), shift)));
+            if signed(V_buff) >= signed(v_threshold) then
               spike <= '1';
               if reset = "zero" then
                 V <= (others=>'0');
               else 
-                V <= std_logic_vector(signed(V) - signed(v_threshold));
+                V <= std_logic_vector(signed(V_buff) - signed(v_threshold));
               end if;
             else
-              V <= std_logic_vector(signed(V)-signed(shift_right(signed(V), shift)));
+              V <= V_buff;
               spike <= '0';
             end if;
           end if;
         else
-          if spike_flag = '1' or spike_flag_rec = '1' then
-            if spike_flag = '1' and spike_flag_rec = '0' then
-              V <= std_logic_vector(unsigned(V) + unsigned(weight));
-            elsif spike_flag = '0' and spike_flag_rec = '1' then
-              V <= std_logic_vector(unsigned(V) + unsigned(weight_rec));
+          if i_inc_I = '1' or i_inc_I_rec = '1' then
+              
+            if i_inc_I = '1' then
+              I := std_logic_vector(unsigned(i_w));
             else
-              V <= std_logic_vector(unsigned(V) + unsigned(weight) + unsigned(weight_rec));
+              I := (others=>'0');
             end if;
+
+            if i_inc_I_rec = '1' then
+              I_rec := std_logic_vector(unsigned(i_w_rec));
+            else
+              I_rec := (others=>'0');
+            end if;
+
+            V <= std_logic_vector(unsigned(V) + unsigned(I) + unsigned(I_rec));
           elsif i_calc='1' then
-            if unsigned(V) >= unsigned(v_threshold) then
+            V_buff := std_logic_vector(unsigned(V)-unsigned(shift_right(unsigned(V), shift)));
+            if unsigned(V_buff) >= unsigned(v_threshold) then
               spike <= '1';
               if reset = "zero" then
                 V <= (others=>'0');
               else 
-                V <= std_logic_vector(unsigned(V) - unsigned(v_threshold));
+                V <= std_logic_vector(unsigned(V_buff) - unsigned(v_threshold));
               end if;
             else
-              V <= std_logic_vector(unsigned(V)-unsigned(shift_right(unsigned(V), shift)));
+              V <= V_buff;
               spike <= '0';
             end if;
           end if;
diff --git a/ModNEF_Sources/modules/neurons/ShiftLif/shiftlif_parallel.vhd b/ModNEF_Sources/modules/neurons/ShiftLif/shiftlif_parallel.vhd
index f03d9e803f6075ec6b5d67ec9ba78260e68f7720..32a3aca72754c8862b01753e55ee7938935498a4 100644
--- a/ModNEF_Sources/modules/neurons/ShiftLif/shiftlif_parallel.vhd
+++ b/ModNEF_Sources/modules/neurons/ShiftLif/shiftlif_parallel.vhd
@@ -339,3 +339,4 @@ begin
   end generate neuron_generation;
 
 end Behavioral;
+
diff --git a/ModNEF_Sources/modules/neurons/ShiftLif/shiftlif_sequential.vhd b/ModNEF_Sources/modules/neurons/ShiftLif/shiftlif_sequential.vhd
index b86bb30c4323e616681da8a1b8f5900ef8a1f751..0533fdc7e7f1b63ce0966a51cea9b063cc8c4219 100644
--- a/ModNEF_Sources/modules/neurons/ShiftLif/shiftlif_sequential.vhd
+++ b/ModNEF_Sources/modules/neurons/ShiftLif/shiftlif_sequential.vhd
@@ -85,7 +85,7 @@ architecture Behavioral of ShiftLif_Sequential is
 
   -- type definition
   type array_t is array(output_neuron-1 downto 0) of std_logic_vector(variable_size-1 downto 0);
-  type reception_state_t is (idle, request, wait_data, get_data, update_current);
+  type reception_state_t is (idle, request, get_data);
   type transmission_state_t is (idle, request, accept, get_voltage, emulate, set_voltage, emulate_finish);
 
   -- ram signals
@@ -146,13 +146,6 @@ begin
               reception_state <= request;
             end if;
 
-          when wait_data =>
-            if i_emu_busy = '1' then
-              reception_state <= get_data;
-            else
-              reception_state <= wait_data;
-            end if;
-
           when get_data =>
             spike_flag <= i_spike_flag;
             if i_emu_busy='0' and spike_flag = '0' then
@@ -164,10 +157,6 @@ begin
               current_en <= '1';
               reception_state <= get_data;
             end if;
-
-          when update_current =>
-            reception_state <= idle;
-            rec_fsm_en := '0';
         end case;
       end if;
     end if;
@@ -252,7 +241,6 @@ begin
           when accept =>
             if i_ack <= '0' then
               transmission_state <= get_voltage;
-              o_emu_busy <= '1';
             else
               transmission_state <= accept;
               o_req <= '0';
diff --git a/ModNEF_Sources/modules/uart/uart_1step.vhd b/ModNEF_Sources/modules/uart/uart_1step.vhd
deleted file mode 100644
index 50ecbe828baaa98cfa3d61176d11c04d9a3b0244..0000000000000000000000000000000000000000
--- a/ModNEF_Sources/modules/uart/uart_1step.vhd
+++ /dev/null
@@ -1,390 +0,0 @@
-----------------------------------------------------------------------------------
---
--- Project : ModNEF
--- Component name : uart_1step
--- Depencies : uart_controller
---
--- Authors : Aurelie Saulquin
--- Email : aurelie.saulquin@univ-lille.fr
---
--- Version : 1.0
--- Version comment : stable version 
---
--- Licenses : cern-ohl-s-2.0
---
--- Description : 
--- UART component where one data transmission is use for one emulation step
--- Component will receive data, send all data to network and receive data 
--- from network and transmit it to computer
--- 
-----------------------------------------------------------------------------------
-
-library IEEE;
-use IEEE.std_logic_1164.all;
-use work.math.all;
-
-entity uart_1Step is
-  generic(
-    clk_freq          : integer := 100_000_000;
-    baud_rate         : integer := 115_200;
-
-    queue_read_depth  : integer := 32;
-    queue_read_type   : string := "fifo";
-
-    queue_write_type  : string := "fifo";
-
-    input_layer_size  : integer := 8;
-    output_layer_size : integer := 8
-  );
-  port (
-    i_clk             : in  std_logic;
-    i_en              : in  std_logic;
-
-    i_tx              : in  std_logic;
-    o_rx              : out std_logic;
-
-    i_emu_ready       : in  std_logic;
-    o_start_emu       : out std_logic;
-    o_reset_membrane  : out std_logic;
-
-    i_req             : in  std_logic;
-    o_ack             : out std_logic;
-    i_emu_busy        : in  std_logic;
-    i_spike_flag      : in  std_logic;
-    i_aer             : in  std_logic_vector(log_b(output_layer_size, 2)-1 downto 0);
-
-    o_req             : out std_logic;
-    i_ack             : in  std_logic;
-    o_emu_busy        : out std_logic;
-    o_spike_flag      : out std_logic;
-    o_aer             : out std_logic_vector(log_b(input_layer_size, 2)-1 downto 0)
-  );
-end uart_1Step;
-
-architecture Behavioral of uart_1Step is
-
-  component uart_controller is
-    generic(
-      clk_freq          : integer := 100_000_000;
-      baud_rate         : integer := 115_200;
-      oversamp_rate     : integer := 16;
-      
-      queue_read_depth  : integer := 64;
-      queue_read_width  : integer := 1;
-      queue_read_type   : string  := "fifo";
-
-      queue_write_depth : integer := 16;
-      queue_write_width : integer := 1;
-      queue_write_type  : string := "fifo"   -- fifo or lifo
-    );
-    port(
-      i_clk                 : in  std_logic;
-      i_en                  : in  std_logic;
-      o_busy                : out std_logic;
-      o_reset_detected      : out std_logic;
-
-      -- UART pins
-      i_tx                  : in  std_logic;
-      o_rx                  : out std_logic;
-      o_uart_busy           : out std_logic;       
-      
-      -- read I/O
-      o_read_data           : out std_logic_vector(queue_read_width*8-1 downto 0);
-      i_read_pop            : in  std_logic;
-      o_read_busy           : out std_logic;
-      o_read_queue_empty    : out std_logic;
-      o_read_queue_full     : out std_logic;
-
-      -- write I/O
-      i_start_transmission  : in  std_logic;
-      i_write_data          : in  std_logic_vector(queue_write_width*8-1 downto 0);
-      i_write_push          : in  std_logic;
-      o_write_busy          : out std_logic;
-      o_write_queue_empty   : out std_logic;
-      o_write_queue_full    : out std_logic
-        
-    );
-  end component;
-  
-  -- type definition
-  type emu_state_t              is (idle, wait_data, check_emu, emulate, wait_out_aer, send_aer, wait_transmission);
-  type uart_to_network_state_t  is (idle, check_emu, request, accept, transfert);
-  type network_to_uart_state_t  is (idle, wait_request, accept, wait_aer);
-
-  -- queue constant definition
-  --constant queue_read_depth : integer := 255;
-  constant queue_read_width : integer := log_b(queue_read_depth, 256);
-
-  constant queue_write_depth : integer := output_layer_size;
-  constant queue_write_width : integer := log_b(output_layer_size, 256);
-
-  -- read queue signals
-  signal read_data  : std_logic_vector(queue_read_width*8-1 downto 0) := (others=>'0');
-  signal read_pop   : std_logic := '0';
-  signal read_busy  : std_logic;
-  signal read_empty : std_logic;
-
-  -- write queue signals
-  signal write_data : std_logic_vector(queue_write_width*8-1 downto 0) := (others=>'0');
-  signal write_push : std_logic := '0';
-  signal write_busy : std_logic;
-  
-  -- uart signals
-  signal start_uart_transmission : std_logic := '0';
-
-  -- emulation signals
-  signal emu_state : emu_state_t := idle;
-  signal start_emu : std_logic;
-
-  -- membrane reset signals
-  signal reset_detected       : std_logic := '0';
-  signal reset_membrane       : std_logic := '0';
-
-  -- uart to network signals
-  signal uart_to_network_state  : uart_to_network_state_t := idle; 
-  signal uart_to_network_busy   : std_logic := '0';
-
-  -- network to uart signals
-  signal network_to_uart_state  : network_to_uart_state_t := idle;
-  signal network_to_uart_busy   : std_logic := '0';
-
-begin
-
-  o_start_emu <= start_emu;
-
-  o_reset_membrane <= reset_membrane;
-
-  -- controller FSM
-  process(i_clk, i_en)
-  begin
-    if rising_edge(i_clk) then
-      if i_en = '0' then
-        emu_state <= idle;
-        start_emu <= '0';
-        start_uart_transmission <= '0';
-      else
-        case emu_state is
-          when idle => 
-            start_emu <= '0';
-            start_uart_transmission <= '0';
-            reset_membrane <= '0';
-            if read_busy = '1' then
-              emu_state <= wait_data;
-            else
-              emu_state <= idle;
-            end if;
-
-          when wait_data =>
-            if read_busy = '0' then
-              emu_state <= check_emu;
-            else
-              emu_state <= wait_data;
-            end if;
-
-          when check_emu =>
-            if i_emu_ready = '1' then
-              emu_state <= emulate;
-              start_emu <= '1';
-            else
-              emu_state <= check_emu;
-            end if;
-
-          when emulate =>
-            start_emu <= '0';
-            if network_to_uart_busy = '1' then
-              emu_state <= wait_out_aer;
-            else
-              emu_state <= emulate;
-            end if;
-
-          when wait_out_aer =>
-            if i_emu_ready = '1' then
-              emu_state <= send_aer;
-              start_uart_transmission <= '1';
-              reset_membrane <= reset_detected;
-            else
-              emu_state <= wait_out_aer;
-            end if;
-
-          when send_aer =>
-            start_uart_transmission <= '0';
-            reset_membrane <= '0';
-            if  write_busy = '1' then
-              emu_state <= wait_transmission;
-            else
-              emu_state <= send_aer;
-            end if;
-
-          when wait_transmission =>
-            if write_busy = '0' then
-              emu_state <= idle;
-            else
-              emu_state <= wait_transmission;
-            end if;
-        end case;
-      end if;
-    end if;
-  end process;
-
-  -- Controller to network FSM
-  o_aer <= read_data(log_b(input_layer_size, 2)-1 downto 0) when uart_to_network_state = transfert else (others=>'0');
-  process(i_clk, i_en)
-  begin
-    if rising_edge(i_clk) then
-      if i_en = '0' then
-        o_req <= '0';
-        o_spike_flag <= '0';
-        o_emu_busy <= '0';
-        read_pop <= '0';
-        uart_to_network_busy <= '0';
-        uart_to_network_state <= idle;
-      else
-        case uart_to_network_state is
-          when idle =>
-            o_req <= '0';
-            o_spike_flag <= '0';
-            read_pop <= '0';
-            if start_emu = '1' then
-              uart_to_network_state <= check_emu;
-              o_emu_busy <= '1';
-              uart_to_network_busy <= '1';
-            else
-              uart_to_network_state <= idle;
-              o_emu_busy <= '0';
-              uart_to_network_busy <= '0';
-            end if;
-
-          when check_emu =>
-            if read_empty = '1' then
-              uart_to_network_state <= idle;
-              o_emu_busy <= '0';
-              uart_to_network_busy <= '0';
-            else
-              uart_to_network_state <= request;
-              o_req <= '1';
-            end if;
-
-          when request =>
-            if i_ack = '1' then
-              uart_to_network_state <= accept;
-              o_req <= '0';
-            else
-              uart_to_network_state <= request;
-              o_req <= '1';
-            end if;
-
-          when accept =>
-            if i_ack = '0' then
-              uart_to_network_state <= transfert;
-              read_pop <= '1';
-            else
-              uart_to_network_state <= accept;
-            end if;
-
-          when transfert =>
-            if read_empty = '1' then
-              uart_to_network_state <= idle;
-              o_emu_busy <= '0';
-              read_pop <= '0';
-              o_spike_flag <= '0';
-              uart_to_network_busy <= '0';
-            else
-              uart_to_network_state <= transfert;
-              read_pop <= '1';
-              o_spike_flag <= '1';
-            end if;
-        end case;
-      end if;
-    end if;
-  end process;
-  
-
-  write_data(log_b(output_layer_size, 2)-1 downto 0) <= i_aer when network_to_uart_state = wait_aer else (others=>'0');
-  write_push <= i_spike_flag when network_to_uart_state = wait_aer else '0';
-
-  -- Network to Controller FSM
-  process(i_clk, i_en)
-  begin
-    if i_en = '0' then
-      network_to_uart_state <= idle;
-      network_to_uart_busy <= '0';
-      o_ack <= '0';
-    else
-      if rising_edge(i_clk) then
-        case network_to_uart_state is
-          when idle =>
-            o_ack <= '0';
-            if i_emu_busy = '1' then
-              network_to_uart_state <= wait_request;
-              network_to_uart_busy <= '1';
-            else
-              network_to_uart_state <= idle;
-              network_to_uart_busy <= '0';
-            end if;
-
-          when wait_request =>
-            if i_emu_busy = '0' then
-              network_to_uart_state <= idle;
-              network_to_uart_busy <= '0';
-            elsif i_req = '1' then
-              o_ack <= '1';
-              network_to_uart_state <= accept;
-            else
-              network_to_uart_state <= wait_request;
-            end if;
-
-          when accept =>
-            if i_req = '0' then
-              network_to_uart_state <= wait_aer;
-              o_ack <= '0';
-            else
-              network_to_uart_state <= accept;
-              o_ack <= '1';
-            end if;
-
-          when wait_aer => 
-            if i_emu_busy = '0' then
-              network_to_uart_state <= idle;
-              network_to_uart_busy <= '0';
-            else 
-              network_to_uart_state <= wait_aer;
-            end if;
-        end case;
-      end if;
-    end if;
-  end process;
-
-  c_uart_controller : uart_controller generic map(
-    clk_freq => clk_freq,
-    baud_rate => baud_rate,
-    oversamp_rate => 16,
-
-    queue_read_depth => queue_read_depth,
-    queue_read_width => queue_read_width,
-    queue_read_type => queue_read_type,
-
-    queue_write_depth => queue_write_depth,
-    queue_write_width => queue_write_width,
-    queue_write_type => queue_write_type
-  ) port map(
-    i_clk => i_clk,
-    i_en => i_en,
-    o_busy => open,
-    o_reset_detected => reset_detected,
-    i_tx => i_tx,
-    o_rx => o_rx,
-    o_uart_busy => open,
-    o_read_data => read_data,
-    i_read_pop => read_pop,
-    o_read_busy => read_busy,
-    o_read_queue_empty => read_empty,
-    o_read_queue_full => open,
-    i_start_transmission => start_uart_transmission,
-    i_write_data => write_data,
-    i_write_push => write_push,
-    o_write_busy => write_busy,
-    o_write_queue_empty => open,
-    o_write_queue_full => open
-  );
-
-end Behavioral;
diff --git a/ModNEF_Sources/modules/uart/uart_xstep.vhd b/ModNEF_Sources/modules/uart/uart_xstep.vhd
index efa627517e6d45757e228a1550fe72c74c21bd6c..43ade23fb24a4abc96bd3e07499f0679eb360a17 100644
--- a/ModNEF_Sources/modules/uart/uart_xstep.vhd
+++ b/ModNEF_Sources/modules/uart/uart_xstep.vhd
@@ -196,7 +196,7 @@ begin
             end if;
 
           when wait_out_aer =>
-            if i_emu_ready = '1' then
+            if i_emu_ready = '1' and network_to_uart_busy='0' then
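+              -- proceed only once the network-to-UART transfer has also gone idle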
               if read_empty = '1' then -- no more data to process
                 start_uart_transmission <= '1';
                 emu_state <= send_aer;
diff --git a/ModNEF_Sources/modules/uart/uart_xstep_timer.vhd b/ModNEF_Sources/modules/uart/uart_xstep_timer.vhd
index e38d9b256bcfae0799a7d855243c3d98993b1489..53d5a403d800bb20f43c47fc359f7d19fc51dff2 100644
--- a/ModNEF_Sources/modules/uart/uart_xstep_timer.vhd
+++ b/ModNEF_Sources/modules/uart/uart_xstep_timer.vhd
@@ -220,7 +220,7 @@ begin
             end if;
 
           when wait_out_aer =>
-            if i_emu_ready = '1' then
+            if i_emu_ready = '1' and network_to_uart_busy='0' then
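+              -- proceed only once the network-to-UART transfer has also gone idle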
               count_time <= '0';
               if read_empty = '1' then -- no more data to process
                 emu_state <= push_timer;
diff --git a/modneflib/modnef/arch_builder/modules/BLIF/blif.py b/modneflib/modnef/arch_builder/modules/BLIF/blif.py
index 4d5f09a6ff6741a947d87e0b931dc422d7b1864b..0caecf1e51726951047ea6f88f1a08202a8275d6 100644
--- a/modneflib/modnef/arch_builder/modules/BLIF/blif.py
+++ b/modneflib/modnef/arch_builder/modules/BLIF/blif.py
@@ -198,7 +198,8 @@ class BLif(ModNEFArchMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<bw) + two_comp(self.quantizer(weights[i][j]), bw)
+
+          w_line = (w_line<<bw) + two_comp(self.quantizer(weights[i][j], unscale=False, clamp=True), bw)
 
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
@@ -209,7 +210,7 @@ class BLif(ModNEFArchMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.weight_size) + self.quantizer(weights[i][j])
+          w_line = (w_line<<self.weight_size) + self.quantizer(weights[i][j], unscale=False, clamp=True)
         
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
diff --git a/modneflib/modnef/arch_builder/modules/BLIF/blif_debugger.py b/modneflib/modnef/arch_builder/modules/BLIF/blif_debugger.py
index f6f7ca9881e1d6252aff9e5cdd10b15037aa1ad3..294001987bc026786cf2ce953f1cf52183b1dd93 100644
--- a/modneflib/modnef/arch_builder/modules/BLIF/blif_debugger.py
+++ b/modneflib/modnef/arch_builder/modules/BLIF/blif_debugger.py
@@ -213,7 +213,7 @@ class BLif_Debugger(ModNEFDebuggerMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j]), self.quantizer.bitwidth)
+          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j], unscale=False, clamp=True), self.quantizer.bitwidth)
 
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
@@ -224,7 +224,7 @@ class BLif_Debugger(ModNEFDebuggerMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j])
+          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j], unscale=False, clamp=True)
         
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
diff --git a/modneflib/modnef/arch_builder/modules/BLIF/rblif.py b/modneflib/modnef/arch_builder/modules/BLIF/rblif.py
index 81f87c65b8ccbe95398c478cd6942ecdd21b4a40..d402fda94c19ba45b074b1adcaadb7ed7065a0cb 100644
--- a/modneflib/modnef/arch_builder/modules/BLIF/rblif.py
+++ b/modneflib/modnef/arch_builder/modules/BLIF/rblif.py
@@ -205,7 +205,7 @@ class RBLif(ModNEFArchMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j]), self.quantizer.bitwidth)
+          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j], unscale=False, clamp=True), self.quantizer.bitwidth)
 
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
@@ -213,7 +213,7 @@ class RBLif(ModNEFArchMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j])
+          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j], unscale=False, clamp=True)
         
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
@@ -224,7 +224,7 @@ class RBLif(ModNEFArchMod):
       for i in range(self.output_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(rec_weights[i][j]), self.quantizer.bitwidth)
+          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(rec_weights[i][j], unscale=False, clamp=True), self.quantizer.bitwidth)
 
         rec_mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
@@ -232,7 +232,7 @@ class RBLif(ModNEFArchMod):
       for i in range(self.output_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(rec_weights[i][j])
+          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(rec_weights[i][j], unscale=False, clamp=True)
         
         rec_mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
     
diff --git a/modneflib/modnef/arch_builder/modules/SLIF/rslif.py b/modneflib/modnef/arch_builder/modules/SLIF/rslif.py
index 1c1561348cb39c35213b86b0ab2824b63767a7c1..b0a66a05f55d3038ef179d6ef54541924c53806e 100644
--- a/modneflib/modnef/arch_builder/modules/SLIF/rslif.py
+++ b/modneflib/modnef/arch_builder/modules/SLIF/rslif.py
@@ -214,14 +214,14 @@ class RSLif(ModNEFArchMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j]), self.quantizer.bitwidth)
+          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j], unscale=False, clamp=True), self.quantizer.bitwidth)
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
     else:
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j])
+          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j], unscale=False, clamp=True)
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
     mem_file.close()
@@ -232,14 +232,14 @@ class RSLif(ModNEFArchMod):
       for i in range(self.output_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(rec_weights[i][j]), self.quantizer.bitwidth)
+          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(rec_weights[i][j], unscale=False, clamp=True), self.quantizer.bitwidth)
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
     else:
       for i in range(self.output_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(rec_weights[i][j])
+          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(rec_weights[i][j], unscale=False, clamp=True)
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
     mem_file.close()
diff --git a/modneflib/modnef/arch_builder/modules/SLIF/slif.py b/modneflib/modnef/arch_builder/modules/SLIF/slif.py
index ddf5c96f6e242224437fca9c8282bcdedcaf7e6e..991c16e5ed45a422b51199bf228697b87057e7ad 100644
--- a/modneflib/modnef/arch_builder/modules/SLIF/slif.py
+++ b/modneflib/modnef/arch_builder/modules/SLIF/slif.py
@@ -201,7 +201,7 @@ class SLif(ModNEFArchMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j]), self.quantizer.bitwidth)
+          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j], unscale=False, clamp=True), self.quantizer.bitwidth)
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
       self.v_threshold = two_comp(self.quantizer(self.v_threshold), self.variable_size)
@@ -213,7 +213,7 @@ class SLif(ModNEFArchMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j])
+          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j], unscale=False, clamp=True)
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
     
 
diff --git a/modneflib/modnef/arch_builder/modules/SLIF/slif_debugger.py b/modneflib/modnef/arch_builder/modules/SLIF/slif_debugger.py
index 7fb587c792b02f91ceb9c375469e0d68b437062e..dfca4698ce362f8a6b979bf48ee49b1bc3b56640 100644
--- a/modneflib/modnef/arch_builder/modules/SLIF/slif_debugger.py
+++ b/modneflib/modnef/arch_builder/modules/SLIF/slif_debugger.py
@@ -215,7 +215,7 @@ class SLif_Debugger(ModNEFDebuggerMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j]), self.quantizer.bitwidth)
+          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j], unscale=False, clamp=True), self.quantizer.bitwidth)
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
       self.v_threshold = two_comp(self.quantizer(self.v_threshold), self.variable_size)
@@ -227,7 +227,7 @@ class SLif_Debugger(ModNEFDebuggerMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j])
+          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j], unscale=False, clamp=True)
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
     
 
diff --git a/modneflib/modnef/arch_builder/modules/ShiftLIF/rshiftlif.py b/modneflib/modnef/arch_builder/modules/ShiftLIF/rshiftlif.py
index f3077ae9316df4d056ea3f2611867196c5adb71a..3d3427bc77b9619d1909c923ad9ee1ca6f6b5047 100644
--- a/modneflib/modnef/arch_builder/modules/ShiftLIF/rshiftlif.py
+++ b/modneflib/modnef/arch_builder/modules/ShiftLIF/rshiftlif.py
@@ -206,7 +206,7 @@ class RShiftLif(ModNEFArchMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j]), self.quantizer.bitwidth)
+          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j], unscale=False, clamp=True), self.quantizer.bitwidth)
 
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
@@ -214,7 +214,7 @@ class RShiftLif(ModNEFArchMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer) + self.quantizer(weights[i][j])
+          w_line = (w_line<<self.quantizer) + self.quantizer(weights[i][j], unscale=False, clamp=True)
         
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
@@ -226,7 +226,7 @@ class RShiftLif(ModNEFArchMod):
       for i in range(self.output_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(rec_weights[i][j]), self.quantizer.bitwidth)
+          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(rec_weights[i][j], unscale=False, clamp=True), self.quantizer.bitwidth)
 
         rec_mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
@@ -234,7 +234,7 @@ class RShiftLif(ModNEFArchMod):
       for i in range(self.output_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(rec_weights[i][j])
+          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(rec_weights[i][j], unscale=False, clamp=True)
         
         rec_mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
     
diff --git a/modneflib/modnef/arch_builder/modules/ShiftLIF/shiftlif.py b/modneflib/modnef/arch_builder/modules/ShiftLIF/shiftlif.py
index 929ed2612460c4901afae0cb5ac1c7b210cb9c6a..4c9b3e9f6d25e787521b98b72bad682b72c394ff 100644
--- a/modneflib/modnef/arch_builder/modules/ShiftLIF/shiftlif.py
+++ b/modneflib/modnef/arch_builder/modules/ShiftLIF/shiftlif.py
@@ -196,23 +196,25 @@ class ShiftLif(ModNEFArchMod):
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j]), self.quantizer.bitwidth)
+          w_line = (w_line<<self.quantizer.bitwidth) + two_comp(self.quantizer(weights[i][j], unscale=False, clamp=True), self.quantizer.bitwidth)
 
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
-      self.v_threshold = two_comp(self.quantizer(self.v_threshold), self.variable_size)
+      self.v_threshold = two_comp(self.quantizer(self.v_threshold, unscale=False, clamp=False), self.variable_size)
 
     else:
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j])
+          w_line = (w_line<<self.quantizer.bitwidth) + self.quantizer(weights[i][j], unscale=False, clamp=True)
         
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
       self.v_threshold = self.quantizer(self.v_threshold)
     
-    mem_file.close()        
+    mem_file.close() 
 
   def to_debugger(self, output_file : str = ""):
     """
diff --git a/modneflib/modnef/arch_builder/modules/UART/__init__.py b/modneflib/modnef/arch_builder/modules/UART/__init__.py
index 1bf960420332ba13fce617145b3d49dc5165e617..3cbbe30479f8e038dc09b9a1db57ae6c9409eabc 100644
--- a/modneflib/modnef/arch_builder/modules/UART/__init__.py
+++ b/modneflib/modnef/arch_builder/modules/UART/__init__.py
@@ -7,7 +7,6 @@ Dependencies: uart_1step, uart_classifier, uart_classifier_timer, uart_xstep, ua
 Descriptions: UART module builder init
 """
 
-from .uart_1step import Uart_1Step
 from .uart_classifier import Uart_Classifier
 from .uart_classifier_timer import Uart_Classifier_Timer
 from .uart_xstep import Uart_XStep
diff --git a/modneflib/modnef/arch_builder/modules/UART/uart_1step.py b/modneflib/modnef/arch_builder/modules/UART/uart_1step.py
deleted file mode 100644
index 8fca341159282e2fb727e8025a2cde2f7b33fbc4..0000000000000000000000000000000000000000
--- a/modneflib/modnef/arch_builder/modules/UART/uart_1step.py
+++ /dev/null
@@ -1,242 +0,0 @@
-"""
-File name: uart_1step
-Author: Aurélie Saulquin  
-Version: 2.0.0
-License: GPL-3.0-or-later
-Contact: aurelie.saulquin@univ-lille.fr
-Dependencies: io_arch, yaml
-Descriptions: UART_1Step ModNEF archbuilder module
-"""
-
-from ..io_arch import IOArch
-import yaml
-
-_UART_1STEP_DEFINITION = """
-  component uart_1Step is
-    generic(
-      clk_freq          : integer := 100_000_000;
-      baud_rate         : integer := 115_200;
-
-      queue_read_depth  : integer := 32;
-      queue_read_type   : string  := "fifo";
-
-      queue_write_type  : string  := "fifo";
-
-      input_layer_size  : integer := 8;
-      output_layer_size : integer := 8
-    );
-    port(
-      i_clk             : in  std_logic;
-      i_en              : in  std_logic;
-
-      i_tx              : in  std_logic;
-      o_rx              : out std_logic;
-
-      i_emu_ready       : in  std_logic;
-      o_start_emu       : out std_logic;
-      o_reset_membrane  : out std_logic;
-
-      i_req             : in  std_logic;
-      o_ack             : out std_logic;
-      i_emu_busy        : in  std_logic;
-      i_spike_flag      : in  std_logic;
-      i_aer             : in  std_logic_vector(log_b(output_layer_size, 2)-1 downto 0);
-
-      o_req             : out std_logic;
-      i_ack             : in  std_logic;
-      o_emu_busy        : out std_logic;
-      o_spike_flag      : out std_logic;
-      o_aer             : out std_logic_vector(log_b(input_layer_size, 2)-1 downto 0)
-    );
-  end component;
-"""
-
-class Uart_1Step(IOArch):
-  """
-  Uart_1Step module class
-  Each UART transmission correspond to an emulation step
-
-  Attributes
-  ----------
-  name : str
-    name of module
-  input_layer_size : int
-    size in neurons of input layer
-  output_layer_size : int
-    size in neurons of output layer
-  clk_freq : int
-    board clock frequency
-  baud_rate : int
-    data baud rate
-  queue_read_type : int
-    type of reaad queue
-  queue_write_type : int
-    type of write queue
-  tx_name : str
-    name of tx signal
-  rx_name : str
-    name of rx signal
-
-  Methods
-  -------
-  vhdl_component_name()
-    return component name
-  vhdl_component_definition()
-    return vhdl component definition
-  to_vhdl(vhdl_file, pred, suc, clock_name):
-    write vhdl component instanciation 
-  to_yaml(file):
-    generate yaml configuration file for driver
-  write_io(vhdl_file):
-    write signals into entity definition section
-  """
-
-  def __init__(self, 
-               name: str,
-               input_layer_size: int, 
-               output_layer_size: int,
-               clk_freq: int, 
-               baud_rate: int, 
-               tx_name: str, 
-               rx_name: str, 
-               queue_read_depth : int,
-               queue_read_type: str = "fifo", 
-               queue_write_type: str = "fifo"
-              ):
-    """
-    Initialize attributes
-
-    Parameters
-    ----------
-    name : str
-      name of module
-    clk_freq : int
-      board clock frequency
-    baud_rate : int
-      data baud rate
-    tx_name : str
-      name of tx signal
-    rx_name : str
-      name of rx signal
-    input_layer_size : int = -1
-      size in neurons of input layer
-    output_layer_size : int = -1
-      size in neurons of output layer
-    queue_read_type : str = "fifo"
-      read queue type : "fifo" or "lifo"
-    queue_write_type : str = "fifo"
-      write queue type : "fifo" or "lifo"
-    """
-
-    self.name = name
-    
-    self.input_neuron = output_layer_size
-    self.output_neuron = input_layer_size
-
-    self.input_layer_size = input_layer_size
-    self.output_layer_size = output_layer_size
-
-    self.clk_freq = clk_freq
-    self.baud_rate = baud_rate
-
-    self.queue_read_type = queue_read_type
-    self.queue_read_depth = queue_read_depth
-    self.queue_write_type = queue_write_type
-
-    self.tx_name = tx_name
-    self.rx_name = rx_name
-  
-  def vhdl_component_name(self):
-    """
-    Module identifier use during component definition
-
-    Returns
-    -------
-    str
-    """
-
-    return "Uart_1_Step"
-
-  
-  def vhdl_component_definition(self):
-    """
-    VHDL component definition
-
-    Returns
-    -------
-    str
-    """
-
-    return _UART_1STEP_DEFINITION
-
-  def to_vhdl(self, vhdl_file, pred, suc, clock_name):
-    """
-    Write vhdl componenent 
-
-    Parameters
-    ----------
-    vhdl_file : TextIOWrapper
-      vhdl file 
-    pred : List of ModNEFArchMod 
-      list of predecessor module (1 pred for this module)
-    suc : List of ModNEFArchMod
-      list of successor module (1 suc for this module)
-    clock_name : str
-      clock signal name
-    """
-
-    vhdl_file.write(f"\t{self.name} : uart_1step generic map(\n")
-    vhdl_file.write(f"\t\tclk_freq => {self.clk_freq},\n")
-    vhdl_file.write(f"\t\tbaud_rate => {self.baud_rate},\n")
-    vhdl_file.write(f"\t\tqueue_read_depth => {self.queue_read_depth},\n")
-    vhdl_file.write(f"\t\tqueue_read_type => \"{self.queue_read_type}\",\n")
-    vhdl_file.write(f"\t\tqueue_write_type => \"{self.queue_write_type}\",\n")
-    vhdl_file.write(f"\t\tinput_layer_size => {self.input_layer_size},\n")
-    vhdl_file.write(f"\t\toutput_layer_size => {self.output_layer_size}\n")
-    vhdl_file.write(f"\t) port map(\n")
-    vhdl_file.write(f"\t\ti_clk => {clock_name},\n")
-    vhdl_file.write("\t\ti_en => '1',\n")
-    vhdl_file.write(f"\t\ti_tx => {self.tx_name},\n")
-    vhdl_file.write(f"\t\to_rx => {self.rx_name},\n")
-    vhdl_file.write("\t\ti_emu_ready => emu_ready,\n")
-    vhdl_file.write("\t\to_start_emu => start_emu,\n")
-    vhdl_file.write("\t\to_reset_membrane => reset_membrane,\n")
-    self._write_port_map(vhdl_file, pred[0].name, self.name, "in", "", False)
-    self._write_port_map(vhdl_file, self.name, suc[0].name, "out", "", True)
-    vhdl_file.write("\t);\n")
-  
-  def to_yaml(self, file):
-    """
-    Generate yaml driver description file
-
-    Parameters
-    ----------
-    file : str
-      configuration file name
-    """
-    d = {}
-
-    
-
-    d["module"] = "1Step"
-    d["input_layer_size"] = self.input_layer_size
-    d["output_layer_size"] = self.output_layer_size
-    d["baud_rate"] = self.baud_rate
-    d["queue_read_depth"] = self.queue_read_depth
-    d["queue_write_depth"] = self.output_layer_size
-
-    with open(file, 'w') as f:
-      yaml.dump(d, f)
-
-  def write_io(self, vhdl_file):
-    """
-    Write port IO in entity definition section
-
-    Parameters
-    ----------
-    vhdl_file : TextIOWrapper
-      vhdl file
-    """
-
-    vhdl_file.write(f"\t\t{self.tx_name} : in std_logic;\n")
-    vhdl_file.write(f"\t\t{self.rx_name} : out std_logic\n")
diff --git a/modneflib/modnef/modnef_driver/drivers/__init__.py b/modneflib/modnef/modnef_driver/drivers/__init__.py
index 0615fd915b881e40174ffa9e4a47c16cffbd1143..1bd6c7110c70072bc76e93d6790e6f290b03b768 100644
--- a/modneflib/modnef/modnef_driver/drivers/__init__.py
+++ b/modneflib/modnef/modnef_driver/drivers/__init__.py
@@ -11,6 +11,5 @@ from .classifier_driver import Classifier_Driver
 from .classifier_timer_driver import Classifier_Timer_Driver
 from .debugger_driver import Debugger_Driver
 from .default_driver import ModNEF_Driver
-from .single_step_driver import SingleStep_Driver
 from .xstep_driver import XStep_Driver
 from .xstep_timer_driver import XStep_Timer_Driver
\ No newline at end of file
diff --git a/modneflib/modnef/modnef_driver/drivers/single_step_driver.py b/modneflib/modnef/modnef_driver/drivers/single_step_driver.py
deleted file mode 100644
index 0feba5e3733e4253d7314b02497c62b89c23a3af..0000000000000000000000000000000000000000
--- a/modneflib/modnef/modnef_driver/drivers/single_step_driver.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""
-File name: single_step_driver
-Author: Aurélie Saulquin  
-Version: 2.0.0
-License: GPL-3.0-or-later
-Contact: aurelie.saulquin@univ-lille.fr
-Dependencies: default_driver, yaml
-Descriptions: Driver class for UART_1Step uart module
-"""
-
-from .default_driver import default_transformation
-from .default_driver import ModNEF_Driver, ClosedDriverError
-import yaml
-
-class SingleStep_Driver(ModNEF_Driver):
-  """
-  Driver of Uart_SingleStep module
-
-  Attributes
-  ----------
-  board_path : str
-    fpga driver board path
-  baud_rate : int
-    data baud rate
-  input_layer_size : int
-    number of neuron in input layer
-  output_layer_size : int
-    number of neuron in output layer
-  queue_read_depth : int
-    number of word of read queue
-  queue_write_depth : int
-    number of word of write queue
-
-  Methods
-  -------
-  from_yaml(yaml_file, board_path) : classmethod
-    create driver from yaml configuration file
-  run_sample(input_sample, transformation, reset_membrane):
-    run data communication to run a data sample
-  """
-
-  def __init__(self, board_path, baud_rate, input_layer_size, output_layer_size, queue_read_depth, queue_write_depth):
-    """
-    Constructor
-
-    Parameters
-    ----------
-    board_path : str
-      fpga driver board path
-    baud_rate : int
-      data baud rate
-    input_layer_size : int
-      number of neuron in input layer
-    output_layer_size : int
-      number of neuron in output layer
-    queue_read_depth : int
-      number of word of read queue
-    queue_write_depth : int
-      number of word of write queue
-    """
-
-    
-
-    super().__init__(board_path, baud_rate, input_layer_size, output_layer_size, queue_read_depth, queue_write_depth)
-
-  @classmethod
-  def from_yaml(cls, yaml_file, board_path):
-    """
-    classmethod
-
-    create driver from driver configuration file
-
-    Parameters
-    ----------
-    yaml_file : str
-      configuration file
-    board_path : str
-      path to board driver
-    """
-
-    
-    
-    with open(yaml_file, 'r') as f:
-      config = yaml.safe_load(f)
-
-    print("coucou")
-
-    d = cls(board_path = board_path,
-      baud_rate = config["baud_rate"],
-      input_layer_size = config["input_layer_size"],
-      output_layer_size = config["output_layer_size"],
-      queue_read_depth = config["queue_read_depth"],
-      queue_write_depth = config["queue_write_depth"]
-    )
-    return d
-  
-
-  def run_sample(self, input_sample, transformation = default_transformation, reset_membrane=False, extra_step = 0):
-    """
-    Run an entire data sample by using run_step function (for more details see run_step)
-
-    Parameters
-    ----------
-    input_sample : list
-      list of spikes of sample
-    transformation : function
-      function call to tranform input spikes to AER representation
-    reset_membrane : bool
-      set to true if reset voltage membrane after sample transmission
-
-    Returns
-    -------
-    list of list of int:
-      list of list of output AER data for all emulation step
-    """
-
-    if self._is_close:
-      raise ClosedDriverError()
-
-    sample_res = [0 for _ in range(self.output_layer_size)]
-
-    sample_aer = [transformation(s) for s in input_sample]
-
-    for es in range(extra_step):
-      sample_aer.append([])
-
-    for step in range(len(input_sample)):
-      step_spikes = sample_aer[step]      
-      if step == len(input_sample)-1:
-        step_res = self.rust_driver.data_transmission(step_spikes, reset_membrane)
-      else:
-        step_res = self.rust_driver.data_transmission(step_spikes, False)
-      for s in step_res:
-        sample_res[s] += 1
-
-    return sample_res
diff --git a/modneflib/modnef/modnef_driver/drivers/xstep_driver.py b/modneflib/modnef/modnef_driver/drivers/xstep_driver.py
index 778b95b1a72a37f0cd6a6a61a0e57aa3c37baad9..1f6d31ab51ff79cad91c1a65ac604b04625b1e7e 100644
--- a/modneflib/modnef/modnef_driver/drivers/xstep_driver.py
+++ b/modneflib/modnef/modnef_driver/drivers/xstep_driver.py
@@ -161,7 +161,6 @@ class XStep_Driver(ModNEF_Driver):
 
       if step == len(sample_aer)-1:
         emulation_result = self.rust_driver.data_transmission(data, reset_membrane)
-        
         res_step = self._unpack_data(emulation_result)
         for rs in res_step:
           for aer in rs:
diff --git a/modneflib/modnef/modnef_driver/drivers/xstep_timer_driver.py b/modneflib/modnef/modnef_driver/drivers/xstep_timer_driver.py
index 58de027609fb796bc97afccf849716105446ae7e..ebb23577999a295b418abf784e71ce968055735e 100644
--- a/modneflib/modnef/modnef_driver/drivers/xstep_timer_driver.py
+++ b/modneflib/modnef/modnef_driver/drivers/xstep_timer_driver.py
@@ -144,7 +144,7 @@ class XStep_Timer_Driver(ModNEF_Driver):
     sample_aer = [transformation(input_sample[step]) for step in range(len(input_sample))]
 
     for es in range(extra_step):
-      sample_aer.append([])
+      sample_aer.append([0, 1, 2, 3])
     
     step_send = 0
     res = [0 for _ in range(self.output_layer_size)]
@@ -155,11 +155,11 @@ class XStep_Timer_Driver(ModNEF_Driver):
 
     for step in range(len(sample_aer)):
       next_data = []
+      if(len(sample_aer[step])+len(next_data) > 256**self.queue_read_width):
+        print(f"warning: the read queue cannot encode the length of the emulation step: actual length {len(sample_aer[step])}, maximum length {256**self.queue_read_width}")
       next_data.append(len(sample_aer[step]))
       next_data.extend(sample_aer[step])
-      if(len(sample_aer[step]) > 256**self.queue_read_width):
-        print(f"warning, the read queue cannot encode the len of emulation step : acutal len {len(sample_aer[step])}, maximum len {256**self.qeue_read_width}")
-      if len(data) + len(next_data) > self.queue_read_depth or (step_send+1)*self.output_layer_size > self.queue_write_depth-2:
+      if len(data) + len(next_data) > self.queue_read_depth or step_send*self.output_layer_size > self.queue_write_depth-2:
         emulation_result = self.rust_driver.data_transmission(data, False)
 
         res_step = self._unpack_data(emulation_result)
@@ -201,8 +201,10 @@ class XStep_Timer_Driver(ModNEF_Driver):
 
     #print((data[0]*256 + data[1])*self.clock_period)
 
+
     self.sample_time += (data[0]*256 + data[1])*self.clock_period
 
+
     while index < len(data):
       n_data = data[index]
       index += 1
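
The reworked flush condition above now weighs step_send (instead of step_send+1) batches of output words against the write-queue capacity, so one extra emulation step can be buffered before the driver transmits. A quick arithmetic check, with assumed sizes rather than a real board configuration:

output_layer_size = 10
queue_write_depth = 32

# step_send values at which each inequality first holds
old_flush = [s for s in range(6) if (s + 1) * output_layer_size > queue_write_depth - 2]
new_flush = [s for s in range(6) if s * output_layer_size > queue_write_depth - 2]

print(old_flush)  # [3, 4, 5]: the old guard first triggers at step_send = 3
print(new_flush)  # [4, 5]:    the new guard first triggers at step_send = 4
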
diff --git a/modneflib/modnef/modnef_driver/modnef_drivers.py b/modneflib/modnef/modnef_driver/modnef_drivers.py
index be6f4db13049d828351fb97c5cb652a25359bc7f..11e2cc3226bae31637382a1f47a9c2f7fc03420f 100644
--- a/modneflib/modnef/modnef_driver/modnef_drivers.py
+++ b/modneflib/modnef/modnef_driver/modnef_drivers.py
@@ -13,7 +13,6 @@ from .drivers import *
 import yaml
 
 drivers_dict = {
-  "1Step" : SingleStep_Driver,
   "XStep" : XStep_Driver,
   "Classifier" : Classifier_Driver,
   "Debugger" : Debugger_Driver,
diff --git a/modneflib/modnef/modnef_torch/__init__.py b/modneflib/modnef/modnef_torch/__init__.py
index e57895ff347f15ba130ffa2d510ba213d5e63f39..2053dab5330a19f29ea447f8c285fdeaf2d60ad5 100644
--- a/modneflib/modnef/modnef_torch/__init__.py
+++ b/modneflib/modnef/modnef_torch/__init__.py
@@ -9,4 +9,5 @@ Descriptions: ModNEF torch lib definition
 
 from .modnef_neurons import *
 from .model_builder import ModNEFModelBuilder
-from .model import ModNEFModel
\ No newline at end of file
+from .model import ModNEFModel
+from .quantLinear import QuantLinear
\ No newline at end of file
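
The neuron refactors below swap their plain nn.Linear layers for this newly exported QuantLinear and call it as self.fc(input_, quant). quantLinear.py itself is not part of this hunk; as a rough sketch of the expected behaviour (an assumption, not the actual module), it acts as a bias-free linear layer that quantizes its weights on the fly when a quantizer is supplied:

import torch
import torch.nn as nn
import torch.nn.functional as F

class QuantLinearSketch(nn.Linear):
    """Hypothetical stand-in for modnef.modnef_torch.quantLinear.QuantLinear."""

    def __init__(self, in_features, out_features):
        super().__init__(in_features, out_features, bias=False)

    def forward(self, x, quantizer=None):
        weight = self.weight
        if quantizer is not None:
            # the real module presumably goes through QuantizeSTE so gradients
            # still flow to the full-precision weights
            weight = quantizer(weight)
        return F.linear(x, weight)
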
diff --git a/modneflib/modnef/modnef_torch/model.py b/modneflib/modnef/modnef_torch/model.py
index 44443dc4da4759f9c0c785f4df1e2b3c9493cca2..f63da283c435d9340cfe85b4a6d20275fdac0e9b 100644
--- a/modneflib/modnef/modnef_torch/model.py
+++ b/modneflib/modnef/modnef_torch/model.py
@@ -1,17 +1,15 @@
 """
 File name: model
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.1.0
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
-Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron
+Dependencies: torch, snntorch, modnef_torch_neuron, modnef_driver
 Descriptions: ModNEF SNN Model
 """
 
-import modnef.modnef_torch.modnef_neurons as mn
 import torch.nn as nn
 import torch
-from modnef.arch_builder import *
 from modnef.modnef_driver import load_driver_from_yaml
 from modnef.modnef_torch.modnef_neurons import ModNEFNeuron
 
@@ -88,11 +86,52 @@ class ModNEFModel(nn.Module):
     for m in self.modules():
       if isinstance(m, ModNEFNeuron):
         m.hardware_estimation(hardware)
-        m.set_quant(quant)
+        m.run_quantize(quant)
 
     return super().train(mode=mode)
   
-  def quantize(self, force_init=False):
+  def init_quantizer(self):
+    """
+    Initialize the quantizer of each layer
+    """
+
+    for m in self.modules():
+      if isinstance(m, ModNEFNeuron):
+        m.init_quantizer()
+
+  def quantize_hp(self, force_init=False):
+    """
+    Quantize neuron hyper-parameters
+
+    Parameters
+    ----------
+    force_init = False : bool
+      force quantizer initialization
+    """
+
+    for m in self.modules():
+      if isinstance(m, ModNEFNeuron):
+        if force_init:
+          m.init_quantizer()
+        m.quantize_hp()
+
+  def quantize_weight(self, force_init=False):
+    """
+    Quantize synaptic weight
+
+    Parameters
+    ----------
+    force_init = False : bool
+      force quantizer initialization
+    """
+
+    for m in self.modules():
+      if isinstance(m, ModNEFNeuron):
+        if force_init:
+          m.init_quantizer()
+        m.quantize_weight()
+  
+  def quantize(self, force_init=False, clamp=False):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
@@ -104,13 +143,21 @@ class ModNEFModel(nn.Module):
     
     for m in self.modules():
       if isinstance(m, ModNEFNeuron):
-        m.quantize(force_init=force_init)
+        m.quantize(force_init=force_init, clamp=clamp)
+
+  def clamp(self, force_init=False):
+    """
+    Clamp synaptic weights to the quantizer bounds
 
-  def clamp(self):
+    Parameters
+    ----------
+    force_init = False : bool
+      force quantizer initialization
+    """
 
     for m in self.modules():
       if isinstance(m, ModNEFNeuron):
-        m.clamp()
+        m.clamp(force_init=force_init)
 
   def train(self, mode : bool = True, quant : bool = False):
     """
@@ -151,6 +198,7 @@ class ModNEFModel(nn.Module):
 
     if self.driver != None:
       self.driver.close()
+      self.driver = None
 
   def forward(self, input_spikes):
     """
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
index d223c86d20afea7be6e97ceec852ed2fb7346009..99b6154532bfe2b6d09c4852c5b7db8c7f1a1963 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
@@ -1,7 +1,7 @@
 """
 File name: blif
 Author: Aurélie Saulquin  
-Version: 1.1.0
+Version: 1.2.1
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, math, snntorch, modnef.archbuilder, modnef_torch_neuron, modnef.quantizer
@@ -9,16 +9,14 @@ Descriptions: ModNEF torch BLIF neuron model
 Based on snntorch.Leaky and snntorch.LIF class
 """
 
-import torch.nn as nn
 import torch
 from math import log, ceil
-from snntorch import Leaky
 import modnef.arch_builder as builder
 from modnef.arch_builder.modules.utilities import *
 from ..modnef_torch_neuron import ModNEFNeuron, _quantizer
 from modnef.quantizer import *
 
-class BLIF(Leaky, ModNEFNeuron):
+class BLIF(ModNEFNeuron):
   """
   ModNEFTorch BLIF neuron model
 
@@ -111,26 +109,15 @@ class BLIF(Leaky, ModNEFNeuron):
       quantization method
     """
     
-    Leaky.__init__(
-      self=self,
-      beta=beta,
-      threshold=threshold,
-      spike_grad=spike_grad,
-      surrogate_disable=False,
-      init_hidden=False,
-      inhibition=False,
-      learn_beta=False,
-      learn_threshold=False,
-      reset_mechanism=reset_mechanism,
-      state_quant=False,
-      output=False,
-      graded_spikes_factor=1.0,
-      learn_graded_spikes_factor=False,
-    )
-
-    ModNEFNeuron.__init__(self=self, quantizer=quantizer)
-
-    self.fc = nn.Linear(in_features, out_features, bias=False)
+    super().__init__(threshold=threshold,
+                     in_features=in_features,
+                     out_features=out_features,
+                     reset_mechanism=reset_mechanism,
+                     spike_grad=spike_grad,
+                     quantizer=quantizer
+                     )
+    
+    self.register_buffer("beta", torch.tensor(beta))
 
     self._init_mem()
 
@@ -234,27 +221,30 @@ class BLIF(Leaky, ModNEFNeuron):
     if not spk==None:
       self.spk = spk
 
-    input_ = self.fc(input_)
+    quant = self.quantizer if self.quantization_flag else None
 
-    if not self.mem.shape == input_.shape:
-      self.mem = torch.zeros_like(input_, device=self.mem.device)
-
-    if self.quantization_flag:
-      input_.data = self.quantizer(input_.data, True)
-      self.mem.data = self.quantizer(self.mem.data, True)
+    forward_current = self.fc(input_, quant)
 
+    if not self.mem.shape == forward_current.shape:
+      self.mem = torch.zeros_like(forward_current, device=self.mem.device)
+    
     self.reset = self.mem_reset(self.mem)
 
+    self.mem = self.mem + forward_current
+    
     if self.reset_mechanism == "subtract":
-      self.mem = (self.mem+input_)*self.beta-self.reset*self.threshold
+      self.mem = self.mem-self.reset*self.threshold
     elif self.reset_mechanism == "zero":
-      self.mem = (self.mem+input_)*self.beta-self.reset*self.mem
-    else:
-      self.mem = self.mem*self.beta
+      self.mem = self.mem-self.reset*self.mem
 
     if self.hardware_estimation_flag:
-      self.val_min = torch.min(torch.min(input_.min(), self.mem.min()), self.val_min)
-      self.val_max = torch.max(torch.max(input_.max(), self.mem.max()), self.val_max)
+      self.val_min = torch.min(self.mem.min(), self.val_min).detach()
+      self.val_max = torch.max(self.mem.max(), self.val_max).detach()
+
+    self.mem = self.mem*self.beta
+
+    if self.quantization_flag:
+      self.mem.data = QuantizeSTE.apply(self.mem, self.quantizer)
 
     self.spk = self.fire(self.mem)
 
@@ -275,6 +265,7 @@ class BLIF(Leaky, ModNEFNeuron):
     -------
     BLIF
     """
+    
 
     if self.hardware_description["variable_size"]==-1:
       if self.hardware_estimation_flag:
@@ -304,50 +295,15 @@ class BLIF(Leaky, ModNEFNeuron):
       output_path=output_path
     )
     return module
-  
-  def quantize_weight(self):
-    """
-    Quantize synaptic weight
-    """
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(self.fc.weight)
-
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
-
-  def quantize_parameters(self):
-    """
-    Quantize neuron hyper-parameters
-    """
-
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(self.fc.weight)
-
-    self.threshold.data = self.quantizer(self.threshold.data, True)
-    self.beta.data = self.quantizer(self.beta.data, True)
-
-  def quantize(self, force_init=False):
-    """
-    Quantize synaptic weight and neuron hyper-parameters
-
-    Parameters
-    ----------
-    force_init = Fasle : bool
-      force quantizer initialization
-    """
-    
-    if force_init or not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(self.fc.weight)
-
-    self.quantize_weight()
-    self.quantize_parameters()
-    self.quantization_flag = True
 
-  def clamp(self):
+  def quantize_hp(self):
     """
-    Clamp synaptic weight and neuron hyper-parameters
+    Quantize neuron hyper-parameters.
+    Assumes the quantizer has already been initialized.
     """
 
-    self.fc.weight.data = self.quantizer.clamp(self.fc.weight.data)
+    self.threshold.data = QuantizeSTE.apply(self.threshold, self.quantizer)
+    self.beta.data = QuantizeSTE.apply(self.beta, self.quantizer)
 
   @classmethod
   def detach_hidden(cls):
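
The rewritten forward above reorders the membrane update: the synaptic current is integrated first, the reset from the previous threshold crossing is subtracted, the hardware-estimation range is sampled, and only then is the membrane decayed by beta and, when quantization is active, snapped back onto the quantizer grid. A compact, self-contained restatement of that ordering for the "subtract" reset mechanism (illustrative only; hardware range tracking and the surrogate-gradient fire() are omitted):

import torch

def blif_subtract_step(mem, current, spk_prev, beta, threshold, quantize=None):
    # integrate, apply the previous-step reset, decay, then optionally re-quantize
    reset = (spk_prev > 0).float()
    mem = mem + current
    mem = mem - reset * threshold
    mem = mem * beta
    if quantize is not None:
        mem = quantize(mem)
    spk = (mem > threshold).float()
    return spk, mem
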
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
index d48960ddde25bfcb3a8c02ae049cc1fafe93a86e..f930deb08db239d9e47b64cbea9f2e527e4b6b51 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
@@ -1,7 +1,7 @@
 """
 File name: rblif
 Author: Aurélie Saulquin  
-Version: 1.1.0
+Version: 1.2.1
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron, math, modnef.quantizer
@@ -9,16 +9,16 @@ Descriptions: ModNEF torch reccurrent BLIF neuron model
 Based on snntorch.RLeaky and snntorch.LIF class
 """
 
-import torch.nn as nn
 import torch
-from snntorch import Leaky
 import modnef.arch_builder as builder
 from modnef.arch_builder.modules.utilities import *
 from ..modnef_torch_neuron import ModNEFNeuron, _quantizer
 from math import log, ceil
 from modnef.quantizer import *
+from modnef.modnef_torch.quantLinear import QuantLinear
 
-class RBLIF(Leaky, ModNEFNeuron):
+
+class RBLIF(ModNEFNeuron):
   """
   ModNEFTorch reccurent BLIF neuron model
 
@@ -116,27 +116,17 @@ class RBLIF(Leaky, ModNEFNeuron):
       quantization method
     """
     
-    Leaky.__init__(
-      self=self,
-      beta=beta,
-      threshold=threshold,
-      spike_grad=spike_grad,
-      surrogate_disable=False,
-      init_hidden=False,
-      inhibition=False,
-      learn_beta=False,
-      learn_threshold=False,
-      reset_mechanism=reset_mechanism,
-      state_quant=False,
-      output=False,
-      graded_spikes_factor=1.0,
-      learn_graded_spikes_factor=False,
-    )
-
-    ModNEFNeuron.__init__(self=self, quantizer=quantizer)
+    super().__init__(threshold=threshold,
+                     in_features=in_features,
+                     out_features=out_features,
+                     reset_mechanism=reset_mechanism,
+                     spike_grad=spike_grad,
+                     quantizer=quantizer
+                     )
+    
+    self.register_buffer("beta", torch.tensor(beta))
 
-    self.fc = nn.Linear(in_features, out_features, bias=False)
-    self.reccurent = nn.Linear(out_features, out_features, bias=False)
+    self.reccurent = QuantLinear(out_features, out_features)
 
     self._init_mem()
 
@@ -241,33 +231,35 @@ class RBLIF(Leaky, ModNEFNeuron):
     if not spk == None:
       self.spk = spk
 
-    input_ = self.fc(input_)
+    quant = self.quantizer if self.quantization_flag else None
 
-    if not self.mem.shape == input_.shape:
-      self.mem = torch.zeros_like(input_, device=self.mem.device)
+    forward_current = self.fc(input_, quant)
 
-    if not self.spk.shape == input_.shape:
-      self.spk = torch.zeros_like(input_, device=self.spk.device)
+    if not self.mem.shape == forward_current.shape:
+      self.mem = torch.zeros_like(forward_current, device=self.mem.device)
 
-    self.reset = self.mem_reset(self.mem)
+    if not self.spk.shape == forward_current.shape:
+      self.spk = torch.zeros_like(forward_current, device=self.spk.device)
 
-    rec = self.reccurent(self.spk)
+    self.reset = self.mem_reset(self.mem)
+    
+    rec_current = self.reccurent(self.spk, quant)
 
-    if self.quantization_flag:
-      self.mem.data = self.quantizer(self.mem.data, True)
-      input_.data = self.quantizer(input_.data, True)
-      rec.data = self.quantizer(rec.data, True)
+    self.mem = self.mem + forward_current + rec_current
 
     if self.reset_mechanism == "subtract":
-      self.mem = (self.mem+input_+rec)*self.beta-self.reset*self.threshold
+      self.mem = self.mem-self.reset*self.threshold
     elif self.reset_mechanism == "zero":
-      self.mem = (self.mem+input_+rec)*self.beta-self.reset*self.mem
-    else:
-      self.mem = self.mem*self.beta
+      self.mem = self.mem-self.reset*self.mem
 
     if self.hardware_estimation_flag:
-      self.val_min = torch.min(torch.min(input_.min(), self.mem.min()), self.val_min)
-      self.val_max = torch.max(torch.max(input_.max(), self.mem.max()), self.val_max)
+      self.val_min = torch.min(self.mem.min(), self.val_min).detach()
+      self.val_max = torch.max(self.mem.max(), self.val_max).detach()
+
+    self.mem = self.mem*self.beta
+
+    if self.quantization_flag:
+      self.mem.data = QuantizeSTE.apply(self.mem, self.quantizer)
 
     self.spk = self.fire(self.mem)
 
@@ -318,52 +310,14 @@ class RBLIF(Leaky, ModNEFNeuron):
     )
     return module
   
-  def quantize_weight(self):
-    """
-    Quantize synaptic weight
-    """
-
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
-
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
-    self.reccurent.weight.data = self.quantizer(self.reccurent.weight.data, True)
-
-  def quantize_parameters(self):
-    """
-    Quantize neuron hyper-parameters
-    """
-
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
-
-    self.threshold.data = self.quantizer(self.threshold.data, True)
-    self.beta.data = self.quantizer(self.beta.data, True)
-
-  def quantize(self, force_init=False):
-    """
-    Quantize synaptic weight and neuron hyper-parameters
-
-    Parameters
-    ----------
-    force_init = Fasle : bool
-      force quantizer initialization
-    """
-    
-    if force_init or not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
-
-    self.quantize_weight()
-    self.quantize_parameters()
-    self.quantization_flag = True
-
-  def clamp(self):
+  def quantize_hp(self):
     """
-    Clamp synaptic weight and neuron hyper-parameters
+    Quantize neuron hyper-parameters.
+    Assumes the quantizer has already been initialized.
     """
 
-    self.fc.weight.data = self.quantizer.clamp(self.fc.weight.data)
-    self.reccurent.weight.data = self.quantizer.clamp(self.reccurent.weight.data)
+    self.threshold.data = QuantizeSTE.apply(self.threshold, self.quantizer)
+    self.beta.data = QuantizeSTE.apply(self.beta, self.quantizer)
 
 
   @classmethod
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py b/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
index 73225949001afeb0d5700f6f4a5c15c78b2079aa..681b88aa426edc61ceaba9377e5a7f10da48c19f 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
@@ -1,7 +1,7 @@
 """
 File name: modnef_torch_neuron
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.0.1
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch
@@ -9,7 +9,10 @@ Descriptions: ModNEF torch neuron interface builder
 """
 
 import torch
+import torch.nn as nn
 from modnef.quantizer import *
+from snntorch._neurons import SpikingNeuron
+from ..quantLinear import QuantLinear
 
 _quantizer = {
   "FixedPointQuantizer" : FixedPointQuantizer,
@@ -17,7 +20,7 @@ _quantizer = {
   "DynamicScaleFactorQuantizer" : DynamicScaleFactorQuantizer
 }
 
-class ModNEFNeuron():
+class ModNEFNeuron(SpikingNeuron):
   """
   ModNEF torch neuron interface
 
@@ -42,7 +45,24 @@ class ModNEFNeuron():
     create and return the corresponding modnef archbuilder module from internal neuron parameters
   """
 
-  def __init__(self, quantizer : Quantizer):
+  def __init__(self,
+               in_features,
+               out_features,
+               threshold,
+               reset_mechanism,
+               spike_grad, 
+               quantizer):
+    
+    super().__init__(
+      threshold=threshold,
+      spike_grad=spike_grad,
+      reset_mechanism=reset_mechanism
+    )
+
+    self.fc = QuantLinear(in_features=in_features, out_features=out_features)
+    #self.fc = nn.Linear(in_features=in_features, out_features=out_features, bias=False)
+
+
     self.hardware_estimation_flag = False
     self.quantization_flag = False
 
@@ -53,6 +73,9 @@ class ModNEFNeuron():
 
     self.quantizer = quantizer
 
+
+    self.alpha = 0.9
+
   @classmethod
   def from_dict(cls, dict):
     """
@@ -61,7 +84,19 @@ class ModNEFNeuron():
 
     raise NotImplementedError()
   
-  def quantize_weight(self):
+  def init_quantizer(self):
+    """
+    Initialize or re-initialize the internal quantizer
+    """
+
+    param = list(self.parameters())
+
+    if len(param)==1:
+      self.quantizer.init_from_weight(param[0])
+    else:
+      self.quantizer.init_from_weight(param[0], param[1])
+  
+  def quantize_weight(self, clamp=False):
     """
     synaptic weight quantization
 
@@ -70,16 +105,19 @@ class ModNEFNeuron():
     NotImplementedError()
     """
     
-    raise NotImplementedError()
+    for p in self.parameters():
+      p.data = QuantizeSTE.apply(p.data, self.quantizer, clamp)
+     
   
-  def quantize_parameters(self):
+  def quantize_hp(self):
     """
     neuron hyper-parameters quantization
+    Assumes the quantizer has already been initialized
     """
     
     raise NotImplementedError()
   
-  def quantize(self, force_init=False):
+  def quantize(self, force_init=False, clamp=False):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
@@ -89,16 +127,38 @@ class ModNEFNeuron():
       force quantizer initialization
     """
     
-    raise NotImplementedError()
+    if force_init:
+      self.init_quantizer()
+
+    self.quantize_weight(clamp=clamp)
+    self.quantize_hp()
   
-  def clamp(self):
+  def clamp(self, force_init=False):
     """
     Clamp synaptic weight
+
+    Parameters
+    ----------
+    force_init = False : bool
+      force quantizer initialization
     """
 
-    raise NotImplementedError()
+    if force_init:
+      self.init_quantizer()
+
+    for p in self.parameters():
+      p.data = self.quantizer.clamp(p.data)
   
-  def set_quant(self, mode=False):
+  def run_quantize(self, mode=False):
+    """
+    Set the quantization flag
+
+    Parameters
+    ----------
+    mode : bool = False
+      enable (True) or disable (False) quantization during the forward pass
+    """
+
     self.quantization_flag = mode
   
   def hardware_estimation(self, mode = False):
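
All weight and membrane quantization in the refactored neurons is funnelled through QuantizeSTE.apply, imported from modnef.quantizer but not defined in this diff (some call sites also pass an extra clamp flag, omitted here). As a mental model only, and assuming the quantizer is callable on tensors as in the code above, a straight-through estimator quantizes on the forward pass and passes gradients through unchanged:

import torch

class QuantizeSTESketch(torch.autograd.Function):
    """Hypothetical sketch of a straight-through quantization estimator."""

    @staticmethod
    def forward(ctx, tensor, quantizer):
        # quantize on the forward pass only
        return quantizer(tensor)

    @staticmethod
    def backward(ctx, grad_output):
        # identity gradient for the tensor, no gradient for the quantizer object
        return grad_output, None
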
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
index 694971edc6b2b40a4b7d5a2f83760d45095fd043..895adcef790cd56f99090a539b4f7d535fbf76a0 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
@@ -1,7 +1,7 @@
 """
 File name: rslif
 Author: Aurélie Saulquin  
-Version: 1.1.0
+Version: 1.2.1
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron, math, modnef.quantizer
@@ -9,16 +9,15 @@ Descriptions: ModNEF torch reccurent SLIF neuron model
 Based on snntorch.RLeaky and snntroch.LIF class
 """
 
-import torch.nn as nn
 import torch
-from snntorch import LIF
 import modnef.arch_builder as builder
 from modnef.arch_builder.modules.utilities import *
 from ..modnef_torch_neuron import ModNEFNeuron, _quantizer
 from math import ceil, log
-from modnef.quantizer import MinMaxQuantizer
+from modnef.quantizer import MinMaxQuantizer, QuantizeSTE
+from modnef.modnef_torch.quantLinear import QuantLinear
 
-class RSLIF(LIF, ModNEFNeuron):
+class RSLIF(ModNEFNeuron):
   """
   ModNEFTorch reccurent Simplifed LIF neuron model
 
@@ -119,34 +118,19 @@ class RSLIF(LIF, ModNEFNeuron):
       quantization function
     """
 
-    LIF.__init__(
-      self=self,
-      beta = v_leak,
-      threshold=threshold,
-      spike_grad=spike_grad,
-      surrogate_disable=False,
-      init_hidden=False,
-      inhibition=False,
-      learn_beta=False,
-      learn_threshold=False,
-      reset_mechanism="zero",
-      state_quant=False,
-      output=False,
-      graded_spikes_factor=1.0,
-      learn_graded_spikes_factor=False
-    )
-
-    ModNEFNeuron.__init__(self, quantizer=quantizer)
+    super().__init__(threshold=threshold,
+                     in_features=in_features,
+                     out_features=out_features,
+                     reset_mechanism="zero",
+                     spike_grad=spike_grad,
+                     quantizer=quantizer
+                     )
 
     self.register_buffer("v_leak", torch.as_tensor(v_leak))
     self.register_buffer("v_min", torch.as_tensor(v_min))
     self.register_buffer("v_rest", torch.as_tensor(v_rest))
 
-    self.in_features = in_features
-    self.out_features = out_features
-
-    self.fc = nn.Linear(self.in_features, self.out_features, bias=False)
-    self.reccurent = nn.Linear(self.out_features, self.out_features, bias=False)
+    self.reccurent = QuantLinear(out_features, out_features)
 
     self._init_mem()
 
@@ -253,42 +237,41 @@ class RSLIF(LIF, ModNEFNeuron):
 
     if not spk == None:
       self.spk = spk
-    
 
-    input_ = self.fc(input_)
+    quant = self.quantizer if self.quantization_flag else None
+
+    forward_current = self.fc(input_, quant)
+
+    if not self.mem.shape == forward_current.shape:
+      self.mem = torch.ones_like(forward_current, device=self.mem.device)*self.v_rest
+
+    if not self.spk.shape == forward_current.shape:
+      self.spk = torch.zeros_like(forward_current, device=self.spk.device)
 
-    if not self.mem.shape == input_.shape:
-      self.mem = torch.ones_like(input_)*self.v_rest
-    
-    if not self.spk.shape == input_.shape:
-      self.spk = torch.zeros_like(input_)
 
     self.reset = self.mem_reset(self.mem)
 
-    rec_input = self.reccurent(self.spk)
+    rec_current = self.reccurent(self.spk, quant)
 
-    if self.quantization_flag:
-      input_.data = self.quantizer(input_.data, True)
-      rec_input.data = self.quantizer(rec_input.data, True)
-      self.mem = self.quantizer(self.mem.data, True)
+    self.mem = self.mem + forward_current + rec_current 
 
-    self.mem = self.mem + input_ + rec_input
+    self.mem = self.mem-self.reset*(self.mem-self.v_rest)
 
     if self.hardware_estimation_flag:
-      self.val_min = torch.min(torch.min(input_.min(), self.mem.min()), self.val_min)
-      self.val_max = torch.max(torch.max(input_.max(), self.mem.max()), self.val_max)
+      self.val_min = torch.min(self.mem.min(), self.val_min).detach()
+      self.val_max = torch.max(self.mem.max(), self.val_max).detach()
 
+    # update neuron
     self.mem = self.mem - self.v_leak
+    min_reset = (self.mem<self.v_min).to(torch.float32)
+    self.mem = self.mem-min_reset*(self.mem-self.v_rest)
 
-    self.spk = self.fire(self.mem)
-
-    do_spike_reset = (self.spk/self.graded_spikes_factor - self.reset)
-    do_min_reset = (self.mem<self.v_min).to(torch.float32)
+    if self.quantization_flag:
+      self.mem.data = QuantizeSTE.apply(self.mem, self.quantizer)
 
-    self.mem = self.mem - do_spike_reset*(self.mem-self.v_rest)
-    self.mem = self.mem - do_min_reset*(self.mem-self.v_rest)
+    spk = self.fire(self.mem)
 
-    return self.spk, self.mem
+    return spk, self.mem
   
   def get_builder_module(self, module_name : str, output_path : str = "."):
     """
@@ -335,56 +318,18 @@ class RSLIF(LIF, ModNEFNeuron):
       output_path=output_path
     )
     return module
-  
-  def quantize_weight(self):
-    """
-    Quantize synaptic weight
-    """
-
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(weight=self.fc.weight, rec_weight=self.reccurent.weight)
 
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
-    self.reccurent.weight.data = self.quantizer(self.reccurent.weight.data, True)
-
-  def quantize_parameters(self):
+  def quantize_hp(self):
     """
-    Quantize neuron hyper-parameters
+    Quantize neuron hyper-parameters.
+    Assumes the quantizer has already been initialized.
     """
 
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(weight=self.fc.weight, rec_weight=self.reccurent.weight)
-
-    self.v_leak.data = self.quantizer(self.v_leak.data, True)
-    self.v_min.data = self.quantizer(self.v_min.data, True)
-    self.v_rest.data = self.quantizer(self.v_rest.data, True)
-    self.threshold.data = self.quantizer(self.threshold, True)
+    self.v_leak.data = QuantizeSTE.apply(self.v_leak, self.quantizer)
+    self.v_min.data = QuantizeSTE.apply(self.v_min, self.quantizer)
+    self.v_rest.data = QuantizeSTE.apply(self.v_rest, self.quantizer)
+    self.threshold.data = QuantizeSTE.apply(self.threshold, self.quantizer)
 
-  def quantize(self, force_init=False):
-    """
-    Quantize synaptic weight and neuron hyper-parameters
-
-    Parameters
-    ----------
-    force_init = Fasle : bool
-      force quantizer initialization
-    """
-    
-    if force_init or not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
-
-    self.quantize_weight()
-    self.quantize_parameters()
-
-  def clamp(self):
-    """
-    Clamp synaptic weight and neuron hyper-parameters
-    """
-
-    self.fc.weight.data = self.quantizer.clamp(self.fc.weight.data)
-    self.reccurent.weight.data = self.quantizer.clamp(self.reccurent.weight.data)
-
-  
   @classmethod
   def detach_hidden(cls):
     """Returns the hidden states, detached from the current graph.
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
index fa370b99a19bbe8ea30bf26807b506bcddfa1e44..5e93c3926028c2c2799e27dcf0a2e148b28cf33a 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
@@ -1,7 +1,7 @@
 """
 File name: slif
 Author: Aurélie Saulquin  
-Version: 1.1.0
+Version: 1.2.1
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron, math, modnef.quantizer
@@ -9,16 +9,14 @@ Descriptions: ModNEF torch SLIF neuron model
 Based on snntorch.Leaky and snntroch.LIF class
 """
 
-import torch.nn as nn
 import torch
-from snntorch import LIF
 import modnef.arch_builder as builder
 from modnef.arch_builder.modules.utilities import *
 from ..modnef_torch_neuron import ModNEFNeuron, _quantizer
 from math import ceil, log
-from modnef.quantizer import MinMaxQuantizer
+from modnef.quantizer import MinMaxQuantizer, QuantizeSTE
 
-class SLIF(LIF, ModNEFNeuron):
+class SLIF(ModNEFNeuron):
   """
   ModNEFTorch reccurent Simplifed LIF neuron model
 
@@ -119,34 +117,18 @@ class SLIF(LIF, ModNEFNeuron):
       quantization method
     """
 
-    LIF.__init__(
-      self=self,
-      beta = v_leak,
-      threshold=threshold,
-      spike_grad=spike_grad,
-      surrogate_disable=False,
-      init_hidden=False,
-      inhibition=False,
-      learn_beta=False,
-      learn_threshold=False,
-      reset_mechanism="zero",
-      state_quant=False,
-      output=False,
-      graded_spikes_factor=1.0,
-      learn_graded_spikes_factor=False
-    )
-
-    ModNEFNeuron.__init__(self, quantizer=quantizer)
+    super().__init__(threshold=threshold,
+                     in_features=in_features,
+                     out_features=out_features,
+                     reset_mechanism="zero",
+                     spike_grad=spike_grad,
+                     quantizer=quantizer
+                     )
 
     self.register_buffer("v_leak", torch.as_tensor(v_leak))
     self.register_buffer("v_min", torch.as_tensor(v_min))
     self.register_buffer("v_rest", torch.as_tensor(v_rest))
 
-    self.in_features = in_features
-    self.out_features = out_features
-
-    self.fc = nn.Linear(self.in_features, self.out_features, bias=False)
-
     self._init_mem()
     
     self.hardware_description = {
@@ -251,32 +233,33 @@ class SLIF(LIF, ModNEFNeuron):
     if not spk == None:
       self.spk = spk
 
-    input_ = self.fc(input_)
+    quant = self.quantizer if self.quantization_flag else None
 
-    if not self.mem.shape == input_.shape:
-      self.mem = torch.ones_like(input_)*self.v_rest
-
-    if self.quantization_flag:
-      input_.data = self.quantizer(input_.data, True)
-      self.mem.data = self.quantizer(self.mem.data, True)
+    forward_current = self.fc(input_, quant)
 
+    if not self.mem.shape == forward_current.shape:
+      self.mem = torch.ones_like(forward_current, device=self.mem.device)*self.v_rest
+    
+    
     self.reset = self.mem_reset(self.mem)
 
-    self.mem = self.mem + input_
+    self.mem = self.mem + forward_current
+
+    self.mem = self.mem - self.reset*(self.mem-self.v_rest)
 
     if self.hardware_estimation_flag:
-      self.val_min = torch.min(torch.min(input_.min(), self.mem.min()), self.val_min)
-      self.val_max = torch.max(torch.max(input_.max(), self.mem.max()), self.val_max)
-      
-    self.mem = self.mem-self.v_leak
+      self.val_min = torch.min(self.mem.min(), self.val_min).detach()
+      self.val_max = torch.max(self.mem.max(), self.val_max).detach()
 
-    spk = self.fire(self.mem)
+    # update neuron
+    self.mem = self.mem - self.v_leak
+    min_reset = (self.mem<self.v_min).to(torch.float32)
+    self.mem = self.mem-min_reset*(self.mem-self.v_rest)
 
-    do_spike_reset = (spk/self.graded_spikes_factor - self.reset)
-    do_min_reset = (self.mem<self.v_min).to(torch.float32)
+    if self.quantization_flag:
+      self.mem.data = QuantizeSTE.apply(self.mem, self.quantizer)
 
-    self.mem = self.mem - do_spike_reset*(self.mem-self.v_rest)
-    self.mem = self.mem - do_min_reset*(self.mem-self.v_rest)
+    spk = self.fire(self.mem)
 
     return spk, self.mem
   
@@ -299,9 +282,7 @@ class SLIF(LIF, ModNEFNeuron):
     if self.hardware_description["variable_size"]==-1:
       if self.hardware_estimation_flag:
         val_max = max(abs(self.val_max), abs(self.val_min))
-        print(val_max)
-        val_max = self.quantizer(val_max, dtype=torch.int32)
-        print(val_max)
+        val_max = self.quantizer(val_max)
         self.hardware_description["variable_size"] = ceil(log(val_max)/log(256))*8
       else:
         self.hardware_description["variable_size"]=16
@@ -326,53 +307,18 @@ class SLIF(LIF, ModNEFNeuron):
       output_path=output_path
     )
     return module
-  
-  def quantize_weight(self):
-    """
-    Quantize synaptic weight
-    """
 
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(weight=self.fc.weight)
-
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
-
-  def quantize_parameters(self):
+  def quantize_hp(self):
     """
-    Quantize neuron hyper-parameters
+    Quantize neuron hyper-parameters.
+    Assumes the quantizer has already been initialized.
     """
 
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(weight=self.fc.weight)
+    self.v_leak.data = QuantizeSTE.apply(self.v_leak, self.quantizer)
+    self.v_min.data = QuantizeSTE.apply(self.v_min, self.quantizer)
+    self.v_rest.data = QuantizeSTE.apply(self.v_rest, self.quantizer)
+    self.threshold.data = QuantizeSTE.apply(self.threshold, self.quantizer)
 
-    self.v_leak.data = self.quantizer(self.v_leak.data, True)
-    self.v_min.data = self.quantizer(self.v_min.data, True)
-    self.v_rest.data = self.quantizer(self.v_rest.data, True)
-    self.threshold.data = self.quantizer(self.threshold, True)
-
-  def quantize(self, force_init=False):
-    """
-    Quantize synaptic weight and neuron hyper-parameters
-
-    Parameters
-    ----------
-    force_init = Fasle : bool
-      force quantizer initialization
-    """
-    
-    if force_init or not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(self.fc.weight)
-
-    self.quantize_weight()
-    self.quantize_parameters()
-
-  def clamp(self):
-    """
-    Clamp synaptic weight and neuron hyper-parameters
-    """
-
-    self.fc.weight.data = self.quantizer.clamp(self.fc.weight.data)
-  
   @classmethod
   def detach_hidden(cls):
     """Returns the hidden states, detached from the current graph.
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
index 0193876ac1a13774dcc1062ba9642cacffdfbec3..07a631bc275e75bdc04896a02f0a04cc9f39d41f 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
@@ -1,7 +1,7 @@
 """
 File name: rsrlif
 Author: Aurélie Saulquin  
-Version: 1.1.0
+Version: 1.2.1
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron, math, modnef.quantizer
@@ -9,17 +9,16 @@ Descriptions: ModNEF torch reccurent Shift LIF neuron model
 Based on snntorch.RLeaky and snntorch.LIF class
 """
 
-import torch.nn as nn
 import torch
-from snntorch import LIF
 import modnef.arch_builder as builder
 from modnef.arch_builder.modules.utilities import *
 from ..modnef_torch_neuron import ModNEFNeuron, _quantizer
+from modnef.modnef_torch.quantLinear import QuantLinear
 from math import log, ceil
-from modnef.quantizer import DynamicScaleFactorQuantizer
+from modnef.quantizer import DynamicScaleFactorQuantizer, QuantizeSTE
 
 
-class RShiftLIF(LIF, ModNEFNeuron):
+class RShiftLIF(ModNEFNeuron):
   """
   ModNEFTorch reccurent Shift LIF neuron model
 
@@ -116,34 +115,24 @@ class RShiftLIF(LIF, ModNEFNeuron):
     quantizer = DynamicScaleFactoirQuantizer(8) : Quantizer
       quantization method
     """
+
+    super().__init__(threshold=threshold,
+                     in_features=in_features,
+                     out_features=out_features,
+                     reset_mechanism=reset_mechanism,
+                     spike_grad=spike_grad,
+                     quantizer=quantizer
+                     )
     
     self.shift = int(-log(1-beta)/log(2))
 
     if (1-2**-self.shift) != beta:
       print(f"initial value of beta ({beta}) has been change for {1-2**-self.shift} = 1-2**-{self.shift}")
       beta = 1-2**-self.shift
-    
-    LIF.__init__(
-      self=self,
-      beta=beta,
-      threshold=threshold,
-      spike_grad=spike_grad,
-      surrogate_disable=False,
-      init_hidden=False,
-      inhibition=False,
-      learn_beta=False,
-      learn_threshold=False,
-      reset_mechanism=reset_mechanism,
-      state_quant=False,
-      output=False,
-      graded_spikes_factor=1.0,
-      learn_graded_spikes_factor=False,
-    )
 
-    ModNEFNeuron.__init__(self=self, quantizer=quantizer)
+    self.reccurent = QuantLinear(out_features, out_features)
 
-    self.fc = nn.Linear(in_features, out_features, bias=False)
-    self.reccurent = nn.Linear(out_features, out_features, bias=False)
+    self.register_buffer("beta", torch.tensor(beta))
 
     self._init_mem()
 
@@ -256,36 +245,35 @@ class RShiftLIF(LIF, ModNEFNeuron):
     if not spk == None:
       self.spk = spk
 
-    input_ = self.fc(input_)
-
-    if not self.mem.shape == input_.shape:
-      self.mem = torch.zeros_like(input_, device=self.mem.device)
+    quant = self.quantizer if self.quantization_flag else None
 
-    if not self.spk.shape == input_.shape:
-      self.spk = torch.zeros_like(input_, device=self.spk.device)
+    forward_current = self.fc(input_, quant)
 
-    self.reset = self.mem_reset(self.mem)
-
-    rec_input = self.reccurent(self.spk)
+    if not self.mem.shape == forward_current.shape:
+      self.mem = torch.zeros_like(forward_current, device=self.mem.device)
+    
+    if not self.spk.shape == forward_current.shape:
+      self.spk = torch.zeros_like(forward_current, device=self.spk.device)
 
-    if self.quantization_flag:
-      self.mem.data = self.quantizer(self.mem.data, True)
-      input_.data = self.quantizer(input_.data, True)
-      rec_input.data = self.quantizer(rec_input.data, True)
+    rec_current = self.reccurent(self.spk, quant)
 
+    self.reset = self.mem_reset(self.mem)
 
-    self.mem = self.mem+input_+rec_input
+    self.mem = self.mem+forward_current+rec_current
 
     if self.reset_mechanism == "subtract":
-      self.mem = self.mem-self.__shift(self.mem)-self.reset*self.threshold
+      self.mem = self.mem-self.reset*self.threshold
     elif self.reset_mechanism == "zero":
-      self.mem = self.mem-self.__shift(self.mem)-self.reset*self.mem
-    else:
-      self.mem = self.mem*self.beta
+      self.mem = self.mem-self.reset*self.mem
 
     if self.hardware_estimation_flag:
-      self.val_min = torch.min(torch.min(input_.min(), self.mem.min()), self.val_min)
-      self.val_max = torch.max(torch.max(input_.max(), self.mem.max()), self.val_max)
+      self.val_min = torch.min(self.mem.min(), self.val_min).detach()
+      self.val_max = torch.max(self.mem.max(), self.val_max).detach()
+
+    self.mem = self.mem-self.__shift(self.mem)
+
+    if self.quantization_flag:
+      self.mem.data = QuantizeSTE.apply(self.mem, self.quantizer)
 
     self.spk = self.fire(self.mem)
 
@@ -310,7 +298,7 @@ class RShiftLIF(LIF, ModNEFNeuron):
     if self.hardware_description["variable_size"]==-1:
       if self.hardware_estimation_flag:
         val_max = max(abs(self.val_max), abs(self.val_min))
-        val_max = val_max*2**(self.hardware_description["compute_fp"])
+        val_max = self.quantizer(val_max)
         self.hardware_description["variable_size"] = ceil(log(val_max)/log(256))*8
       else:
         self.hardware_description["variable_size"]=16
@@ -337,51 +325,13 @@ class RShiftLIF(LIF, ModNEFNeuron):
     )
     return module
 
-  def quantize_weight(self):
-    """
-    Quantize synaptic weight
-    """
-
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(weight=self.fc.weight, rec_weight=self.reccurent.weight)
-
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
-    self.reccurent.weight.data = self.quantizer(self.reccurent.weight.data, True)
-
-  def quantize_parameters(self):
-    """
-    Quantize neuron hyper-parameters
-    """
-
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(weight=self.fc.weight, rec_weight=self.reccurent.weight)
-
-    self.threshold.data = self.quantizer(self.threshold.data, True)
-    self.beta.data = self.quantizer(self.beta.data, True)
-
-  def quantize(self, force_init=False):
-    """
-    Quantize synaptic weight and neuron hyper-parameters
-
-    Parameters
-    ----------
-    force_init = Fasle : bool
-      force quantizer initialization
-    """
-    
-    if force_init or not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
-
-    self.quantize_weight()
-    self.quantize_parameters()
-
-  def clamp(self):
+  def quantize_hp(self):
     """
-    Clamp synaptic weight and neuron hyper-parameters
+    Quantize neuron hyper-parameters.
+    Assumes the quantizer has already been initialized.
     """
 
-    self.fc.weight.data = self.quantizer.clamp(self.fc.weight.data)
-    self.reccurent.weight.data = self.quantizer.clamp(self.reccurent.weight.data)
+    self.threshold.data = QuantizeSTE.apply(self.threshold, self.quantizer)
 
 
   @classmethod
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py
index 9bb6d1df8eeff1ab9044378d73e3625e4a9e4438..92698f819dd7b2e07dc12e64893015a40af2716d 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py
@@ -1,7 +1,7 @@
 """
 File name: srlif
 Author: Aurélie Saulquin  
-Version: 1.1.0
+Version: 1.2.1
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron, math, modnef.quantizer
@@ -9,16 +9,17 @@ Descriptions: ModNEF torch Shift LIF neuron model
 Based on snntorch.Leaky and snntroch.LIF class
 """
 
-import torch.nn as nn
 import torch
-from snntorch import LIF
+from snntorch.surrogate import fast_sigmoid
 import modnef.arch_builder as builder
 from modnef.arch_builder.modules.utilities import *
 from ..modnef_torch_neuron import ModNEFNeuron, _quantizer
 from math import log, ceil
-from modnef.quantizer import DynamicScaleFactorQuantizer
+from modnef.quantizer import DynamicScaleFactorQuantizer, QuantizeSTE
 
-class ShiftLIF(LIF, ModNEFNeuron):
+    
+
+class ShiftLIF(ModNEFNeuron):
   """
   ModNEFTorch Shift LIF neuron model
 
@@ -86,7 +87,7 @@ class ShiftLIF(LIF, ModNEFNeuron):
                out_features,
                beta,
                threshold=1.0,
-               spike_grad=None,
+               spike_grad=fast_sigmoid(slope=25),
                reset_mechanism="subtract",
                quantizer=DynamicScaleFactorQuantizer(8)
             ):
@@ -110,6 +111,17 @@ class ShiftLIF(LIF, ModNEFNeuron):
     quantizer = DynamicScaleFactorQuantizer(8) : Quantizer
       quantization method
     """
+
+    super().__init__(threshold=threshold,
+                     in_features=in_features,
+                     out_features=out_features,
+                     reset_mechanism=reset_mechanism,
+                     spike_grad=spike_grad,
+                     quantizer=quantizer
+                     )
     
     self.shift = int(-log(1-beta)/log(2))
 
@@ -117,26 +129,8 @@ class ShiftLIF(LIF, ModNEFNeuron):
       print(f"initial value of beta ({beta}) has been change for {1-2**-self.shift} = 1-2**-{self.shift}")
       beta = 1-2**-self.shift
 
-    LIF.__init__(
-      self=self,
-      beta=beta,
-      threshold=threshold,
-      spike_grad=spike_grad,
-      surrogate_disable=False,
-      init_hidden=False,
-      inhibition=False,
-      learn_beta=False,
-      learn_threshold=False,
-      reset_mechanism=reset_mechanism,
-      state_quant=False,
-      output=False,
-      graded_spikes_factor=1.0,
-      learn_graded_spikes_factor=False,
-    )
 
-    ModNEFNeuron.__init__(self=self, quantizer=quantizer)
-
-    self.fc = nn.Linear(in_features, out_features, bias=False)
+    self.register_buffer("beta", torch.tensor(beta))
 
     self._init_mem()
 
@@ -249,29 +243,30 @@ class ShiftLIF(LIF, ModNEFNeuron):
     if not spk == None:
       self.spk = spk
 
-    input_ = self.fc(input_)
+    quant = self.quantizer if self.quantization_flag else None
 
-    if not self.mem.shape == input_.shape:
-      self.mem = torch.zeros_like(input_, device=self.mem.device)
+    forward_current = self.fc(input_, quant)
 
-    self.reset = self.mem_reset(self.mem)
+    if not self.mem.shape == forward_current.shape:
+      self.mem = torch.zeros_like(forward_current, device=self.mem.device)
 
-    if self.quantization_flag:
-      self.mem.data = self.quantizer(self.mem.data, True)
-      input_.data = self.quantizer(input_.data, True)
+    self.reset = self.mem_reset(self.mem)
 
-    self.mem = self.mem+input_
+    self.mem = self.mem+forward_current
 
     if self.reset_mechanism == "subtract":
-      self.mem = self.mem-self.__shift(self.mem)-self.reset*self.threshold
+      self.mem = self.mem-self.reset*self.threshold
     elif self.reset_mechanism == "zero":
-      self.mem = self.mem-self.__shift(self.mem)-self.reset*self.mem
-    else:
-      self.mem = self.mem-self.__shift(self.mem)
+      self.mem = self.mem-self.reset*self.mem
 
     if self.hardware_estimation_flag:
-      self.val_min = torch.min(torch.min(input_.min(), self.mem.min()), self.val_min)
-      self.val_max = torch.max(torch.max(input_.max(), self.mem.max()), self.val_max)
+      self.val_min = torch.min(self.mem.min(), self.val_min).detach()
+      self.val_max = torch.max(self.mem.max(), self.val_max).detach()
+
+    self.mem = self.mem-self.__shift(self.mem)
+
+    if self.quantization_flag:
+      self.mem.data = QuantizeSTE.apply(self.mem, self.quantizer)
 
     self.spk = self.fire(self.mem)
 
@@ -301,6 +296,8 @@ class ShiftLIF(LIF, ModNEFNeuron):
       else:
         self.hardware_description["variable_size"]=16
 
+    #self.clamp(force_init=True)
+
     module = builder.ShiftLif(
         name=module_name,
         input_neuron=self.fc.in_features,
@@ -321,48 +318,13 @@ class ShiftLIF(LIF, ModNEFNeuron):
     )
     return module
 
-  def quantize_weight(self):
-    """
-    Quantize synaptic weight
-    """
-
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(weight=self.fc.weight)
-
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
-
-  def quantize_parameters(self):
-    """
-    Quantize neuron hyper-parameters
-    """
-
-    if not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(weight=self.fc.weight)
-
-    self.threshold.data = self.quantizer(self.threshold.data, True)
-    
-  def quantize(self, force_init=False):
-    """
-    Quantize synaptic weight and neuron hyper-parameters
-
-    Parameters
-    ----------
-    force_init = Fasle : bool
-      force quantizer initialization
-    """
-    
-    if force_init or not self.quantizer.is_initialize:
-      self.quantizer.init_from_weight(self.fc.weight)
-
-    self.quantize_weight()
-    self.quantize_parameters()
-
-  def clamp(self):
+  def quantize_hp(self):
     """
-    Clamp synaptic weight and neuron hyper-parameters
+    Quantize neuron hyper-parameters.
+    Assumes the quantizer has already been initialized.
     """
 
-    self.fc.weight.data = self.quantizer.clamp(self.fc.weight.data)
+    self.threshold.data = QuantizeSTE.apply(self.threshold, self.quantizer)
 
 
   @classmethod
diff --git a/modneflib/modnef/modnef_torch/quantLinear.py b/modneflib/modnef/modnef_torch/quantLinear.py
new file mode 100644
index 0000000000000000000000000000000000000000..f22f0a520133df3baa9723a1db5dc1311fbeaa61
--- /dev/null
+++ b/modneflib/modnef/modnef_torch/quantLinear.py
@@ -0,0 +1,59 @@
+"""
+File name: quantLinear
+Author: Aurélie Saulquin  
+Version: 0.1.1
+License: GPL-3.0-or-later
+Contact: aurelie.saulquin@univ-lille.fr
+Dependencies: torch, modnef.quantizer
+Descriptions: Quantized Linear torch layer
+"""
+
+import torch.nn as nn
+from modnef.quantizer import QuantizeSTE
+
+class QuantLinear(nn.Linear):
+  """
+  Quantized Linear torch layer
+  Extended from torch.nn.Linear
+
+  Methods
+  -------
+  forward(x, quantizer=None)
+    Apply linear forward; if quantizer is not None, quantized weights are used for the linear operation
+  """
+
+  def __init__(self, in_features : int, out_features : int):
+    """
+    Initialize class
+
+    Parameters
+    ----------
+    in_features : int
+      input features of layer
+    out_features : int
+      output features of layer
+    """
+
+    super().__init__(in_features=in_features, out_features=out_features, bias=False)
+
+  def forward(self, x, quantizer=None):
+    """
+    Apply linear forward; if quantizer is not None, quantized weights are used for the linear operation
+
+    Parameters
+    ----------
+    x : Tensor
+      input spikes
+    quantizer = None : Quantizer
+      quantization method.
+      If None, full precision weights are used for the linear operation
+    """
+
+    if quantizer!=None:
+      w = QuantizeSTE.apply(self.weight, quantizer)
+      #w.data = quantizer.clamp(w)
+    else:
+      w = self.weight
+
+
+    return nn.functional.linear(x, w)
\ No newline at end of file
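A minimal usage sketch of QuantLinear (the single-weight initialization call follows the pattern the ShiftLIF model used previously; the import paths are assumptions): passing a quantizer runs the forward with fake-quantized weights, passing nothing falls back to the full-precision weights.

    import torch
    from modnef.modnef_torch.quantLinear import QuantLinear
    from modnef.quantizer import DynamicScaleFactorQuantizer

    fc = QuantLinear(in_features=4, out_features=2)
    quantizer = DynamicScaleFactorQuantizer(8)
    quantizer.init_from_weight(weight=fc.weight)   # compute the scale factor from the layer weights

    x = torch.rand(3, 4)
    y_full = fc(x)               # full-precision weights
    y_quant = fc(x, quantizer)   # fake-quantized weights through QuantizeSTE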
diff --git a/modneflib/modnef/quantizer/__init__.py b/modneflib/modnef/quantizer/__init__.py
index 2455aa927340a4e69eb2cdd0f63a8b2e10270107..7ca95c0dde63c9d819a40ee3cd54da799384ebe1 100644
--- a/modneflib/modnef/quantizer/__init__.py
+++ b/modneflib/modnef/quantizer/__init__.py
@@ -10,4 +10,6 @@ Descriptions: ModNEF quantizer method
 from .quantizer import Quantizer
 from .fixed_point_quantizer import FixedPointQuantizer
 from .min_max_quantizer import MinMaxQuantizer
-from .dynamic_scale_quantizer import DynamicScaleFactorQuantizer
\ No newline at end of file
+from .dynamic_scale_quantizer import DynamicScaleFactorQuantizer
+from .ste_quantizer import QuantizeSTE
+from .quantizer_scheduler import QuantizerScheduler
\ No newline at end of file
diff --git a/modneflib/modnef/quantizer/dynamic_scale_quantizer.py b/modneflib/modnef/quantizer/dynamic_scale_quantizer.py
index 724a8f4b739bc34c14ea691dd19cfc8df795bcba..f1d7497a8893891ea15c8d2446f4a2b512848b1c 100644
--- a/modneflib/modnef/quantizer/dynamic_scale_quantizer.py
+++ b/modneflib/modnef/quantizer/dynamic_scale_quantizer.py
@@ -110,11 +110,11 @@ class DynamicScaleFactorQuantizer(Quantizer):
 
     self.is_initialize = True
 
-    if torch.is_tensor(weight):
+    if not torch.is_tensor(weight):
       weight = torch.Tensor(weight)
 
-    if torch.is_tensor(rec_weight):
-      rec_weight = torch.Tensor(weight)
+    if not torch.is_tensor(rec_weight):
+      rec_weight = torch.Tensor(rec_weight)
 
     if self.signed==None:
       self.signed = torch.min(weight.min(), rec_weight.min())<0.0
@@ -130,7 +130,7 @@ class DynamicScaleFactorQuantizer(Quantizer):
     #self.scale_factor = torch.max(torch.abs(weight).max(), torch.abs(weight).max())/2**(self.bitwidth-1)
 
 
-  def _quant(self, data, unscale) -> torch.Tensor:
+  def _quant(self, data) -> torch.Tensor:
     """
     Apply quantization
 
@@ -138,8 +138,6 @@ class DynamicScaleFactorQuantizer(Quantizer):
     ----------
     data : Tensor
       data to quantize
-    unscale = False : bool
-      If true, apply quantization and then, unquantize data to simulate quantization
 
     Returns
     -------
@@ -147,8 +145,20 @@ class DynamicScaleFactorQuantizer(Quantizer):
     """
 
     scaled = torch.round(data/self.scale_factor).to(self.dtype)
+    
+    return scaled
+    
+  def _unquant(self, data) -> torch.Tensor:
+    """
+    Unquantize data
 
-    if unscale:
-      return scaled*self.scale_factor
-    else:
-      return scaled
\ No newline at end of file
+    Parameters
+    ----------
+    data : Tensor
+      data to unquantize
+
+    Returns
+    -------
+    Tensor
+    """
+    return data*self.scale_factor
\ No newline at end of file
diff --git a/modneflib/modnef/quantizer/fixed_point_quantizer.py b/modneflib/modnef/quantizer/fixed_point_quantizer.py
index de6c789ff486c8df6c7abb86325d8c458086028d..16b08e53490ba071295f0c960eac0fc53c44335d 100644
--- a/modneflib/modnef/quantizer/fixed_point_quantizer.py
+++ b/modneflib/modnef/quantizer/fixed_point_quantizer.py
@@ -67,9 +67,6 @@ class FixedPointQuantizer(Quantizer):
     dtype = torch.int32 : torch.dtype
       type use during conversion
     """
-
-    if bitwidth==-1 and fixed_point==-1:
-      raise Exception("You must fix at least one value to compute the other one")
     
     super().__init__(
       bitwidth=bitwidth,
@@ -145,9 +142,9 @@ class FixedPointQuantizer(Quantizer):
       elif self.fixed_point==-1:
         self.fixed_point = self.bitwidth-int_part_size
         self.scale_factor = 2**self.fixed_point
+  
 
-
-  def _quant(self, data, unscale) -> torch.Tensor:
+  def _quant(self, data) -> torch.Tensor:
     """
     Apply quantization
 
@@ -155,8 +152,6 @@ class FixedPointQuantizer(Quantizer):
     ----------
     data : Tensor
       data to quantize
-    unscale = False : bool
-      If true, apply quantization and then, unquantize data to simulate quantization
 
     Returns
     -------
@@ -167,13 +162,20 @@ class FixedPointQuantizer(Quantizer):
 
     #scaled = torch.clamp(scaled, -2**(self.bitwidth-1), 2**(self.bitwidth-1)-1)
     
-    if unscale:
-      return (scaled.to(torch.float32))/self.scale_factor
-    else:
-      return scaled
+    return scaled
     
-  def _clamp(self, data):
-    b_min = (-2**(self.bitwidth-int(self.signed))*int(self.signed))/self.scale_factor
-    b_max = (2**(self.bitwidth-int(self.signed))-1)/self.scale_factor
+  def _unquant(self, data) -> torch.Tensor:
+    """
+    Unquantize data
+
+    Parameters
+    ----------
+    data : Tensor
+      data to unquantize
+
+    Returns
+    -------
+    Tensor
+    """
 
-    return torch.clamp(data, min=b_min, max=b_max)
\ No newline at end of file
+    return (data.to(torch.float32))/self.scale_factor
\ No newline at end of file
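As a worked example of the split: with bitwidth=8 and fixed_point=7 the scale factor is 2**7 = 128, so an integer code of 90 is dequantized by _unquant to 90/128 = 0.703125, and calling the quantizer with unscale=True chains quantization and dequantization exactly as the removed flag used to.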
diff --git a/modneflib/modnef/quantizer/min_max_quantizer.py b/modneflib/modnef/quantizer/min_max_quantizer.py
index a700f6bf2e99bda84af2f530e925af551cdf1c10..6ca813180dfc9d0de66ff4428564c6240b9f4649 100644
--- a/modneflib/modnef/quantizer/min_max_quantizer.py
+++ b/modneflib/modnef/quantizer/min_max_quantizer.py
@@ -134,7 +134,7 @@ class MinMaxQuantizer(Quantizer):
     self.b_max = 2**(self.bitwidth-int(self.signed))-1
     self.b_min = -int(self.signed)*self.b_max
 
-  def _quant(self, data, unscale) -> torch.Tensor:
+  def _quant(self, data) -> torch.Tensor:
     """
     Apply quantization
 
@@ -142,8 +142,6 @@ class MinMaxQuantizer(Quantizer):
     ----------
     data : Tensor
       data to quantize
-    unscale = False : bool
-      If true, apply quantization and then, unquantize data to simulate quantization
 
     Returns
     -------
@@ -152,7 +150,20 @@ class MinMaxQuantizer(Quantizer):
 
     scaled = ((data-self.x_min)/(self.x_max-self.x_min)*(self.b_max-self.b_min)+self.b_min).to(self.dtype)
     
-    if unscale:
-      return (scaled-self.b_min)/(self.b_max-self.b_min)*(self.x_max-self.x_min)+self.x_min
-    else:
-      return scaled
\ No newline at end of file
+    return scaled
+    
+  def _unquant(self, data) -> torch.Tensor:
+    """
+    Unquantize data
+
+    Parameters
+    ----------
+    data : Tensor
+      data to unquantize
+
+    Returns
+    -------
+    Tensor
+    """
+
+    return (data-self.b_min)/(self.b_max-self.b_min)*(self.x_max-self.x_min)+self.x_min
\ No newline at end of file
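Worked example for the min-max variant: with x_min = -1, x_max = 1 and a signed 8-bit range (b_min = -127, b_max = 127), a value of 0.5 maps to (0.5+1)/2 * 254 - 127 = 63.5, truncated to 63 by the integer cast, and _unquant sends 63 back to (63+127)/254 * 2 - 1 ≈ 0.496.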
diff --git a/modneflib/modnef/quantizer/quantizer.py b/modneflib/modnef/quantizer/quantizer.py
index 235981d76cf33be6881d9f6d16181aa68fc76c5e..20914b619575f2200ecf21dcb152e902eef42c85 100644
--- a/modneflib/modnef/quantizer/quantizer.py
+++ b/modneflib/modnef/quantizer/quantizer.py
@@ -91,7 +91,7 @@ class Quantizer():
     
     raise NotImplementedError()
 
-  def __call__(self, data, unscale=False):
+  def __call__(self, data, unscale=False, clamp=False):
     """
     Call quantization function
 
@@ -106,19 +106,34 @@ class Quantizer():
     -------
     int | float | list | numpy.array | Tensor (depending on type of data)
     """
+
+    if not torch.is_tensor(data):
+      tdata = torch.tensor(data, dtype=torch.float32)
+    else:
+      tdata = data
+
+    qdata = self._quant(tdata)
+
+    if clamp:
+      born_min = torch.tensor(-int(self.signed)*2**(self.bitwidth-1)).to(qdata.device)
+      born_max = torch.tensor(2**(self.bitwidth-int(self.signed))-1).to(qdata.device)
+      qdata = torch.clamp(qdata, min=born_min, max=born_max)
+
+    if unscale:
+      qdata = self._unquant(qdata).to(torch.float32)
     
     if isinstance(data, (int, float)):
-      return self._quant(data=torch.tensor(data), unscale=unscale).item()
+      return qdata.item()
     elif isinstance(data, list):
-      return self._quant(data=torch.tensor(data), unscale=unscale).tolist()
+      return qdata.tolist()
     elif isinstance(data, np.ndarray):
-      return self._quant(data=torch.tensor(data), unscale=unscale).numpy()
+      return qdata.numpy()
     elif torch.is_tensor(data):
-      return self._quant(data=data, unscale=unscale).detach()
+      return qdata.detach()
     else:
       raise TypeError("Unsupported data type")
 
-  def _quant(self, data, unscale) -> torch.Tensor:
+  def _quant(self, data) -> torch.Tensor:
     """
     Apply quantization
 
@@ -126,34 +141,46 @@ class Quantizer():
     ----------
     data : Tensor
       data to quantize
-    unscale = False : bool
-      If true, apply quantization and then, unquantize data to simulate quantization
 
     Returns
     -------
     Tensor
     """
 
-    pass
+    raise NotImplementedError()
+
+  def _unquant(self, data) -> torch.Tensor:
+    """
+    Unquantize data
+
+    Parameters
+    ----------
+    data : Tensor
+      data to unquantize
+
+    Returns
+    -------
+    Tensor
+    """
+
+    raise NotImplementedError()
 
   def clamp(self, data):
     """
-    Call quantization function
+    Call clamp function
 
     Parameters
     ----------
     data : int | float | list | numpy.array | Tensor
       data to quantize
-    unscale = False : bool
-      If true, apply quantization and then, unquantize data to simulate quantization
 
     Returns
     -------
     int | float | list | numpy.array | Tensor (depending on type of data)
     """
 
-    born_min = -int(self.signed)*2**(self.bitwidth-1)
-    born_max = 2**(self.bitwidth-int(self.signed))-1
+    born_min = self._unquant(torch.tensor(-int(self.signed)*2**(self.bitwidth-1))).item()
+    born_max = self._unquant(torch.tensor(2**(self.bitwidth-int(self.signed))-1)).item()
     
     if isinstance(data, (int, float)):
       return torch.clamp(torch.tensor(data), min=born_min, max=born_max).item()
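Schematically, the reworked __call__ composes the two hooks, with clamping applied to the integer codes; a short sketch (q stands for any initialized concrete quantizer, weights for a float tensor):

    codes = q(weights)                             # _quant only: integer codes
    codes = q(weights, clamp=True)                 # codes clamped to the representable range
    fake  = q(weights, unscale=True, clamp=True)   # _unquant of the clamped codes, as used by QuantizeSTE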
diff --git a/modneflib/modnef/quantizer/quantizer_scheduler.py b/modneflib/modnef/quantizer/quantizer_scheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d51f186aabc5136fe00984dfee9ff6938a084f6
--- /dev/null
+++ b/modneflib/modnef/quantizer/quantizer_scheduler.py
@@ -0,0 +1,55 @@
+"""
+File name: quantizer_scheduler
+Author: Aurélie Saulquin  
+Version: 0.1.0
+License: GPL-3.0-or-later
+Contact: aurelie.saulquin@univ-lille.fr
+Dependencies: modnef.modnef_torch
+Descriptions: ModNEF quantizer scheduler
+"""
+
+from modnef.modnef_torch import ModNEFNeuron
+
+class QuantizerScheduler():
+
+  def __init__(self, model, bit_range, T, quantizationMethod):
+
+    self.num_bits = [i for i in range(bit_range[0], bit_range[1]-1, -1)]
+
+    self.bit_counter = 0
+    self.epoch_counter = 0
+
+    self.bitwidth = self.num_bits[self.bit_counter]
+
+    self.period = T
+    self.epoch_max = self.period*(len(self.num_bits)-1)
+
+    self.model = model
+
+    self.quantizationMethod = quantizationMethod
+
+    self.__update()
+
+    
+
+  def __update(self):
+    for m in self.model.modules():
+      if isinstance(m, ModNEFNeuron):
+        m.quantizer = self.quantizationMethod(self.bitwidth)
+        m.init_quantizer()
+        m.quantize_hp()
+
+  def step(self):
+
+    self.epoch_counter += 1
+
+    if self.epoch_counter > self.epoch_max:
+      return
+    else:
+      if self.epoch_counter%self.period==0:
+        self.bit_counter += 1
+        self.bitwidth = self.num_bits[self.bit_counter]
+        self.__update()
+
+  def save_model(self):
+    return self.epoch_counter >= self.epoch_max
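A minimal sketch of how the scheduler is meant to be driven, mirroring the commented example in templates/train.py (model, n_epoch and the per-epoch training call are assumed to exist); the callable builds a fresh quantizer each time the bitwidth drops:

    import torch
    from modnef.quantizer import QuantizerScheduler, FixedPointQuantizer

    # step from 8 bits down to 3 bits, lowering the bitwidth every 3 epochs
    qat_scheduler = QuantizerScheduler(model, (8, 3), 3,
                                       lambda b: FixedPointQuantizer(b, b-1, True, True))

    for epoch in range(n_epoch):
        ...                            # one epoch of quantization-aware training
        qat_scheduler.step()           # may lower the bitwidth and re-initialize the quantizers
        if qat_scheduler.save_model():
            torch.save(model.state_dict(), "best_model")   # only once the target bitwidth is reached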
diff --git a/modneflib/modnef/quantizer/ste_quantizer.py b/modneflib/modnef/quantizer/ste_quantizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b66832e320fbb464ae48abcb65b866b9ace0d662
--- /dev/null
+++ b/modneflib/modnef/quantizer/ste_quantizer.py
@@ -0,0 +1,62 @@
+"""
+File name: ste_quantizer
+Author: Aurélie Saulquin  
+Version: 0.1.0
+License: GPL-3.0-or-later
+Contact: aurelie.saulquin@univ-lille.fr
+Dependencies: torch
+Descriptions: Straight-Through Estimator quantization method
+"""
+
+import torch
+
+class QuantizeSTE(torch.autograd.Function):
+  """
+  Straight-Through Estimator quantization method
+
+  Methods
+  -------
+  @staticmethod
+  forward(ctx, data, quantizer)
+    Apply quantization method to data
+  @staticmethod
+  backward(ctx, grad_output)
+    Returns backward gradient
+  """
+  
+  @staticmethod
+  def forward(ctx, data, quantizer, clamp=False):
+    """
+    Apply quantization method to data
+
+    Parameters
+    ----------
+    ctx : torch.autograd.function.BackwardCFunction
+      Autograd context used to store variables for the backward pass
+    data : Tensor
+      data to quantize
+    quantizer : Quantizer
+      quantization method applied to data
+    clamp = False : bool
+      if True, quantized values are clamped to the representable range before unscaling
+    """
+    
+    q_data = quantizer(data, unscale=True, clamp=clamp)
+
+
+    ctx.scale = quantizer.scale_factor
+
+    return q_data
+
+  @staticmethod
+  def backward(ctx, grad_output):
+    """
+    Return backward gradient without modification
+
+    Parameters
+    ----------
+    ctx : torch.autograd.function.BackwardCFunction
+      Autograd context used to store variables for the backward pass
+    grad_output : Tensor
+      gradient
+    """
+    
+    # one gradient slot per forward input (data, quantizer, clamp)
+    return grad_output, None, None
\ No newline at end of file
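A small sketch of the straight-through behaviour (the quantizer initialization is only there to make the example self-contained): the forward pass sees fake-quantized values, the backward pass propagates the incoming gradient unchanged.

    import torch
    from modnef.quantizer import QuantizeSTE, DynamicScaleFactorQuantizer

    w = torch.randn(4, 4, requires_grad=True)
    q = DynamicScaleFactorQuantizer(8)
    q.init_from_weight(weight=w, rec_weight=w)   # initialize the scale factor for the example

    w_q = QuantizeSTE.apply(w, q)   # forward: quantize then dequantize
    w_q.sum().backward()            # backward: identity, so w.grad is all ones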
diff --git a/modneflib/modnef/templates/dataset.py b/modneflib/modnef/templates/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0b86fc5a43069f74cf733e33b7dd16c7d8360af
--- /dev/null
+++ b/modneflib/modnef/templates/dataset.py
@@ -0,0 +1,20 @@
+import os
+import tonic
+from torch.utils.data import DataLoader
+
+"""DataSet Definition"""
+dataset_path = f"{os.environ['HOME']}/datasets"
+
+# data set definition, change to your dataset
+sensor_size = tonic.datasets.NMNIST.sensor_size
+frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=5)
+
+train_set = tonic.datasets.NMNIST(save_to=dataset_path, train=True, transform=frame_transform)
+test_set = tonic.datasets.NMNIST(save_to=dataset_path, train=False, transform=frame_transform)
+
+# batch loader
+batch_size = 64
+
+trainLoader = DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last = True, collate_fn = tonic.collation.PadTensors(batch_first=True))
+testLoader = DataLoader(test_set, batch_size=batch_size, shuffle=True, drop_last = True, collate_fn = tonic.collation.PadTensors(batch_first=True))
+validationLoader = None
\ No newline at end of file
diff --git a/modneflib/modnef/templates/evaluation.py b/modneflib/modnef/templates/evaluation.py
index aa74e22091b28a52bb36892ac081df0c918c7803..8fa94db51748bc8254de854504312ab584edd0ad 100644
--- a/modneflib/modnef/templates/evaluation.py
+++ b/modneflib/modnef/templates/evaluation.py
@@ -6,19 +6,26 @@ from snntorch.surrogate import fast_sigmoid
 import torch
 from run_lib import *
 import sys
+from model import MyModel
+from dataset import *
 
 if __name__ == "__main__":
 
   """Experience name"""
   exp_name = "Evaluation"
 
+  """Device definition"""
+  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
   """Model definition"""
-  model_path = "model_template.json"
   best_model_name = "best_model"
 
-  model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+  # model_path = "model_template.json"
+  # model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+
+  model = MyModel("template_model", spike_grad=fast_sigmoid(slope=25))
 
-  model.load_state_dict(torch.load(best_model_name))
+  model.load_state_dict(torch.load(best_model_name, map_location=device))
 
 
   """Kind of run
@@ -29,16 +36,12 @@ if __name__ == "__main__":
   
   kind = sys.argv[1] 
 
-  """Device definition"""
-  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-
   """Evaluation variable definition"""
   verbose = True
-  save_conf_matrix = False
   output_path = "."
 
   """FPGA file definition"""
-  driver_config_path = "driver_config"
+  driver_config_path = "driver_config.yml"
   board_path = ""
 
 
@@ -46,24 +49,16 @@ if __name__ == "__main__":
   acc = 0.0
   y_true = None
   y_pred = None
-  save_conf_matrix = False
-  conf_matrix_file = "confusion_matrix.png"
-  conf_matrix_classes = [str(i) for i in range(10)]
+  run_time = None
   
-  """DataSet Definition"""
-  dataset_path = f"{os.environ['HOME']}/datasets"
-
-  # data set definition, change to your dataset
-  sensor_size = tonic.datasets.NMNIST.sensor_size
-  frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=10)
-
-  test_set = tonic.datasets.NMNIST(save_to=dataset_path, train=False, transform=frame_transform)
+  save_conf_matrix = False
+  conf_matrix_file = "confusion_matrix.svg"
+  num_class = 10
+  conf_matrix_classes = [str(i) for i in range(num_class)]
 
-  # batch loader
-  batch_size = 64
+  save_array = False
+  
   
-  testLoader = DataLoader(test_set, batch_size=batch_size, shuffle=True, drop_last = True, collate_fn = tonic.collation.PadTensors(batch_first=True))
-
   if kind == "eval":
     acc, y_pred, y_true = evaluation(
       model=model, 
@@ -74,6 +69,7 @@ if __name__ == "__main__":
       quant=False
       )
   elif kind == "qeval":
+    model.quantize(force_init=True, clamp=True)
     acc, y_pred, y_true = evaluation(
       model=model, 
       testLoader=testLoader, 
@@ -83,7 +79,7 @@ if __name__ == "__main__":
       quant=True
       )
   elif kind == "feval":
-    acc, y_pred, y_true = fpga_evaluation(
+    acc, y_pred, y_true, run_time = fpga_evaluation(
       model=model, 
       driver_config=driver_config_path,
       board_path=board_path,
@@ -96,9 +92,15 @@ if __name__ == "__main__":
     exit(-1)
 
   if save_conf_matrix:
-    confusion_matrix(
+    conf_matrix(
       y_true=y_true,
       y_pred=y_pred,
       file_name=conf_matrix_file,
       classes=conf_matrix_classes
-      )
\ No newline at end of file
+      )
+
+  if save_array:
+    np.save(f"{output_path}/y_true.npy", y_true)
+    np.save(f"{output_path}/y_pred.npy", y_pred)
+    if kind=="feval":
+      np.save(f"{output_path}/run_time.npy", run_time)
\ No newline at end of file
diff --git a/modneflib/modnef/templates/model.py b/modneflib/modnef/templates/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..6868daf3e509f7cf5215a18f678c2b364a809f62
--- /dev/null
+++ b/modneflib/modnef/templates/model.py
@@ -0,0 +1,184 @@
+import modnef.modnef_torch as mt
+from modnef.arch_builder import *
+from snntorch.surrogate import fast_sigmoid
+from modnef.quantizer import *
+import torch
+
+class MyModel(mt.ModNEFModel):
+
+  def __init__(self, name, spike_grad=fast_sigmoid(slope=25)):
+
+    super().__init__()
+
+    self.name = name
+    
+    self.layer1 = mt.SLIF(in_features=2312,
+                          out_features=128,
+                          threshold=0.8,
+                          v_leak=0.015,
+                          v_min=0.0,
+                          v_rest=0.0,
+                          spike_grad=spike_grad,
+                          quantizer=MinMaxQuantizer(
+                            bitwidth=8,
+                            signed=None
+                          ))
+    
+    self.layer2 = mt.ShiftLIF(in_features=128,
+                              out_features=64,
+                              threshold=0.8,
+                              beta=0.875,
+                              reset_mechanism="subtract",
+                              spike_grad=spike_grad,
+                              quantizer=DynamicScaleFactorQuantizer(
+                                bitwidth=8,
+                                signed=None
+                              ))
+    
+    self.layer3 = mt.BLIF(in_features=64,
+                          out_features=10,
+                          threshold=0.8,
+                          beta=0.9,
+                          reset_mechanism="subtract",
+                          spike_grad=spike_grad,
+                          quantizer=FixedPointQuantizer(
+                            bitwidth=8,
+                            fixed_point=7,
+                            signed=None
+                          ))
+    
+  def software_forward(self, input_spikes):
+    """
+    Run layers update
+
+    Parameters
+    ----------
+    input_spikes : Tensor
+      input spikes
+
+    Returns
+    -------
+    tuple of tensor
+      output_spike, output_mem
+    """
+    
+    spk1, mem1 = self.layer1.reset_mem()
+    spk2, mem2 = self.layer2.reset_mem()
+    spk3, mem3 = self.layer3.reset_mem()
+
+    spk_rec = []
+    mem_rec = []
+
+    batch_size = input_spikes.shape[0]
+    n_steps = input_spikes.shape[1]
+
+    for step in range(n_steps):
+      x = input_spikes[:, step].reshape(batch_size, -1)
+
+      spk1, mem1 = self.layer1(x, mem1, spk1)
+      spk2, mem2 = self.layer2(spk1, mem2, spk2)
+      spk3, mem3 = self.layer3(spk2, mem3, spk3)
+
+      spk_rec.append(spk3)
+      mem_rec.append(mem3)
+
+    return torch.stack(spk_rec, dim=0), torch.stack(mem_rec, dim=0)
+  
+  def fpga_forward(self, input_spikes):
+    """
+    Transmit input spike to FPGA
+
+    Parameters
+    ----------
+    input_spikes : Tensor
+      input spikes
+
+    Returns
+    -------
+    tuple of tensor
+      output_spike, None
+    """
+
+    def to_aer(input):
+      input = input.reshape(-1).to(torch.int32)
+
+      aer = []
+      for i in range(input.shape[0]):
+        for _ in range(input[i]):
+          aer.append(i)
+
+      return aer
+    
+    if self.driver == None:
+      raise Exception("please open the FPGA driver first")
+    
+    batch_result = []
+
+    run_time = []
+
+    n_layer = 0
+
+    for m in self.modules():
+      if isinstance(m, mt.ModNEFNeuron):
+        n_layer += 1
+
+    for sample in input_spikes:
+      sample_res = self.driver.run_sample(sample, to_aer, True, n_layer)
+      run_time.append(self.driver.sample_time)
+      batch_result.append([sample_res])
+
+    return torch.tensor(batch_result).permute(1, 0, 2), None, run_time
+  
+  def to_vhdl(self, file_name=None, output_path = ".", driver_config_path = "./driver.yml"):
+    """
+    Generate VHDL file of model
+
+    Parameters
+    ----------
+    file_name = None : str
+      VHDL file name
+      if default, file name is model name
+    output_path = "." : str
+      output file path
+    driver_config_path = "./driver.yml" : str
+      driver configuration file
+    """
+    
+    if file_name==None:
+      file_name = f"{output_path}/{self.name}.vhd"
+
+    builder = ModNEFBuilder(self.name, 2312, 10)
+
+
+    uart = Uart_XStep_Timer(
+      name="uart",
+      input_layer_size=2312,
+      output_layer_size=10,
+      clk_freq=125_000_000,
+      baud_rate=921_600,
+      queue_read_depth=10240,
+      queue_write_depth=1024,
+      tx_name="uart_txd",
+      rx_name="uart_rxd"
+    )
+
+    builder.add_module(uart)
+    builder.set_io(uart)
+
+    layer1_module = self.layer1.get_builder_module(f"{self.name}_layer1", output_path)
+    builder.add_module(layer1_module)
+
+    layer2_module = self.layer2.get_builder_module(f"{self.name}_layer2", output_path)
+    builder.add_module(layer2_module)
+
+    layer3_module = self.layer3.get_builder_module(f"{self.name}_layer3", output_path)
+    builder.add_module(layer3_module)
+
+    builder.add_link(uart, layer1_module)
+    builder.add_link(layer1_module, layer2_module)
+    builder.add_link(layer2_module, layer3_module)
+    builder.add_link(layer3_module, uart)
+    
+
+    builder.get_driver_yaml(f"{output_path}/{driver_config_path}")
+    builder.to_vhdl(file_name, "clock")
\ No newline at end of file
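A quick software-mode sanity check of the template model; shapes follow the NMNIST setup used in the templates (34x34x2 = 2312 flattened inputs, 5 time bins), and dispatch from __call__ to software_forward is assumed to be handled by ModNEFModel as in run_lib.py:

    import torch
    from model import MyModel

    model = MyModel("template_model")
    x = torch.rand(4, 5, 2312)      # (batch, time bins, flattened input)
    spk_rec, mem_rec = model(x)     # runs software_forward
    print(spk_rec.shape)            # (time bins, batch, 10)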
diff --git a/modneflib/modnef/templates/run_lib.py b/modneflib/modnef/templates/run_lib.py
index 07ccb6d0bdb2cc18feca1332ccf238051cf1a1b9..7cee0717607bc16f362d25196ceb235622db2662 100644
--- a/modneflib/modnef/templates/run_lib.py
+++ b/modneflib/modnef/templates/run_lib.py
@@ -1,4 +1,3 @@
-import torch.nn as nn
 from snntorch import surrogate
 import torch
 spike_grad = surrogate.fast_sigmoid(slope=25)
@@ -9,8 +8,7 @@ import matplotlib.pyplot as plt
 from sklearn.metrics import confusion_matrix
 import seaborn as sns
 
-
-def train_1_epoch(model, trainLoader, optimizer, loss, device, verbose):
+def train_1_epoch(model, trainLoader, optimizer, loss, qat, device, verbose):
   epoch_loss = []
 
   if verbose:
@@ -20,13 +18,17 @@ def train_1_epoch(model, trainLoader, optimizer, loss, device, verbose):
     loader = trainLoader
 
   for _, (data, target) in enumerate(loader):
-    model.train()
+    model.train(quant=qat)
+
+    """Prepare data"""
     data = data.to(device)
     data = data.squeeze(0)
     target = target.to(device)
     
+    """Forward Pass"""
     spk_rec, mem_rec = model(data)
 
+    """Prepare backward"""
     loss_val = torch.zeros((1), dtype=torch.float, device=device)
 
     for step in range(data.shape[1]):
@@ -34,6 +36,7 @@ def train_1_epoch(model, trainLoader, optimizer, loss, device, verbose):
 
     epoch_loss.append(loss_val.item())
 
+    """Backward"""
     model.zero_grad()
     loss_val.backward()
     optimizer.step()
@@ -44,33 +47,78 @@ def train_1_epoch(model, trainLoader, optimizer, loss, device, verbose):
 
   return np.mean(epoch_loss)
 
-def train(model, trainLoader, testLoader, optimizer, loss, device=torch.device("cpu"), validationLoader=None, n_epoch=10, best_model_name="best_model", verbose=True, save_plot=False, save_history=False, output_path="."):
+def train(model, 
+          trainLoader, 
+          testLoader, 
+          optimizer, 
+          loss, 
+          lr_scheduler = None,
+          qat = False, 
+          qat_scheduler = None,
+          device=torch.device("cpu"), 
+          validationLoader=None, 
+          n_epoch=10, 
+          best_model_name="best_model", 
+          verbose=True, 
+          save_plot=False, 
+          save_history=False, 
+          output_path="."
+          ):
+  
   avg_loss_history = []
   acc_test_history = []
   acc_val_history = []
-
+  lr_val_history = []
+  bitwidth_val_history = []
 
   best_acc = 0
 
   model = model.to(device)
 
+  if qat: # we prepare model for QAT
+    model.init_quantizer()
+    model.quantize_hp()
+
   for epoch in range(n_epoch):
     if verbose:
       print(f"---------- Epoch : {epoch} ----------")
     
-    epoch_loss = train_1_epoch(model=model, trainLoader=trainLoader, optimizer=optimizer, loss=loss, device=device, verbose=verbose)
+    """Model training"""
+    epoch_loss = train_1_epoch(model=model, trainLoader=trainLoader, optimizer=optimizer, loss=loss, device=device, verbose=verbose, qat=qat)
     avg_loss_history.append(epoch_loss)
 
+    """Model Validation"""
     if validationLoader!=None:
-      acc_val, _, _ = evaluation(model=model, testLoader=validationLoader, name="Validation", verbose=verbose, device=device)
+      acc_val, _, _ = evaluation(model=model, testLoader=validationLoader, name="Validation", verbose=verbose, device=device, quant=qat)
       acc_val_history.append(acc_val)
 
-    acc_test, _, _ = evaluation(model=model, testLoader=testLoader, name="Test", verbose=verbose, device=device)
+    """Model evaluation in test"""
+    acc_test, _, _ = evaluation(model=model, testLoader=testLoader, name="Test", verbose=verbose, device=device, quant=qat)
     acc_test_history.append(acc_test)
 
-    if best_model_name!="" and acc_test>best_acc:
-      torch.save(model.state_dict(), best_model_name)
-      best_acc = acc_test
+    """Save best model"""
+    if best_model_name!="" and acc_test>best_acc: 
+      if not qat:
+        torch.save(model.state_dict(), f"{output_path}/{best_model_name}")
+        best_acc = acc_test
+      else: #if QAT
+        if qat_scheduler==None: # no QAT scheduler so we save our model
+          torch.save(model.state_dict(), f"{output_path}/{best_model_name}")
+          best_acc = acc_test
+        elif qat_scheduler.save_model(): # if QAT scheduler, we need to check if we quantize at the target bitwidth
+          torch.save(model.state_dict(), f"{output_path}/{best_model_name}")
+          best_acc = acc_test
+
+    """Update schedulers"""
+    if lr_scheduler!=None:
+      lr_val_history.append(lr_scheduler.get_last_lr()[0])
+      lr_scheduler.step()
+
+    if qat:
+      model.clamp() 
+      if qat_scheduler!=None:
+        bitwidth_val_history.append(qat_scheduler.bitwidth)
+        qat_scheduler.step()
 
   if save_history:
     np.save(f"{output_path}/loss.npy", np.array(avg_loss_history))
@@ -79,13 +127,19 @@ def train(model, trainLoader, testLoader, optimizer, loss, device=torch.device("
     if len(acc_val_history)!=0:
       np.save(f"{output_path}/acc_validation.npy", np.array(acc_val_history))
 
+    if lr_scheduler!=None:
+      np.save(f"{output_path}/lr_scheduler.npy", np.array(lr_scheduler))
+
+    if qat and qat_scheduler!=None:
+      np.save(f"{output_path}/qat_scheudler_bitwidth.npy", np.array(bitwidth_val_history))
+
   if save_plot:
     plt.figure()  # Create a new figure
     plt.plot([i for i in range(n_epoch)], avg_loss_history)
     plt.title('Average Loss')
     plt.xlabel("Epoch")
     plt.ylabel("Loss")
-    plt.savefig(f"{output_path}/loss.png")
+    plt.savefig(f"{output_path}/loss.svg")
     
     plt.figure()
     if len(acc_val_history)!=0:
@@ -96,7 +150,25 @@ def train(model, trainLoader, testLoader, optimizer, loss, device=torch.device("
     plt.title("Accuracy")
     plt.xlabel("Epoch")
     plt.ylabel("Accuracy")
-    plt.savefig(f"{output_path}/accuracy.png")
+    plt.savefig(f"{output_path}/accuracy.svg")
+
+    if lr_scheduler!=None:
+      plt.figure()
+      plt.plot([i for i in range(n_epoch)], lr_val_history, label="LR Value")
+      plt.legend()
+      plt.title("LR Values")
+      plt.xlabel("Epoch")
+      plt.ylabel("learning rate")
+      plt.savefig(f"{output_path}/lr_values.svg")
+
+    if qat and qat_scheduler!=None:
+      plt.figure()
+      plt.plot([i for i in range(n_epoch)], bitwidth_val_history, label="bitwidth")
+      plt.legend()
+      plt.title("Quantizer bitwidth")
+      plt.xlabel("Epoch")
+      plt.ylabel("bitwidth")
+      plt.savefig(f"{output_path}/quant_bitwidth.svg")
 
   return avg_loss_history, acc_val_history, acc_test_history, best_acc
 
@@ -154,8 +226,8 @@ def __run_accuracy(model, testLoader, name, verbose, device):
       del spk_rec
       del mem_rec
 
-    y_true = torch.stack(y_true).reshape(-1)
-    y_pred = torch.stack(y_pred).reshape(-1)
+    y_true = torch.stack(y_true).reshape(-1).cpu().numpy()
+    y_pred = torch.stack(y_pred).reshape(-1).cpu().numpy()
     
       
     return (correct/total), y_pred, y_true
@@ -186,7 +258,6 @@ def hardware_estimation(model, testLoader, name="Hardware Estimation", device=to
 
   return accuracy, y_pred, y_true
 
-
 def fpga_evaluation(model, testLoader, board_path, driver_config, name="FPGA Evaluation", verbose=False):
   accuracy = 0
   y_pred = []
@@ -198,9 +269,51 @@ def fpga_evaluation(model, testLoader, board_path, driver_config, name="FPGA Eva
 
   model.fpga_eval(board_path, driver_config)
 
-  accuracy, y_pred, y_true = __run_accuracy(model=model, testLoader=testLoader, name=name, verbose=verbose, device=device)
+  y_true = []
+  y_pred = []
+  run_time = []
+  correct = 0
+  total = 0
 
-  return accuracy, y_pred, y_true   
+  if verbose:
+    bar_format = "{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]"
+    loader = tqdm(testLoader, desc=name, bar_format=bar_format)
+  else:
+    loader = testLoader
+
+
+  for _, (data, target) in enumerate(loader):
+    
+    data = data.to(device)
+    target = target.to(device)
+
+    y_true.append(target)
+
+    spk_rec, mem_rec, batch_speed = model(data)
+
+    run_time.extend(batch_speed)
+
+    output = (spk_rec.sum(dim=0))/data.shape[1]
+    predicted = output.argmax(dim=1).to(device)
+    correct += predicted.eq(target.view_as(predicted)).sum().item()
+    y_pred.append(predicted)
+    total += target.size(0)
+
+    if verbose:
+      loader.set_postfix_str(f"Accuracy : {np.mean(correct/total*100):0<3.2f} Run Time : {np.mean(batch_speed)*1e6:.3f} µs")
+
+    del data
+    del target
+    del spk_rec
+
+  y_true = torch.stack(y_true).cpu().reshape(-1).numpy()
+  y_pred = torch.stack(y_pred).cpu().reshape(-1).numpy()
+  run_time = np.array(run_time)
+  
+    
+  accuracy = (correct/total)
+
+  return accuracy, y_pred, y_true, run_time
 
 def conf_matrix(y_true, y_pred, file_name, classes):
   cm = confusion_matrix(y_true, y_pred)
diff --git a/modneflib/modnef/templates/train.py b/modneflib/modnef/templates/train.py
index 03603ecb67073dd27e7d2c58b4651504b9655ab8..5d4866c4ec4e7faf5724bd760bdf042b3d2ebd2a 100644
--- a/modneflib/modnef/templates/train.py
+++ b/modneflib/modnef/templates/train.py
@@ -1,19 +1,21 @@
-import tonic
-from torch.utils.data import DataLoader
-from modnef.modnef_torch import ModNEFModelBuilder
-import os
 from snntorch.surrogate import fast_sigmoid
 from run_lib import *
 import torch
+from model import MyModel
+from dataset import *
+from modnef.quantizer import QuantizerScheduler
 
 if __name__ == "__main__":
 
   """Model definition"""
-  model_path = "model_template.json"
-  model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+  # model_path = "model_template.json"
+  # model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+
+  model = MyModel("template_model", spike_grad=fast_sigmoid(slope=25))
 
   """Optimizer"""
   optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999))
+  lr_scheduler = None #torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=5, eta_min=5e-4)
 
   """Loss"""
   loss = torch.nn.CrossEntropyLoss()
@@ -22,29 +24,17 @@ if __name__ == "__main__":
   device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
 
   """Train variable definition"""
-  n_epoch = 1
+  n_epoch = 2
   best_model_name = "best_model"
   verbose = True
   save_plot = False
   save_history = False
   output_path = "."
-  
-  """DataSet Definition"""
-  dataset_path = f"{os.environ['HOME']}/datasets"
-
-  # data set definition, change to your dataset
-  sensor_size = tonic.datasets.NMNIST.sensor_size
-  frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=10)
-
-  train_set = tonic.datasets.NMNIST(save_to=dataset_path, train=True, transform=frame_transform)
-  test_set = tonic.datasets.NMNIST(save_to=dataset_path, train=False, transform=frame_transform)
 
-  # batch loader
-  batch_size = 64
-  
-  trainLoader = DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last = True, collate_fn = tonic.collation.PadTensors(batch_first=True))
-  testLoader = DataLoader(test_set, batch_size=batch_size, shuffle=True, drop_last = True, collate_fn = tonic.collation.PadTensors(batch_first=True))
-  validationLoader = None
+  """Quantization Aware Training"""
+  qat=False
+  qat_scheduler = None #QuantizerScheduler(model, (8,3), 3, lambda x : FixedPointQuantizer(x, x-1, True, True))
 
   train(
     model=model, 
@@ -53,6 +43,9 @@ if __name__ == "__main__":
     validationLoader=validationLoader,
     optimizer=optimizer, 
     loss=loss, 
+    lr_scheduler = lr_scheduler,
+    qat = qat, 
+    qat_scheduler = qat_scheduler,
     device=device,  
     n_epoch=n_epoch, 
     best_model_name=best_model_name, 
diff --git a/modneflib/modnef/templates/vhdl_generation.py b/modneflib/modnef/templates/vhdl_generation.py
index 82dbb71fda818e276264ed04b3e2068791d4c0ca..3ba0a9bc435ea769ed5b8f4fb48441eea649abf0 100644
--- a/modneflib/modnef/templates/vhdl_generation.py
+++ b/modneflib/modnef/templates/vhdl_generation.py
@@ -5,23 +5,26 @@ import os
 from snntorch.surrogate import fast_sigmoid
 from run_lib import *
 import torch
+from model import MyModel
+from dataset import *
 
 if __name__ == "__main__":
 
   """Experience name"""
   exp_name = "Evaluation"
 
+  """Device definition"""
+  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
   """Model definition"""
-  model_path = "model_template.json"
   best_model_name = "best_model"
 
-  model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+  # model_path = "model_template.json"
+  # model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
 
-  model.load_state_dict(torch.load(best_model_name))
+  model = MyModel("template_model", spike_grad=fast_sigmoid(slope=25))
 
-
-  """Device definition"""
-  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+  model.load_state_dict(torch.load(best_model_name, map_location=device))
 
   """Hardware Estimation variable definition"""
   verbose = True
@@ -30,21 +33,10 @@ if __name__ == "__main__":
   """VHDL file definition"""
   output_path = "."
   file_name = "template_vhdl_model.vhd"
-  driver_config_path = "driver_config"
-  
-  """DataSet Definition"""
-  dataset_path = f"{os.environ['HOME']}/datasets"
-
-  # data set definition, change to your dataset
-  sensor_size = tonic.datasets.NMNIST.sensor_size
-  frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=10)
+  driver_config_path = "driver_config.yml"
 
-  test_set = tonic.datasets.NMNIST(save_to=dataset_path, train=False, transform=frame_transform)
-
-  # batch loader
-  batch_size = 64
-  
-  testLoader = DataLoader(test_set, batch_size=batch_size, shuffle=True, drop_last = True, collate_fn = tonic.collation.PadTensors(batch_first=True))
+  """Prepare model for hardware estimation"""
+  model.quantize(force_init=True, clamp=True)
 
   acc, y_pred, y_true = hardware_estimation(
     model=model,