diff --git a/modneflib/modnef/modnef_torch/model.py b/modneflib/modnef/modnef_torch/model.py
index f35d4867b87f1fc55b94ffa0d86ced1e5ec1a766..0451bfe54d6ac45bd058e23f06f59f3e4cf78e5c 100644
--- a/modneflib/modnef/modnef_torch/model.py
+++ b/modneflib/modnef/modnef_torch/model.py
@@ -91,6 +91,20 @@ class ModNEFModel(nn.Module):
         m.set_quant(quant)
 
     return super().train(mode=mode)
+  
+  def quantize(self, force_init=False):
+    """
+    Quantize synaptic weights and neuron hyper-parameters of all ModNEFNeuron modules in the model
+
+    Parameters
+    ----------
+    force_init = False : bool
+      force quantizer initialization
+    """
+    
+    for m in self.modules():
+      if isinstance(m, ModNEFNeuron):
+        m.quantize(force_init=force_init)
 
   def train(self, mode : bool = True, quant : bool = False):
     """
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
index 78cac1056f1e6295267eb826fdda8606968546bd..3c83a40741ef86dc3818e88d41df8ecd1de3cd48 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
@@ -1,7 +1,7 @@
 """
 File name: blif
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.1.0
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, math, snntorch, modnef.archbuilder, modnef_torch_neuron, modnef.quantizer
@@ -56,11 +56,11 @@ class BLIF(Leaky, ModNEFNeuron):
     Update membrane voltage
   get_builder_module(module_name, output_path=".")
     Create ModNEFArchBuilder from internal neuron description
-  quantize_weight(dtype=torch.int32)
+  quantize_weight()
     Quantize synaptic weight
-  quantize_parameters(dtype=torch.int32)
+  quantize_parameters()
     Quantize neuron hyper-parameters
-  quantize(dtype=torch.int32)
+  quantize(force_init=False)
     Quantize synaptic weight and neuron hyper-parameters
   hardware_estimation()
     Toggle hardware estimation calculation
@@ -305,51 +305,41 @@ class BLIF(Leaky, ModNEFNeuron):
     )
     return module
   
-  def quantize_weight(self, dtype=torch.int32):
+  def quantize_weight(self):
     """
     Quantize synaptic weight
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(self.fc.weight)
 
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True, dtype)
+    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
 
-  def quantize_parameters(self, dtype=torch.int32):
+  def quantize_parameters(self):
     """
     Quantize neuron hyper-parameters
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
 
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(self.fc.weight)
 
-    self.threshold.data = self.quantizer(self.threshold.data, True, dtype)
-    self.beta.data = self.quantizer(self.beta.data, True, dtype)
+    self.threshold.data = self.quantizer(self.threshold.data, True)
+    self.beta.data = self.quantizer(self.beta.data, True)
 
-  def quantize(self, force_init=False, dtype=torch.int32):
+  def quantize(self, force_init=False):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
     Parameters
     ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
+    force_init = False : bool
+      force quantizer initialization
     """
     
     if force_init or not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(self.fc.weight)
 
-    self.quantize_weight(dtype)
-    self.quantize_parameters(dtype)
+    self.quantize_weight()
+    self.quantize_parameters()
     self.quantization_flag = True
 
   @classmethod
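Usage sketch: at the layer level, dtype is no longer passed at call time; it is carried by the layer's quantizer instead (see the quantizer diffs below). A minimal sketch, assuming layer is an already-constructed BLIF instance (constructor arguments omitted):

    # Quantize only the synaptic weights (the quantizer self-initializes on first use).
    layer.quantize_weight()

    # Quantize only the neuron hyper-parameters (threshold, beta).
    layer.quantize_parameters()

    # Or both at once, forcing the quantizer to re-read the current weights.
    layer.quantize(force_init=True)
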
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
index 06b766ea749a6148c206a61b9b524f9c545d2398..f0ddcdd17ba6fc38b7207a5107a7469a3a550f61 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
@@ -1,7 +1,7 @@
 """
 File name: rblif
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.1.0
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron, math, modnef.quantizer
@@ -58,11 +58,11 @@ class RBLIF(Leaky, ModNEFNeuron):
     Update membrane voltage
   get_builder_module(module_name, output_path=".")
     Create ModNEFArchBuilder from internal neuron description
-  quantize_weight(dtype=torch.int32)
+  quantize_weight()
     Quantize synaptic weight
-  quantize_parameters(dtype=torch.int32)
+  quantize_parameters()
     Quantize neuron hyper-parameters
-  quantize(dtype=torch.int32)
+  quantize(force_init=False)
     Quantize synaptic weight and neuron hyper-parameters
   hardware_estimation()
     Toggle hardware estimation calculation
@@ -318,53 +318,43 @@ class RBLIF(Leaky, ModNEFNeuron):
     )
     return module
   
-  def quantize_weight(self, dtype=torch.int32):
+  def quantize_weight(self):
     """
     Quantize synaptic weight
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
 
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
 
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True, dtype)
-    self.reccurent.weight.data = self.quantizer(self.reccurent.weight.data, True, dtype)
+    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
+    self.reccurent.weight.data = self.quantizer(self.reccurent.weight.data, True)
 
-  def quantize_parameters(self, dtype=torch.int32):
+  def quantize_parameters(self):
     """
     Quantize neuron hyper-parameters
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
 
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
 
-    self.threshold.data = self.quantizer(self.threshold.data, True, dtype)
-    self.beta.data = self.quantizer(self.beta.data, True, dtype)
+    self.threshold.data = self.quantizer(self.threshold.data, True)
+    self.beta.data = self.quantizer(self.beta.data, True)
 
-  def quantize(self, force_init=False, dtype=torch.int32):
+  def quantize(self, force_init=False):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
     Parameters
     ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
+    force_init = False : bool
+      force quantizer initialization
     """
     
     if force_init or not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
 
-    self.quantize_weight(dtype)
-    self.quantize_parameters(dtype)
+    self.quantize_weight()
+    self.quantize_parameters()
     self.quantization_flag = True
 
   @classmethod
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py b/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
index 544c76574ff3760e9f1c7a4eaba3dc7579357a3d..dc491839d2d9ecb2f63fdecd6786eb0871f82261 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
@@ -79,23 +79,20 @@ class ModNEFNeuron():
     
     raise NotImplementedError()
   
-  def quantize(self, force_init=False, dtype=torch.int32):
+  def quantize(self, force_init=False):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
     Parameters
     ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
+    force_init = False : bool
+      force quantizer initialization
     """
     
     raise NotImplementedError()
   
   def set_quant(self, mode=False):
     self.quantization_flag = mode
-
-    if mode:
-      self.quantize(False)
   
   def hardware_estimation(self, mode = False):
     """
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
index 0f6415a9cde82fd9c3a6fb2e3d5c9eb678b88315..c950a7664ab40907a166c7f8e75f513ff0261cc7 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
@@ -1,7 +1,7 @@
 """
 File name: rslif
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.1.0
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron, math, modnef.quantizer
@@ -60,11 +60,11 @@ class RSLIF(LIF, ModNEFNeuron):
     Update membrane voltage
   get_builder_module(module_name, output_path=".")
     Create ModNEFArchBuilder from internal neuron description
-  quantize_weight(dtype=torch.int32)
+  quantize_weight()
     Quantize synaptic weight
-  quantize_parameters(dtype=torch.int32)
+  quantize_parameters()
     Quantize neuron hyper-parameters
-  quantize(dtype=torch.int32)
+  quantize(force_init=False)
     Quantize synaptic weight and neuron hyper-parameters
   hardware_estimation()
     Toggle hardware estimation calculation
@@ -336,56 +336,45 @@ class RSLIF(LIF, ModNEFNeuron):
     )
     return module
   
-  def quantize_weight(self, dtype=torch.int32):
+  def quantize_weight(self):
     """
     Quantize synaptic weight
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
 
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(weight=self.fc.weight, rec_weight=self.reccurent.weight)
 
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True, dtype)
-    self.reccurent.weight.data = self.quantizer(self.reccurent.weight.data, True, dtype)
+    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
+    self.reccurent.weight.data = self.quantizer(self.reccurent.weight.data, True)
 
-  def quantize_parameters(self, dtype=torch.int32):
+  def quantize_parameters(self):
     """
     Quantize neuron hyper-parameters
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
 
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(weight=self.fc.weight, rec_weight=self.reccurent.weight)
 
-    self.v_leak.data = self.quantizer(self.v_leak.data, True, dtype)
-    self.v_min.data = self.quantizer(self.v_min.data, True, dtype)
-    self.v_rest.data = self.quantizer(self.v_rest.data, True, dtype)
-    self.threshold.data = self.quantizer(self.threshold, True, dtype)
+    self.v_leak.data = self.quantizer(self.v_leak.data, True)
+    self.v_min.data = self.quantizer(self.v_min.data, True)
+    self.v_rest.data = self.quantizer(self.v_rest.data, True)
+    self.threshold.data = self.quantizer(self.threshold.data, True)
 
-  def quantize(self, force_init=False, dtype=torch.int32):
+  def quantize(self, force_init=False):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
     Parameters
     ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
+    force_init = False : bool
+      force quantizer initialization
     """
     
     if force_init or not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
 
-    self.quantize_weight(dtype)
-    self.quantize_parameters(dtype)
-    self.quantization_flag = True
+    self.quantize_weight()
+    self.quantize_parameters()
   
   @classmethod
   def detach_hidden(cls):
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
index 0bd22d68a4ae1794bdee5f9e3295d54b99b65554..315a07d3bb5aa351e55de8d05d1e126ac6868d7b 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
@@ -1,7 +1,7 @@
 """
 File name: slif
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.1.0
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron, math, modnef.quantizer
@@ -60,11 +60,11 @@ class SLIF(LIF, ModNEFNeuron):
     Update membrane voltage
   get_builder_module(module_name, output_path=".")
     Create ModNEFArchBuilder from internal neuron description
-  quantize_weight(dtype=torch.int32)
+  quantize_weight()
     Quantize synaptic weight
-  quantize_parameters(dtype=torch.int32)
+  quantize_parameters()
     Quantize neuron hyper-parameters
-  quantize(dtype=torch.int32)
+  quantize(force_init=False)
     Quantize synaptic weight and neuron hyper-parameters
   hardware_estimation()
     Toggle hardware estimation calculation
@@ -327,55 +327,44 @@ class SLIF(LIF, ModNEFNeuron):
     )
     return module
   
-  def quantize_weight(self, dtype=torch.int32):
+  def quantize_weight(self):
     """
     Quantize synaptic weight
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
 
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(weight=self.fc.weight)
 
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True, dtype)
+    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
 
-  def quantize_parameters(self, dtype=torch.int32):
+  def quantize_parameters(self):
     """
     Quantize neuron hyper-parameters
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
 
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(weight=self.fc.weight)
 
-    self.v_leak.data = self.quantizer(self.v_leak.data, True, dtype)
-    self.v_min.data = self.quantizer(self.v_min.data, True, dtype)
-    self.v_rest.data = self.quantizer(self.v_rest.data, True, dtype)
-    self.threshold.data = self.quantizer(self.threshold, True, dtype)
+    self.v_leak.data = self.quantizer(self.v_leak.data, True)
+    self.v_min.data = self.quantizer(self.v_min.data, True)
+    self.v_rest.data = self.quantizer(self.v_rest.data, True)
+    self.threshold.data = self.quantizer(self.threshold.data, True)
 
-  def quantize(self, force_init=False, dtype=torch.int32):
+  def quantize(self, force_init=False):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
     Parameters
     ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
+    force_init = False : bool
+      force quantizer initialization
     """
     
     if force_init or not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(self.fc.weight)
 
-    self.quantize_weight(dtype)
-    self.quantize_parameters(dtype)
-    self.quantization_flag = True
+    self.quantize_weight()
+    self.quantize_parameters()
 
   
   @classmethod
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
index 692a5f1c2f96ab374c93294bd2034e67f143127d..71194acf8b16bc96d8b4e73e99c30b5abdee643c 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
@@ -1,7 +1,7 @@
 """
 File name: rsrlif
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.1.0
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron, math, modnef.quantizer
@@ -59,11 +59,11 @@ class RShiftLIF(LIF, ModNEFNeuron):
     Update membrane voltage
   get_builder_module(module_name, output_path=".")
     Create ModNEFArchBuilder from internal neuron description
-  quantize_weight(dtype=torch.int32)
+  quantize_weight()
     Quantize synaptic weight
-  quantize_parameters(dtype=torch.int32)
+  quantize_parameters()
     Quantize neuron hyper-parameters
-  quantize(dtype=torch.int32)
+  quantize(force_init=False)
     Quantize synaptic weight and neuron hyper-parameters
   hardware_estimation()
     Toggle hardware estimation calculation
@@ -336,54 +336,43 @@ class RShiftLIF(LIF, ModNEFNeuron):
     )
     return module
 
-  def quantize_weight(self, dtype=torch.int32):
+  def quantize_weight(self):
     """
     Quantize synaptic weight
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
 
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(weight=self.fc.weight, rec_weight=self.reccurent.weight)
 
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True, dtype)
-    self.reccurent.weight.data = self.quantizer(self.reccurent.weight.data, True, dtype)
+    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
+    self.reccurent.weight.data = self.quantizer(self.reccurent.weight.data, True)
 
-  def quantize_parameters(self, dtype=torch.int32):
+  def quantize_parameters(self):
     """
     Quantize neuron hyper-parameters
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
 
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(weight=self.fc.weight, rec_weight=self.reccurent.weight)
 
-    self.threshold.data = self.quantizer(self.threshold.data, True, dtype)
-    self.beta.data = self.quantizer(self.beta.data, True, dtype)
+    self.threshold.data = self.quantizer(self.threshold.data, True)
+    self.beta.data = self.quantizer(self.beta.data, True)
 
-  def quantize(self, force_init=False, dtype=torch.int32):
+  def quantize(self, force_init=False):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
     Parameters
     ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
+    force_init = False : bool
+      force quantizer initialization
     """
     
     if force_init or not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(self.fc.weight, self.reccurent.weight)
 
-    self.quantize_weight(dtype)
-    self.quantize_parameters(dtype)
-    self.quantization_flag = True
+    self.quantize_weight()
+    self.quantize_parameters()
 
   @classmethod
   def detach_hidden(cls):
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py
index af97a7028e2513a556eafc1fcd7c68ba24c2c827..adcd3d47e51813f2c8206eb4a386d23c7f8c7922 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/shiftlif.py
@@ -1,7 +1,7 @@
 """
 File name: srlif
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.1.0
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, snntorch, modnef.archbuilder, modnef_torch_neuron, math, modnef.quantizer
@@ -56,11 +56,11 @@ class ShiftLIF(LIF, ModNEFNeuron):
     Update membrane voltage
   get_builder_module(module_name, output_path=".")
     Create ModNEFArchBuilder from internal neuron description
-  quantize_weight(dtype=torch.int32)
+  quantize_weight()
     Quantize synaptic weight
-  quantize_parameters(dtype=torch.int32)
+  quantize_parameters()
     Quantize neuron hyper-parameters
-  quantize(dtype=torch.int32)
+  quantize(force_init=False)
     Quantize synaptic weight and neuron hyper-parameters
   hardware_estimation()
     Toggle hardware estimation calculation
@@ -321,53 +321,42 @@ class ShiftLIF(LIF, ModNEFNeuron):
     )
     return module
 
-  def quantize_weight(self, dtype=torch.int32):
+  def quantize_weight(self):
     """
     Quantize synaptic weight
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
 
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(weight=self.fc.weight)
 
-    self.fc.weight.data = self.quantizer(self.fc.weight.data, True, dtype)
+    self.fc.weight.data = self.quantizer(self.fc.weight.data, True)
 
-  def quantize_parameters(self, dtype=torch.int32):
+  def quantize_parameters(self):
     """
     Quantize neuron hyper-parameters
-
-    Parameters
-    ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
     """
 
     if not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(weight=self.fc.weight)
 
-    self.threshold.data = self.quantizer(self.threshold.data, True, dtype)
-    self.beta.data = self.quantizer(self.beta.data, True, dtype)
+    self.threshold.data = self.quantizer(self.threshold.data, True)
+    self.beta.data = self.quantizer(self.beta.data, True)
     
-  def quantize(self, force_init=False, dtype=torch.int32):
+  def quantize(self, force_init=False):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
     Parameters
     ----------
-    dtype = torch.int32 : dtype
-      type use during quantization
+    force_init = False : bool
+      force quantizer initialization
     """
     
     if force_init or not self.quantizer.is_initialize:
       self.quantizer.init_from_weight(self.fc.weight)
 
-    self.quantize_weight(dtype)
-    self.quantize_parameters(dtype)
-    self.quantization_flag = True
+    self.quantize_weight()
+    self.quantize_parameters()
 
   @classmethod
   def detach_hidden(cls):
diff --git a/modneflib/modnef/quantizer/dynamic_scale_quantizer.py b/modneflib/modnef/quantizer/dynamic_scale_quantizer.py
index c6e95d4ba58fb06030c540c4d13cd7d0ee8213dd..4f4b096ed47a376e2c0a323f1e5f94be62a78e87 100644
--- a/modneflib/modnef/quantizer/dynamic_scale_quantizer.py
+++ b/modneflib/modnef/quantizer/dynamic_scale_quantizer.py
@@ -1,7 +1,7 @@
 """
 File name: dynamic_scale_quantizer
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.1.0
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, modnef.quantizer.Quantizer
@@ -23,6 +23,8 @@ class DynamicScaleFactorQuantizer(Quantizer):
     set to true if quantizer is signed, false if not
   is_initializer : bool
     set to true when quantizer parameters are initialize
+  dtype = torch.int32 : torch.dtype
+    type used during quantization
   scale_factor : float
     scale factor
 
@@ -33,14 +35,15 @@ class DynamicScaleFactorQuantizer(Quantizer):
     generate quantizer from dictionnary description
   init_from_weight(weight, rec_weight)
     initialize quantizer parameters from synaptic weight
-  __call__(data, unscale, dtype)
+  __call__(data, unscale)
     Call quantization function, if unscale=true, return dequantization data
   """
 
   def __init__(self, 
                bitwidth, 
                signed=None, 
-               is_initialize=False
+               is_initialize=False,
+               dtype=torch.int32
                ):
     """
     Construct class
@@ -54,12 +57,15 @@ class DynamicScaleFactorQuantizer(Quantizer):
       If None, determine during initialization
     is_initialize = False : bool
       set to true when quantizer parameters are initialize
+    dtype = torch.int32 : torch.dtype
+      type used during quantization
     """
 
     super().__init__(
       bitwidth=bitwidth,
       signed=signed,
-      is_initialize=is_initialize
+      is_initialize=is_initialize,
+      dtype=dtype
     )
     
     self.scale_factor = 0
@@ -124,7 +130,7 @@ class DynamicScaleFactorQuantizer(Quantizer):
     #self.scale_factor = torch.max(torch.abs(weight).max(), torch.abs(weight).max())/2**(self.bitwidth-1)
 
 
-  def _quant(self, data, unscale, dtype=torch.int32) -> torch.Tensor:
+  def _quant(self, data, unscale) -> torch.Tensor:
     """
     Apply quantization
 
@@ -134,8 +140,6 @@ class DynamicScaleFactorQuantizer(Quantizer):
       data to quantize
     unscale = False : bool
       If true, apply quantization and then, unquantize data to simulate quantization
-    dtype=torch.int32 : dtype
-      data type
 
     Returns
     -------
@@ -146,7 +150,7 @@ class DynamicScaleFactorQuantizer(Quantizer):
     born_max = 2**(self.bitwidth-int(self.signed))-1
 
     #scaled = torch.clamp(data/self.scale_factor, min=born_min, max=born_max).to(dtype)
-    scaled = torch.round(data/self.scale_factor).to(dtype)
+    scaled = torch.round(data/self.scale_factor).to(self.dtype)
 
     if unscale:
       return scaled*self.scale_factor
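Usage sketch: the integer type is now fixed once per quantizer and _quant simply casts to self.dtype. A minimal sketch (import path and bitwidth are assumptions; torch.int16 is chosen so 8-bit codes cannot overflow the container type):

    import torch
    from modnef.quantizer import DynamicScaleFactorQuantizer

    q = DynamicScaleFactorQuantizer(bitwidth=8, dtype=torch.int16)

    w = torch.randn(16, 16)
    q.init_from_weight(w)          # derives scale_factor from the weight range

    w_int = q(w)                   # rounded integer codes, cast to torch.int16
    w_sim = q(w, unscale=True)     # quantize then rescale, to simulate quantization error
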
diff --git a/modneflib/modnef/quantizer/fixed_point_quantizer.py b/modneflib/modnef/quantizer/fixed_point_quantizer.py
index afb36ac589bdd4ce07eb80651799c9cef0aa7da1..564fcebe2681e8463269d70d3423a3500145ed6b 100644
--- a/modneflib/modnef/quantizer/fixed_point_quantizer.py
+++ b/modneflib/modnef/quantizer/fixed_point_quantizer.py
@@ -1,7 +1,7 @@
 """
 File name: fixed_point_quantizer
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.1.0
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, math, modnef.quantizer.Quantizer
@@ -24,6 +24,8 @@ class FixedPointQuantizer(Quantizer):
     set to true if quantizer is signed, false if not
   is_initializer : bool
     set to true when quantizer parameters are initialize
+  dtype = torch.int32 : torch.dtype
+    type used during quantization
   fixed_point : int
     fixed point position
   scale_factor : int
@@ -36,7 +38,7 @@ class FixedPointQuantizer(Quantizer):
     generate quantizer from dictionnary description
   init_from_weight(weight, rec_weight)
     initialize quantizer parameters from synaptic weight
-  __call__(data, unscale, dtype)
+  __call__(data, unscale)
     Call quantization function, if unscale=true, return dequantization data
   """
 
@@ -44,7 +46,8 @@ class FixedPointQuantizer(Quantizer):
                bitwidth, 
                fixed_point=-1, 
                signed=None, 
-               is_initialize=False
+               is_initialize=False,
+               dtype=torch.int32
                ):
     """
     Construct class
@@ -61,6 +64,8 @@ class FixedPointQuantizer(Quantizer):
       If None, determine during initialization
     is_initialize = False : bool
       set to true when quantizer parameters are initialize
+    dtype = torch.int32 : torch.dtype
+      type used during quantization
     """
 
     if bitwidth==-1 and fixed_point==-1:
@@ -69,7 +74,8 @@ class FixedPointQuantizer(Quantizer):
     super().__init__(
       bitwidth=bitwidth,
       signed=signed,
-      is_initialize=is_initialize
+      is_initialize=is_initialize,
+      dtype=dtype
     )
 
     self.fixed_point = fixed_point
@@ -141,7 +147,7 @@ class FixedPointQuantizer(Quantizer):
         self.scale_factor = 2**self.fixed_point
 
 
-  def _quant(self, data, unscale, dtype) -> torch.Tensor:
+  def _quant(self, data, unscale) -> torch.Tensor:
     """
     Apply quantization
 
@@ -151,15 +157,13 @@ class FixedPointQuantizer(Quantizer):
       data to quantize
     unscale = False : bool
       If true, apply quantization and then, unquantize data to simulate quantization
-    dtype=torch.int32 : dtype
-      data type
 
     Returns
     -------
     Tensor
     """
 
-    scaled = torch.round(data*self.scale_factor).to(dtype)
+    scaled = torch.round(data*self.scale_factor).to(self.dtype)
     
     if unscale:
       return (scaled.to(torch.float32))/self.scale_factor
diff --git a/modneflib/modnef/quantizer/min_max_quantizer.py b/modneflib/modnef/quantizer/min_max_quantizer.py
index 28d1cf37ec934a0eda9b3541af7998776eadaff2..a700f6bf2e99bda84af2f530e925af551cdf1c10 100644
--- a/modneflib/modnef/quantizer/min_max_quantizer.py
+++ b/modneflib/modnef/quantizer/min_max_quantizer.py
@@ -1,7 +1,7 @@
 """
 File name: quantizer
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.1.0
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, modnef.quantizer.Quantizer
@@ -23,6 +23,8 @@ class MinMaxQuantizer(Quantizer):
     set to true if quantizer is signed, false if not
   is_initializer : bool
     set to true when quantizer parameters are initialize
+  dtype : torch.dtype
+    type used during quantization
   x_min : float
     minimal value of weight
   x_max : float
@@ -39,14 +41,15 @@ class MinMaxQuantizer(Quantizer):
     generate quantizer from dictionnary description
   init_from_weight(weight, rec_weight)
     initialize quantizer parameters from synaptic weight
-  __call__(data, unscale, dtype)
+  __call__(data, unscale)
     Call quantization function, if unscale=true, return dequantization data
   """
 
   def __init__(self, 
                bitwidth, 
                signed=None, 
-               is_initialize=False
+               is_initialize=False,
+               dtype=torch.int32
                ):
     """
     Construct class
@@ -60,12 +63,15 @@ class MinMaxQuantizer(Quantizer):
       If None, determine during initialization
     is_initialize = False : bool
       set to true when quantizer parameters are initialize
+    dtype = torch.int32 : torch.dtype
+      type used during quantization
     """
 
     super().__init__(
       bitwidth=bitwidth,
       signed=signed,
-      is_initialize=is_initialize
+      is_initialize=is_initialize,
+      dtype=dtype
     )
     
     self.x_min = 0
@@ -128,7 +134,7 @@ class MinMaxQuantizer(Quantizer):
     self.b_max = 2**(self.bitwidth-int(self.signed))-1
     self.b_min = -int(self.signed)*self.b_max
 
-  def _quant(self, data, unscale, dtype) -> torch.Tensor:
+  def _quant(self, data, unscale) -> torch.Tensor:
     """
     Apply quantization
 
@@ -138,15 +144,13 @@ class MinMaxQuantizer(Quantizer):
       data to quantize
     unscale = False : bool
       If true, apply quantization and then, unquantize data to simulate quantization
-    dtype=torch.int32 : dtype
-      data type
 
     Returns
     -------
     Tensor
     """
 
-    scaled = ((data-self.x_min)/(self.x_max-self.x_min)*(self.b_max-self.b_min)+self.b_min).to(dtype)
+    scaled = ((data-self.x_min)/(self.x_max-self.x_min)*(self.b_max-self.b_min)+self.b_min).to(self.dtype)
     
     if unscale:
       return (scaled-self.b_min)/(self.b_max-self.b_min)*(self.x_max-self.x_min)+self.x_min
diff --git a/modneflib/modnef/quantizer/quantizer.py b/modneflib/modnef/quantizer/quantizer.py
index 94fc8f516a76032b1d0c92837f93d4633baeabce..5a37433edf8980502f7dfe3a9187b38f64dafe58 100644
--- a/modneflib/modnef/quantizer/quantizer.py
+++ b/modneflib/modnef/quantizer/quantizer.py
@@ -1,7 +1,7 @@
 """
 File name: quantizer
 Author: Aurélie Saulquin  
-Version: 1.0.0
+Version: 1.1.0
 License: GPL-3.0-or-later
 Contact: aurelie.saulquin@univ-lille.fr
 Dependencies: torch, numpy
@@ -22,7 +22,9 @@ class Quantizer():
   signed : bool
     set to true if quantizer is signed, false if not
   is_initializer : bool
-    set to true when quantizer parameters are initialize
+    set to true when quantizer parameters are initialized
+  dtype : torch.dtype
+    type used during quantization
 
   Methods
   -------
@@ -31,14 +33,15 @@ class Quantizer():
     generate quantizer from dictionnary description
   init_from_weight(weight, rec_weight)
     initialize quantizer parameters from synaptic weight
-  __call__(data, unscale, dtype)
+  __call__(data, unscale)
     Call quantization function, if unscale=true, return dequantization data
   """
   
   def __init__(self, 
                bitwidth, 
                signed=None, 
-               is_initialize=False
+               is_initialize=False,
+               dtype=torch.int32
                ):
     """
     Construct class
@@ -52,12 +55,14 @@ class Quantizer():
       If None, determine during initialization
     is_initialize = False : bool
       set to true when quantizer parameters are initialize
+    dtype = torch.int32 : torch.dtype
+      type used during quantization
     """
 
     self.bitwidth = bitwidth
     self.is_initialize = is_initialize
     self.signed = signed
-    pass
+    self.dtype = dtype
 
   @classmethod
   def from_dict(cls, dict):
@@ -86,7 +91,7 @@ class Quantizer():
     
     raise NotImplementedError()
 
-  def __call__(self, data, unscale=False, dtype=torch.int16):
+  def __call__(self, data, unscale=False):
     """
     Call quantization function
 
@@ -96,8 +101,6 @@ class Quantizer():
       data to quantize
     unscale = False : bool
       If true, apply quantization and then, unquantize data to simulate quantization
-    dtype=torch.int32 : dtype
-      data type
 
     Returns
     -------
@@ -105,17 +108,17 @@ class Quantizer():
     """
     
     if isinstance(data, (int, float)):
-      return self._quant(data=torch.tensor(data), unscale=unscale, dtype=dtype).item()
+      return self._quant(data=torch.tensor(data), unscale=unscale).item()
     elif isinstance(data, list):
-      return self._quant(data=torch.tensor(data), unscale=unscale, dtype=dtype).tolist()
+      return self._quant(data=torch.tensor(data), unscale=unscale).tolist()
     elif isinstance(data, np.ndarray):
-      return self._quant(data=torch.tensor(data), unscale=unscale, dtype=dtype).numpy()
+      return self._quant(data=torch.tensor(data), unscale=unscale).numpy()
     elif torch.is_tensor(data):
-      return self._quant(data=data, unscale=unscale, dtype=dtype).detach()
+      return self._quant(data=data, unscale=unscale).detach()
     else:
       raise TypeError("Unsupported data type")
 
-  def _quant(self, data, unscale, dtype) -> torch.Tensor:
+  def _quant(self, data, unscale) -> torch.Tensor:
     """
     Apply quantization
 
@@ -125,8 +128,6 @@ class Quantizer():
       data to quantize
     unscale = False : bool
       If true, apply quantization and then, unquantize data to simulate quantization
-    dtype=torch.int32 : dtype
-      data type
 
     Returns
     -------
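Usage sketch: the simplified __call__ keeps its type dispatch, so scalars, lists, NumPy arrays and tensors all pass through _quant and come back in their original container type. A minimal sketch using MinMaxQuantizer as the concrete subclass (import path and values are assumptions):

    import numpy as np
    import torch
    from modnef.quantizer import MinMaxQuantizer

    q = MinMaxQuantizer(bitwidth=8, dtype=torch.int32)
    q.init_from_weight(torch.randn(8, 8))   # sets x_min/x_max and the integer bounds

    q(0.25)                        # float in, Python scalar out
    q([0.1, -0.3], unscale=True)   # list in, dequantized list out
    q(np.zeros(4))                 # ndarray in, ndarray out
    q(torch.randn(3, 3))           # tensor in, detached tensor out
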
diff --git a/modneflib/setup.py b/modneflib/setup.py
index 191c203518ffd4859c031747e55798c5abfede63..6f562080ee3cec9da7ee8f928aaf14ce14f0dd8c 100644
--- a/modneflib/setup.py
+++ b/modneflib/setup.py
@@ -3,7 +3,7 @@ from setuptools import find_packages, setup
 setup(
         name = "modnef",
         packages=find_packages(),
-        version = "=2.0.0",
+        version = "2.0.0",
         description="ModNEF python librairy",
         author="Aurelie Saulquin",
         install_requires=["networkx", "matplotlib", "pyyaml", "torch", "snntorch"],