From 12db91cc5a37f9e0b93deb3688e668155adfd354 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Aur=C3=A9lie=20saulquin?= <aurelie.saulq@gmail.com>
Date: Wed, 26 Feb 2025 15:25:26 +0100
Subject: [PATCH] add quantizer and documentation

---
 .gitignore                                    |   1 +
 JSONConfiguration.md                          | 247 ++++++++++++++++++
 modneflib/modnef/__init__.py                  |   3 +-
 .../__pycache__/__init__.cpython-310.pyc      | Bin 254 -> 275 bytes
 .../__pycache__/__init__.cpython-310.pyc      | Bin 429 -> 431 bytes
 .../modnef_builder.cpython-310.pyc            | Bin 14163 -> 14165 bytes
 .../BLIF/__pycache__/__init__.cpython-310.pyc | Bin 516 -> 518 bytes
 .../BLIF/__pycache__/blif.cpython-310.pyc     | Bin 9376 -> 9127 bytes
 .../__pycache__/blif_debugger.cpython-310.pyc | Bin 10092 -> 10094 bytes
 .../BLIF/__pycache__/rblif.cpython-310.pyc    | Bin 10107 -> 10109 bytes
 .../modnef/arch_builder/modules/BLIF/blif.py  |  84 +++---
 .../__pycache__/__init__.cpython-310.pyc      | Bin 513 -> 515 bytes
 .../__pycache__/probe.cpython-310.pyc         | Bin 3997 -> 3999 bytes
 .../SLIF/__pycache__/__init__.cpython-310.pyc | Bin 522 -> 524 bytes
 .../SLIF/__pycache__/rslif.cpython-310.pyc    | Bin 9619 -> 9621 bytes
 .../SLIF/__pycache__/slif.cpython-310.pyc     | Bin 8833 -> 8835 bytes
 .../__pycache__/slif_debugger.cpython-310.pyc | Bin 9477 -> 9479 bytes
 .../__pycache__/__init__.cpython-310.pyc      | Bin 479 -> 481 bytes
 .../__pycache__/rshiftlif.cpython-310.pyc     | Bin 10175 -> 10177 bytes
 .../__pycache__/shiftlif.cpython-310.pyc      | Bin 9313 -> 9315 bytes
 .../UART/__pycache__/__init__.cpython-310.pyc | Bin 716 -> 718 bytes
 .../__pycache__/uart_1step.cpython-310.pyc    | Bin 6547 -> 6549 bytes
 .../uart_classifier.cpython-310.pyc           | Bin 6646 -> 6648 bytes
 .../uart_classifier_timer.cpython-310.pyc     | Bin 6768 -> 6770 bytes
 .../__pycache__/uart_xstep.cpython-310.pyc    | Bin 6812 -> 6814 bytes
 .../uart_xstep_timer.cpython-310.pyc          | Bin 7142 -> 7144 bytes
 .../__pycache__/__init__.cpython-310.pyc      | Bin 771 -> 773 bytes
 .../__pycache__/io_arch.cpython-310.pyc       | Bin 4434 -> 4436 bytes
 .../__pycache__/merger.cpython-310.pyc        | Bin 4327 -> 4329 bytes
 .../modnef_arch_mod.cpython-310.pyc           | Bin 5565 -> 5567 bytes
 .../__pycache__/splitter.cpython-310.pyc      | Bin 3609 -> 3611 bytes
 .../__pycache__/utilities.cpython-310.pyc     | Bin 5565 -> 5567 bytes
 .../__pycache__/__init__.cpython-310.pyc      | Bin 483 -> 485 bytes
 .../modnef_drivers.cpython-310.pyc            | Bin 1306 -> 1308 bytes
 .../__pycache__/__init__.cpython-310.pyc      | Bin 907 -> 909 bytes
 .../__pycache__/__init__.cpython-310.pyc      | Bin 336 -> 336 bytes
 .../__pycache__/executor.cpython-310.pyc      | Bin 8425 -> 8342 bytes
 .../__pycache__/model.cpython-310.pyc         | Bin 7061 -> 8854 bytes
 .../__pycache__/trainer.cpython-310.pyc       | Bin 5499 -> 7815 bytes
 modneflib/modnef/modnef_torch/executor.py     |  13 +-
 .../__pycache__/__init__.cpython-310.pyc      | Bin 565 -> 567 bytes
 .../modnef_torch_neuron.cpython-310.pyc       | Bin 3380 -> 3382 bytes
 .../__pycache__/__init__.cpython-310.pyc      | Bin 476 -> 478 bytes
 .../__pycache__/blif.cpython-310.pyc          | Bin 11406 -> 10321 bytes
 .../__pycache__/rblif.cpython-310.pyc         | Bin 11830 -> 11832 bytes
 .../modnef_neurons/blif_model/blif.py         |  91 ++-----
 .../__pycache__/__init__.cpython-310.pyc      | Bin 487 -> 489 bytes
 .../__pycache__/rslif.cpython-310.pyc         | Bin 11540 -> 11542 bytes
 .../__pycache__/slif.cpython-310.pyc          | Bin 11321 -> 11323 bytes
 .../__pycache__/__init__.cpython-310.pyc      | Bin 506 -> 508 bytes
 .../__pycache__/rshiftlif.cpython-310.pyc     | Bin 12396 -> 12398 bytes
 .../__pycache__/shiftlif.cpython-310.pyc      | Bin 11951 -> 11953 bytes
 modneflib/modnef/modnef_torch/trainer.py      | 226 ++++++++++++----
 modneflib/modnef/quantizer/__init__.py        |   5 +
 .../quantizer/dynamic_scale_quantizer.py      |  37 +++
 .../modnef/quantizer/fixed_point_quantizer.py |  56 ++++
 .../modnef/quantizer/min_max_quantizer.py     |  40 +++
 modneflib/modnef/quantizer/quantizer.py       |  29 ++
 .../modnef/templates/template_model.json      |   5 +-
 modneflib/setup.py                            |   2 +-
 q                                             |  30 +++
 61 files changed, 684 insertions(+), 185 deletions(-)
 create mode 100644 JSONConfiguration.md
 create mode 100644 modneflib/modnef/quantizer/__init__.py
 create mode 100644 modneflib/modnef/quantizer/dynamic_scale_quantizer.py
 create mode 100644 modneflib/modnef/quantizer/fixed_point_quantizer.py
 create mode 100644 modneflib/modnef/quantizer/min_max_quantizer.py
 create mode 100644 modneflib/modnef/quantizer/quantizer.py
 create mode 100644 q

diff --git a/.gitignore b/.gitignore
index ad85eae..fb20a1f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,3 +16,4 @@ examples/*/*.vhd
 examples/debugger_example/*.txt
 examples/*/__pycache__
 
+__pycache__/
diff --git a/JSONConfiguration.md b/JSONConfiguration.md
new file mode 100644
index 0000000..838606a
--- /dev/null
+++ b/JSONConfiguration.md
@@ -0,0 +1,247 @@
+# JSON File Configuration Explanation
+
+This file describes all the different JSON properties.
+A template file can be found inside **modneflib/modnef/template**. Additional commentary, including non-required properties, is explained in this file.
+
+## ModNEF Network Description
+
+ModNEF network model properties
+
+```json
+{
+  "name" : "model_name", // name of network model
+
+  "input" : 2312,   // number of input features
+  "num_class" : 10, // number of output class 
+
+  // insert layer description
+  // layer description are described in next section
+
+  "hardware" : {                // ModNEF VHDL hardware description 
+    "clock_name" : "clock",     // name of clock signal inside VHDL file
+    "clock_freq" : 125000000,   // clock frequency of FPGA internal clock
+    "baud_rate" : 921600,       // UART baud rate
+    "txd" : "uart_txd",         // UART txd signal name
+    "rxd" : "uart_rxd",         // UART rxd signal name
+    "queue_read_depth" : 4096,  /*Optional*/ // UART component queue read depth, can be ignored and will be computed during hardware estimation
+    "queue_write_depth" : 4096  /*Optional*/ // UART component queue write depth, can be ignored and will be computed during hardware estimation
+  },
+  
+}
+```
+
+### Neuron Description
+
+In this section, we will describe 
+
+#### Beta LIF Neuron Model Description
+
+Feed forward layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "blif",               // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emulated neurons
+    "beta" : 0.9,                   // membrane decay value
+    "threshold" : 0.8,              // threshold value
+    "reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
+    "hardware" : {              /*Optional*/ // hardware description
+      "strategy" : "Parallel",  /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,        /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_fp" : -1,         /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
+      "weight_type" : null,     /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "compute_fp" : -1,        /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
+      "mem_init_file" : null,   /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
+      "variable_size" : -1      /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
+Recurrent layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "rblif",              // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emulated neurons
+    "beta" : 0.9,                   // membrane decay value
+    "threshold" : 0.8,              // threshold value
+    "reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
+    "hardware" : {                /*Optional*/ // hardware description
+      "strategy" : "Parallel",    /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,          /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_fp" : -1,           /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
+      "weight_type" : null,       /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "compute_fp" : -1,          /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
+      "mem_init_file" : null,     /*Optional*/ // vivado memory file for feed forward weight, null by default. If null, file = layer_name_mem.mem
+      "mem_init_file_rec" : null, /*Optional*/ // vivado memory file for recurrent weight, null by default. If null, file = layer_name_mem_rec.mem
+      "variable_size" : -1        /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
+#### Shift Register based LIF Neuron Model Description
+
+Feed forward layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "shiftlif",           // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emulated neurons
+    "beta" : 0.9,                   // membrane decay value
+    "threshold" : 0.8,              // threshold value
+    "reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
+    "hardware" : {              /*Optional*/ // hardware description
+      "strategy" : "Parallel",  /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,        /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_fp" : -1,         /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
+      "weight_type" : null,     /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "compute_fp" : -1,        /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
+      "mem_init_file" : null,   /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
+      "variable_size" : -1      /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
+Recurrent layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "rshiftlif",          // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emulated neurons
+    "beta" : 0.9,                   // membrane decay value
+    "threshold" : 0.8,              // threshold value
+    "reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
+    "hardware" : {                /*Optional*/ // hardware description
+      "strategy" : "Parallel",    /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,          /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_fp" : -1,           /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
+      "weight_type" : null,       /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "compute_fp" : -1,          /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
+      "mem_init_file" : null,     /*Optional*/ // vivado memory file for feed forward weight, null by default. If null, file = layer_name_mem.mem
+      "mem_init_file_rec" : null, /*Optional*/ // vivado memory file for recurrent weight, null by default. If null, file = layer_name_mem_rec.mem
+      "variable_size" : -1        /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
+#### Simplified LIF Neuron Model Description
+
+Feed forward layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "slif",               // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emulated neurons
+    "threshold" : 0.8,              // threshold value
+    "leak" : 0.015,                 // memory leakage value
+    "min" : 0.0,                    // minimal memory voltage value
+    "rest" : 0.0,                   // resting memory potential
+    "hardware" : {              /*Optional*/ // hardware description
+      "strategy" : "Parallel",  /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,        /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_type" : null,     /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "mem_init_file" : null,   /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
+      "variable_size" : -1      /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
+Recurrent layer
+
+```json
+{
+  "name of layer" : {               // name of layer (will be the name of VHDL component implementation)
+    "model" : "rslif",              // neuron model
+    "in_features" : 64,             // number of input features
+    "neurons" : 10,                 // number of simulated/emulated neurons
+    "threshold" : 0.8,              // threshold value
+    "leak" : 0.015,                 // memory leakage value
+    "min" : 0.0,                    // minimal memory voltage value
+    "rest" : 0.0,                   // resting memory potential
+    "hardware" : {                /*Optional*/ // hardware description
+      "strategy" : "Parallel",    /*Optional*/ // hardware emulation strategy, Parallel by default
+      "weight_size" : 8,          /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
+      "weight_type" : null,       /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
+      "mem_init_file" : null,     /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
+      "mem_init_file_rec" : null, /*Optional*/ // vivado memory file for recurrent weight, null by default. If null, file = layer_name_mem_rec.mem
+      "variable_size" : -1        /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
+    }
+  }
+}
+```
+
+## ModNEF Project Description
+
+```json
+{
+  "exp_name" : "template", 
+
+  "model": {
+    // describe you model here
+  },
+
+  "optimizer" : {         /*Optional*/ // Adam optimizer properties, optional. If no optimizer properties, default optimizer is configured with followed values
+    "lr" : 1e-4,          // learning rate
+    "betas" : [0.9, 0.99] // betas values
+  },
+
+  "best_model" : "best_my_model", /*Optional*/ // best model file name, optional. By default best_{model.name}
+
+  "verbose" : true, /*Optional*/ // verbose mode, by default true
+
+  "device" : "auto",  /*Optional*/ // torch device, by default auto. If auto, device is automatically detected
+
+  "train" : {                 // Train configuration. Optional if you don't want to train with ModNEF Executor.
+    "name" : "Train",         // name printed during train progression
+    "n_epoch" : 5,            // number of epoch
+    "save_best_model" : true, /*Optional*/ // save best accuracy model, by default true
+    "save_history" : true,    /*Optional*/ // save average loss, test accuracy and validation accuracy through epoch, by default true
+    "plot" : true             /*Optional*/ // save matplotlib plot, by default true
+  },
+
+  "eval" : {                  // Software evaluation configuration, Optional if you don't want to run software evaluation
+    "name" : "Evaluation",    // name printed during evaluation progression
+    "use_best_model" : true,  /* Optional*/ // use best model for evaluation, by default true
+    "conf_matrix" : true      /* Optional*/ // generate matplotlib confusion matrix, by default true
+  },
+
+  "hardware_estimation" : {         // Hardware estimation configuration, Optional if you don't want to run hardware estimation
+    "name" : "Hardware Estimation", // name printed during evaluation progression
+    "use_best_model" : true,        /* Optional*/ // use best model for evaluation, by default true
+    "conf_matrix" : true            /* Optional*/ // generate matplotlib confusion matrix, by default true
+  },
+
+  "vhdl" : {                        // VHDL generation configuration, Optional if you don't want to run vhdl generation
+    "use_best_model" : false,       /* Optional*/ // use best model for evaluation, by default true
+    "driver_config" : "driver.yml", /* Optional*/ // driver configuration file, by default driver.yml
+    "file_name" : null              /* Optional*/ // VHDL file name, if null, model.name.vhd
+  },
+
+  "fpga" : {                          // FPGA evaluation configuration, Optional if you don't want to run FPGA evaluation
+    "name" : "FPGA Evaluation",       // name printed during evaluation progression
+    "driver_file" : "driver.yml",     // driver configuration file
+    "board_path" : "board path here", // path to UART board 
+    "conf_matrix" : true              /* Optional*/ // generate matplotlib confusion matrix, by default true
+  },
+
+  "confusion_matrix" : { // Necessary if you want to generate the confusion matrix
+    "class" : ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]  // Name of classes
+  }
+
+}
+```
\ No newline at end of file
diff --git a/modneflib/modnef/__init__.py b/modneflib/modnef/__init__.py
index b3d7654..e622685 100644
--- a/modneflib/modnef/__init__.py
+++ b/modneflib/modnef/__init__.py
@@ -1,4 +1,5 @@
 from .arch_builder import *
 from .modnef_driver import *
 from .tools import *
-from .modnef_torch import *
\ No newline at end of file
+from .modnef_torch import *
+from .quantizer import *
\ No newline at end of file
diff --git a/modneflib/modnef/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/__pycache__/__init__.cpython-310.pyc
index 994b886f581c7f7545bdaa0b73d8e90bf2a0fb39..a633a6b32c555f8bc0a12ede59d6808044acc687 100644
GIT binary patch
delta 95
zcmeyzIGKqzpO=@5fq{X+P<nrQ(?nibMxBY;DpCw7j5#bJ3@J>(44TX@85tNDG#PI(
vYWZohPE0fsxy4ynnwVFTS(RF}lA(x$fq|ijW8$1JK9E5i3>=Ig$ioN#2ssoK

delta 74
zcmbQt^pBA@pO=@5fq{X+sAp$-=0sjuMwN-$Dnd-b44TX@85tNDG#PI(YWZohOw2Um
eT**+x&cMJ>#6EF-7!L;n0|N&G2O|d~4<i6sPYwhC

diff --git a/modneflib/modnef/arch_builder/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/arch_builder/__pycache__/__init__.cpython-310.pyc
index beca1367da962b7eb394e3a253cc92cd13c99bd2..96593e53511f2055d5ca6d15b399ebe4cdaaa805 100644
GIT binary patch
delta 40
vcmZ3>yq=jmpO=@5fq{YHx#a$h+}4cz{`wjDxvBc8xv52or75YCgBha$)xQg-

delta 38
ucmZ3_yq1|epO=@5fq{YH`kg%+xvd#_ef2Z)b5r#bON&yA6DNl<MgaiMq6>`x

diff --git a/modneflib/modnef/arch_builder/__pycache__/modnef_builder.cpython-310.pyc b/modneflib/modnef/arch_builder/__pycache__/modnef_builder.cpython-310.pyc
index 504cbd1208501c326392c20cc12ffd618f8c7d22..13dcb3a274bcba9ebcef1c4cf40fd8fa4eabcd36 100644
GIT binary patch
delta 41
wcmcbdcQubYpO=@5fq{V`^~t`C+|s=Kq52v5xvBc8xv52or75YKjd>Nc027}K{r~^~

delta 39
ucmcbbcR7zcpO=@5fq{V`=iZ);+|s<f!TK5bxvBbzrA4X5iJMJ%6|?~OZwxd5

diff --git a/modneflib/modnef/arch_builder/modules/BLIF/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/BLIF/__pycache__/__init__.cpython-310.pyc
index 49a173f5bb67900460047fbac675a8cea2b50726..0dff00858b51446fe5ccb10619411224722e0250 100644
GIT binary patch
delta 40
vcmZo+X=CBe=jG*MU|?WKeX?&OcQYe@jDALbZmNE2Zfa3tX-ewk*^HS0*lG+)

delta 38
tcmZo;X<^~c=jG*MU|?X7y0d2^cQYezlzv8jZmND_X;Er%;^euEnE<!{3oHNt

diff --git a/modneflib/modnef/arch_builder/modules/BLIF/__pycache__/blif.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/BLIF/__pycache__/blif.cpython-310.pyc
index 8b4abc9bf6efed6cff7455367f07e4efdc27e1e3..5e5a6ce1aab3e0ab07ec3e9bc770217363894968 100644
GIT binary patch
delta 2507
zcmZ4Bx!j#EpO=@5fq{V`Sz&*Ao!Ue`8OD%_+7{9*Da<*XAq*)TQC#i}DV!->Eet7K
zsZ7nxQQRBLwzJkRVqjoMWr$)-VTfW%VT@u<VM<|6VM$?1VM$?4VM}F6VQ*$k<4EC1
zWliBsWlP~oWlP~sWl!NrWl!NvWlP~pWlP~tWla%CWla%GWlIrCWla%IWla%DWlNK4
zXGmjA5ls<m;f>-*5l@k5VTj^PkxY?lVTj^N<xY`KV@gq#Ns(<~jN(aANs&)wO=C(?
zNKtHIjp9vFN>OfMh~jH!U}1>j-+Y?QfR*Vb$K)T}v!wVzzJy{Bo0Wlqfti7Up*U>v
z8Xh%6z8Z#j)*9{<#%!h{g&Kw|w%H6ROmmrP7~<LUm}|H}bPZ1pLp(<ca}7g0XA0wH
zRo*i!Nq$w_@#U$R=@}(@xv9BDybKHsnq0S7i%Sbqi*9km$7kkcmc+*w2{AA*6bXX}
zF$M;PTbzZZiFqZNRjEZqAg2^bf(7G?ONtUpQqwDoq(R2;-(o3F%}JY_C}g7v7AcZr
zU|?9uc#AVWJ~=0`xHvw3CBv@>{fzwFRQ=T4)S|@Fl+?+)guKON!I8tjP%Og0z#w48
z#t4Q?lhuUR8pwmqC{h5qMHx=0fE*xPmXVSZpPZjtke`>DR}!C>n47w0GPB5LQIMu0
zHMnZ9K9$L*Mda9afCM!rzZWsC=VV}D;9y{2a0Ufx4<iFZ4O0zc4MP?~4MP@VGjlD7
z&6LGlEL6jg#k7E>hH)VyBS?-Zi?x=egsp~oHbV+X4f||{xy&g{%}lkdAeC7hMKvXy
zAaj}-T^M2wYS~hlYuUjfj43P<3@NNBtj)|W4E3=>wH!6<H7s@vC0sQO3qY2$Nix)M
zNHQ#BtYK+p5@)DoFEp=VUBI2fR>D)m*33AYA%(q!r-mKO;+V?{a!{dG32zNU7GE<X
zBg`CzEWU*xhw(Rqk^>(Wd4YwDHT4Wx5CsBxE@0O&fZeNy%e}DzVCOPq3EDBBI~VNK
zK7vkd1nYx3Pbr=c$`z<#$P!oxa;rcM>jI&L42%pZoWTs5T$`thA7iZl#i)OavAl={
z6m#q#0+j!MF*^KWRJg^+<)<lJBmfc>1QDPVcZ)Bx7@UqX6LT^Xt5R<XfD?6EQGPBo
z>EGf=$}B0*Oex8@#gbo;ng>bGY{i-Bd8sM4Sc?+#(o=77l$7U#5|>%QE!Og)%#zew
zY$f^e8L1VwSPP0W^GYCj1eEe283mMfZ?Pul<QJ!Eauz8}c9GI$W=blb+#@9!1Wtfp
z0-SL9AUQBTIX|x~wW#DUC;{q$l0H8N6AvQ?lNb{R6CWcBqZp$SqY@JrlNcixqXZ)#
zGe|^^QHhC<NocaDv?HU}<Zx+4y)2FzhAj3hP?BTM;wq9%Va#JpVFHVRWVpeJnkR+1
zmp7O}lV$QW=?IBi9Ody)-*T15m*zp(n!J+}g`~7WIlf2~6q+C}6^Vmb5+DMU>x<k$
zERe4?d5btE#|g>SgVloro;xkGA~hwxARiPwCLo1oAOfrptc3}ZkHKbug9A)}!-Tse
zKRzWjsWd%3wdgA-KtLukak4NXAgEN}V`O7wVPs-tVPyMUCE(<fnKt>Pj3gf$a$epy
z`Gbs3Jwq027F&^23UeM~3QH|(4dVj#6i^1`n9Y#FR>P3R3Cc~7JOq{lg%#Iqh7``Z
z%%FS)7UQa6$l^v5<E~-I;z1MRsbN?E%7vg@$Fq=WHbV+;J(4k?gbtEs25E&zBgyiE
zvM)#rk{BpUux9b2StD2j@{|CYs89_nnr`75hOBx_?IJY{S%R|}Qbds~5`+6yyoLqM
zU<tUKB$&mpK&XbXhABl#k^!9NI8vljWKv`ovVn?4upCI786q#0B9|hQA`g}X1-$~u
zh!mz^22I7y^W|PMaus=i(uOAk1H<Mng;YjHx5*zACF?;I&n;$9`T^HI)*v;Ylu!gt
zNtPfkxCR23@Q8d5N?o_u;~^Qe2%I9lKx)B>5S0FI@yCNRaeP63QAvDmVu2<bI4$1d
z1Xm(ynK`MqSPF_JcPI%v-C`;)DFUZVFab`U>?Qf}AeDTK3=GAfyecEW!N|fWz$C*Y
z!U%$LjB<=3jAD#pj6#fJldmf&a4;6RfvQD;$ui1%^}=qTtPudp8G*36^cHh*Nl{S%
zs48Pk%gIkHDFRi5Md~1jYlB3EQc}|rOLI!%Q!7e}5|c|Z^Ye;aL9*T;A_Am@7h1O_
zr{?6u$0NB~5u^m<^de3M1_pTxP?ib-=Mol1CSxW>1Y~1m`84^WvZ=5;2M-^&fRTVC
zSc{(~Pf^TdNfnRaTLQWHDS4@BdWcqlCdhP<!ALeD0*k{YH$SB`C)JJ-WGg6^SQvP?
QI2Z*Oc^EkuS(rrB0kT9xng9R*

delta 2778
zcmZ4PzQB_&pO=@5fq{Wx<F!5MUYZm6WEg!WYFh|%L~*(^q;RHiwJ@Y`r7|@$M{#Ye
z+s;})hk=11l_82Tg&~S5g)xemlZlg=lZBIslZBI&lPyI#g*}xeg(H<Ug)@~kg)5aU
zg*%lkg(sCYg*TNog)fyQg+G-gMIe<mMKF~mMJSaeML3l;O{$$CjWI<eMYM%CiakXv
zMZAR}iX%lLMY4q<iZhifMJkOcMLI>Mg)vH=J4Gc$E|n#XDMda-p@lVyCq*$usf8hm
zx1E87A&PJFO*RA8$$UK1rT9Ref?^Pxm4ShQnSp_!xMK2t9<^Zh6vk|(BFP%I8ish*
zJmwm95Dn(B)o|7@#Ix6M)iA_!)NrRTWiu5S)-Yso&Spqqp379j5YLszT*D2bYj|oH
z;<;;hYZ&5rQkXW2@t$F+F9L<8Uln(Jd1_{QMu}c-YOW^lE!N`Fg4Cj09P#m)d6^~g
z@kN3R3=Bm=AVL&Gh=2%T5FrL4#6bke)kP8v3=Fq8<BLm*5=&CkD~qHU7#KA9Zm|@n
z=A;#YRq%t<3V;ZZnUl{*aZipDQj%TCc#AVWJ~=0`xHvw3CBv^U{fzwFRQ<%#qSWHV
z$t#7t6=fJ07*aqXBFDhMAYjVI2!b3;9E@yCY)l~bWNzWL;<5}33@aInKx&E<LAHPi
zrOD@oB^cLFej&VB6r`d^1ug}Z-ykB#wi_g<Hu;K(aXk+M0|N&G1A{Zj!XzdJh8l(}
zh7`tZ<|0WjiwVwR2C+&QL87x6Qds6PH8V0Yq%a3F)G*b6M44(BvY4BhYe8&=6y|KE
zV(l7+ES413Y^I{R64nyd8pdYEX2x2U61EaHD4Vsuh9!k9o1w@)g|UVqo*|Dp1r(7W
zN5C|)m#~#^)UY%&mT=UtHZwLeH8a+-)v%?oWiu5;rm)qr=Ly!ZrLbo+6(yvw*Rtnv
zm2j4D)vz@)mT;wTG&4#wG&9z+gB%S`SQRx4S==@35)3Kq5)3sA@%1cuY%rl3hIp0|
zo*L%a3@My73|YLh8Rjyla5Xd4a=^7eDB-JN$YN?{bYX}!uH{T&tK|ZVFoTRsVNGFe
zW_DqS6|Ln4rP3^JJBAYe5`h}V8cs=u8g5Cjx0*rW#NEs!&QQx$xS+m<V}W1_cZpC9
zXEWn$h7_I>p&BkQi+3*TLPkc0!pUGG7_x+G7_vmbnngimyf9Q;3{6}t&l~JqrfjAO
zj78dD=Rq8-kHx|D5Ely~xfsK}V3+<N=+M_C0yRj^j28y;;m(T}tKnE6zK{WuW;FQ{
z4>B?^aBW^Ac7Sp6QYo=|zbZk6%wmPS{1S!Y(t?8gqLS1UaGot<0i{-U5W&d6!0?Mv
z{}yBUFGh!7j0(3Hxilq;KsoakYe7+FUP%!m(}A+;E#}<Jyj#q<i50h)GxJJrF()P!
z7qNj%0+l*NpnQFcCBGmwuSg;Yl%H9P67$kiZ*i2A=f@}K=N8=JE6I;f%dALEi7&_p
z8Ngazlv$E`i>)L-J|ndPQdoeC2vGJ17aO-&lXLQmQ#E;t<R&Xh>PoYg$AjDeq7o}^
zab)I|#1|wMmE2-VE0`Q8DOs-wat)YJ0=a+>QaZ&a=jWBB7L^<W<w_q01_n8P0Y(-^
z0VWY99wr_}NMXRmB*w(S#Ky?M$i^tb#Kk1U$ipPY#KpwN$igVbsKKbjB*7%b$OGjo
zFe)(#FbOd7FjWaV`DCW)fo+<6TGEKoYVtoxMY$S=EKouRC4DY%mH=gBo)pGj-e3kz
zrpdli5%rq9*oro=zl%gc9s!31C|JRfQUr<va7kI@4Uz=~k|u8vsJJ1^9A5?oNIA;{
zDfYmIg2N6>fCG@bBtJeSHK{Z`J++9Rk%6HYR6sIuvM^%6$^FtAe4umyau2xpTQ&Kp
zv`#(a0@f7f8ip)3P$^f#ki|ZmA%zu^8EZgtpz?xaHbV;gTxL)P1&e_L4owUkkZ59D
zH4F>5YZ$UXLCdv}X*NR&H<BqlAlZ6QF<Qfrg(=Nj!;r;;rUg_S@uFG6U&D~ahb9Kf
zn*3;b1#1|x1klt8)zmX&3C?Cn5k_*C2;85dH7sZv#Ncw`U>3syp&G^-rW6TDhAdD>
zv870+NTo<GWMgEgfyjZxnIZBiGAU9ivS2w-$jX6CU`t^NX3&)1+#&m#adWMFDr3GE
z0|SHGE#~~vl8`DEPEBhrP2pQCC6xuKko;!~N?PC=+X%$61`*&27+n7$3ItH<yTu+4
z&APag>*PR1QD)X6xyhM|sy4_~D=2@mm*mHnWu)Y2fRdID0|SGM00$!rqYRS>BM8bd
z%7JMRTV(PDMFmxMNEyb>z`)=KX8IL@s@x)8P<w%~2;?_S{>eWS_3D|6ONxp>p1H-E
zmXn`YQUq!T6sdx|qY1KFC?z#5u{5V7KDDBxC^5MtGe57$6C?{NABw_3N_e3Si{#Xt
zocMSox66anf$~QYCj$e6IjB?t6?hyBER0OXOpGAN0V0?fSw2rbqGT%U&B4RRE#N62
z3D)4J$z2pZnMc{fzDONp637rF(-FCk!zMRBr8Fni4pdSXgJOY&frpEOQGk($k%dV_
F9RSH<SI__e

diff --git a/modneflib/modnef/arch_builder/modules/BLIF/__pycache__/blif_debugger.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/BLIF/__pycache__/blif_debugger.cpython-310.pyc
index 41b72a50c830ad82af323d49e341b407cf404b0f..80ffdc923d7506f65d9752d4590a24bf213bd066 100644
GIT binary patch
delta 41
wcmaFk_s)+ypO=@5fq{V`^~t`C+`=OKiTWA&xvBc8xv52or75YKwM86-0TQ7N1ONa4

delta 39
vcmaFo_r{MqpO=@5fq{Wx&9yxnxrIe|<MlK0b5r#bON&yA6F2LMI0^#*{Vxo1

diff --git a/modneflib/modnef/arch_builder/modules/BLIF/__pycache__/rblif.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/BLIF/__pycache__/rblif.cpython-310.pyc
index 4fbc49752317251f2e0cfee2954b9fc19b711512..dcede5bf6638a81f47641b425d9ba73133b55709 100644
GIT binary patch
delta 41
wcmezE_t%d*pO=@5fq{V`^~t`C+&m)uk@^|=xvBc8xv52or75YKRYbyt0Tv()A^-pY

delta 39
vcmezC_uG#<pO=@5fq{Wx@3lP}xp_o*!}T-rb5r#bON&yA6E~}ggbM=z0$B{9

diff --git a/modneflib/modnef/arch_builder/modules/BLIF/blif.py b/modneflib/modnef/arch_builder/modules/BLIF/blif.py
index 0f10b9e..daf1c76 100644
--- a/modneflib/modnef/arch_builder/modules/BLIF/blif.py
+++ b/modneflib/modnef/arch_builder/modules/BLIF/blif.py
@@ -12,6 +12,7 @@ from ..modnef_arch_mod import ModNEFArchMod
 from ..utilities import *
 from math import log, ceil
 from .blif_debugger import BLif_Debugger
+from modnef.quantizer import *
 
 _BLIF_DEFINITION = """
   component BLif_{0} is
@@ -110,7 +111,8 @@ class BLif(ModNEFArchMod):
                weight_fp : int = -1, 
                mem_init_file : str = None,
                strategy : str = "Parallel",
-               variable_size : int = 16  
+               variable_size : int = 16,
+               quantizer = FixedPointQuantizer(8)
               ):
     """
     Init attributes
@@ -155,14 +157,7 @@ class BLif(ModNEFArchMod):
     self.beta = beta
     self.reset = reset
 
-    if compute_fp == -1:
-      self.compute_fp = weight_size
-    else:
-      self.compute_fp = compute_fp
-
-    self.weight_size = weight_size
-    self.weight_type = weight_type
-    self.weight_fp = weight_fp
+    self.quantizer = quantizer
 
     if mem_init_file == None:
       self.mem_init_file = f"{self.name}_weight.mem"
@@ -170,7 +165,9 @@ class BLif(ModNEFArchMod):
       self.mem_init_file = mem_init_file
 
     self._strategy = strategy
-    self.variable_size = variable_size
+    self.variable_size = 16
+
+    
     
   def vhdl_component_name(self):
     """
@@ -209,62 +206,39 @@ class BLif(ModNEFArchMod):
     output_path : str = "."
       memory file output path
     """
-    
-    if self.weight_type != "w_signed" and self.weight_type != "w_unsigned" and self.weight_type != None:
-      print(f"{self.weight_type} is not supported")
-      return 
 
     weights = weight_extraction(weights, self.input_neuron, self.output_neuron)
 
-    if self.weight_type == None or self.weight_fp == -1:
-
-      w_min = min(min(weights))
-      w_max = max(max(weights))
+    if not self.quantizer.is_initialiaze:
+      self.quantizer.init_from_weight(weights)
 
-      if w_min < 0:
-        self.weight_type = "w_signed"
-      else:
-        self.weight_type = "w_unsigned"
-
-      if self.weight_fp == -1:
-
-        int_part = int(max(abs(w_min), abs(w_max)))
-
-        if int_part==0:
-          fp = 0
-        elif int_part==1:
-          fp = 1
-        else:
-          fp = ceil(log(int_part)/log(2))
-
-        if self.weight_type == "w_signed":
-          self.weight_fp = self.weight_size-fp-1
-        else:
-          self.weight_fp = self.weight_size-fp
+    bw = self.quantizer.bitwidth
 
     mem_file = open(f"{output_path}/{self.mem_init_file}", 'w')
     
-    if self.weight_type == "w_signed":
+    if self.quantizer.signed:
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.weight_size) + two_comp(to_fixed_point(weights[i][j], self.weight_fp), self.weight_size)
+          w_line = (w_line<<bw) + two_comp(self.quantizer(weights[i][j]), bw)
 
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
-      self.v_threshold = two_comp(to_fixed_point(self.v_threshold, self.compute_fp), self.variable_size)
-      self.beta = two_comp(to_fixed_point(self.beta, self.compute_fp), self.variable_size)
+      print(self.v_threshold)
+      self.v_threshold = two_comp(self.quantizer(self.v_threshold), self.variable_size)
+      print(self.v_threshold)
+      self.beta = two_comp(self.quantizer(self.beta), self.variable_size)
 
-    elif self.weight_type == "w_unsigned":
+    else:
       for i in range(self.input_neuron):
         w_line = 0
         for j in range(self.output_neuron-1, -1, -1):
-          w_line = (w_line<<self.weight_size) + to_fixed_point(weights[i][j], self.weight_fp)
+          w_line = (w_line<<self.weight_size) + self.quantizer(weights[i][j])
         
         mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
 
-      self.v_threshold = to_fixed_point(self.v_threshold, self.compute_fp)
-      self.beta = to_fixed_point(self.beta, self.compute_fp)
+      self.v_threshold = self.quantizer(self.v_threshold)
+      self.beta = self.quantizer(self.beta, self.beta)
     
     mem_file.close()
 
@@ -284,10 +258,10 @@ class BLif(ModNEFArchMod):
       v_threshold = self.v_threshold,
       beta = self.beta,
       reset = self.reset,
-      compute_fp = self.compute_fp,
-      weight_size = self.weight_size,
-      weight_type = self.weight_type,
-      weight_fp = self.weight_fp,
+      compute_fp = self.quantizer.fixed_point,
+      weight_size = self.quantizer.bitwidth,
+      weight_type = "w_signed" if self.quantizer.signed else "w_unsigned",
+      weight_fp = self.quantizer.fixed_point,
       mem_init_file = self.mem_init_file,
       output_file = output_file,
       variable_size=self.variable_size
@@ -312,6 +286,8 @@ class BLif(ModNEFArchMod):
     if type(self.v_threshold) != int or type(self.beta) != int:
       print("neuron hyper parameters are not int. If you set hyper parameters as float, pleasse run weight_convert before calling to_vhdl")
       return
+    
+    wt = "w_signed" if self.quantizer.signed else "w_unsigned"
 
     vhdl_file.write(f"\t{self.name} : BLif_{self._strategy} generic map(\n")
     vhdl_file.write(f"\t\tinput_neuron => {self.input_neuron},\n")
@@ -320,10 +296,10 @@ class BLif(ModNEFArchMod):
     vhdl_file.write(f"\t\tv_threshold => x\"{self._to_hex(self.v_threshold, self.variable_size)}\",\n")
     vhdl_file.write(f"\t\tbeta => x\"{self._to_hex(self.beta, self.variable_size)}\",\n")
     vhdl_file.write(f"\t\treset => \"{self.reset}\",\n")
-    vhdl_file.write(f"\t\tcompute_fp => {self.compute_fp},\n")
-    vhdl_file.write(f"\t\tweight_size => {self.weight_size},\n")
-    vhdl_file.write(f"\t\tweight_type => \"{self.weight_type}\",\n")
-    vhdl_file.write(f"\t\tweight_fp => {self.weight_fp},\n")
+    vhdl_file.write(f"\t\tcompute_fp => {self.quantizer.fixed_point},\n")
+    vhdl_file.write(f"\t\tweight_size => {self.quantizer.bitwidth},\n")
+    vhdl_file.write(f"\t\tweight_type => \"{wt}\",\n")
+    vhdl_file.write(f"\t\tweight_fp => {self.quantizer.fixed_point},\n")
     vhdl_file.write(f"\t\tmem_init_file => \"{self.mem_init_file}\"\n")
     vhdl_file.write("\t) port map(\n")
     vhdl_file.write(f"\t\ti_clk => {clock_name},\n")
diff --git a/modneflib/modnef/arch_builder/modules/Debugger/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/Debugger/__pycache__/__init__.cpython-310.pyc
index 0313b89029f0785da81763dd763cc417c34d4f12..25e0145301913abbb4a39ca94a8471c0f40cf179 100644
GIT binary patch
delta 40
vcmZo<X=dTh=jG*MU|?WKeX?&OcMT(df__GRZmNE2Zfa3tX-ewkDU7)Q*C7l(

delta 38
tcmZo>X=LHf=jG*MU|?V{zq4l}cMT(NoPI`rZmND_X;Er%;^b+Jxd6GH3q1e;

diff --git a/modneflib/modnef/arch_builder/modules/Debugger/__pycache__/probe.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/Debugger/__pycache__/probe.cpython-310.pyc
index 025d27c86cb4916634bc580991341d7529948ff5..25e0bb87b8145505b169c6b8ba2936f67bde205f 100644
GIT binary patch
delta 41
xcmbO$KVP0ZpO=@5fq{V`^~t`C+@0+FvHBVLxvBc8xv52or75YK7qb6h2LSii4TS&z

delta 39
vcmbO)KUbbRpO=@5fq{Xc=K7wE+@0*a(fS$rxvBbzrA4X5iJKR*|6vCJ-rEes

diff --git a/modneflib/modnef/arch_builder/modules/SLIF/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/SLIF/__pycache__/__init__.cpython-310.pyc
index 4b8b352519bdebb320160c2186cdf940c56d2c37..65eb5bcbf3cdc42ec6d7af92fbfe3677f948e4f4 100644
GIT binary patch
delta 40
vcmeBT>0#l{=jG*MU|?WKeX?&OcPAr%jDALbZmNE2Zfa3tX-ewkg^Za1+SCkU

delta 38
tcmeBS>0;r|=jG*MU|?WKzO!c|cPArnlzv8jZmND_X;Er%;^f7QnE=6N3ylB(

diff --git a/modneflib/modnef/arch_builder/modules/SLIF/__pycache__/rslif.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/SLIF/__pycache__/rslif.cpython-310.pyc
index 42585a42f7d9a759c0a2944307e4c36eaf589ed8..73460da7b9914ae52c0c423146306725426af7b9 100644
GIT binary patch
delta 41
xcmbR2J=L2#pO=@5fq{V`^~t`C+!aFnk@^|=xvBc8xv52or75YKCkQbM0|5Uq49oxk

delta 39
vcmbR0J=vQ(pO=@5fq{YH-HkmPxhsTt!}T-rb5r#bON&yA6E{y1VipDf@oo%$

diff --git a/modneflib/modnef/arch_builder/modules/SLIF/__pycache__/slif.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/SLIF/__pycache__/slif.cpython-310.pyc
index 2c3d72db63914c070c3bb611854ce6476686f01c..937248ad9c387cbd8ecf830f069bc71a1e05554e 100644
GIT binary patch
delta 41
xcmZp4ZFc3(=jG*MU|?WKeX?&O_gn$~2>p!w+*JM4+|;7P(v;NATLsPt0s#E(4X6MB

delta 39
vcmZp6ZFJ?%=jG*MU|?X_b92u|?zsZIVfq>QxvBbzrA4X5iJP|zoDl>7>P!tO

diff --git a/modneflib/modnef/arch_builder/modules/SLIF/__pycache__/slif_debugger.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/SLIF/__pycache__/slif_debugger.cpython-310.pyc
index 8c54791368fe6ec77399d7eafcd1fa6da2ecf103..a3704fe76617ca12ee71efad2f51704a5e067ccc 100644
GIT binary patch
delta 41
wcmZqmYWL#K=jG*MU|?WKeX?&O_a{OAME#8X+*JM4+|;7P(v;NATtdHv00rp{pa1{>

delta 39
ucmZqoYW3pI=jG*MU|?V<zPV>3_a{N#c>Rq0+*JL<(xTMj#LYZHzl8wm><rid

diff --git a/modneflib/modnef/arch_builder/modules/ShiftLIF/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/ShiftLIF/__pycache__/__init__.cpython-310.pyc
index 5b336006bc2d68f872a4292e44a989684ee8312d..566c9a8d094cc450019c1ae8e1266526df36ed4e 100644
GIT binary patch
delta 40
vcmcc5{E(SDpO=@5fq{V`^~t`C+%b&&3HllNxvBc8xv52or75YCOBk~O@K+4B

delta 38
tcmaFJe4m*+pO=@5fq{V`;Le_n+%b&2arzngxvBbzrA4X5iIdA1vjNl)3(f!l

diff --git a/modneflib/modnef/arch_builder/modules/ShiftLIF/__pycache__/rshiftlif.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/ShiftLIF/__pycache__/rshiftlif.cpython-310.pyc
index 54c016a91cbfcf59741329fc4c576747f0053fea..a514ea545f6944369efcd3395d3fba18055971a0 100644
GIT binary patch
delta 41
wcmdn*f6$*hpO=@5fq{V`^~t`C+!iAIiTWA&xvBc8xv52or75YK14QzL0SO`vBme*a

delta 39
ucmX@;zu%ubpO=@5fq{X+{KlS*+!i9d@%kD0xvBbzrA4X5iJOB&@`VBDW(*$y

diff --git a/modneflib/modnef/arch_builder/modules/ShiftLIF/__pycache__/shiftlif.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/ShiftLIF/__pycache__/shiftlif.cpython-310.pyc
index f7201157a18495b2dc6efe8e58d2ebb4f81bd79b..cb0fae8d68cdb31b003c4cfcf1e3c151633c9bd4 100644
GIT binary patch
delta 41
xcmaFp@z{eqpO=@5fq{V`^~t`C+?$2?6ZA9kb5r$Gb5n~FOH)!epB9=X2ml_R4sZYf

delta 39
vcmaFt@z8@ipO=@5fq{YH&h<STxi<^(#_4C|=cei>mKLQJCvH9~G))iy4L=SK

diff --git a/modneflib/modnef/arch_builder/modules/UART/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/UART/__pycache__/__init__.cpython-310.pyc
index 7aa1604b449f29837446d2f22b53a805f4b74b37..caf794d889f13849b091312521bce0911f238d47 100644
GIT binary patch
delta 40
vcmX@ZdXAMlpO=@5fq{V`^~t`C-0n>LG5Q(#xvBc8xv52or75YC<C$^+=;aJ*

delta 38
tcmX@ddWMxdpO=@5fq{Xc<<6ds-0n=gQTiGAxvBbzrA4X5iIWqVaskpN3$_3N

diff --git a/modneflib/modnef/arch_builder/modules/UART/__pycache__/uart_1step.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/UART/__pycache__/uart_1step.cpython-310.pyc
index edc5d4ea2f5a0ebef07f8f16f02a0ad9c7a74ae9..221fc388f22d486030d600675fe0a88dc0c33cd2 100644
GIT binary patch
delta 41
xcmbPiJk^*xpO=@5fq{V`^~t`C+<STX<McD~b5r$Gb5n~FOH)!eU*+A$4FCX)4b%Vt

delta 39
vcmbPgJlU8#pO=@5fq{X6|JI(3+<SR>WArofb5r#bON&yA6E|Py-Ny|8+#C$4

diff --git a/modneflib/modnef/arch_builder/modules/UART/__pycache__/uart_classifier.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/UART/__pycache__/uart_classifier.cpython-310.pyc
index 1031f5807b624699fe65cf0e3423a0c6d8b74442..668ad4a56fed0cf22c12fcd057809647baf5af89 100644
GIT binary patch
delta 41
wcmexn{KJ?#pO=@5fq{V`^~t`C+?jm*$@&@jxvBc8xv52or75YK8~IAP0Tzo5umAu6

delta 39
ucmexi{LPp<pO=@5fq{X+^VXh?+?jm5iTWA&xvBbzrA4X5iJP1GO1S~|4-CHm

diff --git a/modneflib/modnef/arch_builder/modules/UART/__pycache__/uart_classifier_timer.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/UART/__pycache__/uart_classifier_timer.cpython-310.pyc
index 897da078dec9b61ae3e492a84cf343cd4a94f887..74223ffeb02f0c6bacebe3d309351239a8fab1e3 100644
GIT binary patch
delta 41
xcmexh^2vldpO=@5fq{V`^~t`C++BS9nfe*|xvBc8xv52or75YK7xA6r1^^kJ4o3h0

delta 39
vcmexl^1*~VpO=@5fq{WR_tu_`++BRU>G~P@xvBbzrA4X5iJO=3o#O@o_Ye&_

diff --git a/modneflib/modnef/arch_builder/modules/UART/__pycache__/uart_xstep.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/UART/__pycache__/uart_xstep.cpython-310.pyc
index e2b148f4a5ea72035d5422a1e2d2770232092533..d8d74e917b06fc1db3e7eaf426fa73fd8fba6aea 100644
GIT binary patch
delta 41
xcmbPZI?t3lpO=@5fq{V`^~t`C-23?W<McD~b5r$Gb5n~FOH)!eU*kK*4FCi@4fX&4

delta 39
vcmbPdI>(edpO=@5fq{Xc{MMe0-23==WArofb5r#bON&yA6F1-BJH`zF>MRX2

diff --git a/modneflib/modnef/arch_builder/modules/UART/__pycache__/uart_xstep_timer.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/UART/__pycache__/uart_xstep_timer.cpython-310.pyc
index 16115cffb25cfc60343cb1f99576c213c47ba0ed..051d4095987dafa41608a9c67b4557f963df061e 100644
GIT binary patch
delta 41
xcmaE6{=%F)pO=@5fq{V`^~t`C+>817Q}i?Pb5r$Gb5n~FOH)!e@8;L$0RR|#4aNWf

delta 39
vcmaE1{>+>^pO=@5fq{V`_ST+_+>7~nlk_w4b5r#bON&yA6F2YW*X98L`o9e6

diff --git a/modneflib/modnef/arch_builder/modules/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/__pycache__/__init__.cpython-310.pyc
index a51edfc92ae452cb40440d421be6bec3f0d7c74d..72a993f3c9da75932ff9f49c3e4830e5803001a4 100644
GIT binary patch
delta 40
vcmZo>Yh~lk=jG*MU|?WKeX?&OcR3S(xPC@{ZmNE2Zfa3tX-ewkex?!t*GmjM

delta 38
tcmZo=Yi8rl=jG*MU|?X_b!X2;?s6vHQ2mVj+*JL<(xTMj#K{wxN&v>33%&pV

diff --git a/modneflib/modnef/arch_builder/modules/__pycache__/io_arch.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/__pycache__/io_arch.cpython-310.pyc
index 4d96392ddfe15b191e182cd3aa304e5a41212ef9..245dadd6fd8be725f0c0f18da05b6b92521675e7 100644
GIT binary patch
delta 41
xcmcblbVZ3fpO=@5fq{V`^~t`C+<mP4Vfq>QxvBc8xv52or75YKm$9zl1^@_+4X6MB

delta 39
vcmcbjbV-RjpO=@5fq{Wx-K{+vx%*goL-aH9b5r#bON&yA6F0A5UBe9k^a%|d

diff --git a/modneflib/modnef/arch_builder/modules/__pycache__/merger.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/__pycache__/merger.cpython-310.pyc
index f21c44887ddbbadd156e313ba6709699a2eebe3d..7dc9c21898d101f136ca404b4df54ddd5f727362 100644
GIT binary patch
delta 41
xcmaE^_)?KOpO=@5fq{V`^~t`C+z&YUL-jNAb5r$Gb5n~FOH)!e|KNPj1^^gF4!HmT

delta 39
vcmaE<_*{`YpO=@5fq{YH%&k2exgT)y2J2_!=cei>mKLQJCvN`5`JN2`1>+7&

diff --git a/modneflib/modnef/arch_builder/modules/__pycache__/modnef_arch_mod.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/__pycache__/modnef_arch_mod.cpython-310.pyc
index dbd537fe9a3c3c215a0a421d119a235811f28397..f68f888e954b83aa933c071c7741bc21f8c53723 100644
GIT binary patch
delta 41
xcmdn1y<eL<pO=@5fq{V`^~t`C+?(0><McD~b5r$Gb5n~FOH)!epJw~S2LK7`4lMux

delta 39
vcmdn5y;qw%pO=@5fq{X+;`W}6+?&~WWArofb5r#bON&yA6E~k_`@{zT=~oRb

diff --git a/modneflib/modnef/arch_builder/modules/__pycache__/splitter.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/__pycache__/splitter.cpython-310.pyc
index 3d1e766bf9ec8f673aa84f47da0bd003c979baa8..fe45a069bca6b68aa714915b24fb9d5ceab09e4a 100644
GIT binary patch
delta 41
xcmbO!Gh2o`pO=@5fq{V`^~t`C+;`de!}T-rb5r$Gb5n~FOH)!ee`Qx;0|5MB4L|?@

delta 39
vcmbO&GgF2;pO=@5fq{V`_V%8Q+;`b|L-jNAb5r#bON&yA6E}ZnS78GH;35oU

diff --git a/modneflib/modnef/arch_builder/modules/__pycache__/utilities.cpython-310.pyc b/modneflib/modnef/arch_builder/modules/__pycache__/utilities.cpython-310.pyc
index d5d3137b5a44eddfb4553d6c6b4cff1640631648..3ad0426bbf0a93aece2ced26b98ee31e402b62f2 100644
GIT binary patch
delta 41
xcmdn1y<eL<pO=@5fq{V`^~t`C+=49p5&9YVxvBc8xv52or75YKHCd(z0s!}741@px

delta 39
vcmdn5y;qw%pO=@5fq{Xc@%EmL+=48;Vfq>QxvBbzrA4X5iJP@qrU(K6-<%7~

diff --git a/modneflib/modnef/modnef_driver/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/modnef_driver/__pycache__/__init__.cpython-310.pyc
index cbc1e081252e5e639174f5ae3c33e76252839f87..ea62a78b0cb5b140f962fdeb196748167aaac299 100644
GIT binary patch
delta 40
vcmaFN{FIqHpO=@5fq{V`^~t`C+<A=r0s0yFxvBc8xv52or75YC+Zba3@)Hci

delta 38
ucmaFL{Fs?LpO=@5fq{YH_PsqDx$_u#{q!^Pb5r#bON&yA6DM~t#sUE2dkkp+

diff --git a/modneflib/modnef/modnef_driver/__pycache__/modnef_drivers.cpython-310.pyc b/modneflib/modnef/modnef_driver/__pycache__/modnef_drivers.cpython-310.pyc
index d643ce5865d1ad4547af351f25fb6b123e8797fa..d16a933fde5112586c1e0be457d7f0d24a37b790 100644
GIT binary patch
delta 41
xcmbQmHHV8kpO=@5fq{V`^~t`C+-sQm!}K%qb5r$Gb5n~FOH)!eA7Kt-1OV~y4B!9&

delta 39
vcmbQkHH(WopO=@5fq{YH?Y%u4xz{lBhUjPH=cei>mKLQJCvHB*9L5L$<xLEi

diff --git a/modneflib/modnef/modnef_driver/drivers/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/modnef_driver/drivers/__pycache__/__init__.cpython-310.pyc
index 063a6d8e0a1751a5009e9a0755b0c7d49d8f2d98..d9a92dd63e49b0cfa5f9c3431d3cbbaa52dfe048 100644
GIT binary patch
delta 40
vcmeBX?`7xC=jG*MU|?WKeX?&OHybm5gnmYTZmNE2Zfa3tX-ev3Ip$md&mjwF

delta 38
tcmeBW?`G%D=jG*MU|?WacW=)|ZZ>A#F#U}D+*JL<(xTMj#K{WGxd6T}3g7?$

diff --git a/modneflib/modnef/modnef_torch/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/modnef_torch/__pycache__/__init__.cpython-310.pyc
index d06a4e4b651d758c194df0cea4679c6ff91ca428..19f7ef27a5cc1bde43e8816a7cb74b492a48f2ef 100644
GIT binary patch
delta 20
bcmcb>bb*OGpO=@5fq{X+S$h9QZdXPCG3Eqf

delta 20
bcmcb>bb*OGpO=@5fq{XcK5EZKZdXPCH1!19

diff --git a/modneflib/modnef/modnef_torch/__pycache__/executor.cpython-310.pyc b/modneflib/modnef/modnef_torch/__pycache__/executor.cpython-310.pyc
index 844b2557ea78de0a6ee1b411339dc20adb1a169f..38bb176fbb8555dadcc2f704b42be2172e1331eb 100644
GIT binary patch
delta 1946
zcmaFqIL(nSpO=@5fq{YHx7_~pP}PlmiQN4D`WgATsrsq8sYQvUDXEidxSt4fFfcGU
zGcYg|Ut(lnNMTH2N@1G3MNE700v?fiR!N2$#uPS5h8l(}wiNbWwiuRL=316o)>^h2
z#uN@ohN8q0_5~a@3|XAB8B#dsGL^6|;HqI>$heS+kpW4D3o643mEo>oX=Y5}&Spl@
z&jVG(0#Q}N7?#2-$x!sXgl7ToLIy^L5}pNoDSRpXEes|6HH;~&k_^p^E)30#HOye^
zcoqnx2rOg-s}oESgtCNEgrF>8s56Bp_w$O@gDhWN!jr|9B7((4mK4zxv0i47Sy_B3
z;#gF$rbxh5h}5ty5UpWb$e7Mp!<NpNA_<dOAO>}}K#CO96;PL{mGCSOf(yc&COwx4
zq%1`ym;r<}WhZ;`$?$||vfN_Ni%%`cPtKU!$fvFC!oa{#<jTOnpvhLG0b;9z2u%<H
zmnxD3aor|A<g;LLXJBBMyirzivKhZK<KD?t{3W^`AXx`E;R$Df#T-Fg9|i`7qacRw
zWN85-Vs#vY=%^BCV054STVSUO$QULzCPols`@_dB!pO$V!U%$FjBJc7jC{X1I49o`
zjA1mJY$-HbBt@W?DF%_oI3*d1W^R5bWXouf2{M_Dfq~%`YjRFvadA-=NFbYmf#DaM
zeo|_2Nqmt30|UcKrXp}~7)(wO(J~^^pC3U+`7$stRGCfA6H}UePehzCVe&VT)fyon
z3z>^cbBiKCY9b-ryy9Ez8L5dSxrqfu(ID}-$tOh<*v^6Sk;P<vF-;bpe;lln<HURz
z)h4eN>lR7j0ecJVF-Y1inm9Q?e6K|YNVhDAfcvkg0K|p+uBZ^i1$nKg2*lzB5yg|&
z%E}QNCnAgt3`M?^CrIeBmohLggcOxeJ|dB6+W;~M99E!EEvf`@t3U+E&qdWB7TnK8
zH6U&+h=BSxOH$SfY(gVQun9yogNPOo0Zv6=0-TgU{`&z68$AXF25@4S>?kG<38Q<G
zA8m3#X_=)c7o-&wp+!<4Rz8T32N7T~a4>+Cz-2X=i*hG(ONYzVgH#EF2(YDK0&Fu#
zyVT?c>0mw~c=49PoX#-0oJX@BT(lLb)i7i+q_9gefC{u4#uQE)P;tkY#Z<$X!UZn+
zm}?kQxWO!z*$gQ>HH@HgX*R=L<`lkOmKbJ8kq9X$i`q+AYglVonwe_ZO4zd4YuHLS
zvN&tlQusj$J4G->u$3uAFpWu)L7G7+MJPqM1ysUvN-}^7G7P@R<c%`Ix}c&6RGxAz
z;7bt)l@BFc3;1govOtcJfR-47!3-&a!3>&`lRwHB*S9h-Ft};5g7ZTWC>U>XCFLg;
zrNkE`mShxx6B;;q$$>%)%mt?aM7jf~woXviV$=s4pveL;44hm*61TWhiZaVmiy%fq
z(j)@|gI_Wu1H<I4vSP%hO@CP06q(E;CkaoVI&u;9q$NmD65=Sb1LaTdw1V`+_~iV&
zw9ND(aH=3Sp@IU$n2~{@*nojyGPArq3(p@8_R04o<tMw#&k``^(Be?ykt_mLnVRB7
y6DNO^_f!FA5hYMSs)Foh%`43<s4N0yp<8SPiFqlB#YL5q;}nz`YbG}-qyqqhc%W1O

delta 2077
zcmbQ{_|lOtpO=@5fq{YHuI0XT3$=}WiQK%t`WgATsrrefMXAM!lk2#jh;uS9FgP<X
zFcjZnWMD{POkql42EoZIq%|im;1Q{3lVqr2OkoF;9Fhz*3|VX`oW1NZEVazFEVZn)
zY&DE2T#^h$sU_?SIBFQOIA=4YaL;8bVPC*i!@Q7jArm75k_-=2h7~HqUBlAMn8KUQ
zjG~_ps)}VHqYB7?8pafUNrs}=B|Hmw7cww1l<+LzOA$yBY+)$juVGAKn;gh1Traec
z5iBE^A`E2-rHDXT!cd2a&Sip%K+Hi{zrKVgi!VhCVWuQQGouSbGh+=)ig=1dFEhyO
zEWQ*;EGk%2q*5g6p-Mz*SQm)auq|XvXRKjMXH1cXX<i@(b-F-`4Ac?GPKAlVoGJ@(
zYKmMi0|;x%SBY{dC@3g|6eVWnDdgs-q~?J6FIgEF7+(JWFZus}h$ibT_PqGig8bx+
zTb#uusRi-HiDjviv-z}5T^Se{irheiI|BoQCVP<vh^-DHG(iNae32wb%wzIFJ_{C4
z1_p-7jv|ti#rT~W_fL-EFVXb^$vVIZZ#WAq<_O~YF)%P32QmC7|K>L$R>uj5jwpc!
zM$gH&1$Jr#FfcGMu`w}1Alq*~4iQE+W)?;eWMc$D7Dm3G99)xk2*xnlOcoWIEmgyq
z!XwF$BGAhmgGiyAk_<(2HXjkPWi-tO*}=xZz;KH-IVZ8WxF`oCkjucp@QY19DYdvH
zzQ}-qfng<c5jb=}?4s<+h9X+Tdg2SnJpaiZB6f^PlaGt6)(8Vx##~&QTNDM7iiU9W
zif^%Jq$ZZ+CKeRMg2WRh&k{{wy8z0IR+Gdu+1MC){&KKQ))n(%)R|l*)(!F*ha^J^
z57=j5ztu3Nuu3u%O_?kyzSklPWQZ(?fP1m12*ibZu&5Zs1^cK3#Nq}KrIYz2<cRf~
z1S11Ok^kgK30?Mb1_p+ZqRPp=5}EZ)AcMfc1q#@rY7n;uM1Z_rR10Fky<SuY;?{!*
zh_^xMk)y~C6e`@QWr;cQ$@zI{ndwEsASGavn?bA=5YY-E+CT(2seuVc1|tTBB9O;_
zgTfBvJy?=q`@_dB!pOlS#K^&<#>mFR0!}jEU|lNt(IyXMHA_)G$UIO2D3SuP3PFTC
zhyaU$!wjqhF0090lt1~BR5)YfWG!hKM!CuE(!uqhpn;a3Ea?m>taDgv8EY7_7*g0I
z8C)1*xoeqVyc#A@X<EZ%!%)ZzDO-!wz;dA2<pjy4aM^%LTE;A<8pafEaH-1-N!K+D
zSuC>|Qb79nBpGHi%ms;aNiw9=^Y^mGFhdGusG(gYtTn7PEX_={Y$a@2>@{p999f(-
zY@m{}mnlUkMW~f2MJSC)l0ljwML0#I1ys;;fgIb+h`|@_Wv*o};ab2ADa5jPYuHo7
zQp9^f#XHvmz7z=saGA!nfFJ5YNoe^c6wHt!6wIJ0HF=YaaXl!+i`p3&7~C{j!P)E<
zcS=!aS!z*yT4qk_Ev}^e#G;h=g2a-HB5)$k0Y!rxhyZiJsSA;&K~Y`Q!@$7si&4L*
z8x)xkLm@dABmp-bVkD?KEFwMChd@&Oa#=*GzbzY~M_MwUoG2%2L{xHSU?>84*_@Gq
zq1b5h202AgQSh6CWAatGodTvD+8oL}l0_UK?}`^qnLJP4Qw5y4lt96x3d%;Td8N4p
dl|`V;cZ;ncF)t;txTtzEw}LWb-DG`*bO2Vyz$yR$

diff --git a/modneflib/modnef/modnef_torch/__pycache__/model.cpython-310.pyc b/modneflib/modnef/modnef_torch/__pycache__/model.cpython-310.pyc
index d60b785a7b6cca32485dcbd426f83c4876d8e5c1..8ff8fe4de12d2329a5991d47d728d93f45554413 100644
GIT binary patch
literal 8854
zcmd1j<>g{vU|`^v-k-itje+4Yh=YvT85kHG7#J9en-~}vQW&BbQW#U1au{=&m>3ut
za+#x87$G98xolBvP&RuMdkSL;a}GxcNRB0kGnXrh3oOT)!=1|$#goe$#mmT$%96#G
z%96#O%916J%916R%916N%916V!ZwF3O2nNZg*}C%g&~C_l`TuOnK?=<g(;Xple1KW
zk%58BEi)%oAulmE)k+~ZKP5GX%dxa1BfrQ>!LhXH@XDObRE6Ng(wxH5%sd4JuCUai
z;>`R!D+L2RLp=j7pUmXcyy8?V1@{0SU1L21-TWfmoWzpUA};6rypqJ^5-Wwo(xTLy
z%v8N%s38ued6{LpIhi>*sd{NeTrQ~vsd*`>dC8fn#a0R>`9;YYIts;kc~Dkveo9_y
znqFd2az;{VW==|Kkq*d}d8ujfU={Issij5vd0Z~3#mPmP1tppJdBs)=zWFJBu5Jp!
zetsZkY7UoEVsUDULVh0HWIdnM#Oz9i#Jm(FAx}4j<ebFfVy>5<X!Fx#yv3-c$#_dN
zCqFSIKBXwLEVU>;ttdY?zA`a4N0aFmOLA&v&MoGg{B%vWTP#UAnQ6CJi@+31F@yuA
zIEphe(@H=BoJBBZGROvG%nIccPhntSNM(p(Oks#(N@0v*Zf8hiOkql4ZsCYxNnuH0
zZDELFO<_x6Z()dHOHoYWOl41F0;}Ol;cnrL;sCRGQg~Z9qBv8OQutE@S{S3aQUp_k
zS{S0ZQ-o7QS{R~uQbbe4S{R~uQ^ZpwS{S1EQY2HPS{S1EQ>0U5S{R}PQj}BVQxsYl
zqXgR-SQw&&f*CYbZgE4x)fbdX5>GHQFmNe=!WR_OkQmEI&H%+uY7Q5df`Vg7Nl|7}
zX-R4^mx6*W9B?Us6PtpSLUBnEmx2P6o1X^Nsi2UPSeaT>tN=|7dU<(zzWFJoIjJt0
z$t7S7Pz6X@a#M2^tQ1mU3UX6(lZq1aQWeVbOL7v^Qx$SkbMuQT74p**5_58(7H}yj
z6c=P8Ys)V!DJU&bC@#p%PA$ezoSdJRmYH5!ln6>>$lAcUMgdhC<dn?(yu_kPE(HZh
z(ouk=OBZlT0{a9KpbBmQ?v4s@;f%zhl=8%))c6!cqE@g{fO{V%uYfF{mS3b0=HcR_
zke-^C3UMPS5`0rjGV)W9Vt`9Q0i2OEP~58n@pOE8QDTZF*kw+j+y)IPP*6ZTsgVo{
zzm(FV%)E4k%)HE!%*33`DsYH^w82~!pITg!nF|)y0EsIo1gDlLz+@E=vI=QAiRoMl
z3MEB}nR!S`A-NioA{0Pg11reO%mXP-ElbS7rxqk%npgr+oK}#Y7!MM`r2`!HAO#>T
z`FUIlV1LEurshI*c*4Ay3JVTMYT{B*NY2SGPKA~XP}R;LAxMNmqMb_t<bm?Uq7;qH
zyn@n_cu4Ys1WQn99?a0vg2a+kP?}6Fi3ca}cvyHrqY9z}B_NR#KuJDo+AhhDFUv@Q
z1(Z7^Rir}FNE)cfN6J3Q%+S&YM1j~G3=9k)y10mefuV$<nPCCrLIzN^Q^OF?RKmP~
zWg$ZiLp*B=Qw>8r8<=Fz1c`8fNzN4J8isf-Fv(rRSi=y{lfqKN5YL+eF1b=TQaDq%
zQn*ui=CIc=#PiiKWbxN9)-a{;_A)Utq_6}tX!2F90jENwu!oe)V9|iYqQu<PlGGwl
zx<)Ho6wopvQmLQ-O;7NYpIMTh2QD-+^Yfrt6;vi<W~YKPLaKt5LRx8FGAO@;?Nv}H
zE-fm`PfsjKRRHCb%+x$cfpm-2w=&qz&+X;^|NsB{-C|A7$xqJCbcifw&twqbW4Ogp
znpjj4Us90*WffJVWHJaaFk~{YFfeGc-{JymhzC`|Fm_r|YT+%;q{Pyc_@cy;)LYCT
z-ONQ5DYpa)OH)fz<BL*3l~QU!NyaTfh)8)+W(la!C@9I$WV*!*_a?~wx46K`4(u;Y
zfm^J_r3I-)w>aYCL76x{{uT?^Zl=7vTU?03_7)pBAr{|Ki-#0c;F3Qco-mQJc|0g1
z-;zO92hV#DS?1i-+*{1W1=+VGP}M>6MP^m%Epb$tywcowaKsl$GB7aQVuNI<TOzQm
zfm}r1Vg(m$n#{LYic@paz&eVg7#J8<GT!2hhZqwdzmnmXkA6meZmNE2Zfa3tX-cZT
zOMY@`Zfaghv3^NLYH_MQ#IO30rbkX@5|jm{AngtiCpAZ}pt4AYfq}sb)T+~9U|`_l
zXJg`GVuL_NHYPR@W@2JyVnjfWpF&(!A`nZ!0jdX!1(a+E%7UOqfiuYLOAHJQHH<C{
zu_?98HOwiD*-S-zHH<aPlAuh-P{>rnoWhjNR3rfxV+4yaXEPNk!Nr)sVl3HAMdlDO
z7lv5wS{4Yqh6Q9+7IO`Y4MU+8NM$W+3Tq8R7E2AQ4Fj0P3SvQ2*Rs{HrLajdEMQy6
zP|IG!Uc$bBV<AHsLs5JQdkwoJ!vfA4wi>2|Of?KyTp(SAOeNek?6VnC*yl1eGcqz1
zGNrHvGiY*DodahqGAbLezCl+I6v={;DJwWZ-{OR)-dh~71nYN;6<m{6aVcaLE0pGC
z=jE4c@_@A!sWLDy6sds-P*N&V2C-DY=H=v<rxxAffz*oeU;}QkWT#dZ-{J(fM?tNP
zA_cJ0f}+g45>1XGEs$<drU7SxTg=(1mA4qPZ*hThG^C{Afv_Rv#4T<J4{UgmE=V_+
z&;zkF85kHULFo|WFb*CLCLTr(Mg$gNWcts-%*DjT$ic+J#KR=PD8R(>qDl}sjf3-M
zG9##hfnp{G22k<G08Zc*pajmafB|01FxD_+F}X0r^4Bt^fY~*Sk__?8AW;wtR0A<G
z6mo$(GR#$K;N*Z(H)1w26cj@IG+A%4fGYbUMUcP1K_&r<8;s(Nqp&nFuOzc7btMxx
zJa4gr!w4;OZV6*CU;!w!co-NMI9S;jxfppE1sGZWRmmfV2o_x-w^%^op9~5bXefcJ
zVHQ|*Yy|2#f+|!7P@u$v1IC3RmZO#l#Lr@?VFCqGA!i9oGeZ_@4Py#ZFJmx+CUcbr
zJb)mH2wD}QH?0&D+%y@BKry1p1_>f1SU45gfmqVuL<Vk+X)=QY1+2Qr5hSS%A`q#@
zg@J)#H7FcF-C{OyIPfv@F`}jmuu)jk1-ShIN`4@(#iM%|B~?H?ynuBfLokCTQx&46
zgyC6G+`}7sAm8HlW|1w(JD?7G5t_$965G%{#>H4AfE>Lbn||@==|ie4y~^Ail;R!a
zI}iqibulPg)i7i+fD3l+62=;)8pdW&+lrxvVF433Pk?yLDUit2!XBBJEm;KxzYtB9
zB6W~QK$bzg1d38<<lW**%1<mxi7!Yj$+*P}acVrIKtNCBobcNGAjs>W>WPaLR0eP{
zR*52e9Ig%{R^indC{{uKbzz7VtOX@v25_EcT*wdusxp{r7;PA87_yi_bRopoOjSx?
zUn8{$!I`lb+P4Es`f0L2ib2kz)Z)|<P$RwwRDwdvQgBp)lP8z}IpP)vxc!rxntKN1
zDK=0Q&Z@x3@*lN=hABev5vVK!WmIqsf?5F}AEhv6GZ*oISg;mzFatQ&n5v|~PQ=zf
zP*CvGWCF($xanA&s>uk+T3~mA39w^%koy}~5Dw-8IamTY4v-b1lqMhtgHi(6(V)f=
z$f-pfH4F<FYZw<Y27?nEgr&*&YZKVX7@ZWbDE_(^TmwS8Ito?_A*p%A`H<c|sDFj%
zWrO;t3JO7~C8b4qD6M(09VMj&IjNxDX9==?NH-l~xQ+spmz$ak)}qN=1WMnxq);1@
z;6$00Uj*uh-I7C<OU}<NC@lev?Lg!;p`|?!vM12913T2PJFsYA<L6@JVyY5Gjs~b2
zlo$Y|F;M0K$3P0WJWpY2VW<Tqe1;TIi;cI0F@>d>v57IAp_Uok>JzDD0mah-rW%$S
zrWDo^P&LTX%(#%HmbHd?0c#CwI^#l6t4gqhZ2@}?^Fqc29H1eh6pn?=Da>i$vA(Ju
z;NS@<O3W+H%`8y>`wi0TS18E`cUU2DF<9c&FFy~|9ad04$#kHa5LAo=gMuB@?5<&0
zz>vbYkZ}QH2~!sHLdIH#bcR~S5>R-trZ7n|G=o~r0<}yaks27E9Tct$KrK{Is7VzH
z)qo3Dzal<RNeRl>n(VjOi&Bd-5(`pqF_q-sVg-*j-eS$nD={{@#afh@m!5iy6(Y};
zSO6Ns)MP2r2i1<uiK#`m7&C7%#-pbhwvzn##MGkyAh&~T5@KXw5@Hl!<N>#zs<e=k
zPJBFalGU@x$xlwqDYnyt7~xkXSCEsMSe&YmUyz!o08$DYWlBm-%P&d|c?t4G5r}vR
z%JZ6>kn)AowIVqcJWP6vtEe<DzBn<rASd+}b53d=xa2H~0EIvZ$RBKwOm~aDAhjsB
zv?NuN4N@otfK<eQRPZDvmLzAy7o`@L=9JuGgPO$!p+Umv0l<gqs}G=n5CAoj__??^
z7+IJ&m?Rka7$umhq;LlXN@n6_U|;|ZtAjIB4<iGp_yV^fYZ$W_Qy3*d*$TvBl4Pi5
zPG_iPNoS}9b!wQ3WJ{Q{SU~k`7HbM~FH<dB30n;_II-l^vZt`ra?~)Uu%xiGGS#rB
zF-bBkU<YNqg-o@apcZJ6Z4Fxqa~4MpX9`maTQ8G2LoHhkLl&nCL#$0LR}DuBsFM{B
z5~<;mWT*k@;R3T5vbaHX3VSazxTfQ(;R4rm+#otn8m<G>ZQ_sw)qzCltKq8UD0HY{
zSpcecxN0~g89;?W4J(+>4dWLof?d9&gtr7d;*!E4$xy<)fUkyaA>#spg$&J1&7hi?
zKZV1F0j`EqlA(sVL=fbzW=3(4dxS&~DmiT!N|>{RYgkekL7@O<vqIS5LWir!7nD{&
ziLfY^fq_Aj@fKT3YFTD-DkP_AGTvfN$;{PczQqPEj^dFjYjKcH36Lg{B2Ydr3I_=&
zf(UTxWUWdq$}cX;011SFh#*jkWG*hvg`{IdwGT?sx0o~YN^UXdCRP-I+FG|*GD}i(
zixNTlKpn1Iti>gX$=RCRh!lQ{3zE2tL8B7vdGW<1sRhNi*ozCY<BL+0Z?Wg5=7JgA
zka3md{L;LVTPz?|pgvYHbPVqnV+EvX46ek%1h^&_ijPOHbwn8%7>YrX9Q>f36blmz
zBO8+dBM%b`qXZ)xlMo{p69=OaqZtz$qX3f-BO8+nqZXqKQ<WTQRzWuwCA)xXeo*NN
z&Mu&85?s6RK<XY)=?H2CfM`$xDrAB-Tx!8J2TJD(_rN1qE&e(L(!N9)n1l6&PzUe8
zW`UYU;C>nps6_`;2y4HxK@2Oh1Vtq{e8B`L9B&DMG=Lk4@DVFFP#}YAU`{bmqX{MO
zFqQk=V$@3pRXMQE7$*Y*1E|aa2Ozk-VFb6?d25(vGl0fHvlwPGq%hBAsbwqy<t9*$
z0afhG&CK8`oF#>Yl|hoBgtY`zUa+LFfGVgGwiMQ8Mrnp-Mso&c25@=6lEQ|fjt!f-
z6m|@A*dgW!gY0EVVX<LAQUfysRSlaBLkW8d2dHcSx40RySW-Z_9z=u6E)WeF8e#zr
z_<#n4Qn)1{=JQB0r117~*0R>HEa0eN1tppqmIa(p7Av?+<Z@w%m8fM04+Mb*hPZ3k
zK?6gz9AG^hP(3^~Y&9GUnHU*rIBMAnCxOc{wi;Hj2+IOk=rYe{n9B^+0UG`Sxs6Yf
z0i+6?RQVI-K@*ywIVF7K-QZ+}KZ$|{$iago3bqQMh6vJZl>*WbI%v=oY?^{XW|~4u
zYFc7xPKk~JNH=7XE3+8d(T1ypHdjF%E(Kc!B|Rk+qam^o13*f^hNE=Lz+E-4PNbeC
zsv!{ds96bYB-k{+UyS-yEP7=bDMg?%_9X)&149w0pe>36m7px3q3R-GP<9dl5uzYM
z3{(ztf=8)t2|$W!NWKD>j)D+ZLuW0(LLA9C+3}zu>>{usAdA3u++r`O0Eg=>_96(Q
zNC;#$Kg4WaNLdCR4oodV9}oj&#3E4fb&D$$G@c$2T#{OFiz_iD1vDg9nv;5q4U~m5
z^T9R1Eur+(l6dg2Q)&@R0S8ENPG(;AEq;(JB&5L8xwqJ%6DFFR;7Z^YCny#nZsmr0
z5ELV~grJ%r1rInnZm~nHDQW-(A$Lw<aS7B$aOr=GEf+k2iPjt{>SbVH2m+NzR^T>}
z5Tg*229pM(3X=*WXvmm_k%f^139^0X;^AUsVd4T+28=w6Y>YfiB1~e8sC^EYHyA@S
z8H-#&fy?YyR11nlFh>|P=nqPb@$t8~;-RjLkH5teA75CSm;;qzkB?8uPmYfVk8T%%
zhX0D3Kwe`>%FoX!asinJ8U!iw0I|T;vk!<B2qOGIgg=O=0TCci-r|LhzbB{W<isPl
zi@_r+pwhYsG*|{2JuQ|41qK5PBaaS;K8LypHxD-#HwQNd2#9c~au^EnfYoY>7flB_
zNeMDeuLq$Zqw_EhWca7}77w!IEj}m@Cd-tYcZ&nuzSqmkD*{<x1a4voL)3v6AHWjQ
zEfJ_7go4if6@e<&TP(SWB^gDCRLbJy<LP#bH3&?x1VcDriX#}bA^;@783bb%nSgx5
w2AO>U`w%I)ASyKu8%SER1GTEaVa38A#0?sU0gcECFtRX%k_`m2{SxB>0Isbq<p2Nx

delta 3528
zcmbQ{I@O#npO=@5fq{Wx%lUojHd-6`ESc)pFfcHrGDI<^Fhnu6Go&%5Fs3lIa6~bu
zvZOGlu(U8VGe)tdu%@uJFhsGXu%~deFhsGZsHJeFa->1jbEojM@J4Zh_420hwQxjn
zrKqO}qzJY!MscSIr3kk$MDe7Eq=>dKMDeDGrHHpMMDe9aq)4_fMDeFMNTo=(FhmKY
z$fU@&FhmKa$fd})FhmKZXrw5mD7P?138$!}sJ1XfiL^7YFhq$4GiYkw;`Yr?@pE<a
z%}+_qnY^1V*_NS(A)Ysd1>{bU8&fz_xKg<1u-7oe^VKk9@z*fcFs1PHGBGlwFa$Gb
z^7`Fk^{ou{^K*Oo|NsC0lQ}t>8COp3<?!>7$Ve<oDNig)jZZBu$;?eG$;{7-Ps>S6
zzr~kUke(QyT9%jt;WOu^=H6m1F37&cm6=yiS`uHJS(SQ=Gp{r^J~=0`xVT7aay+M@
zniK;ALy<HC1H($jTb%I_srdMn48OeeGxBp&^%F~rQi~HO@8GoOmt|mJkY`|EDAr(L
zVBlh!%*N$e&&t5Sz{0@5;0zMiVPIe=VOYRe!?=*KmbrwfhB<{%lA(qvg{haRmZgSe
z0doy=3bQ1`LPkc08kS%NO%}f*u>Fz@3=Fqei%SwqQZ-p_v6kc)C1>1X$;nSlDdGW1
z@q?K0VEOoz%;XYH)?3WUImNeF>I)J}GQiHi#hjN~a*H(=6yimCAWwk_eGtotfq}sh
z<UkIP!<hw`_!wE3ssxbYNDpK|GBYUUK`|!-0|Ur&V2|BnU|>jLtYxlYbYY0i0C^>a
zDVwQCpr)R&hD8z-w+w|WH7qI2*-S-pHDEE2VyGM=SdJx|sYnANSHlp`1Quh>W-79U
zh`BJta@Vp#*fp#mGqad$SZx>z4L~Yu*-{v47_wMu*lZZUELIS!hOr)GMJ;;`dkUK*
z!veO247D6J93|`vI2JOLF%+eiu-9-%GA!V%VXt9Y$W+6S#Rbw<$Wp>x!!es7g?%nl
zGb1BIAqzOsarhPSfkI0T6ymJlG*hGwVspUKT#-2g149*;LT0f-X<l|-ez~R$PZ3CY
zks3%NNO6%Sh@}D|R40e=XvrypOkgc2%FHX#<Sfz#Nr0mql;nymCeP$CRs}~Cm;lF?
z76Su=Hz>A1$%%!DgNcKQhmm9QTOK9G=aU6^L+Tl87_yjZ7$q4NFfU|aWGLhUIe#US
zUx+5lEf!D)C{hAh1Gbr?urx8RB(o}YB@@{4TdX;Wm8nH&Ddd(g%!^3bY758>pybEF
z$i~RU$ipbW$nvjB-gmMpUrHuJA!iA5GblzFQ<!=ggBdiL{oFJei@X>Z7&KWS4p4@<
zqDTfD2qi^{nR%MbV0VBO7I}bFfV>HD57_%4ukQv~4JrrO7$+<6M+HD_TfnjqR5~#E
zxnZ}W$Q5LsJBUCt6eMv7$<Uwt-cAhJOhw!!j5SO(jLl3n4Dk##3=5b*0h+>C!w}C5
z3e+MOkat*$KpDOWlsh0cfIJ5B$K;Iy@+#nn0}~+U-{OQ9LnlG@fpZ+=<Szng-VDJE
zD;fPXS&G0B!&#JCoLUl}o0?ms3UV+w$$*^>HVaIE4dlqo%LJ>t05Xpal!F;3pA@vU
zWyoeO;;CWCVpzZkb~QsVxbQ3r0J)h7?B-jn$vOGOshW(CFb5k7CiExA2}v_DPOcIP
z<Ai8nE>fP{CG2DhN};!Sz^OmJxF9n-wYUhZ15AJ&#GaO4RGwIras%cbHm1ppA}*Yu
z@Z>FFOktkvCnC+rGC4~`Re&{xErop{a|%NmQ!s-jhu`D~(H5T~LvV7)%quZAy2V<Q
zn3tY<i?ui-u^{ypTVg>$YF>&aOOXXg4Rc~@(JjWzTa5AOj$kXvk55c3dN^52%xQA2
zm@%Wq<b`4ix}cyfiUWlw*bcUm)V$*SqFd|*sYSV^C8?ThkbnpWsYsgqPE1V`>@P5(
z53-RjJ{}x_P(R)W`B8v@fq{c*vaNW(BdB~~Okqf2Xl1HlPh*l~SioMxl)|!*sg@Iz
zDT{1t*h-kQIBGakm{M4Knamk#*=iWFI9(WGZECq{I8qoU^Ge7wvQ5^NFlS_&oGRhX
z$Ub?agm?nG4MPcU3Aidu;gDpgVJ;B_>1$>b2NiolA`Hz;&7f@0pTc3oP{N!gT*H#W
z2(kgpW`(f9Va(}Q#0c_TGAI==-eOBhEz3+!ovbS<UXMs;;vf|gAQd7-K_GT4hya%w
z;BaHDN-fGSF3JOm#(;=OP}ndRm*#>KFH(vHh4n4w%)F9Y%(;mb;L`6FOJ+%GZc#c&
zAE-zKS8U1InjGBFz@L0t($@%_^}*o|CcuF&6d#|QpIcB`l3ElGjezf<2mq<zV4CbD
z)oIHR%m7KROkkJX;sIqMSmCu2QnuV;OU}<r%S<nF0677q8IpSRC-+K=b4xQaFcgEb
z5eMVs_0lT!$xP514@80TIJ`Osm0I8$kF%Bu)IgC0#RWqRlMO>56S#B$=QKvJr;8fV
z>;iEcKn(_jbpnhm|EnaBYu)11lF6r}^@PD$r-o@Z1E}c9Vwla4!Zev%M!KGvl|hoB
zgtY{eh*&@^hi1kSwiK3TMrnp-Mso&c1{;u*SW;L~)Uir3V5&=D!!U;pVvaDx3}zb!
zBsE|&f=bv@P!+M-FqE*Tu!GWi4MP?)s7eN>SB5N>6i#sXfSm~zhg9M$DO^x-Zb^m|
zp2^>3G8q{sC(4R?@=7u+gf;m1{C+X&SFz}oWuz2=66Z?>Mh1o=Pzo(d0VO?_(!`>Y
zB4JRVihu}F5FrLiz??;LlOM}P%Y&-nTfC6Kbt=uwNl7h2u9YXp%Ih&IPVSMnW7M8}
zTHeyO9b_zbPGWIMd~SY9X-+D*GP=bE<shXRkX2I{7#Ng6F=+)V3m7LmD)^!%98jVG
zVQ_QFkAVRcrVK6&vAngQW(PwJlO#hHC<8NrSSgG)427V;1B-xoHQ>4&6s##sbC_zG
zOIQ}L)-cyF!UKreugDVQU8W)fkb9&U80r}`*}!=KlKw!Aid$TWCL*MeWhqW9OVwn7
zWIR)lml<=4%s?ifd2kK`1A`C9gP<l57n1}d4-*?R3nSY<0anz8BG^F25KTsrA|H^0
znEi@cK<0rtVw0CEsMHsM8p^l0!L97v)RK(+lp;_Iv<TF&De?q4f+Z<GKc@&((?cp$
ze~<~_f+z&UiU1LzAR-JzG=m6GUMd1bLlMZSw|JpV_2kr?oOtBgL5IPUfuRUgoPe9o
zAg2_A>Q)X09wsIwE=CScj!Y4D9(FEv4t5R}4i*k}4haqw4i+vJ0Zp(DP4Qdo@$o77
q$?@@%%M=6I<UzI@PClzxtYi&xJDV>gxqw<mka7hnC?_W=nE(KOX9)iQ

diff --git a/modneflib/modnef/modnef_torch/__pycache__/trainer.cpython-310.pyc b/modneflib/modnef/modnef_torch/__pycache__/trainer.cpython-310.pyc
index 6bc1a750c9e0f5dee467c12d6546e91b75b4bce1..b8b000b1c9b25553fcca13173f5f082ed6f80376 100644
GIT binary patch
literal 7815
zcmd1j<>g{vU|{(2a$ovxMFxh)APzESW?*1&U|?V<PGVqSNMVR#NMTH2%3;i9ieh2}
zv6*w2b6KKTz-*=*)+p9gmMpdumK4@r#whj_#uT<3j$Do?4n~F)_7sjB&Rni2E~r}W
zC?0o)6wVZ`7KRkAW~M0K6sBMXP3}?^Mg|5hx6GVWg}lVvR4av&qQuO+)FLj&(vpn)
zA}a;Q(xSsFb23vEf)h(~3QIHd6co6^Qj3Z+^Yg3}4D<~347hwUlT-7GQ>_%-1AKIi
z^$c|Li*$1mOHzxtob&Ta5|c};6cS5|Qgbp>^@^c}IF#mPmg(kX=H#U6r4@0xq!y&+
zrKIL1XQmcgDU{?FC1>a;6zAnZS-JTsd8ui7iABj7Nu`-NDXB#|3J|e)u!{J+)Y78-
zJT8~i;^d;tf|AVqykaW_-~1FmS2u`h3Qj(rZVC`(Ag!r6TuzC_sVNHid2oC5d{Ps$
zD-{y+Qjml^-4v2@5{rwuUV@^@Pm}Q$XK`s!QGR-2N$N{U1_lOA##^k#Ir#;tnvAzt
zN(xhQUot{OxgqxZ=BK3QB!e^~V|YBrFfcHrGDI<^Fhnt>Fh((RGJzp8CrgS{3Tp~m
z3u6=uC@xzVqF7U;Q+QH%TNtC*QutE%TNt9)Q)E&EQ-oR=qc~DzQ$$ikTNtA_Q{+-4
zQY2d#qqx8^EPsm^;*bzX#3t@%VqoA>fQ1g&c?wWDE-nQH$C8qw%%swi)M73L1zkAc
zQh)@T0xVp?8o3k{z~XtSX$pC%CFS`=*$`!13JUoJC7HRIRjEY^R$yh&goP02Qc%dr
zFD_QFQUJ??*=ePD$)M;138kc#WhSS>q`?ub01@L-P$)|+O3E(=$tLCJ=YS1>3FYRe
zfUGYqDJU(8FGwuOP*5l?DFQ3W2Ma2sfs#8|Mx(eSwLqabu`E>~zeph|wYUW0eoatN
z_@<U*<fkBo1D66Md&C>YrxxTVXJ~-^<ddJ6l3Jt*wg4QE@SsS`FH*?QOH}|XhG|CD
zq@z%hT3iA#Ku4i0F()%65#)WCNFKx<9k2`ILC%ZM1*In)uu~!Icu>B@Vmp_DLSk}q
zX;ETwr3SJs2<1Vgc?!k(X(i=}MX3szd1<Ldsd>q%TnY*qiA5<O!T8kTlFVGNi|`l$
zQ>}ndjbd6_L3(05R;MK8Cl;lEqew>~r6{v3wFt^Vb)#E=yCaH;er~rI^^)0<G8Bl-
z%D}+j3@STUFfcHbFw`(MGnO!BGZ*ocFoM`M4Dn2P%r%T4x`ru*F`KDKv4lB`C5yF&
zA&Y4N+d_sErWEEBmR{x>hIsZo<{BmtUBe7ksaL|WfO8>34MRLv9&-&y1y>CVSjMe{
zIg5J%&q9VG`x52_yeX^;8S_}cEVhLpyLj{1ka;z%H4O25HEcBu@%$-_!3>)0iI+en
zFf=2j=AdRUg&KvF%;XYq+Jfg<l$-`iw;&~%`FVMXMU@IEh%!(iKMj(a!P*h|(N@7P
zKMxeoAWOkXIJF2RTY|(r)4-~96pB+zN(&SmQxbC({9#!dY#=zpqL>91fn{N^8U+Qo
zInG7-#l^09B}MrKl|G<+pINL>S`2XpBwH(>7>1VNQQVZ0T9TSvlA5BBSXz<~N}$P!
zIXNJ|gQ}6tymU~po?M!esF0jrl&Sz%onNGomk$X{SYctS;FegNgHnzl3Kvjb1A7Bf
zZh#Y~f~|s*o|1wUyi8G0fXE_B6|fe+bX{ke*N@Dk>@}Iv&z=E+H}>h;?P532eSdGS
z$#jb;r|1@IQff(J@hujRTW>KZ7nEuW++r!IEJ!T^CFonMAP3~!Vg(1qEmm+0-eLho
z#VxLa#G=I9)RNSq;#-_x2~bgcizzSfmH>K4-(mqpOc4(Q1H&zz%;NaOvc$}s#H5_m
zTWpYkzr_v>j$7Oizkq$J$$pEaI5j7&h@XLhp-2Qoh=T}Ftyd%kVo5VFFsx)Kl4W3E
z_~oack)NBYpPHLmlvtXQs_&AYT$-DjS5mBBl95`Rst>7z^dZ%DPG%C61*IVMxIQ#7
z>J?Pp;)svW%*!l^k2eElX&D9v1|BIMMm8o6CK)gZVJk86FtIUlF;$5{%m<ghsYQA)
z&B;s*2mlHIP`H8%2?j<6hIEEnrW%GUh9dqH##&}jF_FSl!<@q0%UsKn$5g{u%TmHr
z!_v&?!VsGp!&J)}!&J*w%U;8f#k_!JAp;{r4ch`x;lZ|$v6iidZ2@}<3z)?Y7iF(u
zSioGvwvaJ~sg|RbvxFmyGljK;Ba5qsA&V`At(Uo$tAx9TEsH0GRgwX04^J(34Ob1r
z0^S<V8g5C38uo=u<_xu5g`PD`3;1fd7Vs}*Sjfo80Cs}_*bQ6@1i>^zme2xWD2t(}
zs)l8OND6z2=mN1CriF|t9J5*GGJ$j!io(Q#88kVoR)Z5OIM{GhTVMqN@YDw8!kY!U
zU@kb?Aaxd)53ME@QW8rNi&IM!a=<kts7h5(2udv}Ey@E`s_^CoSWj9`ej+4mDkwOX
zr4}WoL+ed&BLS@BmtS>Gd{SajZ8eCfQK-&~Ps=T-)vqqeFG<V+vlXJNQ*#mvic?c+
zZK{h>a}zW3GV{`Fbrh<L5=&CSN~#O;i%ZfnD{5nJv4Z`*lIa!;sMNg0m6TW%pO#;g
zn^;l=YQz<>fm(7*w^&oaSrU})tJuLsmzBaVHUk@DJ)<;DaY$AGRX(>kQ}aqe>Axfu
zoM&#af&=CjQ%QaiNb@cB;=<C@)T-1X0Z>+Ctx7G*FTTYJ_W3Q=qQt!P)LX2@8Hoj{
zMWBp!i!HGLRG!^p$t+3Dy~PPq9-m&6m~x9FDKR;_Jh3PRoGWj!fEqEk_={6Z;-P+v
zFD@y%#gtcYizPQTF;A1H2$X+raf6-X1CESaT;N7YJh)^~0Xc^a5=*xj<8QHmBJLJj
zNn%lYYRN73;)3k>qSWMD?76AAU<L<BSA1DwPLVOF7y#vea3N462Qr8kYz?f9lmW`V
zIt&a9Ec|RtEQ|t-EKDLyT#P)7Ak4=k!X(5b#>mIS#l*qL#mL4a!N}CW#atzgnw?Qh
zd}+kMzz}ka0i^*T#K6D+&)dHkz<HZJouQT^ouQU9g|U_^g|U`9g{hXOhAo9To2f`V
zg$0!JvlwSH%w?)&1F;u?$^vF^rOi;n0?qfqwY)V9S*%3{C2T3IHN3MKQrPA)H$!qD
zdktd>doN=xUk&>Ljv78t?gBR#n2Y>s7_vBPm{T}XID465m}~iK1!@IqI2Ul#@Pk!>
zWSLU9pt6EsS&ju9HG*JS)}pc+!5W@ymZGu}t_9p+xAA})A6X##!Tc<cyO|d<F*4K$
z*76i8*Ra=c)^ODDq_G4uX!0aBL)!QXkcK@hV?#0^RkAXuHbyim!MRfb+?GYp^B{%j
z?OI!;<_AI!n@*(W1CqkbVuj4~y!@h6P&;2i0oq8mH8oJMQpn7M6(V`1xk;dQO&YxU
zt$^N)whbvNMXUNj#Z^hZLJ3GBGY#C#hBu=jjVXoP(&7?@q*SnBV4ow}<>0ostrBYU
z03;373hsD-8n<A53JRHN3Mr{+iKRIuI^gcNf-Ts__-b&)uLn|D3yC#c#imA1W^sv*
z0x}6MPaquv1%*UtQ3}ej3MCmu`K9R@NC5(kG+65#s|uu$$EyzPPnZg@2mK($s1vAE
zU<FCXR|zAk4h2_GA4<VWp^6)+qp8VQWDZi#6$bAOtz^2zg-C3-SU|yYizOtrxTK0r
zA8f29AGp9Rat9ShnjivH-WKVCSfG~5E!Kjf%)F8!(9lYeKFD|um_s3DF$<^*c#EsJ
z1XN3an$ty~wvDC`xZngksK^bZO#l%X&~lU=n*46@gN#7xqut^MI}IF@;B;|IAh9eR
zT;RrMWEPj?7ggR8NK8(S2YC%H!VeNDOU!`_uvNwv6s4xzVylb?CCXbIU{e#5lW(zt
zOYkBKkh|ES+KQ||Jdo$W#zIPEaQO@>nu|0T7#L=P3S<LNfh@<u$im3N1i~Cle2fB2
zJWOni0*pLNT#Ot{B8*&&;DVY-iIIhosey;7N*GI_oD3StfHko|)fxw^7p4YjU@@mN
z)Uu>7*0QF68cCqWmS_r7Ejy^ORl`)n4sL95fE!yJB}_FO;6_kF3{x#<3{x!^s4=vF
zxrQMN)Ue{J;fAxhYnT_X)^LN1ONIq(HC*5(7Edj24Nn$m9E_!xv4$&)1JtaNW~k+>
z;R7|iQdoK!;U@6cu-5P|0GY`RDwG#8E?{5C0P4q?Gt{!yuq<E&iGrGVHQWn$Qy3RA
znlnJEiK6Nnjs<)vY$f~)1Zr4o*rXX!*hLsnnpn~$f(wLdm=`iG5KiG-2x?k__$+Wf
z3q)THOAT`xb1;J@SJhi^egSpJ5&dwm5P{MfK6(IF49(4;+8b1kK`L~x3@FQ>^jW|H
z`DsYqYOp$-#SFMmK`v+D1q^hI0jym?0hCrV!Cf=Ruo<ZQSAZ0!2mwg3pa9C9N}whd
z<1OZt%v^ADtBTtZYM_FZ!pjtp5;X<}hL`{U|NpNkf@o_Mse_~tSsj$A!DYZL=Hk-a
zB0W$xU`s4Y&rPhj#gtlji@hu}wLCttxCoT;Z?P1EdP^oCBTPXAsE+|D2w00t5|guU
zu@|L+8)BLQkP-lt`5~oCC`f$>$awbT{Gy`N<PuQ-tRyj~2-J!yvII$kGGUQ5h-CvJ
zY(az_NDCXJdv%Kw9!zLC^cG)yd{Jp0e7NK$JhKXeGb<y!4aLmE#KXwN#K$DYB*G}e
z$i>LV$i}3=$kZUfB*4hWTqT2=Sy7EG@?c<KK*_qGrY;LK>wX3W45&v0%DU;G?7)!4
zuz(TTw_vJatYJ!F0=H|JYguYpYnW?TYFNPunYrpOI30izGM14gupAXrw5<}T5<snO
z2SLJi1*K_bNNQwBElbR~B@>Sl8>o@7k_|nLd4UoPD2)|?QyaMa1{2^y93(5q$iVO!
z)SqPmrwcAnn;N4N46EXP341^X^b}m-19~`PzbF&z{6ulsuoNgV@sIIQKVGpocu+^P
zFf4$OhD?jHLD854BEXRcCcrV6&%nT-$H>4?l*_=tP^E~HHuSKV^ovJNA2L3wSDBmh
zixWH`3QAbXpl&Cu_T>cC>Ch}(90Zy_VytCNXQ*XMX8;dcFk~^-fQC1idO_U=hAgI9
zjuPetOev5q52*9QQo~rolEMPc+N|KA4i4~WDQgW|4Ldl4vnEy|CkwDcXj5Y$#)eVq
zE%2y)NxlNu=1|9=5CxD3H~~UJ2t0HJ9;8DGGSsvKQK*odpO==IURnem-v*D~fsFwd
zAhrrhU>iVzjH($dJ)jk*Cetl0MCjb&g?J|(;+-O1P<jKGK&+4wh!Z^i4)O?i5CUAy
zfs>{tCsMgn1gf66p+<p%6jIxP%9<j0Npy=lC$YFB9x`Z%UUTt+?SYj!-i!<k#h^|(
zi#Vv_5@2Fu;$aejjMbra?osq<vikY?xfOxZvL=gPkt3+9Uj*tvXflE;EltLvJW%<>
zR8#=+Xd#Hr4H_&2#Y=qrEv|UbtVn52YJB`Hp7{8}(!?C7410WhN`7*De33jzBe*93
zY7T>gq6q9yP_`^82g!o7DuMuKT2RAQ6jU99#!@&KSQwd<m>40D>lcR+mm!A(mj#zB
zpKK9Gr6yldH7H0qz+<<1d3i-3y|*}ElbS`Kfk|*&+~P?C1!{3-dTxGZ$}O(qg3Rnx
z@HkHqD0hP+5jq^4SDIT;Sp;sefb2ku0kB&^&b-B8lbfGXnv-e=8o`3Z00R#<4<iR7
L4;XSV3NZr!)Ff!D

delta 3430
zcmZp-{jJ59&&$ijz`($edwx&4y5vSa7N+_V1_p*yhA74qhA5^KxfGTZ))vMn=2Vsx
zwiNajhGxbn))bBu&K8C!wiK=u?iPk9_7wRPffT_O#wd<-hBU?$p%md3#wgAdk#vSM
zrWDZ>u@=TCt`u=FPa;LKg)xddMJh!)ogs}mMJ7eIg)xdJg)x{xQ{fh`Z+;4wpQ~F)
zQDSCZYSCnOR(ZxPlhavM>KU?`iWEzjvskiNYZ$Va7O*X3NMTH2N@4D0u3?C0&ttA(
z0?{?hDIk@4B^(Pl7c$f^#B=2_*ML-T)v$nN+)9|UxEJs&WGJ#PVP3$S!m^Mtj|I$P
zUC3C&5YL;(hRmyBtzn4gt6>wXVTk8X2N@^;CIwR%f*CZ~CckEvlUm7iiz%n*7Hd*!
zNn-IWmgLfu#9PeC1*Muola)E57<nhxap-&BVylcVDJo3`(FH}RDVprJSc+3~(uxEa
z7#NB~L4*W|kOC3XAVP+Lfng;>kpcq)!!IBGjQreG{lwCu)Z)a+uQ<H8#TghFia~Nb
zOq1<7CD_=Q*qFE&Cr5Is*0V7%Fo3W#NJ$U_149Wz4Py$UBttV}EmH|&7E=~;4buXa
zg&<KTNrn_=Nf?Vok|Bk)m${a?gcTIPwJbG^DQuDqwX8J^HB2?kH7qr(DeS#0!3>%l
zen<}I2051pM1Yb~eG$m{nk+~Pz&-<uvKO&~)CqzJArK)9B1Axh7>EFQOOx#ub8=4c
zEw<$RytK@8Z03MN1wtq?FfiQW&dE<qiHE4KHv#z@WF!x>8Y2fI2O|>`6C(t&{i%}l
z%}+t3Fg+xV$xNV(0L9D<3=Av`3=H5vv;zm?0>&D~g^aaK;Pl5_!vqS_g^Y|0H4IrS
z3qX7(Nrr`tjP)P^R<HmwSbzbXR#=cj3=|-mY(=18xWxi;0@#zc_(9D0;*!LY)cBOl
z<Pt=>&}1%>2YHmGAh9Il7FTjcYI1f#er8??BoIIep$Hrj{5f{upnxj?dw_$1fq{!j
zfJuN+gt1B%TL{3_Z!Y9iU}W^0Y{y;CC^dN>x3~!?;)+;7wsWTDmFA`vC6=T@+{;=5
z%BQ!OO7e?9>Gl?Tabam{YE^2H;ABo71xAU<T0H8EqLYJo)EVU`SMkU)>QA1*W2p-Y
zPIPzkf{ltdj884dPtNcLMWGHT3YaGU<MF9~`Tzg_{~@;+l0hDU#t;K2ia~s^5B(S!
z7}6PPIno(wInx<xxl$Nvxl<Twc~Tf_d285Hn6jCQ#8a4S7_u0$7-uugWvXQdu@`_M
zlNp@x8A@1cn41}07-DN{`Dz%lSc?ow*iu+(_+~St)U(cIZid7(J19%^GS>3fa4g`c
z;RnY*sAOR-@~dIU;;dmwVNc=cWr|_06{r=g6|CV}z)>RrRt1t}O5udc3W3U|8isi8
z8qNhAH9}w&Y(*6{LN&bEEJYP1JPUZi4&(!sV)a@4vl&vjKmu7H*E26<Vq~Zhs^zU=
zP2tXFE?Q8-SHmjCkiwYFRI~u3Y5`bVmH?C%1kouxbC_#|!3GQ0Fn~=2TLCgLg>Nnk
z$jn0J8jc#S8qONtG*B+*_bXxq<w{1s$=?O^1)V@?fGaF9Co?6nBr`uxbF#UhVttV>
z0|SF5GdM*Qff6=2)&64Czr___T#{N4Uz}K$swo7K2Ng#}pbQF5Fhv$1(?RL$7HdHf
zC^ds3zQ_t>4@Y8ha%oXwa%GVmNRcdvfaDjDdAGP=xx^#6qzF`0BCOUF0q2z>kaLRy
zL1wY%L6X%ifik$`eex4iQj2bJ!kswzwqSS+!m?W&NvXvp@rlXFw^+exvIvyUZm}mO
zC&!m1<`lVt3<fzez9hA{q(~FwK5#Y!6QI0UWX!<8kPk}9HjtFuFQiZ}!NkSL!=%K>
z!YIPT!2~Lu*#63~GBxlpRSBc!OK^rr29-gu1P}5)Ji)6$5<E*fLoI6xV=Y?>BP2-|
ziKZ~sa+EOEFxD{DaDeK^TFyMC8pc}A5~do?dT<I)h+(SbieakduH~uWUI5A!EDITH
zxoddfY@QmH1*|nZ;AF|LfUSmmA!7_vEpIJf4R02E3S$aOFJldN7Do!JB!e_VEq@Jv
z7H0}8NEB{@Kn+`szyhuso(0^X46u-K0sBIR1@$}&8O#}K*=kr9u!4jc8EV*Tcoy)c
zFfL>?XMp;kx`uNBUkY0Z{{oO9?9vP=>>>;c8B;iBv&>}zIjK;(L~wyn4a-8t1;Qzu
z3mI!!z<gFXpS6aqhFzMWhP8$zjX9V>lM9-VwL!&Lk?v$+QKfoAP>g}ot_Zls0vB;b
zMj%l{0tF>SaLT>KTwI!4WC@CNw#1_J+{B7oOsR#p*vm3g%i|M^i>yI1EXA2ssYUi6
zBOE}4BZvUS+AY@NlEmcfTkJ)t#TkhOshWaC^`P<`oIHy_nc@~VsP>A_O)M$OtSADf
zc2K&%#h#pBRFs-ra*MSjza%lINFQXR3y1(Ez#=yg%N;~`fCx{JE7<Z&OA1O$ZgGOz
z8JWo?sVV4%Enj?mkyvRSC~<?c*lbWDHGm{i8!=zDU<L+;WR}T(tl|-$%@`OMQW$H&
z=`0--a|~Gw3m6wNfa^Uba4KU?Vd`a$VXkGVWvyicH4<vrK%u^p84~I%nGxPB0_6u#
zVKMogxP})yB%^}zC^!(nvPIz_pMX3M2?lU}0u$g&1(H1kaxbXC!NSDC$im3QBsMu(
zLR!2^*dwthr981HRl&8mBr_LWNJ0D&SyT!#wG2^I-4f1#DvnQuD=w-8sj31IU>m^%
z*xG6a28MHRYZ;-|GI23Y7Lihptm4trPbtbQOD)o?%+0CdbPI5IR7fpL%t;0{#9;Xs
z)H(tsKu|$a90CbfwseMCc2LCyDz>vgc@`9!pyHh&i>a1#@&qXn#Trn~0tGNzEqg6T
z4QCBQJQFB^*KmM>S(DXovV)9rJs-%`<sc8TLQ*VeT0weZJjfN0!WNVe!5-A)LJId9
zP?F(>I6l50u_Oa$Ox)tmNh~gjPf0DyOio2lQoPDw+hB$EO;AvQ8hD_9Vqp|uVq@ZA
z5}PEeC&F6f!@$7c2d*_jiu^$C<((WZt5RR21a?VsPGWI!ZfZ$JeoB!l$V*xv0$eYE
z%2TihAV~xiB}I)OS#UHV2yldf3QAD3x){{1<Y3@pVv^-z<ly7r;8Nky;Zx`15#cTZ
zsnO)S#U3A@lCPW`AAgIZB)=#*LoY9{2&C&4M{!;rSfmKlG%OOFEGVa}4Q{Z5TCulS
Z^Gb6IDvQ8b9b^<zn1CIlI5}QU0|3HD3LyXh

diff --git a/modneflib/modnef/modnef_torch/executor.py b/modneflib/modnef/modnef_torch/executor.py
index a123421..4a75d39 100644
--- a/modneflib/modnef/modnef_torch/executor.py
+++ b/modneflib/modnef/modnef_torch/executor.py
@@ -159,7 +159,6 @@ class ModNEFExecutor():
 
     train_config = {
       "n_epoch" : 5,
-      "step_save" : -1,
       "save_best_model" : True,    
       "save_history" : True,  
       "plot" : True,
@@ -174,7 +173,6 @@ class ModNEFExecutor():
       testLoader=self.testLoader,
       validationLoader=self.validationLoader,
       n_epoch=train_config["n_epoch"],
-      step_save=train_config["step_save"],
       save_best_model=train_config["save_best_model"],
       best_model_name=self.best_model_name
     )
@@ -235,8 +233,7 @@ class ModNEFExecutor():
 
     accuracy, y_pred, y_true = self.runner.hardware_estimation(
       testLoader=self.testLoader,
-      name=he_config["name"],
-      conf_matrix=he_config["conf_matrix_class"]
+      name=he_config["name"]
     )
 
     if he_config["conf_matrix"]:
@@ -271,8 +268,7 @@ class ModNEFExecutor():
 
     accuracy, y_pred, y_true = self.runner.accuracy(
       testLoader=self.testLoader,
-      name=eval_config["name"],
-      conf_matrix=eval_config["conf_matrix_class"]
+      name=eval_config["name"]
     )
 
     if eval_config["conf_matrix"]:
@@ -315,7 +311,7 @@ class ModNEFExecutor():
 
     fpga_config = {
       "name" : "FPGA Eval",
-      "driver_file" : "driver.yml",
+      "driver_config" : "",
       "board_path" : "",
       "conf_matrix" : False,
       "conf_matrix_class" : []
@@ -331,8 +327,7 @@ class ModNEFExecutor():
       name=fpga_config["name"],
       testLoader=self.testLoader,
       board_path=fpga_config["board_path"],
-      driver_path=f"{self.output_path}/{fpga_config['driver_file']}",
-      conf_matrix=fpga_config["conf_matrix_class"]
+      driver_path=f"{self.output_path}/{fpga_config['driver_config']}"
     )
 
     if fpga_config["conf_matrix"]:
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/__pycache__/__init__.cpython-310.pyc
index bdf280b17bc37b26bd5d68650f91c7c7912058d0..627ee9f64f81474b17a5bef807e2c3c8c22526f3 100644
GIT binary patch
delta 39
vcmdnWvYmx1pO=@5fq{V`^~pxAjg0(p`WgATsrsq8sYQvUDXEiBFy;aP)!7V9

delta 37
tcmdnavXzA^pO=@5fq{WR^z25ijf}i8`WgATsrrefMXAM!lTR_`0sypn3mpIe

diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/__pycache__/modnef_torch_neuron.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/__pycache__/modnef_torch_neuron.cpython-310.pyc
index d658e975ee0e92c7e7ebc69f78326b42da79b877..f26d3e0fda92d6cb8af14786b53848a0e8ef9259 100644
GIT binary patch
delta 40
vcmdlYwM~jEpO=@5fq{V`^~pvqMOOYy{fzwFRQ=T4)S|@Fl+?`@tbf=6+%*g!

delta 38
tcmdlcwMB|6pO=@5fq{X+$7&;&A}epYenx(7s(xZ=QEGAGW-Hb|>;SRr3X}i<

diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/__init__.cpython-310.pyc
index 829c917976b2b648660d27dd7f7d5092f33094db..594e6978c95b009fc8d7f0e6a145abf78ff24ffd 100644
GIT binary patch
delta 39
ucmcb^e2<wcpO=@5fq{V`^~pxAKt}#d{fzwFRQ=T4)S|@Fl+?)?j3ofrn+xjz

delta 37
scmcb|e21AUpO=@5fq{YHkj_S~Kt|ql{fzwFRQ<%#qSWHV$ytmg0J#qe#{d8T

diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/blif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/blif.cpython-310.pyc
index c92436ae86ab9fd99d8b250f8466bc91b7936ce1..e535ed2f81ceddb68b47ab69c7383a3d115b9a04 100644
GIT binary patch
delta 2233
zcmeB+ycocj&&$ijz`(%pUuu8)F0F}tGK@tNwZo-S7*kkt_(K@n8B#b>I9nJ}I8!-N
z`I=dx1U61-Vys`qz`&5o5XG3n5XF?j7{#2zl){|ClFE|8+RT{7lERk4-ohBgn#u;z
z+{_rop2C&F-NF#Xk;0S0+rkjVnZlRC-@*{Zm7<s;kRsT^7{#5Ulp>tUlOmGJlg5%F
znj+T18pWH+m&KpPm?EAc(ZU!d0Od)h7)rG;MhQZ>(kU`6j8Q_VY$>uSaxDzaj8Vd=
zY$@_73ShQK3Ue@nrt;=nOuQTlFF6<(7?ME*$jcxMVhb=ZFgP<XFcjxZX5#i|t6{HU
zh-aT1$Sumqu{oVPi%Eiqfq_Aj@fJ%`W@7Ozj)26X#GIVeoFeYYFL_fLIVbz`8E|nH
zmL}$vWLBjXO|Ih;XXYs4nLL9}UZ0nNfuV>GMDT+MQ4pcUz`(GQp-7K`f#FxGenx(7
zs(xy2YEfcoO6uhMd@<ZHAg5R`Ffg#lOm^c}VPxB!$#2SNsl&j)aEm=JwX`Tdueb<d
zFvv_zwjz*pkuJ!5RH-8V$=3x8)j`(ZVuR=gTMZ%fL56as73Jr~r(`CV%$}?-_*5C>
z-6D_!iY!6qfgFlZU8Kptz_4wyxlkoz&g6|k{v0rmPyR2YWN!sBkE<%RD8D#9Co?-0
zVvHTg7_eDVAQq~Dw>XPZi&IPDb5nB<OimEqtLgw!gH0h<W?p7Vd`@a&cICOrP9mm^
zZj&oS0_rVm7#48UFlKS4FfU}PVT|Vl^BJ-jpdt)e3}7*?8ip)}g^V=}@!T~GSv)B$
z*-S;DAh8-oafTX(EM5^136kRzVUS>`VTfnQlL4z_&1Ndf!m5%%gdqj0lpU-9YD_%H
zDE6XhB`gd0LAZu-0slgf7z2o2$W+6SB>*Dh1xi>J2!e18;{rjb3c-a8Ah|3d5E(C2
z!>~Yj^L9~PMiW~G28NI#P<Fk=0t#bI5lHxggH{6+^sL1hi3O=eplB$v2Zf{EWEpW;
z%_1$3Buh$SN#ZTe+|=B7aFE|(1ye<!6a-CBlMBQ(yg>1Pi!C#+ptK|&$pW-!V^7O3
zDo-p*xdlqDAoDqd*%&#P*ciE(_?Sc(*_Z?vMJ7KI59WeruV<6(C60^LFk}g)@b>c6
zved9-3Dq!Uab*dYh-{XS<YQ#yo~$mV#K<?<OUjUucXExCHdB$-<Rwz-9HK?qAm6x5
zzAR;?2v6#_SkrRy6H6c&44mx2UeTJYDy{DV@<mYqNLL_;@CJndTW)?zX-+C6HG};F
z&HzH`sU`7ArI|S?sYUS+CA^Fb48<0cXGm+au`sfIViBBtTv{c72~=c2F^J6yD>*=B
zf#QDwV+}(VQx@|=Mn;Ajh6SJ)XUbyEVo71FVO+=zk&9<ZVG3r@WKNvTIC-^-{ANq}
zI>wX`KTTFhED3;9IXFv}r)H*SloWwtwkRIt30F|j!BhnD+AY?UlF9-MukgdNV?0E&
z9mpRn3=9kc{Bn~wDoXf+oDKB>1Beaxf&l{q%*l*dOmGJ?#xp@Z!i*%!9M1&t#O53&
zNk+{OKTQ^}>mgAAawa4}rGQ)w3LHp~>ra+dk>Cl0xk-#yYO=qIj6TR6$ZleW1*-(i
zO^nb01qCDH<^?KsOvY&-qtihIx+S2Ph8IhTFw3}v`8KDkdoo&qO14|<DXAri$r<r0
znTkw6>REFVD^rUgt^~UmoG^HysxmTDQd0BEH^0*GVhji6LO)H;qD+w8Ss)@CMC5=7
zP<4Wo`#@F|flI$UkSy3>Fab852b}%lA%-_j?$*+od_>E#{+6&?W<_dBKz?RkNg%9N
zDN+Twk~=vkvA8%lwIm}yrN|s)B&akl0+*PMAg&)s6?1V(QBe|z3n~Y0v1aC#7#kHK
z!YczLRRAIiK?K-A2*MTQwjz*6K$)o+oU<8N7@5477?~K^7`c8j*>f>+NO5p+@Nw~o
z81otO3G)dsiE%M9aq=;O^=S$gl}_HMEzD>&`KWex&@F-7{FJ=ZG(A|-ECQ=S3Tv?C
kAZu@N*g&!$DA_U;i-0OY1|Dt>Mgc}1Mjl2EFcxD10GcM~lK=n!

delta 3279
zcmcZ@&=<*<&&$ijz`($8>ioWRIirbuGK^UhwZnzn8B#b>I9nJ}I8!-N`I=dx_&3gJ
zVyxfCz`&5o5XG3n5XF?j7{$!V#L3LblA@f#n!?t?7{!vxn!=vK(ZbNo7{!*tnZnh=
z5XGLtox;<?5XF(go5I(^5XG6IlER-N(83tSm7<y=l**kVoXVZXk|L5K+QJ&elggXL
zm&TYPmLlH57{#9=ks<*WVNQ`uk!o>ej1qtfFr`SR$h0s<34)YB#aL2gQ{-A0ql8jf
zQ{+<=z|Ig(Wld2`Q3A6?Qka7oG}ShrW8&qIPG$#r8;U_}0R{#JX9fm_;<U-Xx%?*w
zaEmgsZ%*URVq)Z;e34g_=_SYH7rd#A9FzU{3=%{#5{pvG6N^&gQ&NkQi!uvJGV}8^
z*^9Ut7#MDGXXeGHr6!h?7Nr*7;>j;9f%A%Z7#J9ectHdoh!A05U|7jeq|3m-@GDtA
zBR@A)KM|xWaq<Vg81870Ys?rJ7+9nyhx4m!uH`pjwA2RKz@C>{T9ltxjAS0jC{4B^
zkaUp_$ZUjFAgLn0$*%+q)j_u2VuR=gTMHrdK!$Rr73Jr~r(`CVOrPu|_*5C>mm+hJ
zQ5GNq<Vb|-B25MchE0>bgen=6Cm#{=pL|bDWU_%Uue~M6D6Xp1qWt3coXqT0hzYhJ
z6Trquf>@|#-Qp}tElw?o&rQwUGr2~1uc|#r4K{^bnR%Hd@j0o9*_Edz$BCFS22GwK
z5>Rhn!?1v(hB1q?h9Qe9g;|neAyW-wJSSL`A&UV;oFR(=EW=&Hkj1c&v4$a@r-mVm
zH-#mesVEvGR>LUHP{WYLCjufta{M9;5)3sA@eFw~V3n-dOhpCAD(m4&8Q@CU!3v<p
z#Dk1tFPc-rvOoZYYZw;@ECh)$farxxH4IsTATnOCgk^yc2-h$!5Q3@@S_twELzXaD
zg>VhS0uhkeV5c+IFk~^-Y`!Kc$7o>#3N$4U0m{&~SU^#rDGCXGaL8+bVurOiBe5X0
z2oy_2b|A&JlQqO;&2RC<JNbCJ#m5(xCgzpgVo6CXNi5O=Y2eIF&5Z}g&n;FkRRm5>
zkkmCfPh7(blumB3W#$!>mc%1jhZe`|Y57IviA5>bK#3Y;GKVl5BL@>3BNr1NlL#Xl
zlNh7a<OkxxT>P+n_-L}N#Bre%-d^5XmKv5UK~Uso36%(MmXhRSWR#h#DW$~7JK0Cd
zkdbF{os{<Eby6xEB49@sIZwVNWhMj93%6L)a`F>PAQ=zra*@e~()x_tlM|(-HP9R?
zl%84=pH!Nelag8#pPQdjnv=@O$iPr+HhGD(HVfNl7J<oErB&)#phX9W0u>+3FlU4O
zmcm%WB+O9DQo~roBEqnMp@w-OV=Ze9>jH)n#w@0Vj5RFM47F@EY{3kgOo?oa3=CWf
z3JMCIc_pbud5Jj+;7G`<N(2=u3TdTz$sh(7SWQ4;QDSatN$O-iF)?FMEGbwigrw#b
z=NEyc6%@enpilx9QYguXnUxBbPAix!A*(idmXh7%O=7b3NE(aS7#J8dnQt+b<bxB(
zEp~8_85?Or3bi6vP-0+8E4am)Qc_uvdW)wxIWZ?SJ}oi1B){kuH<*_a4|35h-qJiI
zE~Lx=2Qs*rVTUF%OU8NzhGGr|1_llxK1KmX7RD+bP#V;O$`{EpFfb&e2Qw=yn3WhA
zz&R*|QIa8rDVwPXlsOr*m=-WEWLUtmkRgRRo1sWNg$0yXQW$L*@>o+?VeA^l6t-;U
zqI%UDPzGX1VYgwZVM<}kW-8JHE8&o2sAYnRRDrS>!q{4nfgo#GA=cEgmavtumax~b
zHZzv6*RV7*HZwIdg5oxXGn=I-x`d;Ivxcpiv4k^)tC>-n0W8Om#Q<`4Cb-ba2V2Q5
z$xy>4!H~iUDr&MAQaEfF@&u4&z-*8xYYnKp0IR5BSil8wUKY4GOW}|NB{gP<SUh(M
zQ!s-jcj9cu$+-&hn=@qU80(7^85kJ+(m|;fgl}<_#}{X&=cT6H;wq0X&4aMPL0=>f
zDjpOV7#LnMf>_`L!UM@)prWKG9wh1kN){}cC8@bZULYoOW?o5A6i5b?A&QtlEEf;~
zN<W(H^+n*)f~7dKDzzvTB%TJ+#abSpo0)eDL?u?-;>gS^i7!YjDnU;={LqY%8eg88
znVwM+0LqtY3=9l>0wRn&j9g3{Ok9i*EWjwhD8|UaB*MtYB*iGk#Kok-R3+k6@8jvF
zhh|DL$fcmb0AUatp2k`j7#M08Qy8<EisZrJ&m_qJ=E;G1%ut?K31bRNGh-7ZF)>R*
z(j^lp%|R0wlMO=+!vdxn28%4_EKmw!NMVv>s9{<NiYn%K7KmgPD~23vJWC2|FoP!B
z=8bZajP;O+0T=S1*r{^%%q_?-F3wEKNd@Kgw9E=veutDDWr;bZsX7V;IjM=osR|(J
zl8jV^g8a<95`}{N;!IGv;-|?5j?`P6t`*6t;D$_*3n*s5%@k0yfm<1x%thSwAYpi{
zp~sFeJX{N)g;Y^-EhvVBKzW>(kCBg&gHeEqsey}0hDm|3N&=kQv1m#LMF1!=Kp4aZ
zVNladwpb1nBcKA6v4*LJWg#;oC|nt9m}^)TGHp&$sAKXh0lBdZRG6`X(;6Zf7lFeK
zl-|%?&H;18G>~&aMGl{^z-D1(Pev<H`{x#WN@_`Baz^}0rXpi-a?MGsOf5n;g%_$S
zBQqr>HE+%4X)0cf;h<XCPm{B#0_3bp5K#ppszC&(>;o6JkeV1|RS~$MR11;?mzZDz
zTx{}yE8lpC;oBygtLgAC7jc3za}lV%pPZy-HF=|&a6PEK2yUDefm=}yAfx<1j%F?{
zDJn_=aY2>;E!NDu5@RDsOvA&hs2n5@j^+js3v4}t06C-x<Z4h8pcquqaWJqjGI=pE
zGBI-fVzS|4<S>)s;Nsxp;t?_CGY}FN&=rwn660cI;^bolYt$4d0%fXO93}Zh$r*Zi
zc}1Y?TVy%8M7`S%tO6-i!4`w;yv1Pysfg`BB~dXbJ+UzGaC0yUF!C_+Fmf>RFp4n&
E0QTkP@c;k-

diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/rblif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/__pycache__/rblif.cpython-310.pyc
index 657c4c99b4e80642a3f0866776704e1a24bd4f0a..3d8cbdeb81aab586d09ef7d40d314b0054d76798 100644
GIT binary patch
delta 40
wcmdlMvm=HppO=@5fq{V`^~pxAZT$Rc`WgATsrsq8sYQvUDXE*!@@L8c0RPVoX8-^I

delta 38
ucmdlHvn_@zpO=@5fq{YH+WC!K+xU4?^fU5vQ}q)|i&BddH=pOvlmh_nuMGMC

diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
index 9033906..959bc20 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/blif.py
@@ -16,6 +16,7 @@ from snntorch import Leaky
 import modnef.arch_builder as builder
 from modnef.arch_builder.modules.utilities import *
 from ..modnef_torch_neuron import ModNEFNeuron
+from modnef.quantizer import *
 
 class BLIF(Leaky, ModNEFNeuron):
   """
@@ -94,7 +95,8 @@ class BLIF(Leaky, ModNEFNeuron):
                beta,
                threshold=1.0,
                spike_grad=None,
-               reset_mechanism="subtract"
+               reset_mechanism="subtract",
+               quantizer=FixedPointQuantizer(8)
             ):
     """
     Initialize class
@@ -138,6 +140,8 @@ class BLIF(Leaky, ModNEFNeuron):
 
     self._init_mem()
 
+    self.quantizer = quantizer
+
     self.hardware_description = {
       "strategy" : "Parallel",
       "weight_size" : 8,
@@ -191,7 +195,7 @@ class BLIF(Leaky, ModNEFNeuron):
 
     Returns
     -------
-    tuple of Tensor
+    tuple of Tensor
       spk, mem
     """
     
@@ -242,8 +246,8 @@ class BLIF(Leaky, ModNEFNeuron):
       self.mem = torch.zeros_like(input_, device=self.mem.device)
 
     if self.hardware_estimation_flag:
-      input_.data = self.__quant(input_.data, self.hardware_description["compute_fp"])
-      self.mem.data = self.__quant(self.mem.data, self.hardware_description["compute_fp"])
+      input_.data = self.quantizer(input_.data, True)
+      self.mem.data = self.quantizer(self.mem.data, True)
 
     self.reset = self.mem_reset(self.mem)
 
@@ -300,7 +304,8 @@ class BLIF(Leaky, ModNEFNeuron):
         compute_fp=self.hardware_description["compute_fp"],
         mem_init_file=self.hardware_description["mem_init_file"],
         strategy=self.hardware_description["strategy"],
-        variable_size=self.hardware_description["variable_size"]
+        variable_size=self.hardware_description["variable_size"],
+        quantizer=self.quantizer
       )
 
     module.weight_convert(
@@ -310,30 +315,7 @@ class BLIF(Leaky, ModNEFNeuron):
     )
     return module
   
-  def __quant(self, data, fp, dtype = torch.int32):
-    """
-    Internal quantization function
-
-    Parameters
-    ----------
-    data : Tensor
-      input tensor to quantize
-    fp : int
-      fixed point position
-    type = torch.int32 : dtype
-      type use during quantization
-
-    Returns
-    -------
-    Tensor
-    """
-    
-    scale_factor = 2**fp
-    scaled_data = (data*scale_factor).to(dtype)
-    unscaled_data = scaled_data.to(torch.float32)/scale_factor
-    return unscaled_data
-  
-  def quantize_weight(self, size=-1, fp=-1, dtype=torch.int32):
+  def quantize_weight(self, dtype=torch.int32):
     """
     Quantize synaptic weight
 
@@ -348,37 +330,11 @@ class BLIF(Leaky, ModNEFNeuron):
     dtype = torch.int32 : dtype
       type use during quantization
     """
+    self.quantizer.init_from_weight(self.fc.weight)
 
-    if self.hardware_description["weight_type"]==None:
-      if self.fc.weight.min().item() < 0.0:
-        self.hardware_description["weight_type"] = "w_signed"
-      else:
-        self.hardware_description["weight_type"] = "w_unsigned"
-    
-    if size != -1:
-      self.hardware_description["weight_size"] = size
-
-    if fp==-1:
-      fp=self.hardware_description["weight_fp"]
-
-    if fp==-1:
-      w_min = self.fc.weight.min().item()
-      w_max = self.fc.weight.max().item()
-      int_part = int(max(abs(w_max), abs(w_min)))
-      
-      if int_part > 1:
-        int_part = ceil(log(int_part)/log(2))
-    
-      if self.hardware_description["weight_type"] == "w_signed":
-        self.hardware_description["weight_fp"] = self.hardware_description["weight_size"]-int_part-1
-      else:
-        self.hardware_description["weight_fp"] = self.hardware_description["weight_size"]-int_part
-    else:
-      self.hardware_description["weight_fp"]=fp
-
-    self.fc.weight.data = self.__quant(self.fc.weight.data, self.hardware_description["weight_fp"], dtype)
+    self.fc.weight.data = self.quantizer(self.fc.weight.data, True, dtype)
 
-  def quantize_parameters(self, fp=-1, dtype=torch.int32):
+  def quantize_parameters(self, dtype=torch.int32):
     """
     Quantize neuron hyper-parameters
 
@@ -391,19 +347,10 @@ class BLIF(Leaky, ModNEFNeuron):
       type use during quantization
     """
 
-    if fp ==-1:
-      if self.hardware_description["compute_fp"]==-1:
-        if self.hardware_description["weight_size"]==-1:
-          raise Exception("Impossible to fix quantization value, please fix the point position")
-        else:
-          self.hardware_description["compute_fp"] = self.hardware_description["weight_size"]
-    else:
-      self.hardware_description["compute_fp"]=fp
-
-    self.threshold.data = self.__quant(self.threshold.data, self.hardware_description["compute_fp"], dtype)
-    self.beta.data = self.__quant(self.beta.data, self.hardware_description["compute_fp"], dtype)
+    self.threshold.data = self.quantizer(self.threshold.data, True, dtype)
+    self.beta.data = self.quantizer(self.beta.data, True, dtype)
 
-  def quantize(self, weight_size=-1, weight_fp=-1, compute_fp=-1, dtype=torch.int32):
+  def quantize(self, dtype=torch.int32):
     """
     Quantize synaptic weight and neuron hyper-parameters
 
@@ -422,8 +369,8 @@ class BLIF(Leaky, ModNEFNeuron):
       type use during quantization
     """
     
-    self.quantize_weight(weight_size, weight_fp, dtype)
-    self.quantize_parameters(compute_fp, dtype)
+    self.quantize_weight(dtype)
+    self.quantize_parameters(dtype)
 
   @classmethod
   def detach_hidden(cls):
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/__init__.cpython-310.pyc
index 5d278f77bee4424d05d152cf1cc01563949e8e69..8784ad2e7036bca003137e0c71cb73799cd6bbf1 100644
GIT binary patch
delta 39
ucmaFP{F0d~pO=@5fq{V`^~pxA7)Jg~{fzwFRQ=T4)S|@Fl+?+^j3ofv@(dRM

delta 37
scmaFK{G6F9pO=@5fq{YHv(84Y7)IW7{fzwFRQ<%#qSWHV$)$`X0KvQqBme*a

diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/rslif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/rslif.cpython-310.pyc
index 5911ecaa006ced8825be3cdf2dbf92295946c564..fa994e037c22b88e892e363aa046f45f535fbab0 100644
GIT binary patch
delta 40
wcmbOdH7$xOpO=@5fq{V`^~pxAdHnoo`WgATsrsq8sYQvUDXE*c^2f^p0P;Qz<NyEw

delta 38
ucmbOhH6@BGpO=@5fq{YH&iRd8^Z0pF^fU5vQ}q)|i&BddH*e>UmjwXgbPRm}

diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/slif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/__pycache__/slif.cpython-310.pyc
index cb03f6ba9b5426f0196803f517f16ef5300f5f9f..b4b08468acd0513057cf39e35882417a2c7ababc 100644
GIT binary patch
delta 40
wcmdlPu{(k*pO=@5fq{V`^~pxAZhrn${fzwFRQ=T4)S|@Fl+?|O_@~PN0Q*J_IsgCw

delta 38
ucmdlTu`_}zpO=@5fq{YH{`rku-Tb`C`WgATsrrefMXAM!o0srUmjM9lV+_^+

diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/__init__.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/__init__.cpython-310.pyc
index b0d94c36d8094814dcfa4056130323bffdb3350d..1d1cc1681fe19e3794fdf5d7aff3a0dd34eb8c13 100644
GIT binary patch
delta 39
ucmeyx{D+w<pO=@5fq{V`^~pxAJVyR3{fzwFRQ=T4)S|@Fl+?+sjHLkNYz$)n

delta 37
scmeyv{EL|@pO=@5fq{V``TRz%JVxFO{fzwFRQ<%#qSWHV$?c4#0LkeKg8%>k

diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/rshiftlif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/rshiftlif.cpython-310.pyc
index a7676b86d5c6c7ba5eb842b2ce2246eda2ca81fc..101b33dc0d90cc1a0cb9b8dffff13f51bdcc4f93 100644
GIT binary patch
delta 40
wcmaEp@GgNXpO=@5fq{V`^~pxA=>q)O`WgATsrsq8sYQvUDXE*+3G9;x01ssi`Tzg`

delta 38
ucmaEt@FsyPpO=@5fq{YH+4+rJ(*<}l^)vEwQ}q)|i&BddH*XNwCl3Js5DlFG

diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/shiftlif.cpython-310.pyc b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/__pycache__/shiftlif.cpython-310.pyc
index 645dd493bfb9d1e28d3f52078cc80096ae869f98..a93ed5d912f83e9b970a05c3053afac8b7e89f3f 100644
GIT binary patch
delta 40
wcmZ1<yD^q4pO=@5fq{V`^~pxA`TYD@`WgATsrsq8sYQvUDXE*c@&A+q0Q~$7!2kdN

delta 38
ucmdlOyFQjHpO=@5fq{YH)%lHF^Z9u*^fU5vQ}q)|i&BddH}By8DF*=V4h?7k

diff --git a/modneflib/modnef/modnef_torch/trainer.py b/modneflib/modnef/modnef_torch/trainer.py
index de8639d..9bc8b76 100644
--- a/modneflib/modnef/modnef_torch/trainer.py
+++ b/modneflib/modnef/modnef_torch/trainer.py
@@ -19,8 +19,63 @@ import numpy as np
 
 
 class ModNEFTrainer():
-
-  def __init__(self, model, optimizer=None, loss=None, device=None, verbose=False, output_path="."):
+  """
+  ModNEF Model Trainer
+
+  Attributes
+  ----------
+  model : ModNEFModel
+    modnef network model
+  optimizer :
+    trainer optimizer
+  loss : 
+    loss function 
+  device : 
+    torch device
+  verbose : bool
+    verbose mod
+  output_path : str
+    output file path (step save or best model)
+
+  Methods
+  -------
+  train_1_epoch(trainLoader)
+    Train network for one epoch
+  train(trainLoader, testLoader, validationLoader, n_epoch, save_best_model, best_model_name)
+    Train network
+  accuracy(testLoader, name)
+    Run software inference
+  hardware_estimation(testLoader, name)
+    Run hardware estimation inference
+  fpga_accuracy(testLoader, board_path, driver_path, name)
+    Run FPGA inference
+  """
+
+  def __init__(self, 
+               model, 
+               optimizer=None, 
+               loss=None, 
+               device=None, 
+               verbose=False, 
+               output_path="."
+               ):
+    """
+    model : ModNEFModel | dict
+      ModNEF network model or dictionary description of model
+    optimizer = None
+      trainer optimizer
+      If None, setup Adam Optimizer
+    loss = None
+      loss function
+      If None, CrossEntropyLoss is used
+    device = None
+      torch device
+      If None, detected automatically depending on cuda core detected or not
+    verbose = False : bool
+      verbose mode
+    output_path = "." : str
+      output file path
+    """
     
     if type(model) != ModNEFModel:
       self.model = ModNEFModel(model)
@@ -46,33 +101,20 @@ class ModNEFTrainer():
 
     self.output_path=output_path
 
-    self.y_true = []
-    self.y_pred = []
-
-  @classmethod
-  def load_config(cls, config):
-
-    model = ModNEFModel(config["model"])
-    optimizer = torch.optim.Adam(model.parameters(), lr=(config["optimizer"]["lr"]), betas=config["optimizer"]["betas"])
-    loss = nn.CrossEntropyLoss()
-    verbose = config["verbose"]
-
-    return cls(
-      model=model,
-      optimizer=optimizer,
-      loss=loss,
-      verbose=verbose
-    )
-
-  def load_checkpoint(self, path):
-
-    checkpoint = torch.load(path)
+  def train_1_epoch(self, trainLoader):
+    """
+    Train network for one epoch
 
-    self.model.load_state_dict(checkpoint["model"])
-    self.optimizer.load_state_dict(checkpoint["optimizer"])
-    self.loss.load_state_dict(checkpoint["loss"])
+    Parameters
+    ----------
+    trainLoader
+      train dataset loader
 
-  def train_1_epoch(self, trainLoader):
+    Returns
+    -------
+    float
+      Average epoch loss
+    """
 
     epoch_loss = []
 
@@ -108,8 +150,36 @@ class ModNEFTrainer():
     return np.mean(epoch_loss)
       
     
-  def train(self, trainLoader, testLoader, n_epoch=50, validationLoader=None, step_save=-1, save_best_model=True, best_model_name = ""):
-
+  def train(self, trainLoader, testLoader, validationLoader=None, n_epoch=50, save_best_model=True, best_model_name = ""):
+    """
+    Train network
+
+    Parameters
+    ----------
+    trainLoader
+      train dataset loader
+    testLoader
+      test dataset loader
+    validationLoader=None
+      validation dataset loader
+      If None, validation is ignored
+    n_epoch=50 : int
+      number of epoch
+    save_best_model=True : bool
+      set to true if best accuracy model must be save
+    best_model_name="" : str
+      best model file name
+      if default, name = best_{model.name}
+
+    Returns
+    -------
+    (list, list, list, float)
+      average loss through epoch
+      test accuracy through epoch
+      validation accuracy through epoch
+      best accuracy
+    """
+    
     avg_loss_history = []
     acc_test_history = []
     acc_val_history = []
@@ -135,24 +205,33 @@ class ModNEFTrainer():
         acc_val, _, _ = self.accuracy(testLoader=validationLoader, name="Validation")
         acc_val_history.append(acc_val)
 
-      acc_test, self.y_pred, self.y_true = self.accuracy(testLoader=testLoader, name="Test")
+      acc_test, _, _ = self.accuracy(testLoader=testLoader, name="Test")
       acc_test_history.append(acc_test)
 
       if save_best_model and acc_test>best_acc:
         torch.save(self.model.state_dict(), f"{self.output_path}/best_{self.model.name}")
         best_acc = acc_test
 
-      if step_save!=-1 and epoch%step_save==0:
-        checkpoint = {
-          "model" : self.model.state_dict(),
-          "loss" : self.loss.state_dict(),
-          "optimizer" : self.optimizer.state_dict()
-        }
-        torch.save(checkpoint, f"{self.output_path}/{self.model.name}_step_save")
-
     return avg_loss_history, acc_val_history, acc_test_history, best_acc
   
-  def __run_accuracy(self, testLoader, name, conf_matrix=[]):
+  def __run_accuracy(self, testLoader, name):
+    """
+    Run inference
+
+    Parameters
+    ----------
+    testLoader
+      test dataset loader
+    name : str
+      name of inference
+
+    Returns
+    -------
+    (float, list, list)
+      accuracy
+      predicted class
+      true class
+    """
 
     y_true = []
     y_pred = []
@@ -191,7 +270,24 @@ class ModNEFTrainer():
     return (correct/total), y_pred, y_true
 
 
-  def accuracy(self, testLoader, name="Test", conf_matrix=[]):
+  def accuracy(self, testLoader, name="Test"):
+    """
+    Run software inference
+
+    Parameters
+    ----------
+    testLoader
+      test dataset loader
+    name="Test" : str
+      name of inference
+
+    Returns
+    -------
+    (float, list, list)
+      accuracy
+      predicted class
+      true class
+    """
 
     accuracy = 0
     y_pred = []
@@ -199,22 +295,62 @@ class ModNEFTrainer():
 
     self.model.eval()
 
-    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name, conf_matrix=conf_matrix)
+    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name)
 
     return accuracy, y_pred, y_true    
   
-  def hardware_estimation(self, testLoader, name="Hardware Estimation", conf_matrix=[]):
+  def hardware_estimation(self, testLoader, name="Hardware Estimation"):
+    """
+    Run hardware estimation inference
+
+    Parameters
+    ----------
+    testLoader
+      test dataset loader
+    name="Hardware Estimation" : str
+      name of inference
+
+    Returns
+    -------
+    (float, list, list)
+      accuracy
+      predicted class
+      true class
+    """
+
     accuracy = 0
     y_pred = []
     y_true = []
 
     self.model.hardware_estimation()
 
-    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name, conf_matrix=conf_matrix)
+    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name)
 
     return accuracy, y_pred, y_true    
   
-  def fpga_accuracy(self, testLoader, board_path, driver_path = "./driver.yml", name="FPGA eval", conf_matrix=[]):
+  def fpga_accuracy(self, testLoader, board_path, driver_path = "./driver.yml", name="FPGA eval"):
+    """
+    Run FPGA inference
+
+    Parameters
+    ----------
+    testLoader
+      test dataset loader
+    board_path : str
+      path to FPGA UART path
+    driver_path = "./driver.yml" : str
+      driver configuration file
+    name = "FPGA eval" : str
+      name of inference
+
+    Returns
+    -------
+    (float, list, list)
+      accuracy
+      predicted class
+      true class
+    """
+
     accuracy = 0
     y_pred = []
     y_true = []
@@ -225,7 +361,7 @@ class ModNEFTrainer():
 
     self.device = torch.device("cpu")
 
-    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name, conf_matrix=conf_matrix)
+    accuracy, y_pred, y_true = self.__run_accuracy(testLoader=testLoader, name=name)
 
     self.device = last_device
 
diff --git a/modneflib/modnef/quantizer/__init__.py b/modneflib/modnef/quantizer/__init__.py
new file mode 100644
index 0000000..71f898c
--- /dev/null
+++ b/modneflib/modnef/quantizer/__init__.py
@@ -0,0 +1,5 @@
+
+from .quantizer import Quantizer
+from .fixed_point_quantizer import FixedPointQuantizer
+from .min_max_quantizer import MinMaxQuantizer
+from .dynamic_scale_quantizer import DynamicScaleFactorQuantizer
\ No newline at end of file
diff --git a/modneflib/modnef/quantizer/dynamic_scale_quantizer.py b/modneflib/modnef/quantizer/dynamic_scale_quantizer.py
new file mode 100644
index 0000000..d5e5403
--- /dev/null
+++ b/modneflib/modnef/quantizer/dynamic_scale_quantizer.py
@@ -0,0 +1,37 @@
+import torch
+import numpy as np
+from .quantizer import Quantizer
+
+class DynamicScaleFactorQuantizer(Quantizer):
+
+  def __init__(self, bitwidth, signed=None, is_initialize=False):
+
+    super().__init__(
+      bitwidth=bitwidth,
+      signed=signed,
+      is_initialize=is_initialize
+    )
+    
+    self.scale_factor = 0
+
+  def init_from_weight(self, weight, rec_weight=torch.zeros((1))):
+
+    self.is_initialiaze = True
+
+    if self.signed==None:
+      self.signed = torch.min(weight.min(), rec_weight.min())<0.0
+
+    self.scale_factor = torch.max(torch.abs(weight).max(), torch.abs(rec_weight).max())/2**(self.bitwidth-1)
+
+
+  def _quant(self, data, unscale, dtype) -> torch.Tensor:
+    
+    born_min = -int(self.signed)*2**(self.bitwidth-1)
+    born_max = 2**(self.bitwidth-int(self.signed))-1
+
+    scaled = torch.clamp(data/self.scale_factor, min=born_min, max=born_max).to(dtype)
+
+    if unscale:
+      return scaled*self.scale_factor
+    else:
+      return scaled
\ No newline at end of file
diff --git a/modneflib/modnef/quantizer/fixed_point_quantizer.py b/modneflib/modnef/quantizer/fixed_point_quantizer.py
new file mode 100644
index 0000000..8471872
--- /dev/null
+++ b/modneflib/modnef/quantizer/fixed_point_quantizer.py
@@ -0,0 +1,56 @@
+import torch
+from math import ceil, log
+from .quantizer import Quantizer
+
+class FixedPointQuantizer(Quantizer):
+
+  def __init__(self, bitwidth, fixed_point=-1, signed=None, is_initialize=False):
+
+    if bitwidth==-1 and fixed_point==-1:
+      raise Exception("You must fix at least one value to compute the other one")
+    
+    super().__init__(
+      bitwidth=bitwidth,
+      signed=signed,
+      is_initialize=is_initialize
+    )
+
+    self.fixed_point = fixed_point
+    self.scale_factor = 2**fixed_point
+
+  def init_from_weight(self, weight, rec_weight=torch.zeros((1))):
+
+    self.is_initialiaze = True
+
+    if not torch.is_tensor(weight):
+      weight = torch.Tensor(weight)
+
+    if not torch.is_tensor(rec_weight):
+      rec_weight = torch.Tensor(rec_weight)
+
+    if self.signed==None:
+      self.signed = torch.min(weight.min(), rec_weight.min())<0.0
+
+    if self.fixed_point==-1:
+      int_max = int(torch.max(torch.abs(weight).max(), torch.abs(rec_weight).max()))
+
+      if int_max>1:
+        int_part_size = ceil(log(int_max)/log(2))+int(self.signed)
+      else:
+        int_part_size = int_max
+      
+      if self.bitwidth==-1:
+        self.bitwidth = int_part_size+self.fixed_point
+      elif self.fixed_point==-1:
+        self.fixed_point = self.bitwidth-int_part_size
+        self.scale_factor = 2**self.fixed_point
+
+
+  def _quant(self, data, unscale, dtype) -> torch.Tensor:
+
+    scaled = torch.round(data*self.scale_factor).to(dtype)
+    
+    if unscale:
+      return (scaled.to(torch.float32))/self.scale_factor
+    else:
+      return scaled
\ No newline at end of file
diff --git a/modneflib/modnef/quantizer/min_max_quantizer.py b/modneflib/modnef/quantizer/min_max_quantizer.py
new file mode 100644
index 0000000..cc02ff3
--- /dev/null
+++ b/modneflib/modnef/quantizer/min_max_quantizer.py
@@ -0,0 +1,40 @@
+import torch
+from math import ceil, log
+from .quantizer import Quantizer
+
+class MinMaxQuantizer(Quantizer):
+
+  def __init__(self, bitwidth, signed=None, is_initialize=False):
+
+    super().__init__(
+      bitwidth=bitwidth,
+      signed=signed,
+      is_initialize=is_initialize
+    )
+    
+    self.x_min = 0
+    self.x_max = 0
+    self.b_min = 0
+    self.b_max = 0
+
+  def init_from_weight(self, weight, rec_weight=torch.zeros((1))):
+
+    self.is_initialiaze = True
+
+    if self.signed==None:
+      self.signed = torch.min(weight.min(), rec_weight.min())<0.0
+
+    self.x_max = torch.max(torch.abs(weight).max(), torch.abs(rec_weight).max())
+    self.x_min = -self.x_max
+
+    self.b_max = 2**(self.bitwidth-int(self.signed))-1
+    self.b_min = -int(self.signed)*self.b_max
+
+  def _quant(self, data, unscale, dtype) -> torch.Tensor:
+
+    scaled = ((data-self.x_min)/(self.x_max-self.x_min)*(self.b_max-self.b_min)+self.b_min).to(dtype)
+    
+    if unscale:
+      return (scaled-self.b_min)/(self.b_max-self.b_min)*(self.x_max-self.x_min)+self.x_min
+    else:
+      return scaled
\ No newline at end of file
diff --git a/modneflib/modnef/quantizer/quantizer.py b/modneflib/modnef/quantizer/quantizer.py
new file mode 100644
index 0000000..f8c6ae8
--- /dev/null
+++ b/modneflib/modnef/quantizer/quantizer.py
@@ -0,0 +1,29 @@
+import torch
+import numpy as np
+
+class Quantizer():
+  
+  def __init__(self, bitwidth, signed=None, is_initialize=False):
+    self.bitwidth = bitwidth
+    self.is_initialiaze = is_initialize
+    self.signed = signed
+    pass
+
+  def init_from_weight(self, weight, rec_weight=torch.zeros((1))):
+    raise NotImplementedError()
+
+  def __call__(self, data, unscale=False, dtype=torch.int32):
+    
+    if isinstance(data, (int, float)):
+      return self._quant(data=torch.tensor(data), unscale=unscale, dtype=dtype).item()
+    elif isinstance(data, list):
+      return self._quant(data=torch.tensor(data), unscale=unscale, dtype=dtype).tolist()
+    elif isinstance(data, np.ndarray):
+      return self._quant(data=torch.tensor(data), unscale=unscale, dtype=dtype).numpy()
+    elif torch.is_tensor(data):
+      return self._quant(data=data, unscale=unscale, dtype=dtype).detach()
+    else:
+      raise TypeError("Unsupported data type")
+
+  def _quant(self, data, unscale, dtype) -> torch.Tensor:
+    pass
\ No newline at end of file
diff --git a/modneflib/modnef/templates/template_model.json b/modneflib/modnef/templates/template_model.json
index ff70dad..4b91fca 100644
--- a/modneflib/modnef/templates/template_model.json
+++ b/modneflib/modnef/templates/template_model.json
@@ -85,7 +85,6 @@
   "train" : {
     "name" : "Train",
     "n_epoch" : 5,
-    "step_save" : -1,
     "save_best_model" : true,
     "save_history" : true,
     "plot" : true
@@ -104,7 +103,7 @@
   },
 
   "vhdl" : {
-    "best_model" : false,
+    "use_best_model" : false,
     "driver_config" : "driver.yml",
     "file_name" : null
   },
@@ -112,7 +111,7 @@
   "fpga" : {
     "name" : "FPGA Evaluation",
     "driver_file" : "driver.yml",
-    "board_path" : "/dev/serial/by-id/usb-FTDI_FT232R_USB_UART_A10OOCRD-if00-port0",
+    "board_path" : "board path here",
     "conf_matrix" : true
   },
 
diff --git a/modneflib/setup.py b/modneflib/setup.py
index 5e185d7..23a3b1f 100644
--- a/modneflib/setup.py
+++ b/modneflib/setup.py
@@ -6,7 +6,7 @@ setup(
         version = "1.0.0",
         description="ModNEF python librairy",
         author="Aurelie Saulquin",
-        install_requires=["networkx", "matplotlib", "pyyaml"],
+        install_requires=["networkx", "matplotlib", "pyyaml", "torch", "snntorch"],
         include_package_data=True,
         entry_points={
           "console_scripts": [
diff --git a/q b/q
new file mode 100644
index 0000000..fbd676e
--- /dev/null
+++ b/q
@@ -0,0 +1,30 @@
+218057c (HEAD -> reccursive) HEAD@{0}: commit (amend): add documetnation
+5bcbbb3 HEAD@{1}: commit: add documetnation
+b1eb462 HEAD@{2}: checkout: moving from 0bc064ec20991666693711d2536c114401b1cd4b to reccursive
+0bc064e (origin/main, origin/HEAD, main) HEAD@{3}: checkout: moving from reccursive to origin
+b1eb462 HEAD@{4}: checkout: moving from 6dd9be60953a44154688db676a661977eab27b18 to reccursive
+6dd9be6 HEAD@{5}: commit: add quantizer and documentation
+1d84d66 (origin/reccursive) HEAD@{6}: commit: update cache
+486cfb8 HEAD@{7}: pull (start): checkout 486cfb8a8f7c9f7adffc448a86a0bff2d443e317
+b1eb462 HEAD@{8}: commit: commit
+dd574b3 HEAD@{9}: pull: Fast-forward
+22bd59d HEAD@{10}: commit: add modnef torch and change srlif to shiftlif
+8aa6a16 HEAD@{11}: pull: Fast-forward
+00bcb74 HEAD@{12}: commit: remove print on xstep driver
+c11f4b5 HEAD@{13}: pull: Fast-forward
+0542204 HEAD@{14}: commit: add rblif modneftorch
+6e3e1ec HEAD@{15}: pull: Fast-forward
+cbf372b HEAD@{16}: commit: fix bug modnef torch
+a4f3d71 HEAD@{17}: pull: Fast-forward
+3348615 HEAD@{18}: checkout: moving from main to reccursive
+0bc064e (origin/main, origin/HEAD, main) HEAD@{19}: pull: Fast-forward
+1b686a0 HEAD@{20}: checkout: moving from reccursive to main
+3348615 HEAD@{21}: pull: Fast-forward
+f8432d6 HEAD@{22}: pull: Fast-forward
+2cc1813 HEAD@{23}: pull: Fast-forward
+cf0d4dd HEAD@{24}: commit: add test bug fix classifer
+9c9df18 HEAD@{25}: pull: Fast-forward
+55646fa HEAD@{26}: commit: bug classifier
+fc87783 HEAD@{27}: commit: add blif test and bug fix
+12ae129 HEAD@{28}: checkout: moving from main to reccursive
+1b686a0 HEAD@{29}: clone: from gitlab-ssh.univ-lille.fr:bioinsp/ModNEF.git
-- 
GitLab