Commit 12db91cc authored by Aurélie saulquin

add quantizer and documentation

parent 1d84d667
Showing 280 additions and 55 deletions
@@ -16,3 +16,4 @@ examples/*/*.vhd
examples/debugger_example/*.txt
examples/*/__pycache__
__pycache__/
# JSON File Configuration Explanation
This file describes all the different JSON properties.
A template file can be found inside **modneflib/modnef/template**. Additional commentary, including non-required properties, is provided in this file.
## ModNEF Network Description
The top-level ModNEF network model properties are described below; a short loading sketch follows the block.
```json
{
"name" : "model_name", // name of network model
"input" : 2312, // number of input features
"num_class" : 10, // number of output class
// insert layer description
// layer description are described in next section
"hardware" : { // ModNEF VHDL hardware description
"clock_name" : "clock", // name of clock signal inside VHDL file
"clock_freq" : 125000000, // clock frequency of FPGA internal clock
"baud_rate" : 921600, // UART baud rate
"txd" : "uart_txd", // UART txd signal name
"rxd" : "uart_rxd", // UART rxd signal name
"queue_read_depth" : 4096, /*Optional*/ // UART component queue read depth, can be ignored and will be computed during hardware estimation
"queue_write_depth" : 4096 /*Optional*/ // UART component queue write depth, can be ignored and will be computed during hardware estimation
}
}
```
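Note that the `//` comments in these blocks are documentation only: standard JSON does not allow comments, so they must not appear in the actual configuration file. As a quick illustration, the following Python sketch loads such a file and checks the required top-level properties; the file name and the key list are assumptions derived from the description above, not part of the ModNEF API.
```python
import json

# Hypothetical file name: point this at your own configuration file.
with open("my_model.json") as f:
    config = json.load(f)

# Required top-level properties, as listed in the block above.
for key in ("name", "input", "num_class", "hardware"):
    assert key in config, f"missing required property: {key}"

# Optional UART queue depths: when absent, they are computed during hardware estimation.
hw = config["hardware"]
queue_read = hw.get("queue_read_depth")    # None -> computed later
queue_write = hw.get("queue_write_depth")  # None -> computed later
```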
### Neuron Description
In this section, we describe the configuration properties of each supported neuron model, for both feed-forward and recurrent layers. A complete example that combines the network description with a layer block is given at the end of this section.
#### Beta LIF Neuron Model Description
Feed-forward layer
```json
{
"name of layer" : { // name of layer (will be the name of VHDL component implementation)
"model" : "blif", // neuron model
"in_features" : 64, // number of input features
"neurons" : 10, // number of simulated/emeulated neurons
"beta" : 0.9, // membrane decay value
"threshold" : 0.8, // threshold value
"reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
"hardware" : { /*Optional*/ // hardware description
"strategy" : "Parallel", /*Optional*/ // hardware emulation strategy, Parallel by default
"weight_size" : 8, /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
"weight_fp" : -1, /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
"weight_type" : null, /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
"compute_fp" : -1, /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
"mem_init_file" : null, /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
"variable_size" : -1 /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
}
}
}
```
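For reference, the automatic defaults for `weight_type` and `weight_fp` roughly follow the logic sketched below, distilled from the fixed-point handling in the ModNEF architecture code; treat it as illustrative rather than a specification.
```python
from math import ceil, log2

def default_weight_format(weights, weight_size=8):
    """Illustrative derivation of weight_type and weight_fp from the weight values."""
    w_min = min(min(row) for row in weights)
    w_max = max(max(row) for row in weights)

    # A signed representation is only needed when negative weights exist.
    weight_type = "w_signed" if w_min < 0 else "w_unsigned"

    # Bits needed for the integer part of the largest weight magnitude.
    int_part = int(max(abs(w_min), abs(w_max)))
    if int_part in (0, 1):
        int_bits = int_part
    else:
        int_bits = ceil(log2(int_part))

    # Remaining bits hold the fractional part (minus the sign bit if signed).
    weight_fp = weight_size - int_bits - (1 if weight_type == "w_signed" else 0)
    return weight_type, weight_fp
```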
Recurrent layer
```json
{
"name of layer" : { // name of layer (will be the name of VHDL component implementation)
"model" : "rblif", // neuron model
"in_features" : 64, // number of input features
"neurons" : 10, // number of simulated/emeulated neurons
"beta" : 0.9, // membrane decay value
"threshold" : 0.8, // threshold value
"reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
"hardware" : { /*Optional*/ // hardware description
"strategy" : "Parallel", /*Optional*/ // hardware emulation strategy, Parallel by default
"weight_size" : 8, /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
"weight_fp" : -1, /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
"weight_type" : null, /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
"compute_fp" : -1, /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
"mem_init_file" : null, /*Optional*/ // vivado memory file for feed forward weight, null by default. If null, file = layer_name_mem.mem
"mem_init_file_rec" : null, /*Optional*/ // vivado memory file for recurrent weight, null by default. If null, file = layer_name_mem_rec.mem
"variable_size" : -1 /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
}
}
}
```
#### Shift Register based LIF Neuron Model Description
Feed-forward layer
```json
{
"name of layer" : { // name of layer (will be the name of VHDL component implementation)
"model" : "shiftlif", // neuron model
"in_features" : 64, // number of input features
"neurons" : 10, // number of simulated/emeulated neurons
"beta" : 0.9, // membrane decay value
"threshold" : 0.8, // threshold value
"reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
"hardware" : { /*Optional*/ // hardware description
"strategy" : "Parallel", /*Optional*/ // hardware emulation strategy, Parallel by default
"weight_size" : 8, /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
"weight_fp" : -1, /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
"weight_type" : null, /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
"compute_fp" : -1, /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
"mem_init_file" : null, /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
"variable_size" : -1 /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
}
}
}
```
Recurrent layer
```json
{
"name of layer" : { // name of layer (will be the name of VHDL component implementation)
"model" : "rshiftlif", // neuron model
"in_features" : 64, // number of input features
"neurons" : 10, // number of simulated/emeulated neurons
"beta" : 0.9, // membrane decay value
"threshold" : 0.8, // threshold value
"reset_mechanism" : "subtract", // reset mechanism, can be subtract or zero
"hardware" : { /*Optional*/ // hardware description
"strategy" : "Parallel", /*Optional*/ // hardware emulation strategy, Parallel by default
"weight_size" : 8, /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
"weight_fp" : -1, /*Optional*/ // weight fixed point position, -1 by default. If default, computed automatically depending weight bitwidth weight values
"weight_type" : null, /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
"compute_fp" : -1, /*Optional*/ // neuron hyper parameters fixed point value, -1 by default. If -1, compute_fp=weight_size
"mem_init_file" : null, /*Optional*/ // vivado memory file for feed forward weight, null by default. If null, file = layer_name_mem.mem
"mem_init_file_rec" : null, /*Optional*/ // vivado memory file for recurrent weight, null by default. If null, file = layer_name_mem_rec.mem
"variable_size" : -1 /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
}
}
}
```
#### Simplified LIF Neuron Model Description
Feed-forward layer
```json
{
"name of layer" : { // name of layer (will be the name of VHDL component implementation)
"model" : "slif", // neuron model
"in_features" : 64, // number of input features
"neurons" : 10, // number of simulated/emeulated neurons
"threshold" : 0.8, // threshold value
"leak" : 0.015, // memory leakage value
"min" : 0.0, // minimal memory voltage value
"rest" : 0.0, // resting memory potential
"hardware" : { /*Optional*/ // hardware description
"strategy" : "Parallel", /*Optional*/ // hardware emulation strategy, Parallel by default
"weight_size" : 8, /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
"weight_type" : null, /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
"mem_init_file" : null, /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
"variable_size" : -1 /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
}
}
}
```
Recurrent layer
```json
{
"name of layer" : { // name of layer (will be the name of VHDL component implementation)
"model" : "rslif", // neuron model
"in_features" : 64, // number of input features
"neurons" : 10, // number of simulated/emeulated neurons
"threshold" : 0.8, // threshold value
"leak" : 0.015, // memory leakage value
"min" : 0.0, // minimal memory voltage value
"rest" : 0.0, // resting memory potential
"hardware" : { /*Optional*/ // hardware description
"strategy" : "Parallel", /*Optional*/ // hardware emulation strategy, Parallel by default
"weight_size" : 8, /*Optional*/ // synaptic quantized weight bitwidth, 8 by default
"weight_type" : null, /*Optional*/ // weight type, can be null, w_signed, w_unsigned. If null, determined depending on weights values
"mem_init_file" : null, /*Optional*/ // vivado memory file, null by default. If null, file = layer_name_mem.mem
"mem_init_file_rec" : null, /*Optional*/ // vivado memory file for recurrent weight, null by default. If null, file = layer_name_mem_rec.mem
"variable_size" : -1 /*Optional*/ // computational variable bitwidth, -1 by default. If -1, computed during hardware estimation
}
}
}
```
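Putting the pieces together, a complete network description is simply the top-level properties with the layer blocks inserted between them. The sketch below builds such a configuration as a Python dictionary and writes it to disk; the layer name and all values are placeholders, and the `//` comments from the templates above are omitted because plain JSON does not accept them.
```python
import json

config = {
    "name": "example_model",   # placeholder model name
    "input": 2312,
    "num_class": 10,

    # One feed-forward Beta LIF layer, inserted between the top-level
    # properties and the hardware description (values are placeholders).
    "fc1": {
        "model": "blif",
        "in_features": 2312,
        "neurons": 10,
        "beta": 0.9,
        "threshold": 0.8,
        "reset_mechanism": "subtract"
        # "hardware" block omitted: hardware defaults are used
    },

    "hardware": {
        "clock_name": "clock",
        "clock_freq": 125000000,
        "baud_rate": 921600,
        "txd": "uart_txd",
        "rxd": "uart_rxd"
    }
}

with open("example_model.json", "w") as f:
    json.dump(config, f, indent=2)
```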
## ModNEF Project Description
A ModNEF project (experiment) file combines the model description above with optional training, evaluation, hardware estimation, VHDL generation and FPGA evaluation settings. A short loading sketch follows the block.
```json
{
"exp_name" : "template",
"model": {
// describe your model here
},
"optimizer" : { /*Optional*/ // Adam optimizer properties, optional. If no optimizer properties, default optimizer is configured with followed values
"lr" : 1e-4, // learning rate
"betas" : [0.9, 0.99] // betas values
},
"best_model" : "best_my_model", /*Optional*/ // best model file name, optional. By default best_{model.name}
"verbose" : true, /*Optional*/ // verbose mode, by default true
"device" : "auto", /*Optional*/ // troch device, by default auto. If auto, device is automatically detected
"train" : { // Train configuration. Optional if you don't want to train with ModNEF Executor.
"name" : "Train", // name printed during trian progression
"n_epoch" : 5, // number of epoch
"save_best_model" : true, /*Optional*/ // save best accuracy model, by default true
"save_history" : true, /*Optional*/ // save average loss, test accuracy and validation accuracy through epoch, by default true
"plot" : true /*Optional*/ // save matplotlib plot, by default true
},
"eval" : { // Software evaluation configuration, Optional if you don't want to run software evaluation
"name" : "Evaluation", // name printed during evaluation progression
"use_best_model" : true, /* Optional*/ // use best model for evaluation, by default true
"conf_matrix" : true /* Optional*/ // generate matplotlib confusion matrix, by default true
},
"hardware_estimation" : { // Hardware estimation configuration, Optional if you don't want to run hardware estimation
"name" : "Hardware Estimation", // name printed during evaluation progression
"use_best_model" : true, /* Optional*/ // use best model for evaluation, by default true
"conf_matrix" : true /* Optional*/ // generate matplotlib confusion matrix, by default true
},
"vhdl" : { // VHDL generation configuration, Optional if you don't want to run vhl generation
"use_best_model" : false, /* Optional*/ // use best model for evaluation, by default true
"driver_config" : "driver.yml", /* Optional*/ // driver configuration file, by default driver.yml
"file_name" : null /* Optional*/ // VHDL file name, if null, model.name.vhd
},
"fpga" : { // FPGA evaluation configuration, Optional if you don't want to run FPGA evaluation
"name" : "FPGA Evaluation", // name printed during evaluation progression
"driver_file" : "driver.yml", // driver configuration file
"board_path" : "board path here", // path to UART board
"conf_matrix" : true /* Optional*/ // generate matplotlib confusion matrix, by default true
},
"confusion_matrix" : { // Necessary if you want to generate configuration matrix
"class" : ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] // Name of classes
}
}
```
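As with the network description, the optional properties fall back to the documented defaults when they are absent. The sketch below shows one way those defaults could be resolved after loading the project file; it is illustrative only and not part of the ModNEF Executor API (in particular, the torch device detection is an assumption about what "auto" means).
```python
import json
import torch

with open("project.json") as f:   # placeholder file name
    project = json.load(f)

model_name = project["model"]["name"]

# Documented defaults for the optional top-level properties.
best_model = project.get("best_model", f"best_{model_name}")
verbose = project.get("verbose", True)

device = project.get("device", "auto")
if device == "auto":
    # Assumption: "auto" selects a CUDA device when one is available.
    device = "cuda" if torch.cuda.is_available() else "cpu"

# Sections such as "train", "eval", "hardware_estimation", "vhdl" and "fpga"
# are only executed when they are present in the file.
run_training = "train" in project
```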
@@ -2,3 +2,4 @@ from .arch_builder import *
from .modnef_driver import *
from .tools import *
from .modnef_torch import *
from .quantizer import *
(No preview available for several changed files)
@@ -12,6 +12,7 @@ from ..modnef_arch_mod import ModNEFArchMod
from ..utilities import *
from math import log, ceil
from .blif_debugger import BLif_Debugger
from modnef.quantizer import *
_BLIF_DEFINITION = """
component BLif_{0} is
@@ -110,7 +111,8 @@ class BLif(ModNEFArchMod):
weight_fp : int = -1,
mem_init_file : str = None,
strategy : str = "Parallel",
variable_size : int = 16
variable_size : int = 16,
quantizer = FixedPointQuantizer(8)
):
"""
Init attributes
@@ -155,14 +157,7 @@
self.beta = beta
self.reset = reset
if compute_fp == -1:
self.compute_fp = weight_size
else:
self.compute_fp = compute_fp
self.weight_size = weight_size
self.weight_type = weight_type
self.weight_fp = weight_fp
self.quantizer = quantizer
if mem_init_file == None:
self.mem_init_file = f"{self.name}_weight.mem"
@@ -170,7 +165,9 @@
self.mem_init_file = mem_init_file
self._strategy = strategy
self.variable_size = variable_size
self.variable_size = 16
def vhdl_component_name(self):
"""
@@ -210,61 +207,38 @@
memory file output path
"""
if self.weight_type != "w_signed" and self.weight_type != "w_unsigned" and self.weight_type != None:
print(f"{self.weight_type} is not supported")
return
weights = weight_extraction(weights, self.input_neuron, self.output_neuron)
if self.weight_type == None or self.weight_fp == -1:
if not self.quantizer.is_initialiaze:
self.quantizer.init_from_weight(weights)
w_min = min(min(weights))
w_max = max(max(weights))
if w_min < 0:
self.weight_type = "w_signed"
else:
self.weight_type = "w_unsigned"
if self.weight_fp == -1:
int_part = int(max(abs(w_min), abs(w_max)))
if int_part==0:
fp = 0
elif int_part==1:
fp = 1
else:
fp = ceil(log(int_part)/log(2))
if self.weight_type == "w_signed":
self.weight_fp = self.weight_size-fp-1
else:
self.weight_fp = self.weight_size-fp
bw = self.quantizer.bitwidth
mem_file = open(f"{output_path}/{self.mem_init_file}", 'w')
if self.weight_type == "w_signed":
if self.quantizer.signed:
for i in range(self.input_neuron):
w_line = 0
for j in range(self.output_neuron-1, -1, -1):
w_line = (w_line<<self.weight_size) + two_comp(to_fixed_point(weights[i][j], self.weight_fp), self.weight_size)
w_line = (w_line<<bw) + two_comp(self.quantizer(weights[i][j]), bw)
mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
self.v_threshold = two_comp(to_fixed_point(self.v_threshold, self.compute_fp), self.variable_size)
self.beta = two_comp(to_fixed_point(self.beta, self.compute_fp), self.variable_size)
print(self.v_threshold)
self.v_threshold = two_comp(self.quantizer(self.v_threshold), self.variable_size)
print(self.v_threshold)
self.beta = two_comp(self.quantizer(self.beta), self.variable_size)
elif self.weight_type == "w_unsigned":
else:
for i in range(self.input_neuron):
w_line = 0
for j in range(self.output_neuron-1, -1, -1):
w_line = (w_line<<self.weight_size) + to_fixed_point(weights[i][j], self.weight_fp)
w_line = (w_line<<self.weight_size) + self.quantizer(weights[i][j])
mem_file.write(f"@{to_hex(i)} {to_hex(w_line)}\n")
self.v_threshold = to_fixed_point(self.v_threshold, self.compute_fp)
self.beta = to_fixed_point(self.beta, self.compute_fp)
self.v_threshold = self.quantizer(self.v_threshold)
self.beta = self.quantizer(self.beta, self.beta)
mem_file.close()
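For readers of this diff: the quantizer object now concentrates the bitwidth, signedness and fixed-point bookkeeping that was previously spread across the layer. The sketch below is only an inference of the `FixedPointQuantizer` interface from its usage in this commit (constructor argument, `init_from_weight`, `bitwidth`, `signed`, `fixed_point` and the call operator); it is not the actual implementation in `modnef.quantizer`.
```python
from math import ceil, log2

class FixedPointQuantizer:
    """Sketch of the quantizer interface inferred from its usage in this diff."""

    def __init__(self, bitwidth, fixed_point=-1, signed=None):
        self.bitwidth = bitwidth        # quantized value bitwidth
        self.fixed_point = fixed_point  # fixed-point position, -1 until initialised
        self.signed = signed            # True -> w_signed, False -> w_unsigned
        # attribute name spelled as it appears in the diff
        self.is_initialiaze = fixed_point != -1 and signed is not None

    def init_from_weight(self, weights):
        """Derive signedness and fixed-point position from the weight values,
        mirroring the derivation removed by this commit."""
        w_min = min(min(row) for row in weights)
        w_max = max(max(row) for row in weights)
        self.signed = w_min < 0
        int_part = int(max(abs(w_min), abs(w_max)))
        int_bits = int_part if int_part in (0, 1) else ceil(log2(int_part))
        self.fixed_point = self.bitwidth - int_bits - (1 if self.signed else 0)
        self.is_initialiaze = True

    def __call__(self, value):
        """Convert a float value to its fixed-point integer representation."""
        return int(round(value * (1 << self.fixed_point)))
```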
@@ -284,10 +258,10 @@
v_threshold = self.v_threshold,
beta = self.beta,
reset = self.reset,
compute_fp = self.compute_fp,
weight_size = self.weight_size,
weight_type = self.weight_type,
weight_fp = self.weight_fp,
compute_fp = self.quantizer.fixed_point,
weight_size = self.quantizer.bitwidth,
weight_type = "w_signed" if self.quantizer.signed else "w_unsigned",
weight_fp = self.quantizer.fixed_point,
mem_init_file = self.mem_init_file,
output_file = output_file,
variable_size=self.variable_size
@@ -313,6 +287,8 @@
print("neuron hyper parameters are not int. If you set hyper parameters as float, pleasse run weight_convert before calling to_vhdl")
return
wt = "w_signed" if self.quantizer.signed else "w_unsigned"
vhdl_file.write(f"\t{self.name} : BLif_{self._strategy} generic map(\n")
vhdl_file.write(f"\t\tinput_neuron => {self.input_neuron},\n")
vhdl_file.write(f"\t\toutput_neuron => {self.output_neuron},\n")
@@ -320,10 +296,10 @@
vhdl_file.write(f"\t\tv_threshold => x\"{self._to_hex(self.v_threshold, self.variable_size)}\",\n")
vhdl_file.write(f"\t\tbeta => x\"{self._to_hex(self.beta, self.variable_size)}\",\n")
vhdl_file.write(f"\t\treset => \"{self.reset}\",\n")
vhdl_file.write(f"\t\tcompute_fp => {self.compute_fp},\n")
vhdl_file.write(f"\t\tweight_size => {self.weight_size},\n")
vhdl_file.write(f"\t\tweight_type => \"{self.weight_type}\",\n")
vhdl_file.write(f"\t\tweight_fp => {self.weight_fp},\n")
vhdl_file.write(f"\t\tcompute_fp => {self.quantizer.fixed_point},\n")
vhdl_file.write(f"\t\tweight_size => {self.quantizer.bitwidth},\n")
vhdl_file.write(f"\t\tweight_type => \"{wt}\",\n")
vhdl_file.write(f"\t\tweight_fp => {self.quantizer.fixed_point},\n")
vhdl_file.write(f"\t\tmem_init_file => \"{self.mem_init_file}\"\n")
vhdl_file.write("\t) port map(\n")
vhdl_file.write(f"\t\ti_clk => {clock_name},\n")
(No preview available for several changed files)