diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
index 29bd28444b1fd195c469d1b70132cbbc1a8a9b63..01f59f90e268fd3d374aa169783b9021733365f8 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/blif_model/rblif.py
@@ -126,7 +126,7 @@ class RBLIF(ModNEFNeuron):
 
     self.register_buffer("beta", torch.tensor(beta))
 
-    self.reccurent = nn.Linear(out_features, out_features, bias=True)
+    self.reccurent = nn.Linear(out_features, out_features, bias=False)
 
     self._init_mem()
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py b/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
index 08e37abea078bacf98ae9e6ef2dd2c53e3bd3c6a..c4a980045968e8badcfcd59622ffefba18444f5b 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/modnef_torch_neuron.py
@@ -89,10 +89,8 @@ class ModNEFNeuron(SpikingNeuron):
 
     if len(param)==1:
       self.quantizer.init_from_weight(param[0])
-      print("init no rec")
     else:
       self.quantizer.init_from_weight(param[0], param[1])
-      print("init rec")
 
   def quantize_weight(self, unscale : bool = True):
     """
@@ -105,8 +103,6 @@ class ModNEFNeuron(SpikingNeuron):
 
     for p in self.parameters():
       p.data = self.quantizer(p.data, unscale=unscale)
-      print(p)
-      print("quantize weight")
 
   def quantize_hp(self, unscale : bool = True):
     """
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
index 190ddc84029476ef7c36e144c4fc9c57d3c21403..7d9c21fdbc95d7cdadf010d5eb7b5faa2cda8bf3 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/rslif.py
@@ -131,7 +131,7 @@ class RSLIF(ModNEFNeuron):
     self.register_buffer("v_min", torch.as_tensor(v_min))
     self.register_buffer("v_rest", torch.as_tensor(v_rest))
 
-    self.reccurent = nn.Linear(self.out_features, self.out_features, bias=False)
+    self.reccurent = nn.Linear(out_features, out_features, bias=False)
 
     self._init_mem()
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
index 78cc36b27622e701758b8f10b18ecb17634dd84c..089519bf6bf4942e47bd25d36a12c28ffdd356ad 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/slif_model/slif.py
@@ -287,7 +287,7 @@ class SLIF(ModNEFNeuron):
       if self.hardware_estimation_flag:
         val_max = max(abs(self.val_max), abs(self.val_min))
         print(val_max)
-        val_max = self.quantizer(val_max, dtype=torch.int32)
+        val_max = self.quantizer(val_max)
         print(val_max)
         self.hardware_description["variable_size"] = ceil(log(val_max)/log(256))*8
       else:
diff --git a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
index e6354a2086b3b742ffbf6270df64e31e8f46c8a7..3cd77b3f33527d1cbeb15ed0cada7384f00b5e30 100644
--- a/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
+++ b/modneflib/modnef/modnef_torch/modnef_neurons/srlif_model/rshiftlif.py
@@ -303,7 +303,7 @@ class RShiftLIF(ModNEFNeuron):
     if self.hardware_description["variable_size"]==-1:
       if self.hardware_estimation_flag:
         val_max = max(abs(self.val_max), abs(self.val_min))
-        val_max = val_max*2**(self.hardware_description["compute_fp"])
+        val_max = self.quantizer(val_max)
         self.hardware_description["variable_size"] = ceil(log(val_max)/log(256))*8
       else:
         self.hardware_description["variable_size"]=16
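Note on the sizing rule touched in slif.py and rshiftlif.py above: variable_size is the quantized peak magnitude rounded up to a whole number of bytes, expressed in bits. A minimal standalone sketch of that rule (the helper name is ours, not the library's):

    from math import ceil, log

    def variable_size_bits(val_max):
      # Smallest whole-byte width, in bits, that the rule above assigns to val_max.
      return ceil(log(val_max) / log(256)) * 8

    assert variable_size_bits(200) == 8    # fits in one byte
    assert variable_size_bits(300) == 16   # needs two bytes
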
diff --git a/modneflib/modnef/templates/evaluation.py b/modneflib/modnef/templates/evaluation.py
index aa74e22091b28a52bb36892ac081df0c918c7803..ebcc8f8f457513ab00649a1ccae38446d243a2cf 100644
--- a/modneflib/modnef/templates/evaluation.py
+++ b/modneflib/modnef/templates/evaluation.py
@@ -6,6 +6,7 @@ from snntorch.surrogate import fast_sigmoid
 import torch
 from run_lib import *
 import sys
+from model import MyModel
 
 if __name__ == "__main__":
 
@@ -13,10 +14,12 @@ if __name__ == "__main__":
   exp_name = "Evaluation"
 
   """Model definition"""
-  model_path = "model_template.json"
   best_model_name = "best_model"
 
-  model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+  # model_path = "model_template.json"
+  # model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+
+  model = MyModel("template_model", spike_grad=fast_sigmoid(slope=25))
 
   model.load_state_dict(torch.load(best_model_name))
diff --git a/modneflib/modnef/templates/model.py b/modneflib/modnef/templates/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac2fb76d6257efaf126d5552ecfb0cd259bc83fc
--- /dev/null
+++ b/modneflib/modnef/templates/model.py
@@ -0,0 +1,175 @@
+import modnef.modnef_torch as mt
+from modnef.arch_builder import *
+from snntorch.surrogate import fast_sigmoid
+from modnef.quantizer import *
+import torch
+
+class MyModel(mt.ModNEFModel):
+
+  def __init__(self, name, spike_grad=fast_sigmoid(slope=25)):
+
+    super().__init__()
+
+    self.name = name
+
+    self.layer1 = mt.SLIF(in_features=2312,
+                          out_features=128,
+                          threshold=0.8,
+                          v_leak=0.015,
+                          v_min=0.0,
+                          v_rest=0.0,
+                          spike_grad=spike_grad,
+                          quantizer=MinMaxQuantizer(
+                            bitwidth=8,
+                            signed=None
+                          ))
+
+    self.layer2 = mt.ShiftLIF(in_features=128,
+                              out_features=64,
+                              threshold=0.8,
+                              beta=0.875,
+                              reset_mechanism="subtract",
+                              spike_grad=spike_grad,
+                              quantizer=DynamicScaleFactorQuantizer(
+                                bitwidth=8,
+                                signed=None
+                              ))
+
+    self.layer3 = mt.BLIF(in_features=64,
+                          out_features=10,
+                          threshold=0.8,
+                          beta=0.9,
+                          reset_mechanism="subtract",
+                          spike_grad=spike_grad,
+                          quantizer=FixedPointQuantizer(
+                            bitwidth=8,
+                            fixed_point=7,
+                            signed=None
+                          ))
+
+  def software_forward(self, input_spikes):
+    """
+    Run layer updates
+
+    Parameters
+    ----------
+    input_spikes : Tensor
+      input spikes
+
+    Returns
+    -------
+    tuple of Tensor
+      output_spike, output_mem
+    """
+
+    spk1, mem1 = self.layer1.reset_mem()
+    spk2, mem2 = self.layer2.reset_mem()
+    spk3, mem3 = self.layer3.reset_mem()
+
+    spk_rec = []
+    mem_rec = []
+
+    batch_size = input_spikes.shape[0]
+    n_steps = input_spikes.shape[1]
+
+    for step in range(n_steps):
+      x = input_spikes[:, step].reshape(batch_size, -1)
+
+      spk1, mem1 = self.layer1(x, mem1, spk1)
+      spk2, mem2 = self.layer2(spk1, mem2, spk2)
+      spk3, mem3 = self.layer3(spk2, mem3, spk3)
+
+      spk_rec.append(spk3)
+      mem_rec.append(mem3)
+
+    return torch.stack(spk_rec, dim=0), torch.stack(mem_rec, dim=0)
+
+  def fpga_forward(self, input_spikes):
+    """
+    Transmit input spikes to the FPGA
+
+    Parameters
+    ----------
+    input_spikes : Tensor
+      input spikes
+
+    Returns
+    -------
+    tuple of Tensor
+      output_spike, None
+    """
+
+    def to_aer(input):
+      # Expand a frame of per-neuron event counts into a flat AER address list.
+      input = input.reshape(-1).to(torch.int32)
+
+      aer = []
+      for i in range(input.shape[0]):
+        for _ in range(input[i]):
+          aer.append(i)
+
+      return aer
+
+    if self.driver is None:
+      raise Exception("please open the FPGA driver before running")
+
+    batch_result = []
+
+    for sample in input_spikes:
+      sample_res = self.driver.run_sample(sample, to_aer, True, len(self.layers))
+      batch_result.append([sample_res])
+
+    return torch.tensor(batch_result).permute(1, 0, 2), None
+
+  def to_vhdl(self, file_name=None, output_path=".", driver_config_path="./driver.yml"):
+    """
+    Generate the VHDL file of the model
+
+    Parameters
+    ----------
+    file_name = None : str
+      VHDL file name
+      if default, file name is model name
+    output_path = "." : str
+      output file path
+    driver_config_path = "./driver.yml" : str
+      driver configuration file
+    """
+
+    if file_name is None:
+      file_name = f"{output_path}/{self.name}.vhd"
+
+    builder = ModNEFBuilder(self.name, 2312, 10)
+
+    uart = Uart_XStep(
+      name="uart",
+      input_layer_size=2312,
+      output_layer_size=10,
+      clk_freq=125_000_000,
+      baud_rate=921_600,
+      queue_read_depth=10240,
+      queue_write_depth=1024,
+      tx_name="uart_rxd",
+      rx_name="uart_txd"
+    )
+
+    builder.add_module(uart)
+    builder.set_io(uart)
+
+    layer1_module = self.layer1.get_builder_module(f"{self.name}_layer1", output_path)
+    builder.add_module(layer1_module)
+
+    layer2_module = self.layer2.get_builder_module(f"{self.name}_layer2", output_path)
+    builder.add_module(layer2_module)
+
+    layer3_module = self.layer3.get_builder_module(f"{self.name}_layer3", output_path)
+    builder.add_module(layer3_module)
+
+    builder.add_link(uart, layer1_module)
+    builder.add_link(layer1_module, layer2_module)
+    builder.add_link(layer2_module, layer3_module)
+    builder.add_link(layer3_module, uart)
+
+    builder.get_driver_yaml(f"{output_path}/{driver_config_path}")
+    builder.to_vhdl(file_name, "clock")
\ No newline at end of file
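Note: the new model.py template replaces the JSON-driven ModNEFModelBuilder flow in the scripts below. A minimal usage sketch of the software path (shapes follow the 2312-input / 10-output network defined above; the random input is a placeholder, and we call software_forward directly rather than assuming how ModNEFModel.forward dispatches between software and FPGA):

    import torch
    from model import MyModel

    model = MyModel("template_model")
    x = (torch.rand(4, 5, 2312) > 0.5).float()  # (batch, n_steps, flattened events)
    spk, mem = model.software_forward(x)
    print(spk.shape)                            # (n_steps, batch, 10) after torch.stack
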
diff --git a/modneflib/modnef/templates/run_lib.py b/modneflib/modnef/templates/run_lib.py
index 21f536be3f3a60baa6bc7e834a4635c1eb9c9182..85a13529268e8ad7a81c86f0c78467adda158dbd 100644
--- a/modneflib/modnef/templates/run_lib.py
+++ b/modneflib/modnef/templates/run_lib.py
@@ -169,6 +169,9 @@ def evaluation(model, testLoader, name="Evaluation", device=torch.device("cpu"),
 
   model.eval(quant)
 
+  if quant:
+    model.quantize(force_init=True)
+
   accuracy, y_pred, y_true = __run_accuracy(model=model, testLoader=testLoader, name=name, verbose=verbose, device=device)
 
   return accuracy, y_pred, y_true
@@ -182,6 +185,8 @@ def hardware_estimation(model, testLoader, name="Hardware Estimation", device=torch.device("cpu"),
 
   model.hardware_estimation()
 
+  model.quantize(force_init=True)
+
   accuracy, y_pred, y_true = __run_accuracy(model=model, testLoader=testLoader, name=name, verbose=verbose, device=device)
 
   return accuracy, y_pred, y_true
diff --git a/modneflib/modnef/templates/train.py b/modneflib/modnef/templates/train.py
index 03603ecb67073dd27e7d2c58b4651504b9655ab8..b67de4512337ec34d5c96066df3ed7e8f815a98a 100644
--- a/modneflib/modnef/templates/train.py
+++ b/modneflib/modnef/templates/train.py
@@ -5,12 +5,15 @@ import os
 from snntorch.surrogate import fast_sigmoid
 from run_lib import *
 import torch
+from model import MyModel
 
 if __name__ == "__main__":
 
   """Model definition"""
-  model_path = "model_template.json"
-  model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+  # model_path = "model_template.json"
+  # model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+
+  model = MyModel("template_model", spike_grad=fast_sigmoid(slope=25))
 
   """Optimizer"""
   optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999))
@@ -22,7 +25,7 @@ if __name__ == "__main__":
   device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
 
   """Train variable definition"""
-  n_epoch = 1
+  n_epoch = 2
   best_model_name = "best_model"
   verbose = True
   save_plot = False
@@ -34,7 +37,7 @@ if __name__ == "__main__":
 
   # data set definition, change to your dataset
   sensor_size = tonic.datasets.NMNIST.sensor_size
-  frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=10)
+  frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=5)
 
   train_set = tonic.datasets.NMNIST(save_to=dataset_path, train=True, transform=frame_transform)
   test_set = tonic.datasets.NMNIST(save_to=dataset_path, train=False, transform=frame_transform)
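Note on the run_lib.py change above: quantized evaluation and hardware estimation now (re)initialize the quantizers from the current weights via quantize(force_init=True) before the accuracy pass. A sketch of the intended call order (testLoader is a placeholder; we assume evaluation exposes the quant flag used in its body):

    model.load_state_dict(torch.load("best_model"))
    acc_float, _, _ = evaluation(model, testLoader, name="Float", quant=False)
    acc_quant, _, _ = evaluation(model, testLoader, name="Quant", quant=True)  # runs quantize(force_init=True) first
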
"__main__": device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") """Train variable definition""" - n_epoch = 1 + n_epoch = 2 best_model_name = "best_model" verbose = True save_plot = False @@ -34,7 +37,7 @@ if __name__ == "__main__": # data set definition, change to your dataset sensor_size = tonic.datasets.NMNIST.sensor_size - frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=10) + frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=5) train_set = tonic.datasets.NMNIST(save_to=dataset_path, train=True, transform=frame_transform) test_set = tonic.datasets.NMNIST(save_to=dataset_path, train=False, transform=frame_transform) diff --git a/modneflib/modnef/templates/vhdl_generation.py b/modneflib/modnef/templates/vhdl_generation.py index 82dbb71fda818e276264ed04b3e2068791d4c0ca..fcca214d8959aa06374ae7c4c93aa428ab9fd7a2 100644 --- a/modneflib/modnef/templates/vhdl_generation.py +++ b/modneflib/modnef/templates/vhdl_generation.py @@ -5,6 +5,7 @@ import os from snntorch.surrogate import fast_sigmoid from run_lib import * import torch +from model import MyModel if __name__ == "__main__": @@ -12,10 +13,12 @@ if __name__ == "__main__": exp_name = "Evaluation" """Model definition""" - model_path = "model_template.json" best_model_name = "best_model" - model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25)) + # model_path = "model_template.json" + # model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25)) + + model = MyModel("template_model", spike_grad=fast_sigmoid(slope=25)) model.load_state_dict(torch.load(best_model_name))