Commit 77bbfb1a authored by ahoni
add model to template

parent e872088a
1 merge request: !3 Dev
@@ -126,7 +126,7 @@ class RBLIF(ModNEFNeuron):
         self.register_buffer("beta", torch.tensor(beta))
-        self.reccurent = nn.Linear(out_features, out_features, bias=True)
+        self.reccurent = nn.Linear(out_features, out_features, bias=False)
         self._init_mem()
...
@@ -89,10 +89,8 @@ class ModNEFNeuron(SpikingNeuron):
         if len(param)==1:
             self.quantizer.init_from_weight(param[0])
-            print("init no rec")
         else:
             self.quantizer.init_from_weight(param[0], param[1])
-            print("init rec")

     def quantize_weight(self, unscale : bool = True):
         """
@@ -105,8 +103,6 @@ class ModNEFNeuron(SpikingNeuron):
         for p in self.parameters():
             p.data = self.quantizer(p.data, unscale=unscale)
-            print(p)
-        print("quantize weight")

     def quantize_hp(self, unscale : bool = True):
         """
...
@@ -131,7 +131,7 @@ class RSLIF(ModNEFNeuron):
         self.register_buffer("v_min", torch.as_tensor(v_min))
         self.register_buffer("v_rest", torch.as_tensor(v_rest))
-        self.reccurent = nn.Linear(self.out_features, self.out_features, bias=False)
+        self.reccurent = nn.Linear(out_features, out_features, bias=False)
         self._init_mem()
...
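A likely motivation for dropping the recurrent bias in the RBLIF and RSLIF hunks above: without a bias term, the recurrent nn.Linear exposes a single weight tensor, so (presumably) only weight matrices reach the two-argument quantizer initialisation kept in the ModNEFNeuron hunk. A quick standalone check, illustration only and not ModNEF code:

import torch.nn as nn

# As in the hunks above: the recurrent connection is created without a bias term
reccurent = nn.Linear(128, 128, bias=False)

# Only the weight matrix remains as a learnable parameter
print(len(list(reccurent.parameters())))          # 1
print([p.shape for p in reccurent.parameters()])  # [torch.Size([128, 128])]
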
@@ -287,7 +287,7 @@ class SLIF(ModNEFNeuron):
         if self.hardware_estimation_flag:
             val_max = max(abs(self.val_max), abs(self.val_min))
             print(val_max)
-            val_max = self.quantizer(val_max, dtype=torch.int32)
+            val_max = self.quantizer(val_max)
             print(val_max)
             self.hardware_description["variable_size"] = ceil(log(val_max)/log(256))*8
         else:
...
@@ -303,7 +303,7 @@ class RShiftLIF(ModNEFNeuron):
         if self.hardware_description["variable_size"]==-1:
             if self.hardware_estimation_flag:
                 val_max = max(abs(self.val_max), abs(self.val_min))
-                val_max = val_max*2**(self.hardware_description["compute_fp"])
+                val_max = self.quantizer(self.val_max)
                 self.hardware_description["variable_size"] = ceil(log(val_max)/log(256))*8
             else:
                 self.hardware_description["variable_size"]=16
...
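For context on the two sizing hunks above: the stored variable width is now derived from the quantized extreme value and rounded up to whole bytes via ceil(log(val_max)/log(256))*8. A standalone sketch of that arithmetic, illustration only and not ModNEF code:

from math import ceil, log

def byte_aligned_bitwidth(val_max):
    # Bit width rounded up to whole bytes for a value of magnitude val_max,
    # mirroring ceil(log(val_max)/log(256))*8 from the hunks above
    return ceil(log(val_max) / log(256)) * 8

print(byte_aligned_bitwidth(300))    # 16 -> two bytes
print(byte_aligned_bitwidth(70000))  # 24 -> three bytes
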
@@ -6,6 +6,7 @@ from snntorch.surrogate import fast_sigmoid
 import torch
 from run_lib import *
 import sys
+from model import MyModel

 if __name__ == "__main__":
@@ -13,10 +14,12 @@ if __name__ == "__main__":
     exp_name = "Evaluation"

     """Model definition"""
-    model_path = "model_template.json"
     best_model_name = "best_model"

-    model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+    # model_path = "model_template.json"
+    # model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+    model = MyModel("template_model", spike_grad=fast_sigmoid(slope=25))

     model.load_state_dict(torch.load(best_model_name))
...
import modnef.modnef_torch as mt
from modnef.arch_builder import *
from snntorch.surrogate import fast_sigmoid
from modnef.quantizer import *
import torch


class MyModel(mt.ModNEFModel):

    def __init__(self, name, spike_grad=fast_sigmoid(slope=25)):
        super().__init__()

        self.name = name

        self.layer1 = mt.SLIF(in_features=2312,
                              out_features=128,
                              threshold=0.8,
                              v_leak=0.015,
                              v_min=0.0,
                              v_rest=0.0,
                              spike_grad=spike_grad,
                              quantizer=MinMaxQuantizer(
                                  bitwidth=8,
                                  signed=None
                              ))

        self.layer2 = mt.ShiftLIF(in_features=128,
                                  out_features=64,
                                  threshold=0.8,
                                  beta=0.875,
                                  reset_mechanism="subtract",
                                  spike_grad=spike_grad,
                                  quantizer=DynamicScaleFactorQuantizer(
                                      bitwidth=8,
                                      signed=None
                                  ))

        self.layer3 = mt.BLIF(in_features=64,
                              out_features=10,
                              threshold=0.8,
                              beta=0.9,
                              reset_mechanism="subtract",
                              spike_grad=spike_grad,
                              quantizer=FixedPointQuantizer(
                                  bitwidth=8,
                                  fixed_point=7,
                                  signed=None
                              ))
    def software_forward(self, input_spikes):
        """
        Run layers update

        Parameters
        ----------
        input_spikes : Tensor
            input spikes

        Returns
        -------
        tuple of tensor
            output_spike, output_mem
        """

        spk1, mem1 = self.layer1.reset_mem()
        spk2, mem2 = self.layer2.reset_mem()
        spk3, mem3 = self.layer3.reset_mem()

        spk_rec = []
        mem_rec = []

        batch_size = input_spikes.shape[0]
        n_steps = input_spikes.shape[1]

        for step in range(n_steps):
            x = input_spikes[:, step].reshape(batch_size, -1)

            spk1, mem1 = self.layer1(x, mem1, spk1)
            spk2, mem2 = self.layer2(spk1, mem2, spk2)
            spk3, mem3 = self.layer3(spk2, mem3, spk3)

            spk_rec.append(spk3)
            mem_rec.append(mem3)

        return torch.stack(spk_rec, dim=0), torch.stack(mem_rec, dim=0)
    def fpga_forward(self, input_spikes):
        """
        Transmit input spikes to the FPGA

        Parameters
        ----------
        input_spikes : Tensor
            input spikes

        Returns
        -------
        tuple of tensor
            output_spike, None
        """

        def to_aer(input):
            # Flatten the sample and emit each input index once per spike count (AER stream)
            input = input.reshape(-1).to(torch.int32)
            aer = []
            for i in range(input.shape[0]):
                for _ in range(input[i]):
                    aer.append(i)
            return aer

        if self.driver is None:
            raise Exception("please open the FPGA driver first")

        batch_result = []

        for sample in input_spikes:
            sample_res = self.driver.run_sample(sample, to_aer, True, len(self.layers))
            batch_result.append([sample_res])

        return torch.tensor(batch_result).permute(1, 0, 2), None
    def to_vhdl(self, file_name=None, output_path=".", driver_config_path="./driver.yml"):
        """
        Generate VHDL file of model

        Parameters
        ----------
        file_name = None : str
            VHDL file name
            if default, file name is model name
        output_path = "." : str
            output file path
        driver_config_path = "./driver.yml" : str
            driver configuration file
        """

        if file_name is None:
            file_name = f"{output_path}/{self.name}.vhd"

        builder = ModNEFBuilder(self.name, 2312, 10)

        uart = Uart_XStep(
            name="uart",
            input_layer_size=2312,
            output_layer_size=10,
            clk_freq=125_000_000,
            baud_rate=921_600,
            queue_read_depth=10240,
            queue_write_depth=1024,
            tx_name="uart_rxd",
            rx_name="uart_txd"
        )
        builder.add_module(uart)
        builder.set_io(uart)

        layer1_module = self.layer1.get_builder_module(f"{self.name}_layer1", output_path)
        builder.add_module(layer1_module)

        layer2_module = self.layer2.get_builder_module(f"{self.name}_layer2", output_path)
        builder.add_module(layer2_module)

        layer3_module = self.layer3.get_builder_module(f"{self.name}_layer3", output_path)
        builder.add_module(layer3_module)

        builder.add_link(uart, layer1_module)
        builder.add_link(layer1_module, layer2_module)
        builder.add_link(layer2_module, layer3_module)
        builder.add_link(layer3_module, uart)

        builder.get_driver_yaml(f"{output_path}/{driver_config_path}")
        builder.to_vhdl(file_name, "clock")
\ No newline at end of file
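The new file above (presumably model.py, given the `from model import MyModel` lines elsewhere in this commit) can be smoke-tested in software mode. A minimal sketch, assuming the modnef package is installed and inputs follow the NMNIST frame layout used below (5 time bins, 2*34*34 = 2312 features per step); the batch size and spike density are arbitrary:

import torch
from model import MyModel

model = MyModel("template_model")

# Dummy spike input: batch of 4 samples, 5 time steps, 2312 features per step
dummy_spikes = (torch.rand(4, 5, 2312) < 0.1).float()

with torch.no_grad():
    spk_rec, mem_rec = model.software_forward(dummy_spikes)

print(spk_rec.shape)  # expected [5, 4, 10]: time steps x batch x output neurons
print(mem_rec.shape)
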
@@ -169,6 +169,9 @@ def evaluation(model, testLoader, name="Evaluation", device=torch.device("cpu"),
     model.eval(quant)

+    if quant:
+        model.quantize(force_init=True)

     accuracy, y_pred, y_true = __run_accuracy(model=model, testLoader=testLoader, name=name, verbose=verbose, device=device)

     return accuracy, y_pred, y_true
@@ -182,6 +185,8 @@ def hardware_estimation(model, testLoader, name="Hardware Estimation",
     model.hardware_estimation()

+    model.quantize(force_init=True)

     accuracy, y_pred, y_true = __run_accuracy(model=model, testLoader=testLoader, name=name, verbose=verbose, device=device)

     return accuracy, y_pred, y_true
...
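The two run_lib hunks above force quantizer (re-)initialisation before accuracy is measured. A hedged sketch of a quantized evaluation call using a tiny synthetic loader; the quant keyword is assumed from the hunk context (the full signature is truncated here), and the loader format is assumed to match what __run_accuracy iterates over:

import torch
from torch.utils.data import DataLoader, TensorDataset
from run_lib import *   # evaluation(), as patched above
from model import MyModel

model = MyModel("template_model")

# Tiny synthetic test set: 8 samples, 5 time steps, 2312 features, 10 classes
frames = (torch.rand(8, 5, 2312) < 0.1).float()
targets = torch.randint(0, 10, (8,))
testLoader = DataLoader(TensorDataset(frames, targets), batch_size=4)

# quant=True now triggers model.quantize(force_init=True) before __run_accuracy
acc, y_pred, y_true = evaluation(model, testLoader, name="Quantized eval",
                                 device=torch.device("cpu"), quant=True)
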
@@ -5,12 +5,15 @@ import os
 from snntorch.surrogate import fast_sigmoid
 from run_lib import *
 import torch
+from model import MyModel

 if __name__ == "__main__":

     """Model definition"""
-    model_path = "model_template.json"
-    model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+    # model_path = "model_template.json"
+    # model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+    model = MyModel("template_model", spike_grad=fast_sigmoid(slope=25))

     """Optimizer"""
     optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999))
@@ -22,7 +25,7 @@ if __name__ == "__main__":
     device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

     """Train variable definition"""
-    n_epoch = 1
+    n_epoch = 2
     best_model_name = "best_model"
     verbose = True
     save_plot = False
@@ -34,7 +37,7 @@ if __name__ == "__main__":
     # data set definition, change to your dataset
     sensor_size = tonic.datasets.NMNIST.sensor_size
-    frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=10)
+    frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=5)
     train_set = tonic.datasets.NMNIST(save_to=dataset_path, train=True, transform=frame_transform)
     test_set = tonic.datasets.NMNIST(save_to=dataset_path, train=False, transform=frame_transform)
...
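One consequence of the n_time_bins change above: the ToFrame transform now produces 5 frames per sample, which becomes the time dimension that software_forward iterates over. A minimal sketch of the transform output shape (the save_to path is a placeholder):

import tonic

sensor_size = tonic.datasets.NMNIST.sensor_size  # (34, 34, 2)
frame_transform = tonic.transforms.ToFrame(sensor_size=sensor_size, n_time_bins=5)

train_set = tonic.datasets.NMNIST(save_to="./dataset", train=True, transform=frame_transform)
frames, target = train_set[0]

print(frames.shape)  # (5, 2, 34, 34): 5 time bins, 2*34*34 = 2312 features once flattened
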
@@ -5,6 +5,7 @@ import os
 from snntorch.surrogate import fast_sigmoid
 from run_lib import *
 import torch
+from model import MyModel

 if __name__ == "__main__":
@@ -12,10 +13,12 @@ if __name__ == "__main__":
     exp_name = "Evaluation"

     """Model definition"""
-    model_path = "model_template.json"
     best_model_name = "best_model"

-    model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+    # model_path = "model_template.json"
+    # model = ModNEFModelBuilder(model_path, spike_grad=fast_sigmoid(slope=25))
+    model = MyModel("template_model", spike_grad=fast_sigmoid(slope=25))

     model.load_state_dict(torch.load(best_model_name))
...