Commit 2c659038 authored by Hammouda Elbez

Added Norse code and updated CSNN Conv class

parent a9074e75
Showing 42 additions and 43 deletions
@@ -42,7 +42,7 @@ int main(int argc, char** argv) {
     float beta_n= 2.5f;
     float prune_max_threshold = 0.7f;
-    auto& fc1 = experiment.template push_layer<layer::Convolution>("fc1", 28, 28, 400);
+    auto& fc1 = experiment.template push_layer<layer::Convolution>("fc1", 28, 28, 50);
     fc1.template parameter<float>("annealing").set(0.95f);
     fc1.template parameter<float>("min_th").set(1.0f);
     fc1.template parameter<float>("t_obj").set(t_obj);
@@ -56,7 +56,7 @@ int main(int argc, char** argv) {
     alpha = alpha*2;
-    auto& fc2 = experiment.template push_layer<layer::Convolution>("fc2", 1, 1, 1600);
+    auto& fc2 = experiment.template push_layer<layer::Convolution>("fc2", 1, 1, 128);
     fc2.template parameter<float>("annealing").set(0.95f);
     fc2.template parameter<float>("min_th").set(1.0f);
     fc2.template parameter<float>("t_obj").set(t_obj);
@@ -68,8 +68,8 @@ int main(int argc, char** argv) {
     fc2.template parameter<Tensor<float>>("th").template distribution<distribution::Gaussian>(10.0, 0.1);
     fc2.template parameter<STDP>("stdp").template set<stdp::Simplified>(alpha_p,alpha_n,beta_p,beta_n);
-    experiment.add_train_step(fc1, 25);
-    experiment.add_train_step(fc2, 25);
+    experiment.add_train_step(fc1, 10);
+    experiment.add_train_step(fc2, 10);
     experiment.run(10000);
@@ -273,7 +273,7 @@ void _priv::DenseImpl::train(const std::vector<Spike>& input_spike, const Tensor
     for(size_t x=0; x<_model._filter_width; x++) {
         for(size_t y=0; y<_model._filter_height; y++) {
             for(size_t zi=0; zi<_model._input_depth; zi++) {
-                if(_model._cropedVector.at(x, y, zi, z) != 1){
+                if(_model._cropedVector.at(x, y, zi, z) != 1.0f){
                     w.at(x, y, zi, z) = _model._stdp->process(w.at(x, y, zi, z), input_time.at(x, y, zi), spike.time);
                     _model.synaptic_updates+=1;
                 }
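The hunk above gates the STDP weight update on the pruning mask (_cropedVector): synapses flagged with 1.0 keep their weight, only the remaining ones are updated and counted. The following NumPy sketch is an illustrative analogue of that masked update, not the simulator's code; the function name, the simplified STDP rule and the alpha parameters are hypothetical.

import numpy as np

def masked_stdp_update(w, croped, input_time, spike_time, alpha_p=0.1, alpha_n=0.1):
    # Illustrative analogue of the loop in _priv::DenseImpl::train:
    # synapses with croped == 1.0 are skipped; the others get a simplified
    # STDP update (this rule is a stand-in, not the simulator's stdp::Simplified).
    causal = input_time <= spike_time                  # pre-synaptic spike before post-synaptic spike
    delta = np.where(causal, alpha_p * (1.0 - w), -alpha_n * w)
    active = croped != 1.0                             # the comparison changed to 1.0f in this commit
    w = np.where(active, np.clip(w + delta, 0.0, 1.0), w)
    return w, int(active.sum())                        # updated weights, number of synaptic updates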
 import sys
-sys.path.append('../../')
+sys.path.append('../')
 import torch
 import numpy as np
 from compression import ProgressiveCompression
-from torch.utils.data import SubsetRandomSampler
-from norse.torch import LIFCell, LICell
+from norse.torch import LIFCell
 from norse.torch.module.leaky_integrator import LILinearCell
 from norse.torch import LIFParameters
-from norse.torch.module import encode, SequentialState
+from norse.torch.module import encode
 from datetime import datetime
 import torchvision
 import os
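For context on the retained Norse imports (LIFCell, LILinearCell, LIFParameters, encode), a minimal usage sketch is shown below. It is not the model defined in this script (the model definition is outside the visible hunks); the TinySNN name, layer sizes and sequence length are assumptions.

import torch
from norse.torch import LIFCell, LIFParameters
from norse.torch.module.leaky_integrator import LILinearCell
from norse.torch.module import encode

class TinySNN(torch.nn.Module):
    # Hypothetical two-layer spiking classifier, not the network used in the script.
    def __init__(self, num_classes=10):
        super().__init__()
        self.conv = torch.nn.Conv2d(1, 8, 5)                    # 28x28 -> 24x24 feature maps
        self.lif = LIFCell(p=LIFParameters())                   # spiking non-linearity
        self.readout = LILinearCell(8 * 24 * 24, num_classes)   # leaky-integrator readout

    def forward(self, x):                                       # x: (T, batch, 1, 28, 28)
        s_lif, s_out = None, None
        voltages = []
        for t in range(x.shape[0]):
            z = self.conv(x[t]).flatten(1)
            z, s_lif = self.lif(z, s_lif)                       # spikes and updated LIF state
            v, s_out = self.readout(z, s_out)                   # readout membrane voltage
            voltages.append(v)
        return torch.stack(voltages).max(dim=0).values          # decode by max voltage over time

encoder = encode.ConstantCurrentLIFEncoder(seq_length=32)       # image -> spike train over 32 steps
spikes = encoder(torch.rand(4, 1, 28, 28))                      # (32, 4, 1, 28, 28)
logits = TinySNN()(spikes)                                      # (4, 10)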
@@ -21,18 +20,19 @@ torch.manual_seed(0)
 random.seed(0)
 np.random.seed(0)
-MAXTH = [0.3,0.4,0.5,0.6,0.7]
-ALPHA = [0.005] # [0.002,0.004,0.006,0.008,0.01]
-REINFORCEMENT = [True] # [False, True]
-apply_compression = False
+# MAXTH = [0.3,0.4,0.5,0.6,0.7]
+# ALPHA = [0.005]
+REINFORCEMENT = [True]
+COMPRESSION = [False, True]
+LAYERWISE = [False, True]
-for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA, REINFORCEMENT))):
+for maxTh, Alpha, reinforcement, compression, layerwise in np.array(list(itertools.product(MAXTH, ALPHA, REINFORCEMENT, COMPRESSION, LAYERWISE))):
     try:
         os.mkdir("CIFAR10_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement))
     except OSError as error:
         print(error)
-    for i in range(2):
+    for i in range(10):
         before = datetime.now()
         file = open("CIFAR10_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"/CIFAR10_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"_"+str(before), 'w+')
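The new outer loop sweeps five hyperparameter lists at once via itertools.product. A small stand-alone sketch of what that expansion looks like, with the values copied from the hunk purely for illustration:

import itertools
import numpy as np

MAXTH = [0.3, 0.4, 0.5, 0.6, 0.7]
ALPHA = [0.005]
REINFORCEMENT = [True]
COMPRESSION = [False, True]
LAYERWISE = [False, True]

grid = list(itertools.product(MAXTH, ALPHA, REINFORCEMENT, COMPRESSION, LAYERWISE))
print(len(grid))   # 5 * 1 * 1 * 2 * 2 = 20 configurations
print(grid[0])     # (0.3, 0.005, True, False, False)

# Note: wrapping the grid in np.array, as the script does, promotes the booleans to
# floats, so `reinforcement`, `compression` and `layerwise` arrive in the loop body
# as 0.0 / 1.0 rather than False / True (still falsy/truthy as expected).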
@@ -175,7 +175,7 @@ for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA,
         EPOCHS = 100 # Increase this for improved accuracy
         if torch.cuda.is_available():
-            DEVICE = torch.device(sys.argv[1])
+            DEVICE = torch.device("cuda")
         else:
             DEVICE = torch.device("cpu")
@@ -185,8 +185,8 @@ for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA,
         optimizer = torch.optim.Adam(model.parameters(), lr=LR)
         # compression
-        if (apply_compression):
-            progressive_compression = ProgressiveCompression(NorseModel=model, maxThreshold=maxTh, alphaP=Alpha, alphaN=-Alpha, to_file=True, apply_reinforcement=reinforcement, file= file)
+        if (compression):
+            progressive_compression = ProgressiveCompression(NorseModel=model, maxThreshold=maxTh, alphaP=Alpha, alphaN=-Alpha, to_file=True, apply_reinforcement=reinforcement, file= file, layerwise=layerwise)
         training_losses = []
         mean_losses = []
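Collected from the hunks in this script (including the ones below), the compression lifecycle is: construct ProgressiveCompression once after the optimizer, call apply() at the end of every epoch, and finally dump its weights, compressions, thresholds_p and thresholds_n attributes alongside the loss curves. The sketch below only repeats the calls visible in the diff; EPOCHS, model, maxTh, Alpha, reinforcement, layerwise and file are assumed to be defined as in the surrounding script, and the train/test step is elided.

if compression:
    progressive_compression = ProgressiveCompression(
        NorseModel=model, maxThreshold=maxTh, alphaP=Alpha, alphaN=-Alpha,
        to_file=True, apply_reinforcement=reinforcement, file=file, layerwise=layerwise,
    )

for epoch in range(EPOCHS):
    ...                                   # training and evaluation as in the script
    if compression:
        progressive_compression.apply()   # one compression step per epoch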
@@ -203,7 +203,7 @@ for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA,
             mean_losses.append(mean_loss)
             test_losses.append(test_loss)
             accuracies.append(accuracy)
-            if (apply_compression):
+            if (compression):
                 progressive_compression.apply()
         print(f"final accuracy: {accuracies[-1]}")
@@ -211,9 +207,8 @@ for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA,
         file.write("time:"+str(datetime.now() - before)+"\n")
-        with open("CIFAR10_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"/CIFAR10_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"_"+str(before)+".pkl",'wb') as f:
         torch.save(model,"CIFAR10_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"/CIFAR10_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"_"+str(before)+".norse")
-        if (apply_compression):
-            pickle.dump([mean_losses,test_losses,accuracies,progressive_compression.weights,progressive_compression.compressions,progressive_compression.thresholds_p,progressive_compression.thresholds_n], f)
+        if (compression):
+            torch.save([mean_losses,test_losses,accuracies,progressive_compression.weights,progressive_compression.compressions,progressive_compression.thresholds_p,progressive_compression.thresholds_n], "CIFAR10_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"/CIFAR10_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"_"+str(before)+".pkl")
         else:
-            pickle.dump([mean_losses,test_losses,accuracies], f)
+            torch.save([mean_losses,test_losses,accuracies], "CIFAR10_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"/CIFAR10_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"_"+str(before)+".pkl")
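The commit also switches the metrics dump from pickle.dump on an open handle to torch.save with an explicit path. A hedged note on reading those .pkl files back (the path below is a placeholder, not an actual file name):

import torch

# Placeholder path; the real files are named like
# CIFAR10_CONV_maxTh:<...>_Alpha:<...>_reinforcement:<...>_<timestamp>.pkl
path = "results.pkl"
# torch.save pickles arbitrary Python objects, so torch.load reads them back.
# On PyTorch >= 2.6 the default weights_only=True rejects such objects, hence the flag.
mean_losses, test_losses, accuracies, *compression_stats = torch.load(path, weights_only=False)
# compression_stats is empty for runs without compression; otherwise it holds
# [weights, compressions, thresholds_p, thresholds_n] from ProgressiveCompression.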
 import sys
-sys.path.append('../../')
+sys.path.append('../')
 import torch
 import numpy as np
@@ -21,18 +21,19 @@ torch.manual_seed(0)
 random.seed(0)
 np.random.seed(0)
-MAXTH = [0.3,0.4,0.5,0.6,0.7]
-ALPHA = [0.005] # [0.002,0.004,0.006,0.008,0.01]
-REINFORCEMENT = [True] # [False, True]
-apply_compression = True
+# MAXTH = [0.3,0.4,0.5,0.6,0.7]
+# ALPHA = [0.005]
+REINFORCEMENT = [True]
+COMPRESSION = [False, True]
+LAYERWISE = [False, True]
-for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA, REINFORCEMENT))):
+for maxTh, Alpha, reinforcement, compression, layerwise in np.array(list(itertools.product(MAXTH, ALPHA, REINFORCEMENT, COMPRESSION, LAYERWISE))):
     try:
         os.mkdir("FACEMOTOR_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement))
     except OSError as error:
         print(error)
-    for i in range(2):
+    for i in range(10):
         before = datetime.now()
         file = open("FACEMOTOR_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"/FACEMOTOR_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"_"+str(before), 'w+')
@@ -47,7 +48,7 @@ for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA,
         )
         train_data = torchvision.datasets.ImageFolder(
-            root="/home/hammouda/Desktop/Work/falez-csnn-simulator/Datasets/FaceMotor/TrainingSet/",
+            root="FaceMotor/TrainingSet/",
             transform=transform
         )
@@ -60,7 +61,7 @@ for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA,
         test_loader = torch.utils.data.DataLoader(
             torchvision.datasets.ImageFolder(
-                root="/home/hammouda/Desktop/Work/falez-csnn-simulator/Datasets/FaceMotor/TestingSet/",
+                root="FaceMotor/TestingSet/",
                 transform=transform
             ),
             batch_size=BATCH_SIZE,
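The dataset roots now point at relative FaceMotor/ paths instead of an absolute home directory, so the script expects the Caltech face/motorbike folders to sit next to the working directory. A minimal stand-alone sketch of that pipeline; the transform, batch size and class-folder names here are placeholders, the script defines its own earlier:

import torch
import torchvision
from torchvision import transforms

transform = transforms.Compose([            # placeholder transform, not the script's
    transforms.Grayscale(),
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
])

train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(root="FaceMotor/TrainingSet/", transform=transform),
    batch_size=32, shuffle=True,
)
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(root="FaceMotor/TestingSet/", transform=transform),
    batch_size=32,
)
images, labels = next(iter(train_loader))   # class labels follow the sorted subfolder names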
@@ -167,7 +168,7 @@ for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA,
         T = 35
         LR = 1e-4
-        EPOCHS = 15 # Increase this for improved accuracy
+        EPOCHS = 100 # Increase this for improved accuracy
         if torch.cuda.is_available():
             DEVICE = torch.device("cuda")
@@ -180,7 +181,7 @@ for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA,
         optimizer = torch.optim.Adam(model.parameters(), lr=LR)
         # compression
-        if (apply_compression):
+        if (compression):
             progressive_compression = ProgressiveCompression(NorseModel=model, maxThreshold=maxTh, alphaP=Alpha, alphaN=-Alpha, to_file=True, apply_reinforcement=reinforcement, file= file)
         training_losses = []
@@ -198,7 +199,7 @@ for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA,
             mean_losses.append(mean_loss)
             test_losses.append(test_loss)
             accuracies.append(accuracy)
-            if (apply_compression):
+            if (compression):
                 progressive_compression.apply()
         print(f"final accuracy: {accuracies[-1]}")
@@ -206,9 +207,8 @@ for maxTh, Alpha, reinforcement in np.array(list(itertools.product(MAXTH, ALPHA,
         file.write("time:"+str(datetime.now() - before)+"\n")
-        with open("FACEMOTOR_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"/FACEMOTOR_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"_"+str(before)+".pkl",'wb') as f:
         torch.save(model,"FACEMOTOR_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"/FACEMOTOR_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"_"+str(before)+".norse")
-        if (apply_compression):
-            pickle.dump([mean_losses,test_losses,accuracies,progressive_compression.weights,progressive_compression.compressions,progressive_compression.thresholds_p,progressive_compression.thresholds_n], f)
+        if (compression):
+            torch.save([mean_losses,test_losses,accuracies,progressive_compression.weights,progressive_compression.compressions,progressive_compression.thresholds_p,progressive_compression.thresholds_n],"FACEMOTOR_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"/FACEMOTOR_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"_"+str(before)+".pkl")
         else:
-            pickle.dump([mean_losses,test_losses,accuracies], f)
+            torch.save([mean_losses,test_losses,accuracies],"FACEMOTOR_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"/FACEMOTOR_CONV_maxTh:"+str(maxTh)+"_Alpha:"+str(Alpha)+"_"+"reinforcement:"+str(reinforcement)+"_"+str(before)+".pkl")
Binary image files under Norse/Caltech Face-Motor/FaceMotor/LearningSet/Face/:
image_0001.jpg (27.5 KiB)
image_0003.jpg (29.6 KiB)
image_0005.jpg (23.7 KiB)
image_0007.jpg (28.9 KiB)
image_0009.jpg (18.9 KiB)
image_0011.jpg (16.1 KiB)
image_0012.jpg (34.2 KiB)
image_0014.jpg (18.6 KiB)
image_0016.jpg (34.9 KiB)
image_0018.jpg (14.2 KiB)
image_0020.jpg (24.8 KiB)
image_0022.jpg (22.4 KiB)
image_0023.jpg (20.9 KiB)
image_0025.jpg (21.4 KiB)
image_0027.jpg (15.7 KiB)
image_0029.jpg (25.1 KiB)
