diff --git a/CSNN-Simulator/apps/MnistForSpiNNaker.cpp b/CSNN-Simulator/apps/MnistForSpiNNaker.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..65ed956e4557cea4032000306c5683030eb1c010
--- /dev/null
+++ b/CSNN-Simulator/apps/MnistForSpiNNaker.cpp
@@ -0,0 +1,78 @@
+#include <cstdlib>
+#include <stdexcept>
+#include <string>
+
+#include "Experiment.h"
+#include "dataset/Mnist.h"
+#include "stdp/Simplified.h"
+#include "layer/Convolution.h"
+#include "Distribution.h"
+#include "execution/OptimizedLayerByLayer.h"
+
+int main(int argc, char** argv) {
+
+	if(argc < 3) {
+		throw std::runtime_error("Usage: MnistForSpiNNaker <do_prune> <do_reinforcement>");
+	}
+
+	std::string name = "mnist_spinn";
+	// Command-line flags: pass 1 to enable pruning / reinforcement.
+	float do_prune = atof(argv[1]);
+	float do_reinforcement = atof(argv[2]);
+
+	if(do_prune == 1){
+		name = name + "_pruned";
+	}
+	if(do_reinforcement == 1){
+		name = name + "_reinforced";
+	}
+
+	Experiment<OptimizedLayerByLayer> experiment(argc, argv, name);
+
+	experiment.template input<LatencyCoding>();
+
+	std::string input_path("../Datasets/MNIST/");
+
+	experiment.template add_train<dataset::Mnist>(input_path+"train-images.idx3-ubyte", input_path+"train-labels.idx1-ubyte");
+	experiment.template add_test<dataset::Mnist>(input_path+"t10k-images.idx3-ubyte", input_path+"t10k-labels.idx1-ubyte");
+
+	float th_lr = 1.0f;
+	float t_obj = 0.50f;
+	float alpha_p = 0.01f;
+	float alpha_n = 0.005f;
+	float beta_p = 1.5f;
+	float beta_n = 2.5f;
+	float prune_max_threshold = 0.7f;
+
+	auto& fc1 = experiment.template push_layer<layer::Convolution>("fc1", 28, 28, 400);
+	fc1.template parameter<float>("annealing").set(0.95f);
+	fc1.template parameter<float>("min_th").set(1.0f);
+	fc1.template parameter<float>("t_obj").set(t_obj);
+	fc1.template parameter<float>("lr_th").set(th_lr);
+	fc1.template parameter<float>("doPrune").set(do_prune);
+	fc1.template parameter<float>("doReinforcement").set(do_reinforcement);
+	fc1.template parameter<float>("prune_max_threshold").set(prune_max_threshold);
+	fc1.template parameter<Tensor<float>>("w").template distribution<distribution::Uniform>(0.0, 1.0);
+	fc1.template parameter<Tensor<float>>("th").template distribution<distribution::Gaussian>(8.0, 0.1);
+	fc1.template parameter<STDP>("stdp").template set<stdp::Simplified>(alpha_p, alpha_n, beta_p, beta_n);
+
+	auto& fc2 = experiment.template push_layer<layer::Convolution>("fc2", 1, 1, 1600);
+	fc2.template parameter<float>("annealing").set(0.95f);
+	fc2.template parameter<float>("min_th").set(1.0f);
+	fc2.template parameter<float>("t_obj").set(t_obj);
+	fc2.template parameter<float>("lr_th").set(th_lr);
+	fc2.template parameter<float>("doPrune").set(do_prune);
+	fc2.template parameter<float>("doReinforcement").set(do_reinforcement);
+	fc2.template parameter<float>("prune_max_threshold").set(prune_max_threshold);
+	fc2.template parameter<Tensor<float>>("w").template distribution<distribution::Uniform>(0.0, 1.0);
+	fc2.template parameter<Tensor<float>>("th").template distribution<distribution::Gaussian>(10.0, 0.1);
+	fc2.template parameter<STDP>("stdp").template set<stdp::Simplified>(alpha_p, alpha_n, beta_p, beta_n);
+
+	experiment.add_train_step(fc1, 25);
+	experiment.add_train_step(fc2, 25);
+
+	experiment.run(10000);
+
+	return experiment.wait();
+}
\ No newline at end of file
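The app above encodes MNIST with `LatencyCoding`, and the notebook later calls `convert_to_latency_code_V4` for the same purpose. As a rough illustration of what latency coding usually computes, here is a minimal Python sketch; the exact CSNN-Simulator implementation may differ, and the `latency_encode` name and its `duration` default are hypothetical:

```python
import numpy as np

def latency_encode(image, duration=20.0):
    """Map each pixel intensity in [0, 1] to one spike time:
    brighter pixels fire earlier, zero pixels stay silent."""
    flat = np.asarray(image, dtype=float).reshape(-1)
    times = (1.0 - flat) * duration   # intensity 1.0 -> spike at t = 0
    times[flat <= 0.0] = np.inf       # no spike for fully dark pixels
    return times
```

The key property is that the encoding preserves intensity ordering as spike-time ordering, which is what STDP-based layers exploit downstream.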
diff --git a/README.md b/README.md
index 44d2c1053eab314deb0e8ce1770479b1eb814671..16d49939ad761ef238ae6a8a405ace6f6e25b8a8 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@ Run the following commands inside the CSNN folder:
 
 Remember to build again if you change the source code.
 
 ### How to use CSNN
-Once the `make` command is finished, you should see binary files which represent each simulation.
+Once the `make` command has finished, you should see one binary file for each simulation.
 
 Run a simulation:
@@ -34,12 +34,20 @@ For example: to run MNIST simulation without compression and reinforcement:
 
     ./Mnist 0 0
 
-In the `apps` folder, you find the source code for each simulation where you can change the architecture, the network parameters, or activate the [layerwise compression](https://gitlab.univ-lille.fr/hammouda.elbez/progressive-layer-based-compression-for-convolutional-spiking-neural-network/-/blob/main/CSNN-Simulator/apps/Mnist.cpp#L21).
+In the `apps` folder, you will find the source code for each simulation, where you can change the architecture and the network parameters or activate the [layerwise compression](https://gitlab.univ-lille.fr/hammouda.elbez/progressive-layer-based-compression-for-convolutional-spiking-neural-network/-/blob/main/CSNN-Simulator/apps/Mnist.cpp#L21).
 
 ## Going from CSNN to SpiNNaker
 
+First, run the baseline simulation of the two dense layers with CSNN:
+
+    ./MnistForSpiNNaker 0 0
+
+then run the compressed (pruned and reinforced) version:
+
+    ./MnistForSpiNNaker 1 1
+
 To transfer the learned weights from CSNN to SpiNNaker, we use the following command:
 
-    ./Weight_extractor [binary file geenrated from a simulation] [name_layer]
+    ./Weight_extractor [binary file generated from a simulation] [name_layer]
 
 For example:
diff --git a/SpiNNaker/README.MD b/SpiNNaker/README.MD
new file mode 100644
index 0000000000000000000000000000000000000000..5cd2309fb6ce4edfe3ae1a9f77b199b4efd2f9af
--- /dev/null
+++ b/SpiNNaker/README.MD
@@ -0,0 +1,18 @@
+## File naming
+To use the provided scripts, make sure to rename the binary weight files extracted from the csnn-simulator as follows.
+
+As an example, take the following architecture for the MNIST dataset:
+
+`Input(784) x Fc1(400) x Fc2(1600)`
+
+
+    weights_fc1_X:
+        Weights between the Input and Fc1 layers
+    weights_fc2_X:
+        Weights between the Fc1 and Fc2 layers
+
+    X = type of network [baseline or compressed]
+
+>`binary files`: produced by the csnn-simulator (Weight Extractor) and <span style="color:red;">need to be renamed by the user</span>
+
+>`.txt files`: the generated connection files for sPyNNaker
\ No newline at end of file
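On the sPyNNaker side, connection `.txt` files like the ones named above are typically loaded as explicit connection lists. A minimal Python sketch, assuming each row of the file holds `pre_index post_index weight delay`; the file name `weights_fc1_baseline.txt` and this column layout are assumptions for illustration, not confirmed by the repository:

```python
import numpy as np
import pyNN.spiNNaker as sim

sim.setup(timestep=1.0)

# Populations mirroring the first stage of the architecture: Input(784) -> Fc1(400)
inp = sim.Population(784, sim.SpikeSourceArray(spike_times=[[] for _ in range(784)]))
fc1 = sim.Population(400, sim.IF_curr_exp(), label="fc1")

# Assumed row layout: pre_index  post_index  weight  delay
rows = np.loadtxt("weights_fc1_baseline.txt")
conns = [(int(i), int(j), w, d) for i, j, w, d in rows]
sim.Projection(inp, fc1, sim.FromListConnector(conns),
               synapse_type=sim.StaticSynapse())
```

`FromListConnector` takes the weight and delay of each synapse directly from the tuples, so the learned CSNN weights are reproduced one-to-one on the board.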
mnistTest.data[0:10], the first 10 digits of the MNIST test set) to convert_to_latency_code_V4 generates the SpikeSourceArray for the input population"
+   ]
+  },
  {
   "cell_type": "code",
   "execution_count": 7,
@@ -75,7 +89,6 @@
    }
   ],
   "source": [
-    "# Dataset and preparation\n",
    "transform=transforms.Compose([\n",
    "    transforms.Resize((28,28)),\n",
    "    transforms.Grayscale(),\n",
@@ -90,6 +103,16 @@
    "runtime =time"
   ]
  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Main loop\n",
+    "#### Calls sim.run for each value of **tau_refrac** produced by range(5,10,5)\n",
+    "#### The surrounding loops can likewise be used to study the effect of other simulator parameters (number_of_neurons_per_core, time_step, timeScaleFactor) or neuron parameters\n",
+    "#### weights_files (generated from the CSNN binary weight files) is used to load the text files that define the connections between the populations"
+   ]
+  },
  {
   "cell_type": "code",
   "execution_count": null,
@@ -687,7 +710,6 @@
    "    if not os.path.exists(f'{path}/{str(now)}/{param_name}_{str(param_val)}/{str(time)}'): # 2\n",
    "        os.mkdir(f'{path}/{str(now)}/{param_name}_{str(param_val)}/{str(time)}')\n",
    "\n",
-    "    total_mem=[]\n",
    "    spikes_count=[[] for _ in range(len(tests_name))]\n",
    "    for n_test, test_name in enumerate(tests_name):\n",
    "\n",
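For readers who want the shape of that main loop without opening the notebook, here is a compact Python sketch of the sweep it describes; the network below is a reduced stand-in (Input to Fc1 only), `runtime = 200` is an illustrative value where the notebook derives it from the encoded spike times, and note that range(5,10,5) yields the single value 5:

```python
import pyNN.spiNNaker as sim

runtime = 200  # ms; illustrative, the notebook computes this from the encoding

# Sweep tau_refrac as the notebook's range(5,10,5) does (one iteration here).
for tau_refrac in range(5, 10, 5):
    sim.setup(timestep=1.0)

    # Reduced stand-in for the full Input(784) x Fc1(400) x Fc2(1600) network;
    # the notebook instead loads the weights_fc*_*.txt connection lists.
    inp = sim.Population(784, sim.SpikeSourceArray(spike_times=[[1.0]] * 784))
    fc1 = sim.Population(400, sim.IF_curr_exp(tau_refrac=tau_refrac))
    sim.Projection(inp, fc1, sim.AllToAllConnector(),
                   synapse_type=sim.StaticSynapse(weight=0.1))

    fc1.record("spikes")
    sim.run(runtime)
    spikes = fc1.get_data("spikes")   # per-run spike counts feed spikes_count
    sim.end()
```

The same pattern extends to the other swept parameters (number_of_neurons_per_core, time_step, timeScaleFactor): each candidate value gets its own setup/run/end cycle, and the recorded spikes are written under a per-parameter results directory.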