diff --git a/src/Modules/Neuron/layout.py b/src/Modules/Neuron/layout.py
index d65d410e4d1e0c0096693534086a9930ac8bc34b..b0679f189c213a9f237e1bf14ccd273c3d7e485d 100755
--- a/src/Modules/Neuron/layout.py
+++ b/src/Modules/Neuron/layout.py
@@ -69,8 +69,6 @@ class layout(layoutOp):
                                     options=[{'label': str(i), 'value': str(i)} for i in (
                                         i for i in self.g.Layer_Neuron if i != "Input")],
                                     multi=False,
-                                    value=[{'label': str(i), 'value': str(i)} for i in (
-                                        i for i in self.g.Layer_Neuron if i != "Input")][0]["value"],
                                     style={'width': '150px', "marginLeft": "10px", "textAlign": "start"}),
                                 dcc.Dropdown(
                                     id='NeuronFilterNeuron',
diff --git a/src/Modules/Synapse/spark.py b/src/Modules/Synapse/spark.py
index 8bc9edf62640b028493b194b1375eb5677627e59..68db8c7a3890c6f449ab3055e93d57e50f6370ec 100755
--- a/src/Modules/Synapse/spark.py
+++ b/src/Modules/Synapse/spark.py
@@ -1,6 +1,7 @@
 """ Spark pre-processing operations.
 """
 
+import pandas as pd
 import pymongo
 import traceback
 from pyspark.sql import functions as F
@@ -33,23 +34,26 @@ class spark(sparkOp):
                 if self.g.sparkSession == None:
                     self.g.createSparkSession()
                 # --------------------------------------------------
-                df = self.g.sparkSession.read.format("com.mongodb.spark.sql") \
-                    .option("spark.mongodb.input.uri", self.MONGODBURL + self.g.name + "."+self.DOCUMENT_NAME+"?authSource=admin&readPreference=primaryPreferred") \
-                    .option("pipeline", "[{ $sort: { T: 1 } },{$group : { _id : {To:'$To', C:'$C', index:'$index', L:'$L'}, T : { $last: '$T'},V : { $last: '$V'} } }]")
-
-                df = df.load()
-            
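+                # Sort by timestamp, then keep the last (T, V) per synapse key (To, C, index, L)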
+                col = pymongo.collection.Collection(self.g.db, self.DOCUMENT_NAME)
+                globalSynapseWeights = col.aggregate([
+                    {"$sort": {"T": 1}},
+                    {"$group": {"_id": {"To": "$To", "C": "$C", "index": "$index", "L": "$L"},
+                                "T": {"$last": "$T"},
+                                "V": {"$last": "$V"}}}])
+
                 # Data save into MongoDB ---------------------------------
-
-                df.write.format("com.mongodb.spark.sql.DefaultSource") \
-                    .option("spark.mongodb.output.uri",
-                            self.MONGODBURL + self.g.name + "."+self.OUTPUT_DOCUMENT_NAME+"?authSource=admin&readPreference=primaryPreferred").mode('append').save()
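+                # Each aggregated row is inserted as-is; its _id is the compound group key used by the indexes below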
+                col = pymongo.collection.Collection(self.g.db, self.OUTPUT_DOCUMENT_NAME)
+                globalSynapseWeights = pd.DataFrame(list(globalSynapseWeights))
+                # insert_many() raises InvalidOperation when given an empty document list
+                if not globalSynapseWeights.empty:
+                    col.insert_many(globalSynapseWeights.to_dict('records'))
 
                 # Indexes creation ---------------------------------------
 
                 print("Indexes creation (please wait...)")
 
-                col = pymongo.collection.Collection(self.g.db, self.OUTPUT_DOCUMENT_NAME)
                 col.create_index([("_id.L", 1)])
                 col.create_index([("_id", 1)])
                 col.create_index([("_id.To", 1),("_id.C", 1)])