Simon Majorczyk / BigData · Commits · 6df505c5

Commit 6df505c5, authored 3 months ago by Mohamed Sebabti
prediction
Parent: 04dd3a52
Showing 2 changed files, with 128,017 additions and 63 deletions:

  predictions_final.csv  +127,980 −0
  recup_predict.py       +37 −63
predictions_final.csv (new file, mode 100644)  +127,980 −0

[Diff collapsed: 127,980 added lines not shown.]
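Given the `to_csv` call in `recup_predict.py` below, this file should contain two columns, `id` and `final_prediction`. A minimal spot-check sketch, assuming the CSV sits in the current working directory:

import pandas as pd

# Load the committed predictions and check shape and columns.
preds = pd.read_csv("predictions_final.csv")
print(preds.shape)             # 127,980 added lines suggest ~127,979 data rows plus a header
print(preds.columns.tolist())  # expected: ['id', 'final_prediction']
print(preds.head())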
recup_predict.py  +37 −63
@@ -3,63 +3,22 @@ import pandas as pd
 import pickle
 from sklearn.preprocessing import StandardScaler
 
-# 📂 Load the full dataset
-df = pd.read_csv("data_sup_0popularity.csv")
+# 📂 Load the datasets
+df_sup0 = pd.read_csv("data_sup_0popularity.csv")
+df_binaire = pd.read_csv("data_binaire.csv")
 
-# 📂 Load the model and the scaler
+# 📂 Load the models and scalers
 with open("random_forest_model_sup0.pkl", "rb") as model_file:
-    rf = pickle.load(model_file)
+    model_sup0 = pickle.load(model_file)
 with open("scaler_sup0.pkl", "rb") as scaler_file:
-    scaler = pickle.load(scaler_file)
-
-# ✅ Check that the expected features are present
-expected_features = ['year', 'acousticness', 'danceability', 'energy', 'explicit',
-                     'instrumentalness', 'key', 'liveness', 'loudness', 'mode',
-                     'speechiness', 'tempo', 'valence', 'nb_caracteres_sans_espaces',
-                     'nb_artistes', 'featuring', 'duree_minute', 'categorie_annee',
-                     'categorie_tempo']
-
-# ⚠️ Check whether any columns are missing
-missing_features = [col for col in expected_features if col not in df.columns]
-if missing_features:
-    raise ValueError(f"⚠️ These columns are missing from the dataset: {missing_features}")
-
-# 🎯 Extract only the ID and the features
-X = df[expected_features]
-ids = df['id']  # Keep the ID aside
-
-# 📏 Apply the scaler to the features
-X_scaled = scaler.transform(X)
-
-# 🔮 Make the predictions
-predictions = rf.predict(X_scaled)
-
-# 📊 Build a DataFrame with only the ID and the prediction
-df_results = pd.DataFrame({"id": ids, "predictions": predictions})
-
-# 💾 Save the DataFrame with only ID + predictions to CSV
-df_results.to_csv("predictions_sup0.csv", index=False)
-
-# 📊 Print the first rows to check
-print(df_results.head(10))
-print("\n✅ Predictions saved to 'predictions_sup0.csv' with only ID and prediction!")
-
-import numpy as np
-import pandas as pd
-import pickle
-from sklearn.preprocessing import StandardScaler
-
-# 📂 Load the full dataset
-df = pd.read_csv("data_binaire.csv")  # Make sure you have the right file
-
-# 📂 Load the model and the scaler
+    scaler_sup0 = pickle.load(scaler_file)
 with open("random_forest_model_binaire.pkl", "rb") as model_file:
-    rf = pickle.load(model_file)
+    model_binaire = pickle.load(model_file)
 with open("scaler_binaire.pkl", "rb") as scaler_file:
-    scaler = pickle.load(scaler_file)
+    scaler_binaire = pickle.load(scaler_file)
 
 # ✅ Check that the expected features are present
 expected_features = ['year', 'acousticness', 'danceability', 'energy', 'explicit',
@@ -67,28 +26,43 @@ expected_features = ['year', 'acousticness', 'danceability', 'energy', 'explicit
                      'speechiness', 'tempo', 'valence', 'nb_caracteres_sans_espaces',
                      'nb_artistes', 'featuring', 'duree_minute', 'categorie_annee',
                      'categorie_tempo']
 
 # ⚠️ Check whether any columns are missing
-missing_features = [col for col in expected_features if col not in df.columns]
-if missing_features:
-    raise ValueError(f"⚠️ These columns are missing from the dataset: {missing_features}")
+for df, name in zip([df_sup0, df_binaire], ["sup0", "binaire"]):
+    missing_features = [col for col in expected_features if col not in df.columns]
+    if missing_features:
+        raise ValueError(f"⚠️ These columns are missing from the {name} dataset: {missing_features}")
 
 # 🎯 Extract only the ID and the features
-X = df[expected_features]
-ids = df['id']  # Keep the ID aside
+X_sup0 = df_sup0[expected_features]
+ids_sup0 = df_sup0['id']
+X_binaire = df_binaire[expected_features]
+ids_binaire = df_binaire['id']
 
 # 📏 Apply the scaler to the features
-X_scaled = scaler.transform(X)
+X_sup0_scaled = scaler_sup0.transform(X_sup0)
+X_binaire_scaled = scaler_binaire.transform(X_binaire)
 
-# 🔮 Make the predictions (0 or 1)
-predictions = rf.predict(X_scaled)
+# 🔮 Make the predictions
+predictions_sup0 = model_sup0.predict(X_sup0_scaled)
+predictions_binaire = model_binaire.predict(X_binaire_scaled).astype(int)  # Cast to int for the binary model
 
-# 📊 Build a DataFrame with only the ID and the prediction
-df_results = pd.DataFrame({"id": ids, "predictions": predictions.astype(int)})  # Cast to int (0 or 1)
+# 📊 Merge the results into a single DataFrame
+df_results = pd.DataFrame({"id": ids_binaire, "predictions_binaire": predictions_binaire}).merge(
+    pd.DataFrame({"id": ids_sup0, "predictions_sup0": predictions_sup0}),
+    on="id",
+    how="outer"  # Merge, keeping all IDs
+)
+
+# 🛠️ Apply the requested logic
+df_results["final_prediction"] = df_results.apply(
+    lambda row: row["predictions_sup0"] if row["predictions_binaire"] == 1 else 0,
+    axis=1
+)
 
-# 💾 Save the DataFrame with only ID + predictions to CSV
-df_results.to_csv("predictions_binaire.csv", index=False)
+# 💾 Save the predictions to a single file
+df_results[["id", "final_prediction"]].to_csv("predictions_final.csv", index=False)
 
 # 📊 Print the first rows to check
 print(df_results.head(10))
-print("\n✅ Predictions saved to 'predictions_binaire.csv' with only ID and prediction!")
+print("\n✅ All predictions saved to 'predictions_final.csv' with the requested logic!")
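A note on the gating step above: the row-wise `apply` lambda works, but on a frame of this size (~128k rows) the same rule vectorizes with `numpy.where`, which is typically much faster. A minimal sketch with made-up data (column names follow the diff; the values are illustrative only):

import numpy as np
import pandas as pd

# Made-up stand-in for df_results after the outer merge above.
df_results = pd.DataFrame({
    "id": ["a", "b", "c"],
    "predictions_binaire": [1, 0, 1],
    "predictions_sup0": [42.0, 7.0, 13.0],
})

# Keep the sup0 prediction where the binary model predicts 1, else 0.
# NaN values of predictions_binaire (IDs present in only one file after
# the outer merge) compare unequal to 1, so they also fall back to 0,
# matching the lambda's behaviour.
df_results["final_prediction"] = np.where(
    df_results["predictions_binaire"] == 1,
    df_results["predictions_sup0"],
    0,
)

print(df_results)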