Commit 893adce2 authored by solenebernard

changes

parent 2393f11f
No preview for this file type
hill/
@@ -5,7 +5,8 @@ module load pytorch-gpu/py3/1.7.1
python main.py --label='70' \
--begin_step=1 \
--permutation_files='./models/permutation_files.npy' \
--serveur='no' \
--server='yes' \
--name_account='srp' \
--number_steps=1 \
--folder_model='./models/' \
--data_dir_prot='./experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/' \
@@ -51,6 +52,56 @@ python main.py --label='70' \
python main.py --label='70' \
--begin_step=2 \
--permutation_files='./models/permutation_files.npy' \
--server='yes' \
--name_account='srp' \
--number_steps=1 \
--folder_model='./models/' \
--data_dir_prot='./experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/' \
--data_dir_cover='/gpfswork/rech/srp/commun/JPEG_75_512/c_coeffs/' \
--data_dir_stego_0='/gpfswork/rech/srp/commun/JPEG_75_512/J_UNI_0_4_npy/' \
--cost_dir='/gpfswork/rech/srp/commun/JPEG_75_512/costs/' \
--strategy='minmax' \
--image_size=512 \
--QF=75 \
--train_size=4000 \
--valid_size=1000 \
--test_size=5000 \
--emb_rate=0.4 \
--model='xunet,srnet,efnet' \
--version_eff='b0' \
--stride=1 \
--batch_size_classif_ef=16 \
--batch_size_eval_ef=20 \
--epoch_num_ef=30 \
--CL_ef='yes' \
--start_emb_rate_ef=1.2 \
--pair_training_ef='no' \
--batch_size_classif_xu=30 \
--batch_size_eval_xu=30 \
--epoch_num_xu=30 \
--CL_xu='yes' \
--start_emb_rate_xu=1.2 \
--pair_training_xu='yes' \
--batch_size_classif_sr=16 \
--batch_size_eval_sr=30 \
--epoch_num_sr=40 \
--CL_sr='yes' \
--start_emb_rate_sr=1.4 \
--pair_training_sr='yes' \
--n_iter_max_backpack=200 \
--tau_0=5. \
--precision=0.01 \
--N_samples=2 \
--attack='SGE' \
--attack_last='no' \
--lr=0.01 \
--batch_adv=20
......
@@ -68,7 +68,7 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
image_size, QF, folder_model, permutation_files, version_eff, stride, n_loops,
model, train_size, valid_size, test_size, attack, attack_last, emb_rate,
batch_adv, n_iter_max_backpack, N_samples, tau_0, precision, lr,
num_of_threads, training_dictionnary, spatial, strategy, serveur):
num_of_threads, training_dictionnary, spatial, strategy, server, name_account):
n_images = train_size+valid_size+test_size
models = model.split(',')
@@ -94,9 +94,9 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
command = 'script_attack.py ' + \
custom_command('attack', iteration_step, model)
if(serveur):
if(server):
command += ' --idx_start=$SLURM_ARRAY_TASK_ID'
run_job('attack', label, command, iteration_step,
run_job('attack', label, command, iteration_step, name_account,
gpu=True, num_batch=num_batch)
wait(label)
else:
@@ -119,13 +119,13 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
+ ' --iteration_f=' + \
str(i)+' --iteration_adv='+str(iteration_step)
if(serveur):
if(server):
run_job('eval_'+str(my_model), label,
command, iteration_step, gpu=True)
command, iteration_step, name_account, gpu=True)
else:
os.system('python '+ command)
if(serveur):
if(server):
wait(label)
# GENERATION OF THE TRAIN DATA BASE
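
The hunks in this file all follow the same dispatch convention: when --server is 'yes', the command is wrapped in a SLURM job (now billed to name_account) and the protocol waits for the queue to drain; otherwise it is executed locally with os.system. Below is a self-contained sketch of that pattern for the evaluation step, with stub run_job/wait helpers standing in for the repo's real ones (which write and submit sbatch files).

import os

def run_job(mode, label, command, iteration, name_account, num_of_threads=None,
            num_batch=None, gpu=True):
    # Stub for the repo's run_job: the real one writes an sbatch file and submits it.
    print(f"[sbatch] {mode} {label} step {iteration} account={name_account}: {command}")

def wait(label):
    # Stub: the real helper blocks until all queued jobs tagged with `label` finish.
    pass

def dispatch_eval(server, my_model, label, iteration_step, name_account, command):
    # Same shape as the evaluation hunk above.
    if server:
        run_job('eval_' + str(my_model), label,
                command, iteration_step, name_account, gpu=True)
    else:
        os.system('python ' + command)
    if server:
        wait(label)
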
@@ -140,13 +140,13 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
command = 'script_train.py' + \
custom_command('train', iteration_step, my_model)
if(serveur):
run_job('train_'+my_model, label, command, iteration_step,
if(server):
run_job('train_'+my_model, label, command, iteration_step, name_account,
num_of_threads=num_of_threads, gpu=True)
else:
os.system('python '+ command)
if(serveur):
if(server):
wait(label)
# EVALUATE NEW CLASSIFIERS ON ALL STEGO DATA BASES AND ON COVER
@@ -164,13 +164,13 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
+ ' --iteration_f='+str(iteration_step) + \
' --iteration_adv='+str(i)
if(serveur):
if(server):
run_job('eval_'+str(model), label,
command, iteration_step, gpu=True)
command, iteration_step, name_account, gpu=True)
else:
os.system('python '+ command)
if(serveur):
if(server):
wait(label)
for my_model in models:
@@ -180,7 +180,7 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
return(True)
def run_protocol(begin_step, serveur, number_steps, label, data_dir_prot, data_dir_cover, data_dir_stego_0, cost_dir,
def run_protocol(begin_step, server, name_account, number_steps, label, data_dir_prot, data_dir_cover, data_dir_stego_0, cost_dir,
image_size, QF, folder_model, permutation_files, version_eff, stride, n_loops,
model, train_size, valid_size, test_size, attack, attack_last, emb_rate,
batch_adv, n_iter_max_backpack, N_samples, tau_0, precision, lr, strategy,
@@ -208,7 +208,7 @@ def run_protocol(begin_step, serveur, number_steps, label, data_dir_prot, data_d
image_size, QF, folder_model, permutation_files, version_eff, stride, n_loops,
model, train_size, valid_size, test_size, attack, attack_last, emb_rate,
batch_adv, n_iter_max_backpack, N_samples, tau_0, precision, lr,
num_of_threads, training_dictionnary, spatial, strategy, serveur)
num_of_threads, training_dictionnary, spatial, strategy, server, name_account)
iteration_step += 1
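
For orientation, here is a stripped-down view of the protocol loop this hunk belongs to: run_protocol now threads server and name_account down into every run_iteration call. The reduced signatures and the explicit loop bound are assumptions for readability; the real functions take the full hyper-parameter lists shown above.

# Illustrative only: reduced signatures; the real run_iteration/run_protocol take
# the full argument lists shown in the hunks above.
def run_iteration(iteration_step, label, server, name_account):
    print(f"step {iteration_step}: attack -> evaluate -> retrain "
          f"(server={server}, account={name_account})")
    return True

def run_protocol(begin_step, number_steps, label, server, name_account):
    iteration_step = begin_step
    # Loop bound is an assumption based on the --number_steps flag.
    while iteration_step < begin_step + number_steps:
        run_iteration(iteration_step, label, server, name_account)
        iteration_step += 1

run_protocol(begin_step=2, number_steps=1, label='70', server=False, name_account='srp')
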
@@ -217,7 +217,8 @@ if __name__ == '__main__':
argparser = argparse.ArgumentParser(sys.argv[0])
argparser.add_argument('--begin_step', type=int)
argparser.add_argument('--serveur', type=str, default='no')
argparser.add_argument('--server', type=str, default='no')
argparser.add_argument('--name_account', type=str, default='srp', help='If run on a server, name of the account from which to launch the computations')
argparser.add_argument('--number_steps', type=int, default=10)
argparser.add_argument('--folder_model', type=str,
help='The path to the folder where the architecture of models are saved')
@@ -301,6 +302,6 @@ if __name__ == '__main__':
params = argparser.parse_args()
params.serveur = params.serveur=='yes'
params.server = params.server=='yes'
run_protocol(**vars(params))
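
The launch script at the top of this commit passes --server='yes'/'no' and --name_account as strings; the sketch below shows the yes/no-to-boolean convention applied here before the namespace is splatted into run_protocol. Only the two new flags are declared; the real parser defines every option listed in the launch script.

import argparse
import sys

# Minimal sketch: only the two flags added in this commit are declared.
argparser = argparse.ArgumentParser(sys.argv[0])
argparser.add_argument('--server', type=str, default='no')
argparser.add_argument('--name_account', type=str, default='srp',
                       help='If run on a server, name of the account from which to launch the computations')

params = argparser.parse_args(['--server=yes', '--name_account=srp'])
params.server = params.server == 'yes'   # 'yes' -> True, anything else -> False
print(vars(params))                       # {'server': True, 'name_account': 'srp'}
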
@@ -77,11 +77,6 @@ def run_training(iteration_step, model, net, trainGlobalConfig, data_dir_prot, s
save_path=save_path, model_str=model, train_on_cost_map=train_on_cost_map)
print(f'{fitter.base_dir}')
# if(model == 'efnet'):
# # Load the pretrained model
# fitter.load(folder_model+version_eff+"-imagenet")
if(load_checkpoint is not None):
save_path_load = save_path
#save_path_load = data_dir_prot + 'train_'+model+'_'+str(iteration_step) +'/'
@@ -142,6 +137,7 @@ def train(iteration_step, model, folder_model, data_dir_prot, permutation_files,
train_valid_test_transforms = [
get_train_transforms(), get_valid_transforms(), None]
# TRAIN FIRST WITH COST MAPS
datasets = [DatasetRetriever(image_names, folder_model, QF, emb_rate, image_size,
data_dir_cover, data_dir_stego_0, cost_dir, data_dir_prot,
H1_filter, L1_filter, L2_filter, indexs_db, train_on_cost_map=True,
@@ -171,12 +167,26 @@ def train(iteration_step, model, folder_model, data_dir_prot, permutation_files,
start_emb_rate = emb_rate
# Train first with cost map
train, val, test = run_training(iteration_step, model, net, trainGlobalConfig, data_dir_prot, start_emb_rate,
emb_rate, pair_training, version_eff, load_model,
train_dataset, validation_dataset, test_dataset, folder_model, train_on_cost_map=True)
print(train, val, test)
# TRAIN THEN WITH STEGO IMAGES
if(model == 'efnet'):
net = get_net_ef(version_eff, stride).to(device)
trainGlobalConfig = TrainGlobalConfig_ef(
num_of_threads, batch_size_classif, epoch_num)
elif(model == 'xunet'):
net = get_net_xu(folder_model, n_loops, image_size).to(device)
trainGlobalConfig = TrainGlobalConfig_xu(
num_of_threads, batch_size_classif, epoch_num)
elif(model == 'srnet'):
net = get_net_sr(image_size).to(device)
net.init()
trainGlobalConfig = TrainGlobalConfig_sr(
num_of_threads, batch_size_classif, epoch_num)
datasets = [DatasetRetriever(image_names, folder_model, QF, emb_rate, image_size,
data_dir_cover, data_dir_stego_0, cost_dir, data_dir_prot,
H1_filter, L1_filter, L2_filter, indexs_db, train_on_cost_map=False,
......
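
The reworked training hunk above introduces a two-phase schedule: the classifier is first fitted with stego signals simulated from cost maps (train_on_cost_map=True), then a fresh network object of the same architecture is instantiated (the hunk is truncated, so whether phase-1 weights are reloaded is not shown here) and training is run again on pre-embedded stego images (train_on_cost_map=False). The schematic below only illustrates that control flow; the stand-in factories replace get_net_ef/get_net_xu/get_net_sr, DatasetRetriever and run_training, whose real argument lists appear in the hunk.

# Schematic only: stand-in factories replace get_net_ef/get_net_xu/get_net_sr,
# DatasetRetriever and run_training; argument lists are heavily reduced.
def build_net(model):
    print(f"fresh {model} network")
    return object()

def build_datasets(train_on_cost_map):
    source = "cost maps" if train_on_cost_map else "pre-embedded stego images"
    print(f"train/valid/test datasets drawing stegos from {source}")
    return ["train", "valid", "test"]

def run_training(model, net, datasets, train_on_cost_map):
    print(f"training {model} (train_on_cost_map={train_on_cost_map})")
    return (0.1, 0.2, 0.2)  # stand-in train/val/test results

model = 'srnet'

# Phase 1: train on cost maps.
net = build_net(model)
print(run_training(model, net, build_datasets(train_on_cost_map=True), True))

# Phase 2: re-instantiate the network, then train on stego images.
net = build_net(model)
print(run_training(model, net, build_datasets(train_on_cost_map=False), False))
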
@@ -120,7 +120,7 @@ def write_command(mode, iteration_step, model, data_dir_prot, data_dir_cover, da
return(com)
def run_job(mode, label, command, iteration, num_of_threads=None,
def run_job(mode, label, command, iteration, name_account, num_of_threads=None,
num_batch=None, gpu=True):
name = label + '_' + str(iteration) + '_' + mode
@@ -136,14 +136,17 @@ def run_job(mode, label, command, iteration, num_of_threads=None,
fh.writelines("#SBATCH --ntasks=1\n")
fh.writelines("#SBATCH --hint=nomultithread\n")
fh.writelines("#SBATCH --time=15:00:00 \n")
fh.writelines("#SBATCH --account=%s@gpu\n" % name_account)
else:
fh.writelines("#SBATCH --time=2:00:00 \n")
fh.writelines("#SBATCH --account=%s@cpu\n" % name_account)
if(mode == 'attack'):
fh.writelines("#SBATCH -C v100-32g\n")
fh.writelines("#SBATCH --array="+str(0)+'-'+str(num_batch)+" \n")
fh.writelines("module purge\n")
fh.writelines("module load pytorch-gpu/py3/1.7.1\n")
# WRITE LOAD MODULES
fh.writelines("python -u " + command)
......
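
With the new --account directives, the submission files written by run_job are billed to name_account on the GPU or CPU partition. Below is a minimal sketch of the header this logic produces, assuming name_account='srp'; the shebang is an assumption, ntasks/hint are written unconditionally here for brevity, and the job name, log paths and thread settings written by the real function are omitted.

def sbatch_header(mode, name_account, gpu=True, num_batch=None):
    # Condensed version of the directives written by run_job above.
    lines = ["#!/bin/bash",                      # assumed shebang, not shown in the hunk
             "#SBATCH --ntasks=1",
             "#SBATCH --hint=nomultithread"]
    if gpu:
        lines += ["#SBATCH --time=15:00:00",
                  "#SBATCH --account=%s@gpu" % name_account]
    else:
        lines += ["#SBATCH --time=2:00:00",
                  "#SBATCH --account=%s@cpu" % name_account]
    if mode == 'attack':
        lines += ["#SBATCH -C v100-32g",
                  "#SBATCH --array=0-%d" % num_batch]
    lines += ["module purge",
              "module load pytorch-gpu/py3/1.7.1"]
    return "\n".join(lines)

print(sbatch_header('attack', 'srp', gpu=True, num_batch=9))
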