diff --git a/.gitattributes b/.gitattributes
index 7dbbbeba6f1ba99bb69d3cb54ac22a1f38bbeffc..2f857663850f1bb2335867d6095cad7108c66df2 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,2 +1,4 @@
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
+*imagenet filter=lfs diff=lfs merge=lfs -text
+*zip filter=lfs diff=lfs merge=lfs -text
diff --git a/__pycache__/backpack.cpython-38.pyc b/__pycache__/backpack.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48c5dc6eba19b6ba709b597f0ee0cd8646602c82
Binary files /dev/null and b/__pycache__/backpack.cpython-38.pyc differ
diff --git a/__pycache__/data_loader.cpython-38.pyc b/__pycache__/data_loader.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7f4807451086de42aa55bfe620f05a77c1ee05c
Binary files /dev/null and b/__pycache__/data_loader.cpython-38.pyc differ
diff --git a/__pycache__/double_tanh.cpython-38.pyc b/__pycache__/double_tanh.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1017b16cbe59177059966465e3bec7fb9ae0066a
Binary files /dev/null and b/__pycache__/double_tanh.cpython-38.pyc differ
diff --git a/__pycache__/generate_train_db.cpython-38.pyc b/__pycache__/generate_train_db.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa5f977257241ff5679a465518c66046665bdd48
Binary files /dev/null and b/__pycache__/generate_train_db.cpython-38.pyc differ
diff --git a/__pycache__/tools_stegano.cpython-38.pyc b/__pycache__/tools_stegano.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..437e01bf79242aeb640767b4d7fff3175e987ad6
Binary files /dev/null and b/__pycache__/tools_stegano.cpython-38.pyc differ
diff --git a/__pycache__/write_description.cpython-38.pyc b/__pycache__/write_description.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d0a26193f0ec39519b2a11f3ae5ca76699f3805c
Binary files /dev/null and b/__pycache__/write_description.cpython-38.pyc differ
diff --git a/__pycache__/write_jobs.cpython-38.pyc b/__pycache__/write_jobs.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8546e5017605e0935f03a36dfc297ee3cdf3b1f
Binary files /dev/null and b/__pycache__/write_jobs.cpython-38.pyc differ
diff --git a/backpack.py b/backpack.py
index d741fad2388a9cbba37d26e8ac811f0f81d0032b..7d2b35627fb7dda8b5398484b4aaf58c73201c16 100644
--- a/backpack.py
+++ b/backpack.py
@@ -11,6 +11,7 @@ class BackPack(nn.Module):
     def __init__(self, image_size, QF, folder_model, c_coeffs, rho_0, entropy, N, nets, ecdf_list, attack):
         super(BackPack, self).__init__()
         self.net_decompress = IDCT8_Net(image_size, QF, folder_model)
+        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
         self.N = N
         self.entropy = torch.tensor(entropy)
         self.im_size = image_size
@@ -20,7 +21,7 @@ class BackPack(nn.Module):
         self.spatial_cover = self.net_decompress.forward(
             torch.reshape(self.c_coeffs, (1, 1, self.im_size, self.im_size)))/255
         self.nets = nets
-        proba_cover = [torch.nn.Softmax(dim=1)(net.forward(self.spatial_cover.cuda().float()))[0, 1]
+        proba_cover = [torch.nn.Softmax(dim=1)(net.forward(self.spatial_cover.to(self.device).float()))[0, 1]
                        for net in nets]
         self.proba_cover = np.array(
             [ecdf(x.cpu().detach().numpy()) for x, ecdf in zip(proba_cover, ecdf_list)])
@@ -53,7 +54,7 @@ class BackPack(nn.Module):
             self.c_coeffs+b, (self.N, 1, self.im_size, self.im_size))
         spatial_image_soft = self.net_decompress.forward(stego_soft)/255
-        logits = [torch.reshape(net.forward(spatial_image_soft.cuda().float()), (1, self.N, 2))
+        logits = [torch.reshape(net.forward(spatial_image_soft.to(self.device).float()), (1, self.N, 2))
                   for net in self.nets]
         logits = torch.cat(logits)
         probas_soft = torch.nn.Softmax(dim=2)(logits)[:, :, -1]
@@ -70,7 +71,7 @@ class BackPack(nn.Module):
         stego_hard = torch.reshape(
             self.c_coeffs+b_hard, (self.N, 1, self.im_size, self.im_size))
         spatial_image_hard = self.net_decompress.forward(stego_hard)/255
-        logits = [torch.reshape(net.forward(spatial_image_hard.cuda().float()), (1, self.N, 2))
+        logits = [torch.reshape(net.forward(spatial_image_hard.to(self.device).float()), (1, self.N, 2))
                   for net in self.nets]
         logits = torch.cat(logits)
         probas_hard = torch.nn.Softmax(dim=2)(logits)[:, :, -1]
diff --git a/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/data_train_1/index.npy b/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/data_train_1/index.npy
index d955b641fe68d7e7d16dc888e0f32fefcb02ea32..c5964c066ff5dfe00013dada4286874e4d560e2b 100644
Binary files a/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/data_train_1/index.npy and b/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/data_train_1/index.npy differ
diff --git a/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/data_train_2/index.npy b/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/data_train_2/index.npy
index 071c23a508bab5f7e26bac4609243e0f5e8434cd..64ca831a08a6b5c43d6f8ffab185c35b1b07c1e2 100644
Binary files a/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/data_train_2/index.npy and b/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/data_train_2/index.npy differ
diff --git a/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/description.txt b/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/description.txt
index 47ba2c0ea8ab122c0b7c75d881ebbea86790a8a7..a5d9dd8c92228eeb229cbf5306f843bb9da6fdf4 100644
--- a/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/description.txt
+++ b/experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/description.txt
@@ -1,4 +1,4 @@
-Jun-10-2021Launch of the protocol, starting from iteration 6 to 7
+Jul-18-2021Launch of the protocol, starting from iteration 1 to 2
 
 Number of CPUs called : 10
 
 PARAMETERS
@@ -6,15 +6,15 @@ Image characteristics
 - QF = 75
 - Image size = 512
 - Embedding rate = 0.4 bpnzAC
-- Cover images are taken in folder /gpfswork/rech/srp/commun/JPEG_75_512/c_coeffs/
-- Stego images are taken in folder /gpfswork/rech/srp/commun/JPEG_75_512/J_UNI_0_4_npy/
-- Cost maps are taken in folder /gpfswork/rech/srp/commun/JPEG_75_512/costs/
+- Cover images are taken in folder ./experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/cover/c_coeffs/
+- Stego images are taken in folder ./experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/data_adv_0/J_UNI_0_4_npy/
+- Cost maps are taken in folder ./experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/cover/costs/
 
 Protocol setup
 - Strategy =minmax
 
 Model description
-- The 3 model architectures are efnet,xunet,srnet with the following setup :
+- The 3 model architectures are xunet,srnet,efnet with the following setup :
 - Efficient-net version is b0 pretrained on image-net
 - First conv stem is with stride = 1
 
@@ -24,26 +24,26 @@ Training setup
 - Train size = 4000
 - Valid size = 1000
 - Test size = 5000
-- Files permutation, which order determines train, valid and test sets is /gpfswork/rech/srp/commun/python3/tifs_protocol_efficientnet/models/permutation_files.npy
-- Model efnet is trained during 30 epochs
-- Pair training is not used
-- Batch size is 8
-- Curriculum is used : the embedding rate starts from 1.0 and decreases every two epochs by factor 0.9 to reach target embedding rate 0.4
-
+- Files permutation, which order determines train, valid and test sets is ./models/permutation_files.npy
 - Model xunet is trained during 30 epochs
 - Pair training is used
-- Batch size is 2*32
+- Batch size is 2*30
 - Curriculum is used : the embedding rate starts from 1.2 and decreases every two epochs by factor 0.9 to reach target embedding rate 0.4
 
-- Model srnet is trained during 30 epochs
+- Model srnet is trained during 40 epochs
 - Pair training is used
 - Batch size is 2*16
-- Curriculum is used : the embedding rate starts from 1.0 and decreases every two epochs by factor 0.9 to reach target embedding rate 0.4
+- Curriculum is used : the embedding rate starts from 1.4 and decreases every two epochs by factor 0.9 to reach target embedding rate 0.4
+
+- Model efnet is trained during 30 epochs
+- Pair training is not used
+- Batch size is 16
+- Curriculum is used : the embedding rate starts from 1.2 and decreases every two epochs by factor 0.9 to reach target embedding rate 0.4
 
 Attack setup
 - The smoothing function is SGE
-- Maximum number of steps is 2000
-- Number of samples is 1
-- Tau is initialized with value 10.0 and decreases by factor 0.5 when needed
+- Maximum number of steps is 200
+- Number of samples is 2
+- Tau is initialized with value 5.0 and decreases by factor 0.5 when needed
 - The exit condition is required to be respected with precision = 0.01
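
For reference, the curriculum lines above amount to the following schedule, shown here as a minimal sketch; the helper name curriculum_emb_rate and its defaults are illustrative assumptions, not code from this patch (the defaults match the xunet/efnet setup listed above):

    def curriculum_emb_rate(epoch, start_rate=1.2, target_rate=0.4, factor=0.9):
        # Embedding rate used at a given epoch: start_rate decays by `factor`
        # every two epochs and is clipped at the target rate.
        return max(start_rate * factor ** (epoch // 2), target_rate)

    # e.g. epochs 0-1 -> 1.2, epochs 2-3 -> 1.08, epochs 4-5 -> 0.972, ... down to 0.4
    rates = [curriculum_emb_rate(e) for e in range(30)]
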
diff --git a/job.slurm b/job.slurm
index 469cfff9dfb241186e42819f249c933f02e367a2..f66305468033701135dc7fb9d0deaf2deb70d834 100644
--- a/job.slurm
+++ b/job.slurm
@@ -4,15 +4,20 @@
 module load pytorch-gpu/py3/1.7.1
 
 python main.py --label='70' \
     --begin_step=1 \
+    --permutation_files='./models/permutation_files.npy' \
+    --serveur='no' \
     --number_steps=1 \
-    --folder_model= './models/' \
-    --data_dir_prot='./experiment/' \
-    --data_dir_cover= \
-    --data_dir_stego_0= \
-    --cost_dir= \
+    --folder_model='./models/' \
+    --data_dir_prot='./experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/' \
+    --data_dir_cover='./experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/cover/c_coeffs/' \
+    --data_dir_stego_0='./experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/data_adv_0/J_UNI_0_4_npy/' \
+    --cost_dir='./experience_512_75_0_4_SGE_efnetB0s1_xunet_srnet/cover/costs/' \
     --strategy='minmax' \
     --image_size=512 \
     --QF=75 \
+    --train_size=4000 \
+    --valid_size=1000 \
+    --test_size=5000 \
     --emb_rate=0.4 \
     --model='xunet,srnet,efnet' \
     --version_eff='b0' \
@@ -41,7 +46,9 @@
     --N_samples=2 \
     --attack='SGE' \
     --attack_last='no' \
-    --lr=0.01
+    --lr=0.01 \
+    --batch_adv=10000
+
diff --git a/main.py b/main.py
index fffb76f41fc1f42297ca29c6f85d7999e02c3ef4..9088f2da1ea58c977c610929f6bf33d0f54301a4 100644
--- a/main.py
+++ b/main.py
@@ -68,7 +68,7 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
                   image_size, QF, folder_model, permutation_files, version_eff, stride,
                   n_loops, model, train_size, valid_size, test_size, attack, attack_last,
                   emb_rate, batch_adv, n_iter_max_backpack, N_samples, tau_0, precision, lr,
-                  num_of_threads, training_dictionnary, spatial, strategy):
+                  num_of_threads, training_dictionnary, spatial, strategy, serveur):
 
     n_images = train_size+valid_size+test_size
     models = model.split(',')
@@ -80,19 +80,27 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
                              batch_adv, n_iter_max_backpack, N_samples, tau_0, precision,
                              lr, num_of_threads, training_dictionnary, spatial))
 
-    if(iteration_step > 0):
+    if(iteration_step==0):
+        create_folder(data_dir_prot, ['data_adv_0', 'cover'])
+
+    else:
         # GENERATE ADV DATA BASE OF THE BEST LAST CLASSIFIER
         directory_adv = data_dir_prot+'data_adv_'+str(iteration_step)+'/'
-        create_folder(directory_adv, ['adv_final', 'adv_cost', ])
+        create_folder(directory_adv, ['adv_final', 'adv_cost'])
         print('Generating adv step ' + str(iteration_step))
         num_batch = n_images // batch_adv
         command = 'script_attack.py ' + \
             custom_command('attack', iteration_step, model)
-        run_job('attack', label, command, iteration_step,
-                gpu=True, num_batch=num_batch)
-        wait(label)
+
+        if(serveur):
+            command += ' --idx_start=$SLURM_ARRAY_TASK_ID'
+            run_job('attack', label, command, iteration_step,
+                    gpu=True, num_batch=num_batch)
+            wait(label)
+        else:
+            os.system('python '+ command + ' --idx_start=0')
 
     # EVALUATION OF ALL THE CLASSIFIERS ON THE NEW ADV DATA BASE
     for i in range(iteration_step):
@@ -110,10 +118,15 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
             command = 'script_evaluate_classif.py' + custom_command('classif', iteration_step, my_model) \
                 + ' --iteration_f=' + \
                 str(i)+' --iteration_adv='+str(iteration_step)
-            run_job('eval_'+str(my_model), label,
-                    command, iteration_step, gpu=True)
-    wait(label)
+            if(serveur):
+                run_job('eval_'+str(my_model), label,
+                        command, iteration_step, gpu=True)
+            else:
+                os.system('python '+ command)
+
+    if(serveur):
+        wait(label)
 
     # GENERATION OF THE TRAIN DATA BASE
     generate_train_db(iteration_step, strategy, models,
@@ -126,10 +139,15 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
                        'train_'+my_model+'_'+str(iteration_step)])
         command = 'script_train.py' + \
             custom_command('train', iteration_step, my_model)
-        run_job('train_'+my_model, label, command, iteration_step,
-                num_of_threads=num_of_threads, gpu=True)
-
-    wait(label)
+
+        if(serveur):
+            run_job('train_'+my_model, label, command, iteration_step,
+                    num_of_threads=num_of_threads, gpu=True)
+        else:
+            os.system('python '+ command)
+
+    if(serveur):
+        wait(label)
 
     # EVALUATE NEW CLASSIFIERS ON ALL STEGO DATA BASES AND ON COVER
     for i in range(-1, iteration_step+1):  # -1 for cover
@@ -145,10 +163,15 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
             command = 'script_evaluate_classif.py' + custom_command('classif', iteration_step, my_model) \
                 + ' --iteration_f='+str(iteration_step) + \
                 ' --iteration_adv='+str(i)
-            run_job('eval_'+str(model), label,
+
+            if(serveur):
+                run_job('eval_'+str(model), label,
                     command, iteration_step, gpu=True)
+            else:
+                os.system('python '+ command)
 
-    wait(label)
+    if(serveur):
+        wait(label)
 
     for my_model in models:
         print(my_model, p_error(iteration_step, my_model,
@@ -157,7 +180,7 @@ def run_iteration(iteration_step, label, data_dir_prot, data_dir_cover, data_dir
     return(True)
 
 
-def run_protocol(begin_step, number_steps, label, data_dir_prot, data_dir_cover, data_dir_stego_0, cost_dir,
+def run_protocol(begin_step, serveur, number_steps, label, data_dir_prot, data_dir_cover, data_dir_stego_0, cost_dir,
                  image_size, QF, folder_model, permutation_files, version_eff, stride, n_loops,
                  model, train_size, valid_size, test_size, attack, attack_last, emb_rate,
                  batch_adv, n_iter_max_backpack, N_samples, tau_0, precision, lr, strategy,
@@ -185,7 +208,7 @@ def run_protocol(begin_step, number_steps, label, data_dir_prot, data_dir_cover,
                       image_size, QF, folder_model, permutation_files, version_eff, stride,
                       n_loops, model, train_size, valid_size, test_size, attack, attack_last,
                       emb_rate, batch_adv, n_iter_max_backpack, N_samples, tau_0, precision, lr,
-                      num_of_threads, training_dictionnary, spatial, strategy)
+                      num_of_threads, training_dictionnary, spatial, strategy, serveur)
 
         iteration_step += 1
 
@@ -194,6 +217,7 @@ if __name__ == '__main__':
 
     argparser = argparse.ArgumentParser(sys.argv[0])
     argparser.add_argument('--begin_step', type=int)
+    argparser.add_argument('--serveur', type=str, default='no')
    argparser.add_argument('--number_steps', type=int, default=10)
     argparser.add_argument('--folder_model', type=str,
                            help='The path to the folder where the architecture of models are saved')
@@ -233,7 +257,7 @@ if __name__ == '__main__':
 
     # FOR ADVERSARIAL COST MAP
     argparser.add_argument('--attack', type=str)
-    argparser.add_argument('--attack_last', type=str)
+    argparser.add_argument('--attack_last', type=str, default='no')
     argparser.add_argument('--lr', type=float)
     argparser.add_argument('--batch_adv', type=int, default=100)
     argparser.add_argument('--n_iter_max_backpack', type=int)
@@ -277,4 +301,6 @@ if __name__ == '__main__':
 
     params = argparser.parse_args()
 
+    params.serveur = params.serveur=='yes'
+
     run_protocol(**vars(params))
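
The main.py changes above all follow one dispatch pattern: with --serveur='yes' a stage is submitted as a SLURM array job and the protocol blocks on the queue, otherwise it runs in the current process. A minimal sketch of that pattern, with run_job and wait stubbed in for the project's helpers from write_jobs.py:

    import os

    def run_job(name, label, command, iteration_step, gpu=False, num_batch=1):
        # Stub for write_jobs.run_job, which writes and submits a SLURM array job.
        raise NotImplementedError

    def wait(label):
        # Stub for the project's wait(), which polls the queue until all jobs
        # carrying this label have finished.
        raise NotImplementedError

    def dispatch(command, serveur, label, iteration_step, num_batch):
        if serveur:
            # One SLURM array task per batch of images; $SLURM_ARRAY_TASK_ID
            # selects the batch each task processes.
            run_job('attack', label, command + ' --idx_start=$SLURM_ARRAY_TASK_ID',
                    iteration_step, gpu=True, num_batch=num_batch)
            wait(label)
        else:
            # Local fallback: a single process handles everything from image 0.
            os.system('python ' + command + ' --idx_start=0')
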
diff --git a/models/__pycache__/efficientnet.cpython-38.pyc b/models/__pycache__/efficientnet.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0d024863d74b8f1fdbd07e3ca868b9f24c8d1eaa
Binary files /dev/null and b/models/__pycache__/efficientnet.cpython-38.pyc differ
diff --git a/models/__pycache__/srnet.cpython-38.pyc b/models/__pycache__/srnet.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..325394d4de8fae5bbe07eb36e159e4fec5e7a3e8
Binary files /dev/null and b/models/__pycache__/srnet.cpython-38.pyc differ
diff --git a/models/__pycache__/xunet.cpython-38.pyc b/models/__pycache__/xunet.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3653e51e97116599c5a203397a3756ac69b7f61
Binary files /dev/null and b/models/__pycache__/xunet.cpython-38.pyc differ
diff --git a/models/b0-imagenet b/models/b0-imagenet
new file mode 100644
index 0000000000000000000000000000000000000000..8af13f5d03467af21b6f5c7b1a0317dc33cf4405
--- /dev/null
+++ b/models/b0-imagenet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f6b91eeaf11ae3f183c074d992d0dd46ae8083c5409839910adf75fa37a457f
+size 16280354
diff --git a/models/b1-imagenet b/models/b1-imagenet
new file mode 100644
index 0000000000000000000000000000000000000000..de242969d560f1797270a4beffe02cbf2d586caf
--- /dev/null
+++ b/models/b1-imagenet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5337dfe968fe895881d14d467a4cc28ffb722d6c60af865385ab6eb2f46345ea
+size 26414008
diff --git a/models/b2-imagenet b/models/b2-imagenet
new file mode 100644
index 0000000000000000000000000000000000000000..ec7cc08fd8f17b7a0e9c25a4628c69a582011081
--- /dev/null
+++ b/models/b2-imagenet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a395e2bfc1b12ac21dcc180533d1f63d804d2b8d63da509bc59b5f1c8ade714
+size 31188427
diff --git a/models/b3-imagenet b/models/b3-imagenet
new file mode 100644
index 0000000000000000000000000000000000000000..7ad2675e3ed76fe8c27868944009cf68e2d0db4f
--- /dev/null
+++ b/models/b3-imagenet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:379fa0b5aa441d96db49798eed2fc4807194be78b640bdb3445d13e4b69ac7f0
+size 43262576
diff --git a/models/b4-imagenet b/models/b4-imagenet
new file mode 100644
index 0000000000000000000000000000000000000000..16046dcc6b1c329d97faadc50fb970c80a4380ce
--- /dev/null
+++ b/models/b4-imagenet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45568352ed41d951ecfddc9a407f3eec1b5c7676f6aab01c07f9bd7c0d36af40
+size 70852911
diff --git a/models/b5-imagenet b/models/b5-imagenet
new file mode 100644
index 0000000000000000000000000000000000000000..0782cc7bfbbdd10b53c22700e08711bebaa3af77
--- /dev/null
+++ b/models/b5-imagenet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ad1ee81a291ea0e3c4f3f516ffd3e4a97409285215307879920ace7d03227ab
+size 114244801
diff --git a/models/b6-imagenet b/models/b6-imagenet
new file mode 100644
index 0000000000000000000000000000000000000000..c90a095e8dd246c1b3ed41ad0430490213e91ad6
--- /dev/null
+++ b/models/b6-imagenet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d490294b132c799cd81a54a42cf8d32c559ec3ff113678a20989728819c5354d
+size 164060370
diff --git a/models/b7-imagenet b/models/b7-imagenet
new file mode 100644
index 0000000000000000000000000000000000000000..bc8f046a6026a23802046263f0b4a2fe6969d14e
--- /dev/null
+++ b/models/b7-imagenet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7845496fff8bdb1c90714b10330d684b14938ed3a5c46ee4a7566c5cc2761ac4
+size 256657098
diff --git a/models/xunet.py b/models/xunet.py
index ffe1e5a0b6c8458fe1e850a4cbbc8d52603a5961..7f2bcf80f51e2c1f0b3b489d8d2276e1ff405730 100644
--- a/models/xunet.py
+++ b/models/xunet.py
@@ -9,9 +9,10 @@ class get_net(nn.Module):
 
     def __init__(self, folder_model, n_loops, image_size):
         super(get_net, self).__init__()
+        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
         self.n_loops = n_loops
         self.DCT4_kernel = torch.tensor(np.load(folder_model+'DCT_4.npy')\
-            .reshape((4,4,16,1)).transpose((2,3,0,1))).cuda().float()
+            .reshape((4,4,16,1)).transpose((2,3,0,1))).to(self.device).float()
         self.im_size = image_size
 
     def _conv2d(in_channels, out_channels, stride):
diff --git a/script_attack.py b/script_attack.py
index e5b22165bb1ce1d9697ed96bd746f73b1ebf82b3..4afb326de0802887ae635d7b362a99508b8587c6 100644
--- a/script_attack.py
+++ b/script_attack.py
@@ -1,4 +1,7 @@
 # TOOLS
+import sys, os
+sys.path.append('models/')
+
 from statsmodels.distributions.empirical_distribution import ECDF
 import numpy as np
 from backpack import BackPack
@@ -16,7 +19,6 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 
-sys.path.append('models/')
 
 def backpack_attack(data_dir_cover, cost_dir, image_size, QF, folder_model, emb_rate,
@@ -91,6 +93,7 @@ def run_attack(iteration_step, folder_model, data_dir_prot, data_dir_cover, cost
 
     models = model.split(',')
     attack_last = attack_last == 'yes'
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
     nets = []
@@ -102,11 +105,11 @@ def run_attack(iteration_step, folder_model, data_dir_prot, data_dir_cover, cost
     for i in range(n_classifier_min, iteration_step):
         for model in models:
             if(model == 'efnet'):
-                net = get_net_ef(version_eff, stride).cuda()
+                net = get_net_ef(version_eff, stride).to(device)
             elif(model == 'xunet'):
-                net = get_net_xu(folder_model, n_loops, image_size).cuda()
+                net = get_net_xu(folder_model, n_loops, image_size).to(device)
             elif(model == 'srnet'):
-                net = get_net_sr(image_size).cuda()
+                net = get_net_sr(image_size).to(device)
 
             paths = os.listdir(data_dir_prot+'train_'+model+'_'+str(i)+'/')
             paths = [int(x.split('-')[-1][:-9])
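
The device changes in backpack.py, models/xunet.py and script_attack.py above all apply the same CPU-fallback idiom. A self-contained sketch (the Conv2d stand-in and tensor shapes are illustrative only, not the project's networks):

    import torch

    # Pick CUDA when available, otherwise fall back to CPU, and move models
    # and tensors with .to(device) instead of the CUDA-only .cuda().
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    net = torch.nn.Conv2d(1, 16, 3).to(device)          # replaces net.cuda()
    x = torch.randn(1, 1, 512, 512).to(device).float()  # replaces x.cuda().float()
    y = net(x)
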
diff --git a/script_evaluate_classif.py b/script_evaluate_classif.py
index d8080547172bb68863be25a4b216d93e20204c4d..62ae888f50c6cb9fc65703a08d03109b383b6eb9 100644
--- a/script_evaluate_classif.py
+++ b/script_evaluate_classif.py
@@ -1,3 +1,6 @@
+import sys, os
+sys.path.append('models/')
+
 from srnet import TrainGlobalConfig as TrainGlobalConfig_sr
 from srnet import get_net as get_net_sr
 from xunet import TrainGlobalConfig as TrainGlobalConfig_xu
diff --git a/script_train.py b/script_train.py
index 233fe7fd86048609f57b98898fc55559e58ecb73..8a37be4bc7299aba594cc6a7a918ca728a22a722 100644
--- a/script_train.py
+++ b/script_train.py
@@ -1,3 +1,5 @@
+import sys, os
+sys.path.append('models/')
 from torch.utils.data import Dataset, DataLoader
 import argparse
 import sklearn
@@ -10,13 +12,11 @@ from xunet import TrainGlobalConfig as TrainGlobalConfig_xu
 from xunet import get_net as get_net_xu
 from efficientnet import TrainGlobalConfig as TrainGlobalConfig_ef
 from efficientnet import get_net as get_net_ef
-from catalyst.data.sampler import BalanceClassSampler
+#from catalyst.data.sampler import BalanceClassSampler
 from data_loader import load_dataset, DatasetRetriever, get_train_transforms, get_valid_transforms
 from train import Fitter
-import sys
-import os
-sys.path.append('models/')
+
 
 def my_collate(batch, pair_training=False):
@@ -35,7 +35,7 @@ def my_collate(batch, pair_training=False):
 def run_training(iteration_step, model, net, trainGlobalConfig, data_dir_prot, start_emb_rate,
                  emb_rate, pair_training, version_eff, load_checkpoint, train_dataset,
                  validation_dataset, test_dataset, folder_model, train_on_cost_map):
-    device = torch.device('cuda:0')
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
     train_dataset.emb_rate = start_emb_rate
     train_loader = torch.utils.data.DataLoader(
@@ -77,9 +77,10 @@ def run_training(iteration_step, model, net, trainGlobalConfig, data_dir_prot, s
         save_path=save_path, model_str=model, train_on_cost_map=train_on_cost_map)
     print(f'{fitter.base_dir}')
-    if(model == 'efnet'):
-        # Load the pretrained model
-        fitter.load(folder_model+version_eff+"-imagenet")
+
+    # if(model == 'efnet'):
+    #     # Load the pretrained model
+    #     fitter.load(folder_model+version_eff+"-imagenet")
 
     if(load_checkpoint is not None):
         save_path_load = save_path
@@ -122,6 +123,7 @@ def train(iteration_step, model, folder_model, data_dir_prot, permutation_files,
     pair_training = pair_training == 'yes'
     spatial = spatial == 'yes'
     train_on_cost_map = train_on_cost_map == 'yes'
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
     dataset = load_dataset(iteration_step, permutation_files, train_size,
                            valid_size, test_size, data_dir_prot, pair_training=pair_training)
@@ -150,15 +152,15 @@ def train(iteration_step, model, folder_model, data_dir_prot, permutation_files,
     train_dataset, validation_dataset, test_dataset = datasets[0], datasets[1], datasets[2]
 
     if(model == 'efnet'):
-        net = get_net_ef(version_eff, stride).cuda()
+        net = get_net_ef(version_eff, stride).to(device)
         trainGlobalConfig = TrainGlobalConfig_ef(
             num_of_threads, batch_size_classif, epoch_num)
     elif(model == 'xunet'):
-        net = get_net_xu(folder_model, n_loops, image_size).cuda()
+        net = get_net_xu(folder_model, n_loops, image_size).to(device)
         trainGlobalConfig = TrainGlobalConfig_xu(
             num_of_threads, batch_size_classif, epoch_num)
    elif(model == 'srnet'):
-        net = get_net_sr(image_size).cuda()
+        net = get_net_sr(image_size).to(device)
         net.init()
         trainGlobalConfig = TrainGlobalConfig_sr(
             num_of_threads, batch_size_classif, epoch_num)
@@ -168,6 +170,7 @@ def train(iteration_step, model, folder_model, data_dir_prot, permutation_files,
     else:
         start_emb_rate = emb_rate
 
+    # Train first with cost map
     train, val, test = run_training(iteration_step, model, net, trainGlobalConfig,
                                     data_dir_prot, start_emb_rate, emb_rate,
                                     pair_training, version_eff, load_model,
diff --git a/write_jobs.py b/write_jobs.py
index 15c9a017a45ac3b8a27ec352ad5a1f141828ce8d..4e32f13d3579b2d501da8f72d652397e7ea5c50d 100644
--- a/write_jobs.py
+++ b/write_jobs.py
@@ -84,7 +84,6 @@ def write_command(mode, iteration_step, model, data_dir_prot, data_dir_cover, da
         com += ' --attack_last=' + str(attack_last)
         com += ' --emb_rate=' + str(emb_rate)
         com += ' --cost_dir=' + str(cost_dir)
-        com += ' --idx_start=$SLURM_ARRAY_TASK_ID'
         com += ' --batch_adv=' + str(batch_adv)
         com += ' --n_iter_max_backpack=' + str(n_iter_max_backpack)
         com += ' --N_samples=' + str(N_samples)
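
As context for the softmax/ECDF lines kept in backpack.py above: each detector's softmax stego-probability is mapped through an ECDF fitted on that detector's reference scores, presumably so scores from different architectures become comparable. A minimal sketch with placeholder reference scores (the tensors here are random stand-ins, not protocol data):

    import numpy as np
    import torch
    from statsmodels.distributions.empirical_distribution import ECDF

    reference_scores = np.random.rand(1000)  # placeholder for a detector's calibration scores
    ecdf = ECDF(reference_scores)

    logits = torch.randn(1, 2)                          # one image, classes (cover, stego)
    p_stego = torch.nn.Softmax(dim=1)(logits)[0, 1]     # softmax stego-probability
    calibrated = ecdf(p_stego.cpu().detach().numpy())   # rank of the score among references
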