Parcourir la source

Test of transfer learning network

Jérôme BUISINE il y a 4 ans
Parent
commit
483d4ab0af
3 fichiers modifiés avec 72 ajouts et 8 suppressions
  1. 1 1
      modules
  2. 60 0
      run_tl_test.sh
  3. 11 7
      train_model.py

+ 1 - 1
modules

@@ -1 +1 @@
-Subproject commit d5de038bdccaa58ff2123d5227482dc6c0ea2500
+Subproject commit 486200b98250aa94209bb0d3ceda5af7096c49c8

+ 60 - 0
run_tl_test.sh

@@ -0,0 +1,60 @@
+#!/bin/bash
+# Transfer-learning test pipeline: reconstruct features, build dataset, train model.
+erased=$1
+
+# file which contains model names we want to use for simulation
+file_path="results/models_comparisons.csv"
+
+if [ "${erased}" == "Y" ]; then
+    echo "Previous data file erased..."
+    rm -f "${file_path}"
+    mkdir -p results
+    touch "${file_path}"
+
+    # add of CSV header (typo fixed: precision_train)
+    echo 'model_name; global_train_size; global_test_size; filtered_train_size; filtered_test_size; f1_train; f1_test; recall_train; recall_test; precision_train; precision_test; acc_train; acc_test; roc_auc_train; roc_auc_test;' >> "${file_path}"
+fi
+
+renderer="all"
+scenes="A, B, C, D, E, F, G, H, I"
+
+svd_metric="svd_reconstruction"
+ipca_metric="ipca_reconstruction"
+fast_ica_metric="fast_ica_reconstruction"
+
+all_features="${svd_metric},${ipca_metric},${fast_ica_metric}"
+
+
+# RUN LATER
+# compute using all transformation methods
+ipca_batch_size=55
+begin=100
+end=200
+ipca_component=30
+fast_ica_component=60
+zone=12
+
+# output key encodes every parameter of the combination (fixed stray "__N")
+OUTPUT_DATA_FILE="${svd_metric}_B${begin}_E${end}_${ipca_metric}_N${ipca_component}_BS${ipca_batch_size}_${fast_ica_metric}_N${fast_ica_component}_nb_zones_${zone}"
+
+python generate/generate_reconstructed_data.py --features ${svd_metric} --params "${begin}, ${end}"
+
+python generate/generate_reconstructed_data.py --features ${ipca_metric} --params "${ipca_component},${ipca_batch_size}"
+# fixed above/below: --features takes the metric name (not a component count),
+python generate/generate_reconstructed_data.py --features ${fast_ica_metric} --params "${fast_ica_component}"
+# and --params uses the declared variables (the original "${component}" was undefined)
+
+if grep -xq "${OUTPUT_DATA_FILE}" "${file_path}"; then
+
+  echo "Transformation combination model ${OUTPUT_DATA_FILE} already generated"
+
+else
+
+  echo "Run computation for Transformation combination model ${OUTPUT_DATA_FILE}"
+
+  params="${begin}, ${end} :: ${ipca_component}, ${ipca_batch_size} :: ${fast_ica_component}"
+
+  # scenes is quoted: the value contains spaces and must remain a single argument
+  python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE} --metric ${all_features} --renderer ${renderer} --scenes "${scenes}" --params "${params}" --nb_zones ${zone} --random 1
+
+  python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
+fi

+ 11 - 7
train_model.py

@@ -1,18 +1,20 @@
+# main imports
 import numpy as np
 import pandas as pd
 import sys, os, argparse
 import json
 
-import cv2
+# model imports
+from modules.models import cnn_models as models
+from keras import backend as K
+from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
 
+# image processing imports
+import cv2
 from sklearn.utils import shuffle
 
+# config imports
 import custom_config as cfg
-from modules.models import cnn_models as models
-
-from keras import backend as K
-
-from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
 
 def main():
 
@@ -20,6 +22,7 @@ def main():
 
     parser.add_argument('--data', type=str, help='dataset filename prefix (without .train and .test)', required=True)
     parser.add_argument('--output', type=str, help='output file name desired for model (without .json extension)', required=True)
+    parser.add_argument('--tl', type=int, help='use or not of transfer learning (`VGG network`)', default=0)
     parser.add_argument('--batch_size', type=int, help='batch size used as model input', default=cfg.keras_batch)
     parser.add_argument('--epochs', type=int, help='number of epochs used for training model', default=cfg.keras_epochs)
     parser.add_argument('--val_size', type=int, help='percent of validation data during training process', default=cfg.val_dataset_size)
@@ -28,6 +31,7 @@ def main():
 
     p_data_file  = args.data
     p_output     = args.output
+    p_tl         = args.tl
     p_batch_size = args.batch_size
     p_epochs     = args.epochs
     p_val_size   = args.val_size
@@ -129,7 +133,7 @@ def main():
     # 2. Getting model
     #######################
 
-    model = models.get_model(n_channels, input_shape)
+    model = models.get_model(n_channels, input_shape, p_tl)
     model.summary()
  
     model.fit(x_data_train, y_dataset_train.values, validation_split=p_val_size, epochs=p_epochs, batch_size=p_batch_size)