Browse Source

Add all models into this project; they are no longer in common modules

Jérôme BUISINE 1 year ago
parent
commit
8fc7c020c1
4 changed files with 220 additions and 5 deletions
  1. 210 0
      cnn_models.py
  2. 1 1
      modules
  3. 1 1
      run_tl_test.sh
  4. 8 3
      train_model.py

+ 210 - 0
cnn_models.py

@@ -0,0 +1,210 @@
+# main imports
+import sys
+
+# model imports
+from keras.preprocessing.image import ImageDataGenerator
+from keras.models import Sequential, Model
+from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv3D, MaxPooling3D, AveragePooling3D
+from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
+from keras.applications.vgg19 import VGG19
+from keras import backend as K
+import tensorflow as tf
+
+# configuration and modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.models import metrics
+
def generate_model_2D(_input_shape):
    """Build and compile a small 2D CNN for binary classification.

    Args:
        _input_shape: shape tuple of one 2D input sample (passed straight
            to the first ``Conv2D`` layer's ``input_shape``).

    Returns:
        A compiled ``Sequential`` model (binary_crossentropy / rmsprop,
        tracking accuracy and the project AUC metric).
    """
    model = Sequential()

    # First convolutional stage carries the input shape.
    model.add(Conv2D(60, (2, 2), input_shape=_input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Remaining conv stages: shrinking filter counts, identical structure.
    for n_filters in (40, 20):
        model.add(Conv2D(n_filters, (2, 2)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())

    # Fully-connected head: each block is Dense -> ReLU -> BatchNorm -> Dropout.
    for n_units in (140, 120, 80, 40, 20):
        model.add(Dense(n_units))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

    # Single sigmoid output for the binary decision.
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy', metrics.auc])

    return model
+
+
def generate_model_3D(_input_shape):
    """Build and compile a small 3D CNN for binary classification.

    Mirrors :func:`generate_model_2D` but uses 3D convolutions with a
    (1, 2, 2) kernel/pool, i.e. the first (depth/channel) axis is never
    reduced — only the spatial axes shrink.

    Args:
        _input_shape: shape tuple of one 3D input sample (passed straight
            to the first ``Conv3D`` layer's ``input_shape``).

    Returns:
        A compiled ``Sequential`` model (binary_crossentropy / rmsprop,
        tracking accuracy and the project AUC metric).
    """
    model = Sequential()

    # NOTE: removed a leftover debug `print(_input_shape)` call.

    # First convolutional stage carries the input shape.
    model.add(Conv3D(60, (1, 2, 2), input_shape=_input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))

    # Remaining conv stages: shrinking filter counts, identical structure.
    for n_filters in (40, 20):
        model.add(Conv3D(n_filters, (1, 2, 2)))
        model.add(Activation('relu'))
        model.add(MaxPooling3D(pool_size=(1, 2, 2)))

    model.add(Flatten())

    # Fully-connected head: each block is Dense -> ReLU -> BatchNorm -> Dropout.
    for n_units in (140, 120, 80, 40, 20):
        model.add(Dense(n_units))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

    # Single sigmoid output for the binary decision.
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy', metrics.auc])

    return model
+
+
+# using transfer learning (VGG19)
# using transfer learning (VGG19)
def generate_model_3D_TL(_input_shape):
    """Build a transfer-learning binary classifier on top of VGG19.

    Loads an ImageNet-pretrained VGG19 backbone (without its top), freezes
    its first layers, and attaches a fully-connected head ending in a
    single sigmoid unit.

    Args:
        _input_shape: shape tuple fed to VGG19's ``input_shape`` (must be
            a 3-channel image shape acceptable to VGG19).

    Returns:
        A compiled functional ``Model`` (binary_crossentropy / rmsprop,
        tracking accuracy and the project AUC metric).
    """
    # load pre-trained backbone
    base_model = VGG19(weights='imagenet', include_top=False, input_shape=_input_shape)
    # display backbone layers
    base_model.summary()

    # Freeze only the first 5 layers.
    # NOTE(review): VGG19 has far more than 5 conv layers, so most of the
    # backbone stays trainable here — confirm this is intended; freezing
    # the full backbone would iterate over all of base_model.layers.
    for layer in base_model.layers[:5]:
        layer.trainable = False

    # adding custom layers: Dense -> BatchNorm -> Dropout blocks
    x = base_model.output
    x = Flatten()(x)
    for n_units in (1024, 256, 64):
        x = Dense(n_units, activation="relu")(x)
        x = BatchNormalization()(x)
        x = Dropout(0.5)(x)
    x = Dense(16, activation="relu")(x)
    # BUG FIX: softmax over a single unit always outputs 1.0; a sigmoid is
    # required for a 1-unit binary_crossentropy output.
    predictions = Dense(1, activation="sigmoid")(x)

    # creating the final model
    # BUG FIX: Keras `Model` takes `inputs`/`outputs` keyword arguments
    # (`input`/`output` raises a TypeError on current Keras versions).
    model_final = Model(inputs=base_model.input, outputs=predictions)

    model_final.summary()

    model_final.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy', metrics.auc])

    return model_final
+
+
def get_model(n_channels, _input_shape, tl=False):
    """Return a compiled model matching the number of input channels.

    Args:
        n_channels: 1 (grayscale, 2D CNN) or 3 (color, 3D CNN).
        _input_shape: shape tuple forwarded to the chosen model builder.
        tl: if truthy, use the VGG19 transfer-learning model (3-channel
            input only; with 1 channel a warning is printed and the plain
            2D model is used instead).

    Returns:
        A compiled Keras model.

    Raises:
        ValueError: if ``n_channels`` is neither 1 nor 3.
    """
    if tl:
        if n_channels == 3:
            return generate_model_3D_TL(_input_shape)
        # Keep the original best-effort behavior: warn, then fall back
        # to the non-transfer-learning model below.
        print("Can't use transfer learning with only 1 channel")

    if n_channels == 1:
        return generate_model_2D(_input_shape)

    if n_channels == 3:
        return generate_model_3D(_input_shape)

    # BUG FIX: previously fell off the end and returned None silently,
    # deferring the failure to the caller's first use of the model.
    raise ValueError("Unsupported number of channels: {} (expected 1 or 3)".format(n_channels))

+ 1 - 1
modules

@@ -1 +1 @@
-Subproject commit 486200b98250aa94209bb0d3ceda5af7096c49c8
+Subproject commit 4d35e5fbc4cb7145bf524a609d9500da4a4433df

+ 1 - 1
run_tl_test.sh

@@ -56,5 +56,5 @@ else
 
   python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE} --metric ${all_features} --renderer ${renderer} --scenes ${scenes} --params "${params}" --nb_zones ${zone} --random 1
   
-  python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
+  python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} --tl 1 &
 fi

+ 8 - 3
train_model.py

@@ -5,7 +5,7 @@ import sys, os, argparse
 import json
 
 # model imports
-from modules.models import cnn_models as models
+import cnn_models as models
 from keras import backend as K
 from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
 
@@ -14,15 +14,18 @@ import cv2
 from sklearn.utils import shuffle
 
 # config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
 import custom_config as cfg
 
+
 def main():
 
     parser = argparse.ArgumentParser(description="Train Keras model and save it into .json file")
 
     parser.add_argument('--data', type=str, help='dataset filename prefix (without .train and .test)', required=True)
     parser.add_argument('--output', type=str, help='output file name desired for model (without .json extension)', required=True)
-    parser.add_argument('--tl', type=int, help='use or not of transfer learning (`VGG network`)', default=False)
+    parser.add_argument('--tl', type=int, help='use or not of transfer learning (`VGG network`)', default=0, choices=[0, 1])
     parser.add_argument('--batch_size', type=int, help='batch size used as model input', default=cfg.keras_batch)
     parser.add_argument('--epochs', type=int, help='number of epochs used for training model', default=cfg.keras_epochs)
     parser.add_argument('--val_size', type=int, help='percent of validation data during training process', default=cfg.val_dataset_size)
@@ -133,13 +136,15 @@ def main():
     # 2. Getting model
     #######################
 
-    model = models.get_model(n_channels, input_shape, tl)
+    model = models.get_model(n_channels, input_shape, p_tl)
     model.summary()
  
     model.fit(x_data_train, y_dataset_train.values, validation_split=p_val_size, epochs=p_epochs, batch_size=p_batch_size)
 
     score = model.evaluate(x_data_test, y_dataset_test, batch_size=p_batch_size)
 
+    print("Accuracy score on test dataset ", score)
+
     if not os.path.exists(cfg.saved_models_folder):
         os.makedirs(cfg.saved_models_folder)