Parcourir la source

update cnn parameters

Commit 24d5230ef5 by Jérôme BUISINE, 3 years ago (parent commit linked above)
2 files changed, 21 insertions(+) and 19 deletions(−):
  1. cnn_models.py (+17, −17)
  2. train_lstm_weighted.py (+4, −2)

+ 17 - 17
cnn_models.py

@@ -2,11 +2,11 @@
 import sys
 
 # model imports
-from keras.preprocessing.image import ImageDataGenerator
+# from keras.preprocessing.image import ImageDataGenerator
 from keras.models import Sequential, Model
 from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv3D, MaxPooling3D, AveragePooling3D
 from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
-from keras.applications.vgg19 import VGG19
+# from keras.applications.vgg19 import VGG19
 from keras import backend as K
 import tensorflow as tf
 
@@ -92,30 +92,30 @@ def generate_model_3D(_input_shape):
     model.add(Flatten())
 
     model.add(Dense(256))
-    model.add(Activation('relu'))
     model.add(BatchNormalization())
     model.add(Dropout(0.5))
+    model.add(Activation('relu'))
 
     model.add(Dense(128))
-    model.add(Activation('relu'))
     model.add(BatchNormalization())
     model.add(Dropout(0.5))
+    model.add(Activation('relu'))
 
     model.add(Dense(64))
-    model.add(Activation('relu'))
     model.add(BatchNormalization())
     model.add(Dropout(0.5))
+    model.add(Activation('relu'))
 
     model.add(Dense(20))
-    model.add(Activation('relu'))
     model.add(BatchNormalization())
     model.add(Dropout(0.5))
+    model.add(Activation('relu'))
 
     model.add(Dense(2))
     model.add(Activation('sigmoid'))
 
-    model.compile(loss='categorical_crossentropy',
-                  optimizer='rmsprop',
+    model.compile(loss='binary_crossentropy',
+                  optimizer='adam',
                   #metrics=['accuracy', metrics.auc])
                   metrics=['accuracy'])
 
@@ -123,7 +123,7 @@ def generate_model_3D(_input_shape):
 
 
 # using transfer learning (VGG19)
-def generate_model_3D_TL(_input_shape):
+'''def generate_model_3D_TL(_input_shape):
 
     # load pre-trained model
     model = VGG19(weights='imagenet', include_top=False, input_shape=_input_shape)
@@ -134,7 +134,7 @@ def generate_model_3D_TL(_input_shape):
     for layer in model.layers[:5]:
         layer.trainable = False
 
-    '''predictions_model = Sequential(model)
+    predictions_model = Sequential(model)
 
     predictions_model.add(Flatten(model.output))
 
@@ -164,7 +164,7 @@ def generate_model_3D_TL(_input_shape):
     predictions_model.add(Dropout(0.5))
 
     predictions_model.add(Dense(1))
-    predictions_model.add(Activation('sigmoid'))'''
+    predictions_model.add(Activation('sigmoid'))
 
     # adding custom Layers 
     x = model.output
@@ -191,16 +191,16 @@ def generate_model_3D_TL(_input_shape):
                 #   metrics=['accuracy', metrics.auc])
                   metrics=['accuracy'])
 
-    return model_final
+    return model_final'''
 
 
 def get_model(n_channels, _input_shape, _tl=False):
     
-    if _tl:
-        if n_channels == 3:
-            return generate_model_3D_TL(_input_shape)
-        else:
-            print("Can't use transfer learning with only 1 channel")
+    # if _tl:
+    #     if n_channels == 3:
+    #         return generate_model_3D_TL(_input_shape)
+    #     else:
+    #         print("Can't use transfer learning with only 1 channel")
 
     if n_channels == 1:
         return generate_model_2D(_input_shape)

+ 4 - 2
train_lstm_weighted.py

@@ -154,9 +154,11 @@ def create_model(_input_shape):
     model.add(Dropout(0.5))
 
     model.add(Flatten())
-    model.add(Dense(512, activation='sigmoid'))
+    model.add(Dense(512, activation='relu'))
+    model.add(BatchNormalization())
     model.add(Dropout(0.5))
-    model.add(Dense(128, activation='sigmoid'))
+    model.add(Dense(128, activation='relu'))
+    model.add(BatchNormalization())
     model.add(Dropout(0.5))
     model.add(Dense(1, activation='sigmoid'))
     model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])