@@ -0,0 +1,210 @@
+
+import sys
+
+from keras.preprocessing.image import ImageDataGenerator
+from keras.models import Sequential, Model
+from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv3D, MaxPooling3D, AveragePooling3D
+from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
+from keras.applications.vgg19 import VGG19
+from keras import backend as K
+import tensorflow as tf
+
+# make sure the working directory is on the import path for the local modules below
+sys.path.insert(0, '')
+
+import custom_config as cfg
+from modules.models import metrics
+
+
+def generate_model_2D(_input_shape):
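+    """Small 2D CNN for binary classification: three Conv2D/MaxPooling2D
+    blocks, a fully-connected head with BatchNormalization and Dropout, and a
+    sigmoid output, compiled with binary cross-entropy, RMSprop, accuracy and AUC."""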
+
+    model = Sequential()
+
+    model.add(Conv2D(60, (2, 2), input_shape=_input_shape))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Conv2D(40, (2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Conv2D(20, (2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Flatten())
+
+    model.add(Dense(140))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.5))
+
+    model.add(Dense(120))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.5))
+
+    model.add(Dense(80))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.5))
+
+    model.add(Dense(40))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.5))
+
+    model.add(Dense(20))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.5))
+
+    model.add(Dense(1))
+    model.add(Activation('sigmoid'))
+
+    model.compile(loss='binary_crossentropy',
+                  optimizer='rmsprop',
+                  metrics=['accuracy', metrics.auc])
+
+    return model
+
+
+def generate_model_3D(_input_shape):
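+    """Conv3D counterpart of the 2D model: kernels and pooling of size
+    (1, 2, 2) so only the two spatial dimensions are reduced, followed by the
+    same fully-connected head and sigmoid output."""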
+
+    model = Sequential()
+
+    print(_input_shape)
+
+    model.add(Conv3D(60, (1, 2, 2), input_shape=_input_shape))
+    model.add(Activation('relu'))
+    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
+
+    model.add(Conv3D(40, (1, 2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
+
+    model.add(Conv3D(20, (1, 2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
+
+    model.add(Flatten())
+
+    model.add(Dense(140))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.5))
+
+    model.add(Dense(120))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.5))
+
+    model.add(Dense(80))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.5))
+
+    model.add(Dense(40))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.5))
+
+    model.add(Dense(20))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.5))
+
+    model.add(Dense(1))
+    model.add(Activation('sigmoid'))
+
+    model.compile(loss='binary_crossentropy',
+                  optimizer='rmsprop',
+                  metrics=['accuracy', metrics.auc])
+
+    return model
+
+
+def generate_model_3D_TL(_input_shape):
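+    """Transfer-learning model: VGG19 pre-trained on ImageNet as the base
+    (first five layers frozen), topped with a new fully-connected head and a
+    sigmoid output for binary classification."""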
+
+    model = VGG19(weights='imagenet', include_top=False, input_shape=_input_shape)
+
+    model.summary()
+
+    # freeze the first layers of the pre-trained base
+    for layer in model.layers[:5]:
+        layer.trainable = False
+
+    '''predictions_model = Sequential(model)
+
+    predictions_model.add(Flatten(model.output))
+
+    predictions_model.add(Dense(1024))
+    predictions_model.add(Activation('relu'))
+    predictions_model.add(BatchNormalization())
+    predictions_model.add(Dropout(0.5))
+
+    predictions_model.add(Dense(512))
+    predictions_model.add(Activation('relu'))
+    predictions_model.add(BatchNormalization())
+    predictions_model.add(Dropout(0.5))
+
+    predictions_model.add(Dense(256))
+    predictions_model.add(Activation('relu'))
+    predictions_model.add(BatchNormalization())
+    predictions_model.add(Dropout(0.5))
+
+    predictions_model.add(Dense(100))
+    predictions_model.add(Activation('relu'))
+    predictions_model.add(BatchNormalization())
+    predictions_model.add(Dropout(0.5))
+
+    predictions_model.add(Dense(20))
+    predictions_model.add(Activation('relu'))
+    predictions_model.add(BatchNormalization())
+    predictions_model.add(Dropout(0.5))
+
+    predictions_model.add(Dense(1))
+    predictions_model.add(Activation('sigmoid'))'''
+
+    x = model.output
+    x = Flatten()(x)
+    x = Dense(1024, activation="relu")(x)
+    x = BatchNormalization()(x)
+    x = Dropout(0.5)(x)
+    x = Dense(256, activation="relu")(x)
+    x = BatchNormalization()(x)
+    x = Dropout(0.5)(x)
+    x = Dense(64, activation="relu")(x)
+    x = BatchNormalization()(x)
+    x = Dropout(0.5)(x)
+    x = Dense(16, activation="relu")(x)
+    # single-unit binary output needs sigmoid (softmax over one unit is always 1)
+    predictions = Dense(1, activation="sigmoid")(x)
+
+    model_final = Model(inputs=model.input, outputs=predictions)
+
+    model_final.summary()
+
+    model_final.compile(loss='binary_crossentropy',
+                        optimizer='rmsprop',
+                        metrics=['accuracy', metrics.auc])
+
+    return model_final
+
+
+def get_model(n_channels, _input_shape, tl=False):
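+    """Select the model to build: the 2D CNN for 1 channel, the Conv3D model
+    for 3 channels, or the VGG19 transfer-learning model when `tl` is True
+    (requires 3 channels)."""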
+
+    if tl:
+        if n_channels == 3:
+            return generate_model_3D_TL(_input_shape)
+        else:
+            print("Can't use transfer learning with only 1 channel")
+
+    if n_channels == 1:
+        return generate_model_2D(_input_shape)
+
+    if n_channels == 3:
+        return generate_model_3D(_input_shape)
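+
+# Example usage (sketch only; the input shape below is illustrative and must
+# match the dataset's patch size and channel layout):
+#   model = get_model(n_channels=3, _input_shape=(3, 100, 100, 1))
+#   model.summary()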