# model imports
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv3D, MaxPooling3D, AveragePooling3D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
from keras.applications.vgg19 import VGG19
from keras import backend as K
import tensorflow as tf

# configuration imports
from . import metrics
from ..config import cnn_config as cfg

def generate_model_2D(_input_shape):
    """Build a small 2D CNN for binary classification of single-channel inputs."""
    model = Sequential()

    # three convolution/pooling stages
    model.add(Conv2D(60, (2, 2), input_shape=_input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(40, (2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(20, (2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # fully connected classifier head
    model.add(Flatten())
    model.add(Dense(140))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(120))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(80))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(40))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(20))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    # single sigmoid output for binary classification
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy', metrics.auc])
    return model
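
# Example usage (sketch): the model expects single-channel 2D patches; the 40x40 size
# below is only an assumed placeholder, not a value read from cnn_config.
#   model_2d = generate_model_2D((40, 40, 1))
#   model_2d.summary()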

def generate_model_3D(_input_shape):
    """Build a small 3D CNN for binary classification of stacked-slice inputs."""
    model = Sequential()

    # three convolution/pooling stages; kernels and pools only span the spatial axes
    model.add(Conv3D(60, (1, 2, 2), input_shape=_input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(Conv3D(40, (1, 2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(Conv3D(20, (1, 2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))

    # fully connected classifier head
    model.add(Flatten())
    model.add(Dense(140))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(120))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(80))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(40))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(20))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    # single sigmoid output for binary classification
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy', metrics.auc])
    return model
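
# Example usage (sketch): Conv3D expects a 4D sample shape (depth, height, width, channels);
# the three stacked 40x40 single-channel slices below are only an assumed placeholder.
#   model_3d = generate_model_3D((3, 40, 40, 1))
#   model_3d.summary()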

# using transfer learning (VGG19)
def generate_model_3D_TL(_input_shape):
    """Build a binary classifier on top of a pre-trained VGG19 base (expects a 3-channel input)."""
    # load the pre-trained convolutional base with ImageNet weights and no classifier head
    model = VGG19(weights='imagenet', include_top=False, input_shape=_input_shape)
    # display the layers of the base model
    model.summary()
    # freeze the first five layers so their ImageNet weights are not updated during training
    for layer in model.layers[:5]:
        layer.trainable = False
    # add a custom classification head on top of the VGG19 base (functional API)
    x = model.output
    x = Flatten()(x)
    x = Dense(1024, activation="relu")(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Dense(256, activation="relu")(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Dense(64, activation="relu")(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Dense(16, activation="relu")(x)
    # single-unit sigmoid output: softmax over one unit would always output 1.0,
    # which breaks binary classification with binary_crossentropy
    predictions = Dense(1, activation="sigmoid")(x)

    # create the final model (the Model constructor takes `inputs`/`outputs` keyword arguments)
    model_final = Model(inputs=model.input, outputs=predictions)
    model_final.summary()
    model_final.compile(loss='binary_crossentropy',
                        optimizer='rmsprop',
                        metrics=['accuracy', metrics.auc])
    return model_final

def get_model(n_channels, _input_shape, tl=False):
    """Return the CNN that matches the channel count; optionally use the VGG19 transfer-learning model."""
    if tl:
        if n_channels == 3:
            return generate_model_3D_TL(_input_shape)
        print("Can't use transfer learning with only 1 channel; falling back to the plain CNN")
    if n_channels == 1:
        return generate_model_2D(_input_shape)
    if n_channels == 3:
        return generate_model_3D(_input_shape)
    raise ValueError("Unsupported number of channels: {}".format(n_channels))
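
# Minimal dispatch sketch (assumed shapes, not values read from cnn_config):
#   cnn_2d = get_model(n_channels=1, _input_shape=(40, 40, 1))              # plain 2D CNN
#   cnn_3d = get_model(n_channels=3, _input_shape=(3, 40, 40, 1))           # 3D CNN over stacked slices
#   cnn_tl = get_model(n_channels=3, _input_shape=(128, 128, 3), tl=True)   # VGG19 base; needs 3-channel
#                                                                           # images with sides >= 32 px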