train_model.py

import numpy as np
import pandas as pd
import sys, os, argparse
import json

import cv2
from sklearn.utils import shuffle
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
from keras import backend as K
import tensorflow as tf
from keras.utils import plot_model

from modules.utils import config as cfg
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, f1_score

img_width, img_height = 200, 200
batch_size = 32


def auc(y_true, y_pred):
    # streaming AUC metric (TF1 style): create the metric op, then
    # initialize its local variables before first use
    auc = tf.metrics.auc(y_true, y_pred)[1]
    K.get_session().run(tf.local_variables_initializer())
    return auc
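
# Note: the workaround above is specific to TF1-style sessions; with TF2 /
# tf.keras one would pass `tf.keras.metrics.AUC()` directly in the
# `metrics=[...]` list when compiling, and drop this helper entirely.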


def generate_model(_input_shape):
    model = Sequential()

    # convolutional feature extractor: three Conv/ReLU/MaxPooling blocks
    model.add(Conv2D(60, (2, 2), input_shape=_input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(40, (2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(20, (2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())

    # fully-connected classifier head: Dense/ReLU/BatchNorm/Dropout blocks
    # of decreasing width, ending in a single sigmoid unit
    model.add(Dense(140))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(120))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(80))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(40))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(20))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy', auc])

    return model
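
# Shape check (for the 200x200 defaults): each Conv2D with a 2x2 kernel and
# 'valid' padding trims one pixel, and each 2x2 pooling halves the size, so
# the feature maps go 200 -> 199 -> 99 -> 98 -> 49 -> 48 -> 24, and Flatten
# yields 24 * 24 * 20 = 11520 features. A quick sanity check:
#   generate_model((200, 200, 1)).summary()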


def main():

    parser = argparse.ArgumentParser(description="Train Keras model and save it into .json file")

    parser.add_argument('--data', type=str, help='dataset filename prefix (without .train and .test)', required=True)
    parser.add_argument('--output', type=str, help='output file name desired for model (without .json extension)', required=True)
    parser.add_argument('--batch_size', type=int, help='batch size used as model input', default=cfg.keras_batch)
    parser.add_argument('--epochs', type=int, help='number of epochs used for training model', default=cfg.keras_epochs)
    parser.add_argument('--val_size', type=float, help='fraction of training data held out for validation (in [0, 1])', default=cfg.val_dataset_size)
    parser.add_argument('--n_channels', type=int, help='number of channels of the input images', default=1)

    args = parser.parse_args()

    p_data_file = args.data
    p_output = args.output
    p_batch_size = args.batch_size
    p_epochs = args.epochs
    p_val_size = args.val_size
    p_n_channels = args.n_channels

    # specify the number of dimensions
    if K.image_data_format() == 'channels_first':
        input_shape = (p_n_channels, img_width, img_height)
    else:
        input_shape = (img_width, img_height, p_n_channels)

    ########################
    # 1. Get and prepare data
    ########################

    print("Preparing data...")
    dataset_train = pd.read_csv(p_data_file + '.train', header=None, sep=";")
    dataset_test = pd.read_csv(p_data_file + '.test', header=None, sep=";")

    print("Train set size : ", len(dataset_train))
    print("Test set size : ", len(dataset_test))

    # default first shuffle of data
    dataset_train = shuffle(dataset_train)
    dataset_test = shuffle(dataset_test)

    print("Reading all images data...")

    # `:` is the separator used for getting each img path
    if p_n_channels > 1:
        # each cell holds `:`-separated paths, one grayscale image per channel
        dataset_train[1] = dataset_train[1].apply(lambda x: np.array([cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in x.split(':')]).reshape(input_shape))
        dataset_test[1] = dataset_test[1].apply(lambda x: np.array([cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in x.split(':')]).reshape(input_shape))
    else:
        dataset_train[1] = dataset_train[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE).reshape(input_shape))
        dataset_test[1] = dataset_test[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE).reshape(input_shape))

    # build a dataset with an equal number of occurrences of each class
    # (assumes the noisy class, label 1, is the minority one)
    noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 1]
    not_noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 0]
    nb_noisy_train = len(noisy_df_train.index)

    noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 1]
    not_noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 0]
    nb_noisy_test = len(noisy_df_test.index)

    final_df_train = pd.concat([not_noisy_df_train[0:nb_noisy_train], noisy_df_train])
    final_df_test = pd.concat([not_noisy_df_test[0:nb_noisy_test], noisy_df_test])

    # shuffle data another time
    final_df_train = shuffle(final_df_train)
    final_df_test = shuffle(final_df_test)

    final_df_train_size = len(final_df_train.index)
    final_df_test_size = len(final_df_test.index)

    # use the whole balanced data set for training
    x_dataset_train = final_df_train.iloc[:, 1:]
    x_dataset_test = final_df_test.iloc[:, 1:]

    y_dataset_train = final_df_train.iloc[:, 0]
    y_dataset_test = final_df_test.iloc[:, 0]

    # stack the per-row image arrays into plain numpy tensors
    x_data_train = np.array([item[0] for item in x_dataset_train.values])
    x_data_test = np.array([item[0] for item in x_dataset_test.values])

    print("End of loading data...")
    print("Train set size (after balancing) : ", final_df_train_size)
    print("Test set size (after balancing) : ", final_df_test_size)

    #######################
    # 2. Getting model
    #######################

    model = generate_model(input_shape)
    model.summary()

    model.fit(x_data_train, y_dataset_train.values, validation_split=p_val_size, epochs=p_epochs, batch_size=p_batch_size)

    score = model.evaluate(x_data_test, y_dataset_test, batch_size=p_batch_size)

    if not os.path.exists(cfg.saved_models_folder):
        os.makedirs(cfg.saved_models_folder)

    # save the model architecture into a .json file and its weights into HDF5
    model_output_path = os.path.join(cfg.saved_models_folder, p_output + '.json')
    json_model_content = model.to_json()

    with open(model_output_path, 'w') as f:
        json.dump(json_model_content, f, indent=4)
        print("Model saved into ", model_output_path)

    model.save_weights(model_output_path.replace('.json', '.h5'))
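
    # The saved model could later be reloaded (sketch, assuming the same
    # file layout as above) with:
    #   from keras.models import model_from_json
    #   with open(model_output_path, 'r') as f:
    #       model = model_from_json(json.load(f))
    #   model.load_weights(model_output_path.replace('.json', '.h5'))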

    # get predictions from the trained model and threshold them at 0.5
    y_train_prediction = model.predict(x_data_train)
    y_test_prediction = model.predict(x_data_test)

    y_train_prediction = [1 if x > 0.5 else 0 for x in y_train_prediction]
    y_test_prediction = [1 if x > 0.5 else 0 for x in y_test_prediction]

    acc_train_score = accuracy_score(y_dataset_train, y_train_prediction)
    acc_test_score = accuracy_score(y_dataset_test, y_test_prediction)

    f1_train_score = f1_score(y_dataset_train, y_train_prediction)
    f1_test_score = f1_score(y_dataset_test, y_test_prediction)

    recall_train_score = recall_score(y_dataset_train, y_train_prediction)
    recall_test_score = recall_score(y_dataset_test, y_test_prediction)

    pres_train_score = precision_score(y_dataset_train, y_train_prediction)
    pres_test_score = precision_score(y_dataset_test, y_test_prediction)

    roc_train_score = roc_auc_score(y_dataset_train, y_train_prediction)
    roc_test_score = roc_auc_score(y_dataset_test, y_test_prediction)
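
    # Note: `roc_auc_score` is applied here to the thresholded 0/1 labels;
    # feeding it the raw sigmoid outputs instead would give the usual
    # probability-ranking AUC.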

    # save model performance
    if not os.path.exists(cfg.models_information_folder):
        os.makedirs(cfg.models_information_folder)

    perf_file_path = os.path.join(cfg.models_information_folder, cfg.csv_model_comparisons_filename)

    with open(perf_file_path, 'a') as f:
        line = p_output + ';' + str(len(dataset_train)) + ';' + str(len(dataset_test)) + ';' \
               + str(final_df_train_size) + ';' + str(final_df_test_size) + ';' \
               + str(acc_train_score) + ';' + str(acc_test_score) + ';' \
               + str(f1_train_score) + ';' + str(f1_test_score) + ';' \
               + str(recall_train_score) + ';' + str(recall_test_score) + ';' \
               + str(pres_train_score) + ';' + str(pres_test_score) + ';' \
               + str(roc_train_score) + ';' + str(roc_test_score) + '\n'
        f.write(line)


if __name__ == "__main__":
    main()
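
# Example invocation (hypothetical dataset prefix and output name):
#   python train_model.py --data data/my_dataset --output noise_cnn
# This expects `data/my_dataset.train` and `data/my_dataset.test` to be
# `;`-separated CSV files with the label in column 0 and the image path(s)
# in column 1 (multiple paths joined with `:` when --n_channels > 1).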