train_model.py

# main imports
import numpy as np
import pandas as pd
import sys, os, argparse
import json

# model imports
import cnn_models as models
import tensorflow as tf
import keras
from keras import backend as K
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, f1_score

# image processing imports
import cv2
from sklearn.utils import shuffle

# config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg


def main():

    # default keras configuration
    #config = tf.ConfigProto( device_count = {'GPU': 1 , 'CPU': 8})
    #sess = tf.Session(config=config)
    #keras.backend.set_session(sess)

    parser = argparse.ArgumentParser(description="Train Keras model and save it into .json file")

    parser.add_argument('--data', type=str, help='dataset filename prefix (without .train and .test)', required=True)
    parser.add_argument('--output', type=str, help='output file name desired for model (without .json extension)', required=True)
    parser.add_argument('--tl', type=int, help='use or not of transfer learning (`VGG network`)', default=0, choices=[0, 1])
    parser.add_argument('--batch_size', type=int, help='batch size used as model input', default=cfg.keras_batch)
    parser.add_argument('--epochs', type=int, help='number of epochs used for training model', default=cfg.keras_epochs)
    parser.add_argument('--val_size', type=float, help='percent of validation data during training process', default=cfg.val_dataset_size)

    args = parser.parse_args()
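    # Example invocation (illustrative only; the dataset prefix and output name
    # used here are hypothetical and not shipped with the repository):
    #   python train_model.py --data data/my_dataset --output my_model --tl 0 \
    #       --batch_size 64 --epochs 30 --val_size 0.2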
    p_data_file = args.data
    p_output = args.output
    p_tl = args.tl
    p_batch_size = args.batch_size
    p_epochs = args.epochs
    p_val_size = args.val_size

    initial_epoch = 0

    ########################
    # 1. Get and prepare data
    ########################

    print("Preparing data...")
    dataset_train = pd.read_csv(p_data_file + '.train', header=None, sep=";")
    dataset_test = pd.read_csv(p_data_file + '.test', header=None, sep=";")

    print("Train set size : ", len(dataset_train))
    print("Test set size : ", len(dataset_test))

    # default first shuffle of data
    dataset_train = shuffle(dataset_train)
    dataset_test = shuffle(dataset_test)
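    # Expected line format, as inferred from the parsing below (the script that
    # generates these .train/.test files may differ in detail):
    #   <label>;<img_path_1>::<img_path_2>::...
    # where column 0 is the binary label (1 = noisy, 0 = not noisy) and column 1
    # holds one or more image paths joined with '::'.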
    print("Reading all images data...")

    # getting number of channels
    n_channels = len(dataset_train[1][1].split('::'))
    print("Number of channels : ", n_channels)

    img_width, img_height = cfg.keras_img_size

    # specify the number of dimensions
    if K.image_data_format() == 'channels_first':
        if n_channels > 1:
            input_shape = (1, n_channels, img_width, img_height)
        else:
            input_shape = (n_channels, img_width, img_height)
    else:
        if n_channels > 1:
            input_shape = (1, img_width, img_height, n_channels)
        else:
            input_shape = (img_width, img_height, n_channels)
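    # For example, with the default channels_last ordering, a single-channel
    # dataset and cfg.keras_img_size == (200, 200) (an assumed value; the real
    # one lives in custom_config) gives input_shape == (200, 200, 1), while a
    # 3-channel dataset gives (1, 200, 200, 3).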
    # `::` is the separator used for getting each img path
    if n_channels > 1:
        dataset_train[1] = dataset_train[1].apply(lambda x: [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in x.split('::')])
        dataset_test[1] = dataset_test[1].apply(lambda x: [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in x.split('::')])
    else:
        dataset_train[1] = dataset_train[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE))
        dataset_test[1] = dataset_test[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE))

    # reshape array data
    dataset_train[1] = dataset_train[1].apply(lambda x: np.array(x).reshape(input_shape))
    dataset_test[1] = dataset_test[1].apply(lambda x: np.array(x).reshape(input_shape))

    # get dataset with equal number of classes occurrences
    noisy_df_train = dataset_train[dataset_train.loc[:, 0] == 1]
    not_noisy_df_train = dataset_train[dataset_train.loc[:, 0] == 0]
    nb_noisy_train = len(noisy_df_train.index)

    noisy_df_test = dataset_test[dataset_test.loc[:, 0] == 1]
    not_noisy_df_test = dataset_test[dataset_test.loc[:, 0] == 0]
    nb_noisy_test = len(noisy_df_test.index)

    final_df_train = pd.concat([not_noisy_df_train[0:nb_noisy_train], noisy_df_train])
    final_df_test = pd.concat([not_noisy_df_test[0:nb_noisy_test], noisy_df_test])
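    # Classes are balanced by undersampling: only the first nb_noisy_* rows of
    # the "not noisy" subset are kept, which assumes the noisy class is the
    # minority one in the input files.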
    # shuffle data another time
    final_df_train = shuffle(final_df_train)
    final_df_test = shuffle(final_df_test)

    final_df_train_size = len(final_df_train.index)
    final_df_test_size = len(final_df_test.index)

    # use of the whole data set for training
    x_dataset_train = final_df_train.loc[:, 1:]
    x_dataset_test = final_df_test.loc[:, 1:]

    y_dataset_train = final_df_train.loc[:, 0]
    y_dataset_test = final_df_test.loc[:, 0]

    x_data_train = []
    for item in x_dataset_train.values:
        #print("Item is here", item)
        x_data_train.append(item[0])

    x_data_train = np.array(x_data_train)

    x_data_test = []
    for item in x_dataset_test.values:
        #print("Item is here", item)
        x_data_test.append(item[0])

    x_data_test = np.array(x_data_test)

    print("End of loading data..")

    print("Train set size (after balancing) : ", final_df_train_size)
    print("Test set size (after balancing) : ", final_df_test_size)
    #######################
    # 2. Getting model
    #######################

    # create backup folder for current model
    model_backup_folder = os.path.join(cfg.backup_model_folder, p_output)
    if not os.path.exists(model_backup_folder):
        os.makedirs(model_backup_folder)

    # add model checkpoint callback
    filepath = os.path.join(cfg.backup_model_folder, p_output, p_output + "__{epoch:02d}.hdf5")
    checkpoint = ModelCheckpoint(filepath, monitor='val_auc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]
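    # Note: monitor='val_auc' assumes the model returned by cnn_models.get_model()
    # is compiled with an AUC metric named 'auc'; if it is not, Keras will warn
    # that the monitored quantity is unavailable and no checkpoint will be saved.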
    model = models.get_model(n_channels, input_shape, p_tl)
    model.summary()

    # check if backup already exists
    backups = sorted(os.listdir(model_backup_folder))

    if len(backups) > 0:
        # TODO : check of initial epoch
        last_backup = backups[-1]
        last_epoch = int(last_backup.split('__')[1].replace('.hdf5', ''))
        initial_epoch = last_epoch
        print("Previous backup model found..")
        print("Restart from epoch ", last_epoch)
        # restore the weights saved at that epoch before resuming training
        model.load_weights(os.path.join(model_backup_folder, last_backup))
    model.fit(x_data_train, y_dataset_train.values, validation_split=p_val_size, initial_epoch=initial_epoch, epochs=p_epochs, batch_size=p_batch_size, callbacks=callbacks_list)

    score = model.evaluate(x_data_test, y_dataset_test.values, batch_size=p_batch_size)
    print("Model evaluation on test dataset : ", score)

    if not os.path.exists(cfg.saved_models_folder):
        os.makedirs(cfg.saved_models_folder)

    # save the model architecture into a .json file (weights go into a .h5 file below)
    model_output_path = os.path.join(cfg.saved_models_folder, p_output + '.json')
    json_model_content = model.to_json()

    with open(model_output_path, 'w') as f:
        json.dump(json_model_content, f, indent=4)
        print("Model saved into ", model_output_path)

    model.save_weights(model_output_path.replace('.json', '.h5'))
    # Get results obtained from model
    y_train_prediction = model.predict(x_data_train)
    y_test_prediction = model.predict(x_data_test)

    y_train_prediction = [1 if x > 0.5 else 0 for x in y_train_prediction]
    y_test_prediction = [1 if x > 0.5 else 0 for x in y_test_prediction]
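    # The 0.5 threshold assumes a single sigmoid output giving the probability of
    # the noisy class; predictions are turned into hard 0/1 labels so that the
    # sklearn metrics below can be computed.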
    acc_train_score = accuracy_score(y_dataset_train, y_train_prediction)
    acc_test_score = accuracy_score(y_dataset_test, y_test_prediction)

    f1_train_score = f1_score(y_dataset_train, y_train_prediction)
    f1_test_score = f1_score(y_dataset_test, y_test_prediction)

    recall_train_score = recall_score(y_dataset_train, y_train_prediction)
    recall_test_score = recall_score(y_dataset_test, y_test_prediction)

    pres_train_score = precision_score(y_dataset_train, y_train_prediction)
    pres_test_score = precision_score(y_dataset_test, y_test_prediction)

    roc_train_score = roc_auc_score(y_dataset_train, y_train_prediction)
    roc_test_score = roc_auc_score(y_dataset_test, y_test_prediction)

    # save model performance
    if not os.path.exists(cfg.results_information_folder):
        os.makedirs(cfg.results_information_folder)

    perf_file_path = os.path.join(cfg.results_information_folder, cfg.csv_model_comparisons_filename)

    # write header if necessary
    if not os.path.exists(perf_file_path):
        with open(perf_file_path, 'w') as f:
            f.write(cfg.perf_train_header_file)

    # add information into file
    with open(perf_file_path, 'a') as f:
        line = p_output + ';' + str(len(dataset_train)) + ';' + str(len(dataset_test)) + ';' \
            + str(final_df_train_size) + ';' + str(final_df_test_size) + ';' \
            + str(acc_train_score) + ';' + str(acc_test_score) + ';' \
            + str(f1_train_score) + ';' + str(f1_test_score) + ';' \
            + str(recall_train_score) + ';' + str(recall_test_score) + ';' \
            + str(pres_train_score) + ';' + str(pres_test_score) + ';' \
            + str(roc_train_score) + ';' + str(roc_test_score) + '\n'
        f.write(line)


if __name__ == "__main__":
    main()