train_model.py

# main imports
import numpy as np
import pandas as pd
import sys, os, argparse
import json

# model imports
import cnn_models as models
import tensorflow as tf
import keras
from keras import backend as K
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
from keras.utils import to_categorical

# image processing imports
import cv2
from sklearn.utils import shuffle

# config imports
sys.path.insert(0, '')  # trick to enable import of main folder module
import custom_config as cfg


def main():

    parser = argparse.ArgumentParser(description="Train Keras model and save it into .json file")

    parser.add_argument('--data', type=str, help='dataset filename prefix (without .train and .val)', required=True)
    parser.add_argument('--output', type=str, help='output file name desired for model (without .json extension)', required=True)
    parser.add_argument('--tl', type=int, help='use or not of transfer learning (`VGG network`)', default=0, choices=[0, 1])
    parser.add_argument('--batch_size', type=int, help='batch size used as model input', default=cfg.keras_batch)
    parser.add_argument('--epochs', type=int, help='number of epochs used for training model', default=cfg.keras_epochs)
    parser.add_argument('--balancing', type=int, help='specify if balancing of classes is done or not', default=1)
    #parser.add_argument('--val_size', type=float, help='percent of validation data during training process', default=cfg.val_dataset_size)

    args = parser.parse_args()

    p_data_file = args.data
    p_output = args.output
    p_tl = args.tl
    p_batch_size = args.batch_size
    p_epochs = args.epochs
    p_balancing = bool(args.balancing)
    #p_val_size = args.val_size

    initial_epoch = 0

    ########################
    # 1. Get and prepare data
    ########################
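
    # NOTE: the `.train` and `.val` files are expected to be ';'-separated CSV
    # files without header, where column 0 holds the binary label (1 = noisy,
    # 0 = not noisy) and column 1 holds the image path(s), joined with '::'
    # when several channels are used (as read below).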
  41. print("Preparing data...")
  42. dataset_train = pd.read_csv(p_data_file + '.train', header=None, sep=";")
  43. dataset_val = pd.read_csv(p_data_file + '.val', header=None, sep=";")
  44. print("Train set size : ", len(dataset_train))
  45. print("val set size : ", len(dataset_val))
  46. # default first shuffle of data
  47. dataset_train = shuffle(dataset_train)
  48. dataset_val = shuffle(dataset_val)
  49. print("Reading all images data...")
  50. # getting number of chanel
  51. n_channels = len(dataset_train[1][1].split('::'))
  52. print("Number of channels : ", n_channels)
  53. img_width, img_height = cfg.keras_img_size
  54. # specify the number of dimensions

    # specify the number of dimensions
    if K.image_data_format() == 'channels_first':
        if n_channels > 1:
            input_shape = (1, n_channels, img_width, img_height)
        else:
            input_shape = (n_channels, img_width, img_height)
    else:
        if n_channels > 1:
            input_shape = (1, img_width, img_height, n_channels)
        else:
            input_shape = (img_width, img_height, n_channels)
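
    # NOTE: when several channels are provided, an extra leading dimension is
    # kept in `input_shape`; the stacked grayscale images are then handled as a
    # volume by `cnn_models.get_model` (presumably with 3D convolutions, which
    # is an assumption). The shape is used as-is when reshaping the images below.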

    # build a dataset with an equal number of occurrences of each class if requested
    if p_balancing:
        print("Balancing of data")

        noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 1]
        not_noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 0]
        nb_noisy_train = len(noisy_df_train.index)

        noisy_df_val = dataset_val[dataset_val.iloc[:, 0] == 1]
        not_noisy_df_val = dataset_val[dataset_val.iloc[:, 0] == 0]
        nb_noisy_val = len(noisy_df_val.index)

        final_df_train = pd.concat([not_noisy_df_train[0:nb_noisy_train], noisy_df_train])
        final_df_val = pd.concat([not_noisy_df_val[0:nb_noisy_val], noisy_df_val])
    else:
        print("No balancing of data")
        final_df_train = dataset_train
        final_df_val = dataset_val

    # `::` is the separator used between the image paths of each channel
    if n_channels > 1:
        final_df_train[1] = final_df_train[1].apply(lambda x: [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in x.split('::')])
        final_df_val[1] = final_df_val[1].apply(lambda x: [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in x.split('::')])
    else:
        final_df_train[1] = final_df_train[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE))
        final_df_val[1] = final_df_val[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE))

    # reshape array data
    final_df_train[1] = final_df_train[1].apply(lambda x: np.array(x).reshape(input_shape))
    final_df_val[1] = final_df_val[1].apply(lambda x: np.array(x).reshape(input_shape))

    # shuffle data another time
    final_df_train = shuffle(final_df_train)
    final_df_val = shuffle(final_df_val)

    final_df_train_size = len(final_df_train.index)
    final_df_val_size = len(final_df_val.index)

    validation_split = final_df_val_size / (final_df_train_size + final_df_val_size)

    print("----------------------------------------------------------")
    print("Validation size is based on the `.val` file content")
    print("Validation split is now set at", validation_split)
    print("----------------------------------------------------------")

    # use the whole data set for training
    x_dataset_train = final_df_train.iloc[:, 1:]
    x_dataset_val = final_df_val.iloc[:, 1:]

    y_dataset_train = final_df_train.iloc[:, 0]
    y_dataset_val = final_df_val.iloc[:, 0]

    x_data_train = []
    for item in x_dataset_train.values:
        #print("Item is here", item)
        x_data_train.append(item[0])

    x_data_train = np.array(x_data_train)

    x_data_val = []
    for item in x_dataset_val.values:
        #print("Item is here", item)
        x_data_val.append(item[0])

    x_data_val = np.array(x_data_val)

    print("End of loading data...")
    print("Train set size (after balancing) : ", final_df_train_size)
    print("Val set size (after balancing) : ", final_df_val_size)

    #######################
    # 2. Getting model
    #######################

    # create backup folder for current model
    model_backup_folder = os.path.join(cfg.backup_model_folder, p_output)
    if not os.path.exists(model_backup_folder):
        os.makedirs(model_backup_folder)

    # add a model checkpoint callback
    filepath = os.path.join(cfg.backup_model_folder, p_output, p_output + "-{auc:02f}-{val_auc:02f}__{epoch:02d}.hdf5")
    checkpoint = ModelCheckpoint(filepath, monitor='val_auc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]
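
    # NOTE: monitoring `val_auc` assumes the model returned by
    # `cnn_models.get_model` is compiled with an AUC metric named 'auc'.
    # The `__{epoch}` suffix of the checkpoint filename is parsed below in
    # order to resume training from the last saved epoch.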

    # check if a backup already exists
    weights_filepath = None
    backups = sorted(os.listdir(model_backup_folder))

    if len(backups) > 0:

        # retrieve the last backup epoch of the model
        last_model_backup = None
        max_last_epoch = 0

        for backup in backups:
            last_epoch = int(backup.split('__')[1].replace('.hdf5', ''))

            if last_epoch > max_last_epoch and last_epoch < p_epochs:
                max_last_epoch = last_epoch
                last_model_backup = backup

        if last_model_backup is None:
            print("The requested number of epochs has already been computed. Nothing to do.")
            sys.exit(1)

        initial_epoch = max_last_epoch
        print("-------------------------------------------------")
        print("Previous backup model found", last_model_backup, "with already", initial_epoch, "epochs done...")
        print("Resuming from epoch", str(initial_epoch + 1))
        print("-------------------------------------------------")

        # load weights
        weights_filepath = os.path.join(model_backup_folder, last_model_backup)

    model = models.get_model(n_channels, input_shape, p_tl, weights_filepath)
    model.summary()

    # concatenate train and validation data (the `validation_split` param will do the separation inside keras)
    y_data = np.concatenate([y_dataset_train.values, y_dataset_val.values])
    x_data = np.concatenate([x_data_train, x_data_val])

    y_data_categorical = to_categorical(y_data)
    #print(y_data_categorical)

    # `validation_split` takes the last part of the data, so the `.val` samples appended above are really the ones used for validation
    model.fit(x_data, y_data_categorical, validation_split=validation_split, initial_epoch=initial_epoch, epochs=p_epochs, batch_size=p_batch_size, callbacks=callbacks_list)

    y_dataset_val_categorical = to_categorical(y_dataset_val)
    score = model.evaluate(x_data_val, y_dataset_val_categorical, batch_size=p_batch_size)
    print("Accuracy score on val dataset ", score)

    if not os.path.exists(cfg.saved_models_folder):
        os.makedirs(cfg.saved_models_folder)

    # save the model architecture into a .json file and its weights into an HDF5 file
    model_output_path = os.path.join(cfg.saved_models_folder, p_output + '.json')
    json_model_content = model.to_json()

    with open(model_output_path, 'w') as f:
        print("Model saved into ", model_output_path)
        json.dump(json_model_content, f, indent=4)

    model.save_weights(model_output_path.replace('.json', '.h5'))

    # Get results obtained from model
    y_train_prediction = model.predict(x_data_train)
    y_val_prediction = model.predict(x_data_val)

    # y_train_prediction = [1 if x > 0.5 else 0 for x in y_train_prediction]
    # y_val_prediction = [1 if x > 0.5 else 0 for x in y_val_prediction]

    y_train_prediction = np.argmax(y_train_prediction, axis=1)
    y_val_prediction = np.argmax(y_val_prediction, axis=1)

    acc_train_score = accuracy_score(y_dataset_train, y_train_prediction)
    acc_val_score = accuracy_score(y_dataset_val, y_val_prediction)

    f1_train_score = f1_score(y_dataset_train, y_train_prediction)
    f1_val_score = f1_score(y_dataset_val, y_val_prediction)

    recall_train_score = recall_score(y_dataset_train, y_train_prediction)
    recall_val_score = recall_score(y_dataset_val, y_val_prediction)

    pres_train_score = precision_score(y_dataset_train, y_train_prediction)
    pres_val_score = precision_score(y_dataset_val, y_val_prediction)

    roc_train_score = roc_auc_score(y_dataset_train, y_train_prediction)
    roc_val_score = roc_auc_score(y_dataset_val, y_val_prediction)

    # save model performance
    if not os.path.exists(cfg.results_information_folder):
        os.makedirs(cfg.results_information_folder)

    perf_file_path = os.path.join(cfg.results_information_folder, cfg.csv_model_comparisons_filename)

    # write header if necessary
    if not os.path.exists(perf_file_path):
        with open(perf_file_path, 'w') as f:
            f.write(cfg.perf_train_header_file)

    # add information into file
    with open(perf_file_path, 'a') as f:
        line = p_output + ';' + str(len(dataset_train)) + ';' + str(len(dataset_val)) + ';' \
            + str(final_df_train_size) + ';' + str(final_df_val_size) + ';' \
            + str(acc_train_score) + ';' + str(acc_val_score) + ';' \
            + str(f1_train_score) + ';' + str(f1_val_score) + ';' \
            + str(recall_train_score) + ';' + str(recall_val_score) + ';' \
            + str(pres_train_score) + ';' + str(pres_val_score) + ';' \
            + str(roc_train_score) + ';' + str(roc_val_score) + '\n'

        f.write(line)

    print("You can now run your model with your own `test` dataset")


if __name__ == "__main__":
    main()