# train_model.py

# main imports
import numpy as np
import pandas as pd
import sys, os, argparse
import json

# model imports
import cnn_models as models
import tensorflow as tf
import keras
from keras.models import load_model
from keras import backend as K
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
from keras.utils import to_categorical

# image processing imports
import cv2
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split

# config imports
sys.path.insert(0, '')  # trick to enable import of main folder module
import custom_config as cfg

# global counter used by the progress bar
n_counter = 0

def write_progress(progress):
    '''
    Display progress information as a progress bar
    '''
    barWidth = 180

    output_str = "["
    pos = int(barWidth * progress)  # integer cursor position (a float would never match `i == pos`)
    for i in range(barWidth):
        if i < pos:
            output_str = output_str + "="
        elif i == pos:
            output_str = output_str + ">"
        else:
            output_str = output_str + " "

    output_str = output_str + "] " + str(int(progress * 100.0)) + " %\r"
    print(output_str)
    sys.stdout.write("\033[F")
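
# Example rendering (for illustration): `write_progress(0.5)` prints a
# 180-character bar filled halfway, e.g. "[====...>    ] 50 %", then moves the
# cursor back up one line so the next call overwrites it in place.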

def main():

    parser = argparse.ArgumentParser(description="Train a Keras model and save it into a .h5 file")

    parser.add_argument('--data', type=str, help='dataset filename prefix (without .train and .test)', required=True)
    parser.add_argument('--output', type=str, help='output file name desired for model (without .h5 extension)', required=True)
    parser.add_argument('--tl', type=int, help='whether to use transfer learning (VGG network)', default=0, choices=[0, 1])
    parser.add_argument('--batch_size', type=int, help='batch size used as model input', default=64)
    parser.add_argument('--epochs', type=int, help='number of epochs used for training the model', default=30)
    parser.add_argument('--chanels', type=int, help='number of channels to use (0 means infer it from the dataset)', default=0)
    parser.add_argument('--size', type=str, help='size of input images, e.g. "100,100"', default="100,100")
    parser.add_argument('--val_size', type=float, help='fraction of the training data used for validation', default=0.3)

    args = parser.parse_args()
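
    # Example invocation (hypothetical paths, assuming `data/noise.train` and
    # `data/noise.test` exist next to this script):
    #   python train_model.py --data data/noise --output noise_cnn \
    #       --batch_size 64 --epochs 30 --size "100,100" --val_size 0.3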
    p_data_file = args.data
    p_output = args.output
    p_tl = args.tl
    p_batch_size = args.batch_size
    p_epochs = args.epochs
    p_chanels = args.chanels
    p_size = args.size.split(',')
    p_val_size = args.val_size

    initial_epoch = 0

    ########################
    # 1. Get and prepare data
    ########################

    print('-----------------------------')
    print("----- Preparing data... -----")
    dataset_train = pd.read_csv(p_data_file + '.train', header=None, sep=";")
    dataset_test = pd.read_csv(p_data_file + '.test', header=None, sep=";")
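
    # Expected row format, inferred from the loading code below (an assumption,
    # not documented in this file): column 0 is the binary label, column 1 the
    # image path(s), with multiple greyscale paths joined by '::', e.g.
    #   1;scene/zone0_20.png::scene/zone0_40.png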

    print("-- Train set size : ", len(dataset_train))
    print("-- Test set size : ", len(dataset_test))

    # default first shuffle of data
    dataset_train = shuffle(dataset_train)
    dataset_test = shuffle(dataset_test)

    print('-----------------------------')
    print("-- Reading all images data...")

    # getting number of chanels
    if p_chanels == 0:
        n_chanels = len(dataset_train[1][1].split('::'))
    else:
        n_chanels = p_chanels

    print("-- Number of chanels : ", n_chanels)
    img_width, img_height = [ int(s) for s in p_size ]

    # specify the number of dimensions
    if K.image_data_format() == 'channels_first':
        if n_chanels > 1:
            input_shape = (1, n_chanels, img_width, img_height)
        else:
            input_shape = (n_chanels, img_width, img_height)
    else:
        if n_chanels > 1:
            input_shape = (1, img_width, img_height, n_chanels)
        else:
            input_shape = (img_width, img_height, n_chanels)
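
    # For illustration: with the default "100,100" size, one greyscale channel
    # and the common 'channels_last' layout, input_shape is (100, 100, 1); with
    # three stacked greyscale images it becomes (1, 100, 100, 3).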

    # compute class weights over the whole dataset (train + test)
    noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 1]
    not_noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 0]
    nb_noisy_train = len(noisy_df_train.index)
    nb_not_noisy_train = len(not_noisy_df_train.index)

    noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 1]
    not_noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 0]
    nb_noisy_test = len(noisy_df_test.index)
    nb_not_noisy_test = len(not_noisy_df_test.index)

    noisy_samples = nb_noisy_test + nb_noisy_train
    not_noisy_samples = nb_not_noisy_test + nb_not_noisy_train
    total_samples = noisy_samples + not_noisy_samples

    print('-----------------------------')
    print('---- Dataset information ----')
    print('-- noisy:', noisy_samples)
    print('-- not_noisy:', not_noisy_samples)
    print('-- total:', total_samples)
    print('-----------------------------')

    # each class is weighted by the other class's frequency, so the minority
    # class gets the larger weight
    class_weight = {
        0: (noisy_samples / float(total_samples)),
        1: (not_noisy_samples / float(total_samples)),
    }
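
    # Worked example (hypothetical numbers): with 3000 noisy and 7000 not-noisy
    # samples, class 0 (not noisy) gets weight 0.3 and class 1 (noisy) gets
    # weight 0.7, so errors on the minority class cost more during training.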

    final_df_train = dataset_train
    final_df_test = dataset_test

    def load_multiple_greyscale(x):
        # update progress
        global n_counter
        n_counter += 1
        write_progress(n_counter / float(total_samples))
        return [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in x.split('::')]

    def load_greyscale(x):
        # update progress
        global n_counter
        n_counter += 1
        write_progress(n_counter / float(total_samples))
        return cv2.imread(x, cv2.IMREAD_GRAYSCALE)

    def load_rgb(x):
        # update progress
        global n_counter
        n_counter += 1
        write_progress(n_counter / float(total_samples))
        return cv2.imread(x)
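
    # Note: cv2.imread silently returns None for an unreadable path, which only
    # surfaces later as a reshape error. A minimal sanity check (an assumption,
    # not part of the original script) could look like:
    #   img = cv2.imread(x, cv2.IMREAD_GRAYSCALE)
    #   assert img is not None, "cannot read image: " + x
    #   return img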

    print('---- Loading dataset.... ----')
    print('-----------------------------\n')

    # check if specific number of chanels is used
    if p_chanels == 0:
        # `::` is the separator used for getting each img path
        if n_chanels > 1:
            final_df_train[1] = final_df_train[1].apply(lambda x: load_multiple_greyscale(x))
            final_df_test[1] = final_df_test[1].apply(lambda x: load_multiple_greyscale(x))
        else:
            final_df_train[1] = final_df_train[1].apply(lambda x: load_greyscale(x))
            final_df_test[1] = final_df_test[1].apply(lambda x: load_greyscale(x))
    else:
        final_df_train[1] = final_df_train[1].apply(lambda x: load_rgb(x))
        final_df_test[1] = final_df_test[1].apply(lambda x: load_rgb(x))

    # reshape array data
    final_df_train[1] = final_df_train[1].apply(lambda x: np.array(x).reshape(input_shape))
    final_df_test[1] = final_df_test[1].apply(lambda x: np.array(x).reshape(input_shape))

    # shuffle data another time
    final_df_train = shuffle(final_df_train)
    final_df_test = shuffle(final_df_test)

    print('\n-----------------------------')
    print("Validation split is now set at", p_val_size)
    print('-----------------------------')

    # use of the whole data set for training
    x_dataset_train = final_df_train.iloc[:, 1:]
    x_dataset_test = final_df_test.iloc[:, 1:]

    y_dataset_train = final_df_train.iloc[:, 0]
    y_dataset_test = final_df_test.iloc[:, 0]

    # extract the image arrays (first data column) into numpy arrays
    x_data_train = np.array([item[0] for item in x_dataset_train.values])
    x_data_test = np.array([item[0] for item in x_dataset_test.values])

    #######################
    # 2. Getting model
    #######################

    # create backup folder for current model
    model_backup_folder = os.path.join(cfg.backup_model_folder, p_output)
    if not os.path.exists(model_backup_folder):
        os.makedirs(model_backup_folder)

    # add model checkpoint callback
    filepath = os.path.join(cfg.backup_model_folder, p_output, p_output + "-_{epoch:03d}.h5")
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=0, mode='max')
    callbacks_list = [checkpoint]
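
    # Since `save_best_only` is left at its default (False), a checkpoint file
    # is written after every epoch; the most recent one is what the resume
    # logic below relies on to restart training where it stopped.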

    # check if backup already exists
    backups = sorted(os.listdir(model_backup_folder))

    if len(backups) > 0:
        last_backup_file = backups[-1]
        last_backup_file_path = os.path.join(model_backup_folder, last_backup_file)
        model = load_model(last_backup_file_path)

        # recover the epoch number from the '<output>-_NNN.h5' filename
        initial_epoch = int(last_backup_file.split('_')[-1].replace('.h5', ''))
        print('-----------------------------')
        print('-- Restore model from backup...')
        print('-- Restart training @epoch:', initial_epoch)
        print('-----------------------------')
    else:
        model = models.get_model(n_chanels, input_shape, p_tl)
        model.summary()

    # prepare train and validation dataset
    X_train, X_val, y_train, y_val = train_test_split(x_data_train, y_dataset_train, test_size=p_val_size, shuffle=False)

    y_train = to_categorical(y_train)
    y_val = to_categorical(y_val)
    y_test = to_categorical(y_dataset_test)

    print('-----------------------------')
    print("-- Fitting model with custom class_weight", class_weight)
    print('-----------------------------')
    model.fit(X_train, y_train,
              validation_data=(X_val, y_val),
              initial_epoch=initial_epoch,
              epochs=p_epochs,
              batch_size=p_batch_size,
              callbacks=callbacks_list,
              class_weight=class_weight)
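
    # With `initial_epoch` set, Keras only runs epochs in the range
    # [initial_epoch, p_epochs), so resuming from a backup at epoch 12 with
    # --epochs 30 trains the remaining 18 epochs instead of starting over.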

    score = model.evaluate(X_val, y_val, batch_size=p_batch_size)
    print("-- Scores (loss, metrics) on val dataset:", score)

    if not os.path.exists(cfg.output_models):
        os.makedirs(cfg.output_models)

    # save the model into a .h5 file
    model_output_path = os.path.join(cfg.output_models, p_output + '.h5')
    model.save(model_output_path)

    # Get results obtained from model
    y_train_prediction = model.predict(tf.convert_to_tensor(np.asarray(X_train)))
    y_val_prediction = model.predict(tf.convert_to_tensor(np.asarray(X_val)))
    y_test_prediction = model.predict(tf.convert_to_tensor(np.asarray(x_data_test)))

    y_train_prediction = np.argmax(y_train_prediction, axis=1)
    y_val_prediction = np.argmax(y_val_prediction, axis=1)
    y_test_prediction = np.argmax(y_test_prediction, axis=1)
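
    # argmax turns each row of per-class scores back into a class label,
    # e.g. a prediction of [0.2, 0.8] becomes 1 (noisy).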

    # ground truth was one-hot encoded above, so convert it back to labels first
    y_train_labels = np.argmax(y_train, axis=1)
    y_val_labels = np.argmax(y_val, axis=1)
    y_test_labels = np.argmax(y_test, axis=1)

    acc_train_score = accuracy_score(y_train_labels, y_train_prediction)
    acc_val_score = accuracy_score(y_val_labels, y_val_prediction)
    acc_test_score = accuracy_score(y_test_labels, y_test_prediction)

    roc_train_score = roc_auc_score(y_train_labels, y_train_prediction)
    roc_val_score = roc_auc_score(y_val_labels, y_val_prediction)
    roc_test_score = roc_auc_score(y_test_labels, y_test_prediction)

    # save model performance
    if not os.path.exists(cfg.output_results_folder):
        os.makedirs(cfg.output_results_folder)

    perf_file_path = os.path.join(cfg.output_results_folder, cfg.csv_model_comparisons_filename)

    # write header if necessary
    if not os.path.exists(perf_file_path):
        with open(perf_file_path, 'w') as f:
            f.write('name;train_acc;val_acc;test_acc;train_auc;val_auc;test_auc\n')

    # add information into file
    with open(perf_file_path, 'a') as f:
        line = p_output + ';' + str(acc_train_score) + ';' + str(acc_val_score) + ';' \
             + str(acc_test_score) + ';' + str(roc_train_score) + ';' \
             + str(roc_val_score) + ';' + str(roc_test_score) + '\n'
        f.write(line)

    print("You can now run your model with your own `test` dataset")


if __name__ == "__main__":
    main()