classification_cnn_keras_cross_validation.py

'''This script goes along the blog post
"Building powerful image classification models using very little data"
from blog.keras.io.
```
data/
    train/
        final/
            final001.png
            final002.png
            ...
        noisy/
            noisy001.png
            noisy002.png
            ...
    validation/
        final/
            final001.png
            final002.png
            ...
        noisy/
            noisy001.png
            noisy002.png
            ...
```
'''
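# Example invocation (illustrative only: the directory, output name and sizes
# below are placeholders, not values required by the script):
#   python classification_cnn_keras_cross_validation.py --directory saved_models \
#       --output cnn_run01 --batch_size 32 --epochs 100 --img 100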
import sys, os, getopt
import json

from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
from keras import backend as K
from keras.utils import plot_model

from ipfml import tf_model_helper

# local functions import (metrics preprocessing)
import preprocessing_functions
##########################################
# Global parameters (with default value) #
##########################################
img_width, img_height = 100, 100

train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 7200
nb_validation_samples = 3600
epochs = 50
batch_size = 16

input_shape = (3, img_width, img_height)
###########################################
'''
Method which builds and compiles the CNN model to train
@return : compiled Keras Sequential model
'''
def generate_model():
    # create your model using this function
    model = Sequential()

    model.add(Conv2D(60, (2, 2), input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(40, (2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(20, (2, 2)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())

    model.add(Dense(140))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))

    model.add(Dense(120))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))

    model.add(Dense(80))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Dense(40))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Dense(20))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    return model
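# Architecture note (sketch, assuming the default 100x100 input size): the three
# Conv2D + MaxPooling2D stages shrink the feature maps to roughly 11x11x20 before
# Flatten, and the single sigmoid unit outputs the probability of the second
# (alphabetically ordered) class folder for binary_crossentropy.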
def load_data():
    # load your data using this function
    # this is the augmentation configuration we will use for training
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='binary')

    return train_generator
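# Note: flow_from_directory infers the two binary labels from the sub-folder
# names under train_data_dir ('final' and 'noisy' in the layout documented at
# the top of this file), so no explicit label file is needed.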
def train_and_evaluate_model(model, data_train, data_test):
    return model.fit_generator(
        data_train,
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        shuffle=True,
        validation_data=data_test,
        validation_steps=nb_validation_samples // batch_size)
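# fit_generator runs nb_train_samples // batch_size batches per epoch and returns
# a Keras History object; its per-epoch loss/accuracy values are presumably what
# tf_model_helper.save() plots and stores in main() below.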
def main():
    # update global variables, not local ones
    global batch_size
    global epochs
    global img_width
    global img_height
    global input_shape
    global train_data_dir
    global validation_data_dir
    global nb_train_samples
    global nb_validation_samples

    # output file name and model directory (filled from command-line options)
    filename = None
    directory = None

    if len(sys.argv) <= 1:
        # required parameters are missing: show usage and exit
        print('Missing parameters...')
        print('classification_cnn_keras_cross_validation.py --directory xxxx --output xxxxx --batch_size xx --epochs xx --img xx')
        sys.exit(2)
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ho:d:b:e:i:", ["help", "output=", "directory=", "batch_size=", "epochs=", "img="])
    except getopt.GetoptError:
        # print help information and exit:
        print('classification_cnn_keras_cross_validation.py --directory xxxx --output xxxxx --batch_size xx --epochs xx --img xx')
        sys.exit(2)

    for o, a in opts:
        if o in ("-h", "--help"):
            print('classification_cnn_keras_cross_validation.py --directory xxxx --output xxxxx --batch_size xx --epochs xx --img xx')
            sys.exit()
        elif o in ("-o", "--output"):
            filename = a
        elif o in ("-b", "--batch_size"):
            batch_size = int(a)
        elif o in ("-e", "--epochs"):
            epochs = int(a)
        elif o in ("-d", "--directory"):
            directory = a
        elif o in ("-i", "--img"):
            img_height = int(a)
            img_width = int(a)
        else:
            assert False, "unhandled option"
    # 3 because we have 3 color channels
    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 3)

    # configuration
    with open('config.json') as json_data:
        d = json.load(json_data)
        train_data_dir = d['train_data_dir']
        validation_data_dir = d['train_validation_dir']

        try:
            nb_train_samples = d[str(img_width)]['nb_train_samples']
            nb_validation_samples = d[str(img_width)]['nb_validation_samples']
        except KeyError:
            print("--img parameter missing or invalid (image size must match an entry in config.json)")
            sys.exit(2)
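    # Illustrative shape of config.json as read above (the key names come from
    # this script, the values are placeholders only):
    # {
    #     "train_data_dir": "data/train",
    #     "train_validation_dir": "data/validation",
    #     "100": {"nb_train_samples": 7200, "nb_validation_samples": 3600}
    # }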
    # load of model
    model = generate_model()
    model.summary()

    data_generator = ImageDataGenerator(rescale=1. / 255, validation_split=0.33)

    # check if possible to not do this thing each time
    train_generator = data_generator.flow_from_directory(train_data_dir, target_size=(img_width, img_height), shuffle=True, seed=13,
                                                         class_mode='binary', batch_size=batch_size, subset="training")

    validation_generator = data_generator.flow_from_directory(train_data_dir, target_size=(img_width, img_height), shuffle=True, seed=13,
                                                              class_mode='binary', batch_size=batch_size, subset="validation")
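    # Both generators read the same train_data_dir: validation_split=0.33 makes
    # the ImageDataGenerator reserve a third of the images, and subset="training"
    # / subset="validation" pick the two complementary parts of that split.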
    # now run model
    history = train_and_evaluate_model(model, train_generator, validation_generator)

    print("directory %s" % directory)
    if directory:
        print('Your model information will be saved into %s...' % directory)

    # if user needs output files
    if filename:
        # update filename by folder
        if directory:
            # create folder if necessary
            if not os.path.exists(directory):
                os.makedirs(directory)
            filename = os.path.join(directory, filename)

        # save plot of training history, model diagram and weights
        tf_model_helper.save(history, filename)
        plot_model(model, to_file='%s.png' % filename)
        model.save_weights('%s.h5' % filename)
if __name__ == "__main__":
    main()