classification_cnn_keras.py 2.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103
  1. '''This script goes along the blog post
  2. "Building powerful image classification models using very little data"
  3. from blog.keras.io.
  4. ```
  5. data/
  6. train/
  7. final/
  8. final001.png
  9. final002.png
  10. ...
  11. noisy/
  12. noisy001.png
  13. noisy002.png
  14. ...
  15. validation/
  16. final/
  17. final001.png
  18. final002.png
  19. ...
  20. noisy/
  21. noisy001.png
  22. noisy002.png
  23. ...
  24. ```
  25. '''
  26. from keras.preprocessing.image import ImageDataGenerator
  27. from keras.models import Sequential
  28. from keras.layers import Conv2D, MaxPooling2D
  29. from keras.layers import Activation, Dropout, Flatten, Dense
  30. from keras import backend as K
  31. # dimensions of our images.
  32. img_width, img_height = 20, 20
  33. train_data_dir = 'data/train'
  34. validation_data_dir = 'data/validation'
  35. nb_train_samples = 115200
  36. nb_validation_samples = 57600
  37. epochs = 50
  38. batch_size = 16
  39. if K.image_data_format() == 'channels_first':
  40. input_shape = (3, img_width, img_height)
  41. else:
  42. input_shape = (img_width, img_height, 3)
  43. model = Sequential()
  44. model.add(Conv2D(40, (3, 3), input_shape=input_shape))
  45. model.add(Activation('relu'))
  46. model.add(MaxPooling2D(pool_size=(2, 2)))
  47. model.add(Conv2D(20, (3, 3)))
  48. model.add(Activation('relu'))
  49. model.add(MaxPooling2D(pool_size=(2, 2)))
  50. model.add(Conv2D(40, (2, 2)))
  51. model.add(Activation('relu'))
  52. model.add(MaxPooling2D(pool_size=(2, 2)))
  53. model.add(Flatten())
  54. model.add(Dense(40))
  55. model.add(Activation('relu'))
  56. model.add(Dropout(0.5))
  57. model.add(Dense(1))
  58. model.add(Activation('sigmoid'))
  59. model.compile(loss='binary_crossentropy',
  60. optimizer='rmsprop',
  61. metrics=['accuracy'])
  62. # this is the augmentation configuration we will use for training
  63. train_datagen = ImageDataGenerator(
  64. rescale=1. / 255,
  65. shear_range=0.2,
  66. zoom_range=0.2,
  67. horizontal_flip=True)
  68. # this is the augmentation configuration we will use for testing:
  69. # only rescaling
  70. test_datagen = ImageDataGenerator(rescale=1. / 255)
  71. train_generator = train_datagen.flow_from_directory(
  72. train_data_dir,
  73. target_size=(img_width, img_height),
  74. batch_size=batch_size,
  75. class_mode='binary')
  76. validation_generator = test_datagen.flow_from_directory(
  77. validation_data_dir,
  78. target_size=(img_width, img_height),
  79. batch_size=batch_size,
  80. class_mode='binary')
# Train from the directory generators. fit_generator is the pre-TF2 Keras
# name (later releases folded this into fit()); kept as-is for the Keras
# version this script targets.
model.fit_generator(
    train_generator,
    # Roughly one full pass over the training set per epoch.
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
# NOTE(review): the filename encodes layer widths 32_16_16_32, but the model
# above uses 40/20/40 conv filters and a 40-unit dense layer — confirm the
# intended naming scheme before relying on the filename.
model.save_weights('noise_classification_32_16_16_32_07_img20.h5')