classification_cnn_keras_svd.py

'''This script goes along the blog post
"Building powerful image classification models using very little data"
from blog.keras.io.
```
data/
    train/
        final/
            final001.png
            final002.png
            ...
        noisy/
            noisy001.png
            noisy002.png
            ...
    validation/
        final/
            final001.png
            final002.png
            ...
        noisy/
            noisy001.png
            noisy002.png
            ...
```
'''
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
from keras.regularizers import l2
from keras import backend as K
from numpy.linalg import svd
import numpy as np
import tensorflow as tf

# Dimensions of our images: each is resized to img_width x img_height before
# the SVD preprocessing step replaces it with its singular values.
img_width, img_height = 100, 1

train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 7200
nb_validation_samples = 3600
epochs = 200
batch_size = 30

# Session configuration: pin TensorFlow (1.x API) to 6 CPU threads;
# allow_soft_placement lets ops without a GPU kernel fall back to CPU.
config = tf.ConfigProto(intra_op_parallelism_threads=6,
                        inter_op_parallelism_threads=6,
                        allow_soft_placement=True,
                        device_count={'CPU': 6})
session = tf.Session(config=config)
K.set_session(session)


def svd_singular(image):
    '''Reduce an image to the vector of its singular values.'''
    U, s, V = svd(image, full_matrices=False)
    s = s[0:img_width]
    result = s.reshape([img_width, 1, 1])  # one singular value per row, single channel
    return result
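
# Minimal shape sanity check (illustrative only; assumes the grayscale
# (img_width, img_height, 1) arrays that flow_from_directory delivers below):
_demo = svd_singular(np.random.rand(img_width, img_height, 1))
assert _demo.shape == (img_width, 1, 1)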

# svd_singular emits a single channel, so the network input has depth 1.
if K.image_data_format() == 'channels_first':
    input_shape = (1, img_width, img_height)
else:
    input_shape = (img_width, img_height, 1)

model = Sequential()

# Convolutional stack over the 100x1 singular-value "image".
model.add(Conv2D(100, (2, 1), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 1)))

model.add(Conv2D(80, (2, 1)))
model.add(Activation('relu'))
model.add(AveragePooling2D(pool_size=(2, 1)))

model.add(Conv2D(50, (2, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 1)))

# Fully connected head: L2-regularized dense layers with batch
# normalization and progressively lighter dropout.
model.add(Flatten())
model.add(BatchNormalization())

model.add(Dense(300, kernel_regularizer=l2(0.01)))
model.add(Activation('relu'))
model.add(Dropout(0.4))

model.add(Dense(30, kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))

model.add(Dense(100, kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))

model.add(Dense(20, kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.1))

# Single sigmoid unit for the binary final/noisy decision.
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Augmentation configuration for training: the geometric augmentations from
# the original blog post are disabled; every image is instead replaced by
# its singular values.
train_datagen = ImageDataGenerator(
    # rescale=1. / 255,
    # shear_range=0.2,
    # zoom_range=0.2,
    # horizontal_flip=True,
    preprocessing_function=svd_singular)

# Configuration for testing: the same SVD preprocessing, no augmentation.
test_datagen = ImageDataGenerator(
    # rescale=1. / 255,
    preprocessing_function=svd_singular)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    color_mode='grayscale',  # one channel, matching input_shape
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    color_mode='grayscale',
    batch_size=batch_size,
    class_mode='binary')
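
# Optional sanity check: flow_from_directory assigns class indices from the
# subdirectory names in alphabetical order, so 'final' should map to 0 and
# 'noisy' to 1.
print(train_generator.class_indices)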

model.summary()

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)

model.save_weights('noise_classification_img100.h5')
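
# To reuse the trained weights later, rebuild the same architecture first and
# then load them (only the weights are saved above, not the full model), e.g.:
#
#   model.load_weights('noise_classification_img100.h5')
#   scores = model.evaluate_generator(validation_generator,
#                                     steps=nb_validation_samples // batch_size)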