@@ -0,0 +1,103 @@
+'''This script goes along with the blog post
+"Building powerful image classification models using very little data"
+from blog.keras.io.
+
+It expects the data to be organised as follows:
+```
+data/
+    train/
+        final/
+            final001.png
+            final002.png
+            ...
+        noisy/
+            noisy001.png
+            noisy002.png
+            ...
+    validation/
+        final/
+            final001.png
+            final002.png
+            ...
+        noisy/
+            noisy001.png
+            noisy002.png
+            ...
+```
+'''
+
+from keras.preprocessing.image import ImageDataGenerator
+from keras.models import Sequential
+from keras.layers import Conv2D, MaxPooling2D
+from keras.layers import Activation, Dropout, Flatten, Dense
+from keras import backend as K
+
+
+# dimensions of our images.
+img_width, img_height = 20, 20
+
+train_data_dir = 'data/train'
+validation_data_dir = 'data/validation'
+nb_train_samples = 115200
+nb_validation_samples = 57600
+epochs = 50
+batch_size = 16
+
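+# Keras backends disagree on tensor layout: Theano-style backends put the
+# channel axis first, while TensorFlow puts it last, hence the check below.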
+if K.image_data_format() == 'channels_first':
+    input_shape = (3, img_width, img_height)
+else:
+    input_shape = (img_width, img_height, 3)
+
+model = Sequential()
+model.add(Conv2D(40, (3, 3), input_shape=input_shape))
+model.add(Activation('relu'))
+model.add(MaxPooling2D(pool_size=(2, 2)))
+
+model.add(Conv2D(20, (3, 3)))
+model.add(Activation('relu'))
+model.add(MaxPooling2D(pool_size=(2, 2)))
+
+model.add(Conv2D(40, (2, 2)))
+model.add(Activation('relu'))
+model.add(MaxPooling2D(pool_size=(2, 2)))
+
+model.add(Flatten())
+model.add(Dense(40))
+model.add(Activation('relu'))
+model.add(Dropout(0.5))
+model.add(Dense(1))
+model.add(Activation('sigmoid'))
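+# Shape check (the convolutions use 'valid' padding, the Keras default):
+# 20x20 -> Conv 3x3 -> 18x18 -> pool -> 9x9 -> Conv 3x3 -> 7x7 -> pool -> 3x3
+# -> Conv 2x2 -> 2x2 -> pool -> 1x1, so Flatten() sees a 40-element vector.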
+
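+# A single sigmoid output with binary cross-entropy is the standard pairing
+# for a two-class problem; class_mode='binary' below yields matching 0/1 labels.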
+model.compile(loss='binary_crossentropy',
+              optimizer='rmsprop',
+              metrics=['accuracy'])
+
+# this is the augmentation configuration we will use for training
+train_datagen = ImageDataGenerator(
+    rescale=1. / 255,
+    shear_range=0.2,
+    zoom_range=0.2,
+    horizontal_flip=True)
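+# These settings generate randomly sheared/zoomed/flipped variants of each
+# training image on the fly, so the model rarely sees an identical input twice.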
+
+# this is the augmentation configuration we will use for testing:
+# only rescaling
+test_datagen = ImageDataGenerator(rescale=1. / 255)
+
+train_generator = train_datagen.flow_from_directory(
+    train_data_dir,
+    target_size=(img_width, img_height),
+    batch_size=batch_size,
+    class_mode='binary')
+
+validation_generator = test_datagen.flow_from_directory(
+    validation_data_dir,
+    target_size=(img_width, img_height),
+    batch_size=batch_size,
+    class_mode='binary')
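+# flow_from_directory() resizes every image to target_size and assigns class
+# indices alphabetically, so 'final' becomes label 0 and 'noisy' label 1.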
+
+model.fit_generator(
+    train_generator,
+    steps_per_epoch=nb_train_samples // batch_size,
+    epochs=epochs,
+    validation_data=validation_generator,
+    validation_steps=nb_validation_samples // batch_size)
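+# With the sample counts above this runs 115200 // 16 = 7200 training steps
+# and 57600 // 16 = 3600 validation steps per epoch, one full pass over each set.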
+
+model.save_weights('noise_classification_32_16_16_32_07_img20.h5')
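+# save_weights() stores only the layer weights, not the architecture; to reuse
+# them, rebuild the same model and call model.load_weights() on this file.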