
Add model using SVD of images

jbuisine 5 years ago
Parent commit 2c8ad26c6b

+ 1 - 0
.gitignore

@@ -1,2 +1,3 @@
 # project data
 data
+.python-version

+ 9 - 0
RESULTS.md

@@ -0,0 +1,9 @@
+# 1. Create database
+    - 6 scenes for train
+    - 3 scenes for validation
+    - Balance noise / final classes
+
+# 2. Test CNN (check if size is correct)
+
+# 3. Results
+    - noise_classification_img100.h5 :: loss: 0.1551 - acc: 0.9393 - val_loss: 1.2858 - val_acc: 0.7845

+ 0 - 11
TODO.md

@@ -1,11 +0,0 @@
-# 1. Create database 
-    - 6 scenes for train
-    - 3 scenes for validation
-    - Balance noise / final classes
-
-# 2. Test CNN (check if size is correct)
-
-# 3. Results 
-    - noise_classification_32_16_16_32.h5 : 81.15%
-    - noise_classification_64_32_32_64.h5 : loss: 0.4416 - acc: 0.7993 - val_loss: 0.9338 - val_acc: 0.6943
-

+ 21 - 11
classification_cnn_keras.py

@@ -26,20 +26,20 @@ data/
 
 from keras.preprocessing.image import ImageDataGenerator
 from keras.models import Sequential
-from keras.layers import Conv2D, MaxPooling2D
+from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
 from keras.layers import Activation, Dropout, Flatten, Dense
 from keras import backend as K
 
 
 # dimensions of our images.
-img_width, img_height = 20, 20
+img_width, img_height = 100, 100
 
 train_data_dir = 'data/train'
 validation_data_dir = 'data/validation'
-nb_train_samples = 115200
-nb_validation_samples = 57600
+nb_train_samples = 7200
+nb_validation_samples = 3600
 epochs = 50
-batch_size = 16
+batch_size = 30
 
 if K.image_data_format() == 'channels_first':
     input_shape = (3, img_width, img_height)
@@ -47,22 +47,31 @@ else:
     input_shape = (img_width, img_height, 3)
 
 model = Sequential()
-model.add(Conv2D(40, (3, 3), input_shape=input_shape))
+model.add(Conv2D(60, (2, 2), input_shape=input_shape))
 model.add(Activation('relu'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 
-model.add(Conv2D(20, (3, 3)))
+model.add(Conv2D(40, (2, 2)))
 model.add(Activation('relu'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 
-model.add(Conv2D(40, (2, 2)))
+model.add(Conv2D(20, (2, 2)))
+model.add(Activation('relu'))
+model.add(MaxPooling2D(pool_size=(2, 2)))
+
+model.add(Conv2D(10, (2, 2)))
 model.add(Activation('relu'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 
 model.add(Flatten())
-model.add(Dense(40))
+model.add(Dense(60))
+model.add(Activation('relu'))
+model.add(Dropout(0.4))
+
+model.add(Dense(30))
 model.add(Activation('relu'))
-model.add(Dropout(0.5))
+model.add(Dropout(0.2))
+
 model.add(Dense(1))
 model.add(Activation('sigmoid'))
 
@@ -93,6 +102,7 @@ validation_generator = test_datagen.flow_from_directory(
     batch_size=batch_size,
     class_mode='binary')
 
+model.summary()
 model.fit_generator(
     train_generator,
     steps_per_epoch=nb_train_samples // batch_size,
@@ -100,4 +110,4 @@ model.fit_generator(
     validation_data=validation_generator,
     validation_steps=nb_validation_samples // batch_size)
 
-model.save_weights('noise_classification_32_16_16_32_07_img20.h5')
+model.save_weights('noise_classification_img100.h5')
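Only the weights are persisted here, not the full model, so inference later means rebuilding the exact architecture above before calling `load_weights`. A minimal sketch, assuming the `model` defined in this script and a hypothetical 100x100 RGB image `test.png`, rescaled the same way as the training data; with `class_mode='binary'`, Keras indexes class subdirectories alphabetically, so the sigmoid output should be the probability of `noisy`:

```
# Inference sketch (assumes the architecture above is rebuilt first).
# 'test.png' is a hypothetical 100x100 RGB input image.
import numpy as np
from keras.preprocessing import image

model.load_weights('noise_classification_img100.h5')

img = image.load_img('test.png', target_size=(img_width, img_height))
x = np.expand_dims(image.img_to_array(img) / 255., axis=0)
print('probability of noisy: %.3f' % model.predict(x)[0][0])
```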

+ 157 - 0
classification_cnn_keras_cross_validation.py

@@ -0,0 +1,157 @@
+'''This script goes along the blog post
+"Building powerful image classification models using very little data"
+from blog.keras.io.
+```
+data/
+    train/
+        final/
+            final001.png
+            final002.png
+            ...
+        noisy/
+            noisy001.png
+            noisy002.png
+            ...
+    validation/
+        final/
+            final001.png
+            final002.png
+            ...
+        noisy/
+            noisy001.png
+            noisy002.png
+            ...
+```
+'''
+
+from keras.preprocessing.image import ImageDataGenerator
+from keras.models import Sequential
+from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
+from keras.layers import Activation, Dropout, Flatten, Dense
+from keras import backend as K
+from sklearn.model_selection import StratifiedKFold  # sklearn.cross_validation was removed in scikit-learn 0.20
+from keras.utils import plot_model
+
+
+# dimensions of our images.
+img_width, img_height = 100, 100
+
+train_data_dir = 'data/train'
+validation_data_dir = 'data/validation'
+nb_train_samples = 7200
+nb_validation_samples = 3600
+epochs = 50
+batch_size = 16
+
+if K.image_data_format() == 'channels_first':
+    input_shape = (3, img_width, img_height)
+else:
+    input_shape = (img_width, img_height, 3)
+
+
+
+def create_model():
+    # create your model using this function
+    model = Sequential()
+    model.add(Conv2D(60, (2, 2), input_shape=input_shape))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Conv2D(40, (2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Conv2D(20, (2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Conv2D(40, (2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Conv2D(20, (2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Flatten())
+
+    model.add(Dense(256))
+    model.add(Activation('relu'))
+    model.add(Dropout(0.2))
+
+    model.add(Dense(128))
+    model.add(Activation('relu'))
+    model.add(Dropout(0.2))
+
+    model.add(Dense(64))
+    model.add(Activation('relu'))
+    model.add(Dropout(0.2))
+
+    model.add(Dense(32))
+    model.add(Activation('relu'))
+    model.add(Dropout(0.05))
+
+    model.add(Dense(1))
+    model.add(Activation('sigmoid'))
+
+    model.compile(loss='binary_crossentropy',
+                  optimizer='rmsprop',
+                  metrics=['accuracy'])
+
+    model.summary()
+    plot_model(model, to_file='noise_classification_img100.png', show_shapes=True)
+    return model
+
+def load_data():
+    # load your data using this function
+    # this is the augmentation configuration we will use for training
+    train_datagen = ImageDataGenerator(
+        rescale=1. / 255,
+        shear_range=0.2,
+        zoom_range=0.2,
+        horizontal_flip=True)
+
+    # this is the augmentation configuration we will use for testing:
+    # only rescaling
+    test_datagen = ImageDataGenerator(rescale=1. / 255)
+
+    train_generator = train_datagen.flow_from_directory(
+        train_data_dir,
+        target_size=(img_width, img_height),
+        batch_size=batch_size,
+        class_mode='binary')
+
+    return train_generator
+
+    #validation_generator = test_datagen.flow_from_directory(
+    #    validation_data_dir,
+    #    target_size=(img_width, img_height),
+    #    batch_size=batch_size,
+    #    class_mode='binary')
+
+def train_and_evaluate_model(model, data_train, data_test):
+
+    model.fit_generator(
+        data_train,
+        steps_per_epoch=nb_train_samples // batch_size,
+        epochs=epochs,
+        shuffle=True,
+        validation_data=data_test,
+        validation_steps=nb_validation_samples // batch_size)
+
+if __name__ == "__main__":
+    n_folds = 10
+
+    data_generator = ImageDataGenerator(rescale=1./255, validation_split=0.33)
+
+    # TODO: check whether these generators can be built once instead of on each run
+    train_generator = data_generator.flow_from_directory(train_data_dir, target_size=(img_width, img_height), shuffle=True, seed=13,
+                                                         class_mode='binary', batch_size=batch_size, subset="training")
+
+    validation_generator = data_generator.flow_from_directory(train_data_dir, target_size=(img_width, img_height), shuffle=True, seed=13,
+                                                         class_mode='binary', batch_size=batch_size, subset="validation")
+
+    model = create_model()
+    train_and_evaluate_model(model, train_generator, validation_generator)
+
+    model.save_weights('noise_classification_img100.h5')
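`StratifiedKFold` is imported but never used: the script falls back to a single `validation_split=0.33`, so `n_folds` is currently dead code as well. A rough sketch of the k-fold loop the imports suggest, assuming the dataset fits in memory as arrays `X` (images) and `y` (binary labels), both hypothetical here since `flow_from_directory` deliberately avoids loading everything at once:

```
# Hypothetical k-fold loop (not in this commit): X, y are assumed
# in-memory arrays of shape (n_samples, 100, 100, 3) and (n_samples,).
from sklearn.model_selection import StratifiedKFold

skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=13)
accuracies = []
for train_idx, val_idx in skf.split(X, y):
    model = create_model()  # fresh weights for each fold
    model.fit(X[train_idx], y[train_idx],
              batch_size=batch_size, epochs=epochs,
              validation_data=(X[val_idx], y[val_idx]))
    accuracies.append(model.evaluate(X[val_idx], y[val_idx])[1])
print('mean accuracy over folds: %.4f' % (sum(accuracies) / len(accuracies)))
```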

+ 146 - 0
classification_cnn_keras_svd.py

@@ -0,0 +1,146 @@
+'''This script goes along the blog post
+"Building powerful image classification models using very little data"
+from blog.keras.io.
+```
+data/
+    train/
+        final/
+            final001.png
+            final002.png
+            ...
+        noisy/
+            noisy001.png
+            noisy002.png
+            ...
+    validation/
+        final/
+            final001.png
+            final002.png
+            ...
+        noisy/
+            noisy001.png
+            noisy002.png
+            ...
+```
+'''
+
+from keras.preprocessing.image import ImageDataGenerator
+from keras.models import Sequential
+from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
+from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
+from keras.optimizers import Adam
+from keras.regularizers import l2
+from keras import backend as K
+from numpy.linalg import svd
+import tensorflow as tf
+import numpy as np
+from PIL import Image
+
+from scipy import misc
+import matplotlib.pyplot as plt
+import keras as k
+
+# dimensions of our images.
+img_width, img_height = 100, 1
+
+train_data_dir = 'data/train'
+validation_data_dir = 'data/validation'
+nb_train_samples = 7200
+nb_validation_samples = 3600
+epochs = 200
+batch_size = 30
+
+# configuration
+config = tf.ConfigProto(intra_op_parallelism_threads=6, inter_op_parallelism_threads=6, \
+                        allow_soft_placement=True, device_count = {'CPU': 6})
+session = tf.Session(config=config)
+K.set_session(session)
+
+def svd_singular(image):
+    # keep only the singular values of the loaded array as a compact descriptor
+    U, s, V = svd(image, full_matrices=False)
+    s = s[0:img_width]
+    result = s.reshape([img_width, 1, 1])  # one value per row, single channel
+    return result
+
+if K.image_data_format() == 'channels_first':
+    input_shape = (3, img_width, img_height)
+else:
+    input_shape = (img_width, img_height, 3)
+
+model = Sequential()
+
+model.add(Conv2D(100, (2, 1), input_shape=input_shape))
+model.add(Activation('relu'))
+model.add(MaxPooling2D(pool_size=(2, 1)))
+
+model.add(Conv2D(80, (2, 1)))
+model.add(Activation('relu'))
+model.add(AveragePooling2D(pool_size=(2, 1)))
+
+model.add(Conv2D(50, (2, 1)))
+model.add(Activation('relu'))
+model.add(MaxPooling2D(pool_size=(2, 1)))
+
+model.add(Flatten())
+model.add(BatchNormalization())
+model.add(Dense(300, kernel_regularizer=l2(0.01)))
+model.add(Activation('relu'))
+model.add(Dropout(0.4))
+
+model.add(Dense(30, kernel_regularizer=l2(0.01)))
+model.add(BatchNormalization())
+model.add(Activation('relu'))
+model.add(Dropout(0.3))
+
+model.add(Dense(100, kernel_regularizer=l2(0.01)))
+model.add(BatchNormalization())
+model.add(Activation('relu'))
+model.add(Dropout(0.2))
+
+model.add(Dense(20, kernel_regularizer=l2(0.01)))
+model.add(BatchNormalization())
+model.add(Activation('relu'))
+model.add(Dropout(0.1))
+
+model.add(Dense(1))
+model.add(Activation('sigmoid'))
+
+model.compile(loss='binary_crossentropy',
+              optimizer='rmsprop',
+              metrics=['accuracy'])
+
+# this is the augmentation configuration we will use for training
+train_datagen = ImageDataGenerator(
+    #rescale=1. / 255,
+    #shear_range=0.2,
+    #zoom_range=0.2,
+    #horizontal_flip=True,
+    preprocessing_function=svd_singular)
+# this is the augmentation configuration we will use for testing:
+# only rescaling
+test_datagen = ImageDataGenerator(
+    #rescale=1. / 255,
+    preprocessing_function=svd_singular)
+
+train_generator = train_datagen.flow_from_directory(
+    train_data_dir,
+    target_size=(img_width, img_height),
+    batch_size=batch_size,
+    class_mode='binary')
+
+validation_generator = test_datagen.flow_from_directory(
+    validation_data_dir,
+    target_size=(img_width, img_height),
+    batch_size=batch_size,
+    class_mode='binary')
+
+
+model.summary()
+model.fit_generator(
+    train_generator,
+    steps_per_epoch=nb_train_samples // batch_size,
+    epochs=epochs,
+    validation_data=validation_generator,
+    validation_steps=nb_validation_samples // batch_size)
+
+model.save_weights('noise_classification_img100.h5')
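To make the preprocessing concrete: `svd_singular` feeds the network singular values instead of raw pixels. A quick shape check on random data, under the same `img_width = 100`:

```
# Shape check for svd_singular on a fake (height, width, channels) array,
# matching what flow_from_directory yields for target_size=(100, 1).
import numpy as np

fake_image = np.random.rand(100, 1, 3)
descriptor = svd_singular(fake_image)
print(descriptor.shape)  # -> (100, 1, 1): numpy broadcasts the SVD over
                         # the 100 rows, one singular value per row
```

Note the single output channel versus the three-channel `input_shape` declared for the model; worth confirming with `model.summary()` before a long run.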

+ 1 - 7
generate_dataset.py

@@ -7,18 +7,12 @@ Created on Fri Sep 14 21:02:42 2018
 """
 
 from __future__ import print_function
-import keras
-from keras.datasets import cifar10
-from keras.preprocessing.image import ImageDataGenerator
-from keras.models import Sequential
-from keras.layers import Dense, Dropout, Activation, Flatten
-from keras.layers import Conv2D, MaxPooling2D
 import os, glob, image_slicer
 from PIL import Image
 
 # shows how to create your own dataset: https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d
 
-NUMBER_SUB_IMAGES = 1600
+NUMBER_SUB_IMAGES = 100
 
 def init_directory():
     if not os.path.exists('data'):
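For reference, a sketch of the slicing step itself: `image_slicer` cuts one scene into `NUMBER_SUB_IMAGES` tiles. `'scene.png'` and the output directory are placeholder paths:

```
# Sketch: split one scene into tiles and save them under a class folder.
# 'scene.png' and 'data/train/final' are placeholders.
import image_slicer

tiles = image_slicer.slice('scene.png', NUMBER_SUB_IMAGES, save=False)
image_slicer.save_tiles(tiles, directory='data/train/final', prefix='final')
```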

BIN  models/noise_classification_32_16_16_32.h5
BIN  models/noise_classification_32_16_16_32_07.h5
BIN  models/noise_classification_32_16_16_32_07_img20.h5
BIN  models/noise_classification_64_32_32_64.h5

+ 3 - 0
requirements.txt

@@ -1,4 +1,7 @@
 Pillow
 keras
 tensorflow
+scikit-learn
 image_slicer
+pydot
+matplotlib