
Merge branch 'release/v0.0.7'

Jerome Buisine, 5 years ago
Parent commit: 860841a3bb

+ 7 - 3
README.md

@@ -14,18 +14,22 @@ python generate_dataset.py
 ```
 
 It will split scenes and generate all data you need for your neural network.
-You can specify the number of sub images you want in the script by modifying **_NUMBER_SUB_IMAGES_** variables.
+You can specify the number of sub-images you want by modifying the **_NUMBER_SUB_IMAGES_** variable in the script, or by passing it as a parameter:
+
+```
+python generate_dataset.py --nb xxxx
+```
 
 There are 3 kinds of neural networks:
 - **classification_cnn_keras.py** : *based on cropped images, with convolution layers*
 - **classification_cnn_keras_cross_validation.py** : *based on cropped images, with convolution layers; data are randomly split for training*
 - **classification_cnn_keras_svd.py** : *based on SVD metrics of the image*
 
-Note that the image input size need to change in you used specific size for your croped images.
 
 After you have built your neural network in classification_cnn_keras.py, you just have to run it:
+
 ```
-classification_cnn_keras_svd.py --directory xxxx --output xxxxx --batch_size xx --epochs xx --img xx (or --image_width xx --img_height xx)
+python classification_cnn_keras_svd.py --directory xxxx --output xxxxx --batch_size xx --epochs xx --img xx (or --image_width xx --img_height xx)
 ```
 
 A JSON config file is available; it stores the dataset parameters for each available image size (see the sketch below).
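
For illustration, here is a minimal sketch of how the scripts read this config. The exact layout is an assumption inferred from the keys `nb_train_samples` and `nb_validation_samples` that the scripts look up; the real config.json may hold more fields:

```
import json

# assumed structure, keyed by image size:
# { "20": {"nb_train_samples": 7200, "nb_validation_samples": 3600}, ... }
with open('config.json') as json_data:
    d = json.load(json_data)

image_size = 20
nb_train_samples = d[str(image_size)]['nb_train_samples']
nb_validation_samples = d[str(image_size)]['nb_validation_samples']
```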

+ 27 - 9
classification_cnn_keras.py

@@ -29,12 +29,14 @@ import json
 from keras.preprocessing.image import ImageDataGenerator
 from keras.models import Sequential
 from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
-from keras.layers import Activation, Dropout, Flatten, Dense
+from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
 from keras import backend as K
 from keras.utils import plot_model
 
-from modules.model_helper import plot_info
+from ipfml import tf_model_helper
 
+# local import: metric preprocessing functions
+import preprocessing_functions
 
 ##########################################
 # Global parameters (with default value) #
@@ -59,6 +61,7 @@ Method which returns model to train
 def generate_model():
 
     model = Sequential()
+
     model.add(Conv2D(60, (2, 2), input_shape=input_shape))
     model.add(Activation('relu'))
     model.add(MaxPooling2D(pool_size=(2, 2)))
@@ -71,17 +74,31 @@ def generate_model():
     model.add(Activation('relu'))
     model.add(MaxPooling2D(pool_size=(2, 2)))
 
-    model.add(Conv2D(10, (2, 2)))
+    model.add(Flatten())
+
+    model.add(Dense(140))
     model.add(Activation('relu'))
-    model.add(MaxPooling2D(pool_size=(2, 2)))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.3))
 
-    model.add(Flatten())
-    model.add(Dense(60))
+    model.add(Dense(120))
     model.add(Activation('relu'))
-    model.add(Dropout(0.4))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.3))
+
+    model.add(Dense(80))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.2))
+
+    model.add(Dense(40))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.2))
 
-    model.add(Dense(30))
+    model.add(Dense(20))
     model.add(Activation('relu'))
+    model.add(BatchNormalization())
     model.add(Dropout(0.2))
 
     model.add(Dense(1))
@@ -162,6 +179,7 @@ def main():
             filename = a
         elif o in ("-b", "--batch_size"):
             batch_size = int(a)
+            print(batch_size)
         elif o in ("-e", "--epochs"):
             epochs = int(a)
         elif o in ("-d", "--directory"):
@@ -216,7 +234,7 @@ def main():
             filename = directory + "/" + filename
 
         # save plot file history
-        plot_info.save(history, filename)
+        tf_model_helper.save(history, filename)
 
         plot_model(model, to_file=str(('%s.png' % filename)))
         model.save_weights(str('%s.h5' % filename))

+ 20 - 16
classification_cnn_keras_cross_validation.py

@@ -29,11 +29,14 @@ import json
 from keras.preprocessing.image import ImageDataGenerator
 from keras.models import Sequential
 from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
-from keras.layers import Activation, Dropout, Flatten, Dense
+from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
 from keras import backend as K
 from keras.utils import plot_model
 
-from modules.model_helper import plot_info
+from ipfml import tf_model_helper
+
+# local import: metric preprocessing functions
+import preprocessing_functions
 
 ##########################################
 # Global parameters (with default value) #
@@ -70,31 +73,32 @@ def generate_model():
     model.add(Activation('relu'))
     model.add(MaxPooling2D(pool_size=(2, 2)))
 
-    model.add(Conv2D(40, (2, 2)))
-    model.add(Activation('relu'))
-    model.add(MaxPooling2D(pool_size=(2, 2)))
+    model.add(Flatten())
 
-    model.add(Conv2D(20, (2, 2)))
+    model.add(Dense(140))
     model.add(Activation('relu'))
-    model.add(MaxPooling2D(pool_size=(2, 2)))
-
-    model.add(Flatten())
+    model.add(BatchNormalization())
+    model.add(Dropout(0.3))
 
-    model.add(Dense(256))
+    model.add(Dense(120))
     model.add(Activation('relu'))
-    model.add(Dropout(0.2))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.3))
 
-    model.add(Dense(128))
+    model.add(Dense(80))
     model.add(Activation('relu'))
+    model.add(BatchNormalization())
     model.add(Dropout(0.2))
 
-    model.add(Dense(64))
+    model.add(Dense(40))
     model.add(Activation('relu'))
+    model.add(BatchNormalization())
     model.add(Dropout(0.2))
 
-    model.add(Dense(32))
+    model.add(Dense(20))
     model.add(Activation('relu'))
-    model.add(Dropout(0.05))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.2))
 
     model.add(Dense(1))
     model.add(Activation('sigmoid'))
@@ -223,7 +227,7 @@ def main():
             filename = directory + "/" + filename
 
         # save plot file history
-        plot_info.save(history, filename)
+        tf_model_helper.save(history, filename)
 
         plot_model(model, to_file=str(('%s.png' % filename)))
         model.save_weights(str('%s.h5' % filename))

+ 24 - 37
classification_cnn_keras_svd.py

@@ -35,13 +35,14 @@ from keras.regularizers import l2
 from keras import backend as K
 from keras.utils import plot_model
 
-import matplotlib.pyplot as plt
-
 import tensorflow as tf
 import numpy as np
 
-from modules.model_helper import plot_info
-from modules.image_metrics import svd_metric
+from ipfml import tf_model_helper
+from ipfml import metrics
+
+# local import: metric preprocessing functions
+import preprocessing_functions
 
 ##########################################
 # Global parameters (with default value) #
@@ -67,57 +68,43 @@ def generate_model():
 
     model = Sequential()
 
-    model.add(Conv2D(100, (2, 1), input_shape=input_shape))
+    model.add(Conv2D(60, (2, 1), input_shape=input_shape))
     model.add(Activation('relu'))
+    model.add(BatchNormalization())
     model.add(MaxPooling2D(pool_size=(2, 1)))
 
-    model.add(Conv2D(80, (2, 1)))
+    model.add(Conv2D(40, (2, 1)))
     model.add(Activation('relu'))
-    model.add(AveragePooling2D(pool_size=(2, 1)))
+    model.add(MaxPooling2D(pool_size=(2, 1)))
 
-    model.add(Conv2D(50, (2, 1)))
+    model.add(Conv2D(30, (2, 1)))
     model.add(Activation('relu'))
     model.add(MaxPooling2D(pool_size=(2, 1)))
 
     model.add(Flatten())
-    model.add(Dense(50, kernel_regularizer=l2(0.01)))
-    model.add(Activation('relu'))
+    model.add(Dense(150, kernel_regularizer=l2(0.01)))
     model.add(BatchNormalization())
-    model.add(Dropout(0.1))
-
-    model.add(Dense(100, kernel_regularizer=l2(0.01)))
     model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.1))
+    model.add(Dropout(0.2))
 
-    model.add(Dense(200, kernel_regularizer=l2(0.01)))
-    model.add(Activation('relu'))
+    model.add(Dense(120, kernel_regularizer=l2(0.01)))
     model.add(BatchNormalization())
+    model.add(Activation('relu'))
     model.add(Dropout(0.2))
 
-    model.add(Dense(300, kernel_regularizer=l2(0.01)))
-    model.add(Activation('relu'))
+    model.add(Dense(80, kernel_regularizer=l2(0.01)))
     model.add(BatchNormalization())
-    model.add(Dropout(0.3))
-
-    model.add(Dense(200, kernel_regularizer=l2(0.01)))
     model.add(Activation('relu'))
-    model.add(BatchNormalization())
     model.add(Dropout(0.2))
 
-    model.add(Dense(100, kernel_regularizer=l2(0.01)))
-    model.add(Activation('relu'))
+    model.add(Dense(40, kernel_regularizer=l2(0.01)))
     model.add(BatchNormalization())
-    model.add(Dropout(0.1))
-
-    model.add(Dense(50, kernel_regularizer=l2(0.01)))
     model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.1))
+    model.add(Dropout(0.2))
 
     model.add(Dense(20, kernel_regularizer=l2(0.01)))
-    model.add(Activation('relu'))
     model.add(BatchNormalization())
+    model.add(Activation('relu'))
     model.add(Dropout(0.1))
 
     model.add(Dense(1))
@@ -137,11 +124,11 @@ def load_train_data():
 
     # this is the augmentation configuration we will use for training
     train_datagen = ImageDataGenerator(
-        rescale=1. / 255,
+        #rescale=1. / 255,
         #shear_range=0.2,
         #zoom_range=0.2,
         #horizontal_flip=True,
-        preprocessing_function=svd_metric.get_s_model_data)
+        preprocessing_function=preprocessing_functions.get_s_model_data)
 
     train_generator = train_datagen.flow_from_directory(
         train_data_dir,
@@ -160,8 +147,8 @@ def load_validation_data():
     # this is the augmentation configuration we will use for testing:
     # only rescaling
     test_datagen = ImageDataGenerator(
-        rescale=1. / 255,
-        preprocessing_function=svd_metric.get_s_model_data)
+        #rescale=1. / 255,
+        preprocessing_function=preprocessing_functions.get_s_model_data)
 
     validation_generator = test_datagen.flow_from_directory(
         validation_data_dir,
@@ -257,9 +244,9 @@ def main():
             filename = directory + "/" + filename
 
         # save plot file history
-        plot_info.save(history, filename)
+        tf_model_helper.save(history, filename)
 
-        plot_model(model, to_file=str(('%s.png' % filename)))
+        plot_model(model, to_file=str(('%s.png' % filename)), show_shapes=True)
         model.save_weights(str('%s.h5' % filename))
 
 

+ 275 - 0
classification_cnn_keras_svd_img.py

@@ -0,0 +1,275 @@
+'''This script goes along the blog post
+"Building powerful image classification models using very little data"
+from blog.keras.io.
+```
+data/
+    train/
+        final/
+            final001.png
+            final002.png
+            ...
+        noisy/
+            noisy001.png
+            noisy002.png
+            ...
+    validation/
+        final/
+            final001.png
+            final002.png
+            ...
+        noisy/
+            noisy001.png
+            noisy002.png
+            ...
+```
+'''
+import sys, os, getopt
+import json
+
+from keras.preprocessing.image import ImageDataGenerator
+from keras.models import Sequential
+from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Cropping2D
+from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
+from keras.optimizers import Adam
+from keras.regularizers import l2
+from keras import backend as K
+from keras.utils import plot_model
+
+import tensorflow as tf
+import numpy as np
+
+import matplotlib.pyplot as plt
+
+# preprocessing of images
+from path import Path
+from PIL import Image
+import shutil
+import time
+
+from ipfml import tf_model_helper
+
+# local import: metric preprocessing functions
+import preprocessing_functions
+
+##########################################
+# Global parameters (with default value) #
+##########################################
+img_width, img_height = 100, 100
+
+train_data_dir = 'data_svd_**img_size**/train'
+validation_data_dir = 'data_svd_**img_size**/validation'
+nb_train_samples = 7200
+nb_validation_samples = 3600
+epochs = 50
+batch_size = 16
+
+input_shape = (3, img_width, img_height)
+
+###########################################
+
+def init_directory(img_size, generate_data):
+
+    img_size_str = str(img_size)
+
+    svd_data_folder = str('data_svd_' + img_size_str)
+
+    if os.path.exists(svd_data_folder) and 'y' in generate_data:
+        print("Removing all previous data...")
+
+        shutil.rmtree(svd_data_folder)
+
+    if not os.path.exists(svd_data_folder):
+        print("Creating new data... Just take coffee... Or two...")
+        os.makedirs(str(train_data_dir.replace('**img_size**', img_size_str) + '/final'))
+        os.makedirs(str(train_data_dir.replace('**img_size**', img_size_str) + '/noisy'))
+
+        os.makedirs(str(validation_data_dir.replace('**img_size**', img_size_str) + '/final'))
+        os.makedirs(str(validation_data_dir.replace('**img_size**', img_size_str) + '/noisy'))
+
+        for f in Path('./data').walkfiles():
+            if 'png' in f:
+                img = Image.open(f)
+                new_img = preprocessing_functions.get_s_model_data_img(img)
+                new_img_path = f.replace('./data', str('./' + svd_data_folder))
+                new_img.save(new_img_path)
+                print(new_img_path)
+
+
+'''
+Method which returns model to train
+@return : Sequential model
+'''
+def generate_model():
+
+    model = Sequential()
+
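+    # crop 20 px from each border of the 100x100 input, keeping the 60x60 center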
+    model.add(Cropping2D(cropping=((20, 20), (20, 20)), input_shape=input_shape))
+
+    model.add(Conv2D(50, (2, 2)))
+    model.add(Activation('relu'))
+    model.add(AveragePooling2D(pool_size=(2, 2)))
+
+    model.add(Flatten())
+
+    model.add(Dense(100, kernel_regularizer=l2(0.01)))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.2))
+
+    model.add(Dense(100, kernel_regularizer=l2(0.01)))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.5))
+
+    model.add(Dense(1))
+    model.add(Activation('sigmoid'))
+
+    model.compile(loss='binary_crossentropy',
+                  optimizer='rmsprop',
+                  metrics=['accuracy'])
+
+    return model
+
+'''
+Method which loads train data
+@return : DirectoryIterator
+'''
+def load_train_data():
+
+    # this is the augmentation configuration we will use for training
+    train_datagen = ImageDataGenerator(
+        rescale=1. / 255,
+        #shear_range=0.2,
+        #zoom_range=0.2,
+        #horizontal_flip=True,
+        #preprocessing_function=preprocessing_functions.get_s_model_data_img
+        )
+
+    train_generator = train_datagen.flow_from_directory(
+        train_data_dir,
+        target_size=(img_width, img_height),
+        batch_size=batch_size,
+        class_mode='binary')
+
+    return train_generator
+
+'''
+Method which loads validation data
+@return : DirectoryIterator
+'''
+def load_validation_data():
+
+    # this is the augmentation configuration we will use for testing:
+    # only rescaling
+    test_datagen = ImageDataGenerator(
+        rescale=1. / 255,
+        #preprocessing_function=preprocessing_functions.get_s_model_data_img
+        )
+
+    validation_generator = test_datagen.flow_from_directory(
+        validation_data_dir,
+        target_size=(img_width, img_height),
+        batch_size=batch_size,
+        class_mode='binary')
+
+    return validation_generator
+
+def main():
+
+    # update global variable and not local
+    global batch_size
+    global epochs
+    global input_shape
+    global train_data_dir
+    global validation_data_dir
+    global nb_train_samples
+    global nb_validation_samples
+
+    if len(sys.argv) <= 1:
+        print('Run with default parameters...')
+        print('classification_cnn_keras_svd_img.py --directory xxxx --output xxxxx --batch_size xx --epochs xx --img xx --generate (y/n)')
+        sys.exit(2)
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "ho:d:b:e:i:g", ["help", "output=", "directory=", "batch_size=", "epochs=", "img=", "generate="])
+    except getopt.GetoptError:
+        # print help information and exit:
+        print('classification_cnn_keras_svd_img.py --directory xxxx --output xxxxx --batch_size xx --epochs xx --img xx --generate (y/n)')
+        sys.exit(2)
+    for o, a in opts:
+        if o == "-h":
+            print('classification_cnn_keras_svd_img.py --directory xxxx --output xxxxx --batch_size xx --epochs xx --img xx --generate (y/n)')
+            sys.exit()
+        elif o in ("-o", "--output"):
+            filename = a
+        elif o in ("-b", "--batch_size"):
+            batch_size = int(a)
+        elif o in ("-e", "--epochs"):
+            epochs = int(a)
+        elif o in ("-d", "--directory"):
+            directory = a
+        elif o in ("-i", "--img"):
+            image_size = int(a)
+        elif o in ("-g", "--generate"):
+            generate_data = a
+        else:
+            assert False, "unhandled option"
+
+    # 3 because we have 3 color canals
+    if K.image_data_format() == 'channels_first':
+        input_shape = (3, img_width, img_height)
+    else:
+        input_shape = (img_width, img_height, 3)
+
+    img_str_size = str(image_size)
+    train_data_dir = str(train_data_dir.replace('**img_size**', img_str_size))
+    validation_data_dir = str(validation_data_dir.replace('**img_size**', img_str_size))
+
+    # configuration
+    with open('config.json') as json_data:
+        d = json.load(json_data)
+
+        try:
+            nb_train_samples = d[str(image_size)]['nb_train_samples']
+            nb_validation_samples = d[str(image_size)]['nb_validation_samples']
+        except:
+            print("--img parameter missing or invalid (--image_width xx --img_height xx)")
+            sys.exit(2)
+
+
+    init_directory(image_size, generate_data)
+    # load of model
+    model = generate_model()
+    model.summary()
+
+    if(directory):
+        print('Your model information will be saved into %s...' % directory)
+
+    history = model.fit_generator(
+        load_train_data(),
+        steps_per_epoch=nb_train_samples // batch_size,
+        epochs=epochs,
+        validation_data=load_validation_data(),
+        validation_steps=nb_validation_samples // batch_size)
+
+    # if user needs output files
+    if(filename):
+
+        # update filename by folder
+        if(directory):
+            # create folder if necessary
+            if not os.path.exists(directory):
+                os.makedirs(directory)
+            filename = directory + "/" + filename
+
+        fig_size = plt.rcParams["figure.figsize"]
+        fig_size[0] = 9
+        fig_size[1] = 9
+        plt.rcParams["figure.figsize"] = fig_size
+
+        # save plot file history
+        tf_model_helper.save(history, filename)
+
+        plot_model(model, to_file=str(('%s.png' % filename)), show_shapes=True)
+        model.save_weights(str('%s.h5' % filename))
+
+
+if __name__ == "__main__":
+    main()

+ 5 - 4
generate_dataset.py

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 """
 Created on Fri Sep 14 21:02:42 2018
@@ -55,13 +55,14 @@ def main():
         print('generate_dataset.py --nb xxxx')
         sys.exit(2)
     try:
-        opts, args = getopt.getopt(sys.argv[1:], "hn", ["help", "nb="])
+        opts, args = getopt.getopt(sys.argv[1:], "hn:", ["help", "nb="])
     except getopt.GetoptError:
         # print help information and exit:
         print('generate_dataset.py --nb xxxx')
         sys.exit(2)
     for o, a in opts:
-        if o == "-h":
+        if o in ("-h", "--help"):
             print('generate_dataset.py --nb xxxx')
             print('20x20 : 1600')
             print('40x40 : 400')
@@ -69,7 +70,7 @@ def main():
             print('80x80 : 100')
             print('100x100 : 64')
             sys.exit()
-        elif o == '-n':
+        elif o in ("-n", "--nb"):
             NUMBER_SUB_IMAGES = int(a)
 
     init_directory()
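
For example, using the sub-image count that the help output suggests for 20x20 sub-images:

```
python generate_dataset.py --nb 1600
```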

+ 0 - 0
modules/__init__.py


+ 0 - 0
modules/image_metrics/__init__.py


+ 0 - 30
modules/image_metrics/svd_metric.py

@@ -1,30 +0,0 @@
-# module file which contains all image metrics used in project
-
-from numpy.linalg import svd
-from PIL import Image
-from scipy import misc
-
-'''
-Method which extracts SVD features from image and returns 's' vector
-@return 's' vector
-'''
-def get_s_model_data(image):
-    U, s, V = svd(image, full_matrices=False)
-    size = len(s)
-    result = s.reshape([size, 1, 1]) # one shape per canal
-    return result
-
-def get(image):
-    return svd(image, full_matrices=False)
-
-def get_s(image):
-    U, s, V = svd(image, full_matrices=False)
-    return s
-
-def get_U(image):
-    U, s, V = svd(image, full_matrices=False)
-    return U
-
-def get_V(image):
-    U, s, V = svd(image, full_matrices=False)
-    return V

+ 0 - 0
modules/model_helper/__init__.py


+ 0 - 47
modules/model_helper/plot_info.py

@@ -1,47 +0,0 @@
-# module filewhich contains helpful display function
-
-import matplotlib.pyplot as plt
-
-'''
-Function which saves data from neural network model
-'''
-def save(history, filename):
-    # summarize history for accuracy
-    plt.plot(history.history['acc'])
-    plt.plot(history.history['val_acc'])
-    plt.title('model accuracy')
-    plt.ylabel('accuracy')
-    plt.xlabel('epoch')
-    plt.legend(['train', 'test'], loc='upper left')
-    plt.savefig(str('%s_accuracy.png' % filename))
-
-    # clear plt history
-    plt.gcf().clear()
-
-    # summarize history for loss
-    plt.plot(history.history['loss'])
-    plt.plot(history.history['val_loss'])
-    plt.title('model loss')
-    plt.ylabel('loss')
-    plt.xlabel('epoch')
-    plt.legend(['train', 'test'], loc='upper left')
-    plt.savefig(str('%s_loss.png' % filename))
-
-def show(history, filename):
-    # summarize history for accuracy
-    plt.plot(history.history['acc'])
-    plt.plot(history.history['val_acc'])
-    plt.title('model accuracy')
-    plt.ylabel('accuracy')
-    plt.xlabel('epoch')
-    plt.legend(['train', 'test'], loc='upper left')
-    plt.show()
-
-    # summarize history for loss
-    plt.plot(history.history['loss'])
-    plt.plot(history.history['val_loss'])
-    plt.title('model loss')
-    plt.ylabel('loss')
-    plt.xlabel('epoch')
-    plt.legend(['train', 'test'], loc='upper left')
-    plt.show()

+ 43 - 0
preprocessing_functions.py

@@ -0,0 +1,43 @@
+from numpy.linalg import svd
+from PIL import Image
+import matplotlib.pyplot as plt
+from scipy import misc
+
+import time
+
+import numpy as np
+from sklearn import preprocessing
+import ipfml as iml
+
+def get_s_model_data(image):
+
+    s = iml.metrics.get_SVD_s(image)
+    size = len(s)
+
+    # normalized output
+    output_normalized = preprocessing.normalize(s, norm='l1', axis=0, copy=True, return_norm=False)
+
+    result = output_normalized.reshape([size, 1, 3])
+
+    return result
+
+def get_s_model_data_img(image):
+    fig_size = plt.rcParams["figure.figsize"]
+    fig_size[0] = 1
+    fig_size[1] = 1
+    plt.rcParams["figure.figsize"] = fig_size
+
+    s = iml.metrics.get_SVD_s(image)
+
+    plt.figure()   # create a new figure
+
+    output_normalized = preprocessing.normalize(s, norm='l1', axis=0, copy=True, return_norm=False)
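+    # plot the tail (components 70 to 100) of the normalized SVD curve, one line per color channel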
+    plt.plot(output_normalized[70:100, 0])
+    plt.plot(output_normalized[70:100, 1])
+    plt.plot(output_normalized[70:100, 2])
+
+    img = iml.image_processing.fig2img(plt.gcf())
+
+    plt.close('all')
+
+    return img
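
For context, a minimal sketch of how `get_s_model_data` is plugged into Keras, mirroring the change in classification_cnn_keras_svd.py above (the directory path, target size and batch size are placeholders):

```
from keras.preprocessing.image import ImageDataGenerator

import preprocessing_functions

# each image is replaced by its L1-normalized SVD 's' vector before
# being fed to the network; no rescaling is applied afterwards
datagen = ImageDataGenerator(
    preprocessing_function=preprocessing_functions.get_s_model_data)

generator = datagen.flow_from_directory(
    'data/train',            # placeholder path
    target_size=(100, 100),  # placeholder size
    batch_size=16,
    class_mode='binary')
```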

+ 2 - 0
requirements.txt

@@ -5,3 +5,5 @@ sklearn
 image_slicer
 pydot
 matplotlib
+path.py
+IPFML

+ 16 - 0
run.sh

@@ -0,0 +1,16 @@
+#!/bin/bash
+
+size=$1
+
+if [ -z "${size}" ]; then
+  echo "Need size parameter : ./run.sh 20";
+  exit 1;
+else
+  echo "Run algorithms with images of size ${size}.."
+fi
+
+python classification_cnn_keras.py --directory ../models/$size/ --output cnn_model --batch_size 32 --epochs 150 --img $size
+python classification_cnn_keras_cross_validation.py --directory ../models/$size/ --output cnn_cross_validation_model --batch_size 32 --epochs 150 --img $size
+python classification_cnn_keras_svd.py --directory ../models/$size/ --output svd_model --batch_size 32 --epochs 150 --img $size
+
+#python classification_cnn_keras_svd_img.py --directory ../models/$size/ --output svd_img_model --batch_size 32 --epochs 150 --img $size --generate y
+python classification_cnn_keras_svd_img.py --directory ../models/$size/ --output svd_img_model --batch_size 32 --epochs 150 --img $size --generate n