Browse Source

Update of metric and add of run script

jbuisine 3 years ago
parent
commit
b69fde4c72
4 changed files with 30 additions and 23 deletions
  1. 5 22
      classification_cnn_keras_svd.py
  2. 9 1
      modules/image_metrics/svd_metric.py
  3. 3 0
      modules/model_helper/plot_info.py
  4. 13 0
      run.sh

+ 5 - 22
classification_cnn_keras_svd.py

@@ -35,8 +35,6 @@ from keras.regularizers import l2
 from keras import backend as K
 from keras.utils import plot_model
 
-import matplotlib.pyplot as plt
-
 import tensorflow as tf
 import numpy as np
 
@@ -85,22 +83,7 @@ def generate_model():
     model.add(BatchNormalization())
     model.add(Dropout(0.1))
 
-    model.add(Dense(100, kernel_regularizer=l2(0.01)))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.1))
-
-    model.add(Dense(200, kernel_regularizer=l2(0.01)))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.2))
-
-    model.add(Dense(300, kernel_regularizer=l2(0.01)))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.3))
-
-    model.add(Dense(200, kernel_regularizer=l2(0.01)))
+    model.add(Dense(70, kernel_regularizer=l2(0.01)))
     model.add(Activation('relu'))
     model.add(BatchNormalization())
     model.add(Dropout(0.2))
@@ -108,7 +91,7 @@ def generate_model():
     model.add(Dense(100, kernel_regularizer=l2(0.01)))
     model.add(Activation('relu'))
     model.add(BatchNormalization())
-    model.add(Dropout(0.1))
+    model.add(Dropout(0.2))
 
     model.add(Dense(50, kernel_regularizer=l2(0.01)))
     model.add(Activation('relu'))
@@ -137,7 +120,7 @@ def load_train_data():
 
     # this is the augmentation configuration we will use for training
     train_datagen = ImageDataGenerator(
-        rescale=1. / 255,
+        #rescale=1. / 255,
         #shear_range=0.2,
         #zoom_range=0.2,
         #horizontal_flip=True,
@@ -160,7 +143,7 @@ def load_validation_data():
     # this is the augmentation configuration we will use for testing:
     # only rescaling
     test_datagen = ImageDataGenerator(
-        rescale=1. / 255,
+        #rescale=1. / 255,
         preprocessing_function=svd_metric.get_s_model_data)
 
     validation_generator = test_datagen.flow_from_directory(
@@ -259,7 +242,7 @@ def main():
         # save plot file history
         plot_info.save(history, filename)
 
-        plot_model(model, to_file=str(('%s.png' % filename)))
+        plot_model(model, to_file=str(('%s.png' % filename)), show_shapes=True)
         model.save_weights(str('%s.h5' % filename))
 
 

+ 9 - 1
modules/image_metrics/svd_metric.py

@@ -4,6 +4,10 @@ from numpy.linalg import svd
 from PIL import Image
 from scipy import misc
 
+import time
+import numpy as np
+from sklearn import preprocessing
+
 '''
 Method which extracts SVD features from image and returns 's' vector
 @return 's' vector
@@ -11,7 +15,11 @@ Method which extracts SVD features from image and returns 's' vector
 def get_s_model_data(image):
     U, s, V = svd(image, full_matrices=False)
     size = len(s)
-    result = s.reshape([size, 1, 3]) # one shape per canal
+
+    # normalized output
+    output_normalized = preprocessing.normalize(s, norm='l2', axis=1, copy=True, return_norm=False)
+
+    result = output_normalized.reshape([size, 1, 3])
     return result
 
 def get(image):

+ 3 - 0
modules/model_helper/plot_info.py

@@ -1,5 +1,8 @@
 # module filewhich contains helpful display function
 
+# avoid tk issue
+import matplotlib
+matplotlib.use('agg')
 import matplotlib.pyplot as plt
 
 '''

+ 13 - 0
run.sh

@@ -0,0 +1,13 @@
+#!/bin/bash
+
+size=$1
+
+if [ -z "${size}" ]; then
+  echo "Need size parameter : ./run.sh 20"; exit 1
+else 
+  echo "Run algorithms with image of size ${size}.."
+fi
+
+python classification_cnn_keras_svd.py --directory ../models/$size/ --output svd_model --batch_size 32 --epochs 150 --img $size
+python classification_cnn_keras.py --directory ../models/$size/ --output cnn_model --batch_size 32 --epochs 150 --img $size
+python classification_cnn_keras_cross_validation.py --directory ../models/$size/ --output cnn_cross_validation_model --batch_size 32 --epochs 150 --img $size