
Update code to use the IPFML package

Jerome Buisine, 5 years ago
Parent commit: 153ef98eae

+ 4 - 2
classification_cnn_keras.py

@@ -33,8 +33,10 @@ from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
 from keras import backend as K
 from keras.utils import plot_model
 
-from modules.model_helper import plot_info
+from ipfml import tf_model_helper
 
+# local functions import (metrics preprocessing)
+import preprocessing_functions
 
 ##########################################
 # Global parameters (with default value) #
@@ -232,7 +234,7 @@ def main():
             filename = directory + "/" + filename
 
         # save plot file history
-        plot_info.save(history, filename)
+        tf_model_helper.save(history, filename)
 
         plot_model(model, to_file=str(('%s.png' % filename)))
         model.save_weights(str('%s.h5' % filename))
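
A minimal sketch of the new call path, assuming ipfml's tf_model_helper.save takes the same (history, filename) arguments as the removed plot_info.save (shown further down) and reads the usual Keras History.history keys; the fake History object below only stands in for a real training run:

    from collections import namedtuple

    from ipfml import tf_model_helper

    # Stand-in for the object returned by model.fit_generator(...);
    # a real script passes the Keras History object directly.
    History = namedtuple('History', ['history'])
    history = History(history={
        'acc':      [0.60, 0.72, 0.81],
        'val_acc':  [0.58, 0.69, 0.77],
        'loss':     [0.95, 0.61, 0.44],
        'val_loss': [1.01, 0.70, 0.52],
    })

    # Same call as in the hunk above; the output file names
    # ('<filename>_accuracy.png', '<filename>_loss.png') are assumed
    # from the removed plot_info.save helper.
    tf_model_helper.save(history, 'cnn_model')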

+ 5 - 2
classification_cnn_keras_cross_validation.py

@@ -33,7 +33,10 @@ from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
 from keras import backend as K
 from keras.utils import plot_model
 
-from modules.model_helper import plot_info
+from ipfml import tf_model_helper
+
+# local functions import (metrics preprocessing)
+import preprocessing_functions
 
 ##########################################
 # Global parameters (with default value) #
@@ -224,7 +227,7 @@ def main():
             filename = directory + "/" + filename
 
         # save plot file history
-        plot_info.save(history, filename)
+        tf_model_helper.save(history, filename)
 
         plot_model(model, to_file=str(('%s.png' % filename)))
         model.save_weights(str('%s.h5' % filename))

+ 8 - 5
classification_cnn_keras_svd.py

@@ -38,8 +38,11 @@ from keras.utils import plot_model
 import tensorflow as tf
 import numpy as np
 
-from modules.model_helper import plot_info
-from modules.image_metrics import svd_metric
+from ipfml import tf_model_helper
+from ipfml import metrics
+
+# local functions import
+import preprocessing_functions
 
 ##########################################
 # Global parameters (with default value) #
@@ -125,7 +128,7 @@ def load_train_data():
         #shear_range=0.2,
         #zoom_range=0.2,
         #horizontal_flip=True,
-        preprocessing_function=svd_metric.get_s_model_data)
+        preprocessing_function=preprocessing_functions.get_s_model_data)
 
     train_generator = train_datagen.flow_from_directory(
         train_data_dir,
@@ -145,7 +148,7 @@ def load_validation_data():
     # only rescaling
     test_datagen = ImageDataGenerator(
         #rescale=1. / 255,
-        preprocessing_function=svd_metric.get_s_model_data)
+        preprocessing_function=preprocessing_functions.get_s_model_data)
 
     validation_generator = test_datagen.flow_from_directory(
         validation_data_dir,
@@ -241,7 +244,7 @@ def main():
             filename = directory + "/" + filename
 
         # save plot file history
-        plot_info.save(history, filename)
+        tf_model_helper.save(history, filename)
 
         plot_model(model, to_file=str(('%s.png' % filename)), show_shapes=True)
         model.save_weights(str('%s.h5' % filename))

+ 6 - 6
classification_cnn_keras_svd_img.py

@@ -38,9 +38,6 @@ from keras.utils import plot_model
 import tensorflow as tf
 import numpy as np
 
-from modules.model_helper import plot_info
-from modules.image_metrics import svd_metric
-
 import matplotlib.pyplot as plt
 
 # preprocessing of images
@@ -49,6 +46,9 @@ from PIL import Image
 import shutil
 import time
 
+# local functions import (metrics preprocessing)
+import preprocessing_functions
+
 ##########################################
 # Global parameters (with default value) #
 #### ######################################
@@ -87,7 +87,7 @@ def init_directory(img_size, generate_data):
         for f in Path('./data').walkfiles():
             if 'png' in f:
                 img = Image.open(f)
-                new_img = svd_metric.get_s_model_data_img(img)
+                new_img = preprocessing_functions.get_s_model_data_img(img)
                 new_img_path = f.replace('./data', str('./' + svd_data_folder))
                 new_img.save(new_img_path)
                 print(new_img_path)
@@ -140,7 +140,7 @@ def load_train_data():
         #shear_range=0.2,
         #zoom_range=0.2,
         #horizontal_flip=True,
-        #preprocessing_function=svd_metric.get_s_model_data_img
+        #preprocessing_function=preprocessing_functions.get_s_model_data_img
         )
 
     train_generator = train_datagen.flow_from_directory(
@@ -161,7 +161,7 @@ def load_validation_data():
     # only rescaling
     test_datagen = ImageDataGenerator(
         rescale=1. / 255,
-        #preprocessing_function=svd_metric.get_s_model_data_img
+        #preprocessing_function=preprocessing_functions.get_s_model_data_img
         )
 
     validation_generator = test_datagen.flow_from_directory(
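
As a side note on the init_directory hunk above, the per-image conversion can be tried in isolation. A hedged sketch, assuming get_s_model_data_img accepts a PIL image and returns one, as in the hunk (paths are placeholders):

    from PIL import Image

    # local module introduced by this commit (its contents are not shown in the diff)
    import preprocessing_functions

    # hypothetical input/output paths, mirroring the ./data -> SVD-folder rewrite above
    img = Image.open('data/train/final/example.png')
    new_img = preprocessing_functions.get_s_model_data_img(img)
    new_img.save('data_svd/train/final/example.png')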

+ 1 - 1
generate_dataset.py

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 """
 Created on Fri Sep 14 21:02:42 2018

+ 0 - 0
modules/__init__.py


+ 0 - 0
modules/image_metrics/__init__.py


+ 0 - 0
modules/model_helper/__init__.py


+ 0 - 32
modules/model_helper/image_conversion.py

@@ -1,32 +0,0 @@
-from PIL import Image
-
-import numpy as np
-
-def fig2data(fig):
-    """
-    @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
-    @param fig a matplotlib figure
-    @return a numpy 3D array of RGBA values
-    """
-    # draw the renderer
-    fig.canvas.draw()
- 
-    # Get the RGBA buffer from the figure
-    w,h = fig.canvas.get_width_height()
-    buf = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8)
-    buf.shape = (w, h, 3)
- 
-    # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
-    buf = np.roll(buf, 3, axis=2)
-    return buf
-    
-def fig2img(fig):
-    """
-    @brief Convert a Matplotlib figure to a PIL Image in RGBA format and return it
-    @param fig a matplotlib figure
-    @return a Python Imaging Library (PIL) image : default size (480,640,3)
-    """
-    # put the figure pixmap into a numpy array
-    buf = fig2data(fig)
-    w, h, d = buf.shape
-    return Image.frombytes("RGB", (w, h), buf.tostring())
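
The deleted fig2data/fig2img helpers are replaced by ipfml's image_processing.fig2img, used in the svd_metric.py hunk further down. A minimal sketch of the replacement call, with an illustrative figure:

    import matplotlib
    matplotlib.use('Agg')  # headless backend, same "avoid tk issue" concern as in plot_info.py
    import matplotlib.pyplot as plt

    import ipfml as iml

    plt.figure()
    plt.plot([0, 1, 2, 3], [0.1, 0.4, 0.6, 0.9])

    # Convert the current matplotlib figure to a PIL image, as the old
    # modules.model_helper.image_conversion.fig2img did.
    img = iml.image_processing.fig2img(plt.gcf())
    plt.close('all')

    img.save('figure_preview.png')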

+ 0 - 50
modules/model_helper/plot_info.py

@@ -1,50 +0,0 @@
-# module filewhich contains helpful display function
-
-# avoid tk issue
-import matplotlib
-#matplotlib.use('agg')
-import matplotlib.pyplot as plt
-
-'''
-Function which saves data from neural network model
-'''
-def save(history, filename):
-    # summarize history for accuracy
-    plt.plot(history.history['acc'])
-    plt.plot(history.history['val_acc'])
-    plt.title('model accuracy')
-    plt.ylabel('accuracy')
-    plt.xlabel('epoch')
-    plt.legend(['train', 'test'], loc='upper left')
-    plt.savefig(str('%s_accuracy.png' % filename))
-
-    # clear plt history
-    plt.gcf().clear()
-
-    # summarize history for loss
-    plt.plot(history.history['loss'])
-    plt.plot(history.history['val_loss'])
-    plt.title('model loss')
-    plt.ylabel('loss')
-    plt.xlabel('epoch')
-    plt.legend(['train', 'test'], loc='upper left')
-    plt.savefig(str('%s_loss.png' % filename))
-
-def show(history, filename):
-    # summarize history for accuracy
-    plt.plot(history.history['acc'])
-    plt.plot(history.history['val_acc'])
-    plt.title('model accuracy')
-    plt.ylabel('accuracy')
-    plt.xlabel('epoch')
-    plt.legend(['train', 'test'], loc='upper left')
-    plt.show()
-
-    # summarize history for loss
-    plt.plot(history.history['loss'])
-    plt.plot(history.history['val_loss'])
-    plt.title('model loss')
-    plt.ylabel('loss')
-    plt.xlabel('epoch')
-    plt.legend(['train', 'test'], loc='upper left')
-    plt.show()

+ 7 - 28
modules/image_metrics/svd_metric.py

@@ -1,5 +1,3 @@
-# module file which contains all image metrics used in project
-
 from numpy.linalg import svd
 from PIL import Image
 import matplotlib.pyplot as plt
@@ -9,15 +7,11 @@ import time
 
 import numpy as np
 from sklearn import preprocessing
+import ipfml as iml
 
-import modules.model_helper.image_conversion as img_c
-
-'''
-Method which extracts SVD features from image and returns 's' vector
-@return 's' vector
-'''
 def get_s_model_data(image):
-    U, s, V = svd(image, full_matrices=False)
+
+    s = iml.metrics.get_SVD_s(image)
     size = len(s)
 
     # normalized output
@@ -27,13 +21,13 @@ def get_s_model_data(image):
 
     return result
 
-def get_s_model_data_img(image):
+def get_s_model_data_img(image, ):
     fig_size = plt.rcParams["figure.figsize"]
     fig_size[0] = 1
     fig_size[1] = 1
     plt.rcParams["figure.figsize"] = fig_size
 
-    U, s, V = svd(image, full_matrices=False)
+    s = iml.metrics.get_SVD_s(image)
 
     plt.figure()   # create a new figure
 
@@ -42,23 +36,8 @@ def get_s_model_data_img(image):
     plt.plot(output_normalized[70:100:, 1])
     plt.plot(output_normalized[70:100:, 2])
 
-    img = img_c.fig2img(plt.gcf())
+    img = iml.image_processing.fig2img(plt.gcf())
 
     plt.close('all')
 
-    return img
-
-def get(image):
-    return svd(image, full_matrices=False)
-
-def get_s(image):
-    U, s, V = svd(image, full_matrices=False)
-    return s
-
-def get_U(image):
-    U, s, V = svd(image, full_matrices=False)
-    return U
-
-def get_V(image):
-    U, s, V = svd(image, full_matrices=False)
-    return V
+    return img
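
The classification scripts above now import a local preprocessing_functions module that this diff does not show; presumably it carries the same two helpers as the updated svd_metric.py. A rough, hypothetical reconstruction, with the normalization step (hidden by the hunk) replaced by a plausible min-max scaling stand-in:

    # Hypothetical preprocessing_functions.py, reconstructed from the hunk above.
    import matplotlib.pyplot as plt
    from sklearn import preprocessing
    import ipfml as iml

    def get_s_model_data(image):
        # singular values of the image, now computed through the IPFML package
        s = iml.metrics.get_SVD_s(image)
        size = len(s)  # kept from the hunk; presumably used by the hidden normalization

        # normalized output -- the exact code is not visible in the hunk,
        # so this min-max scaling is only a stand-in
        result = preprocessing.minmax_scale(s)
        return result

    def get_s_model_data_img(image):
        # 1x1 inch figure, as in the hunk
        plt.rcParams["figure.figsize"] = [1, 1]

        s = iml.metrics.get_SVD_s(image)
        plt.figure()  # create a new figure

        # plot the normalized singular values; the per-channel slicing
        # shown in the hunk is simplified here
        plt.plot(preprocessing.minmax_scale(s))

        # turn the figure into a PIL image via IPFML, then clean up
        img = iml.image_processing.fig2img(plt.gcf())
        plt.close('all')
        return img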

+ 1 - 0
requirements.txt

@@ -6,3 +6,4 @@ image_slicer
 pydot
 matplotlib
 path.py
+IPFML

+ 4 - 4
run.sh

@@ -8,9 +8,9 @@ else
   echo "Run algorithms with image of size ${size}.."
 fi
 
-# python classification_cnn_keras.py --directory ../models/$size/ --output cnn_model --batch_size 32 --epochs 150 --img $size
-# python classification_cnn_keras_cross_validation.py --directory ../models/$size/ --output cnn_cross_validation_model --batch_size 32 --epochs 150 --img $size
-# python classification_cnn_keras_svd.py --directory ../models/$size/ --output svd_model --batch_size 32 --epochs 150 --img $size
+python classification_cnn_keras.py --directory ../models/$size/ --output cnn_model --batch_size 32 --epochs 150 --img $size
+python classification_cnn_keras_cross_validation.py --directory ../models/$size/ --output cnn_cross_validation_model --batch_size 32 --epochs 150 --img $size
+python classification_cnn_keras_svd.py --directory ../models/$size/ --output svd_model --batch_size 32 --epochs 150 --img $size
 
-python classification_cnn_keras_svd_img.py --directory ../models/$size/ --output svd_img_model --batch_size 32 --epochs 150 --img $size --generate y
+#python classification_cnn_keras_svd_img.py --directory ../models/$size/ --output svd_img_model --batch_size 32 --epochs 150 --img $size --generate y
 python classification_cnn_keras_svd_img.py --directory ../models/$size/ --output svd_img_model --batch_size 32 --epochs 150 --img $size --generate n