
First version of common modules project

Jérôme BUISINE, 4 years ago
Parent commit: c88a9227f2
11 changed files with 318 additions and 0 deletions
  1. .gitignore (+5 / -0)
  2. LICENSE (+8 / -0)
  3. README.md (+18 / -0)
  4. __init__.py (+0 / -0)
  5. classes/Transformation.py (+61 / -0)
  6. classes/__init__.py (+0 / -0)
  7. models/metrics.py (+8 / -0)
  8. models/models.py (+125 / -0)
  9. utils/__init__.py (+0 / -0)
  10. utils/config.py (+49 / -0)
  11. utils/data.py (+44 / -0)

+ 5 - 0
.gitignore

@@ -0,0 +1,5 @@
+# IDE folder
+.vscode
+
+# python cache
+__pycache__

+ 8 - 0
LICENSE

@@ -0,0 +1,8 @@
+MIT License
+Copyright (c) 2019 prise-3d
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 18 - 0
README.md

@@ -0,0 +1,18 @@
+# Deep Learning modules
+
+## Description
+
+Project containing the common modules used by several related projects:
+
+- [Noise Detection CNN](https://github.com/prise-3d/Thesis-NoiseDetection-CNN.git)
+- [Denoising autoencoder](https://github.com/prise-3d/Thesis-Denoising-autoencoder.git)
+
+## Add as dependency
+
+```bash
+git submodule add https://github.com/prise-3d/Thesis-DeepLearning-modules.git modules
+```
+
+## License
+
+[The MIT License](https://github.com/prise-3d/Thesis-DeepLearning-modules/blob/master/LICENSE)
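
Once the repository is added as a submodule in a folder named `modules`, its contents are imported with the `modules.` package prefix, which is how `models/models.py` in this commit resolves its own imports. A minimal sketch from a hypothetical parent project (the names and layout below are assumptions based on the README command above):

```python
# Hypothetical imports from a parent project that added this repository
# as a submodule in a folder named "modules"
from modules.utils import config as cfg
from modules.models import metrics, models
from modules.classes.Transformation import Transformation
```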

+ 0 - 0
__init__.py


+ 61 - 0
classes/Transformation.py

@@ -0,0 +1,61 @@
+import os
+
+from transformation_functions import svd_reconstruction, fast_ica_reconstruction, ipca_reconstruction
+
+# Transformation class storing the transformation applied to an image and exposing useful information
+class Transformation():
+
+    def __init__(self, _transformation, _param):
+        self.transformation = _transformation
+        self.param = _param
+
+    def getTransformedImage(self, img):
+
+        if self.transformation == 'svd_reconstruction':
+            begin, end = list(map(int, self.param.split(',')))
+            data = svd_reconstruction(img, [begin, end])
+
+        if self.transformation == 'ipca_reconstruction':
+            n_components, batch_size = list(map(int, self.param.split(',')))
+            data = ipca_reconstruction(img, n_components, batch_size)
+
+        if self.transformation == 'fast_ica_reconstruction':
+            n_components = self.param
+            data = fast_ica_reconstruction(img, n_components)
+
+        if self.transformation == 'static':
+            # static content: keep the input as it is
+            data = img
+
+        return data
+    
+    def getTransformationPath(self):
+
+        path = self.transformation
+
+        if self.transformation == 'svd_reconstruction':
+            begin, end = list(map(int, self.param.split(',')))
+            path = os.path.join(path, str(begin) + '_' + str(end))
+
+        if self.transformation == 'ipca_reconstruction':
+            n_components, batch_size = list(map(int, self.param.split(',')))
+            path = os.path.join(path, 'N' + str(n_components) + '_' + str(batch_size))
+
+        if self.transformation == 'fast_ica_reconstruction':
+            n_components = self.param
+            path = os.path.join(path, 'N' + str(n_components))
+
+        if self.transformation == 'static':
+            # param contains the whole path of the image
+            path = self.param
+
+        return path
+
+    def getName(self):
+        return self.transformation
+
+    def getParam(self):
+        return self.param
+
+    def __str__(self):
+        return self.transformation + ' transformation with parameter: ' + self.param

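As a sketch of how `Transformation` is meant to be used (the parameter string and expected outputs below are illustrative assumptions, and `transformation_functions` must be importable for `getTransformedImage` to run):

```python
from classes.Transformation import Transformation  # or modules.classes.Transformation from a parent project

# comma-separated parameter string, parsed by getTransformedImage / getTransformationPath
transformation = Transformation('svd_reconstruction', '30,200')

# would apply svd_reconstruction(img, [30, 200]) to an image array (img assumed to be a numpy array)
# reduced = transformation.getTransformedImage(img)

print(transformation.getTransformationPath())  # svd_reconstruction/30_200
print(transformation)  # svd_reconstruction transformation with parameter: 30,200
```
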
+ 0 - 0
classes/__init__.py


+ 8 - 0
models/metrics.py

@@ -0,0 +1,8 @@
+from keras import backend as K
+import tensorflow as tf
+
+def auc(y_true, y_pred):
+    auc = tf.metrics.auc(y_true, y_pred)[1]
+    K.get_session().run(tf.local_variables_initializer())
+    
+    return auc
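
The `auc` function above has the `(y_true, y_pred)` signature Keras expects from a custom metric, so it can be passed to `model.compile`, which is exactly how `models/models.py` below uses it. A minimal sketch with an assumed toy model:

```python
from keras.models import Sequential
from keras.layers import Dense

from modules.models import metrics  # import path assumes use as a "modules" submodule

model = Sequential()
model.add(Dense(1, activation='sigmoid', input_shape=(10,)))

# the streaming AUC is reported alongside accuracy during training
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy', metrics.auc])
```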

+ 125 - 0
models/models.py

@@ -0,0 +1,125 @@
+from keras.preprocessing.image import ImageDataGenerator
+from keras.models import Sequential
+from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv3D, MaxPooling3D, AveragePooling3D
+from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
+from keras import backend as K
+import tensorflow as tf
+
+from modules.utils import config as cfg
+from modules.models import metrics
+
+
+def generate_model_2D(_input_shape):
+
+    model = Sequential()
+
+    model.add(Conv2D(60, (2, 2), input_shape=_input_shape))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Conv2D(40, (2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Conv2D(20, (2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Flatten())
+
+    model.add(Dense(140))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(Dense(120))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(Dense(80))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(Dense(40))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(Dense(20))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(Dense(1))
+    model.add(Activation('sigmoid'))
+
+    model.compile(loss='binary_crossentropy',
+                  optimizer='rmsprop',
+                  metrics=['accuracy', metrics.auc])
+
+    return model
+
+def generate_model_3D(_input_shape):
+
+    model = Sequential()
+
+    print(_input_shape)
+
+    model.add(Conv3D(60, (1, 2, 2), input_shape=_input_shape))
+    model.add(Activation('relu'))
+    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
+
+    model.add(Conv3D(40, (1, 2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
+
+    model.add(Conv3D(20, (1, 2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
+
+    model.add(Flatten())
+
+    model.add(Dense(140))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(Dense(120))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(Dense(80))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(Dense(40))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(Dense(20))
+    model.add(Activation('relu'))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(Dense(1))
+    model.add(Activation('sigmoid'))
+
+    model.compile(loss='binary_crossentropy',
+                  optimizer='rmsprop',
+                  metrics=['accuracy', metrics.auc])
+
+    return model
+
+
+def get_model(n_channels, _input_shape):
+
+    if n_channels == 1:
+        return generate_model_2D(_input_shape)
+
+    if n_channels == 3:
+        return generate_model_3D(_input_shape)
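
A sketch of building both variants through `get_model` (the input shapes below are illustrative assumptions; the commit does not fix them, although `utils/config.py` below defines `keras_img_size = (200, 200)`):

```python
from modules.models.models import get_model  # import path assumes use as a "modules" submodule

# one channel -> 2D CNN; assumed 200x200 single-channel input
model_2d = get_model(1, (200, 200, 1))

# three channels -> 3D CNN; assumed shape stacks the 3 channels along the first axis,
# with a trailing singleton channel dimension, to match the (1, 2, 2) kernels and pools
model_3d = get_model(3, (3, 200, 200, 1))

model_2d.summary()
model_3d.summary()
```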

+ 0 - 0
utils/__init__.py


+ 49 - 0
utils/config.py

@@ -0,0 +1,49 @@
+import numpy as np
+
+zone_folder                     = "zone"
+output_data_folder              = 'data'
+dataset_path                    = 'dataset'
+threshold_map_folder            = 'threshold_map'
+models_information_folder       = 'models_info'
+saved_models_folder             = 'saved_models'
+min_max_custom_folder           = 'custom_norm'
+learned_zones_folder            = 'learned_zones'
+correlation_indices_folder      = 'corr_indices'
+
+csv_model_comparisons_filename  = "models_comparisons.csv"
+seuil_expe_filename             = 'seuilExpe'
+min_max_filename_extension      = "_min_max_values"
+config_filename                 = "config"
+
+noisy_folder                    = 'noisy'
+not_noisy_folder                = 'notNoisy'
+
+models_names_list               = ["svm_model","ensemble_model","ensemble_model_v2","deep_keras"]
+
+# define all scenes values
+renderer_choices                = ['all', 'maxwell', 'igloo', 'cycle']
+
+scenes_names                    = ['Appart1opt02', 'Bureau1', 'Cendrier', 'Cuisine01', 'EchecsBas', 'PNDVuePlongeante', 'SdbCentre', 'SdbDroite', 'Selles']
+scenes_indices                  = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
+
+maxwell_scenes_names            = ['Appart1opt02', 'Cuisine01', 'SdbCentre', 'SdbDroite']
+maxwell_scenes_indices          = ['A', 'D', 'G', 'H']
+
+igloo_scenes_names              = ['Bureau1', 'PNDVuePlongeante']
+igloo_scenes_indices            = ['B', 'F']
+
+cycle_scenes_names              = ['EchecBas', 'Selles']
+cycle_scenes_indices            = ['E', 'I']
+
+normalization_choices           = ['svd', 'svdn', 'svdne']
+zones_indices                   = np.arange(16)
+
+metric_choices_labels           = ['all',  'static', 'svd_reconstruction', 'fast_ica_reconstruction', 'ipca_reconstruction']
+
+post_image_name_separator       = '___'
+
+keras_epochs                    = 30
+keras_batch                     = 32
+val_dataset_size                = 0.2
+
+keras_img_size                  = (200, 200)

+ 44 - 0
utils/data.py

@@ -0,0 +1,44 @@
+from ipfml import processing, metrics, utils
+from modules.utils.config import *
+from transformation_functions import svd_reconstruction
+
+from PIL import Image
+from skimage import color
+from sklearn.decomposition import FastICA
+from sklearn.decomposition import IncrementalPCA
+from sklearn.decomposition import TruncatedSVD
+from numpy.linalg import svd as lin_svd
+
+from scipy.signal import medfilt2d, wiener, cwt
+import pywt
+
+import numpy as np
+
+
+_scenes_names_prefix   = '_scenes_names'
+_scenes_indices_prefix = '_scenes_indices'
+
+# store all variables from current module context
+context_vars = vars()
+
+
+def get_renderer_scenes_indices(renderer_name):
+
+    if renderer_name not in renderer_choices:
+        raise ValueError("Unknown renderer name")
+
+    if renderer_name == 'all':
+        return scenes_indices
+    else:
+        return context_vars[renderer_name + _scenes_indices_prefix]
+
+def get_renderer_scenes_names(renderer_name):
+
+    if renderer_name not in renderer_choices:
+        raise ValueError("Unknown renderer name")
+
+    if renderer_name == 'all':
+        return scenes_names
+    else:
+        return context_vars[renderer_name + _scenes_names_prefix]
+
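
These helpers resolve the lists defined in `utils/config.py` by name through the module's `vars()` snapshot: `'maxwell'` maps to `maxwell_scenes_names` / `maxwell_scenes_indices`, and so on for the other renderers. A minimal sketch (import path assumes use as a `modules` submodule):

```python
from modules.utils.data import get_renderer_scenes_names, get_renderer_scenes_indices

# looks up the module-level variable "maxwell_scenes_names" pulled in from utils/config.py
print(get_renderer_scenes_names('maxwell'))   # ['Appart1opt02', 'Cuisine01', 'SdbCentre', 'SdbDroite']
print(get_renderer_scenes_indices('igloo'))   # ['B', 'F']

# any value outside renderer_choices raises ValueError("Unknown renderer name")
```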