
Add static transformation

Jérôme BUISINE, 4 years ago
Parent commit 630b46743f

+ 3 - 0
.gitmodules

@@ -0,0 +1,3 @@
+[submodule "modules"]
+	path = modules
+	url = https://github.com/prise-3d/Thesis-DeepLearning-modules.git

+ 7 - 0
README.md

@@ -2,7 +2,11 @@
 
 ## Requirements
 
+```bash
+git clone --recursive https://github.com/prise-3d/Thesis-NoiseDetection-CNN.git XXXXX
 ```
+
+```bash
 pip install -r requirements.txt
 ```
 
@@ -30,6 +34,9 @@ List of expected parameter by reconstruction method:
 - **fast_ica_reconstruction:**  Fast Independent Component Analysis
   - Param definition: *number of components used for compression*
   - Example: *"50"*
+- **static:** Use a static scene file (such as a z-buffer, normal map, ...)
+  - Param definition: *name of the image; the file must be located at {sceneName}/static/xxxx.png*
+  - Example: *"img.png"*
 
 **__Example:__**
 ```bash

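The static option added above expects a specific on-disk layout. A minimal path sketch, reconstructed from the path comments in the `generate_dataset.py` hunk below; the scene name `SdbCentre` and zone folder `zone00` are purely illustrative:

```python
import os

# source static image, i.e. the transformation parameter "img.png"
scene_path = os.path.join('dataset', 'SdbCentre')              # {sceneName}
source_image = os.path.join(scene_path, 'static', 'img.png')   # {sceneName}/static/img.png

# augmented blocks generated by generate_dataset.py are written here
zone_path = os.path.join(scene_path, 'zone00')                  # {sceneName}/zoneXX
augmented_dir = os.path.join(zone_path, 'static', 'img')        # {sceneName}/zoneXX/static/img

print(source_image)   # dataset/SdbCentre/static/img.png (on a POSIX system)
print(augmented_dir)  # dataset/SdbCentre/zone00/static/img
```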
+ 78 - 12
generate_dataset.py

@@ -13,13 +13,12 @@ import time
 import json
 
 from PIL import Image
-from ipfml import processing, metrics, utils
+from ipfml.processing.segmentation import divide_in_blocks
 from skimage import color
 
 from modules.utils import config as cfg
 from modules.utils import data as dt
 
-from transformation_functions import svd_reconstruction
 from modules.classes.Transformation import Transformation
 
 # getting configuration information
@@ -53,6 +52,8 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
     if not os.path.exists(output_data_folder):
         os.makedirs(output_data_folder)
 
+    zones_indices = zones
+
     train_file_data = []
     test_file_data  = []
 
@@ -61,12 +62,10 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
     scenes = [s for s in scenes if min_max_filename not in s]
 
     # go ahead each scenes
-    for id_scene, folder_scene in enumerate(_scenes_list):
+    for folder_scene in _scenes_list:
 
         scene_path = os.path.join(dataset_path, folder_scene)
 
-        zones_indices = zones
-
         # shuffle list of zones (=> randomly choose zones)
         # only in random mode
         if _random:
@@ -101,8 +100,56 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
             metrics_path = []
 
             for transformation in _transformations:
-                metric_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
-                metrics_path.append(metric_interval_path)
+                
+                # check whether this is static content and create augmented images if necessary
+                if transformation.getName() == 'static':
+                    
+                    # {sceneName}/zoneXX/static
+                    static_metric_path = os.path.join(zone_path, transformation.getName())
+
+                    # img.png
+                    image_name = transformation.getParam().split('/')[-1]
+
+                    # {sceneName}/zoneXX/static/img
+                    image_folder = image_name.replace('.png', '')
+                    image_folder_path = os.path.join(static_metric_path, image_folder)
+                    
+                    if not os.path.exists(image_folder_path):
+                        os.makedirs(image_folder_path)
+
+                    metrics_path.append(image_folder_path)
+
+                    # get image path to manage
+                    # {sceneName}/static/img.png
+                    transform_image_path = os.path.join(scene_path, transformation.getName(), image_name) 
+                    static_transform_image = Image.open(transform_image_path)
+
+                    static_transform_image_block = divide_in_blocks(static_transform_image, cfg.keras_img_size)[id_zone]
+
+                    # compute augmented images if necessary
+                    rotations = [0, 90, 180, 270]
+                    img_flip_labels = ['original', 'horizontal', 'vertical', 'both']
+
+                    horizontal_img = static_transform_image_block.transpose(Image.FLIP_LEFT_RIGHT)
+                    vertical_img = static_transform_image_block.transpose(Image.FLIP_TOP_BOTTOM)
+                    both_img = static_transform_image_block.transpose(Image.TRANSPOSE)
+
+                    flip_images = [static_transform_image_block, horizontal_img, vertical_img, both_img]
+
+                    # rotate and flip image to increase dataset size
+                    for id, flip in enumerate(flip_images):
+                        for rotation in rotations:
+                            rotated_output_img = flip.rotate(rotation)
+
+                            output_reconstructed_filename = image_folder + cfg.post_image_name_separator
+                            output_reconstructed_filename = output_reconstructed_filename + img_flip_labels[id] + '_' + str(rotation) + '.png'
+                            output_reconstructed_path = os.path.join(image_folder_path, output_reconstructed_filename)
+
+                            if not os.path.exists(output_reconstructed_path):
+                                rotated_output_img.save(output_reconstructed_path)
+                else:
+                    metric_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
+                    metrics_path.append(metric_interval_path)
 
             # as labels are the same for each metric
             for label in os.listdir(metrics_path[0]):
@@ -116,19 +163,35 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
                 # getting images list for each metric
                 metrics_images_list = []
                     
-                for label_path in label_metrics_path:
-                    images = sorted(os.listdir(label_path))
-                    metrics_images_list.append(images)
+                for index_metric, label_path in enumerate(label_metrics_path):
+
+                    if _transformations[index_metric].getName() == 'static':
+                        # by default, append nothing (static image names are resolved later from the post-fix)
+                        metrics_images_list.append([])
+                    else:
+                        images = sorted(os.listdir(label_path))
+                        metrics_images_list.append(images)
 
                 # construct each line using all image paths of each metric
                 for index_image in range(0, len(metrics_images_list[0])):
                     
                     images_path = []
 
+                    # get rotation and flip information from the first transformation (which must not be a static transformation)
+                    current_post_fix =  metrics_images_list[0][index_image].split(cfg.post_image_name_separator)[-1]
+
                     # getting images with same index and hence name for each metric (transformation)
                     for index_metric in range(0, len(metrics_path)):
-                        img_path = metrics_images_list[index_metric][index_image]
-                        images_path.append(os.path.join(label_metrics_path[index_metric], img_path))
+
+                        # custom behavior for the static transformation (a specific augmented image must be selected)
+                        if _transformations[index_metric].getName() == 'static':
+                            # add the static path, selecting the correct augmented image
+                            image_name = _transformations[index_metric].getParam().split('/')[-1].replace('.png', '')
+                            img_path = os.path.join(metrics_path[index_metric], image_name + cfg.post_image_name_separator + current_post_fix)
+                            images_path.append(img_path)
+                        else:
+                            img_path = metrics_images_list[index_metric][index_image]
+                            images_path.append(os.path.join(label_metrics_path[index_metric], img_path))
 
                     if label == cfg.noisy_folder:
                         line = '1;'
@@ -202,6 +265,9 @@ def main():
 
         transformations.append(Transformation(metric, p_params[id]))
 
+    if transformations[0].getName() == 'static':
+        raise ValueError("The first transformation in list cannot be static")
+
     # list all possibles choices of renderer
     scenes_list = dt.get_renderer_scenes_names(p_renderer)
     scenes_indices = dt.get_renderer_scenes_indices(p_renderer)
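The new `ValueError` above exists because the rotation/flip post-fix of every sample is read from the first transformation's augmented file names (`current_post_fix`); static entries only reuse that post-fix. A hedged sketch of a valid ordering, with illustrative parameter strings:

```python
from modules.classes.Transformation import Transformation

# illustrative only: a non-static transformation must come first,
# since its augmented file names provide the rotation/flip post-fix
transformations = [
    Transformation('svd_reconstruction', '100, 200'),  # provides the post-fix
    Transformation('static', 'img.png'),               # reuses that post-fix
]
```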

+ 2 - 2
generate_reconstructed_data.py

@@ -158,8 +158,8 @@ def generate_data(transformation):
                     for rotation in rotations:
                         rotated_output_img = flip.rotate(rotation)
 
-                        output_reconstructed_filename = img_path.split('/')[-1].replace('.png', '') + '_' + zones_folder[id_block]
-                        output_reconstructed_filename = output_reconstructed_filename + '_' + img_flip_labels[id] + '_' + str(rotation) + '.png'
+                        output_reconstructed_filename = img_path.split('/')[-1].replace('.png', '') + '_' + zones_folder[id_block] + cfg.post_image_name_separator
+                        output_reconstructed_filename = output_reconstructed_filename + img_flip_labels[id] + '_' + str(rotation) + '.png'
                         output_reconstructed_path = os.path.join(label_path, output_reconstructed_filename)
 
                         rotated_output_img.save(output_reconstructed_path)
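The only change here is that the rotation/flip suffix is now joined with `cfg.post_image_name_separator` (defined in the submodule; its value is not shown in this diff) instead of a plain `'_'`, so that `generate_dataset.py` can split file names on it. A worked example, assuming purely for illustration a separator of `'___'` and a hypothetical source image:

```python
post_image_name_separator = '___'  # assumed value; the real one lives in the submodule config

img_path = '/dataset/SdbCentre/SdbCentre_00920.png'  # hypothetical source image
zone_folder, flip_label, rotation = 'zone00', 'horizontal', 90

output_reconstructed_filename = (
    img_path.split('/')[-1].replace('.png', '') + '_' + zone_folder
    + post_image_name_separator + flip_label + '_' + str(rotation) + '.png'
)
print(output_reconstructed_filename)  # SdbCentre_00920_zone00___horizontal_90.png
```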

+ 1 - 0
modules

@@ -0,0 +1 @@
+Subproject commit 7a90d64b01aec2afec43925d9eb77900fae0949e

+ 0 - 0
modules/__init__.py


+ 0 - 53
modules/classes/Transformation.py

@@ -1,53 +0,0 @@
-import os
-
-from transformation_functions import svd_reconstruction, fast_ica_reconstruction, ipca_reconstruction
-
-# Transformation class to store transformation method of image and get usefull information
-class Transformation():
-
-    def __init__(self, _transformation, _param):
-        self.transformation = _transformation
-        self.param = _param
-
-    def getTransformedImage(self, img):
-
-        if self.transformation == 'svd_reconstruction':
-            begin, end = list(map(int, self.param.split(',')))
-            data = svd_reconstruction(img, [begin, end])
-
-        if self.transformation == 'ipca_reconstruction':
-            n_components, batch_size = list(map(int, self.param.split(',')))
-            data = ipca_reconstruction(img, n_components, batch_size)
-
-        if self.transformation == 'fast_ica_reconstruction':
-            n_components = self.param
-            data = fast_ica_reconstruction(img, n_components)
-
-        return data
-    
-    def getTransformationPath(self):
-
-        path = self.transformation
-
-        if self.transformation == 'svd_reconstruction':
-            begin, end = list(map(int, self.param.split(',')))
-            path = os.path.join(path, str(begin) + '_' + str(end))
-
-        if self.transformation == 'ipca_reconstruction':
-            n_components, batch_size = list(map(int, self.param.split(',')))
-            path = os.path.join(path, 'N' + str(n_components) + '_' + str(batch_size))
-
-        if self.transformation == 'fast_ica_reconstruction':
-            n_components = self.param
-            path = os.path.join(path, 'N' + str(n_components))
-
-        return path
-
-    def getName(self):
-        return self.transformation
-
-    def getParam(self):
-        return self.param
-
-    def __str__( self ):
-        return self.transformation + ' transformation with parameter : ' + self.param
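The class deleted above now lives in the Thesis-DeepLearning-modules submodule, and its updated version is not shown in this diff. Based only on how `generate_dataset.py` uses it in this commit (`getName()`, `getParam()`, `getTransformationPath()`), a hypothetical sketch of the interface the static handling relies on, not the submodule's actual code:

```python
import os

# Hypothetical sketch only, inferred from usage in generate_dataset.py.
class Transformation():

    def __init__(self, _transformation, _param):
        self.transformation = _transformation
        self.param = _param

    def getName(self):
        return self.transformation

    def getParam(self):
        return self.param

    def getTransformationPath(self):
        # only non-static reconstructions need a parameter sub-folder here;
        # the static case builds its own path inside generate_dataset.py
        path = self.transformation
        if self.transformation == 'svd_reconstruction':
            begin, end = list(map(int, self.param.split(',')))
            path = os.path.join(path, str(begin) + '_' + str(end))
        return path
```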

+ 0 - 0
modules/classes/__init__.py


+ 0 - 8
modules/models/metrics.py

@@ -1,8 +0,0 @@
-from keras import backend as K
-import tensorflow as tf
-
-def auc(y_true, y_pred):
-    auc = tf.metrics.auc(y_true, y_pred)[1]
-    K.get_session().run(tf.local_variables_initializer())
-    
-    return auc

+ 0 - 125
modules/models/models.py

@@ -1,125 +0,0 @@
-from keras.preprocessing.image import ImageDataGenerator
-from keras.models import Sequential
-from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv3D, MaxPooling3D, AveragePooling3D
-from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
-from keras import backend as K
-import tensorflow as tf
-
-from modules.utils import config as cfg
-from modules.models import metrics
-
-
-def generate_model_2D(_input_shape):
-
-    model = Sequential()
-
-    model.add(Conv2D(60, (2, 2), input_shape=_input_shape))
-    model.add(Activation('relu'))
-    model.add(MaxPooling2D(pool_size=(2, 2)))
-
-    model.add(Conv2D(40, (2, 2)))
-    model.add(Activation('relu'))
-    model.add(MaxPooling2D(pool_size=(2, 2)))
-
-    model.add(Conv2D(20, (2, 2)))
-    model.add(Activation('relu'))
-    model.add(MaxPooling2D(pool_size=(2, 2)))
-
-    model.add(Flatten())
-
-    model.add(Dense(140))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.4))
-
-    model.add(Dense(120))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.4))
-
-    model.add(Dense(80))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.4))
-
-    model.add(Dense(40))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.4))
-
-    model.add(Dense(20))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.4))
-
-    model.add(Dense(1))
-    model.add(Activation('sigmoid'))
-
-    model.compile(loss='binary_crossentropy',
-                  optimizer='rmsprop',
-                  metrics=['accuracy', metrics.auc])
-
-    return model
-
-def generate_model_3D(_input_shape):
-
-    model = Sequential()
-
-    print(_input_shape)
-
-    model.add(Conv3D(60, (1, 2, 2), input_shape=_input_shape))
-    model.add(Activation('relu'))
-    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
-
-    model.add(Conv3D(40, (1, 2, 2)))
-    model.add(Activation('relu'))
-    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
-
-    model.add(Conv3D(20, (1, 2, 2)))
-    model.add(Activation('relu'))
-    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
-
-    model.add(Flatten())
-
-    model.add(Dense(140))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.4))
-
-    model.add(Dense(120))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.4))
-
-    model.add(Dense(80))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.4))
-
-    model.add(Dense(40))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.4))
-
-    model.add(Dense(20))
-    model.add(Activation('relu'))
-    model.add(BatchNormalization())
-    model.add(Dropout(0.4))
-
-    model.add(Dense(1))
-    model.add(Activation('sigmoid'))
-
-    model.compile(loss='binary_crossentropy',
-                  optimizer='rmsprop',
-                  metrics=['accuracy', metrics.auc])
-
-    return model
-
-
-def get_model(n_channels, _input_shape):
-
-    if n_channels == 1:
-        return generate_model_2D(_input_shape)
-
-    if n_channels == 3:
-        return generate_model_3D(_input_shape)

+ 0 - 0
modules/utils/__init__.py


+ 0 - 47
modules/utils/config.py

@@ -1,47 +0,0 @@
-import numpy as np
-
-zone_folder                     = "zone"
-output_data_folder              = 'data'
-dataset_path                    = 'dataset'
-threshold_map_folder            = 'threshold_map'
-models_information_folder       = 'models_info'
-saved_models_folder             = 'saved_models'
-min_max_custom_folder           = 'custom_norm'
-learned_zones_folder            = 'learned_zones'
-correlation_indices_folder      = 'corr_indices'
-
-csv_model_comparisons_filename  = "models_comparisons.csv"
-seuil_expe_filename             = 'seuilExpe'
-min_max_filename_extension      = "_min_max_values"
-config_filename                 = "config"
-
-noisy_folder                    = 'noisy'
-not_noisy_folder                = 'notNoisy'
-
-models_names_list               = ["svm_model","ensemble_model","ensemble_model_v2","deep_keras"]
-
-# define all scenes values
-renderer_choices                = ['all', 'maxwell', 'igloo', 'cycle']
-
-scenes_names                    = ['Appart1opt02', 'Bureau1', 'Cendrier', 'Cuisine01', 'EchecsBas', 'PNDVuePlongeante', 'SdbCentre', 'SdbDroite', 'Selles']
-scenes_indices                  = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
-
-maxwell_scenes_names            = ['Appart1opt02', 'Cuisine01', 'SdbCentre', 'SdbDroite']
-maxwell_scenes_indices          = ['A', 'D', 'G', 'H']
-
-igloo_scenes_names              = ['Bureau1', 'PNDVuePlongeante']
-igloo_scenes_indices            = ['B', 'F']
-
-cycle_scenes_names              = ['EchecBas', 'Selles']
-cycle_scenes_indices            = ['E', 'I']
-
-normalization_choices           = ['svd', 'svdn', 'svdne']
-zones_indices                   = np.arange(16)
-
-metric_choices_labels           = ['all', 'svd_reconstruction', 'fast_ica_reconstruction', 'ipca_reconstruction']
-
-keras_epochs                    = 30
-keras_batch                     = 32
-val_dataset_size                = 0.2
-
-keras_img_size                  = (200, 200)

+ 0 - 44
modules/utils/data.py

@@ -1,44 +0,0 @@
-from ipfml import processing, metrics, utils
-from modules.utils.config import *
-from transformation_functions import svd_reconstruction
-
-from PIL import Image
-from skimage import color
-from sklearn.decomposition import FastICA
-from sklearn.decomposition import IncrementalPCA
-from sklearn.decomposition import TruncatedSVD
-from numpy.linalg import svd as lin_svd
-
-from scipy.signal import medfilt2d, wiener, cwt
-import pywt
-
-import numpy as np
-
-
-_scenes_names_prefix   = '_scenes_names'
-_scenes_indices_prefix = '_scenes_indices'
-
-# store all variables from current module context
-context_vars = vars()
-
-
-def get_renderer_scenes_indices(renderer_name):
-
-    if renderer_name not in renderer_choices:
-        raise ValueError("Unknown renderer name")
-
-    if renderer_name == 'all':
-        return scenes_indices
-    else:
-        return context_vars[renderer_name + _scenes_indices_prefix]
-
-def get_renderer_scenes_names(renderer_name):
-
-    if renderer_name not in renderer_choices:
-        raise ValueError("Unknown renderer name")
-
-    if renderer_name == 'all':
-        return scenes_names
-    else:
-        return context_vars[renderer_name + _scenes_names_prefix]
-

+ 2 - 3
requirements.txt

@@ -2,8 +2,7 @@ Pillow
 keras
 tensorflow
 sklearn
-image_slicer
-pydot
 matplotlib
 path.py
-IPFML
+ipfml
+opencv-python

+ 0 - 49
transformation_functions.py

@@ -1,49 +0,0 @@
-from numpy.linalg import svd
-from sklearn.decomposition import FastICA, IncrementalPCA
-
-import numpy as np
-
-from ipfml import metrics
-
-def svd_reconstruction(img, interval):
-    
-    begin, end = interval
-    lab_img = metrics.get_LAB_L(img)
-    lab_img = np.array(lab_img, 'uint8')
-    
-    U, s, V = svd(lab_img, full_matrices=True)
-    
-    # reconstruction using specific interval
-    smat = np.zeros((end-begin, end-begin), dtype=complex)
-    smat[:, :] = np.diag(s[begin:end])
-    output_img = np.dot(U[:, begin:end],  np.dot(smat, V[begin:end, :]))
-        
-    return output_img
-
-
-def fast_ica_reconstruction(img, components):
-
-    lab_img = metrics.get_LAB_L(img)
-    lab_img = np.array(lab_img, 'uint8')
-
-    ica = FastICA(n_components = 50)
-    # run ICA on image
-    ica.fit(lab_img)
-    # reconstruct image with independent components
-    image_ica = ica.fit_transform(lab_img)
-    restored_image = ica.inverse_transform(image_ica)
-
-    return restored_image
-
-
-def ipca_reconstruction(img, components, _batch_size=25):
-
-    lab_img = metrics.get_LAB_L(img)
-    lab_img = np.array(lab_img, 'uint8')
-
-    transformer = IncrementalPCA(n_components=components, batch_size=_batch_size)
-
-    transformed_image = transformer.fit_transform(lab_img) 
-    restored_image = transformer.inverse_transform(transformed_image)
-
-    return restored_image