Parcourir la source

Refactoring and scripts updates

Jérôme BUISINE il y a 4 ans
Parent
commit
138c254d1d
9 fichiers modifiés avec 127 ajouts et 150 suppressions
  1. 3 0
      .gitmodules
  2. 3 3
      README.md
  3. 0 0
      __init__.py
  4. 27 0
      custom_config.py
  5. 43 43
      generate_dataset.py
  6. 40 49
      generate_reconstructed_data.py
  7. 10 6
      image_denoising.py
  8. 1 0
      modules
  9. 0 49
      transformation_functions.py

+ 3 - 0
.gitmodules

@@ -0,0 +1,3 @@
+[submodule "modules"]
+	path = modules
+	url = https://github.com/prise-3d/Thesis-CommonModules.git

+ 3 - 3
README.md

@@ -30,12 +30,12 @@ pip install -r requirements.txt
 
 Generate reconstructed data from a specific reconstruction method (run only once, or clean the data folder first):
 ```
-python generate_reconstructed_data.py -h
+python generate/generate_reconstructed_data.py -h
 ```
 
 Generate a custom dataset from one reconstruction method or several (support for multiple methods implemented later)
 ```
-python generate_dataset.py -h
+python generate/generate_dataset.py -h
 ```
 
 ### Reconstruction parameter (--params)
@@ -56,7 +56,7 @@ List of expected parameter by reconstruction method:
 
 **__Example:__**
 ```bash
-python generate_dataset.py --output data/output_data_filename --metrics "svd_reconstruction, ipca_reconstruction, fast_ica_reconstruction" --renderer "maxwell" --scenes "A, D, G, H" --params "100, 200 :: 50, 10 :: 50" --nb_zones 10 --random 1 --only_noisy 1
+python generate/generate_dataset.py --output data/output_data_filename --features "svd_reconstruction, ipca_reconstruction, fast_ica_reconstruction" --renderer "maxwell" --scenes "A, D, G, H" --params "100, 200 :: 50, 10 :: 50" --nb_zones 10 --random 1 --only_noisy 1
 ```
 
 Then, run the model:

+ 0 - 0
__init__.py


+ 27 - 0
custom_config.py

@@ -0,0 +1,27 @@
+from modules.config.cnn_config import *
+
+# store all variables from cnn config
+context_vars = vars()
+
+# Custom config used for redefined config variables if necessary
+
+# folders
+
+## noisy_folder                    = 'noisy'
+## not_noisy_folder                = 'notNoisy'
+
+# file or extensions
+
+## post_image_name_separator       = '___'
+
+# variables
+
+## features_choices_labels         = ['static', 'svd_reconstruction', 'fast_ica_reconstruction', 'ipca_reconstruction']
+
+# parameters
+
+keras_epochs                    = 200
+## keras_batch                     = 32
+## val_dataset_size                = 0.2
+
+## keras_img_size                  = (200, 200)

+ 43 - 43
generate_dataset.py

@@ -6,19 +6,20 @@ Created on Wed Jun 19 11:47:42 2019
 @author: jbuisine
 """
 
+# main imports
 import sys, os, argparse
 import numpy as np
 import random
-import time
-import json
 
+# images processing imports
 from PIL import Image
 from ipfml.processing.segmentation import divide_in_blocks
-from skimage import color
 
-from modules.utils import config as cfg
-from modules.utils import data as dt
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
 
+import custom_config  as cfg
+from modules.utils import data as dt
 from modules.classes.Transformation import Transformation
 
 # getting configuration information
@@ -30,12 +31,11 @@ min_max_filename        = cfg.min_max_filename_extension
 # define all scenes values
 scenes_list             = cfg.scenes_names
 scenes_indexes          = cfg.scenes_indices
-choices                 = cfg.normalization_choices
 dataset_path            = cfg.dataset_path
 zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename
 
-metric_choices          = cfg.metric_choices_labels
+features_choices        = cfg.features_choices_labels
 output_data_folder      = cfg.output_data_folder
 
 generic_output_file_svd = '_random.csv'
@@ -117,8 +117,8 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
             # get list of all augmented ref images
             ref_augmented_images = [os.path.join(zone_path, f) for f in os.listdir(zone_path) if ref_image_name_prefix in f]
 
-            # custom path for interval of reconstruction and metric
-            metrics_path = []
+            # custom path for interval of reconstruction and features
+            features_path = []
 
             for transformation in _transformations:
                 
@@ -126,19 +126,19 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
                 if transformation.getName() == 'static':
                     
                     # {sceneName}/zoneXX/static
-                    static_metric_path = os.path.join(zone_path, transformation.getName())
+                    static_features_path = os.path.join(zone_path, transformation.getName())
 
                     # img.png
                     image_name = transformation.getParam().split('/')[-1]
 
                     # {sceneName}/zoneXX/static/img
                     image_prefix_name = image_name.replace('.png', '')
-                    image_folder_path = os.path.join(static_metric_path, image_prefix_name)
+                    image_folder_path = os.path.join(static_features_path, image_prefix_name)
                     
                     if not os.path.exists(image_folder_path):
                         os.makedirs(image_folder_path)
 
-                    metrics_path.append(image_folder_path)
+                    features_path.append(image_folder_path)
 
                     # get image path to manage
                     # {sceneName}/static/img.png
@@ -151,52 +151,52 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
                     dt.augmented_data_image(static_transform_image_block, image_folder_path, image_prefix_name)
 
                 else:
-                    metric_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
-                    metrics_path.append(metric_interval_path)
+                    features_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
+                    features_path.append(features_interval_path)
 
-            # as labels are same for each metric
-            for label in os.listdir(metrics_path[0]):
+            # as labels are the same for each feature
+            for label in os.listdir(features_path[0]):
 
                 if (label == cfg.not_noisy_folder and _only_noisy == 0) or label == cfg.noisy_folder:
                     
-                    label_metrics_path = []
+                    label_features_path = []
 
-                    for path in metrics_path:
+                    for path in features_path:
                         label_path = os.path.join(path, label)
-                        label_metrics_path.append(label_path)
+                        label_features_path.append(label_path)
 
-                    # getting images list for each metric
-                    metrics_images_list = []
+                    # getting images list for each feature
+                    features_images_list = []
                         
-                    for index_metric, label_path in enumerate(label_metrics_path):
+                    for index_features, label_path in enumerate(label_features_path):
 
-                        if _transformations[index_metric].getName() == 'static':
+                        if _transformations[index_features].getName() == 'static':
                             # by default append nothing..
-                            metrics_images_list.append([])
+                            features_images_list.append([])
                         else:
                             images = sorted(os.listdir(label_path))
-                            metrics_images_list.append(images)
+                            features_images_list.append(images)
 
                     # construct each line using all images path of each
-                    for index_image in range(0, len(metrics_images_list[0])):
+                    for index_image in range(0, len(features_images_list[0])):
                         
                         images_path = []
 
                         # get information about rotation and flip from first transformation (need to be a not static transformation)
-                        current_post_fix =  metrics_images_list[0][index_image].split(cfg.post_image_name_separator)[-1]
+                        current_post_fix =  features_images_list[0][index_image].split(cfg.post_image_name_separator)[-1]
 
-                        # getting images with same index and hence name for each metric (transformation)
-                        for index_metric in range(0, len(metrics_path)):
+                        # getting images with same index and hence name for each feature (transformation)
+                        for index_features in range(0, len(features_path)):
 
                             # custom behavior for static transformation (need to check specific image)
-                            if _transformations[index_metric].getName() == 'static':
+                            if _transformations[index_features].getName() == 'static':
                                 # add static path with selecting correct data augmented image
-                                image_name = _transformations[index_metric].getParam().split('/')[-1].replace('.png', '')
-                                img_path = os.path.join(metrics_path[index_metric], image_name + cfg.post_image_name_separator + current_post_fix)
+                                image_name = _transformations[index_features].getParam().split('/')[-1].replace('.png', '')
+                                img_path = os.path.join(features_path[index_features], image_name + cfg.post_image_name_separator + current_post_fix)
                                 images_path.append(img_path)
                             else:
-                                img_path = metrics_images_list[index_metric][index_image]
-                                images_path.append(os.path.join(label_metrics_path[index_metric], img_path))
+                                img_path = features_images_list[index_features][index_image]
+                                images_path.append(os.path.join(label_features_path[index_features], img_path))
 
                         # get information about rotation and flip
                         current_post_fix = images_path[0].split(cfg.post_image_name_separator)[-1]
@@ -238,15 +238,15 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
 
 def main():
 
-    parser = argparse.ArgumentParser(description="Compute specific dataset for model using of metric")
+    parser = argparse.ArgumentParser(description="Compute specific dataset for model using of features")
 
     parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
-    parser.add_argument('--metrics', type=str, 
-                                     help="list of metrics choice in order to compute data",
+    parser.add_argument('--features', type=str, 
+                                     help="list of features choice in order to compute data",
                                      default='svd_reconstruction, ipca_reconstruction',
                                      required=True)
     parser.add_argument('--params', type=str, 
-                                    help="list of specific param for each metric choice (See README.md for further information in 3D mode)", 
+                                    help="list of specific param for each features choice (See README.md for further information in 3D mode)", 
                                     default='100, 200 :: 50, 25',
                                     required=True)
     parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
@@ -258,7 +258,7 @@ def main():
     args = parser.parse_args()
 
     p_filename   = args.output
-    p_metrics    = list(map(str.strip, args.metrics.split(',')))
+    p_features   = list(map(str.strip, args.features.split(',')))
     p_params     = list(map(str.strip, args.params.split('::')))
     p_scenes     = args.scenes.split(',')
     p_nb_zones   = args.nb_zones
@@ -269,12 +269,12 @@ def main():
     # create list of Transformation
     transformations = []
 
-    for id, metric in enumerate(p_metrics):
+    for id, features in enumerate(p_features):
 
-        if metric not in metric_choices:
-            raise ValueError("Unknown metric, please select a correct metric : ", metric_choices)
+        if features not in features_choices:
+            raise ValueError("Unknown features, please select a correct features : ", features_choices)
 
-        transformations.append(Transformation(metric, p_params[id]))
+        transformations.append(Transformation(features, p_params[id]))
 
     # list all possibles choices of renderer
     scenes_list = dt.get_renderer_scenes_names(p_renderer)

+ 40 - 49
generate_reconstructed_data.py

@@ -6,17 +6,19 @@ Created on Wed Jun 19 11:47:42 2019
 @author: jbuisine
 """
 
+# main imports
 import sys, os, argparse
 import numpy as np
-import random
-import time
-import json
 
+# images processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils
-from skimage import color
+from ipfml.processing.segmentation import divide_in_blocks
 
-from modules.utils import config as cfg
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils.data import get_scene_image_quality
 from modules.classes.Transformation import Transformation
 
 # getting configuration information
@@ -27,12 +29,11 @@ min_max_filename        = cfg.min_max_filename_extension
 # define all scenes values
 scenes_list             = cfg.scenes_names
 scenes_indexes          = cfg.scenes_indices
-choices                 = cfg.normalization_choices
 path                    = cfg.dataset_path
 zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename
 
-metric_choices          = cfg.metric_choices_labels
+features_choices        = cfg.features_choices_labels
 output_data_folder      = cfg.output_data_folder
 
 generic_output_file_svd = '_random.csv'
@@ -64,7 +65,7 @@ def generate_data(transformation):
 
         # construct each zones folder name
         zones_folder = []
-        metrics_folder = []
+        features_folder = []
         zones_threshold = []
 
         # get zones list info
@@ -80,45 +81,40 @@ def generate_data(transformation):
             with open(os.path.join(zone_path, cfg.seuil_expe_filename)) as f:
                 zones_threshold.append(int(f.readline()))
 
-            # custom path for metric
-            metric_path = os.path.join(zone_path, transformation.getName())
+            # custom path for feature
+            feature_path = os.path.join(zone_path, transformation.getName())
 
-            if not os.path.exists(metric_path):
-                os.makedirs(metric_path)
+            if not os.path.exists(feature_path):
+                os.makedirs(feature_path)
 
-            # custom path for interval of reconstruction and metric
-            metric_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
-            metrics_folder.append(metric_interval_path)
+            # custom path for interval of reconstruction and feature
+            feature_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
+            features_folder.append(feature_interval_path)
 
-            if not os.path.exists(metric_interval_path):
-                os.makedirs(metric_interval_path)
+            if not os.path.exists(feature_interval_path):
+                os.makedirs(feature_interval_path)
 
             # create for each zone the labels folder
             labels = [cfg.not_noisy_folder, cfg.noisy_folder]
 
             for label in labels:
-                label_folder = os.path.join(metric_interval_path, label)
+                label_folder = os.path.join(feature_interval_path, label)
 
                 if not os.path.exists(label_folder):
                     os.makedirs(label_folder)
 
-        
 
-        current_counter_index = int(start_index_image)
-        end_counter_index = int(end_index_image)
+        # get all images of folder
+        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+        number_scene_image = len(scene_images)
 
         # for each images
-        while(current_counter_index <= end_counter_index):
-
-            current_counter_index_str = str(current_counter_index)
-
-            while len(start_index_image) > len(current_counter_index_str):
-                current_counter_index_str = "0" + current_counter_index_str
-
-            img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
+        for id_img, img_path in enumerate(scene_images):
 
             current_img = Image.open(img_path)
-            img_blocks = processing.divide_in_blocks(current_img, cfg.keras_img_size)
+            img_blocks = divide_in_blocks(current_img, cfg.keras_img_size)
+
+            current_quality_index = int(get_scene_image_quality(img_path))
 
             for id_block, block in enumerate(img_blocks):
 
@@ -135,10 +131,10 @@ def generate_data(transformation):
                 # current output image
                 output_block_img = Image.fromarray(output_block)
 
-                label_path = metrics_folder[id_block]
+                label_path = features_folder[id_block]
 
                 # get label folder for block
-                if current_counter_index > zones_threshold[id_block]:
+                if current_quality_index > zones_threshold[id_block]:
                     label_path = os.path.join(label_path, cfg.not_noisy_folder)
                 else:
                     label_path = os.path.join(label_path, cfg.noisy_folder)
@@ -164,14 +160,9 @@ def generate_data(transformation):
 
                         rotated_output_img.save(output_reconstructed_path)
 
-
-            start_index_image_int = int(start_index_image)
-            print(transformation.getName() + "_" + folder_scene + " - " + "{0:.2f}".format((current_counter_index - start_index_image_int) / (end_counter_index - start_index_image_int)* 100.) + "%")
+            print(transformation.getName() + "_" + folder_scene + " - " + "{0:.2f}".format(((id_img + 1) / number_scene_image)* 100.) + "%")
             sys.stdout.write("\033[F")
 
-            current_counter_index += step_counter
-
-
         print('\n')
 
     print("%s_%s : end of data generation\n" % (transformation.getName(), transformation.getParam()))
@@ -179,32 +170,32 @@ def generate_data(transformation):
 
 def main():
 
-    parser = argparse.ArgumentParser(description="Compute and prepare data of metric of all scenes using specific interval if necessary")
+    parser = argparse.ArgumentParser(description="Compute and prepare data of feature of all scenes using specific interval if necessary")
 
-    parser.add_argument('--metrics', type=str, 
-                                     help="list of metrics choice in order to compute data",
+    parser.add_argument('--features', type=str, 
+                                     help="list of features choice in order to compute data",
                                      default='svd_reconstruction, ipca_reconstruction',
                                      required=True)
     parser.add_argument('--params', type=str, 
-                                    help="list of specific param for each metric choice (See README.md for further information in 3D mode)", 
+                                    help="list of specific param for each feature choice (See README.md for further information in 3D mode)", 
                                     default='100, 200 :: 50, 25',
                                     required=True)
 
     args = parser.parse_args()
 
-    p_metrics  = list(map(str.strip, args.metrics.split(',')))
+    p_features = list(map(str.strip, args.features.split(',')))
     p_params   = list(map(str.strip, args.params.split('::')))
 
     transformations = []
 
-    for id, metric in enumerate(p_metrics):
+    for id, feature in enumerate(p_features):
 
-        if metric not in metric_choices:
-            raise ValueError("Unknown metric, please select a correct metric : ", metric_choices)
+        if feature not in features_choices:
+            raise ValueError("Unknown feature, please select a correct feature : ", features_choices)
 
-        transformations.append(Transformation(metric, p_params[id]))
+        transformations.append(Transformation(feature, p_params[id]))
 
-    # generate all or specific metric data
+    # generate all or specific feature data
     for transformation in transformations:
         generate_data(transformation)
 

+ 10 - 6
image_denoising.py

@@ -1,18 +1,22 @@
-from keras.layers import Input, Conv3D, MaxPooling3D, UpSampling3D
-from keras.models import Model
-from keras import backend as K
-from keras.callbacks import TensorBoard
-
+# main imports
 import os
 import json
 import pandas as pd
 import numpy as np
 import argparse
 
+# model imports
+from keras.layers import Input, Conv3D, MaxPooling3D, UpSampling3D
+from keras.models import Model
+from keras import backend as K
+from keras.callbacks import TensorBoard
 from sklearn.utils import shuffle
+
+# image processing imports
 import cv2
 
-from modules.utils import config as cfg
+# modules imports
+import custom_config as cfg
 
 def generate_model(input_shape):
 

+ 1 - 0
modules

@@ -0,0 +1 @@
+Subproject commit 7e65b752b6367df1177992f28ebffbc14cb62adb

+ 0 - 49
transformation_functions.py

@@ -1,49 +0,0 @@
-from numpy.linalg import svd
-from sklearn.decomposition import FastICA, IncrementalPCA
-
-import numpy as np
-
-from ipfml import metrics
-
-def svd_reconstruction(img, interval):
-    
-    begin, end = interval
-    lab_img = metrics.get_LAB_L(img)
-    lab_img = np.array(lab_img, 'uint8')
-    
-    U, s, V = svd(lab_img, full_matrices=True)
-    
-    # reconstruction using specific interval
-    smat = np.zeros((end-begin, end-begin), dtype=complex)
-    smat[:, :] = np.diag(s[begin:end])
-    output_img = np.dot(U[:, begin:end],  np.dot(smat, V[begin:end, :]))
-        
-    return output_img
-
-
-def fast_ica_reconstruction(img, components):
-
-    lab_img = metrics.get_LAB_L(img)
-    lab_img = np.array(lab_img, 'uint8')
-
-    ica = FastICA(n_components = 50)
-    # run ICA on image
-    ica.fit(lab_img)
-    # reconstruct image with independent components
-    image_ica = ica.fit_transform(lab_img)
-    restored_image = ica.inverse_transform(image_ica)
-
-    return restored_image
-
-
-def ipca_reconstruction(img, components, _batch_size=25):
-
-    lab_img = metrics.get_LAB_L(img)
-    lab_img = np.array(lab_img, 'uint8')
-
-    transformer = IncrementalPCA(n_components=components, batch_size=_batch_size)
-
-    transformed_image = transformer.fit_transform(lab_img) 
-    restored_image = transformer.inverse_transform(transformed_image)
-
-    return restored_image