
Refactoring and new project architecture

Jérôme BUISINE, 4 years ago
commit cafb09f54f
11 files changed with 178 additions and 172 deletions
  1. .gitignore (+2 -1)
  2. __init__.py (+0 -0)
  3. custom_config.py (+27 -0)
  4. display_simulation_curves.py (+11 -7)
  5. generate_dataset.py (+31 -32)
  6. generate_reconstructed_data.py (+40 -61)
  7. modules (+1 -1)
  8. predict_noisy_image.py (+20 -17)
  9. predict_seuil_expe_curve.py (+32 -39)
  10. run.sh (+9 -9)
  11. train_model.py (+5 -5)

+ 2 - 1
.gitignore

@@ -14,4 +14,5 @@ saved_models
 # data
 learned_zones
 dataset
-models_info
+models_info
+results

+ 0 - 0
__init__.py


+ 27 - 0
custom_config.py

@@ -0,0 +1,27 @@
+from modules.config.cnn_config import *
+
+# store all variables from cnn config
+context_vars = vars()
+
+# Custom config used to redefine config variables if necessary
+
+# folders
+
+## noisy_folder                    = 'noisy'
+## not_noisy_folder                = 'notNoisy'
+
+# file or extensions
+
+## post_image_name_separator       = '___'
+
+# variables
+
+## features_choices_labels         = ['static', 'svd_reconstruction', 'fast_ica_reconstruction', 'ipca_reconstruction']
+
+# parameters
+
+keras_epochs                    = 50
+## keras_batch                     = 32
+## val_dataset_size                = 0.2
+
+## keras_img_size                  = (200, 200)

+ 11 - 7
display_simulation_curves.py

@@ -1,13 +1,19 @@
+
+# main imports
 import numpy as np
 import pandas as pd
+import os, sys, argparse
 
+# display imports
 import matplotlib.pyplot as plt
-import os, sys, argparse
 
-from modules.utils import config as cfg
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
 
+# parameters from config and others
 learned_zones_folder = cfg.learned_zones_folder
-models_name          = cfg.models_names_list
 label_freq           = 6
 
 def display_curves(folder_path, model_name):
@@ -18,10 +24,8 @@ def display_curves(folder_path, model_name):
     @return nothing
     """
 
-    for name in models_name:
-        if name in model_name:
-            data_filename = model_name
-            learned_zones_folder_path = os.path.join(learned_zones_folder, data_filename)
+    data_filename = model_name
+    learned_zones_folder_path = os.path.join(learned_zones_folder, data_filename)
 
     data_files = [x for x in os.listdir(folder_path) if '.png' not in x]
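The sys.path.insert(0, '') line that now heads each script is what makes import custom_config resolve: an empty string on sys.path is interpreted as the current working directory, so a script launched from the repository root can import root-level modules. A minimal illustration of the trick:

import sys

# '' stands for the current working directory on the module search path,
# so running from the repository root exposes its top-level modules
sys.path.insert(0, '')

import custom_config as cfg   # found via the CWD entry added above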
 

+ 31 - 32
generate_dataset.py

@@ -6,19 +6,19 @@ Created on Wed Jun 19 11:47:42 2019
 @author: jbuisine
 """
 
+# main imports
 import sys, os, argparse
 import numpy as np
-import random
-import time
-import json
 
+# image processing imports
 from PIL import Image
 from ipfml.processing.segmentation import divide_in_blocks
-from skimage import color
 
-from modules.utils import config as cfg
-from modules.utils import data as dt
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
 
+import custom_config as cfg
+from modules.utils import data as dt
 from modules.classes.Transformation import Transformation
 
 # getting configuration information
@@ -30,12 +30,11 @@ min_max_filename        = cfg.min_max_filename_extension
 # define all scenes values
 scenes_list             = cfg.scenes_names
 scenes_indexes          = cfg.scenes_indices
-choices                 = cfg.normalization_choices
 dataset_path            = cfg.dataset_path
 zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename
 
-metric_choices          = cfg.metric_choices_labels
+features_choices        = cfg.features_choices_labels
 output_data_folder      = cfg.output_data_folder
 
 generic_output_file_svd = '_random.csv'
@@ -97,7 +96,7 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
 
             # custom path for interval of reconstruction and metric
 
-            metrics_path = []
+            features_path = []
 
             for transformation in _transformations:
                 
@@ -117,7 +116,7 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
                     if not os.path.exists(image_folder_path):
                         os.makedirs(image_folder_path)
 
-                    metrics_path.append(image_folder_path)
+                    features_path.append(image_folder_path)
 
                     # get image path to manage
                     # {sceneName}/static/img.png
@@ -130,49 +129,49 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
 
                 else:
                     metric_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
-                    metrics_path.append(metric_interval_path)
+                    features_path.append(metric_interval_path)
 
             # as labels are same for each metric
-            for label in os.listdir(metrics_path[0]):
+            for label in os.listdir(features_path[0]):
 
-                label_metrics_path = []
+                label_features_path = []
 
-                for path in metrics_path:
+                for path in features_path:
                     label_path = os.path.join(path, label)
-                    label_metrics_path.append(label_path)
+                    label_features_path.append(label_path)
 
                 # getting images list for each metric
-                metrics_images_list = []
+                features_images_list = []
                     
-                for index_metric, label_path in enumerate(label_metrics_path):
+                for index_metric, label_path in enumerate(label_features_path):
 
                     if _transformations[index_metric].getName() == 'static':
                         # by default append nothing..
-                        metrics_images_list.append([])
+                        features_images_list.append([])
                     else:
                         images = sorted(os.listdir(label_path))
-                        metrics_images_list.append(images)
+                        features_images_list.append(images)
 
                 # construct each line using all images path of each
-                for index_image in range(0, len(metrics_images_list[0])):
+                for index_image in range(0, len(features_images_list[0])):
                     
                     images_path = []
 
                     # get information about rotation and flip from first transformation (need to be a not static transformation)
-                    current_post_fix =  metrics_images_list[0][index_image].split(cfg.post_image_name_separator)[-1]
+                    current_post_fix = features_images_list[0][index_image].split(cfg.post_image_name_separator)[-1]
 
                     # getting images with same index and hence name for each metric (transformation)
-                    for index_metric in range(0, len(metrics_path)):
+                    for index_metric in range(0, len(features_path)):
 
                         # custom behavior for static transformation (need to check specific image)
                         if _transformations[index_metric].getName() == 'static':
                             # add static path with selecting correct data augmented image
                             image_name = _transformations[index_metric].getParam().split('/')[-1].replace('.png', '')
-                            img_path = os.path.join(metrics_path[index_metric], image_name + cfg.post_image_name_separator + current_post_fix)
+                            img_path = os.path.join(features_path[index_metric], image_name + cfg.post_image_name_separator + current_post_fix)
                             images_path.append(img_path)
                         else:
-                            img_path = metrics_images_list[index_metric][index_image]
-                            images_path.append(os.path.join(label_metrics_path[index_metric], img_path))
+                            img_path = features_images_list[index_metric][index_image]
+                            images_path.append(os.path.join(label_features_path[index_metric], img_path))
 
                     if label == cfg.noisy_folder:
                         line = '1;'
@@ -213,8 +212,8 @@ def main():
     parser = argparse.ArgumentParser(description="Compute specific dataset for model using of metric")
 
     parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
-    parser.add_argument('--metrics', type=str, 
-                                     help="list of metrics choice in order to compute data",
+    parser.add_argument('--features', type=str, 
+                                     help="list of feature choices used to compute the data",
                                      default='svd_reconstruction, ipca_reconstruction',
                                      required=True)
     parser.add_argument('--params', type=str, 
@@ -229,7 +228,7 @@ def main():
     args = parser.parse_args()
 
     p_filename = args.output
-    p_metrics  = list(map(str.strip, args.metrics.split(',')))
+    p_features = list(map(str.strip, args.features.split(',')))
     p_params   = list(map(str.strip, args.params.split('::')))
     p_scenes   = args.scenes.split(',')
     p_nb_zones = args.nb_zones
@@ -239,12 +238,12 @@ def main():
     # create list of Transformation
     transformations = []
 
-    for id, metric in enumerate(p_metrics):
+    for id, feature in enumerate(p_features):
 
-        if metric not in metric_choices:
-            raise ValueError("Unknown metric, please select a correct metric : ", metric_choices)
+        if feature not in features_choices:
+            raise ValueError("Unknown feature, please select a correct feature : ", features_choices)
 
-        transformations.append(Transformation(metric, p_params[id]))
+        transformations.append(Transformation(feature, p_params[id]))
 
     if transformations[0].getName() == 'static':
         raise ValueError("The first transformation in list cannot be static")

+ 40 - 61
generate_reconstructed_data.py

@@ -6,17 +6,19 @@ Created on Wed Jun 19 11:47:42 2019
 @author: jbuisine
 """
 
+# main imports
 import sys, os, argparse
 import numpy as np
-import random
-import time
-import json
 
+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils
-from skimage import color
+from ipfml.processing.segmentation import divide_in_blocks
 
-from modules.utils import config as cfg
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils.data import get_scene_image_quality
 from modules.classes.Transformation import Transformation
 
 # getting configuration information
@@ -27,12 +29,11 @@ min_max_filename        = cfg.min_max_filename_extension
 # define all scenes values
 scenes_list             = cfg.scenes_names
 scenes_indexes          = cfg.scenes_indices
-choices                 = cfg.normalization_choices
 path                    = cfg.dataset_path
 zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename
 
-metric_choices          = cfg.metric_choices_labels
+features_choices        = cfg.features_choices_labels
 output_data_folder      = cfg.output_data_folder
 
 generic_output_file_svd = '_random.csv'
@@ -53,18 +54,9 @@ def generate_data(transformation):
         print(folder_scene)
         scene_path = os.path.join(path, folder_scene)
 
-        config_file_path = os.path.join(scene_path, config_filename)
-
-        with open(config_file_path, "r") as config_file:
-            last_image_name = config_file.readline().strip()
-            prefix_image_name = config_file.readline().strip()
-            start_index_image = config_file.readline().strip()
-            end_index_image = config_file.readline().strip()
-            step_counter = int(config_file.readline().strip())
-
         # construct each zones folder name
         zones_folder = []
-        metrics_folder = []
+        features_folder = []
         zones_threshold = []
 
         # get zones list info
@@ -80,45 +72,39 @@ def generate_data(transformation):
             with open(os.path.join(zone_path, cfg.seuil_expe_filename)) as f:
                 zones_threshold.append(int(f.readline()))
 
-            # custom path for metric
-            metric_path = os.path.join(zone_path, transformation.getName())
+            # custom path for feature
+            feature_path = os.path.join(zone_path, transformation.getName())
 
-            if not os.path.exists(metric_path):
-                os.makedirs(metric_path)
+            if not os.path.exists(feature_path):
+                os.makedirs(feature_path)
 
-            # custom path for interval of reconstruction and metric
-            metric_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
-            metrics_folder.append(metric_interval_path)
+            # custom path for interval of reconstruction and feature
+            feature_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
+            features_folder.append(feature_interval_path)
 
-            if not os.path.exists(metric_interval_path):
-                os.makedirs(metric_interval_path)
+            if not os.path.exists(feature_interval_path):
+                os.makedirs(feature_interval_path)
 
             # create for each zone the labels folder
             labels = [cfg.not_noisy_folder, cfg.noisy_folder]
 
             for label in labels:
-                label_folder = os.path.join(metric_interval_path, label)
+                label_folder = os.path.join(feature_interval_path, label)
 
                 if not os.path.exists(label_folder):
                     os.makedirs(label_folder)
 
-        
-
-        current_counter_index = int(start_index_image)
-        end_counter_index = int(end_index_image)
+        # get all images of folder
+        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+        number_scene_image = len(scene_images)
 
         # for each images
-        while(current_counter_index <= end_counter_index):
-
-            current_counter_index_str = str(current_counter_index)
-
-            while len(start_index_image) > len(current_counter_index_str):
-                current_counter_index_str = "0" + current_counter_index_str
-
-            img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
+        for id_img, img_path in enumerate(scene_images):
 
             current_img = Image.open(img_path)
-            img_blocks = processing.divide_in_blocks(current_img, cfg.keras_img_size)
+            img_blocks = divide_in_blocks(current_img, cfg.keras_img_size)
+
+            current_quality_index = int(get_scene_image_quality(img_path))
 
             for id_block, block in enumerate(img_blocks):
 
@@ -127,18 +113,16 @@ def generate_data(transformation):
                 ##########################
                 
                 # pass block to grey level
-
-
                 output_block = transformation.getTransformedImage(block)
                 output_block = np.array(output_block, 'uint8')
                 
                 # current output image
                 output_block_img = Image.fromarray(output_block)
 
-                label_path = metrics_folder[id_block]
+                label_path = features_folder[id_block]
 
                 # get label folder for block
-                if current_counter_index > zones_threshold[id_block]:
+                if current_quality_index > zones_threshold[id_block]:
                     label_path = os.path.join(label_path, cfg.not_noisy_folder)
                 else:
                     label_path = os.path.join(label_path, cfg.noisy_folder)
@@ -164,14 +148,9 @@ def generate_data(transformation):
 
                         rotated_output_img.save(output_reconstructed_path)
 
-
-            start_index_image_int = int(start_index_image)
-            print(transformation.getName() + "_" + folder_scene + " - " + "{0:.2f}".format((current_counter_index - start_index_image_int) / (end_counter_index - start_index_image_int)* 100.) + "%")
+            print(transformation.getName() + "_" + folder_scene + " - " + "{0:.2f}".format(((id_img + 1) / number_scene_image)* 100.) + "%")
             sys.stdout.write("\033[F")
 
-            current_counter_index += step_counter
-
-
         print('\n')
 
     print("%s_%s : end of data generation\n" % (transformation.getName(), transformation.getParam()))
@@ -179,32 +158,32 @@ def generate_data(transformation):
 
 def main():
 
-    parser = argparse.ArgumentParser(description="Compute and prepare data of metric of all scenes using specific interval if necessary")
+    parser = argparse.ArgumentParser(description="Compute and prepare data of feature of all scenes using specific interval if necessary")
 
-    parser.add_argument('--metrics', type=str, 
-                                     help="list of metrics choice in order to compute data",
+    parser.add_argument('--features', type=str, 
+                                     help="list of feature choices used to compute the data",
                                      default='svd_reconstruction, ipca_reconstruction',
                                      required=True)
     parser.add_argument('--params', type=str, 
-                                    help="list of specific param for each metric choice (See README.md for further information in 3D mode)", 
+                                    help="list of specific params, one per feature choice (see README.md for further information in 3D mode)", 
                                     default='100, 200 :: 50, 25',
                                     required=True)
 
     args = parser.parse_args()
 
-    p_metrics  = list(map(str.strip, args.metrics.split(',')))
+    p_features = list(map(str.strip, args.features.split(',')))
     p_params   = list(map(str.strip, args.params.split('::')))
 
     transformations = []
 
-    for id, metric in enumerate(p_metrics):
+    for id, feature in enumerate(p_features):
 
-        if metric not in metric_choices:
-            raise ValueError("Unknown metric, please select a correct metric : ", metric_choices)
+        if feature not in features_choices or feature == 'static':
+            raise ValueError("Unknown feature, please select a correct feature (`static` excluded) : ", features_choices)
 
-        transformations.append(Transformation(metric, p_params[id]))
+        transformations.append(Transformation(feature, p_params[id]))
 
-    # generate all or specific metric data
+    # generate all or specific feature data
     for transformation in transformations:
         generate_data(transformation)
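The per-scene .config files (image name prefix, start/end indices, step counter) are gone: the script now simply lists the scene's .png files and asks modules.utils.data.get_scene_image_quality for each image's quality index. That helper lives in the updated modules submodule and is not shown here; a hypothetical stand-in, assuming the quality index is the trailing digit run of the file name:

import os, re

# hypothetical stand-in for modules.utils.data.get_scene_image_quality;
# assumes scene images are named like <prefix>_00920.png, where the
# trailing digits encode the rendering quality (sample count)
def get_scene_image_quality(img_path):
    stem = os.path.splitext(os.path.basename(img_path))[0]
    return int(re.search(r'(\d+)$', stem).group(1))

# get_scene_image_quality('dataset/Scene/Scene_00920.png') -> 920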
 

+ 1 - 1
modules

@@ -1 +1 @@
-Subproject commit 670ff4f4b984534d477ebee6616197427b4833f2
+Subproject commit 4123a5edbdf5563c9ec04147279b9b9a2dc185b6

+ 20 - 17
predict_noisy_image.py

@@ -1,23 +1,26 @@
-from sklearn.externals import joblib
-
+# main imports
+import sys, os, argparse, json
 import numpy as np
 
+# image processing imports
 from ipfml import processing, utils
 from PIL import Image
 
-import sys, os, argparse, json
-
+# model imports
+from sklearn.externals import joblib
 from keras.models import model_from_json
 
-from modules.utils import config as cfg
-from modules.utils import data as dt
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
 
+import custom_config as cfg
+from modules.utils import data as dt
 from modules.classes.Transformation import Transformation
 
+# parameters from config
 path                  = cfg.dataset_path
 min_max_ext           = cfg.min_max_filename_extension
-metric_choices        = cfg.metric_choices_labels
-normalization_choices = cfg.normalization_choices
+features_choices      = cfg.features_choices_labels
 
 custom_min_max_folder = cfg.min_max_custom_folder
 
@@ -27,12 +30,12 @@ def main():
     parser = argparse.ArgumentParser(description="Script which detects if an image is noisy or not using specific model")
 
     parser.add_argument('--image', type=str, help='Image path')
-    parser.add_argument('--metrics', type=str, 
-                                     help="list of metrics choice in order to compute data",
+    parser.add_argument('--features', type=str, 
+                                     help="list of feature choices used to compute the data",
                                      default='svd_reconstruction, ipca_reconstruction',
                                      required=True)
     parser.add_argument('--params', type=str, 
-                                    help="list of specific param for each metric choice (See README.md for further information in 3D mode)", 
+                                    help="list of specific params, one per feature choice (see README.md for further information in 3D mode)", 
                                     default='100, 200 :: 50, 25',
                                     required=True)
     parser.add_argument('--model', type=str, help='.json file of keras model')
@@ -40,7 +43,7 @@ def main():
     args = parser.parse_args()
 
     p_img_file   = args.image
-    p_metrics    = list(map(str.strip, args.metrics.split(',')))
+    p_features   = list(map(str.strip, args.features.split(',')))
     p_params     = list(map(str.strip, args.params.split('::')))
     p_model_file = args.model
 
@@ -52,19 +55,19 @@ def main():
 
         model.compile(loss='binary_crossentropy',
                     optimizer='rmsprop',
                     metrics=['accuracy'])
 
     # load image
     img = Image.open(p_img_file)
 
     transformations = []
 
-    for id, metric in enumerate(p_metrics):
+    for id, feature in enumerate(p_features):
 
-        if metric not in metric_choices:
-            raise ValueError("Unknown metric, please select a correct metric : ", metric_choices)
+        if feature not in features_choices:
+            raise ValueError("Unknown feature, please select a correct feature : ", features_choices)
 
-        transformations.append(Transformation(metric, p_params[id]))
+        transformations.append(Transformation(feature, p_params[id]))
 
     # getting transformed image
     transformed_images = []
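For context, --model takes a Keras architecture exported as JSON; the script rebuilds the network with model_from_json and compiles it before predicting. A minimal sketch of that step (the weights file name is an assumption, since the excerpt does not show where weights are loaded; note that compile's metrics= keyword names Keras evaluation metrics and is unrelated to the project's metric-to-feature rename):

from keras.models import model_from_json

with open(p_model_file, 'r') as f:
    model = model_from_json(f.read())

# assumed convention: weights stored next to the architecture as .h5
model.load_weights(p_model_file.replace('.json', '.h5'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])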

+ 32 - 39
predict_seuil_expe_curve.py

@@ -1,17 +1,22 @@
-from sklearn.externals import joblib
-
+# main imports
+import sys, os, argparse
+import subprocess
 import numpy as np
 
-from ipfml import processing
+# image processing imports
+from ipfml.processing.segmentation import divide_in_blocks
 from PIL import Image
 
-import sys, os, argparse
-import subprocess
-import time
+# model imports
+from sklearn.externals import joblib
 
-from modules.utils import config as cfg
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
 from modules.utils import data as dt
 
+# parameters from config and others
 config_filename           = cfg.config_filename
 scenes_path               = cfg.dataset_path
 min_max_filename          = cfg.min_max_filename_extension
@@ -22,8 +27,7 @@ threshold_map_file_prefix = cfg.threshold_map_folder + "_"
 
 zones                     = cfg.zones_indices
 maxwell_scenes            = cfg.maxwell_scenes_names
-normalization_choices     = cfg.normalization_choices
-metric_choices            = cfg.metric_choices_labels
+features_choices          = cfg.features_choices_labels
 
 simulation_curves_zones   = "simulation_curves_zones_"
 tmp_filename              = '/tmp/__model__img_to_predict.png'
@@ -35,8 +39,8 @@ def main():
 
     parser = argparse.ArgumentParser(description="Script which predicts threshold using specific keras model")
 
-    parser.add_argument('--metrics', type=str, 
-                                     help="list of metrics choice in order to compute data",
+    parser.add_argument('--features', type=str, 
+                                     help="list of feature choices used to compute the data",
                                      default='svd_reconstruction, ipca_reconstruction',
                                      required=True)
     parser.add_argument('--params', type=str, 
@@ -52,7 +56,7 @@ def main():
 
     args = parser.parse_args()
 
-    p_metrics    = list(map(str.strip, args.metrics.split(',')))
+    p_features   = list(map(str.strip, args.features.split(',')))
     p_params     = list(map(str.strip, args.params.split('::')))
     p_model_file = args.model
     p_renderer   = args.renderer
@@ -73,14 +77,14 @@ def main():
 
             scene_path = os.path.join(scenes_path, folder_scene)
 
-            config_path = os.path.join(scene_path, config_filename)
+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+            number_scene_image = len(scene_images)
 
-            with open(config_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
+            start_quality_image = dt.get_scene_image_quality(scene_images[0])
+            end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
+            # use the first two images to find the quality step
+            quality_step_image  = dt.get_scene_image_quality(scene_images[1]) - start_quality_image
 
             threshold_expes = []
             threshold_expes_found = []
@@ -100,26 +104,17 @@ def main():
                     threshold_expes.append(threshold)
 
                     # Initialize default data to get detected model threshold found
-                    threshold_expes_found.append(int(end_index_image)) # by default use max
-
-                block_predictions_str.append(index_str + ";" + p_model_file + ";" + str(threshold) + ";" + str(start_index_image) + ";" + str(step_counter))
-
-            current_counter_index = int(start_index_image)
-            end_counter_index = int(end_index_image)
-
-            print(current_counter_index)
+                    threshold_expes_found.append(int(end_quality_image)) # by default use max
 
-            while(current_counter_index <= end_counter_index):
+                block_predictions_str.append(index_str + ";" + p_model_file + ";" + str(threshold) + ";" + str(start_quality_image) + ";" + str(quality_step_image))
 
-                current_counter_index_str = str(current_counter_index)
-
-                while len(start_index_image) > len(current_counter_index_str):
-                    current_counter_index_str = "0" + current_counter_index_str
-
-                img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
+            # for each image of the scene
+            for id_img, img_path in enumerate(scene_images):
 
                 current_img = Image.open(img_path)
-                img_blocks = processing.divide_in_blocks(current_img, cfg.keras_img_size)
+                img_blocks = divide_in_blocks(current_img, cfg.keras_img_size)
+
+                current_quality_image = dt.get_scene_image_quality(img_path)
 
                 for id_block, block in enumerate(img_blocks):
 
@@ -130,7 +125,7 @@ def main():
                         block.save(tmp_file_path)
 
                         python_cmd = "python predict_noisy_image.py --image " + tmp_file_path + \
-                                        " --metrics " + p_metrics + \
+                                        " --features " + ",".join(p_features) + \
                                         " --params \"" + "::".join(p_params) + "\"" + \
                                         " --model " + p_model_file 
 
@@ -147,9 +142,8 @@ def main():
                         # save here in specific file of block all the predictions done
                         block_predictions_str[id_block] = block_predictions_str[id_block] + ";" + str(prediction)
 
-                        print(str(id_block) + " : " + str(current_counter_index) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
+                        print(str(id_block) + " : " + str(current_quality_image) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
 
-                current_counter_index += step_counter
                 print("------------------------")
                 print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)))
                 print("------------------------")
@@ -174,7 +168,6 @@ def main():
             print("------------------------")
 
             print("Model predictions are saved into %s" % map_filename)
-            time.sleep(2)
 
 
 if __name__== "__main__":
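With the .config files removed here as well, the quality range is inferred from the sorted image names: the first and last images give the start and end quality, and the difference between the first two gives the step, which assumes the scene was rendered at evenly spaced quality levels. Each zone's accumulator line then grows by one prediction per image; a hypothetical parser for the resulting simulation files, reconstructed from the concatenations above:

# one line per zone, semicolon-separated:
#   zone_index;model_file;threshold;start_quality;quality_step;p0;p1;p2;...
def parse_block_line(line):
    parts = line.split(';')
    zone, model_file = parts[0], parts[1]
    threshold, start, step = int(parts[2]), int(parts[3]), int(parts[4])
    predictions = [int(p) for p in parts[5:]]   # one 0/1 decision per image
    return zone, model_file, threshold, start, step, predictions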

+ 9 - 9
run.sh

@@ -3,7 +3,7 @@
 erased=$1
 
 # file which contains model names we want to use for simulation
-file_path="models_info/models_comparisons.csv"
+file_path="results/models_comparisons.csv"
 
 if [ "${erased}" == "Y" ]; then
     echo "Previous data file erased..."
@@ -22,14 +22,14 @@ svd_metric="svd_reconstruction"
 ipca_metric="ipca_reconstruction"
 fast_ica_metric="fast_ica_reconstruction"
 
-all_metrics="${svd_metric},${ipca_metric},${fast_ica_metric}"
+all_features="${svd_metric},${ipca_metric},${fast_ica_metric}"
 
 # First compute svd_reconstruction
 
 for begin in {80,85,90,95,100,105,110}; do
   for end in {150,160,170,180,190,200}; do
   
-    python generate_reconstructed_data.py --metric ${svd_metric} --param "${begin}, ${end}"
+    python generate/generate_reconstructed_data.py --features ${svd_metric} --params "${begin}, ${end}"
 
     for zone in {6,8,10,12}; do
       OUTPUT_DATA_FILE="${svd_metric}_nb_zones_${zone}_B${begin}_E${end}"
@@ -42,7 +42,7 @@ for begin in {80,85,90,95,100,105,110}; do
       
         echo "Run computation for SVD model ${OUTPUT_DATA_FILE}"
 
-        python generate_dataset.py --output data/${OUTPUT_DATA_FILE} --metrics ${svd_metric} --renderer ${renderer} --scenes ${scenes} --params "${begin}, ${end}" --nb_zones ${zone} --random 1
+        python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE} --features ${svd_metric} --renderer ${renderer} --scenes ${scenes} --params "${begin}, ${end}" --nb_zones ${zone} --random 1
         
         python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
       fi
@@ -55,7 +55,7 @@ done
 ipca_batch_size=55
 
 for component in {10,15,20,25,30,35,45,50}; do
-  python generate_reconstructed_data.py --metric ${ipca_metric} --param "${component},${ipca_batch_size}"
+  python generate/generate_reconstructed_data.py --features ${ipca_metric} --params "${component},${ipca_batch_size}"
 
   for zone in {6,8,10,12}; do
     OUTPUT_DATA_FILE="${ipca_metric}_nb_zones_${zone}_N${component}_BS${ipca_batch_size}"
@@ -68,7 +68,7 @@ for component in {10,15,20,25,30,35,45,50}; do
     
       echo "Run computation for IPCA model ${OUTPUT_DATA_FILE}"
 
-      python generate_dataset.py --output data/${OUTPUT_DATA_FILE} --metrics ${ipca_metric} --renderer ${renderer} --scenes ${scenes} --params "${component},${ipca_batch_size}" --nb_zones ${zone} --random 1
+      python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE} --features ${ipca_metric} --renderer ${renderer} --scenes ${scenes} --params "${component},${ipca_batch_size}" --nb_zones ${zone} --random 1
       python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
     fi
   done
@@ -78,7 +78,7 @@ done
 # computation of fast_ica_reconstruction
 
 for component in {50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200}; do
-  python generate_reconstructed_data.py --metric ${fast_ica_metric} --param "${component}"
+  python generate/generate_reconstructed_data.py --features ${fast_ica_metric} --params "${component}"
 
   for zone in {6,8,10,12}; do
     OUTPUT_DATA_FILE="${fast_ica_metric}_nb_zones_${zone}_N${component}"
@@ -91,7 +91,7 @@ for component in {50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200}; d
     
       echo "Run computation for Fast ICA model ${OUTPUT_DATA_FILE}"
 
-      python generate_dataset.py --output data/${OUTPUT_DATA_FILE} --metrics ${fast_ica_metric} --renderer ${renderer} --scenes ${scenes} --params "${component}" --nb_zones ${zone} --random 1
+      python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE} --features ${fast_ica_metric} --renderer ${renderer} --scenes ${scenes} --params "${component}" --nb_zones ${zone} --random 1
       
       python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
     fi
@@ -120,7 +120,7 @@ for begin in {80,85,90,95,100,105,110}; do
 
             params="${begin}, ${end} :: ${ipca_component}, ${ipca_batch_size} :: ${fast_ica_component}"
 
-            python generate_dataset.py --output data/${OUTPUT_DATA_FILE} --metric ${all_metrics} --renderer ${renderer} --scenes ${scenes} --params "${params}" --nb_zones ${zone} --random 1
+            python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE} --features ${all_features} --renderer ${renderer} --scenes ${scenes} --params "${params}" --nb_zones ${zone} --random 1
             
             python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
           fi
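run.sh drives the whole grid search: for each reconstruction method it sweeps the parameter grid, regenerates the reconstructed blocks, builds one dataset per zone count, and launches training in the background (&). The first positional argument controls whether previous results are erased; a usage sketch (the skip-if-already-listed behaviour of the N branch is an assumption, as the diff only shows the erase branch):

bash run.sh Y   # erase results/models_comparisons.csv, recompute everything
bash run.sh N   # keep the CSV (assumed: models already listed are skipped)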

+ 5 - 5
train_model.py

@@ -7,8 +7,8 @@ import cv2
 
 from sklearn.utils import shuffle
 
-from modules.utils import config as cfg
-from modules.models import models
+import custom_config as cfg
+from modules.models import cnn_models as models
 
 from keras import backend as K
 
@@ -172,10 +172,10 @@ def main():
     roc_test_score = roc_auc_score(y_dataset_test, y_test_prediction)
 
     # save model performance
-    if not os.path.exists(cfg.models_information_folder):
-        os.makedirs(cfg.models_information_folder)
+    if not os.path.exists(cfg.results_information_folder):
+        os.makedirs(cfg.results_information_folder)
 
-    perf_file_path = os.path.join(cfg.models_information_folder, cfg.csv_model_comparisons_filename)
+    perf_file_path = os.path.join(cfg.results_information_folder, cfg.csv_model_comparisons_filename)
 
     with open(perf_file_path, 'a') as f:
         line = p_output + ';' + str(len(dataset_train)) + ';' + str(len(dataset_test)) + ';' \