Merge branch 'release/v0.2.9'

Jérôme BUISINE, 5 years ago
Parent commit: 952e1a573c
77 changed files with 1129 additions and 1391 deletions
  1. .gitignore (+14 -14)
  2. .gitmodules (+3 -0)
  3. README.md (+8 -7)
  4. __init__.py (+0 -0)
  5. analysis/corr_analysys.ipynb (+1 -1)
  6. custom_config.py (+19 -0)
  7. modules/utils/data.py (+57 -85)
  8. generateAndTrain_maxwell.sh (+3 -3)
  9. generateAndTrain_maxwell_custom.sh (+3 -3)
 10. generateAndTrain_maxwell_custom_center.sh (+3 -3)
 11. generateAndTrain_maxwell_custom_filters.sh (+3 -3)
 12. generateAndTrain_maxwell_custom_filters_center.sh (+3 -3)
 13. generateAndTrain_maxwell_custom_filters_split.sh (+3 -3)
 14. generateAndTrain_maxwell_custom_split.sh (+3 -3)
 15. dataset (+1 -0)
 16. display_bits_shifted_scene.py (+32 -50)
 17. display/display_scenes_zones.py (+176 -0)
 18. display_scenes_zones_shifted.py (+37 -48)
 19. display_simulation_curves.py (+10 -4)
 20. display_svd_area_data_scene.py (+33 -60)
 21. display_svd_area_scenes.py (+35 -67)
 22. display_svd_data_error_scene.py (+48 -77)
 23. display_svd_data_scene.py (+38 -68)
 24. display_svd_zone_scene.py (+46 -70)
 25. display/generate_metrics_curve.sh (+9 -0)
 26. display_scenes_zones.py (+0 -249)
 27. generate_all_data.py (+37 -57)
 28. generate_data_model.py (+35 -39)
 29. generate_data_model_corr_random.py (+36 -40)
 30. generate_data_model_random.py (+34 -37)
 31. generate_data_model_random_center.py (+34 -37)
 32. generate_data_model_random_split.py (+34 -37)
 33. generate_all_simulate_curves.sh (+0 -6)
 34. generate_metrics_curve.sh (+0 -7)
 35. modules/models.py (+1 -0)
 36. modules (+1 -0)
 37. modules/utils/__init__.py (+0 -0)
 38. modules/utils/config.py (+0 -41)
 39. save_model_result_in_md.py (+13 -9)
 40. save_model_result_in_md_maxwell.py (+18 -11)
 41. testModelByScene.sh (+1 -1)
 42. testModelByScene_maxwell.sh (+1 -1)
 43. predict_noisy_image_svd.py (+18 -13)
 44. predict_seuil_expe.py (+44 -44)
 45. predict_seuil_expe_maxwell.py (+39 -42)
 46. predict_seuil_expe_maxwell_curve.py (+38 -43)
 47. prediction_scene.py (+10 -6)
 48. runAll_display_data_scene.sh (+1 -1)
 49. runAll_maxwell.sh (+3 -3)
 50. runAll_maxwell_area.sh (+4 -4)
 51. runAll_maxwell_area_normed.sh (+4 -4)
 52. runAll_maxwell_corr_custom.sh (+4 -4)
 53. runAll_maxwell_custom.sh (+3 -3)
 54. runAll_maxwell_custom_center.sh (+3 -3)
 55. runAll_maxwell_custom_filters.sh (+3 -3)
 56. runAll_maxwell_custom_filters_center.sh (+3 -3)
 57. runAll_maxwell_custom_filters_split.sh (+3 -3)
 58. runAll_maxwell_custom_filters_stats.sh (+3 -3)
 59. runAll_maxwell_custom_filters_stats_center.sh (+3 -3)
 60. runAll_maxwell_custom_filters_stats_split.sh (+3 -3)
 61. runAll_maxwell_custom_split.sh (+3 -3)
 62. runAll_maxwell_keras.sh (+4 -4)
 63. runAll_maxwell_keras_corr.sh (+4 -4)
 64. runAll_maxwell_keras_corr_custom.sh (+4 -4)
 65. runAll_maxwell_mscn_var.sh (+4 -4)
 66. runAll_maxwell_sub_blocks_stats.sh (+4 -4)
 67. runAll_maxwell_sub_blocks_stats_reduced.sh (+4 -4)
 68. simulation/generate_all_simulate_curves.sh (+6 -0)
 69. run_maxwell_simulation.sh (+2 -2)
 70. run_maxwell_simulation_corr_custom.sh (+3 -3)
 71. run_maxwell_simulation_custom.sh (+2 -2)
 72. run_maxwell_simulation_custom_filters.sh (+2 -2)
 73. simulation/run_maxwell_simulation_filters_statistics.sh (+39 -0)
 74. run_maxwell_simulation_keras_corr_custom.sh (+2 -2)
 75. run_maxwell_simulation_keras_custom.sh (+2 -2)
 76. deep_network_keras_svd.py (+10 -13)
 77. train_model.py (+12 -6)

+ 14 - 14
.gitignore

@@ -1,25 +1,25 @@
 # project data
-data/*
-saved_models/*
-threshold_map/*
-models_info/*
-custom_norm/*
-learned_zones/*
-corr_indices/*
+data
+saved_models
+threshold_map
+models_info
+custom_norm
+learned_zones
+corr_indices
+results
+metric_curves
 .ipynb_checkpoints
 
-# simulate_models.csv
-
-fichiersSVD_light
+# dataset and files
+simulate_models*.csv
+dataset
 
+# python cache
 .python-version
 __pycache__
 
 # by default avoid model files and png files
-saved_models/*.h5
+saved_models
 *.png
 !saved_models/*.png
 .vscode
-
-# simulate models .csv file
-simulate_models*.csv

+ 3 - 0
.gitmodules

@@ -0,0 +1,3 @@
+[submodule "modules"]
+	path = modules
+	url = https://github.com/prise-3d/Thesis-CommonModules.git
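
This release moves the shared code into the "modules" git submodule, so a fresh checkout must also fetch that submodule. A minimal sketch (the clone URL is assumed from the README's license link; adapt it to your remote):

```bash
# fetch the repository together with the "modules" submodule
git clone --recursive https://github.com/prise-3d/Thesis-NoiseDetection-attributes.git

# or, inside an already-cloned copy, initialize the submodule declared in .gitmodules
git submodule update --init --recursive
```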

+ 8 - 7
README.md

@@ -9,7 +9,7 @@ pip install -r requirements.txt
 Generate all needed data for each metric (this requires the whole dataset; to get it, you need to contact us).
 
 ```bash
-python generate_all_data.py --metric all
+python generate/generate_all_data.py --metric all
 ```
 
 For noise detection, many metrics are available:
@@ -24,7 +24,7 @@ For noise detection, many metrics are available:
 
 You can also specify the metric you want to compute and an image step to skip some images:
 ```bash
-python generate_all_data.py --metric mscn --step 50
+python generate/generate_all_data.py --metric mscn --step 50
 ```
 
 - **step**: keep an image only if image id % 50 == 0 (the assumption is that keeping spaced data will let the model fit better).
@@ -38,7 +38,8 @@ python generate_all_data.py --metric mscn --step 50
 - **train_model.py**: script used to run a specific available model.
 - **data/\***: folder which will contain all *.train* & *.test* files used to train models.
 - **saved_models/*.joblib**: all saved scikit-learn models.
-- **models_info/***: all markdown files generated to get quick information about model performance and prediction. This folder contains also **model_comparisons.csv** obtained after running runAll_maxwell.sh script.
+- **models_info/***: all markdown files generated to get quick information about model performance and prediction. 
+- **results**: this folder contains **model_comparisons.csv**, obtained after running the runAll_maxwell_*.sh scripts.
 - **modules/\***: contains all modules useful for the whole project (such as configuration variables)
 
 ### Scripts for generating data files
@@ -52,9 +53,9 @@ Two scripts can be used for generating data in order to fit model:
 **Remark**: note that all python scripts have a *--help* option.
 
 ```
-python generate_data_model.py --help
+python generate/generate_data_model.py --help
 
-python generate_data_model.py --output xxxx --interval 0,20  --kind svdne --scenes "A, B, D" --zones "0, 1, 2" --percent 0.7 --sep: --rowindex 1 --custom custom_min_max_filename
+python generate/generate_data_model.py --output xxxx --interval 0,20  --kind svdne --scenes "A, B, D" --zones "0, 1, 2" --percent 0.7 --sep: --rowindex 1 --custom custom_min_max_filename
 ```
 
 Parameters explained:
@@ -162,7 +163,7 @@ The content will be divised into two parts:
 The previous script needs to have already been run to obtain and display threshold maps in this markdown file.
 
 ```bash
-python save_model_result_in_md.py --interval "xx,xx" --model saved_models/xxxx.joblib --mode ["svd", "svdn", "svdne"] --metric ['lab', 'mscn']
+python others/save_model_result_in_md.py --interval "xx,xx" --model saved_models/xxxx.joblib --mode ["svd", "svdn", "svdne"] --metric ['lab', 'mscn']
 ```
 
 Parameters list:
@@ -178,4 +179,4 @@ All others bash scripts are used to combine and run multiple model combinations.
 
 ## License
 
-[The MIT license](https://github.com/prise-3d/Thesis-NoiseDetection-metrics/blob/master/LICENSE)
+[The MIT license](https://github.com/prise-3d/Thesis-NoiseDetection-attributes/blob/master/LICENSE)

modules/__init__.py → __init__.py


+ 1 - 1
analysis/corr_analysys.ipynb

@@ -39,7 +39,7 @@
     "data_file = \"data/temp.train\"\n",
     "interval = 16\n",
     "\n",
-    "!python generate_data_model_random.py --output data/temp --interval \"0, 16\"  --kind svdne --metric sub_blocks_area --scenes \"A, D, G, H\" --nb_zones 16 --random 1 --percent 1.0 --step 10 --each 1 --renderer maxwell --custom temp_min_max_values"
+    "!python generate/generate_data_model_random.py --output data/temp --interval \"0, 16\"  --kind svdne --metric sub_blocks_area --scenes \"A, D, G, H\" --nb_zones 16 --random 1 --percent 1.0 --step 10 --each 1 --renderer maxwell --custom temp_min_max_values"
    ]
   },
  {

+ 19 - 0
custom_config.py

@@ -0,0 +1,19 @@
+from modules.config.attributes_config import *
+
+# store all variables from global config
+context_vars = vars()
+
+# folders
+## min_max_custom_folder           = 'custom_norm'
+## correlation_indices_folder      = 'corr_indices'
+
+# variables
+## features_choices_labels         = ['lab', 'mscn', 'low_bits_2', 'low_bits_3', 'low_bits_4', 'low_bits_5', 'low_bits_6','low_bits_4_shifted_2', 'sub_blocks_stats', 'sub_blocks_area', 'sub_blocks_stats_reduced', 'sub_blocks_area_normed', 'mscn_var_4', 'mscn_var_16', 'mscn_var_64', 'mscn_var_16_max', 'mscn_var_64_max', 'ica_diff', 'svd_trunc_diff', 'ipca_diff', 'svd_reconstruct', 'highest_sv_std_filters', 'lowest_sv_std_filters', 'highest_wave_sv_std_filters', 'lowest_wave_sv_std_filters']
+
+## models_names_list               = ["svm_model","ensemble_model","ensemble_model_v2","deep_keras"]
+## normalization_choices           = ['svd', 'svdn', 'svdne']
+
+# parameters
+## keras_epochs                    = 500
+## keras_batch                     = 32
+## val_dataset_size                = 0.2
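
The new custom_config.py re-exports every variable from the submodule's attributes_config and keeps project-specific overrides commented out; the reworked scripts import it as `cfg`. A quick sanity check (assumes it is run from the repository root with the "modules" submodule initialized):

```bash
# print one of the inherited config values through the new custom_config module
python -c "import custom_config as cfg; print(cfg.normalization_choices)"
```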

+ 57 - 85
modules/utils/data.py

@@ -1,25 +1,26 @@
-from ipfml import processing, metrics, utils
-from modules.utils.config import *
+# main imports
+import numpy as np
+import sys
 
+# image transform imports
 from PIL import Image
 from skimage import color
 from sklearn.decomposition import FastICA
 from sklearn.decomposition import IncrementalPCA
 from sklearn.decomposition import TruncatedSVD
 from numpy.linalg import svd as lin_svd
-
 from scipy.signal import medfilt2d, wiener, cwt
 import pywt
-
 import cv2
-import numpy as np
 
+from ipfml.processing import transform, compression, segmentation
+from ipfml import utils
 
-_scenes_names_prefix   = '_scenes_names'
-_scenes_indices_prefix = '_scenes_indices'
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
 
-# store all variables from current module context
-context_vars = vars()
+import custom_config as cfg
+from modules.utils import data as dt
 
 
 def get_svd_data(data_type, block):
@@ -31,11 +32,11 @@ def get_svd_data(data_type, block):
 
         block_file_path = '/tmp/lab_img.png'
         block.save(block_file_path)
-        data = processing.get_LAB_L_SVD_s(Image.open(block_file_path))
+        data = transform.get_LAB_L_SVD_s(Image.open(block_file_path))
 
     if data_type == 'mscn':
 
-        img_mscn_revisited = processing.rgb_to_mscn(block)
+        img_mscn_revisited = transform.rgb_to_mscn(block)
 
         # save tmp as img
         img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L')
@@ -44,47 +45,47 @@ def get_svd_data(data_type, block):
         img_block = Image.open(mscn_revisited_file_path)
 
         # extract from temp image
-        data = metrics.get_SVD_s(img_block)
+        data = compression.get_SVD_s(img_block)
 
     """if data_type == 'mscn':
 
         img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8')
-        img_mscn = processing.calculate_mscn_coefficients(img_gray, 7)
-        img_mscn_norm = processing.normalize_2D_arr(img_mscn)
+        img_mscn = transform.calculate_mscn_coefficients(img_gray, 7)
+        img_mscn_norm = transform.normalize_2D_arr(img_mscn)
 
         img_mscn_gray = np.array(img_mscn_norm*255, 'uint8')
 
-        data = metrics.get_SVD_s(img_mscn_gray)
+        data = compression.get_SVD_s(img_mscn_gray)
     """
 
     if data_type == 'low_bits_6':
 
-        low_bits_6 = processing.rgb_to_LAB_L_low_bits(block, 6)
-        data = metrics.get_SVD_s(low_bits_6)
+        low_bits_6 = transform.rgb_to_LAB_L_low_bits(block, 6)
+        data = compression.get_SVD_s(low_bits_6)
 
     if data_type == 'low_bits_5':
 
-        low_bits_5 = processing.rgb_to_LAB_L_low_bits(block, 5)
-        data = metrics.get_SVD_s(low_bits_5)
+        low_bits_5 = transform.rgb_to_LAB_L_low_bits(block, 5)
+        data = compression.get_SVD_s(low_bits_5)
 
     if data_type == 'low_bits_4':
 
-        low_bits_4 = processing.rgb_to_LAB_L_low_bits(block, 4)
-        data = metrics.get_SVD_s(low_bits_4)
+        low_bits_4 = transform.rgb_to_LAB_L_low_bits(block, 4)
+        data = compression.get_SVD_s(low_bits_4)
 
    if data_type == 'low_bits_3':
 
-        low_bits_3 = processing.rgb_to_LAB_L_low_bits(block, 3)
-        data = metrics.get_SVD_s(low_bits_3)
+        low_bits_3 = transform.rgb_to_LAB_L_low_bits(block, 3)
+        data = compression.get_SVD_s(low_bits_3)
 
     if data_type == 'low_bits_2':
 
-        low_bits_2 = processing.rgb_to_LAB_L_low_bits(block, 2)
-        data = metrics.get_SVD_s(low_bits_2)
+        low_bits_2 = transform.rgb_to_LAB_L_low_bits(block, 2)
+        data = compression.get_SVD_s(low_bits_2)
 
     if data_type == 'low_bits_4_shifted_2':
 
-        data = metrics.get_SVD_s(processing.rgb_to_LAB_L_bits(block, (3, 6)))
+        data = compression.get_SVD_s(transform.rgb_to_LAB_L_bits(block, (3, 6)))
 
     if data_type == 'sub_blocks_stats':
 
@@ -92,14 +93,14 @@ def get_svd_data(data_type, block):
         width, height, _= block.shape
         sub_width, sub_height = int(width / 4), int(height / 4)
 
-        sub_blocks = processing.divide_in_blocks(block, (sub_width, sub_height))
+        sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
 
         data = []
 
         for sub_b in sub_blocks:
 
             # by default use the whole lab L canal
-            l_svd_data = np.array(processing.get_LAB_L_SVD_s(sub_b))
+            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
 
             # get information we want from svd
             data.append(np.mean(l_svd_data))
@@ -120,14 +121,14 @@ def get_svd_data(data_type, block):
         width, height, _= block.shape
         sub_width, sub_height = int(width / 4), int(height / 4)
 
-        sub_blocks = processing.divide_in_blocks(block, (sub_width, sub_height))
+        sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
 
         data = []
 
         for sub_b in sub_blocks:
 
             # by default use the whole lab L canal
-            l_svd_data = np.array(processing.get_LAB_L_SVD_s(sub_b))
+            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
 
             # get information we want from svd
             data.append(np.mean(l_svd_data))
@@ -145,14 +146,14 @@ def get_svd_data(data_type, block):
         width, height, _= block.shape
         sub_width, sub_height = int(width / 8), int(height / 8)
 
-        sub_blocks = processing.divide_in_blocks(block, (sub_width, sub_height))
+        sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
 
         data = []
 
         for sub_b in sub_blocks:
 
             # by default use the whole lab L canal
-            l_svd_data = np.array(processing.get_LAB_L_SVD_s(sub_b))
+            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
 
             area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
             data.append(area_under_curve)
@@ -166,14 +167,14 @@ def get_svd_data(data_type, block):
         width, height, _= block.shape
         sub_width, sub_height = int(width / 8), int(height / 8)
 
-        sub_blocks = processing.divide_in_blocks(block, (sub_width, sub_height))
+        sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
 
         data = []
 
         for sub_b in sub_blocks:
 
             # by default use the whole lab L canal
-            l_svd_data = np.array(processing.get_LAB_L_SVD_s(sub_b))
+            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
             l_svd_data = utils.normalize_arr(l_svd_data)
 
             area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
@@ -211,7 +212,7 @@ def get_svd_data(data_type, block):
         data = data[indices]
 
     if data_type == 'ica_diff':
-        current_image = metrics.get_LAB_L(block)
+        current_image = transform.get_LAB_L(block)
 
         ica = FastICA(n_components=50)
         ica.fit(current_image)
@@ -222,14 +223,14 @@ def get_svd_data(data_type, block):
         final_image = utils.normalize_2D_arr(image_restored)
         final_image = np.array(final_image * 255, 'uint8')
 
-        sv_values = utils.normalize_arr(metrics.get_SVD_s(current_image))
-        ica_sv_values = utils.normalize_arr(metrics.get_SVD_s(final_image))
+        sv_values = utils.normalize_arr(compression.get_SVD_s(current_image))
+        ica_sv_values = utils.normalize_arr(compression.get_SVD_s(final_image))
 
         data = abs(np.array(sv_values) - np.array(ica_sv_values))
 
     if data_type == 'svd_trunc_diff':
 
-        current_image = metrics.get_LAB_L(block)
+        current_image = transform.get_LAB_L(block)
 
         svd = TruncatedSVD(n_components=30, n_iter=100, random_state=42)
         transformed_image = svd.fit_transform(current_image)
@@ -237,12 +238,12 @@ def get_svd_data(data_type, block):
 
         reduced_image = (current_image - restored_image)
 
-        U, s, V = metrics.get_SVD(reduced_image)
+        U, s, V = compression.get_SVD(reduced_image)
         data = s
 
     if data_type == 'ipca_diff':
 
-        current_image = metrics.get_LAB_L(block)
+        current_image = transform.get_LAB_L(block)
 
         transformer = IncrementalPCA(n_components=20, batch_size=25)
         transformed_image = transformer.fit_transform(current_image)
@@ -250,7 +251,7 @@ def get_svd_data(data_type, block):
 
         reduced_image = (current_image - restored_image)
 
-        U, s, V = metrics.get_SVD(reduced_image)
+        U, s, V = compression.get_SVD(reduced_image)
         data = s
 
     if data_type == 'svd_reconstruct':
@@ -258,7 +259,7 @@ def get_svd_data(data_type, block):
         reconstructed_interval = (90, 200)
         begin, end = reconstructed_interval
 
-        lab_img = metrics.get_LAB_L(block)
+        lab_img = transform.get_LAB_L(block)
         lab_img = np.array(lab_img, 'uint8')
 
         U, s, V = lin_svd(lab_img, full_matrices=True)
@@ -269,12 +270,12 @@ def get_svd_data(data_type, block):
 
         output_img = np.array(output_img, 'uint8')
 
-        data = metrics.get_SVD_s(output_img)
+        data = compression.get_SVD_s(output_img)
 
     if 'sv_std_filters' in data_type:
 
         # convert into lab by default to apply filters
-        lab_img = metrics.get_LAB_L(block)
+        lab_img = transform.get_LAB_L(block)
         arr = np.array(lab_img)
         images = []
         
@@ -285,12 +286,12 @@ def get_svd_data(data_type, block):
         images.append(wiener(arr, [5, 5]))
         
         # By default computation of current block image
-        s_arr = metrics.get_SVD_s(arr)
+        s_arr = compression.get_SVD_s(arr)
         sv_vector = [s_arr]
 
         # for each new image apply SVD and get SV 
         for img in images:
-            s = metrics.get_SVD_s(img)
+            s = compression.get_SVD_s(img)
             sv_vector.append(s)
             
         sv_array = np.array(sv_vector)
@@ -307,10 +308,10 @@ def get_svd_data(data_type, block):
         indices = []
 
         if 'lowest' in data_type:
-            indices = get_lowest_values(sv_std, 200)
+            indices = utils.get_indices_of_lowest_values(sv_std, 200)
 
         if 'highest' in data_type:
-            indices = get_highest_values(sv_std, 200)
+            indices = utils.get_indices_of_highest_values(sv_std, 200)
 
         # data are arranged following std trend computed
         data = s_arr[indices]
@@ -319,7 +320,7 @@ def get_svd_data(data_type, block):
     if 'wave_sv_std_filters' in data_type:
 
         # convert into lab by default to apply filters
-        lab_img = metrics.get_LAB_L(block)
+        lab_img = transform.get_LAB_L(block)
         arr = np.array(lab_img)
         images = []
         
@@ -335,12 +336,12 @@ def get_svd_data(data_type, block):
         images.append(w2d(arr, 'haar', 4))
         
         # By default computation of current block image
-        s_arr = metrics.get_SVD_s(arr)
+        s_arr = compression.get_SVD_s(arr)
         sv_vector = [s_arr]
 
         # for each new image apply SVD and get SV 
         for img in images:
-            s = metrics.get_SVD_s(img)
+            s = compression.get_SVD_s(img)
             sv_vector.append(s)
            
         sv_array = np.array(sv_vector)
@@ -357,10 +358,10 @@ def get_svd_data(data_type, block):
         indices = []
 
         if 'lowest' in data_type:
-            indices = get_lowest_values(sv_std, 200)
+            indices = utils.get_indices_of_lowest_values(sv_std, 200)
 
         if 'highest' in data_type:
-            indices = get_highest_values(sv_std, 200)
+            indices = utils.get_indices_of_highest_values(sv_std, 200)
 
         # data are arranged following std trend computed
         data = s_arr[indices]
@@ -369,7 +370,7 @@ def get_svd_data(data_type, block):
 
         img_width, img_height = 200, 200
 
-        lab_img = metrics.get_LAB_L(block)
+        lab_img = transform.get_LAB_L(block)
         arr = np.array(lab_img)
 
         # compute all filters statistics
@@ -429,14 +430,6 @@ def get_svd_data(data_type, block):
     return data
 
 
-def get_highest_values(arr, n):
-    return np.array(arr).argsort()[-n:][::-1]
-
-
-def get_lowest_values(arr, n):
-    return np.array(arr).argsort()[::-1][-n:][::-1]
-
-
 def w2d(arr, mode='haar', level=1):
     #convert to float   
     imArray = arr
@@ -458,35 +451,14 @@ def w2d(arr, mode='haar', level=1):
 
 def _get_mscn_variance(block, sub_block_size=(50, 50)):
 
-    blocks = processing.divide_in_blocks(block, sub_block_size)
+    blocks = segmentation.divide_in_blocks(block, sub_block_size)
 
     data = []
 
     for block in blocks:
-        mscn_coefficients = processing.get_mscn_coefficients(block)
+        mscn_coefficients = transform.get_mscn_coefficients(block)
         flat_coeff = mscn_coefficients.flatten()
         data.append(np.var(flat_coeff))
 
     return np.sort(data)
 
-
-def get_renderer_scenes_indices(renderer_name):
-
-    if renderer_name not in renderer_choices:
-        raise ValueError("Unknown renderer name")
-
-    if renderer_name == 'all':
-        return scenes_indices
-    else:
-        return context_vars[renderer_name + _scenes_indices_prefix]
-
-def get_renderer_scenes_names(renderer_name):
-
-    if renderer_name not in renderer_choices:
-        raise ValueError("Unknown renderer name")
-
-    if renderer_name == 'all':
-        return scenes_names
-    else:
-        return context_vars[renderer_name + _scenes_names_prefix]
-

+ 3 - 3
generateAndTrain_maxwell.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -54,11 +54,11 @@ for counter in {0..4}; do
 
                     echo "${MODEL_NAME} results already generated..."
                 else
-                    python generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --renderer "maxwell" --step 40 --random 1 --percent 1
+                    python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --renderer "maxwell" --step 40 --random 1 --percent 1
                     python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                     #python predict_seuil_expe_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2'
-                    python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                    python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                 fi
             done
        done

+ 3 - 3
generateAndTrain_maxwell_custom.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -55,11 +55,11 @@ for counter in {0..4}; do
 
                     echo "${MODEL_NAME} results already generated..."
                 else
-                    python generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                    python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                     python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                     #python predict_seuil_expe_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
-                    python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                    python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                 fi
             done
        done

+ 3 - 3
generateAndTrain_maxwell_custom_center.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -55,11 +55,11 @@ for counter in {0..4}; do
 
                     echo "${MODEL_NAME} results already generated..."
                 else
-                    python generate_data_model_random_center.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                    python generate/generate_data_model_random_center.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                     python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                     #python predict_seuil_expe_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
-                    python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                    python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                 fi
             done
        done

+ 3 - 3
generateAndTrain_maxwell_custom_filters.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -37,10 +37,10 @@ for nb_zones in {4,6,8,10,12}; do
 
                 echo "${MODEL_NAME} results already generated..."
             else
-                python generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                python save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
             fi
         done
    done

+ 3 - 3
generateAndTrain_maxwell_custom_filters_center.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -37,10 +37,10 @@ for nb_zones in {4,6,8,10,12}; do
 
                 echo "${MODEL_NAME} results already generated..."
             else
-                python generate_data_model_random_center.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                python generate/generate_data_model_random_center.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                python save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
             fi
         done
    done

+ 3 - 3
generateAndTrain_maxwell_custom_filters_split.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -37,10 +37,10 @@ for nb_zones in {4,6,8,10,12}; do
 
                 echo "${MODEL_NAME} results already generated..."
             else
-                python generate_data_model_random_split.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                python generate/generate_data_model_random_split.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                python save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
             fi
         done
    done

+ 3 - 3
generateAndTrain_maxwell_custom_split.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -55,11 +55,11 @@ for counter in {0..4}; do
 
                     echo "${MODEL_NAME} results already generated..."
                 else
-                    python generate_data_model_random_split.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                    python generate/generate_data_model_random_split.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                     python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                     #python predict_seuil_expe_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
-                    python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                    python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                 fi
             done
        done

+ 1 - 0
dataset

@@ -0,0 +1 @@
+../data/Scenes/
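
The new dataset entry is a symbolic link whose target is ../data/Scenes/. On a fresh machine it can be recreated by hand, assuming the scenes really live one level above the repository:

```bash
# recreate the dataset symlink added by this commit
ln -s ../data/Scenes/ dataset
```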

+ 32 - 50
display_bits_shifted_scene.py

@@ -1,40 +1,34 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
 import numpy as np
 import random
 import time
 import json
 
+# image processing imports
 from PIL import Image
-from ipfml import processing
-from ipfml import metrics
 from skimage import color
 import matplotlib.pyplot as plt
 
-from modules.utils import config as cfg
+from ipfml.processing import compression, transform
+
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
 
+import custom_config as cfg
+from modules.utils import data as dt
 
-config_filename     = cfg.config_filename
+# variables and parameters
 zone_folder         = cfg.zone_folder
 min_max_filename    = cfg.min_max_filename_extension
 
 # define all scenes values
 scenes_list         = cfg.scenes_names
 scenes_indices      = cfg.scenes_indices
-choices             = cfg.normalization_choices
 path                = cfg.dataset_path
 zones               = cfg.zones_indices
 seuil_expe_filename = cfg.seuil_expe_filename
 
-metric_choices      = cfg.metric_choices_labels
 max_nb_bits = 8
 
 def display_data_scenes(nb_bits, p_scene):
@@ -50,21 +44,12 @@ def display_data_scenes(nb_bits, p_scene):
     scenes = [s for s in scenes if min_max_filename not in s]
 
     # go ahead each scenes
-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:
 
         if p_scene == folder_scene:
             print(folder_scene)
             scene_path = os.path.join(path, folder_scene)
 
-            config_file_path = os.path.join(scene_path, config_filename)
-
-            with open(config_file_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
-
             # construct each zones folder name
             zones_folder = []
 
@@ -77,16 +62,12 @@ def display_data_scenes(nb_bits, p_scene):
                 current_zone = "zone"+index_str
                 zones_folder.append(current_zone)
 
-            zones_images_data = []
             threshold_info = []
 
-            for id_zone, zone_folder in enumerate(zones_folder):
+            for zone_folder in zones_folder:
 
                 zone_path = os.path.join(scene_path, zone_folder)
 
-                current_counter_index = int(start_index_image)
-                end_counter_index = int(end_index_image)
-
                 # get threshold information
                 path_seuil = os.path.join(zone_path, seuil_expe_filename)
 
@@ -101,25 +82,28 @@ def display_data_scenes(nb_bits, p_scene):
             print(mean_threshold, "mean threshold found")
             threshold_image_found = False
 
-            # find appropriate mean threshold picture
-            while(current_counter_index <= end_counter_index and not threshold_image_found):
+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+
+            start_image_path = scene_images[0]
+            end_image_path   = scene_images[-1]
+
+            start_quality_image = dt.get_scene_image_quality(scene_images[0])
+            end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
 
-                if mean_threshold < int(current_counter_index):
-                    current_counter_index_str = str(current_counter_index)
+            # for each images
+            for img_path in scene_images:
+                current_quality_image = dt.get_scene_image_quality(img_path)
 
-                    while len(start_index_image) > len(current_counter_index_str):
-                        current_counter_index_str = "0" + current_counter_index_str
+                if mean_threshold < int(current_quality_image) and not threshold_image_found:
 
                     threshold_image_found = True
-                    threshold_image_zone = current_counter_index_str
+                    threshold_image_path = img_path
 
-                current_counter_index += step_counter
+                    threshold_image = dt.get_scene_image_quality(img_path)
 
             # all indexes of picture to plot
-            images_indexes = [start_index_image, threshold_image_zone, end_index_image]
-            images_data = []
-
-            print(images_indexes)
+            images_path = [start_image_path, threshold_image_path, end_image_path]
 
             low_bits_svd_values = []
 
@@ -127,16 +111,14 @@ def display_data_scenes(nb_bits, p_scene):
 
                 low_bits_svd_values.append([])
 
-                for index in images_indexes:
-
-                    img_path = os.path.join(scene_path, prefix_image_name + index + ".png")
+                for img_path in images_path:
 
                     current_img = Image.open(img_path)
 
                     block_used = np.array(current_img)
 
-                    low_bits_block = processing.rgb_to_LAB_L_bits(block_used, (i + 1, i + nb_bits + 1))
-                    low_bits_svd = metrics.get_SVD_s(low_bits_block)
+                    low_bits_block = transform.rgb_to_LAB_L_bits(block_used, (i + 1, i + nb_bits + 1))
+                    low_bits_svd = compression.get_SVD_s(low_bits_block)
                     low_bits_svd = [b / low_bits_svd[0] for b in low_bits_svd]
                     low_bits_svd_values[i].append(low_bits_svd)
 
@@ -146,9 +128,9 @@ def display_data_scenes(nb_bits, p_scene):
 
             for id, data in enumerate(low_bits_svd_values):
                 fig.add_subplot(3, 3, (id + 1))
-                plt.plot(data[0], label='Noisy_' + start_index_image)
-                plt.plot(data[1], label='Threshold_' + threshold_image_zone)
-                plt.plot(data[2], label='Reference_' + end_index_image)
+                plt.plot(data[0], label='Noisy_' + start_quality_image)
+                plt.plot(data[1], label='Threshold_' + threshold_image)
+                plt.plot(data[2], label='Reference_' + end_quality_image)
                 plt.ylabel('Lab SVD ' + str(nb_bits) + ' bits values shifted by ' + str(id), fontsize=14)
                 plt.xlabel('Vector features', fontsize=16)
                 plt.legend(bbox_to_anchor=(0.5, 1), loc=2, borderaxespad=0.2, fontsize=14)

+ 176 - 0
display/display_scenes_zones.py

@@ -0,0 +1,176 @@
+# main imports
+import sys, os, argparse
+import numpy as np
+import random
+import time
+import json
+
+# image processing imports
+from PIL import Image
+from skimage import color
+import matplotlib.pyplot as plt
+
+
+from ipfml.processing import segmentation, transform, compression
+from ipfml import utils
+
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+from data_attributes import get_svd_data
+
+
+# variables and parameters
+zone_folder         = cfg.zone_folder
+min_max_filename    = cfg.min_max_filename_extension
+
+# define all scenes values
+scenes_list         = cfg.scenes_names
+scenes_indices      = cfg.scenes_indices
+norm_choices        = cfg.normalization_choices
+path                = cfg.dataset_path
+zones               = cfg.zones_indices
+seuil_expe_filename = cfg.seuil_expe_filename
+
+features_choices      = cfg.features_choices_labels
+
+
+def display_data_scenes(data_type, p_scene, p_kind):
+    """
+    @brief Method which displays data from scene
+    @param data_type,  feature choice
+    @param scene, scene choice
+    @param mode, normalization choice
+    @return nothing
+    """
+
+    scenes = os.listdir(path)
+    # remove min max file from scenes folder
+    scenes = [s for s in scenes if min_max_filename not in s]
+
+    # go ahead each scenes
+    for folder_scene in scenes:
+
+        if p_scene == folder_scene:
+            print(folder_scene)
+            scene_path = os.path.join(path, folder_scene)
+
+            # construct each zones folder name
+            zones_folder = []
+
+            # get zones list info
+            for index in zones:
+                index_str = str(index)
+                if len(index_str) < 2:
+                    index_str = "0" + index_str
+
+                current_zone = "zone"+index_str
+                zones_folder.append(current_zone)
+
+            zones_images_data = []
+            threshold_info = []
+
+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+
+            start_image_path = scene_images[0]
+            end_image_path   = scene_images[-1]
+
+            start_quality_image = dt.get_scene_image_quality(scene_images[0])
+            end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
+
+            for id_zone, zone_folder in enumerate(zones_folder):
+
+                zone_path = os.path.join(scene_path, zone_folder)
+
+                # get threshold information
+                path_seuil = os.path.join(zone_path, seuil_expe_filename)
+
+                # open treshold path and get this information
+                with open(path_seuil, "r") as seuil_file:
+                    threshold_learned = int(seuil_file.readline().strip())
+
+                threshold_image_found = False
+
+                for img_path in scene_images:
+                    current_quality_image = dt.get_scene_image_quality(img_path)
+
+                    if threshold_learned < int(current_quality_image) and not threshold_image_found:
+
+                        threshold_image_found = True
+                        threshold_image_path = img_path
+
+                        threshold_image = dt.get_scene_image_postfix(img_path)
+                        threshold_info.append(threshold_image)
+
+                # all indexes of picture to plot
+                images_path = [start_image_path, threshold_image_path, end_image_path]
+                images_data = []
+
+                for img_path in images_path:
+
+                    current_img = Image.open(img_path)
+                    img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))
+
+                    # getting expected block id
+                    block = img_blocks[id_zone]
+
+                    data = get_svd_data(data_type, block)
+
+                    ##################
+                    # Data mode part #
+                    ##################
+
+                    # modify data depending mode
+
+                    if p_kind == 'svdn':
+                        data = utils.normalize_arr(data)
+
+                    if p_kind == 'svdne':
+                        path_min_max = os.path.join(path, data_type + min_max_filename)
+
+                        with open(path_min_max, 'r') as f:
+                            min_val = float(f.readline())
+                            max_val = float(f.readline())
+
+                        data = utils.normalize_arr_with_range(data, min_val, max_val)
+
+                    # append data
+                    images_data.append(data)
+
+                zones_images_data.append(images_data)
+
+            fig=plt.figure(figsize=(8, 8))
+            fig.suptitle(data_type + " values for " + p_scene + " scene (normalization : " + p_kind + ")", fontsize=20)
+
+            for id, data in enumerate(zones_images_data):
+                fig.add_subplot(4, 4, (id + 1))
+                plt.plot(data[0], label='Noisy_' + start_quality_image)
+                plt.plot(data[1], label='Threshold_' + threshold_info[id])
+                plt.plot(data[2], label='Reference_' + end_quality_image)
+                plt.ylabel(data_type + ' SVD, ZONE_' + str(id + 1), fontsize=18)
+                plt.xlabel('Vector features', fontsize=18)
+                plt.legend(bbox_to_anchor=(0.5, 1), loc=2, borderaxespad=0.2, fontsize=18)
+                plt.ylim(0, 0.1)
+            plt.show()
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Display zones curves of feature on scene ")
+
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
+    parser.add_argument('--scene', type=str, help='scene index to use', choices=scenes_indices)
+    parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=norm_choices)
+
+    args = parser.parse_args()
+
+    p_feature = args.feature
+    p_kind   = args.kind
+    p_scene  = scenes_list[scenes_indices.index(args.scene)]
+
+    display_data_scenes(p_feature, p_scene, p_kind)
+
+if __name__== "__main__":
+    main()
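Note on the refactor above: the per-zone threshold image is now found by scanning the sorted scene images instead of replaying a counter from the removed config file. A minimal standalone sketch of that lookup, assuming filenames carry a zero-padded quality index (the real parsing lives in modules/utils, not shown in this diff; `get_quality` below is a hypothetical stand-in for `dt.get_scene_image_quality`):

    def get_quality(img_path):
        # hypothetical stand-in for dt.get_scene_image_quality:
        # read the numeric postfix of a name such as 'scene_00050.png'
        return int(img_path.split('_')[-1].split('.')[0])

    def find_threshold_image(scene_images, threshold_learned):
        # scene_images is sorted by quality; keep the first image whose
        # quality index exceeds the learned threshold, as the loop above does
        for img_path in scene_images:
            if threshold_learned < get_quality(img_path):
                return img_path
        return None

    print(find_threshold_image(['s_00020.png', 's_00050.png', 's_00080.png'], 40))
    # -> 's_00050.png'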

+ 37 - 48
display_scenes_zones_shifted.py

@@ -1,39 +1,36 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
 import numpy as np
 import random
 import time
 import json
 
+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils
 from skimage import color
 import matplotlib.pyplot as plt
 
-from modules.utils import config as cfg
+from ipfml.processing import segmentation, transform, compression
+from ipfml import utils
+
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
 
-config_filename     = cfg.config_filename
+
+# variables and parameters
 zone_folder         = cfg.zone_folder
 min_max_filename    = cfg.min_max_filename_extension
 
 # define all scenes values
 scenes_list         = cfg.scenes_names
 scenes_indices      = cfg.scenes_indices
-choices             = cfg.normalization_choices
 path                = cfg.dataset_path
 zones               = cfg.zones_indices
 seuil_expe_filename = cfg.seuil_expe_filename
 
-metric_choices      = cfg.metric_choices_labels
-
 max_nb_bits = 8
 
 def display_data_scenes(p_scene, p_bits, p_shifted):
@@ -50,21 +47,12 @@ def display_data_scenes(p_scene, p_bits, p_shifted):
     scenes = [s for s in scenes if min_max_filename not in s]
 
     # go through each scene
-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:
 
         if p_scene == folder_scene:
             print(folder_scene)
             scene_path = os.path.join(path, folder_scene)
 
-            config_file_path = os.path.join(scene_path, config_filename)
-
-            with open(config_file_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
-
             # construct each zones folder name
             zones_folder = []
 
@@ -80,55 +68,56 @@ def display_data_scenes(p_scene, p_bits, p_shifted):
             zones_images_data = []
             threshold_info = []
 
+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+
+            start_image_path = scene_images[0]
+            end_image_path   = scene_images[-1]
+
+            start_quality_image = dt.get_scene_image_quality(scene_images[0])
+            end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
+
             for id_zone, zone_folder in enumerate(zones_folder):
 
                 zone_path = os.path.join(scene_path, zone_folder)
 
-                current_counter_index = int(start_index_image)
-                end_counter_index = int(end_index_image)
-
                 # get threshold information
                 path_seuil = os.path.join(zone_path, seuil_expe_filename)
 
                 # open threshold path and get this information
                 with open(path_seuil, "r") as seuil_file:
-                    seuil_learned = int(seuil_file.readline().strip())
+                    threshold_learned = int(seuil_file.readline().strip())
 
                 threshold_image_found = False
-                while(current_counter_index <= end_counter_index and not threshold_image_found):
 
-                    if seuil_learned < int(current_counter_index):
-                        current_counter_index_str = str(current_counter_index)
+                # for each image
+                for img_path in scene_images:
+                    current_quality_image = dt.get_scene_image_quality(img_path)
 
-                        while len(start_index_image) > len(current_counter_index_str):
-                            current_counter_index_str = "0" + current_counter_index_str
+                    if threshold_learned < int(current_quality_image) and not threshold_image_found:
 
                         threshold_image_found = True
-                        threshold_image_zone = current_counter_index_str
-                        threshold_info.append(threshold_image_zone)
+                        threshold_image_path = img_path
 
-                    current_counter_index += step_counter
+                        threshold_image = dt.get_scene_image_postfix(img_path)
+                        threshold_info.append(threshold_image)
 
                 # all indexes of picture to plot
-                images_indexes = [start_index_image, threshold_image_zone, end_index_image]
+                images_path = [start_image_path, threshold_image_path, end_image_path]
                 images_data = []
 
-                print(images_indexes)
-
-                for index in images_indexes:
-
-                    img_path = os.path.join(scene_path, prefix_image_name + index + ".png")
+                for img_path in images_path:
 
                     current_img = Image.open(img_path)
-                    img_blocks = processing.divide_in_blocks(current_img, (200, 200))
+                    img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))
 
                     # getting expected block id
                     block = img_blocks[id_zone]
 
                     # get data from mode
                     # Here you can add the way you compute data
-                    low_bits_block = processing.rgb_to_LAB_L_bits(block, (p_shifted + 1, p_shifted + p_bits + 1))
-                    data = metrics.get_SVD_s(low_bits_block)
+                    low_bits_block = transform.rgb_to_LAB_L_bits(block, (p_shifted + 1, p_shifted + p_bits + 1))
+                    data = compression.get_SVD_s(low_bits_block)
 
                     ##################
                     # Data mode part #
@@ -145,9 +134,9 @@ def display_data_scenes(p_scene, p_bits, p_shifted):
 
             for id, data in enumerate(zones_images_data):
                 fig.add_subplot(4, 4, (id + 1))
-                plt.plot(data[0], label='Noisy_' + start_index_image)
+                plt.plot(data[0], label='Noisy_' + start_quality_image)
                 plt.plot(data[1], label='Threshold_' + threshold_info[id])
-                plt.plot(data[2], label='Reference_' + end_index_image)
+                plt.plot(data[2], label='Reference_' + end_quality_image)
                 plt.ylabel('Lab SVD ' + str(p_bits) + ' bits shifted by ' + str(p_shifted) + ', ZONE_' + str(id + 1), fontsize=14)
                 plt.xlabel('Vector features', fontsize=16)
                 plt.legend(bbox_to_anchor=(0.5, 1), loc=2, borderaxespad=0.2, fontsize=14)
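The per-zone feature here is the singular-value spectrum of a bit-sliced L channel. The ipfml calls (`transform.rgb_to_LAB_L_bits`, `compression.get_SVD_s`) are external to this diff, so the following is only a rough numpy sketch of the idea, under the assumption that the interval `(p_shifted + 1, p_shifted + p_bits + 1)` keeps a window of 1-indexed bits of each 8-bit value:

    import numpy as np

    def keep_bits(channel, begin, end):
        # assumed behaviour: zero every bit outside the 1-indexed
        # window [begin, end) of each 8-bit value
        mask = 0
        for b in range(begin - 1, end - 1):
            mask |= 1 << b
        return np.asarray(channel, dtype=np.uint8) & mask

    def svd_spectrum(block):
        # singular values of a 2D block, largest first (the spirit of
        # get_SVD_s; the exact ipfml behaviour is assumed, not verified)
        return np.linalg.svd(np.asarray(block, dtype=float), compute_uv=False)

    block = np.random.randint(0, 256, (200, 200))
    print(svd_spectrum(keep_bits(block, 1, 4))[:5])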

+ 10 - 4
display_simulation_curves.py

@@ -1,13 +1,19 @@
+# main imports
 import numpy as np
 import pandas as pd
+import os, sys, argparse
 
+# image processing imports
 import matplotlib.pyplot as plt
-import os, sys, argparse
 
-from modules.utils.data import get_svd_data
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from data_attributes import get_svd_data
 
-from modules.utils import config as cfg
 
+# variables and parameters
 learned_zones_folder = cfg.learned_zones_folder
 models_name          = cfg.models_names_list
 label_freq           = 6
@@ -25,7 +31,7 @@ def display_curves(folder_path, model_name):
             data_filename = model_name
             learned_zones_folder_path = os.path.join(learned_zones_folder, data_filename)
 
-    data_files = [x for x in os.listdir(folder_path) if '.png' not in x]
+    data_files = [x for x in os.listdir(folder_path) if cfg.scene_image_extension not in x]
 
     scene_names = [f.split('_')[3] for f in data_files]
 

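The filter change above only swaps the hard-coded `'.png'` for `cfg.scene_image_extension`; the scene name is still taken as the fourth `_`-separated token of each data filename. A hypothetical example (the filename layout is inferred from `split('_')[3]`, not documented here):

    # hypothetical data filename; only the position of the scene token matters
    data_file = 'threshold_map_simulation_Appart1opt02_zone4'
    print(data_file.split('_')[3])  # -> 'Appart1opt02'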
+ 33 - 60
display_svd_area_data_scene.py

@@ -1,32 +1,24 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
-
 import numpy as np
-import random
-import time
-import json
 
+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils
-import ipfml.iqa.fr as fr_iqa
-
 from skimage import color
-
 import matplotlib.pyplot as plt
-from modules.utils.data import get_svd_data
 
-from modules.utils import config as cfg
+from ipfml.processing import segmentation, transform, compression
+from ipfml import utils
+import ipfml.iqa.fr as fr_iqa
+
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 # getting configuration information
-config_filename     = cfg.config_filename
 zone_folder         = cfg.zone_folder
 min_max_filename    = cfg.min_max_filename_extension
 
@@ -38,7 +30,7 @@ path                = cfg.dataset_path
 zones               = cfg.zones_indices
 seuil_expe_filename = cfg.seuil_expe_filename
 
-metric_choices      = cfg.metric_choices_labels
+features_choices    = cfg.features_choices_labels
 
 max_nb_bits = 8
 
@@ -46,7 +38,6 @@ integral_area_choices = ['trapz', 'simps']
 
 def get_area_under_curve(p_area, p_data):
 
-    noise_method = None
     function_name = 'integral_area_' + p_area
 
     try:
@@ -74,8 +65,6 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
     max_value_svd = 0
     min_value_svd = sys.maxsize
 
-    image_indices = []
-
     scenes = os.listdir(path)
     # remove min max file from scenes folder
     scenes = [s for s in scenes if min_max_filename not in s]
@@ -83,23 +72,12 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
     begin_data, end_data = p_interval
     begin_index, end_index = p_indices
 
-    data_min_max_filename = os.path.join(path, p_metric + min_max_filename)
-
     # go through each scene
-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:
 
         if p_scene == folder_scene:
             scene_path = os.path.join(path, folder_scene)
 
-            config_file_path = os.path.join(scene_path, config_filename)
-
-            with open(config_file_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
-
             # construct each zones folder name
             zones_folder = []
 
@@ -117,6 +95,10 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
 
             threshold_learned_zones = []
 
+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+            number_scene_image = len(scene_images)
+
             for id, zone_folder in enumerate(zones_folder):
 
                 # get threshold information
@@ -128,25 +110,17 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
                     threshold_learned = int(seuil_file.readline().strip())
                     threshold_learned_zones.append(threshold_learned)
 
-            current_counter_index = int(start_index_image)
-            end_counter_index = int(end_index_image)
-
             threshold_mean = np.mean(np.asarray(threshold_learned_zones))
             threshold_image_found = False
 
-            file_path = os.path.join(scene_path, prefix_image_name + "{}.png")
-
             svd_data = []
 
-            while(current_counter_index <= end_counter_index):
-
-                current_counter_index_str = str(current_counter_index)
-
-                while len(start_index_image) > len(current_counter_index_str):
-                    current_counter_index_str = "0" + current_counter_index_str
+            # for each image
+            for id_img, img_path in enumerate(scene_images):
+
+                current_quality_image = dt.get_scene_image_quality(img_path)
 
-                image_path = file_path.format(str(current_counter_index_str))
-                img = Image.open(image_path)
+                img = Image.open(img_path)
 
                 svd_values = get_svd_data(p_metric, img)
 
@@ -164,25 +138,24 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
                     max_value_svd = max_value
 
                 # keep in memory used data
-                if current_counter_index % p_step == 0:
-                    if current_counter_index >= begin_index and current_counter_index <= end_index:
-                        images_indices.append(current_counter_index_str)
+                if current_quality_image % p_step == 0:
+                    if current_quality_image >= begin_index and current_quality_image <= end_index:
+                        images_indices.append(current_quality_image)
                         svd_data.append(svd_values)
 
-                    if threshold_mean < int(current_counter_index) and not threshold_image_found:
+                    if threshold_mean < current_quality_image and not threshold_image_found:
 
                         threshold_image_found = True
-                        threshold_image_zone = current_counter_index_str
+                        image_name_postfix = dt.get_scene_image_postfix(img_path)
+                        threshold_image_zone = image_name_postfix
 
-                current_counter_index += step_counter
-                print('%.2f%%' % (current_counter_index / end_counter_index * 100))
+                print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
                 sys.stdout.write("\033[F")
 
 
             # all indices of picture to plot
             print(images_indices)
 
-            previous_data = []
             area_data = []
 
             for id, data in enumerate(svd_data):
@@ -249,7 +222,7 @@ def main():
     parser.add_argument('--scene', type=str, help='scene index to use', choices=cfg.scenes_indices)
     parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
     parser.add_argument('--indices', type=str, help='Samples interval to display', default='"0, 900"')
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='Feature data choice', choices=features_choices)
     parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=cfg.normalization_choices)
     parser.add_argument('--step', type=int, help='Each step samples to display', default=10)
     parser.add_argument('--norm', type=int, help='If values will be normalized or not', choices=[0, 1])
@@ -261,14 +234,14 @@ def main():
     p_scene    = scenes_list[scenes_indices.index(args.scene)]
     p_indices  = list(map(int, args.indices.split(',')))
     p_interval = list(map(int, args.interval.split(',')))
-    p_metric   = args.metric
+    p_feature  = args.feature
     p_mode     = args.mode
     p_step     = args.step
     p_norm     = args.norm
     p_area     = args.area
     p_ylim     = list(map(int, args.ylim.split(',')))
 
-    display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step, p_norm, p_area, p_ylim)
+    display_svd_values(p_scene, p_interval, p_indices, p_feature, p_mode, p_step, p_norm, p_area, p_ylim)
 
 if __name__== "__main__":
     main()

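`get_area_under_curve` builds the name `'integral_area_' + p_area` and resolves it dynamically against ipfml's `utils` (the `try:` body sits outside these hunks). A self-contained sketch of the two area choices using a plain dictionary dispatch; the `dx` step is illustrative, and `np.trapz`/`scipy.integrate.simps` (renamed `simpson` in newer SciPy) stand in for the ipfml helpers:

    import numpy as np
    from scipy.integrate import simps

    def area_under_curve(p_area, p_data, dx=1):
        # dictionary dispatch mirroring the 'integral_area_' + p_area lookup
        methods = {
            'trapz': lambda d: np.trapz(d, dx=dx),  # trapezoidal rule
            'simps': lambda d: simps(d, dx=dx),     # Simpson's rule
        }
        return methods[p_area](np.asarray(p_data, dtype=float))

    print(area_under_curve('trapz', [0.0, 1.0, 4.0, 9.0]))  # -> 9.5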
+ 35 - 67
display_svd_area_scenes.py

@@ -1,32 +1,22 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
-
 import numpy as np
-import random
-import time
-import json
 
+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils
-import ipfml.iqa.fr as fr_iqa
+import matplotlib.pyplot as plt
 
-from skimage import color
+import ipfml.iqa.fr as fr_iqa
+from ipfml import utils
 
-import matplotlib.pyplot as plt
-from modules.utils.data import get_svd_data
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
 
-from modules.utils import config as cfg
+import custom_config as cfg
+from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 # getting configuration information
-config_filename     = cfg.config_filename
 zone_folder         = cfg.zone_folder
 min_max_filename    = cfg.min_max_filename_extension
 
@@ -38,7 +28,7 @@ path                = cfg.dataset_path
 zones               = cfg.zones_indices
 seuil_expe_filename = cfg.seuil_expe_filename
 
-metric_choices      = cfg.metric_choices_labels
+features_choices    = cfg.features_choices_labels
 
 max_nb_bits = 8
 
@@ -46,7 +36,6 @@ integral_area_choices = ['trapz', 'simps']
 
 def get_area_under_curve(p_area, p_data):
 
-    noise_method = None
     function_name = 'integral_area_' + p_area
 
     try:
@@ -60,10 +49,9 @@ def get_area_under_curve(p_area, p_data):
 def display_svd_values(p_interval, p_indices, p_metric, p_mode, p_step, p_norm, p_area, p_ylim):
     """
     @brief Method which gives information about svd curves from zone of picture
-    @param p_scene, scene expected to show svd values
     @param p_interval, interval [begin, end] of svd data to display
-    @param p_interval, interval [begin, end] of samples or minutes from render generation engine
-    @param p_metric, metric computed to show
+    @param p_indices, indices to display
+    @param p_metric, feature computed to show
     @param p_mode, normalization's mode
     @param p_norm, normalization or not of selected svd data
     @param p_area, area method name to compute area under curve
@@ -80,30 +68,19 @@ def display_svd_values(p_interval, p_indices, p_metric, p_mode, p_step, p_norm,
     begin_data, end_data = p_interval
     begin_index, end_index = p_indices
 
-    data_min_max_filename = os.path.join(path, p_metric + min_max_filename)
-
     # Store all information about scenes
     scenes_area_data = []
     scenes_images_indices = []
     scenes_threshold_mean = []
 
     # go through each scene
-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:
 
         max_value_svd = 0
         min_value_svd = sys.maxsize
 
         scene_path = os.path.join(path, folder_scene)
 
-        config_file_path = os.path.join(scene_path, config_filename)
-
-        with open(config_file_path, "r") as config_file:
-            last_image_name = config_file.readline().strip()
-            prefix_image_name = config_file.readline().strip()
-            start_index_image = config_file.readline().strip()
-            end_index_image = config_file.readline().strip()
-            step_counter = int(config_file.readline().strip())
-
         # construct each zones folder name
         zones_folder = []
 
@@ -121,6 +98,10 @@ def display_svd_values(p_interval, p_indices, p_metric, p_mode, p_step, p_norm,
         images_indices = []
         threshold_learned_zones = []
 
+        # get all images of folder
+        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+        number_scene_image = len(scene_images)
+
         for id, zone_folder in enumerate(zones_folder):
 
             # get threshold information
@@ -132,26 +113,18 @@ def display_svd_values(p_interval, p_indices, p_metric, p_mode, p_step, p_norm,
                 threshold_learned = int(seuil_file.readline().strip())
                 threshold_learned_zones.append(threshold_learned)
 
-        current_counter_index = int(start_index_image)
-        end_counter_index = int(end_index_image)
-
         threshold_mean = np.mean(np.asarray(threshold_learned_zones))
         threshold_image_found = False
         scenes_threshold_mean.append(int(threshold_mean / p_step))
 
-        file_path = os.path.join(scene_path, prefix_image_name + "{}.png")
-
         svd_data = []
 
-        while(current_counter_index <= end_counter_index):
-
-            current_counter_index_str = str(current_counter_index)
+        # for each image
+        for id_img, img_path in enumerate(scene_images):
+
+            current_quality_image = dt.get_scene_image_quality(img_path)
 
-            while len(start_index_image) > len(current_counter_index_str):
-                current_counter_index_str = "0" + current_counter_index_str
-
-            image_path = file_path.format(str(current_counter_index_str))
-            img = Image.open(image_path)
+            img = Image.open(img_path)
 
             svd_values = get_svd_data(p_metric, img)
 
@@ -169,25 +142,22 @@ def display_svd_values(p_interval, p_indices, p_metric, p_mode, p_step, p_norm,
                 max_value_svd = max_value
 
             # keep in memory used data
-            if current_counter_index % p_step == 0:
-                if current_counter_index >= begin_index and current_counter_index <= end_index:
-                    images_indices.append(current_counter_index_str)
+            if current_quality_image % p_step == 0:
+                if current_quality_image >= begin_index and current_quality_image <= end_index:
+                    images_indices.append(dt.get_scene_image_postfix(img_path))
                     svd_data.append(svd_values)
 
-                if threshold_mean < int(current_counter_index) and not threshold_image_found:
+                if threshold_mean < current_quality_image and not threshold_image_found:
 
                     threshold_image_found = True
-                    threshold_image_zone = current_counter_index_str
 
-            current_counter_index += step_counter
-            print('%.2f%%' % (current_counter_index / end_counter_index * 100))
+            print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
             sys.stdout.write("\033[F")
 
 
             # all indices of picture to plot
         print("Scene %s : %s" % (folder_scene, images_indices))
 
-
         scenes_images_indices.append(images_indices)
 
         area_data = []
@@ -225,14 +195,12 @@ def display_svd_values(p_interval, p_indices, p_metric, p_mode, p_step, p_norm,
         threshold_id = 0
         scene_name = scenes[id]
         image_indices = scenes_images_indices[id]
-        threshold_image_zone = scenes_threshold_mean[id]
 
         p_label = scene_name + '_' + str(images_indices[id])
 
         threshold_id = scenes_threshold_mean[id]
 
         print(p_label)
-        start_ylim, end_ylim = p_ylim
 
         plt.plot(area_data, label=p_label)
         #ax2.set_xticks(range(len(images_indices)))
@@ -242,8 +210,8 @@ def display_svd_values(p_interval, p_indices, p_metric, p_mode, p_step, p_norm,
             plt.plot([threshold_id, threshold_id], [np.min(area_data), np.max(area_data)], 'k-', lw=2, color='red')
 
 
-    #start_ylim, end_ylim = p_ylim
-    #plt.ylim(start_ylim, end_ylim)
+    start_ylim, end_ylim = p_ylim
+    plt.ylim(start_ylim, end_ylim)
 
     plt.show()
 
@@ -251,10 +219,10 @@ def main():
 
     parser = argparse.ArgumentParser(description="Display area under curve on scene")
 
-    parser.add_argument('--scene', type=str, help='scene index to use', choices=cfg.scenes_indices)
+    #parser.add_argument('--scene', type=str, help='scene index to use', choices=cfg.scenes_indices)
     parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
     parser.add_argument('--indices', type=str, help='Samples interval to display', default='"0, 900"')
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='Feature data choice', choices=features_choices)
     parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=cfg.normalization_choices)
    parser.add_argument('--step', type=int, help='Each step samples to display', default=10)
     parser.add_argument('--norm', type=int, help='If values will be normalized or not', choices=[0, 1])
@@ -263,17 +231,17 @@
 
     args = parser.parse_args()
 
-    p_scene    = scenes_list[scenes_indices.index(args.scene)]
+    #p_scene    = scenes_list[scenes_indices.index(args.scene)]
     p_indices  = list(map(int, args.indices.split(',')))
     p_interval = list(map(int, args.interval.split(',')))
-    p_metric   = args.metric
+    p_feature  = args.feature
     p_mode     = args.mode
     p_step     = args.step
     p_norm     = args.norm
     p_area     = args.area
     p_ylim     = list(map(int, args.ylim.split(',')))
 
-    display_svd_values(p_interval, p_indices, p_metric, p_mode, p_step, p_norm, p_area, p_ylim)
+    display_svd_values(p_interval, p_indices, p_feature, p_mode, p_step, p_norm, p_area, p_ylim)
 
 if __name__== "__main__":
     main()

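The `svdn`/`svdne` modes in these display scripts lean on ipfml's `utils.normalize_arr` and `utils.normalize_arr_with_range`, whose implementation is not part of this diff; the assumed intent is ordinary min-max scaling, per vector for `svdn` and against the stored global min/max (read from the `min_max_filename` file) for `svdne`. A plain numpy sketch of that assumption:

    import numpy as np

    def normalize_arr(arr):
        # per-vector min-max scaling (svdn mode, assumed behaviour)
        arr = np.asarray(arr, dtype=float)
        return (arr - arr.min()) / (arr.max() - arr.min())

    def normalize_arr_with_range(arr, min_val, max_val):
        # scaling against a precomputed global range (svdne mode, assumed)
        arr = np.asarray(arr, dtype=float)
        return (arr - min_val) / (max_val - min_val)

    print(normalize_arr([2.0, 4.0, 6.0]))              # -> [0.  0.5 1. ]
    print(normalize_arr_with_range([2.0, 4.0], 0, 8))  # -> [0.25 0.5 ]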
+ 48 - 77
display_svd_data_error_scene.py

@@ -1,32 +1,23 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
-
 import numpy as np
-import random
-import time
-import json
 
+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils
-import ipfml.iqa.fr as fr_iqa
-
 from skimage import color
-
 import matplotlib.pyplot as plt
-from modules.utils.data import get_svd_data
 
-from modules.utils import config as cfg
+import ipfml.iqa.fr as fr_iqa
+from ipfml import utils
+
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 # getting configuration information
-config_filename     = cfg.config_filename
 zone_folder         = cfg.zone_folder
 min_max_filename    = cfg.min_max_filename_extension
 
@@ -38,7 +29,7 @@ path                = cfg.dataset_path
 zones               = cfg.zones_indices
 seuil_expe_filename = cfg.seuil_expe_filename
 
-metric_choices      = cfg.metric_choices_labels
+features_choices    = cfg.features_choices_labels
 
 max_nb_bits         = 8
 display_error       = False
@@ -48,7 +39,6 @@ error_data_choices  = ['mae', 'mse', 'ssim', 'psnr']
 
 def get_error_distance(p_error, y_true, y_test):
 
-    noise_method = None
     function_name = p_error
 
     try:
@@ -59,16 +49,16 @@ def get_error_distance(p_error, y_true, y_test):
     return error_method(y_true, y_test)
 
 
-def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step, p_norm, p_error, p_ylim):
+def display_svd_values(p_scene, p_interval, p_indices, p_feature, p_mode, p_step, p_norm, p_error, p_ylim):
     """
     @brief Method which gives information about svd curves from zone of picture
     @param p_scene, scene expected to show svd values
     @param p_interval, interval [begin, end] of svd data to display
     @param p_indices, interval [begin, end] of samples or minutes from render generation engine
-    @param p_metric, metric computed to show
+    @param p_feature, feature computed to show
     @param p_mode, normalization's mode
     @param p_norm, normalization or not of selected svd data
-    @param p_error, error metric used to display
+    @param p_error, error function used to display
     @param p_ylim, ylim choice to better display of data
     @return nothing
     """
@@ -76,8 +66,6 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
     max_value_svd = 0
     min_value_svd = sys.maxsize
 
-    image_indices = []
-
     scenes = os.listdir(path)
     # remove min max file from scenes folder
     scenes = [s for s in scenes if min_max_filename not in s]
@@ -85,23 +73,12 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
     begin_data, end_data = p_interval
     begin_index, end_index = p_indices
 
-    data_min_max_filename = os.path.join(path, p_metric + min_max_filename)
-
     # go through each scene
-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:
 
         if p_scene == folder_scene:
             scene_path = os.path.join(path, folder_scene)
 
-            config_file_path = os.path.join(scene_path, config_filename)
-
-            with open(config_file_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
-
             # construct each zones folder name
             zones_folder = []
 
@@ -115,10 +92,14 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
                 zones_folder.append(current_zone)
 
             images_data = []
-            images_indices = []
+            images_path = []
 
             threshold_learned_zones = []
 
+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+            number_scene_image = len(scene_images)
+
             for id, zone_folder in enumerate(zones_folder):
 
                 # get threshold information
@@ -131,27 +112,19 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
                     threshold_learned = int(seuil_file.readline().strip())
                     threshold_learned_zones.append(threshold_learned)
 
-            current_counter_index = int(start_index_image)
-            end_counter_index = int(end_index_image)
-
             threshold_mean = np.mean(np.asarray(threshold_learned_zones))
             threshold_image_found = False
 
-            file_path = os.path.join(scene_path, prefix_image_name + "{}.png")
-
             svd_data = []
 
+            # for each image
+            for id_img, img_path in enumerate(scene_images):
+
+                current_quality_image = dt.get_scene_image_quality(img_path)
 
-            while(current_counter_index <= end_counter_index):
-
-                current_counter_index_str = str(current_counter_index)
-
-                while len(start_index_image) > len(current_counter_index_str):
-                    current_counter_index_str = "0" + current_counter_index_str
+                img = Image.open(img_path)
 
-                image_path = file_path.format(str(current_counter_index_str))
-                img = Image.open(image_path)
-
-                svd_values = get_svd_data(p_metric, img)
+                svd_values = get_svd_data(p_feature, img)
 
                 if p_norm:
                     svd_values = svd_values[begin_data:end_data]
@@ -167,23 +140,19 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
                     max_value_svd = max_value
 
                 # keep in memory used data
-                if current_counter_index % p_step == 0:
-                    if current_counter_index >= begin_index and current_counter_index <= end_index:
-                        images_indices.append(current_counter_index_str)
+                if current_quality_image % p_step == 0:
+                    if current_quality_image >= begin_index and current_quality_image <= end_index:
+                        images_path.append(img_path)
                         svd_data.append(svd_values)
 
-                    if threshold_mean < int(current_counter_index) and not threshold_image_found:
+                    if threshold_mean < current_quality_image and not threshold_image_found:
 
                         threshold_image_found = True
-                        threshold_image_zone = current_counter_index_str
+                        threshold_image_zone = dt.get_scene_image_postfix(img_path)
 
-                current_counter_index += step_counter
-                print('%.2f%%' % (current_counter_index / end_counter_index * 100))
+                print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
                 sys.stdout.write("\033[F")
 
-            # all indices of picture to plot
-            print(images_indices)
-
             previous_data = []
             error_data = [0.]
 
@@ -204,8 +173,7 @@
 
                 # use of whole image data for computation of ssim or psnr
                 if p_error == 'ssim' or p_error == 'psnr':
-                    image_path = file_path.format(str(images_indices[id]))
-                    current_data = np.asarray(Image.open(image_path))
+                    current_data = np.asarray(Image.open(images_path[id]))
 
                 if len(previous_data) > 0:
 
@@ -224,18 +192,21 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
             ax2 = plt.subplot2grid(gridsize, (2, 0), colspan=2)
 
 
-            ax1.set_title(p_scene + ' scene interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_metric + ' metric, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=20)
+            ax1.set_title(p_scene + ' scene interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_feature + ' feature, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=20)
             ax1.set_ylabel('Image samples or time (minutes) generation', fontsize=14)
             ax1.set_xlabel('Vector features', fontsize=16)
 
             for id, data in enumerate(images_data):
+
+                current_quality_image = dt.get_scene_image_quality(images_path[id])
+                current_quality_postfix = dt.get_scene_image_postfix(images_path[id])
 
                 if display_error:
-                    p_label = p_scene + '_' + str(images_indices[id]) + " | " + p_error + ": " + str(error_data[id])
+                    p_label = p_scene + '_' + current_quality_postfix + " | " + p_error + ": " + str(error_data[id])
                 else:
-                    p_label = p_scene + '_' + str(images_indices[id])
+                    p_label = p_scene + '_' + current_quality_postfix
 
-                if images_indices[id] == threshold_image_zone:
+                if current_quality_postfix == threshold_image_zone:
                     ax1.plot(data, label=p_label + " (threshold mean)", lw=4, color='red')
                 else:
                     ax1.plot(data, label=p_label)
@@ -248,11 +219,11 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
             ax2.set_title(p_error + " information for whole step images")
             ax2.set_ylabel(p_error + ' error')
             ax2.set_xlabel('Number of samples per pixels or times')
-            ax2.set_xticks(range(len(images_indices)))
-            ax2.set_xticklabels(list(map(int, images_indices)))
+            ax2.set_xticks(range(len(images_path)))
+            ax2.set_xticklabels(list(map(dt.get_scene_image_quality, images_path)))
             ax2.plot(error_data)
 
-            plot_name = p_scene + '_' + p_metric + '_' + str(p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
+            plot_name = p_scene + '_' + p_feature + '_' + str(p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
             plt.savefig(plot_name)
 
 def main():
@@ -262,7 +233,7 @@ def main():
     parser.add_argument('--scene', type=str, help='scene index to use', choices=cfg.scenes_indices)
     parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
     parser.add_argument('--indices', type=str, help='Samples interval to display', default='"0, 900"')
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
     parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=cfg.normalization_choices)
     parser.add_argument('--step', type=int, help='Each step samples to display', default=10)
     parser.add_argument('--norm', type=int, help='If values will be normalized or not', choices=[0, 1])
@@ -274,14 +245,14 @@ def main():
     p_scene    = scenes_list[scenes_indices.index(args.scene)]
     p_indices  = list(map(int, args.indices.split(',')))
     p_interval = list(map(int, args.interval.split(',')))
-    p_metric   = args.metric
+    p_feature   = args.feature
     p_mode     = args.mode
     p_step     = args.step
     p_norm     = args.norm
     p_error    = args.error
     p_ylim     = list(map(int, args.ylim.split(',')))
 
-    display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step, p_norm, p_error, p_ylim)
+    display_svd_values(p_scene, p_interval, p_indices, p_feature, p_mode, p_step, p_norm, p_error, p_ylim)
 
 if __name__== "__main__":
     main()

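`get_error_distance` resolves `p_error` ('mae', 'mse', 'ssim', 'psnr') by name against `ipfml.iqa.fr`, called as `f(y_true, y_test)`. The IQA functions themselves are external; for the two simple distances, a numpy sketch of what such error functions conventionally compute (the real ipfml signatures are assumed from the call site):

    import numpy as np

    def mae(y_true, y_test):
        # mean absolute error between two feature vectors
        y_true, y_test = np.asarray(y_true, float), np.asarray(y_test, float)
        return np.mean(np.abs(y_true - y_test))

    def mse(y_true, y_test):
        # mean squared error between two feature vectors
        y_true, y_test = np.asarray(y_true, float), np.asarray(y_test, float)
        return np.mean((y_true - y_test) ** 2)

    print(mae([1.0, 2.0], [2.0, 4.0]))  # -> 1.5
    print(mse([1.0, 2.0], [2.0, 4.0]))  # -> 2.5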
+ 38 - 68
display_svd_data_scene.py

@@ -1,34 +1,22 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
-
 import numpy as np
-import random
-import time
-import json
-import math
 
+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils
-import ipfml.iqa.fr as fr_iqa
+import matplotlib.pyplot as plt
 
-from skimage import color
+import ipfml.iqa.fr as fr_iqa
+from ipfml import utils
 
-import matplotlib as mpl
-import matplotlib.pyplot as plt
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
 
-from modules.utils.data import get_svd_data
-from modules.utils import config as cfg
+import custom_config as cfg
+from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 # getting configuration information
-config_filename     = cfg.config_filename
 zone_folder         = cfg.zone_folder
 min_max_filename    = cfg.min_max_filename_extension
 
@@ -40,19 +28,19 @@ path                = cfg.dataset_path
 zones               = cfg.zones_indices
 seuil_expe_filename = cfg.seuil_expe_filename
 
-metric_choices      = cfg.metric_choices_labels
+features_choices    = cfg.features_choices_labels
 
 max_nb_bits         = 8
 display_error       = False
 
 
-def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step, p_norm, p_ylim):
+def display_svd_values(p_scene, p_interval, p_indices, p_feature, p_mode, p_step, p_norm, p_ylim):
     """
     @brief Method which gives information about svd curves from zone of picture
     @param p_scene, scene expected to show svd values
     @param p_interval, interval [begin, end] of svd data to display
     @param p_indices, interval [begin, end] of samples or minutes from render generation engine
-    @param p_metric, metric computed to show
+    @param p_feature, feature computed to show
     @param p_mode, normalization's mode
     @param p_norm, normalization or not of selected svd data
     @param p_ylim, ylim choice to better display of data
@@ -62,8 +50,6 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
     max_value_svd = 0
     min_value_svd = sys.maxsize
 
-    image_indices = []
-
     scenes = os.listdir(path)
     # remove min max file from scenes folder
     scenes = [s for s in scenes if min_max_filename not in s]
@@ -71,23 +57,12 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
     begin_data, end_data = p_interval
     begin_data, end_data = p_interval
     begin_index, end_index = p_indices
     begin_index, end_index = p_indices
 
 
-    data_min_max_filename = os.path.join(path, p_metric + min_max_filename)
-
     # go ahead each scenes
     # go ahead each scenes
-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:
 
 
         if p_scene == folder_scene:
         if p_scene == folder_scene:
             scene_path = os.path.join(path, folder_scene)
             scene_path = os.path.join(path, folder_scene)
 
 
-            config_file_path = os.path.join(scene_path, config_filename)
-
-            with open(config_file_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
-
             # construct each zones folder name
             # construct each zones folder name
             zones_folder = []
             zones_folder = []
 
 
@@ -104,11 +79,14 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
             images_indices = []

             threshold_learned_zones = []
-
+
+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+            number_scene_image = len(scene_images)
+
             for id, zone_folder in enumerate(zones_folder):

                 # get threshold information
-
                 zone_path = os.path.join(scene_path, zone_folder)
                 path_seuil = os.path.join(zone_path, seuil_expe_filename)

@@ -117,27 +95,20 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
                     threshold_learned = int(seuil_file.readline().strip())
                     threshold_learned_zones.append(threshold_learned)

-            current_counter_index = int(start_index_image)
-            end_counter_index = int(end_index_image)
-
             threshold_mean = np.mean(np.asarray(threshold_learned_zones))
             threshold_image_found = False

-            file_path = os.path.join(scene_path, prefix_image_name + "{}.png")
-
             svd_data = []

-            while(current_counter_index <= end_counter_index):

-                current_counter_index_str = str(current_counter_index)
+            # for each images
+            for id_img, img_path in enumerate(scene_images):
+
+                current_quality_image = dt.get_scene_image_quality(img_path)

-                while len(start_index_image) > len(current_counter_index_str):
-                    current_counter_index_str = "0" + current_counter_index_str
+                img = Image.open(img_path)

-                image_path = file_path.format(str(current_counter_index_str))
-                img = Image.open(image_path)
-
-                svd_values = get_svd_data(p_metric, img)
+                svd_values = get_svd_data(p_feature, img)

                 if p_norm:
                     svd_values = svd_values[begin_data:end_data]
@@ -155,18 +126,18 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
                     max_value_svd = max_value

                 # keep in memory used data
-                if current_counter_index % p_step == 0:
-                    if current_counter_index >= begin_index and current_counter_index <= end_index:
-                        images_indices.append(current_counter_index_str)
+                if current_quality_image % p_step == 0:
+                    if current_quality_image >= begin_index and current_quality_image <= end_index:
+
+                        images_indices.append(dt.get_scene_image_postfix(img_path))
                         svd_data.append(svd_values)

-                    if threshold_mean < int(current_counter_index) and not threshold_image_found:
+                    if threshold_mean < current_quality_image and not threshold_image_found:

                         threshold_image_found = True
-                        threshold_image_zone = current_counter_index_str
+                        threshold_image_zone = current_quality_image

-                current_counter_index += step_counter
-                print('%.2f%%' % (current_counter_index / end_counter_index * 100))
+                print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
                 sys.stdout.write("\033[F")


@@ -190,7 +161,6 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,


             # display all data using matplotlib (configure plt)
-            #fig = plt.figure(figsize=(30, 22))
             fig, ax = plt.subplots(figsize=(30, 22))
             ax.set_facecolor('#F9F9F9')
             #fig.patch.set_facecolor('#F9F9F9')
@@ -199,7 +169,7 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,
             #plt.rc('xtick', labelsize=22)
             #plt.rc('ytick', labelsize=22)

-            #plt.title(p_scene + ' scene interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_metric + ' metric, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=24)
+            #plt.title(p_scene + ' scene interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_feature + ' feature, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=24)
             ax.set_ylabel('Component values', fontsize=30)
             ax.set_xlabel('Vector features', fontsize=30)

@@ -214,10 +184,10 @@ def display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step,

             plt.legend(bbox_to_anchor=(0.65, 0.98), loc=2, borderaxespad=0.2, fontsize=24)

-            start_ylim, end_ylim = p_ylim
+            #start_ylim, end_ylim = p_ylim
             #ax.set_ylim(start_ylim, end_ylim)

-            plot_name = p_scene + '_' + p_metric + '_' + str(p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
+            plot_name = p_scene + '_' + p_feature + '_' + str(p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
             plt.savefig(plot_name, facecolor=ax.get_facecolor())

 def main():
@@ -227,7 +197,7 @@ def main():
     parser.add_argument('--scene', type=str, help='scene index to use', choices=cfg.scenes_indices)
     parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
     parser.add_argument('--indices', type=str, help='Samples interval to display', default='"0, 900"')
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
     parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=cfg.normalization_choices)
     parser.add_argument('--step', type=int, help='Each step samples to display', default=10)
     parser.add_argument('--norm', type=int, help='If values will be normalized or not', choices=[0, 1])
@@ -238,13 +208,13 @@ def main():
     p_scene    = scenes_list[scenes_indices.index(args.scene)]
     p_indices  = list(map(int, args.indices.split(',')))
     p_interval = list(map(int, args.interval.split(',')))
-    p_metric   = args.metric
+    p_feature  = args.feature
     p_mode     = args.mode
     p_step     = args.step
     p_norm     = args.norm
     p_ylim     = list(map(int, args.ylim.split(',')))

-    display_svd_values(p_scene, p_interval, p_indices, p_metric, p_mode, p_step, p_norm, p_ylim)
+    display_svd_values(p_scene, p_interval, p_indices, p_feature, p_mode, p_step, p_norm, p_ylim)

 if __name__== "__main__":
     main()

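The refactor above is the pattern repeated in every script of this release: the per-scene config file (image prefix, start/end index, step counter) is gone, and each script now lists the scene's `.png` files directly and derives the quality index from the filename through the reworked `modules/utils/data.py` helpers. A minimal sketch of what those two helpers are assumed to do (their real implementations live in `modules/utils/data.py`, which is not shown in this diff; the `'_'`-based split is an assumption about the image naming scheme, e.g. `Scene_00150.png`):

```python
import os

def get_scene_image_postfix(img_path):
    # hypothetical: 'Scene_00150.png' -> '00150' (zero-padded quality index)
    filename = os.path.basename(img_path)
    return filename.split('_')[-1].replace('.png', '')

def get_scene_image_quality(img_path):
    # hypothetical: numeric rendering quality (number of samples) of the image
    return int(get_scene_image_postfix(img_path))
```

This keeps the zero-padded string available for labels and file lookups (`get_scene_image_postfix`), while threshold comparisons use the integer form (`get_scene_image_quality`).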
+ 46 - 70
display_svd_zone_scene.py

@@ -1,30 +1,23 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
-
 import numpy as np
-import random
-import time
-import json

+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils
-from skimage import color
-
 import matplotlib.pyplot as plt
-from modules.utils.data import get_svd_data

-from modules.utils import config as cfg
+from ipfml.processing import segmentation
+import ipfml.iqa.fr as fr_iqa
+from ipfml import utils
+
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+from data_attributes import get_svd_data

 # getting configuration information
-config_filename     = cfg.config_filename
 zone_folder         = cfg.zone_folder
 min_max_filename    = cfg.min_max_filename_extension

@@ -36,7 +29,7 @@ path                = cfg.dataset_path
 zones               = cfg.zones_indices
 seuil_expe_filename = cfg.seuil_expe_filename

-metric_choices      = cfg.metric_choices_labels
+features_choices    = cfg.features_choices_labels

 generic_output_file_svd = '_random.csv'

@@ -44,7 +37,7 @@ max_nb_bits = 8
 min_value_interval = sys.maxsize
 max_value_interval = 0

-def get_min_max_value_interval(_scene, _interval, _metric):
+def get_min_max_value_interval(_scene, _interval, _feature):

     global min_value_interval, max_value_interval

@@ -53,7 +46,7 @@ def get_min_max_value_interval(_scene, _interval, _metric):
     # remove min max file from scenes folder
     scenes = [s for s in scenes if min_max_filename not in s]

-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:

         # only take care of current scene
         if folder_scene == _scene:
@@ -68,9 +61,9 @@ def get_min_max_value_interval(_scene, _interval, _metric):
                     index_str = "0" + index_str
                     index_str = "0" + index_str
                 zones_folder.append("zone"+index_str)
                 zones_folder.append("zone"+index_str)
 
 
-            for id_zone, zone_folder in enumerate(zones_folder):
+            for zone_folder in zones_folder:
                 zone_path = os.path.join(scene_path, zone_folder)
-                data_filename = _metric + "_svd" + generic_output_file_svd
+                data_filename = _feature + "_svd" + generic_output_file_svd
                 data_file_path = os.path.join(zone_path, data_filename)

                 # getting number of line and read randomly lines
@@ -83,11 +76,11 @@ def get_min_max_value_interval(_scene, _interval, _metric):
                     begin, end = _interval

                     line_data = line.split(';')
-                    metrics = line_data[begin+1:end+1]
-                    metrics = [float(m) for m in metrics]
+                    features = line_data[begin+1:end+1]
+                    features = [float(m) for m in features]

-                    min_value = min(metrics)
-                    max_value = max(metrics)
+                    min_value = min(features)
+                    max_value = max(features)

                     if min_value < min_value_interval:
                         min_value_interval = min_value
@@ -96,14 +89,14 @@ def get_min_max_value_interval(_scene, _interval, _metric):
                         max_value_interval = max_value


-def display_svd_values(p_scene, p_interval, p_indices, p_zone, p_metric, p_mode, p_step, p_norm, p_ylim):
+def display_svd_values(p_scene, p_interval, p_indices, p_zone, p_feature, p_mode, p_step, p_norm, p_ylim):
     """
     """
     @brief Method which gives information about svd curves from zone of picture
     @brief Method which gives information about svd curves from zone of picture
     @param p_scene, scene expected to show svd values
     @param p_scene, scene expected to show svd values
     @param p_interval, interval [begin, end] of svd data to display
     @param p_interval, interval [begin, end] of svd data to display
     @param p_interval, interval [begin, end] of samples or minutes from render generation engine
     @param p_interval, interval [begin, end] of samples or minutes from render generation engine
     @param p_zone, zone's identifier of picture
     @param p_zone, zone's identifier of picture
-    @param p_metric, metric computed to show
+    @param p_feature, feature computed to show
     @param p_mode, normalization's mode
     @param p_mode, normalization's mode
     @param p_step, step of images indices
     @param p_step, step of images indices
     @param p_norm, normalization or not of selected svd data
     @param p_norm, normalization or not of selected svd data
@@ -118,23 +111,14 @@ def display_svd_values(p_scene, p_interval, p_indices, p_zone, p_metric, p_mode,
     begin_data, end_data = p_interval
     begin_index, end_index = p_indices

-    data_min_max_filename = os.path.join(path, p_metric + min_max_filename)
+    data_min_max_filename = os.path.join(path, p_feature + min_max_filename)

     # go ahead each scenes
-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:

         if p_scene == folder_scene:
             scene_path = os.path.join(path, folder_scene)

-            config_file_path = os.path.join(scene_path, config_filename)
-
-            with open(config_file_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
-
             # construct each zones folder name
             zones_folder = []

@@ -148,15 +132,12 @@ def display_svd_values(p_scene, p_interval, p_indices, p_zone, p_metric, p_mode,
                 zones_folder.append(current_zone)

             zones_images_data = []
-            images_indices = []
+            images_path = []

             zone_folder = zones_folder[p_zone]

             zone_path = os.path.join(scene_path, zone_folder)

-            current_counter_index = int(start_index_image)
-            end_counter_index = int(end_index_image)
-
             # get threshold information
             path_seuil = os.path.join(zone_path, seuil_expe_filename)

@@ -166,40 +147,35 @@ def display_svd_values(p_scene, p_interval, p_indices, p_zone, p_metric, p_mode,

             threshold_image_found = False

-            while(current_counter_index <= end_counter_index):
+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])

-                current_counter_index_str = str(current_counter_index)
+            # for each images
+            for img_path in scene_images:
+                    
+                current_quality_image = dt.get_scene_image_quality(img_path)

-                while len(start_index_image) > len(current_counter_index_str):
-                    current_counter_index_str = "0" + current_counter_index_str
+                if current_quality_image % p_step == 0:
+                    if current_quality_image >= begin_index and current_quality_image <= end_index:
+                        images_path.append(dt.get_scene_image_postfix(img_path))

-                if current_counter_index % p_step == 0:
-                    if current_counter_index >= begin_index and current_counter_index <= end_index:
-                        images_indices.append(current_counter_index_str)
-
-                    if seuil_learned < int(current_counter_index) and not threshold_image_found:
+                    if seuil_learned < current_quality_image and not threshold_image_found:

                         threshold_image_found = True
-                        threshold_image_zone = current_counter_index_str
-
-                current_counter_index += step_counter
-
-            # all indices of picture to plot
-            print(images_indices)
+                        threshold_image_zone = dt.get_scene_image_postfix(img_path)

-            for index in images_indices:

-                img_path = os.path.join(scene_path, prefix_image_name + str(index) + ".png")
+            for img_path in images_path:

                 current_img = Image.open(img_path)
-                img_blocks = processing.divide_in_blocks(current_img, (200, 200))
+                img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))

                 # getting expected block id
                 block = img_blocks[p_zone]

                 # get data from mode
                 # Here you can add the way you compute data
-                data = get_svd_data(p_metric, block)
+                data = get_svd_data(p_feature, block)

                 # TODO : improve part of this code to get correct min / max values
                 if p_norm:
@@ -230,15 +206,15 @@ def display_svd_values(p_scene, p_interval, p_indices, p_zone, p_metric, p_mode,
                 else:
                     zones_images_data.append(data)

-            plt.title(p_scene + ' scene interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + ']' + p_metric + ' metric, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=20)
+            plt.title(p_scene + ' scene interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + ']' + p_feature + ' feature, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=20)
             plt.ylabel('Image samples or time (minutes) generation', fontsize=14)
             plt.xlabel('Vector features', fontsize=16)

             for id, data in enumerate(zones_images_data):

-                p_label = p_scene + "_" + images_indices[id]
+                p_label = p_scene + "_" + images_path[id]

-                if images_indices[id] == threshold_image_zone:
+                if images_path[id] == threshold_image_zone:
                     plt.plot(data, label=p_label, lw=4, color='red')
                 else:
                     plt.plot(data, label=p_label)
@@ -258,7 +234,7 @@ def main():
     parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
     parser.add_argument('--indices', type=str, help='Samples interval to display', default='"0, 900"')
     parser.add_argument('--zone', type=int, help='Zone to display', choices=list(range(0, 16)))
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
     parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=cfg.normalization_choices)
     parser.add_argument('--step', type=int, help='Each step samples to display', default=10)
     parser.add_argument('--norm', type=int, help='If values will be normalized or not', choices=[0, 1])
@@ -270,13 +246,13 @@ def main():
     p_indices  = list(map(int, args.indices.split(',')))
     p_interval = list(map(int, args.interval.split(',')))
     p_zone     = args.zone
-    p_metric   = args.metric
+    p_feature   = args.feature
     p_mode     = args.mode
     p_step     = args.step
     p_norm     = args.norm
     p_ylim     = list(map(int, args.ylim.split(',')))

-    display_svd_values(p_scene, p_interval, p_indices, p_zone, p_metric, p_mode, p_step, p_norm, p_ylim)
+    display_svd_values(p_scene, p_interval, p_indices, p_zone, p_feature, p_mode, p_step, p_norm, p_ylim)

 if __name__== "__main__":
     main()

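Since this script plots a single zone, the zone id does double duty: it selects the `zoneXX` folder for the threshold file and indexes the block list returned by `segmentation.divide_in_blocks`. A minimal sketch of that correspondence, assuming an 800x800 scene image so the 16 zones form a 4x4 grid of 200x200 blocks (the image path below is hypothetical):

```python
from PIL import Image
from ipfml.processing import segmentation

img = Image.open('dataset/SceneA/SceneA_00900.png')          # hypothetical image
img_blocks = segmentation.divide_in_blocks(img, (200, 200))  # 16 blocks, row by row

block = img_blocks[5]                  # corresponds to the scene's 'zone05' folder
block.save('/tmp/zone05_block.png')    # blocks behave like PIL images
```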
+ 9 - 0
display/generate_metrics_curve.sh

@@ -0,0 +1,9 @@
+#! /bin/bash
+
+for feature in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2"}; do
+
+    python display/display_svd_data_scene.py --scene D --interval "0, 800" --indices "0, 1200" --feature ${feature} --mode svdne --step 100 --norm 1 --error mse --ylim "0, 0.1"
+
+done
+
+

+ 0 - 249
display_scenes_zones.py

@@ -1,249 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
-import sys, os, argparse
-import numpy as np
-import random
-import time
-import json
-
-from PIL import Image
-from ipfml import processing, metrics, utils
-from skimage import color
-import matplotlib.pyplot as plt
-
-from modules.utils import config as cfg
-
-config_filename     = cfg.config_filename
-zone_folder         = cfg.zone_folder
-min_max_filename    = cfg.min_max_filename_extension
-
-# define all scenes values
-scenes_list         = cfg.scenes_names
-scenes_indices      = cfg.scenes_indices
-norm_choices        = cfg.normalization_choices
-path                = cfg.dataset_path
-zones               = cfg.zones_indices
-seuil_expe_filename = cfg.seuil_expe_filename
-
-metric_choices      = cfg.metric_choices_labels
-
-
-def display_data_scenes(data_type, p_scene, p_kind):
-    """
-    @brief Method which displays data from scene
-    @param data_type,  metric choice
-    @param scene, scene choice
-    @param mode, normalization choice
-    @return nothing
-    """
-
-    scenes = os.listdir(path)
-    # remove min max file from scenes folder
-    scenes = [s for s in scenes if min_max_filename not in s]
-
-    # go ahead each scenes
-    for id_scene, folder_scene in enumerate(scenes):
-
-        if p_scene == folder_scene:
-            print(folder_scene)
-            scene_path = os.path.join(path, folder_scene)
-
-            config_file_path = os.path.join(scene_path, config_filename)
-
-            with open(config_file_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
-
-            # construct each zones folder name
-            zones_folder = []
-
-            # get zones list info
-            for index in zones:
-                index_str = str(index)
-                if len(index_str) < 2:
-                    index_str = "0" + index_str
-
-                current_zone = "zone"+index_str
-                zones_folder.append(current_zone)
-
-            zones_images_data = []
-            threshold_info = []
-
-            for id_zone, zone_folder in enumerate(zones_folder):
-
-                zone_path = os.path.join(scene_path, zone_folder)
-
-                current_counter_index = int(start_index_image)
-                end_counter_index = int(end_index_image)
-
-                # get threshold information
-                path_seuil = os.path.join(zone_path, seuil_expe_filename)
-
-                # open treshold path and get this information
-                with open(path_seuil, "r") as seuil_file:
-                    seuil_learned = int(seuil_file.readline().strip())
-
-                threshold_image_found = False
-                while(current_counter_index <= end_counter_index and not threshold_image_found):
-
-                    if seuil_learned < int(current_counter_index):
-                        current_counter_index_str = str(current_counter_index)
-
-                        while len(start_index_image) > len(current_counter_index_str):
-                            current_counter_index_str = "0" + current_counter_index_str
-
-                        threshold_image_found = True
-                        threshold_image_zone = current_counter_index_str
-                        threshold_info.append(threshold_image_zone)
-
-                    current_counter_index += step_counter
-
-                # all indexes of picture to plot
-                images_indexes = [start_index_image, threshold_image_zone, end_index_image]
-                images_data = []
-
-                print(images_indexes)
-
-                for index in images_indexes:
-
-                    img_path = os.path.join(scene_path, prefix_image_name + index + ".png")
-
-                    current_img = Image.open(img_path)
-                    img_blocks = processing.divide_in_blocks(current_img, (200, 200))
-
-                    # getting expected block id
-                    block = img_blocks[id_zone]
-
-                    # get data from mode
-                    # Here you can add the way you compute data
-                    if data_type == 'lab':
-
-                        block_file_path = '/tmp/lab_img.png'
-                        block.save(block_file_path)
-                        data = processing.get_LAB_L_SVD_s(Image.open(block_file_path))
-
-                    if data_type == 'mscn_revisited':
-
-                        img_mscn_revisited = processing.rgb_to_mscn(block)
-
-                        # save tmp as img
-                        img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L')
-                        mscn_revisited_file_path = '/tmp/mscn_revisited_img.png'
-                        img_output.save(mscn_revisited_file_path)
-                        img_block = Image.open(mscn_revisited_file_path)
-
-                        # extract from temp image
-                        data = metrics.get_SVD_s(img_block)
-
-                    '''if data_type == 'mscn':
-
-                        img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8')
-                        img_mscn = processing.calculate_mscn_coefficients(img_gray, 7)
-                        img_mscn_norm = utils.normalize_2D_arr(img_mscn)
-
-                        img_mscn_gray = np.array(img_mscn_norm*255, 'uint8')
-
-                        data = metrics.get_SVD_s(img_mscn_gray)'''
-
-                    if data_type == 'low_bits_6':
-
-                        low_bits_6 = processing.rgb_to_LAB_L_low_bits(block, 63)
-
-                        # extract from temp image
-                        data = metrics.get_SVD_s(low_bits_6)
-
-
-                    if data_type == 'low_bits_5':
-
-                        low_bits_5 = processing.rgb_to_LAB_L_low_bits(block, 31)
-
-                        # extract from temp image
-                        data = metrics.get_SVD_s(low_bits_5)
-
-
-                    if data_type == 'low_bits_4':
-
-                        low_bits_4 = processing.rgb_to_LAB_L_low_bits(block)
-
-                        # extract from temp image
-                        data = metrics.get_SVD_s(low_bits_4)
-
-                    if data_type == 'low_bits_3':
-
-                        low_bits_3 = processing.rgb_to_LAB_L_low_bits(block, 7)
-
-                        # extract from temp image
-                        data = metrics.get_SVD_s(low_bits_3)
-
-                    if data_type == 'low_bits_2':
-
-                        low_bits_2 = processing.rgb_to_LAB_L_low_bits(block, 3)
-
-                        # extract from temp image
-                        data = metrics.get_SVD_s(low_bits_2)
-
-                    ##################
-                    # Data mode part #
-                    ##################
-
-                    # modify data depending mode
-
-                    if p_kind == 'svdn':
-                        data = utils.normalize_arr(data)
-
-                    if p_kind == 'svdne':
-                        path_min_max = os.path.join(path, data_type + min_max_filename)
-
-                        with open(path_min_max, 'r') as f:
-                            min_val = float(f.readline())
-                            max_val = float(f.readline())
-
-                        data = utils.normalize_arr_with_range(data, min_val, max_val)
-
-                    # append of data
-                    images_data.append(data)
-
-                zones_images_data.append(images_data)
-
-            fig=plt.figure(figsize=(8, 8))
-            fig.suptitle(data_type + " values for " + p_scene + " scene (normalization : " + p_kind + ")", fontsize=20)
-
-            for id, data in enumerate(zones_images_data):
-                fig.add_subplot(4, 4, (id + 1))
-                plt.plot(data[0], label='Noisy_' + start_index_image)
-                plt.plot(data[1], label='Threshold_' + threshold_info[id])
-                plt.plot(data[2], label='Reference_' + end_index_image)
-                plt.ylabel(data_type + ' SVD, ZONE_' + str(id + 1), fontsize=18)
-                plt.xlabel('Vector features', fontsize=18)
-                plt.legend(bbox_to_anchor=(0.5, 1), loc=2, borderaxespad=0.2, fontsize=18)
-                plt.ylim(0, 0.1)
-            plt.show()
-
-def main():
-
-    parser = argparse.ArgumentParser(description="Display zones curves of metric on scene ")
-
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
-    parser.add_argument('--scene', type=str, help='scene index to use', choices=scenes_indices)
-    parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=norm_choices)
-
-    args = parser.parse_args()
-
-    p_metric = args.metric
-    p_kind   = args.kind
-    p_scene  = scenes_list[scenes_indices.index(args.scene)]
-
-    display_data_scenes(p_metric, p_scene, p_kind)
-
-if __name__== "__main__":
-    main()

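This deletion is the counterpart of the new `display/display_scenes_zones.py`: the long per-feature if-chain above no longer lives in each display script but behind the single `get_svd_data(data_type, block)` entry point that the other scripts now import from `data_attributes`. A hypothetical sketch of that dispatcher, written against the older `ipfml` API used by the removed code (`data_attributes.py` itself is not part of this diff):

```python
from ipfml import processing, metrics

def get_svd_data(data_type, block):
    # every feature reduces a 200x200 block to a vector of SVD singular values
    if data_type == 'lab':
        return processing.get_LAB_L_SVD_s(block)

    if data_type == 'low_bits_2':
        return metrics.get_SVD_s(processing.rgb_to_LAB_L_low_bits(block, 3))

    if data_type == 'low_bits_6':
        return metrics.get_SVD_s(processing.rgb_to_LAB_L_low_bits(block, 63))

    raise ValueError('unexpected feature: ' + data_type)
```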
+ 37 - 57
generate_all_data.py

@@ -1,27 +1,25 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
 import numpy as np
 import random
 import time
 import json

-from modules.utils.data import get_svd_data
+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils
-from skimage import color

-from modules.utils import config as cfg
+from ipfml.processing import transform, segmentation
+from ipfml import utils
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+from data_attributes import get_svd_data
+

 # getting configuration information
-config_filename         = cfg.config_filename
 zone_folder             = cfg.zone_folder
 min_max_filename        = cfg.min_max_filename_extension

@@ -33,7 +31,7 @@ path                    = cfg.dataset_path
 zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename

-metric_choices          = cfg.metric_choices_labels
+features_choices        = cfg.features_choices_labels
 output_data_folder      = cfg.output_data_folder

 generic_output_file_svd = '_random.csv'
@@ -41,7 +39,7 @@ generic_output_file_svd = '_random.csv'
 def generate_data_svd(data_type, mode):
     """
     @brief Method which generates all .csv files from scenes
-    @param data_type,  metric choice
+    @param data_type,  feature choice
     @param mode, normalization choice
     @return nothing
     """
@@ -57,20 +55,11 @@ def generate_data_svd(data_type, mode):
     data_min_max_filename = os.path.join(path, data_type + min_max_filename)

     # go ahead each scenes
-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:

         print(folder_scene)
         scene_path = os.path.join(path, folder_scene)

-        config_file_path = os.path.join(scene_path, config_filename)
-
-        with open(config_file_path, "r") as config_file:
-            last_image_name = config_file.readline().strip()
-            prefix_image_name = config_file.readline().strip()
-            start_index_image = config_file.readline().strip()
-            end_index_image = config_file.readline().strip()
-            step_counter = int(config_file.readline().strip())
-
         # getting output filename
         output_svd_filename = data_type + "_" + mode + generic_output_file_svd

@@ -93,27 +82,21 @@ def generate_data_svd(data_type, mode):
             # add writer into list
             svd_output_files.append(open(svd_file_path, 'w'))

-
-        current_counter_index = int(start_index_image)
-        end_counter_index = int(end_index_image)
-
-
-        while(current_counter_index <= end_counter_index):
-
-            current_counter_index_str = str(current_counter_index)
-
-            while len(start_index_image) > len(current_counter_index_str):
-                current_counter_index_str = "0" + current_counter_index_str
-
-            img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
+        # get all images of folder
+        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+        number_scene_image = len(scene_images)
+            
+        for id_img, img_path in enumerate(scene_images):
+            
+            current_image_postfix = dt.get_scene_image_postfix(img_path)

             current_img = Image.open(img_path)
-            img_blocks = processing.divide_in_blocks(current_img, (200, 200))
+            img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))

             for id_block, block in enumerate(img_blocks):

                 ###########################
-                # Metric computation part #
+                # feature computation part #
                 ###########################

                 data = get_svd_data(data_type, block)
@@ -151,19 +134,16 @@ def generate_data_svd(data_type, mode):
                 current_file = svd_output_files[id_block]

                 # add of index
-                current_file.write(current_counter_index_str + ';')
+                current_file.write(current_image_postfix + ';')

                 for val in data:
                     current_file.write(str(val) + ";")

                 current_file.write('\n')

-            start_index_image_int = int(start_index_image)
-            print(data_type + "_" + mode + "_" + folder_scene + " - " + "{0:.2f}".format((current_counter_index - start_index_image_int) / (end_counter_index - start_index_image_int)* 100.) + "%")
+            print(data_type + "_" + mode + "_" + folder_scene + " - " + "{0:.2f}".format((id_img + 1) / number_scene_image * 100.) + "%")
             sys.stdout.write("\033[F")

-            current_counter_index += step_counter
-
         for f in svd_output_files:
             f.close()

@@ -180,26 +160,26 @@

 def main():

-    parser = argparse.ArgumentParser(description="Compute and prepare data of metric of all scenes (keep in memory min and max value found)")
+    parser = argparse.ArgumentParser(description="Compute and prepare data of feature of all scenes (keep in memory min and max value found)")

-    parser.add_argument('--metric', type=str, 
-                                    help="metric choice in order to compute data (use 'all' if all metrics are needed)", 
-                                    choices=metric_choices)
+    parser.add_argument('--feature', type=str, 
+                                    help="feature choice in order to compute data (use 'all' if all features are needed)", 
+                                    choices=features_choices)

     args = parser.parse_args()

-    p_metric = args.metric
+    p_feature = args.feature

-    # generate all or specific metric data
-    if p_metric == 'all':
-        for m in metric_choices:
+    # generate all or specific feature data
+    if p_feature == 'all':
+        for m in features_choices:
             generate_data_svd(m, 'svd')
             generate_data_svd(m, 'svdn')
             generate_data_svd(m, 'svdne')
     else:
-        generate_data_svd(p_metric, 'svd')
-        generate_data_svd(p_metric, 'svdn')
-        generate_data_svd(p_metric, 'svdne')
+        generate_data_svd(p_feature, 'svd')
+        generate_data_svd(p_feature, 'svdn')
+        generate_data_svd(p_feature, 'svdne')

 if __name__== "__main__":
     main()

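The output format written above is one CSV per zone and per mode (`<feature>_<mode>_random.csv`), with one line per image: the zero-padded image postfix, then every value of the feature vector, all semicolon-separated with a trailing separator. A minimal parsing sketch that mirrors how `construct_new_line` in the model-generation scripts reads these lines back (the line content here is made up):

```python
line = '00150;0.873;0.861;0.850;\n'   # hypothetical zone CSV line

line_data = line.strip().split(';')
image_postfix = line_data[0]                           # '00150'
values = [float(v) for v in line_data[1:] if v != '']  # feature vector

print(image_postfix, values)
```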
+ 35 - 39
generate_data_model.py

@@ -1,40 +1,36 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
 import numpy as np
+import pandas as pd
 import random
-import time
-import json

+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils

-from modules.utils import config as cfg
+from ipfml import utils
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
+

 # getting configuration information
-config_filename         = cfg.config_filename
 learned_folder          = cfg.learned_zones_folder
 min_max_filename        = cfg.min_max_filename_extension

-# define all scenes values
+# define all scenes variables
 scenes_list             = cfg.scenes_names
 scenes_indexes          = cfg.scenes_indices
-choices                 = cfg.normalization_choices
 path                    = cfg.dataset_path
 zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename

 renderer_choices        = cfg.renderer_choices
 normalization_choices   = cfg.normalization_choices
-metric_choices          = cfg.metric_choices_labels
+features_choices        = cfg.features_choices_labels
 output_data_folder      = cfg.output_data_folder
 custom_min_max_folder   = cfg.min_max_custom_folder
 min_max_ext             = cfg.min_max_filename_extension
@@ -50,15 +46,15 @@ def construct_new_line(path_seuil, interval, line, choice, each, norm):

     line_data = line.split(';')
     seuil = line_data[0]
-    metrics = line_data[begin+1:end+1]
+    features = line_data[begin+1:end+1]

-    metrics = [float(m) for id, m in enumerate(metrics) if id % each == 0 ]
+    features = [float(m) for id, m in enumerate(features) if id % each == 0 ]

     if norm:
         if choice == 'svdne':
-            metrics = utils.normalize_arr_with_range(metrics, min_value_interval, max_value_interval)
+            features = utils.normalize_arr_with_range(features, min_value_interval, max_value_interval)
         if choice == 'svdn':
-            metrics = utils.normalize_arr(metrics)
+            features = utils.normalize_arr(features)

     with open(path_seuil, "r") as seuil_file:
         seuil_learned = int(seuil_file.readline().strip())
@@ -68,14 +64,14 @@ def construct_new_line(path_seuil, interval, line, choice, each, norm):
     else:
         line = '0'

-    for idx, val in enumerate(metrics):
+    for val in features:
         line += ';'
         line += str(val)
     line += '\n'

     return line

-def get_min_max_value_interval(_scenes_list, _interval, _metric):
+def get_min_max_value_interval(_scenes_list, _interval, _feature):

     global min_value_interval, max_value_interval

@@ -84,7 +80,7 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
     # remove min max file from scenes folder
     scenes = [s for s in scenes if min_max_filename not in s]

-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:

         # only take care of maxwell scenes
         if folder_scene in _scenes_list:
@@ -99,9 +95,9 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
                     index_str = "0" + index_str
                     index_str = "0" + index_str
                 zones_folder.append("zone"+index_str)
                 zones_folder.append("zone"+index_str)
 
 
-            for id_zone, zone_folder in enumerate(zones_folder):
+            for zone_folder in zones_folder:
                 zone_path = os.path.join(scene_path, zone_folder)
-                data_filename = _metric + "_svd" + generic_output_file_svd
+                data_filename = _feature + "_svd" + generic_output_file_svd
                 data_file_path = os.path.join(zone_path, data_filename)

                 # getting number of line and read randomly lines
@@ -114,11 +110,11 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
                     begin, end = _interval

                     line_data = line.split(';')
-                    metrics = line_data[begin+1:end+1]
-                    metrics = [float(m) for m in metrics]
+                    features = line_data[begin+1:end+1]
+                    features = [float(m) for m in features]

-                    min_value = min(metrics)
-                    max_value = max(metrics)
+                    min_value = min(features)
+                    max_value = max(features)

                     if min_value < min_value_interval:
                         min_value_interval = min_value
@@ -127,7 +123,7 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
                         max_value_interval = max_value


-def generate_data_model(_filename, _interval, _choice, _metric, _scenes = scenes_list, _zones = zones_indices, _percent = 1, _step=1, _each=1, _norm=False, _custom=False):
+def generate_data_model(_filename, _interval, _choice, _feature, _scenes = scenes_list, _zones = zones_indices, _percent = 1, _step=1, _each=1, _norm=False, _custom=False):

     output_train_filename = _filename + ".train"
     output_test_filename = _filename + ".test"
@@ -142,7 +138,7 @@ def generate_data_model(_filename, _interval, _choice, _metric, _scenes = scenes
     train_file = open(output_train_filename, 'w')
     test_file = open(output_test_filename, 'w')

-    for id_scene, folder_scene in enumerate(scenes_list):
+    for folder_scene in scenes_list:

         # only take care of maxwell scenes
         scene_path = os.path.join(path, folder_scene)
@@ -172,9 +168,9 @@ def generate_data_model(_filename, _interval, _choice, _metric, _scenes = scenes

             # if custom normalization choices then we use svd values not already normalized
             if _custom:
-                data_filename = _metric + "_svd" + generic_output_file_svd
+                data_filename = _feature + "_svd" + generic_output_file_svd
             else:
-                data_filename = _metric + "_" + _choice + generic_output_file_svd
+                data_filename = _feature + "_" + _choice + generic_output_file_svd
 
 
             data_file_path = os.path.join(zone_path, data_filename)
             data_file_path = os.path.join(zone_path, data_filename)
 
 
@@ -220,7 +216,7 @@ def main():
     parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
     parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
     parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
     parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
     parser.add_argument('--zones', type=str, help='Zones indices to use for training data set')
     parser.add_argument('--percent', type=float, help='Percent of data use for train and test dataset (by default 1)', default=1.0)
@@ -234,7 +230,7 @@ def main():
     p_filename = args.output
     p_interval = list(map(int, args.interval.split(',')))
     p_kind     = args.kind
-    p_metric   = args.metric
+    p_feature  = args.feature
     p_scenes   = args.scenes.split(',')
     p_zones    = list(map(int, args.zones.split(',')))
     p_percent  = args.percent
@@ -251,12 +247,12 @@ def main():
     scenes_selected = []

     for scene_id in p_scenes:
-        index = scenes_indexes.index(scene_id.strip())
+        index = scenes_indices.index(scene_id.strip())
         scenes_selected.append(scenes_list[index])

     # find min max value if necessary to renormalize data
     if p_custom:
-        get_min_max_value_interval(scenes_list, p_interval, p_metric)
+        get_min_max_value_interval(scenes_list, p_interval, p_feature)
 
 
         # write new file to save
         # write new file to save
         if not os.path.exists(custom_min_max_folder):
         if not os.path.exists(custom_min_max_folder):
@@ -270,7 +266,7 @@ def main():
             f.write(str(max_value_interval) + '\n')
             f.write(str(max_value_interval) + '\n')
 
 
     # create database using img folder (generate first time only)
     # create database using img folder (generate first time only)
-    generate_data_model(p_filename, p_interval, p_kind, p_metric, scenes_selected, p_zones, p_percent, p_step, p_each, p_custom)
+    generate_data_model(p_filename, p_interval, p_kind, p_feature, scenes_selected, p_zones, p_percent, p_step, p_each, p_custom)
 
 
 if __name__== "__main__":
 if __name__== "__main__":
     main()
     main()

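The recurring hunk above selects the per-zone data file from the feature name and the normalization choice. A minimal sketch of that selection, assuming a plain `.csv` suffix for `generic_output_file_svd` (the real suffix comes from the config and is not shown in this diff):

    import os

    def select_data_filename(feature, choice, zone_path, custom=False,
                             generic_output_file_svd='.csv'):
        # custom normalization reads the raw "_svd" values and renormalizes later
        if custom:
            data_filename = feature + "_svd" + generic_output_file_svd
        else:
            data_filename = feature + "_" + choice + generic_output_file_svd
        return os.path.join(zone_path, data_filename)

    # e.g. select_data_filename('lab', 'svdne', 'dataset/Scene_A/zone00')
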
+ 36 - 40
generate_data_model_corr_random.py

@@ -1,32 +1,28 @@
-    #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
 import numpy as np
 import pandas as pd
-import random
-import time
-import json
 import subprocess
+import random

+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils

-from modules.utils import config as cfg
+from ipfml import utils
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
+

 # getting configuration information
-config_filename         = cfg.config_filename
 learned_folder          = cfg.learned_zones_folder
 min_max_filename        = cfg.min_max_filename_extension

-# define all scenes values
+# define all scenes variables
 all_scenes_list         = cfg.scenes_names
 all_scenes_indices      = cfg.scenes_indices

@@ -36,7 +32,7 @@ path                    = cfg.dataset_path
 zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename

-metric_choices          = cfg.metric_choices_labels
+features_choices        = cfg.features_choices_labels
 output_data_folder      = cfg.output_data_folder
 custom_min_max_folder   = cfg.min_max_custom_folder
 min_max_ext             = cfg.min_max_filename_extension
@@ -55,15 +51,15 @@ def construct_new_line(path_seuil, indices, line, choice, norm):

     line_data = np.array(line.split(';'))
     seuil = line_data[0]
-    metrics = line_data[indices]
-    metrics = metrics.astype('float32')
+    features = line_data[indices]
+    features = features.astype('float32')

     # TODO : check if it's always necessary to do that (loss of information for svd)
     if norm:
         if choice == 'svdne':
-            metrics = utils.normalize_arr_with_range(metrics, min_value_interval, max_value_interval)
+            features = utils.normalize_arr_with_range(features, min_value_interval, max_value_interval)
         if choice == 'svdn':
-            metrics = utils.normalize_arr(metrics)
+            features = utils.normalize_arr(features)

     with open(path_seuil, "r") as seuil_file:
         seuil_learned = int(seuil_file.readline().strip())
@@ -73,27 +69,27 @@ def construct_new_line(path_seuil, indices, line, choice, norm):
     else:
         line = '0'

-    for idx, val in enumerate(metrics):
+    for val in features:
         line += ';'
         line += str(val)
     line += '\n'

     return line

-def get_min_max_value_interval(_scenes_list, _indices, _metric):
+def get_min_max_value_interval(_scenes_list, _indices, _feature):

     global min_value_interval, max_value_interval

     # increase indices values by one to avoid label
     f = lambda x : x + 1
-    indices = f(_indices)
+    _indices = f(_indices)

     scenes = os.listdir(path)

     # remove min max file from scenes folder
     scenes = [s for s in scenes if min_max_filename not in s]

-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:

         # only take care of maxwell scenes
         if folder_scene in _scenes_list:
@@ -108,12 +104,12 @@ def get_min_max_value_interval(_scenes_list, _indices, _metric):
                     index_str = "0" + index_str
                 zones_folder.append("zone"+index_str)

-            for id_zone, zone_folder in enumerate(zones_folder):
+            for zone_folder in zones_folder:

                 zone_path = os.path.join(scene_path, zone_folder)

                 # if custom normalization choices then we use svd values not already normalized
-                data_filename = _metric + "_svd"+ generic_output_file_svd
+                data_filename = _feature + "_svd"+ generic_output_file_svd

                 data_file_path = os.path.join(zone_path, data_filename)

@@ -126,11 +122,11 @@ def get_min_max_value_interval(_scenes_list, _indices, _metric):

                     line_data = np.array(line.split(';'))

-                    metrics = line_data[[_indices]]
-                    metrics = [float(m) for m in metrics]
+                    features = line_data[[_indices]]
+                    features = [float(m) for m in features]

-                    min_value = min(metrics)
-                    max_value = max(metrics)
+                    min_value = min(features)
+                    max_value = max(features)

                     if min_value < min_value_interval:
                         min_value_interval = min_value
@@ -139,7 +135,7 @@ def get_min_max_value_interval(_scenes_list, _indices, _metric):
                         max_value_interval = max_value


-def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _scenes, _nb_zones = 4, _percent = 1, _random=0, _step=1, _custom = False):
+def generate_data_model(_scenes_list, _filename, _interval, _choice, _feature, _scenes, _nb_zones = 4, _percent = 1, _random=0, _step=1, _custom = False):

     output_train_filename = _filename + ".train"
     output_test_filename = _filename + ".test"
@@ -154,7 +150,7 @@ def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _s
     train_file_data = []
     test_file_data  = []

-    for id_scene, folder_scene in enumerate(_scenes_list):
+    for folder_scene in _scenes_list:

         scene_path = os.path.join(path, folder_scene)

@@ -191,9 +187,9 @@ def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _s

             # if custom normalization choices then we use svd values not already normalized
             if _custom:
-                data_filename = _metric + "_svd"+ generic_output_file_svd
+                data_filename = _feature + "_svd"+ generic_output_file_svd
             else:
-                data_filename = _metric + "_" + _choice + generic_output_file_svd
+                data_filename = _feature + "_" + _choice + generic_output_file_svd

             data_file_path = os.path.join(zone_path, data_filename)

@@ -251,7 +247,7 @@ def main():
     parser.add_argument('--highest', type=int, help='Specify if highest or lowest values are wishes', choices=[0, 1])
     parser.add_argument('--label', type=int, help='Specify if label correlation is used or not', choices=[0, 1])
     parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
     parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
     parser.add_argument('--nb_zones', type=int, help='Number of zones to use for training data set')
     parser.add_argument('--random', type=int, help='Data will be randomly filled or not', choices=[0, 1])
@@ -267,7 +263,7 @@ def main():
     p_highest  = args.highest
     p_label    = args.label
     p_kind     = args.kind
-    p_metric   = args.metric
+    p_feature  = args.feature
     p_scenes   = args.scenes.split(',')
     p_nb_zones = args.nb_zones
     p_random   = args.random
@@ -296,7 +292,7 @@ def main():
            '--output', temp_filename_path,
            '--interval', '0, 200',
            '--kind', p_kind,
-            '--metric', p_metric,
+            '--feature', p_feature,
            '--scenes', args.scenes,
            '--nb_zones', str(16),
            '--random', str(int(p_random)),
@@ -362,7 +358,7 @@ def main():

     # find min max value if necessary to renormalize data from `n` indices found
     if p_custom:
-        get_min_max_value_interval(scenes_list, indices, p_metric)
+        get_min_max_value_interval(scenes_list, indices, p_feature)

         # write new file to save
         if not os.path.exists(custom_min_max_folder):
@@ -379,7 +375,7 @@ def main():
             f.write(str(max_value_interval) + '\n')

     # create database using img folder (generate first time only)
-    generate_data_model(scenes_list, p_filename, indices, p_kind, p_metric, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_custom)
+    generate_data_model(scenes_list, p_filename, indices, p_kind, p_feature, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_custom)

 if __name__== "__main__":
     main()

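The renamed `construct_new_line` boils down to: label the row `1` while the learned threshold is still above the image's threshold, then append the (optionally renormalized) feature values. A condensed sketch using the same `ipfml` helpers; the default interval bounds are placeholders for the globals filled by `get_min_max_value_interval`:

    import numpy as np
    from ipfml import utils

    def build_row(seuil, seuil_learned, features, choice, norm,
                  min_value_interval=0.0, max_value_interval=1.0):
        features = np.array(features).astype('float32')

        if norm:
            if choice == 'svdne':
                # renormalize with the global interval computed beforehand
                features = utils.normalize_arr_with_range(features, min_value_interval, max_value_interval)
            if choice == 'svdn':
                features = utils.normalize_arr(features)

        # binary label: 1 while the image is still considered noisy
        line = '1' if seuil_learned > int(seuil) else '0'
        for val in features:
            line += ';' + str(val)
        return line + '\n'
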
+ 34 - 37
generate_data_model_random.py

@@ -1,30 +1,27 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
 import numpy as np
+import pandas as pd
 import random
-import time
-import json

+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils

-from modules.utils import config as cfg
+from ipfml import utils
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
+

 # getting configuration information
-config_filename         = cfg.config_filename
 learned_folder          = cfg.learned_zones_folder
 min_max_filename        = cfg.min_max_filename_extension

-# define all scenes values
+# define all scenes variables
 all_scenes_list         = cfg.scenes_names
 all_scenes_indices      = cfg.scenes_indices

@@ -34,7 +31,7 @@ zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename

 renderer_choices        = cfg.renderer_choices
-metric_choices          = cfg.metric_choices_labels
+features_choices        = cfg.features_choices_labels
 output_data_folder      = cfg.output_data_folder
 custom_min_max_folder   = cfg.min_max_custom_folder
 min_max_ext             = cfg.min_max_filename_extension
@@ -49,18 +46,18 @@ def construct_new_line(path_seuil, interval, line, choice, each, norm):

     line_data = line.split(';')
     seuil = line_data[0]
-    metrics = line_data[begin+1:end+1]
+    features = line_data[begin+1:end+1]

     # keep only if modulo result is 0 (keep only each wanted values)
-    metrics = [float(m) for id, m in enumerate(metrics) if id % each == 0]
+    features = [float(m) for id, m in enumerate(features) if id % each == 0]

     # TODO : check if it's always necessary to do that (loss of information for svd)
     if norm:

         if choice == 'svdne':
-            metrics = utils.normalize_arr_with_range(metrics, min_value_interval, max_value_interval)
+            features = utils.normalize_arr_with_range(features, min_value_interval, max_value_interval)
         if choice == 'svdn':
-            metrics = utils.normalize_arr(metrics)
+            features = utils.normalize_arr(features)

     with open(path_seuil, "r") as seuil_file:
         seuil_learned = int(seuil_file.readline().strip())
@@ -70,14 +67,14 @@ def construct_new_line(path_seuil, interval, line, choice, each, norm):
     else:
         line = '0'

-    for idx, val in enumerate(metrics):
+    for val in features:
         line += ';'
         line += str(val)
     line += '\n'

     return line

-def get_min_max_value_interval(_scenes_list, _interval, _metric):
+def get_min_max_value_interval(_scenes_list, _interval, _feature):

     global min_value_interval, max_value_interval

@@ -86,7 +83,7 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
     # remove min max file from scenes folder
     scenes = [s for s in scenes if min_max_filename not in s]

-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:

         # only take care of maxwell scenes
         if folder_scene in _scenes_list:
@@ -101,12 +98,12 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
                     index_str = "0" + index_str
                 zones_folder.append("zone"+index_str)

-            for id_zone, zone_folder in enumerate(zones_folder):
+            for zone_folder in zones_folder:

                 zone_path = os.path.join(scene_path, zone_folder)

                 # if custom normalization choices then we use svd values not already normalized
-                data_filename = _metric + "_svd"+ generic_output_file_svd
+                data_filename = _feature + "_svd"+ generic_output_file_svd

                 data_file_path = os.path.join(zone_path, data_filename)

@@ -121,11 +118,11 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):

                     line_data = line.split(';')

-                    metrics = line_data[begin+1:end+1]
-                    metrics = [float(m) for m in metrics]
+                    features = line_data[begin+1:end+1]
+                    features = [float(m) for m in features]

-                    min_value = min(metrics)
-                    max_value = max(metrics)
+                    min_value = min(features)
+                    max_value = max(features)

                     if min_value < min_value_interval:
                         min_value_interval = min_value
@@ -134,7 +131,7 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
                         max_value_interval = max_value


-def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _scenes, _nb_zones = 4, _percent = 1, _random=0, _step=1, _each=1, _custom = False):
+def generate_data_model(_scenes_list, _filename, _interval, _choice, _feature, _scenes, _nb_zones = 4, _percent = 1, _random=0, _step=1, _each=1, _custom = False):

     output_train_filename = _filename + ".train"
     output_test_filename = _filename + ".test"
@@ -149,7 +146,7 @@ def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _s
     train_file_data = []
     test_file_data  = []

-    for id_scene, folder_scene in enumerate(_scenes_list):
+    for folder_scene in _scenes_list:

         scene_path = os.path.join(path, folder_scene)

@@ -186,9 +183,9 @@ def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _s

             # if custom normalization choices then we use svd values not already normalized
             if _custom:
-                data_filename = _metric + "_svd"+ generic_output_file_svd
+                data_filename = _feature + "_svd"+ generic_output_file_svd
             else:
-                data_filename = _metric + "_" + _choice + generic_output_file_svd
+                data_filename = _feature + "_" + _choice + generic_output_file_svd

             data_file_path = os.path.join(zone_path, data_filename)

@@ -244,7 +241,7 @@ def main():
     parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
     parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
     parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
     parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
     parser.add_argument('--nb_zones', type=int, help='Number of zones to use for training data set')
     parser.add_argument('--random', type=int, help='Data will be randomly filled or not', choices=[0, 1])
@@ -259,7 +256,7 @@ def main():
     p_filename = args.output
     p_interval = list(map(int, args.interval.split(',')))
     p_kind     = args.kind
-    p_metric   = args.metric
+    p_feature  = args.feature
     p_scenes   = args.scenes.split(',')
     p_nb_zones = args.nb_zones
     p_random   = args.random
@@ -283,7 +280,7 @@ def main():

     # find min max value if necessary to renormalize data
     if p_custom:
-        get_min_max_value_interval(scenes_list, p_interval, p_metric)
+        get_min_max_value_interval(scenes_list, p_interval, p_feature)

         # write new file to save
         if not os.path.exists(custom_min_max_folder):
@@ -297,7 +294,7 @@ def main():
             f.write(str(max_value_interval) + '\n')

     # create database using img folder (generate first time only)
-    generate_data_model(scenes_list, p_filename, p_interval, p_kind, p_metric, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_each, p_custom)
+    generate_data_model(scenes_list, p_filename, p_interval, p_kind, p_feature, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_each, p_custom)

 if __name__== "__main__":
     main()

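The `each` parameter kept in `construct_new_line` subsamples the selected SVD interval, keeping one component out of every `each`. A small illustration of that modulo filter:

    # keep every `each`-th value of the interval, as the generators do
    values = ['0.91', '0.85', '0.82', '0.80', '0.79', '0.78']
    each = 2
    kept = [float(v) for i, v in enumerate(values) if i % each == 0]
    print(kept)   # [0.91, 0.82, 0.79]
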
+ 34 - 37
generate_data_model_random_center.py

@@ -1,30 +1,27 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
 import numpy as np
+import pandas as pd
 import random
-import time
-import json

+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils

-from modules.utils import config as cfg
+from ipfml import utils
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
+

 # getting configuration information
-config_filename         = cfg.config_filename
 learned_folder          = cfg.learned_zones_folder
 min_max_filename        = cfg.min_max_filename_extension

-# define all scenes values
+# define all scenes variables
 all_scenes_list         = cfg.scenes_names
 all_scenes_indices      = cfg.scenes_indices

@@ -34,7 +31,7 @@ zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename

 renderer_choices        = cfg.renderer_choices
-metric_choices          = cfg.metric_choices_labels
+features_choices        = cfg.features_choices_labels
 output_data_folder      = cfg.output_data_folder
 custom_min_max_folder   = cfg.min_max_custom_folder
 min_max_ext             = cfg.min_max_filename_extension
@@ -51,32 +48,32 @@ def construct_new_line(seuil_learned, interval, line, choice, each, norm):

     line_data = line.split(';')
     seuil = line_data[0]
-    metrics = line_data[begin+1:end+1]
+    features = line_data[begin+1:end+1]

     # keep only if modulo result is 0 (keep only each wanted values)
-    metrics = [float(m) for id, m in enumerate(metrics) if id % each == 0]
+    features = [float(m) for id, m in enumerate(features) if id % each == 0]

     # TODO : check if it's always necessary to do that (loss of information for svd)
     if norm:

         if choice == 'svdne':
-            metrics = utils.normalize_arr_with_range(metrics, min_value_interval, max_value_interval)
+            features = utils.normalize_arr_with_range(features, min_value_interval, max_value_interval)
         if choice == 'svdn':
-            metrics = utils.normalize_arr(metrics)
+            features = utils.normalize_arr(features)

     if seuil_learned > int(seuil):
         line = '1'
     else:
         line = '0'

-    for idx, val in enumerate(metrics):
+    for val in features:
         line += ';'
         line += str(val)
     line += '\n'

     return line

-def get_min_max_value_interval(_scenes_list, _interval, _metric):
+def get_min_max_value_interval(_scenes_list, _interval, _feature):

     global min_value_interval, max_value_interval

@@ -85,7 +82,7 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
     # remove min max file from scenes folder
     scenes = [s for s in scenes if min_max_filename not in s]

-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:

         # only take care of maxwell scenes
         if folder_scene in _scenes_list:
@@ -100,12 +97,12 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
                     index_str = "0" + index_str
                 zones_folder.append("zone"+index_str)

-            for id_zone, zone_folder in enumerate(zones_folder):
+            for zone_folder in zones_folder:

                 zone_path = os.path.join(scene_path, zone_folder)

                 # if custom normalization choices then we use svd values not already normalized
-                data_filename = _metric + "_svd"+ generic_output_file_svd
+                data_filename = _feature + "_svd"+ generic_output_file_svd

                 data_file_path = os.path.join(zone_path, data_filename)

@@ -120,11 +117,11 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):

                     line_data = line.split(';')

-                    metrics = line_data[begin+1:end+1]
-                    metrics = [float(m) for m in metrics]
+                    features = line_data[begin+1:end+1]
+                    features = [float(m) for m in features]

-                    min_value = min(metrics)
-                    max_value = max(metrics)
+                    min_value = min(features)
+                    max_value = max(features)

                     if min_value < min_value_interval:
                         min_value_interval = min_value
@@ -133,7 +130,7 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
                         max_value_interval = max_value


-def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _scenes, _nb_zones = 4, _percent = 1, _random=0, _step=1, _each=1, _custom = False):
+def generate_data_model(_scenes_list, _filename, _interval, _choice, _feature, _scenes, _nb_zones = 4, _percent = 1, _random=0, _step=1, _each=1, _custom = False):

     output_train_filename = _filename + ".train"
     output_test_filename = _filename + ".test"
@@ -148,7 +145,7 @@ def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _s
     train_file_data = []
     test_file_data  = []

-    for id_scene, folder_scene in enumerate(_scenes_list):
+    for folder_scene in _scenes_list:

         scene_path = os.path.join(path, folder_scene)

@@ -185,9 +182,9 @@ def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _s

             # if custom normalization choices then we use svd values not already normalized
             if _custom:
-                data_filename = _metric + "_svd"+ generic_output_file_svd
+                data_filename = _feature + "_svd"+ generic_output_file_svd
             else:
-                data_filename = _metric + "_" + _choice + generic_output_file_svd
+                data_filename = _feature + "_" + _choice + generic_output_file_svd

             data_file_path = os.path.join(zone_path, data_filename)

@@ -255,7 +252,7 @@ def main():
     parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
     parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
     parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
     parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
     parser.add_argument('--nb_zones', type=int, help='Number of zones to use for training data set')
     parser.add_argument('--random', type=int, help='Data will be randomly filled or not', choices=[0, 1])
@@ -270,7 +267,7 @@ def main():
     p_filename = args.output
     p_interval = list(map(int, args.interval.split(',')))
     p_kind     = args.kind
-    p_metric   = args.metric
+    p_feature  = args.feature
     p_scenes   = args.scenes.split(',')
     p_nb_zones = args.nb_zones
     p_random   = args.random
@@ -294,7 +291,7 @@ def main():

     # find min max value if necessary to renormalize data
     if p_custom:
-        get_min_max_value_interval(scenes_list, p_interval, p_metric)
+        get_min_max_value_interval(scenes_list, p_interval, p_feature)

         # write new file to save
         if not os.path.exists(custom_min_max_folder):
@@ -308,7 +305,7 @@ def main():
             f.write(str(max_value_interval) + '\n')

     # create database using img folder (generate first time only)
-    generate_data_model(scenes_list, p_filename, p_interval, p_kind, p_metric, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_each, p_custom)
+    generate_data_model(scenes_list, p_filename, p_interval, p_kind, p_feature, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_each, p_custom)

 if __name__== "__main__":
     main()

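`get_min_max_value_interval`, shared by all four generators, tracks one global minimum and maximum across every zone file so that `svdne` can renormalize against a common range. The core update, stripped of the directory walk; the `sys.maxsize` sentinels are an assumption, since the initialization sits outside these hunks:

    import sys

    min_value_interval = sys.maxsize
    max_value_interval = -sys.maxsize

    def update_interval(features):
        global min_value_interval, max_value_interval
        min_value_interval = min(min_value_interval, min(features))
        max_value_interval = max(max_value_interval, max(features))

    update_interval([0.4, 0.9])
    update_interval([0.2, 0.7])
    print(min_value_interval, max_value_interval)   # 0.2 0.9
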
+ 34 - 37
generate_data_model_random_split.py

@@ -1,30 +1,27 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Sep 14 21:02:42 2018
-
-@author: jbuisine
-"""
-
-from __future__ import print_function
+# main imports
 import sys, os, argparse
 import numpy as np
+import pandas as pd
 import random
-import time
-import json

+# image processing imports
 from PIL import Image
-from ipfml import processing, metrics, utils

-from modules.utils import config as cfg
+from ipfml import utils
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
+

 # getting configuration information
-config_filename         = cfg.config_filename
 learned_folder          = cfg.learned_zones_folder
 min_max_filename        = cfg.min_max_filename_extension

-# define all scenes values
+# define all scenes variables
 all_scenes_list         = cfg.scenes_names
 all_scenes_indices      = cfg.scenes_indices

@@ -34,7 +31,7 @@ zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename

 renderer_choices        = cfg.renderer_choices
-metric_choices          = cfg.metric_choices_labels
+features_choices        = cfg.features_choices_labels
 output_data_folder      = cfg.output_data_folder
 custom_min_max_folder   = cfg.min_max_custom_folder
 min_max_ext             = cfg.min_max_filename_extension
@@ -51,32 +48,32 @@ def construct_new_line(seuil_learned, interval, line, choice, each, norm):

     line_data = line.split(';')
     seuil = line_data[0]
-    metrics = line_data[begin+1:end+1]
+    features = line_data[begin+1:end+1]

     # keep only if modulo result is 0 (keep only each wanted values)
-    metrics = [float(m) for id, m in enumerate(metrics) if id % each == 0]
+    features = [float(m) for id, m in enumerate(features) if id % each == 0]

     # TODO : check if it's always necessary to do that (loss of information for svd)
     if norm:

         if choice == 'svdne':
-            metrics = utils.normalize_arr_with_range(metrics, min_value_interval, max_value_interval)
+            features = utils.normalize_arr_with_range(features, min_value_interval, max_value_interval)
         if choice == 'svdn':
-            metrics = utils.normalize_arr(metrics)
+            features = utils.normalize_arr(features)

     if seuil_learned > int(seuil):
         line = '1'
     else:
         line = '0'

-    for idx, val in enumerate(metrics):
+    for val in features:
         line += ';'
         line += str(val)
     line += '\n'

     return line

-def get_min_max_value_interval(_scenes_list, _interval, _metric):
+def get_min_max_value_interval(_scenes_list, _interval, _feature):

     global min_value_interval, max_value_interval

@@ -85,7 +82,7 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
     # remove min max file from scenes folder
     scenes = [s for s in scenes if min_max_filename not in s]

-    for id_scene, folder_scene in enumerate(scenes):
+    for folder_scene in scenes:

         # only take care of maxwell scenes
         if folder_scene in _scenes_list:
@@ -100,12 +97,12 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
                     index_str = "0" + index_str
                 zones_folder.append("zone"+index_str)

-            for id_zone, zone_folder in enumerate(zones_folder):
+            for zone_folder in zones_folder:

                 zone_path = os.path.join(scene_path, zone_folder)

                 # if custom normalization choices then we use svd values not already normalized
-                data_filename = _metric + "_svd"+ generic_output_file_svd
+                data_filename = _feature + "_svd"+ generic_output_file_svd

                 data_file_path = os.path.join(zone_path, data_filename)

@@ -120,11 +117,11 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):

                     line_data = line.split(';')

-                    metrics = line_data[begin+1:end+1]
-                    metrics = [float(m) for m in metrics]
+                    features = line_data[begin+1:end+1]
+                    features = [float(m) for m in features]

-                    min_value = min(metrics)
-                    max_value = max(metrics)
+                    min_value = min(features)
+                    max_value = max(features)

                     if min_value < min_value_interval:
                         min_value_interval = min_value
@@ -133,7 +130,7 @@ def get_min_max_value_interval(_scenes_list, _interval, _metric):
                         max_value_interval = max_value


-def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _scenes, _nb_zones = 4, _percent = 1, _random=0, _step=1, _each=1, _custom = False):
+def generate_data_model(_scenes_list, _filename, _interval, _choice, _feature, _scenes, _nb_zones = 4, _percent = 1, _random=0, _step=1, _each=1, _custom = False):

     output_train_filename = _filename + ".train"
     output_test_filename = _filename + ".test"
@@ -148,7 +145,7 @@ def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _s
     train_file_data = []
     test_file_data  = []

-    for id_scene, folder_scene in enumerate(_scenes_list):
+    for folder_scene in _scenes_list:

         scene_path = os.path.join(path, folder_scene)

@@ -185,9 +182,9 @@ def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _s

             # if custom normalization choices then we use svd values not already normalized
             if _custom:
-                data_filename = _metric + "_svd"+ generic_output_file_svd
+                data_filename = _feature + "_svd"+ generic_output_file_svd
             else:
-                data_filename = _metric + "_" + _choice + generic_output_file_svd
+                data_filename = _feature + "_" + _choice + generic_output_file_svd

             data_file_path = os.path.join(zone_path, data_filename)

@@ -254,7 +251,7 @@ def main():
     parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
     parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
     parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
     parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
     parser.add_argument('--nb_zones', type=int, help='Number of zones to use for training data set')
     parser.add_argument('--random', type=int, help='Data will be randomly filled or not', choices=[0, 1])
@@ -269,7 +266,7 @@ def main():
     p_filename = args.output
     p_interval = list(map(int, args.interval.split(',')))
     p_kind     = args.kind
-    p_metric   = args.metric
+    p_feature  = args.feature
     p_scenes   = args.scenes.split(',')
     p_nb_zones = args.nb_zones
     p_random   = args.random
@@ -293,7 +290,7 @@ def main():

     # find min max value if necessary to renormalize data
     if p_custom:
-        get_min_max_value_interval(scenes_list, p_interval, p_metric)
+        get_min_max_value_interval(scenes_list, p_interval, p_feature)

         # write new file to save
         if not os.path.exists(custom_min_max_folder):
@@ -307,7 +304,7 @@ def main():
             f.write(str(max_value_interval) + '\n')

     # create database using img folder (generate first time only)
-    generate_data_model(scenes_list, p_filename, p_interval, p_kind, p_metric, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_each, p_custom)
+    generate_data_model(scenes_list, p_filename, p_interval, p_kind, p_feature, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_each, p_custom)

 if __name__== "__main__":
     main()

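Each generator also rebuilds the zone folder names the same way, zero-padding single-digit indices; with the usual sixteen zones the loop is equivalent to:

    zones_folder = []
    for index in range(16):
        index_str = str(index)
        if len(index_str) < 2:
            index_str = "0" + index_str
        zones_folder.append("zone" + index_str)

    print(zones_folder[:3])   # ['zone00', 'zone01', 'zone02']
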
+ 0 - 6
generate_all_simulate_curves.sh

@@ -1,6 +0,0 @@
-for file in "threshold_map"/*; do
-
-    echo ${file}
-
-    python display_simulation_curves.py --folder ${file}
-done

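The deleted loop only iterated the threshold maps and plotted each one. Should it be needed again, a Python equivalent of the removed shell script, using the same paths as above:

    import os
    import subprocess

    for file in sorted(os.listdir('threshold_map')):
        folder = os.path.join('threshold_map', file)
        print(folder)
        subprocess.run(['python', 'display_simulation_curves.py', '--folder', folder], check=True)
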
+ 0 - 7
generate_metrics_curve.sh

@@ -1,7 +0,0 @@
-for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2"}; do
-
-    python display_svd_data_scene.py --scene D --interval "0, 800" --indices "0, 1200" --metric ${metric} --mode svdne --step 100 --norm 1 --error mse --ylim "0, 0.1"
-
-done
-
-

+ 1 - 0
modules/models.py

@@ -1,3 +1,4 @@
+# models imports
 from sklearn.model_selection import GridSearchCV
 from sklearn.linear_model import LogisticRegression
 from sklearn.ensemble import RandomForestClassifier, VotingClassifier

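The imports in `modules/models.py` point to sklearn ensembles tuned by grid search. A minimal sketch of the kind of model they support; the estimators and grid values here are illustrative, not the project's configuration:

    from sklearn.model_selection import GridSearchCV
    from sklearn.linear_model import LogisticRegression
    from sklearn.ensemble import RandomForestClassifier, VotingClassifier

    def ensemble_model(X_train, y_train):
        lr = LogisticRegression(solver='liblinear')
        rf = RandomForestClassifier(n_estimators=100)
        clf = VotingClassifier(estimators=[('lr', lr), ('rf', rf)], voting='soft')
        # grid-search hyperparameters of a nested estimator via its name prefix
        grid = GridSearchCV(clf, {'rf__n_estimators': [50, 100]}, cv=5)
        grid.fit(X_train, y_train)
        return grid.best_estimator_
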
+ 1 - 0
modules

@@ -0,0 +1 @@
+Subproject commit 139aa3c2312e9449b32d1d6fa506d741e7790c98

+ 0 - 0
modules/utils/__init__.py


Diff file suppressed because it is too large
+ 0 - 41
modules/utils/config.py


+ 13 - 9
save_model_result_in_md.py

@@ -1,17 +1,21 @@
-from sklearn.externals import joblib
-
+# main imports
 import numpy as np
-
-from ipfml import processing
-from PIL import Image
-
 import sys, os, argparse
 import subprocess
 import time

+# models imports
+from sklearn.externals import joblib
+
+# image processing imports
+from PIL import Image
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module

-from modules.utils import config as cfg
+import custom_config as cfg

+# variables and parameters
 threshold_map_folder      = cfg.threshold_map_folder
 threshold_map_file_prefix = cfg.threshold_map_folder + "_"

@@ -26,7 +30,7 @@ def main():

     parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
     parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=cfg.metric_choices_labels)
+    parser.add_argument('--feature', type=str, help='Feature data choice', choices=cfg.features_choices_labels)
     parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=cfg.normalization_choices)

     args = parser.parse_args()
@@ -41,7 +45,7 @@ def main():

     begin, end = p_interval

-    bash_cmd = "bash testModelByScene.sh '" + str(begin) + "' '" + str(end) + "' '" + p_model_file + "' '" + p_mode + "' '" + p_metric + "'"
+    bash_cmd = "bash others/testModelByScene.sh '" + str(begin) + "' '" + str(end) + "' '" + p_model_file + "' '" + p_mode + "' '" + p_metric + "'"
     print(bash_cmd)

     ## call command ##

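The command string built above is handed to a shell subprocess. A sketch of the call pattern with placeholder arguments, assuming the script reports its results on stdout:

    import subprocess

    bash_cmd = "bash others/testModelByScene.sh '0' '200' 'saved_models/model.joblib' 'svdne' 'lab'"
    result = subprocess.run(bash_cmd, shell=True, capture_output=True, text=True)
    print(result.stdout)
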
+ 18 - 11
save_model_result_in_md_maxwell.py

@@ -1,3 +1,13 @@
+# main imports
+import numpy as np
+import pandas as pd
+
+import sys, os, argparse
+import subprocess
+import time
+import json
+
+# models imports
 from sklearn.utils import shuffle
 from sklearn.externals import joblib
 from sklearn.metrics import accuracy_score, f1_score, recall_score, roc_auc_score
@@ -12,19 +22,16 @@ from keras.wrappers.scikit_learn import KerasClassifier
 from keras import backend as K
 from keras.models import model_from_json

-import numpy as np
-import pandas as pd
-
+# image processing imports
 from ipfml import processing
 from PIL import Image

-import sys, os, argparse
-import subprocess
-import time
-import json
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module

-from modules.utils import config as cfg
+import custom_config as cfg

+# variables and parameters
 threshold_map_folder        = cfg.threshold_map_folder
 threshold_map_file_prefix   = cfg.threshold_map_folder + "_"

@@ -60,7 +67,7 @@ def main():
     # call model and get global result in scenes
     begin, end = p_interval

-    bash_cmd = "bash testModelByScene_maxwell.sh '" + str(begin) + "' '" + str(end) + "' '" + p_model_file + "' '" + p_mode + "' '" + p_metric + "'"
+    bash_cmd = "bash others/testModelByScene_maxwell.sh '" + str(begin) + "' '" + str(end) + "' '" + p_model_file + "' '" + p_mode + "' '" + p_metric + "'"

     print(bash_cmd)

@@ -117,8 +124,8 @@ def main():
     # Keep model information to compare
     current_model_name = p_model_file.split('/')[-1].replace(model_ext, '')

-    # Prepare writing in .csv file
-    output_final_file_path = os.path.join(markdowns_folder, final_csv_model_comparisons)
+    # Prepare writing in .csv file into results folder
+    output_final_file_path = os.path.join(cfg.results_information_folder, final_csv_model_comparisons)
     output_final_file = open(output_final_file_path, "a")

     print(current_model_name)

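The comparison file now lives under `cfg.results_information_folder` and is opened in append mode, one row per evaluated model. The pattern, with an assumed file name and illustrative columns:

    import os

    results_information_folder = 'results'                   # cfg.results_information_folder in the project
    final_csv_model_comparisons = 'models_comparisons.csv'   # assumed name, defined elsewhere

    output_final_file_path = os.path.join(results_information_folder, final_csv_model_comparisons)
    with open(output_final_file_path, "a") as output_final_file:
        output_final_file.write('my_model;0.93;0.91' + '\n')   # model name plus scores
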
+ 1 - 1
testModelByScene.sh

@@ -55,7 +55,7 @@ for scene in {"A","B","C","D","E","F","G","H","I"}; do

   FILENAME="data/data_${INPUT_MODE}_${INPUT_METRIC}_B${INPUT_BEGIN}_E${INPUT_END}_scene${scene}"

-  python generate_data_model.py --output ${FILENAME} --interval "${INPUT_BEGIN},${INPUT_END}" --kind ${INPUT_MODE} --metric ${INPUT_METRIC} --scenes "${scene}" --zones "${zones}" --percent 1 --sep ";" --rowindex "0"
+  python generate/generate_data_model.py --output ${FILENAME} --interval "${INPUT_BEGIN},${INPUT_END}" --kind ${INPUT_MODE} --metric ${INPUT_METRIC} --scenes "${scene}" --zones "${zones}" --percent 1 --sep ";" --rowindex "0"

   python prediction_scene.py --data "$FILENAME.train" --model ${INPUT_MODEL} --output "${INPUT_MODEL}_Scene${scene}_mode_${INPUT_MODE}_metric_${INPUT_METRIC}.prediction" --scene ${scene}


+ 1 - 1
testModelByScene_maxwell.sh

@@ -63,7 +63,7 @@ for scene in {"A","D","G","H"}; do

   FILENAME="data/data_${INPUT_MODE}_${INPUT_METRIC}_B${INPUT_BEGIN}_E${INPUT_END}_scene${scene}"

-  python generate_data_model.py --output ${FILENAME} --interval "${INPUT_BEGIN},${INPUT_END}" --kind ${INPUT_MODE} --metric ${INPUT_METRIC} --scenes "${scene}" --zones "${zones}" --percent 1
+  python generate/generate_data_model.py --output ${FILENAME} --interval "${INPUT_BEGIN},${INPUT_END}" --kind ${INPUT_MODE} --metric ${INPUT_METRIC} --scenes "${scene}" --zones "${zones}" --percent 1

   python prediction_scene.py --data "$FILENAME.train" --model ${INPUT_MODEL} --output "${INPUT_MODEL}_Scene${scene}_mode_${INPUT_MODE}_metric_${INPUT_METRIC}.prediction" --scene ${scene}


+ 18 - 13
predict_noisy_image_svd.py

@@ -1,20 +1,25 @@
-from sklearn.externals import joblib
-
+# main imports
+import sys, os, argparse, json
 import numpy as np

+# models imports
+from keras.models import model_from_json
+from sklearn.externals import joblib
+
+# image processing imports
 from ipfml import processing, utils
 from PIL import Image

-import sys, os, argparse, json
-
-from keras.models import model_from_json
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module

-from modules.utils import config as cfg
-from modules.utils import data as dt
+import custom_config as cfg
+from data_attributes import get_svd_data

+# variables and parameters
 path                  = cfg.dataset_path
 min_max_ext           = cfg.min_max_filename_extension
-metric_choices        = cfg.metric_choices_labels
+features_choices      = cfg.features_choices_labels
 normalization_choices = cfg.normalization_choices

 custom_min_max_folder = cfg.min_max_custom_folder
@@ -28,7 +33,7 @@ def main():
    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)

    args = parser.parse_args()
@@ -37,7 +42,7 @@ def main():
    p_model_file = args.model
    p_interval   = list(map(int, args.interval.split(',')))
    p_mode       = args.mode
-    p_metric     = args.metric
+    p_feature    = args.feature
    p_custom     = args.custom

    if '.joblib' in p_model_file:
@@ -69,12 +74,12 @@ def main():

             model.compile(loss='binary_crossentropy',
                         optimizer='adam',
                         metrics=['accuracy'])

    # load image
    img = Image.open(p_img_file)

-    data = dt.get_svd_data(p_metric, img)
+    data = get_svd_data(p_feature, img)

    # get interval values
    begin, end = p_interval
@@ -109,7 +114,7 @@ def main():
        if p_mode == 'svdne':

            # set min_max_filename if custom use
-            min_max_file_path = path + '/' + p_metric + min_max_ext
+            min_max_file_path = path + '/' + p_feature + min_max_ext

            # need to read min_max_file
            file_path = os.path.join(os.path.dirname(__file__), min_max_file_path)
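For context, this script dispatches on the model file extension: sklearn dumps load with `joblib`, while Keras models are rebuilt from their JSON architecture and then compiled. A self-contained sketch of that dispatch (the `.h5` weights path is an assumption mirroring the usual Keras save convention, not confirmed by this diff):

from sklearn.externals import joblib
from keras.models import model_from_json

def load_model(model_path):
    # sklearn models are serialized as .joblib dumps
    if model_path.endswith('.joblib'):
        return joblib.load(model_path)
    # keras models: .json architecture plus separate weights, then compile
    with open(model_path, 'r') as f:
        model = model_from_json(f.read())
    model.load_weights(model_path.replace('.json', '.h5'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model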

+ 44 - 44
predict_seuil_expe.py

@@ -1,17 +1,24 @@
-from sklearn.externals import joblib
-
+# main imports
+import sys, os, argparse
+import subprocess
+import time
 import numpy as np

-from ipfml import processing, utils
+# image processing imports
+from ipfml.processing import segmentation
 from PIL import Image

-import sys, os, argparse
-import subprocess
-import time
+# models imports
+from sklearn.externals import joblib
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module

-from modules.utils import config as cfg
+import custom_config as cfg
+from modules.utils import data as dt

-config_filename           = cfg.config_filename
+
+# variables and parameters
 scenes_path               = cfg.dataset_path
 min_max_filename          = cfg.min_max_filename_extension
 threshold_expe_filename   = cfg.seuil_expe_filename
@@ -21,7 +28,7 @@ threshold_map_file_prefix = cfg.threshold_map_folder + "_"

 zones                     = cfg.zones_indices
 normalization_choices     = cfg.normalization_choices
-metric_choices            = cfg.metric_choices_labels
+features_choices          = cfg.features_choices_labels

 tmp_filename              = '/tmp/__model__img_to_predict.png'

@@ -36,8 +43,8 @@ def main():
    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
-    #parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
+    parser.add_argument('--feature', type=str, help='Feature data choice', choices=features_choices)
+    parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)

    args = parser.parse_args()
@@ -45,8 +52,8 @@ def main():
    p_interval   = list(map(int, args.interval.split(',')))
    p_model_file = args.model
    p_mode       = args.mode
-    p_metric     = args.metric
-    #p_limit      = args.limit
+    p_feature    = args.feature
+    p_limit      = args.limit_detection
    p_custom     = args.custom

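Note on the now-active `--limit_detection` flag: argparse derives the attribute name from the long option, so the value must be read back as `args.limit_detection` (there is no `args.limit`). A one-line check:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--limit_detection', type=int, default=2)
assert parser.parse_args([]).limit_detection == 2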
    scenes = os.listdir(scenes_path)
@@ -59,20 +66,17 @@ def main():

        scene_path = os.path.join(scenes_path, folder_scene)

-        config_path = os.path.join(scene_path, config_filename)
-
-        with open(config_path, "r") as config_file:
-            last_image_name = config_file.readline().strip()
-            prefix_image_name = config_file.readline().strip()
-            start_index_image = config_file.readline().strip()
-            end_index_image = config_file.readline().strip()
-            step_counter = int(config_file.readline().strip())
-
        threshold_expes = []
        threshold_expes_detected = []
        threshold_expes_counter = []
        threshold_expes_found = []

+        # get all images of folder
+        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+
+        start_quality_image = dt.get_scene_image_quality(scene_images[0])
+        end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
+
        # get zones list info
        for index in zones:
            index_str = str(index)
@@ -89,29 +93,26 @@ def main():
                # Initialize default data to get detected model threshold found
                threshold_expes_detected.append(False)
                threshold_expes_counter.append(0)
-                threshold_expes_found.append(int(end_index_image)) # by default use max
-
-        current_counter_index = int(start_index_image)
-        end_counter_index = int(end_index_image)
+                threshold_expes_found.append(end_quality_image) # by default use max

-        print(current_counter_index)
        check_all_done = False

-        while(current_counter_index <= end_counter_index and not check_all_done):
-
-            current_counter_index_str = str(current_counter_index)
-
-            while len(start_index_image) > len(current_counter_index_str):
-                current_counter_index_str = "0" + current_counter_index_str
-
-            img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
+        # for each image
+        for img_path in scene_images:

            current_img = Image.open(img_path)
-            img_blocks = processing.divide_in_blocks(current_img, (200, 200))
+            current_quality_image = dt.get_scene_image_quality(img_path)
+            current_image_postfix = dt.get_scene_image_postfix(img_path)
+
+            img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))

            check_all_done = all(d == True for d in threshold_expes_detected)

+            if check_all_done:
+                break
+
            for id_block, block in enumerate(img_blocks):

                # check only if necessary for this scene (not already detected)
@@ -124,7 +125,7 @@ def main():
                                    " --interval '" + p_interval + \
                                    "' --model " + p_model_file  + \
                                    " --mode " + p_mode + \
-                                    " --metric " + p_metric
+                                    " --feature " + p_feature

                    # specify use of custom file for min max normalization
                    if p_custom:
@@ -148,11 +149,10 @@ def main():

                    if threshold_expes_counter[id_block] == p_limit:
                        threshold_expes_detected[id_block] = True
-                        threshold_expes_found[id_block] = current_counter_index
+                        threshold_expes_found[id_block] = current_quality_image

-                    print(str(id_block) + " : " + str(current_counter_index) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
+                    print(str(id_block) + " : " + current_image_postfix + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))

-            current_counter_index += step_counter
            print("------------------------")
            print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)))
            print("------------------------")
@@ -192,12 +192,12 @@ def main():
        avg_abs_dist = sum(abs_dist) / len(abs_dist)

        f_map.write('\nScene information : ')
-        f_map.write('\n- BEGIN : ' + str(start_index_image))
-        f_map.write('\n- END : ' + str(end_index_image))
+        f_map.write('\n- BEGIN : ' + str(start_quality_image))
+        f_map.write('\n- END : ' + str(end_quality_image))

        f_map.write('\n\nDistances information : ')
        f_map.write('\n- MIN : ' + str(min_abs_dist))
        f_map.write('\n- MAX : ' + str(max_abs_dist))
        f_map.write('\n- AVG : ' + str(avg_abs_dist))

        f_map.write('\n\nOther information : ')
@@ -209,7 +209,7 @@ def main():
        print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)) + " Done..")
        print("------------------------")

-        time.sleep(10)
+        time.sleep(1)


 if __name__== "__main__":
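The reworked loop reads the rendering quality straight from each filename through `dt.get_scene_image_quality` / `dt.get_scene_image_postfix` instead of a per-scene config file. A plausible sketch of such helpers, assuming the quality index is the zero-padded, underscore-separated tail of the filename (example name hypothetical):

import os

def get_scene_image_postfix(img_path):
    # keep the zero-padded index as text, e.g. 'SdbCenter_00350.png' -> '00350'
    filename = os.path.basename(img_path)
    return filename.split('_')[-1].replace('.png', '')

def get_scene_image_quality(img_path):
    # numeric value of that index, e.g. '00350' -> 350
    return int(get_scene_image_postfix(img_path))

Because the index is zero-padded to a fixed width, the plain `sorted(...)` over filenames above also orders the images by increasing quality.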

+ 39 - 42
predict_seuil_expe_maxwell.py

@@ -1,18 +1,24 @@
-from sklearn.externals import joblib
-
+# main imports
+import sys, os, argparse
+import subprocess
+import time
 import numpy as np

-from ipfml import processing
+# image processing imports
+from ipfml.processing import segmentation
 from PIL import Image

-import sys, os, argparse
-import subprocess
-import time
+# models imports
+from sklearn.externals import joblib
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module

+import custom_config as cfg
+from modules.utils import data as dt

-from modules.utils import config as cfg

-config_filename           = cfg.config_filename
+# variables and parameters
 scenes_path               = cfg.dataset_path
 min_max_filename          = cfg.min_max_filename_extension
 threshold_expe_filename   = cfg.seuil_expe_filename
@@ -23,7 +29,7 @@ threshold_map_file_prefix = cfg.threshold_map_folder + "_"

 zones                     = cfg.zones_indices
 maxwell_scenes            = cfg.maxwell_scenes_names
 normalization_choices     = cfg.normalization_choices
-metric_choices            = cfg.metric_choices_labels
+features_choices          = cfg.features_choices_labels

 tmp_filename              = '/tmp/__model__img_to_predict.png'

@@ -39,8 +45,8 @@ def main():
    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
-    #parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
+    parser.add_argument('--feature', type=str, help='Feature data choice', choices=features_choices)
+    parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)

    args = parser.parse_args()
@@ -48,8 +54,8 @@ def main():
    p_interval   = list(map(int, args.interval.split(',')))
    p_model_file = args.model
    p_mode       = args.mode
-    p_metric     = args.metric
-    #p_limit      = args.limit
+    p_feature    = args.feature
+    p_limit      = args.limit_detection
    p_custom     = args.custom

    scenes = os.listdir(scenes_path)
@@ -65,20 +71,18 @@ def main():

            scene_path = os.path.join(scenes_path, folder_scene)

-            config_path = os.path.join(scene_path, config_filename)
-
-            with open(config_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
-
            threshold_expes = []
            threshold_expes_detected = []
            threshold_expes_counter = []
            threshold_expes_found = []

+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+
+            start_quality_image = dt.get_scene_image_quality(scene_images[0])
+            end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
+
            # get zones list info
            for index in zones:
                index_str = str(index)
@@ -95,29 +99,23 @@ def main():
                    # Initialize default data to get detected model threshold found
                    threshold_expes_detected.append(False)
                    threshold_expes_counter.append(0)
-                    threshold_expes_found.append(int(end_index_image)) # by default use max
-
-            current_counter_index = int(start_index_image)
-            end_counter_index = int(end_index_image)
+                    threshold_expes_found.append(end_quality_image) # by default use max

-            print(current_counter_index)
            check_all_done = False

-            while(current_counter_index <= end_counter_index and not check_all_done):
-
-                current_counter_index_str = str(current_counter_index)
-
-                while len(start_index_image) > len(current_counter_index_str):
-                    current_counter_index_str = "0" + current_counter_index_str
-
-                img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
+            # for each image
+            for img_path in scene_images:

                current_img = Image.open(img_path)
-                img_blocks = processing.divide_in_blocks(current_img, (200, 200))
+                current_postfix_image = dt.get_scene_image_postfix(img_path)

+                img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))

                check_all_done = all(d == True for d in threshold_expes_detected)

+                if check_all_done:
+                    break
+
                for id_block, block in enumerate(img_blocks):

                    # check only if necessary for this scene (not already detected)
@@ -130,7 +128,7 @@ def main():
                                        " --interval '" + p_interval + \
                                        "' --model " + p_model_file  + \
                                        " --mode " + p_mode + \
-                                        " --metric " + p_metric
+                                        " --feature " + p_feature

                        # specify use of custom file for min max normalization
                        if p_custom:
@@ -153,11 +151,10 @@ def main():

                        if threshold_expes_counter[id_block] == p_limit:
                            threshold_expes_detected[id_block] = True
-                            threshold_expes_found[id_block] = current_counter_index
+                            threshold_expes_found[id_block] = int(current_postfix_image)

-                        print(str(id_block) + " : " + str(current_counter_index) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
+                        print(str(id_block) + " : " + current_postfix_image + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))

-                current_counter_index += step_counter
                print("------------------------")
                print("Scene " + str(id_scene + 1) + "/" + str(len(maxwell_scenes)))
                print("------------------------")
@@ -197,8 +194,8 @@ def main():
            avg_abs_dist = sum(abs_dist) / len(abs_dist)

            f_map.write('\nScene information : ')
-            f_map.write('\n- BEGIN : ' + str(start_index_image))
-            f_map.write('\n- END : ' + str(end_index_image))
+            f_map.write('\n- BEGIN : ' + str(start_quality_image))
+            f_map.write('\n- END : ' + str(end_quality_image))

            f_map.write('\n\nDistances information : ')
            f_map.write('\n- MIN : ' + str(min_abs_dist))
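Once every zone of a scene has a detected threshold, scanning the remaining quality levels is wasted work, hence the added early `break`. As an aside, `all()` accepts the booleans directly, so the `d == True` comparison in the detection check is redundant; an equivalent, more idiomatic form:

# stop at the current quality level once every zone's threshold is detected
check_all_done = all(threshold_expes_detected)
if check_all_done:
    break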

+ 38 - 43
predict_seuil_expe_maxwell_curve.py

@@ -1,17 +1,24 @@
-from sklearn.externals import joblib
-
+# main imports
+import sys, os, argparse
+import subprocess
+import time
 import numpy as np

-from ipfml import processing
+# image processing imports
+from ipfml.processing import segmentation
 from PIL import Image

-import sys, os, argparse
-import subprocess
-import time
+# models imports
+from sklearn.externals import joblib
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module

-from modules.utils import config as cfg
+import custom_config as cfg
+from modules.utils import data as dt

-config_filename           = cfg.config_filename
+
+# variables and parameters
 scenes_path               = cfg.dataset_path
 min_max_filename          = cfg.min_max_filename_extension
 threshold_expe_filename   = cfg.seuil_expe_filename
@@ -22,7 +29,7 @@ threshold_map_file_prefix = cfg.threshold_map_folder + "_"

 zones                     = cfg.zones_indices
 maxwell_scenes            = cfg.maxwell_scenes_names
 normalization_choices     = cfg.normalization_choices
-metric_choices            = cfg.metric_choices_labels
+features_choices          = cfg.features_choices_labels

 simulation_curves_zones   = "simulation_curves_zones_"
 tmp_filename              = '/tmp/__model__img_to_predict.png'
@@ -39,16 +46,17 @@ def main():
    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
    #parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)

    args = parser.parse_args()

-    p_interval   = list(map(int, args.interval.split(',')))
+    # keep p_interval as it is
+    p_interval   = args.interval
    p_model_file = args.model
    p_mode       = args.mode
-    p_metric     = args.metric
+    p_feature    = args.feature
    #p_limit      = args.limit
    p_custom     = args.custom

@@ -67,19 +75,18 @@ def main():

            scene_path = os.path.join(scenes_path, folder_scene)

-            config_path = os.path.join(scene_path, config_filename)
-
-            with open(config_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
-
            threshold_expes = []
            threshold_expes_found = []
            block_predictions_str = []

+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+
+            start_quality_image = dt.get_scene_image_quality(scene_images[0])
+            end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
+            # using first two images find the step of quality used
+            quality_step_image  = dt.get_scene_image_quality(scene_images[1]) - start_quality_image
+
            # get zones list info
            for index in zones:
                index_str = str(index)
@@ -94,26 +101,18 @@ def main():
                    threshold_expes.append(threshold)

                    # Initialize default data to get detected model threshold found
-                    threshold_expes_found.append(int(end_index_image)) # by default use max
+                    threshold_expes_found.append(end_quality_image) # by default use max

-                block_predictions_str.append(index_str + ";" + p_model_file + ";" + str(threshold) + ";" + str(start_index_image) + ";" + str(step_counter))
+                block_predictions_str.append(index_str + ";" + p_model_file + ";" + str(threshold) + ";" + str(start_quality_image) + ";" + str(quality_step_image))

-            current_counter_index = int(start_index_image)
-            end_counter_index = int(end_index_image)

-            print(current_counter_index)
-
-            while(current_counter_index <= end_counter_index):
-
-                current_counter_index_str = str(current_counter_index)
-
-                while len(start_index_image) > len(current_counter_index_str):
-                    current_counter_index_str = "0" + current_counter_index_str
-
-                img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
+            # for each image
+            for img_path in scene_images:

                current_img = Image.open(img_path)
-                img_blocks = processing.divide_in_blocks(current_img, (200, 200))
+                current_quality_image = dt.get_scene_image_quality(img_path)
+
+                img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))

                for id_block, block in enumerate(img_blocks):

@@ -123,11 +122,8 @@ def main():
                        tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
                        block.save(tmp_file_path)

-                        python_cmd = "python predict_noisy_image_svd.py --image " + tmp_file_path + \
-                                        " --interval '" + p_interval + \
-                                        "' --model " + p_model_file  + \
-                                        " --mode " + p_mode + \
-                                        " --metric " + p_metric
+                        python_cmd_line = "python predict_noisy_image_svd.py --image {0} --interval '{1}' --model {2} --mode {3} --feature {4}"
+                        python_cmd = python_cmd_line.format(tmp_file_path, p_interval, p_model_file, p_mode, p_feature)

                        # specify use of custom file for min max normalization
                        if p_custom:
@@ -146,9 +142,8 @@ def main():
                        # save here in specific file of block all the predictions done
                        block_predictions_str[id_block] = block_predictions_str[id_block] + ";" + str(prediction)

-                        print(str(id_block) + " : " + str(current_counter_index) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
+                        print(str(id_block) + " : " + str(current_quality_image) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))

-                current_counter_index += step_counter
                print("------------------------")
                print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)))
                print("------------------------")

+ 10 - 6
prediction_scene.py

@@ -1,8 +1,11 @@
-from sklearn.externals import joblib
-
+# main imports
+import sys, os, argparse
 import numpy as np
-
+import json
 import pandas as pd
+
+# models imports
+from sklearn.externals import joblib
 from sklearn.metrics import accuracy_score
 from keras.models import Sequential
 from keras.layers import Conv1D, MaxPooling1D
@@ -11,11 +14,12 @@ from keras import backend as K
 from keras.models import model_from_json
 from keras.wrappers.scikit_learn import KerasClassifier

-import sys, os, argparse
-import json
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module

-from modules.utils import config as cfg
+import custom_config as cfg

+# parameters and variables
 output_model_folder = cfg.saved_models_folder

 def main():
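A note on the recurring `sys.path.insert(0, '')` line: the empty string resolves against the current working directory, so scripts moved into subfolders (`generate/`, `display/`, `others/`, ...) can still import top-level modules such as `custom_config` as long as they are launched from the repository root. A minimal sketch of the effect:

import sys

# '' is resolved at import time relative to the working directory; running
# `python generate/generate_data_model.py` from the repository root therefore
# makes the top-level custom_config.py importable:
sys.path.insert(0, '')
import custom_config as cfg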

+ 1 - 1
runAll_display_data_scene.sh

@@ -2,6 +2,6 @@

 for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2"}; do
    for scene in {"A","D","G","H"}; do
-        python display_svd_data_scene.py --scene ${scene} --interval "0,800" --indices "0, 2000" --metric ${metric} --mode svdne --step 100 --norm 1 --ylim "0, 0.01"
+        python display/display_svd_data_scene.py --scene ${scene} --interval "0,800" --indices "0, 2000" --metric ${metric} --mode svdne --step 100 --norm 1 --ylim "0, 0.01"
    done
 done

+ 3 - 3
runAll_maxwell.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -19,6 +19,6 @@ fi
 for size in {"4","8","16","26","32","40"}; do

    for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2"}; do
-        bash generateAndTrain_maxwell.sh ${size} ${metric}
+        bash data_processing/generateAndTrain_maxwell.sh ${size} ${metric}
    done
 done

+ 4 - 4
runAll_maxwell_area.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -40,10 +40,10 @@ for nb_zones in {4,6,8,10,12}; do

                echo "${MODEL_NAME} results already generated..."
            else
-                python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}

-                python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
            fi
        done
    done

+ 4 - 4
runAll_maxwell_area_normed.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -40,10 +40,10 @@ for nb_zones in {4,6,8,10,12}; do

                echo "${MODEL_NAME} results already generated..."
            else
-                python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}

-                python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
            fi
        done
    done

+ 4 - 4
runAll_maxwell_corr_custom.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -41,11 +41,11 @@ for label in {"0","1"}; do

                            echo "${MODEL_NAME} results already generated..."
                        else
-                            python generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
+                            python generate/generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
                            python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}

                            # use of interval but it is not really an interval..
-                            python save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                            python others/save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                        fi
                    done
                done

+ 3 - 3
runAll_maxwell_custom.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -19,6 +19,6 @@ fi
 for size in {"4","8","16","26","32","40"}; do

    for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
-        bash generateAndTrain_maxwell_custom.sh ${size} ${metric}
+        bash data_processing/generateAndTrain_maxwell_custom.sh ${size} ${metric}
    done
 done

+ 3 - 3
runAll_maxwell_custom_center.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -19,6 +19,6 @@ fi
 for size in {"4","8","16","26","32","40"}; do

    for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
-        bash generateAndTrain_maxwell_custom_center.sh ${size} ${metric}
+        bash data_processing/generateAndTrain_maxwell_custom_center.sh ${size} ${metric}
    done
 done

+ 3 - 3
runAll_maxwell_custom_filters.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -20,6 +20,6 @@ for size in {"4","8","16","26","32","40","60","80"}; do

    # for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
    for metric in {"highest_sv_std_filters","lowest_sv_std_filters","highest_wave_sv_std_filters","lowest_sv_std_filters"}; do
-        bash generateAndTrain_maxwell_custom_filters.sh ${size} ${metric} &
+        bash data_processing/generateAndTrain_maxwell_custom_filters.sh ${size} ${metric} &
    done
 done

+ 3 - 3
runAll_maxwell_custom_filters_center.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -20,6 +20,6 @@ for size in {"4","8","16","26","32","40","60","80"}; do

    # for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
    for metric in {"highest_sv_std_filters","lowest_sv_std_filters","highest_wave_sv_std_filters","lowest_sv_std_filters"}; do
-        bash generateAndTrain_maxwell_custom_filters_center.sh ${size} ${metric} &
+        bash data_processing/generateAndTrain_maxwell_custom_filters_center.sh ${size} ${metric} &
    done
 done

+ 3 - 3
runAll_maxwell_custom_filters_split.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -20,6 +20,6 @@ for size in {"4","8","16","26","32","40","60","80"}; do

    #for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
    for metric in {"highest_sv_std_filters","lowest_sv_std_filters","highest_wave_sv_std_filters","lowest_sv_std_filters"}; do
-        bash generateAndTrain_maxwell_custom_filters_split.sh ${size} ${metric} &
+        bash data_processing/generateAndTrain_maxwell_custom_filters_split.sh ${size} ${metric} &
    done
 done

+ 3 - 3
runAll_maxwell_custom_filters_stats.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -19,4 +19,4 @@ fi
 size=26
 metric="filters_statistics"

-bash generateAndTrain_maxwell_custom_filters.sh ${size} ${metric} &
+bash data_processing/generateAndTrain_maxwell_custom_filters.sh ${size} ${metric} &

+ 3 - 3
runAll_maxwell_custom_filters_stats_center.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -19,4 +19,4 @@ fi
 size=26
 metric="filters_statistics"

-bash generateAndTrain_maxwell_custom_filters_center.sh ${size} ${metric} &
+bash data_processing/generateAndTrain_maxwell_custom_filters_center.sh ${size} ${metric} &

+ 3 - 3
runAll_maxwell_custom_filters_stats_split.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -19,4 +19,4 @@ fi
 size=26
 metric="filters_statistics"

-bash generateAndTrain_maxwell_custom_filters_split.sh ${size} ${metric} &
+bash data_processing/generateAndTrain_maxwell_custom_filters_split.sh ${size} ${metric} &

+ 3 - 3
runAll_maxwell_custom_split.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -19,6 +19,6 @@ fi
 for size in {"4","8","16","26","32","40"}; do

    for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
-        bash generateAndTrain_maxwell_custom_split.sh ${size} ${metric}
+        bash data_processing/generateAndTrain_maxwell_custom_split.sh ${size} ${metric}
    done
 done

+ 4 - 4
runAll_maxwell_keras.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -44,10 +44,10 @@ for metric in {"sub_blocks_stats","sub_blocks_stats_reduced","sub_blocks_area","
                echo "${MODEL_NAME} results already generated..."
            else
                echo "test"
-                #python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                #python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                #python deep_network_keras_svd.py --data ${FILENAME} --output ${MODEL_NAME} --size ${end_index}

-                #python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
+                #python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
            fi
        done
    done

+ 4 - 4
runAll_maxwell_keras_corr.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -40,11 +40,11 @@ for label in {"0","1"}; do

                        echo "${MODEL_NAME} results already generated..."
                    else
-                        python generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                        python generate/generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                        python deep_network_keras_svd.py --data ${FILENAME} --output ${MODEL_NAME} --size ${size}

                        # use of interval but it is not really an interval..
-                        python save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
+                        python others/save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
                    fi
                done
            done

+ 4 - 4
runAll_maxwell_keras_corr_custom.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -40,11 +40,11 @@ for label in {"0","1"}; do

                        echo "${MODEL_NAME} results already generated..."
                    else
-                        python generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
+                        python generate/generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
                        python deep_network_keras_svd.py --data ${FILENAME} --output ${MODEL_NAME} --size ${size}

                        # use of the --interval option, though it is not really an interval (it carries a start index and a vector size)
-                        python save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
+                        python others/save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
                    fi
                done
            done
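To make the reworded comment above concrete: in these corr scripts the value handed to --interval is "${start_index},${size}", i.e. a start index followed by the feature-vector length rather than two bounds. A hypothetical call (model name invented for illustration):

    python others/save_model_result_in_md_maxwell.py --interval "0,32" --model "saved_models/deep_keras_corr_example.json" --mode "svdn" --metric lab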

+ 4 - 4
runAll_maxwell_mscn_var.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -43,10 +43,10 @@ for nb_zones in {4,6,8,10,12}; do

                    echo "${MODEL_NAME} results already generated..."
                else
-                    python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                    python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                    python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}

-                    python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                    python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                fi
            done
        done

+ 4 - 4
runAll_maxwell_sub_blocks_stats.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -40,10 +40,10 @@ for nb_zones in {4,6,8,10,12}; do

                echo "${MODEL_NAME} results already generated..."
            else
-                python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}

-                python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
            fi
        done
    done

+ 4 - 4
runAll_maxwell_sub_blocks_stats_reduced.sh

@@ -1,7 +1,7 @@
 #! bin/bash

-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'

 erased=$1

@@ -40,10 +40,10 @@ for nb_zones in {4,6,8,10,12}; do

                echo "${MODEL_NAME} results already generated..."
            else
-                python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}

-                python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
            fi
        done
    done

+ 6 - 0
simulation/generate_all_simulate_curves.sh

@@ -0,0 +1,6 @@
+for file in "threshold_map"/*; do
+
+    echo ${file}
+
+    python display/display_simulation_curves.py --folder ${file}
+done
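The new script above iterates over threshold_map/ with unquoted expansions; a slightly hardened sketch of the same loop (the nullglob choice is an assumption about intent, not part of the commit):

    #! /bin/bash
    shopt -s nullglob                      # skip silently when threshold_map/ is empty

    for file in threshold_map/*; do
        echo "${file}"                     # quoted, in case a folder name contains spaces
        python display/display_simulation_curves.py --folder "${file}"
    done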

+ 2 - 2
run_maxwell_simulation.sh

@@ -38,13 +38,13 @@ for size in {"4","8","16","26","32","40"}; do
                             echo "Run simulation for model ${MODEL_NAME}"
                             echo "Run simulation for model ${MODEL_NAME}"
 
 
                             # by default regenerate model
                             # by default regenerate model
-                            python generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1
+                            python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1
 
 
                             python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
                             python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
 
                             python predict_seuil_expe_maxwell_curve.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2'
                             python predict_seuil_expe_maxwell_curve.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2'
 
 
-                            python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                            python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}

                        fi
                    done

+ 3 - 3
run_maxwell_simulation_corr_custom.sh

@@ -26,13 +26,13 @@ for label in {"0","1"}; do
                        if grep -xq "${MODEL_NAME}" "${simulate_models}"; then
                            echo "Run simulation for model ${MODEL_NAME}"

-                            python generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
+                            python generate/generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1

                            python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}

-                            python predict_seuil_expe_maxwell_curve.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
+                            python prediction/predict_seuil_expe_maxwell_curve.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}

-                            python save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                            python others/save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}

                        fi
                    done

+ 2 - 2
run_maxwell_simulation_custom.sh

@@ -39,13 +39,13 @@ for size in {"4","8","16","26","32","40"}; do
                             echo "Run simulation for model ${MODEL_NAME}"
                             echo "Run simulation for model ${MODEL_NAME}"
 
 
                             # by default regenerate model
                             # by default regenerate model
-                            python generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                            python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
 
 
                             python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
                             python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
 
                             python predict_seuil_expe_maxwell_curve.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
                             python predict_seuil_expe_maxwell_curve.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
 
 
-                            python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                            python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}

                        fi
                    done

+ 2 - 2
run_maxwell_simulation_custom_filters.sh

@@ -31,12 +31,12 @@ for size in {"4","8","16","26","32","40"}; do
                         echo "${MODEL_NAME} results already generated..."
                         echo "${MODEL_NAME} results already generated..."
                     else
                     else
                         # Use of already generated model
                         # Use of already generated model
-                        # python generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                        # python generate/generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                         # python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
                         # python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
 
                         python predict_seuil_expe_maxwell_curve.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
                         python predict_seuil_expe_maxwell_curve.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
 
 
-                        python save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                        python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                    fi
                done
            done

+ 39 - 0
simulation/run_maxwell_simulation_filters_statistics.sh

@@ -0,0 +1,39 @@
+#! bin/bash
+
+# file which contains model names we want to use for simulation
+simulate_models="simulate_models.csv"
+
+# selection of four scenes (only maxwell)
+scenes="A, D, G, H"
+
+size="26"
+
+# for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
+metric="filters_statistics"
+
+for nb_zones in {4,6,8,10,12}; do
+    for mode in {"svd","svdn","svdne"}; do
+        for model in {"svm_model","ensemble_model","ensemble_model_v2"}; do
+
+            FILENAME="data/${model}_N${size}_B0_E${size}_nb_zones_${nb_zones}_${metric}_${mode}"
+            MODEL_NAME="${model}_N${size}_B0_E${size}_nb_zones_${nb_zones}_${metric}_${mode}"
+            CUSTOM_MIN_MAX_FILENAME="N${size}_B0_E${size}_nb_zones_${nb_zones}_${metric}_${mode}_min_max"
+
+            echo $MODEL_NAME
+
+            # only compute if necessary (the server might go down mid-run, so guard each model)
+            if grep -q "${MODEL_NAME}" "${simulate_models}"; then
+
+                echo "${MODEL_NAME} results already generated..."
+            else
+                # Use of already generated model
+                # python generate/generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                # python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
+
+                python predict_seuil_expe_maxwell_curve.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --custom ${CUSTOM_MIN_MAX_FILENAME}
+
+                python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+            fi
+        done
+    done
+done
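One detail worth noting in this new script: the guard uses grep -q where the older simulation scripts use grep -xq. Without -x, grep matches substrings, so any model name that is a prefix of a line in simulate_models.csv also triggers the "already generated" branch. A quick illustration with an invented name:

    echo 'svm_model_N26_B0_E26_nb_zones_4_filters_statistics_svd' > /tmp/simulate_models.csv
    grep -q  'svm_model_N26_B0_E26_nb_zones_4' /tmp/simulate_models.csv && echo 'substring matches'
    grep -xq 'svm_model_N26_B0_E26_nb_zones_4' /tmp/simulate_models.csv || echo 'whole-line match fails'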

+ 2 - 2
run_maxwell_simulation_keras_corr_custom.sh

@@ -27,13 +27,13 @@ for label in {"0","1"}; do
                    if grep -xq "${MODEL_NAME}" "${simulate_models}"; then
                        echo "Run simulation for model ${MODEL_NAME}"

-                        python generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
+                        python generate/generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1

                        python deep_network_keras_svd.py --data ${FILENAME} --output ${MODEL_NAME} --size ${size}

                        python predict_seuil_expe_maxwell_curve.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}

-                        python save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
+                        python others/save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}

                    fi
                done

+ 2 - 2
run_maxwell_simulation_keras_custom.sh

@@ -24,13 +24,13 @@ for metric in {"sub_blocks_stats","sub_blocks_stats_reduced","sub_blocks_area","
                 echo "Run simulation for model ${MODEL_NAME}"
                 echo "Run simulation for model ${MODEL_NAME}"
 
 
                 # by default regenerate model
                 # by default regenerate model
-                python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
 
 
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
 
                 python predict_seuil_expe_maxwell_curve.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
                 python predict_seuil_expe_maxwell_curve.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
 
 
-                python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}

            fi
        done

+ 10 - 13
deep_network_keras_svd.py

@@ -1,3 +1,11 @@
+# main imports
+import sys, os
+import argparse
+import json
+import numpy as np
+import pandas as pd
+
+# models imports
 from keras.preprocessing.image import ImageDataGenerator
 from keras.models import Sequential
 from keras.layers import Conv1D, MaxPooling1D
@@ -8,20 +16,9 @@ from keras import backend as K
 from sklearn.utils import shuffle
 from sklearn.metrics import roc_auc_score

-import numpy as np
-import pandas as pd
-
-from ipfml import processing
-import modules.utils.config as cfg
-
-from PIL import Image
-
-import sys, os
-import argparse
-import json
+# modules and config imports
+import custom_config as cfg

-import subprocess
-import time

 def f1(y_true, y_pred):
     def recall(y_true, y_pred):
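The f1 metric whose body is truncated above follows the usual Keras-backend pattern of nested recall and precision helpers; a reconstruction of that common idiom for reference (a sketch, not a verbatim copy of the repo's function):

    from keras import backend as K

    def f1(y_true, y_pred):
        def recall(y_true, y_pred):
            true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
            possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
            return true_positives / (possible_positives + K.epsilon())

        def precision(y_true, y_pred):
            true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
            predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
            return true_positives / (predicted_positives + K.epsilon())

        p, r = precision(y_true, y_pred), recall(y_true, y_pred)
        return 2 * ((p * r) / (p + r + K.epsilon()))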

+ 12 - 6
train_model.py

@@ -1,3 +1,9 @@
+# main imports
+import numpy as np
+import pandas as pd
+import sys, os, argparse
+
+# models imports
 from sklearn.model_selection import train_test_split
 from sklearn.model_selection import GridSearchCV
 from sklearn.linear_model import LogisticRegression
@@ -9,17 +15,17 @@ from sklearn.externals import joblib
 from sklearn.metrics import accuracy_score, f1_score
 from sklearn.model_selection import cross_val_score

-import numpy as np
-import pandas as pd
-import sys, os, argparse
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module

-from modules.utils import config as cfg
-from modules import models as mdl
+import custom_config as cfg
+import models as mdl

+# variables and parameters
 saved_models_folder = cfg.saved_models_folder
 models_list         = cfg.models_names_list

-current_dirpath = os.getcwd()
+current_dirpath     = os.getcwd()
 output_model_folder = os.path.join(current_dirpath, saved_models_folder)
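The sys.path.insert(0, '') line above works because an empty string on the module search path is resolved as the current working directory: launching a script from the repository root therefore makes root-level modules such as custom_config and models importable even when the script itself lives in a subfolder. A small illustration (layout hypothetical):

    # layout:  repo/custom_config.py  and  repo/train/train_model.py
    # run as:  cd repo && python train/train_model.py
    import sys
    sys.path.insert(0, '')       # '' is interpreted as the current working directory

    import custom_config as cfg  # resolves against repo/ because we launched from there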