Browse source

Refactoring of the whole project

Jérôme BUISINE, 5 years ago
Parent commit: 914e4bc50c
66 files changed, 265 additions and 1052 deletions
  1. + 5 - 7      .gitignore
  2. + 7 - 6      README.md
  3. + 0 - 0      __init__.py
  4. + 1 - 1      analysis/corr_analysys.ipynb
  5. + 3 - 3      generateAndTrain_maxwell.sh
  6. + 3 - 3      generateAndTrain_maxwell_custom.sh
  7. + 3 - 3      generateAndTrain_maxwell_custom_center.sh
  8. + 3 - 3      generateAndTrain_maxwell_custom_filters.sh
  9. + 3 - 3      generateAndTrain_maxwell_custom_filters_center.sh
  10. + 3 - 3     generateAndTrain_maxwell_custom_filters_split.sh
  11. + 3 - 3     generateAndTrain_maxwell_custom_split.sh
  12. + 1 - 1     display/display_scenes_zones.py
  13. + 1 - 1     display/display_simulation_curves.py
  14. + 1 - 2     display/display_svd_area_data_scene.py
  15. + 1 - 2     display/display_svd_area_scenes.py
  16. + 1 - 2     display/display_svd_data_error_scene.py
  17. + 1 - 2     display/display_svd_data_scene.py
  18. + 1 - 2     display/display_svd_zone_scene.py
  19. + 9 - 0     display/generate_metrics_curve.sh
  20. + 1 - 2     generate/generate_all_data.py
  21. + 1 - 2     generate/generate_data_model.py
  22. + 1 - 2     generate/generate_data_model_corr_random.py
  23. + 1 - 2     generate/generate_data_model_random.py
  24. + 1 - 2     generate/generate_data_model_random_center.py
  25. + 1 - 2     generate/generate_data_model_random_split.py
  26. + 0 - 7     generate/generate_metrics_curve.sh
  27. + 0 - 6     generate_all_simulate_curves.sh
  28. + 76 - 0    models.py
  29. + 13 - 9    save_model_result_in_md.py
  30. + 18 - 11   save_model_result_in_md_maxwell.py
  31. + 1 - 1     testModelByScene.sh
  32. + 1 - 1     testModelByScene_maxwell.sh
  33. + 0 - 145   predict_noisy_image_svd.py
  34. + 0 - 216   predict_seuil_expe.py
  35. + 0 - 221   predict_seuil_expe_maxwell.py
  36. + 0 - 178   predict_seuil_expe_maxwell_curve.py
  37. + 0 - 110   prediction_scene.py
  38. + 1 - 1     runAll_display_data_scene.sh
  39. + 3 - 3     runAll_maxwell.sh
  40. + 4 - 4     runAll_maxwell_area.sh
  41. + 4 - 4     runAll_maxwell_area_normed.sh
  42. + 4 - 4     runAll_maxwell_corr_custom.sh
  43. + 3 - 3     runAll_maxwell_custom.sh
  44. + 3 - 3     runAll_maxwell_custom_center.sh
  45. + 3 - 3     runAll_maxwell_custom_filters.sh
  46. + 3 - 3     runAll_maxwell_custom_filters_center.sh
  47. + 3 - 3     runAll_maxwell_custom_filters_split.sh
  48. + 3 - 3     runAll_maxwell_custom_filters_stats.sh
  49. + 3 - 3     runAll_maxwell_custom_filters_stats_center.sh
  50. + 3 - 3     runAll_maxwell_custom_filters_stats_split.sh
  51. + 3 - 3     runAll_maxwell_custom_split.sh
  52. + 4 - 4     runAll_maxwell_keras.sh
  53. + 4 - 4     runAll_maxwell_keras_corr.sh
  54. + 4 - 4     runAll_maxwell_keras_corr_custom.sh
  55. + 4 - 4     runAll_maxwell_mscn_var.sh
  56. + 4 - 4     runAll_maxwell_sub_blocks_stats.sh
  57. + 4 - 4     runAll_maxwell_sub_blocks_stats_reduced.sh
  58. + 6 - 0     simulation/generate_all_simulate_curves.sh
  59. + 2 - 2     run_maxwell_simulation.sh
  60. + 3 - 3     run_maxwell_simulation_corr_custom.sh
  61. + 2 - 2     run_maxwell_simulation_custom.sh
  62. + 2 - 2     run_maxwell_simulation_custom_filters.sh
  63. + 2 - 2     run_maxwell_simulation_filters_statistics.sh
  64. + 2 - 2     run_maxwell_simulation_keras_corr_custom.sh
  65. + 2 - 2     run_maxwell_simulation_keras_custom.sh
  66. + 12 - 6    train_model.py

+ 5 - 7
.gitignore

@@ -10,18 +10,16 @@ results
 metric_curves
 .ipynb_checkpoints
 
-# simulate_models.csv
-
-fichiersSVD_light
+# dataset and files
+simulate_models*.csv
+dataset
 
+# python cache
 .python-version
 __pycache__
 
 # by default avoid model files and png files
-saved_models/*.h5
+saved_models
 *.png
 !saved_models/*.png
 .vscode
-
-# simulate models .csv file
-simulate_models*.csv

+ 7 - 6
README.md

@@ -9,7 +9,7 @@ pip install -r requirements.txt
 Generate all needed data for each metric (which requires the whole dataset. In order to get it, you need to contact us).
 
 ```bash
-python generate_all_data.py --metric all
+python generate/generate_all_data.py --metric all
 ```
 
 For noise detection, many metrics are available:
@@ -24,7 +24,7 @@ For noise detection, many metrics are available:
 
 You can also specify the metric you want to compute and an image step to skip some images:
 ```bash
-python generate_all_data.py --metric mscn --step 50
+python generate/generate_all_data.py --metric mscn --step 50
 ```
 
 - **step**: keep an image only if image id % 50 == 0 (the assumption is that keeping spaced data will let the model fit better).
@@ -38,7 +38,8 @@ python generate_all_data.py --metric mscn --step 50
 - **train_model.py**: script used to run a specific available model.
 - **data/\***: folder which will contain all *.train* & *.test* files in order to train model.
 - **saved_models/*.joblib**: all scikit learn models saved.
-- **models_info/***: all markdown files generated to get quick information about model performance and prediction. This folder contains also **model_comparisons.csv** obtained after running runAll_maxwell.sh script.
+- **models_info/***: all markdown files generated to get quick information about model performance and prediction. 
+- **results**: This folder contains **model_comparisons.csv** obtained after running the runAll_maxwell_*.sh scripts.
 - **modules/\***: contains all modules useful for the whole project (such as configuration variables)
 
 ### Scripts for generating data files
@@ -52,9 +53,9 @@ Two scripts can be used for generating data in order to fit model:
 **Remark**: Note that all python scripts have a *--help* option.
 
 ```
-python generate_data_model.py --help
+python generate/generate_data_model.py --help
 
-python generate_data_model.py --output xxxx --interval 0,20  --kind svdne --scenes "A, B, D" --zones "0, 1, 2" --percent 0.7 --sep: --rowindex 1 --custom custom_min_max_filename
+python generate/generate_data_model.py --output xxxx --interval 0,20  --kind svdne --scenes "A, B, D" --zones "0, 1, 2" --percent 0.7 --sep: --rowindex 1 --custom custom_min_max_filename
 ```
 
 Parameters explained:
@@ -162,7 +163,7 @@ The content will be divided into two parts:
 The previous script needs to have already been run to obtain and display threshold maps in this markdown file.
 
 ```bash
-python save_model_result_in_md.py --interval "xx,xx" --model saved_models/xxxx.joblib --mode ["svd", "svdn", "svdne"] --metric ['lab', 'mscn']
+python others/save_model_result_in_md.py --interval "xx,xx" --model saved_models/xxxx.joblib --mode ["svd", "svdn", "svdne"] --metric ['lab', 'mscn']
 ```
 
 Parameters list:

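As an aside on the `--step` parameter documented above: the filtering it describes boils down to a modulo test on the image id. A minimal sketch, assuming the id has already been parsed from the filename (the helper below is illustrative, not part of the project):

```python
# Minimal sketch of the --step behaviour: keep an image only when its
# numeric id is a multiple of the step (hypothetical helper, for illustration).
def keep_image(image_id: int, step: int = 50) -> bool:
    return image_id % step == 0

# with step 50, ids 0, 50, 100, 150, ... are kept
print([i for i in range(0, 200, 10) if keep_image(i)])  # [0, 50, 100, 150]
```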
+ 0 - 0
__init__.py


+ 1 - 1
analysis/corr_analysys.ipynb

@@ -39,7 +39,7 @@
     "data_file = \"data/temp.train\"\n",
     "interval = 16\n",
     "\n",
-    "!python generate_data_model_random.py --output data/temp --interval \"0, 16\"  --kind svdne --metric sub_blocks_area --scenes \"A, D, G, H\" --nb_zones 16 --random 1 --percent 1.0 --step 10 --each 1 --renderer maxwell --custom temp_min_max_values"
+    "!python generate/generate_data_model_random.py --output data/temp --interval \"0, 16\"  --kind svdne --metric sub_blocks_area --scenes \"A, D, G, H\" --nb_zones 16 --random 1 --percent 1.0 --step 10 --each 1 --renderer maxwell --custom temp_min_max_values"
    ]
   },
   {

+ 3 - 3
generateAndTrain_maxwell.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -54,11 +54,11 @@ for counter in {0..4}; do
 
                     echo "${MODEL_NAME} results already generated..."
                 else
-                    python generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --renderer "maxwell" --step 40 --random 1 --percent 1
+                    python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --renderer "maxwell" --step 40 --random 1 --percent 1
                     python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                     #python predict_seuil_expe_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2'
-                    python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                    python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                 fi
             done
         done

+ 3 - 3
generateAndTrain_maxwell_custom.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -55,11 +55,11 @@ for counter in {0..4}; do
 
                     echo "${MODEL_NAME} results already generated..."
                 else
-                    python generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                    python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                     python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                     #python predict_seuil_expe_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
-                    python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                    python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                 fi
             done
         done

+ 3 - 3
generateAndTrain_maxwell_custom_center.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -55,11 +55,11 @@ for counter in {0..4}; do
 
                     echo "${MODEL_NAME} results already generated..."
                 else
-                    python generate_data_model_random_center.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                    python generate/generate_data_model_random_center.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                     python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                     #python predict_seuil_expe_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
-                    python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                    python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                 fi
             done
         done

+ 3 - 3
generateAndTrain_maxwell_custom_filters.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -37,10 +37,10 @@ for nb_zones in {4,6,8,10,12}; do
 
                 echo "${MODEL_NAME} results already generated..."
             else
-                python generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                python save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
             fi
         done
     done

+ 3 - 3
generateAndTrain_maxwell_custom_filters_center.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -37,10 +37,10 @@ for nb_zones in {4,6,8,10,12}; do
 
                 echo "${MODEL_NAME} results already generated..."
             else
-                python generate_data_model_random_center.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                python generate/generate_data_model_random_center.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                python save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
             fi
         done
     done

+ 3 - 3
generateAndTrain_maxwell_custom_filters_split.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -37,10 +37,10 @@ for nb_zones in {4,6,8,10,12}; do
 
                 echo "${MODEL_NAME} results already generated..."
             else
-                python generate_data_model_random_split.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                python generate/generate_data_model_random_split.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                python save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
             fi
         done
     done

+ 3 - 3
generateAndTrain_maxwell_custom_split.sh

@@ -14,7 +14,7 @@ if [ -z "$2" ]
     exit 1
 fi
 
-result_filename="models_info/models_comparisons.csv"
+result_filename="results/models_comparisons.csv"
 VECTOR_SIZE=200
 size=$1
 metric=$2
@@ -55,11 +55,11 @@ for counter in {0..4}; do
 
                     echo "${MODEL_NAME} results already generated..."
                 else
-                    python generate_data_model_random_split.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                    python generate/generate_data_model_random_split.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                     python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                     #python predict_seuil_expe_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
-                    python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                    python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                 fi
             done
         done

+ 1 - 1
display/display_scenes_zones.py

@@ -10,7 +10,6 @@ from PIL import Image
 from skimage import color
 import matplotlib.pyplot as plt
 
-from data_attributes import get_svd_data
 
 from ipfml.processing import segmentation, transform, compression
 from ipfml import utils
@@ -20,6 +19,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 
 # variables and parameters

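This import move (pulling `from data_attributes import get_svd_data` below the `sys.path.insert` call) recurs in every script relocated by this commit. A sketch of the intent, assuming the scripts are launched from the repository root:

```python
# Pattern used by the relocated scripts: they now live in subfolders
# (display/, generate/, ...), so the project root is added to sys.path
# first; only then can root-level modules be imported.
import sys

sys.path.insert(0, '')  # trick to enable import of main folder module

import custom_config as cfg                 # root-level config module
from data_attributes import get_svd_data    # must come after the path insert
```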
+ 1 - 1
display/display_simulation_curves.py

@@ -5,12 +5,12 @@ import os, sys, argparse
 
 # image processing imports
 import matplotlib.pyplot as plt
-from data_attributes import get_svd_data
 
 # modules and config imports
 sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
+from data_attributes import get_svd_data
 
 
 # variables and parameters

+ 1 - 2
display/display_svd_area_data_scene.py

@@ -7,8 +7,6 @@ from PIL import Image
 from skimage import color
 import matplotlib.pyplot as plt
 
-from data_attributes import get_svd_data
-
 from ipfml.processing import segmentation, transform, compression
 from ipfml import utils
 import ipfml.iqa.fr as fr_iqa
@@ -18,6 +16,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 # getting configuration information
 zone_folder         = cfg.zone_folder

+ 1 - 2
display/display_svd_area_scenes.py

@@ -6,8 +6,6 @@ import numpy as np
 from PIL import Image
 import matplotlib.pyplot as plt
 
-from data_attributes import get_svd_data
-
 import ipfml.iqa.fr as fr_iqa
 from ipfml import utils
 
@@ -16,6 +14,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 # getting configuration information
 zone_folder         = cfg.zone_folder

+ 1 - 2
display/display_svd_data_error_scene.py

@@ -7,8 +7,6 @@ from PIL import Image
 from skimage import color
 import matplotlib.pyplot as plt
 
-from data_attributes import get_svd_data
-
 import ipfml.iqa.fr as fr_iqa
 from ipfml import utils
 
@@ -17,6 +15,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 # getting configuration information
 zone_folder         = cfg.zone_folder

+ 1 - 2
display/display_svd_data_scene.py

@@ -6,8 +6,6 @@ import numpy as np
 from PIL import Image
 import matplotlib.pyplot as plt
 
-from data_attributes import get_svd_data
-
 import ipfml.iqa.fr as fr_iqa
 from ipfml import utils
 
@@ -16,6 +14,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 # getting configuration information
 zone_folder         = cfg.zone_folder

+ 1 - 2
display/display_svd_zone_scene.py

@@ -6,8 +6,6 @@ import numpy as np
 from PIL import Image
 import matplotlib.pyplot as plt
 
-from data_attributes import get_svd_data
-
 from ipfml.processing import segmentation
 import ipfml.iqa.fr as fr_iqa
 from ipfml import utils
@@ -17,6 +15,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 # getting configuration information
 zone_folder         = cfg.zone_folder

+ 9 - 0
display/generate_metrics_curve.sh

@@ -0,0 +1,9 @@
+#! /bin/bash
+
+for feature in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2"}; do
+
+    python display/display_svd_data_scene.py --scene D --interval "0, 800" --indices "0, 1200" --feature ${feature} --mode svdne --step 100 --norm 1 --error mse --ylim "0, 0.1"
+
+done
+
+

+ 1 - 2
generate/generate_all_data.py

@@ -8,8 +8,6 @@ import json
 # image processing imports
 from PIL import Image
 
-from data_attributes import get_svd_data
-
 from ipfml.processing import transform, segmentation
 from ipfml import utils
 
@@ -18,6 +16,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 
 # getting configuration information

+ 1 - 2
generate/generate_data_model.py

@@ -7,8 +7,6 @@ import random
 # image processing imports
 from PIL import Image
 
-from data_attributes import get_svd_data
-
 from ipfml import utils
 
 # modules imports
@@ -16,6 +14,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 
 # getting configuration information

+ 1 - 2
generate/generate_data_model_corr_random.py

@@ -8,8 +8,6 @@ import random
 # image processing imports
 from PIL import Image
 
-from data_attributes import get_svd_data
-
 from ipfml import utils
 
 # modules imports
@@ -17,6 +15,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 
 # getting configuration information

+ 1 - 2
generate/generate_data_model_random.py

@@ -7,8 +7,6 @@ import random
 # image processing imports
 from PIL import Image
 
-from data_attributes import get_svd_data
-
 from ipfml import utils
 
 # modules imports
@@ -16,6 +14,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 
 # getting configuration information

+ 1 - 2
generate/generate_data_model_random_center.py

@@ -7,8 +7,6 @@ import random
 # image processing imports
 from PIL import Image
 
-from data_attributes import get_svd_data
-
 from ipfml import utils
 
 # modules imports
@@ -16,6 +14,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 
 # getting configuration information

+ 1 - 2
generate/generate_data_model_random_split.py

@@ -7,8 +7,6 @@ import random
 # image processing imports
 from PIL import Image
 
-from data_attributes import get_svd_data
-
 from ipfml import utils
 
 # modules imports
@@ -16,6 +14,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
 from modules.utils import data as dt
+from data_attributes import get_svd_data
 
 
 # getting configuration information

+ 0 - 7
generate/generate_metrics_curve.sh

@@ -1,7 +0,0 @@
-for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2"}; do
-
-    python display_svd_data_scene.py --scene D --interval "0, 800" --indices "0, 1200" --metric ${metric} --mode svdne --step 100 --norm 1 --error mse --ylim "0, 0.1"
-
-done
-
-

+ 0 - 6
generate_all_simulate_curves.sh

@@ -1,6 +0,0 @@
-for file in "threshold_map"/*; do
-
-    echo ${file}
-
-    python display_simulation_curves.py --folder ${file}
-done

+ 76 - 0
models.py

@@ -0,0 +1,76 @@
+# models imports
+from sklearn.model_selection import GridSearchCV
+from sklearn.linear_model import LogisticRegression
+from sklearn.ensemble import RandomForestClassifier, VotingClassifier
+from sklearn.neighbors import KNeighborsClassifier
+from sklearn.ensemble import GradientBoostingClassifier
+import sklearn.svm as svm
+
+
+def _get_best_model(X_train, y_train):
+
+    Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
+    gammas = [0.001, 0.01, 0.1, 1, 5, 10, 100]
+    param_grid = {'kernel':['rbf'], 'C': Cs, 'gamma' : gammas}
+
+    svc = svm.SVC(probability=True)
+    clf = GridSearchCV(svc, param_grid, cv=10, scoring='accuracy', verbose=10)
+
+    clf.fit(X_train, y_train)
+
+    model = clf.best_estimator_
+
+    return model
+
+def svm_model(X_train, y_train):
+
+    return _get_best_model(X_train, y_train)
+
+
+def ensemble_model(X_train, y_train):
+
+    svm_model = _get_best_model(X_train, y_train)
+
+    lr_model = LogisticRegression(solver='liblinear', multi_class='ovr', random_state=1)
+    rf_model = RandomForestClassifier(n_estimators=100, random_state=1)
+
+    ensemble_model = VotingClassifier(estimators=[
+       ('svm', svm_model), ('lr', lr_model), ('rf', rf_model)], voting='soft', weights=[1,1,1])
+
+    ensemble_model.fit(X_train, y_train)
+
+    return ensemble_model
+
+
+def ensemble_model_v2(X_train, y_train):
+
+    svm_model = _get_best_model(X_train, y_train)
+    knc_model = KNeighborsClassifier(n_neighbors=2)
+    gbc_model = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
+    lr_model = LogisticRegression(solver='liblinear', multi_class='ovr', random_state=1)
+    rf_model = RandomForestClassifier(n_estimators=100, random_state=1)
+
+    ensemble_model = VotingClassifier(estimators=[
+       ('lr', lr_model),
+       ('knc', knc_model),
+       ('gbc', gbc_model),
+       ('svm', svm_model),
+       ('rf', rf_model)],
+       voting='soft', weights=[1, 1, 1, 1, 1])
+
+    ensemble_model.fit(X_train, y_train)
+
+    return ensemble_model
+
+def get_trained_model(choice, X_train, y_train):
+
+    if choice == 'svm_model':
+        return svm_model(X_train, y_train)
+
+    if choice == 'ensemble_model':
+        return ensemble_model(X_train, y_train)
+
+    if choice == 'ensemble_model_v2':
+        return ensemble_model_v2(X_train, y_train)
+
+

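Since `models.py` is new in this commit, a short usage sketch may help. The toy data below is an assumption for illustration; in the project, training data comes from the generated *.train* files:

```python
# Toy usage of the new models.py API (illustrative random data only).
import numpy as np
from models import get_trained_model

X_train = np.random.rand(100, 16)   # 100 samples, 16 SVD components
y_train = np.tile([0, 1], 50)       # balanced noisy / not-noisy labels

# 'svm_model' runs the GridSearchCV defined in _get_best_model
model = get_trained_model('svm_model', X_train, y_train)
print(model.predict(X_train[:5]))
```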
+ 13 - 9
save_model_result_in_md.py

@@ -1,17 +1,21 @@
-from sklearn.externals import joblib
-
+# main imports
 import numpy as np
-
-from ipfml import processing
-from PIL import Image
-
 import sys, os, argparse
 import subprocess
 import time
 
+# models imports
+from sklearn.externals import joblib
+
+# image processing imports
+from PIL import Image
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
 
-from modules.utils import config as cfg
+import custom_config as cfg
 
+# variables and parameters
 threshold_map_folder      = cfg.threshold_map_folder
 threshold_map_file_prefix = cfg.threshold_map_folder + "_"
 
@@ -26,7 +30,7 @@ def main():
 
     parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
     parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=cfg.metric_choices_labels)
+    parser.add_argument('--feature', type=str, help='Feature data choice', choices=cfg.features_choices_labels)
     parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=cfg.normalization_choices)
 
     args = parser.parse_args()
@@ -41,7 +45,7 @@ def main():
 
     begin, end = p_interval
 
-    bash_cmd = "bash testModelByScene.sh '" + str(begin) + "' '" + str(end) + "' '" + p_model_file + "' '" + p_mode + "' '" + p_metric + "'"
+    bash_cmd = "bash others/testModelByScene.sh '" + str(begin) + "' '" + str(end) + "' '" + p_model_file + "' '" + p_mode + "' '" + p_metric + "'"
     print(bash_cmd)
 
     ## call command ##

+ 18 - 11
save_model_result_in_md_maxwell.py

@@ -1,3 +1,13 @@
+# main imports
+import numpy as np
+import pandas as pd
+
+import sys, os, argparse
+import subprocess
+import time
+import json
+
+# models imports
 from sklearn.utils import shuffle
 from sklearn.externals import joblib
 from sklearn.metrics import accuracy_score, f1_score, recall_score, roc_auc_score
@@ -12,19 +22,16 @@ from keras.wrappers.scikit_learn import KerasClassifier
 from keras import backend as K
 from keras.models import model_from_json
 
-import numpy as np
-import pandas as pd
-
+# image processing imports
 from ipfml import processing
 from PIL import Image
 
-import sys, os, argparse
-import subprocess
-import time
-import json
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
 
-from modules.utils import config as cfg
+import custom_config as cfg
 
+# variables and parameters
 threshold_map_folder        = cfg.threshold_map_folder
 threshold_map_file_prefix   = cfg.threshold_map_folder + "_"
 
@@ -60,7 +67,7 @@ def main():
     # call model and get global result in scenes
     begin, end = p_interval
 
-    bash_cmd = "bash testModelByScene_maxwell.sh '" + str(begin) + "' '" + str(end) + "' '" + p_model_file + "' '" + p_mode + "' '" + p_metric + "'"
+    bash_cmd = "bash others/testModelByScene_maxwell.sh '" + str(begin) + "' '" + str(end) + "' '" + p_model_file + "' '" + p_mode + "' '" + p_metric + "'"
 
     print(bash_cmd)
 
@@ -117,8 +124,8 @@ def main():
     # Keep model information to compare
     current_model_name = p_model_file.split('/')[-1].replace(model_ext, '')
 
-    # Prepare writing in .csv file
-    output_final_file_path = os.path.join(markdowns_folder, final_csv_model_comparisons)
+    # Prepare writing in .csv file into results folder
+    output_final_file_path = os.path.join(cfg.results_information_folder, final_csv_model_comparisons)
     output_final_file = open(output_final_file_path, "a")
 
     print(current_model_name)

+ 1 - 1
testModelByScene.sh

@@ -55,7 +55,7 @@ for scene in {"A","B","C","D","E","F","G","H","I"}; do
 
   FILENAME="data/data_${INPUT_MODE}_${INPUT_METRIC}_B${INPUT_BEGIN}_E${INPUT_END}_scene${scene}"
 
-  python generate_data_model.py --output ${FILENAME} --interval "${INPUT_BEGIN},${INPUT_END}" --kind ${INPUT_MODE} --metric ${INPUT_METRIC} --scenes "${scene}" --zones "${zones}" --percent 1 --sep ";" --rowindex "0"
+  python generate/generate_data_model.py --output ${FILENAME} --interval "${INPUT_BEGIN},${INPUT_END}" --kind ${INPUT_MODE} --metric ${INPUT_METRIC} --scenes "${scene}" --zones "${zones}" --percent 1 --sep ";" --rowindex "0"
 
   python prediction_scene.py --data "$FILENAME.train" --model ${INPUT_MODEL} --output "${INPUT_MODEL}_Scene${scene}_mode_${INPUT_MODE}_metric_${INPUT_METRIC}.prediction" --scene ${scene}
 

+ 1 - 1
testModelByScene_maxwell.sh

@@ -63,7 +63,7 @@ for scene in {"A","D","G","H"}; do
 
   FILENAME="data/data_${INPUT_MODE}_${INPUT_METRIC}_B${INPUT_BEGIN}_E${INPUT_END}_scene${scene}"
 
-  python generate_data_model.py --output ${FILENAME} --interval "${INPUT_BEGIN},${INPUT_END}" --kind ${INPUT_MODE} --metric ${INPUT_METRIC} --scenes "${scene}" --zones "${zones}" --percent 1
+  python generate/generate_data_model.py --output ${FILENAME} --interval "${INPUT_BEGIN},${INPUT_END}" --kind ${INPUT_MODE} --metric ${INPUT_METRIC} --scenes "${scene}" --zones "${zones}" --percent 1
 
   python prediction_scene.py --data "$FILENAME.train" --model ${INPUT_MODEL} --output "${INPUT_MODEL}_Scene${scene}_mode_${INPUT_MODE}_metric_${INPUT_METRIC}.prediction" --scene ${scene}
 

+ 0 - 145
predict_noisy_image_svd.py

@@ -1,145 +0,0 @@
-from sklearn.externals import joblib
-
-import numpy as np
-
-from ipfml import processing, utils
-from PIL import Image
-
-import sys, os, argparse, json
-
-from keras.models import model_from_json
-
-from modules.utils import config as cfg
-from modules.utils import data as dt
-
-path                  = cfg.dataset_path
-min_max_ext           = cfg.min_max_filename_extension
-metric_choices        = cfg.metric_choices_labels
-normalization_choices = cfg.normalization_choices
-
-custom_min_max_folder = cfg.min_max_custom_folder
-
-def main():
-
-    # getting all params
-    parser = argparse.ArgumentParser(description="Script which detects if an image is noisy or not using specific model")
-
-    parser.add_argument('--image', type=str, help='Image path')
-    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
-    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
-    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
-    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
-
-    args = parser.parse_args()
-
-    p_img_file   = args.image
-    p_model_file = args.model
-    p_interval   = list(map(int, args.interval.split(',')))
-    p_mode       = args.mode
-    p_metric     = args.metric
-    p_custom     = args.custom
-
-    if '.joblib' in p_model_file:
-        kind_model = 'sklearn'
-
-    if '.json' in p_model_file:
-        kind_model = 'keras'
-
-    if 'corr' in p_model_file:
-        corr_model = True
-
-        indices_corr_path = os.path.join(cfg.correlation_indices_folder, p_model_file.split('/')[1].replace('.json', '').replace('.joblib', '') + '.csv')
-
-        with open(indices_corr_path, 'r') as f:
-            data_corr_indices = [int(x) for x in f.readline().split(';') if x != '']
-    else:
-        corr_model = False
-
-
-    if kind_model == 'sklearn':
-        # load of model file
-        model = joblib.load(p_model_file)
-
-    if kind_model == 'keras':
-        with open(p_model_file, 'r') as f:
-            json_model = json.load(f)
-            model = model_from_json(json_model)
-            model.load_weights(p_model_file.replace('.json', '.h5'))
-
-            model.compile(loss='binary_crossentropy',
-                        optimizer='adam',
-                        metrics=['accuracy'])
-
-    # load image
-    img = Image.open(p_img_file)
-
-    data = dt.get_svd_data(p_metric, img)
-
-    # get interval values
-    begin, end = p_interval
-
-    # check if custom min max file is used
-    if p_custom:
-
-        if corr_model:
-            test_data = data[data_corr_indices]
-        else:
-            test_data = data[begin:end]
-
-        if p_mode == 'svdne':
-
-            # set min_max_filename if custom use
-            min_max_file_path = custom_min_max_folder + '/' +  p_custom
-
-            # need to read min_max_file
-            file_path = os.path.join(os.path.dirname(__file__), min_max_file_path)
-            with open(file_path, 'r') as f:
-                min_val = float(f.readline().replace('\n', ''))
-                max_val = float(f.readline().replace('\n', ''))
-
-            test_data = utils.normalize_arr_with_range(test_data, min_val, max_val)
-
-        if p_mode == 'svdn':
-            test_data = utils.normalize_arr(test_data)
-
-    else:
-
-        # check mode to normalize data
-        if p_mode == 'svdne':
-
-            # set min_max_filename if custom use
-            min_max_file_path = path + '/' + p_metric + min_max_ext
-
-            # need to read min_max_file
-            file_path = os.path.join(os.path.dirname(__file__), min_max_file_path)
-            with open(file_path, 'r') as f:
-                min_val = float(f.readline().replace('\n', ''))
-                max_val = float(f.readline().replace('\n', ''))
-
-            l_values = utils.normalize_arr_with_range(data, min_val, max_val)
-
-        elif p_mode == 'svdn':
-            l_values = utils.normalize_arr(data)
-        else:
-            l_values = data
-
-        if corr_model:
-            test_data = data[data_corr_indices]
-        else:
-            test_data = data[begin:end]
-
-
-    # get prediction of model
-    if kind_model == 'sklearn':
-        prediction = model.predict([test_data])[0]
-
-    if kind_model == 'keras':
-        test_data = np.asarray(test_data).reshape(1, len(test_data), 1)
-        prediction = model.predict_classes([test_data])[0][0]
-
-    # output expected from others scripts
-    print(prediction)
-
-if __name__== "__main__":
-    main()

+ 0 - 216
predict_seuil_expe.py

@@ -1,216 +0,0 @@
-from sklearn.externals import joblib
-
-import numpy as np
-
-from ipfml import processing, utils
-from PIL import Image
-
-import sys, os, argparse
-import subprocess
-import time
-
-from modules.utils import config as cfg
-
-config_filename           = cfg.config_filename
-scenes_path               = cfg.dataset_path
-min_max_filename          = cfg.min_max_filename_extension
-threshold_expe_filename   = cfg.seuil_expe_filename
-
-threshold_map_folder      = cfg.threshold_map_folder
-threshold_map_file_prefix = cfg.threshold_map_folder + "_"
-
-zones                     = cfg.zones_indices
-normalization_choices     = cfg.normalization_choices
-metric_choices            = cfg.metric_choices_labels
-
-tmp_filename              = '/tmp/__model__img_to_predict.png'
-
-current_dirpath = os.getcwd()
-
-def main():
-
-    p_custom = False
-
-    parser = argparse.ArgumentParser(description="Script which predicts threshold using specific model")
-
-    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
-    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
-    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
-    #parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
-    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
-
-    args = parser.parse_args()
-
-    p_interval   = list(map(int, args.interval.split(',')))
-    p_model_file = args.model
-    p_mode       = args.mode
-    p_metric     = args.metric
-    #p_limit      = args.limit
-    p_custom     = args.custom
-
-    scenes = os.listdir(scenes_path)
-    scenes = [s for s in scenes if not min_max_filename in s]
-
-    # go ahead each scenes
-    for id_scene, folder_scene in enumerate(scenes):
-
-        print(folder_scene)
-
-        scene_path = os.path.join(scenes_path, folder_scene)
-
-        config_path = os.path.join(scene_path, config_filename)
-
-        with open(config_path, "r") as config_file:
-            last_image_name = config_file.readline().strip()
-            prefix_image_name = config_file.readline().strip()
-            start_index_image = config_file.readline().strip()
-            end_index_image = config_file.readline().strip()
-            step_counter = int(config_file.readline().strip())
-
-        threshold_expes = []
-        threshold_expes_detected = []
-        threshold_expes_counter = []
-        threshold_expes_found = []
-
-        # get zones list info
-        for index in zones:
-            index_str = str(index)
-            if len(index_str) < 2:
-                index_str = "0" + index_str
-            zone_folder = "zone"+index_str
-
-            threshold_path_file = os.path.join(os.path.join(scene_path, zone_folder), threshold_expe_filename)
-
-            with open(threshold_path_file) as f:
-                threshold = int(f.readline())
-                threshold_expes.append(threshold)
-
-                # Initialize default data to get detected model threshold found
-                threshold_expes_detected.append(False)
-                threshold_expes_counter.append(0)
-                threshold_expes_found.append(int(end_index_image)) # by default use max
-
-        current_counter_index = int(start_index_image)
-        end_counter_index = int(end_index_image)
-
-        print(current_counter_index)
-        check_all_done = False
-
-        while(current_counter_index <= end_counter_index and not check_all_done):
-
-            current_counter_index_str = str(current_counter_index)
-
-            while len(start_index_image) > len(current_counter_index_str):
-                current_counter_index_str = "0" + current_counter_index_str
-
-            img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
-
-            current_img = Image.open(img_path)
-            img_blocks = processing.divide_in_blocks(current_img, (200, 200))
-
-
-            check_all_done = all(d == True for d in threshold_expes_detected)
-
-            for id_block, block in enumerate(img_blocks):
-
-                # check only if necessary for this scene (not already detected)
-                if not threshold_expes_detected[id_block]:
-
-                    tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
-                    block.save(tmp_file_path)
-
-                    python_cmd = "python predict_noisy_image_svd.py --image " + tmp_file_path + \
-                                    " --interval '" + p_interval + \
-                                    "' --model " + p_model_file  + \
-                                    " --mode " + p_mode + \
-                                    " --metric " + p_metric
-
-                    # specify use of custom file for min max normalization
-                    if p_custom:
-                        python_cmd = python_cmd + ' --custom ' + p_custom
-
-
-                    ## call command ##
-                    p = subprocess.Popen(python_cmd, stdout=subprocess.PIPE, shell=True)
-
-                    (output, err) = p.communicate()
-
-                    ## Wait for result ##
-                    p_status = p.wait()
-
-                    prediction = int(output)
-
-                    if prediction == 0:
-                        threshold_expes_counter[id_block] = threshold_expes_counter[id_block] + 1
-                    else:
-                        threshold_expes_counter[id_block] = 0
-
-                    if threshold_expes_counter[id_block] == p_limit:
-                        threshold_expes_detected[id_block] = True
-                        threshold_expes_found[id_block] = current_counter_index
-
-                    print(str(id_block) + " : " + str(current_counter_index) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
-
-            current_counter_index += step_counter
-            print("------------------------")
-            print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)))
-            print("------------------------")
-
-        # end of scene => display of results
-
-        # construct path using model name for saving threshold map folder
-        model_treshold_path = os.path.join(threshold_map_folder, p_model_file.split('/')[-1].replace('.joblib', ''))
-
-        # create threshold model path if necessary
-        if not os.path.exists(model_treshold_path):
-            os.makedirs(model_treshold_path)
-
-        abs_dist = []
-
-        map_filename = os.path.join(model_treshold_path, threshold_map_file_prefix + folder_scene)
-        f_map = open(map_filename, 'w')
-
-        line_information = ""
-
-        # default header
-        f_map.write('|  |    |    |  |\n')
-        f_map.write('---|----|----|---\n')
-        for id, threshold in enumerate(threshold_expes_found):
-
-            line_information += str(threshold) + " / " + str(threshold_expes[id]) + " | "
-            abs_dist.append(abs(threshold - threshold_expes[id]))
-
-            if (id + 1) % 4 == 0:
-                f_map.write(line_information + '\n')
-                line_information = ""
-
-        f_map.write(line_information + '\n')
-
-        min_abs_dist = min(abs_dist)
-        max_abs_dist = max(abs_dist)
-        avg_abs_dist = sum(abs_dist) / len(abs_dist)
-
-        f_map.write('\nScene information : ')
-        f_map.write('\n- BEGIN : ' + str(start_index_image))
-        f_map.write('\n- END : ' + str(end_index_image))
-
-        f_map.write('\n\nDistances information : ')
-        f_map.write('\n- MIN : ' + str(min_abs_dist))
-        f_map.write('\n- MAX : ' + str(max_abs_dist))
-        f_map.write('\n- AVG : ' + str(avg_abs_dist))
-
-        f_map.write('\n\nOther information : ')
-        f_map.write('\n- Detection limit : ' + str(p_limit))
-
-        # by default print last line
-        f_map.close()
-
-        print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)) + " Done..")
-        print("------------------------")
-
-        time.sleep(10)
-
-
-if __name__== "__main__":
-    main()

+ 0 - 221
predict_seuil_expe_maxwell.py

@@ -1,221 +0,0 @@
-from sklearn.externals import joblib
-
-import numpy as np
-
-from ipfml import processing
-from PIL import Image
-
-import sys, os, argparse
-import subprocess
-import time
-
-
-from modules.utils import config as cfg
-
-config_filename           = cfg.config_filename
-scenes_path               = cfg.dataset_path
-min_max_filename          = cfg.min_max_filename_extension
-threshold_expe_filename   = cfg.seuil_expe_filename
-
-threshold_map_folder      = cfg.threshold_map_folder
-threshold_map_file_prefix = cfg.threshold_map_folder + "_"
-
-zones                     = cfg.zones_indices
-maxwell_scenes            = cfg.maxwell_scenes_names
-normalization_choices     = cfg.normalization_choices
-metric_choices            = cfg.metric_choices_labels
-
-tmp_filename              = '/tmp/__model__img_to_predict.png'
-
-current_dirpath = os.getcwd()
-
-def main():
-
-    # by default..
-    p_custom = False
-
-    parser = argparse.ArgumentParser(description="Script which predicts threshold using specific model")
-
-    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
-    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
-    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
-    #parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
-    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
-
-    args = parser.parse_args()
-
-    p_interval   = list(map(int, args.interval.split(',')))
-    p_model_file = args.model
-    p_mode       = args.mode
-    p_metric     = args.metric
-    #p_limit      = args.limit
-    p_custom     = args.custom
-
-    scenes = os.listdir(scenes_path)
-    scenes = [s for s in scenes if s in maxwell_scenes]
-
-    # go ahead each scenes
-    for id_scene, folder_scene in enumerate(scenes):
-
-        # only take in consideration maxwell scenes
-        if folder_scene in maxwell_scenes:
-
-            print(folder_scene)
-
-            scene_path = os.path.join(scenes_path, folder_scene)
-
-            config_path = os.path.join(scene_path, config_filename)
-
-            with open(config_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
-
-            threshold_expes = []
-            threshold_expes_detected = []
-            threshold_expes_counter = []
-            threshold_expes_found = []
-
-            # get zones list info
-            for index in zones:
-                index_str = str(index)
-                if len(index_str) < 2:
-                    index_str = "0" + index_str
-                zone_folder = "zone"+index_str
-
-                threshold_path_file = os.path.join(os.path.join(scene_path, zone_folder), threshold_expe_filename)
-
-                with open(threshold_path_file) as f:
-                    threshold = int(f.readline())
-                    threshold_expes.append(threshold)
-
-                    # Initialize default data to get detected model threshold found
-                    threshold_expes_detected.append(False)
-                    threshold_expes_counter.append(0)
-                    threshold_expes_found.append(int(end_index_image)) # by default use max
-
-            current_counter_index = int(start_index_image)
-            end_counter_index = int(end_index_image)
-
-            print(current_counter_index)
-            check_all_done = False
-
-            while(current_counter_index <= end_counter_index and not check_all_done):
-
-                current_counter_index_str = str(current_counter_index)
-
-                while len(start_index_image) > len(current_counter_index_str):
-                    current_counter_index_str = "0" + current_counter_index_str
-
-                img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
-
-                current_img = Image.open(img_path)
-                img_blocks = processing.divide_in_blocks(current_img, (200, 200))
-
-
-                check_all_done = all(d == True for d in threshold_expes_detected)
-
-                for id_block, block in enumerate(img_blocks):
-
-                    # check only if necessary for this scene (not already detected)
-                    if not threshold_expes_detected[id_block]:
-
-                        tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
-                        block.save(tmp_file_path)
-
-                        python_cmd = "python predict_noisy_image_svd.py --image " + tmp_file_path + \
-                                        " --interval '" + p_interval + \
-                                        "' --model " + p_model_file  + \
-                                        " --mode " + p_mode + \
-                                        " --metric " + p_metric
-
-                        # specify use of custom file for min max normalization
-                        if p_custom:
-                            python_cmd = python_cmd + ' --custom ' + p_custom
-
-                        ## call command ##
-                        p = subprocess.Popen(python_cmd, stdout=subprocess.PIPE, shell=True)
-
-                        (output, err) = p.communicate()
-
-                        ## Wait for result ##
-                        p_status = p.wait()
-
-                        prediction = int(output)
-
-                        if prediction == 0:
-                            threshold_expes_counter[id_block] = threshold_expes_counter[id_block] + 1
-                        else:
-                            threshold_expes_counter[id_block] = 0
-
-                        if threshold_expes_counter[id_block] == p_limit:
-                            threshold_expes_detected[id_block] = True
-                            threshold_expes_found[id_block] = current_counter_index
-
-                        print(str(id_block) + " : " + str(current_counter_index) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
-
-                current_counter_index += step_counter
-                print("------------------------")
-                print("Scene " + str(id_scene + 1) + "/" + str(len(maxwell_scenes)))
-                print("------------------------")
-
-            # end of scene => display of results
-
-            # construct path using model name for saving threshold map folder
-            model_treshold_path = os.path.join(threshold_map_folder, p_model_file.split('/')[-1].replace('.joblib', ''))
-
-            # create threshold model path if necessary
-            if not os.path.exists(model_treshold_path):
-                os.makedirs(model_treshold_path)
-
-            abs_dist = []
-
-            map_filename = os.path.join(model_treshold_path, threshold_map_file_prefix + folder_scene)
-            f_map = open(map_filename, 'w')
-
-            line_information = ""
-
-            # default header
-            f_map.write('|  |    |    |  |\n')
-            f_map.write('---|----|----|---\n')
-            for id, threshold in enumerate(threshold_expes_found):
-
-                line_information += str(threshold) + " / " + str(threshold_expes[id]) + " | "
-                abs_dist.append(abs(threshold - threshold_expes[id]))
-
-                if (id + 1) % 4 == 0:
-                    f_map.write(line_information + '\n')
-                    line_information = ""
-
-            f_map.write(line_information + '\n')
-
-            min_abs_dist = min(abs_dist)
-            max_abs_dist = max(abs_dist)
-            avg_abs_dist = sum(abs_dist) / len(abs_dist)
-
-            f_map.write('\nScene information : ')
-            f_map.write('\n- BEGIN : ' + str(start_index_image))
-            f_map.write('\n- END : ' + str(end_index_image))
-
-            f_map.write('\n\nDistances information : ')
-            f_map.write('\n- MIN : ' + str(min_abs_dist))
-            f_map.write('\n- MAX : ' + str(max_abs_dist))
-            f_map.write('\n- AVG : ' + str(avg_abs_dist))
-
-            f_map.write('\n\nOther information : ')
-            f_map.write('\n- Detection limit : ' + str(p_limit))
-
-            # by default print last line
-            f_map.close()
-
-            print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)) + " Done..")
-            print("------------------------")
-
-            time.sleep(10)
-
-
-if __name__== "__main__":
-    main()

+ 0 - 178
predict_seuil_expe_maxwell_curve.py

@@ -1,178 +0,0 @@
-from sklearn.externals import joblib
-
-import numpy as np
-
-from ipfml import processing
-from PIL import Image
-
-import sys, os, argparse
-import subprocess
-import time
-
-from modules.utils import config as cfg
-
-config_filename           = cfg.config_filename
-scenes_path               = cfg.dataset_path
-min_max_filename          = cfg.min_max_filename_extension
-threshold_expe_filename   = cfg.seuil_expe_filename
-
-threshold_map_folder      = cfg.threshold_map_folder
-threshold_map_file_prefix = cfg.threshold_map_folder + "_"
-
-zones                     = cfg.zones_indices
-maxwell_scenes            = cfg.maxwell_scenes_names
-normalization_choices     = cfg.normalization_choices
-metric_choices            = cfg.metric_choices_labels
-
-simulation_curves_zones   = "simulation_curves_zones_"
-tmp_filename              = '/tmp/__model__img_to_predict.png'
-
-current_dirpath = os.getcwd()
-
-
-def main():
-
-    p_custom = False
-        
-    parser = argparse.ArgumentParser(description="Script which predicts threshold using specific model")
-
-    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
-    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
-    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
-    #parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
-    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
-
-    args = parser.parse_args()
-
-    # keep p_interval as it is
-    p_interval   = args.interval
-    p_model_file = args.model
-    p_mode       = args.mode
-    p_metric     = args.metric
-    #p_limit      = args.limit
-    p_custom     = args.custom
-
-    scenes = os.listdir(scenes_path)
-    scenes = [s for s in scenes if s in maxwell_scenes]
-
-    print(scenes)
-
-    # go ahead each scenes
-    for id_scene, folder_scene in enumerate(scenes):
-
-        # only take in consideration maxwell scenes
-        if folder_scene in maxwell_scenes:
-
-            print(folder_scene)
-
-            scene_path = os.path.join(scenes_path, folder_scene)
-
-            config_path = os.path.join(scene_path, config_filename)
-
-            with open(config_path, "r") as config_file:
-                last_image_name = config_file.readline().strip()
-                prefix_image_name = config_file.readline().strip()
-                start_index_image = config_file.readline().strip()
-                end_index_image = config_file.readline().strip()
-                step_counter = int(config_file.readline().strip())
-
-            threshold_expes = []
-            threshold_expes_found = []
-            block_predictions_str = []
-
-            # get zones list info
-            for index in zones:
-                index_str = str(index)
-                if len(index_str) < 2:
-                    index_str = "0" + index_str
-                zone_folder = "zone"+index_str
-
-                threshold_path_file = os.path.join(os.path.join(scene_path, zone_folder), threshold_expe_filename)
-
-                with open(threshold_path_file) as f:
-                    threshold = int(f.readline())
-                    threshold_expes.append(threshold)
-
-                    # Initialize default data to get detected model threshold found
-                    threshold_expes_found.append(int(end_index_image)) # by default use max
-
-                block_predictions_str.append(index_str + ";" + p_model_file + ";" + str(threshold) + ";" + str(start_index_image) + ";" + str(step_counter))
-
-            current_counter_index = int(start_index_image)
-            end_counter_index = int(end_index_image)
-
-            print(current_counter_index)
-
-            while(current_counter_index <= end_counter_index):
-
-                current_counter_index_str = str(current_counter_index)
-
-                while len(start_index_image) > len(current_counter_index_str):
-                    current_counter_index_str = "0" + current_counter_index_str
-
-                img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
-
-                current_img = Image.open(img_path)
-                img_blocks = processing.divide_in_blocks(current_img, (200, 200))
-
-                for id_block, block in enumerate(img_blocks):
-
-                    # check only if necessary for this scene (not already detected)
-                    #if not threshold_expes_detected[id_block]:
-
-                        tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
-                        block.save(tmp_file_path)
-
-                        python_cmd_line = "python predict_noisy_image_svd.py --image {0} --interval '{1}' --model {2} --mode {3} --metric {4}"
-                        python_cmd = python_cmd_line.format(tmp_file_path, p_interval, p_model_file, p_mode, p_metric) 
-
-                        # specify use of custom file for min max normalization
-                        if p_custom:
-                            python_cmd = python_cmd + ' --custom ' + p_custom
-
-                        ## call command ##
-                        p = subprocess.Popen(python_cmd, stdout=subprocess.PIPE, shell=True)
-
-                        (output, err) = p.communicate()
-
-                        ## Wait for result ##
-                        p_status = p.wait()
-
-                        prediction = int(output)
-
-                        # save here in specific file of block all the predictions done
-                        block_predictions_str[id_block] = block_predictions_str[id_block] + ";" + str(prediction)
-
-                        print(str(id_block) + " : " + str(current_counter_index) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
-
-                current_counter_index += step_counter
-                print("------------------------")
-                print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)))
-                print("------------------------")
-
-            # end of scene => display of results
-
-            # construct path using model name for saving threshold map folder
-            model_threshold_path = os.path.join(threshold_map_folder, p_model_file.split('/')[-1].replace('.joblib', ''))
-
-            # create threshold model path if necessary
-            if not os.path.exists(model_threshold_path):
-                os.makedirs(model_threshold_path)
-
-            map_filename = os.path.join(model_threshold_path, simulation_curves_zones + folder_scene)
-            f_map = open(map_filename, 'w')
-
-            for line in block_predictions_str:
-                f_map.write(line + '\n')
-            f_map.close()
-
-            print("Scene " + str(id_scene + 1) + "/" + str(len(maxwell_scenes)) + " Done..")
-            print("------------------------")
-
-            print("Model predictions are saved into %s" % map_filename)
-            time.sleep(10)
-
-
-if __name__== "__main__":
-    main()

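The zero-padding of the image counter in the removed script (the inner while loop prepending "0") can be written in one call with str.zfill; a minimal equivalent sketch, with example values standing in for the scene config:

    # minimal sketch: pad the counter to the width of the start index,
    # equivalent to the while loop of the removed script
    start_index_image = "00050"          # example value read from the scene config
    current_counter_index = 170

    current_counter_index_str = str(current_counter_index).zfill(len(start_index_image))
    print(current_counter_index_str)     # -> '00170'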
+ 0 - 110
prediction_scene.py

@@ -1,110 +0,0 @@
-from sklearn.externals import joblib
-
-import numpy as np
-
-import pandas as pd
-from sklearn.metrics import accuracy_score
-from keras.models import Sequential
-from keras.layers import Conv1D, MaxPooling1D
-from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
-from keras import backend as K
-from keras.models import model_from_json
-from keras.wrappers.scikit_learn import KerasClassifier
-
-import sys, os, argparse
-import json
-
-from modules.utils import config as cfg
-
-output_model_folder = cfg.saved_models_folder
-
-def main():
-    
-    parser = argparse.ArgumentParser(description="Give model performance on specific scene")
-
-    parser.add_argument('--data', type=str, help='dataset filename prefix of specific scene (without .train and .test)')
-    parser.add_argument('--model', type=str, help='saved model (Keras or SKlearn) filename with extension')
-    parser.add_argument('--output', type=str, help="filename to store predicted and performance model obtained on scene")
-    parser.add_argument('--scene', type=str, help="scene indice to predict", choices=cfg.scenes_indices)
-
-    args = parser.parse_args()
-
-    p_data_file  = args.data
-    p_model_file = args.model
-    p_output     = args.output
-    p_scene      = args.scene
-
-    if '.joblib' in p_model_file:
-        kind_model = 'sklearn'
-        model_ext = '.joblib'
-
-    if '.json' in p_model_file:
-        kind_model = 'keras'
-        model_ext = '.json'
-
-    if not os.path.exists(output_model_folder):
-        os.makedirs(output_model_folder)
-
-    dataset = pd.read_csv(p_data_file, header=None, sep=";")
-
-    y_dataset = dataset.ix[:,0]
-    x_dataset = dataset.ix[:,1:]
-
-    noisy_dataset = dataset[dataset.ix[:, 0] == 1]
-    not_noisy_dataset = dataset[dataset.ix[:, 0] == 0]
-
-    y_noisy_dataset = noisy_dataset.ix[:, 0]
-    x_noisy_dataset = noisy_dataset.ix[:, 1:]
-
-    y_not_noisy_dataset = not_noisy_dataset.ix[:, 0]
-    x_not_noisy_dataset = not_noisy_dataset.ix[:, 1:]
-
-    if kind_model == 'keras':
-        with open(p_model_file, 'r') as f:
-            json_model = json.load(f)
-            model = model_from_json(json_model)
-            model.load_weights(p_model_file.replace('.json', '.h5'))
-
-            model.compile(loss='binary_crossentropy',
-                  optimizer='adam',
-                  metrics=['accuracy'])
-
-        _, vector_size = np.array(x_dataset).shape
-
-        # reshape all data
-        x_dataset = np.array(x_dataset).reshape(len(x_dataset), vector_size, 1)
-        x_noisy_dataset = np.array(x_noisy_dataset).reshape(len(x_noisy_dataset), vector_size, 1)
-        x_not_noisy_dataset = np.array(x_not_noisy_dataset).reshape(len(x_not_noisy_dataset), vector_size, 1)
-
-
-    if kind_model == 'sklearn':
-        model = joblib.load(p_model_file)
-
-    if kind_model == 'keras':
-        y_pred = model.predict_classes(x_dataset)
-        y_noisy_pred = model.predict_classes(x_noisy_dataset)
-        y_not_noisy_pred = model.predict_classes(x_not_noisy_dataset)
-
-    if kind_model == 'sklearn':
-        y_pred = model.predict(x_dataset)
-        y_noisy_pred = model.predict(x_noisy_dataset)
-        y_not_noisy_pred = model.predict(x_not_noisy_dataset)
-
-    accuracy_global = accuracy_score(y_dataset, y_pred)
-    accuracy_noisy = accuracy_score(y_noisy_dataset, y_noisy_pred)
-    accuracy_not_noisy = accuracy_score(y_not_noisy_dataset, y_not_noisy_pred)
-
-    if(p_scene):
-        print(p_scene + " | " + str(accuracy_global) + " | " + str(accuracy_noisy) + " | " + str(accuracy_not_noisy))
-    else:
-        print(str(accuracy_global) + " \t | " + str(accuracy_noisy) + " \t | " + str(accuracy_not_noisy))
-
-        with open(p_output, 'w') as f:
-            f.write("Global accuracy found %s " % str(accuracy_global))
-            f.write("Noisy accuracy found %s " % str(accuracy_noisy))
-            f.write("Not noisy accuracy found %s " % str(accuracy_not_noisy))
-            for prediction in y_pred:
-                f.write(str(prediction) + '\n')
-
-if __name__== "__main__":
-    main()

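The removed script relies on the long-deprecated pandas DataFrame.ix indexer; with any recent pandas release the same positional slicing is written with iloc. A minimal sketch, assuming the same semicolon-separated layout with the class label in column 0 ('data.csv' is a placeholder path):

    import pandas as pd

    # minimal sketch: same slicing as the removed script, with iloc
    # instead of the deprecated .ix indexer
    dataset = pd.read_csv('data.csv', header=None, sep=";")

    y_dataset = dataset.iloc[:, 0]     # first column: noisy / not-noisy label
    x_dataset = dataset.iloc[:, 1:]    # remaining columns: feature vector

    noisy_dataset     = dataset[dataset.iloc[:, 0] == 1]
    not_noisy_dataset = dataset[dataset.iloc[:, 0] == 0]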
+ 1 - 1
runAll_display_data_scene.sh

@@ -2,6 +2,6 @@
 
 for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2"}; do
     for scene in {"A","D","G","H"}; do
-        python display_svd_data_scene.py --scene ${scene} --interval "0,800" --indices "0, 2000" --metric ${metric} --mode svdne --step 100 --norm 1 --ylim "0, 0.01"
+        python display/display_svd_data_scene.py --scene ${scene} --interval "0,800" --indices "0, 2000" --metric ${metric} --mode svdne --step 100 --norm 1 --ylim "0, 0.01"
     done
 done

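As with the other run scripts updated below, the command now assumes it is launched from the repository root so that the display/, generate/, data_processing/, prediction/ and others/ prefixes resolve.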
+ 3 - 3
runAll_maxwell.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -19,6 +19,6 @@ fi
 for size in {"4","8","16","26","32","40"}; do
 
     for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2"}; do
-        bash generateAndTrain_maxwell.sh ${size} ${metric}
+        bash data_processing/generateAndTrain_maxwell.sh ${size} ${metric}
     done
 done

+ 4 - 4
runAll_maxwell_area.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -40,10 +40,10 @@ for nb_zones in {4,6,8,10,12}; do
 
                 echo "${MODEL_NAME} results already generated..."
             else
-                python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
             fi
         done
     done

+ 4 - 4
runAll_maxwell_area_normed.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -40,10 +40,10 @@ for nb_zones in {4,6,8,10,12}; do
 
                 echo "${MODEL_NAME} results already generated..."
             else
-                python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
             fi
         done
     done

+ 4 - 4
runAll_maxwell_corr_custom.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -41,11 +41,11 @@ for label in {"0","1"}; do
 
                             echo "${MODEL_NAME} results already generated..."
                         else
-                            python generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
+                            python generate/generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
                             python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                             # '--interval' is reused here, even though the value passed is not really an interval
-                            python save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                            python others/save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                         fi
                     done
                 done

+ 3 - 3
runAll_maxwell_custom.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -19,6 +19,6 @@ fi
 for size in {"4","8","16","26","32","40"}; do
 
     for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
-        bash generateAndTrain_maxwell_custom.sh ${size} ${metric}
+        bash data_processing/generateAndTrain_maxwell_custom.sh ${size} ${metric}
     done
 done

+ 3 - 3
runAll_maxwell_custom_center.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -19,6 +19,6 @@ fi
 for size in {"4","8","16","26","32","40"}; do
 
     for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
-        bash generateAndTrain_maxwell_custom_center.sh ${size} ${metric}
+        bash data_processing/generateAndTrain_maxwell_custom_center.sh ${size} ${metric}
     done
 done

+ 3 - 3
runAll_maxwell_custom_filters.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -20,6 +20,6 @@ for size in {"4","8","16","26","32","40","60","80"}; do
 
     # for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
     for metric in {"highest_sv_std_filters","lowest_sv_std_filters","highest_wave_sv_std_filters","lowest_sv_std_filters"}; do
-        bash generateAndTrain_maxwell_custom_filters.sh ${size} ${metric} &
+        bash data_processing/generateAndTrain_maxwell_custom_filters.sh ${size} ${metric} &
     done
 done

+ 3 - 3
runAll_maxwell_custom_filters_center.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -20,6 +20,6 @@ for size in {"4","8","16","26","32","40","60","80"}; do
 
     # for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
     for metric in {"highest_sv_std_filters","lowest_sv_std_filters","highest_wave_sv_std_filters","lowest_sv_std_filters"}; do
-        bash generateAndTrain_maxwell_custom_filters_center.sh ${size} ${metric} &
+        bash data_processing/generateAndTrain_maxwell_custom_filters_center.sh ${size} ${metric} &
     done
 done

+ 3 - 3
runAll_maxwell_custom_filters_split.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -20,6 +20,6 @@ for size in {"4","8","16","26","32","40","60","80"}; do
 
     #for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
     for metric in {"highest_sv_std_filters","lowest_sv_std_filters","highest_wave_sv_std_filters","lowest_sv_std_filters"}; do
-        bash generateAndTrain_maxwell_custom_filters_split.sh ${size} ${metric} &
+        bash data_processing/generateAndTrain_maxwell_custom_filters_split.sh ${size} ${metric} &
     done
 done

+ 3 - 3
runAll_maxwell_custom_filters_stats.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -19,4 +19,4 @@ fi
 size=26
 metric="filters_statistics"
 
-bash generateAndTrain_maxwell_custom_filters.sh ${size} ${metric} &
+bash data_processing/generateAndTrain_maxwell_custom_filters.sh ${size} ${metric} &

+ 3 - 3
runAll_maxwell_custom_filters_stats_center.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -19,4 +19,4 @@ fi
 size=26
 metric="filters_statistics"
 
-bash generateAndTrain_maxwell_custom_filters_center.sh ${size} ${metric} &
+bash data_processing/generateAndTrain_maxwell_custom_filters_center.sh ${size} ${metric} &

+ 3 - 3
runAll_maxwell_custom_filters_stats_split.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -19,4 +19,4 @@ fi
 size=26
 metric="filters_statistics"
 
-bash generateAndTrain_maxwell_custom_filters_split.sh ${size} ${metric} &
+bash data_processing/generateAndTrain_maxwell_custom_filters_split.sh ${size} ${metric} &

+ 3 - 3
runAll_maxwell_custom_split.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -19,6 +19,6 @@ fi
 for size in {"4","8","16","26","32","40"}; do
 
     for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","svd_trunc_diff","ipca_diff","svd_reconstruct"}; do
-        bash generateAndTrain_maxwell_custom_split.sh ${size} ${metric}
+        bash data_processing/generateAndTrain_maxwell_custom_split.sh ${size} ${metric}
     done
 done

+ 4 - 4
runAll_maxwell_keras.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -44,10 +44,10 @@ for metric in {"sub_blocks_stats","sub_blocks_stats_reduced","sub_blocks_area","
                 echo "${MODEL_NAME} results already generated..."
             else
                 echo "test"
-                #python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                #python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                 #python deep_network_keras_svd.py --data ${FILENAME} --output ${MODEL_NAME} --size ${end_index}
 
-                #python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
+                #python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
             fi
         done
     done

+ 4 - 4
runAll_maxwell_keras_corr.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -40,11 +40,11 @@ for label in {"0","1"}; do
 
                         echo "${MODEL_NAME} results already generated..."
                     else
-                        python generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                        python generate/generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                         python deep_network_keras_svd.py --data ${FILENAME} --output ${MODEL_NAME} --size ${size}
 
                         # '--interval' is reused here, even though the value passed is not really an interval
-                        python save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
+                        python others/save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
                     fi
                 done
             done

+ 4 - 4
runAll_maxwell_keras_corr_custom.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -40,11 +40,11 @@ for label in {"0","1"}; do
 
                         echo "${MODEL_NAME} results already generated..."
                     else
-                        python generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
+                        python generate/generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
                         python deep_network_keras_svd.py --data ${FILENAME} --output ${MODEL_NAME} --size ${size}
 
                         # '--interval' is reused here, even though the value passed is not really an interval
-                        python save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
+                        python others/save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
                     fi
                 done
             done

+ 4 - 4
runAll_maxwell_mscn_var.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -43,10 +43,10 @@ for nb_zones in {4,6,8,10,12}; do
 
                     echo "${MODEL_NAME} results already generated..."
                 else
-                    python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                    python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                     python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                    python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                    python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                 fi
             done
         done

+ 4 - 4
runAll_maxwell_sub_blocks_stats.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -40,10 +40,10 @@ for nb_zones in {4,6,8,10,12}; do
 
                 echo "${MODEL_NAME} results already generated..."
             else
-                python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
             fi
         done
     done

+ 4 - 4
runAll_maxwell_sub_blocks_stats_reduced.sh

@@ -1,7 +1,7 @@
 #!/bin/bash
 
-# erase "models_info/models_comparisons.csv" file and write new header
-file_path='models_info/models_comparisons.csv'
+# erase "results/models_comparisons.csv" file and write new header
+file_path='results/models_comparisons.csv'
 
 erased=$1
 
@@ -40,10 +40,10 @@ for nb_zones in {4,6,8,10,12}; do
 
                 echo "${MODEL_NAME} results already generated..."
             else
-                python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
             fi
         done
     done

+ 6 - 0
simulation/generate_all_simulate_curves.sh

@@ -0,0 +1,6 @@
+for file in "threshold_map"/*; do
+
+    echo ${file}
+
+    python display/display_simulation_curves.py --folder ${file}
+done

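The new script also uses paths relative to the repository root (threshold_map/* and display/display_simulation_curves.py), so it is meant to be launched from there, for instance with "bash simulation/generate_all_simulate_curves.sh".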
+ 2 - 2
run_maxwell_simulation.sh

@@ -38,13 +38,13 @@ for size in {"4","8","16","26","32","40"}; do
                             echo "Run simulation for model ${MODEL_NAME}"
 
                             # by default regenerate model
-                            python generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1
+                            python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1
 
                             python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                             python prediction/predict_seuil_expe_maxwell_curve.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2'
 
-                            python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                            python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
 
                         fi
                     done

+ 3 - 3
run_maxwell_simulation_corr_custom.sh

@@ -26,13 +26,13 @@ for label in {"0","1"}; do
                         if grep -xq "${MODEL_NAME}" "${simulate_models}"; then
                             echo "Run simulation for model ${MODEL_NAME}"
 
-                            python generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
+                            python generate/generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
 
                             python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
-                            python predict_seuil_expe_maxwell_curve.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
+                            python prediction/predict_seuil_expe_maxwell_curve.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
 
-                            python save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                            python others/save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
 
                         fi
                     done

+ 2 - 2
run_maxwell_simulation_custom.sh

@@ -39,13 +39,13 @@ for size in {"4","8","16","26","32","40"}; do
                             echo "Run simulation for model ${MODEL_NAME}"
 
                             # by default regenerate model
-                            python generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                            python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
 
                             python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                             python prediction/predict_seuil_expe_maxwell_curve.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
 
-                            python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                            python others/save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
 
                         fi
                     done

+ 2 - 2
run_maxwell_simulation_custom_filters.sh

@@ -31,12 +31,12 @@ for size in {"4","8","16","26","32","40"}; do
                         echo "${MODEL_NAME} results already generated..."
                     else
                         # Use of already generated model
-                        # python generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                        # python generate/generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                         # python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                         python prediction/predict_seuil_expe_maxwell_curve.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
 
-                        python save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                        python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
                     fi
                 done
             done

+ 2 - 2
run_maxwell_simulation_filters_statistics.sh

@@ -27,12 +27,12 @@ for nb_zones in {4,6,8,10,12}; do
                 echo "${MODEL_NAME} results already generated..."
             else
                 # Use of already generated model
-                # python generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                # python generate/generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                 # python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                 python prediction/predict_seuil_expe_maxwell_curve.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --custom ${CUSTOM_MIN_MAX_FILENAME}
 
-                python save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
             fi
         done
     done

+ 2 - 2
run_maxwell_simulation_keras_corr_custom.sh

@@ -27,13 +27,13 @@ for label in {"0","1"}; do
                     if grep -xq "${MODEL_NAME}" "${simulate_models}"; then
                         echo "Run simulation for model ${MODEL_NAME}"
 
-                        python generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
+                        python generate/generate_data_model_corr_random.py --output ${FILENAME} --n ${size} --highest ${highest} --label ${label} --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom 1
 
                         python deep_network_keras_svd.py --data ${FILENAME} --output ${MODEL_NAME} --size ${size}
 
                         python prediction/predict_seuil_expe_maxwell_curve.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
 
-                        python save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
+                        python others/save_model_result_in_md_maxwell.py --interval "${start_index},${size}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
 
                     fi
                 done

+ 2 - 2
run_maxwell_simulation_keras_custom.sh

@@ -24,13 +24,13 @@ for metric in {"sub_blocks_stats","sub_blocks_stats_reduced","sub_blocks_area","
                 echo "Run simulation for model ${MODEL_NAME}"
 
                 # by default regenerate model
-                python generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "${start_index},${end_index}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
 
                 python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                 python prediction/predict_seuil_expe_maxwell_curve.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
 
-                python save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
+                python others/save_model_result_in_md_maxwell.py --interval "${start_index},${end_index}" --model "saved_models/${MODEL_NAME}.json" --mode "${mode}" --metric ${metric}
 
             fi
         done

+ 12 - 6
train_model.py

@@ -1,3 +1,9 @@
+# main imports
+import numpy as np
+import pandas as pd
+import sys, os, argparse
+
+# models imports
 from sklearn.model_selection import train_test_split
 from sklearn.model_selection import GridSearchCV
 from sklearn.linear_model import LogisticRegression
@@ -9,17 +15,17 @@ from sklearn.externals import joblib
 from sklearn.metrics import accuracy_score, f1_score
 from sklearn.model_selection import cross_val_score
 
-import numpy as np
-import pandas as pd
-import sys, os, argparse
+# modules and config imports
+sys.path.insert(0, '')  # trick to enable imports of modules from the project root
 
-from modules.utils import config as cfg
-from modules import models as mdl
+import custom_config as cfg
+import models as mdl
 
+# variables and parameters
 saved_models_folder = cfg.saved_models_folder
 models_list         = cfg.models_names_list
 
-current_dirpath = os.getcwd()
+current_dirpath     = os.getcwd()
 output_model_folder = os.path.join(current_dirpath, saved_models_folder)
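The sys.path.insert(0, '') line prepends the current working directory to the module search path; that is what lets the scripts moved into subfolders import custom_config and models from the project root, as long as they are launched from that root. A minimal sketch of the pattern, using the module names from this commit:

    # minimal sketch of the root-import trick used by the refactored scripts;
    # it only works when the script is launched from the repository root
    import sys, os

    sys.path.insert(0, '')         # '' resolves to the current working directory

    import custom_config as cfg    # module sitting at the project root
    import models as mdl

    print(os.getcwd())             # should print the repository root

As a side note, sklearn.externals.joblib is deprecated in recent scikit-learn releases; the standalone joblib package (import joblib) is a drop-in replacement for the dump/load calls used here.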