
Create generate dataset 3D

Jérôme BUISINE 4 years ago
Parent
commit
0d651ec858
10 changed files with 667 additions and 28 deletions
  1. README.md (+4 -4)
  2. RESULTS.md (+0 -9)
  3. generate_dataset.py (+1 -1)
  4. generate_dataset_3D.py (+220 -0)
  5. predict_noisy_image_svd.py (+145 -0)
  6. predict_seuil_expe_maxwell_curve.py (+180 -0)
  7. run.sh (+3 -4)
  8. run_maxwell_simulation_custom.sh (+63 -0)
  9. run_test.sh (+34 -0)
  10. train_model_2D.py (+17 -10)

+ 4 - 4
README.md

@@ -18,16 +18,16 @@ Generate custom dataset from one reconstructed method or multiples (implemented
 python generate_dataset.py -h
 ```
 
-### Reconstruction parameter
+### Reconstruction parameter (--params)
 
 List of expected parameters for each reconstruction method:
-- **svd:** Singular Values Decomposition
+- **svd_reconstruction:** Singular Values Decomposition
   - Param definition: *interval data used for reconstruction (begin, end)*
   - Example: *"100, 200"*
-- **ipca:** Iterative Principal Component Analysis
+- **ipca_reconstruction:** Iterative Principal Component Analysis
   - Param definition: *number of components used for compression and batch size*
   - Example: *"50, 32"*
-- **fast_ica:**  Fast Iterative Component Analysis
+- **fast_ica_reconstruction:** Fast Independent Component Analysis
   - Param definition: *number of components used for compression*
   - Example: *"50"*
 

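For reference, the `--params` value of the new 3D script pairs one parameter group with each metric listed in `--metrics`, the groups being separated by `::` (the same pattern used in `run_test.sh` below). A minimal sketch, where the output name, scenes, renderer and zone count are placeholder/example values:

```bash
# one parameter group per metric, separated by '::' (values below are examples)
python generate_dataset_3D.py --output data/example_3D \
                              --metrics "svd_reconstruction, ipca_reconstruction, fast_ica_reconstruction" \
                              --params "100, 200 :: 50, 32 :: 50" \
                              --scenes "A, D, G, H" \
                              --nb_zones 10 \
                              --renderer maxwell \
                              --random 1
```
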
+ 0 - 9
RESULTS.md

@@ -1,9 +0,0 @@
-# 1. Create database
-    - 6 scenes for train
-    - 3 scenes for validation
-    - Equilibrer noise / final classes
-
-# 2. Test CNN (check if size is correct)
-
-# 3. Results
-    - noise_classification_img100.h5 :: loss: 0.1551 - acc: 0.9393 - val_loss: 1.2858 - val_acc: 0.7845

+ 1 - 1
generate_dataset.py

@@ -138,7 +138,7 @@ def main():
 
     parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
     parser.add_argument('--metric', type=str, 
-                                    help="metric choice in order to compute data (use 'all' if all metrics are needed)", 
+                                    help="metric choice in order to compute data", 
                                     choices=metric_choices,
                                     required=True)
     parser.add_argument('--param', type=str, help="specific param for metric (See README.md for further information)")

+ 220 - 0
generate_dataset_3D.py

@@ -0,0 +1,220 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Jun 19 11:47:42 2019
+
+@author: jbuisine
+"""
+
+import sys, os, argparse
+import numpy as np
+import random
+import time
+import json
+
+from PIL import Image
+from ipfml import processing, metrics, utils
+from skimage import color
+
+from modules.utils import config as cfg
+from modules.utils import data as dt
+
+from transformation_functions import svd_reconstruction
+from modules.classes.Transformation import Transformation
+
+# getting configuration information
+config_filename         = cfg.config_filename
+zone_folder             = cfg.zone_folder
+learned_folder          = cfg.learned_zones_folder
+min_max_filename        = cfg.min_max_filename_extension
+
+# define all scenes values
+scenes_list             = cfg.scenes_names
+scenes_indexes          = cfg.scenes_indices
+choices                 = cfg.normalization_choices
+path                    = cfg.dataset_path
+zones                   = cfg.zones_indices
+seuil_expe_filename     = cfg.seuil_expe_filename
+
+metric_choices          = cfg.metric_choices_labels
+output_data_folder      = cfg.output_data_folder
+
+generic_output_file_svd = '_random.csv'
+
+def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_zones = 4, _random=0):
+
+    output_train_filename = _filename + ".train"
+    output_test_filename = _filename + ".test"
+
+    if not '/' in output_train_filename:
+        raise Exception("Please select filename with directory path to save data. Example : data/dataset")
+
+    # create path if not exists
+    if not os.path.exists(output_data_folder):
+        os.makedirs(output_data_folder)
+
+    train_file_data = []
+    test_file_data  = []
+
+    scenes = os.listdir(path)
+    # remove min max file from scenes folder
+    scenes = [s for s in scenes if min_max_filename not in s]
+
+    # go ahead each scenes
+    for id_scene, folder_scene in enumerate(_scenes_list):
+
+        scene_path = os.path.join(path, folder_scene)
+
+        zones_indices = zones
+
+        # shuffle list of zones (=> randomly choose zones)
+        # only in random mode
+        if _random:
+            random.shuffle(zones_indices)
+
+         # store zones learned
+        learned_zones_indices = zones_indices[:_nb_zones]
+
+        # write into file
+        folder_learned_path = os.path.join(learned_folder, _filename.split('/')[1])
+
+        if not os.path.exists(folder_learned_path):
+            os.makedirs(folder_learned_path)
+
+        file_learned_path = os.path.join(folder_learned_path, folder_scene + '.csv')
+
+        with open(file_learned_path, 'w') as f:
+            for i in learned_zones_indices:
+                f.write(str(i) + ';')
+
+        for id_zone, index_folder in enumerate(zones_indices):
+
+            index_str = str(index_folder)
+            if len(index_str) < 2:
+                index_str = "0" + index_str
+            
+            current_zone_folder = "zone" + index_str
+            zone_path = os.path.join(scene_path, current_zone_folder)
+
+            # custom path for interval of reconstruction and metric
+
+            metrics_path = []
+
+            for transformation in _transformations:
+                metric_interval_path = os.path.join(zone_path, transformation.getTranformationPath())
+                metrics_path.append(metric_interval_path)
+
+            # as labels are same for each metric
+            for label in os.listdir(metrics_path[0]):
+
+                label_metrics_path = []
+
+                # do not reuse the global `path` variable here (it stores the dataset folder)
+                for metric_path in metrics_path:
+                    label_path = os.path.join(metric_path, label)
+                    label_metrics_path.append(label_path)
+
+                # getting images list for each metric
+                metrics_images_list = []
+                    
+                for label_path in label_metrics_path:
+                    images = sorted(os.listdir(label_path))
+                    metrics_images_list.append(images)
+
+                # construct each line using one image per metric (every metric folder contains the same image names)
+                for index_image in range(0, len(metrics_images_list[0])):
+
+                    images_path = []
+
+                    # getting the image with same index (and hence same name) for each metric (transformation)
+                    for index_metric in range(0, len(metrics_path)):
+                        img_path = os.path.join(label_metrics_path[index_metric], metrics_images_list[index_metric][index_image])
+                        images_path.append(img_path)
+
+                    if label == cfg.noisy_folder:
+                        line = '1;'
+                    else:
+                        line = '0;'
+
+                    # compute line information with all images paths
+                    for id_path, img_path in enumerate(images_path):
+                        if id_path < len(images_path) - 1:
+                            line = line + img_path + '::'
+                        else:
+                            line = line + img_path
+                    
+                    line = line + '\n'
+
+                    if id_zone < _nb_zones and folder_scene in _scenes:
+                        train_file_data.append(line)
+                    else:
+                        test_file_data.append(line)
+
+    train_file = open(output_train_filename, 'w')
+    test_file = open(output_test_filename, 'w')
+
+    random.shuffle(train_file_data)
+    random.shuffle(test_file_data)
+
+    for line in train_file_data:
+        train_file.write(line)
+
+    for line in test_file_data:
+        test_file.write(line)
+
+    train_file.close()
+    test_file.close()
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Compute specific dataset for model using several metrics")
+
+    parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
+    parser.add_argument('--metrics', type=str, 
+                                     help="list of metric choices used to compute data",
+                                     default='svd_reconstruction, ipca_reconstruction',
+                                     required=True)
+    parser.add_argument('--params', type=str, 
+                                    help="list of specific params for each metric choice (see README.md for further information about 3D mode)", 
+                                    default='100, 200 :: 50, 25',
+                                    required=True)
+    parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
+    parser.add_argument('--nb_zones', type=int, help='Number of zones to use for training data set', choices=list(range(1, 17)))
+    parser.add_argument('--renderer', type=str, help='Renderer choice in order to limit scenes used', choices=cfg.renderer_choices, default='all')
+    parser.add_argument('--random', type=int, help='Data will be randomly filled or not', choices=[0, 1])
+
+    args = parser.parse_args()
+
+    p_filename = args.output
+    p_metrics  = args.metrics.split(',')
+    p_params   = args.params.split('::')
+    p_scenes   = args.scenes.split(',')
+    p_nb_zones = args.nb_zones
+    p_renderer = args.renderer
+    p_random   = args.random
+
+    # create list of Transformation
+    transformations = []
+
+    for id, metric in enumerate(p_metrics):
+
+        # metrics and params come from comma / '::' separated strings, so strip surrounding spaces
+        metric = metric.strip()
+
+        if metric not in metric_choices:
+            raise ValueError("Unknown metric, please select a correct metric : ", metric_choices)
+
+        transformations.append(Transformation(metric, p_params[id].strip()))
+
+    # list all possibles choices of renderer
+    scenes_list = dt.get_renderer_scenes_names(p_renderer)
+    scenes_indices = dt.get_renderer_scenes_indices(p_renderer)
+
+    # getting scenes from indexes user selection
+    scenes_selected = []
+
+    for scene_id in p_scenes:
+        index = scenes_indices.index(scene_id.strip())
+        scenes_selected.append(scenes_list[index])
+
+    # create database using img folder (generate first time only)
+    generate_data_model(scenes_list, p_filename, transformations, scenes_selected, p_nb_zones, p_random)
+
+if __name__== "__main__":
+    main()
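
Each line of the generated `.train`/`.test` files starts with the class label (`1` when the images come from the noisy folder, `0` otherwise), followed by one reconstructed image path per requested metric, joined with `::`. A sketch of the format with hypothetical paths:

```
1;<path to svd_reconstruction image>::<path to ipca_reconstruction image>
0;<path to svd_reconstruction image>::<path to ipca_reconstruction image>
```
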

+ 145 - 0
predict_noisy_image_svd.py

@@ -0,0 +1,145 @@
+from sklearn.externals import joblib
+
+import numpy as np
+
+from ipfml import processing, utils
+from PIL import Image
+
+import sys, os, argparse, json
+
+from keras.models import model_from_json
+
+from modules.utils import config as cfg
+from modules.utils import data as dt
+
+path                  = cfg.dataset_path
+min_max_ext           = cfg.min_max_filename_extension
+metric_choices        = cfg.metric_choices_labels
+normalization_choices = cfg.normalization_choices
+
+custom_min_max_folder = cfg.min_max_custom_folder
+
+def main():
+
+    # getting all params
+    parser = argparse.ArgumentParser(description="Script which detects if an image is noisy or not using specific model")
+
+    parser.add_argument('--image', type=str, help='Image path')
+    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='0, 200')
+    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
+    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
+    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
+
+    args = parser.parse_args()
+
+    p_img_file   = args.image
+    p_model_file = args.model
+    p_interval   = list(map(int, args.interval.split(',')))
+    p_mode       = args.mode
+    p_metric     = args.metric
+    p_custom     = args.custom
+
+    if '.joblib' in p_model_file:
+        kind_model = 'sklearn'
+
+    if '.json' in p_model_file:
+        kind_model = 'keras'
+
+    if 'corr' in p_model_file:
+        corr_model = True
+
+        indices_corr_path = os.path.join(cfg.correlation_indices_folder, p_model_file.split('/')[1].replace('.json', '').replace('.joblib', '') + '.csv')
+
+        with open(indices_corr_path, 'r') as f:
+            data_corr_indices = [int(x) for x in f.readline().split(';') if x != '']
+    else:
+        corr_model = False
+
+
+    if kind_model == 'sklearn':
+        # load of model file
+        model = joblib.load(p_model_file)
+
+    if kind_model == 'keras':
+        with open(p_model_file, 'r') as f:
+            json_model = json.load(f)
+            model = model_from_json(json_model)
+            model.load_weights(p_model_file.replace('.json', '.h5'))
+
+            model.compile(loss='binary_crossentropy',
+                        optimizer='adam',
+                        metrics=['accuracy'])
+
+    # load image
+    img = Image.open(p_img_file)
+
+    data = dt.get_svd_data(p_metric, img)
+
+    # get interval values
+    begin, end = p_interval
+
+    # check if custom min max file is used
+    if p_custom:
+
+        if corr_model:
+            test_data = data[data_corr_indices]
+        else:
+            test_data = data[begin:end]
+
+        if p_mode == 'svdne':
+
+            # set min_max_filename if custom use
+            min_max_file_path = custom_min_max_folder + '/' +  p_custom
+
+            # need to read min_max_file
+            file_path = os.path.join(os.path.dirname(__file__), min_max_file_path)
+            with open(file_path, 'r') as f:
+                min_val = float(f.readline().replace('\n', ''))
+                max_val = float(f.readline().replace('\n', ''))
+
+            test_data = utils.normalize_arr_with_range(test_data, min_val, max_val)
+
+        if p_mode == 'svdn':
+            test_data = utils.normalize_arr(test_data)
+
+    else:
+
+        # check mode to normalize data
+        if p_mode == 'svdne':
+
+            # set min_max_filename if custom use
+            min_max_file_path = path + '/' + p_metric + min_max_ext
+
+            # need to read min_max_file
+            file_path = os.path.join(os.path.dirname(__file__), min_max_file_path)
+            with open(file_path, 'r') as f:
+                min_val = float(f.readline().replace('\n', ''))
+                max_val = float(f.readline().replace('\n', ''))
+
+            l_values = utils.normalize_arr_with_range(data, min_val, max_val)
+
+        elif p_mode == 'svdn':
+            l_values = utils.normalize_arr(data)
+        else:
+            l_values = data
+
+        # use the normalized values computed above, not the raw data
+        if corr_model:
+            test_data = l_values[data_corr_indices]
+        else:
+            test_data = l_values[begin:end]
+
+
+    # get prediction of model
+    if kind_model == 'sklearn':
+        prediction = model.predict([test_data])[0]
+
+    if kind_model == 'keras':
+        test_data = np.asarray(test_data).reshape(1, len(test_data), 1)
+        prediction = model.predict_classes([test_data])[0][0]
+
+    # output expected from others scripts
+    print(prediction)
+
+if __name__== "__main__":
+    main()
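
A hedged example of running this script on a single 200x200 block; the image and model paths are placeholders, and the `.joblib`/`.json` extension is what selects the sklearn or keras branch:

```bash
# placeholder paths; a sklearn model is assumed because of the .joblib extension
python predict_noisy_image_svd.py --image /tmp/block_to_test.png \
                                  --interval "0, 200" \
                                  --model saved_models/example_model.joblib \
                                  --mode svdn \
                                  --metric lab
```
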

+ 180 - 0
predict_seuil_expe_maxwell_curve.py

@@ -0,0 +1,180 @@
+from sklearn.externals import joblib
+
+import numpy as np
+
+from ipfml import processing
+from PIL import Image
+
+import sys, os, argparse
+import subprocess
+import time
+
+from modules.utils import config as cfg
+
+config_filename           = cfg.config_filename
+scenes_path               = cfg.dataset_path
+min_max_filename          = cfg.min_max_filename_extension
+threshold_expe_filename   = cfg.seuil_expe_filename
+
+threshold_map_folder      = cfg.threshold_map_folder
+threshold_map_file_prefix = cfg.threshold_map_folder + "_"
+
+zones                     = cfg.zones_indices
+maxwell_scenes            = cfg.maxwell_scenes_names
+normalization_choices     = cfg.normalization_choices
+metric_choices            = cfg.metric_choices_labels
+
+simulation_curves_zones   = "simulation_curves_zones_"
+tmp_filename              = '/tmp/__model__img_to_predict.png'
+
+current_dirpath = os.getcwd()
+
+
+def main():
+
+    p_custom = False
+        
+    parser = argparse.ArgumentParser(description="Script which predicts threshold using specific model")
+
+    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='0, 200')
+    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
+    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
+    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    #parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
+    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
+
+    args = parser.parse_args()
+
+    p_interval   = list(map(int, args.interval.split(',')))
+    p_model_file = args.model
+    p_mode       = args.mode
+    p_metric     = args.metric
+    #p_limit      = args.limit
+    p_custom     = args.custom
+
+    scenes = os.listdir(scenes_path)
+    scenes = [s for s in scenes if s in maxwell_scenes]
+
+    print(scenes)
+
+    # go ahead each scenes
+    for id_scene, folder_scene in enumerate(scenes):
+
+        # only take in consideration maxwell scenes
+        if folder_scene in maxwell_scenes:
+
+            print(folder_scene)
+
+            scene_path = os.path.join(scenes_path, folder_scene)
+
+            config_path = os.path.join(scene_path, config_filename)
+
+            with open(config_path, "r") as config_file:
+                last_image_name = config_file.readline().strip()
+                prefix_image_name = config_file.readline().strip()
+                start_index_image = config_file.readline().strip()
+                end_index_image = config_file.readline().strip()
+                step_counter = int(config_file.readline().strip())
+
+            threshold_expes = []
+            threshold_expes_found = []
+            block_predictions_str = []
+
+            # get zones list info
+            for index in zones:
+                index_str = str(index)
+                if len(index_str) < 2:
+                    index_str = "0" + index_str
+                zone_folder = "zone"+index_str
+
+                threshold_path_file = os.path.join(os.path.join(scene_path, zone_folder), threshold_expe_filename)
+
+                with open(threshold_path_file) as f:
+                    threshold = int(f.readline())
+                    threshold_expes.append(threshold)
+
+                    # Initialize default data to get detected model threshold found
+                    threshold_expes_found.append(int(end_index_image)) # by default use max
+
+                block_predictions_str.append(index_str + ";" + p_model_file + ";" + str(threshold) + ";" + str(start_index_image) + ";" + str(step_counter))
+
+            current_counter_index = int(start_index_image)
+            end_counter_index = int(end_index_image)
+
+            print(current_counter_index)
+
+            while(current_counter_index <= end_counter_index):
+
+                current_counter_index_str = str(current_counter_index)
+
+                while len(start_index_image) > len(current_counter_index_str):
+                    current_counter_index_str = "0" + current_counter_index_str
+
+                img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
+
+                current_img = Image.open(img_path)
+                img_blocks = processing.divide_in_blocks(current_img, (200, 200))
+
+                for id_block, block in enumerate(img_blocks):
+
+                    # check only if necessary for this scene (not already detected)
+                    #if not threshold_expes_detected[id_block]:
+
+                        tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
+                        block.save(tmp_file_path)
+
+                        python_cmd = "python predict_noisy_image_svd.py --image " + tmp_file_path + \
+                                        " --interval '" + ", ".join(map(str, p_interval)) + \
+                                        "' --model " + p_model_file  + \
+                                        " --mode " + p_mode + \
+                                        " --metric " + p_metric
+
+                        # specify use of custom file for min max normalization
+                        if p_custom:
+                            python_cmd = python_cmd + ' --custom ' + p_custom
+
+                        ## call command ##
+                        p = subprocess.Popen(python_cmd, stdout=subprocess.PIPE, shell=True)
+
+                        (output, err) = p.communicate()
+
+                        ## Wait for result ##
+                        p_status = p.wait()
+
+                        prediction = int(output)
+
+                        # save here in specific file of block all the predictions done
+                        block_predictions_str[id_block] = block_predictions_str[id_block] + ";" + str(prediction)
+
+                        print(str(id_block) + " : " + str(current_counter_index) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
+
+                current_counter_index += step_counter
+                print("------------------------")
+                print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)))
+                print("------------------------")
+
+            # end of scene => display of results
+
+            # construct path using model name for saving threshold map folder
+            model_threshold_path = os.path.join(threshold_map_folder, p_model_file.split('/')[-1].replace('.joblib', ''))
+
+            # create threshold model path if necessary
+            if not os.path.exists(model_threshold_path):
+                os.makedirs(model_threshold_path)
+
+            map_filename = os.path.join(model_threshold_path, simulation_curves_zones + folder_scene)
+            f_map = open(map_filename, 'w')
+
+            for line in block_predictions_str:
+                f_map.write(line + '\n')
+            f_map.close()
+
+            print("Scene " + str(id_scene + 1) + "/" + str(len(maxwell_scenes)) + " Done..")
+            print("------------------------")
+
+            print("Model predictions are saved into %s" % map_filename)
+            time.sleep(10)
+
+
+if __name__== "__main__":
+    main()
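
A sketch of a curve-simulation run over the Maxwell scenes, following the pattern of `run_maxwell_simulation_custom.sh`; the model name and custom min/max file are placeholders. Per-zone prediction curves are written as `simulation_curves_zones_<scene>` files under the configured threshold map folder:

```bash
# placeholder model and min/max file names
python predict_seuil_expe_maxwell_curve.py --interval "0,200" \
                                           --model saved_models/example_model.joblib \
                                           --mode svdn \
                                           --metric lab \
                                           --custom example_min_max_file
```
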

+ 3 - 4
run.sh

@@ -42,7 +42,7 @@ for begin in {80,85,90,95,100,105,110}; do
 
         python generate_dataset.py --output data/${OUTPUT_DATA_FILE} --metric ${svd_metric} --renderer ${renderer} --scenes ${scenes} --param "${begin}, ${end}" --nb_zones ${zone} --random 1
         
-        python train_model_2D.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
+        python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
       fi
     done
   done
@@ -67,8 +67,7 @@ for component in {50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200}; d
       echo "Run computation for IPCA model ${OUTPUT_DATA_FILE}"
 
       python generate_dataset.py --output data/${OUTPUT_DATA_FILE} --metric ${ipca_metric} --renderer ${renderer} --scenes ${scenes} --param "${component},${ipca_batch_size}" --nb_zones ${zone} --random 1
-      
-      python train_model_2D.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
+      python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
     fi
   done
 done
@@ -92,7 +91,7 @@ for component in {50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200}; d
 
       python generate_dataset.py --output data/${OUTPUT_DATA_FILE} --metric ${fast_ica_metric} --renderer ${renderer} --scenes ${scenes} --param "${component}" --nb_zones ${zone} --random 1
       
-      python train_model_2D.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
+      python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} &
     fi
   done
 done

+ 63 - 0
run_maxwell_simulation_custom.sh

@@ -0,0 +1,63 @@
+#!/bin/bash
+
+# file which contains model names we want to use for simulation
+simulate_models="simulate_models.csv"
+
+# selection of four scenes (only maxwell)
+scenes="A, D, G, H"
+VECTOR_SIZE=200
+
+for size in {"4","8","16","26","32","40"}; do
+    for metric in {"lab","mscn","mscn_revisited","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2","ica_diff","ipca_diff","svd_trunc_diff","svd_reconstruct"}; do
+
+        half=$(($size/2))
+        start=-$half
+
+        for counter in {0..4}; do
+             end=$(($start+$size))
+
+             if [ "$end" -gt "$VECTOR_SIZE" ]; then
+                 start=$(($VECTOR_SIZE-$size))
+                 end=$(($VECTOR_SIZE))
+             fi
+
+             if [ "$start" -lt "0" ]; then
+                 start=$((0))
+                 end=$(($size))
+             fi
+
+             for nb_zones in {4,6,8,10,12,14}; do
+
+                 for mode in {"svd","svdn","svdne"}; do
+                     for model in {"svm_model","ensemble_model","ensemble_model_v2"}; do
+
+                        FILENAME="data/${model}_N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${metric}_${mode}"
+                        MODEL_NAME="${model}_N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${metric}_${mode}"
+                        CUSTOM_MIN_MAX_FILENAME="N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${metric}_${mode}_min_max"
+
+                        if grep -xq "${MODEL_NAME}" "${simulate_models}"; then
+                            echo "Run simulation for model ${MODEL_NAME}"
+
+                            # by default regenerate model
+                            python generate_data_model_random.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+
+                            python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
+
+                            python predict_seuil_expe_maxwell_curve.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --custom ${CUSTOM_MIN_MAX_FILENAME}
+
+                            python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+
+                        fi
+                    done
+                done
+            done
+
+            if [ "$counter" -eq "0" ]; then
+                start=$(($start+50-$half))
+            else
+                start=$(($start+50))
+            fi
+
+        done
+    done
+done

+ 34 - 0
run_test.sh

@@ -0,0 +1,34 @@
+#!/bin/bash
+
+erased=$1
+
+# file which contains model names we want to use for simulation
+file_path="models_info/models_comparisons.csv"
+
+if [ "${erased}" == "Y" ]; then
+    echo "Previous data file erased..."
+    rm ${file_path}
+    mkdir -p models_info
+    touch ${file_path}
+
+    # add of header
+    echo 'model_name; global_train_size; global_test_size; filtered_train_size; filtered_test_size; f1_train; f1_test; recall_train; recall_test; precision_train; precision_test; acc_train; acc_test; roc_auc_train; roc_auc_test;' >> ${file_path}
+fi
+
+renderer="maxwell"
+scenes="A, D, G, H"
+
+svd_metric="svd_reconstruction"
+ipca_metric="ipca_reconstruction"
+fast_ica_metric="fast_ica_reconstruction"
+
+metrics="${svd_metric},${ipca_metric},${fast_ica_metric}"
+
+python generate_reconstructed_data.py --metric ${svd_metric} --param "100, 200"
+python generate_reconstructed_data.py --metric ${ipca_metric} --param "50, 10"
+python generate_reconstructed_data.py --metric ${fast_ica_metric} --param "50"
+
+OUTPUT_DATA_FILE="test_3D_model"
+
+# number of zones used for the training part of the dataset (example value)
+zone=10
+
+python generate_dataset_3D.py --output data/${OUTPUT_DATA_FILE} --metrics "${metrics}" --renderer ${renderer} --scenes "${scenes}" --params "100, 200 :: 50, 10 :: 50" --nb_zones ${zone} --random 1
+python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} --n_channels 3

+ 17 - 10
train_model_2D.py

@@ -22,12 +22,6 @@ from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, reca
 img_width, img_height = 200, 200
 batch_size = 32
 
-# 1 because we have 1 color canal
-if K.image_data_format() == 'channels_first':
-    input_shape = (1, img_width, img_height)
-else:
-    input_shape = (img_width, img_height, 1)
-
 def auc(y_true, y_pred):
     auc = tf.metrics.auc(y_true, y_pred)[1]
     K.get_session().run(tf.local_variables_initializer())
@@ -96,6 +90,7 @@ def main():
     parser.add_argument('--batch_size', type=int, help='batch size used as model input', default=cfg.keras_batch)
     parser.add_argument('--epochs', type=int, help='number of epochs used for training model', default=cfg.keras_epochs)
     parser.add_argument('--val_size', type=int, help='percent of validation data during training process', default=cfg.val_dataset_size)
+    parser.add_argument('--n_channels', type=int, help='number of channels for 3D input (one channel per metric)', default=1)
 
     args = parser.parse_args()
 
@@ -104,7 +99,14 @@ def main():
     p_batch_size = args.batch_size
     p_epochs     = args.epochs
     p_val_size   = args.val_size
-
+    p_n_channels = args.n_channels
+
+    # specify the number of dimensions
+    if K.image_data_format() == 'channels_first':
+        input_shape = (p_n_channels, img_width, img_height)
+    else:
+        input_shape = (img_width, img_height, p_n_channels)
+        
     ########################
     # 1. Get and prepare data
     ########################
@@ -120,9 +122,14 @@ def main():
     dataset_test = shuffle(dataset_test)
 
     print("Reading all images data...")
-    dataset_train[1] = dataset_train[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE).reshape(input_shape))
-    dataset_test[1] = dataset_test[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE).reshape(input_shape))
-    
+
+    # `::` is the separator between the image paths of each metric
+    if p_n_channels > 1:
+        # one grayscale image per metric, merged along the channel axis
+        def read_images(x):
+            images = cv2.merge([cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in x.split('::')])
+            return images.transpose(2, 0, 1) if K.image_data_format() == 'channels_first' else images
+        dataset_train[1] = dataset_train[1].apply(read_images)
+        dataset_test[1] = dataset_test[1].apply(read_images)
+    else:
+        dataset_train[1] = dataset_train[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE).reshape(input_shape))
+        dataset_test[1] = dataset_test[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE).reshape(input_shape))
 
     # get dataset with equal number of classes occurences
     noisy_df_train = dataset_train[dataset_train.ix[:, 0] == 1]
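
To close the loop, a hedged example of training on a 3D dataset built from three metrics (hence three input channels); the data file name follows `run_test.sh`, while the model output name is a placeholder:

```bash
# three metrics were used to build the dataset, hence --n_channels 3
python train_model_2D.py --data data/test_3D_model --output example_3D_model --n_channels 3
```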