
Refactoring of prediction scripts

Jérôme BUISINE, 4 years ago
Parent commit: 99d17ebd30

+ 150 - 0
prediction/predict_noisy_image_svd.py

@@ -0,0 +1,150 @@
+# main imports
+import sys, os, argparse, json
+import numpy as np
+
+# models imports
+from keras.models import model_from_json
+from sklearn.externals import joblib
+
+# image processing imports
+from ipfml import processing, utils
+from PIL import Image
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from data_attributes import get_svd_data
+
+# variables and parameters
+path                  = cfg.dataset_path
+min_max_ext           = cfg.min_max_filename_extension
+features_choices      = cfg.features_choices_labels
+normalization_choices = cfg.normalization_choices
+
+custom_min_max_folder = cfg.min_max_custom_folder
+
+def main():
+
+    # getting all params
+    parser = argparse.ArgumentParser(description="Script which detects whether an image is noisy or not using a specific model")
+
+    parser.add_argument('--image', type=str, help='Image path')
+    parser.add_argument('--interval', type=str, help='Interval of SVD values to keep (e.g. "0, 200")', default='0, 200')
+    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
+    parser.add_argument('--mode', type=str, help='Kind of normalization level desired', choices=normalization_choices)
+    parser.add_argument('--feature', type=str, help='Feature data choice', choices=features_choices)
+    parser.add_argument('--custom', type=str, help='Name of custom min-max file if renormalization of data is used', default=False)
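+
+    # hypothetical invocation (paths and feature name are placeholders):
+    #   python predict_noisy_image_svd.py --image /tmp/block.png --interval "0, 200" \
+    #       --model saved_models/model.joblib --mode svdne --feature lab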
+
+    args = parser.parse_args()
+
+    p_img_file   = args.image
+    p_model_file = args.model
+    p_interval   = list(map(int, args.interval.split(',')))
+    p_mode       = args.mode
+    p_feature    = args.feature
+    p_custom     = args.custom
+
+    if '.joblib' in p_model_file:
+        kind_model = 'sklearn'
+
+    if '.json' in p_model_file:
+        kind_model = 'keras'
+
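+    # models whose filename contains 'corr' were trained on a subset of SVD
+    # components; the indices kept are read back from a ';'-separated csv file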
+    if 'corr' in p_model_file:
+        corr_model = True
+
+        indices_corr_path = os.path.join(cfg.correlation_indices_folder, p_model_file.split('/')[1].replace('.json', '').replace('.joblib', '') + '.csv')
+
+        with open(indices_corr_path, 'r') as f:
+            data_corr_indices = [int(x) for x in f.readline().split(';') if x != '']
+    else:
+        corr_model = False
+
+
+    if kind_model == 'sklearn':
+        # load sklearn model file
+        model = joblib.load(p_model_file)
+
+    if kind_model == 'keras':
+        with open(p_model_file, 'r') as f:
+            json_model = json.load(f)
+            model = model_from_json(json_model)
+            model.load_weights(p_model_file.replace('.json', '.h5'))
+
+            model.compile(loss='binary_crossentropy',
+                        optimizer='adam',
+                        metrics=['accuracy'])
+
+    # load image
+    img = Image.open(p_img_file)
+
+    data = get_svd_data(p_feature, img)
+
+    # get interval values
+    begin, end = p_interval
+
+    # check if custom min max file is used
+    if p_custom:
+
+        if corr_model:
+            test_data = data[data_corr_indices]
+        else:
+            test_data = data[begin:end]
+
+        if p_mode == 'svdne':
+
+            # set min_max_filename if custom use
+            min_max_file_path = custom_min_max_folder + '/' +  p_custom
+
+            # need to read min_max_file
+            file_path = os.path.join(os.path.dirname(__file__), min_max_file_path)
+            with open(file_path, 'r') as f:
+                min_val = float(f.readline().replace('\n', ''))
+                max_val = float(f.readline().replace('\n', ''))
+
+            test_data = utils.normalize_arr_with_range(test_data, min_val, max_val)
+
+        if p_mode == 'svdn':
+            test_data = utils.normalize_arr(test_data)
+
+    else:
+
+        # check mode to normalize data
+        if p_mode == 'svdne':
+
+            # set min_max_filename from the default feature min/max file
+            min_max_file_path = path + '/' + p_feature + min_max_ext
+
+            # need to read min_max_file
+            file_path = os.path.join(os.path.dirname(__file__), min_max_file_path)
+            with open(file_path, 'r') as f:
+                min_val = float(f.readline().replace('\n', ''))
+                max_val = float(f.readline().replace('\n', ''))
+
+            l_values = utils.normalize_arr_with_range(data, min_val, max_val)
+
+        elif p_mode == 'svdn':
+            l_values = utils.normalize_arr(data)
+        else:
+            l_values = data
+
+        # select values from the normalized data computed above
+        if corr_model:
+            test_data = l_values[data_corr_indices]
+        else:
+            test_data = l_values[begin:end]
+
+
+    # get prediction of model
+    if kind_model == 'sklearn':
+        prediction = model.predict([test_data])[0]
+
+    if kind_model == 'keras':
+        test_data = np.asarray(test_data).reshape(1, len(test_data), 1)
+        prediction = model.predict_classes([test_data])[0][0]
+
+    # output expected by other scripts
+    print(prediction)
+
+if __name__ == "__main__":
+    main()

+ 216 - 0
prediction/predict_seuil_expe.py

@@ -0,0 +1,216 @@
+# main imports
+import sys, os, argparse
+import subprocess
+import time
+import numpy as np
+
+# image processing imports
+from ipfml.processing import segmentation
+from PIL import Image
+
+# models imports
+from sklearn.externals import joblib
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+
+
+# variables and parameters
+scenes_path               = cfg.dataset_path
+min_max_filename          = cfg.min_max_filename_extension
+threshold_expe_filename   = cfg.seuil_expe_filename
+
+threshold_map_folder      = cfg.threshold_map_folder
+threshold_map_file_prefix = cfg.threshold_map_folder + "_"
+
+zones                     = cfg.zones_indices
+normalization_choices     = cfg.normalization_choices
+features_choices          = cfg.features_choices_labels
+
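+# '__model__' is replaced by the model filename before each block is saved to disk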
+tmp_filename              = '/tmp/__model__img_to_predict.png'
+
+current_dirpath = os.getcwd()
+
+def main():
+
+    p_custom = False
+
+    parser = argparse.ArgumentParser(description="Script which predicts the threshold using a specific model")
+
+    parser.add_argument('--interval', type=str, help='Interval of SVD values to keep (e.g. "0, 200")', default='0, 200')
+    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
+    parser.add_argument('--mode', type=str, help='Kind of normalization level desired', choices=normalization_choices)
+    parser.add_argument('--feature', type=str, help='Feature data choice', choices=features_choices)
+    parser.add_argument('--limit_detection', type=int, help='Number of identical predictions required to stop threshold prediction', default=2)
+    parser.add_argument('--custom', type=str, help='Name of custom min-max file if renormalization of data is used', default=False)
+
+    args = parser.parse_args()
+
+    # keep p_interval as a string, it is only forwarded to the prediction command
+    p_interval   = args.interval
+    p_model_file = args.model
+    p_mode       = args.mode
+    p_feature    = args.feature
+    p_limit      = args.limit_detection
+    p_custom     = args.custom
+
+    scenes = os.listdir(scenes_path)
+    scenes = [s for s in scenes if not min_max_filename in s]
+
+    # go through each scene
+    for id_scene, folder_scene in enumerate(scenes):
+
+        print(folder_scene)
+
+        scene_path = os.path.join(scenes_path, folder_scene)
+
+        threshold_expes = []
+        threshold_expes_detected = []
+        threshold_expes_counter = []
+        threshold_expes_found = []
+
+        # get all images of folder
+        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+
+        start_quality_image = dt.get_scene_image_quality(scene_images[0])
+        end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
+
+        # get zones list info
+        for index in zones:
+            index_str = str(index)
+            if len(index_str) < 2:
+                index_str = "0" + index_str
+            zone_folder = "zone"+index_str
+
+            threshold_path_file = os.path.join(os.path.join(scene_path, zone_folder), threshold_expe_filename)
+
+            with open(threshold_path_file) as f:
+                threshold = int(f.readline())
+                threshold_expes.append(threshold)
+
+                # Initialize default data to get detected model threshold found
+                threshold_expes_detected.append(False)
+                threshold_expes_counter.append(0)
+                threshold_expes_found.append(end_quality_image) # by default use max
+
+        check_all_done = False
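+
+        # a zone is marked as detected once its block has been predicted 'not noisy'
+        # (prediction 0) p_limit times in a row; the image quality reached at that
+        # point is kept as the predicted threshold for the zone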
+
+        # for each image
+        for img_path in scene_images:
+
+            current_img = Image.open(img_path)
+            current_quality_image = dt.get_scene_image_quality(img_path)
+            current_image_postfix = dt.get_scene_image_postfix(img_path)
+
+            img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))
+
+            check_all_done = all(threshold_expes_detected)
+
+            if check_all_done:
+                break
+
+            for id_block, block in enumerate(img_blocks):
+
+                # check only if necessary for this scene (not already detected)
+                if not threshold_expes_detected[id_block]:
+
+                    tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
+                    block.save(tmp_file_path)
+
+                    python_cmd = "python predict_noisy_image_svd.py --image " + tmp_file_path + \
+                                    " --interval '" + p_interval + \
+                                    "' --model " + p_model_file  + \
+                                    " --mode " + p_mode + \
+                                    " --feature " + p_feature
+
+                    # specify use of custom file for min max normalization
+                    if p_custom:
+                        python_cmd = python_cmd + ' --custom ' + p_custom
+
+
+                    ## call command ##
+                    p = subprocess.Popen(python_cmd, stdout=subprocess.PIPE, shell=True)
+
+                    (output, err) = p.communicate()
+
+                    ## Wait for result ##
+                    p_status = p.wait()
+
+                    prediction = int(output)
+
+                    if prediction == 0:
+                        threshold_expes_counter[id_block] = threshold_expes_counter[id_block] + 1
+                    else:
+                        threshold_expes_counter[id_block] = 0
+
+                    if threshold_expes_counter[id_block] == p_limit:
+                        threshold_expes_detected[id_block] = True
+                        threshold_expes_found[id_block] = current_quality_image
+
+                    print(str(id_block) + " : " + current_image_postfix + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
+
+            print("------------------------")
+            print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)))
+            print("------------------------")
+
+        # end of scene => display of results
+
+        # construct path using model name for saving threshold map folder
+        model_threshold_path = os.path.join(threshold_map_folder, p_model_file.split('/')[-1].replace('.joblib', ''))
+
+        # create threshold model path if necessary
+        if not os.path.exists(model_threshold_path):
+            os.makedirs(model_threshold_path)
+
+        abs_dist = []
+
+        map_filename = os.path.join(model_threshold_path, threshold_map_file_prefix + folder_scene)
+        f_map = open(map_filename, 'w')
+
+        line_information = ""
+
+        # default header
+        f_map.write('|  |    |    |  |\n')
+        f_map.write('---|----|----|---\n')
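+        # each cell shows 'predicted / expected' threshold for one zone, 4 zones per row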
+        for id, threshold in enumerate(threshold_expes_found):
+
+            line_information += str(threshold) + " / " + str(threshold_expes[id]) + " | "
+            abs_dist.append(abs(threshold - threshold_expes[id]))
+
+            if (id + 1) % 4 == 0:
+                f_map.write(line_information + '\n')
+                line_information = ""
+
+        f_map.write(line_information + '\n')
+
+        min_abs_dist = min(abs_dist)
+        max_abs_dist = max(abs_dist)
+        avg_abs_dist = sum(abs_dist) / len(abs_dist)
+
+        f_map.write('\nScene information : ')
+        f_map.write('\n- BEGIN : ' + str(start_quality_image))
+        f_map.write('\n- END : ' + str(end_quality_image))
+
+        f_map.write('\n\nDistances information : ')
+        f_map.write('\n- MIN : ' + str(min_abs_dist))
+        f_map.write('\n- MAX : ' + str(max_abs_dist))
+        f_map.write('\n- AVG : ' + str(avg_abs_dist))
+
+        f_map.write('\n\nOther information : ')
+        f_map.write('\n- Detection limit : ' + str(p_limit))
+
+        # close threshold map file
+        f_map.close()
+
+        print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)) + " Done..")
+        print("------------------------")
+
+        time.sleep(1)
+
+
+if __name__ == "__main__":
+    main()

+ 218 - 0
prediction/predict_seuil_expe_maxwell.py

@@ -0,0 +1,218 @@
+# main imports
+import sys, os, argparse
+import subprocess
+import time
+import numpy as np
+
+# image processing imports
+from ipfml.processing import segmentation
+from PIL import Image
+
+# models imports
+from sklearn.externals import joblib
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+
+
+# variables and parameters
+scenes_path               = cfg.dataset_path
+min_max_filename          = cfg.min_max_filename_extension
+threshold_expe_filename   = cfg.seuil_expe_filename
+
+threshold_map_folder      = cfg.threshold_map_folder
+threshold_map_file_prefix = cfg.threshold_map_folder + "_"
+
+zones                     = cfg.zones_indices
+maxwell_scenes            = cfg.maxwell_scenes_names
+normalization_choices     = cfg.normalization_choices
+features_choices          = cfg.features_choices_labels
+
+tmp_filename              = '/tmp/__model__img_to_predict.png'
+
+current_dirpath = os.getcwd()
+
+def main():
+
+    # by default..
+    p_custom = False
+
+    parser = argparse.ArgumentParser(description="Script which predicts the threshold using a specific model")
+
+    parser.add_argument('--interval', type=str, help='Interval of SVD values to keep (e.g. "0, 200")', default='0, 200')
+    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
+    parser.add_argument('--mode', type=str, help='Kind of normalization level desired', choices=normalization_choices)
+    parser.add_argument('--feature', type=str, help='Feature data choice', choices=features_choices)
+    parser.add_argument('--limit_detection', type=int, help='Number of identical predictions required to stop threshold prediction', default=2)
+    parser.add_argument('--custom', type=str, help='Name of custom min-max file if renormalization of data is used', default=False)
+
+    args = parser.parse_args()
+
+    # keep p_interval as a string, it is only forwarded to the prediction command
+    p_interval   = args.interval
+    p_model_file = args.model
+    p_mode       = args.mode
+    p_feature    = args.feature
+    p_limit      = args.limit_detection
+    p_custom     = args.custom
+
+    scenes = os.listdir(scenes_path)
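+    # keep only the Maxwell scene subset defined in custom_config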
+    scenes = [s for s in scenes if s in maxwell_scenes]
+
+    # go through each scene
+    for id_scene, folder_scene in enumerate(scenes):
+
+        # only consider Maxwell scenes (redundant with the filter above, kept as a safeguard)
+        if folder_scene in maxwell_scenes:
+
+            print(folder_scene)
+
+            scene_path = os.path.join(scenes_path, folder_scene)
+
+            threshold_expes = []
+            threshold_expes_detected = []
+            threshold_expes_counter = []
+            threshold_expes_found = []
+
+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+
+            start_quality_image = dt.get_scene_image_quality(scene_images[0])
+            end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
+
+
+            # get zones list info
+            for index in zones:
+                index_str = str(index)
+                if len(index_str) < 2:
+                    index_str = "0" + index_str
+                zone_folder = "zone"+index_str
+
+                threshold_path_file = os.path.join(os.path.join(scene_path, zone_folder), threshold_expe_filename)
+
+                with open(threshold_path_file) as f:
+                    threshold = int(f.readline())
+                    threshold_expes.append(threshold)
+
+                    # Initialize default data to get detected model threshold found
+                    threshold_expes_detected.append(False)
+                    threshold_expes_counter.append(0)
+                    threshold_expes_found.append(end_quality_image) # by default use max
+
+            check_all_done = False
+
+            # for each image
+            for img_path in scene_images:
+
+                current_img = Image.open(img_path)
+                current_postfix_image = dt.get_scene_image_postfix(img_path)
+
+                img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))
+
+                check_all_done = all(threshold_expes_detected)
+
+                if check_all_done:
+                    break
+
+                for id_block, block in enumerate(img_blocks):
+
+                    # check only if necessary for this scene (not already detected)
+                    if not threshold_expes_detected[id_block]:
+
+                        tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
+                        block.save(tmp_file_path)
+
+                        python_cmd = "python predict_noisy_image_svd.py --image " + tmp_file_path + \
+                                        " --interval '" + p_interval + \
+                                        "' --model " + p_model_file  + \
+                                        " --mode " + p_mode + \
+                                        " --feature " + p_feature
+
+                        # specify use of custom file for min max normalization
+                        if p_custom:
+                            python_cmd = python_cmd + ' --custom ' + p_custom
+
+                        ## call command ##
+                        p = subprocess.Popen(python_cmd, stdout=subprocess.PIPE, shell=True)
+
+                        (output, err) = p.communicate()
+
+                        ## Wait for result ##
+                        p_status = p.wait()
+
+                        prediction = int(output)
+
+                        if prediction == 0:
+                            threshold_expes_counter[id_block] = threshold_expes_counter[id_block] + 1
+                        else:
+                            threshold_expes_counter[id_block] = 0
+
+                        if threshold_expes_counter[id_block] == p_limit:
+                            threshold_expes_detected[id_block] = True
+                            threshold_expes_found[id_block] = int(current_postfix_image)
+
+                        print(str(id_block) + " : " + current_postfix_image + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
+
+                print("------------------------")
+                print("Scene " + str(id_scene + 1) + "/" + str(len(maxwell_scenes)))
+                print("------------------------")
+
+            # end of scene => display of results
+
+            # construct path using model name for saving threshold map folder
+            model_threshold_path = os.path.join(threshold_map_folder, p_model_file.split('/')[-1].replace('.joblib', ''))
+
+            # create threshold model path if necessary
+            if not os.path.exists(model_threshold_path):
+                os.makedirs(model_threshold_path)
+
+            abs_dist = []
+
+            map_filename = os.path.join(model_threshold_path, threshold_map_file_prefix + folder_scene)
+            f_map = open(map_filename, 'w')
+
+            line_information = ""
+
+            # default header
+            f_map.write('|  |    |    |  |\n')
+            f_map.write('---|----|----|---\n')
+            for id, threshold in enumerate(threshold_expes_found):
+
+                line_information += str(threshold) + " / " + str(threshold_expes[id]) + " | "
+                abs_dist.append(abs(threshold - threshold_expes[id]))
+
+                if (id + 1) % 4 == 0:
+                    f_map.write(line_information + '\n')
+                    line_information = ""
+
+            f_map.write(line_information + '\n')
+
+            min_abs_dist = min(abs_dist)
+            max_abs_dist = max(abs_dist)
+            avg_abs_dist = sum(abs_dist) / len(abs_dist)
+
+            f_map.write('\nScene information : ')
+            f_map.write('\n- BEGIN : ' + str(start_quality_image))
+            f_map.write('\n- END : ' + str(end_quality_image))
+
+            f_map.write('\n\nDistances information : ')
+            f_map.write('\n- MIN : ' + str(min_abs_dist))
+            f_map.write('\n- MAX : ' + str(max_abs_dist))
+            f_map.write('\n- AVG : ' + str(avg_abs_dist))
+
+            f_map.write('\n\nOther information : ')
+            f_map.write('\n- Detection limit : ' + str(p_limit))
+
+            # close threshold map file
+            f_map.close()
+
+            print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)) + " Done..")
+            print("------------------------")
+
+            time.sleep(10)
+
+
+if __name__ == "__main__":
+    main()

+ 175 - 0
prediction/predict_seuil_expe_maxwell_curve.py

@@ -0,0 +1,175 @@
+# main imports
+import sys, os, argparse
+import subprocess
+import time
+import numpy as np
+
+# image processing imports
+from ipfml.processing import segmentation
+from PIL import Image
+
+# models imports
+from sklearn.externals import joblib
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+
+
+# variables and parameters
+scenes_path               = cfg.dataset_path
+min_max_filename          = cfg.min_max_filename_extension
+threshold_expe_filename   = cfg.seuil_expe_filename
+
+threshold_map_folder      = cfg.threshold_map_folder
+threshold_map_file_prefix = cfg.threshold_map_folder + "_"
+
+zones                     = cfg.zones_indices
+maxwell_scenes            = cfg.maxwell_scenes_names
+normalization_choices     = cfg.normalization_choices
+features_choices          = cfg.features_choices_labels
+
+simulation_curves_zones   = "simulation_curves_zones_"
+tmp_filename              = '/tmp/__model__img_to_predict.png'
+
+current_dirpath = os.getcwd()
+
+
+def main():
+
+    p_custom = False
+
+    parser = argparse.ArgumentParser(description="Script which predicts the threshold using a specific model")
+
+    parser.add_argument('--interval', type=str, help='Interval of SVD values to keep (e.g. "0, 200")', default='0, 200')
+    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
+    parser.add_argument('--mode', type=str, help='Kind of normalization level desired', choices=normalization_choices)
+    parser.add_argument('--feature', type=str, help='Feature data choice', choices=features_choices)
+    #parser.add_argument('--limit_detection', type=int, help='Number of identical predictions required to stop threshold prediction', default=2)
+    parser.add_argument('--custom', type=str, help='Name of custom min-max file if renormalization of data is used', default=False)
+
+    args = parser.parse_args()
+
+    # keep p_interval as it is
+    p_interval   = args.interval
+    p_model_file = args.model
+    p_mode       = args.mode
+    p_feature    = args.feature
+    #p_limit      = args.limit
+    p_custom     = args.custom
+
+    scenes = os.listdir(scenes_path)
+    scenes = [s for s in scenes if s in maxwell_scenes]
+
+    print(scenes)
+
+    # go through each scene
+    for id_scene, folder_scene in enumerate(scenes):
+
+        # only consider Maxwell scenes (redundant with the filter above, kept as a safeguard)
+        if folder_scene in maxwell_scenes:
+
+            print(folder_scene)
+
+            scene_path = os.path.join(scenes_path, folder_scene)
+
+            threshold_expes = []
+            threshold_expes_found = []
+            block_predictions_str = []
+
+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+
+            start_quality_image = dt.get_scene_image_quality(scene_images[0])
+            end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
+            # using first two images find the step of quality used
+            quality_step_image  = dt.get_scene_image_quality(scene_images[1]) - start_quality_image
+
+            # get zones list info
+            for index in zones:
+                index_str = str(index)
+                if len(index_str) < 2:
+                    index_str = "0" + index_str
+                zone_folder = "zone"+index_str
+
+                threshold_path_file = os.path.join(os.path.join(scene_path, zone_folder), threshold_expe_filename)
+
+                with open(threshold_path_file) as f:
+                    threshold = int(f.readline())
+                    threshold_expes.append(threshold)
+
+                    # Initialize default data to get detected model threshold found
+                    threshold_expes_found.append(end_quality_image) # by default use max
+
+                block_predictions_str.append(index_str + ";" + p_model_file + ";" + str(threshold) + ";" + str(start_quality_image) + ";" + str(quality_step_image))
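+                # each appended entry later becomes one line of the curves file:
+                # zone;model;expected_threshold;start_quality;quality_step;pred_1;...;pred_n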
+
+
+            # for each image
+            for img_path in scene_images:
+
+                current_img = Image.open(img_path)
+                current_quality_image = dt.get_scene_image_quality(img_path)
+
+                img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))
+
+                for id_block, block in enumerate(img_blocks):
+
+                    # every block of every image is predicted here (no early stop),
+                    # so that full simulation curves can be built afterwards
+                    tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
+                    block.save(tmp_file_path)
+
+                    python_cmd_line = "python predict_noisy_image_svd.py --image {0} --interval '{1}' --model {2} --mode {3} --feature {4}"
+                    python_cmd = python_cmd_line.format(tmp_file_path, p_interval, p_model_file, p_mode, p_feature)
+
+                    # specify use of custom file for min max normalization
+                    if p_custom:
+                        python_cmd = python_cmd + ' --custom ' + p_custom
+
+                    ## call command ##
+                    p = subprocess.Popen(python_cmd, stdout=subprocess.PIPE, shell=True)
+
+                    (output, err) = p.communicate()
+
+                    ## Wait for result ##
+                    p_status = p.wait()
+
+                    prediction = int(output)
+
+                    # save all predictions for this block in its specific line
+                    block_predictions_str[id_block] = block_predictions_str[id_block] + ";" + str(prediction)
+
+                    print(str(id_block) + " : " + str(current_quality_image) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
+
+                print("------------------------")
+                print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)))
+                print("------------------------")
+
+            # end of scene => display of results
+
+            # construct path using model name for saving threshold map folder
+            model_threshold_path = os.path.join(threshold_map_folder, p_model_file.split('/')[-1].replace('.joblib', ''))
+
+            # create threshold model path if necessary
+            if not os.path.exists(model_threshold_path):
+                os.makedirs(model_threshold_path)
+
+            map_filename = os.path.join(model_threshold_path, simulation_curves_zones + folder_scene)
+            f_map = open(map_filename, 'w')
+
+            for line in block_predictions_str:
+                f_map.write(line + '\n')
+            f_map.close()
+
+            print("Scene " + str(id_scene + 1) + "/" + str(len(maxwell_scenes)) + " Done..")
+            print("------------------------")
+
+            print("Model predictions are saved into %s" % map_filename)
+            time.sleep(10)
+
+
+if __name__ == "__main__":
+    main()

+ 114 - 0
prediction/prediction_scene.py

@@ -0,0 +1,114 @@
+# main imports
+import sys, os, argparse
+import numpy as np
+import json
+import pandas as pd
+
+# models imports
+from sklearn.externals import joblib
+from sklearn.metrics import accuracy_score
+from keras.models import Sequential
+from keras.layers import Conv1D, MaxPooling1D
+from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
+from keras import backend as K
+from keras.models import model_from_json
+from keras.wrappers.scikit_learn import KerasClassifier
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+
+# parameters and variables
+output_model_folder = cfg.saved_models_folder
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Give model performance on specific scene")
+
+    parser.add_argument('--data', type=str, help='dataset filename prefix of specific scene (without .train and .test)')
+    parser.add_argument('--model', type=str, help='saved model (Keras or SKlearn) filename with extension')
+    parser.add_argument('--output', type=str, help="filename to store predicted and performance model obtained on scene")
+    parser.add_argument('--scene', type=str, help="scene index to predict", choices=cfg.scenes_indices)
+
+    args = parser.parse_args()
+
+    p_data_file  = args.data
+    p_model_file = args.model
+    p_output     = args.output
+    p_scene      = args.scene
+
+    if '.joblib' in p_model_file:
+        kind_model = 'sklearn'
+        model_ext = '.joblib'
+
+    if '.json' in p_model_file:
+        kind_model = 'keras'
+        model_ext = '.json'
+
+    if not os.path.exists(output_model_folder):
+        os.makedirs(output_model_folder)
+
+    dataset = pd.read_csv(p_data_file, header=None, sep=";")
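+
+    # dataset layout: first column is the label (1 = noisy, 0 = not noisy),
+    # remaining columns contain the feature vector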
+
+    y_dataset = dataset.iloc[:, 0]
+    x_dataset = dataset.iloc[:, 1:]
+
+    noisy_dataset = dataset[dataset.iloc[:, 0] == 1]
+    not_noisy_dataset = dataset[dataset.iloc[:, 0] == 0]
+
+    y_noisy_dataset = noisy_dataset.iloc[:, 0]
+    x_noisy_dataset = noisy_dataset.iloc[:, 1:]
+
+    y_not_noisy_dataset = not_noisy_dataset.iloc[:, 0]
+    x_not_noisy_dataset = not_noisy_dataset.iloc[:, 1:]
+
+    if kind_model == 'keras':
+        with open(p_model_file, 'r') as f:
+            json_model = json.load(f)
+            model = model_from_json(json_model)
+            model.load_weights(p_model_file.replace('.json', '.h5'))
+
+            model.compile(loss='binary_crossentropy',
+                  optimizer='adam',
+                  metrics=['accuracy'])
+
+        _, vector_size = np.array(x_dataset).shape
+
+        # reshape all data to (samples, vector_size, 1) as expected by Conv1D layers
+        x_dataset = np.array(x_dataset).reshape(len(x_dataset), vector_size, 1)
+        x_noisy_dataset = np.array(x_noisy_dataset).reshape(len(x_noisy_dataset), vector_size, 1)
+        x_not_noisy_dataset = np.array(x_not_noisy_dataset).reshape(len(x_not_noisy_dataset), vector_size, 1)
+
+
+    if kind_model == 'sklearn':
+        model = joblib.load(p_model_file)
+
+    if kind_model == 'keras':
+        y_pred = model.predict_classes(x_dataset)
+        y_noisy_pred = model.predict_classes(x_noisy_dataset)
+        y_not_noisy_pred = model.predict_classes(x_not_noisy_dataset)
+
+    if kind_model == 'sklearn':
+        y_pred = model.predict(x_dataset)
+        y_noisy_pred = model.predict(x_noisy_dataset)
+        y_not_noisy_pred = model.predict(x_not_noisy_dataset)
+
+    accuracy_global = accuracy_score(y_dataset, y_pred)
+    accuracy_noisy = accuracy_score(y_noisy_dataset, y_noisy_pred)
+    accuracy_not_noisy = accuracy_score(y_not_noisy_dataset, y_not_noisy_pred)
+
+    if p_scene:
+        print(p_scene + " | " + str(accuracy_global) + " | " + str(accuracy_noisy) + " | " + str(accuracy_not_noisy))
+    else:
+        print(str(accuracy_global) + " \t | " + str(accuracy_noisy) + " \t | " + str(accuracy_not_noisy))
+
+        with open(p_output, 'w') as f:
+            f.write("Global accuracy found %s " % str(accuracy_global))
+            f.write("Noisy accuracy found %s " % str(accuracy_noisy))
+            f.write("Not noisy accuracy found %s " % str(accuracy_not_noisy))
+            for prediction in y_pred:
+                f.write(str(prediction) + '\n')
+
+if __name__ == "__main__":
+    main()