
Merge branch 'release/v0.2.3'

Jérôme BUISINE, 5 years ago
commit
8a5fe6540a

+ 1 - 0
data_attributes.py

@@ -111,6 +111,7 @@ def w2d(arr, mode='haar', level=1):
 
     return imArray_H
 
+
 def _get_mscn_variance(block, sub_block_size=(50, 50)):
 
     blocks = segmentation.divide_in_blocks(block, sub_block_size)

+ 147 - 0
display/display_reconstructed_image_from_humans.py

@@ -0,0 +1,147 @@
+# main imports
+import numpy as np
+import pandas as pd
+import math
+import time
+
+import os, sys, argparse
+
+# image processing imports
+import matplotlib.pyplot as plt
+from PIL import Image
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from data_attributes import get_image_features
+from modules.utils import data as dt
+
+# other variables
+learned_zones_folder = cfg.learned_zones_folder
+models_name          = cfg.models_names_list
+
+# utils information
+zone_width, zone_height = (200, 200)
+scene_width, scene_height = (800, 800)
+nb_x_parts = math.floor(scene_width / zone_width)
+
+
+def reconstruct_image(scene_name, output):
+    """
+    @brief Reconstruct a scene image from the zone images reached at the human thresholds
+    @param scene_name, scene name used
+    @param output, the output filename
+    @return nothing
+    """
+
+    # compute zone start index
+    zones_coordinates = []
+    for zone_index in cfg.zones_indices:
+        x_zone = (zone_index % nb_x_parts) * zone_width
+        y_zone = (math.floor(zone_index / nb_x_parts)) * zone_height
+
+        zones_coordinates.append((x_zone, y_zone))
+
+    scene_folder = os.path.join(cfg.dataset_path, scene_name)
+
+    folder_scene_elements = os.listdir(scene_folder)
+
+    zones_folder = [zone for zone in folder_scene_elements if 'zone' in zone]
+    zones_folder = sorted(zones_folder)
+
+    scenes_images = [img for img in folder_scene_elements if cfg.scene_image_extension in img]
+    scenes_images = sorted(scenes_images)
+
+    # 1. find thresholds from scene
+    human_thresholds = []
+
+    for zone_folder in zones_folder:
+        zone_path = os.path.join(scene_folder, zone_folder)
+        
+        with open(os.path.join(zone_path, cfg.seuil_expe_filename)) as f:
+            human_thresholds.append(int(f.readline()))
+
+    # 2. find, for each zone, the first image whose quality exceeds the human threshold
+    zone_images_index = []
+
+    for threshold in human_thresholds:
+
+        current_image_index = 0
+
+        for image_name in scenes_images:
+
+            image_quality = dt.get_scene_image_quality(image_name)
+
+            if image_quality > threshold:
+                current_image_index = image_quality
+                break
+
+
+        str_index = str(current_image_index)
+        while len(str_index) < 5:
+            str_index = "0" + str_index
+
+        zone_images_index.append(str_index)
+
+    images_zones = []
+    line_images_zones = []
+    # get image using threshold by zone
+    for id, zone_index in enumerate(zone_images_index):
+        filtered_images = [img for img in scenes_images if zone_index in img]
+        
+        if len(filtered_images) > 0:
+            image_name = filtered_images[0]
+        else:
+            image_name = scenes_images[-1]
+        
+        image_path = os.path.join(scene_folder, image_name)
+        selected_image = Image.open(image_path)
+
+        x_zone, y_zone = zones_coordinates[id]
+        zone_image = np.array(selected_image)[y_zone:y_zone+zone_height, x_zone:x_zone+zone_width]
+        line_images_zones.append(zone_image)
+
+        if int(id + 1) % int(scene_width / zone_width) == 0:
+            images_zones.append(np.concatenate(line_images_zones, axis=1))
+            line_images_zones = []
+
+
+    # 3. reconstruct the image using these zones
+    reconstructed_image = np.concatenate(images_zones, axis=0)
+
+    # 4. Save the image with generated name based on scene
+    reconstructed_pil_img = Image.fromarray(reconstructed_image)
+
+    folders = output.split('/')
+    if len(folders) > 1:
+        output_folder = '/'.join(folders[:len(folders) - 1])
+        
+        if not os.path.exists(output_folder):
+            os.makedirs(output_folder)
+
+    reconstructed_pil_img.save(output)
+
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Compute and save reconstructed images from human thresholds")
+
+    parser.add_argument('--scene', type=str, help='Scene index to use', choices=cfg.scenes_indices)
+    parser.add_argument('--output', type=str, help='Output reconstructed image path and filename')
+
+    args = parser.parse_args()
+
+    p_scene = args.scene
+    p_output = args.output
+    
+    scenes_list = cfg.scenes_names
+    scenes_indices = cfg.scenes_indices
+
+    scene_index = scenes_indices.index(p_scene.strip())
+    scene_name = scenes_list[scene_index]
+
+    reconstruct_image(scene_name, p_output)
+
+if __name__ == "__main__":
+    main()
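With zone_width = 200 and scene_width = 800, nb_x_parts is 4, so each zone index maps onto a 4x4 grid: zone 5, for example, lands at (x, y) = ((5 % 4) * 200, (5 // 4) * 200) = (200, 200). A minimal invocation sketch for the new script (the scene index 'A' and the output path are hypothetical placeholders; --scene must be one of cfg.scenes_indices):

python display/display_reconstructed_image_from_humans.py --scene A --output reconstructed/scene_humans.png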

+ 184 - 0
display/display_reconstructed_image_from_simulation.py

@@ -0,0 +1,184 @@
+# main imports
+import numpy as np
+import pandas as pd
+import math
+import time
+
+import os, sys, argparse
+
+# image processing imports
+import matplotlib.pyplot as plt
+from PIL import Image
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from data_attributes import get_image_features
+
+# other variables
+learned_zones_folder = cfg.learned_zones_folder
+models_name          = cfg.models_names_list
+
+# utils information
+zone_width, zone_height = (200, 200)
+scene_width, scene_height = (800, 800)
+nb_x_parts = math.floor(scene_width / zone_width)
+
+
+def reconstruct_image(folder_path, model_name, p_limit):
+    """
+    @brief Reconstruct scene images from the .csv simulation files, using p_limit as stopping criterion
+    @param folder_path, folder which contains all .csv files obtained during simulation
+    @param model_name, current name of model
+    @return nothing
+    """
+
+    for name in models_name:
+        if name in model_name:
+            data_filename = model_name
+            learned_zones_folder_path = os.path.join(learned_zones_folder, data_filename)
+
+    data_files = [x for x in os.listdir(folder_path) if '.png' not in x]
+
+    scene_names = [f.split('_')[3] for f in data_files]
+
+    # compute zone start index
+    zones_coordinates = []
+    for index, zone_index in enumerate(cfg.zones_indices):
+        x_zone = (zone_index % nb_x_parts) * zone_width
+        y_zone = (math.floor(zone_index / nb_x_parts)) * zone_height
+
+        zones_coordinates.append((x_zone, y_zone))
+
+    print(zones_coordinates)
+
+    for id, f in enumerate(data_files):
+
+        scene_name = scene_names[id]
+        path_file = os.path.join(folder_path, f)
+
+        # TODO : check if necessary to keep information about zone learned when displaying data
+        scenes_zones_used_file_path = os.path.join(learned_zones_folder_path, scene_name + '.csv')
+
+        zones_used = []
+
+        if os.path.exists(scenes_zones_used_file_path):
+            with open(scenes_zones_used_file_path, 'r') as f:
+                zones_used = [int(x) for x in f.readline().split(';') if x != '']
+
+        # 1. find estimated threshold for each zone scene using `data_files` and p_limit
+        model_thresholds = []
+        df = pd.read_csv(path_file, header=None, sep=";")
+
+        for index, row in df.iterrows():
+
+            row = np.asarray(row)
+
+            threshold = row[2]
+            start_index = row[3]
+            step_value = row[4]
+            rendering_predictions = row[5:]
+
+            nb_generated_image = 0
+            nb_not_noisy_prediction = 0
+
+            for prediction in rendering_predictions:
+                
+                if int(prediction) == 0:
+                    nb_not_noisy_prediction += 1
+                else:
+                    nb_not_noisy_prediction = 0
+
+                # exit loop if limit is targeted
+                if nb_not_noisy_prediction >= p_limit:
+                    break
+
+                nb_generated_image += 1
+            
+            current_threshold = start_index + step_value * nb_generated_image
+            model_thresholds.append(current_threshold)
+
+        # 2. find images for each zone which are attached to this estimated threshold by the model
+
+        zone_images_index = []
+
+        for est_threshold in model_thresholds:
+
+            str_index = str(est_threshold)
+            while len(str_index) < 5:
+                str_index = "0" + str_index
+
+            zone_images_index.append(str_index)
+
+        scene_folder = os.path.join(cfg.dataset_path, scene_name)
+        
+        scenes_images = [img for img in os.listdir(scene_folder) if cfg.scene_image_extension in img]
+        scenes_images = sorted(scenes_images)
+
+        images_zones = []
+        line_images_zones = []
+        # get image using threshold by zone
+        for id, zone_index in enumerate(zone_images_index):
+            filtered_images = [img for img in scenes_images if zone_index in img]
+            
+            if len(filtered_images) > 0:
+                image_name = filtered_images[0]
+            else:
+                image_name = scenes_images[-1]
+            
+            #print(image_name)
+            image_path = os.path.join(scene_folder, image_name)
+            selected_image = Image.open(image_path)
+
+            x_zone, y_zone = zones_coordinates[id]
+            zone_image = np.array(selected_image)[y_zone:y_zone+zone_height, x_zone:x_zone+zone_width]
+            line_images_zones.append(zone_image)
+
+            if int(id + 1) % int(scene_width / zone_width) == 0:
+                images_zones.append(np.concatenate(line_images_zones, axis=1))
+                print(len(line_images_zones))
+                line_images_zones = []
+
+
+        # 3. reconstruct the image using these zones
+        reconstructed_image = np.concatenate(images_zones, axis=0)
+
+        # 4. Save the image with generated name based on scene, model and `p_limit`
+        reconstructed_pil_img = Image.fromarray(reconstructed_image)
+
+        output_path = os.path.join(folder_path, scene_name + '_reconstruction_limit_' + str(p_limit) + '.png')
+
+        reconstructed_pil_img.save(output_path)
+
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Display simulations curves from simulation data")
+
+    parser.add_argument('--folder', type=str, help='Folder which contains simulations data for scenes')
+    parser.add_argument('--model', type=str, help='Name of the model used for simulations')
+    parser.add_argument('--limit', type=int, help='Detection limit used to stop rendering (number of consecutive times the model predicts the image has no more noise)')
+
+    args = parser.parse_args()
+
+    p_folder = args.folder
+    p_limit  = args.limit
+
+    if args.model:
+        p_model = args.model
+    else:
+        # find p_model from folder if model arg not given (folder path needs to contain the model name)
+        if p_folder.split('/')[-1]:
+            p_model = p_folder.split('/')[-1]
+        else:
+            p_model = p_folder.split('/')[-2]
+    
+    print(p_model)
+
+    reconstruct_image(p_folder, p_model, p_limit)
+
+    print(p_folder)
+
+if __name__ == "__main__":
+    main()
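For each zone row of the simulation .csv, the estimated threshold is start_index + step_value * nb_generated_image, where nb_generated_image stops increasing once p_limit consecutive "not noisy" (0) predictions have been seen; for instance, if start_index = 20, step_value = 10 and nb_generated_image is 6 when the limit is reached, the estimate is 20 + 10 * 6 = 80 (values chosen purely for illustration). A hedged usage sketch with placeholder paths (the simulation folder is expected to end with the model name when --model is omitted):

python display/display_reconstructed_image_from_simulation.py --folder <simulation_data_folder>/<model_name> --model <model_name> --limit 2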

+ 4 - 2
display/display_simulation_curves.py

@@ -42,10 +42,12 @@ def display_curves(folder_path, model_name):
 
         scenes_zones_used_file_path = os.path.join(learned_zones_folder_path, scene_names[id] + '.csv')
 
+        # by default, the list of zones used is empty
         zones_used = []
 
-        with open(scenes_zones_used_file_path, 'r') as f:
-            zones_used = [int(x) for x in f.readline().split(';') if x != '']
+        if os.path.exists(scenes_zones_used_file_path):
+            with open(scenes_zones_used_file_path, 'r') as f:
+                zones_used = [int(x) for x in f.readline().split(';') if x != '']
 
         print(zones_used)
 

+ 0 - 1
generate/generate_data_model_random_all.py

@@ -267,7 +267,6 @@ def main():
     p_renderer = args.renderer
     p_custom   = args.custom
 
-
     # list all possibles choices of renderer
     scenes_list = dt.get_renderer_scenes_names(p_renderer)
     scenes_indices = dt.get_renderer_scenes_indices(p_renderer)

+ 173 - 0
prediction/predict_seuil_expe_curve_opti_scene.py

@@ -0,0 +1,173 @@
+# main imports
+import sys, os, argparse
+import subprocess
+import time
+import numpy as np
+
+# image processing imports
+from ipfml.processing import segmentation
+from PIL import Image
+
+# models imports
+from sklearn.externals import joblib
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+
+
+# variables and parameters
+scenes_path               = cfg.dataset_path
+min_max_filename          = cfg.min_max_filename_extension
+threshold_expe_filename   = cfg.seuil_expe_filename
+
+threshold_map_folder      = cfg.threshold_map_folder
+threshold_map_file_prefix = cfg.threshold_map_folder + "_"
+
+zones                     = cfg.zones_indices
+normalization_choices     = cfg.normalization_choices
+features_choices          = cfg.features_choices_labels
+
+simulation_curves_zones   = "simulation_curves_zones_"
+tmp_filename              = '/tmp/__model__img_to_predict.png'
+
+current_dirpath = os.getcwd()
+
+
+def main():
+
+    p_custom = False
+        
+    parser = argparse.ArgumentParser(description="Script which predicts threshold using specific model")
+
+    parser.add_argument('--solution', type=str, help='Data of solution to specify filters to use')
+    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
+    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
+    parser.add_argument('--scene', type=str, help='scene to use for simulation', choices=cfg.scenes_indices)
+    #parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
+    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
+    parser.add_argument('--filter', type=str, help='filter reduction solution used', choices=cfg.filter_reduction_choices)
+
+    args = parser.parse_args()
+
+    # retrieve script arguments
+    p_solution   = args.solution
+    p_model_file = args.model
+    p_mode       = args.mode
+    p_feature    = args.feature
+    p_scene      = args.scene
+    #p_limit      = args.limit
+    p_custom     = args.custom
+    p_filter     = args.filter
+
+    # get scene name using index
+    
+    # list of all possible scene choices
+    scenes_list = cfg.scenes_names
+    scenes_indices = cfg.scenes_indices
+
+    scene_index = scenes_indices.index(p_scene.strip())
+    scene_name = scenes_list[scene_index]
+
+    print(scene_name)
+    scene_path = os.path.join(scenes_path, scene_name)
+
+    threshold_expes = []
+    threshold_expes_found = []
+    block_predictions_str = []
+
+    # get all images of folder
+    scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+
+    start_quality_image = dt.get_scene_image_quality(scene_images[0])
+    end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
+    # using first two images find the step of quality used
+    quality_step_image  = dt.get_scene_image_quality(scene_images[1]) - start_quality_image
+
+    # get zones list info
+    for index in zones:
+        index_str = str(index)
+        if len(index_str) < 2:
+            index_str = "0" + index_str
+        zone_folder = "zone"+index_str
+
+        threshold_path_file = os.path.join(os.path.join(scene_path, zone_folder), threshold_expe_filename)
+
+        with open(threshold_path_file) as f:
+            threshold = int(f.readline())
+            threshold_expes.append(threshold)
+
+            # Initialize default data to get detected model threshold found
+            threshold_expes_found.append(end_quality_image) # by default use max
+
+        block_predictions_str.append(index_str + ";" + p_model_file + ";" + str(threshold) + ";" + str(start_quality_image) + ";" + str(quality_step_image))
+
+
+    # for each image of the scene
+    for img_path in scene_images:
+
+        current_img = Image.open(img_path)
+        current_quality_image = dt.get_scene_image_quality(img_path)
+
+        img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))
+
+        for id_block, block in enumerate(img_blocks):
+
+            # check only if necessary for this scene (not already detected)
+            #if not threshold_expes_detected[id_block]:
+
+                tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
+                block.save(tmp_file_path)
+
+                python_cmd_line = "python prediction/predict_noisy_image_svd_" + p_filter + ".py --image {0} --solution '{1}' --model {2} --mode {3} --feature {4}"
+                python_cmd = python_cmd_line.format(tmp_file_path, p_solution, p_model_file, p_mode, p_feature) 
+
+                # specify use of custom file for min max normalization
+                if p_custom:
+                    python_cmd = python_cmd + ' --custom ' + p_custom
+
+                ## call command ##
+                p = subprocess.Popen(python_cmd, stdout=subprocess.PIPE, shell=True)
+
+                (output, err) = p.communicate()
+
+                ## Wait for result ##
+                p_status = p.wait()
+
+                prediction = int(output)
+
+                # save here in specific file of block all the predictions done
+                block_predictions_str[id_block] = block_predictions_str[id_block] + ";" + str(prediction)
+
+                print(str(id_block) + " : " + str(current_quality_image) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
+
+        print("------------------------")
+        print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)))
+        print("------------------------")
+
+    # end of scene => display of results
+
+    # construct path using model name for saving threshold map folder
+    model_threshold_path = os.path.join(threshold_map_folder, p_model_file.split('/')[-1].replace('.joblib', ''))
+
+    # create threshold model path if necessary
+    if not os.path.exists(model_threshold_path):
+        os.makedirs(model_threshold_path)
+
+    map_filename = os.path.join(model_threshold_path, simulation_curves_zones + scene_name)
+    f_map = open(map_filename, 'w')
+
+    for line in block_predictions_str:
+        f_map.write(line + '\n')
+    f_map.close()
+
+    print("------------------------")
+
+    print("Model predictions are saved into %s" % map_filename)
+
+
+if __name__ == "__main__":
+    main()
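A hedged invocation sketch for this new script; every value below is a placeholder, and --mode, --feature, --scene and --filter must match the choices defined in custom_config:

python prediction/predict_seuil_expe_curve_opti_scene.py --solution '<solution_data>' --model <model>.joblib --mode <normalization_mode> --feature <feature_name> --scene A --filter <filter_solution>

The optional --custom flag can be appended with the name of a min/max file when data renormalization is wanted.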

+ 171 - 0
prediction/predict_seuil_expe_curve_scene.py

@@ -0,0 +1,171 @@
+# main imports
+import sys, os, argparse
+import subprocess
+import time
+import numpy as np
+
+# image processing imports
+from ipfml.processing import segmentation
+from PIL import Image
+
+# models imports
+from sklearn.externals import joblib
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+
+
+# variables and parameters
+scenes_path               = cfg.dataset_path
+min_max_filename          = cfg.min_max_filename_extension
+threshold_expe_filename   = cfg.seuil_expe_filename
+
+threshold_map_folder      = cfg.threshold_map_folder
+threshold_map_file_prefix = cfg.threshold_map_folder + "_"
+
+zones                     = cfg.zones_indices
+normalization_choices     = cfg.normalization_choices
+features_choices          = cfg.features_choices_labels
+
+simulation_curves_zones   = "simulation_curves_zones_"
+tmp_filename              = '/tmp/__model__img_to_predict.png'
+
+current_dirpath = os.getcwd()
+
+
+def main():
+
+    p_custom = False
+        
+    parser = argparse.ArgumentParser(description="Script which predicts threshold using specific model")
+
+    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
+    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
+    parser.add_argument('--scene', type=str, help='scene to use for simulation', choices=cfg.scenes_indices)
+    #parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
+    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
+    parser.add_argument('--filter', type=str, help='filter reduction solution used', choices=cfg.filter_reduction_choices)
+
+    args = parser.parse_args()
+
+    # retrieve script arguments
+    p_model_file = args.model
+    p_mode       = args.mode
+    p_feature    = args.feature
+    p_scene      = args.scene
+    #p_limit      = args.limit
+    p_custom     = args.custom
+    p_filter     = args.filter
+
+    # get scene name using index
+    
+    # list of all possible scene choices
+    scenes_list = cfg.scenes_names
+    scenes_indices = cfg.scenes_indices
+
+    scene_index = scenes_indices.index(p_scene.strip())
+    scene_name = scenes_list[scene_index]
+
+    print(scene_name)
+    scene_path = os.path.join(scenes_path, scene_name)
+
+    threshold_expes = []
+    threshold_expes_found = []
+    block_predictions_str = []
+
+    # get all images of folder
+    scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+
+    start_quality_image = dt.get_scene_image_quality(scene_images[0])
+    end_quality_image   = dt.get_scene_image_quality(scene_images[-1])
+    # using first two images find the step of quality used
+    quality_step_image  = dt.get_scene_image_quality(scene_images[1]) - start_quality_image
+
+    # get zones list info
+    for index in zones:
+        index_str = str(index)
+        if len(index_str) < 2:
+            index_str = "0" + index_str
+        zone_folder = "zone"+index_str
+
+        threshold_path_file = os.path.join(os.path.join(scene_path, zone_folder), threshold_expe_filename)
+
+        with open(threshold_path_file) as f:
+            threshold = int(f.readline())
+            threshold_expes.append(threshold)
+
+            # Initialize default data to get detected model threshold found
+            threshold_expes_found.append(end_quality_image) # by default use max
+
+        block_predictions_str.append(index_str + ";" + p_model_file + ";" + str(threshold) + ";" + str(start_quality_image) + ";" + str(quality_step_image))
+
+
+    # for each image of the scene
+    for img_path in scene_images:
+
+        current_img = Image.open(img_path)
+        current_quality_image = dt.get_scene_image_quality(img_path)
+
+        img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))
+
+        for id_block, block in enumerate(img_blocks):
+
+            # check only if necessary for this scene (not already detected)
+            #if not threshold_expes_detected[id_block]:
+
+                tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
+                block.save(tmp_file_path)
+
+                python_cmd_line = "python prediction/predict_noisy_image_svd.py --image {0} --model {1} --mode {2} --feature {3}"
+                python_cmd = python_cmd_line.format(tmp_file_path, p_model_file, p_mode, p_feature)
+
+                # specify use of custom file for min max normalization
+                if p_custom:
+                    python_cmd = python_cmd + ' --custom ' + p_custom
+
+                ## call command ##
+                p = subprocess.Popen(python_cmd, stdout=subprocess.PIPE, shell=True)
+
+                (output, err) = p.communicate()
+
+                ## Wait for result ##
+                p_status = p.wait()
+
+                prediction = int(output)
+
+                # save here in specific file of block all the predictions done
+                block_predictions_str[id_block] = block_predictions_str[id_block] + ";" + str(prediction)
+
+                print(str(id_block) + " : " + str(current_quality_image) + "/" + str(threshold_expes[id_block]) + " => " + str(prediction))
+
+        print("------------------------")
+        print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)))
+        print("------------------------")
+
+    # end of scene => display of results
+
+    # construct path using model name for saving threshold map folder
+    model_threshold_path = os.path.join(threshold_map_folder, p_model_file.split('/')[-1].replace('.joblib', ''))
+
+    # create threshold model path if necessary
+    if not os.path.exists(model_threshold_path):
+        os.makedirs(model_threshold_path)
+
+    map_filename = os.path.join(model_threshold_path, simulation_curves_zones + scene_name)
+    f_map = open(map_filename, 'w')
+
+    for line in block_predictions_str:
+        f_map.write(line + '\n')
+    f_map.close()
+
+    print("------------------------")
+
+    print("Model predictions are saved into %s" % map_filename)
+
+
+if __name__ == "__main__":
+    main()
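Usage mirrors the optimized variant above, minus --solution (note that --filter is parsed here but not used when building the prediction command). A hedged sketch with placeholder values:

python prediction/predict_seuil_expe_curve_scene.py --model <model>.joblib --mode <normalization_mode> --feature <feature_name> --scene A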