Browse source

Merge branch 'release/v0.0.1'

Jérôme BUISINE 5 years ago
Parent
commit
26fc69e6c0

+ 3 - 0
.gitignore

@@ -61,3 +61,6 @@ target/
 
 # others
 generated/*
+curves_pictures/*
+!generated/**/*.csv
+!generated/**/*.txt

+ 97 - 1
README.md

@@ -1,3 +1,99 @@
 # NoiseAnalysis
 
-Analysis of different noise using SVD compression
+## Description
+
+Analysis of different noises using the singular values vector obtained from SVD compression.
+
+Noise list:
+- cauchy
+- gaussian
+- laplace
+- log_normal
+- mut_white
+- salt_pepper
+- white
+
+
+## Generate all data
+
+### Step 1.
+
+First of all, you need to generate every noise variant of each image into the **/generated** folder.
+
+```
+bash generate_all_noise.sh
+```
+
+### Step 2.
+
+Once you have generated all noisy images from the synthesis scenes, you need to extract features (SVD singular values) using different metrics.
+
+```
+python generate_all_data.py --metric all --step 40 --color 0
+python generate_all_data.py --metric all --step 40 --color 1
+```
+
+### Step 3.
+
+You can display the curves of each noise for each scene:
+
+```
+bash generate_noise_all_curves.sh
+```
+
+This gives you information about the SVD singular values obtained from the noisy synthesis images. All of these curves are available in the **curves_pictures** folder after running the script.
+
+## Scripts
+
+### noise_computation.py
+
+This script is used to apply a given noise to an image from the **images** folder.
+
+```
+python noise_computation.py --noise salt_pepper --image path/to/image.png --n 1000 --identical 1 --output image_salt_pepper.png --all 1 --p 0.1
+```
+
+Parameters:
+- **noise**: the noise to apply (one of the list above)
+- **image**: source path of the image to which noise is added
+- **n**: noise level to use
+- **identical**: whether to apply the same noise to each channel of an RGB image
+- **step**: interval between the identifiers of the images that are kept
+- **output**: desired output image name
+- **all**: generate all noise levels from 1 to **n** (see the example below)
+- **p**: optional parameter, only used for salt and pepper noise
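+
+If **all** is set to 0, only the single level **n** is generated; a minimal example (the image path, level and output name are only illustrative):
+
+```
+python noise_computation.py --noise gaussian --image images/cuisine01.png --n 200 --identical 1 --output cuisine01_gaussian.png --all 0
+```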
+
+
+### noise_svd_visualization.py
+
+This script is used to display the SVD curves of an image for each noise level.
+
+```
+python noise_svd_visualization.py  --prefix generated/${image}/${noise} --metric lab --n 1000 --mode svdne --interval "0, 200" --step 40 --norm 0 --ylim "0, 0.05"
+```
+
+Parameters:
+- **prefix**: folder of the image for a specific noise
+- **metric**: metric used to extract the SVD data
+- **n**: upper limit of the image identifiers to use for the scene
+- **mode**: normalization mode ['svd', 'svdn', 'svdne']
+- **interval**: range of features to display from the singular values vector
+- **step**: interval between noise levels kept for display
+- **norm**: normalize only over the values kept in the interval
+- **color**: whether the 3 channels use different noise or the same noise
+- **ylim**: y-axis limits used to display the curves
+
+### noise_svd_tend_visualization.py
+
+Displays information about the trend of SVD values for a specific scene, as shown in the example below.
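+
+It accepts the same parameters as `noise_svd_visualization.py`, plus an **error** measure among `mae`, `mse`, `ssim` and `psnr`. For instance, mirroring the call made in `generate_noise_all_curves.sh` (the prefix path is only illustrative):
+
+```
+python noise_svd_tend_visualization.py --prefix generated/${image}/${noise} --metric lab --n 1000 --mode svdne --interval "30, 800" --step 30 --norm 0 --ylim "0, 0.05" --error mae
+```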
+
+### noise_svd_threshold.py
+
+Displays threshold information about a scene for each perceived noise. The scene folder must contain one of these files:
+- threshold_data_mean.csv
+- threshold_data_median.csv
+
+These files contain threshold information about a noise; each row is written as:
+- <noise>;<threshold>;<color(0, 1)>
+
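+A call might look as follows; the prefix and file paths are only illustrative and the flags follow the script's usage string:
+
+```
+python noise_svd_threshold.py --prefix generated/calibration --file generated/calibration/threshold_data_mean.csv --metric lab --mode svdne --interval "0, 200" --step 30 --color 0 --norm 0 --ylim "0, 0.05"
+```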
+

+ 0 - 24
generate_all.sh

@@ -1,24 +0,0 @@
-for noise in {"cauchy","gaussian","laplace","log_normal","mut_white","white"}; do
-
-    for identical in {"0","1"}; do
-
-        if [ ${identical} == "1" ]; then
-            python noise_computation.py --noise ${noise} --image images/calibration.png --n 999 --identical ${identical} --output ${noise}.png --all 1
-        else
-            python noise_computation.py --noise ${noise} --image images/calibration.png --n 999 --identical ${identical} --output ${noise}_color.png --all 1
-        fi
-
-    done
-done
-
-
-# specifig for salt and pepper noise
-for identical in {"0","1"}; do
-    if [ ${identical} == "1" ]; then
-        python noise_computation.py --noise salt_pepper --image images/calibration.png --n 999 --identical ${identical} --output ${noise}_B.png --all 1 --p 0.1
-        python noise_computation.py --noise salt_pepper --image images/calibration.png --n 999 --identical ${identical} --output ${noise}_A.png --all 1 --p 0.01
-    else
-        python noise_computation.py --noise salt_pepper --image images/calibration.png --n 999 --identical ${identical} --output ${noise}_A_color.png --all 1 --p 0.01
-        python noise_computation.py --noise salt_pepper --image images/calibration.png --n 999 --identical ${identical} --output ${noise}_B_color.png --all 1 --p 0.1
-    fi
-done

+ 250 - 0
generate_all_data.py

@@ -0,0 +1,250 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Sep 14 21:02:42 2018
+
+@author: jbuisine
+"""
+
+from __future__ import print_function
+import sys, os, getopt
+import numpy as np
+import random
+import time
+import json
+
+from modules.utils.data_type import get_svd_data
+from PIL import Image
+from ipfml import processing, metrics, utils
+from skimage import color
+
+from modules.utils import config as cfg
+
+# getting configuration information
+zone_folder             = cfg.zone_folder
+min_max_filename        = cfg.min_max_filename_extension
+
+# define all scenes values
+scenes_list             = cfg.scenes_names
+scenes_indices          = cfg.scenes_indices
+choices                 = cfg.normalization_choices
+path                    = cfg.generated_folder
+zones                   = cfg.zones_indices
+seuil_expe_filename     = cfg.seuil_expe_filename
+
+noise_choices           = cfg.noise_labels
+metric_choices          = cfg.metric_choices_labels
+output_data_folder      = cfg.output_data_folder
+
+end_counter_index       = cfg.default_number_of_images
+
+generic_output_file_svd = '_random.csv'
+picture_step            = 10
+
+# avoid calibration data ?
+calibration_folder      = 'calibration'
+
+def generate_data_svd(data_type, color, mode):
+    """
+    @brief Method which generates all .csv files from scenes
+    @param data_type, metric choice
+    @param color, whether the color noise variants are used
+    @param mode, normalization choice
+    @return nothing
+    """
+
+    scenes = os.listdir(path)
+
+    # filter scene
+    scenes = [s for s in scenes if calibration_folder not in s]
+
+    # remove min max file from scenes folder
+    scenes = [s for s in scenes if min_max_filename not in s]
+
+    # keep in memory min and max data found from data_type
+    min_val_found = sys.maxsize
+    max_val_found = 0
+
+    data_min_max_filename = os.path.join(path, data_type + min_max_filename)
+
+    # go through each scene
+    for id_scene, folder_scene in enumerate(scenes):
+
+        print(folder_scene)
+        scene_path = os.path.join(path, folder_scene)
+
+        for noise in noise_choices:
+
+            noise_path = os.path.join(scene_path, noise)
+
+            # getting output filename
+            if color:
+                output_svd_filename = data_type + "_color_" + mode + generic_output_file_svd
+            else:
+                output_svd_filename = data_type + "_" + mode + generic_output_file_svd
+
+            # construct each zones folder name
+            zones_folder = []
+            svd_output_files = []
+
+            # get zones list info
+            for index in zones:
+                index_str = str(index)
+                if len(index_str) < 2:
+                    index_str = "0" + index_str
+
+                current_zone = "zone"+index_str
+                zones_folder.append(current_zone)
+
+                zone_path = os.path.join(noise_path, current_zone)
+
+                if not os.path.exists(zone_path):
+                    os.makedirs(zone_path)
+
+                svd_file_path = os.path.join(zone_path, output_svd_filename)
+
+                # add writer into list
+                svd_output_files.append(open(svd_file_path, 'w'))
+
+            counter_index = 1
+
+            while(counter_index < end_counter_index):
+
+                if counter_index % picture_step == 0:
+                    counter_index_str = str(counter_index)
+
+                    if color:
+                        img_path = os.path.join(noise_path, folder_scene + "_" + noise + "_color_" + counter_index_str + ".png")
+                    else:
+                        img_path = os.path.join(noise_path, folder_scene + "_" + noise + "_" + counter_index_str + ".png")
+
+                    current_img = Image.open(img_path)
+                    img_blocks = processing.divide_in_blocks(current_img, (200, 200))
+
+                    for id_block, block in enumerate(img_blocks):
+
+                        ###########################
+                        # Metric computation part #
+                        ###########################
+
+                        data = get_svd_data(data_type, block)
+
+                        ##################
+                        # Data mode part #
+                        ##################
+
+                        # modify data depending mode
+                        if mode == 'svdne':
+
+                            # getting max and min information from min_max_filename
+                            with open(data_min_max_filename, 'r') as f:
+                                min_val = float(f.readline())
+                                max_val = float(f.readline())
+
+                            data = utils.normalize_arr_with_range(data, min_val, max_val)
+
+                        if mode == 'svdn':
+                            data = utils.normalize_arr(data)
+
+                        # save min and max found from dataset in order to normalize data using whole data known
+                        if mode == 'svd':
+
+                            current_min = data.min()
+                            current_max = data.max()
+
+                            if current_min < min_val_found:
+                                min_val_found = current_min
+
+                            if current_max > max_val_found:
+                                max_val_found = current_max
+
+                        # now write data into current writer
+                        current_file = svd_output_files[id_block]
+
+                        # add of index
+                        current_file.write(counter_index_str + ';')
+
+                        for val in data:
+                            current_file.write(str(val) + ";")
+
+                        current_file.write('\n')
+
+                if color:
+                    print(data_type + "_" + noise + "_color_" + mode + "_" + folder_scene + " - " + "{0:.2f}".format((counter_index) / (end_counter_index)* 100.) + "%")
+                else:
+                    print(data_type + "_" + noise + "_"+ mode + "_" + folder_scene + " - " + "{0:.2f}".format((counter_index) / (end_counter_index)* 100.) + "%")
+
+                sys.stdout.write("\033[F")
+
+                counter_index += 1
+
+            for f in svd_output_files:
+                f.close()
+
+            if color:
+                print(data_type + "_" + noise + "_color_" + mode + "_" + folder_scene + " - " + "Done...")
+            else:
+                print(data_type + "_" + noise + "_"+ mode + "_" + folder_scene + " - " + "Done...")
+
+
+    # save current information about min file found
+    if mode == 'svd':
+        with open(data_min_max_filename, 'w') as f:
+            f.write(str(min_val_found) + '\n')
+            f.write(str(max_val_found) + '\n')
+
+    print("%s : end of data generation\n" % mode)
+
+
+def main():
+
+    # default value of p_step
+    p_step = 10
+    p_color = 0
+
+    if len(sys.argv) <= 1:
+        print('Run with default parameters...')
+        print('python generate_all_data.py --metric all --color 0')
+        print('python generate_all_data.py --metric lab --color 0')
+        print('python generate_all_data.py --metric lab --color 1 --step 10')
+        sys.exit(2)
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "hm:s:c", ["help=", "metric=", "step=", "color="])
+    except getopt.GetoptError:
+        # print help information and exit:
+        print('python generate_all_data.py --metric all --color 1 --step 10')
+        sys.exit(2)
+    for o, a in opts:
+        if o == "-h":
+            print('python generate_all_data.py --metric all --color 1 --step 10')
+            sys.exit()
+        elif o in ("-s", "--step"):
+            p_step = int(a)
+        elif o in ("-c", "--color"):
+            p_color = int(a)
+        elif o in ("-m", "--metric"):
+            p_metric = a
+
+            if p_metric != 'all' and p_metric not in metric_choices:
+                assert False, "Invalid metric choice"
+        else:
+            assert False, "unhandled option"
+
+    global picture_step
+    picture_step = p_step
+
+    if picture_step % 10 != 0:
+        assert False, "Picture step variable needs to be divided by ten"
+
+    # generate all or specific metric data
+    if p_metric == 'all':
+        for m in metric_choices:
+            generate_data_svd(m, p_color, 'svd')
+            generate_data_svd(m, p_color, 'svdn')
+            generate_data_svd(m, p_color, 'svdne')
+    else:
+        generate_data_svd(p_metric, p_color, 'svd')
+        generate_data_svd(p_metric, p_color, 'svdn')
+        generate_data_svd(p_metric, p_color, 'svdne')
+
+if __name__== "__main__":
+    main()

+ 23 - 0
generate_all_noise.sh

@@ -0,0 +1,23 @@
+
+for file in "images"/*; do
+
+    IFS='/' # '/' is set as delimiter
+    read -ra ADDR <<< "$file" # str is read into an array as tokens separated by IFS
+    IFS=' '
+
+    image=${ADDR[1]%".png"}
+
+
+    for noise in {"cauchy","gaussian","laplace","log_normal","mut_white","white","salt_pepper"}; do
+
+        for identical in {"0","1"}; do
+
+            if [ ${identical} == "1" ]; then
+                python noise_computation.py --noise ${noise} --image ${file} --n 1000 --identical ${identical} --output ${image}_${noise}.png --step 10 --all 1 &
+            else
+                python noise_computation.py --noise ${noise} --image ${file} --n 1000 --identical ${identical} --output ${image}_${noise}_color.png --step 10 --all 1 &
+            fi
+
+        done
+    done
+done

+ 306 - 0
generate_data_model_random.py

@@ -0,0 +1,306 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Sep 14 21:02:42 2018
+
+@author: jbuisine
+"""
+
+from __future__ import print_function
+import sys, os, getopt
+import numpy as np
+import random
+import time
+import json
+
+from PIL import Image
+from ipfml import processing, metrics, utils
+
+from modules.utils import config as cfg
+from modules.utils import data as dt
+
+# getting configuration information
+zone_folder             = cfg.zone_folder
+min_max_filename        = cfg.min_max_filename_extension
+
+# define all scenes values
+all_scenes_list         = cfg.scenes_names
+all_scenes_indices      = cfg.scenes_indices
+
+choices                 = cfg.normalization_choices
+path                    = cfg.dataset_path
+zones                   = cfg.zones_indices
+seuil_expe_filename     = cfg.seuil_expe_filename
+
+metric_choices          = cfg.metric_choices_labels
+output_data_folder      = cfg.output_data_folder
+custom_min_max_folder   = cfg.min_max_custom_folder
+min_max_ext             = cfg.min_max_filename_extension
+
+calibration_folder      = 'calibration'
+generic_output_file_svd = '_random.csv'
+
+min_value_interval = sys.maxsize
+max_value_interval = 0
+
+def construct_new_line(path_seuil, interval, line, norm):
+    begin, end = interval
+
+    line_data = line.split(';')
+    seuil = line_data[0]
+    metrics = line_data[begin+1:end+1]
+
+    metrics = [float(m) for m in metrics]
+
+    # TODO : check if it's always necessary to do that (loss of information for svd)
+    if norm:
+        metrics = utils.normalize_arr_with_range(metrics, min_value_interval, max_value_interval)
+
+    with open(path_seuil, "r") as seuil_file:
+        seuil_learned = int(seuil_file.readline().strip())
+
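+    # label the line 1 when the image index is below the learned threshold, 0 otherwise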
+    if seuil_learned > int(seuil):
+        line = '1'
+    else:
+        line = '0'
+
+    for idx, val in enumerate(metrics):
+        line += ';'
+        line += str(val)
+    line += '\n'
+
+    return line
+
+def get_min_max_value_interval(_scenes_list, _filename, _interval, _choice, _color, _metric):
+
+    global min_value_interval, max_value_interval
+
+    scenes = os.listdir(path)
+
+    # remove min max file from scenes folder
+    scenes = [s for s in scenes if min_max_filename not in s]
+
+    # remove calibration mire from images
+    scenes = [s for s in scenes if calibration_folder not in s]
+
+    for id_scene, folder_scene in enumerate(scenes):
+
+        # only take care of synthesis scenes
+        if folder_scene in _scenes_list:
+
+            scene_path = os.path.join(path, folder_scene)
+
+            zones_folder = []
+            # create zones list
+            for index in zones:
+                index_str = str(index)
+                if len(index_str) < 2:
+                    index_str = "0" + index_str
+                zones_folder.append("zone"+index_str)
+
+            # shuffle list of zones (=> randomly choose zones)
+            random.shuffle(zones_folder)
+
+            for id_zone, zone_folder in enumerate(zones_folder):
+                zone_path = os.path.join(scene_path, zone_folder)
+
+                if _color:
+                    data_filename = _metric + "_color_" + _choice + generic_output_file_svd
+                else:
+                    data_filename = _metric + "_" + _choice + generic_output_file_svd
+
+                data_file_path = os.path.join(zone_path, data_filename)
+
+                # getting number of line and read randomly lines
+                f = open(data_file_path)
+                lines = f.readlines()
+
+                counter = 0
+                # check if user select current scene and zone to be part of training data set
+                for line in lines:
+
+                    begin, end = _interval
+
+                    line_data = line.split(';')
+                    metrics = line_data[begin+1:end+1]
+                    metrics = [float(m) for m in metrics]
+
+                    min_value = min(metrics)
+                    max_value = max(metrics)
+
+                    if min_value < min_value_interval:
+                        min_value_interval = min_value
+
+                    if max_value > max_value_interval:
+                        max_value_interval = max_value
+
+                    counter += 1
+
+
+def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _scenes = all_scenes_list, _nb_zones = 4, _percent = 1,  _random=0, _step=40, _color=False, _norm = False):
+
+    output_train_filename = _filename + ".train"
+    output_test_filename = _filename + ".test"
+
+    if not '/' in output_train_filename:
+        raise Exception("Please select filename with directory path to save data. Example : data/dataset")
+
+    # create path if not exists
+    if not os.path.exists(output_data_folder):
+        os.makedirs(output_data_folder)
+
+    scenes = os.listdir(path)
+
+    # remove min max file from scenes folder
+    scenes = [s for s in scenes if min_max_filename not in s]
+
+    train_file_data = []
+    test_file_data  = []
+
+    for id_scene, folder_scene in enumerate(scenes):
+
+        # only take care of maxwell scenes
+        if folder_scene in _scenes_list:
+
+            scene_path = os.path.join(path, folder_scene)
+
+            zones_folder = []
+            # create zones list
+            for index in zones:
+                index_str = str(index)
+                if len(index_str) < 2:
+                    index_str = "0" + index_str
+                zones_folder.append("zone"+index_str)
+
+            # shuffle list of zones (=> randomly choose zones)
+            if _random:
+                random.shuffle(zones_folder)
+
+            path_seuil = os.path.join(scene_path, seuil_expe_filename)
+
+            for id_zone, zone_folder in enumerate(zones_folder):
+                zone_path = os.path.join(scene_path, zone_folder)
+
+                if _color:
+                    data_filename = _metric + "_color_" + _choice + generic_output_file_svd
+                else:
+                    data_filename = _metric + "_" + _choice + generic_output_file_svd
+
+                data_file_path = os.path.join(zone_path, data_filename)
+
+                # getting number of line and read randomly lines
+                f = open(data_file_path)
+                lines = f.readlines()
+
+                num_lines = len(lines)
+
+                if _random:
+                    random.shuffle(lines)
+
+                counter = 0
+                # check if user select current scene and zone to be part of training data set
+                for data in lines:
+
+                    percent = counter / num_lines
+                    image_index = int(data.split(';')[0])
+
+                    if image_index % _step == 0:
+                        line = construct_new_line(path_seuil, _interval, data, _norm)
+
+                        if id_zone < _nb_zones and folder_scene in _scenes and percent <= _percent:
+                            train_file_data.append(line)
+                        else:
+                            test_file_data.append(line)
+
+                    counter += 1
+
+                f.close()
+
+
+    train_file = open(output_train_filename, 'w')
+    test_file = open(output_test_filename, 'w')
+
+    for line in train_file_data:
+        train_file.write(line)
+
+    for line in test_file_data:
+        test_file.write(line)
+
+    train_file.close()
+    test_file.close()
+
+
+def main():
+
+    p_custom = False
+
+    if len(sys.argv) <= 1:
+        print('Run with default parameters...')
+        print('python generate_data_model_random.py --output xxxx --interval 0,20  --kind svdne --metric lab --scenes "A, B, D" --nb_zones 5 --percent 0.7 --random 0 --step 40 --color 0 --custom min_max_filename')
+        sys.exit(2)
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "ho:i:k:s:n:p:r:s:c:c", ["help=", "output=", "interval=", "kind=", "metric=","scenes=", "nb_zones=", "percent=", "random=", "step=", "color=", "custom="])
+    except getopt.GetoptError:
+        # print help information and exit:
+        print('python generate_data_model_random.py --output xxxx --interval 0,20  --kind svdne --metric lab --scenes "A, B, D" --nb_zones 5 --percent 0.7 --random 0 --step 40 --color 0 --custom min_max_filename')
+        sys.exit(2)
+    for o, a in opts:
+        if o == "-h":
+            print('python generate_data_model_random.py --output xxxx --interval 0,20  --kind svdne --metric lab --scenes "A, B, D" --nb_zones 5 --percent 0.7 --random 0 --step 40 --color 0 --custom min_max_filename')
+            sys.exit()
+        elif o in ("-o", "--output"):
+            p_filename = a
+        elif o in ("-i", "--interval"):
+            p_interval = list(map(int, a.split(',')))
+        elif o in ("-k", "--kind"):
+            p_kind = a
+        elif o in ("-m", "--metric"):
+            p_metric = a
+        elif o in ("-s", "--scenes"):
+            p_scenes = a.split(',')
+        elif o in ("-n", "--nb_zones"):
+            p_nb_zones = int(a)
+        elif o in ("-p", "--percent"):
+            p_percent = float(a)
+        elif o in ("-r", "--random"):
+            p_random = int(a)
+        elif o in ("-p", "--percent"):
+            p_step = int(a)
+        elif o in ("-c", "--color"):
+            p_color = int(a)
+        elif o in ("-c", "--custom"):
+            p_custom = a
+        else:
+            assert False, "unhandled option"
+
+    # no renderer option is parsed here, so keep the full scenes lists from config
+    scenes_list = all_scenes_list
+    scenes_indices = all_scenes_indices
+
+    # getting scenes from indexes user selection
+    scenes_selected = []
+
+    for scene_id in p_scenes:
+        index = scenes_indices.index(scene_id.strip())
+        scenes_selected.append(scenes_list[index])
+
+    # find min max value if necessary to renormalize data
+    if p_custom:
+        get_min_max_value_interval(scenes_list, p_filename, p_interval, p_kind, p_color, p_metric)
+
+        # write new file to save
+        if not os.path.exists(custom_min_max_folder):
+            os.makedirs(custom_min_max_folder)
+
+        min_max_folder_path = os.path.join(os.path.dirname(__file__), custom_min_max_folder)
+        min_max_filename_path = os.path.join(min_max_folder_path, p_custom)
+
+        with open(min_max_filename_path, 'w') as f:
+            f.write(str(min_value_interval) + '\n')
+            f.write(str(max_value_interval) + '\n')
+
+    # create database using img folder (generate first time only)
+    generate_data_model(scenes_list, p_filename, p_interval, p_kind, p_metric, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_color, p_custom)
+
+if __name__== "__main__":
+    main()

+ 54 - 0
generate_noise_all_curves.sh

@@ -0,0 +1,54 @@
+
+for file in "images"/*; do
+
+    IFS='/' # '/' is set as delimiter
+    read -ra ADDR <<< "$file" # str is read into an array as tokens separated by IFS
+    IFS=' '
+
+    image=${ADDR[1]%".png"}
+
+    echo $image
+    if [ "$image" != "calibration" ] || [ "$image" != *"min_max_values"* ]; then
+
+        #for metric in {"lab","mscn_revisited","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2"}; do
+        for metric in {"lab","low_bits_5","low_bits_4_shifted_2"}; do
+            for noise in {"cauchy","gaussian","laplace","log_normal","mut_white","white","salt_pepper"}; do
+                for mode in {"svdn","svdne"}; do
+                    for error in {"MAE","MSE"}; do
+
+                        filename_prefix="${image}_${noise}_1_to_1000_B30_E800_${metric}_S30_norm"
+                        filename_suffix="_${mode}_${error}"
+
+                        if [ ! -f "curves_pictures/${filename_prefix}0${filename_suffix}.png" ]; then
+
+                            python noise_svd_tend_visualization.py  --prefix generated/${image}/${noise} --metric ${metric} --n 1000 --mode ${mode} --interval "30, 800" --step 30 --norm 0 --ylim "0, 0.05" --error ${error}
+                        else
+                            echo "Already generated.."
+                        fi
+
+                        if [ ! -f "curves_pictures/${filename_prefix}1${filename_suffix}.png" ]; then
+                            python noise_svd_tend_visualization.py  --prefix generated/${image}/${noise} --metric ${metric} --n 1000 --mode ${mode} --interval "30, 800" --step 30 --norm 1 --ylim "0, 0.1" --error ${error}
+                        else
+                            echo "Already generated.."
+                        fi
+
+
+                        if [ ! -f "curves_pictures/${filename_prefix}0${filename_suffix}_color.png" ]; then
+                            python noise_svd_tend_visualization.py  --prefix generated/${image}/${noise} --metric ${metric} --n 1000 --mode ${mode} --interval "30, 800" --step 30 --norm 0 --color 1 --ylim "0, 0.05" --error ${error}
+                        else
+                            echo "Already generated.."
+                        fi
+
+                        if [ ! -f "curves_pictures/${filename_prefix}1${filename_suffix}_color.png" ]; then
+                            python noise_svd_tend_visualization.py  --prefix generated/${image}/${noise} --metric ${metric} --n 1000 --mode ${mode} --interval "30, 800" --step 30 --norm 1 --color 1 --ylim "0, 0.1" --error ${error}
+
+                        else
+                            echo "Already generated.."
+
+                        fi
+                    done
+                done
+            done
+        done
+    fi
+done

+ 14 - 0
generated/calibration/threshold_data_mean.csv

@@ -0,0 +1,14 @@
+white;27.4;0
+gaussian;14.4;0
+salt_pepper;8.4;0
+cauchy;36.2;0
+laplace;10.2;0
+log_normal;9.6;0
+mut_white;25.9;0
+white;41.5;1
+gaussian;22.5;1
+salt_pepper;7.6;1
+cauchy;62.4;1
+laplace;12.5;1
+log_normal;12.0;1
+mut_white;36.1;1

+ 14 - 0
generated/calibration/threshold_data_median.csv

@@ -0,0 +1,14 @@
+white;19.5;0
+gaussian;1.5;0
+salt_pepper;0.0;0
+cauchy;21.0;0
+laplace;0.0;0
+log_normal;0.0;0
+mut_white;15.0;0
+white;39.0;1
+gaussian;16.5;1
+salt_pepper;3.0;1
+cauchy;78.0;1
+laplace;6.0;1
+log_normal;0.0;1
+mut_white;33.0;1

+ 30 - 0
generated/calibration/threshold_info.txt

@@ -0,0 +1,30 @@
+Calibration scene information
+
+  1 |  2 |  3 |  4
+ ------------------
+  5 |  6 |  7 |  8
+ ------------------
+  9 | 10 | 11 | 12
+ ------------------
+ 13 | 14 | 15 | 16
+ ------------------
+
+Bruits "monochromes"
+1. blanc
+2. gaussien
+3. poivreselA
+4. poivreselB
+5. cauchylorentz
+6. laplacien
+7. lognormal
+8. multiplicatif
+
+Bruits "colorés"
+9. blanc
+10. gaussien
+11. poivreselA
+12. poivreselB
+13. cauchylorentz
+14. laplacien
+15. lognormal
+16. multiplicatif

BIN
image_test.png


BIN
images/SdB2.png


BIN
images/SdB2_D.png


BIN
images/appartAopt.png


BIN
images/bureau1.png


BIN
images/cendrierIUT2.png


BIN
images/cuisine01.png


BIN
images/echecs.png


BIN
images/pnd.png


BIN
images/selles_envir.png


+ 41 - 2
modules/utils/config.py

@@ -1,3 +1,42 @@
+import numpy as np
 
-image_kinds     = ['RGB', 'Grey']
-noise_labels    = ['cauchy', 'gaussian', 'laplace', 'log_normal', 'mut_white', 'salt_papper', 'white']
+zone_folder                     = "zone"
+output_data_folder              = 'data'
+threshold_map_folder            = 'threshold_map'
+models_information_folder       = 'models_info'
+saved_models_folder             = 'saved_models'
+min_max_custom_folder           = 'custom_norm'
+generated_folder                = 'generated'
+pictures_output_folder          = 'curves_pictures'
+
+csv_model_comparisons_filename  = "models_comparisons.csv"
+seuil_expe_filename             = 'seuilExpe'
+min_max_filename_extension      = "_min_max_values"
+config_filename                 = "config"
+filename_ext                    = 'png'
+default_number_of_images        = 1000
+
+models_names_list               = ["svm_model","ensemble_model","ensemble_model_v2"]
+
+# define all scenes values
+renderer_choices                = ['maxwell', 'igloo', 'cycle']
+
+scenes_names                    = ['Appart1opt02', 'Bureau1', 'Cendrier', 'Cuisine01', 'EchecsBas', 'PNDVuePlongeante', 'SdbCentre', 'SdbDroite', 'Selles']
+scenes_indices                  = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
+
+maxwell_scenes_names            = ['Appart1opt02', 'Cuisine01', 'SdbCentre', 'SdbDroite']
+maxwell_scenes_indices          = ['A', 'D', 'G', 'H']
+
+igloo_scenes_names              = ['Bureau1', 'PNDVuePlongeante']
+igloo_scenes_indices            = ['B', 'F']
+
+cycle_scenes_names              = ['EchecsBas', 'Selles']
+cycle_scenes_indices            = ['E', 'I']
+
+normalization_choices           = ['svd', 'svdn', 'svdne']
+zones_indices                   = np.arange(16)
+
+metric_choices_labels           = ['lab', 'low_bits_2', 'low_bits_3', 'low_bits_4', 'low_bits_5', 'low_bits_6','low_bits_4_shifted_2']
+
+# noise information
+noise_labels                    = ['cauchy', 'gaussian', 'laplace', 'log_normal', 'mut_white', 'salt_pepper', 'white']

+ 104 - 0
modules/utils/data_type.py

@@ -0,0 +1,104 @@
+from ipfml import processing, metrics
+from modules.utils.config import *
+from PIL import Image
+from skimage import color
+
+import random
+import numpy as np
+
+_scenes_names_prefix   = '_scenes_names'
+_scenes_indices_prefix = '_scenes_indices'
+
+# store all variables from current module context
+context_vars = vars()
+
+def get_svd_data(data_type, block):
+    """
+    Method which computes and returns the SVD-based feature vector expected for the given data type
+    """
+
+    if data_type == 'lab':
+
+        nb = random.randint(0, 10)
+
+        block_file_path = '/tmp/' + str(nb) + '_lab_img.png'
+        block.save(block_file_path)
+        data = processing.get_LAB_L_SVD_s(Image.open(block_file_path))
+
+    if data_type == 'mscn_revisited':
+
+        nb = random.randint(0, 10)
+
+        img_mscn_revisited = processing.rgb_to_mscn(block)
+
+        # save tmp as img
+        img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L')
+        mscn_revisited_file_path = '/tmp/' + str(nb) + '_mscn_revisited_img.png'
+        img_output.save(mscn_revisited_file_path)
+        img_block = Image.open(mscn_revisited_file_path)
+
+        # extract from temp image
+        data = metrics.get_SVD_s(img_block)
+
+    if data_type == 'mscn':
+
+        img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8')
+        img_mscn = processing.calculate_mscn_coefficients(img_gray, 7)
+        img_mscn_norm = processing.normalize_2D_arr(img_mscn)
+
+        img_mscn_gray = np.array(img_mscn_norm*255, 'uint8')
+
+        data = metrics.get_SVD_s(img_mscn_gray)
+
+    if data_type == 'low_bits_6':
+
+        low_bits_6 = processing.rgb_to_LAB_L_low_bits(block, 6)
+        data = metrics.get_SVD_s(low_bits_6)
+
+    if data_type == 'low_bits_5':
+
+        low_bits_5 = processing.rgb_to_LAB_L_low_bits(block, 5)
+        data = metrics.get_SVD_s(low_bits_5)
+
+    if data_type == 'low_bits_4':
+
+        low_bits_4 = processing.rgb_to_LAB_L_low_bits(block, 4)
+        data = metrics.get_SVD_s(low_bits_4)
+
+    if data_type == 'low_bits_3':
+
+        low_bits_3 = processing.rgb_to_LAB_L_low_bits(block, 3)
+        data = metrics.get_SVD_s(low_bits_3)
+
+    if data_type == 'low_bits_2':
+
+        low_bits_2 = processing.rgb_to_LAB_L_low_bits(block, 2)
+        data = metrics.get_SVD_s(low_bits_2)
+
+    if data_type == 'low_bits_4_shifted_2':
+
+        data = metrics.get_SVD_s(processing.rgb_to_LAB_L_bits(block, (3, 6)))
+
+    return data
+
+def get_renderer_scenes_indices(renderer_name):
+
+    if renderer_name != 'all' and renderer_name not in renderer_choices:
+        raise ValueError("Unknown renderer name")
+
+    if renderer_name == 'all':
+        return scenes_indices
+    else:
+        return context_vars[renderer_name + _scenes_indices_prefix]
+
+def get_renderer_scenes_names(renderer_name):
+
+    if renderer_name != 'all' and renderer_name not in renderer_choices:
+        raise ValueError("Unknown renderer name")
+
+    if renderer_name == 'all':
+        return scenes_names
+    else:
+        return context_vars[renderer_name + _scenes_names_prefix]
+
+

+ 26 - 15
noise_computation.py

@@ -6,16 +6,14 @@ from modules.utils import config as cfg
 from modules import noise
 
 noise_list       = cfg.noise_labels
-filename_ext     = 'png'
-
-generated_folder = 'generated'
+generated_folder = cfg.generated_folder
+filename_ext     = cfg.filename_ext
 
 def generate_noisy_image(p_image, p_n, p_noise, p_identical, p_output, p_param):
 
-    noisy_image = noise.get_noise_result(p_image, p_n, _noise_choice=p_noise, _identical=p_identical, _p=p_param)
-    noisy_image = Image.fromarray(noisy_image)
+    image_folder = p_image.filename.split('/')[-1].replace('.' + filename_ext, '')
 
-    output_path = os.path.join(generated_folder, p_noise)
+    output_path = os.path.join(os.path.join(generated_folder, image_folder), p_noise)
 
     if not os.path.exists(output_path):
         os.makedirs(output_path)
@@ -25,35 +23,44 @@ def generate_noisy_image(p_image, p_n, p_noise, p_identical, p_output, p_param):
     if not filename_ext in output_image_path:
         output_image_path = output_image_path + filename_ext
 
-    noisy_image.save(output_image_path)
+    if not os.path.exists(output_image_path):
+
+        noisy_image = noise.get_noise_result(p_image, p_n, _noise_choice=p_noise, _identical=p_identical, _p=p_param)
+        noisy_image = Image.fromarray(noisy_image)
+
+        noisy_image.save(output_image_path)
+
+        print("Image saved at... '%s'" % output_image_path)
+    else:
+        print("Image already exists... '%s'" % output_image_path)
 
-    print("Image saved at... '%s'" % output_image_path)
 
 
 def main():
 
     # by default..
+    p_step = 1
     p_param = None
     p_all = False
 
     if len(sys.argv) < 1:
-        print('python noise_computation.py --noise xxxx --image path/to/image.png --n 100 --identical 0 --output image_name --all 1 --p 0.1')
+        print('python noise_computation.py --noise xxxx --image path/to/image.png --n 100 --identical 0 --output image_name --step 10 --all 1 --p 0.1')
         sys.exit(2)
     try:
-        opts, args = getopt.getopt(sys.argv[1:], "h:n:i:n:i:o:a:p", ["help=", "noise=", "image=", "n=", "identical=", "output=", "all=", "p="])
+        opts, args = getopt.getopt(sys.argv[1:], "h:n:i:n:i:o:a:p", ["help=", "noise=", "image=", "n=", "identical=", "output=", "step=", "all=", "p="])
     except getopt.GetoptError:
         # print help information and exit:
-        print('python noise_computation.py --noise xxxx --image path/to/image.png --n 100 --identical 0 --output image_name --all 1 --p 0.1')
+        print('python noise_computation.py --noise xxxx --image path/to/image.png --n 100 --identical 0 --output image_name --step 10 --all 1 --p 0.1')
         sys.exit(2)
     for o, a in opts:
         if o == "-h":
-            print('python noise_computation.py --noise xxxx --image path/to/image.png --n 100 --identical 0 --output image_name --all 1 --p 0.1')
+            print('python noise_computation.py --noise xxxx --image path/to/image.png --n 100 --identical 0 --output image_name --step 10 --all 1 --p 0.1')
             sys.exit()
         elif o in ("-n", "--noise"):
             p_noise = a
 
             if not p_noise in noise_list:
-                assert False, "Unknow noise parameter %s " % (noise_list)
+                assert False, "Unknow noise parameter %s, %s " % (p_noise, noise_list)
 
         elif o in ("-i", "--image"):
             p_image_path = a
@@ -62,6 +69,8 @@ def main():
 
         elif o in ("-i", "--identical"):
             p_identical = int(a)
+        elif o in ("-s", "--step"):
+            p_step = int(a)
         elif o in ("-o", "--output"):
             p_output = a
         elif o in ("-a", "--all"):
@@ -78,9 +87,11 @@ def main():
         split_output = p_output.split('.')
 
         for i in range(1, p_n):
-            p_filename = split_output[0] + "_" + str(i) + "." + filename_ext
 
-            generate_noisy_image(img, i, p_noise, p_identical, p_filename, p_param)
+            if i % p_step == 0:
+                p_filename = split_output[0] + "_" + str(i) + "." + filename_ext
+
+                generate_noisy_image(img, i, p_noise, p_identical, p_filename, p_param)
 
     else:
         generate_noisy_image(img, p_n, p_noise, p_identical, p_output, p_param)

+ 224 - 0
noise_svd_tend_visualization.py

@@ -0,0 +1,224 @@
+import sys, os, getopt
+from PIL import Image
+
+from ipfml import processing, utils
+import ipfml.iqa.fr as fr_iqa
+
+from modules.utils import config as cfg
+from modules.utils import data_type as dt
+from modules import noise
+import numpy as np
+
+import matplotlib.pyplot as plt
+plt.style.use('ggplot')
+
+noise_list            = cfg.noise_labels
+generated_folder      = cfg.generated_folder
+filename_ext          = cfg.filename_ext
+metric_choices        = cfg.metric_choices_labels
+normalization_choices = cfg.normalization_choices
+pictures_folder       = cfg.pictures_output_folder
+
+step_picture          = 10
+
+error_data_choices    = ['mae', 'mse', 'ssim', 'psnr']
+
+
+def get_error_distance(p_error, y_true, y_test):
+
+    noise_method = None
+    function_name = p_error
+
+    try:
+        error_method = getattr(fr_iqa, function_name)
+    except AttributeError:
+        raise NotImplementedError("Error method `{}` not implemented in `{}`".format(function_name, fr_iqa.__name__))
+
+    return error_method(y_true, y_test)
+
+
+def main():
+
+    # default values
+    p_step = 1
+    p_color = 0
+    p_norm = 0
+    p_ylim = (0, 1)
+
+    max_value_svd = 0
+    min_value_svd = sys.maxsize
+
+    if len(sys.argv) <= 1:
+        print('python noise_svd_tend_visualization.py --prefix generated/prefix/noise --metric lab --mode svdn --n 300 --interval "0, 200" --step 30 --color 1 --norm 1 --ylim "0, 1" --error mae')
+        sys.exit(2)
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "h:p:m:m:n:i:s:c:n:y:e", ["help=", "prefix=", "metric=", "mode=", "n=", "interval=", "step=", "color=", "norm=", "ylim=", "error="])
+    except getopt.GetoptError:
+        # print help information and exit:
+        print('python noise_svd_tend_visualization.py --prefix generated/prefix/noise --metric lab --mode svdn --n 300 --interval "0, 200" --step 30 --color 1 --norm 1 --ylim "0, 1" --error mae')
+        sys.exit(2)
+    for o, a in opts:
+        if o == "-h":
+            print('python noise_svd_tend_visualization.py --prefix generated/prefix/noise --metric lab --mode svdn --n 300 --interval "0, 200" --step 30 --color 1 --norm 1 --ylim "0, 1" --error MAE')
+            sys.exit()
+        elif o in ("-p", "--prefix"):
+            p_path = a
+        elif o in ("-m", "--mode"):
+            p_mode = a
+
+            if not p_mode in normalization_choices:
+                assert False, "Unknown normalization choice, %s" % normalization_choices
+
+        elif o in ("-m", "--metric"):
+            p_metric = a
+
+            if not p_metric in metric_choices:
+                assert False, "Unknown metric choice, %s" % metric_choices
+
+        elif o in ("-n", "--n"):
+            p_n = int(a)
+        elif o in ("-n", "--norm"):
+            p_norm = int(a)
+        elif o in ("-c", "--color"):
+            p_color = int(a)
+        elif o in ("-i", "--interval"):
+            p_interval = list(map(int, a.split(',')))
+        elif o in ("-s", "--step"):
+            p_step = int(a)
+        elif o in ("-y", "--ylim"):
+            p_ylim = list(map(float, a.split(',')))
+        elif o in ("-e", "--error"):
+            p_error = a
+
+            if p_error not in error_data_choices:
+                assert False, "Unknow error choice to display %s" % error_data_choices
+        else:
+            assert False, "unhandled option"
+
+
+    p_prefix = p_path.split('/')[1].replace('_', '')
+    noise_name = p_path.split('/')[2]
+
+    if p_color:
+        file_path = os.path.join(p_path, p_prefix + "_" + noise_name + "_color_{}." + filename_ext)
+    else:
+        file_path = os.path.join(p_path, p_prefix + "_" + noise_name + "_{}." + filename_ext)
+
+    begin, end = p_interval
+    all_svd_data = []
+
+    svd_data = []
+    image_indices = []
+
+    noise_indices = range(1, p_n)[::-1]
+
+    # get all data from images
+    for i in noise_indices:
+
+        if i % step_picture == 0:
+
+            image_path = file_path.format(str(i))
+
+            img = Image.open(image_path)
+
+            svd_values = dt.get_svd_data(p_metric, img)
+
+            if p_norm:
+                svd_values = svd_values[begin:end]
+
+            all_svd_data.append(svd_values)
+
+            # update min max values
+            min_value = svd_values.min()
+            max_value = svd_values.max()
+
+            if min_value < min_value_svd:
+                min_value_svd = min_value
+
+            if max_value > max_value_svd:
+                max_value_svd = max_value
+
+        print('%.2f%%' % ((p_n - i + 1) / p_n * 100))
+        sys.stdout.write("\033[F")
+
+    previous_data = []
+    error_data = [0.]
+
+    for id, data in enumerate(all_svd_data):
+
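+        # images were collected from the highest noise level downwards, one every step_picture
+        # levels, so recover the noise level corresponding to this entry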
+        current_id = (p_n - ((id + 1) * 10))
+
+        if current_id % p_step == 0:
+
+            current_data = data
+
+            if p_mode == 'svdn':
+                current_data = utils.normalize_arr(current_data)
+
+            if p_mode == 'svdne':
+                current_data = utils.normalize_arr_with_range(current_data, min_value_svd, max_value_svd)
+
+            svd_data.append(current_data)
+            image_indices.append(current_id)
+
+            # use of whole image data for computation of ssim or psnr
+            if p_error == 'ssim' or p_error == 'psnr':
+                image_path = file_path.format(str(current_id))
+                current_data = np.asarray(Image.open(image_path))
+
+            if len(previous_data) > 0:
+
+                current_error = get_error_distance(p_error, previous_data, current_data)
+                error_data.append(current_error)
+
+            if len(previous_data) == 0:
+                previous_data = current_data
+
+    # display all data using matplotlib (configure plt)
+    gridsize = (3, 2)
+
+    # fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(30, 22))
+    fig = plt.figure(figsize=(30, 22))
+    ax1 = plt.subplot2grid(gridsize, (0, 0), colspan=2, rowspan=2)
+    ax2 = plt.subplot2grid(gridsize, (2, 0), colspan=2)
+
+    ax1.set_title(p_prefix  + ', ' + noise_name + ' noise, interval information ['+ str(begin) +', '+ str(end) +'], ' + p_metric + ' metric, step ' + str(p_step) + ' normalization ' + p_mode)
+    ax1.set_ylabel('Importance of noise [1, 999]')
+    ax1.set_xlabel('Vector features')
+
+    for id, data in enumerate(svd_data):
+
+        p_label = p_prefix + str(image_indices[id]) + " | " + p_error + ": " + str(error_data[id])
+        ax1.plot(data, label=p_label)
+
+    ax1.legend(bbox_to_anchor=(0.8, 1), loc=2, borderaxespad=0.2, fontsize=12)
+
+    if not p_norm:
+        ax1.set_xlim(begin, end)
+
+    # adapt ylim
+    y_begin, y_end = p_ylim
+    ax1.set_ylim(y_begin, y_end)
+
+    output_filename = p_prefix + "_" + noise_name + "_1_to_" + str(p_n) + "_B" + str(begin) + "_E" + str(end) + "_" + p_metric + "_S" + str(p_step) + "_norm" + str(p_norm )+  "_" + p_mode + "_" + p_error
+
+    if p_color:
+        output_filename = output_filename + '_color'
+
+    ax2.set_title(p_error + " information for : " + p_prefix  + ', ' + noise_name + ' noise, interval information ['+ str(begin) +', '+ str(end) +'], ' + p_metric + ' metric, step ' + str(p_step) + ', normalization ' + p_mode)
+    ax2.set_ylabel(p_error + ' error')
+    ax2.set_xlabel('Number of samples per pixels')
+    ax2.set_xticks(range(len(image_indices)))
+    ax2.set_xticklabels(image_indices)
+    ax2.plot(error_data)
+
+    print("Generation of output figure... %s" % output_filename)
+    output_path = os.path.join(pictures_folder, output_filename)
+
+    if not os.path.exists(pictures_folder):
+        os.makedirs(pictures_folder)
+
+    fig.savefig(output_path, dpi=(200))
+
+if __name__== "__main__":
+    main()

+ 223 - 0
noise_svd_threshold.py

@@ -0,0 +1,223 @@
+import sys, os, getopt
+from PIL import Image
+
+from ipfml import processing, utils
+
+from modules.utils import config as cfg
+from modules.utils import data_type as dt
+from modules import noise
+
+import matplotlib.pyplot as plt
+
+noise_list            = cfg.noise_labels
+generated_folder      = cfg.generated_folder
+filename_ext          = cfg.filename_ext
+metric_choices        = cfg.metric_choices_labels
+normalization_choices = cfg.normalization_choices
+pictures_folder       = cfg.pictures_output_folder
+
+step_picture          = 10
+
+class ThresholdData():
+    """
+    A simple class to store threshold data
+    """
+
+    def __init__(self, noise, threshold, color):
+        self.noise = noise
+        self.threshold = threshold
+        self.color = color
+
+    def get_noise(self):
+        return self.noise
+
+    def get_threshold(self):
+        return self.threshold
+
+    def isColor(self):
+        return self.color
+
+
+def main():
+
+    # default values
+    p_step = 1
+    p_color = 0
+    p_norm = 0
+    p_ylim = (0, 1)
+    p_n = 1000
+
+    if len(sys.argv) <= 1:
+        print('python noise_svd_threshold.py --prefix generated/scene --file threshold_file --metric lab --mode svdn --interval "0, 200" --step 30 --color 1 --norm 1 --ylim "0, 1"')
+        sys.exit(2)
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "h:p:f:m:m:i:s:c:n:y", ["help=", "prefix=", "file=", "metric=", "mode=", "interval=", "step=", "color=", "norm=", "ylim="])
+    except getopt.GetoptError:
+        # print help information and exit:
+        print('python noise_svd_threshold.py --prefix generated/scene --file threshold_file --metric lab --mode svdn --interval "0, 200" --step 30 --color 1 --norm 1 --ylim "0, 1"')
+        sys.exit(2)
+    for o, a in opts:
+        if o == "-h":
+            print('python noise_svd_threshold.py --prefix generated/scene --file threshold_file --metric lab --mode svdn --interval "0, 200" --step 30 --color 1 --norm 1 --ylim "0, 1"')
+            sys.exit()
+        elif o in ("-p", "--prefix"):
+            p_path = a
+        elif o in ("-f", "--file"):
+            p_data_file = a
+
+        elif o in ("-m", "--mode"):
+            p_mode = a
+
+            if not p_mode in normalization_choices:
+                assert False, "Unknown normalization choice, %s" % normalization_choices
+
+        elif o in ("-m", "--metric"):
+            p_metric = a
+
+            if not p_metric in metric_choices:
+                assert False, "Unknown metric choice, %s" % metric_choices
+
+        elif o in ("-n", "--norm"):
+            p_norm = int(a)
+        elif o in ("-c", "--color"):
+            p_color = int(a)
+        elif o in ("-i", "--interval"):
+            p_interval = list(map(int, a.split(',')))
+        elif o in ("-s", "--step"):
+            p_step = int(a)
+        elif o in ("-y", "--ylim"):
+            p_ylim = list(map(float, a.split(',')))
+        else:
+            assert False, "unhandled option"
+
+
+    p_prefix = p_path.split('/')[1].replace('_', '')
+
+    if p_color:
+        file_path = p_path + "{}/" + p_prefix + "_{}_color_{}." + filename_ext
+    else:
+        file_path = p_path + "{}/" + p_prefix + "_{}_{}." + filename_ext
+
+    begin, end = p_interval
+
+    svd_data = []
+    final_svd_data = []
+    image_indices = []
+    min_max_list = {}
+
+    threshold_data = []
+
+    # read data threshold file
+    with open(p_data_file, 'r') as f:
+        lines = f.readlines()
+
+        for line in lines:
+            data = line.replace('\n', '').split(';')
+            print(data)
+
+            threshold = ThresholdData(data[0], float(data[1]), int(data[2]))
+            threshold_data.append(threshold)
+
+    # filter data if color or not
+    threshold_data = [t for t in threshold_data if t.isColor() == p_color]
+
+    for id, threshold in enumerate(threshold_data):
+
+        current_noise = threshold.get_noise()
+        current_threshold = threshold.get_threshold()
+
+        min_max_list[current_noise] = (sys.maxsize, 0)
+        threshold_found = False
+
+        # get all data from images
+        for i in range(1, p_n):
+
+            if i % step_picture == 0:
+                image_path = file_path.format(current_noise, current_noise, str(i))
+                img = Image.open(image_path)
+
+                svd_values = dt.get_svd_data(p_metric, img)
+
+                if p_norm:
+                    svd_values = svd_values[begin:end]
+
+                # only append data once
+                if not threshold_found and current_threshold < i:
+                    svd_data.append(svd_values)
+                    image_indices.append(i)
+
+                if current_threshold < i:
+                    threshold_found = True
+
+                # update min max values
+                min_value = svd_values.min()
+                max_value = svd_values.max()
+
+                # update of min max values for noise
+                current_min, current_max = min_max_list[current_noise]
+
+                if min_value < current_min:
+                    current_min = min_value
+
+                if max_value > current_max:
+                    current_max = max_value
+
+                min_max_list[current_noise] = (current_min, current_max)
+
+            print('%.2f%%' % (((i + 1) * 100 + (id * p_n * 100)) / (p_n * len(threshold_data))))
+            sys.stdout.write("\033[F")
+
+    for id, data in enumerate(svd_data):
+
+        current_data = data
+
+        threshold = threshold_data[id]
+        min_value_svd, max_value_svd = min_max_list[threshold.get_noise()]
+
+        if p_mode == 'svdn':
+            current_data = utils.normalize_arr(current_data)
+
+        if p_mode == 'svdne':
+            current_data = utils.normalize_arr_with_range(current_data, min_value_svd, max_value_svd)
+
+        final_svd_data.append(current_data)
+
+    # display all data using matplotlib (configure plt)
+
+    plt.rcParams['figure.figsize'] = (25, 18)
+
+    plt.title(p_prefix  + ' noise, interval information ['+ str(begin) +', '+ str(end) +'], ' + p_metric + ' metric, step ' + str(p_step) + ' normalization ' + p_mode, fontsize=20)
+    plt.ylabel('Importance of noise [1, 999]', fontsize=14)
+    plt.xlabel('Vector features', fontsize=16)
+
+    for id, data in enumerate(final_svd_data):
+
+        p_label = p_prefix + '_' + threshold_data[id].get_noise() + str(image_indices[id])
+        plt.plot(data, label=p_label)
+
+    plt.legend(bbox_to_anchor=(0.8, 1), loc=2, borderaxespad=0.2, fontsize=14)
+
+    if not p_norm:
+        plt.xlim(begin, end)
+
+    # adapt ylim
+    y_begin, y_end = p_ylim
+    plt.ylim(y_begin, y_end)
+
+    output_filename = p_prefix + "_threshold_1_to_" + str(p_n) + "_B" + str(begin) + "_E" + str(end) + "_" + p_metric + "_S" + str(p_step) + "_norm" + str(p_norm )+  "_" + p_mode
+
+    if p_color:
+        output_filename = output_filename + '_color'
+
+    print("Generation of output figure... %s" % output_filename)
+    output_path = os.path.join(pictures_folder, output_filename)
+
+    if not os.path.exists(pictures_folder):
+        os.makedirs(pictures_folder)
+
+    plt.savefig(output_path, dpi=(200))
+
+
+
+if __name__== "__main__":
+    main()

+ 169 - 0
noise_svd_visualization.py

@@ -0,0 +1,169 @@
+import sys, os, getopt
+from PIL import Image
+
+from ipfml import processing, utils
+
+from modules.utils import config as cfg
+from modules.utils import data_type as dt
+from modules import noise
+
+import matplotlib.pyplot as plt
+
+noise_list            = cfg.noise_labels
+generated_folder      = cfg.generated_folder
+filename_ext          = cfg.filename_ext
+metric_choices        = cfg.metric_choices_labels
+normalization_choices = cfg.normalization_choices
+pictures_folder       = cfg.pictures_output_folder
+
+step_picture          = 10
+
+def main():
+
+    # default values
+    p_step = 1
+    p_color = 0
+    p_norm = 0
+    p_ylim = (0, 1)
+
+    max_value_svd = 0
+    min_value_svd = sys.maxsize
+
+    if len(sys.argv) <= 1:
+        print('python noise_svd_visualization.py --prefix generated/prefix/noise --metric lab --mode svdn --n 300 --interval "0, 200" --step 30 --color 1 --norm 1 --ylim "0, 1"')
+        sys.exit(2)
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "h:p:m:m:n:i:s:c:n:y", ["help=", "prefix=", "metric=", "mode=", "n=", "interval=", "step=", "color=", "norm=", "ylim="])
+    except getopt.GetoptError:
+        # print help information and exit:
+        print('python noise_svd_visualization.py --prefix generated/prefix/noise --metric lab --mode svdn --n 300 --interval "0, 200" --step 30 --color 1 --norm 1 --ylim "0, 1"')
+        sys.exit(2)
+    for o, a in opts:
+        if o == "-h":
+            print('python noise_svd_visualization.py --prefix generated/prefix/noise --metric lab --mode svdn --n 300 --interval "0, 200" --step 30 --color 1 --norm 1 --ylim "0, 1"')
+            sys.exit()
+        elif o in ("-p", "--prefix"):
+            p_path = a
+        elif o in ("-m", "--mode"):
+            p_mode = a
+
+            if not p_mode in normalization_choices:
+                assert False, "Unknown normalization choice, %s" % normalization_choices
+
+        elif o in ("-m", "--metric"):
+            p_metric = a
+
+            if not p_metric in metric_choices:
+                assert False, "Unknown metric choice, %s" % metric_choices
+
+        elif o in ("-n", "--n"):
+            p_n = int(a)
+        elif o in ("-n", "--norm"):
+            p_norm = int(a)
+        elif o in ("-c", "--color"):
+            p_color = int(a)
+        elif o in ("-i", "--interval"):
+            p_interval = list(map(int, a.split(',')))
+        elif o in ("-s", "--step"):
+            p_step = int(a)
+        elif o in ("-y", "--ylim"):
+            p_ylim = list(map(float, a.split(',')))
+        else:
+            assert False, "unhandled option"
+
+
+    p_prefix = p_path.split('/')[1].replace('_', '')
+    noise_name = p_path.split('/')[2]
+
+    if p_color:
+        file_path = p_path + "/" + p_prefix + "_" + noise_name + "_color_{}." + filename_ext
+    else:
+        file_path = p_path + "/" + p_prefix + "_" + noise_name + "_{}." + filename_ext
+
+    begin, end = p_interval
+    all_svd_data = []
+
+    svd_data = []
+    image_indices = []
+
+    # get all data from images
+    for i in range(1, p_n):
+
+        if i % step_picture == 0:
+
+            image_path = file_path.format(str(i))
+            img = Image.open(image_path)
+
+            svd_values = dt.get_svd_data(p_metric, img)
+
+            if p_norm:
+                svd_values = svd_values[begin:end]
+
+            all_svd_data.append(svd_values)
+
+            # update min max values
+            min_value = svd_values.min()
+            max_value = svd_values.max()
+
+            if min_value < min_value_svd:
+                min_value_svd = min_value
+
+            if max_value > max_value_svd:
+                max_value_svd = max_value
+
+            print('%.2f%%' % ((i + 1) / p_n * 100))
+            sys.stdout.write("\033[F")
+
+    for id, data in enumerate(all_svd_data):
+
+        if (id * step_picture) % p_step == 0:
+
+            current_data = data
+            if p_mode == 'svdn':
+                current_data = utils.normalize_arr(current_data)
+
+            if p_mode == 'svdne':
+                current_data = utils.normalize_arr_with_range(current_data, min_value_svd, max_value_svd)
+
+            svd_data.append(current_data)
+            image_indices.append(str(id * step_picture))
+
+    # display all data using matplotlib (configure plt)
+
+    plt.rcParams['figure.figsize'] = (25, 18)
+
+    plt.title(p_prefix  + ' noise, interval information ['+ str(begin) +', '+ str(end) +'], ' + p_metric + ' metric, step ' + str(p_step) + ' normalization ' + p_mode, fontsize=20)
+    plt.ylabel('Importance of noise [1, 999]', fontsize=14)
+    plt.xlabel('Vector features', fontsize=16)
+
+    for id, data in enumerate(svd_data):
+
+        p_label = p_prefix + str(image_indices[id])
+        plt.plot(data, label=p_label)
+
+    plt.legend(bbox_to_anchor=(0.8, 1), loc=2, borderaxespad=0.2, fontsize=14)
+
+    if not p_norm:
+        plt.xlim(begin, end)
+
+    # adapt ylim
+    y_begin, y_end = p_ylim
+    plt.ylim(y_begin, y_end)
+
+    output_filename = p_prefix + "_" + noise_name + "_1_to_" + str(p_n) + "_B" + str(begin) + "_E" + str(end) + "_" + p_metric + "_S" + str(p_step) + "_norm" + str(p_norm )+  "_" + p_mode
+
+    if p_color:
+        output_filename = output_filename + '_color'
+
+    print("Generation of output figure... %s" % output_filename)
+    output_path = os.path.join(pictures_folder, output_filename)
+
+    if not os.path.exists(pictures_folder):
+        os.makedirs(pictures_folder)
+
+    plt.savefig(output_path, dpi=(200))
+
+
+
+if __name__== "__main__":
+    main()