
Merge branch 'release/v0.4.4'

Jérôme BUISINE, 3 years ago
Parent commit: 7eb23e0c2b

File diff suppressed because it is too large
+ 110 - 14
analysis/data_local_mean.ipynb


+ 9 - 6
cnn_models.py

@@ -14,7 +14,7 @@ import tensorflow as tf
 sys.path.insert(0, '') # trick to enable import of main folder module
 
 import custom_config as cfg
-from models import metrics
+#from models import metrics
 
 
 def generate_model_2D(_input_shape, _weights_file=None):
@@ -69,7 +69,8 @@ def generate_model_2D(_input_shape, _weights_file=None):
 
     model.compile(loss='categorical_crossentropy',
                   optimizer='adam',
-                  metrics=['accuracy', metrics.auc])
+                  #metrics=['accuracy', metrics.auc])
+                  metrics=['accuracy'])
 
     return model
 
@@ -119,7 +120,7 @@ def generate_model_3D(_input_shape, _weights_file=None):
     model.add(BatchNormalization())
     model.add(Dropout(0.5))
 
-    model.add(Dense(1))
+    model.add(Dense(2))
     model.add(Activation('sigmoid'))
 
     # reload weights if exists
@@ -128,7 +129,8 @@ def generate_model_3D(_input_shape, _weights_file=None):
 
     model.compile(loss='categorical_crossentropy',
                   optimizer='rmsprop',
-                  metrics=['accuracy', metrics.auc])
+                  #metrics=['accuracy', metrics.auc])
+                  metrics=['accuracy'])
 
     return model
 
@@ -203,7 +205,8 @@ def generate_model_3D_TL(_input_shape, _weights_file=None):
 
     model_final.compile(loss='binary_crossentropy',
                   optimizer='rmsprop',
-                  metrics=['accuracy', metrics.auc])
+                #   metrics=['accuracy', metrics.auc])
+                  metrics=['accuracy'])
 
     return model_final
 
@@ -219,5 +222,5 @@ def get_model(n_channels, _input_shape, _tl=False, _weights_file=None):
     if n_channels == 1:
         return generate_model_2D(_input_shape, _weights_file)
 
-    if n_channels == 3:
+    if n_channels >= 2:
         return generate_model_3D(_input_shape, _weights_file)

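Note: the custom `models.metrics.auc` metric is only commented out above, not replaced. If AUC tracking is still wanted, a minimal sketch (assuming the project switches to tf.keras >= 1.14, whose built-in streaming AUC metric is available; not part of this commit):

    import tensorflow as tf

    # built-in streaming AUC in place of the removed custom models.metrics.auc
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', tf.keras.metrics.AUC(name='auc')])
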
+ 12 - 2
custom_config.py

@@ -1,5 +1,7 @@
 from modules.config.cnn_config import *
 
+import os
+
 # store all variables from cnn config
 context_vars = vars()
 
@@ -7,13 +9,21 @@ context_vars = vars()
 
 # folders
 
+output_data_folder              = 'data'
+output_data_generated           = os.path.join(output_data_folder, 'generated')
+output_datasets                 = os.path.join(output_data_folder, 'datasets')
+output_zones_learned            = os.path.join(output_data_folder, 'learned_zones')
+output_models                   = os.path.join(output_data_folder, 'saved_models')
+output_results_folder           = os.path.join(output_data_folder, 'results')
+
 ## noisy_folder                    = 'noisy'
 ## not_noisy_folder                = 'notNoisy'
-backup_model_folder             = 'models_backup'
+backup_model_folder             = os.path.join(output_data_folder, 'models_backup')
 
 # file or extensions
 
 perf_prediction_model_path      = 'predictions_models_results.csv'
+results_filename                = 'results.csv'
 ## post_image_name_separator       = '___'
 
 # variables
@@ -29,4 +39,4 @@ keras_epochs                    = 30
 ## keras_batch                     = 32
 ## val_dataset_size                = 0.2
 
-keras_img_size                  = (100, 100)
+keras_img_size                  = (200, 200)

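Note: all output locations are now derived from the single `output_data_folder` root. A hypothetical startup helper (illustrative only, not part of this commit) could create the whole tree in one pass:

    import os
    import custom_config as cfg

    # create every derived output folder declared in custom_config
    for folder in (cfg.output_data_generated, cfg.output_datasets,
                   cfg.output_zones_learned, cfg.output_models,
                   cfg.output_results_folder, cfg.backup_model_folder):
        os.makedirs(folder, exist_ok=True)
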
+ 246 - 0
generate/generate_dataset_file.py

@@ -0,0 +1,246 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Jun 19 11:47:42 2019
+
+@author: jbuisine
+"""
+
+# main imports
+import sys, os, argparse
+import numpy as np
+import random
+
+# images processing imports
+from PIL import Image
+from ipfml.processing.segmentation import divide_in_blocks
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config  as cfg
+from modules.utils import data as dt
+from modules.classes.Transformation import Transformation
+
+# getting configuration information
+zone_folder             = cfg.zone_folder
+learned_folder          = cfg.learned_zones_folder
+min_max_filename        = cfg.min_max_filename_extension
+
+# define all scenes values
+scenes_list             = cfg.scenes_names
+scenes_indices          = cfg.scenes_indices
+dataset_path            = cfg.dataset_path
+zones                   = cfg.zones_indices
+seuil_expe_filename     = cfg.seuil_expe_filename
+
+features_choices        = cfg.features_choices_labels
+output_data_folder      = cfg.output_datasets
+
+generic_output_file_svd = '_random.csv'
+
+def generate_data_model(_filename, _transformations, _dataset_folder, _selected_zones):
+
+    output_train_filename = os.path.join(output_data_folder, _filename, _filename + ".train")
+    output_test_filename = os.path.join(output_data_folder, _filename, _filename + ".val")
+
+    # create path if not exists
+    if not os.path.exists(os.path.join(output_data_folder, _filename)):
+        os.makedirs(os.path.join(output_data_folder, _filename))
+
+    train_file_data = []
+    test_file_data  = []
+
+    # specific number of zones (zones indices)
+    zones = np.arange(16)
+
+    # iterate over each selected scene
+    for folder_scene in _selected_zones:
+
+        scene_path = os.path.join(_dataset_folder, folder_scene)
+
+        train_zones = _selected_zones[folder_scene]
+
+        for id_zone, index_folder in enumerate(zones):
+
+            index_str = str(index_folder)
+            if len(index_str) < 2:
+                index_str = "0" + index_str
+            
+            current_zone_folder = "zone" + index_str
+            zone_path = os.path.join(scene_path, current_zone_folder)
+
+            # custom path for interval of reconstruction and metric
+
+            features_path = []
+
+            for transformation in _transformations:
+                
+                # check if it's a static content and create augmented images if necessary
+                if transformation.getName() == 'static':
+                    
+                    # {sceneName}/zoneXX/static
+                    static_metric_path = os.path.join(zone_path, transformation.getName())
+
+                    # img.png
+                    image_name = transformation.getParam().split('/')[-1]
+
+                    # {sceneName}/zoneXX/static/img
+                    image_prefix_name = image_name.replace('.png', '')
+                    image_folder_path = os.path.join(static_metric_path, image_prefix_name)
+                    
+                    if not os.path.exists(image_folder_path):
+                        os.makedirs(image_folder_path)
+
+                    features_path.append(image_folder_path)
+
+                    # get image path to manage
+                    # {sceneName}/static/img.png
+                    transform_image_path = os.path.join(scene_path, transformation.getName(), image_name) 
+                    static_transform_image = Image.open(transform_image_path)
+
+                    static_transform_image_block = divide_in_blocks(static_transform_image, cfg.sub_image_size)[id_zone]
+
+                    dt.augmented_data_image(static_transform_image_block, image_folder_path, image_prefix_name)
+
+                else:
+                    metric_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
+                    features_path.append(metric_interval_path)
+
+            # labels are the same for each metric
+            for label in os.listdir(features_path[0]):
+
+                label_features_path = []
+
+                for path in features_path:
+                    label_path = os.path.join(path, label)
+                    label_features_path.append(label_path)
+
+                # getting images list for each metric
+                features_images_list = []
+                    
+                for index_metric, label_path in enumerate(label_features_path):
+
+                    if _transformations[index_metric].getName() == 'static':
+                        # static transformation: no image list, append an empty placeholder
+                        features_images_list.append([])
+                    else:
+                        images = sorted(os.listdir(label_path))
+                        features_images_list.append(images)
+
+                # construct each line using all image paths of each metric
+                for index_image in range(0, len(features_images_list[0])):
+                    
+                    images_path = []
+
+                    # get rotation and flip information from the first transformation (which must not be static)
+                    current_post_fix =  features_images_list[0][index_image].split(cfg.post_image_name_separator)[-1]
+
+                    # getting images with same index and hence name for each metric (transformation)
+                    for index_metric in range(0, len(features_path)):
+
+                        # custom behavior for static transformation (need to check specific image)
+                        if _transformations[index_metric].getName() == 'static':
+                            # add static path with selecting correct data augmented image
+                            image_name = _transformations[index_metric].getParam().split('/')[-1].replace('.png', '')
+                            img_path = os.path.join(features_path[index_metric], image_name + cfg.post_image_name_separator + current_post_fix)
+                            images_path.append(img_path)
+                        else:
+                            img_path = features_images_list[index_metric][index_image]
+                            images_path.append(os.path.join(label_features_path[index_metric], img_path))
+
+                    if label == cfg.noisy_folder:
+                        line = '1;'
+                    else:
+                        line = '0;'
+
+                    # compute line information with all images paths
+                    for id_path, img_path in enumerate(images_path):
+                        if id_path < len(images_path) - 1:
+                            line = line + img_path + '::'
+                        else:
+                            line = line + img_path
+                    
+                    line = line + '\n'
+
+                    if id_zone in train_zones:
+                        train_file_data.append(line)
+                    else:
+                        test_file_data.append(line)
+
+    train_file = open(output_train_filename, 'w')
+    test_file = open(output_test_filename, 'w')
+
+    random.shuffle(train_file_data)
+    random.shuffle(test_file_data)
+
+    for line in train_file_data:
+        train_file.write(line)
+
+    for line in test_file_data:
+        test_file.write(line)
+
+    train_file.close()
+    test_file.close()
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Compute specific dataset for model using of metric")
+
+    parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
+    parser.add_argument('--features', type=str,
+                                     help="list of features choice in order to compute data",
+                                     default='svd_reconstruction, ipca_reconstruction',
+                                     required=True)
+    parser.add_argument('--folder', type=str,
+                        help='folder where generated data are available',
+                        required=True)  
+    parser.add_argument('--params', type=str, 
+                                    help="list of specific param for each metric choice (See README.md for further information in 3D mode)", 
+                                    default='100, 200 :: 50, 25',
+                                    required=True)
+    parser.add_argument('--size', type=str, 
+                                  help="Size of input images",
+                                  default="100, 100")
+    parser.add_argument('--selected_zones', type=str, help='file which contains all selected zones of scene', required=True)    
+
+    args = parser.parse_args()
+
+    p_filename   = args.output
+    p_folder     = args.folder
+    p_features   = list(map(str.strip, args.features.split(',')))
+    p_params     = list(map(str.strip, args.params.split('::')))
+    p_size       = args.size # not necessary to split here
+    p_selected_zones = args.selected_zones
+
+    selected_zones = {}
+    with(open(p_selected_zones, 'r')) as f:
+
+        for line in f.readlines():
+
+            data = line.split(';')
+            del data[-1]
+            scene_name = data[0]
+            thresholds = data[1:]
+
+            selected_zones[scene_name] = [ int(t) for t in thresholds ]
+
+    # create list of Transformation
+    transformations = []
+
+    for id, feature in enumerate(p_features):
+
+        if feature not in features_choices:
+            raise ValueError("Unknown metric, please select a correct metric : ", features_choices)
+
+        transformations.append(Transformation(feature, p_params[id], p_size))
+
+    if transformations[0].getName() == 'static':
+        raise ValueError("The first transformation in list cannot be static")
+
+
+    # create database using img folder (generate first time only)
+    generate_data_model(p_filename, transformations, p_folder, selected_zones)
+
+if __name__== "__main__":
+    main()

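Note: each line written to the `.train`/`.val` files starts with the label (`1` noisy, `0` not noisy), then the per-transformation image paths joined by `::`. A minimal reading sketch (hypothetical helper, paths illustrative):

    # parse one line of a generated .train/.val file,
    # e.g. '1;zone00/svd/noisy/a.png::zone00/ipca/noisy/a.png'
    def parse_dataset_line(line):
        label, paths = line.rstrip('\n').split(';')
        return int(label), paths.split('::')
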
+ 265 - 0
generate/generate_dataset_sequence_file.py

@@ -0,0 +1,265 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Jun 19 11:47:42 2019
+
+@author: jbuisine
+"""
+
+# main imports
+import sys, os, argparse
+import numpy as np
+import random
+
+# images processing imports
+from PIL import Image
+from ipfml.processing.segmentation import divide_in_blocks
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config  as cfg
+from modules.utils import data as dt
+from modules.classes.Transformation import Transformation
+
+# getting configuration information
+zone_folder             = cfg.zone_folder
+learned_folder          = cfg.learned_zones_folder
+min_max_filename        = cfg.min_max_filename_extension
+
+# define all scenes values
+scenes_list             = cfg.scenes_names
+scenes_indices          = cfg.scenes_indices
+dataset_path            = cfg.dataset_path
+zones                   = cfg.zones_indices
+seuil_expe_filename     = cfg.seuil_expe_filename
+
+features_choices        = cfg.features_choices_labels
+output_data_folder      = cfg.output_datasets
+
+generic_output_file_svd = '_random.csv'
+
+def generate_data_model(_filename, _transformations, _dataset_folder, _selected_zones, _sequence):
+
+    output_train_filename = os.path.join(output_data_folder, _filename, _filename + ".train")
+    output_test_filename = os.path.join(output_data_folder, _filename, _filename + ".val")
+
+    # create path if not exists
+    if not os.path.exists(os.path.join(output_data_folder, _filename)):
+        os.makedirs(os.path.join(output_data_folder, _filename))
+
+    train_file_data = []
+    test_file_data  = []
+
+    # specific number of zones (zones indices)
+    zones = np.arange(16)
+
+    # iterate over each selected scene
+    for folder_scene in _selected_zones:
+
+        scene_path = os.path.join(_dataset_folder, folder_scene)
+
+        train_zones = _selected_zones[folder_scene]
+
+        for id_zone, index_folder in enumerate(zones):
+
+            index_str = str(index_folder)
+            if len(index_str) < 2:
+                index_str = "0" + index_str
+            
+            current_zone_folder = "zone" + index_str
+            zone_path = os.path.join(scene_path, current_zone_folder)
+
+            # custom path for interval of reconstruction and metric
+
+            features_path = []
+
+            for transformation in _transformations:
+                
+                # check if it's a static content and create augmented images if necessary
+                if transformation.getName() == 'static':
+                    
+                    # {sceneName}/zoneXX/static
+                    static_metric_path = os.path.join(zone_path, transformation.getName())
+
+                    # img.png
+                    image_name = transformation.getParam().split('/')[-1]
+
+                    # {sceneName}/zoneXX/static/img
+                    image_prefix_name = image_name.replace('.png', '')
+                    image_folder_path = os.path.join(static_metric_path, image_prefix_name)
+                    
+                    if not os.path.exists(image_folder_path):
+                        os.makedirs(image_folder_path)
+
+                    features_path.append(image_folder_path)
+
+                    # get image path to manage
+                    # {sceneName}/static/img.png
+                    transform_image_path = os.path.join(scene_path, transformation.getName(), image_name) 
+                    static_transform_image = Image.open(transform_image_path)
+
+                    static_transform_image_block = divide_in_blocks(static_transform_image, cfg.sub_image_size)[id_zone]
+
+                    dt.augmented_data_image(static_transform_image_block, image_folder_path, image_prefix_name)
+
+                else:
+                    metric_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
+                    features_path.append(metric_interval_path)
+
+            # labels are the same for each metric
+            for label in os.listdir(features_path[0]):
+
+                label_features_path = []
+
+                for path in features_path:
+                    label_path = os.path.join(path, label)
+                    label_features_path.append(label_path)
+
+                # getting images list for each metric
+                features_images_list = []
+                    
+                for index_metric, label_path in enumerate(label_features_path):
+
+                    if _transformations[index_metric].getName() == 'static':
+                        # static transformation: no image list, append an empty placeholder
+                        features_images_list.append([])
+                    else:
+                        images = sorted(os.listdir(label_path))
+                        features_images_list.append(images)
+
+                sequence_data = []
+
+                # construct each line using all image paths of each metric
+                for index_image in range(0, len(features_images_list[0])):
+                    
+                    images_path = []
+
+                    # get rotation and flip information from the first transformation (which must not be static)
+                    current_post_fix =  features_images_list[0][index_image].split(cfg.post_image_name_separator)[-1]
+
+                    # getting images with same index and hence name for each metric (transformation)
+                    for index_metric in range(0, len(features_path)):
+
+                        # custom behavior for static transformation (need to check specific image)
+                        if _transformations[index_metric].getName() == 'static':
+                            # add static path with selecting correct data augmented image
+                            image_name = _transformations[index_metric].getParam().split('/')[-1].replace('.png', '')
+                            img_path = os.path.join(features_path[index_metric], image_name + cfg.post_image_name_separator + current_post_fix)
+                            images_path.append(img_path)
+                        else:
+                            img_path = features_images_list[index_metric][index_image]
+                            images_path.append(os.path.join(label_features_path[index_metric], img_path))
+
+                    if label == cfg.noisy_folder:
+                        line = '1;'
+                    else:
+                        line = '0;'
+
+                    # add new data information into sequence
+                    sequence_data.append(images_path)
+
+                    if len(sequence_data) >= _sequence:
+                        
+                        # prepare whole line for LSTM model kind
+                        # keeping last noisy label
+
+                        for id_seq, seq_images_path in enumerate(sequence_data):
+                            # compute line information with all images paths
+                            for id_path, img_path in enumerate(seq_images_path):
+                                if id_path < len(seq_images_path) - 1:
+                                    line = line + img_path + '::'
+                                else:
+                                    line = line + img_path
+
+                            if id_seq < len(sequence_data) - 1:
+                                line += ';'
+                        
+                        line = line + '\n'
+
+                        if id_zone in train_zones:
+                            train_file_data.append(line)
+                        else:
+                            test_file_data.append(line)
+
+                        # remove first element (sliding window)
+                        del sequence_data[0]
+
+    train_file = open(output_train_filename, 'w')
+    test_file = open(output_test_filename, 'w')
+
+    random.shuffle(train_file_data)
+    random.shuffle(test_file_data)
+
+    for line in train_file_data:
+        train_file.write(line)
+
+    for line in test_file_data:
+        test_file.write(line)
+
+    train_file.close()
+    test_file.close()
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Compute specific dataset for model using of metric")
+
+    parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
+    parser.add_argument('--folder', type=str,
+                    help='folder where generated data are available',
+                    required=True) 
+    parser.add_argument('--features', type=str,
+                                     help="list of features choice in order to compute data",
+                                     default='svd_reconstruction, ipca_reconstruction',
+                                     required=True)
+    parser.add_argument('--params', type=str, 
+                                    help="list of specific param for each metric choice (See README.md for further information in 3D mode)", 
+                                    default='100, 200 :: 50, 25',
+                                    required=True)
+    parser.add_argument('--sequence', type=int, help='sequence length expected', required=True)
+    parser.add_argument('--size', type=str, 
+                                  help="Size of input images",
+                                  default="100, 100")
+    parser.add_argument('--selected_zones', type=str, help='file which contains all selected zones of scene', required=True)    
+
+    args = parser.parse_args()
+
+    p_filename   = args.output
+    p_folder     = args.folder
+    p_features   = list(map(str.strip, args.features.split(',')))
+    p_params     = list(map(str.strip, args.params.split('::')))
+    p_sequence   = args.sequence
+    p_size       = args.size # not necessary to split here
+    p_selected_zones = args.selected_zones
+
+    selected_zones = {}
+    with(open(p_selected_zones, 'r')) as f:
+
+        for line in f.readlines():
+
+            data = line.split(';')
+            del data[-1]
+            scene_name = data[0]
+            thresholds = data[1:]
+
+            selected_zones[scene_name] = [ int(t) for t in thresholds ]
+
+    # create list of Transformation
+    transformations = []
+
+    for id, feature in enumerate(p_features):
+
+        if feature not in features_choices:
+            raise ValueError("Unknown metric, please select a correct metric : ", features_choices)
+
+        transformations.append(Transformation(feature, p_params[id], p_size))
+
+    if transformations[0].getName() == 'static':
+        raise ValueError("The first transformation in list cannot be static")
+
+
+    # create database using img folder (generate first time only)
+    generate_data_model(p_filename, transformations, p_folder, selected_zones, p_sequence)
+
+if __name__== "__main__":
+    main()

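Note: this sequence variant differs from the previous script mainly in its sliding window of length `--sequence`: frames are buffered per zone and each emitted line joins the frames with `;`. A condensed sketch of the windowing (illustration only):

    # sliding-window grouping used to build the LSTM sequences
    def sliding_windows(items, size):
        window = []
        for item in items:
            window.append(item)
            if len(window) >= size:
                yield list(window)
                del window[0]  # slide by one frame, as in the loop above

    # list(sliding_windows([1, 2, 3, 4], 3)) -> [[1, 2, 3], [2, 3, 4]]
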
+ 289 - 0
generate/generate_reconstructed_folder.py

@@ -0,0 +1,289 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Jun 19 11:47:42 2019
+
+@author: jbuisine
+"""
+
+# main imports
+import sys, os, argparse
+import numpy as np
+
+# images processing imports
+from PIL import Image
+from ipfml.processing.segmentation import divide_in_blocks
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils.data import get_scene_image_quality
+from modules.classes.Transformation import Transformation
+
+# getting configuration information
+zone_folder             = cfg.zone_folder
+
+# define all scenes values
+zones                   = cfg.zones_indices
+features_choices        = cfg.features_choices_labels
+
+def write_progress(progress):
+    '''Display progress information as a progress bar'''
+    barWidth = 180
+
+    output_str = "["
+    pos = barWidth * progress
+    for i in range(barWidth):
+        if i < pos:
+           output_str = output_str + "="
+        elif i == pos:
+           output_str = output_str + ">"
+        else:
+            output_str = output_str + " "
+
+    output_str = output_str + "] " + str(int(progress * 100.0)) + " %\r"
+    print(output_str)
+    sys.stdout.write("\033[F")
+
+
+def generate_data(transformation, _dataset_path, _output, _human_thresholds, _replace):
+    """
+    @brief Method which generates all .csv files from scenes
+    @return nothing
+    """
+
+    # path is the default dataset path
+    scenes = os.listdir(_dataset_path)
+    n_scenes = len(scenes)
+
+    # iterate over each scene
+    for id_scene, folder_scene in enumerate(scenes):
+
+        print('Scene {0} of {1} ({2})'.format((id_scene + 1), n_scenes, folder_scene))
+        scene_path = os.path.join(_dataset_path, folder_scene)
+        output_scene_path = os.path.join(cfg.output_data_generated, _output, folder_scene)
+
+        # construct each zones folder name
+        zones_folder = []
+        features_folder = []
+
+        if folder_scene in _human_thresholds:
+
+            zones_threshold = _human_thresholds[folder_scene]
+            # get zones list info
+            for index in zones:
+                index_str = str(index)
+                if len(index_str) < 2:
+                    index_str = "0" + index_str
+
+                current_zone = "zone"+index_str
+                zones_folder.append(current_zone)
+                zone_path = os.path.join(output_scene_path, current_zone)
+
+                # custom path for feature
+                feature_path = os.path.join(zone_path, transformation.getName())
+
+                if not os.path.exists(feature_path):
+                    os.makedirs(feature_path)
+
+                # custom path for interval of reconstruction and feature
+                feature_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
+                features_folder.append(feature_interval_path)
+
+                if not os.path.exists(feature_interval_path):
+                    os.makedirs(feature_interval_path)
+
+                # create for each zone the labels folder
+                labels = [cfg.not_noisy_folder, cfg.noisy_folder]
+
+                for label in labels:
+                    label_folder = os.path.join(feature_interval_path, label)
+
+                    if not os.path.exists(label_folder):
+                        os.makedirs(label_folder)
+
+            # get all images of folder
+            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+            number_scene_image = len(scene_images)
+
+            # for each images
+            for id_img, img_path in enumerate(scene_images):
+
+                current_img = Image.open(img_path)
+                img_blocks = divide_in_blocks(current_img, cfg.sub_image_size)
+
+                current_quality_index = int(get_scene_image_quality(img_path))
+
+                for id_block, block in enumerate(img_blocks):
+
+                    ##########################
+                    # Image computation part #
+                    ##########################
+
+                    label_path = features_folder[id_block]
+
+                    # get label folder for block
+                    if current_quality_index > zones_threshold[id_block]:
+                        label_path = os.path.join(label_path, cfg.not_noisy_folder)
+                    else:
+                        label_path = os.path.join(label_path, cfg.noisy_folder)
+
+                    # check if necessary to compute or not images
+                    # Disable use of data augmentation for the moment
+                    # Data augmentation!
+                    # rotations = [0, 90, 180, 270]
+
+                    #img_flip_labels = ['original', 'horizontal', 'vertical', 'both']
+                    # img_flip_labels = ['original', 'horizontal']
+
+                    # output_images_path = []
+                    # check_path_exists = []
+                    # # rotate and flip image to increase dataset size
+                    # for id, flip_label in enumerate(img_flip_labels):
+                    #     for rotation in rotations:
+                    #         output_reconstructed_filename = img_path.split('/')[-1].replace('.png', '') + '_' + zones_folder[id_block] + cfg.post_image_name_separator
+                    #         output_reconstructed_filename = output_reconstructed_filename + flip_label + '_' + str(rotation) + '.png'
+                    #         output_reconstructed_path = os.path.join(label_path, output_reconstructed_filename)
+
+                    #         if os.path.exists(output_reconstructed_path):
+                    #             check_path_exists.append(True)
+                    #         else:
+                    #             check_path_exists.append(False)
+
+                    #         output_images_path.append(output_reconstructed_path)
+
+                    # compute only if not exists or necessary to replace
+                    # if _replace or not np.array(check_path_exists).all():
+                        # compute image
+                        # pass block to grey level
+                        # output_block = transformation.getTransformedImage(block)
+                        # output_block = np.array(output_block, 'uint8')
+                        
+                        # # current output image
+                        # output_block_img = Image.fromarray(output_block)
+
+                        #horizontal_img = output_block_img.transpose(Image.FLIP_LEFT_RIGHT)
+                        #vertical_img = output_block_img.transpose(Image.FLIP_TOP_BOTTOM)
+                        #both_img = output_block_img.transpose(Image.TRANSPOSE)
+
+                        #flip_images = [output_block_img, horizontal_img, vertical_img, both_img]
+                        #flip_images = [output_block_img, horizontal_img]
+
+                        # Only the current image currently
+                        # flip_images = [output_block_img]
+
+                        # # rotate and flip image to increase dataset size
+                        # counter_index = 0 # get current path index
+                        # for id, flip in enumerate(flip_images):
+                        #     for rotation in rotations:
+
+                        #         if _replace or not check_path_exists[counter_index]:
+                        #             rotated_output_img = flip.rotate(rotation)
+                        #             rotated_output_img.save(output_images_path[counter_index])
+
+                        #         counter_index +=1
+                    
+                    if _replace:
+                        
+                        _, filename = os.path.split(img_path)
+
+                        # build of output image filename
+                        filename = filename.replace('.png', '')
+                        filename_parts = filename.split('_')
+
+                        # get samples : `00XXX`
+                        n_samples = filename_parts[2]
+                        del filename_parts[2]
+
+                        # `p3d_XXXXXX`
+                        output_reconstructed = '_'.join(filename_parts)
+
+                        output_reconstructed_filename = output_reconstructed + '_' + zones_folder[id_block] + '_' + n_samples + '.png'
+                        output_reconstructed_path = os.path.join(label_path, output_reconstructed_filename)
+
+                        output_block = transformation.getTransformedImage(block)
+                        output_block = np.array(output_block, 'uint8')
+                        
+                        # current output image
+                        output_block_img = Image.fromarray(output_block)
+                        output_block_img.save(output_reconstructed_path)
+
+
+                write_progress((id_img + 1) / number_scene_image)
+
+            print('\n')
+
+    print("{0}_{1} : end of data generation\n".format(transformation.getName(), transformation.getParam()))
+
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Compute and prepare data of feature of all scenes using specific interval if necessary")
+
+    parser.add_argument('--features', type=str, 
+                                     help="list of features choice in order to compute data",
+                                     default='svd_reconstruction, ipca_reconstruction',
+                                     required=True)
+    parser.add_argument('--params', type=str, 
+                                    help="list of specific param for each feature choice (See README.md for further information in 3D mode)", 
+                                    default='100, 200 :: 50, 25',
+                                    required=True)
+    parser.add_argument('--folder', type=str,
+                            help='folder where dataset is available',
+                            required=True)  
+    parser.add_argument('--output', type=str,
+                                help='output folder where data are saved',
+                                required=True)              
+    parser.add_argument('--thresholds', type=str, help='file which contains all thresholds', required=True)
+    parser.add_argument('--size', type=str, 
+                                help="specific size of image", 
+                                default='100, 100',
+                                required=True)
+    parser.add_argument('--replace', type=int, help='replace previous pictures', default=1)
+
+    args = parser.parse_args()
+
+    p_features   = list(map(str.strip, args.features.split(',')))
+    p_params     = list(map(str.strip, args.params.split('::')))
+    p_folder     = args.folder
+    p_output     = args.output
+    p_thresholds = args.thresholds
+    p_size       = args.size
+    p_replace    = bool(args.replace)
+
+    # list of transformations
+    transformations = []
+
+    for id, feature in enumerate(p_features):
+
+        if feature not in features_choices or feature == 'static':
+            raise ValueError("Unknown feature {0}, please select a correct feature (`static` excluded) : {1}".format(feature, features_choices))
+        
+        transformations.append(Transformation(feature, p_params[id], p_size))
+
+    human_thresholds = {}
+
+    # 3. retrieve human_thresholds
+    # construct zones folder
+    with open(p_thresholds) as f:
+        thresholds_line = f.readlines()
+
+        for line in thresholds_line:
+            data = line.split(';')
+            del data[-1] # remove unused last element `\n`
+            current_scene = data[0]
+            thresholds_scene = data[1:]
+
+            if current_scene != '50_shades_of_grey':
+                human_thresholds[current_scene] = [ int(threshold) for threshold in  thresholds_scene ]
+
+
+    # generate all or specific feature data
+    for transformation in transformations:
+        generate_data(transformation, p_folder, p_output, human_thresholds, p_replace)
+
+if __name__== "__main__":
+    main()

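Note: when `--replace` is set, the output filename is rebuilt by moving the sample counter (`00XXX`, the third `_`-separated part) behind the zone name. A hypothetical walk-through (the input stem is assumed for illustration):

    filename = 'scene_p3d_00850'        # assumed input stem
    parts = filename.split('_')         # ['scene', 'p3d', '00850']
    n_samples = parts[2]
    del parts[2]
    stem = '_'.join(parts)              # 'scene_p3d'
    output = stem + '_zone00_' + n_samples + '.png'
    # -> 'scene_p3d_zone00_00850.png'
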
+ 104 - 0
generate/generate_selected_zones_file.py

@@ -0,0 +1,104 @@
+# main imports
+import numpy as np
+import pandas as pd
+import sys, os, argparse
+import random
+
+
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+
+
+def save_learned_zones(output_name, scene, zones):
+
+    if not os.path.exists(cfg.output_zones_learned):
+        os.makedirs(cfg.output_zones_learned)
+
+    with open(os.path.join(cfg.output_zones_learned, output_name), 'a') as f:
+        f.write(scene + ';')
+
+        for zone in zones:
+            f.write(str(zone) + ';')
+
+        f.write('\n')
+
+
+def get_random_zones(scene, zones, n_zones):
+
+    random.shuffle(zones)
+
+    # specific case for 'Cuisine01' (zone 12 is also noisy even in reference image)
+    # if scene == 'Cuisine01':
+    #     while 12 in zones[0:n_zones]:
+    #         random.shuffle(zones)
+    
+    return zones[0:n_zones]
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Read and compute entropy data file (using diff)")
+
+    parser.add_argument('--folder', type=str, help='dataset scene folder', required=True)
+    parser.add_argument('--n_zones', type=int, help='number of zones used in train', default=10)
+    parser.add_argument('--output', type=str, help='file with specific training zone', required=True)
+    parser.add_argument('--thresholds', type=str, help='file with specific thresholds (use only scenes from this file)', default='')
+
+    args = parser.parse_args()
+
+    p_folder       = args.folder
+    p_n_zones      = args.n_zones
+    p_output       = args.output
+    p_thresholds   = args.thresholds
+
+    # extract scenes to use if specified
+    available_scenes = None
+
+    if len(p_thresholds) > 0:
+        
+        available_scenes = []
+
+        with open(p_thresholds) as f:
+            thresholds_line = f.readlines()
+
+            for line in thresholds_line:
+                data = line.split(';')
+                del data[-1] # remove unused last element `\n`
+                current_scene = data[0]
+
+                # need to rename `current_name` because we only used part6
+                # scene_split = current_scene.split('_')
+                # del scene_split[-1]
+                # scene_name = '_'.join(scene_split)
+
+                available_scenes.append(current_scene)
+
+
+    # specific number of zones (zones indices)
+    zones = np.arange(16)
+
+    # get all scene names
+    scenes = os.listdir(p_folder)
+
+    # create output thresholds directory if necessary
+    folder, _ = os.path.split(p_output)
+
+    if len(folder) > 0:
+        os.makedirs(folder, exist_ok=True)  # avoid error if the folder already exists
+
+    # for each scene we generate random zones choice
+    for folder_scene in scenes:
+
+        if available_scenes is not None:
+
+            if folder_scene in available_scenes:
+                selected_zones = get_random_zones(folder_scene, zones, p_n_zones)
+                save_learned_zones(p_output, folder_scene, selected_zones)
+        else:
+            selected_zones = get_random_zones(folder_scene, zones, p_n_zones)
+            save_learned_zones(p_output, folder_scene, selected_zones)
+            
+
+if __name__== "__main__":
+    main()

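Note: `save_learned_zones` writes one line per scene, `scene;zone;zone;...;`, with a trailing `;` before the newline; that trailing separator is why the dataset scripts earlier in this diff call `del data[-1]` after `split(';')`. A minimal reader sketch (hypothetical, scene name illustrative):

    # e.g. 'Appart1opt02;3;7;12;0;9;1;15;4;10;6;'
    def read_selected_zones(path):
        zones = {}
        with open(path, 'r') as f:
            for line in f:
                data = line.split(';')
                del data[-1]  # drop the '\n' element left by the trailing ';'
                zones[data[0]] = [int(z) for z in data[1:]]
        return zones
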
+ 1 - 1
modules

@@ -1 +1 @@
-Subproject commit acdc213cdd418eb03833f7f0f0a72466b8812cbf
+Subproject commit 270de3a969ff3121e68f435cc6a3b570ba5b9d69

+ 201 - 0
prediction/estimate_thresholds_cnn.py

@@ -0,0 +1,201 @@
+# main imports
+import numpy as np
+import pandas as pd
+import sys, os, argparse
+
+# image processing
+from PIL import Image
+from ipfml import utils
+from ipfml.processing import transform, segmentation
+
+import matplotlib.pyplot as plt
+
+# model imports
+import joblib
+from keras.models import load_model
+from keras import backend as K
+
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+import modules.utils.data as dt
+from modules.classes.Transformation import Transformation
+
+def write_progress(progress):
+    barWidth = 180
+
+    output_str = "["
+    pos = barWidth * progress
+    for i in range(barWidth):
+        if i < pos:
+           output_str = output_str + "="
+        elif i == pos:
+           output_str = output_str + ">"
+        else:
+            output_str = output_str + " "
+
+    output_str = output_str + "] " + str(int(progress * 100.0)) + " %\r"
+    print(output_str)
+    sys.stdout.write("\033[F")
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Read and compute entropy data file")
+
+    parser.add_argument('--model', type=str, help='model .h5 file')
+    parser.add_argument('--folder', type=str,
+                        help='folder where scene dataset is available',
+                        required=True)  
+    parser.add_argument('--features', type=str, 
+                                     help="list of features choice in order to compute data",
+                                     default='svd_reconstruction, ipca_reconstruction',
+                                     required=True)
+    parser.add_argument('--params', type=str, 
+                                    help="list of specific param for each feature choice (See README.md for further information in 3D mode)", 
+                                    default='100, 200 :: 50, 25',
+                                    required=True)
+    parser.add_argument('--size', type=str, 
+                                help="specific size of image", 
+                                default='100, 100',
+                                required=True)
+    parser.add_argument('--n_stop', type=int, help='number of consecutive detections required before stopping', default=1)
+    parser.add_argument('--save', type=str, help='filename where to save input data')
+    parser.add_argument('--label', type=str, help='label to use when saving thresholds')
+
+    args = parser.parse_args()
+
+    p_model    = args.model
+    p_folder   = args.folder
+    p_features = list(map(str.strip, args.features.split(',')))
+    p_params   = list(map(str.strip, args.params.split('::')))
+    p_size     = args.size
+    p_n_stop = args.n_stop
+    p_save     = args.save
+    p_label    = args.label
+
+    # 1. Load expected transformations
+
+    # list of transformations
+    transformations = []
+
+    for id, feature in enumerate(p_features):
+
+        if feature not in cfg.features_choices_labels or feature == 'static':
+            raise ValueError("Unknown feature, please select a correct feature (`static` excluded) : ", cfg.features_choices_labels)
+
+        transformations.append(Transformation(feature, p_params[id], p_size))
+
+    # 2. load model and compile it
+
+    # TODO : check kind of model
+    model = load_model(p_model)
+    # model.compile(loss='binary_crossentropy',
+    #               optimizer='rmsprop',
+    #               metrics=['accuracy'])
+
+
+    estimated_thresholds = []
+    n_estimated_thresholds = []
+
+    scene_path = p_folder
+
+    if not os.path.exists(scene_path):
+        print('Invalid scene path:', scene_path)
+        exit(0)
+
+    # 3. retrieve human_thresholds
+    # construct zones folder
+    zones_indices = np.arange(16)
+    zones_list = []
+
+    for index in zones_indices:
+
+        index_str = str(index)
+
+        while len(index_str) < 2:
+            index_str = "0" + index_str
+        
+        zones_list.append(cfg.zone_folder + index_str)
+
+
+    # 4. get estimated thresholds using model and specific method
+    images_path = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+    number_of_images = len(images_path)
+    image_indices = [ dt.get_scene_image_quality(img_path) for img_path in images_path ]
+
+    image_counter = 0
+
+    # append empty list
+    for _ in zones_list:
+        estimated_thresholds.append(None)
+        n_estimated_thresholds.append(0)
+
+    for img_i, img_path in enumerate(images_path):
+
+        blocks = segmentation.divide_in_blocks(Image.open(img_path), (200, 200))
+
+        for index, block in enumerate(blocks):
+            
+            if estimated_thresholds[index] is None:
+                
+                transformed_list = []
+                # compute data here
+                for transformation in transformations:
+                    transformed = transformation.getTransformedImage(block)
+                    transformed_list.append(transformed)
+
+                data = np.array(transformed_list)
+                
+                # compute input size
+                n_channels, _, _ = data.shape
+
+                # K.image_data_format() returns 'channels_first' or 'channels_last'
+                if K.image_data_format() == 'channels_first':
+                    if n_channels > 1:
+                        data = np.expand_dims(data, axis=0)
+
+                else:
+                    if n_channels > 1:
+                        data = data.transpose()
+                        data = np.expand_dims(data, axis=0)
+                    else:
+                        data = data.transpose()
+                    
+                data = np.expand_dims(data, axis=0)
+
+                probs = model.predict(np.array(data))[0]
+                prediction = list(probs).index(max(probs))
+                #print(index, ':', image_indices[img_i], '=>', prediction)
+               
+                if prediction == 0:
+                    n_estimated_thresholds[index] += 1
+
+                    # if the required number of consecutive detections is reached
+                    if n_estimated_thresholds[index] >= p_n_stop:
+                        estimated_thresholds[index] = image_indices[img_i]
+                else:
+                    n_estimated_thresholds[index] = 0
+
+        # write progress bar
+        write_progress((image_counter + 1) / number_of_images)
+        
+        image_counter = image_counter + 1
+    
+    # default label
+    for i, _ in enumerate(zones_list):
+        if estimated_thresholds[i] is None:
+            estimated_thresholds[i] = image_indices[-1]
+
+    # 6. save estimated thresholds into specific file
+    print('\nEstimated thresholds', estimated_thresholds)
+    if p_save is not None:
+        with open(p_save, 'a') as f:
+            f.write(p_label + ';')
+
+            for t in estimated_thresholds:
+                f.write(str(t) + ';')
+            f.write('\n')
+    
+
+if __name__== "__main__":
+    main()

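Note: a zone's threshold is frozen only after `--n_stop` consecutive 'not noisy' predictions; a single 'noisy' prediction resets the counter. A condensed sketch of that rule (illustration only):

    # per-zone consecutive-detection rule (condensed from the loop above)
    def update_zone(counter, prediction, quality, n_stop):
        # returns (new_counter, threshold or None)
        if prediction == 0:              # model predicts 'not noisy'
            counter += 1
            if counter >= n_stop:
                return counter, quality  # freeze threshold at this quality index
            return counter, None
        return 0, None                   # reset on a 'noisy' prediction
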
+ 208 - 0
prediction/estimate_thresholds_lstm.py

@@ -0,0 +1,208 @@
+# main imports
+import numpy as np
+import pandas as pd
+import sys, os, argparse
+
+# image processing
+from PIL import Image
+from ipfml import utils
+from ipfml.processing import transform, segmentation
+
+import matplotlib.pyplot as plt
+
+# model imports
+import joblib
+from keras.models import load_model
+from keras import backend as K
+
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+import modules.utils.data as dt
+from modules.classes.Transformation import Transformation
+
+def write_progress(progress):
+    barWidth = 180
+
+    output_str = "["
+    pos = barWidth * progress
+    for i in range(barWidth):
+        if i < pos:
+           output_str = output_str + "="
+        elif i == pos:
+           output_str = output_str + ">"
+        else:
+            output_str = output_str + " "
+
+    output_str = output_str + "] " + str(int(progress * 100.0)) + " %\r"
+    print(output_str)
+    sys.stdout.write("\033[F")
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Read and compute entropy data file")
+
+    parser.add_argument('--model', type=str, help='model .h5 file')
+    parser.add_argument('--folder', type=str,
+                        help='folder where scene dataset is available',
+                        required=True)  
+    parser.add_argument('--features', type=str, 
+                                     help="list of features choice in order to compute data",
+                                     default='svd_reconstruction, ipca_reconstruction',
+                                     required=True)
+    parser.add_argument('--params', type=str, 
+                                    help="list of specific param for each feature choice (See README.md for further information in 3D mode)", 
+                                    default='100, 200 :: 50, 25',
+                                    required=True)
+    parser.add_argument('--size', type=str, 
+                                help="specific size of image", 
+                                default='100, 100',
+                                required=True)
+    parser.add_argument('--sequence', type=int, help='expected sequence length', required=True)
+    parser.add_argument('--n_stop', type=int, help='number of consecutive detections required before stopping', default=1)
+    parser.add_argument('--save', type=str, help='filename where to save input data')
+    parser.add_argument('--label', type=str, help='label to use when saving thresholds')
+
+    args = parser.parse_args()
+
+    p_model    = args.model
+    p_folder   = args.folder
+    p_features = list(map(str.strip, args.features.split(',')))
+    p_params   = list(map(str.strip, args.params.split('::')))
+    p_size     = args.size
+    p_sequence = args.sequence
+    p_n_stop   = args.n_stop
+    p_save     = args.save
+    p_label    = args.label
+
+    # 1. Load expected transformations
+
+    # list of transformations
+    transformations = []
+
+    for id, feature in enumerate(p_features):
+
+        if feature not in cfg.features_choices_labels or feature == 'static':
+            raise ValueError("Unknown feature, please select a correct feature (`static` excluded) : ", cfg.features_choices_labels)
+
+        transformations.append(Transformation(feature, p_params[id], p_size))
+
+    # 2. load model and compile it
+
+    # TODO : check kind of model
+    model = joblib.load(p_model)
+    model.compile(loss='binary_crossentropy',
+                  optimizer='rmsprop',
+                  metrics=['accuracy'])
+    # model = load_model(p_model)
+    # model.compile(loss='binary_crossentropy',
+    #               optimizer='rmsprop',
+    #               metrics=['accuracy'])
+
+
+    estimated_thresholds = []
+    n_estimated_thresholds = []
+    sequence_list_zones = []
+
+    scene_path = p_folder
+
+    if not os.path.exists(scene_path):
+        print('Invalid scene path:', scene_path)
+        exit(0)
+
+    # 3. retrieve human_thresholds
+    # construct zones folder
+    zones_indices = np.arange(16)
+    zones_list = []
+
+    for index in zones_indices:
+
+        index_str = str(index)
+
+        while len(index_str) < 2:
+            index_str = "0" + index_str
+        
+        zones_list.append(cfg.zone_folder + index_str)
+
+
+    # 4. get estimated thresholds using model and specific method
+    images_path = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
+    number_of_images = len(images_path)
+    image_indices = [ dt.get_scene_image_quality(img_path) for img_path in images_path ]
+
+    image_counter = 0
+
+    # append empty list
+    for _ in zones_list:
+        estimated_thresholds.append(None)
+        n_estimated_thresholds.append(0)
+        sequence_list_zones.append([])
+
+    for img_i, img_path in enumerate(images_path):
+
+        blocks = segmentation.divide_in_blocks(Image.open(img_path), (200, 200))
+
+        for index, block in enumerate(blocks):
+            
+            sequence_list = sequence_list_zones[index]
+
+            if estimated_thresholds[index] is None:
+                
+                transformed_list = []
+                # compute data here
+                for transformation in transformations:
+                    transformed = transformation.getTransformedImage(block)
+                    transformed_list.append(transformed)
+
+                data = np.array(transformed_list)
+
+                sequence_list.append(data)
+                
+                if len(sequence_list) >= p_sequence:
+                    # compute input size
+                    # n_chanels, _, _ = data.shape
+
+                    input_data = np.array(sequence_list)
+                        
+                    input_data = np.expand_dims(input_data, axis=0)
+
+                    prob = model.predict(np.array(input_data))[0]
+                    #print(index, ':', image_indices[img_i], '=>', prediction)
+                
+                    # if prob is now close to label `0`, the image is no longer noisy
+                    if prob < 0.5:
+                        n_estimated_thresholds[index] += 1
+
+                        # if the required number of consecutive detections is reached
+                        if n_estimated_thresholds[index] >= p_n_stop:
+                            estimated_thresholds[index] = image_indices[img_i]
+                    else:
+                        n_estimated_thresholds[index] = 0
+
+                    # remove first image
+                    del sequence_list[0]
+
+        # write progress bar
+        write_progress((image_counter + 1) / number_of_images)
+        
+        image_counter = image_counter + 1
+    
+    # default label
+    for i, _ in enumerate(zones_list):
+        if estimated_thresholds[i] is None:
+            estimated_thresholds[i] = image_indices[-1]
+
+    # 6. save estimated thresholds into specific file
+    print('\nEstimated thresholds', estimated_thresholds)
+    if p_save is not None:
+        with open(p_save, 'a') as f:
+            f.write(p_label + ';')
+
+            for t in estimated_thresholds:
+                f.write(str(t) + ';')
+            f.write('\n')
+    
+
+if __name__== "__main__":
+    main()

+ 45 - 21
run/cross_run_nl_mean.sh

@@ -6,41 +6,65 @@ all_scenes="A,B,C,D,E,F,G,H,I"
 # file which contains model names we want to use for simulation
 file_path="results/models_comparisons.csv"
 stride=1
-dist_patch=6
+window=6
 
 # for kernel in {3,5,7}; do
 #     echo python generate/generate_reconstructed_data.py --features ${metric} --params ${kernel},${dist_patch} --size 100,100 --scenes ${all_scenes} --replace 0
 # done
 
-for scene in {"A","B","D","G","H","I"}; do
+# for scene in {"A","B","D","G","H","I"}; do
 
-    # remove current scene test from dataset
-    s="${scenes//,${scene}}"
-    s="${s//${scene},}"
+#     # remove current scene test from dataset
+#     s="${scenes//,${scene}}"
+#     s="${s//${scene},}"
 
-    for zone in {10,11,12}; do
-        for kernel in {3,5,7}; do
-            for balancing in {0,1}; do
+#     for zone in {10,11,12}; do
+#         for kernel in {3,5,7}; do
+#             for balancing in {0,1}; do
             
-                OUTPUT_DATA_FILE="${metric}_nb_zones_${zone}_W${window}_K${kernel}_balancing${balancing}_without_${scene}"
-                OUTPUT_DATA_FILE_TEST="${metric}_nb_zones_${zone}_W${window}_K${kernel}_balancing${balancing}_scene_${scene}"
+#                 OUTPUT_DATA_FILE="${metric}_nb_zones_${zone}_W${window}_K${kernel}_balancing${balancing}_without_${scene}"
+#                 OUTPUT_DATA_FILE_TEST="${metric}_nb_zones_${zone}_W${window}_K${kernel}_balancing${balancing}_scene_${scene}"
 
-                if grep -q "${OUTPUT_DATA_FILE}" "${file_path}"; then
+#                 if grep -q "${OUTPUT_DATA_FILE}" "${file_path}"; then
                 
-                    echo "SVD model ${OUTPUT_DATA_FILE} already generated"
+#                     echo "SVD model ${OUTPUT_DATA_FILE} already generated"
 
-                else
+#                 else
 
-                    #echo "Run computation for SVD model ${OUTPUT_DATA_FILE}"
-                    echo python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE_TEST} --features ${metric} --scenes ${scene} --params ${kernel},${dist_patch} --nb_zones ${zone} --random 1 --size 200,200     
+#                     #echo "Run computation for SVD model ${OUTPUT_DATA_FILE}"
+#                     echo python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE_TEST} --features ${metric} --scenes ${scene} --params ${kernel},${dist_patch} --nb_zones ${zone} --random 1 --size 200,200     
 
-                    echo python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE} --features ${metric} --scenes ${s} --params ${kernel},${dist_patch} --nb_zones ${zone} --random 1 --size 200,200     
+#                     echo python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE} --features ${metric} --scenes ${s} --params ${kernel},${dist_patch} --nb_zones ${zone} --random 1 --size 200,200     
                     
-                    echo python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} --balancing ${balancing}
-                    echo python prediction_model.py --data data/${OUTPUT_DATA_FILE_TEST}.train --model saved_models/${OUTPUT_DATA_FILE}.json
-                fi 
-            done
+#                     echo python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} --balancing ${balancing} --chanels 3
+#                     echo python prediction_model.py --data data/${OUTPUT_DATA_FILE_TEST}.train --model saved_models/${OUTPUT_DATA_FILE}.json
+#                 fi 
+#             done
+#         done
+#     done
+# done
+
+
+s="A,D,G,H"
+for zone in {10,11,12}; do
+    for kernel in {3,5,7}; do
+        for balancing in {0,1}; do
+        
+            OUTPUT_DATA_FILE="${metric}_nb_zones_${zone}_W${window}_K${kernel}_balancing${balancing}_maxwell"
+
+            if grep -q "${OUTPUT_DATA_FILE}" "${file_path}"; then
+            
+                echo "SVD model ${OUTPUT_DATA_FILE} already generated"
+
+            else
+
+                #echo "Run computation for SVD model ${OUTPUT_DATA_FILE}"
+
+                echo python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE} --features ${metric} --scenes ${s} --params ${kernel},${window} --nb_zones ${zone} --random 1 --size 200,200
+                
+                echo python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} --balancing ${balancing} --chanels 3
+                echo python prediction_model.py --data data/${OUTPUT_DATA_FILE}.test --model data/saved_models/${OUTPUT_DATA_FILE}.h5
+            fi 
         done
     done
 done
-
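+# e.g. with zone=10, kernel=3 and balancing=0, the loop above echoes commands like
+# (metric value left symbolic):
+#   python train_model.py --data data/${metric}_nb_zones_10_W6_K3_balancing0_maxwell --output ${metric}_nb_zones_10_W6_K3_balancing0_maxwell --balancing 0 --chanels 3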

+ 277 - 0
train_lstm_weighted.py

@@ -0,0 +1,277 @@
+# main imports
+import argparse
+import numpy as np
+import pandas as pd
+import os
+from PIL import Image
+
+import matplotlib.pyplot as plt
+from ipfml import utils
+
+# dl imports
+from keras.layers import Dense, Dropout, BatchNormalization, ConvLSTM2D, Conv3D, Flatten
+from keras.models import Sequential
+from sklearn.metrics import roc_auc_score, accuracy_score
+import sklearn
+
+import custom_config as cfg
+
+
+def build_input(df, seq_norm):
+    """Convert dataframe to numpy array input with timesteps as float array
+    
+    Arguments:
+        df: {pd.Dataframe} -- Dataframe input
+        seq_norm: {bool} -- normalize or not seq input data by features
+    
+    Returns:
+        {np.ndarray} -- input LSTM data as numpy array
+    """
+
+    arr = []
+
+    # for each input line
+    for row in df.iterrows():
+
+        seq_arr = []
+
+        # for each sequence data input
+        for column in row[1]:
+
+            seq_elems = []
+
+            # for each element in sequence data
+            for img_path in column:
+                img = Image.open(img_path)
+                seq_elems.append(np.array(img))
+
+            seq_arr.append(np.array(seq_elems))
+            
+        arr.append(seq_arr)
+
+    arr = np.array(arr)
+    print('Raw input array shape:', arr.shape)
+
+    final_arr = np.array(arr, 'float32')
+
+    # normalize each sequence feature-wise if requested; the per-feature
+    # unpacking only fits flattened sequences of shape (n, timesteps, features)
+    if seq_norm and final_arr.ndim == 3:
+
+        _, _, f = final_arr.shape
+        for index, seq in enumerate(final_arr):
+
+            for i in range(f):
+                final_arr[index][:, i] = utils.normalize_arr_with_range(seq[:, i])
+
+    return final_arr
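+
+# note: each dataframe cell is expected to hold '::'-separated image paths,
+# matching the str.split('::') preprocessing applied in main() below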
+
+def create_model(_input_shape):
+    print('Creating model...')
+    model = Sequential()
+    
+    model.add(ConvLSTM2D(filters=100, kernel_size=(3, 3),
+                   input_shape=_input_shape,
+                   padding='same', return_sequences=True))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(ConvLSTM2D(filters=50, kernel_size=(3, 3),
+                    padding='same', return_sequences=True))
+    model.add(BatchNormalization())
+    model.add(Dropout(0.4))
+
+    model.add(Conv3D(filters=20, kernel_size=(3, 3, 3),
+                activation='sigmoid',
+                padding='same', data_format='channels_last'))
+    model.add(Dropout(0.4))
+
+    model.add(Flatten())
+    model.add(Dense(512, activation='sigmoid'))
+    model.add(Dropout(0.4))
+    model.add(Dense(128, activation='sigmoid'))
+    model.add(Dropout(0.4))
+    model.add(Dense(1, activation='sigmoid'))
+
+    print('Compiling...')
+    model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['accuracy'])
+
+    return model
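+
+# usage sketch: with channels_last data, ConvLSTM2D expects
+# input_shape = (timesteps, rows, cols, channels); e.g. for sequences of
+# five 100x100 grayscale patches (values hypothetical):
+#   model = create_model((5, 100, 100, 1))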
+
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Train a ConvLSTM model from given train and test datasets")
+
+    parser.add_argument('--train', type=str, help='input train dataset')
+    parser.add_argument('--test', type=str, help='input test dataset')
+    parser.add_argument('--output', type=str, help='output model name')
+    parser.add_argument('--seq_norm', type=int, help='normalize sequence input data feature-wise', choices=[0, 1])
+
+    args = parser.parse_args()
+
+    p_train        = args.train
+    p_test         = args.test
+    p_output       = args.output
+    p_seq_norm     = bool(args.seq_norm)
+
+
+    dataset_train = pd.read_csv(p_train, header=None, sep=';')
+    dataset_test = pd.read_csv(p_test, header=None, sep=';')
+
+    # getting weighted class over the whole dataset
+    noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 1]
+    not_noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 0]
+    nb_noisy_train = len(noisy_df_train.index)
+    nb_not_noisy_train = len(not_noisy_df_train.index)
+
+    noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 1]
+    not_noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 0]
+    nb_noisy_test = len(noisy_df_test.index)
+    nb_not_noisy_test = len(not_noisy_df_test.index)
+
+    noisy_samples = nb_noisy_test + nb_noisy_train
+    not_noisy_samples = nb_not_noisy_test + nb_not_noisy_train
+
+    total_samples = noisy_samples + not_noisy_samples
+
+    print('noisy', noisy_samples)
+    print('not_noisy', not_noisy_samples)
+    print('total', total_samples)
+
+    class_weight = {
+        0: noisy_samples / float(total_samples),
+        1: (not_noisy_samples / float(total_samples)),
+    }
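+    # e.g. 300 noisy and 700 not-noisy samples out of 1000 give
+    # class_weight = {0: 0.3, 1: 0.7}: each class is weighted by the
+    # opposite class frequency, so the minority class weighs more in the loss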
+
+    # shuffle data
+    final_df_train = sklearn.utils.shuffle(dataset_train)
+    final_df_test = sklearn.utils.shuffle(dataset_test)
+
+    # split dataset into X_train, y_train, X_test, y_test
+    X_train = final_df_train.loc[:, 1:].apply(lambda x: x.astype(str).str.split('::'))
+    X_train = build_input(X_train, p_seq_norm)
+    y_train = final_df_train.loc[:, 0].astype('int')
+
+    X_test = final_df_test.loc[:, 1:].apply(lambda x: x.astype(str).str.split('::'))
+    X_test = build_input(X_test, p_seq_norm)
+    y_test = final_df_test.loc[:, 0].astype('int')
+
+    X_all = np.concatenate([X_train, X_test])
+    y_all = np.concatenate([y_train, y_test])
+
+    input_shape = (X_train.shape[1], X_train.shape[2], X_train.shape[3], X_train.shape[4])
+    print('Training data input shape', input_shape)
+    model = create_model(input_shape)
+    model.summary()
+
+    print("Fitting model with custom class_weight", class_weight)
+    history = model.fit(X_train, y_train, batch_size=16, epochs=3, validation_split=0.30, verbose=1, shuffle=True, class_weight=class_weight)
+
+    y_train_predict = model.predict_classes(X_train)
+    y_test_predict = model.predict_classes(X_test)
+    y_all_predict = model.predict_classes(X_all)
+
+    print(y_train_predict)
+    print(y_test_predict)
+
+    auc_train = roc_auc_score(y_train, y_train_predict)
+    auc_test = roc_auc_score(y_test, y_test_predict)
+    auc_all = roc_auc_score(y_all, y_all_predict)
+
+    acc_train = accuracy_score(y_train, y_train_predict)
+    acc_test = accuracy_score(y_test, y_test_predict)
+    acc_all = accuracy_score(y_all, y_all_predict)
+    
+    print('Train ACC:', acc_train)
+    print('Train AUC:', auc_train)
+    print('Test ACC:', acc_test)
+    print('Test AUC:', auc_test)
+    print('All ACC:', acc_all)
+    print('All AUC:', auc_all)
+
+
+    # save model results
+    if not os.path.exists(cfg.output_results_folder):
+        os.makedirs(cfg.output_results_folder)
+
+    results_filename = os.path.join(cfg.output_results_folder, cfg.results_filename)
+
+    with open(results_filename, 'a') as f:
+        f.write(p_output + ';' + str(acc_train) + ';' + str(auc_train) + ';' + str(acc_test) + ';' + str(auc_test) + '\n')
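+    # each run appends one CSV line: name;acc_train;auc_train;acc_test;auc_test
+    # e.g. (values illustrative): convlstm_seq;0.91;0.93;0.84;0.86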
+
+    # save acc metric information
+    plt.plot(history.history['accuracy'])
+    plt.plot(history.history['val_accuracy'])
+    plt.title('model accuracy')
+    plt.ylabel('accuracy')
+    plt.xlabel('epoch')
+    plt.legend(['train', 'test'], loc='upper left')
+
+    model_history = os.path.join(cfg.output_results_folder, p_output + '.png')
+    plt.savefig(model_history)
+
+    # save model with model.save (Keras models do not serialize reliably
+    # through pickle/joblib)
+    if not os.path.exists(cfg.output_models):
+        os.makedirs(cfg.output_models)
+
+    model.save(os.path.join(cfg.output_models, p_output + '.h5'))
+
+if __name__ == "__main__":
+    main()
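+
+# example invocation (paths hypothetical):
+#   python train_lstm_weighted.py --train data/seq_train.csv --test data/seq_test.csv --output convlstm_seq --seq_norm 1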

+ 16 - 18
train_model.py

@@ -34,6 +34,7 @@ def main():
     parser.add_argument('--epochs', type=int, help='number of epochs used for training model', default=cfg.keras_epochs)
     parser.add_argument('--balancing', type=int, help='specify if balancing of classes is done or not', default=1)
     parser.add_argument('--chanels', type=int, help="given number of chanels if necessary", default=0)
+    parser.add_argument('--size', type=str, help="size of input images (width,height)", default="100,100")
     #parser.add_argument('--val_size', type=float, help='percent of validation data during training process', default=cfg.val_dataset_size)
 
 
@@ -46,10 +47,11 @@ def main():
     p_epochs      = args.epochs
     p_balancing   = bool(args.balancing)
     p_chanels     = args.chanels
+    p_size        = args.size.split(',')
 
     #p_val_size    = args.val_size
     initial_epoch = 0
-        
+
     ########################
     # 1. Get and prepare data
     ########################
@@ -73,7 +75,7 @@ def main():
         n_chanels = p_chanels
 
     print("Number of chanels : ", n_chanels)
-    img_width, img_height = cfg.keras_img_size
+    img_width, img_height = [ int(s) for s in p_size ]
 
     # specify the number of dimensions
     if K.image_data_format() == 'channels_first':
@@ -172,8 +174,8 @@ def main():
         os.makedirs(model_backup_folder)
 
     # add of callback models
-    filepath = os.path.join(cfg.backup_model_folder, p_output, p_output + "-{auc:02f}-{val_auc:02f}__{epoch:02d}.hdf5")
-    checkpoint = ModelCheckpoint(filepath, monitor='val_auc', verbose=1, save_best_only=True, mode='max')
+    filepath = os.path.join(cfg.backup_model_folder, p_output, p_output + "-{accuracy:02f}-{val_accuracy:02f}__{epoch:02d}.hdf5")
+    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
     callbacks_list = [checkpoint]
 
     
@@ -201,13 +203,14 @@ def main():
 
         initial_epoch = max_last_epoch
         print("-------------------------------------------------")
-        print("Previous backup model found",  last_model_backup, "with already", initial_epoch, "done...")
+        print("Previous backup model found",  last_model_backup, "with already", initial_epoch, " epoch(s) done...")
         print("Resuming from epoch", str(initial_epoch + 1))
         print("-------------------------------------------------")
 
         # load weights
         weights_filepath = os.path.join(model_backup_folder, last_model_backup)
 
     model = models.get_model(n_chanels, input_shape, p_tl, weights_filepath)
     model.summary()
 
@@ -218,6 +221,7 @@ def main():
     y_data_categorical = to_categorical(y_data)
     #print(y_data_categorical)
 
+    print("Training data shape:", x_data.shape)
     # validation split parameter will use the last `%` data, so here, data will really validate our model
     model.fit(x_data, y_data_categorical, validation_split=validation_split, initial_epoch=initial_epoch, epochs=p_epochs, batch_size=p_batch_size, callbacks=callbacks_list)
 
@@ -226,18 +230,12 @@ def main():
 
     print("Accuracy score on val dataset ", score)
 
-    if not os.path.exists(cfg.saved_models_folder):
-        os.makedirs(cfg.saved_models_folder)
+    if not os.path.exists(cfg.output_models):
+        os.makedirs(cfg.output_models)
 
     # save the model into HDF5 file
-    model_output_path = os.path.join(cfg.saved_models_folder, p_output + '.json')
-    json_model_content = model.to_json()
-
-    with open(model_output_path, 'w') as f:
-        print("Model saved into ", model_output_path)
-        json.dump(json_model_content, f, indent=4)
-
-    model.save_weights(model_output_path.replace('.json', '.h5'))
+    model_output_path = os.path.join(cfg.output_models, p_output + '.h5')
+    model.save(model_output_path)
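+    # the .h5 file bundles architecture, weights and optimizer state, so the
+    # model can be reloaded in one call with keras.models.load_model(model_output_path)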
 
     # Get results obtained from model
     y_train_prediction = model.predict(x_data_train)
@@ -265,10 +263,10 @@ def main():
     roc_val_score = roc_auc_score(y_dataset_val, y_val_prediction)
 
     # save model performance
-    if not os.path.exists(cfg.results_information_folder):
-        os.makedirs(cfg.results_information_folder)
+    if not os.path.exists(cfg.output_results_folder):
+        os.makedirs(cfg.output_results_folder)
 
-    perf_file_path = os.path.join(cfg.results_information_folder, cfg.csv_model_comparisons_filename)
+    perf_file_path = os.path.join(cfg.output_results_folder, cfg.csv_model_comparisons_filename)
 
     # write header if necessary
     if not os.path.exists(perf_file_path):