Parcourir la source

Update of generation data scripts

Jérôme BUISINE il y a 5 ans
Parent
commit
6349290806

+ 1 - 1
display_simulation_curves.py

@@ -4,7 +4,7 @@ import pandas as pd
 import matplotlib.pyplot as plt
 import os, sys, getopt
 
-from modules.utils.data_type import get_svd_data
+from modules.utils.data import get_svd_data
 
 label_freq = 6
 

+ 1 - 1
display_svd_zone_scene.py

@@ -20,7 +20,7 @@ from ipfml import metrics
 from skimage import color
 
 import matplotlib.pyplot as plt
-from modules.utils.data_type import get_svd_data
+from modules.utils.data import get_svd_data
 
 from modules.utils import config as cfg
 

+ 1 - 1
generateAndTrain_maxwell.sh

@@ -54,7 +54,7 @@ for counter in {0..4}; do
 
                     echo "${MODEL_NAME} results already generated..."
                 else
-                    python generate_data_model_random_maxwell.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --sep ';' --rowindex '0'
+                    python generate_data_model_random_maxwell.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --renderer "maxwell" --step 40 --random 1 --percent 1
                     python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                     #python predict_seuil_expe_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2'

+ 1 - 1
generateAndTrain_maxwell_custom.sh

@@ -55,7 +55,7 @@ for counter in {0..4}; do
 
                     echo "${MODEL_NAME} results already generated..."
                 else
-                    python generate_data_model_random_maxwell.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --sep ';' --rowindex '0' --custom ${CUSTOM_MIN_MAX_FILENAME}
+                    python generate_data_model_random_maxwell.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
                     python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                     #python predict_seuil_expe_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}

+ 43 - 53
generate_all_data.py

@@ -13,7 +13,7 @@ import random
 import time
 import json
 
-from modules.utils.data_type import get_svd_data
+from modules.utils.data import get_svd_data
 from PIL import Image
 from ipfml import processing
 from ipfml import metrics
@@ -38,7 +38,6 @@ metric_choices          = cfg.metric_choices_labels
 output_data_folder      = cfg.output_data_folder
 
 generic_output_file_svd = '_random.csv'
-picture_step            = 10
 
 def generate_data_svd(data_type, mode):
     """
@@ -102,64 +101,63 @@ def generate_data_svd(data_type, mode):
 
         while(current_counter_index <= end_counter_index):
 
-            if current_counter_index % picture_step == 0:
-                current_counter_index_str = str(current_counter_index)
+            current_counter_index_str = str(current_counter_index)
 
-                while len(start_index_image) > len(current_counter_index_str):
-                    current_counter_index_str = "0" + current_counter_index_str
+            while len(start_index_image) > len(current_counter_index_str):
+                current_counter_index_str = "0" + current_counter_index_str
 
-                img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
+            img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
 
-                current_img = Image.open(img_path)
-                img_blocks = processing.divide_in_blocks(current_img, (200, 200))
+            current_img = Image.open(img_path)
+            img_blocks = processing.divide_in_blocks(current_img, (200, 200))
 
-                for id_block, block in enumerate(img_blocks):
+            for id_block, block in enumerate(img_blocks):
 
-                    ###########################
-                    # Metric computation part #
-                    ###########################
+                ###########################
+                # Metric computation part #
+                ###########################
 
-                    data = get_svd_data(data_type, block)
+                data = get_svd_data(data_type, block)
 
-                    ##################
-                    # Data mode part #
-                    ##################
+                ##################
+                # Data mode part #
+                ##################
 
-                    # modify data depending mode
-                    if mode == 'svdne':
+                # modify data depending mode
+                if mode == 'svdne':
 
-                        # getting max and min information from min_max_filename
-                        with open(data_min_max_filename, 'r') as f:
-                            min_val = float(f.readline())
-                            max_val = float(f.readline())
+                    # getting max and min information from min_max_filename
+                    with open(data_min_max_filename, 'r') as f:
+                        min_val = float(f.readline())
+                        max_val = float(f.readline())
 
-                        data = processing.normalize_arr_with_range(data, min_val, max_val)
+                    data = processing.normalize_arr_with_range(data, min_val, max_val)
 
-                    if mode == 'svdn':
-                        data = processing.normalize_arr(data)
+                if mode == 'svdn':
+                    data = processing.normalize_arr(data)
 
-                    # save min and max found from dataset in order to normalize data using whole data known
-                    if mode == 'svd':
+                # save min and max found from dataset in order to normalize data using whole data known
+                if mode == 'svd':
 
-                        current_min = data.min()
-                        current_max = data.max()
+                    current_min = data.min()
+                    current_max = data.max()
 
-                        if current_min < min_val_found:
-                            min_val_found = current_min
+                    if current_min < min_val_found:
+                        min_val_found = current_min
 
-                        if current_max > max_val_found:
-                            max_val_found = current_max
+                    if current_max > max_val_found:
+                        max_val_found = current_max
 
-                    # now write data into current writer
-                    current_file = svd_output_files[id_block]
+                # now write data into current writer
+                current_file = svd_output_files[id_block]
 
-                    # add of index
-                    current_file.write(current_counter_index_str + ';')
+                # add of index
+                current_file.write(current_counter_index_str + ';')
 
-                    for val in data:
-                        current_file.write(str(val) + ";")
+                for val in data:
+                    current_file.write(str(val) + ";")
 
-                    current_file.write('\n')
+                current_file.write('\n')
 
             start_index_image_int = int(start_index_image)
             print(data_type + "_" + mode + "_" + folder_scene + " - " + "{0:.2f}".format((current_counter_index - start_index_image_int) / (end_counter_index - start_index_image_int)* 100.) + "%")
@@ -184,26 +182,24 @@ def generate_data_svd(data_type, mode):
 def main():
 
     # default value of p_step
-    p_step = 10
+    p_step = 1
 
     if len(sys.argv) <= 1:
         print('Run with default parameters...')
         print('python generate_all_data.py --metric all')
         print('python generate_all_data.py --metric lab')
-        print('python generate_all_data.py --metric lab --step 10')
+        print('python generate_all_data.py --metric lab')
         sys.exit(2)
     try:
         opts, args = getopt.getopt(sys.argv[1:], "hms", ["help=", "metric=", "step="])
     except getopt.GetoptError:
         # print help information and exit:
-        print('python generate_all_data.py --metric all --step 10')
+        print('python generate_all_data.py --metric all')
         sys.exit(2)
     for o, a in opts:
         if o == "-h":
-            print('python generate_all_data.py --metric all --step 10')
+            print('python generate_all_data.py --metric all')
             sys.exit()
-        elif o in ("-s", "--step"):
-            p_step = int(a)
         elif o in ("-m", "--metric"):
             p_metric = a
 
@@ -212,12 +208,6 @@ def main():
         else:
             assert False, "unhandled option"
 
-    global picture_step
-    picture_step = p_step
-
-    if picture_step % 10 != 0:
-        assert False, "Picture step variable needs to be divided by ten"
-
     # generate all or specific metric data
     if p_metric == 'all':
         for m in metric_choices:

+ 71 - 40
generate_data_model_random_maxwell.py

@@ -17,6 +17,7 @@ from PIL import Image
 from ipfml import processing, metrics
 
 from modules.utils import config as cfg
+from modules.utils import data as dt
 
 # getting configuration information
 config_filename         = cfg.config_filename
@@ -24,8 +25,9 @@ zone_folder             = cfg.zone_folder
 min_max_filename        = cfg.min_max_filename_extension
 
 # define all scenes values
-scenes_list             = cfg.maxwell_scenes_names
-scenes_indexes          = cfg.maxwell_scenes_indices
+all_scenes_list         = cfg.scenes_names
+all_scenes_indices      = cfg.scenes_indices
+
 choices                 = cfg.normalization_choices
 path                    = cfg.dataset_path
 zones                   = cfg.zones_indices
@@ -38,10 +40,11 @@ min_max_ext             = cfg.min_max_filename_extension
 
 generic_output_file_svd = '_random.csv'
 
-min_value_interval = sys.maxsize
-max_value_interval = 0
+min_value_interval      = sys.maxsize
+max_value_interval      = 0
+
 
-def construct_new_line(path_seuil, interval, line, norm, sep, index):
+def construct_new_line(path_seuil, interval, line, choice, norm):
     begin, end = interval
 
     line_data = line.split(';')
@@ -52,7 +55,11 @@ def construct_new_line(path_seuil, interval, line, norm, sep, index):
 
     # TODO : check if it's always necessary to do that (loss of information for svd)
     if norm:
-        metrics = processing.normalize_arr_with_range(metrics, min_value_interval, max_value_interval)
+
+        if choice == 'svdne':
+            metrics = processing.normalize_arr_with_range(metrics, min_value_interval, max_value_interval)
+        if choice == 'svdn':
+            metrics = processing.normalize_arr(metrics)
 
     with open(path_seuil, "r") as seuil_file:
         seuil_learned = int(seuil_file.readline().strip())
@@ -63,15 +70,13 @@ def construct_new_line(path_seuil, interval, line, norm, sep, index):
         line = '0'
 
     for idx, val in enumerate(metrics):
-        if index:
-            line += " " + str(idx + 1)
-        line += sep
+        line += ';'
         line += str(val)
     line += '\n'
 
     return line
 
-def get_min_max_value_interval(_filename, _interval, _choice, _metric):
+def get_min_max_value_interval(_scenes_list, _filename, _interval, _choice, _metric):
 
     global min_value_interval, max_value_interval
 
@@ -83,7 +88,7 @@ def get_min_max_value_interval(_filename, _interval, _choice, _metric):
     for id_scene, folder_scene in enumerate(scenes):
 
         # only take care of maxwell scenes
-        if folder_scene in scenes_list:
+        if folder_scene in _scenes_list:
 
             scene_path = os.path.join(path, folder_scene)
 
@@ -99,6 +104,7 @@ def get_min_max_value_interval(_filename, _interval, _choice, _metric):
             random.shuffle(zones_folder)
 
             for id_zone, zone_folder in enumerate(zones_folder):
+
                 zone_path = os.path.join(scene_path, zone_folder)
                 data_filename = _metric + "_" + _choice + generic_output_file_svd
                 data_file_path = os.path.join(zone_path, data_filename)
@@ -111,10 +117,11 @@ def get_min_max_value_interval(_filename, _interval, _choice, _metric):
                 # check if user select current scene and zone to be part of training data set
                 for line in lines:
 
-
                     begin, end = _interval
 
                     line_data = line.split(';')
+                    print(line_data)
+
                     metrics = line_data[begin+1:end+1]
                     metrics = [float(m) for m in metrics]
 
@@ -130,7 +137,7 @@ def get_min_max_value_interval(_filename, _interval, _choice, _metric):
                     counter += 1
 
 
-def generate_data_model(_filename, _interval, _choice, _metric, _scenes = scenes_list, _nb_zones = 4, _percent = 1, _norm = False, _sep=':', _index=True):
+def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _scenes, _nb_zones = 4, _percent = 1, _random=0, _step=1, _norm = False):
 
     output_train_filename = _filename + ".train"
     output_test_filename = _filename + ".test"
@@ -142,18 +149,18 @@ def generate_data_model(_filename, _interval, _choice, _metric, _scenes = scenes
     if not os.path.exists(output_data_folder):
         os.makedirs(output_data_folder)
 
-    train_file = open(output_train_filename, 'w')
-    test_file = open(output_test_filename, 'w')
-
     scenes = os.listdir(path)
 
     # remove min max file from scenes folder
     scenes = [s for s in scenes if min_max_filename not in s]
 
+    train_file_data = []
+    test_file_data  = []
+
     for id_scene, folder_scene in enumerate(scenes):
 
         # only take care of maxwell scenes
-        if folder_scene in scenes_list:
+        if folder_scene in _scenes_list:
 
             scene_path = os.path.join(path, folder_scene)
 
@@ -166,7 +173,9 @@ def generate_data_model(_filename, _interval, _choice, _metric, _scenes = scenes
                 zones_folder.append("zone"+index_str)
 
             # shuffle list of zones (=> randomly choose zones)
-            random.shuffle(zones_folder)
+            # only in random mode
+            if _random:
+                random.shuffle(zones_folder)
 
             for id_zone, zone_folder in enumerate(zones_folder):
                 zone_path = os.path.join(scene_path, zone_folder)
@@ -179,26 +188,38 @@ def generate_data_model(_filename, _interval, _choice, _metric, _scenes = scenes
 
                 num_lines = len(lines)
 
-                lines_indexes = np.arange(num_lines)
-                random.shuffle(lines_indexes)
+                # randomly shuffle image
+                if _random:
+                    random.shuffle(lines)
 
                 path_seuil = os.path.join(zone_path, seuil_expe_filename)
 
                 counter = 0
                 # check if user select current scene and zone to be part of training data set
-                for index in lines_indexes:
-                    line = construct_new_line(path_seuil, _interval, lines[index], _norm, _sep, _index)
+                for data in lines:
 
                     percent = counter / num_lines
+                    image_index = int(data.split(';')[0])
+
+                    if image_index % _step == 0:
+                        line = construct_new_line(path_seuil, _interval, data, _choice, _norm, _sep, _index)
 
-                    if id_zone < _nb_zones and folder_scene in _scenes and percent <= _percent:
-                        train_file.write(line)
-                    else:
-                        test_file.write(line)
+                        if id_zone < _nb_zones and folder_scene in _scenes and percent <= _percent:
+                            train_file_data.append(line)
+                        else:
+                            test_file_data.append(line)
 
                     counter += 1
 
-                f.close()
+
+    train_file = open(output_train_filename, 'w')
+    test_file = open(output_test_filename, 'w')
+
+    for line in train_file_data:
+        train_file.write(line)
+
+    for line in test_file_data:
+        test_file.write(line)
 
     train_file.close()
     test_file.close()
@@ -206,21 +227,23 @@ def generate_data_model(_filename, _interval, _choice, _metric, _scenes = scenes
 
 def main():
 
-    p_custom = False
+    p_custom    = False
+    p_step      = 1
+    p_random    = 0
+    p_renderer  = 'all'
 
     if len(sys.argv) <= 1:
         print('Run with default parameters...')
-        print('python generate_data_model_random.py --output xxxx --interval 0,20  --kind svdne --metric lab --scenes "A, B, D" --nb_zones 5 --percent 0.7 --sep : --rowindex 1 --custom min_max_filename')
+        print('python generate_data_model.py --output xxxx --interval 0,20  --kind svdne --metric lab --scenes "A, B, D" --nb_zones 5 --random 1 --percent 0.7 --step 10 --renderer all --custom min_max_filename')
         sys.exit(2)
     try:
-        opts, args = getopt.getopt(sys.argv[1:], "ho:i:k:s:n:p:r:c", ["help=", "output=", "interval=", "kind=", "metric=","scenes=", "nb_zones=", "percent=", "sep=", "rowindex=", "custom="])
+        opts, args = getopt.getopt(sys.argv[1:], "ho:i:k:s:n:r:p:s:r:c", ["help=", "output=", "interval=", "kind=", "metric=","scenes=", "nb_zones=", "random=", "percent=", "step=", "renderer=", "custom="])
     except getopt.GetoptError:
         # print help information and exit:
-        print('python generate_data_model_random.py --output xxxx --interval 0,20  --kind svdne --metric lab --scenes "A, B, D" --nb_zones 5 --percent 0.7 --sep : --rowindex 1 --custom min_max_filename')
+        print('python generate_data_model.py --output xxxx --interval 0,20  --kind svdne --metric lab --scenes "A, B, D" --nb_zones 5 --random 1 --percent 0.7 --step 10 --renderer all --custom min_max_filename')
         sys.exit(2)
     for o, a in opts:
         if o == "-h":
-            print('python generate_data_model_random.py --output xxxx --interval 0,20  --kind svdne --metric lab --scenes "A, B, D" --nb_zones 5 --percent 0.7 --sep : --rowindex 1 --custom min_max_filename')
+            print('python generate_data_model.py --output xxxx --interval 0,20  --kind svdne --metric lab --scenes "A, B, D" --nb_zones 5 --random 1 --percent 0.7 --step 10 --renderer all --custom min_max_filename')
             sys.exit()
         elif o in ("-o", "--output"):
             p_filename = a
@@ -234,30 +257,38 @@ def main():
             p_scenes = a.split(',')
         elif o in ("-n", "--nb_zones"):
             p_nb_zones = int(a)
+        elif o in ("-r", "--random"):
+            p_random = int(a)
         elif o in ("-p", "--percent"):
             p_percent = float(a)
         elif o in ("-s", "--sep"):
             p_sep = a
-        elif o in ("-r", "--rowindex"):
-            if int(a) == 1:
-                p_rowindex = True
-            else:
-                p_rowindex = False
+        elif o in ("-s", "--step"):
+            p_step = int(a)
+        elif o in ("-r", "--renderer"):
+            p_renderer = a
+
+            if p_renderer not in cfg.renderer_choices:
+                assert False, "Unknown renderer choice, %s" % cfg.renderer_choices
         elif o in ("-c", "--custom"):
             p_custom = a
         else:
             assert False, "unhandled option"
 
+    # list all possibles choices of renderer
+    scenes_list = dt.get_renderer_scenes_names(p_renderer)
+    scenes_indices = dt.get_renderer_scenes_indices(p_renderer)
+
     # getting scenes from indexes user selection
     scenes_selected = []
 
     for scene_id in p_scenes:
-        index = scenes_indexes.index(scene_id.strip())
+        index = scenes_indices.index(scene_id.strip())
         scenes_selected.append(scenes_list[index])
 
     # find min max value if necessary to renormalize data
     if p_custom:
-        get_min_max_value_interval(p_filename, p_interval, p_kind, p_metric)
+        get_min_max_value_interval(scenes_list, p_filename, p_interval, p_kind, p_metric)
 
         # write new file to save
         if not os.path.exists(custom_min_max_folder):
@@ -271,7 +302,7 @@ def main():
             f.write(str(max_value_interval) + '\n')
 
     # create database using img folder (generate first time only)
-    generate_data_model(p_filename, p_interval, p_kind, p_metric, scenes_selected, p_nb_zones, p_percent, p_custom, p_sep, p_rowindex)
+    generate_data_model(scenes_list, p_filename, p_interval, p_kind, p_metric, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_custom)
 
 if __name__== "__main__":
     main()

+ 9 - 1
modules/utils/config.py

@@ -16,13 +16,21 @@ config_filename                 = "config"
 models_names_list               = ["svm_model","ensemble_model","ensemble_model_v2"]
 
 # define all scenes values
+renderer_choices                = ['all', 'maxwell', 'igloo', 'cycle']
+
 scenes_names                    = ['Appart1opt02', 'Bureau1', 'Cendrier', 'Cuisine01', 'EchecsBas', 'PNDVuePlongeante', 'SdbCentre', 'SdbDroite', 'Selles']
 scenes_indices                  = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
 
 maxwell_scenes_names            = ['Appart1opt02', 'Cuisine01', 'SdbCentre', 'SdbDroite']
 maxwell_scenes_indices          = ['A', 'D', 'G', 'H']
 
+igloo_scenes_names              = ['Bureau1', 'PNDVuePlongeante']
+igloo_scenes_indices            = ['B', 'F']
+
+cycle_scenes_names              = ['EchecsBas', 'Selles']
+cycle_scenes_indices            = ['E', 'I']
+
 normalization_choices           = ['svd', 'svdn', 'svdne']
 zones_indices                   = np.arange(16)
 
-metric_choices_labels           = ['lab', 'mscn', 'mscn_revisited', 'low_bits_2', 'low_bits_3', 'low_bits_4', 'low_bits_5', 'low_bits_6','low_bits_4_shifted_2']
+metric_choices_labels           = ['lab', 'mscn_revisited', 'low_bits_2', 'low_bits_3', 'low_bits_4', 'low_bits_5', 'low_bits_6','low_bits_4_shifted_2']

+ 29 - 0
modules/utils/data_type.py → modules/utils/data.py

@@ -1,9 +1,19 @@
 from ipfml import processing, metrics
+from modules.utils.config import *
+
 from PIL import Image
 from skimage import color
 
 import numpy as np
 
+
+_scenes_names_prefix   = '_scenes_names'
+_scenes_indices_prefix = '_scenes_indices'
+
+# store all variables from current module context
+context_vars = vars()
+
+
 def get_svd_data(data_type, block):
     """
     Method which returns the data type expected
@@ -69,4 +79,23 @@ def get_svd_data(data_type, block):
 
     return data
 
+def get_renderer_scenes_indices(renderer_name):
+
+    if renderer_name not in renderer_choices:
+        raise ValueError("Unknown renderer name")
+
+    if renderer_name == 'all':
+        return scenes_indices
+    else:
+        return context_vars[renderer_name + _scenes_indices_prefix]
+
+def get_renderer_scenes_names(renderer_name):
+
+    if renderer_name not in renderer_choices:
+        raise ValueError("Unknown renderer name")
+
+    if renderer_name == 'all':
+        return scenes_names
+    else:
+        return context_vars[renderer_name + _scenes_names_prefix]
 

+ 46 - 20
predict_noisy_image_svd.py

@@ -8,11 +8,11 @@ from PIL import Image
 import sys, os, getopt
 
 from modules.utils import config as cfg
-from modules.utils import data_type as dt
+from modules.utils import data as dt
 
 path                  = cfg.dataset_path
 min_max_ext           = cfg.min_max_filename_extension
-metric_choices       = cfg.metric_choices_labels
+metric_choices        = cfg.metric_choices_labels
 normalization_choices = cfg.normalization_choices
 
 custom_min_max_folder = cfg.min_max_custom_folder
@@ -51,7 +51,7 @@ def main():
 
             if not p_mode in normalization_choices:
                 assert False, "Mode of normalization not recognized"
-        elif o in ("-m", "--custom"):
+        elif o in ("-c", "--custom"):
             p_custom = a
 
         else:
@@ -65,32 +65,58 @@ def main():
 
     data = dt.get_svd_data(p_metric, img)
 
+    # get interval values
+    begin, end = p_interval
+
     # check mode to normalize data
-    if p_mode == 'svdne':
 
-        # set min_max_filename if custom use
-        if p_custom:
-            min_max_filename = custom_min_max_folder + '/' +  p_custom
+    if p_custom:
+
+        data = data[begin:end]
+
+        if p_mode == 'svdne':
+
+            # set min_max_filename if custom use
+            min_max_file_path = custom_min_max_folder + '/' +  p_custom
+
+            # need to read min_max_file
+            file_path = os.path.join(os.path.dirname(__file__), min_max_file_path)
+            with open(file_path, 'r') as f:
+                min_val = float(f.readline().replace('\n', ''))
+                max_val = float(f.readline().replace('\n', ''))
+
+            l_values = processing.normalize_arr_with_range(data, min_val, max_val)
+
+        elif p_mode == 'svdn':
+            l_values = processing.normalize_arr(data)
         else:
+            l_values = data
+
+        test_data = l_values
+
+    else:
+
+        if p_mode == 'svdne':
+
+            # set min_max_filename if custom use
             min_max_file_path = path + '/' + p_metric + min_max_ext
 
-        # need to read min_max_file
-        file_path = os.path.join(os.path.join(os.path.dirname(__file__),'../'), min_max_file_path)
-        with open(file_path, 'r') as f:
-            min = float(f.readline().replace('\n', ''))
-            max = float(f.readline().replace('\n', ''))
+            # need to read min_max_file
+            file_path = os.path.join(os.path.dirname(__file__), min_max_file_path)
+            with open(file_path, 'r') as f:
+                min_val = float(f.readline().replace('\n', ''))
+                max_val = float(f.readline().replace('\n', ''))
 
-        l_values = processing.normalize_arr_with_range(data, min, max)
+            l_values = processing.normalize_arr_with_range(data, min_val, max_val)
 
-    elif p_mode == 'svdn':
-        l_values = processing.normalize_arr(data)
-    else:
-        l_values = data
+        elif p_mode == 'svdn':
+            l_values = processing.normalize_arr(data)
+        else:
+            l_values = data
 
+        test_data = l_values[begin:end]
 
-    # get interval values
-    begin, end = p_interval
-    test_data = l_values[begin:end]
+    print(data)
 
     # get prediction of model
     prediction = model.predict([test_data])[0]

+ 2 - 2
run_maxwell_simulation.sh

@@ -34,11 +34,11 @@ for size in {"4","8","16","26","32","40"}; do
                         FILENAME="data/data_maxwell_N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${metric}_${mode}"
                         MODEL_NAME="${model}_N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${metric}_${mode}"
 
-                        if grep -q "${MODEL_NAME}" "${simulate_models}"; then
+                        if grep -xq "${MODEL_NAME}" "${simulate_models}"; then
                             echo "Run simulation for model ${MODEL_NAME}"
 
                             # by default regenerate model
-                            python generate_data_model_random_maxwell.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --sep ';' --rowindex '0'
+                            python generate_data_model_random_maxwell.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1
 
                             python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 

+ 2 - 2
run_maxwell_simulation_custom.sh

@@ -35,11 +35,11 @@ for size in {"4","8","16","26","32","40"}; do
                         MODEL_NAME="${model}_N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${metric}_${mode}"
                         CUSTOM_MIN_MAX_FILENAME="N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${metric}_${mode}_min_max"
 
-                        if grep -q "${MODEL_NAME}" "${simulate_models}"; then
+                        if grep -xq "${MODEL_NAME}" "${simulate_models}"; then
                             echo "Run simulation for model ${MODEL_NAME}"
 
                             # by default regenerate model
-                            python generate_data_model_random_maxwell.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --norm 0 --sep ';' --rowindex '0' --custom ${CUSTOM_MIN_MAX_FILENAME}
+                            python generate_data_model_random_maxwell.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
 
                             python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}