Merge branch 'release/v0.4.4'

Jérôme BUISINE, 5 years ago
parent
commit
4e9a22b7ce
3 changed files with 407 additions and 4 deletions
  1. +166 -0   generate/generate_all_data_augmentation.py
  2. +9 -4     generate/generate_data_augmentation.py
  3. +232 -0   generate/generate_data_model_random_augmented.py

+ 166 - 0
generate/generate_all_data_augmentation.py

@@ -0,0 +1,166 @@
+# main imports
+import sys, os, argparse
+import numpy as np
+import random
+import time
+import json
+
+# image processing imports
+from PIL import Image
+
+from ipfml.processing import transform, segmentation
+from ipfml import utils
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+from data_attributes import get_image_features
+
+
+# getting configuration information
+zone_folder             = cfg.zone_folder
+min_max_filename        = cfg.min_max_filename_extension
+
+# define all scenes values
+scenes_list             = cfg.scenes_names
+scenes_indexes          = cfg.scenes_indices
+choices                 = cfg.normalization_choices
+zones                   = cfg.zones_indices
+seuil_expe_filename     = cfg.seuil_expe_filename
+
+features_choices        = cfg.features_choices_labels
+output_data_folder      = cfg.output_data_folder
+
+data_augmented_filename = cfg.data_augmented_filename
+generic_output_file_svd = '_random.csv'
+
+def generate_data_svd(data_type, mode, path):
+    """
+    @brief Method which generates all .csv files from scenes
+    @param data_type,  feature choice
+    @param mode, normalization choice
+    @param path, data augmented path
+    @return nothing
+    """
+
+    scenes = os.listdir(path)
+    # remove min max file from scenes folder
+    scenes = [s for s in scenes if min_max_filename not in s and generic_output_file_svd not in s]
+
+    # keep in memory min and max data found from data_type
+    min_val_found = sys.maxsize
+    max_val_found = 0
+
+    data_min_max_filename = os.path.join(path, data_type + min_max_filename)
+    data_filename = os.path.join(path, data_augmented_filename)
+
+    # getting output filename
+    output_svd_filename = data_type + "_" + mode + generic_output_file_svd
+
+    current_file = open(os.path.join(path, output_svd_filename), 'w')
+
+    with open(data_filename, 'r') as f:
+
+        lines = f.readlines()
+        number_of_images = len(lines)
+
+        for index, line in enumerate(lines):
+
+            # CSV line layout: scene;threshold;image_index;label;image_path
+            data = line.split(';')
+
+            scene_name = data[0]
+            image_index = data[2]
+            label_img = data[3]
+            img_path = data[4].replace('\n', '')
+
+            block = Image.open(os.path.join(path, img_path))
+         
+            ############################
+            # feature computation part #
+            ############################
+
+            data = get_image_features(data_type, block)
+
+            ##################
+            # Data mode part #
+            ##################
+
+            # modify data depending on the selected mode
+            if mode == 'svdne':
+
+                # getting min and max values from the min/max file
+                with open(data_min_max_filename, 'r') as min_max_file:
+                    min_val = float(min_max_file.readline())
+                    max_val = float(min_max_file.readline())
+
+                data = utils.normalize_arr_with_range(data, min_val, max_val)
+
+            if mode == 'svdn':
+                data = utils.normalize_arr(data)
+
+            # keep the min and max found across the whole dataset so that 'svdne' can use them later
+            if mode == 'svd':
+
+                current_min = data.min()
+                current_max = data.max()
+
+                if current_min < min_val_found:
+                    min_val_found = current_min
+
+                if current_max > max_val_found:
+                    max_val_found = current_max
+
+            # write scene name, image index and label before the feature values
+            current_file.write(scene_name + ';' + image_index + ';' + label_img + ';')
+
+            for val in data:
+                current_file.write(str(val) + ";")
+
+            print(data_type + "_" + mode + " - " + "{0:.2f}".format((index + 1) / number_of_images * 100.) + "%")
+            sys.stdout.write("\033[F")
+
+            current_file.write('\n')
+
+        print('\n')
+
+    current_file.close()
+
+    # save the min and max values found for this feature across the dataset
+    if mode == 'svd':
+        with open(data_min_max_filename, 'w') as f:
+            f.write(str(min_val_found) + '\n')
+            f.write(str(max_val_found) + '\n')
+
+    print("%s_%s : end of data generation\n" % (data_type, mode))
+
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Compute and prepare data of feature of all scenes (keep in memory min and max value found)")
+
+    parser.add_argument('--feature', type=str, 
+                                    help="feature choice in order to compute data (use 'all' if all features are needed)")
+    parser.add_argument('--folder', type=str, help="folder which contains the whole dataset")
+
+    args = parser.parse_args()
+
+    p_feature = args.feature
+    p_folder  = args.folder
+
+    # generate all or specific feature data
+    if p_feature == 'all':
+        for m in features_choices:
+            generate_data_svd(m, 'svd', p_folder)
+            generate_data_svd(m, 'svdn', p_folder)
+            generate_data_svd(m, 'svdne', p_folder)
+    else:
+
+        if p_feature not in features_choices:
+            raise ValueError('Unknown feature choice, expected one of: {}'.format(features_choices))
+            
+        generate_data_svd(p_feature, 'svd', p_folder)
+        generate_data_svd(p_feature, 'svdn', p_folder)
+        generate_data_svd(p_feature, 'svdne', p_folder)
+
+if __name__== "__main__":
+    main()
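
A quick hedged illustration of the three modes handled above (the numeric values are made up; only utils.normalize_arr and utils.normalize_arr_with_range come from the script's own imports):

    from ipfml import utils

    data = [5.0, 3.0, 1.0]    # illustrative SVD feature values
    svdn = utils.normalize_arr(data)    # 'svdn': normalize within the vector itself
    svdne = utils.normalize_arr_with_range(data, 0.0, 10.0)    # 'svdne': normalize with dataset-wide min/max
    # 'svd' mode writes the raw values and records the global min/max
    # to the min/max file for later 'svdne' runs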

+ 9 - 4
generate/generate_data_augmentation.py

@@ -130,6 +130,7 @@ def main():
                     points = [p_top_left, p_top_right, p_bottom_right, p_bottom_left]
 
                     p_zones_indices = []
+                    
                     # for each points get threshold information
                     for p in points:
                         x, y = p
@@ -188,22 +189,24 @@ def main():
                     for rotation in rotations:
 
                         rotated_img_name = extracted_image_name +  'rot' + str(rotation) + '_' + current_image_postfix + cfg.scene_image_extension
-                        rotated_img_path = os.path.join(folder_scene, rotated_img_name)
+                        rotated_img_path = os.path.join(output_scene_path, rotated_img_name)
+                        saved_rotated_img_path = os.path.join(folder_scene, rotated_img_name)
                         rotated_img = pil_extracted_img.rotate(rotation)
                         rotated_img.save(rotated_img_path)
 
-                        csv_line = folder_scene + ';' + str(final_threshold) + ';' + str(int(current_image_postfix)) + ';' + str(int(label_img)) + ';' + rotated_img_path + '\n'
+                        csv_line = folder_scene + ';' + str(final_threshold) + ';' + str(int(current_image_postfix)) + ';' + str(int(label_img)) + ';' + saved_rotated_img_path + '\n'
 
                         with open(output_dataset_filename_path, 'a') as f:
                             f.write(csv_line)
 
                 else:
                     extracted_image_name += current_image_postfix + cfg.scene_image_extension
-                    extracted_image_path = os.path.join(folder_scene, extracted_image_name)
+                    extracted_image_path = os.path.join(output_scene_path, extracted_image_name)
+                    saved_extracted_image_path = os.path.join(folder_scene, extracted_image_name)
                     
                     pil_extracted_img.save(extracted_image_path)
 
-                    csv_line = folder_scene + ';' + str(final_threshold) + ';' + str(int(current_image_postfix)) + ';' + str(int(label_img)) + ';' + extracted_image_path + '\n'
+                    csv_line = folder_scene + ';' + str(final_threshold) + ';' + str(int(current_image_postfix)) + ';' + str(int(label_img)) + ';' + saved_extracted_image_path + '\n'
 
                     with open(output_dataset_filename_path, 'a') as f:
                         f.write(csv_line)
@@ -211,6 +214,8 @@ def main():
                 print(folder_scene + " - " + "{0:.2f}".format(((id_img * p_number + generation) + 1) / (p_number * number_scene_image) * 100.) + "%")
                 sys.stdout.write("\033[F")
 
+        print('\n', folder_scene, 'done...')
+
 
 if __name__== "__main__":
     main()
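
The path change above separates where the image is written from what the CSV records. Sketched with hypothetical names (SceneA, the output folder and the image name are examples, not values from this commit), the idea is that the file is saved under the output scene folder while the CSV keeps a path relative to the dataset root, which the reader side (generate_all_data_augmentation.py) rebuilds with os.path.join(path, img_path):

    import os

    output_scene_path = os.path.join('output', 'SceneA')    # hypothetical output folder
    folder_scene = 'SceneA'    # scene name, also used as the relative folder
    img_name = 'img_rot90_00050.png'    # hypothetical image name

    write_path = os.path.join(output_scene_path, img_name)    # where the file is saved on disk
    csv_path = os.path.join(folder_scene, img_name)    # what is stored in the CSV line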

+ 232 - 0
generate/generate_data_model_random_augmented.py

@@ -0,0 +1,232 @@
+# main imports
+import sys, os, argparse
+import numpy as np
+import pandas as pd
+import random
+
+# image processing imports
+from PIL import Image
+
+from ipfml import utils
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from modules.utils import data as dt
+from data_attributes import get_image_features
+
+
+# getting configuration information
+learned_folder          = cfg.learned_zones_folder
+min_max_filename        = cfg.min_max_filename_extension
+
+# define all scenes variables
+all_scenes_list         = cfg.scenes_names
+all_scenes_indices      = cfg.scenes_indices
+
+normalization_choices   = cfg.normalization_choices
+zones                   = cfg.zones_indices
+seuil_expe_filename     = cfg.seuil_expe_filename
+
+renderer_choices        = cfg.renderer_choices
+features_choices        = cfg.features_choices_labels
+output_data_folder      = cfg.output_data_folder
+custom_min_max_folder   = cfg.min_max_custom_folder
+min_max_ext             = cfg.min_max_filename_extension
+
+generic_output_file_svd = '_random.csv'
+
+min_value_interval      = sys.maxsize
+max_value_interval      = 0
+
+def construct_new_line(interval, line_data, choice, each, norm):
+    begin, end = interval
+
+    label = line_data[2]
+    features = line_data[begin+3:end+3]
+    
+    # keep only every `each`-th value of the interval
+    features = [float(m) for idx, m in enumerate(features) if idx % each == 0]
+
+    # TODO : check if it's always necessary to do that (loss of information for svd)
+    if norm:
+
+        if choice == 'svdne':
+            features = utils.normalize_arr_with_range(features, min_value_interval, max_value_interval)
+        if choice == 'svdn':
+            features = utils.normalize_arr(features)
+
+    line = label
+
+    for val in features:
+        line += ';'
+        line += str(val)
+    line += '\n'
+
+    return line
+
+def get_min_max_value_interval(_path, _scenes_list, _interval, _feature):
+
+    global min_value_interval, max_value_interval
+
+    data_filename = _feature + "_svd" + generic_output_file_svd
+
+    data_file_path = os.path.join(_path, data_filename)
+
+    # read every line of the svd data file
+    with open(data_file_path, 'r') as f:
+        lines = f.readlines()
+
+    # scan all lines to find the global min and max over the selected interval
+    for line in lines:
+
+        begin, end = _interval
+
+        line_data = line.split(';')
+
+        features = line_data[begin+3:end+3]
+        features = [float(m) for m in features]
+
+        min_value = min(features)
+        max_value = max(features)
+
+        if min_value < min_value_interval:
+            min_value_interval = min_value
+
+        if max_value > max_value_interval:
+            max_value_interval = max_value
+
+
+def generate_data_model(_path, _scenes_list, _filename, _interval, _choice, _feature, _scenes, _nb_zones = 4, _percent = 1, _random=0, _step=1, _each=1, _custom = False):
+
+    output_train_filename = _filename + ".train"
+    output_test_filename = _filename + ".test"
+
+    if '/' not in output_train_filename:
+        raise Exception("Please select a filename with a directory path to save data. Example: data/dataset")
+
+    # create path if not exists
+    if not os.path.exists(output_data_folder):
+        os.makedirs(output_data_folder)
+
+    train_file_data = []
+    test_file_data  = []
+
+    # if a custom normalization is chosen, use the raw svd values (not already normalized)
+    if _custom:
+        data_filename = _feature + "_svd"+ generic_output_file_svd
+    else:
+        data_filename = _feature + "_" + _choice + generic_output_file_svd
+
+    data_file_path = os.path.join(_path, data_filename)
+
+    # read all lines of the data file
+    with open(data_file_path, 'r') as f:
+        lines = f.readlines()
+
+    num_lines = len(lines)
+
+    # randomly shuffle the lines
+    if _random:
+        random.shuffle(lines)
+
+    counter = 0
+    # dispatch each data line into the train or test set
+    for data in lines:
+
+        percent = counter / num_lines
+
+        data = data.split(';')
+        scene_name = data[0]
+        image_index = int(data[1])
+
+        if image_index % _step == 0:
+            line = construct_new_line(_interval, data, _choice, int(_each), _custom)
+
+            if scene_name in _scenes and percent <= _percent:
+                train_file_data.append(line)
+            else:
+                test_file_data.append(line)
+
+        counter += 1
+
+    train_file = open(output_train_filename, 'w')
+    test_file = open(output_test_filename, 'w')
+
+    for line in train_file_data:
+        train_file.write(line)
+
+    for line in test_file_data:
+        test_file.write(line)
+
+    train_file.close()
+    test_file.close()
+
+
+def main():
+
+    # getting all params
+    parser = argparse.ArgumentParser(description="Generate data for model using correlation matrix information from data")
+
+    parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
+    parser.add_argument('--folder', type=str, help='folder path of data augmented database')
+    parser.add_argument('--interval', type=str, help='Interval of svd values to keep, e.g. "0, 200"', default='0, 200')
+    parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=normalization_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
+    parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
+    parser.add_argument('--random', type=int, help='Shuffle data lines randomly or not', choices=[0, 1])
+    parser.add_argument('--percent', type=float, help='Percent of data used for the train dataset (default 1)', default=1)
+    parser.add_argument('--step', type=int, help='Photo step to keep for build datasets', default=1)
+    parser.add_argument('--each', type=int, help='Each features to keep from interval', default=1)
+    parser.add_argument('--renderer', type=str, help='Renderer choice in order to limit scenes used', choices=renderer_choices, default='all')
+    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
+
+    args = parser.parse_args()
+
+    p_filename = args.output
+    p_folder   = args.folder
+    p_interval = list(map(int, args.interval.split(',')))
+    p_kind     = args.kind
+    p_feature  = args.feature
+    p_scenes   = args.scenes.split(',')
+    p_random   = args.random
+    p_percent  = args.percent
+    p_step     = args.step
+    p_each     = args.each
+    p_renderer = args.renderer
+    p_custom   = args.custom
+
+
+    # list all scenes available for the selected renderer
+    scenes_list = dt.get_renderer_scenes_names(p_renderer)
+    scenes_indices = dt.get_renderer_scenes_indices(p_renderer)
+
+    # getting scenes from the user's index selection
+    scenes_selected = []
+
+    for scene_id in p_scenes:
+        index = scenes_indices.index(scene_id.strip())
+        scenes_selected.append(scenes_list[index])
+
+    # find min max value if necessary to renormalize data
+    if p_custom:
+        get_min_max_value_interval(p_folder, scenes_list, p_interval, p_feature)
+
+        # write the min/max values found into the custom file
+        if not os.path.exists(custom_min_max_folder):
+            os.makedirs(custom_min_max_folder)
+
+        min_max_filename_path = os.path.join(custom_min_max_folder, p_custom)
+
+        with open(min_max_filename_path, 'w') as f:
+            f.write(str(min_value_interval) + '\n')
+            f.write(str(max_value_interval) + '\n')
+
+    # create the dataset files from the augmented data folder
+    generate_data_model(p_folder, scenes_list, p_filename, p_interval, p_kind, p_feature, scenes_selected, _percent=p_percent, _random=p_random, _step=p_step, _each=p_each, _custom=p_custom)
+
+if __name__== "__main__":
+    main()
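
For reference, a hedged sketch of what construct_new_line produces for a single CSV line (the interval, the --each step and all values are illustrative only):

    # input line layout in <feature>_<choice>_random.csv: scene;image_index;label;f0;f1;...
    line_data = ['SceneA', '50', '1', '0.9', '0.7', '0.5', '0.3']

    begin, end = 0, 3    # hypothetical --interval '0, 3'
    label = line_data[2]
    features = line_data[begin + 3:end + 3]    # -> ['0.9', '0.7', '0.5']
    features = [float(m) for idx, m in enumerate(features) if idx % 2 == 0]    # --each 2
    print(';'.join([label] + [str(v) for v in features]))    # -> 1;0.9;0.5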