Browse source

Add prediction script; add simulation script

Jérôme BUISINE 4 years ago
Parent
commit
e88e2afb76

+ 6 - 1
README.md

@@ -26,11 +26,16 @@ List of expected parameters by reconstruction method:
   - Example: *"100, 200"*
 - **ipca_reconstruction:** Iterative Principal Component Analysis
   - Param definition: *number of components used for compression and batch size*
-  - Example: *"50, 32"*
+  - Example: *"30, 35"*
 - **fast_ica_reconstruction:** Fast Independent Component Analysis
   - Param definition: *number of components used for compression*
   - Example: *"50"*
 
+**Example:**
+```bash
+python generate_dataset_3D.py --output data/output_data_filename --metrics "svd_reconstruction, ipca_reconstruction, fast_ica_reconstruction" --renderer "maxwell" --scenes "A, D, G, H" --params "100, 200 :: 50, 10 :: 50" --nb_zones 10 --random 1
+```
+
 ## Modules
 
 This project contains modules:

+ 5 - 5
generate_dataset_3D.py

@@ -32,7 +32,7 @@ min_max_filename        = cfg.min_max_filename_extension
 scenes_list             = cfg.scenes_names
 scenes_indexes          = cfg.scenes_indices
 choices                 = cfg.normalization_choices
-path                    = cfg.dataset_path
+dataset_path            = cfg.dataset_path
 zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename
 
@@ -56,14 +56,14 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
     train_file_data = []
     test_file_data  = []
 
-    scenes = os.listdir(path)
+    scenes = os.listdir(dataset_path)
     # remove min max file from scenes folder
     scenes = [s for s in scenes if min_max_filename not in s]
 
     # go ahead each scenes
     for id_scene, folder_scene in enumerate(_scenes_list):
 
-        scene_path = os.path.join(path, folder_scene)
+        scene_path = os.path.join(dataset_path, folder_scene)
 
         zones_indices = zones
 
@@ -185,8 +185,8 @@ def main():
     args = parser.parse_args()
 
     p_filename = args.output
-    p_metrics  = args.metrics.split(',')
-    p_params   = args.params.split('::')
+    p_metrics  = list(map(str.strip, args.metrics.split(',')))
+    p_params   = list(map(str.strip, args.params.split('::')))
     p_scenes   = args.scenes.split(',')
     p_nb_zones = args.nb_zones
     p_renderer = args.renderer

+ 24 - 10
generate_reconstructed_data.py

@@ -118,13 +118,17 @@ def generate_data(transformation):
             img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
 
             current_img = Image.open(img_path)
-            img_blocks = processing.divide_in_blocks(current_img, (200, 200))
+            img_blocks = processing.divide_in_blocks(current_img, cfg.keras_img_size)
 
             for id_block, block in enumerate(img_blocks):
 
                 ##########################
                 # Image computation part #
                 ##########################
+
+                # grey level conversion is now handled inside each transformation
                 output_block = transformation.getTransformedImage(block)
                 output_block = np.array(output_block, 'uint8')
                 
@@ -177,22 +181,32 @@ def main():
 
     parser = argparse.ArgumentParser(description="Compute and prepare metric data for all scenes, using a specific interval if necessary")
 
-    parser.add_argument('--metric', type=str, 
-                                    help="metric choice in order to compute data", 
-                                    choices=metric_choices,
+    parser.add_argument('--metrics', type=str,
+                                     help="list of metric choices used to compute data",
+                                     required=True)
+    parser.add_argument('--params', type=str,
+                                    help="list of specific params for each metric choice (see README.md for further information in 3D mode)",
                                     required=True)
 
-    parser.add_argument('--param', type=str, help="specific param for metric (See README.md for further information)")
-
     args = parser.parse_args()
 
-    p_metric   = args.metric
-    p_param    = args.param
+    p_metrics  = list(map(str.strip, args.metrics.split(',')))
+    p_params   = list(map(str.strip, args.params.split('::')))
+
+    transformations = []
+
+    for id_metric, metric in enumerate(p_metrics):
+
+        if metric not in metric_choices:
+            raise ValueError("Unknown metric '%s', please select one of %s" % (metric, metric_choices))
 
-    transformation = Transformation(p_metric, p_param)
+        transformations.append(Transformation(metric, p_params[id_metric]))
 
     # generate all or specific metric data
-    generate_data(transformation)
+    for transformation in transformations:
+        generate_data(transformation)
 
 if __name__== "__main__":
     main()
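
With this change, `generate_reconstructed_data.py` accepts several metrics at once; each `::`-separated group in `--params` is paired, in order, with the matching entry in `--metrics`. A possible invocation (parameter values are illustrative, taken from the former defaults):

```bash
python generate_reconstructed_data.py --metrics "svd_reconstruction, ipca_reconstruction" --params "100, 200 :: 50, 25"
```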

+ 3 - 1
modules/utils/config.py

@@ -42,4 +42,6 @@ metric_choices_labels           = ['all', 'svd_reconstruction', 'fast_ica_recons
 
 keras_epochs                    = 50
 keras_batch                     = 32
-val_dataset_size                = 0.2
+val_dataset_size                = 0.2
+
+keras_img_size                  = (200, 200)

+ 92 - 0
predict_noisy_image.py

@@ -0,0 +1,92 @@
+from sklearn.externals import joblib
+
+import numpy as np
+
+from ipfml import processing, utils
+from PIL import Image
+
+import sys, os, argparse, json
+
+from keras.models import model_from_json
+from keras import backend as K
+
+from modules.utils import config as cfg
+from modules.utils import data as dt
+
+from modules.classes.Transformation import Transformation
+
+path                  = cfg.dataset_path
+min_max_ext           = cfg.min_max_filename_extension
+metric_choices        = cfg.metric_choices_labels
+normalization_choices = cfg.normalization_choices
+
+custom_min_max_folder = cfg.min_max_custom_folder
+
+def main():
+
+    # getting all params
+    parser = argparse.ArgumentParser(description="Script which detects whether an image is noisy, using a specific keras model")
+
+    parser.add_argument('--image', type=str, help='Image path')
+    parser.add_argument('--metrics', type=str,
+                                     help="list of metric choices used to compute data",
+                                     required=True)
+    parser.add_argument('--params', type=str,
+                                    help="list of specific params for each metric choice (see README.md for further information in 3D mode)",
+                                    required=True)
+    parser.add_argument('--model', type=str, help='.json file of keras model')
+
+    args = parser.parse_args()
+
+    p_img_file   = args.image
+    p_metrics    = list(map(str.strip, args.metrics.split(',')))
+    p_params     = list(map(str.strip, args.params.split('::')))
+    p_model_file = args.model
+
+
+    with open(p_model_file, 'r') as f:
+        json_model = json.load(f)
+        model = model_from_json(json_model)
+        model.load_weights(p_model_file.replace('.json', '.h5'))
+
+        model.compile(loss='binary_crossentropy',
+                    optimizer='rmsprop',
+                    metrics=['accuracy'])
+
+    # load image
+    img = Image.open(p_img_file)
+
+    transformations = []
+
+    for id_metric, metric in enumerate(p_metrics):
+
+        if metric not in metric_choices:
+            raise ValueError("Unknown metric '%s', please select one of %s" % (metric, metric_choices))
+
+        transformations.append(Transformation(metric, p_params[id_metric]))
+
+    # getting transformed image
+    transformed_images = []
+
+    for transformation in transformations:
+        transformed_images.append(transformation.getTransformedImage(img))
+
+    data = np.array(transformed_images)
+
+    # specify the number of dimensions
+    img_width, img_height = cfg.keras_img_size
+    n_channels = len(transformations)
+
+    if K.image_data_format() == 'channels_first':
+        input_shape = (n_channels, img_width, img_height)
+    else:
+        input_shape = (img_width, img_height, n_channels)
+        # channels are stacked first by np.array, move them to the last axis
+        data = np.moveaxis(data, 0, -1)
+
+    # add the batch dimension expected by the model
+    data = data.reshape((1,) + input_shape)
+
+    prediction = model.predict_classes(data)[0][0]
+
+    # output expected from others scripts
+    print(prediction)
+
+if __name__== "__main__":
+    main()
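
Example call for the new prediction script; the flags follow the parser above, while the image and model paths are hypothetical:

```bash
python predict_noisy_image.py --image data/tmp_block.png --metrics "svd_reconstruction, ipca_reconstruction" --params "100, 200 :: 50, 25" --model saved_models/model.json
```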

+ 0 - 145
predict_noisy_image_svd.py

@@ -1,145 +0,0 @@
-from sklearn.externals import joblib
-
-import numpy as np
-
-from ipfml import processing, utils
-from PIL import Image
-
-import sys, os, argparse, json
-
-from keras.models import model_from_json
-
-from modules.utils import config as cfg
-from modules.utils import data as dt
-
-path                  = cfg.dataset_path
-min_max_ext           = cfg.min_max_filename_extension
-metric_choices        = cfg.metric_choices_labels
-normalization_choices = cfg.normalization_choices
-
-custom_min_max_folder = cfg.min_max_custom_folder
-
-def main():
-
-    # getting all params
-    parser = argparse.ArgumentParser(description="Script which detects if an image is noisy or not using specific model")
-
-    parser.add_argument('--image', type=str, help='Image path')
-    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
-    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
-    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
-    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
-
-    args = parser.parse_args()
-
-    p_img_file   = args.image
-    p_model_file = args.model
-    p_interval   = list(map(int, args.interval.split(',')))
-    p_mode       = args.mode
-    p_metric     = args.metric
-    p_custom     = args.custom
-
-    if '.joblib' in p_model_file:
-        kind_model = 'sklearn'
-
-    if '.json' in p_model_file:
-        kind_model = 'keras'
-
-    if 'corr' in p_model_file:
-        corr_model = True
-
-        indices_corr_path = os.path.join(cfg.correlation_indices_folder, p_model_file.split('/')[1].replace('.json', '').replace('.joblib', '') + '.csv')
-
-        with open(indices_corr_path, 'r') as f:
-            data_corr_indices = [int(x) for x in f.readline().split(';') if x != '']
-    else:
-        corr_model = False
-
-
-    if kind_model == 'sklearn':
-        # load of model file
-        model = joblib.load(p_model_file)
-
-    if kind_model == 'keras':
-        with open(p_model_file, 'r') as f:
-            json_model = json.load(f)
-            model = model_from_json(json_model)
-            model.load_weights(p_model_file.replace('.json', '.h5'))
-
-            model.compile(loss='binary_crossentropy',
-                        optimizer='adam',
-                        metrics=['accuracy'])
-
-    # load image
-    img = Image.open(p_img_file)
-
-    data = dt.get_svd_data(p_metric, img)
-
-    # get interval values
-    begin, end = p_interval
-
-    # check if custom min max file is used
-    if p_custom:
-
-        if corr_model:
-            test_data = data[data_corr_indices]
-        else:
-            test_data = data[begin:end]
-
-        if p_mode == 'svdne':
-
-            # set min_max_filename if custom use
-            min_max_file_path = custom_min_max_folder + '/' +  p_custom
-
-            # need to read min_max_file
-            file_path = os.path.join(os.path.dirname(__file__), min_max_file_path)
-            with open(file_path, 'r') as f:
-                min_val = float(f.readline().replace('\n', ''))
-                max_val = float(f.readline().replace('\n', ''))
-
-            test_data = utils.normalize_arr_with_range(test_data, min_val, max_val)
-
-        if p_mode == 'svdn':
-            test_data = utils.normalize_arr(test_data)
-
-    else:
-
-        # check mode to normalize data
-        if p_mode == 'svdne':
-
-            # set min_max_filename if custom use
-            min_max_file_path = path + '/' + p_metric + min_max_ext
-
-            # need to read min_max_file
-            file_path = os.path.join(os.path.dirname(__file__), min_max_file_path)
-            with open(file_path, 'r') as f:
-                min_val = float(f.readline().replace('\n', ''))
-                max_val = float(f.readline().replace('\n', ''))
-
-            l_values = utils.normalize_arr_with_range(data, min_val, max_val)
-
-        elif p_mode == 'svdn':
-            l_values = utils.normalize_arr(data)
-        else:
-            l_values = data
-
-        if corr_model:
-            test_data = data[data_corr_indices]
-        else:
-            test_data = data[begin:end]
-
-
-    # get prediction of model
-    if kind_model == 'sklearn':
-        prediction = model.predict([test_data])[0]
-
-    if kind_model == 'keras':
-        test_data = np.asarray(test_data).reshape(1, len(test_data), 1)
-        prediction = model.predict_classes([test_data])[0][0]
-
-    # output expected from others scripts
-    print(prediction)
-
-if __name__== "__main__":
-    main()

+ 22 - 25
predict_seuil_expe_maxwell_curve.py

@@ -34,23 +34,25 @@ def main():
 
     p_custom = False
         
-    parser = argparse.ArgumentParser(description="Script which predicts threshold using specific model")
-
-    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='"0, 200"')
-    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
-    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
-    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
-    #parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
-    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
+    parser = argparse.ArgumentParser(description="Script which predicts threshold using a specific keras model")
+
+    parser.add_argument('--metrics', type=str,
+                                     help="list of metric choices used to compute data",
+                                     required=True)
+    parser.add_argument('--params', type=str,
+                                    help="list of specific params for each metric choice (see README.md for further information in 3D mode)",
+                                    required=True)
+    parser.add_argument('--model', type=str, help='.json file of keras model')
 
     args = parser.parse_args()
 
-    p_interval   = list(map(int, args.interval.split(',')))
+    p_metrics    = list(map(str.strip, args.metrics.split(',')))
+    p_params     = list(map(str.strip, args.params.split('::')))
     p_model_file = args.model
-    p_mode       = args.mode
-    p_metric     = args.metric
-    #p_limit      = args.limit
-    p_custom     = args.custom
 
     scenes = os.listdir(scenes_path)
     scenes = [s for s in scenes if s in maxwell_scenes]
@@ -113,25 +115,20 @@ def main():
                 img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
 
                 current_img = Image.open(img_path)
-                img_blocks = processing.divide_in_blocks(current_img, (200, 200))
+                img_blocks = processing.divide_in_blocks(current_img, cfg.keras_img_size)
 
                 for id_block, block in enumerate(img_blocks):
 
                     # check only if necessary for this scene (not already detected)
                     #if not threshold_expes_detected[id_block]:
 
-                        tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
+                        tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.json', '_'))
                         block.save(tmp_file_path)
 
-                        python_cmd = "python predict_noisy_image_svd.py --image " + tmp_file_path + \
-                                        " --interval '" + p_interval + \
-                                        "' --model " + p_model_file  + \
-                                        " --mode " + p_mode + \
-                                        " --metric " + p_metric
-
-                        # specify use of custom file for min max normalization
-                        if p_custom:
-                            python_cmd = python_cmd + ' --custom ' + p_custom
+                        python_cmd = "python predict_noisy_image.py --image " + tmp_file_path + \
+                                        " --metrics '" + ','.join(p_metrics) + \
+                                        "' --params '" + '::'.join(p_params) + \
+                                        "' --model " + p_model_file
 
                         ## call command ##
                         p = subprocess.Popen(python_cmd, stdout=subprocess.PIPE, shell=True)
@@ -173,7 +170,7 @@ def main():
             print("------------------------")
 
             print("Model predictions are saved into %s" % map_filename)
-            time.sleep(10)
+            time.sleep(2)
 
 
 if __name__== "__main__":
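
Example invocation of the updated threshold prediction script (the model path is hypothetical):

```bash
python predict_seuil_expe_maxwell_curve.py --metrics "svd_reconstruction, ipca_reconstruction" --params "100, 200 :: 50, 25" --model saved_models/model.json
```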

+ 11 - 8
train_model.py

@@ -19,13 +19,13 @@ from keras.utils import plot_model
 from modules.utils import config as cfg
 from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
 
-img_width, img_height = 200, 200
+img_width, img_height = cfg.keras_img_size
 batch_size = 32
 
 def auc(y_true, y_pred):
     auc = tf.metrics.auc(y_true, y_pred)[1]
     K.get_session().run(tf.local_variables_initializer())
-    #K.get_session().run(tf.local_variables_initializer())
+    
     return auc
 
 def generate_model(_input_shape):
@@ -100,12 +100,6 @@ def main():
     p_epochs     = args.epochs
     p_val_size   = args.val_size
     p_n_channels = args.n_channels
-
-    # specify the number of dimensions
-    if K.image_data_format() == 'channels_first':
-        input_shape = (p_n_channels, img_width, img_height)
-    else:
-        input_shape = (img_width, img_height, p_n_channels)
         
     ########################
     # 1. Get and prepare data
@@ -123,6 +117,9 @@ def main():
 
     print("Reading all images data...")
 
+    # getting the number of channels from the image paths column
+    n_channels = len(dataset_train[1].iloc[0].split(':'))
+
+    # specify the number of dimensions
+    if K.image_data_format() == 'channels_first':
+        input_shape = (n_channels, img_width, img_height)
+    else:
+        input_shape = (img_width, img_height, n_channels)
+
     # `:` is the separator used for getting each img path
     if p_n_channels > 1:
         dataset_train[1] = dataset_train[1].split(':').apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE).reshape(input_shape))
@@ -181,6 +178,12 @@ def main():
     # 2. Getting model
     #######################
 
     model = generate_model(input_shape)
     model.summary()
  

+ 9 - 3
transformation_functions.py

@@ -23,11 +23,14 @@ def svd_reconstruction(img, interval):
 
 def fast_ica_reconstruction(img, components):
 
+    lab_img = metrics.get_LAB_L(img)
+    lab_img = np.array(lab_img, 'uint8')
+
     ica = FastICA(n_components=components)
     # run ICA on image
-    ica.fit(img)
+    ica.fit(lab_img)
     # reconstruct image with independent components
-    image_ica = ica.fit_transform(img)
+    image_ica = ica.fit_transform(lab_img)
     restored_image = ica.inverse_transform(image_ica)
 
     return restored_image
@@ -35,9 +38,12 @@ def fast_ica_reconstruction(img, components):
 
 def ipca_reconstruction(img, components, _batch_size=25):
 
+    lab_img = metrics.get_LAB_L(img)
+    lab_img = np.array(lab_img, 'uint8')
+
     transformer = IncrementalPCA(n_components=components, batch_size=_batch_size)
 
-    transformed_image = transformer.fit_transform(img) 
+    transformed_image = transformer.fit_transform(lab_img) 
     restored_image = transformer.inverse_transform(transformed_image)
 
     return restored_image
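
A minimal usage sketch of the updated helpers, assuming `ipfml` and Pillow are available and `block.png` is an illustrative image block path:

```python
from PIL import Image

from transformation_functions import ipca_reconstruction, fast_ica_reconstruction

# load one image block (illustrative path)
block = Image.open('block.png')

# both helpers now convert the block to its LAB L (grey level)
# representation before compressing and reconstructing it
ipca_block = ipca_reconstruction(block, components=30, _batch_size=35)
ica_block = fast_ica_reconstruction(block, components=50)
```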