Browse Source

Merge branch 'release/v0.0.4'

Jérôme BUISINE 5 years ago
Parent commit dd71817afe
9 changed files with 297 additions and 6 deletions
  1. compare_images.py (+31, -0)
  2. generate_data.sh (+11, -0)
  3. make_dataset.py (+2, -0)
  4. modules/config.py (+2, -2)
  5. reconstruct_keras.py (+95, -0)
  6. reconstruct_scene_mean.py (+5, -3)
  7. run_keras.sh (+37, -0)
  8. train_model.py (+1, -1)
  9. train_model_keras.py (+113, -0)

compare_images.py (+31, -0)

@@ -0,0 +1,31 @@
+import os, sys, argparse
+
+from PIL import Image
+import ipfml.iqa.fr as fr
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Compare a reference image with a reconstructed one using an IQA metric")
+
+    parser.add_argument('--reference', type=str, help='Reference image')
+    parser.add_argument('--reconstructed', type=str, help='Image to compare')
+    parser.add_argument('--iqa', type=str, help='Full-reference IQA metric to use', choices=['ssim', 'mse', 'rmse', 'mae', 'psnr'])
+    args = parser.parse_args()
+
+    param_reference = args.reference
+    param_reconstructed = args.reconstructed
+    param_iqa = args.iqa
+
+    reference_image = Image.open(param_reference)
+    reconstructed_image = Image.open(param_reconstructed)
+
+    try:
+        fr_iqa = getattr(fr, param_iqa)
+    except AttributeError:
+        raise NotImplementedError("FR IQA `{}` does not implement `{}`".format(fr.__name__, param_iqa))
+
+    print(fr_iqa(reference_image, reconstructed_image))
+
+
+if __name__ == "__main__":
+    main()
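
Note: the script looks the metric function up by name with `getattr`. A minimal usage sketch of the same dispatch, assuming (as the script does) that `ipfml.iqa.fr` exposes one callable per metric name; the image paths here are hypothetical:

    import ipfml.iqa.fr as fr
    from PIL import Image

    reference = Image.open('reference.png')          # hypothetical paths
    reconstructed = Image.open('reconstructed.png')

    # resolve and apply each supported full-reference metric by name
    for name in ['ssim', 'mse', 'rmse', 'mae', 'psnr']:
        print(name, getattr(fr, name)(reference, reconstructed))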

generate_data.sh (+11, -0)

@@ -0,0 +1,11 @@
+for n in {3,4,5,6,7,8,9,10,15,20,25,30}; do
+    for row in {1,2,3,4,5}; do
+        for column in {1,2,3,4,5}; do
+
+            # Run creation of dataset and train model
+            DATASET_NAME="data/dataset_${n}_column_${column}_row_${row}.csv"
+
+            python make_dataset.py --n ${n} --each_row ${row} --each_column ${column} &
+        done
+    done
+done
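
Note that the trailing `&` backgrounds every `make_dataset.py` run, so the sweep launches all 12 × 5 × 5 = 300 jobs at once. A hedged Python sketch of the same sweep with bounded concurrency (a variant, not part of the commit):

    import subprocess
    from concurrent.futures import ThreadPoolExecutor

    def run(n, row, column):
        subprocess.run(['python', 'make_dataset.py', '--n', str(n),
                        '--each_row', str(row), '--each_column', str(column)])

    # cap the number of simultaneous dataset jobs at 4
    with ThreadPoolExecutor(max_workers=4) as pool:
        for n in [3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30]:
            for row in range(1, 6):
                for column in range(1, 6):
                    pool.submit(run, n, row, column)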

make_dataset.py (+2, -0)

@@ -62,6 +62,8 @@ def compute_files(_n, _each_row, _each_column):
                             pixel_values = lines[0:int(_n)]
                             mean = sum(lines) / float(len(lines))
 
+
+                            # if mean != pixel_values[0]:
                             saved_row += str(mean)
 
                             for val in pixel_values:

modules/config.py (+2, -2)

@@ -13,6 +13,6 @@ number_of_columns               = 512
 kind_of_models                  = ["SGD", "Ridge", "SVR"]
 
 global_result_filepath          = "models_info/models_comparisons.csv"
-scenes_list                     = ['Exterieur01', 'Boulanger', 'CornellBoxNonVideTextureArcade', 'CornellBoxVide', 'Bar1', 'CornellBoxNonVideTextureDegrade', 'CornellBoxNonVideTextureDamier', 'CornellBoxVideTextureDamier', 'CornellBoxNonVide', 'Sponza1', 'Bureau1_cam2']
+scenes_list                     = ['Exterieur01', 'Boulanger', 'CornellBoxNonVide', 'CornellBoxNonVideTextureArcade', 'CornellBoxVide', 'Bar1', 'CornellBoxNonVideTextureDegrade', 'CornellBoxNonVideTextureDamier', 'CornellBoxVideTextureDamier', 'Sponza1', 'Bureau1_cam2']
 
-test_scenes                     = []
+test_scenes                     = ['Sponza1']

reconstruct_keras.py (+95, -0)

@@ -0,0 +1,95 @@
+import numpy as np
+import pandas as pd
+import json
+import os, sys, argparse
+
+from keras.models import model_from_json
+
+import modules.config as cfg
+import modules.metrics as metrics
+
+from joblib import dump, load
+
+from PIL import Image
+
+def reconstruct(_scene_name, _model_path, _n):
+    
+    # construct the empty output image
+    output_image = np.empty([cfg.number_of_rows, cfg.number_of_columns])
+
+    # load the trained model
+    with open(_model_path, 'r') as f:
+        json_model = json.load(f)
+        model = model_from_json(json_model)
+        model.load_weights(_model_path.replace('.json', '.h5'))
+
+        # compile with the same mse regression loss used at training time
+        model.compile(loss='mse', optimizer='adam',
+                      metrics=['mse', 'mae'])
+
+    # load scene and its `n` first pixel value data
+    scene_path = os.path.join(cfg.folder_scenes_path, _scene_name)
+
+    for id_column in range(cfg.number_of_columns):
+
+        folder_path = os.path.join(scene_path, str(id_column))
+
+        pixels_predicted = []
+
+        for id_row in range(cfg.number_of_rows):
+            
+            pixel_filename = _scene_name + '_' + str(id_column) + '_' + str(id_row) + ".dat"
+            pixel_file_path = os.path.join(folder_path, pixel_filename)
+            
+            with open(pixel_file_path, 'r') as f:
+
+                # predict the expected pixel value
+                lines = [float(l)/255. for l in f.readlines()]
+                pixel_values = lines[0:int(_n)]
+                pixel_values = np.array(pixel_values).reshape(1, (int(_n)))
+                # predict pixel per pixel
+                pixels_predicted.append(model.predict(pixel_values))
+                
+        # change normalized predicted value to pixel value
+        pixels_predicted = [ val * 255. for val in pixels_predicted]
+
+        for id_pixel, pixel in enumerate(pixels_predicted):
+            output_image[id_pixel, id_column] = pixel
+
+        print("{0:.2f}%".format(id_column / cfg.number_of_columns * 100))
+        sys.stdout.write("\033[F")
+
+    return output_image
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Reconstruct a scene image from a saved Keras model")
+
+    parser.add_argument('--scene', type=str, help='Scene name to reconstruct', choices=cfg.scenes_list)
+    parser.add_argument('--model_path', type=str, help='Json model file path')
+    parser.add_argument('--n', type=str, help='Number of approximated pixel values to keep')
+    parser.add_argument('--image_name', type=str, help="The output image name")
+
+    args = parser.parse_args()
+
+    param_scene_name = args.scene
+    param_n = args.n
+    param_model_path = args.model_path
+    param_image_name = args.image_name
+
+    # get default value of `n` from the model filename
+    if not param_n:
+        param_n = os.path.basename(param_model_path).split('_')[0]
+
+    output_image = reconstruct(param_scene_name, param_model_path, param_n)
+
+    if not os.path.exists(cfg.reconstructed_folder):
+        os.makedirs(cfg.reconstructed_folder)
+
+    image_path = os.path.join(cfg.reconstructed_folder, param_image_name)
+
+    img = Image.fromarray(np.uint8(output_image))
+    img.save(image_path)
+
+if __name__ == "__main__":
+    main()
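
Note: `reconstruct` calls `model.predict` once per pixel, i.e. number_of_rows × number_of_columns separate Keras calls. A hedged sketch of a batched per-column variant (a possible optimization, not the committed code):

    import numpy as np

    def predict_column(model, column_pixel_values, n):
        # column_pixel_values: one list of `n` normalized samples per row
        batch = np.array(column_pixel_values).reshape(-1, int(n))
        predicted = model.predict(batch)     # shape (number_of_rows, 1)
        return predicted[:, 0] * 255.        # back to 8-bit pixel scale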

reconstruct_scene_mean.py (+5, -3)

@@ -14,7 +14,7 @@ from joblib import dump, load
 
 from PIL import Image
 
-def reconstruct(_scene_name):
+def reconstruct(_scene_name, _n):
     
     # construct the empty output image
     output_image = np.empty([cfg.number_of_rows, cfg.number_of_columns])
@@ -35,7 +35,7 @@ def reconstruct(_scene_name):
 
                 # predict the expected pixel value
                 lines = [float(l) for l in f.readlines()]
-                mean = sum(lines) / float(len(lines))
+                mean = sum(lines[0:int(_n)]) / float(_n)
 
             output_image[id_row, id_column] = mean
 
@@ -49,14 +49,16 @@ def main():
     parser = argparse.ArgumentParser(description="Reconstruct a scene image from the mean of the first n samples")
 
     parser.add_argument('--scene', type=str, help='Scene name to reconstruct', choices=cfg.scenes_list)
+    parser.add_argument('--n', type=str, help='Number of samples to take')
+    parser.add_argument('--image_name', type=str, help="The output image name")
 
     args = parser.parse_args()
 
     param_scene_name = args.scene
+    param_n = args.n
     param_image_name = args.image_name
 
-    output_image = reconstruct(param_scene_name)
+    output_image = reconstruct(param_scene_name, param_n)
 
     if not os.path.exists(cfg.reconstructed_folder):
         os.makedirs(cfg.reconstructed_folder)

run_keras.sh (+37, -0)

@@ -0,0 +1,37 @@
+# erase "models_info/models_comparisons.csv" file and write new header
+file_path='models_info/models_comparisons.csv'
+
+erased=$1
+
+if [ "${erased}" == "Y" ]; then
+    echo "Previous data file erased..."
+    rm ${file_path}
+    mkdir -p models_info
+    touch ${file_path}
+
+    # add header line
+    echo 'model_name; number_of_approximations; coeff_of_determination;' >> ${file_path}
+fi
+
+for n in {3,4,5,6,7,8,9,10,15,20,25,30}; do
+    for row in {1,2,3,4,5}; do
+        for column in {1,2,3,4,5}; do
+
+            # Run creation of dataset and train model
+            DATASET_NAME="data/dataset_${n}_column_${column}_row_${row}.csv"
+            MODEL_NAME="${n}_column_${column}_row_${row}_KERAS"
+
+            if ! grep -q "${MODEL_NAME}" "${file_path}"; then
+                echo "Run computation for model ${MODEL_NAME}"
+
+                #python make_dataset.py --n ${n} --each_row ${row} --each_column ${column}
+                python train_model_keras.py --data ${DATASET_NAME} --model_name ${MODEL_NAME} &
+
+                # TODO: add a reconstruction step for the image?
+                # python reconstruct_keras.py --n ${n} --model_path data/${MODEL_NAME}
+            else
+                echo "${MODEL_NAME} results already computed.."
+            fi
+        done
+    done
+done

train_model.py (+1, -1)

@@ -50,7 +50,7 @@ def train(_data_file, _model_name):
     if not os.path.exists(cfg.saved_models_folder):
         os.makedirs(cfg.saved_models_folder)
 
-    # compute model filename
+    # compute model filename
     model_filename = _data_file.split('/')[-1].replace(cfg.output_file_prefix, '').replace('.csv', '')
     model_filename = model_filename + '_' + _model_name + '.joblib'
 

train_model_keras.py (+113, -0)

@@ -0,0 +1,113 @@
+import os, sys, argparse
+import numpy as np
+import json
+import matplotlib.pyplot as plt
+from joblib import dump
+import tensorflow as tf
+
+from sklearn.model_selection import train_test_split
+from sklearn.model_selection import cross_val_score
+from sklearn.model_selection import KFold
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import MinMaxScaler
+from keras.models import Sequential
+from keras.layers import Dense, Dropout
+from keras.wrappers.scikit_learn import KerasRegressor
+from keras import backend as K
+
+import modules.config as cfg
+import modules.metrics as metrics
+
+def train(_data_file, _model_name):
+
+    # get length of data
+    dataset=np.loadtxt(_data_file, delimiter=';')
+
+    y = dataset[:,0]
+    X = dataset[:,1:]
+    print(X.shape)
+    _, nb_elem = X.shape
+
+    y = np.reshape(y, (-1, 1))
+
+    # fit two separate scalers: refitting one MinMaxScaler on `y`
+    # would overwrite the statistics already fitted on `X`
+    scaler_x = MinMaxScaler().fit(X)
+    scaler_y = MinMaxScaler().fit(y)
+
+    xscale = scaler_x.transform(X)
+    yscale = scaler_y.transform(y)
+
+    X_train, X_test, y_train, y_test = train_test_split(xscale, yscale)
+
+    # define keras NN structure
+    model = Sequential()
+    model.add(Dense(200, input_dim=nb_elem, kernel_initializer='normal', activation='relu'))
+    model.add(Dropout(0.2))
+    model.add(Dense(100, activation='relu'))
+    model.add(Dropout(0.2))
+    model.add(Dense(50, activation='relu'))
+    model.add(Dropout(0.2))
+    model.add(Dense(10, activation='relu'))
+    model.add(Dropout(0.2))
+    model.add(Dense(1, activation='linear'))
+    model.summary()
+
+    # Set expected metrics
+    # TODO : add coefficients of determination as metric ? Or always use MSE/MAE
+    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
+    history = model.fit(X_train, y_train, epochs=150, batch_size=50,  verbose=1, validation_split=0.2)
+
+    # inspect which metrics were tracked during training
+    print(history.history.keys())
+
+    y_predicted = model.predict(X_test)
+    len_shape, _ = y_predicted.shape
+    y_predicted = y_predicted.reshape(len_shape)
+
+    coeff = metrics.coefficient_of_determination(y_test, y_predicted)
+
+    # save the model into json/HDF5 file
+    if not os.path.exists(cfg.saved_models_folder):
+        os.makedirs(cfg.saved_models_folder)
+
+    model_output_path = os.path.join(cfg.saved_models_folder, _model_name + '.json')
+    json_model_content = model.to_json()
+
+    with open(model_output_path, 'w') as f:
+        print("Model saved into ", model_output_path)
+        json.dump(json_model_content, f, indent=4)
+
+    model.save_weights(model_output_path.replace('.json', '.h5'))
+
+    # save score into global_result.csv file
+    with open(cfg.global_result_filepath, "a") as f:
+        f.write(_model_name + ';' + str(len(y)) + ';' + str(coeff[0]) + ';\n')
+
+
+    # Save plot info using model name
+    plt.figure(figsize=(30, 22))
+    plt.plot(history.history['loss'])
+    plt.plot(history.history['val_loss'])
+    plt.title('model loss', fontsize=20)
+    plt.ylabel('loss', fontsize=16)
+    plt.xlabel('epoch', fontsize=16)
+    plt.legend(['train', 'validation'], loc='upper left', fontsize=16)
+    plt.savefig(model_output_path.replace('.json', '.png'))
+
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Train model and save it")
+
+    parser.add_argument('--data', type=str, help='Filename of dataset')
+    parser.add_argument('--model_name', type=str, help='Saved model name')
+
+    args = parser.parse_args()
+
+    param_data_file = args.data
+    param_model = args.model_name
+
+    train(param_data_file, param_model)
+
+if __name__ == "__main__":
+    main()
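
Note: because `y` is min-max scaled before training, `model.predict` returns values in the scaled range. A short sketch of mapping predictions back, assuming the `scaler_y` fitted in `train` above is still in scope:

    # undo the MinMax scaling on the test predictions
    y_pred_scaled = model.predict(X_test)                 # scaled range
    y_pred = scaler_y.inverse_transform(y_pred_scaled)    # original range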