Parcourir la source

Merge branch 'release/v0.0.3'

Jérôme BUISINE il y a 5 ans
Parent
commit
2785939174
8 fichiers modifiés avec 133 ajouts et 30 suppressions
  1. 3 0
      .gitignore
  2. 33 0
      README.md
  3. 10 8
      make_dataset.py
  4. 0 11
      models_info/models_comparisons.csv
  5. 3 1
      modules/config.py
  6. 11 7
      reconstruct.py
  7. 70 0
      reconstruct_scene_mean.py
  8. 3 3
      run.sh

+ 3 - 0
.gitignore

@@ -3,5 +3,8 @@
 
 data
 saved_models
+reconstructed
+
+models_info/models_comparisons.csv
 
 __pycache__

+ 33 - 0
README.md

@@ -0,0 +1,33 @@
+# Sample Analysis
+
+## Description
+
+The aim of this project is to predict the mean pixel value obtained from the Monte Carlo rendering process of synthesized images, using only a few samples as input to the model.
+
+
+### Data
+
+The data consist of the sample information of all scenes, obtained during the rendering process.
+
+For each pixel we have a list of all estimated grey values (samples).
+
+### Models
+List of models tested:
+- Ridge Regression
+- SGD
+- SVR (with rbf kernel)
+
+
+## How to use
+
+First you need to contact **jerome.buisine@univ-littoral.fr** in order to get the dataset version. The dataset is not available with this source code.
+
+
+```bash
+python make_dataset.py --n 10 --each_row 8 --each_column 8
+```
+
+```bash
+python reconstruct.py --scene Scene1 --model_path saved_models/Model1.joblib --n 10 --image_name output.png
+```
+

+ 10 - 8
make_dataset.py

@@ -27,6 +27,9 @@ def compute_files(_n, _each_row, _each_column):
     scenes = [s for s in scenes if s not in cfg.folder_and_files_filtered]
     scenes = [s for s in scenes if '.csv' not in s] # do not keep generated .csv file
 
+    # skip test scene from dataset
+    scenes = [ s for s in scenes if s not in cfg.test_scenes]
+
     # print(scenes)
 
     counter = 0
@@ -36,20 +39,19 @@ def compute_files(_n, _each_row, _each_column):
     for scene in scenes:
 
         scene_path = os.path.join(cfg.folder_scenes_path, scene)
-        columns_folder = os.listdir(scene_path)
 
-        for id_column, column in enumerate(columns_folder):
+        for id_column in range(cfg.number_of_columns):
 
             if id_column % int(_each_column) == 0 :
 
-                folder_path = os.path.join(scene_path, column)
-
-                pixel_files_list = os.listdir(folder_path)
-
-                for id_row, pixel_file in enumerate(pixel_files_list):
+                folder_path = os.path.join(scene_path, str(id_column))
+                
+                for id_row in range(cfg.number_of_rows):
 
                     if id_row % int(_each_row) == 0:
-                        pixel_file_path = os.path.join(folder_path, pixel_file)
+
+                        pixel_filename = scene + '_' + str(id_column) + '_' + str(id_row) + ".dat"
+                        pixel_file_path = os.path.join(folder_path, pixel_filename)
 
                         saved_row = ''
 

+ 0 - 11
models_info/models_comparisons.csv

@@ -1,11 +0,0 @@
-model_name; number_samples; coeff_of_determination;
-10_column_7_row_7_SGD;60236;0.967103608014;
-10_column_8_row_7_SGD;52096;0.9668493240289788;
-10_column_9_row_7_SGD;46398;0.9657893418797697;
-10_column_10_row_7_SGD;42328;0.9662073687839958;
-10_column_7_row_8_SGD;52096;0.9678400765943843;
-10_column_8_row_8_SGD;45056;0.9717562796162227;
-10_column_9_row_8_SGD;40128;0.9690779553770827;
-10_column_10_row_8_SGD;36608;0.9696013575399688;
-10_column_7_row_9_SGD;46398;0.9698917329738924;
-10_column_8_row_9_SGD;40128;0.9680105640513834;

+ 3 - 1
modules/config.py

@@ -13,4 +13,6 @@ number_of_columns               = 512
 kind_of_models                  = ["SGD", "Ridge", "SVR"]
 
 global_result_filepath          = "models_info/models_comparisons.csv"
-scenes_list                     = ['Exterieur01', 'Boulanger', 'CornellBoxNonVideTextureArcade', 'CornellBoxVide', 'Bar1', 'CornellBoxNonVideTextureDegrade', 'CornellBoxNonVideTextureDamier', 'CornellBoxVideTextureDamier', 'CornellBoxNonVide', 'Sponza1', 'Bureau1_cam2']
+scenes_list                     = ['Exterieur01', 'Boulanger', 'CornellBoxNonVideTextureArcade', 'CornellBoxVide', 'Bar1', 'CornellBoxNonVideTextureDegrade', 'CornellBoxNonVideTextureDamier', 'CornellBoxVideTextureDamier', 'CornellBoxNonVide', 'Sponza1', 'Bureau1_cam2']
+
+test_scenes                     = []

+ 11 - 7
reconstruct.py

@@ -25,17 +25,16 @@ def reconstruct(_scene_name, _model_path, _n):
     # load scene and its `n` first pixel value data
     scene_path = os.path.join(cfg.folder_scenes_path, _scene_name)
 
-    columns_folder = os.listdir(scene_path)
-    for id_column, column in enumerate(columns_folder):
+    for id_column in range(cfg.number_of_columns):
 
-        folder_path = os.path.join(scene_path, column)
-        pixel_files_list = os.listdir(folder_path)
+        folder_path = os.path.join(scene_path, str(id_column))
 
         pixels = []
 
-        for id_row, pixel_file in enumerate(pixel_files_list):
+        for id_row in range(cfg.number_of_rows):
             
-            pixel_file_path = os.path.join(folder_path, pixel_file)
+            pixel_filename = _scene_name + '_' + str(id_column) + '_' + str(id_row) + ".dat"
+            pixel_file_path = os.path.join(folder_path, pixel_filename)
             
             with open(pixel_file_path, 'r') as f:
 
@@ -46,7 +45,12 @@ def reconstruct(_scene_name, _model_path, _n):
 
         # predict column pixels and fill image column by column
         pixels_predicted = clf.predict(pixels)
-        output_image[id_column] = pixels_predicted*255.
+
+        # change normalized predicted value to pixel value
+        pixels_predicted = pixels_predicted*255.
+
+        for id_pixel, pixel in enumerate(pixels_predicted):
+            output_image[id_pixel, id_column] = pixel
 
         print("{0:.2f}%".format(id_column / cfg.number_of_columns * 100))
         sys.stdout.write("\033[F")

+ 70 - 0
reconstruct_scene_mean.py

@@ -0,0 +1,70 @@
+import numpy as np
+import pandas as pd
+
+import os, sys, argparse
+
+from sklearn import linear_model
+from sklearn import svm
+from sklearn.utils import shuffle
+
+import modules.config as cfg
+import modules.metrics as metrics
+
+from joblib import dump, load
+
+from PIL import Image
+
+def reconstruct(_scene_name):
+    
+    # construct the empty output image
+    output_image = np.empty([cfg.number_of_rows, cfg.number_of_columns])
+
+    # load scene and its `n` first pixel value data
+    scene_path = os.path.join(cfg.folder_scenes_path, _scene_name)
+
+    for id_column in range(cfg.number_of_columns):
+
+        folder_path = os.path.join(scene_path, str(id_column))
+
+        for id_row in range(cfg.number_of_rows):
+            
+            pixel_filename = _scene_name + '_' + str(id_column) + '_' + str(id_row) + ".dat"
+            pixel_file_path = os.path.join(folder_path, pixel_filename)
+            
+            with open(pixel_file_path, 'r') as f:
+
+                # predict the expected pixel value
+                lines = [float(l) for l in f.readlines()]
+                mean = sum(lines) / float(len(lines))
+
+            output_image[id_row, id_column] = mean
+
+        print("{0:.2f}%".format(id_column / cfg.number_of_columns * 100))
+        sys.stdout.write("\033[F")
+
+    return output_image
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Train model and saved it")
+
+    parser.add_argument('--scene', type=str, help='Scene name to reconstruct', choices=cfg.scenes_list)
+    parser.add_argument('--image_name', type=str, help="The ouput image name")
+
+    args = parser.parse_args()
+
+    param_scene_name = args.scene
+    param_image_name = args.image_name
+
+    output_image = reconstruct(param_scene_name)
+
+    if not os.path.exists(cfg.reconstructed_folder):
+        os.makedirs(cfg.reconstructed_folder)
+
+    image_path = os.path.join(cfg.reconstructed_folder, param_image_name)
+
+    img = Image.fromarray(np.uint8(output_image))
+    img.save(image_path)
+
+if __name__== "__main__":
+    main()

+ 3 - 3
run.sh

@@ -13,10 +13,10 @@ if [ "${erased}" == "Y" ]; then
     echo 'model_name; number_of_approximations; coeff_of_determination;' >> ${file_path}
 fi
 
-for n in {10,15,20,25,30}; do
+for n in {3,4,5,6,7,8,9,10,15,20,25,30}; do
     for model in {"SGD","Ridge","SVR"}; do
-        for row in {7,8,9,10}; do
-            for column in {7,8,9,10}; do
+        for row in {2,3,4,5,6,7,8,9,10}; do
+            for column in {2,3,4,5,6,7,8,9,10}; do
 
                 # Run creation of dataset and train model
                 DATASET_NAME="data/dataset_${n}_column_${column}_row_${row}.csv"