Browse source

Use of modules dependency

Jérôme BUISINE 4 years ago
Parent commit 95cba96b9b
16 files changed with 137 additions and 71 deletions
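
In short: this commit replaces the repository-local modules/config.py with a shared "modules" git submodule (prise-3d/Thesis-CommonModules) plus a project-level custom_config.py layered on top of it, and updates every script and shell runner to the new imports and script paths.
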
  1. .gitmodules (+3 -0)
  2. compare_images.py (+2 -0)
  3. custom_config.py (+41 -0)
  4. modules/features.py (+7 -2)
  5. make_dataset.py (+9 -6)
  6. modules (+1 -0)
  7. modules/__init__.py (+0 -0)
  8. modules/config.py (+0 -22)
  9. write_result_keras.py (+13 -7)
  10. reconstruct.py (+10 -7)
  11. reconstruct_keras.py (+10 -5)
  12. reconstruct_scene_mean.py (+10 -6)
  13. run.sh (+1 -1)
  14. run_keras.sh (+10 -7)
  15. train_model.py (+8 -4)
  16. train_model_keras.py (+12 -4)

+ 3 - 0
.gitmodules

@@ -0,0 +1,3 @@
+[submodule "modules"]
+	path = modules
+	url = https://github.com/prise-3d/Thesis-CommonModules.git
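
Note: with modules tracked as a submodule, a plain clone leaves that directory empty; fetch it with git clone --recursive, or with git submodule update --init after cloning.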

+ 2 - 0
compare_images.py

@@ -1,5 +1,7 @@
+# main imports
 import os, sys, argparse
 
+# image processing imports
 from PIL import Image
 import ipfml.iqa.fr as fr
 

+ 41 - 0
custom_config.py

@@ -0,0 +1,41 @@
+from modules.config.global_config import *
+import os
+
+# store all variables from global config
+context_vars = vars()
+
+# folders
+## zone_folder                     = 'zone'
+## output_data_folder              = 'data'
+## dataset_path                    = 'dataset'
+## threshold_map_folder            = 'threshold_map'
+## models_information_folder       = 'models_info'
+## results_information_folder      = 'results'
+## saved_models_folder             = 'saved_models'
+## min_max_custom_folder           = 'custom_norm'
+## learned_zones_folder            = 'learned_zones'
+reconstructed_folder            = 'reconstructed'
+
+# files or extensions
+## csv_model_comparisons_filename  = 'models_comparisons.csv'
+## seuil_expe_filename             = 'seuilExpe'
+## min_max_filename_extension      = '_min_max_values'
+output_file_prefix              = "dataset_"
+global_result_filepath          = os.path.join(results_information_folder, "models_comparisons.csv")
+global_result_filepath_keras    = os.path.join(results_information_folder, "models_comparisons_keras.csv")
+
+# variables 
+folder_and_files_filtered       = ["analyse", "make_dataset.py", ".vscode"]
+
+number_of_rows                  = 512
+number_of_columns               = 512
+keras_epochs                    = 5
+
+kind_of_models                  = ["SGD", "Ridge", "SVR"]
+
+
+features_list                   = ['samples', 'variances']
+
+scenes_list                     = ['Exterieur01', 'Boulanger', 'CornellBoxNonVide', 'CornellBoxNonVideTextureArcade', 'CornellBoxVide', 'Bar1', 'CornellBoxNonVideTextureDegrade', 'CornellBoxNonVideTextureDamier', 'CornellBoxVideTextureDamier', 'CornellBoxNonVide', 'Sponza1', 'Bureau1_cam2']
+
+test_scenes                     = ['Sponza1']
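
The commented entries above are values that now come from the shared global_config inside the modules submodule; the star import re-exports them, and custom_config.py layers project-specific settings on top. A minimal usage sketch of this layered configuration, assuming dataset_path is defined in global_config as the commented entry suggests:

    import sys

    # same trick the scripts in this commit use: make the repository root
    # importable so both custom_config and the modules submodule resolve
    sys.path.insert(0, '')

    import custom_config as cfg

    # inherited from modules/config/global_config.py (assumed, per the
    # commented '## dataset_path' entry above)
    print(cfg.dataset_path)

    # defined locally in custom_config.py
    print(cfg.reconstructed_folder)   # 'reconstructed'
    print(cfg.keras_epochs)           # 5

Since the empty entry passed to sys.path.insert(0, '') resolves against the current working directory, these scripts must be launched from the repository root, which is exactly what run.sh and run_keras.sh do.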

+ 7 - 2
modules/features.py

@@ -1,6 +1,11 @@
-from . import config as config
-
+# main imports
 import numpy as np
+import sys
+
+# config and modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
 
 def compute_feature(feature_choice, samples):
 

+ 9 - 6
make_dataset.py

@@ -1,17 +1,20 @@
+# main imports
 import numpy as np
 import pandas as pd
-
 import os, sys, argparse
 
-import modules.config as cfg
-from modules.features import compute_feature
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from features import compute_feature
 
 def compute_files(_n, _feature_choice, _each_row, _each_column):
     """
     Read all folders and files of scenes in order to compute output dataset
     """
 
-    output_dataset_filename = cfg.output_file_prefix + _feature_choice +'_' + _n + '_column_' + _each_column + '_row_' + _each_row + '.csv'
+    output_dataset_filename = cfg.output_file_prefix + _n + '_' + _feature_choice + '_column_' + _each_column + '_row_' + _each_row + '.csv'
 
     output_dataset_filename = os.path.join(cfg.output_data_folder, output_dataset_filename)
 
@@ -22,7 +25,7 @@ def compute_files(_n, _feature_choice, _each_row, _each_column):
 
     print('Preparing to store data into ', output_dataset_filename)
 
-    scenes = os.listdir(cfg.folder_scenes_path)
+    scenes = os.listdir(cfg.dataset_path)
 
     # remove min max file from scenes folder
     scenes = [s for s in scenes if s not in cfg.folder_and_files_filtered]
@@ -39,7 +42,7 @@ def compute_files(_n, _feature_choice, _each_row, _each_column):
 
     for scene in scenes:
 
-        scene_path = os.path.join(cfg.folder_scenes_path, scene)
+        scene_path = os.path.join(cfg.dataset_path, scene)
 
         for id_column in range(cfg.number_of_columns):
 

+ 1 - 0
modules

@@ -0,0 +1 @@
+Subproject commit d5de038bdccaa58ff2123d5227482dc6c0ea2500

+ 0 - 0
modules/__init__.py


+ 0 - 22
modules/config.py

@@ -1,22 +0,0 @@
-output_data_folder              = "data"
-folder_scenes_path              = "dataset"
-models_information_folder       = 'models_info'
-saved_models_folder             = 'saved_models'
-reconstructed_folder            = 'reconstructed'
-
-output_file_prefix              = "dataset_"
-folder_and_files_filtered       = ["analyse", "make_dataset.py", ".vscode"]
-
-number_of_rows                  = 512
-number_of_columns               = 512
-
-kind_of_models                  = ["SGD", "Ridge", "SVR"]
-
-global_result_filepath          = "results/models_comparisons.csv"
-global_result_filepath_keras    = "results/models_comparisons_keras.csv"
-
-features_list                   = ['samples', 'variances']
-
-scenes_list                     = ['Exterieur01', 'Boulanger', 'CornellBoxNonVide', 'CornellBoxNonVideTextureArcade', 'CornellBoxVide', 'Bar1', 'CornellBoxNonVideTextureDegrade', 'CornellBoxNonVideTextureDamier', 'CornellBoxVideTextureDamier', 'CornellBoxNonVide', 'Sponza1', 'Bureau1_cam2']
-
-test_scenes                     = ['Sponza1']
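
Nothing from this deleted file is lost: each value reappears either in the shared global_config (the entries commented out in custom_config.py above) or in custom_config.py itself, with folder_scenes_path renamed to dataset_path along the way.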

+ 13 - 7
write_result_keras.py

@@ -1,21 +1,24 @@
+# main imports
 import numpy as np
 import pandas as pd
 import json
 import os, sys, argparse, subprocess
 
+# model imports
 from keras.models import model_from_json
-
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import MinMaxScaler
-
-import modules.config as cfg
-
 from joblib import dump, load
-from PIL import Image
 
+# image processing imports
+from PIL import Image
 import ipfml.iqa.fr as fr
 from ipfml import metrics
 
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
 
 n_samples_image_name_postfix = "_samples_mean.png"
 reference_image_name_postfix = "_1000_samples_mean.png"
@@ -46,12 +49,12 @@ def write_result(_scene_name, _data_file, _model_path, _n, _reconstructed_path,
     if not os.path.exists(n_samples_image_path):
         # call sub process to create 'n' samples img
         print("Creation of 'n' samples image : ", n_samples_image_path)
-        subprocess.run(["python", "reconstruct_scene_mean.py", "--scene", _scene_name, "--n", _n, "--image_name", n_samples_image_path.split('/')[-1]])
+        subprocess.run(["python", "reconstruct/reconstruct_scene_mean.py", "--scene", _scene_name, "--n", _n, "--image_name", n_samples_image_path.split('/')[-1]])
 
     if not os.path.exists(reference_image_path):
         # call sub process to create 'reference' img
         print("Creation of reference image : ", reference_image_path)
-        subprocess.run(["python", "reconstruct_scene_mean.py", "--scene", _scene_name, "--n", str(1000), "--image_name", reference_image_path.split('/')[-1]])
+        subprocess.run(["python", "reconstruct/reconstruct_scene_mean.py", "--scene", _scene_name, "--n", str(1000), "--image_name", reference_image_path.split('/')[-1]])
 
 
     # load the trained model
@@ -88,6 +91,9 @@ def write_result(_scene_name, _data_file, _model_path, _n, _reconstructed_path,
 
     model_name = _model_path.split('/')[-1].replace('.json', '')
 
+    if not os.path.exists(cfg.results_information_folder):
+        os.makedirs(cfg.results_information_folder)
+    
     # save score into models_comparisons_keras.csv file
     with open(cfg.global_result_filepath_keras, "a") as f:
        f.write(model_name + ';' + str(len(y)) + ';' + str(coeff[0]) + ';' + str(mse_reconstructed_n_samples) + ';' + str(mse_ref_reconstructed_samples) + '\n')

+ 10 - 7
reconstruct.py

@@ -1,20 +1,23 @@
+# main imports
 import numpy as np
 import pandas as pd
-
 import os, sys, argparse
 
+# models imports
 from sklearn import linear_model
 from sklearn import svm
 from sklearn.utils import shuffle
+from joblib import dump, load
 
-import modules.config as cfg
-from modules.features import compute_feature
-
+# image processing imports
 from ipfml import metrics
+from PIL import Image
 
-from joblib import dump, load
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
 
-from PIL import Image
+import custom_config as cfg
+from features import compute_feature
 
 def reconstruct(_scene_name, _model_path, _n, _feature_choice):
     
@@ -25,7 +28,7 @@ def reconstruct(_scene_name, _model_path, _n, _feature_choice):
     clf = load(_model_path)
 
     # load scene and its `n` first pixel value data
-    scene_path = os.path.join(cfg.folder_scenes_path, _scene_name)
+    scene_path = os.path.join(cfg.dataset_path, _scene_name)
 
     for id_column in range(cfg.number_of_columns):
 

+ 10 - 5
reconstruct_keras.py

@@ -1,17 +1,22 @@
+# main imports
 import numpy as np
 import pandas as pd
 import json
 import os, sys, argparse
 
+# models imports
 from keras.models import model_from_json
-
-import modules.config as cfg
-from modules.features import compute_feature
-
 from joblib import dump, load
 
+# image processing imports
 from PIL import Image
 
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from features import compute_feature
+
 def reconstruct(_scene_name, _model_path, _n, _feature_choice):
     
     # construct the empty output image
@@ -28,7 +33,7 @@ def reconstruct(_scene_name, _model_path, _n, _feature_choice):
                     metrics=['accuracy'])
 
     # load scene and its `n` first pixel value data
-    scene_path = os.path.join(cfg.folder_scenes_path, _scene_name)
+    scene_path = os.path.join(cfg.dataset_path, _scene_name)
 
     for id_column in range(cfg.number_of_columns):
 

+ 10 - 6
reconstruct_scene_mean.py

@@ -1,26 +1,30 @@
+# main imports
 import numpy as np
 import pandas as pd
-
 import os, sys, argparse
 
+# models imports
 from sklearn import linear_model
 from sklearn import svm
 from sklearn.utils import shuffle
-
-import modules.config as cfg
-import modules.metrics as metrics
-
 from joblib import dump, load
 
+# image processing imports 
 from PIL import Image
 
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+
+
 def reconstruct(_scene_name, _n):
     
     # construct the empty output image
     output_image = np.empty([cfg.number_of_rows, cfg.number_of_columns])
 
     # load scene and its `n` first pixel value data
-    scene_path = os.path.join(cfg.folder_scenes_path, _scene_name)
+    scene_path = os.path.join(cfg.dataset_path, _scene_name)
 
     for id_column in range(cfg.number_of_columns):
 

+ 1 - 1
run.sh

@@ -24,7 +24,7 @@ for feature in {'variances','samples'}; do
                 if ! grep -q "${MODEL_NAME}" "${file_path}"; then
                     echo "Run computation data for model ${MODEL_NAME}"
 
-                    python make_dataset.py --n ${n} --feature ${feature} --each_row ${row} --each_column ${column}
+                    python generate/make_dataset.py --n ${n} --feature ${feature} --each_row ${row} --each_column ${column}
                 fi
 
                 for model in {"SGD","Ridge"}; do

+ 10 - 7
run_keras.sh

@@ -15,24 +15,27 @@ fi
 
 for feature in {'variances','samples'}; do
     for n in {3,4,5,6,7,8,9,10,15,20,25,30}; do
-    for row in {1,2,3,4,5}; do
-        for column in {1,2,3,4,5}; do
+    #for row in {1,2,3,4,5}; do
+    #    for column in {1,2,3,4,5}; do
+    
+    for row in {4,5}; do
+        for column in {4,5}; do
 
                 # Run creation of dataset and train model
                 DATASET_NAME="data/dataset_${n}_${feature}_column_${column}_row_${row}.csv"
-                MODEL_NAME="${n}_${feature}_column_${column}_row_${row}_${model}"
-                IMAGE_RECONSTRUCTED="Sponza1_${feature}_${n}_${row}_${column}.png"
+                MODEL_NAME="${n}_${feature}_column_${column}_row_${row}"
+                IMAGE_RECONSTRUCTED="Sponza1_${n}_${feature}_${row}_${column}.png"
 
                 if ! grep -q "${MODEL_NAME}" "${file_path}"; then
                     echo "Run computation for model ${MODEL_NAME}"
 
                     # Already computed..
-                    python make_dataset.py --n ${n} --feature ${feature} --each_row ${row} --each_column ${column}
+                    python generate/make_dataset.py --n ${n} --feature ${feature} --each_row ${row} --each_column ${column}
                     python train_model_keras.py --data ${DATASET_NAME} --model_name ${MODEL_NAME}
 
                     # TODO : Add of reconstruct process for image ?
-                    python reconstruct_keras.py --n ${n} --feature ${feature} --model_path saved_models/${MODEL_NAME}.json --scene Sponza1 --image_name ${IMAGE_RECONSTRUCTED}
-                    python write_result_keras.py --n ${n} --feature ${feature} --model_path saved_models/${MODEL_NAME}.json --scene Sponza1 --image_path reconstructed/${IMAGE_RECONSTRUCTED} --data ${DATASET_NAME} --iqa mse &
+                    python reconstruct/reconstruct_keras.py --n ${n} --feature ${feature} --model_path saved_models/${MODEL_NAME}.json --scene Sponza1 --image_name ${IMAGE_RECONSTRUCTED}
+                    python others/write_result_keras.py --n ${n} --model_path saved_models/${MODEL_NAME}.json --scene Sponza1 --image_path reconstructed/${IMAGE_RECONSTRUCTED} --data ${DATASET_NAME} --iqa mse &
                 else
                     echo "${MODEL_NAME} results already computed.."
                 fi
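
Dropping _${model} from MODEL_NAME also removes a reference to a variable that, at least in the hunks shown, is never set in this script (run.sh, by contrast, loops over SGD and Ridge).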

+ 8 - 4
train_model.py

@@ -1,16 +1,20 @@
+# main imports
 import numpy as np
 import pandas as pd
-
 import os, sys, argparse
 
+# model imports
 from sklearn import linear_model
 from sklearn import svm
 from sklearn.utils import shuffle
+from joblib import dump, load
 
-import modules.config as cfg
-import modules.metrics as metrics
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from ipfml import metrics
 
-from joblib import dump, load
 
 def get_model_choice(_model_name):
     """

+ 12 - 4
train_model_keras.py

@@ -1,10 +1,12 @@
+# main imports
 import os, sys, argparse
 import numpy as np
 import json
 import matplotlib.pyplot as plt
+
+# model imports
 from joblib import dump
 import tensorflow as tf
-
 from sklearn.model_selection import train_test_split
 from sklearn.model_selection import cross_val_score
 from sklearn.model_selection import KFold
@@ -15,8 +17,11 @@ from keras.layers import Dense, Dropout
 from keras.wrappers.scikit_learn import KerasRegressor
 from keras import backend as K
 
-import modules.config as cfg
-import modules.metrics as metrics
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+
 
 def train(_data_file, _model_name):
 
@@ -54,7 +59,7 @@ def train(_data_file, _model_name):
     # Set expected metrics
     # TODO : add coefficients of determination as metric ? Or always use MSE/MAE
     model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
-    history = model.fit(X_train, y_train, epochs=50, batch_size=50,  verbose=1, validation_split=0.2)
+    history = model.fit(X_train, y_train, epochs=cfg.keras_epochs, batch_size=50,  verbose=1, validation_split=0.2)
 
     # save the model into json/HDF5 file
     if not os.path.exists(cfg.saved_models_folder):
@@ -70,6 +75,9 @@ def train(_data_file, _model_name):
     model.save_weights(model_output_path.replace('.json', '.h5'))
 
     # save score into global_result.csv file
+    # if not os.path.exists(cfg.results_information_folder):
+    #    os.makedirs(cfg.results_information_folder)
+    #
     # with open(cfg.global_result_filepath, "a") as f:
     #   f.write(_model_name + ';' + str(len(y)) + ';' + str(coeff[0]) + ';\n')
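
For reference, once enabled, the commented result-saving block above would mirror the guard-then-append pattern now active in write_result_keras.py. A hedged sketch, assuming y and coeff are computed earlier in train() as the original comment implies:

    # make sure the results folder exists before appending
    if not os.path.exists(cfg.results_information_folder):
        os.makedirs(cfg.results_information_folder)

    # append model name, dataset size and determination coefficient
    with open(cfg.global_result_filepath, "a") as f:
        f.write(_model_name + ';' + str(len(y)) + ';' + str(coeff[0]) + ';\n')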