
Update of Keras scripts

Jérôme BUISINE, 5 years ago
commit cccee3b412
4 files changed, 31 additions and 18 deletions
  1. generate_data.sh (+11, -0)
  2. modules/config.py (+2, -2)
  3. run_keras.sh (+5, -4)
  4. train_model_keras.py (+13, -12)

generate_data.sh (+11, -0)

@@ -0,0 +1,11 @@
+for n in {3,4,5,6,7,8,9,10,15,20,25,30}; do
+    for row in {1,2,3,4,5}; do
+        for column in {1,2,3,4,5}; do
+
+            # Run creation of dataset and train model
+            DATASET_NAME="data/dataset_${n}_column_${column}_row_${row}.csv"
+
+            python make_dataset.py --n ${n} --each_row ${row} --each_column ${column} &
+        done
+    done
+done
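
The new generate_data.sh builds every dataset up front: one make_dataset.py job per (n, row, column) combination, launched in the background so they run in parallel, each presumably writing a CSV named after the DATASET_NAME pattern reused in run_keras.sh. As a point of reference, here is a minimal Python sketch of the command-line interface the loop assumes make_dataset.py exposes; the script itself is not part of this commit, so the help texts and the body below are assumptions.

    # Hypothetical sketch of the CLI contract generate_data.sh relies on;
    # the real make_dataset.py is not shown in this diff.
    import argparse

    def main():
        parser = argparse.ArgumentParser(description="Build one dataset CSV for a given configuration")
        parser.add_argument('--n', type=int, required=True, help="number of samples kept (assumption)")
        parser.add_argument('--each_row', type=int, required=True, help="row step used when sampling (assumption)")
        parser.add_argument('--each_column', type=int, required=True, help="column step used when sampling (assumption)")
        args = parser.parse_args()

        # Output path mirrors the DATASET_NAME convention used by the shell scripts
        output_path = "data/dataset_{}_column_{}_row_{}.csv".format(args.n, args.each_column, args.each_row)
        print("would write", output_path)

    if __name__ == "__main__":
        main()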

modules/config.py (+2, -2)

@@ -13,6 +13,6 @@ number_of_columns               = 512
 kind_of_models                  = ["SGD", "Ridge", "SVR"]
 
 global_result_filepath          = "models_info/models_comparisons.csv"
-scenes_list                     = ['Exterieur01', 'Boulanger', 'CornellBoxNonVideTextureArcade', 'CornellBoxVide', 'Bar1', 'CornellBoxNonVideTextureDegrade', 'CornellBoxNonVideTextureDamier', 'CornellBoxVideTextureDamier', 'CornellBoxNonVide', 'Sponza1', 'Bureau1_cam2']
+scenes_list                     = ['Exterieur01', 'Boulanger', 'CornellBoxNonVide', 'CornellBoxNonVideTextureArcade', 'CornellBoxVide', 'Bar1', 'CornellBoxNonVideTextureDegrade', 'CornellBoxNonVideTextureDamier', 'CornellBoxVideTextureDamier', 'CornellBoxNonVide', 'Sponza1', 'Bureau1_cam2']
 
-test_scenes                     = ['CornellBoxNonVide']
+test_scenes                     = ['Sponza1']
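
This config change moves the held-out test scene from CornellBoxNonVide to Sponza1. Below is a minimal sketch of how the two lists are presumably combined to split scenes between training and testing, assuming the usual cfg alias seen in train_model_keras.py; the actual filtering code is not part of this commit.

    # Hedged sketch: splitting scenes with the config lists above.
    # The real filtering presumably happens in make_dataset.py, which is not in this diff.
    from modules import config as cfg

    train_scenes = [scene for scene in cfg.scenes_list if scene not in cfg.test_scenes]
    test_scenes = list(cfg.test_scenes)

    print("train scenes:", train_scenes)
    print("test scenes :", test_scenes)   # ['Sponza1'] after this change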

run_keras.sh (+5, -4)

@@ -14,8 +14,8 @@ if [ "${erased}" == "Y" ]; then
 fi
 
 for n in {3,4,5,6,7,8,9,10,15,20,25,30}; do
-    for row in {2,3,4,5,6,7,8,9,10}; do
-        for column in {2,3,4,5,6,7,8,9,10}; do
+    for row in {1,2,3,4,5}; do
+        for column in {1,2,3,4,5}; do
 
             # Run creation of dataset and train model
             DATASET_NAME="data/dataset_${n}_column_${column}_row_${row}.csv"
@@ -24,10 +24,11 @@ for n in {3,4,5,6,7,8,9,10,15,20,25,30}; do
             if ! grep -q "${MODEL_NAME}" "${file_path}"; then
                 echo "Run computation for model ${MODEL_NAME}"
 
-                python make_dataset.py --n ${n} --each_row ${row} --each_column ${column}
-                python train_model_keras.py --data ${DATASET_NAME} --model ${model}
+                #python make_dataset.py --n ${n} --each_row ${row} --each_column ${column}
+                python train_model_keras.py --data ${DATASET_NAME} --model ${model} &
 
                 # TODO : Add of reconstruct process for image ?
+                # python reconstruct_keras.py --n ${n} --model_path data/${model}
             else
                 echo "${MODEL_NAME} results already computed.."
             fi
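
run_keras.sh now leaves dataset creation to generate_data.sh (the make_dataset.py call is commented out) and launches each training run in the background, still skipping any model whose name already appears in the results file. For readability, the same guard expressed as a Python sketch; the shell version above, based on grep -q, is what actually runs.

    # Hedged Python equivalent of the `grep -q "${MODEL_NAME}" "${file_path}"` guard;
    # ${file_path} is presumably the global_result_filepath from modules/config.py.
    def already_computed(model_name, result_file="models_info/models_comparisons.csv"):
        try:
            with open(result_file) as results:
                return any(model_name in line for line in results)
        except FileNotFoundError:
            return False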

train_model_keras.py (+13, -12)

@@ -53,22 +53,13 @@ def train(_data_file, _model_name):
     model.summary()
 
     # Set expected metrics
-    # TODO : add coefficients of determination as metric
+    # TODO : add coefficients of determination as metric ? Or always use MSE/MAE
     model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
-    history = model.fit(X_train, y_train, epochs=1, batch_size=50,  verbose=1, validation_split=0.2)
+    history = model.fit(X_train, y_train, epochs=150, batch_size=50,  verbose=1, validation_split=0.2)
 
     # Save model 
     print(history.history.keys())
 
-    # TODO : Save plot info and increase figure size
-    plt.plot(history.history['loss'])
-    plt.plot(history.history['val_loss'])
-    plt.title('model loss', fontsize=20)
-    plt.ylabel('loss', fontsize=16)
-    plt.xlabel('epoch', fontsize=16)
-    plt.legend(['train', 'validation'], loc='upper left', fontsize=16)
-    #plt.show()
-
     y_predicted = model.predict(X_test)
     len_shape, _ = y_predicted.shape
     y_predicted = y_predicted.reshape(len_shape)
@@ -88,12 +79,22 @@ def train(_data_file, _model_name):
 
     model.save_weights(model_output_path.replace('.json', '.h5'))
 
-    # TODO : Save test score into .csv files
     # save score into global_result.csv file
     with open(cfg.global_result_filepath, "a") as f:
        f.write(_model_name + ';' + str(len(y)) + ';' + str(coeff[0]) + ';\n')
 
 
+    # Save plot info using model name
+    plt.figure(figsize=(30, 22))
+    plt.plot(history.history['loss'])
+    plt.plot(history.history['val_loss'])
+    plt.title('model loss', fontsize=20)
+    plt.ylabel('loss', fontsize=16)
+    plt.xlabel('epoch', fontsize=16)
+    plt.legend(['train', 'validation'], loc='upper left', fontsize=16)
+    plt.savefig(model_output_path.replace('.json', '.png'))
+
+
 def main():
 
     parser = argparse.ArgumentParser(description="Train model and saved it")
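
Overall, train_model_keras.py now trains for 150 epochs instead of 1 and saves the loss curve to a .png next to the model weights instead of only building the figure in memory. Here is a condensed, self-contained sketch of that flow, using placeholder data, layer sizes and output paths rather than the project's real architecture and CSV loading, which are outside this hunk.

    # Hedged sketch of the compile/fit/save/plot flow the patched script follows.
    # Data, layer sizes and output paths are placeholders, not the project's values.
    import matplotlib
    matplotlib.use('Agg')                    # write figures without a display
    import matplotlib.pyplot as plt
    import numpy as np
    from keras.models import Sequential      # assumes the standalone Keras package
    from keras.layers import Dense

    # Placeholder data standing in for the CSV dataset
    X_train, y_train = np.random.rand(200, 16), np.random.rand(200)
    X_test = np.random.rand(50, 16)

    model = Sequential([
        Dense(32, activation='relu', input_shape=(16,)),
        Dense(1),
    ])
    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    history = model.fit(X_train, y_train, epochs=150, batch_size=50,
                        verbose=1, validation_split=0.2)

    y_predicted = model.predict(X_test).reshape(len(X_test))

    model.save_weights('data/example_model.h5')

    # Save the loss curve next to the weights, as the patched script now does
    plt.figure(figsize=(30, 22))
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss', fontsize=20)
    plt.ylabel('loss', fontsize=16)
    plt.xlabel('epoch', fontsize=16)
    plt.legend(['train', 'validation'], loc='upper left', fontsize=16)
    plt.savefig('data/example_model.png')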