
Merge branch 'release/v0.2.9'

Jérôme BUISINE, 4 years ago
Parent commit: cd116eb4b3
2 files changed, 62 insertions and 58 deletions
  1. generate/generate_dataset.py (+6, -11)
  2. train_model.py (+56, -47)

+ 6 - 11
generate/generate_dataset.py

@@ -29,7 +29,7 @@ min_max_filename        = cfg.min_max_filename_extension
 
 # define all scenes values
 scenes_list             = cfg.scenes_names
-scenes_indexes          = cfg.scenes_indices
+scenes_indices          = cfg.scenes_indices
 dataset_path            = cfg.dataset_path
 zones                   = cfg.zones_indices
 seuil_expe_filename     = cfg.seuil_expe_filename
@@ -39,10 +39,10 @@ output_data_folder      = cfg.output_data_folder
 
 generic_output_file_svd = '_random.csv'
 
-def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_zones = 4, _random=0):
+def generate_data_model(_filename, _transformations, _scenes_list, _nb_zones = 4, _random=0):
 
     output_train_filename = _filename + ".train"
-    output_test_filename = _filename + ".test"
+    output_test_filename = _filename + ".val"
 
     if not '/' in output_train_filename:
         raise Exception("Please select filename with directory path to save data. Example : data/dataset")
@@ -187,7 +187,7 @@ def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_
                     
                     line = line + '\n'
 
-                    if id_zone < _nb_zones and folder_scene in _scenes:
+                    if id_zone < _nb_zones:
                         train_file_data.append(line)
                     else:
                         test_file_data.append(line)
@@ -225,7 +225,6 @@ def main():
                                   default="100, 100")
     parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
     parser.add_argument('--nb_zones', type=int, help='Number of zones to use for training data set', choices=list(range(1, 17)))
-    parser.add_argument('--renderer', type=str, help='Renderer choice in order to limit scenes used', choices=cfg.renderer_choices, default='all')
     parser.add_argument('--random', type=int, help='Data will be randomly filled or not', choices=[0, 1])
 
     args = parser.parse_args()
@@ -236,7 +235,6 @@ def main():
     p_scenes   = args.scenes.split(',')
     p_size     = args.size # not necessary to split here
     p_nb_zones = args.nb_zones
-    p_renderer = args.renderer
     p_random   = args.random
 
     # create list of Transformation
@@ -252,10 +250,7 @@ def main():
     if transformations[0].getName() == 'static':
         raise ValueError("The first transformation in list cannot be static")
 
-    # list all possibles choices of renderer
-    scenes_list = dt.get_renderer_scenes_names(p_renderer)
-    scenes_indices = dt.get_renderer_scenes_indices(p_renderer)
-
+    # Update: the renderer-specific scenes list is no longer used
     # getting scenes from indexes user selection
     scenes_selected = []
 
@@ -264,7 +259,7 @@ def main():
         scenes_selected.append(scenes_list[index])
 
     # create database using img folder (generate first time only)
-    generate_data_model(scenes_list, p_filename, transformations, scenes_selected, p_nb_zones, p_random)
+    generate_data_model(p_filename, transformations, scenes_selected, p_nb_zones, p_random)
 
 if __name__== "__main__":
     main()

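Side note (an addition for readers, not part of the commit): with the renderer filter removed, the only rule deciding whether a zone's line lands in the `.train` file or in the new `.val` file is `id_zone < _nb_zones`. The standalone Python sketch below illustrates just that rule; the names `write_split` and `zone_lines` are hypothetical simplifications of the real `generate_data_model`.

def write_split(prefix, zone_lines, nb_zones=4):
    # zone_lines: list of (zone_index, csv_line) pairs, lines already formatted
    train_lines, val_lines = [], []
    for zone_index, line in zone_lines:
        # same rule as the diff: the first `nb_zones` zones feed the train set,
        # every remaining zone feeds the validation set
        if zone_index < nb_zones:
            train_lines.append(line)
        else:
            val_lines.append(line)

    with open(prefix + '.train', 'w') as f:
        f.writelines(train_lines)
    with open(prefix + '.val', 'w') as f:
        f.writelines(val_lines)

# example: 16 zones, 10 used for training, 6 kept aside for validation
write_split('dataset_example', [(i, str(i % 2) + ';zone' + str(i) + '\n') for i in range(16)], nb_zones=10)
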
+ 56 - 47
train_model.py

@@ -24,19 +24,14 @@ import custom_config as cfg
 
 def main():
 
-    # default keras configuration
-    #config = tf.ConfigProto( device_count = {'GPU': 1 , 'CPU': 8}) 
-    #sess = tf.Session(config=config) 
-    #keras.backend.set_session(sess)
-
     parser = argparse.ArgumentParser(description="Train Keras model and save it into .json file")
 
-    parser.add_argument('--data', type=str, help='dataset filename prefix (without .train and .test)', required=True)
+    parser.add_argument('--data', type=str, help='dataset filename prefix (without .train and .val)', required=True)
     parser.add_argument('--output', type=str, help='output file name desired for model (without .json extension)', required=True)
     parser.add_argument('--tl', type=int, help='use or not of transfer learning (`VGG network`)', default=0, choices=[0, 1])
     parser.add_argument('--batch_size', type=int, help='batch size used as model input', default=cfg.keras_batch)
     parser.add_argument('--epochs', type=int, help='number of epochs used for training model', default=cfg.keras_epochs)
-    parser.add_argument('--val_size', type=float, help='percent of validation data during training process', default=cfg.val_dataset_size)
+    #parser.add_argument('--val_size', type=float, help='percent of validation data during training process', default=cfg.val_dataset_size)
 
 
     args = parser.parse_args()
@@ -46,7 +41,7 @@ def main():
     p_tl          = args.tl
     p_batch_size  = args.batch_size
     p_epochs      = args.epochs
-    p_val_size    = args.val_size
+    #p_val_size    = args.val_size
     initial_epoch = 0
         
     ########################
@@ -54,14 +49,14 @@ def main():
     ########################
     print("Preparing data...")
     dataset_train = pd.read_csv(p_data_file + '.train', header=None, sep=";")
-    dataset_test = pd.read_csv(p_data_file + '.test', header=None, sep=";")
+    dataset_val = pd.read_csv(p_data_file + '.val', header=None, sep=";")
 
     print("Train set size : ", len(dataset_train))
-    print("Test set size : ", len(dataset_test))
+    print("val set size : ", len(dataset_val))
 
     # default first shuffle of data
     dataset_train = shuffle(dataset_train)
-    dataset_test = shuffle(dataset_test)
+    dataset_val = shuffle(dataset_val)
 
     print("Reading all images data...")
 
@@ -87,40 +82,46 @@ def main():
     # `:` is the separator used for getting each img path
     if n_channels > 1:
         dataset_train[1] = dataset_train[1].apply(lambda x: [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in x.split('::')])
-        dataset_test[1] = dataset_test[1].apply(lambda x: [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in x.split('::')])
+        dataset_val[1] = dataset_val[1].apply(lambda x: [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in x.split('::')])
     else:
         dataset_train[1] = dataset_train[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE))
-        dataset_test[1] = dataset_test[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE))
+        dataset_val[1] = dataset_val[1].apply(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE))
 
     # reshape array data
     dataset_train[1] = dataset_train[1].apply(lambda x: np.array(x).reshape(input_shape))
-    dataset_test[1] = dataset_test[1].apply(lambda x: np.array(x).reshape(input_shape))
+    dataset_val[1] = dataset_val[1].apply(lambda x: np.array(x).reshape(input_shape))
 
     # get dataset with equal number of classes occurences
     noisy_df_train = dataset_train[dataset_train.ix[:, 0] == 1]
     not_noisy_df_train = dataset_train[dataset_train.ix[:, 0] == 0]
     nb_noisy_train = len(noisy_df_train.index)
 
-    noisy_df_test = dataset_test[dataset_test.ix[:, 0] == 1]
-    not_noisy_df_test = dataset_test[dataset_test.ix[:, 0] == 0]
-    nb_noisy_test = len(noisy_df_test.index)
+    noisy_df_val = dataset_val[dataset_val.ix[:, 0] == 1]
+    not_noisy_df_val = dataset_val[dataset_val.ix[:, 0] == 0]
+    nb_noisy_val = len(noisy_df_val.index)
 
     final_df_train = pd.concat([not_noisy_df_train[0:nb_noisy_train], noisy_df_train])
-    final_df_test = pd.concat([not_noisy_df_test[0:nb_noisy_test], noisy_df_test])
+    final_df_val = pd.concat([not_noisy_df_val[0:nb_noisy_val], noisy_df_val])
 
     # shuffle data another time
     final_df_train = shuffle(final_df_train)
-    final_df_test = shuffle(final_df_test)
+    final_df_val = shuffle(final_df_val)
 
     final_df_train_size = len(final_df_train.index)
-    final_df_test_size = len(final_df_test.index)
+    final_df_val_size = len(final_df_val.index)
+
+    validation_split = final_df_val_size / (final_df_train_size + final_df_val_size)
+    print("----------------------------------------------------------")
+    print("Validation size is based of `.val` content")
+    print("Validation split is now set at", validation_split)
+    print("----------------------------------------------------------")
 
     # use of the whole data set for training
     x_dataset_train = final_df_train.ix[:,1:]
-    x_dataset_test = final_df_test.ix[:,1:]
+    x_dataset_val = final_df_val.ix[:,1:]
 
     y_dataset_train = final_df_train.ix[:,0]
-    y_dataset_test = final_df_test.ix[:,0]
+    y_dataset_val = final_df_val.ix[:,0]
 
     x_data_train = []
     for item in x_dataset_train.values:
@@ -129,18 +130,17 @@ def main():
 
     x_data_train = np.array(x_data_train)
 
-    x_data_test = []
-    for item in x_dataset_test.values:
+    x_data_val = []
+    for item in x_dataset_val.values:
         #print("Item is here", item)
-        x_data_test.append(item[0])
-
-    x_data_test = np.array(x_data_test)
+        x_data_val.append(item[0])
 
+    x_data_val = np.array(x_data_val)
 
     print("End of loading data..")
 
     print("Train set size (after balancing) : ", final_df_train_size)
-    print("Test set size (after balancing) : ", final_df_test_size)
+    print("val set size (after balancing) : ", final_df_val_size)
 
     #######################
     # 2. Getting model
@@ -167,14 +167,21 @@ def main():
         last_backup = backups[-1]
         last_epoch = int(last_backup.split('__')[1].replace('.hdf5', ''))
         initial_epoch = last_epoch
-        print("Previous backup model found.. ")
-        print("Restart from epoch ", last_epoch)
+        print("-------------------------------------------------")
+        print("Previous backup model found with already", last_epoch, "done...")
+        print("Resuming from epoch", str(last_epoch + 1))
+        print("-------------------------------------------------")
 
-    model.fit(x_data_train, y_dataset_train.values, validation_split=p_val_size, initial_epoch=initial_epoch, epochs=p_epochs, batch_size=p_batch_size, callbacks=callbacks_list)
+    # concatenate train and validation data (the `validation_split` param separates them again inside the Keras model)
+    y_data = np.concatenate([y_dataset_train.values, y_dataset_val.values])
+    x_data = np.concatenate([x_data_train, x_data_val])
 
-    score = model.evaluate(x_data_test, y_dataset_test, batch_size=p_batch_size)
+    # `validation_split` uses the last fraction of the data, so the appended `.val` data is what actually validates the model
+    model.fit(x_data, y_data, validation_split=validation_split, initial_epoch=initial_epoch, epochs=p_epochs, batch_size=p_batch_size, callbacks=callbacks_list)
 
-    print("Accuracy score on test dataset ", score)
+    score = model.evaluate(x_data_val, y_dataset_val, batch_size=p_batch_size)
+
+    print("Accuracy score on val dataset ", score)
 
     if not os.path.exists(cfg.saved_models_folder):
         os.makedirs(cfg.saved_models_folder)
@@ -191,25 +198,25 @@ def main():
 
     # Get results obtained from model
     y_train_prediction = model.predict(x_data_train)
-    y_test_prediction = model.predict(x_data_test)
+    y_val_prediction = model.predict(x_data_val)
 
     y_train_prediction = [1 if x > 0.5 else 0 for x in y_train_prediction]
-    y_test_prediction = [1 if x > 0.5 else 0 for x in y_test_prediction]
+    y_val_prediction = [1 if x > 0.5 else 0 for x in y_val_prediction]
 
     acc_train_score = accuracy_score(y_dataset_train, y_train_prediction)
-    acc_test_score = accuracy_score(y_dataset_test, y_test_prediction)
+    acc_val_score = accuracy_score(y_dataset_val, y_val_prediction)
 
     f1_train_score = f1_score(y_dataset_train, y_train_prediction)
-    f1_test_score = f1_score(y_dataset_test, y_test_prediction)
+    f1_val_score = f1_score(y_dataset_val, y_val_prediction)
 
     recall_train_score = recall_score(y_dataset_train, y_train_prediction)
-    recall_test_score = recall_score(y_dataset_test, y_test_prediction)
+    recall_val_score = recall_score(y_dataset_val, y_val_prediction)
 
     pres_train_score = precision_score(y_dataset_train, y_train_prediction)
-    pres_test_score = precision_score(y_dataset_test, y_test_prediction)
+    pres_val_score = precision_score(y_dataset_val, y_val_prediction)
 
     roc_train_score = roc_auc_score(y_dataset_train, y_train_prediction)
-    roc_test_score = roc_auc_score(y_dataset_test, y_test_prediction)
+    roc_val_score = roc_auc_score(y_dataset_val, y_val_prediction)
 
     # save model performance
     if not os.path.exists(cfg.results_information_folder):
@@ -224,14 +231,16 @@ def main():
             
     # add information into file
     with open(perf_file_path, 'a') as f:
-        line = p_output + ';' + str(len(dataset_train)) + ';' + str(len(dataset_test)) + ';' \
-                        + str(final_df_train_size) + ';' + str(final_df_test_size) + ';' \
-                        + str(acc_train_score) + ';' + str(acc_test_score) + ';' \
-                        + str(f1_train_score) + ';' + str(f1_test_score) + ';' \
-                        + str(recall_train_score) + ';' + str(recall_test_score) + ';' \
-                        + str(pres_train_score) + ';' + str(pres_test_score) + ';' \
-                        + str(roc_train_score) + ';' + str(roc_test_score) + '\n'
+        line = p_output + ';' + str(len(dataset_train)) + ';' + str(len(dataset_val)) + ';' \
+                        + str(final_df_train_size) + ';' + str(final_df_val_size) + ';' \
+                        + str(acc_train_score) + ';' + str(acc_val_score) + ';' \
+                        + str(f1_train_score) + ';' + str(f1_val_score) + ';' \
+                        + str(recall_train_score) + ';' + str(recall_val_score) + ';' \
+                        + str(pres_train_score) + ';' + str(pres_val_score) + ';' \
+                        + str(roc_train_score) + ';' + str(roc_val_score) + '\n'
         f.write(line)
 
+    print("You can now run your model with your own `test` dataset")
+
 if __name__== "__main__":
     main()
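
Final note (an addition for readers, not part of the commit): since the balanced `.val` data is appended after the balanced `.train` data, the `validation_split` value computed as `final_df_val_size / (final_df_train_size + final_df_val_size)` makes Keras hold out exactly the `.val` samples, because `fit()` slices the last fraction of the arrays before any shuffling. A minimal, self-contained check of that arithmetic (the sizes are made up):

import numpy as np

n_train, n_val = 900, 100                     # hypothetical balanced set sizes
validation_split = n_val / (n_train + n_val)  # 0.1, as computed in the diff

x_train = np.zeros((n_train, 8))
x_val = np.ones((n_val, 8))
x_data = np.concatenate([x_train, x_val])     # train first, `.val` data last

# Keras keeps x_data[int(len(x_data) * (1 - validation_split)):] for validation
cut = int(len(x_data) * (1 - validation_split))
assert (x_data[cut:] == x_val).all()          # the held-out slice is the `.val` data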