
Add attribute choice optimization scripts

Jérôme BUISINE 10 months ago
commit c70a55e2cc

+ 6 - 3
custom_config.py

@@ -4,14 +4,17 @@ from modules.config.attributes_config import *
 context_vars = vars()
 
 # folders
-logs_folder                        = 'logs'
+logs_folder                             = 'logs'
 
 ## min_max_custom_folder           = 'custom_norm'
 ## correlation_indices_folder      = 'corr_indices'
 
 # variables
-features_choices_labels         = ['filters_statistics']
-optimization_result_filename    = 'optimization_comparisons.csv'
+features_choices_labels                 = ['filters_statistics']
+optimization_filters_result_filename    = 'optimization_comparisons_filters.csv'
+optimization_attributes_result_filename = 'optimization_comparisons_attributes.csv'
+
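+# reduction strategies: optimize the set of whole filters or of individual attributes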
+filter_reduction_choices                = ['attributes', 'filters']
 
 ## models_names_list               = ["svm_model","ensemble_model","ensemble_model_v2","deep_keras"]
 ## normalization_choices           = ['svd', 'svdn', 'svdne']

+ 14 - 4
data_processing/generateAndTrain_maxwell_custom_optimization.sh

@@ -21,9 +21,19 @@ if [ -z "$3" ]
     exit 1
 fi
 
+if [ -z "$4" ]
+  then
+    echo "No argument supplied"
+    echo "Use of filters or attributes"
+    exit 1
+fi
+
+
 size=$1
 feature=$2
 data=$3
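+# fourth argument: reduction strategy, 'attributes' or 'filters' (selects find_best_${filter}.py)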
+filter=$4
+
 
 # selection of four scenes (only maxwell)
 scenes="A, D, G, H"
@@ -36,9 +46,9 @@ for nb_zones in {4,6,8,10,12}; do
     for mode in {"svd","svdn","svdne"}; do
         for model in {"svm_model","ensemble_model","ensemble_model_v2"}; do
 
-            FILENAME="data/${model}_N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${feature}_${mode}_${data}"
-            MODEL_NAME="${model}_N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${feature}_${mode}_${data}"
-            CUSTOM_MIN_MAX_FILENAME="N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${feature}_${mode}_${data}_min_max"
+            FILENAME="data/${model}_N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${feature}_${mode}_${data}_${filter}"
+            MODEL_NAME="${model}_N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${feature}_${mode}_${data}_${filter}"
+            CUSTOM_MIN_MAX_FILENAME="N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${feature}_${mode}_${data}_${filter}_min_max"
 
             echo $FILENAME
 
@@ -48,7 +58,7 @@ for nb_zones in {4,6,8,10,12}; do
                 echo "${MODEL_NAME} results already generated..."
             else
                 python generate/generate_data_model_random_${data}.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --feature ${feature} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
-                python find_best_attributes.py --data ${FILENAME} --choice ${model}
+                python find_best_${filter}.py --data ${FILENAME} --choice ${model}
             fi
         done
     done

+ 3 - 4
find_best_attributes.py

@@ -47,9 +47,9 @@ def validator(solution):
 
     return True
 
-# init solution (13 filters)
+# init solution (26 attributes)
 def init():
-    return BinarySolution([], 13).random(validator)
+    return BinarySolution([], number_of_values).random(validator)
 
 def loadDataset(filename):
 
@@ -117,8 +117,7 @@ def main():
 
         for index, value in enumerate(solution.data): 
             if value == 1: 
-                indices.append(index*2) 
-                indices.append(index*2+1) 
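+                # attributes version: one bit maps to exactly one data column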
+                indices.append(index) 
 
         # keep only selected filters from solution
         x_train_filters = x_train.iloc[:, indices]

+ 160 - 0
find_best_filters.py

@@ -0,0 +1,160 @@
+# main imports
+import os
+import sys
+import argparse
+import pandas as pd
+import numpy as np
+import logging
+
+# model imports
+from sklearn.model_selection import train_test_split
+from sklearn.model_selection import GridSearchCV
+from sklearn.linear_model import LogisticRegression
+from sklearn.ensemble import RandomForestClassifier, VotingClassifier
+
+import sklearn.svm as svm
+from sklearn.utils import shuffle
+from sklearn.externals import joblib
+from sklearn.metrics import roc_auc_score
+from sklearn.model_selection import cross_val_score
+
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+import models as mdl
+
+from optimization.algorithms.IteratedLocalSearch import IteratedLocalSearch as ILS
+from optimization.solutions.BinarySolution import BinarySolution
+
+from optimization.operators.mutators.SimpleMutation import SimpleMutation
+from optimization.operators.mutators.SimpleBinaryMutation import SimpleBinaryMutation
+from optimization.operators.crossovers.SimpleCrossover import SimpleCrossover
+
+from optimization.operators.policies.RandomPolicy import RandomPolicy
+
+# variables and parameters
+models_list         = cfg.models_names_list
+number_of_values    = 26    # 13 filters, each providing 2 data columns
+ils_iteration       = 10000
+ls_iteration        = 20
+
+# default validator
+def validator(solution):
+
+    if list(solution.data).count(1) < 5:
+        return False
+
+    return True
+
+# init solution (13 filters)
+def init():
+    return BinarySolution([], 13).random(validator)
+
+def loadDataset(filename):
+
+    ########################
+    # 1. Get and prepare data
+    ########################
+    dataset_train = pd.read_csv(filename + '.train', header=None, sep=";")
+    dataset_test = pd.read_csv(filename + '.test', header=None, sep=";")
+
+    # default first shuffle of data
+    dataset_train = shuffle(dataset_train)
+    dataset_test = shuffle(dataset_test)
+
+    # get dataset with equal number of class occurrences
+    noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 1]
+    not_noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 0]
+    nb_noisy_train = len(noisy_df_train.index)
+
+    noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 1]
+    not_noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 0]
+    nb_noisy_test = len(noisy_df_test.index)
+
+    final_df_train = pd.concat([not_noisy_df_train[0:nb_noisy_train], noisy_df_train])
+    final_df_test = pd.concat([not_noisy_df_test[0:nb_noisy_test], noisy_df_test])
+
+    # shuffle data another time
+    final_df_train = shuffle(final_df_train)
+    final_df_test = shuffle(final_df_test)
+
+    # use of the whole data set for training
+    x_dataset_train = final_df_train.iloc[:,1:]
+    x_dataset_test = final_df_test.iloc[:,1:]
+
+    y_dataset_train = final_df_train.iloc[:,0]
+    y_dataset_test = final_df_test.iloc[:,0]
+
+    return x_dataset_train, y_dataset_train, x_dataset_test, y_dataset_test
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Train and find best filters to use for model")
+
+    parser.add_argument('--data', type=str, help='dataset filename prefix (without .train and .test)')
+    parser.add_argument('--choice', type=str, help='model choice from list of choices', choices=models_list)
+
+    args = parser.parse_args()
+
+    p_data_file = args.data
+    p_choice    = args.choice
+
+    # load data from file
+    x_train, y_train, x_test, y_test = loadDataset(p_data_file)
+
+    # create `logs` folder if necessary
+    if not os.path.exists(cfg.logs_folder):
+        os.makedirs(cfg.logs_folder)
+
+    logging.basicConfig(format='%(asctime)s %(message)s', filename=os.path.join(cfg.logs_folder, '%s.log' % p_data_file.split('/')[-1]), level=logging.DEBUG)
+
+    # define evaluate function here (need of data information)
+    def evaluate(solution):
+
+        # get indices of filters data to use (filters selection from solution)
+        indices = []
+
+        for index, value in enumerate(solution.data): 
+            if value == 1: 
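+                # filter at position 'index' owns data columns 2*index and 2*index+1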
+                indices.append(index*2) 
+                indices.append(index*2+1) 
+
+        # keep only selected filters from solution
+        x_train_filters = x_train.iloc[:, indices]
+        y_train_filters = y_train
+        x_test_filters = x_test.iloc[:, indices]
+
+        model = mdl.get_trained_model(p_choice, x_train_filters, y_train_filters)
+        
+        y_test_model = model.predict(x_test_filters)
+        test_roc_auc = roc_auc_score(y_test, y_test_model)
+
+        return test_roc_auc
+
+    # prepare optimization algorithm
+    updators = [SimpleBinaryMutation(), SimpleMutation(), SimpleCrossover()]
+    policy = RandomPolicy(updators)
+
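+    # ILS scheme (as generally defined; implementation details assumed): local
+    # searches of ls_iteration steps, restarted from perturbations of the best
+    # solution found, repeated for ils_iteration iterations overall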
+    algo = ILS(init, evaluate, updators, policy, validator, True)
+
+    bestSol = algo.run(ils_iteration, ls_iteration)
+
+    # print best solution found
+    print("Found ", bestSol)
+
+    # save model information into .csv file
+    if not os.path.exists(cfg.results_information_folder):
+        os.makedirs(cfg.results_information_folder)
+
+    filename_path = os.path.join(cfg.results_information_folder, cfg.optimization_filters_result_filename)
+
+    line_info = p_data_file + ';' + str(ils_iteration) + ';' + str(ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(bestSol.fitness())
+    with open(filename_path, 'a') as f:
+        f.write(line_info + '\n')
+    
+    print('Result saved into %s' % filename_path)
+
+
+if __name__ == "__main__":
+    main()
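+
+# Example invocation (a sketch; the dataset prefix is illustrative and is normally
+# built by data_processing/generateAndTrain_maxwell_custom_optimization.sh):
+#
+#   python find_best_filters.py --data data/svm_model_N26_B0_E26_nb_zones_4_filters_statistics_svd_all_filters --choice svm_model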

+ 135 - 0
prediction/predict_noisy_image_svd_attributes.py

@@ -0,0 +1,135 @@
+# main imports
+import sys, os, argparse, json
+import numpy as np
+
+# models imports
+from keras.models import model_from_json
+from sklearn.externals import joblib
+
+# image processing imports
+from ipfml import processing, utils
+from PIL import Image
+
+# modules imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+from data_attributes import get_image_features
+
+# variables and parameters
+path                  = cfg.dataset_path
+min_max_ext           = cfg.min_max_filename_extension
+features_choices      = cfg.features_choices_labels
+normalization_choices = cfg.normalization_choices
+
+custom_min_max_folder = cfg.min_max_custom_folder
+
+def main():
+
+    # getting all params
+    parser = argparse.ArgumentParser(description="Script which detects if an image is noisy or not using specific model")
+
+    parser.add_argument('--image', type=str, help='Image path')
+    parser.add_argument('--solution', type=str, help='Data of solution to specify filters to use')
+    parser.add_argument('--model', type=str, help='.joblib or .json file (sklearn or keras model)')
+    parser.add_argument('--mode', type=str, help='Kind of normalization level wished', choices=normalization_choices)
+    parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
+    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
+
+    args = parser.parse_args()
+
+    p_img_file   = args.image
+    p_model_file = args.model
+    p_solution   = list(map(int, args.solution.split(' ')))
+    p_mode       = args.mode
+    p_feature    = args.feature
+    p_custom     = args.custom
+
+    if '.joblib' in p_model_file:
+        kind_model = 'sklearn'
+    elif '.json' in p_model_file:
+        kind_model = 'keras'
+    else:
+        sys.exit('Unsupported model extension (expected .joblib or .json)')
+
+    if kind_model == 'sklearn':
+        # load of model file
+        model = joblib.load(p_model_file)
+
+    if kind_model == 'keras':
+        with open(p_model_file, 'r') as f:
+            json_model = json.load(f)
+            model = model_from_json(json_model)
+            model.load_weights(p_model_file.replace('.json', '.h5'))
+
+            model.compile(loss='binary_crossentropy',
+                        optimizer='adam',
+                        metrics=['accuracy'])
+
+    # load image
+    img = Image.open(p_img_file)
+
+    data = get_image_features(p_feature, img)
+
+    # get indices of attribute columns to use (attribute selection from solution)
+    indices = []
+
+    for index, value in enumerate(p_solution): 
+        if value == 1: 
+            indices.append(index) 
+
+    # check if custom min max file is used
+    if p_custom:
+        
+        test_data = data[indices]
+        
+        if p_mode == 'svdne':
+
+            # set min_max_filename if custom use
+            min_max_file_path = os.path.join(custom_min_max_folder, p_custom)
+
+            # need to read min_max_file
+            with open(min_max_file_path, 'r') as f:
+                min_val = float(f.readline().replace('\n', ''))
+                max_val = float(f.readline().replace('\n', ''))
+
+            test_data = utils.normalize_arr_with_range(test_data, min_val, max_val)
+
+        if p_mode == 'svdn':
+            test_data = utils.normalize_arr(test_data)
+
+    else:
+
+        # check mode to normalize data
+        if p_mode == 'svdne':
+
+            # set min_max_filename if custom use
+            min_max_file_path = os.path.join(path, p_feature + min_max_ext)
+
+            # need to read min_max_file
+            with open(min_max_file_path, 'r') as f:
+                min_val = float(f.readline().replace('\n', ''))
+                max_val = float(f.readline().replace('\n', ''))
+
+            l_values = utils.normalize_arr_with_range(data, min_val, max_val)
+
+        elif p_mode == 'svdn':
+            l_values = utils.normalize_arr(data)
+        else:
+            l_values = data
+
+        test_data = l_values[indices]
+
+
+    # get prediction of model
+    if kind_model == 'sklearn':
+        prediction = model.predict([test_data])[0]
+
+    if kind_model == 'keras':
+        test_data = np.asarray(test_data).reshape(1, len(test_data), 1)
+        prediction = model.predict_classes([test_data])[0][0]
+
+    # output expected from others scripts
+    print(prediction)
+
+if __name__== "__main__":
+    main()
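+
+# Example invocation (a sketch mirroring the command line built in
+# prediction/predict_seuil_expe_maxwell_curve_filters.py; paths, custom file and
+# solution bits are illustrative):
+#
+#   python prediction/predict_noisy_image_svd_attributes.py --image block.png \
+#       --solution '1 0 1 1 0 0 1 0 1 1 0 1 0 0 1 1 0 1 0 1 1 0 0 1 0 1' \
+#       --model saved_models/my_model.joblib --mode svdne --feature filters_statistics \
+#       --custom my_min_max_file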

+ 3 - 1
prediction/predict_seuil_expe_maxwell_curve_filters.py

@@ -49,6 +49,7 @@ def main():
     parser.add_argument('--feature', type=str, help='feature data choice', choices=features_choices)
     #parser.add_argument('--limit_detection', type=int, help='Specify number of same prediction to stop threshold prediction', default=2)
     parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
+    parser.add_argument('--filter', type=str, help='filter reduction solution used', choices=cfg.filter_reduction_choices)
 
     args = parser.parse_args()
 
@@ -59,6 +60,7 @@ def main():
     p_feature    = args.feature
     #p_limit      = args.limit
     p_custom     = args.custom
+    p_filter     = args.filter
 
     scenes = os.listdir(scenes_path)
     scenes = [s for s in scenes if s in maxwell_scenes]
@@ -122,7 +124,7 @@ def main():
                         tmp_file_path = tmp_filename.replace('__model__',  p_model_file.split('/')[-1].replace('.joblib', '_'))
                         block.save(tmp_file_path)
 
-                        python_cmd_line = "python prediction/predict_noisy_image_svd_filters.py --image {0} --solution '{1}' --model {2} --mode {3} --feature {4}"
+                        python_cmd_line = "python prediction/predict_noisy_image_svd_" + p_filter + ".py --image {0} --solution '{1}' --model {2} --mode {3} --feature {4}"
                         python_cmd = python_cmd_line.format(tmp_file_path, p_solution, p_model_file, p_mode, p_feature) 
 
                         # specify use of custom file for min max normalization

+ 38 - 0
run/runAll_maxwell_custom_optimization_attributes.sh

@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# erase "results/optimization_comparisons_attributes.csv" file and write new header
+file_path='results/optimization_comparisons_attributes.csv'
+list="all, center, split"
+
+if [ -z "$1" ]
+  then
+    echo "No argument supplied"
+    echo "Need argument from [${list}]"
+    exit 1
+fi
+
+if [[ "$1" =~ ^(all|center|split)$ ]]; then
+    echo "$1 is in the list"
+else
+    echo "$1 is not in the list"
+fi
+
+data=$1
+erased=$2
+
+if [ "${erased}" == "Y" ]; then
+    echo "Previous data file erased..."
+    rm ${file_path}
+    mkdir -p results
+    touch ${file_path}
+
+    # add of header
+    echo 'data_file; ils_iteration; ls_iteration; best_solution; nb_attributes; fitness (roc test);' >> ${file_path}
+
+fi
+
+size=26
+feature="filters_statistics"
+filter="attributes"
+
+bash data_processing/generateAndTrain_maxwell_custom_optimization.sh ${size} ${feature} ${data} ${filter}
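+
+# Typical launch (a sketch; the second argument 'Y' erases any previous results file):
+#
+#   bash run/runAll_maxwell_custom_optimization_attributes.sh all Y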

+ 3 - 2
run/runAll_maxwell_custom_optimization.sh

@@ -1,7 +1,7 @@
 #! bin/bash
 
 # erase "results/optimization_comparisons.csv" file and write new header
-file_path='results/optimization_comparisons.csv'
+file_path='results/optimization_comparisons_filters.csv'
 list="all, center, split"
 
 if [ -z "$1" ]
@@ -33,5 +33,6 @@ fi
 
 size=26
 feature="filters_statistics"
+filter="filters"
 
-bash data_processing/generateAndTrain_maxwell_custom_optimization.sh ${size} ${feature} ${data}
+bash data_processing/generateAndTrain_maxwell_custom_optimization.sh ${size} ${feature} ${data} ${filter}

+ 3 - 3
simulation/run_maxwell_simulation_filters_statistics.sh

@@ -27,12 +27,12 @@ for nb_zones in {4,6,8,10,12}; do
                 echo "Run simulation for ${MODEL_NAME}..."
 
                 # Use of already generated model
-                # python generate/generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --feature ${feature} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
-                # python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
+                python generate/generate_data_model_random.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --feature ${feature} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 40 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
 
                 python prediction/predict_seuil_expe_maxwell_curve.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --feature ${feature} --custom ${CUSTOM_MIN_MAX_FILENAME}
 
-                python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --feature ${feature}
+                # python others/save_model_result_in_md_maxwell.py --interval "0,${size}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --feature ${feature}
             fi
         done
     done

+ 18 - 8
simulation/run_maxwell_simulation_filters_statistics_all.sh

@@ -1,23 +1,33 @@
 #! bin/bash
 
 # file which contains model names we want to use for simulation
-simulate_models="simulate_models_all.csv"
+list="attributes, filters"
+
+if [ -z "$1" ]
+  then
+    echo "No argument supplied"
+    echo "Need argument from [${list}]"
+    exit 1
+fi
+
 
 # selection of four scenes (only maxwell)
 scenes="A, D, G, H"
-
 size="26"
-
 feature="filters_statistics"
+filter=$1
+
+simulate_models="simulate_models_${filter}_all.csv"
+
 
 for nb_zones in {4,6,8,10,12}; do
     for mode in {"svd","svdn","svdne"}; do
         for model in {"svm_model","ensemble_model","ensemble_model_v2"}; do
             for data in {"all","center","split"}; do
 
-                FILENAME="data/${model}_N${size}_B0_E${size}_nb_zones_${nb_zones}_${feature}_${mode}_${data}"
-                MODEL_NAME="${model}_N${size}_B0_E${size}_nb_zones_${nb_zones}_${feature}_${mode}_${data}"
-                CUSTOM_MIN_MAX_FILENAME="N${size}_B0_E${size}_nb_zones_${nb_zones}_${feature}_${mode}_${data}_min_max"
+                FILENAME="data/${model}_N${size}_B0_E${size}_nb_zones_${nb_zones}_${feature}_${mode}_${data}_${filter}"
+                MODEL_NAME="${model}_N${size}_B0_E${size}_nb_zones_${nb_zones}_${feature}_${mode}_${data}_${filter}"
+                CUSTOM_MIN_MAX_FILENAME="N${size}_B0_E${size}_nb_zones_${nb_zones}_${feature}_${mode}_${data}_${filter}_min_max"
 
                 # only compute if necessary (perhaps server will fall.. Just in case)
                 if grep -q "${FILENAME}" "${simulate_models}"; then
@@ -34,9 +44,9 @@ for nb_zones in {4,6,8,10,12}; do
 
                     # Use of already generated model
                     python generate/generate_data_model_random_${data}.py --output ${FILENAME} --interval "0,${size}" --kind ${mode} --feature ${feature} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
-                    python train_model_filters.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model} --solution "${SOLUTION}"
+                    python train_model_${filter}.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model} --solution "${SOLUTION}"
 
-                    python prediction/predict_seuil_expe_maxwell_curve_filters.py --solution "${SOLUTION}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --feature ${feature} --custom ${CUSTOM_MIN_MAX_FILENAME}
+                    python prediction/predict_seuil_expe_maxwell_curve_filters.py --solution "${SOLUTION}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --feature ${feature} --custom ${CUSTOM_MIN_MAX_FILENAME} --filter ${filter}
 
                     #python others/save_model_result_in_md_maxwell.py --solution "${SOLUTION}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --feature ${feature}
                 fi

+ 160 - 0
train_model_attributes.py

@@ -0,0 +1,160 @@
+# main imports
+import numpy as np
+import pandas as pd
+import sys, os, argparse
+
+# models imports
+from sklearn.model_selection import train_test_split
+from sklearn.model_selection import GridSearchCV
+from sklearn.linear_model import LogisticRegression
+from sklearn.ensemble import RandomForestClassifier, VotingClassifier
+
+import sklearn.svm as svm
+from sklearn.utils import shuffle
+from sklearn.externals import joblib
+from sklearn.metrics import accuracy_score, f1_score
+from sklearn.model_selection import cross_val_score
+
+# modules and config imports
+sys.path.insert(0, '') # trick to enable import of main folder module
+
+import custom_config as cfg
+import models as mdl
+
+# variables and parameters
+saved_models_folder = cfg.saved_models_folder
+models_list         = cfg.models_names_list
+
+current_dirpath     = os.getcwd()
+output_model_folder = os.path.join(current_dirpath, saved_models_folder)
+
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Train SKLearn model and save it into .joblib file")
+
+    parser.add_argument('--data', type=str, help='dataset filename prefix (without .train and .test)')
+    parser.add_argument('--output', type=str, help='output file name desired for model (without .joblib extension)')
+    parser.add_argument('--choice', type=str, help='model choice from list of choices', choices=models_list)
+    parser.add_argument('--solution', type=str, help='Data of solution to specify filters to use')
+
+    args = parser.parse_args()
+
+    p_data_file = args.data
+    p_output    = args.output
+    p_choice    = args.choice
+    p_solution  = list(map(int, args.solution.split(' ')))
+
+    if not os.path.exists(output_model_folder):
+        os.makedirs(output_model_folder)
+
+    ########################
+    # 1. Get and prepare data
+    ########################
+    dataset_train = pd.read_csv(p_data_file + '.train', header=None, sep=";")
+    dataset_test = pd.read_csv(p_data_file + '.test', header=None, sep=";")
+
+    # default first shuffle of data
+    dataset_train = shuffle(dataset_train)
+    dataset_test = shuffle(dataset_test)
+
+    # get dataset with equal number of class occurrences
+    noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 1]
+    not_noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 0]
+    nb_noisy_train = len(noisy_df_train.index)
+
+    noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 1]
+    not_noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 0]
+    nb_noisy_test = len(noisy_df_test.index)
+
+    final_df_train = pd.concat([not_noisy_df_train[0:nb_noisy_train], noisy_df_train])
+    final_df_test = pd.concat([not_noisy_df_test[0:nb_noisy_test], noisy_df_test])
+
+    # shuffle data another time
+    final_df_train = shuffle(final_df_train)
+    final_df_test = shuffle(final_df_test)
+
+    final_df_train_size = len(final_df_train.index)
+    final_df_test_size = len(final_df_test.index)
+
+    # use of the whole data set for training
+    x_dataset_train = final_df_train.iloc[:,1:]
+    x_dataset_test = final_df_test.iloc[:,1:]
+
+    y_dataset_train = final_df_train.iloc[:,0]
+    y_dataset_test = final_df_test.iloc[:,0]
+
+    # get indices of attribute columns to use (selection from solution bits)
+    indices = []
+
+    print("Solution: ", p_solution)
+    for index, value in enumerate(p_solution): 
+        if value == 1: 
+            indices.append(index) 
+
+    print("Selected indices: ", indices)
+
+    x_dataset_train = x_dataset_train.iloc[:, indices]
+    x_dataset_test =  x_dataset_test.iloc[:, indices]
+
+    #######################
+    # 2. Construction of the model : Ensemble model structure
+    #######################
+
+    print("-------------------------------------------")
+    print("Train dataset size: ", final_df_train_size)
+    model = mdl.get_trained_model(p_choice, x_dataset_train, y_dataset_train)
+
+    #######################
+    # 3. Fit model : use of cross validation to fit model
+    #######################
+    val_scores = cross_val_score(model, x_dataset_train, y_dataset_train, cv=5)
+    print("Accuracy: %0.2f (+/- %0.2f)" % (val_scores.mean(), val_scores.std() * 2))
+
+    ######################
+    # 4. Test : Validation and test dataset from .test dataset
+    ######################
+
+    # validation and test sets each take a third of the training set size
+    val_set_size = int(final_df_train_size/3)
+    test_set_size = val_set_size
+
+    total_validation_size = val_set_size + test_set_size
+
+    if final_df_test_size > total_validation_size:
+        x_dataset_test = x_dataset_test[0:total_validation_size]
+        y_dataset_test = y_dataset_test[0:total_validation_size]
+
+    X_test, X_val, y_test, y_val = train_test_split(x_dataset_test, y_dataset_test, test_size=0.5, random_state=1)
+
+    y_test_model = model.predict(X_test)
+    y_val_model = model.predict(X_val)
+
+    val_accuracy = accuracy_score(y_val, y_val_model)
+    test_accuracy = accuracy_score(y_test, y_test_model)
+
+    val_f1 = f1_score(y_val, y_val_model)
+    test_f1 = f1_score(y_test, y_test_model)
+
+    ###################
+    # 5. Output : Print and write all information in csv
+    ###################
+
+    print("Validation dataset size ", val_set_size)
+    print("Validation: ", val_accuracy)
+    print("Validation F1: ", val_f1)
+    print("Test dataset size ", test_set_size)
+    print("Test: ", val_accuracy)
+    print("Test F1: ", test_f1)
+
+    ##################
+    # 6. Save model : create path if not exists
+    ##################
+
+    if not os.path.exists(saved_models_folder):
+        os.makedirs(saved_models_folder)
+
+    joblib.dump(model, output_model_folder + '/' + p_output + '.joblib')
+
+if __name__== "__main__":
+    main()
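+
+# Example invocation (a sketch; names and solution bits are illustrative):
+#
+#   python train_model_attributes.py --data data/my_dataset --output my_model \
+#       --choice svm_model --solution '1 0 1 1 0 0 1 0 1 1 0 1 0 0 1 1 0 1 0 1 1 0 0 1 0 1'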