Browse source

Add new generation scripts

Jérôme BUISINE, 5 years ago
Parent commit 15f256d47d

+ 303 - 0
analysis/svd_entropy_analysis.ipynb

@@ -0,0 +1,303 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from ipfml import processing\n",
+    "from ipfml import utils\n",
+    "from ipfml import metrics\n",
+    "from PIL import Image\n",
+    "from scipy import signal\n",
+    "from skimage import color\n",
+    "import scipy.stats as stats\n",
+    "import seaborn as sns\n",
+    "import cv2\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "import os"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "data_folder = \"../fichiersSVD_light\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# SVD analysis on zones of Synthesis Images "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Utils functions definition"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def compute_images_path(dict_data):\n",
+    "    scene = dict_data['name']\n",
+    "    prefix = dict_data['prefix']\n",
+    "    indices = dict_data['indices']\n",
+    "    \n",
+    "    images_path = []\n",
+    "    for index in indices:\n",
+    "        path = os.path.join(data_folder, os.path.join(scene, prefix + index + \".png\"))\n",
+    "        print(path)\n",
+    "        images_path.append(path)\n",
+    "    return images_path"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_images_zones(dict_data, images_path):\n",
+    "    \n",
+    "    zones_indices = dict_data['zones']\n",
+    "    zones_img = []\n",
+    "    \n",
+    "    for path in images_path:\n",
+    "        img = Image.open(path)\n",
+    "        zones = processing.divide_in_blocks(img, (200, 200))\n",
+    "        \n",
+    "        zones_list = []\n",
+    "        \n",
+    "        for id_zone in zones_indices:\n",
+    "            zones_list.append(zones[id_zone])\n",
+    "            \n",
+    "        zones_img.append(zones_list)\n",
+    "        \n",
+    "    return zones_img"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def display_sv_data(dict_data, zones_data, interval, _norm=False):\n",
+    "    \n",
+    "    scene_name = dict_data['name']\n",
+    "    image_indices = dict_data['indices']\n",
+    "    zones_indices = dict_data['zones']\n",
+    "    colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']\n",
+    "    \n",
+    "    plt.figure(figsize=(25, 20))\n",
+    "    \n",
+    "    sv_data = []\n",
+    "    begin, end = interval\n",
+    "    for id_img, zones in enumerate(zones_data):\n",
+    "        \n",
+    "        for id_zone, zone in enumerate(zones):\n",
+    "            U, s, V = processing.get_LAB_L_SVD(zone)\n",
+    "        \n",
+    "            data = s[begin:end]\n",
+    "            \n",
+    "            if _norm:\n",
+    "                data = utils.normalize_arr(data)\n",
+    "                \n",
+    "            plt.plot(data, \n",
+    "                     color=colors[id_zone], \n",
+    "                     label='Zone ' + str(zones_indices[id_zone]) + ' of ' + scene_name + '_' + str(image_indices[id_img]))\n",
+    "            \n",
+    "    plt.legend(fontsize=18)\n",
+    "    plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Useful function\n",
+    "\n",
+    "def get_highest_values(arr, n):\n",
+    "    return np.array(arr).argsort()[-n:][::-1]\n",
+    "\n",
+    "def get_lowest_values(arr, n):\n",
+    "    return np.array(arr).argsort()[::-1][-n:][::-1]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Scenes zones data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# start 00020 - ref 00900 - step 10\n",
+    "dict_appart = {'name': 'Appart1opt02', \n",
+    "               'prefix': 'appartAopt_', \n",
+    "               'indices': [\"00020\", \"00200\", \"00900\"],\n",
+    "               'zones': [3, 6]}\n",
+    "\n",
+    "# start 00050 - ref 01200 - step 10\n",
+    "dict_cuisine = {'name': 'Cuisine01', \n",
+    "               'prefix': 'cuisine01_', \n",
+    "               'indices': [\"00050\", \"00400\", \"01200\"],\n",
+    "               'zones': [3, 6]}\n",
+    "\n",
+    "# start 00020 - ref 00950 - step 10\n",
+    "dict_sdb_c = {'name': 'SdbCentre', \n",
+    "               'prefix': 'SdB2_', \n",
+    "               'indices': [\"00020\", \"00400\", \"00950\"],\n",
+    "               'zones': [3, 6]}\n",
+    "\n",
+    "# start 00020 - ref 00950 - step 10\n",
+    "dict_sdb_d = {'name': 'SdbDroite', \n",
+    "               'prefix': 'SdB2_D_', \n",
+    "               'indices': [\"00020\", \"00400\", \"00950\"],\n",
+    "               'zones': [2, 3, 10, 13]}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "current_dict = dict_sdb_d\n",
+    "interval = (30, 200)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "../fichiersSVD_light/SdbDroite/SdB2_D_00020.png\n",
+      "../fichiersSVD_light/SdbDroite/SdB2_D_00400.png\n",
+      "../fichiersSVD_light/SdbDroite/SdB2_D_00950.png\n"
+     ]
+    }
+   ],
+   "source": [
+    "images_path = compute_images_path(current_dict)\n",
+    "zones_data = get_images_zones(current_dict, images_path)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 28,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "first_image = zones_data[0][1]\n",
+    "entropy_contribution_data = []\n",
+    "\n",
+    "sv = processing.get_LAB_L_SVD_s(zone)\n",
+    "sv = utils.normalize_arr(sv)\n",
+    "entropy = utils.get_entropy(sv)\n",
+    "\n",
+    "for i in range(200):\n",
+    "    entropy_without_column = utils.get_entropy_without_i(sv, i)\n",
+    "    entropy_contribution_column = entropy - entropy_without_column\n",
+    "    entropy_contribution_data.append(entropy_contribution_column)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 29,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([  1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,  13,\n",
+       "        14,  15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,\n",
+       "        27,  28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,\n",
+       "        40,  41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,  52,\n",
+       "        53,  54,  55,  56,  57,  58,  59,  60,  61,  62,  63,  64,  65,\n",
+       "        66,  67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,\n",
+       "        79,  80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,\n",
+       "        92,  93,  94,  95,  96,  97,  98,  99, 100])"
+      ]
+     },
+     "execution_count": 29,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "get_highest_values(entropy_contribution_data, 100)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 30,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([  0, 199, 198, 197, 196, 195, 194, 193, 192, 191, 190, 189, 188,\n",
+       "       187, 186, 185, 184, 183, 182, 181, 180, 179, 178, 177, 176, 175,\n",
+       "       174, 173, 172, 171, 170, 169, 168, 167, 166, 165, 164, 163, 162,\n",
+       "       161, 160, 159, 158, 157, 156, 155, 154, 153, 152, 151, 150, 149,\n",
+       "       148, 147, 146, 145, 144, 143, 142, 141, 140, 139, 138, 137, 136,\n",
+       "       135, 134, 133, 132, 131, 130, 129, 128, 127, 126, 125, 124, 123,\n",
+       "       122, 121, 120, 119, 118, 117, 116, 115, 114, 113, 112, 111, 110,\n",
+       "       109, 108, 107, 106, 105, 104, 103, 102, 101])"
+      ]
+     },
+     "execution_count": 30,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "get_lowest_values(entropy_contribution_data, 100)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "thesis-venv",
+   "language": "python",
+   "name": "thesis-venv"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.0"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
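
The core idea of the notebook is a leave-one-out entropy contribution: the Shannon entropy of the full singular-value vector minus the entropy of the same vector with one component removed. The sketch below is a minimal self-contained version of that computation; shannon_entropy and entropy_contributions are hypothetical stand-ins for ipfml's utils.get_entropy and utils.get_entropy_without_i, whose exact definitions are assumed here (entropy of the sum-normalized values):

    import numpy as np

    def shannon_entropy(arr):
        # treat the singular values as a probability distribution
        p = np.asarray(arr, dtype=float)
        p = p / p.sum()
        p = p[p > 0]                      # convention: 0 * log(0) = 0
        return -np.sum(p * np.log2(p))

    def entropy_contributions(sv):
        # entropy lost when each singular value is removed in turn
        full = shannon_entropy(sv)
        return np.array([full - shannon_entropy(np.delete(sv, i))
                         for i in range(len(sv))])

    sv = np.linspace(1.0, 0.01, 200)      # made-up decaying spectrum
    contrib = entropy_contributions(sv)
    print(contrib.argsort()[-5:][::-1])   # indices contributing the most entropy

Note that removing a component renormalizes the rest, so a contribution can be positive or negative depending on that component's weight relative to the others.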

+ 74 - 0
generateAndTrain_maxwell_custom_center.sh

@@ -0,0 +1,74 @@
+#!/bin/bash
+
+if [ -z "$1" ]
+  then
+    echo "No argument supplied"
+    echo "Need of vector size"
+    exit 1
+fi
+
+if [ -z "$2" ]
+  then
+    echo "No argument supplied"
+    echo "Need of metric information"
+    exit 1
+fi
+
+result_filename="models_info/models_comparisons.csv"
+VECTOR_SIZE=200
+size=$1
+metric=$2
+
+# selection of four scenes (Maxwell renderer only)
+scenes="A, D, G, H"
+
+half=$(($size/2))
+start=-$half
+for counter in {0..4}; do
+    end=$(($start+$size))
+
+    if [ "$end" -gt "$VECTOR_SIZE" ]; then
+        start=$(($VECTOR_SIZE-$size))
+        end=$(($VECTOR_SIZE))
+    fi
+
+    if [ "$start" -lt "0" ]; then
+        start=$((0))
+        end=$(($size))
+    fi
+
+    for nb_zones in {4,6,8,10,12}; do
+
+        echo $start $end
+
+        for mode in {"svd","svdn","svdne"}; do
+            for model in {"svm_model","ensemble_model","ensemble_model_v2"}; do
+
+                FILENAME="data/${model}_N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${metric}_${mode}"
+                MODEL_NAME="${model}_N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${metric}_${mode}"
+                CUSTOM_MIN_MAX_FILENAME="N${size}_B${start}_E${end}_nb_zones_${nb_zones}_${metric}_${mode}_min_max"
+
+                echo $FILENAME
+
+                # only compute if necessary (in case the server goes down mid-run)
+                if grep -q "${MODEL_NAME}" "${result_filename}"; then
+
+                    echo "${MODEL_NAME} results already generated..."
+                else
+                    python generate_data_model_random_center.py --output ${FILENAME} --interval "${start},${end}" --kind ${mode} --metric ${metric} --scenes "${scenes}" --nb_zones "${nb_zones}" --percent 1 --renderer "maxwell" --step 10 --random 1 --custom ${CUSTOM_MIN_MAX_FILENAME}
+                    python train_model.py --data ${FILENAME} --output ${MODEL_NAME} --choice ${model}
+
+                    #python predict_seuil_expe_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric} --limit_detection '2' --custom ${CUSTOM_MIN_MAX_FILENAME}
+                    python save_model_result_in_md_maxwell.py --interval "${start},${end}" --model "saved_models/${MODEL_NAME}.joblib" --mode "${mode}" --metric ${metric}
+                fi
+            done
+        done
+    done
+
+    if [ "$counter" -eq "0" ]; then
+        start=$(($start+50-$half))
+    else
+        start=$(($start+50))
+    fi
+
+done
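
The start/end arithmetic above slides a window of size singular values across the 200-value vector, starting centered on index 0, clamping to the valid range, and advancing by 50 (with a half-window correction after the first step). A minimal Python sketch of the same placement logic, useful for checking which intervals a given size produces (the printed result assumes size=26):

    # sketch of the window placement in generateAndTrain_maxwell_custom_center.sh
    VECTOR_SIZE = 200

    def window_positions(size, steps=5):
        half = size // 2
        start = -half
        positions = []
        for counter in range(steps):
            end = start + size
            if end > VECTOR_SIZE:                 # clamp right edge
                start, end = VECTOR_SIZE - size, VECTOR_SIZE
            if start < 0:                         # clamp left edge
                start, end = 0, size
            positions.append((start, end))
            start += 50 - half if counter == 0 else 50
        return positions

    print(window_positions(26))
    # -> [(0, 26), (37, 63), (87, 113), (137, 163), (174, 200)]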

+ 314 - 0
generate_data_model_random_center.py

@@ -0,0 +1,314 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Sep 14 21:02:42 2018
+
+@author: jbuisine
+"""
+
+from __future__ import print_function
+import sys, os, argparse
+import numpy as np
+import random
+import time
+import json
+
+from PIL import Image
+from ipfml import processing, metrics, utils
+
+from modules.utils import config as cfg
+from modules.utils import data as dt
+
+# getting configuration information
+config_filename         = cfg.config_filename
+learned_folder          = cfg.learned_zones_folder
+min_max_filename        = cfg.min_max_filename_extension
+
+# define all scenes values
+all_scenes_list         = cfg.scenes_names
+all_scenes_indices      = cfg.scenes_indices
+
+normalization_choices   = cfg.normalization_choices
+path                    = cfg.dataset_path
+zones                   = cfg.zones_indices
+seuil_expe_filename     = cfg.seuil_expe_filename
+
+renderer_choices        = cfg.renderer_choices
+metric_choices          = cfg.metric_choices_labels
+output_data_folder      = cfg.output_data_folder
+custom_min_max_folder   = cfg.min_max_custom_folder
+min_max_ext             = cfg.min_max_filename_extension
+
+generic_output_file_svd = '_random.csv'
+
+min_value_interval      = sys.maxsize
+max_value_interval      = 0
+abs_gap_data            = 150
+
+
+def construct_new_line(seuil_learned, interval, line, choice, each, norm):
+    begin, end = interval
+
+    line_data = line.split(';')
+    seuil = line_data[0]
+    metrics = line_data[begin+1:end+1]
+
+    # keep only every `each`-th value of the interval
+    metrics = [float(m) for idx, m in enumerate(metrics) if idx % each == 0]
+
+    # TODO: check whether this is always necessary (it may lose information for svd)
+    if norm:
+
+        if choice == 'svdne':
+            metrics = utils.normalize_arr_with_range(metrics, min_value_interval, max_value_interval)
+        if choice == 'svdn':
+            metrics = utils.normalize_arr(metrics)
+
+    if seuil_learned > int(seuil):
+        line = '1'
+    else:
+        line = '0'
+
+    for idx, val in enumerate(metrics):
+        line += ';'
+        line += str(val)
+    line += '\n'
+
+    return line
+
+def get_min_max_value_interval(_scenes_list, _interval, _metric):
+
+    global min_value_interval, max_value_interval
+
+    scenes = os.listdir(path)
+
+    # remove min max file from scenes folder
+    scenes = [s for s in scenes if min_max_filename not in s]
+
+    for id_scene, folder_scene in enumerate(scenes):
+
+        # only take care of maxwell scenes
+        if folder_scene in _scenes_list:
+
+            scene_path = os.path.join(path, folder_scene)
+
+            zones_folder = []
+            # create zones list
+            for index in zones:
+                index_str = str(index)
+                if len(index_str) < 2:
+                    index_str = "0" + index_str
+                zones_folder.append("zone"+index_str)
+
+            for id_zone, zone_folder in enumerate(zones_folder):
+
+                zone_path = os.path.join(scene_path, zone_folder)
+
+                # with custom normalization, use the raw svd values (not normalized beforehand)
+                data_filename = _metric + "_svd"+ generic_output_file_svd
+
+                data_file_path = os.path.join(zone_path, data_filename)
+
+                # read all svd data lines of the zone
+                with open(data_file_path, 'r') as f:
+                    lines = f.readlines()
+
+                # update the global min / max over the selected interval
+                for line in lines:
+
+                    begin, end = _interval
+
+                    line_data = line.split(';')
+
+                    metrics = line_data[begin+1:end+1]
+                    metrics = [float(m) for m in metrics]
+
+                    min_value = min(metrics)
+                    max_value = max(metrics)
+
+                    if min_value < min_value_interval:
+                        min_value_interval = min_value
+
+                    if max_value > max_value_interval:
+                        max_value_interval = max_value
+
+
+def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _scenes, _nb_zones = 4, _percent = 1, _random=0, _step=1, _each=1, _custom = False):
+
+    output_train_filename = _filename + ".train"
+    output_test_filename = _filename + ".test"
+
+    if '/' not in output_train_filename:
+        raise Exception("Please provide a filename with a directory path, e.g. data/dataset")
+
+    # create path if not exists
+    if not os.path.exists(output_data_folder):
+        os.makedirs(output_data_folder)
+
+    train_file_data = []
+    test_file_data  = []
+
+    for id_scene, folder_scene in enumerate(_scenes_list):
+
+        scene_path = os.path.join(path, folder_scene)
+
+        zones_indices = zones
+
+        # shuffle list of zones (=> randomly choose zones)
+        # only in random mode
+        if _random:
+            random.shuffle(zones_indices)
+
+        # store zones learned
+        learned_zones_indices = zones_indices[:_nb_zones]
+
+        # write into file
+        folder_learned_path = os.path.join(learned_folder, _filename.split('/')[1])
+
+        if not os.path.exists(folder_learned_path):
+            os.makedirs(folder_learned_path)
+
+        file_learned_path = os.path.join(folder_learned_path, folder_scene + '.csv')
+
+        with open(file_learned_path, 'w') as f:
+            for i in learned_zones_indices:
+                f.write(str(i) + ';')
+
+        for id_zone, index_folder in enumerate(zones_indices):
+
+            index_str = str(index_folder)
+            if len(index_str) < 2:
+                index_str = "0" + index_str
+            current_zone_folder = "zone" + index_str
+
+            zone_path = os.path.join(scene_path, current_zone_folder)
+
+            # with custom normalization, use the raw svd values (not normalized beforehand)
+            if _custom:
+                data_filename = _metric + "_svd"+ generic_output_file_svd
+            else:
+                data_filename = _metric + "_" + _choice + generic_output_file_svd
+
+            data_file_path = os.path.join(zone_path, data_filename)
+
+            # read all lines (counted below, shuffled in random mode)
+            f = open(data_file_path)
+            lines = f.readlines()
+
+            num_lines = len(lines)
+
+            # randomly shuffle lines (one per image)
+            if _random:
+                random.shuffle(lines)
+
+            path_seuil = os.path.join(zone_path, seuil_expe_filename)
+
+            with open(path_seuil, "r") as seuil_file:
+                seuil_learned = int(seuil_file.readline().strip())
+
+            counter = 0
+            # dispatch each line into the train or test set depending on scene / zone selection
+            for data in lines:
+
+                percent = counter / num_lines
+                image_index = int(data.split(';')[0])
+
+                if image_index % _step == 0:
+
+                    with open(path_seuil, "r") as seuil_file:
+                        seuil_learned = int(seuil_file.readline().strip())
+
+                    gap_threshold = abs(seuil_learned - image_index)
+
+                    # only keep data near to threshold of zone image
+                    if gap_threshold <= abs_gap_data:
+
+                        line = construct_new_line(seuil_learned, _interval, data, _choice, _each, _custom)
+
+                        if id_zone < _nb_zones and folder_scene in _scenes and percent <= _percent:
+                            train_file_data.append(line)
+                        else:
+                            test_file_data.append(line)
+
+                counter += 1
+
+            f.close()
+
+    train_file = open(output_train_filename, 'w')
+    test_file = open(output_test_filename, 'w')
+
+    for line in train_file_data:
+        train_file.write(line)
+
+    for line in test_file_data:
+        test_file.write(line)
+
+    train_file.close()
+    test_file.close()
+
+
+def main():
+
+    # getting all params
+    parser = argparse.ArgumentParser(description="Generate training and testing datasets from SVD data of scene zones")
+
+    parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
+    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='0, 200')
+    parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=normalization_choices)
+    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
+    parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
+    parser.add_argument('--nb_zones', type=int, help='Number of zones to use for training data set')
+    parser.add_argument('--random', type=int, help='Data will be randomly filled or not', choices=[0, 1])
+    parser.add_argument('--percent', type=float, help='Percent of data use for train and test dataset (by default 1)')
+    parser.add_argument('--step', type=int, help='Photo step to keep for build datasets', default=1)
+    parser.add_argument('--each', type=int, help='Each features to keep from interval', default=1)
+    parser.add_argument('--renderer', type=str, help='Renderer choice in order to limit scenes used', choices=renderer_choices, default='all')
+    parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default=False)
+
+    args = parser.parse_args()
+
+    p_filename = args.output
+    p_interval = list(map(int, args.interval.split(',')))
+    p_kind     = args.kind
+    p_metric   = args.metric
+    p_scenes   = args.scenes.split(',')
+    p_nb_zones = args.nb_zones
+    p_random   = args.random
+    p_percent  = args.percent
+    p_step     = args.step
+    p_each     = args.each
+    p_renderer = args.renderer
+    p_custom   = args.custom
+
+
+    # list all possibles choices of renderer
+    scenes_list = dt.get_renderer_scenes_names(p_renderer)
+    scenes_indices = dt.get_renderer_scenes_indices(p_renderer)
+
+    # getting scenes from indexes user selection
+    scenes_selected = []
+
+    for scene_id in p_scenes:
+        index = scenes_indices.index(scene_id.strip())
+        scenes_selected.append(scenes_list[index])
+
+    # find min max value if necessary to renormalize data
+    if p_custom:
+        get_min_max_value_interval(scenes_list, p_interval, p_metric)
+
+        # write new file to save
+        if not os.path.exists(custom_min_max_folder):
+            os.makedirs(custom_min_max_folder)
+
+        min_max_folder_path = os.path.join(os.path.dirname(__file__), custom_min_max_folder)
+        min_max_filename_path = os.path.join(min_max_folder_path, p_custom)
+
+        with open(min_max_filename_path, 'w') as f:
+            f.write(str(min_value_interval) + '\n')
+            f.write(str(max_value_interval) + '\n')
+
+    # create database using img folder (generate first time only)
+    generate_data_model(scenes_list, p_filename, p_interval, p_kind, p_metric, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_each, p_custom)
+
+if __name__ == "__main__":
+    main()
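
A point worth making explicit: construct_new_line labels a sample 1 when the zone's learned threshold (seuil) is still above the line's image index, i.e. the image has not yet converged, and 0 otherwise; the remaining fields of the line become the feature vector. A small sketch of that convention on a made-up line (the values and threshold below are hypothetical):

    # hypothetical data line: "<image_index>;<sv_0>;<sv_1>;..."
    line = "00250;0.98;0.75;0.61;0.52"   # made-up singular values
    seuil_learned = 400                  # made-up zone threshold

    fields = line.split(';')
    image_index = int(fields[0])
    features = [float(v) for v in fields[1:]]
    label = '1' if seuil_learned > image_index else '0'
    print(label, features)               # -> 1 [0.98, 0.75, 0.61, 0.52]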

+ 24 - 0
runAll_maxwell_custom_center.sh

@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# erase "models_info/models_comparisons.csv" file and write new header
+file_path='models_info/models_comparisons.csv'
+
+erased=$1
+
+if [ "${erased}" == "Y" ]; then
+    echo "Previous data file erased..."
+    rm ${file_path}
+    mkdir -p models_info
+    touch ${file_path}
+
+    # write the header
+    echo 'model_name; vector_size; start; end; nb_zones; metric; mode; train_size; val_size; test_size; train_pct_size; val_pct_size; test_pct_size; train_acc; val_acc; test_acc; all_acc; F1_train; recall_train; roc_auc_train; F1_val; recall_val; roc_auc_val; F1_test; recall_test; roc_auc_test; F1_all; recall_all; roc_auc_all;' >> ${file_path}
+
+fi
+
+for size in {"4","8","16","26","32","40"}; do
+
+    for metric in {"lab","mscn","low_bits_2","low_bits_3","low_bits_4","low_bits_5","low_bits_6","low_bits_4_shifted_2"}; do
+        bash generateAndTrain_maxwell_custom_center.sh ${size} ${metric}
+    done
+done
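
The two loops above simply enumerate a grid of (size, metric) pairs and delegate each one to generateAndTrain_maxwell_custom_center.sh. An equivalent Python sketch of the grid, handy for estimating the total workload (6 sizes x 8 metrics = 48 invocations):

    from itertools import product

    sizes = ["4", "8", "16", "26", "32", "40"]
    metrics = ["lab", "mscn", "low_bits_2", "low_bits_3", "low_bits_4",
               "low_bits_5", "low_bits_6", "low_bits_4_shifted_2"]

    # each pair is one call to generateAndTrain_maxwell_custom_center.sh
    for size, metric in product(sizes, metrics):
        print("bash generateAndTrain_maxwell_custom_center.sh", size, metric)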