Browse Source

Use of population for rendering surrogate

Jérôme BUISINE 10 months ago
parent
commit
6032efa1b1

+ 1 - 1
find_best_attributes.py

@@ -190,7 +190,7 @@ def main():
                 filters_counter += 1
 
 
-    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(filters_counter) + ';' + str(bestSol.fitness())
+    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(filters_counter) + ';' + str(bestSol.fitness)
     with open(filename_path, 'a') as f:
         f.write(line_info + '\n')
     

+ 15 - 12
find_best_attributes_surrogate.py

@@ -26,7 +26,7 @@ sys.path.insert(0, '') # trick to enable import of main folder module
 import custom_config as cfg
 import models as mdl
 
-from optimization.ILSSurrogate import ILSSurrogate
+from optimization.ILSPopSurrogate import ILSPopSurrogate
 from macop.solutions.discrete import BinarySolution
 from macop.evaluators.base import Evaluator
 
@@ -34,11 +34,13 @@ from macop.operators.discrete.mutators import SimpleMutation
 from macop.operators.discrete.mutators import SimpleBinaryMutation
 from macop.operators.discrete.crossovers import SimpleCrossover
 from macop.operators.discrete.crossovers import RandomSplitCrossover
+from optimization.operators.SimplePopCrossover import SimplePopCrossover
 
 from macop.policies.reinforcement import UCBPolicy
 
 from macop.callbacks.classicals import BasicCheckpoint
 from macop.callbacks.policies import UCBCheckpoint
+from optimization.callbacks.MultiPopCheckpoint import MultiPopCheckpoint
 
 #from sklearn.ensemble import RandomForestClassifier
 
@@ -102,7 +104,7 @@ def _get_best_model(X_train, y_train):
 
     svc = svm.SVC(probability=True, class_weight='balanced')
     #clf = GridSearchCV(svc, param_grid, cv=5, verbose=1, scoring=my_accuracy_scorer, n_jobs=-1)
-    clf = GridSearchCV(svc, param_grid, cv=5, verbose=1, n_jobs=-1)
+    clf = GridSearchCV(svc, param_grid, cv=5, verbose=1, n_jobs=4)
 
     clf.fit(X_train, y_train)
 
@@ -146,10 +148,10 @@ def main():
         return BinarySolution.random(p_length, validator)
 
 
-    class SurrogateEvaluator(Evaluator):
+    class SVMEvaluator(Evaluator):
 
         # define evaluate function here (need of data information)
-        def compute(solution):
+        def compute(self, solution):
 
             start = datetime.datetime.now()
 
@@ -161,16 +163,16 @@ def main():
                     indices.append(index) 
 
             # keep only selected filters from solution
-            x_train_filters = self.data['x_train'].iloc[:, indices]
-            y_train_filters = self.data['y_train']
-            x_test_filters = self.data['x_test'].iloc[:, indices]
+            x_train_filters = self._data['x_train'].iloc[:, indices]
+            y_train_filters = self._data['y_train']
+            x_test_filters = self._data['x_test'].iloc[:, indices]
             
             model = _get_best_model(x_train_filters, y_train_filters)
             #model = RandomForestClassifier(n_estimators=10)
             #model = model.fit(x_train_filters, y_train_filters)
             
             y_test_model = model.predict(x_test_filters)
-            test_roc_auc = roc_auc_score(self.data['y_test'], y_test_model)
+            test_roc_auc = roc_auc_score(self._data['y_test'], y_test_model)
 
             end = datetime.datetime.now()
 
@@ -208,18 +210,19 @@ def main():
             f.write('x;y\n')
 
     # custom ILS for surrogate use
-    algo = ILSSurrogate(initalizer=init, 
-                        evaluator=SurrogateEvaluator(data={'x_train': x_train, 'y_train': y_train, 'x_test': x_test, 'y_test': y_test}), # same evaluator by default, as we will use the surrogate function
+    algo = ILSPopSurrogate(initalizer=init, 
+                        evaluator=SVMEvaluator(data={'x_train': x_train, 'y_train': y_train, 'x_test': x_test, 'y_test': y_test}), # same evaluator by default, as we will use the surrogate function
                         operators=operators, 
                         policy=policy, 
                         validator=validator,
+                        population_size=20,
                         surrogate_file_path=surrogate_output_model,
                         start_train_surrogate=p_start, # start learning and using surrogate after 1000 real evaluation
                         solutions_file=surrogate_output_data,
                         ls_train_surrogate=5,
                         maximise=True)
     
-    algo.addCallback(BasicCheckpoint(every=1, filepath=backup_file_path))
+    algo.addCallback(MultiPopCheckpoint(every=1, filepath=backup_file_path))
     algo.addCallback(UCBCheckpoint(every=1, filepath=ucb_backup_file_path))
 
     bestSol = algo.run(p_ils_iteration, p_ls_iteration)
@@ -244,7 +247,7 @@ def main():
                 filters_counter += 1
 
 
-    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(filters_counter) + ';' + str(bestSol.fitness())
+    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(filters_counter) + ';' + str(bestSol.fitness)
     with open(filename_path, 'a') as f:
         f.write(line_info + '\n')
     

+ 1 - 1
find_best_attributes_surrogate_dl.py

@@ -310,7 +310,7 @@ def main():
                 filters_counter += 1
 
 
-    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(filters_counter) + ';' + str(bestSol.fitness())
+    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(filters_counter) + ';' + str(bestSol.fitness)
     with open(filename_path, 'a') as f:
         f.write(line_info + '\n')
     

+ 1 - 1
find_best_attributes_surrogate_openML.py

@@ -237,7 +237,7 @@ def main():
 
     filename_path = os.path.join(cfg.results_information_folder, cfg.optimization_attributes_result_filename)
 
-    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(bestSol.fitness())
+    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(bestSol.fitness)
     with open(filename_path, 'a') as f:
         f.write(line_info + '\n')
     

+ 1 - 1
find_best_attributes_surrogate_openML_multi.py

@@ -260,7 +260,7 @@ def main():
 
     filename_path = os.path.join(cfg.results_information_folder, cfg.optimization_attributes_result_filename)
 
-    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol._data) + ';' + str(list(bestSol._data).count(1)) + ';' + str(bestSol.fitness())
+    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol._data) + ';' + str(list(bestSol._data).count(1)) + ';' + str(bestSol.fitness)
     with open(filename_path, 'a') as f:
         f.write(line_info + '\n')
     

+ 1 - 1
find_best_attributes_surrogate_openML_multi_specific.py

@@ -288,7 +288,7 @@ def main():
 
     filename_path = os.path.join(cfg.results_information_folder, cfg.optimization_attributes_result_filename)
 
-    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol._data) + ';' + str(list(bestSol._data).count(1)) + ';' + str(bestSol.fitness())
+    line_info = p_data_file + ';' + str(p_ils_iteration) + ';' + str(p_ls_iteration) + ';' + str(bestSol._data) + ';' + str(list(bestSol._data).count(1)) + ';' + str(bestSol.fitness)
     with open(filename_path, 'a') as f:
         f.write(line_info + '\n')
     

+ 1 - 1
find_best_filters.py

@@ -161,7 +161,7 @@ def main():
 
     filename_path = os.path.join(cfg.results_information_folder, cfg.optimization_filters_result_filename)
 
-    line_info = p_data_file + ';' + str(ils_iteration) + ';' + str(ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(bestSol.fitness())
+    line_info = p_data_file + ';' + str(ils_iteration) + ';' + str(ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(bestSol.fitness)
     with open(filename_path, 'a') as f:
         f.write(line_info + '\n')
     

+ 3 - 3
optimization/ILSMultiSpecificSurrogate.py

@@ -448,10 +448,10 @@ class ILSMultiSpecificSurrogate(Algorithm):
 
                     # if solution is really better after real evaluation, then we replace (depending of problem nature (minimizing / maximizing))
                     if self._maximise:
-                        if sub_problem_solution.fitness() > self._population[i].fitness():
+                        if sub_problem_solution.fitness > self._population[i].fitness:
                             self._population[i] = sub_problem_solution
                     else:
-                        if sub_problem_solution.fitness() < self._population[i].fitness():
+                        if sub_problem_solution.fitness < self._population[i].fitness:
                             self._population[i] = sub_problem_solution
 
                     self.add_to_surrogate(sub_problem_solution, i)
@@ -520,7 +520,7 @@ class ILSMultiSpecificSurrogate(Algorithm):
     
             print(f'State of current population for surrogates ({len(self._population)} members)')
             for i, s in enumerate(self._population):
-                print(f'Population[{i}]: best solution fitness is {s.fitness()}')
+                print(f'Population[{i}]: best solution fitness is {s.fitness}')
 
             # check using specific dynamic criteria based on r^2
             r_squared_scores = self.surrogates_coefficient_of_determination()

+ 327 - 0
optimization/ILSPopSurrogate.py

@@ -0,0 +1,327 @@
+"""Iterated Local Search Algorithm implementation using surrogate as fitness approximation
+"""
+
+# main imports
+import os
+import logging
+import joblib
+import time
+
+# module imports
+from macop.algorithms.base import Algorithm
+from macop.evaluators.base import Evaluator
+
+from .LSSurrogate import LocalSearchSurrogate
+from .utils.SurrogateAnalysis import SurrogateAnalysisMono
+
+from sklearn.linear_model import (LinearRegression, Lasso, Lars, LassoLars,
+                                    LassoCV, ElasticNet)
+
+from wsao.sao.problems.nd3dproblem import ND3DProblem
+from wsao.sao.surrogates.walsh import WalshSurrogate
+from wsao.sao.algos.fitter import FitterAlgo
+from wsao.sao.utils.analysis import SamplerAnalysis, FitterAnalysis, OptimizerAnalysis
+
+
class SurrogateEvaluator(Evaluator):
    """Evaluator that estimates a solution's fitness using the trained surrogate model.

    Expects ``data={'surrogate': <fitted wsao algo>}``; the wrapped object's
    ``surrogate`` attribute must expose a scikit-learn style ``predict``.
    """

    def compute(self, solution):
        # delegate fitness estimation to the surrogate model (no real evaluation)
        surrogate_model = self._data['surrogate'].surrogate
        predictions = surrogate_model.predict([solution.data])
        return predictions[0]
+        
+
class ILSPopSurrogate(Algorithm):
    """Iterated Local Search used to avoid local optima and increase EvE (Exploration vs Exploitation) compromise using surrogate


    Attributes:
        initalizer: {function} -- basic function strategy to initialize solution
        evaluator: {function} -- basic function in order to obtained fitness (mono or multiple objectives)
        operators: {[Operator]} -- list of operator to use when launching algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check if solution is valid or not under some constraints
        maximise: {bool} -- specify kind of optimization problem
        currentSolution: {Solution} -- current solution managed for current evaluation
        bestSolution: {Solution} -- best solution found so far during running algorithm
        ls_iteration: {int} -- number of evaluation for each local search algorithm
        population_size: {int} -- size of the population to manage
        surrogate_file: {str} -- Surrogate model file to load (model trained using https://gitlab.com/florianlprt/wsao)
        start_train_surrogate: {int} -- number of evaluation expected before start training and use surrogate
        surrogate: {Surrogate} -- Surrogate model instance loaded
        ls_train_surrogate: {int} -- Specify if we need to retrain our surrogate model (every Local Search)
        solutions_file: {str} -- Path where real evaluated solutions are saved in order to train surrogate again
        callbacks: {[Callback]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initializing algorithm
    """
    def __init__(self,
                 initalizer,
                 evaluator,
                 operators,
                 policy,
                 validator,
                 population_size,
                 surrogate_file_path,
                 start_train_surrogate,
                 ls_train_surrogate,
                 solutions_file,
                 maximise=True,
                 parent=None):

        # set real evaluator as default
        super().__init__(initalizer, evaluator, operators, policy,
                validator, maximise, parent)

        self._n_local_search = 0
        self._main_evaluator = evaluator

        self._surrogate_file_path = surrogate_file_path
        self._start_train_surrogate = start_train_surrogate

        self._surrogate_evaluator = None
        self._surrogate_analyser = None

        self._ls_train_surrogate = ls_train_surrogate
        self._solutions_file = solutions_file

        # default population values (lazily filled by initRun)
        self.population_size = population_size
        self.population = []

        for _ in range(self.population_size):
            self.population.append(None)

    def train_surrogate(self):
        """Retrain if necessary the whole surrogate fitness approximation function
        """
        # Following https://gitlab.com/florianlprt/wsao, we re-train the model
        # ---------------------------------------------------------------------------
        # cli_restart.py problem=nd3d,size=30,filename="data/statistics_extended_svdn" \
        #        model=lasso,alpha=1e-5 \
        #        surrogate=walsh,order=3 \
        #        algo=fitter,algo_restarts=10,samplefile=stats_extended.csv \
        #        sample=1000,step=10 \
        #        analysis=fitter,logfile=out_fit.csv

        problem = ND3DProblem(size=len(self._bestSolution.data)) # problem size based on best solution size (need to improve...)
        model = Lasso(alpha=1e-5)
        surrogate = WalshSurrogate(order=2, size=problem.size, model=model)
        analysis = FitterAnalysis(logfile="train_surrogate.log", problem=problem)
        algo = FitterAlgo(problem=problem, surrogate=surrogate, analysis=analysis, seed=problem.seed)

        # dynamic number of samples based on dataset real evaluations
        nsamples = None
        with open(self._solutions_file, 'r') as f:
            nsamples = len(f.readlines()) - 1 # avoid header

        training_samples = int(0.7 * nsamples) # 70% used for learning part at each iteration

        print("Start fitting again the surrogate model")
        print(f'Using {training_samples} of {nsamples} samples for train dataset')
        for r in range(10):
            print(f"Iteration n°{r}: for fitting surrogate")
            algo.run(samplefile=self._solutions_file, sample=training_samples, step=10)

        joblib.dump(algo, self._surrogate_file_path)


    def load_surrogate(self):
        """Load algorithm with surrogate model and create lambda evaluator function
        """

        # need to first train surrogate if not exist
        if not os.path.exists(self._surrogate_file_path):
            self.train_surrogate()

        self._surrogate = joblib.load(self._surrogate_file_path)

        # update evaluator function
        self._surrogate_evaluator = SurrogateEvaluator(data={'surrogate': self._surrogate})

    def add_to_surrogate(self, solution):
        """Append a really-evaluated solution (data + score) to the surrogate training file."""

        # save real evaluated solution into specific file for surrogate
        with open(self._solutions_file, 'a') as f:

            line = ""

            for index, e in enumerate(solution._data):

                line += str(e)

                if index < len(solution._data) - 1:
                    line += ","

            line += ";"
            line += str(solution._score)

            f.write(line + "\n")

    def initRun(self):
        """Fill missing population members with random, really-evaluated solutions
        and select the initial best solution among them."""

        fitness_scores = []
        print('Initialisation of population')
        for i in range(len(self.population)):

            if self.population[i] is None:
                solution = self.initialiser()
                solution.evaluate(self.evaluator)

                self.population[i] = solution
                self.add_to_surrogate(solution)

            fitness_scores.append(self.population[i].fitness)

        print('Best solution initialisation')
        # fix: honour the optimisation direction (the original always used `max`,
        # which picked the *worst* member when maximise=False)
        select_best = max if self._maximise else min
        self._bestSolution = self.population[fitness_scores.index(select_best(fitness_scores))]


    def run(self, evaluations, ls_evaluations=100):
        """
        Run the iterated local search algorithm using local search (EvE compromise)

        Args:
            evaluations: {int} -- number of global evaluations for ILS
            ls_evaluations: {int} -- number of Local search evaluations (default: 100)

        Returns:
            {Solution} -- best solution found
        """

        # by default use of mother method to initialize variables
        super().run(evaluations)

        # enable resuming for ILS
        self.resume()

        # initialize current solution
        self.initRun()

        # count number of surrogate obtained and restart using real evaluations done
        nsamples = None
        with open(self._solutions_file, 'r') as f:
            nsamples = len(f.readlines()) - 1 # avoid header

        if self.getGlobalEvaluation() < nsamples:
            print(f'Restart using {nsamples} of {self._start_train_surrogate} real evaluations obtained')
            self._numberOfEvaluations = nsamples

        if self._start_train_surrogate > self.getGlobalEvaluation():

            # get `self.start_train_surrogate` number of real evaluations and save it into surrogate dataset file
            # using randomly generated solutions (in order to cover search space)
            while self._start_train_surrogate > self.getGlobalEvaluation():

                newSolution = self.initialiser()

                # evaluate new solution
                newSolution.evaluate(self.evaluator)

                # add it to surrogate pool
                self.add_to_surrogate(newSolution)

                self.increaseEvaluation()

        # train surrogate on real evaluated solutions file
        self.train_surrogate()
        self.load_surrogate()

        # local search algorithm implementation
        while not self.stop():

            # set current evaluator based on used or not of surrogate function
            self.local_evaluator = self._surrogate_evaluator if self._start_train_surrogate <= self.getGlobalEvaluation() else self._main_evaluator

            for i in range(len(self.population)):

                # create new local search instance
                # passing global evaluation param from ILS
                ls = LocalSearchSurrogate(self.initialiser,
                            self.local_evaluator,
                            self._operators,
                            self.policy,
                            self.validator,
                            self._maximise,
                            parent=self)

                # create current new solution using policy
                ls._currentSolution = self.policy.apply(self.population[i])
                ls.result = ls._currentSolution
                print("Inside pop => ", ls._currentSolution)

                # add same callbacks
                #for callback in self._callbacks:
                #    ls.addCallback(callback)

                # create and search solution from local search
                newSolution = ls.run(ls_evaluations)

                # if better solution than currently, replace it (solution saved in training pool, only if surrogate process is in a second process step)
                # Update : always add new solution into surrogate pool, not only if solution is better
                #if self.isBetter(newSolution) and self.start_train_surrogate < self.getGlobalEvaluation():
                if self._start_train_surrogate <= self.getGlobalEvaluation():

                    # if better solution found from local search, retrained the found solution and test again
                    # without use of surrogate
                    fitness_score = self._main_evaluator.compute(newSolution)
                    # self.increaseEvaluation() # dot not add evaluation

                    newSolution.fitness = fitness_score

                    # if solution is really better after real evaluation, then we replace
                    if self.isBetter(newSolution):
                        self.result = newSolution

                    # fix: replace the population member only when the new solution is
                    # really better, honouring the optimisation direction (the original
                    # compared with `<` unconditionally, which is wrong when minimising)
                    if self._maximise:
                        if newSolution.fitness > self.population[i].fitness:
                            self.population[i] = newSolution
                    else:
                        if newSolution.fitness < self.population[i].fitness:
                            self.population[i] = newSolution

                    self.add_to_surrogate(newSolution)

                    self.progress()

                # check using specific dynamic criteria based on r^2
                r_squared = self._surrogate.analysis.coefficient_of_determination(self._surrogate.surrogate)
                mae = self._surrogate.analysis.mae(self._surrogate.surrogate)
                training_surrogate_every = int(r_squared * self._ls_train_surrogate)
                print(f"=> R^2 of surrogate is of {r_squared}. Retraining model every {training_surrogate_every} LS")
                print(f"=> MAE of surrogate is of {mae}. Retraining model every {training_surrogate_every} LS")

                # avoid issue when launching every each local search
                if training_surrogate_every <= 0:
                    training_surrogate_every = 1

                # check if necessary or not to train again surrogate
                if self._n_local_search % training_surrogate_every == 0 and self._start_train_surrogate <= self.getGlobalEvaluation():

                    # train again surrogate on real evaluated solutions file
                    start_training = time.time()
                    self.train_surrogate()
                    training_time = time.time() - start_training

                    self._surrogate_analyser = SurrogateAnalysisMono(training_time, training_surrogate_every, r_squared, mae, self.getGlobalMaxEvaluation(), self._n_local_search)

                    # reload new surrogate function
                    self.load_surrogate()

                # increase number of local search done
                self._n_local_search += 1

                self.information()

        logging.info(f"End of {type(self).__name__}, best solution found {self._bestSolution}")

        self.end()
        return self._bestSolution

    def addCallback(self, callback):
        """Add new callback to algorithm specifying usefull parameters

        Args:
            callback: {Callback} -- specific Callback instance
        """
        # specify current main algorithm reference
        if self.getParent() is not None:
            callback.setAlgo(self.getParent())
        else:
            callback.setAlgo(self)

        # set as new
        self._callbacks.append(callback)

+ 2 - 3
optimization/ILSSurrogate.py

@@ -10,7 +10,6 @@ import time
 # module imports
 from macop.algorithms.base import Algorithm
 from .LSSurrogate import LocalSearchSurrogate
-from .utils.SurrogateAnalysis import SurrogateAnalysis
 
 from sklearn.linear_model import (LinearRegression, Lasso, Lars, LassoLars,
                                     LassoCV, ElasticNet)
@@ -173,7 +172,7 @@ class ILSSurrogate(Algorithm):
             # using randomly generated solutions (in order to cover seearch space)
             while self._start_train_surrogate > self.getGlobalEvaluation():
                 
-                newSolution = self.initializer()
+                newSolution = self.initialiser()
 
                 # evaluate new solution
                 newSolution.evaluate(self.evaluator)
@@ -195,7 +194,7 @@ class ILSSurrogate(Algorithm):
 
             # create new local search instance
             # passing global evaluation param from ILS
-            ls = LocalSearchSurrogate(self.initializer,
+            ls = LocalSearchSurrogate(self.initialiser,
                          self.evaluator,
                          self._operators,
                          self.policy,

+ 3 - 2
optimization/LSSurrogate.py

@@ -41,7 +41,8 @@ class LocalSearchSurrogate(Algorithm):
         #     self.bestSolution = self.parent.bestSolution
 
         # initialize current solution
-        self.initRun()
+        # self.initRun()
+        print("Inside LS => ", self._currentSolution)
 
         solutionSize = self._currentSolution._size
 
@@ -62,7 +63,7 @@ class LocalSearchSurrogate(Algorithm):
 
                 self.progress()
 
-                logging.info(f"---- Current {newSolution} - SCORE {newSolution.fitness()}")
+                logging.info(f"---- Current {newSolution} - SCORE {newSolution.fitness}")
 
                 # add to surrogate pool file if necessary (using ILS parent reference)
                 # if self.parent.start_train_surrogate >= self.getGlobalEvaluation():

+ 121 - 0
optimization/callbacks/MultiPopCheckpoint.py

@@ -0,0 +1,121 @@
+# main imports
+import os
+import logging
+import numpy as np
+
+# module imports
+from macop.callbacks.base import Callback
+from macop.utils.progress import macop_text, macop_line
+
+
class MultiPopCheckpoint(Callback):
    """
    MultiPopCheckpoint saves and restores the whole algorithm population so a run
    can be resumed after interruption.

    Each checkpoint is one appended line with the format:
    ``<evaluation>;<score_1>;...;<score_P>;<data_1>;...;<data_P>;``
    where each ``data_i`` is the solution's values joined by single spaces.
    ``load`` parses the LAST line of the file with the exact same layout, so the
    two methods must stay format-compatible.

    Attributes:
        algo: {:class:`~macop.algorithms.base.Algorithm`} -- main algorithm instance reference
        every: {int} -- checkpoint frequency used (based on number of evaluations)
        filepath: {str} -- file path where checkpoints will be saved
    """
    def run(self):
        """
        Append a population snapshot when the current evaluation count is a
        multiple of `every`.
        """
        # get current population
        population = self._algo.population

        currentEvaluation = self._algo.getGlobalEvaluation()

        # backup if necessary
        if currentEvaluation % self._every == 0:

            logging.info("Checkpoint is done into " + self._filepath)

            with open(self._filepath, 'a') as f:
                
                # line starts with the global evaluation counter
                pop_line = str(currentEvaluation) + ';'

                scores = []
                pop_data = []

                for solution in population:
                    # serialize solution values as space-separated tokens
                    solution_data = ""
                    solutionSize = len(solution.data)

                    for index, val in enumerate(solution.data):
                        solution_data += str(val)

                        if index < solutionSize - 1:
                            solution_data += ' '
                    
                    scores.append(solution.fitness)
                    pop_data.append(solution_data)

                # all scores first, then all data blocks (order must match `load`)
                for score in scores:
                    pop_line += str(score) + ';'

                for data in pop_data:
                    pop_line += data + ';'

                pop_line += '\n'

                f.write(pop_line)

    def load(self):
        """
        Restore the algorithm population and evaluation counter from the most
        recent checkpoint line, if the backup file exists.
        """
        if os.path.exists(self._filepath):

            logging.info('Load best solution from last checkpoint')
            with open(self._filepath, 'r') as f:

                # only the last line (most recent checkpoint) is used
                data_line = f.readlines()[-1]
                
                data = data_line.replace(';\n', '').split(';')
          
                # get evaluation information (first field)
                globalEvaluation = int(data[0])

                # restore the counter on the parent algorithm when nested
                if self._algo.getParent() is not None:
                    self._algo.getParent(
                    )._numberOfEvaluations = globalEvaluation
                else:
                    self._algo._numberOfEvaluations = globalEvaluation

                # NOTE(review): assumes the saved population has at least as many
                # members as the current one — confirm sizes match across runs
                nSolutions = len(self._algo.population)
                scores = list(map(float, data[1:nSolutions + 1]))

                # remaining fields are the space-separated solution data blocks
                pop_str_data = data[nSolutions + 1:]
                pop_data = []

                for sol_data in pop_str_data:
                    current_data = list(map(int, sol_data.split(' ')))
                    pop_data.append(current_data)

                for i, sol_data in enumerate(pop_data):

                    # initialise a fresh solution, then overwrite data and fitness
                    self._algo.population[i] = self._algo.initialiser()
                    self._algo.population[i].data = np.array(sol_data)
                    self._algo.population[i].fitness = scores[i]

            # NOTE(review): macop_line/macop_text return formatted strings; their
            # return values are discarded here — confirm they print internally
            macop_line(self._algo)
            macop_text(
                self._algo,
                f'Load of available population from `{self._filepath}`')
            macop_text(
                self._algo,
                f'Restart algorithm from evaluation {self._algo._numberOfEvaluations}.'
            )
        else:
            macop_text(
                self._algo,
                'No backup found... Start running algorithm from evaluation 0.'
            )
            logging.info(
                "Can't load backup... Backup filepath not valid in Checkpoint")

        macop_line(self._algo)

+ 2 - 2
optimization/callbacks/MultiSurrogateSpecificCheckpoint.py

@@ -40,7 +40,7 @@ class MultiSurrogateSpecificCheckpoint(Callback):
 
             line = ''
 
-            fitness_list = [ s.fitness() for s in population ]
+            fitness_list = [ s.fitness for s in population ]
             fitness_data = ' '.join(list(map(str, fitness_list)))
 
             for s in population:
@@ -82,7 +82,7 @@ class MultiSurrogateSpecificCheckpoint(Callback):
             print(macop_text(f' MultiSurrogateSpecificCheckpoint found from `{self._filepath}` file. Start running using previous `population` values'))
 
             for i, s in enumerate(self._algo._population):
-                print(f'Population[{i}]: best solution fitness is {s.fitness()}')
+                print(f'Population[{i}]: best solution fitness is {s.fitness}')
 
         else:
             print(macop_text('No backup found... Start running using new `population` values'))

+ 1 - 1
optimization/callbacks/SurrogateCheckpoint.py

@@ -54,7 +54,7 @@ class SurrogateCheckpoint(Callback):
 
             line = str(currentEvaluation) + ';' + str(surrogate_analyser._n_local_search) + ';' + str(surrogate_analyser._every_ls) + ';' + str(surrogate_analyser._time) + ';' + r2_data + ';' + str(surrogate_analyser._r2) \
                 + ';' + mae_data + ';' + str(surrogate_analyser._mae) \
-                + ';' + solutionData + ';' + str(solution.fitness()) + ';\n'
+                + ';' + solutionData + ';' + str(solution.fitness) + ';\n'
 
             # check if file exists
             if not os.path.exists(self._filepath):

+ 47 - 0
optimization/operators/SimplePopCrossover.py

@@ -0,0 +1,47 @@
+from macop.operators.base import Crossover
+import random
+
class SimplePopCrossover(Crossover):
    """Crossover operator mixing a solution with a randomly-chosen member of the
    algorithm's population (one half of the data is exchanged)."""

    def apply(self, solution1, solution2=None):
        """Create new solution based on best solution found and solution passed as parameter

        Args:
            solution1: {:class:`~macop.solutions.base.Solution`} -- the first solution to use for generating new solution
            solution2: {:class:`~macop.solutions.base.Solution`} -- the second solution to use for generating new solution (using population)

        Returns:
            {:class:`~macop.solutions.base.Solution`}: new generated solution
        """

        size = solution1._size
        population = self._algo.population

        # copy data of solution
        firstData = solution1.data.copy()

        # copy of a population solution as output solution
        valid = False
        copy_solution = None

        # use of different random population solution
        ncounter = 0
        while not valid:

            # fix: randint(0, len(population)) is inclusive and could return
            # len(population), raising IndexError — randrange draws a valid index
            chosen_solution = population[random.randrange(len(population))]

            # fix: comparing a numpy array with a list yields an elementwise
            # array (ambiguous truth value); compare as plain lists instead
            if list(chosen_solution.data) != list(firstData) or ncounter > 10:
                valid = True
                copy_solution = chosen_solution.clone()

            # security counter: accept any member after 10 failed draws
            ncounter += 1

        splitIndex = int(size / 2)

        # exchange one random half of solution1 into the copied solution
        if random.uniform(0, 1) > 0.5:
            copy_solution.data[splitIndex:] = firstData[splitIndex:]
        else:
            copy_solution.data[:splitIndex] = firstData[:splitIndex]

        return copy_solution

+ 12 - 1
optimization/utils/SurrogateAnalysis.py

@@ -1,5 +1,16 @@
 # quick object for surrogate logging data
-class SurrogateAnalysis():
class SurrogateAnalysisMono():
    """Quick value object logging data about one mono-objective surrogate training step."""

    def __init__(self, time, every_ls, r2, mae, evaluations, n_local_search):
        # training duration of the surrogate fit (seconds)
        self._time = time
        # retraining frequency, expressed in number of local searches
        self._every_ls = every_ls
        # coefficient of determination of the fitted surrogate
        self._r2 = r2
        # mean absolute error of the fitted surrogate
        self._mae = mae
        # global number of evaluations of the owning algorithm
        self._evaluations = evaluations
        # number of local searches performed so far
        self._n_local_search = n_local_search
+
+
+class SurrogateAnalysisMulti():
 
     def __init__(self, time, every_ls, r2_scores, r2, mae_scores, mae, evaluations, n_local_search):
         self._time = time