
update of multi-objective evaluation

Jérôme BUISINE 3 years ago
parent
commit
266d5de3bd

+ 15 - 28
examples/knapsackMultiExample.py

@@ -4,21 +4,21 @@ import os
 import random
 
 # module imports
-from macop.solutions.BinarySolution import BinarySolution
-from macop.evaluators.EvaluatorExample import evaluatorExample
+from macop.solutions.discrete import BinarySolution
+from macop.evaluators.knapsacks import KnapsackEvaluator
 
-from macop.operators.mutators.SimpleMutation import SimpleMutation
-from macop.operators.mutators.SimpleBinaryMutation import SimpleBinaryMutation
-from macop.operators.crossovers.SimpleCrossover import SimpleCrossover
-from macop.operators.crossovers.RandomSplitCrossover import RandomSplitCrossover
+from macop.operators.discrete.mutators import SimpleMutation
+from macop.operators.discrete.mutators import SimpleBinaryMutation
+from macop.operators.discrete.crossovers import SimpleCrossover
+from macop.operators.discrete.crossovers import RandomSplitCrossover
 
-from macop.operators.policies.RandomPolicy import RandomPolicy
-from macop.operators.policies.UCBPolicy import UCBPolicy
+from macop.policies.classicals import RandomPolicy
+from macop.policies.reinforcement import UCBPolicy
 
-from macop.algorithms.multi.MOEAD import MOEAD
-from macop.callbacks.MultiCheckpoint import MultiCheckpoint
-from macop.callbacks.ParetoCheckpoint import ParetoCheckpoint
-from macop.callbacks.UCBCheckpoint import UCBCheckpoint
+from macop.algorithms.multi import MOEAD
+from macop.callbacks.multi import MultiCheckpoint
+from macop.callbacks.multi import ParetoCheckpoint
+from macop.callbacks.policies import UCBCheckpoint
 
 if not os.path.exists('data'):
     os.makedirs('data')
@@ -52,22 +52,6 @@ def validator(solution):
 def init():
     return BinarySolution([], 200).random(validator)
 
-def evaluator1(solution):
-
-    fitness = 0
-    for index, elem in enumerate(solution._data):
-        fitness += (elements_score1[index] * elem)
-
-    return fitness
-
-def evaluator2(solution):
-
-    fitness = 0
-    for index, elem in enumerate(solution._data):
-        fitness += (elements_score2[index] * elem)
-
-    return fitness
-
 
 mo_checkpoint_path = "data/checkpointsMOEAD.csv"
 pf_checkpoint_path = "data/pfMOEAD.csv"
@@ -79,6 +63,9 @@ def main():
     operators = [SimpleBinaryMutation(), SimpleMutation(), SimpleCrossover(), RandomSplitCrossover()]
     policy = UCBPolicy(operators, C=100, exp_rate=0.2)
 
+    evaluator1 = KnapsackEvaluator(data={'worths': elements_score1})
+    evaluator2 = KnapsackEvaluator(data={'worths': elements_score2})
+
     # pass list of evaluators
     algo = MOEAD(20, 5, init, [evaluator1, evaluator2], operators, policy, validator, maximise=True)
     

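For readers following the example migration above, here is a minimal sketch (not part of the commit; the worth values are purely illustrative) of how the deleted evaluator1/evaluator2 closures map onto the new KnapsackEvaluator class:

```python
# Sketch only: the removed hand-written evaluator closures expressed
# with the new KnapsackEvaluator class (illustrative data).
from macop.solutions.discrete import BinarySolution
from macop.evaluators.knapsacks import KnapsackEvaluator

elements_score1 = [4, 2, 10, 1, 2]   # illustrative worths, not the example's real data

evaluator1 = KnapsackEvaluator(data={'worths': elements_score1})

solution = BinarySolution([1, 0, 1, 1, 0], 5)

# same fitness as the old loop: sum of worths[i] * bit[i]
assert evaluator1.compute(solution) == 4 + 10 + 1
```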
+ 4 - 24
macop/algorithms/multi.py

@@ -10,16 +10,7 @@ from ..utils.color import macop_text, macop_line, macop_progress
 
 # module imports
 from .base import Algorithm
-
-
-def moEvaluator(solution, evaluator, weights):
-
-    scores = [eval(solution) for eval in evaluator]
-
-    # associate objectives scores to solution
-    solution._scores = scores
-
-    return sum([scores[i] for i, w in enumerate(weights)])
+from ..evaluators.multi import WeightedSum
 
 
 class MOEAD(Algorithm):
@@ -114,8 +105,7 @@ class MOEAD(Algorithm):
         for i in range(self._mu):
 
             # compute weight sum from solution
-            sub_evaluator = lambda solution: moEvaluator(
-                solution, evaluator, weights[i])
+            sub_evaluator = WeightedSum(data={'evaluators': evaluator, 'weights': weights[i]})
 
             # intialize each sub problem
             # use copy of list to keep track for each sub problem
@@ -187,20 +177,10 @@ class MOEAD(Algorithm):
                 # for each neighbor of current sub problem update solution if better
                 improvment = False
                 for j in self._neighbors[i]:
-                    if spBestSolution.fitness(
-                    ) > self._subProblems[j]._bestSolution.fitness():
+                    if spBestSolution.fitness() > self._subProblems[j]._bestSolution.fitness():
 
                         # create new solution based on current new if better, computes fitness associated to new solution for sub problem
-                        class_name = type(spBestSolution).__name__
-
-                        # dynamically load solution class if unknown
-                        if class_name not in sys.modules:
-                            load_class(class_name, globals())
-
-                        newSolution = getattr(
-                            globals()['macop.solutions.' + class_name],
-                            class_name)(spBestSolution._data,
-                                        len(spBestSolution._data))
+                        newSolution = spBestSolution.clone()
 
                         # evaluate solution for new sub problem and update as best solution
                         self._subProblems[j].evaluate(newSolution)
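The refactor swaps the module-level moEvaluator helper for the new WeightedSum evaluator. A rough sketch of the decomposition idea, assuming two knapsack objectives and uniformly spread weight vectors (mu and the worth lists are illustrative, not taken from the library):

```python
# Sketch only (not part of the commit): MOEAD-style decomposition with one
# WeightedSum sub-evaluator per weight vector.
from macop.evaluators.knapsacks import KnapsackEvaluator
from macop.evaluators.multi import WeightedSum

objective1 = KnapsackEvaluator(data={'worths': [4, 2, 10, 1, 2]})   # illustrative data
objective2 = KnapsackEvaluator(data={'worths': [3, 6, 2, 2, 10]})

mu = 5  # number of sub-problems (illustrative)
weights = [[i / (mu - 1), 1 - i / (mu - 1)] for i in range(mu)]     # uniformly spread

sub_evaluators = [
    WeightedSum(data={'evaluators': [objective1, objective2], 'weights': w})
    for w in weights
]
# each sub-problem then optimises the single scalar returned by
# sub_evaluators[k].compute(solution), while solution._scores keeps both objectives
```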

+ 3 - 17
macop/callbacks/multi.py

@@ -156,18 +156,8 @@ class ParetoCheckpoint(Callback):
             logging.info('Load best solution from last checkpoint')
             with open(self._filepath) as f:
 
-                # reinit pf population
-                self._algo._pfPop = []
-
-                # retrieve class name from algo
-                class_name = type(self._algo._population[0]).__name__
-
-                # dynamically load solution class if unknown
-                if class_name not in sys.modules:
-                    load_class(class_name, globals())
-
                 # read data for each line
-                for line in f.readlines():
+                for i, line in enumerate(f.readlines()):
 
                     data = line.replace(';\n', '').split(';')
 
@@ -177,12 +167,8 @@ class ParetoCheckpoint(Callback):
                     # get best solution data information
                     solutionData = list(map(int, data[-1].split(' ')))
 
-                    newSolution = getattr(
-                        globals()['macop.solutions.' + class_name],
-                        class_name)(solutionData, len(solutionData))
-                    newSolution._scores = scores
-
-                    self._algo._pfPop.append(newSolution)
+                    self._algo._pfPop[i]._data = solutionData
+                    self._algo._pfPop[i]._scores = scores
 
             print(
                 macop_text(f'Load of available pareto front backup from `{ self._filepath}`'))
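A small sketch of the checkpoint line layout the loader above appears to expect; the score-parsing step is elided in the hunk, so that part (and the literal line below) is an assumption:

```python
# Hypothetical checkpoint line: objective scores, then the space-separated
# solution bits, all ';'-separated and terminated by ';\n'.
line = "15.0;23.0;1 0 1 1 0;\n"

data = line.replace(';\n', '').split(';')
scores = [float(s) for s in data[:-1]]                 # assumed score parsing (not shown in the hunk)
solution_data = list(map(int, data[-1].split(' ')))    # matches the loader above

print(scores)          # [15.0, 23.0]
print(solution_data)   # [1, 0, 1, 1, 0]
```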

+ 0 - 28
macop/evaluators/knapsacks.py

@@ -1,7 +1,6 @@
 """Knapsack evaluators classes
 """
 # main imports
-from abc import abstractmethod
 from .base import Evaluator
 
 
@@ -10,7 +9,6 @@ class KnapsackEvaluator(Evaluator):
 
     - stores into its `_data` dictionary attritute required measures when computing a knapsack solution
     - `_data['worths']` stores knapsack objects worths information
-    - `_data['weights']` stores knapsack objects weights information
     - `compute` method enables to compute and associate a score to a given knapsack solution
     """
 
@@ -27,30 +25,4 @@ class KnapsackEvaluator(Evaluator):
         for index, elem in enumerate(solution._data):
             fitness += self._data['worths'][index] * elem
 
-        return fitness
-
-
-class KnapsackMultiEvaluator(Evaluator):
-    """Knapsack multi-objective evaluator class which enables to compute solution using specific `_data` 
-
-    - stores into its `_data` dictionary attritute required measures when computing a knapsack solution
-    - `_data['worths1']` stores knapsack objects worths information
-    - `_data['worths2']` stores knapsack objects worths information
-    - `_data['weights']` stores knapsack objects weights information
-    - `compute` method enables to compute and associate a score to a given knapsack solution
-    """
-
-    def compute(self, solution):
-        """Apply the computation of fitness from solution
-
-        Args:
-            solution: {Solution} -- Solution instance
-    
-        Returns:
-            {float} -- fitness score of solution
-        """
-        fitness = 0
-        for index, elem in enumerate(solution._data):
-            fitness += self._data['worths1'][index] * elem
-
         return fitness
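For reference, the retained compute loop amounts to a dot product of the worths with the binary selection mask; a tiny equivalent sketch with illustrative data:

```python
# Sketch: KnapsackEvaluator's fitness is the dot product of worths and the mask.
import numpy as np

worths = np.array([4, 2, 10, 1, 2])   # illustrative values
mask = np.array([1, 0, 1, 1, 0])

fitness = int(np.dot(worths, mask))   # 4 + 10 + 1 = 15
```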

+ 33 - 0
macop/evaluators/multi.py

@@ -0,0 +1,33 @@
+"""Multi-objective evaluators classes 
+"""
+# main imports
+from .base import Evaluator
+
+
+class WeightedSum(Evaluator):
+    """Weighted-sum sub-evaluator class which enables to compute solution using specific `_data`
+
+    - stores into its `_data` dictionary attritute required measures when computing a solution
+    - `_data['evaluators']` current evaluator to use
+    - `_data['weights']` Associated weight to use
+    - `compute` method enables to compute and associate a tuples of scores to a given solution
+    """
+
+    def compute(self, solution):
+        """Apply the computation of fitness from solution
+
+        - Associates a tuple of fitness scores (one per objective) to the current solution
+        - Computes the weighted sum of these objectives
+
+        Args:
+            solution: {Solution} -- Solution instance
+    
+        Returns:
+            {float} -- weighted-sum of the fitness scores
+        """
+        scores = [evaluator.compute(solution) for evaluator in self._data['evaluators']]
+
+        # associate objectives scores to solution
+        solution._scores = scores
+
+        return sum([scores[i] * w for i, w in enumerate(self._data['weights'])])
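A usage sketch for the new class (not part of the commit; worth lists and weights are illustrative), showing that per-objective scores are kept on the solution while a single weighted scalar is returned:

```python
# Sketch: wrap two single-objective evaluators and scalarise them with weights.
from macop.solutions.discrete import BinarySolution
from macop.evaluators.knapsacks import KnapsackEvaluator
from macop.evaluators.multi import WeightedSum

ws = WeightedSum(data={
    'evaluators': [KnapsackEvaluator(data={'worths': [4, 2, 10]}),
                   KnapsackEvaluator(data={'worths': [3, 6, 2]})],
    'weights': [0.5, 0.5],
})

solution = BinarySolution([1, 0, 1], 3)
print(ws.compute(solution))   # 0.5 * 14 + 0.5 * 5 = 9.5 with the weights applied
print(solution._scores)       # [14, 5] - per-objective scores kept on the solution
```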

+ 1 - 1
macop/solutions/discrete.py

@@ -23,7 +23,7 @@ class BinarySolution(Solution):
             data: {ndarray} --  array of binary values
             size: {int} -- size of binary array values
         """
-        super().__init__(data, size)
+        super().__init__(np.array(data), size)
 
     def random(self, validator):
         """