multi.py 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484
  1. """Multi-objective classes algorithm
  2. """
  3. # main imports
  4. import logging
  5. import math
  6. import numpy as np
  7. import sys
  8. from ..utils.progress import macop_text, macop_line, macop_progress
  9. # module imports
  10. from .base import Algorithm
  11. from ..evaluators.discrete.multi import WeightedSum
  12. class MOEAD(Algorithm):
  13. """Multi-Ojective Evolutionary Algorithm with Scalar Decomposition
  14. Attributes:
  15. mu: {int} -- number of sub problems
  16. T: {[float]} -- number of neightbors for each sub problem
  17. nObjectives: {int} -- number of objectives (based of number evaluator)
  18. initializer: {function} -- basic function strategy to initialize solution
  19. evaluator: {[function]} -- list of basic function in order to obtained fitness (multiple objectives)
  20. operators: {[Operator]} -- list of operator to use when launching algorithm
  21. policy: {Policy} -- Policy class implementation strategy to select operators
  22. validator: {function} -- basic function to check if solution is valid or not under some constraints
  23. maximise: {bool} -- specify kind of optimisation problem
  24. verbose: {bool} -- verbose or not information about the algorithm
  25. population: [{Solution}] -- population of solution, one for each sub problem
  26. pfPop: [{Solution}] -- pareto front population
  27. weights: [[{float}]] -- random weights used for custom mu sub problems
  28. callbacks: {[Callback]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initializing algorithm
  29. >>> import random
  30. >>> # operators import
  31. >>> from macop.operators.discrete.crossovers import SimpleCrossover
  32. >>> from macop.operators.discrete.mutators import SimpleMutation
  33. >>> # policy import
  34. >>> from macop.policies.classicals import RandomPolicy
  35. >>> # solution and algorithm
  36. >>> from macop.solutions.discrete import BinarySolution
  37. >>> from macop.algorithms.multi import MOEAD
  38. >>> # evaluator import
  39. >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
  40. >>> # evaluator initialization (worths objects passed into data)
  41. >>> problem_size = 20
  42. >>> worths1 = [ random.randint(0, 20) for i in range(problem_size) ]
  43. >>> evaluator1 = KnapsackEvaluator(data={'worths': worths1})
  44. >>> worths2 = [ random.randint(10, 15) for i in range(problem_size) ]
  45. >>> evaluator2 = KnapsackEvaluator(data={'worths': worths2})
  46. >>> # validator specification (based on weights of each objects)
  47. >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
  48. >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200 else False
  49. >>> # initializer function with lambda function
  50. >>> initializer = lambda x=20: BinarySolution.random(x, validator)
  51. >>> # operators list with crossover and mutation
  52. >>> operators = [SimpleCrossover(), SimpleMutation()]
  53. >>> policy = RandomPolicy(operators)
  54. >>> # MOEAD use multi-objective, hence list of evaluators with mu=100 and T=10
  55. >>> algo = MOEAD(20, 5, initializer, [evaluator1, evaluator2], operators, policy, validator, maximise=True, verbose=False)
  56. >>> # run the algorithm and get the pareto front obtained
  57. >>> pf_solutions = algo.run(100)
  58. >>> # check size of expected pareto
  59. >>> len(pf_solutions)
  60. 33
  61. """
  62. def __init__(self,
  63. mu,
  64. T,
  65. initializer,
  66. evaluator,
  67. operators,
  68. policy,
  69. validator,
  70. maximise=True,
  71. parent=None,
  72. verbose=True):
  73. # redefinition of constructor to well use `initRun` method
  74. self._initializer = initializer
  75. self._evaluator = evaluator
  76. self._operators = operators
  77. self._policy = policy
  78. self._validator = validator
  79. self._callbacks = []
  80. # by default
  81. self._numberOfEvaluations = 0
  82. self._maxEvaluations = 0
  83. self._nObjectives = len(evaluator)
  84. # other parameters
  85. self._parent = parent # parent algorithm if it's sub algorithm
  86. #self.maxEvaluations = 0 # by default
  87. self._maximise = maximise
  88. self._verbose = verbose
  89. # track reference of algo into operator (keep an eye into best solution)
  90. for operator in self._operators:
  91. operator.setAlgo(self)
  92. # by default track reference for policy
  93. self._policy.setAlgo(self)
  94. if mu < T:
  95. raise ValueError('`mu` cannot be less than `T`')
  96. if mu < T:
  97. raise ValueError('`mu` cannot be less than `T`')
  98. self._mu = mu
  99. self._T = T
  100. # initialize neighbors for each sub problem
  101. self.setNeighbors()
  102. weights = []
  103. if self._nObjectives == 2:
  104. for i in range(self._mu):
  105. angle = math.pi / 2 * i / (self._mu - 1)
  106. weights.append([math.cos(angle), math.sin(angle)])
  107. elif self._nObjectives >= 3:
  108. # random weights using uniform
  109. for i in range(self._mu):
  110. w_i = np.random.uniform(0, 1, self._nObjectives)
  111. weights.append(w_i / sum(w_i))
  112. else:
  113. raise ValueError('Unvalid number of objectives')
  114. self._weights = weights
  115. self._subProblems = []
  116. for i in range(self._mu):
  117. # compute weight sum from solution
  118. sub_evaluator = WeightedSum(data={'evaluators': evaluator, 'weights': weights[i]})
  119. # intialize each sub problem
  120. # use copy of list to keep track for each sub problem
  121. subProblem = MOSubProblem(i, weights[i],
  122. initializer, sub_evaluator,
  123. operators.copy(), policy, validator,
  124. maximise, self, verbose=self._verbose)
  125. self._subProblems.append(subProblem)
  126. self._population = [None for n in range(self._mu)]
  127. self._pfPop = []
  128. # ref point based on number of evaluators
  129. if self._maximise:
  130. self._refPoint = [0 for _ in range(self._nObjectives)]
  131. else:
  132. self._refPoint = [
  133. sys.float_info.max for _ in range(self._nObjectives)
  134. ]
  135. def initRun(self):
  136. """
  137. Method which initialiazes or re-initializes the whole algorithm context specifically for MOEAD
  138. """
  139. # initialization is done during run method
  140. pass
  141. def run(self, evaluations):
  142. """
  143. Run the local search algorithm
  144. Args:
  145. evaluations: {int} -- number of Local search evaluations
  146. Returns:
  147. {Solution} -- best solution found
  148. """
  149. # by default use of mother method to initialize variables
  150. super().run(evaluations)
  151. # enable callback resume for MOEAD
  152. self.resume()
  153. # initialize each sub problem if no backup
  154. for i in range(self._mu):
  155. if self._subProblems[i]._bestSolution is None:
  156. self._subProblems[i].run(1)
  157. self._population[i] = self._subProblems[i]._bestSolution
  158. # if no backup for pf population
  159. if len(self._pfPop) == 0:
  160. for i in range(self._mu):
  161. self._pfPop.append(self._subProblems[i]._bestSolution)
  162. # MOEAD algorithm implementation
  163. while not self.stop():
  164. for i in range(self._mu):
  165. # run 1 iteration into sub problem `i`
  166. self._subProblems[i].run(1)
  167. spBestSolution = self._subProblems[i]._bestSolution
  168. self.updateRefPoint(spBestSolution)
  169. # for each neighbor of current sub problem update solution if better
  170. improvment = False
  171. for j in self._neighbors[i]:
  172. if spBestSolution.fitness() > self._subProblems[j]._bestSolution.fitness():
  173. # create new solution based on current new if better, computes fitness associated to new solution for sub problem
  174. newSolution = spBestSolution.clone()
  175. # evaluate solution for new sub problem and update as best solution
  176. self._subProblems[j].evaluate(newSolution)
  177. self._subProblems[j]._bestSolution = newSolution
  178. # update population solution for this sub problem
  179. self._population[j] = newSolution
  180. improvment = True
  181. # add new solution if improvment is idenfity
  182. if improvment:
  183. self._pfPop.append(spBestSolution)
  184. # update pareto front
  185. self._pfPop = self.paretoFront(self._pfPop)
  186. # add progress here
  187. self.progress()
  188. # stop algorithm if necessary
  189. if self.stop():
  190. break
  191. logging.info(f"End of {type(self).__name__}, best solution found {self._population}")
  192. self.end()
  193. return self._pfPop
  194. def progress(self):
  195. """
  196. Log progress and apply callbacks if necessary
  197. """
  198. if len(self._callbacks) > 0:
  199. for callback in self._callbacks:
  200. callback.run()
  201. macop_progress(self, self.getGlobalEvaluation(), self.getGlobalMaxEvaluation())
  202. logging.info(f"-- {type(self).__name__} evaluation {self._numberOfEvaluations} of {self._maxEvaluations} ({((self._numberOfEvaluations) / self._maxEvaluations * 100.):.2f}%)")
  203. def setNeighbors(self):
  204. if self._T % 2 == 1:
  205. dmin = -int(self._T / 2)
  206. dmax = int(self._T / 2) + 1
  207. else:
  208. dmin = -int(self._T / 2) + 1
  209. dmax = int(+self._T / 2)
  210. # init neighbord list
  211. self._neighbors = [[] for n in range(self._mu)]
  212. for direction in range(0, -dmin):
  213. for i in range(self._T):
  214. self._neighbors[direction].append(i)
  215. for direction in range(-dmin, self._mu - dmax):
  216. for i in range(direction + dmin, direction + dmax - 1):
  217. self._neighbors[direction].append(i)
  218. for direction in range(self._mu - dmax, self._mu):
  219. for i in range(self._mu - self._T, self._mu):
  220. self._neighbors[direction].append(i)
  221. def updateRefPoint(self, solution):
  222. if self._maximise:
  223. for i in range(len(self._evaluator)):
  224. if solution._scores[i] > self._refPoint[i]:
  225. self._refPoint[i] = solution._scores[i]
  226. else:
  227. for i in range(len(self._evaluator)):
  228. if solution.scores[i] < self._refPoint[i]:
  229. self._refPoint[i] = solution._scores[i]
  230. def paretoFront(self, population):
  231. paFront = []
  232. indexes = []
  233. nObjectives = len(self._evaluator)
  234. nSolutions = len(population)
  235. # find dominated solution
  236. for i in range(nSolutions):
  237. for j in range(nSolutions):
  238. if j in indexes:
  239. continue
  240. # check if solutions are the same
  241. if all([
  242. population[i]._data[k] == population[j]._data[k]
  243. for k in range(len(population[i]._data))
  244. ]):
  245. continue
  246. nDominated = 0
  247. # check number of dominated objectives of current solution by the others solution
  248. for k in range(len(self._evaluator)):
  249. if self._maximise:
  250. if population[i]._scores[k] < population[j]._scores[k]:
  251. nDominated += 1
  252. else:
  253. if population[i]._scores[k] > population[j]._scores[k]:
  254. nDominated += 1
  255. if nDominated == nObjectives:
  256. indexes.append(i)
  257. break
  258. # store the non dominated solution into pareto front
  259. for i in range(nSolutions):
  260. if i not in indexes:
  261. paFront.append(population[i])
  262. return paFront
  263. def end(self):
  264. """Display end message into `run` method
  265. """
  266. macop_text(self, f'({type(self).__name__}) Found after {self._numberOfEvaluations} evaluations')
  267. for i, solution in enumerate(self._pfPop):
  268. macop_text(self, f' - [{i}] {solution._scores} : {solution}')
  269. macop_line(self)
  270. def information(self):
  271. logging.info("-- Pareto front :")
  272. for i, solution in enumerate(self._pfPop):
  273. logging.info(f"-- {i}] SCORE {solution._scores} - {solution}")
  274. def __str__(self):
  275. return f"{type(self).__name__} using {type(self._population).__name__}"
  276. class MOSubProblem(Algorithm):
  277. """Specific MO sub problem used into MOEAD
  278. Attributes:
  279. index: {int} -- sub problem index
  280. weights: {[float]} -- sub problems objectives weights
  281. initalizer: {function} -- basic function strategy to initialize solution
  282. evaluator: {function} -- basic function in order to obtained fitness (mono or multiple objectives)
  283. operators: {[Operator]} -- list of operator to use when launching algorithm
  284. policy: {Policy} -- Policy class implementation strategy to select operators
  285. validator: {function} -- basic function to check if solution is valid or not under some constraints
  286. maximise: {bool} -- specify kind of optimisation problem
  287. verbose: {bool} -- verbose or not information about the algorithm
  288. currentSolution: {Solution} -- current solution managed for current evaluation
  289. bestSolution: {Solution} -- best solution found so far during running algorithm
  290. callbacks: {[Callback]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initializing algorithm
  291. Example:
  292. >>> import random
  293. >>> # operators import
  294. >>> from macop.operators.discrete.crossovers import SimpleCrossover
  295. >>> from macop.operators.discrete.mutators import SimpleMutation
  296. >>> # policy import
  297. >>> from macop.policies.classicals import RandomPolicy
  298. >>> # solution and algorithm
  299. >>> from macop.solutions.discrete import BinarySolution
  300. >>> from macop.algorithms.multi import MOEAD, MOSubProblem
  301. >>> # evaluator import
  302. >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
  303. >>> # evaluator initialization (worths objects passed into data)
  304. >>> problem_size = 20
  305. >>> worths1 = [ random.randint(0, 20) for i in range(problem_size) ]
  306. >>> evaluator1 = KnapsackEvaluator(data={'worths': worths1})
  307. >>> worths2 = [ random.randint(10, 15) for i in range(problem_size) ]
  308. >>> evaluator2 = KnapsackEvaluator(data={'worths': worths2})
  309. >>> # validator specification (based on weights of each objects)
  310. >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
  311. >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200 else False
  312. >>> # initializer function with lambda function
  313. >>> initializer = lambda x=20: BinarySolution.random(x, validator)
  314. >>> # operators list with crossover and mutation
  315. >>> operators = [SimpleCrossover(), SimpleMutation()]
  316. >>> policy = RandomPolicy(operators)
  317. >>> algo = MOEAD(20, 5, initializer, [evaluator1, evaluator2], operators, policy, validator, maximise=True, verbose=False)
  318. >>> # weights of the sub problem
  319. >>> sub_problem_weights = [0.4, 0.6]
  320. >>> sub_evaluator = WeightedSum(data={'evaluators': [evaluator1, evaluator2], 'weights': sub_problem_weights})
  321. >>> # first parameter is the index of the MOSubProblem
  322. >>> subProblem = MOSubProblem(0, sub_problem_weights, initializer, sub_evaluator, operators, policy, validator, maximise=True, parent=algo, verbose=False)
  323. >>> # run the algorithm
  324. >>> solution = subProblem.run(100)
  325. >>> solution._score
  326. 133.0
  327. """
  328. def __init__(self,
  329. index,
  330. weights,
  331. initalizer,
  332. evaluator,
  333. operators,
  334. policy,
  335. validator,
  336. maximise=True,
  337. parent=None,
  338. verbose=True):
  339. super().__init__(initalizer, evaluator, operators, policy,
  340. validator, maximise, parent)
  341. self._index = index
  342. self._weights = weights
  343. self._verbose = verbose
  344. def run(self, evaluations):
  345. """
  346. Run the local search algorithm
  347. Args:
  348. evaluations: {int} -- number of evaluations
  349. Returns:
  350. {Solution} -- best solution found
  351. """
  352. # by default use of mother method to initialize variables
  353. super().run(evaluations)
  354. # initialize solution if necessary
  355. if self._bestSolution is None:
  356. self.initRun()
  357. # new operators list keep track of current sub problem
  358. for op in self._operators:
  359. op.setAlgo(self)
  360. for _ in range(evaluations):
  361. # keep reference of sub problem used
  362. self._policy.setAlgo(self)
  363. # update solution using policy
  364. newSolution = self.update(self._bestSolution)
  365. # if better solution than currently, replace it
  366. if self.isBetter(newSolution):
  367. self._bestSolution = newSolution
  368. # increase number of evaluations
  369. self.increaseEvaluation()
  370. self.progress()
  371. # stop algorithm if necessary
  372. if self.stop():
  373. break
  374. logging.info(f"---- Current {newSolution} - SCORE {newSolution.fitness()}")
  375. logging.info(f"End of {type(self).__name__}, best solution found {self._bestSolution}")
  376. return self._bestSolution