"""Multi-objective classes algorithm
"""
  3. # main imports
  4. import logging
  5. import math
  6. import numpy as np
  7. import sys
  8. from ..utils.progress import macop_text, macop_line, macop_progress
  9. # module imports
  10. from .base import Algorithm
  11. from ..evaluators.multi import WeightedSum
  12. class MOEAD(Algorithm):
  13. """Multi-Ojective Evolutionary Algorithm with Scalar Decomposition
  14. Attributes:
  15. mu: {int} -- number of sub problems
  16. T: {[float]} -- number of neightbors for each sub problem
  17. nObjectives: {int} -- number of objectives (based of number evaluator)
  18. initializer: {function} -- basic function strategy to initialize solution
  19. evaluator: {[function]} -- list of basic function in order to obtained fitness (multiple objectives)
  20. operators: {[Operator]} -- list of operator to use when launching algorithm
  21. policy: {Policy} -- Policy class implementation strategy to select operators
  22. validator: {function} -- basic function to check if solution is valid or not under some constraints
  23. maximise: {bool} -- specify kind of optimisation problem
  24. verbose: {bool} -- verbose or not information about the algorithm
  25. population: [{Solution}] -- population of solution, one for each sub problem
  26. pfPop: [{Solution}] -- pareto front population
  27. weights: [[{float}]] -- random weights used for custom mu sub problems
  28. callbacks: {[Callback]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initializing algorithm
  29. """
  30. def __init__(self,
  31. mu,
  32. T,
  33. initializer,
  34. evaluator,
  35. operators,
  36. policy,
  37. validator,
  38. maximise=True,
  39. parent=None,
  40. verbose=True):
  41. # redefinition of constructor to well use `initRun` method
  42. self._initializer = initializer
  43. self._evaluator = evaluator
  44. self._operators = operators
  45. self._policy = policy
  46. self._validator = validator
  47. self._callbacks = []
  48. # by default
  49. self._numberOfEvaluations = 0
  50. self._maxEvaluations = 0
  51. self._nObjectives = len(evaluator)
  52. # other parameters
  53. self._parent = parent # parent algorithm if it's sub algorithm
  54. #self.maxEvaluations = 0 # by default
  55. self._maximise = maximise
  56. self._verbose = verbose
  57. # track reference of algo into operator (keep an eye into best solution)
  58. for operator in self._operators:
  59. operator.setAlgo(self)
  60. # by default track reference for policy
  61. self._policy.setAlgo(self)
  62. if mu < T:
  63. raise ValueError('`mu` cannot be less than `T`')
  64. self._mu = mu
  65. self._T = T
  66. # initialize neighbors for each sub problem
  67. self.setNeighbors()
  68. weights = []
  69. if self._nObjectives == 2:
  70. for i in range(self._mu):
  71. angle = math.pi / 2 * i / (self._mu - 1)
  72. weights.append([math.cos(angle), math.sin(angle)])
  73. elif self._nObjectives >= 3:
  74. # random weights using uniform
  75. for i in range(self._mu):
  76. w_i = np.random.uniform(0, 1, self._nObjectives)
  77. weights.append(w_i / sum(w_i))
  78. else:
  79. raise ValueError('Unvalid number of objectives')
  80. self._weights = weights
  81. self._subProblems = []
  82. for i in range(self._mu):
  83. # compute weight sum from solution
  84. sub_evaluator = WeightedSum(data={'evaluators': evaluator, 'weights': weights[i]})
  85. # intialize each sub problem
  86. # use copy of list to keep track for each sub problem
  87. subProblem = MOSubProblem(i, weights[i],
  88. initializer, sub_evaluator,
  89. operators.copy(), policy, validator,
  90. maximise, self)
  91. self._subProblems.append(subProblem)
  92. self._population = [None for n in range(self._mu)]
  93. self._pfPop = []
  94. # ref point based on number of evaluators
  95. if self._maximise:
  96. self._refPoint = [0 for _ in range(self._nObjectives)]
  97. else:
  98. self._refPoint = [
  99. sys.float_info.max for _ in range(self._nObjectives)
  100. ]
  101. def initRun(self):
  102. """
  103. Method which initialiazes or re-initializes the whole algorithm context specifically for MOEAD
  104. """
  105. # initialization is done during run method
  106. pass
  107. def run(self, evaluations):
  108. """
  109. Run the local search algorithm
  110. Args:
  111. evaluations: {int} -- number of Local search evaluations
  112. Returns:
  113. {Solution} -- best solution found
  114. """
  115. # by default use of mother method to initialize variables
  116. super().run(evaluations)
  117. # enable callback resume for MOEAD
  118. self.resume()
  119. # initialize each sub problem if no backup
  120. for i in range(self._mu):
  121. if self._subProblems[i]._bestSolution is None:
  122. self._subProblems[i].run(1)
  123. self._population[i] = self._subProblems[i]._bestSolution
  124. # if no backup for pf population
  125. if len(self._pfPop) == 0:
  126. for i in range(self._mu):
  127. self._pfPop.append(self._subProblems[i]._bestSolution)
  128. # MOEAD algorithm implementation
  129. while not self.stop():
  130. for i in range(self._mu):
  131. # run 1 iteration into sub problem `i`
  132. self._subProblems[i].run(1)
  133. spBestSolution = self._subProblems[i]._bestSolution
  134. self.updateRefPoint(spBestSolution)
  135. # for each neighbor of current sub problem update solution if better
  136. improvment = False
  137. for j in self._neighbors[i]:
  138. if spBestSolution.fitness() > self._subProblems[j]._bestSolution.fitness():
  139. # create new solution based on current new if better, computes fitness associated to new solution for sub problem
  140. newSolution = spBestSolution.clone()
  141. # evaluate solution for new sub problem and update as best solution
  142. self._subProblems[j].evaluate(newSolution)
  143. self._subProblems[j]._bestSolution = newSolution
  144. # update population solution for this sub problem
  145. self._population[j] = newSolution
  146. improvment = True
  147. # add new solution if improvment is idenfity
  148. if improvment:
  149. self._pfPop.append(spBestSolution)
  150. # update pareto front
  151. self._pfPop = self.paretoFront(self._pfPop)
  152. # add progress here
  153. self.progress()
  154. # stop algorithm if necessary
  155. if self.stop():
  156. break
  157. logging.info(f"End of {type(self).__name__}, best solution found {self._population}")
  158. self.end()
  159. return self._pfPop
  160. def progress(self):
  161. """
  162. Log progress and apply callbacks if necessary
  163. """
  164. if len(self._callbacks) > 0:
  165. for callback in self._callbacks:
  166. callback.run()
  167. macop_progress(self, self.getGlobalEvaluation(), self.getGlobalMaxEvaluation())
  168. logging.info(f"-- {type(self).__name__} evaluation {self._numberOfEvaluations} of {self._maxEvaluations} ({((self._numberOfEvaluations) / self._maxEvaluations * 100.):.2f}%)")
  169. def setNeighbors(self):
  170. dmin = dmax = 0
  171. if self._T % 2 == 1:
  172. dmin = -int(self._T / 2)
  173. dmax = int(self._T / 2) + 1
  174. else:
  175. dmin = -int(self._T / 2) + 1
  176. dmax = +self._T / 2
  177. # init neighbord list
  178. self._neighbors = [[] for n in range(self._mu)]
  179. for direction in range(0, -dmin):
  180. for i in range(self._T):
  181. self._neighbors[direction].append(i)
  182. for direction in range(-dmin, self._mu - dmax):
  183. for i in range(direction + dmin, direction + dmax):
  184. self._neighbors[direction].append(i)
  185. for direction in range(self._mu - dmax, self._mu):
  186. for i in range(self._mu - self._T, self._mu):
  187. self._neighbors[direction].append(i)
  188. def updateRefPoint(self, solution):
  189. if self._maximise:
  190. for i in range(len(self._evaluator)):
  191. if solution._scores[i] > self._refPoint[i]:
  192. self._refPoint[i] = solution._scores[i]
  193. else:
  194. for i in range(len(self._evaluator)):
  195. if solution.scores[i] < self._refPoint[i]:
  196. self._refPoint[i] = solution._scores[i]
  197. def paretoFront(self, population):
  198. paFront = []
  199. indexes = []
  200. nObjectives = len(self._evaluator)
  201. nSolutions = len(population)
  202. # find dominated solution
  203. for i in range(nSolutions):
  204. for j in range(nSolutions):
  205. if j in indexes:
  206. continue
  207. # check if solutions are the same
  208. if all([
  209. population[i]._data[k] == population[j]._data[k]
  210. for k in range(len(population[i]._data))
  211. ]):
  212. continue
  213. nDominated = 0
  214. # check number of dominated objectives of current solution by the others solution
  215. for k in range(len(self._evaluator)):
  216. if self._maximise:
  217. if population[i]._scores[k] < population[j]._scores[k]:
  218. nDominated += 1
  219. else:
  220. if population[i]._scores[k] > population[j]._scores[k]:
  221. nDominated += 1
  222. if nDominated == nObjectives:
  223. indexes.append(i)
  224. break
  225. # store the non dominated solution into pareto front
  226. for i in range(nSolutions):
  227. if i not in indexes:
  228. paFront.append(population[i])
  229. return paFront
  230. def end(self):
  231. """Display end message into `run` method
  232. """
  233. macop_text(self, f'({type(self).__name__}) Found after {self._numberOfEvaluations} evaluations')
  234. for i, solution in enumerate(self._pfPop):
  235. macop_text(self, f' - [{i}] {solution._scores} : {solution}')
  236. macop_line(self)
  237. def information(self):
  238. logging.info("-- Pareto front :")
  239. for i, solution in enumerate(self._pfPop):
  240. logging.info(f"-- {i}] SCORE {solution._scores} - {solution}")
  241. def __str__(self):
  242. return f"{type(self).__name__} using {type(self._population).__name__}"
  243. class MOSubProblem(Algorithm):
  244. """Specific MO sub problem used into MOEAD
  245. Attributes:
  246. index: {int} -- sub problem index
  247. weights: {[float]} -- sub problems objectives weights
  248. initalizer: {function} -- basic function strategy to initialize solution
  249. evaluator: {function} -- basic function in order to obtained fitness (mono or multiple objectives)
  250. operators: {[Operator]} -- list of operator to use when launching algorithm
  251. policy: {Policy} -- Policy class implementation strategy to select operators
  252. validator: {function} -- basic function to check if solution is valid or not under some constraints
  253. maximise: {bool} -- specify kind of optimisation problem
  254. verbose: {bool} -- verbose or not information about the algorithm
  255. currentSolution: {Solution} -- current solution managed for current evaluation
  256. bestSolution: {Solution} -- best solution found so far during running algorithm
  257. callbacks: {[Callback]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initializing algorithm
  258. """
  259. def __init__(self,
  260. index,
  261. weights,
  262. initalizer,
  263. evaluator,
  264. operators,
  265. policy,
  266. validator,
  267. maximise=True,
  268. parent=None,
  269. verbose=True):
  270. super().__init__(initalizer, evaluator, operators, policy,
  271. validator, maximise, parent)
  272. self._index = index
  273. self._weights = weights
  274. self._verbose = verbose
  275. def run(self, evaluations):
  276. """
  277. Run the local search algorithm
  278. Args:
  279. evaluations: {int} -- number of evaluations
  280. Returns:
  281. {Solution} -- best solution found
  282. """
  283. # by default use of mother method to initialize variables
  284. super().run(evaluations)
  285. # initialize solution if necessary
  286. if self._bestSolution is None:
  287. self.initRun()
  288. # new operators list keep track of current sub problem
  289. for op in self._operators:
  290. op.setAlgo(self)
  291. for _ in range(evaluations):
  292. # keep reference of sub problem used
  293. self._policy.setAlgo(self)
  294. # update solution using policy
  295. newSolution = self.update(self._bestSolution)
  296. # if better solution than currently, replace it
  297. if self.isBetter(newSolution):
  298. self._bestSolution = newSolution
  299. # increase number of evaluations
  300. self.increaseEvaluation()
  301. self.progress()
  302. # stop algorithm if necessary
  303. if self.stop():
  304. break
  305. logging.info(f"---- Current {newSolution} - SCORE {newSolution.fitness()}")
  306. logging.info(f"End of {type(self).__name__}, best solution found {self._bestSolution}")
  307. return self._bestSolution