multi.py 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553
"""Multi-objective classes algorithm
"""
  3. # main imports
  4. import logging
  5. import math
  6. import numpy as np
  7. import sys
  8. from macop.utils.progress import macop_text, macop_line, macop_progress
  9. # module imports
  10. from macop.algorithms.base import Algorithm
  11. from macop.evaluators.discrete.multi import WeightedSum
  12. class MOEAD(Algorithm):
  13. """Multi-Ojective Evolutionary Algorithm with Scalar Decomposition
  14. Attributes:
  15. mu: {int} -- number of sub problems
  16. T: {[float]} -- number of neightbors for each sub problem
  17. nObjectives: {int} -- number of objectives (based of number evaluator)
  18. initialiser: {function} -- basic function strategy to initialise solution
  19. evaluator: {[function]} -- list of basic function in order to obtained fitness (multiple objectives)
  20. operators: {[Operator]} -- list of operator to use when launching algorithm
  21. policy: {Policy} -- Policy class implementation strategy to select operators
  22. validator: {function} -- basic function to check if solution is valid or not under some constraints
  23. maximise: {bool} -- specify kind of optimisation problem
  24. verbose: {bool} -- verbose or not information about the algorithm
  25. population: [{Solution}] -- population of solution, one for each sub problem
  26. pfPop: [{Solution}] -- pareto front population
  27. weights: [[{float}]] -- random weights used for custom mu sub problems
  28. callbacks: {[Callback]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initialising algorithm
  29. parent: {Algorithm} -- parent algorithm reference in case of inner Algorithm instance (optional)
  30. >>> import random
  31. >>> # operators import
  32. >>> from macop.operators.discrete.crossovers import SimpleCrossover
  33. >>> from macop.operators.discrete.mutators import SimpleMutation
  34. >>> # policy import
  35. >>> from macop.policies.classicals import RandomPolicy
  36. >>> # solution and algorithm
  37. >>> from macop.solutions.discrete import BinarySolution
  38. >>> from macop.algorithms.multi import MOEAD
  39. >>> # evaluator import
  40. >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
  41. >>> # evaluator initialization (worths objects passed into data)
  42. >>> problem_size = 20
  43. >>> worths1 = [ random.randint(0, 20) for i in range(problem_size) ]
  44. >>> evaluator1 = KnapsackEvaluator(data={'worths': worths1})
  45. >>> worths2 = [ random.randint(10, 15) for i in range(problem_size) ]
  46. >>> evaluator2 = KnapsackEvaluator(data={'worths': worths2})
  47. >>> # validator specification (based on weights of each objects)
  48. >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
  49. >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution.getData()) if value == 1]) < 200 else False
  50. >>> # initialiser function with lambda function
  51. >>> initialiser = lambda x=20: BinarySolution.random(x, validator)
  52. >>> # operators list with crossover and mutation
  53. >>> operators = [SimpleCrossover(), SimpleMutation()]
  54. >>> policy = RandomPolicy(operators)
  55. >>> # MOEAD use multi-objective, hence list of evaluators with mu=100 and T=10
  56. >>> algo = MOEAD(20, 5, initialiser, [evaluator1, evaluator2], operators, policy, validator, maximise=True, verbose=False)
  57. >>> # run the algorithm and get the pareto front obtained
  58. >>> pf_solutions = algo.run(100)
  59. >>> # check size of expected pareto
  60. >>> len(pf_solutions)
  61. 33
  62. """
  63. def __init__(self,
  64. mu,
  65. T,
  66. initialiser,
  67. evaluator,
  68. operators,
  69. policy,
  70. validator,
  71. maximise=True,
  72. parent=None,
  73. verbose=True):
  74. """Multi-Ojective Evolutionary Algorithm with Scalar Decomposition initialisation
  75. Args:
  76. mu: {int} -- number of sub problems
  77. T: {[float]} -- number of neightbors for each sub problem
  78. initialiser: {function} -- basic function strategy to initialise solution
  79. evaluator: {[function]} -- list of basic function in order to obtained fitness (multiple objectives)
  80. operators: {[Operator]} -- list of operator to use when launching algorithm
  81. policy: {Policy} -- Policy class implementation strategy to select operators
  82. validator: {function} -- basic function to check if solution is valid or not under some constraints
  83. maximise: {bool} -- specify kind of optimisation problem
  84. parent: {Algorithm} -- parent algorithm reference in case of inner Algorithm instance (optional)
  85. verbose: {bool} -- verbose or not information about the algorithm
  86. """
  87. # redefinition of constructor to well use `initRun` method
  88. self.initialiser = initialiser
  89. self.evaluator = evaluator
  90. self.validator = validator
  91. self._operators = operators
  92. self.policy = policy
  93. self._callbacks = []
  94. # by default
  95. self._numberOfEvaluations = 0
  96. self._maxEvaluations = 0
  97. self._nObjectives = len(evaluator)
  98. # other parameters
  99. self._parent = parent # parent algorithm if it's sub algorithm
  100. #self.maxEvaluations = 0 # by default
  101. self._maximise = maximise
  102. self._verbose = verbose
  103. # track reference of algo into operator (keep an eye into best solution)
  104. for operator in self._operators:
  105. operator.setAlgo(self)
  106. # by default track reference for policy
  107. self.policy.setAlgo(self)
  108. if mu < T:
  109. raise ValueError('`mu` cannot be less than `T`')
  110. if mu < T:
  111. raise ValueError('`mu` cannot be less than `T`')
  112. self._mu = mu
  113. self._T = T
  114. # initialise neighbors for each sub problem
  115. self.setNeighbors()
  116. weights = []
  117. if self._nObjectives == 2:
  118. for i in range(self._mu):
  119. angle = math.pi / 2 * i / (self._mu - 1)
  120. weights.append([math.cos(angle), math.sin(angle)])
  121. elif self._nObjectives >= 3:
  122. # random weights using uniform
  123. for i in range(self._mu):
  124. w_i = np.random.uniform(0, 1, self._nObjectives)
  125. weights.append(w_i / sum(w_i))
  126. else:
  127. raise ValueError('Unvalid number of objectives')
  128. self._weights = weights
  129. self._subProblems = []
  130. for i in range(self._mu):
  131. # compute weight sum from solution
  132. sub_evaluator = WeightedSum(data={
  133. 'evaluators': evaluator,
  134. 'weights': weights[i]
  135. })
  136. # intialize each sub problem
  137. # use copy of list to keep track for each sub problem
  138. subProblem = MOSubProblem(i,
  139. weights[i],
  140. initialiser,
  141. sub_evaluator,
  142. operators.copy(),
  143. policy,
  144. validator,
  145. maximise,
  146. self,
  147. verbose=self._verbose)
  148. self._subProblems.append(subProblem)
  149. self.population = [None for n in range(self._mu)]
  150. self._pfPop = []
  151. # ref point based on number of evaluators
  152. if self._maximise:
  153. self._refPoint = [0 for _ in range(self._nObjectives)]
  154. else:
  155. self._refPoint = [
  156. sys.float_info.max for _ in range(self._nObjectives)
  157. ]
  158. def initRun(self):
  159. """
  160. Method which initialiazes or re-initialises the whole algorithm context specifically for MOEAD
  161. """
  162. # initialization is done during run method
  163. pass
  164. def run(self, evaluations):
  165. """
  166. Run the local search algorithm
  167. Args:
  168. evaluations: {int} -- number of Local search evaluations
  169. Returns:
  170. {Solution} -- best solution found
  171. """
  172. # by default use of mother method to initialise variables
  173. super().run(evaluations)
  174. # enable callback resume for MOEAD
  175. self.resume()
  176. # initialise each sub problem if no backup
  177. for i in range(self._mu):
  178. if self._subProblems[i]._bestSolution is None:
  179. self._subProblems[i].run(1)
  180. self.population[i] = self._subProblems[i]._bestSolution
  181. # if no backup for pf population
  182. if len(self._pfPop) == 0:
  183. for i in range(self._mu):
  184. self._pfPop.append(self._subProblems[i]._bestSolution)
  185. # MOEAD algorithm implementation
  186. while not self.stop():
  187. for i in range(self._mu):
  188. # run 1 iteration into sub problem `i`
  189. self._subProblems[i].run(1)
  190. spBestSolution = self._subProblems[i]._bestSolution
  191. self.updateRefPoint(spBestSolution)
  192. # for each neighbor of current sub problem update solution if better
  193. improvment = False
  194. for j in self._neighbors[i]:
  195. if spBestSolution.fitness(
  196. ) > self._subProblems[j]._bestSolution.fitness():
  197. # create new solution based on current new if better, computes fitness associated to new solution for sub problem
  198. newSolution = spBestSolution.clone()
  199. # evaluate solution for new sub problem and update as best solution
  200. self._subProblems[j].evaluate(newSolution)
  201. self._subProblems[j]._bestSolution = newSolution
  202. # update population solution for this sub problem
  203. self.population[j] = newSolution
  204. improvment = True
  205. # add new solution if improvment is idenfity
  206. if improvment:
  207. self._pfPop.append(spBestSolution)
  208. # update pareto front
  209. self._pfPop = self.paretoFront(self._pfPop)
  210. # add progress here
  211. self.progress()
  212. # stop algorithm if necessary
  213. if self.stop():
  214. break
  215. logging.info(
  216. f"End of {type(self).__name__}, best solution found {self.population}"
  217. )
  218. self.end()
  219. return self._pfPop
  220. def progress(self):
  221. """
  222. Log progress and apply callbacks if necessary
  223. """
  224. if len(self._callbacks) > 0:
  225. for callback in self._callbacks:
  226. callback.run()
  227. macop_progress(self, self.getGlobalEvaluation(),
  228. self.getGlobalMaxEvaluation())
  229. logging.info(
  230. f"-- {type(self).__name__} evaluation {self._numberOfEvaluations} of {self._maxEvaluations} ({((self._numberOfEvaluations) / self._maxEvaluations * 100.):.2f}%)"
  231. )
  232. def setNeighbors(self):
  233. if self._T % 2 == 1:
  234. dmin = -int(self._T / 2)
  235. dmax = int(self._T / 2) + 1
  236. else:
  237. dmin = -int(self._T / 2) + 1
  238. dmax = int(+self._T / 2)
  239. # init neighbord list
  240. self._neighbors = [[] for n in range(self._mu)]
  241. for direction in range(0, -dmin):
  242. for i in range(self._T):
  243. self._neighbors[direction].append(i)
  244. for direction in range(-dmin, self._mu - dmax):
  245. for i in range(direction + dmin, direction + dmax - 1):
  246. self._neighbors[direction].append(i)
  247. for direction in range(self._mu - dmax, self._mu):
  248. for i in range(self._mu - self._T, self._mu):
  249. self._neighbors[direction].append(i)
  250. def updateRefPoint(self, solution):
  251. if self._maximise:
  252. for i in range(len(self.evaluator)):
  253. if solution.scores[i] > self._refPoint[i]:
  254. self._refPoint[i] = solution.scores[i]
  255. else:
  256. for i in range(len(self.evaluator)):
  257. if solution.scores[i] < self._refPoint[i]:
  258. self._refPoint[i] = solution.scores[i]
  259. def paretoFront(self, population):
  260. paFront = []
  261. indexes = []
  262. nObjectives = len(self.evaluator)
  263. nSolutions = len(population)
  264. # find dominated solution
  265. for i in range(nSolutions):
  266. for j in range(nSolutions):
  267. if j in indexes:
  268. continue
  269. # check if solutions are the same
  270. if all([
  271. population[i]._data[k] == population[j]._data[k]
  272. for k in range(len(population[i]._data))
  273. ]):
  274. continue
  275. nDominated = 0
  276. # check number of dominated objectives of current solution by the others solution
  277. for k in range(len(self.evaluator)):
  278. if self._maximise:
  279. if population[i].scores[k] < population[j].scores[k]:
  280. nDominated += 1
  281. else:
  282. if population[i].scores[k] > population[j].scores[k]:
  283. nDominated += 1
  284. if nDominated == nObjectives:
  285. indexes.append(i)
  286. break
  287. # store the non dominated solution into pareto front
  288. for i in range(nSolutions):
  289. if i not in indexes:
  290. paFront.append(population[i])
  291. return paFront
  292. def getResult(self):
  293. """Get the expected result of the current algorithm
  294. Returns:
  295. [{Solution}] -- pareto front population
  296. """
  297. return self._pfPop
  298. def setDefaultResult(self, result):
  299. """Set current default result of the algorithm
  300. Args:
  301. result: {object} -- expected result data of the current algorithm
  302. """
  303. self._pfPop = result
  304. def end(self):
  305. """Display end message into `run` method
  306. """
  307. macop_text(
  308. self,
  309. f'({type(self).__name__}) Found after {self._numberOfEvaluations} evaluations'
  310. )
  311. for i, solution in enumerate(self._pfPop):
  312. macop_text(self, f' - [{i}] {solution.scores} : {solution}')
  313. macop_line(self)
  314. def information(self):
  315. logging.info("-- Pareto front :")
  316. for i, solution in enumerate(self._pfPop):
  317. logging.info(f"-- {i}] SCORE {solution.scores} - {solution}")
  318. def __str__(self):
  319. return f"{type(self).__name__} using {type(self.population).__name__}"
  320. class MOSubProblem(Algorithm):
  321. """Specific MO sub problem used into MOEAD
  322. Attributes:
  323. index: {int} -- sub problem index
  324. weights: {[float]} -- sub problems objectives weights
  325. initalizer: {function} -- basic function strategy to initialise solution
  326. evaluator: {function} -- basic function in order to obtained fitness (mono or multiple objectives)
  327. operators: {[Operator]} -- list of operator to use when launching algorithm
  328. policy: {Policy} -- Policy class implementation strategy to select operators
  329. validator: {function} -- basic function to check if solution is valid or not under some constraints
  330. maximise: {bool} -- specify kind of optimisation problem
  331. verbose: {bool} -- verbose or not information about the algorithm
  332. currentSolution: {Solution} -- current solution managed for current evaluation
  333. bestSolution: {Solution} -- best solution found so far during running algorithm
  334. callbacks: {[Callback]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initialising algorithm
  335. parent: {Algorithm} -- parent algorithm reference in case of inner Algorithm instance (optional)
  336. Example:
  337. >>> import random
  338. >>> # operators import
  339. >>> from macop.operators.discrete.crossovers import SimpleCrossover
  340. >>> from macop.operators.discrete.mutators import SimpleMutation
  341. >>> # policy import
  342. >>> from macop.policies.classicals import RandomPolicy
  343. >>> # solution and algorithm
  344. >>> from macop.solutions.discrete import BinarySolution
  345. >>> from macop.algorithms.multi import MOEAD, MOSubProblem
  346. >>> # evaluator import
  347. >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
  348. >>> # evaluator initialization (worths objects passed into data)
  349. >>> problem_size = 20
  350. >>> worths1 = [ random.randint(0, 20) for i in range(problem_size) ]
  351. >>> evaluator1 = KnapsackEvaluator(data={'worths': worths1})
  352. >>> worths2 = [ random.randint(10, 15) for i in range(problem_size) ]
  353. >>> evaluator2 = KnapsackEvaluator(data={'worths': worths2})
  354. >>> # validator specification (based on weights of each objects)
  355. >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
  356. >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution.getData()) if value == 1]) < 200 else False
  357. >>> # initialiser function with lambda function
  358. >>> initialiser = lambda x=20: BinarySolution.random(x, validator)
  359. >>> # operators list with crossover and mutation
  360. >>> operators = [SimpleCrossover(), SimpleMutation()]
  361. >>> policy = RandomPolicy(operators)
  362. >>> algo = MOEAD(20, 5, initialiser, [evaluator1, evaluator2], operators, policy, validator, maximise=True, verbose=False)
  363. >>> # weights of the sub problem
  364. >>> sub_problem_weights = [0.4, 0.6]
  365. >>> sub_evaluator = WeightedSum(data={'evaluators': [evaluator1, evaluator2], 'weights': sub_problem_weights})
  366. >>> # first parameter is the index of the MOSubProblem
  367. >>> subProblem = MOSubProblem(0, sub_problem_weights, initialiser, sub_evaluator, operators, policy, validator, maximise=True, parent=algo, verbose=False)
  368. >>> # run the algorithm
  369. >>> solution = subProblem.run(100)
  370. >>> solution._score
  371. 133.0
  372. """
  373. def __init__(self,
  374. index,
  375. weights,
  376. initalizer,
  377. evaluator,
  378. operators,
  379. policy,
  380. validator,
  381. maximise=True,
  382. parent=None,
  383. verbose=True):
  384. """Specific multi-objective sub problem used into MOEAD
  385. Args:
  386. index: {int} -- sub problem index
  387. weights: {[float]} -- sub problems objectives weights
  388. initalizer: {function} -- basic function strategy to initialise solution
  389. evaluator: {function} -- basic function in order to obtained fitness (mono or multiple objectives)
  390. operators: {[Operator]} -- list of operator to use when launching algorithm
  391. policy: {Policy} -- Policy class implementation strategy to select operators
  392. validator: {function} -- basic function to check if solution is valid or not under some constraints
  393. maximise: {bool} -- specify kind of optimisation problem
  394. parent: {Algorithm} -- parent algorithm reference in case of inner Algorithm instance (optional)
  395. verbose: {bool} -- verbose or not information about the algorithm
  396. """
  397. super().__init__(initalizer, evaluator, operators, policy, validator,
  398. maximise, parent)
  399. self._index = index
  400. self._weights = weights
  401. self._verbose = verbose
  402. def run(self, evaluations):
  403. """
  404. Run the local search algorithm
  405. Args:
  406. evaluations: {int} -- number of evaluations
  407. Returns:
  408. {Solution} -- best solution found
  409. """
  410. # by default use of mother method to initialise variables
  411. super().run(evaluations)
  412. # initialise solution if necessary
  413. if self._bestSolution is None:
  414. self.initRun()
  415. # new operators list keep track of current sub problem
  416. for op in self._operators:
  417. op.setAlgo(self)
  418. for _ in range(evaluations):
  419. # keep reference of sub problem used
  420. self.policy.setAlgo(self)
  421. # update solution using policy
  422. newSolution = self.update(self._bestSolution)
  423. # if better solution than currently, replace it
  424. if self.isBetter(newSolution):
  425. self._bestSolution = newSolution
  426. # increase number of evaluations
  427. self.increaseEvaluation()
  428. self.progress()
  429. # stop algorithm if necessary
  430. if self.stop():
  431. break
  432. logging.info(
  433. f"---- Current {newSolution} - SCORE {newSolution.fitness()}")
  434. logging.info(
  435. f"End of {type(self).__name__}, best solution found {self._bestSolution}"
  436. )
  437. return self._bestSolution