  1. """Multi-objetive classes algorithm
  2. """
  3. # main imports
  4. import logging
  5. import math
  6. import numpy as np
  7. import sys
  8. from macop.utils.progress import macop_text, macop_line, macop_progress
  9. # module imports
  10. from macop.algorithms.base import Algorithm
  11. from macop.evaluators.discrete.multi import WeightedSum


class MOEAD(Algorithm):
    """Multi-Objective Evolutionary Algorithm with Scalar Decomposition

    Attributes:
        mu: {int} -- number of sub problems
        T: {int} -- number of neighbours for each sub problem
        nObjectives: {int} -- number of objectives (based on the number of evaluators)
        initialiser: {function} -- basic function strategy to initialise a solution
        evaluator: {[function]} -- list of basic functions used to obtain fitness (multiple objectives)
        operators: {[Operator]} -- list of operators to use when launching the algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check if a solution is valid or not under some constraints
        maximise: {bool} -- specify the kind of optimisation problem
        verbose: {bool} -- verbose or not information about the algorithm
        population: [{Solution}] -- population of solutions, one for each sub problem
        pfPop: [{Solution}] -- Pareto front population
        weights: [[{float}]] -- weights used to define the `mu` sub problems
        callbacks: {[Callback]} -- list of Callback class implementations to run some instructions every number of evaluations and `load` when initialising the algorithm
        parent: {Algorithm} -- parent algorithm reference in case of an inner Algorithm instance (optional)

    Example:

    >>> import random
    >>>
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>>
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>>
    >>> # solution and algorithm imports
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.multi import MOEAD
    >>>
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>>
    >>> # evaluator initialisation (worths of objects passed into data)
    >>> problem_size = 20
    >>> worths1 = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator1 = KnapsackEvaluator(data={'worths': worths1})
    >>> worths2 = [ random.randint(10, 15) for i in range(problem_size) ]
    >>> evaluator2 = KnapsackEvaluator(data={'worths': worths2})
    >>>
    >>> # validator specification (based on the weights of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution.getData()) if value == 1]) < 200 else False
    >>>
    >>> # initialiser function for binary solution using specific solution size
    >>> initialiser = lambda x=20: BinarySolution.random(x, validator)
    >>>
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>>
    >>> # MOEAD is multi-objective, hence the list of evaluators, here with mu=20 and T=5
    >>> algo = MOEAD(20, 5, initialiser, [evaluator1, evaluator2], operators, policy, validator, maximise=True, verbose=False)
    >>>
    >>> # run the algorithm and get the Pareto front obtained
    >>> pf_solutions = algo.run(100)
    >>>
    >>> # check size of expected Pareto front
    >>> len(pf_solutions)
    33
    """

    def __init__(self,
                 mu,
                 T,
                 initialiser,
                 evaluator,
                 operators,
                 policy,
                 validator,
                 maximise=True,
                 parent=None,
                 verbose=True):
        """Multi-Objective Evolutionary Algorithm with Scalar Decomposition initialisation

        Args:
            mu: {int} -- number of sub problems
            T: {int} -- number of neighbours for each sub problem
            initialiser: {function} -- basic function strategy to initialise a solution
            evaluator: {[function]} -- list of basic functions used to obtain fitness (multiple objectives)
            operators: {[Operator]} -- list of operators to use when launching the algorithm
            policy: {Policy} -- Policy class implementation strategy to select operators
            validator: {function} -- basic function to check if a solution is valid or not under some constraints
            maximise: {bool} -- specify the kind of optimisation problem
            parent: {Algorithm} -- parent algorithm reference in case of an inner Algorithm instance (optional)
            verbose: {bool} -- verbose or not information about the algorithm
        """

        # redefinition of the constructor to properly use the `initRun` method
        self.initialiser = initialiser
        self.evaluator = evaluator
        self.validator = validator

        self._operators = operators
        self.policy = policy
        self._callbacks = []

        # by default
        self._numberOfEvaluations = 0
        self._maxEvaluations = 0
        self._nObjectives = len(evaluator)

        # other parameters
        self._parent = parent  # parent algorithm if it's a sub algorithm
        #self.maxEvaluations = 0 # by default

        self._maximise = maximise
        self._verbose = verbose

        # track reference of the algorithm into each operator (keep an eye on the best solution)
        for operator in self._operators:
            operator.setAlgo(self)

        # by default track reference for the policy
        self.policy.setAlgo(self)

        if mu < T:
            raise ValueError('`mu` cannot be less than `T`')

        self._mu = mu
        self._T = T

        # initialise neighbours for each sub problem
        self.setNeighbors()

        # build one weight vector per sub problem
        weights = []

        if self._nObjectives == 2:
            # evenly spread weights over the quarter circle for the bi-objective case
            for i in range(self._mu):
                angle = math.pi / 2 * i / (self._mu - 1)
                weights.append([math.cos(angle), math.sin(angle)])
        elif self._nObjectives >= 3:
            # random weights using a uniform distribution, normalised to sum to 1
            for i in range(self._mu):
                w_i = np.random.uniform(0, 1, self._nObjectives)
                weights.append(w_i / sum(w_i))
        else:
            raise ValueError('Invalid number of objectives')

        self._weights = weights

        self._subProblems = []

        for i in range(self._mu):

            # compute the weighted sum of objectives for each solution
            sub_evaluator = WeightedSum(data={
                'evaluators': evaluator,
                'weights': weights[i]
            })

            # initialise each sub problem
            # use a copy of the operators list to keep track for each sub problem
            subProblem = MOSubProblem(i,
                                      weights[i],
                                      initialiser,
                                      sub_evaluator,
                                      operators.copy(),
                                      policy,
                                      validator,
                                      maximise,
                                      self,
                                      verbose=self._verbose)

            self._subProblems.append(subProblem)

        self.population = [None for n in range(self._mu)]
        self._pfPop = []

        # reference point based on the number of evaluators
        if self._maximise:
            self._refPoint = [0 for _ in range(self._nObjectives)]
        else:
            self._refPoint = [
                sys.float_info.max for _ in range(self._nObjectives)
            ]

    def initRun(self):
        """
        Method which initialises or re-initialises the whole algorithm context specifically for MOEAD
        """
        # initialisation is done during the run method
        pass

    def run(self, evaluations):
        """
        Run the MOEAD algorithm

        Args:
            evaluations: {int} -- number of evaluations

        Returns:
            {[Solution]} -- Pareto front population found
        """

        # by default use the parent method to initialise variables
        super().run(evaluations)

        # enable callback resume for MOEAD
        self.resume()

        # initialise each sub problem if there is no backup
        for i in range(self._mu):

            if self._subProblems[i]._bestSolution is None:
                self._subProblems[i].run(1)
                self.population[i] = self._subProblems[i]._bestSolution

        # if there is no backup for the Pareto front population
        if len(self._pfPop) == 0:
            for i in range(self._mu):
                self._pfPop.append(self._subProblems[i]._bestSolution)

        # MOEAD algorithm implementation
        while not self.stop():

            for i in range(self._mu):

                # run 1 iteration into sub problem `i`
                self._subProblems[i].run(1)
                spBestSolution = self._subProblems[i]._bestSolution

                self.updateRefPoint(spBestSolution)

                # for each neighbour of the current sub problem, update the solution if better
                improvement = False
                for j in self._neighbors[i]:
                    if spBestSolution.fitness() > self._subProblems[j]._bestSolution.fitness():

                        # create a new solution based on the current better one and compute its fitness for this sub problem
                        newSolution = spBestSolution.clone()

                        # evaluate the solution for the neighbouring sub problem and keep it as its best solution
                        self._subProblems[j].evaluate(newSolution)
                        self._subProblems[j]._bestSolution = newSolution

                        # update the population solution for this sub problem
                        self.population[j] = newSolution

                        improvement = True

                # add the new solution if an improvement is identified
                if improvement:
                    self._pfPop.append(spBestSolution)

                # update the Pareto front
                self._pfPop = self.paretoFront(self._pfPop)

                # add progress here
                self.progress()

                # stop the algorithm if necessary
                if self.stop():
                    break

        logging.info(
            f"End of {type(self).__name__}, best solution found {self.population}"
        )

        self.end()

        return self._pfPop

    def progress(self):
        """
        Log progress and apply callbacks if necessary
        """
        if len(self._callbacks) > 0:
            for callback in self._callbacks:
                callback.run()

        macop_progress(self, self.getGlobalEvaluation(),
                       self.getGlobalMaxEvaluation())

        logging.info(
            f"-- {type(self).__name__} evaluation {self._numberOfEvaluations} of {self._maxEvaluations} ({((self._numberOfEvaluations) / self._maxEvaluations * 100.):.2f}%)"
        )

    def setNeighbors(self):
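        """Initialise, for each of the `mu` sub problems, the list of indexes of its `T` neighbouring sub problems
        """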

        if self._T % 2 == 1:
            dmin = -int(self._T / 2)
            dmax = int(self._T / 2) + 1
        else:
            dmin = -int(self._T / 2) + 1
            dmax = int(+self._T / 2)

        # init neighbours list
        self._neighbors = [[] for n in range(self._mu)]

        # sub problems near the lower boundary share the first `T` indexes
        for direction in range(0, -dmin):
            for i in range(self._T):
                self._neighbors[direction].append(i)

        # middle sub problems use a window of indexes around their own index
        for direction in range(-dmin, self._mu - dmax):
            for i in range(direction + dmin, direction + dmax - 1):
                self._neighbors[direction].append(i)

        # sub problems near the upper boundary share the last `T` indexes
        for direction in range(self._mu - dmax, self._mu):
            for i in range(self._mu - self._T, self._mu):
                self._neighbors[direction].append(i)

    def updateRefPoint(self, solution):
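        """Update the reference point with the objective scores of a solution whenever they improve upon it
        """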

        if self._maximise:
            for i in range(len(self.evaluator)):
                if solution.scores[i] > self._refPoint[i]:
                    self._refPoint[i] = solution.scores[i]
        else:
            for i in range(len(self.evaluator)):
                if solution.scores[i] < self._refPoint[i]:
                    self._refPoint[i] = solution.scores[i]

    def paretoFront(self, population):
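        """Return the non-dominated solutions of a population

        Args:
            population: {[Solution]} -- population of solutions to filter

        Returns:
            {[Solution]} -- solutions of the population which are not dominated by any other one
        """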

        paFront = []
        indexes = []

        nObjectives = len(self.evaluator)
        nSolutions = len(population)

        # find dominated solutions
        for i in range(nSolutions):
            for j in range(nSolutions):

                if j in indexes:
                    continue

                # check if solutions are the same
                if all([
                        population[i]._data[k] == population[j]._data[k]
                        for k in range(len(population[i]._data))
                ]):
                    continue

                nDominated = 0

                # count how many objectives of the current solution are dominated by the other solution
                for k in range(len(self.evaluator)):
                    if self._maximise:
                        if population[i].scores[k] < population[j].scores[k]:
                            nDominated += 1
                    else:
                        if population[i].scores[k] > population[j].scores[k]:
                            nDominated += 1

                if nDominated == nObjectives:
                    indexes.append(i)
                    break

        # store the non-dominated solutions into the Pareto front
        for i in range(nSolutions):
            if i not in indexes:
                paFront.append(population[i])

        return paFront

    def getResult(self):
        """Get the expected result of the current algorithm

        Returns:
            [{Solution}] -- Pareto front population
        """
        return self._pfPop

    def setDefaultResult(self, result):
        """Set current default result of the algorithm

        Args:
            result: {object} -- expected result data of the current algorithm
        """
        self._pfPop = result

    def end(self):
        """Display end message into `run` method
        """
        macop_text(
            self,
            f'({type(self).__name__}) Found after {self._numberOfEvaluations} evaluations'
        )

        for i, solution in enumerate(self._pfPop):
            macop_text(self, f'  - [{i}] {solution.scores} : {solution}')

        macop_line(self)

    def information(self):
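        """Log the current Pareto front solutions and their scores
        """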
        logging.info("-- Pareto front :")

        for i, solution in enumerate(self._pfPop):
            logging.info(f"-- [{i}] SCORE {solution.scores} - {solution}")

    def __str__(self):
        return f"{type(self).__name__} using {type(self.population).__name__}"


class MOSubProblem(Algorithm):
    """Specific multi-objective sub problem used inside MOEAD

    Attributes:
        index: {int} -- sub problem index
        weights: {[float]} -- sub problem objectives weights
        initalizer: {function} -- basic function strategy to initialise a solution
        evaluator: {function} -- basic function used to obtain fitness (mono or multiple objectives)
        operators: {[Operator]} -- list of operators to use when launching the algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check if a solution is valid or not under some constraints
        maximise: {bool} -- specify the kind of optimisation problem
        verbose: {bool} -- verbose or not information about the algorithm
        currentSolution: {Solution} -- current solution managed for the current evaluation
        bestSolution: {Solution} -- best solution found so far while running the algorithm
        callbacks: {[Callback]} -- list of Callback class implementations to run some instructions every number of evaluations and `load` when initialising the algorithm
        parent: {Algorithm} -- parent algorithm reference in case of an inner Algorithm instance (optional)

    Example:

    >>> import random
    >>>
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>>
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>>
    >>> # solution and algorithm imports
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.multi import MOEAD, MOSubProblem
    >>>
    >>> # evaluator imports
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>> from macop.evaluators.discrete.multi import WeightedSum
    >>>
    >>> # evaluator initialisation (worths of objects passed into data)
    >>> problem_size = 20
    >>> worths1 = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator1 = KnapsackEvaluator(data={'worths': worths1})
    >>> worths2 = [ random.randint(10, 15) for i in range(problem_size) ]
    >>> evaluator2 = KnapsackEvaluator(data={'worths': worths2})
    >>>
    >>> # validator specification (based on the weights of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution.getData()) if value == 1]) < 200 else False
    >>>
    >>> # initialiser function for binary solution using specific solution size
    >>> initialiser = lambda x=20: BinarySolution.random(x, validator)
    >>>
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> algo = MOEAD(20, 5, initialiser, [evaluator1, evaluator2], operators, policy, validator, maximise=True, verbose=False)
    >>>
    >>> # weights of the sub problem
    >>> sub_problem_weights = [0.4, 0.6]
    >>> sub_evaluator = WeightedSum(data={'evaluators': [evaluator1, evaluator2], 'weights': sub_problem_weights})
    >>>
    >>> # first parameter is the index of the MOSubProblem
    >>> subProblem = MOSubProblem(0, sub_problem_weights, initialiser, sub_evaluator, operators, policy, validator, maximise=True, parent=algo, verbose=False)
    >>>
    >>> # run the algorithm
    >>> solution = subProblem.run(100)
    >>> solution._score
    133.0
    """

    def __init__(self,
                 index,
                 weights,
                 initalizer,
                 evaluator,
                 operators,
                 policy,
                 validator,
                 maximise=True,
                 parent=None,
                 verbose=True):
        """Specific multi-objective sub problem used inside MOEAD initialisation

        Args:
            index: {int} -- sub problem index
            weights: {[float]} -- sub problem objectives weights
            initalizer: {function} -- basic function strategy to initialise a solution
            evaluator: {function} -- basic function used to obtain fitness (mono or multiple objectives)
            operators: {[Operator]} -- list of operators to use when launching the algorithm
            policy: {Policy} -- Policy class implementation strategy to select operators
            validator: {function} -- basic function to check if a solution is valid or not under some constraints
            maximise: {bool} -- specify the kind of optimisation problem
            parent: {Algorithm} -- parent algorithm reference in case of an inner Algorithm instance (optional)
            verbose: {bool} -- verbose or not information about the algorithm
        """
        super().__init__(initalizer, evaluator, operators, policy, validator,
                         maximise, parent)

        self._index = index
        self._weights = weights

        self._verbose = verbose

    def run(self, evaluations):
        """
        Run the sub problem local search

        Args:
            evaluations: {int} -- number of evaluations

        Returns:
            {Solution} -- best solution found
        """

        # by default use the parent method to initialise variables
        super().run(evaluations)

        # initialise the solution if necessary
        if self._bestSolution is None:
            self.initRun()

        # make each operator keep track of the current sub problem
        for op in self._operators:
            op.setAlgo(self)

        for _ in range(evaluations):

            # keep reference of the sub problem used
            self.policy.setAlgo(self)

            # update the current solution using the policy
            newSolution = self.update(self._bestSolution)

            # if the new solution is better than the current one, replace it
            if self.isBetter(newSolution):
                self._bestSolution = newSolution

            # increase the number of evaluations
            self.increaseEvaluation()

            self.progress()

            # stop the algorithm if necessary
            if self.stop():
                break

            logging.info(
                f"---- Current {newSolution} - SCORE {newSolution.fitness()}")

        logging.info(
            f"End of {type(self).__name__}, best solution found {self._bestSolution}"
        )

        return self._bestSolution
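

# The guarded block below is a small usage sketch and is not part of the original
# macop API: it mirrors the MOEAD doctest above so the module can be executed
# directly to inspect a Pareto front. The problem data (worths, weights, sizes)
# and the evaluation budget are illustrative assumptions.
if __name__ == "__main__":
    import random

    from macop.operators.discrete.crossovers import SimpleCrossover
    from macop.operators.discrete.mutators import SimpleMutation
    from macop.policies.classicals import RandomPolicy
    from macop.solutions.discrete import BinarySolution
    from macop.evaluators.discrete.mono import KnapsackEvaluator

    # two knapsack objectives built from random worths
    problem_size = 20
    evaluator1 = KnapsackEvaluator(data={'worths': [random.randint(0, 20) for _ in range(problem_size)]})
    evaluator2 = KnapsackEvaluator(data={'worths': [random.randint(10, 15) for _ in range(problem_size)]})

    # capacity constraint based on random object weights
    object_weights = [random.randint(5, 30) for _ in range(problem_size)]
    validator = lambda solution: sum([object_weights[i] for i, value in enumerate(solution.getData()) if value == 1]) < 200

    # solution initialiser, operators and selection policy
    initialiser = lambda x=problem_size: BinarySolution.random(x, validator)
    operators = [SimpleCrossover(), SimpleMutation()]
    policy = RandomPolicy(operators)

    # mu=20 sub problems with T=5 neighbours each
    algo = MOEAD(20, 5, initialiser, [evaluator1, evaluator2], operators, policy, validator, maximise=True, verbose=False)
    pareto_front = algo.run(1000)

    # display the objective scores of each non-dominated solution found
    for i, solution in enumerate(pareto_front):
        print(f"[{i}] scores: {solution.scores}")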