# multi.py
  1. """Multi-objetive classes algorithm
  2. """
  3. # main imports
  4. import logging
  5. import math
  6. import numpy as np
  7. import sys
  8. from macop.utils.progress import macop_text, macop_line, macop_progress
  9. # module imports
  10. from macop.algorithms.base import Algorithm
  11. from macop.evaluators.discrete.multi import WeightedSum
  12. class MOEAD(Algorithm):
  13. """Multi-Ojective Evolutionary Algorithm with Scalar Decomposition
  14. Attributes:
  15. mu: {int} -- number of sub problems
  16. T: {[float]} -- number of neightbors for each sub problem
  17. nObjectives: {int} -- number of objectives (based of number evaluator)
  18. initialiser: {function} -- basic function strategy to initialise solution
  19. evaluator: {[function]} -- list of basic function in order to obtained fitness (multiple objectives)
  20. operators: {[:class:`~macop.operators.base.Operator`]} -- list of operator to use when launching algorithm
  21. policy: {:class:`~macop.policies.base.Policy`} -- Policy class implementation strategy to select operators
  22. validator: {function} -- basic function to check if solution is valid or not under some constraints
  23. maximise: {bool} -- specify kind of optimisation problem
  24. verbose: {bool} -- verbose or not information about the algorithm
  25. population: [{:class:`~macop.solutions.base.Solution`}] -- population of solution, one for each sub problem
  26. pfPop: [{:class:`~macop.solutions.base.Solution`}] -- pareto front population
  27. weights: [[{float}]] -- random weights used for custom mu sub problems
  28. callbacks: {[:class:`~macop.callbacks.base.Callback`]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initialising algorithm
  29. parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of inner Algorithm instance (optional)
  30. >>> import random
  31. >>>
  32. >>> # operators import
  33. >>> from macop.operators.discrete.crossovers import SimpleCrossover
  34. >>> from macop.operators.discrete.mutators import SimpleMutation
  35. >>>
  36. >>> # policy import
  37. >>> from macop.policies.classicals import RandomPolicy
  38. >>>
  39. >>> # solution and algorithm imports
  40. >>> from macop.solutions.discrete import BinarySolution
  41. >>> from macop.algorithms.multi import MOEAD
  42. >>>
  43. >>> # evaluator import
  44. >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
  45. >>>
  46. >>> # evaluator initialization (worths objects passed into data)
  47. >>> problem_size = 20
  48. >>> worths1 = [ random.randint(0, 20) for i in range(problem_size) ]
  49. >>> evaluator1 = KnapsackEvaluator(data={'worths': worths1})
  50. >>> worths2 = [ random.randint(10, 15) for i in range(problem_size) ]
  51. >>> evaluator2 = KnapsackEvaluator(data={'worths': worths2})
  52. >>>
  53. >>> # validator specification (based on weights of each objects)
  54. >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
  55. >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution.data) if value == 1]) < 200 else False
  56. >>>
  57. >>> # initialiser function for binary solution using specific solution size
  58. >>> initialiser = lambda x=20: BinarySolution.random(x, validator)
  59. >>>
  60. >>> # operators list with crossover and mutation
  61. >>> operators = [SimpleCrossover(), SimpleMutation()]
  62. >>> policy = RandomPolicy(operators)
  63. >>>
  64. >>> # MOEAD use multi-objective, hence list of evaluators with mu=100 and T=10
  65. >>> algo = MOEAD(20, 5, initialiser, [evaluator1, evaluator2], operators, policy, validator, maximise=True, verbose=False)
  66. >>>
  67. >>> # run the algorithm and get the pareto front obtained
  68. >>> pf_solutions = algo.run(100)
  69. >>>
  70. >>> # check size of expected pareto
  71. >>> len(pf_solutions)
  72. 33
  73. """
  74. def __init__(self,
  75. mu,
  76. T,
  77. initialiser,
  78. evaluator,
  79. operators,
  80. policy,
  81. validator,
  82. maximise=True,
  83. parent=None,
  84. verbose=True):
  85. """Multi-Ojective Evolutionary Algorithm with Scalar Decomposition initialisation
  86. Args:
  87. mu: {int} -- number of sub problems
  88. T: {[float]} -- number of neightbors for each sub problem
  89. initialiser: {function} -- basic function strategy to initialise solution
  90. evaluator: {[function]} -- list of basic function in order to obtained fitness (multiple objectives)
  91. operators: {[:class:`~macop.operators.base.Operator`]} -- list of operator to use when launching algorithm
  92. policy: {:class:`~macop.policies.base.Policy`} -- Policy class implementation strategy to select operators
  93. validator: {function} -- basic function to check if solution is valid or not under some constraints
  94. maximise: {bool} -- specify kind of optimisation problem
  95. parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of inner Algorithm instance (optional)
  96. verbose: {bool} -- verbose or not information about the algorithm
  97. """
  98. # redefinition of constructor to well use `initRun` method
  99. self.initialiser = initialiser
  100. self.evaluator = evaluator
  101. self.validator = validator
  102. self.operators = operators
  103. self.policy = policy
  104. self._callbacks = []
  105. # by default
  106. self._numberOfEvaluations = 0
  107. self._maxEvaluations = 0
  108. self._nObjectives = len(evaluator)
  109. # other parameters
  110. self._parent = parent # parent algorithm if it's sub algorithm
  111. #self.maxEvaluations = 0 # by default
  112. self._maximise = maximise
  113. self._verbose = verbose
  114. # track reference of algo into operator (keep an eye into best solution)
  115. for operator in self.operators:
  116. operator.setAlgo(self)
  117. # by default track reference for policy
  118. self.policy.setAlgo(self)
  119. if mu < T:
  120. raise ValueError('`mu` cannot be less than `T`')
  121. if mu < T:
  122. raise ValueError('`mu` cannot be less than `T`')
  123. self._mu = mu
  124. self._T = T
  125. # initialise neighbors for each sub problem
  126. self.setNeighbors()
  127. weights = []
  128. if self._nObjectives == 2:
  129. for i in range(self._mu):
  130. angle = math.pi / 2 * i / (self._mu - 1)
  131. weights.append([math.cos(angle), math.sin(angle)])
  132. elif self._nObjectives >= 3:
  133. # random weights using uniform
  134. for i in range(self._mu):
  135. w_i = np.random.uniform(0, 1, self._nObjectives)
  136. weights.append(w_i / sum(w_i))
  137. else:
  138. raise ValueError('Unvalid number of objectives')
  139. self._weights = weights
  140. self._subProblems = []
  141. for i in range(self._mu):
  142. # compute weight sum from solution
  143. sub_evaluator = WeightedSum(data={
  144. 'evaluators': evaluator,
  145. 'weights': weights[i]
  146. })
  147. # intialize each sub problem
  148. # use copy of list to keep track for each sub problem
  149. subProblem = MOSubProblem(i,
  150. weights[i],
  151. initialiser,
  152. sub_evaluator,
  153. operators.copy(),
  154. policy,
  155. validator,
  156. maximise,
  157. self,
  158. verbose=self._verbose)
  159. self._subProblems.append(subProblem)
  160. self.population = [None for n in range(self._mu)]
  161. self._pfPop = []
  162. # ref point based on number of evaluators
  163. if self._maximise:
  164. self._refPoint = [0 for _ in range(self._nObjectives)]
  165. else:
  166. self._refPoint = [
  167. sys.float_info.max for _ in range(self._nObjectives)
  168. ]
  169. def initRun(self):
  170. """
  171. Method which initialiazes or re-initialises the whole algorithm context specifically for MOEAD
  172. """
  173. # initialization is done during run method
  174. pass
  175. def run(self, evaluations):
  176. """
  177. Run the local search algorithm
  178. Args:
  179. evaluations: {int} -- number of Local search evaluations
  180. Returns:
  181. {:class:`~macop.solutions.base.Solution`}: best solution found
  182. """
  183. # by default use of mother method to initialise variables
  184. super().run(evaluations)
  185. # enable callback resume for MOEAD
  186. self.resume()
  187. # initialise each sub problem if no backup
  188. for i in range(self._mu):
  189. if self._subProblems[i]._bestSolution is None:
  190. self._subProblems[i].run(1)
  191. self.population[i] = self._subProblems[i]._bestSolution
  192. # if no backup for pf population
  193. if len(self._pfPop) == 0:
  194. for i in range(self._mu):
  195. self._pfPop.append(self._subProblems[i]._bestSolution)
  196. # MOEAD algorithm implementation
  197. while not self.stop():
  198. for i in range(self._mu):
  199. # run 1 iteration into sub problem `i`
  200. self._subProblems[i].run(1)
  201. spBestSolution = self._subProblems[i]._bestSolution
  202. self.updateRefPoint(spBestSolution)
  203. # for each neighbor of current sub problem update solution if better
  204. improvment = False
  205. for j in self._neighbors[i]:
  206. if spBestSolution.fitness > self._subProblems[
  207. j]._bestSolution.fitness:
  208. # create new solution based on current new if better, computes fitness associated to new solution for sub problem
  209. newSolution = spBestSolution.clone()
  210. # evaluate solution for new sub problem and update as best solution
  211. self._subProblems[j].evaluate(newSolution)
  212. self._subProblems[j]._bestSolution = newSolution
  213. # update population solution for this sub problem
  214. self.population[j] = newSolution
  215. improvment = True
  216. # add new solution if improvment is idenfity
  217. if improvment:
  218. self._pfPop.append(spBestSolution)
  219. # update pareto front
  220. self._pfPop = self.paretoFront(self._pfPop)
  221. # add progress here
  222. self.progress()
  223. # stop algorithm if necessary
  224. if self.stop():
  225. break
  226. logging.info(
  227. f"End of {type(self).__name__}, best solution found {self.population}"
  228. )
  229. self.end()
  230. return self._pfPop
  231. def progress(self):
  232. """
  233. Log progress and apply callbacks if necessary
  234. """
  235. if len(self._callbacks) > 0:
  236. for callback in self._callbacks:
  237. callback.run()
  238. macop_progress(self, self.getGlobalEvaluation(),
  239. self.getGlobalMaxEvaluation())
  240. logging.info(
  241. f"-- {type(self).__name__} evaluation {self._numberOfEvaluations} of {self._maxEvaluations} ({((self._numberOfEvaluations) / self._maxEvaluations * 100.):.2f}%)"
  242. )
  243. def setNeighbors(self):
  244. if self._T % 2 == 1:
  245. dmin = -int(self._T / 2)
  246. dmax = int(self._T / 2) + 1
  247. else:
  248. dmin = -int(self._T / 2) + 1
  249. dmax = int(+self._T / 2)
  250. # init neighbord list
  251. self._neighbors = [[] for n in range(self._mu)]
  252. for direction in range(0, -dmin):
  253. for i in range(self._T):
  254. self._neighbors[direction].append(i)
  255. for direction in range(-dmin, self._mu - dmax):
  256. for i in range(direction + dmin, direction + dmax - 1):
  257. self._neighbors[direction].append(i)
  258. for direction in range(self._mu - dmax, self._mu):
  259. for i in range(self._mu - self._T, self._mu):
  260. self._neighbors[direction].append(i)
  261. def updateRefPoint(self, solution):
  262. if self._maximise:
  263. for i in range(len(self.evaluator)):
  264. if solution.scores[i] > self._refPoint[i]:
  265. self._refPoint[i] = solution.scores[i]
  266. else:
  267. for i in range(len(self.evaluator)):
  268. if solution.scores[i] < self._refPoint[i]:
  269. self._refPoint[i] = solution.scores[i]
  270. def paretoFront(self, population):
  271. paFront = []
  272. indexes = []
  273. nObjectives = len(self.evaluator)
  274. nSolutions = len(population)
  275. # find dominated solution
  276. for i in range(nSolutions):
  277. for j in range(nSolutions):
  278. if j in indexes:
  279. continue
  280. # check if solutions are the same
  281. if all([
  282. population[i]._data[k] == population[j]._data[k]
  283. for k in range(len(population[i]._data))
  284. ]):
  285. continue
  286. nDominated = 0
  287. # check number of dominated objectives of current solution by the others solution
  288. for k in range(len(self.evaluator)):
  289. if self._maximise:
  290. if population[i].scores[k] < population[j].scores[k]:
  291. nDominated += 1
  292. else:
  293. if population[i].scores[k] > population[j].scores[k]:
  294. nDominated += 1
  295. if nDominated == nObjectives:
  296. indexes.append(i)
  297. break
  298. # store the non dominated solution into pareto front
  299. for i in range(nSolutions):
  300. if i not in indexes:
  301. paFront.append(population[i])
  302. return paFront
  303. @property
  304. def result(self):
  305. """Get the expected result of the current algorithm
  306. Returns:
  307. [{:class:`~macop.solutions.base.Solution`}]: pareto front population
  308. """
  309. return self._pfPop
  310. @result.setter
  311. def result(self, result):
  312. """Set current default result of the algorithm
  313. Args:
  314. result: {object} -- expected result data of the current algorithm
  315. """
  316. self._pfPop = result
  317. def end(self):
  318. """Display end message into `run` method
  319. """
  320. macop_text(
  321. self,
  322. f'({type(self).__name__}) Found after {self._numberOfEvaluations} evaluations'
  323. )
  324. for i, solution in enumerate(self._pfPop):
  325. macop_text(self, f' - [{i}] {solution.scores} : {solution}')
  326. macop_line(self)
  327. def information(self):
  328. logging.info("-- Pareto front :")
  329. for i, solution in enumerate(self._pfPop):
  330. logging.info(f"-- {i}] SCORE {solution.scores} - {solution}")
  331. def __str__(self):
  332. return f"{type(self).__name__} using {type(self.population).__name__}"
  333. class MOSubProblem(Algorithm):
  334. """Specific MO sub problem used into MOEAD
  335. Attributes:
  336. index: {int} -- sub problem index
  337. weights: {[float]} -- sub problems objectives weights
  338. initalizer: {function} -- basic function strategy to initialise solution
  339. evaluator: {function} -- basic function in order to obtained fitness (mono or multiple objectives)
  340. operators: {[:class:`~macop.operators.base.Operator`]} -- list of operator to use when launching algorithm
  341. policy: {:class:`~macop.policies.base.Policy`} -- Policy class implementation strategy to select operators
  342. validator: {function} -- basic function to check if solution is valid or not under some constraints
  343. maximise: {bool} -- specify kind of optimisation problem
  344. verbose: {bool} -- verbose or not information about the algorithm
  345. currentSolution: {:class:`~macop.solutions.base.Solution`} -- current solution managed for current evaluation
  346. bestSolution: {:class:`~macop.solutions.base.Solution`} -- best solution found so far during running algorithm
  347. callbacks: {[:class:`~macop.callbacks.base.Callback`]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initialising algorithm
  348. parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of inner Algorithm instance (optional)
  349. Example:
  350. >>> import random
  351. >>>
  352. >>> # operators import
  353. >>> from macop.operators.discrete.crossovers import SimpleCrossover
  354. >>> from macop.operators.discrete.mutators import SimpleMutation
  355. >>>
  356. >>> # policy import
  357. >>> from macop.policies.classicals import RandomPolicy
  358. >>>
  359. >>> # solution and algorithm imports
  360. >>> from macop.solutions.discrete import BinarySolution
  361. >>> from macop.algorithms.multi import MOEAD, MOSubProblem
  362. >>>
  363. >>> # evaluator import
  364. >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
  365. >>>
  366. >>> # evaluator initialization (worths objects passed into data)
  367. >>> problem_size = 20
  368. >>> worths1 = [ random.randint(0, 20) for i in range(problem_size) ]
  369. >>> evaluator1 = KnapsackEvaluator(data={'worths': worths1})
  370. >>> worths2 = [ random.randint(10, 15) for i in range(problem_size) ]
  371. >>> evaluator2 = KnapsackEvaluator(data={'worths': worths2})
  372. >>>
  373. >>> # validator specification (based on weights of each objects)
  374. >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
  375. >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution.data) if value == 1]) < 200 else False
  376. >>>
  377. >>> # initialiser function for binary solution using specific solution size
  378. >>> initialiser = lambda x=20: BinarySolution.random(x, validator)
  379. >>>
  380. >>> # operators list with crossover and mutation
  381. >>> operators = [SimpleCrossover(), SimpleMutation()]
  382. >>> policy = RandomPolicy(operators)
  383. >>> algo = MOEAD(20, 5, initialiser, [evaluator1, evaluator2], operators, policy, validator, maximise=True, verbose=False)
  384. >>>
  385. >>> # weights of the sub problem
  386. >>> sub_problem_weights = [0.4, 0.6]
  387. >>> sub_evaluator = WeightedSum(data={'evaluators': [evaluator1, evaluator2], 'weights': sub_problem_weights})
  388. >>>
  389. >>> # first parameter is the index of the MOSubProblem
  390. >>> subProblem = MOSubProblem(0, sub_problem_weights, initialiser, sub_evaluator, operators, policy, validator, maximise=True, parent=algo, verbose=False)
  391. >>>
  392. >>> # run the algorithm
  393. >>> solution = subProblem.run(100)
  394. >>> solution._score
  395. 133.0
  396. """
  397. def __init__(self,
  398. index,
  399. weights,
  400. initalizer,
  401. evaluator,
  402. operators,
  403. policy,
  404. validator,
  405. maximise=True,
  406. parent=None,
  407. verbose=True):
  408. """Specific multi-objective sub problem used into MOEAD
  409. Args:
  410. index: {int} -- sub problem index
  411. weights: {[float]} -- sub problems objectives weights
  412. initalizer: {function} -- basic function strategy to initialise solution
  413. evaluator: {function} -- basic function in order to obtained fitness (mono or multiple objectives)
  414. operators: {[:class:`~macop.operators.base.Operator`]} -- list of operator to use when launching algorithm
  415. policy: {:class:`~macop.policies.base.Policy`} -- Policy class implementation strategy to select operators
  416. validator: {function} -- basic function to check if solution is valid or not under some constraints
  417. maximise: {bool} -- specify kind of optimisation problem
  418. parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of inner Algorithm instance (optional)
  419. verbose: {bool} -- verbose or not information about the algorithm
  420. """
  421. super().__init__(initalizer, evaluator, operators, policy, validator,
  422. maximise, parent)
  423. self._index = index
  424. self._weights = weights
  425. self._verbose = verbose
  426. def run(self, evaluations):
  427. """
  428. Run the local search algorithm
  429. Args:
  430. evaluations: {int} -- number of evaluations
  431. Returns:
  432. {:class:`~macop.solutions.base.Solution`}: best solution found
  433. """
  434. # by default use of mother method to initialise variables
  435. super().run(evaluations)
  436. # initialise solution if necessary
  437. if self._bestSolution is None:
  438. self.initRun()
  439. # new operators list keep track of current sub problem
  440. for op in self.operators:
  441. op.setAlgo(self)
  442. for _ in range(evaluations):
  443. # keep reference of sub problem used
  444. self.policy.setAlgo(self)
  445. # update solution using policy
  446. newSolution = self.update(self._bestSolution)
  447. # if better solution than currently, replace it
  448. if self.isBetter(newSolution):
  449. self._bestSolution = newSolution
  450. # increase number of evaluations
  451. self.increaseEvaluation()
  452. self.progress()
  453. # stop algorithm if necessary
  454. if self.stop():
  455. break
  456. logging.info(
  457. f"---- Current {newSolution} - SCORE {newSolution.fitness}")
  458. logging.info(
  459. f"End of {type(self).__name__}, best solution found {self._bestSolution}"
  460. )
  461. return self._bestSolution