# multi.py
"""Multi-objective classes algorithm
"""
# main imports
import logging
import math
import sys

import numpy as np

from macop.utils.progress import macop_text, macop_line, macop_progress

# module imports
from macop.algorithms.base import Algorithm
from macop.evaluators.discrete.multi import WeightedSum
  12. class MOEAD(Algorithm):
  13. """Multi-Ojective Evolutionary Algorithm with Scalar Decomposition
  14. Attributes:
  15. mu: {int} -- number of sub problems
  16. T: {[float]} -- number of neightbors for each sub problem
  17. nObjectives: {int} -- number of objectives (based of number evaluator)
  18. initialiser: {function} -- basic function strategy to initialise solution
  19. evaluator: {[function]} -- list of basic function in order to obtained fitness (multiple objectives)
  20. operators: {[:class:`~macop.operators.base.Operator`]} -- list of operator to use when launching algorithm
  21. policy: {:class:`~macop.policies.base.Policy`} -- Policy class implementation strategy to select operators
  22. validator: {function} -- basic function to check if solution is valid or not under some constraints
  23. maximise: {bool} -- specify kind of optimisation problem
  24. verbose: {bool} -- verbose or not information about the algorithm
  25. population: [{:class:`~macop.solutions.base.Solution`}] -- population of solution, one for each sub problem
  26. pfPop: [{:class:`~macop.solutions.base.Solution`}] -- pareto front population
  27. weights: [[{float}]] -- random weights used for custom mu sub problems
  28. callbacks: {[:class:`~macop.callbacks.base.Callback`]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initialising algorithm
  29. parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of inner Algorithm instance (optional)
  30. >>> import random
  31. >>>
  32. >>> # operators import
  33. >>> from macop.operators.discrete.crossovers import SimpleCrossover
  34. >>> from macop.operators.discrete.mutators import SimpleMutation
  35. >>>
  36. >>> # policy import
  37. >>> from macop.policies.classicals import RandomPolicy
  38. >>>
  39. >>> # solution and algorithm imports
  40. >>> from macop.solutions.discrete import BinarySolution
  41. >>> from macop.algorithms.multi import MOEAD
  42. >>>
  43. >>> # evaluator import
  44. >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
  45. >>>
  46. >>> # evaluator initialization (worths objects passed into data)
  47. >>> problem_size = 20
  48. >>> worths1 = [ random.randint(0, 20) for i in range(problem_size) ]
  49. >>> evaluator1 = KnapsackEvaluator(data={'worths': worths1})
  50. >>> worths2 = [ random.randint(10, 15) for i in range(problem_size) ]
  51. >>> evaluator2 = KnapsackEvaluator(data={'worths': worths2})
  52. >>>
  53. >>> # validator specification (based on weights of each objects)
  54. >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
  55. >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution.data) if value == 1]) < 200 else False
  56. >>>
  57. >>> # initialiser function for binary solution using specific solution size
  58. >>> initialiser = lambda x=20: BinarySolution.random(x, validator)
  59. >>>
  60. >>> # operators list with crossover and mutation
  61. >>> operators = [SimpleCrossover(), SimpleMutation()]
  62. >>> policy = RandomPolicy(operators)
  63. >>>
  64. >>> # MOEAD use multi-objective, hence list of evaluators with mu=100 and T=10
  65. >>> algo = MOEAD(20, 5, initialiser, [evaluator1, evaluator2], operators, policy, validator, maximise=True, verbose=False)
  66. >>>
  67. >>> # run the algorithm and get the pareto front obtained
  68. >>> pf_solutions = algo.run(100)
  69. >>>
  70. >>> # check size of expected pareto
  71. >>> len(pf_solutions)
  72. 33
  73. """
  74. def __init__(self,
  75. mu,
  76. T,
  77. initialiser,
  78. evaluator,
  79. operators,
  80. policy,
  81. validator,
  82. maximise=True,
  83. parent=None,
  84. verbose=True):
  85. """Multi-Ojective Evolutionary Algorithm with Scalar Decomposition initialisation
  86. Args:
  87. mu: {int} -- number of sub problems
  88. T: {[float]} -- number of neightbors for each sub problem
  89. initialiser: {function} -- basic function strategy to initialise solution
  90. evaluator: {[function]} -- list of basic function in order to obtained fitness (multiple objectives)
  91. operators: {[:class:`~macop.operators.base.Operator`]} -- list of operator to use when launching algorithm
  92. policy: {:class:`~macop.policies.base.Policy`} -- Policy class implementation strategy to select operators
  93. validator: {function} -- basic function to check if solution is valid or not under some constraints
  94. maximise: {bool} -- specify kind of optimisation problem
  95. parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of inner Algorithm instance (optional)
  96. verbose: {bool} -- verbose or not information about the algorithm
  97. """
  98. # redefinition of constructor to well use `initRun` method
  99. self.initialiser = initialiser
  100. self.evaluator = evaluator
  101. self.validator = validator
  102. self._operators = operators
  103. self.policy = policy
  104. self._callbacks = []
  105. # by default
  106. self._numberOfEvaluations = 0
  107. self._maxEvaluations = 0
  108. self._nObjectives = len(evaluator)
  109. # other parameters
  110. self._parent = parent # parent algorithm if it's sub algorithm
  111. #self.maxEvaluations = 0 # by default
  112. self._maximise = maximise
  113. self._verbose = verbose
  114. # track reference of algo into operator (keep an eye into best solution)
  115. for operator in self._operators:
  116. operator.setAlgo(self)
  117. # by default track reference for policy
  118. self.policy.setAlgo(self)
  119. if mu < T:
  120. raise ValueError('`mu` cannot be less than `T`')
  121. if mu < T:
  122. raise ValueError('`mu` cannot be less than `T`')
  123. self._mu = mu
  124. self._T = T
  125. # initialise neighbors for each sub problem
  126. self.setNeighbors()
  127. weights = []
  128. if self._nObjectives == 2:
  129. for i in range(self._mu):
  130. angle = math.pi / 2 * i / (self._mu - 1)
  131. weights.append([math.cos(angle), math.sin(angle)])
  132. elif self._nObjectives >= 3:
  133. # random weights using uniform
  134. for i in range(self._mu):
  135. w_i = np.random.uniform(0, 1, self._nObjectives)
  136. weights.append(w_i / sum(w_i))
  137. else:
  138. raise ValueError('Unvalid number of objectives')
  139. self._weights = weights
  140. self._subProblems = []
  141. for i in range(self._mu):
  142. # compute weight sum from solution
  143. sub_evaluator = WeightedSum(data={
  144. 'evaluators': evaluator,
  145. 'weights': weights[i]
  146. })
  147. # intialize each sub problem
  148. # use copy of list to keep track for each sub problem
  149. subProblem = MOSubProblem(i,
  150. weights[i],
  151. initialiser,
  152. sub_evaluator,
  153. operators.copy(),
  154. policy,
  155. validator,
  156. maximise,
  157. self,
  158. verbose=self._verbose)
  159. self._subProblems.append(subProblem)
  160. self.population = [None for n in range(self._mu)]
  161. self._pfPop = []
  162. # ref point based on number of evaluators
  163. if self._maximise:
  164. self._refPoint = [0 for _ in range(self._nObjectives)]
  165. else:
  166. self._refPoint = [
  167. sys.float_info.max for _ in range(self._nObjectives)
  168. ]
  169. def initRun(self):
  170. """
  171. Method which initialiazes or re-initialises the whole algorithm context specifically for MOEAD
  172. """
  173. # initialization is done during run method
  174. pass
  175. def run(self, evaluations):
  176. """
  177. Run the local search algorithm
  178. Args:
  179. evaluations: {int} -- number of Local search evaluations
  180. Returns:
  181. {:class:`~macop.solutions.base.Solution`}: best solution found
  182. """
  183. # by default use of mother method to initialise variables
  184. super().run(evaluations)
  185. # enable callback resume for MOEAD
  186. self.resume()
  187. # initialise each sub problem if no backup
  188. for i in range(self._mu):
  189. if self._subProblems[i]._bestSolution is None:
  190. self._subProblems[i].run(1)
  191. self.population[i] = self._subProblems[i]._bestSolution
  192. # if no backup for pf population
  193. if len(self._pfPop) == 0:
  194. for i in range(self._mu):
  195. self._pfPop.append(self._subProblems[i]._bestSolution)
  196. # MOEAD algorithm implementation
  197. while not self.stop():
  198. for i in range(self._mu):
  199. # run 1 iteration into sub problem `i`
  200. self._subProblems[i].run(1)
  201. spBestSolution = self._subProblems[i]._bestSolution
  202. self.updateRefPoint(spBestSolution)
  203. # for each neighbor of current sub problem update solution if better
  204. improvment = False
  205. for j in self._neighbors[i]:
  206. if spBestSolution.fitness > self._subProblems[j]._bestSolution.fitness:
  207. # create new solution based on current new if better, computes fitness associated to new solution for sub problem
  208. newSolution = spBestSolution.clone()
  209. # evaluate solution for new sub problem and update as best solution
  210. self._subProblems[j].evaluate(newSolution)
  211. self._subProblems[j]._bestSolution = newSolution
  212. # update population solution for this sub problem
  213. self.population[j] = newSolution
  214. improvment = True
  215. # add new solution if improvment is idenfity
  216. if improvment:
  217. self._pfPop.append(spBestSolution)
  218. # update pareto front
  219. self._pfPop = self.paretoFront(self._pfPop)
  220. # add progress here
  221. self.progress()
  222. # stop algorithm if necessary
  223. if self.stop():
  224. break
  225. logging.info(
  226. f"End of {type(self).__name__}, best solution found {self.population}"
  227. )
  228. self.end()
  229. return self._pfPop
  230. def progress(self):
  231. """
  232. Log progress and apply callbacks if necessary
  233. """
  234. if len(self._callbacks) > 0:
  235. for callback in self._callbacks:
  236. callback.run()
  237. macop_progress(self, self.getGlobalEvaluation(),
  238. self.getGlobalMaxEvaluation())
  239. logging.info(
  240. f"-- {type(self).__name__} evaluation {self._numberOfEvaluations} of {self._maxEvaluations} ({((self._numberOfEvaluations) / self._maxEvaluations * 100.):.2f}%)"
  241. )
  242. def setNeighbors(self):
  243. if self._T % 2 == 1:
  244. dmin = -int(self._T / 2)
  245. dmax = int(self._T / 2) + 1
  246. else:
  247. dmin = -int(self._T / 2) + 1
  248. dmax = int(+self._T / 2)
  249. # init neighbord list
  250. self._neighbors = [[] for n in range(self._mu)]
  251. for direction in range(0, -dmin):
  252. for i in range(self._T):
  253. self._neighbors[direction].append(i)
  254. for direction in range(-dmin, self._mu - dmax):
  255. for i in range(direction + dmin, direction + dmax - 1):
  256. self._neighbors[direction].append(i)
  257. for direction in range(self._mu - dmax, self._mu):
  258. for i in range(self._mu - self._T, self._mu):
  259. self._neighbors[direction].append(i)
  260. def updateRefPoint(self, solution):
  261. if self._maximise:
  262. for i in range(len(self.evaluator)):
  263. if solution.scores[i] > self._refPoint[i]:
  264. self._refPoint[i] = solution.scores[i]
  265. else:
  266. for i in range(len(self.evaluator)):
  267. if solution.scores[i] < self._refPoint[i]:
  268. self._refPoint[i] = solution.scores[i]
  269. def paretoFront(self, population):
  270. paFront = []
  271. indexes = []
  272. nObjectives = len(self.evaluator)
  273. nSolutions = len(population)
  274. # find dominated solution
  275. for i in range(nSolutions):
  276. for j in range(nSolutions):
  277. if j in indexes:
  278. continue
  279. # check if solutions are the same
  280. if all([
  281. population[i]._data[k] == population[j]._data[k]
  282. for k in range(len(population[i]._data))
  283. ]):
  284. continue
  285. nDominated = 0
  286. # check number of dominated objectives of current solution by the others solution
  287. for k in range(len(self.evaluator)):
  288. if self._maximise:
  289. if population[i].scores[k] < population[j].scores[k]:
  290. nDominated += 1
  291. else:
  292. if population[i].scores[k] > population[j].scores[k]:
  293. nDominated += 1
  294. if nDominated == nObjectives:
  295. indexes.append(i)
  296. break
  297. # store the non dominated solution into pareto front
  298. for i in range(nSolutions):
  299. if i not in indexes:
  300. paFront.append(population[i])
  301. return paFront
  302. def result(self):
  303. """Get the expected result of the current algorithm
  304. Returns:
  305. [{:class:`~macop.solutions.base.Solution`}]: pareto front population
  306. """
  307. return self._pfPop
  308. def result(self, result):
  309. """Set current default result of the algorithm
  310. Args:
  311. result: {object} -- expected result data of the current algorithm
  312. """
  313. self._pfPop = result
  314. def end(self):
  315. """Display end message into `run` method
  316. """
  317. macop_text(
  318. self,
  319. f'({type(self).__name__}) Found after {self._numberOfEvaluations} evaluations'
  320. )
  321. for i, solution in enumerate(self._pfPop):
  322. macop_text(self, f' - [{i}] {solution.scores} : {solution}')
  323. macop_line(self)
  324. def information(self):
  325. logging.info("-- Pareto front :")
  326. for i, solution in enumerate(self._pfPop):
  327. logging.info(f"-- {i}] SCORE {solution.scores} - {solution}")
  328. def __str__(self):
  329. return f"{type(self).__name__} using {type(self.population).__name__}"
  330. class MOSubProblem(Algorithm):
  331. """Specific MO sub problem used into MOEAD
  332. Attributes:
  333. index: {int} -- sub problem index
  334. weights: {[float]} -- sub problems objectives weights
  335. initalizer: {function} -- basic function strategy to initialise solution
  336. evaluator: {function} -- basic function in order to obtained fitness (mono or multiple objectives)
  337. operators: {[:class:`~macop.operators.base.Operator`]} -- list of operator to use when launching algorithm
  338. policy: {:class:`~macop.policies.base.Policy`} -- Policy class implementation strategy to select operators
  339. validator: {function} -- basic function to check if solution is valid or not under some constraints
  340. maximise: {bool} -- specify kind of optimisation problem
  341. verbose: {bool} -- verbose or not information about the algorithm
  342. currentSolution: {:class:`~macop.solutions.base.Solution`} -- current solution managed for current evaluation
  343. bestSolution: {:class:`~macop.solutions.base.Solution`} -- best solution found so far during running algorithm
  344. callbacks: {[:class:`~macop.callbacks.base.Callback`]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initialising algorithm
  345. parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of inner Algorithm instance (optional)
  346. Example:
  347. >>> import random
  348. >>>
  349. >>> # operators import
  350. >>> from macop.operators.discrete.crossovers import SimpleCrossover
  351. >>> from macop.operators.discrete.mutators import SimpleMutation
  352. >>>
  353. >>> # policy import
  354. >>> from macop.policies.classicals import RandomPolicy
  355. >>>
  356. >>> # solution and algorithm imports
  357. >>> from macop.solutions.discrete import BinarySolution
  358. >>> from macop.algorithms.multi import MOEAD, MOSubProblem
  359. >>>
  360. >>> # evaluator import
  361. >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
  362. >>>
  363. >>> # evaluator initialization (worths objects passed into data)
  364. >>> problem_size = 20
  365. >>> worths1 = [ random.randint(0, 20) for i in range(problem_size) ]
  366. >>> evaluator1 = KnapsackEvaluator(data={'worths': worths1})
  367. >>> worths2 = [ random.randint(10, 15) for i in range(problem_size) ]
  368. >>> evaluator2 = KnapsackEvaluator(data={'worths': worths2})
  369. >>>
  370. >>> # validator specification (based on weights of each objects)
  371. >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
  372. >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution.data) if value == 1]) < 200 else False
  373. >>>
  374. >>> # initialiser function for binary solution using specific solution size
  375. >>> initialiser = lambda x=20: BinarySolution.random(x, validator)
  376. >>>
  377. >>> # operators list with crossover and mutation
  378. >>> operators = [SimpleCrossover(), SimpleMutation()]
  379. >>> policy = RandomPolicy(operators)
  380. >>> algo = MOEAD(20, 5, initialiser, [evaluator1, evaluator2], operators, policy, validator, maximise=True, verbose=False)
  381. >>>
  382. >>> # weights of the sub problem
  383. >>> sub_problem_weights = [0.4, 0.6]
  384. >>> sub_evaluator = WeightedSum(data={'evaluators': [evaluator1, evaluator2], 'weights': sub_problem_weights})
  385. >>>
  386. >>> # first parameter is the index of the MOSubProblem
  387. >>> subProblem = MOSubProblem(0, sub_problem_weights, initialiser, sub_evaluator, operators, policy, validator, maximise=True, parent=algo, verbose=False)
  388. >>>
  389. >>> # run the algorithm
  390. >>> solution = subProblem.run(100)
  391. >>> solution._score
  392. 133.0
  393. """
  394. def __init__(self,
  395. index,
  396. weights,
  397. initalizer,
  398. evaluator,
  399. operators,
  400. policy,
  401. validator,
  402. maximise=True,
  403. parent=None,
  404. verbose=True):
  405. """Specific multi-objective sub problem used into MOEAD
  406. Args:
  407. index: {int} -- sub problem index
  408. weights: {[float]} -- sub problems objectives weights
  409. initalizer: {function} -- basic function strategy to initialise solution
  410. evaluator: {function} -- basic function in order to obtained fitness (mono or multiple objectives)
  411. operators: {[:class:`~macop.operators.base.Operator`]} -- list of operator to use when launching algorithm
  412. policy: {:class:`~macop.policies.base.Policy`} -- Policy class implementation strategy to select operators
  413. validator: {function} -- basic function to check if solution is valid or not under some constraints
  414. maximise: {bool} -- specify kind of optimisation problem
  415. parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of inner Algorithm instance (optional)
  416. verbose: {bool} -- verbose or not information about the algorithm
  417. """
  418. super().__init__(initalizer, evaluator, operators, policy, validator,
  419. maximise, parent)
  420. self._index = index
  421. self._weights = weights
  422. self._verbose = verbose
  423. def run(self, evaluations):
  424. """
  425. Run the local search algorithm
  426. Args:
  427. evaluations: {int} -- number of evaluations
  428. Returns:
  429. {:class:`~macop.solutions.base.Solution`}: best solution found
  430. """
  431. # by default use of mother method to initialise variables
  432. super().run(evaluations)
  433. # initialise solution if necessary
  434. if self._bestSolution is None:
  435. self.initRun()
  436. # new operators list keep track of current sub problem
  437. for op in self._operators:
  438. op.setAlgo(self)
  439. for _ in range(evaluations):
  440. # keep reference of sub problem used
  441. self.policy.setAlgo(self)
  442. # update solution using policy
  443. newSolution = self.update(self._bestSolution)
  444. # if better solution than currently, replace it
  445. if self.isBetter(newSolution):
  446. self._bestSolution = newSolution
  447. # increase number of evaluations
  448. self.increaseEvaluation()
  449. self.progress()
  450. # stop algorithm if necessary
  451. if self.stop():
  452. break
  453. logging.info(
  454. f"---- Current {newSolution} - SCORE {newSolution.fitness}")
  455. logging.info(
  456. f"End of {type(self).__name__}, best solution found {self._bestSolution}"
  457. )
  458. return self._bestSolution