mono.py

  1. """Mono-objective available algorithms
  2. """
  3. # main imports
  4. import logging
  5. # module imports
  6. from .base import Algorithm

class HillClimberFirstImprovment(Algorithm):
    """Hill Climber First Improvement, used as a quick exploration optimisation algorithm

    - First, this algorithm explores the neighborhood of a newly generated solution (obtained by applying an operator to the current solution) in order to find a better solution in the neighborhood space.
    - Then, the current solution is replaced by the first solution from the neighborhood space which is better than the current one.
    - These steps are repeated until a number of evaluations (stopping criterion) is reached.

    Attributes:
        initializer: {function} -- basic function strategy to initialize a solution
        evaluator: {function} -- basic function used to obtain the fitness (mono or multiple objectives)
        operators: {[Operator]} -- list of operators to use when launching the algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check whether a solution is valid or not under some constraints
        maximise: {bool} -- specify the kind of optimisation problem
        currentSolution: {Solution} -- current solution managed for the current evaluation
        bestSolution: {Solution} -- best solution found so far while running the algorithm
        callbacks: {[Callback]} -- list of Callback class implementations used to run some instructions every given number of evaluations and to `load` data when initializing the algorithm

    Example:

    >>> import random
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>> # solution and algorithm
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import HillClimberFirstImprovment
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>> # evaluator initialization (worths of objects passed into data)
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200 else False
    >>> # initializer function with lambda function
    >>> initializer = lambda x=20: BinarySolution.random(x, validator)
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> algo = HillClimberFirstImprovment(initializer, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>> # run the algorithm
    >>> solution = algo.run(100)
    >>> solution._score
    128
    """

    def run(self, evaluations):
        """
        Run the local search algorithm

        Args:
            evaluations: {int} -- number of local search evaluations

        Returns:
            {Solution} -- best solution found
        """

        # by default, use the parent method to initialize variables
        super().run(evaluations)

        # initialize current solution and best solution
        self.initRun()

        solutionSize = self._currentSolution._size

        # local search algorithm implementation
        while not self.stop():

            for _ in range(solutionSize):

                # update current solution using policy
                newSolution = self.update(self._currentSolution)

                # if the new solution is better, keep it and stop the current exploration (first improvement)
                if self.isBetter(newSolution):
                    self._bestSolution = newSolution
                    break

                # increase number of evaluations
                self.increaseEvaluation()

                self.progress()
                logging.info(f"---- Current {newSolution} - SCORE {newSolution.fitness()}")

                # stop algorithm if necessary
                if self.stop():
                    break

            # set the new current solution using the best solution found in this neighborhood search
            self._currentSolution = self._bestSolution

        logging.info(f"End of {type(self).__name__}, best solution found {self._bestSolution}")

        return self._bestSolution

class HillClimberBestImprovment(Algorithm):
    """Hill Climber Best Improvement, used as an exploitation optimisation algorithm

    - First, this algorithm explores the neighborhood of a newly generated solution (obtained by applying an operator to the current solution) in order to find the best solution in the neighborhood space.
    - Then, the best solution found in the neighborhood space becomes the new current solution.
    - These steps are repeated until a number of evaluations (stopping criterion) is reached.

    Attributes:
        initializer: {function} -- basic function strategy to initialize a solution
        evaluator: {function} -- basic function used to obtain the fitness (mono or multiple objectives)
        operators: {[Operator]} -- list of operators to use when launching the algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check whether a solution is valid or not under some constraints
        maximise: {bool} -- specify the kind of optimisation problem
        currentSolution: {Solution} -- current solution managed for the current evaluation
        bestSolution: {Solution} -- best solution found so far while running the algorithm
        callbacks: {[Callback]} -- list of Callback class implementations used to run some instructions every given number of evaluations and to `load` data when initializing the algorithm

    Example:

    >>> import random
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>> # solution and algorithm
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import HillClimberBestImprovment
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>> # evaluator initialization (worths of objects passed into data)
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200 else False
    >>> # initializer function with lambda function
    >>> initializer = lambda x=20: BinarySolution.random(x, validator)
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> algo = HillClimberBestImprovment(initializer, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>> # run the algorithm
    >>> solution = algo.run(100)
    >>> solution._score
    104
    """

    def run(self, evaluations):
        """
        Run the local search algorithm

        Args:
            evaluations: {int} -- number of local search evaluations

        Returns:
            {Solution} -- best solution found
        """

        # by default, use the parent method to initialize variables
        super().run(evaluations)

        # initialize current solution and best solution
        self.initRun()

        solutionSize = self._currentSolution._size

        # local search algorithm implementation
        while not self.stop():

            for _ in range(solutionSize):

                # update current solution using policy
                newSolution = self.update(self._currentSolution)

                # if the new solution is better than the current best, keep it
                if self.isBetter(newSolution):
                    self._bestSolution = newSolution

                # increase number of evaluations
                self.increaseEvaluation()

                self.progress()
                logging.info(f"---- Current {newSolution} - SCORE {newSolution.fitness()}")

                # stop algorithm if necessary
                if self.stop():
                    break

            # set the new current solution using the best solution found in this neighborhood search
            self._currentSolution = self._bestSolution

        logging.info(f"End of {type(self).__name__}, best solution found {self._bestSolution}")

        return self._bestSolution

class IteratedLocalSearch(Algorithm):
    """Iterated Local Search, used to avoid local optima and improve the EvE (Exploration vs Exploitation) compromise

    - A number of evaluations (`ls_evaluations`) is dedicated to the local search process, here the `HillClimberFirstImprovment` algorithm
    - Starting from a newly generated solution, the local search algorithm returns a new solution
    - If the obtained solution is better than the best solution known by `IteratedLocalSearch`, then it becomes the new best solution
    - This process is repeated until the stopping criterion (number of expected evaluations) is reached

    Attributes:
        initializer: {function} -- basic function strategy to initialize a solution
        evaluator: {function} -- basic function used to obtain the fitness (mono or multiple objectives)
        operators: {[Operator]} -- list of operators to use when launching the algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check whether a solution is valid or not under some constraints
        maximise: {bool} -- specify the kind of optimisation problem
        currentSolution: {Solution} -- current solution managed for the current evaluation
        bestSolution: {Solution} -- best solution found so far while running the algorithm
        callbacks: {[Callback]} -- list of Callback class implementations used to run some instructions every given number of evaluations and to `load` data when initializing the algorithm

    Example:

    >>> import random
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>> # solution and algorithm
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import IteratedLocalSearch
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>> # evaluator initialization (worths of objects passed into data)
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200 else False
    >>> # initializer function with lambda function
    >>> initializer = lambda x=20: BinarySolution.random(x, validator)
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> algo = IteratedLocalSearch(initializer, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>> # run the algorithm
    >>> solution = algo.run(100, ls_evaluations=10)
    >>> solution._score
    137
    """

    def run(self, evaluations, ls_evaluations=100):
        """
        Run the iterated local search algorithm using a local search (EvE compromise)

        Args:
            evaluations: {int} -- number of global evaluations for ILS
            ls_evaluations: {int} -- number of local search evaluations (default: 100)

        Returns:
            {Solution} -- best solution found
        """

        # by default, use the parent method to initialize variables
        super().run(evaluations)

        # enable resuming for ILS
        self.resume()

        # initialize current solution
        self.initRun()

        # build the inner local search, passing this ILS instance as parent so the global evaluation count is shared
        ls = HillClimberFirstImprovment(self._initializer,
                                        self._evaluator,
                                        self._operators,
                                        self._policy,
                                        self._validator,
                                        self._maximise,
                                        verbose=self._verbose,
                                        parent=self)

        # register the same callbacks on the local search
        for callback in self._callbacks:
            ls.addCallback(callback)

        # local search algorithm implementation
        while not self.stop():

            # create and search a solution using the local search
            newSolution = ls.run(ls_evaluations)

            # if the new solution is better than the current best, keep it
            if self.isBetter(newSolution):
                self._bestSolution = newSolution

            # the number of evaluations is already increased by the local search,
            # so increasing the evaluation counter and progress is not necessary here
            # self.increaseEvaluation()
            # self.progress()
            self.information()

        logging.info(f"End of {type(self).__name__}, best solution found {self._bestSolution}")

        self.end()
        return self._bestSolution
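

if __name__ == "__main__":
    # Illustrative, non-authoritative usage sketch (not part of the library API): it
    # mirrors the doctest examples above on a single random knapsack instance so the
    # three algorithms of this module can be compared side by side. The printed scores
    # depend on the random state and will differ between runs; the `build` helper below
    # is a local convenience introduced only for this sketch.
    import random

    from macop.operators.discrete.crossovers import SimpleCrossover
    from macop.operators.discrete.mutators import SimpleMutation
    from macop.policies.classicals import RandomPolicy
    from macop.solutions.discrete import BinarySolution
    from macop.evaluators.discrete.mono import KnapsackEvaluator

    # random knapsack instance: worths to maximise, weights for the capacity constraint
    problem_size = 20
    worths = [random.randint(0, 20) for _ in range(problem_size)]
    weights = [random.randint(5, 30) for _ in range(problem_size)]

    evaluator = KnapsackEvaluator(data={'worths': worths})
    validator = lambda solution: sum(weights[i] for i, value in enumerate(solution._data) if value == 1) < 200
    initializer = lambda x=problem_size: BinarySolution.random(x, validator)

    def build(algorithm_class):
        # fresh operators and policy for each algorithm, as in the doctest examples
        operators = [SimpleCrossover(), SimpleMutation()]
        policy = RandomPolicy(operators)
        return algorithm_class(initializer, evaluator, operators, policy,
                               validator, maximise=True, verbose=False)

    # 100 evaluations for each hill climber, and 100 global evaluations for ILS with
    # 10 evaluations per inner local search (as in the IteratedLocalSearch doctest)
    print('HillClimberFirstImprovment:', build(HillClimberFirstImprovment).run(100)._score)
    print('HillClimberBestImprovment:', build(HillClimberBestImprovment).run(100)._score)
    print('IteratedLocalSearch:', build(IteratedLocalSearch).run(100, ls_evaluations=10)._score)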