mono.py

  1. """Mono-objective available algorithms
  2. """
  3. # main imports
  4. import logging
  5. # module imports
  6. from .base import Algorithm
class HillClimberFirstImprovment(Algorithm):
    """Hill Climber First Improvement, used as a quick exploration optimisation algorithm

    - First, the algorithm explores the neighborhood of a newly generated solution (obtained by applying an operator to the current solution) in order to find a better solution in the neighborhood space;
    - Then, the current solution is replaced by the first solution from the neighborhood space that is better than the current one;
    - These steps are repeated until the expected number of evaluations (stopping criterion) is reached.

    Attributes:
        initializer: {function} -- basic function strategy to initialize a solution
        evaluator: {function} -- basic function used to obtain the fitness of a solution (mono or multiple objectives)
        operators: {[Operator]} -- list of operators to use when launching the algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check whether a solution is valid under some constraints
        maximise: {bool} -- specify the kind of optimisation problem (maximisation if True, minimisation otherwise)
        currentSolution: {Solution} -- current solution managed for the current evaluation
        bestSolution: {Solution} -- best solution found so far while running the algorithm
        callbacks: {[Callback]} -- list of Callback class implementations used to run some instructions every fixed number of evaluations and to `load` state when initializing the algorithm

    Example:

    >>> import random
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>> # solution and algorithm
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import HillClimberFirstImprovment
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>> # evaluator initialization (worths of objects passed into data)
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200 else False
    >>> # initializer function with lambda function
    >>> initializer = lambda x=20: BinarySolution.random(x, validator)
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> algo = HillClimberFirstImprovment(initializer, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>> # run the algorithm
    >>> solution = algo.run(100)
    >>> solution._score
    128
    """
    def run(self, evaluations):
        """
        Run the local search algorithm

        Args:
            evaluations: {int} -- number of local search evaluations

        Returns:
            {Solution} -- best solution found
        """

        # by default, use the mother class method to initialize variables
        super().run(evaluations)

        # initialize current solution and best solution
        self.initRun()

        solutionSize = self._currentSolution._size

        # local search algorithm implementation
        while not self.stop():

            for _ in range(solutionSize):

                # update current solution using policy
                newSolution = self.update(self._currentSolution)

                # if the new solution is better than the current best, keep it and stop the current exploration (first improvement)
                if self.isBetter(newSolution):
                    self._bestSolution = newSolution
                    break

                # increase number of evaluations
                self.increaseEvaluation()

                self.progress()
                logging.info(f"---- Current {newSolution} - SCORE {newSolution.fitness()}")

                # stop algorithm if necessary
                if self.stop():
                    break

            # set the new current solution using the best solution found in this neighborhood search
            self._currentSolution = self._bestSolution

        logging.info(f"End of {type(self).__name__}, best solution found {self._bestSolution}")

        return self._bestSolution


class HillClimberBestImprovment(Algorithm):
    """Hill Climber Best Improvement, used as an exploitation optimisation algorithm

    - First, the algorithm explores the neighborhood of a newly generated solution (obtained by applying an operator to the current solution) in order to find the best solution in the neighborhood space;
    - Then, the best solution found in the neighborhood space becomes the new current solution;
    - These steps are repeated until the expected number of evaluations (stopping criterion) is reached.

    Attributes:
        initializer: {function} -- basic function strategy to initialize a solution
        evaluator: {function} -- basic function used to obtain the fitness of a solution (mono or multiple objectives)
        operators: {[Operator]} -- list of operators to use when launching the algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check whether a solution is valid under some constraints
        maximise: {bool} -- specify the kind of optimisation problem (maximisation if True, minimisation otherwise)
        currentSolution: {Solution} -- current solution managed for the current evaluation
        bestSolution: {Solution} -- best solution found so far while running the algorithm
        callbacks: {[Callback]} -- list of Callback class implementations used to run some instructions every fixed number of evaluations and to `load` state when initializing the algorithm

    Example:

    >>> import random
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>> # solution and algorithm
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import HillClimberBestImprovment
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>> # evaluator initialization (worths of objects passed into data)
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200 else False
    >>> # initializer function with lambda function
    >>> initializer = lambda x=20: BinarySolution.random(x, validator)
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> algo = HillClimberBestImprovment(initializer, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>> # run the algorithm
    >>> solution = algo.run(100)
    >>> solution._score
    104
    """
    def run(self, evaluations):
        """
        Run the local search algorithm

        Args:
            evaluations: {int} -- number of local search evaluations

        Returns:
            {Solution} -- best solution found
        """

        # by default, use the mother class method to initialize variables
        super().run(evaluations)

        # initialize current solution and best solution
        self.initRun()

        solutionSize = self._currentSolution._size

        # local search algorithm implementation
        while not self.stop():

            for _ in range(solutionSize):

                # update current solution using policy
                newSolution = self.update(self._currentSolution)

                # if the new solution is better than the current best, keep it
                if self.isBetter(newSolution):
                    self._bestSolution = newSolution

                # increase number of evaluations
                self.increaseEvaluation()

                self.progress()
                logging.info(f"---- Current {newSolution} - SCORE {newSolution.fitness()}")

                # stop algorithm if necessary
                if self.stop():
                    break

            # set the new current solution using the best solution found in this neighborhood search
            self._currentSolution = self._bestSolution

        logging.info(f"End of {type(self).__name__}, best solution found {self._bestSolution}")

        return self._bestSolution


class IteratedLocalSearch(Algorithm):
    """Iterated Local Search (ILS), used to escape local optima and improve the EvE (Exploration vs Exploitation) compromise

    - A number of evaluations (`ls_evaluations`) is dedicated to the local search process, here the `HillClimberFirstImprovment` algorithm;
    - Starting from a newly generated solution, the local search algorithm returns a new solution;
    - If the obtained solution is better than the best solution known by `IteratedLocalSearch`, then it becomes the new best solution;
    - This process is repeated until the stopping criterion (expected number of evaluations) is reached.

    Attributes:
        initializer: {function} -- basic function strategy to initialize a solution
        evaluator: {function} -- basic function used to obtain the fitness of a solution (mono or multiple objectives)
        operators: {[Operator]} -- list of operators to use when launching the algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check whether a solution is valid under some constraints
        maximise: {bool} -- specify the kind of optimisation problem (maximisation if True, minimisation otherwise)
        currentSolution: {Solution} -- current solution managed for the current evaluation
        bestSolution: {Solution} -- best solution found so far while running the algorithm
        localSearch: {Algorithm} -- current local search used inside the ILS
        callbacks: {[Callback]} -- list of Callback class implementations used to run some instructions every fixed number of evaluations and to `load` state when initializing the algorithm

    Example:

    >>> import random
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>> # solution and algorithm
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import IteratedLocalSearch
    >>> from macop.algorithms.mono import HillClimberFirstImprovment
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>> # evaluator initialization (worths of objects passed into data)
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200 else False
    >>> # initializer function with lambda function
    >>> initializer = lambda x=20: BinarySolution.random(x, validator)
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> local_search = HillClimberFirstImprovment(initializer, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>> algo = IteratedLocalSearch(initializer, evaluator, operators, policy, validator, localSearch=local_search, maximise=True, verbose=False)
    >>> # run the algorithm
    >>> solution = algo.run(100, ls_evaluations=10)
    >>> solution._score
    137
    """
    def __init__(self,
                 initializer,
                 evaluator,
                 operators,
                 policy,
                 validator,
                 localSearch,
                 maximise=True,
                 parent=None,
                 verbose=True):

        super().__init__(initializer, evaluator, operators, policy, validator, maximise, parent, verbose)

        # specific local search associated with current algorithm
        self._localSearch = localSearch

        # need to attach current algorithm as parent
        self._localSearch.setParent(self)

    def run(self, evaluations, ls_evaluations=100):
        """
        Run the iterated local search algorithm using a local search (EvE compromise)

        Args:
            evaluations: {int} -- number of global evaluations for the ILS
            ls_evaluations: {int} -- number of local search evaluations (default: 100)

        Returns:
            {Solution} -- best solution found
        """

        # by default, use the mother class method to initialize variables
        super().run(evaluations)

        # enable resuming for ILS
        self.resume()

        # initialize current solution
        self.initRun()

        # attach the same callbacks to the inner local search
        for callback in self._callbacks:
            self._localSearch.addCallback(callback)

        # local search algorithm implementation
        while not self.stop():

            # create and search solution from local search
            newSolution = self._localSearch.run(ls_evaluations)

            # if the new solution is better than the current best, keep it
            if self.isBetter(newSolution):
                self._bestSolution = newSolution

            # the number of evaluations is already increased by the local search (attached as child),
            # so increasing the evaluation counter and the progress here is not necessary
            # self.increaseEvaluation()
            # self.progress()

            self.information()

        logging.info(f"End of {type(self).__name__}, best solution found {self._bestSolution}")

        self.end()
        return self._bestSolution
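

# Illustrative usage sketch (not part of the original module): it simply mirrors the
# doctest examples above, wiring a HillClimberFirstImprovment local search into an
# IteratedLocalSearch on a small random knapsack instance. Run it with
# `python -m macop.algorithms.mono`, assuming macop and the discrete operators,
# policies, solutions and evaluators imported below are available.
if __name__ == "__main__":
    import random

    from macop.operators.discrete.crossovers import SimpleCrossover
    from macop.operators.discrete.mutators import SimpleMutation
    from macop.policies.classicals import RandomPolicy
    from macop.solutions.discrete import BinarySolution
    from macop.evaluators.discrete.mono import KnapsackEvaluator

    random.seed(42)  # seed only for reproducibility of this sketch

    # toy knapsack instance: random worths and weights for 20 objects
    problem_size = 20
    worths = [random.randint(0, 20) for _ in range(problem_size)]
    weights = [random.randint(5, 30) for _ in range(problem_size)]

    evaluator = KnapsackEvaluator(data={'worths': worths})

    # a solution is valid while its total weight stays under 200
    def validator(solution):
        return sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200

    def initializer(x=problem_size):
        return BinarySolution.random(x, validator)

    operators = [SimpleCrossover(), SimpleMutation()]
    policy = RandomPolicy(operators)

    # local search embedded into the Iterated Local Search algorithm
    local_search = HillClimberFirstImprovment(initializer, evaluator, operators, policy, validator, maximise=True, verbose=False)
    algo = IteratedLocalSearch(initializer, evaluator, operators, policy, validator, localSearch=local_search, maximise=True, verbose=False)

    solution = algo.run(100, ls_evaluations=10)
    print(f"Best solution found: {solution} (score: {solution._score})")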