mono.py

  1. """Mono-objective available algorithms
  2. """
  3. # main imports
  4. import logging
  5. # module imports
  6. from macop.algorithms.base import Algorithm


class HillClimberFirstImprovment(Algorithm):
    """Hill Climber First Improvement, used as a quick exploration optimisation algorithm

    - First, this algorithm does a neighborhood exploration of a newly generated solution (obtained by applying an operator to the current solution) in order to find a better solution in the neighborhood space;
    - Then it replaces the current solution by the first solution from the neighborhood space which is better than the current one;
    - These steps are repeated until the number of evaluations (stopping criterion) is reached.

    Attributes:
        initializer: {function} -- basic function strategy to initialize a solution
        evaluator: {Evaluator} -- evaluator instance used to obtain fitness (mono or multiple objectives)
        operators: {[Operator]} -- list of operators to use when launching the algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check whether a solution is valid under some constraints
        maximise: {bool} -- specify the kind of optimisation problem (True for maximisation)
        currentSolution: {Solution} -- current solution managed for the current evaluation
        bestSolution: {Solution} -- best solution found so far while running the algorithm
        callbacks: {[Callback]} -- list of Callback class implementations to run some instructions every given number of evaluations and to `load` state when initializing the algorithm

    Example:

    >>> import random
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>> # solution and algorithm
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import HillClimberFirstImprovment
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>> # evaluator initialization (worth of each object passed into data)
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200 else False
    >>> # initializer function with lambda function
    >>> initializer = lambda x=20: BinarySolution.random(x, validator)
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> algo = HillClimberFirstImprovment(initializer, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>> # run the algorithm
    >>> solution = algo.run(100)
    >>> solution._score
    128
    """

    def run(self, evaluations):
        """
        Run the local search algorithm

        Args:
            evaluations: {int} -- number of local search evaluations

        Returns:
            {Solution} -- best solution found
        """

        # by default, use the parent class method to initialize variables
        super().run(evaluations)

        # initialize current solution and best solution
        self.initRun()

        solutionSize = self._currentSolution._size

        # local search algorithm implementation
        while not self.stop():

            for _ in range(solutionSize):

                # update current solution using policy
                newSolution = self.update(self._currentSolution)

                # if the new solution is better than the current one, keep it and
                # stop the current exploration (first improvement)
                if self.isBetter(newSolution):
                    self._bestSolution = newSolution
                    break

                # increase number of evaluations
                self.increaseEvaluation()

                self.progress()
                logging.info(
                    f"---- Current {newSolution} - SCORE {newSolution.fitness()}"
                )

                # stop algorithm if necessary
                if self.stop():
                    break

            # set the new current solution using the best solution found in this neighborhood search
            self._currentSolution = self._bestSolution

        logging.info(
            f"End of {type(self).__name__}, best solution found {self._bestSolution}"
        )

        return self._bestSolution
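
# Note: the two hill climbers in this module differ only in their acceptance rule.
# HillClimberFirstImprovment (above) keeps the first improving neighbor it finds and
# restarts exploration from it, while HillClimberBestImprovment (below) spends the
# whole neighborhood budget before moving on, which is more exploitative but
# typically costs more evaluations per improvement step.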


class HillClimberBestImprovment(Algorithm):
    """Hill Climber Best Improvement, used as an exploitation optimisation algorithm

    - First, this algorithm does a neighborhood exploration of a newly generated solution (obtained by applying an operator to the current solution) in order to find the best solution in the neighborhood space;
    - Then it uses the best solution found in the neighborhood space as the new current solution;
    - These steps are repeated until the number of evaluations (stopping criterion) is reached.

    Attributes:
        initializer: {function} -- basic function strategy to initialize a solution
        evaluator: {Evaluator} -- evaluator instance used to obtain fitness (mono or multiple objectives)
        operators: {[Operator]} -- list of operators to use when launching the algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check whether a solution is valid under some constraints
        maximise: {bool} -- specify the kind of optimisation problem (True for maximisation)
        currentSolution: {Solution} -- current solution managed for the current evaluation
        bestSolution: {Solution} -- best solution found so far while running the algorithm
        callbacks: {[Callback]} -- list of Callback class implementations to run some instructions every given number of evaluations and to `load` state when initializing the algorithm

    Example:

    >>> import random
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>> # solution and algorithm
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import HillClimberBestImprovment
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>> # evaluator initialization (worth of each object passed into data)
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200 else False
    >>> # initializer function with lambda function
    >>> initializer = lambda x=20: BinarySolution.random(x, validator)
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> algo = HillClimberBestImprovment(initializer, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>> # run the algorithm
    >>> solution = algo.run(100)
    >>> solution._score
    104
    """

    def run(self, evaluations):
        """
        Run the local search algorithm

        Args:
            evaluations: {int} -- number of local search evaluations

        Returns:
            {Solution} -- best solution found
        """

        # by default, use the parent class method to initialize variables
        super().run(evaluations)

        # initialize current solution and best solution
        self.initRun()

        solutionSize = self._currentSolution._size

        # local search algorithm implementation
        while not self.stop():

            for _ in range(solutionSize):

                # update current solution using policy
                newSolution = self.update(self._currentSolution)

                # if the new solution is better than the best one seen so far, keep it
                # (no break here: the whole neighborhood budget is explored, best improvement)
                if self.isBetter(newSolution):
                    self._bestSolution = newSolution

                # increase number of evaluations
                self.increaseEvaluation()

                self.progress()
                logging.info(
                    f"---- Current {newSolution} - SCORE {newSolution.fitness()}"
                )

                # stop algorithm if necessary
                if self.stop():
                    break

            # set the new current solution using the best solution found in this neighborhood search
            self._currentSolution = self._bestSolution

        logging.info(
            f"End of {type(self).__name__}, best solution found {self._bestSolution}"
        )

        return self._bestSolution


class IteratedLocalSearch(Algorithm):
    """Iterated Local Search (ILS) used to avoid local optima and improve the EvE (Exploration vs Exploitation) compromise

    - A number of evaluations (`ls_evaluations`) is dedicated to the local search process, for instance the `HillClimberFirstImprovment` algorithm used in the example below;
    - Starting from a newly generated solution, the local search algorithm returns a new solution;
    - If the obtained solution is better than the best solution known by `IteratedLocalSearch`, then the best solution is replaced;
    - This process is repeated until the stopping criterion (number of expected evaluations) is reached.

    Attributes:
        initializer: {function} -- basic function strategy to initialize a solution
        evaluator: {Evaluator} -- evaluator instance used to obtain fitness (mono or multiple objectives)
        operators: {[Operator]} -- list of operators to use when launching the algorithm
        policy: {Policy} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check whether a solution is valid under some constraints
        maximise: {bool} -- specify the kind of optimisation problem (True for maximisation)
        currentSolution: {Solution} -- current solution managed for the current evaluation
        bestSolution: {Solution} -- best solution found so far while running the algorithm
        localSearch: {Algorithm} -- local search algorithm used inside the ILS
        callbacks: {[Callback]} -- list of Callback class implementations to run some instructions every given number of evaluations and to `load` state when initializing the algorithm

    Example:

    >>> import random
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>> # solution and algorithm
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import IteratedLocalSearch
    >>> from macop.algorithms.mono import HillClimberFirstImprovment
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>> # evaluator initialization (worth of each object passed into data)
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: True if sum([weights[i] for i, value in enumerate(solution._data) if value == 1]) < 200 else False
    >>> # initializer function with lambda function
    >>> initializer = lambda x=20: BinarySolution.random(x, validator)
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> local_search = HillClimberFirstImprovment(initializer, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>> algo = IteratedLocalSearch(initializer, evaluator, operators, policy, validator, localSearch=local_search, maximise=True, verbose=False)
    >>> # run the algorithm
    >>> solution = algo.run(100, ls_evaluations=10)
    >>> solution._score
    137
    """

    def __init__(self,
                 initializer,
                 evaluator,
                 operators,
                 policy,
                 validator,
                 localSearch,
                 maximise=True,
                 parent=None,
                 verbose=True):

        super().__init__(initializer, evaluator, operators, policy, validator,
                         maximise, parent, verbose)

        # specific local search associated with current algorithm
        self._localSearch = localSearch

        # need to attach current algorithm as parent
        self._localSearch.setParent(self)
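        # Note: attaching the ILS as parent of the local search presumably lets
        # evaluations spent inside the local search count towards the global ILS
        # budget (see the commented-out increaseEvaluation() call in run() below).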

    def run(self, evaluations, ls_evaluations=100):
        """
        Run the iterated local search algorithm using a local search (EvE compromise)

        Args:
            evaluations: {int} -- number of global evaluations for the ILS
            ls_evaluations: {int} -- number of local search evaluations (default: 100)

        Returns:
            {Solution} -- best solution found
        """

        # by default, use the parent class method to initialize variables
        super().run(evaluations)

        # enable resuming for ILS
        self.resume()

        # initialize current solution
        self.initRun()

        # attach the same callbacks to the local search
        for callback in self._callbacks:
            self._localSearch.addCallback(callback)

        # local search algorithm implementation
        while not self.stop():

            # create and search a solution using the local search
            newSolution = self._localSearch.run(ls_evaluations)

            # if the solution found is better than the current best one, replace it
            if self.isBetter(newSolution):
                self._bestSolution = newSolution

            # the number of evaluations is already increased by the local search,
            # so increaseEvaluation() and progress() are not necessary here
            # self.increaseEvaluation()
            # self.progress()

            self.information()

        logging.info(
            f"End of {type(self).__name__}, best solution found {self._bestSolution}"
        )

        self.end()

        return self._bestSolution
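

# Illustrative usage sketch: combine the algorithms above with a checkpoint callback,
# which the docstrings mention but do not demonstrate. The `BasicCheckpoint` import
# path and its (every, filepath) parameters are assumed here; verify them against the
# installed macop version before relying on this snippet.
if __name__ == '__main__':
    import random

    from macop.operators.discrete.crossovers import SimpleCrossover
    from macop.operators.discrete.mutators import SimpleMutation
    from macop.policies.classicals import RandomPolicy
    from macop.solutions.discrete import BinarySolution
    from macop.evaluators.discrete.mono import KnapsackEvaluator
    from macop.callbacks.classicals import BasicCheckpoint  # assumed import path

    problem_size = 20
    worths = [random.randint(0, 20) for _ in range(problem_size)]
    weights = [random.randint(5, 30) for _ in range(problem_size)]

    evaluator = KnapsackEvaluator(data={'worths': worths})
    validator = lambda solution: sum(weights[i] for i, v in enumerate(solution._data) if v == 1) < 200
    initializer = lambda x=problem_size: BinarySolution.random(x, validator)

    operators = [SimpleCrossover(), SimpleMutation()]
    policy = RandomPolicy(operators)

    # a first-improvement hill climber acts as the inner local search of the ILS
    local_search = HillClimberFirstImprovment(initializer, evaluator, operators,
                                              policy, validator, maximise=True,
                                              verbose=False)
    algo = IteratedLocalSearch(initializer, evaluator, operators, policy, validator,
                               localSearch=local_search, maximise=True, verbose=False)

    # assumed callback signature: save progress every 10 evaluations into a CSV file
    algo.addCallback(BasicCheckpoint(every=10, filepath='checkpoint.csv'))

    best = algo.run(1000, ls_evaluations=10)
    print(best._score)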