  1. """Mono-objective available algorithms
  2. """
  3. # main imports
  4. import logging
  5. # module imports
  6. from macop.algorithms.base import Algorithm
  7. class HillClimberFirstImprovment(Algorithm):
  8. """Hill Climber First Improvment used as quick exploration optimisation algorithm
  9. - First, this algorithm do a neighborhood exploration of a new generated solution (by doing operation on the current solution obtained) in order to find a better solution from the neighborhood space;
  10. - Then replace the current solution by the first one from the neighbordhood space which is better than the current solution;
  11. - And do these steps until a number of evaluation (stopping criterion) is reached.
  12. Attributes:
  13. initalizer: {function} -- basic function strategy to initialise solution
  14. evaluator: {:class:`~macop.evaluators.base.Evaluator`} -- evaluator instance in order to obtained fitness (mono or multiple objectives)
  15. operators: {[:class:`~macop.operators.base.Operator`]} -- list of operator to use when launching algorithm
  16. policy: {:class:`~macop.policies.base.Policy`} -- Policy class implementation strategy to select operators
  17. validator: {function} -- basic function to check if solution is valid or not under some constraints
  18. maximise: {bool} -- specify kind of optimisation problem
  19. currentSolution: {:class:`~macop.solutions.base.Solution`} -- current solution managed for current evaluation
  20. bestSolution: {:class:`~macop.solutions.base.Solution`} -- best solution found so far during running algorithm
  21. callbacks: {[:class:`~macop.callbacks.base.Callback`]} -- list of Callback class implementation to do some instructions every number of evaluations and `load` when initialising algorithm
  22. parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of inner Algorithm instance (optional)
  23. Example:
    >>> import random
    >>>
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>>
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>>
    >>> # solution and algorithm imports
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import HillClimberFirstImprovment
    >>>
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>>
    >>> # evaluator initialisation (worths of objects passed into data)
    >>> random.seed(42)  # seed added so that the expected score below is reproducible
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>>
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: sum([weights[i] for i, value in enumerate(solution.data) if value == 1]) < 200
    >>>
    >>> # initialiser function for a binary solution of a given size
    >>> initialiser = lambda x=20: BinarySolution.random(x, validator)
    >>>
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> algo = HillClimberFirstImprovment(initialiser, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>>
    >>> # run the algorithm
    >>> solution = algo.run(100)
    >>> solution._score
    128
    """
    def run(self, evaluations):
        """
        Run the local search algorithm

        Args:
            evaluations: {int} -- number of local search evaluations

        Returns:
            {:class:`~macop.solutions.base.Solution`}: best solution found
        """

        # by default, use the parent class method to initialise variables
        super().run(evaluations)

        # initialise current solution and best solution
        self.initRun()

        solutionSize = self._currentSolution._size

        # local search algorithm implementation
        while not self.stop():

            for _ in range(solutionSize):

                # update current solution using the policy
                newSolution = self.update(self._currentSolution)

                # if the new solution is better than the current one, keep it and stop the current exploration (first improvement)
                if self.isBetter(newSolution):
                    self._bestSolution = newSolution
                    break

                # increase the number of evaluations
                self.increaseEvaluation()

                self.progress()
                logging.info(
                    f"---- Current {newSolution} - SCORE {newSolution.fitness}")

                # stop the algorithm if necessary
                if self.stop():
                    break

            # set the new current solution using the best solution found during this neighborhood search
            self._currentSolution = self._bestSolution

        logging.info(
            f"End of {type(self).__name__}, best solution found {self._bestSolution}")

        return self._bestSolution


class HillClimberBestImprovment(Algorithm):
    """Hill Climber Best Improvement, used as an exploitation optimisation algorithm:

    - First, this algorithm performs a neighborhood exploration of a newly generated solution (obtained by applying an operator to the current solution) in order to find the best solution in the neighborhood space;
    - Then, the best solution found in the neighborhood space becomes the new current solution;
    - These steps are repeated until a number of evaluations (stopping criterion) is reached.

    Attributes:
        initialiser: {function} -- basic function strategy to initialise a solution
        evaluator: {:class:`~macop.evaluators.base.Evaluator`} -- evaluator instance used to obtain fitness (mono or multiple objectives)
        operators: {[:class:`~macop.operators.base.Operator`]} -- list of operators to use when launching the algorithm
        policy: {:class:`~macop.policies.base.Policy`} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check whether a solution is valid or not under some constraints
        maximise: {bool} -- specify the kind of optimisation problem (maximisation if True, minimisation otherwise)
        currentSolution: {:class:`~macop.solutions.base.Solution`} -- current solution managed for the current evaluation
        bestSolution: {:class:`~macop.solutions.base.Solution`} -- best solution found so far while running the algorithm
        callbacks: {[:class:`~macop.callbacks.base.Callback`]} -- list of Callback class implementations to run some instructions every number of evaluations and to `load` data when initialising the algorithm
        parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of an inner Algorithm instance (optional)

    Example:
    >>> import random
    >>>
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>>
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>>
    >>> # solution and algorithm imports
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import HillClimberBestImprovment
    >>>
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>>
    >>> # evaluator initialisation (worths of objects passed into data)
    >>> random.seed(42)  # seed added so that the expected score below is reproducible
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>>
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: sum([weights[i] for i, value in enumerate(solution.data) if value == 1]) < 200
    >>>
    >>> # initialiser function for a binary solution of a given size
    >>> initialiser = lambda x=20: BinarySolution.random(x, validator)
    >>>
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> algo = HillClimberBestImprovment(initialiser, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>>
    >>> # run the algorithm
    >>> solution = algo.run(100)
    >>> solution._score
    104
    """
    def run(self, evaluations):
        """
        Run the local search algorithm

        Args:
            evaluations: {int} -- number of local search evaluations

        Returns:
            {:class:`~macop.solutions.base.Solution`}: best solution found
        """

        # by default, use the parent class method to initialise variables
        super().run(evaluations)

        # initialise current solution and best solution
        self.initRun()

        solutionSize = self._currentSolution._size

        # local search algorithm implementation
        while not self.stop():

            for _ in range(solutionSize):

                # update current solution using the policy
                newSolution = self.update(self._currentSolution)

                # if the new solution is better than the current one, keep it (the whole neighborhood is still explored: best improvement)
                if self.isBetter(newSolution):
                    self._bestSolution = newSolution

                # increase the number of evaluations
                self.increaseEvaluation()

                self.progress()
                logging.info(
                    f"---- Current {newSolution} - SCORE {newSolution.fitness}")

                # stop the algorithm if necessary
                if self.stop():
                    break

            # set the new current solution using the best solution found during this neighborhood search
            self._currentSolution = self._bestSolution

        logging.info(
            f"End of {type(self).__name__}, best solution found {self._bestSolution}")

        return self._bestSolution


class IteratedLocalSearch(Algorithm):
    """Iterated Local Search (ILS), used to avoid local optima and improve the EvE (Exploration vs Exploitation) compromise:

    - A number of evaluations (`ls_evaluations`) is dedicated to the local search process, here a `HillClimberFirstImprovment` algorithm;
    - Starting from a newly generated solution, the local search algorithm returns a new solution;
    - If the obtained solution is better than the best solution known by `IteratedLocalSearch`, then it becomes the new best solution;
    - This process is repeated until the stopping criterion (expected number of evaluations) is reached.

    Attributes:
        initialiser: {function} -- basic function strategy to initialise a solution
        evaluator: {:class:`~macop.evaluators.base.Evaluator`} -- evaluator instance used to obtain fitness (mono or multiple objectives)
        operators: {[:class:`~macop.operators.base.Operator`]} -- list of operators to use when launching the algorithm
        policy: {:class:`~macop.policies.base.Policy`} -- Policy class implementation strategy to select operators
        validator: {function} -- basic function to check whether a solution is valid or not under some constraints
        maximise: {bool} -- specify the kind of optimisation problem (maximisation if True, minimisation otherwise)
        currentSolution: {:class:`~macop.solutions.base.Solution`} -- current solution managed for the current evaluation
        bestSolution: {:class:`~macop.solutions.base.Solution`} -- best solution found so far while running the algorithm
        localSearch: {:class:`~macop.algorithms.base.Algorithm`} -- local search algorithm used inside ILS
        callbacks: {[:class:`~macop.callbacks.base.Callback`]} -- list of Callback class implementations to run some instructions every number of evaluations and to `load` data when initialising the algorithm
        parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of an inner Algorithm instance (optional)

    Example:
    >>> import random
    >>>
    >>> # operators import
    >>> from macop.operators.discrete.crossovers import SimpleCrossover
    >>> from macop.operators.discrete.mutators import SimpleMutation
    >>>
    >>> # policy import
    >>> from macop.policies.classicals import RandomPolicy
    >>>
    >>> # solution and algorithm imports
    >>> from macop.solutions.discrete import BinarySolution
    >>> from macop.algorithms.mono import IteratedLocalSearch
    >>> from macop.algorithms.mono import HillClimberFirstImprovment
    >>>
    >>> # evaluator import
    >>> from macop.evaluators.discrete.mono import KnapsackEvaluator
    >>>
    >>> # evaluator initialisation (worths of objects passed into data)
    >>> random.seed(42)  # seed added so that the expected score below is reproducible
    >>> problem_size = 20
    >>> worths = [ random.randint(0, 20) for i in range(problem_size) ]
    >>> evaluator = KnapsackEvaluator(data={'worths': worths})
    >>>
    >>> # validator specification (based on the weight of each object)
    >>> weights = [ random.randint(5, 30) for i in range(problem_size) ]
    >>> validator = lambda solution: sum([weights[i] for i, value in enumerate(solution.data) if value == 1]) < 200
    >>>
    >>> # initialiser function using a lambda function
    >>> initialiser = lambda x=20: BinarySolution.random(x, validator)
    >>>
    >>> # operators list with crossover and mutation
    >>> operators = [SimpleCrossover(), SimpleMutation()]
    >>> policy = RandomPolicy(operators)
    >>> local_search = HillClimberFirstImprovment(initialiser, evaluator, operators, policy, validator, maximise=True, verbose=False)
    >>> algo = IteratedLocalSearch(initialiser, evaluator, operators, policy, validator, localSearch=local_search, maximise=True, verbose=False)
    >>>
    >>> # run the algorithm using a specific number of evaluations for the local search
    >>> solution = algo.run(100, ls_evaluations=10)
    >>> solution._score
    137
    """
    def __init__(self,
                 initialiser,
                 evaluator,
                 operators,
                 policy,
                 validator,
                 localSearch,
                 maximise=True,
                 parent=None,
                 verbose=True):
        """Iterated Local Search algorithm initialisation using a specific local search {:class:`~macop.algorithms.base.Algorithm`} instance

        Args:
            initialiser: {function} -- basic function strategy to initialise a solution
            evaluator: {:class:`~macop.evaluators.base.Evaluator`} -- evaluator instance used to obtain fitness (mono or multiple objectives)
            operators: {[:class:`~macop.operators.base.Operator`]} -- list of operators to use when launching the algorithm
            policy: {:class:`~macop.policies.base.Policy`} -- Policy implementation strategy to select operators
            validator: {function} -- basic function to check whether a solution is valid or not under some constraints
            localSearch: {:class:`~macop.algorithms.base.Algorithm`} -- local search algorithm used inside ILS
            maximise: {bool} -- specify the kind of optimisation problem (maximisation if True, minimisation otherwise)
            parent: {:class:`~macop.algorithms.base.Algorithm`} -- parent algorithm reference in case of an inner Algorithm instance (optional)
            verbose: {bool} -- whether to display information about the algorithm
        """

        super().__init__(initialiser, evaluator, operators, policy, validator,
                         maximise, parent, verbose)

        # specific local search associated with the current algorithm
        self._localSearch = localSearch

        # the current algorithm needs to be attached as the parent of the local search
        self._localSearch.setParent(self)
    def run(self, evaluations, ls_evaluations=100):
        """
        Run the iterated local search algorithm using a local search (EvE compromise)

        Args:
            evaluations: {int} -- number of global evaluations for ILS
            ls_evaluations: {int} -- number of local search evaluations (default: 100)

        Returns:
            {:class:`~macop.solutions.base.Solution`}: best solution found
        """

        # by default, use the parent class method to initialise variables
        super().run(evaluations)

        # enable resuming for ILS
        self.resume()

        # initialise current solution
        self.initRun()

        # share the same callbacks with the local search
        for callback in self._callbacks:
            self._localSearch.addCallback(callback)

        # local search algorithm implementation
        while not self.stop():

            # create and search a solution using the local search
            newSolution = self._localSearch.run(ls_evaluations)

            # if the obtained solution is better than the current best, replace it
            if self.isBetter(newSolution):
                self._bestSolution = newSolution

            # the number of evaluations is already increased by the local search (through the parent reference),
            # so increasing evaluations and reporting progress is not necessary here
            # self.increaseEvaluation()
            # self.progress()

            self.information()

        logging.info(
            f"End of {type(self).__name__}, best solution found {self._bestSolution}")

        self.end()

        return self._bestSolution
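

# Minimal convenience sketch, not part of the original macop module: running this
# file directly executes the doctest examples embedded in the docstrings above,
# which is a quick way to check that the usage examples still pass.
if __name__ == "__main__":
    import doctest
    doctest.testmod()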