Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/scipy/optimize/_dual_annealing.py: 12%
287 statements
coverage.py v7.3.2, created at 2023-12-12 06:31 +0000
1# Dual Annealing implementation.
2# Copyright (c) 2018 Sylvain Gubian <sylvain.gubian@pmi.com>,
3# Yang Xiang <yang.xiang@pmi.com>
4# Author: Sylvain Gubian, Yang Xiang, PMP S.A.
6"""
7A Dual Annealing global optimization algorithm
8"""
10import numpy as np
11from scipy.optimize import OptimizeResult
12from scipy.optimize import minimize, Bounds
13from scipy.special import gammaln
14from scipy._lib._util import check_random_state
15from scipy.optimize._constraints import new_bounds_to_old
17__all__ = ['dual_annealing']
20class VisitingDistribution:
21 """
22 Class used to generate new coordinates based on the distorted
23 Cauchy-Lorentz distribution. Depending on the step within the strategy
24 chain, the class implements the strategy for generating new location
25 changes.
27 Parameters
28 ----------
29 lb : array_like
30 A 1-D NumPy ndarray containing lower bounds of the generated
31 components. Neither NaN or inf are allowed.
32 ub : array_like
33 A 1-D NumPy ndarray containing upper bounds for the generated
34 components. Neither NaN nor inf is allowed.
35 visiting_param : float
36 Parameter for visiting distribution. Default value is 2.62.
37 Higher values give the visiting distribution a heavier tail, which
38 makes the algorithm jump to a more distant region.
39 The value range is (1, 3]. Its value is fixed for the life of the
40 object.
41 rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`}
42 A `~numpy.random.RandomState` or `~numpy.random.Generator` object
43 used as the shared source of random numbers for the run.
45 """
46 TAIL_LIMIT = 1.e8
47 MIN_VISIT_BOUND = 1.e-10
49 def __init__(self, lb, ub, visiting_param, rand_gen):
50 # if you wish to make _visiting_param adjustable during the life of
51 # the object then _factor2, _factor3, _factor5, _d1, _factor6 will
52 # have to be dynamically calculated in `visit_fn`. They're factored
53 # out here so they don't need to be recalculated all the time.
54 self._visiting_param = visiting_param
55 self.rand_gen = rand_gen
56 self.lower = lb
57 self.upper = ub
58 self.bound_range = ub - lb
60 # these are invariant numbers unless visiting_param changes
61 self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
62 self._visiting_param - 1.0))
63 self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
64 / (self._visiting_param - 1.0))
65 self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
66 3.0 - self._visiting_param))
68 self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
69 self._d1 = 2.0 - self._factor5
70 self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
71 np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
73 def visiting(self, x, step, temperature):
74 """ Based on the step in the strategy chain, new coordinates are
75 generated by changing all components is the same time or only
76 one of them, the new values are computed with visit_fn method
77 """
78 dim = x.size
79 if step < dim:
80 # Changing all coordinates with a new visiting value
81 visits = self.visit_fn(temperature, dim)
82 upper_sample, lower_sample = self.rand_gen.uniform(size=2)
83 visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
84 visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
85 x_visit = visits + x
86 a = x_visit - self.lower
87 b = np.fmod(a, self.bound_range) + self.bound_range
88 x_visit = np.fmod(b, self.bound_range) + self.lower
89 x_visit[np.fabs(
90 x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
91 else:
92 # Changing only one coordinate at a time based on strategy
93 # chain step
94 x_visit = np.copy(x)
95 visit = self.visit_fn(temperature, 1)[0]
96 if visit > self.TAIL_LIMIT:
97 visit = self.TAIL_LIMIT * self.rand_gen.uniform()
98 elif visit < -self.TAIL_LIMIT:
99 visit = -self.TAIL_LIMIT * self.rand_gen.uniform()
100 index = step - dim
101 x_visit[index] = visit + x[index]
102 a = x_visit[index] - self.lower[index]
103 b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
104 x_visit[index] = np.fmod(b, self.bound_range[
105 index]) + self.lower[index]
106 if np.fabs(x_visit[index] - self.lower[
107 index]) < self.MIN_VISIT_BOUND:
108 x_visit[index] += self.MIN_VISIT_BOUND
109 return x_visit
111 def visit_fn(self, temperature, dim):
112 """ Formula Visita from p. 405 of reference [2] """
113 x, y = self.rand_gen.normal(size=(dim, 2)).T
115 factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
116 factor4 = self._factor4_p * factor1
118 # sigmax
119 x *= np.exp(-(self._visiting_param - 1.0) * np.log(
120 self._factor6 / factor4) / (3.0 - self._visiting_param))
122 den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
123 (3.0 - self._visiting_param))
125 return x / den
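
# --- Illustrative sketch (editor's addition, not part of SciPy) --------------
# A minimal sketch of how VisitingDistribution can be exercised on its own,
# assuming plain 1-D bound arrays and a generator from check_random_state
# (imported above). The helper name _visiting_sketch is hypothetical and only
# serves this illustration.
def _visiting_sketch():
    lb = np.array([-5.12, -5.12])
    ub = np.array([5.12, 5.12])
    rng = check_random_state(42)  # hypothetical fixed seed for repeatability
    vd = VisitingDistribution(lb, ub, visiting_param=2.62, rand_gen=rng)
    x = np.zeros(2)
    # step < dim perturbs all coordinates; step >= dim perturbs one at a time
    x_all = vd.visiting(x, step=0, temperature=5230.0)
    x_one = vd.visiting(x, step=2, temperature=5230.0)
    return x_all, x_one
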
128class EnergyState:
129 """
130 Class used to record the energy state. At any time, it knows the
131 current coordinates in use and the most recent best location.
133 Parameters
134 ----------
135 lower : array_like
136 A 1-D NumPy ndarray containing lower bounds for generating the initial
137 random components in the `reset` method.
138 upper : array_like
139 A 1-D NumPy ndarray containing upper bounds for generating the initial
140 random components in the `reset` method.
141 Neither NaN nor inf is allowed.
142 callback : callable, ``callback(x, f, context)``, optional
143 A callback function which will be called for all minima found.
144 ``x`` and ``f`` are the coordinates and function value of the
145 latest minimum found, and `context` has value in [0, 1, 2]
146 """
147 # Maximum number of trials for generating a valid starting point
148 MAX_REINIT_COUNT = 1000
150 def __init__(self, lower, upper, callback=None):
151 self.ebest = None
152 self.current_energy = None
153 self.current_location = None
154 self.xbest = None
155 self.lower = lower
156 self.upper = upper
157 self.callback = callback
159 def reset(self, func_wrapper, rand_gen, x0=None):
160 """
161 Initialize the current location in the search domain. If `x0` is not
162 provided, a random location within the bounds is generated.
163 """
164 if x0 is None:
165 self.current_location = rand_gen.uniform(self.lower, self.upper,
166 size=len(self.lower))
167 else:
168 self.current_location = np.copy(x0)
169 init_error = True
170 reinit_counter = 0
171 while init_error:
172 self.current_energy = func_wrapper.fun(self.current_location)
173 if self.current_energy is None:
174 raise ValueError('Objective function is returning None')
175 if (not np.isfinite(self.current_energy) or np.isnan(
176 self.current_energy)):
177 if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
178 init_error = False
179 message = (
180 'Stopping algorithm because function '
181 'creates NaN or (+/-) infinity values even with '
182 'trying new random parameters'
183 )
184 raise ValueError(message)
185 self.current_location = rand_gen.uniform(self.lower,
186 self.upper,
187 size=self.lower.size)
188 reinit_counter += 1
189 else:
190 init_error = False
191 # If first time reset, initialize ebest and xbest
192 if self.ebest is None and self.xbest is None:
193 self.ebest = self.current_energy
194 self.xbest = np.copy(self.current_location)
195 # Otherwise, we keep them in case of reannealing reset
197 def update_best(self, e, x, context):
198 self.ebest = e
199 self.xbest = np.copy(x)
200 if self.callback is not None:
201 val = self.callback(x, e, context)
202 if val is not None:
203 if val:
204 return ('Callback function requested to stop early by '
205 'returning True')
207 def update_current(self, e, x):
208 self.current_energy = e
209 self.current_location = np.copy(x)
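
# --- Illustrative sketch (editor's addition, not part of SciPy) --------------
# A minimal walk through EnergyState.reset and update_current, assuming a
# stand-in wrapper that only exposes the ``fun`` attribute used by ``reset``.
# The names _StubWrapper and _energy_state_sketch are hypothetical.
def _energy_state_sketch():
    class _StubWrapper:
        def fun(self, x):
            return float(np.sum(x * x))

    lower = np.array([-1.0, -1.0])
    upper = np.array([1.0, 1.0])
    state = EnergyState(lower, upper)
    state.reset(_StubWrapper(), check_random_state(0))
    # ebest/xbest were initialized by the first reset; update_current only
    # moves the current point, never the recorded best
    state.update_current(state.current_energy + 1.0, state.current_location)
    return state.ebest, state.xbest
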
212class StrategyChain:
213 """
214 Class that implements, within a Markov chain, the strategy for location
215 acceptance and local-search decision making.
217 Parameters
218 ----------
219 acceptance_param : float
220 Parameter for acceptance distribution. It is used to control the
221 probability of acceptance. The lower the acceptance parameter, the
222 smaller the probability of acceptance. Default value is -5.0 with
223 a range (-1e4, -5].
224 visit_dist : VisitingDistribution
225 Instance of `VisitingDistribution` class.
226 func_wrapper : ObjectiveFunWrapper
227 Instance of `ObjectiveFunWrapper` class.
228 minimizer_wrapper: LocalSearchWrapper
229 Instance of `LocalSearchWrapper` class.
230 rand_gen : {None, int, `numpy.random.Generator`,
231 `numpy.random.RandomState`}, optional
233 If `seed` is None (or `np.random`), the `numpy.random.RandomState`
234 singleton is used.
235 If `seed` is an int, a new ``RandomState`` instance is used,
236 seeded with `seed`.
237 If `seed` is already a ``Generator`` or ``RandomState`` instance then
238 that instance is used.
239 energy_state: EnergyState
240 Instance of `EnergyState` class.
242 """
244 def __init__(self, acceptance_param, visit_dist, func_wrapper,
245 minimizer_wrapper, rand_gen, energy_state):
246 # Local strategy chain minimum energy and location
247 self.emin = energy_state.current_energy
248 self.xmin = np.array(energy_state.current_location)
249 # Global optimizer state
250 self.energy_state = energy_state
251 # Acceptance parameter
252 self.acceptance_param = acceptance_param
253 # Visiting distribution instance
254 self.visit_dist = visit_dist
255 # Wrapper to objective function
256 self.func_wrapper = func_wrapper
257 # Wrapper to the local minimizer
258 self.minimizer_wrapper = minimizer_wrapper
259 self.not_improved_idx = 0
260 self.not_improved_max_idx = 1000
261 self._rand_gen = rand_gen
262 self.temperature_step = 0
263 self.K = 100 * len(energy_state.current_location)
265 def accept_reject(self, j, e, x_visit):
266 r = self._rand_gen.uniform()
267 pqv_temp = 1.0 - ((1.0 - self.acceptance_param) *
268 (e - self.energy_state.current_energy) / self.temperature_step)
269 if pqv_temp <= 0.:
270 pqv = 0.
271 else:
272 pqv = np.exp(np.log(pqv_temp) / (
273 1. - self.acceptance_param))
275 if r <= pqv:
276 # We accept the new location and update state
277 self.energy_state.update_current(e, x_visit)
278 self.xmin = np.copy(self.energy_state.current_location)
280 # No improvement for a long time
281 if self.not_improved_idx >= self.not_improved_max_idx:
282 if j == 0 or self.energy_state.current_energy < self.emin:
283 self.emin = self.energy_state.current_energy
284 self.xmin = np.copy(self.energy_state.current_location)
286 def run(self, step, temperature):
287 self.temperature_step = temperature / float(step + 1)
288 self.not_improved_idx += 1
289 for j in range(self.energy_state.current_location.size * 2):
290 if j == 0:
291 if step == 0:
292 self.energy_state_improved = True
293 else:
294 self.energy_state_improved = False
295 x_visit = self.visit_dist.visiting(
296 self.energy_state.current_location, j, temperature)
297 # Calling the objective function
298 e = self.func_wrapper.fun(x_visit)
299 if e < self.energy_state.current_energy:
300 # We have got a better energy value
301 self.energy_state.update_current(e, x_visit)
302 if e < self.energy_state.ebest:
303 val = self.energy_state.update_best(e, x_visit, 0)
304 if val is not None:
305 if val:
306 return val
307 self.energy_state_improved = True
308 self.not_improved_idx = 0
309 else:
310 # We have not improved but do we accept the new location?
311 self.accept_reject(j, e, x_visit)
312 if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
313 return ('Maximum number of function calls reached '
314 'during annealing')
315 # End of StrategyChain loop
317 def local_search(self):
318 # Decision making for performing a local search
319 # based on strategy chain results
320 # If the energy has improved, or there has been no improvement for
321 # too long, perform a local search with the best strategy chain location
322 if self.energy_state_improved:
323 # Global energy has improved, let's see if LS improves further
324 e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
325 self.energy_state.ebest)
326 if e < self.energy_state.ebest:
327 self.not_improved_idx = 0
328 val = self.energy_state.update_best(e, x, 1)
329 if val is not None:
330 if val:
331 return val
332 self.energy_state.update_current(e, x)
333 if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
334 return ('Maximum number of function calls reached '
335 'during local search')
336 # Check the probability of needing to perform a LS even with no improvement
337 do_ls = False
338 if self.K < 90 * len(self.energy_state.current_location):
339 pls = np.exp(self.K * (
340 self.energy_state.ebest - self.energy_state.current_energy) /
341 self.temperature_step)
342 if pls >= self._rand_gen.uniform():
343 do_ls = True
344 # Global energy not improved, let's see what LS gives
345 # on the best strategy chain location
346 if self.not_improved_idx >= self.not_improved_max_idx:
347 do_ls = True
348 if do_ls:
349 e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
350 self.xmin = np.copy(x)
351 self.emin = e
352 self.not_improved_idx = 0
353 self.not_improved_max_idx = self.energy_state.current_location.size
354 if e < self.energy_state.ebest:
355 val = self.energy_state.update_best(
356 self.emin, self.xmin, 2)
357 if val is not None:
358 if val:
359 return val
360 self.energy_state.update_current(e, x)
361 if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
362 return ('Maximum number of function calls reached '
363 'during dual annealing')
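
# --- Illustrative sketch (editor's addition, not part of SciPy) --------------
# Stand-alone restatement of the acceptance rule used in accept_reject above:
# p = [1 - (1 - q_a) * (e - e_current) / T_step] ** (1 / (1 - q_a)), set to
# zero when the bracketed term is non-positive. The function name is
# hypothetical; the computation mirrors StrategyChain.accept_reject.
def _acceptance_probability_sketch(e, e_current, temperature_step,
                                   acceptance_param=-5.0):
    pqv_temp = 1.0 - ((1.0 - acceptance_param) *
                      (e - e_current) / temperature_step)
    if pqv_temp <= 0.:
        return 0.
    return np.exp(np.log(pqv_temp) / (1. - acceptance_param))
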
366class ObjectiveFunWrapper:
368 def __init__(self, func, maxfun=1e7, *args):
369 self.func = func
370 self.args = args
371 # Number of objective function evaluations
372 self.nfev = 0
373 # Number of gradient function evaluations, if used
374 self.ngev = 0
375 # Number of Hessian evaluations of the objective function, if used
376 self.nhev = 0
377 self.maxfun = maxfun
379 def fun(self, x):
380 self.nfev += 1
381 return self.func(x, *self.args)
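
# --- Illustrative sketch (editor's addition, not part of SciPy) --------------
# ObjectiveFunWrapper forwards to the user function with the extra fixed args
# and counts evaluations in ``nfev``. The example function and the name
# _fun_wrapper_sketch are hypothetical.
def _fun_wrapper_sketch():
    wrapper = ObjectiveFunWrapper(lambda x, shift: np.sum((x - shift) ** 2),
                                  1e7, 0.5)
    value = wrapper.fun(np.array([1.0, 2.0]))
    return value, wrapper.nfev  # nfev is 1 after a single evaluation
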
384class LocalSearchWrapper:
385 """
386 Class used to wrap around the minimizer used for local search.
387 The default local minimizer is SciPy's `minimize` with the 'L-BFGS-B' method.
388 """
390 LS_MAXITER_RATIO = 6
391 LS_MAXITER_MIN = 100
392 LS_MAXITER_MAX = 1000
394 def __init__(self, search_bounds, func_wrapper, **kwargs):
395 self.func_wrapper = func_wrapper
396 self.kwargs = kwargs
397 self.minimizer = minimize
398 bounds_list = list(zip(*search_bounds))
399 self.lower = np.array(bounds_list[0])
400 self.upper = np.array(bounds_list[1])
402 # If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
403 if not self.kwargs:
404 n = len(self.lower)
405 ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
406 self.LS_MAXITER_MIN),
407 self.LS_MAXITER_MAX)
408 self.kwargs['method'] = 'L-BFGS-B'
409 self.kwargs['options'] = {
410 'maxiter': ls_max_iter,
411 }
412 self.kwargs['bounds'] = list(zip(self.lower, self.upper))
414 def local_search(self, x, e):
415 # Run local search from the given x location where energy value is e
416 x_tmp = np.copy(x)
417 mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
418 if 'njev' in mres:
419 self.func_wrapper.ngev += mres.njev
420 if 'nhev' in mres:
421 self.func_wrapper.nhev += mres.nhev
422 # Check whether the result is a valid value
423 is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
424 in_bounds = np.all(mres.x >= self.lower) and np.all(
425 mres.x <= self.upper)
426 is_valid = is_finite and in_bounds
428 # Use the new point only if it is valid and returns a better result
429 if is_valid and mres.fun < e:
430 return mres.fun, mres.x
431 else:
432 return e, x_tmp
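
# --- Illustrative sketch (editor's addition, not part of SciPy) --------------
# With no extra keyword arguments, LocalSearchWrapper falls back to bounded
# L-BFGS-B as configured in __init__ above; local_search only returns the new
# point when it is finite, inside the bounds and better than the incoming
# energy ``e``. The name _local_search_sketch is hypothetical.
def _local_search_sketch():
    bounds = [(-5.12, 5.12)] * 2
    wrapper = ObjectiveFunWrapper(lambda x: np.sum(x * x))
    searcher = LocalSearchWrapper(bounds, wrapper)
    x0 = np.array([1.0, -1.0])
    e0 = wrapper.fun(x0)
    e_new, x_new = searcher.local_search(x0, e0)
    return e_new, x_new
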
435def dual_annealing(func, bounds, args=(), maxiter=1000,
436 minimizer_kwargs=None, initial_temp=5230.,
437 restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
438 maxfun=1e7, seed=None, no_local_search=False,
439 callback=None, x0=None):
440 """
441 Find the global minimum of a function using Dual Annealing.
443 Parameters
444 ----------
445 func : callable
446 The objective function to be minimized. Must be in the form
447 ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
448 and ``args`` is a tuple of any additional fixed parameters needed to
449 completely specify the function.
450 bounds : sequence or `Bounds`
451 Bounds for variables. There are two ways to specify the bounds:
453 1. Instance of `Bounds` class.
454 2. Sequence of ``(min, max)`` pairs for each element in `x`.
456 args : tuple, optional
457 Any additional fixed parameters needed to completely specify the
458 objective function.
459 maxiter : int, optional
460 The maximum number of global search iterations. Default value is 1000.
461 minimizer_kwargs : dict, optional
462 Extra keyword arguments to be passed to the local minimizer
463 (`minimize`). Some important options could be:
464 ``method`` for the minimizer method to use and ``args`` for
465 additional arguments of the objective function.
466 initial_temp : float, optional
467 The initial temperature; use higher values to facilitate a wider
468 search of the energy landscape, allowing dual_annealing to escape
469 local minima that it is trapped in. Default value is 5230. Range is
470 (0.01, 5.e4].
471 restart_temp_ratio : float, optional
472 During the annealing process, the temperature decreases; when it
473 reaches ``initial_temp * restart_temp_ratio``, the reannealing process
474 is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
475 visit : float, optional
476 Parameter for visiting distribution. Default value is 2.62. Higher
477 values give the visiting distribution a heavier tail, which makes
478 the algorithm jump to a more distant region. The value range is (1, 3].
479 accept : float, optional
480 Parameter for acceptance distribution. It is used to control the
481 probability of acceptance. The lower the acceptance parameter, the
482 smaller the probability of acceptance. Default value is -5.0 with
483 a range (-1e4, -5].
484 maxfun : int, optional
485 Soft limit for the number of objective function calls. If the
486 algorithm is in the middle of a local search, this number may be
487 exceeded; the algorithm will then stop just after the local search is
488 done. Default value is 1e7.
489 seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
490 If `seed` is None (or `np.random`), the `numpy.random.RandomState`
491 singleton is used.
492 If `seed` is an int, a new ``RandomState`` instance is used,
493 seeded with `seed`.
494 If `seed` is already a ``Generator`` or ``RandomState`` instance then
495 that instance is used.
496 Specify `seed` for repeatable minimizations. The random numbers
497 generated with this seed only affect the visiting distribution function
498 and new coordinates generation.
499 no_local_search : bool, optional
500 If `no_local_search` is set to True, a traditional Generalized
501 Simulated Annealing will be performed with no local search
502 strategy applied.
503 callback : callable, optional
504 A callback function with signature ``callback(x, f, context)``,
505 which will be called for all minima found.
506 ``x`` and ``f`` are the coordinates and function value of the
507 latest minimum found, and ``context`` has value in [0, 1, 2], with the
508 following meaning:
510 - 0: minimum detected in the annealing process.
511 - 1: detection occurred in the local search process.
512 - 2: detection done in the dual annealing process.
514 If the callback implementation returns True, the algorithm will stop.
515 x0 : ndarray, shape(n,), optional
516 Coordinates of a single N-D starting point.
518 Returns
519 -------
520 res : OptimizeResult
521 The optimization result represented as a `OptimizeResult` object.
522 Important attributes are: ``x`` the solution array, ``fun`` the value
523 of the function at the solution, and ``message`` which describes the
524 cause of the termination.
525 See `OptimizeResult` for a description of other attributes.
527 Notes
528 -----
529 This function implements the Dual Annealing optimization. This stochastic
530 approach derived from [3]_ combines the generalization of CSA (Classical
531 Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
532 to a strategy for applying a local search on accepted locations [4]_.
533 An alternative implementation of this same algorithm is described in [5]_
534 and benchmarks are presented in [6]_. This approach introduces an advanced
535 method to refine the solution found by the generalized annealing
536 process. This algorithm uses a distorted Cauchy-Lorentz visiting
537 distribution, with its shape controlled by the parameter :math:`q_{v}`
539 .. math::
541 g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
542 \\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
543 \\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
544 \\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
545 \\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
547 Where :math:`t` is the artificial time. This visiting distribution is used
548 to generate a trial jump distance :math:`\\Delta x(t)` of variable
549 :math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
551 From the starting point, after calling the visiting distribution
552 function, the acceptance probability is computed as follows:
554 .. math::
556 p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
557 \\frac{1}{1-q_{a}}}\\}}
559 Where :math:`q_{a}` is an acceptance parameter. For :math:`q_{a}<1`, zero
560 acceptance probability is assigned to the cases where
562 .. math::
564 [1-(1-q_{a}) \\beta \\Delta E] < 0
566 The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
568 .. math::
570 T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
571 1 + t\\right)^{q_{v}-1}-1}
573 Where :math:`q_{v}` is the visiting parameter.
575 .. versionadded:: 1.2.0
577 References
578 ----------
579 .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
580 statistics. Journal of Statistical Physics, 52, 479-487 (1998).
581 .. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
582 Physica A, 233, 395-406 (1996).
583 .. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
584 Annealing Algorithm and Its Application to the Thomson Model.
585 Physics Letters A, 233, 216-220 (1997).
586 .. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
587 Annealing. Physical Review E, 62, 4473 (2000).
588 .. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
589 Simulated Annealing for Efficient Global Optimization: the GenSA
590 Package for R. The R Journal, Volume 5/1 (2013).
591 .. [6] Mullen, K. Continuous Global Optimization in R. Journal of
592 Statistical Software, 60(6), 1 - 45, (2014).
593 :doi:`10.18637/jss.v060.i06`
595 Examples
596 --------
597 The following example is a 10-D problem, with many local minima.
598 The function involved is called Rastrigin
599 (https://en.wikipedia.org/wiki/Rastrigin_function)
601 >>> import numpy as np
602 >>> from scipy.optimize import dual_annealing
603 >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
604 >>> lw = [-5.12] * 10
605 >>> up = [5.12] * 10
606 >>> ret = dual_annealing(func, bounds=list(zip(lw, up)))
607 >>> ret.x
608 array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09,
609 -6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09,
610 -6.05775280e-09, -5.00668935e-09]) # random
611 >>> ret.fun
612 0.000000
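
    As an additional illustration, the same problem can also be specified with
    a `Bounds` instance and a fixed ``seed`` for a repeatable run:

    >>> from scipy.optimize import Bounds
    >>> rast_bounds = Bounds([-5.12] * 10, [5.12] * 10)
    >>> ret_bounded = dual_annealing(func, bounds=rast_bounds, seed=1234)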
614 """
616 if isinstance(bounds, Bounds):
617 bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb))
619 # noqa: E501
620 if x0 is not None and not len(x0) == len(bounds):
621 raise ValueError('Bounds size does not match x0')
623 lu = list(zip(*bounds))
624 lower = np.array(lu[0])
625 upper = np.array(lu[1])
626 # Check that restart temperature ratio is correct
627 if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
628 raise ValueError('Restart temperature ratio has to be in range (0, 1)')
629 # Checking bounds are valid
630 if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
631 np.isnan(lower)) or np.any(np.isnan(upper))):
632 raise ValueError('Some bounds values are inf values or nan values')
633 # Checking that bounds are consistent
634 if not np.all(lower < upper):
635 raise ValueError('Bounds are not consistent min < max')
636 # Checking that bounds are the same length
637 if not len(lower) == len(upper):
638 raise ValueError('Bounds do not have the same dimensions')
640 # Wrapper for the objective function
641 func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
643 # minimizer_kwargs has to be a dict, not None
644 minimizer_kwargs = minimizer_kwargs or {}
646 minimizer_wrapper = LocalSearchWrapper(
647 bounds, func_wrapper, **minimizer_kwargs)
649 # Initialization of random Generator for reproducible runs if seed provided
650 rand_state = check_random_state(seed)
651 # Initialization of the energy state
652 energy_state = EnergyState(lower, upper, callback)
653 energy_state.reset(func_wrapper, rand_state, x0)
654 # Minimum value of annealing temperature reached to perform
655 # re-annealing
656 temperature_restart = initial_temp * restart_temp_ratio
657 # VisitingDistribution instance
658 visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
659 # Strategy chain instance
660 strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
661 minimizer_wrapper, rand_state, energy_state)
662 need_to_stop = False
663 iteration = 0
664 message = []
665 # OptimizeResult object to be returned
666 optimize_res = OptimizeResult()
667 optimize_res.success = True
668 optimize_res.status = 0
670 t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
671 # Run the search loop
672 while not need_to_stop:
673 for i in range(maxiter):
674 # Compute temperature for this step
675 s = float(i) + 2.0
676 t2 = np.exp((visit - 1) * np.log(s)) - 1.0
677 temperature = initial_temp * t1 / t2
678 if iteration >= maxiter:
679 message.append("Maximum number of iterations reached")
680 need_to_stop = True
681 break
682 # Need a re-annealing process?
683 if temperature < temperature_restart:
684 energy_state.reset(func_wrapper, rand_state)
685 break
686 # starting strategy chain
687 val = strategy_chain.run(i, temperature)
688 if val is not None:
689 message.append(val)
690 need_to_stop = True
691 optimize_res.success = False
692 break
693 # Possible local search at the end of the strategy chain
694 if not no_local_search:
695 val = strategy_chain.local_search()
696 if val is not None:
697 message.append(val)
698 need_to_stop = True
699 optimize_res.success = False
700 break
701 iteration += 1
703 # Setting the OptimizeResult values
704 optimize_res.x = energy_state.xbest
705 optimize_res.fun = energy_state.ebest
706 optimize_res.nit = iteration
707 optimize_res.nfev = func_wrapper.nfev
708 optimize_res.njev = func_wrapper.ngev
709 optimize_res.nhev = func_wrapper.nhev
710 optimize_res.message = message
711 return optimize_res
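
# --- Illustrative sketch (editor's addition, not part of SciPy) --------------
# Minimal demonstration of the visiting-temperature schedule used in the search
# loop above, T(t) = T(1) * (2**(qv - 1) - 1) / ((1 + t)**(qv - 1) - 1),
# followed by a complete dual_annealing call on the Rastrigin function from the
# docstring example. Function names are hypothetical; the __main__ guard keeps
# the demo from running on import.
def _temperature_schedule_sketch(initial_temp=5230., visit=2.62, steps=5):
    t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
    temps = []
    for i in range(steps):
        s = float(i) + 2.0
        t2 = np.exp((visit - 1) * np.log(s)) - 1.0
        temps.append(initial_temp * t1 / t2)
    return temps


if __name__ == "__main__":
    def rastrigin(x):
        return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)

    res = dual_annealing(rastrigin, bounds=[(-5.12, 5.12)] * 4, seed=1234)
    print(res.x, res.fun)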