Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/scipy/optimize/_tnc.py: 32%
81 statements
# TNC Python interface
# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $

# Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org)

# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:

# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25"""
26TNC: A Python interface to the TNC non-linear optimizer
28TNC is a non-linear optimizer. To use it, you must provide a function to
29minimize. The function must take one argument: the list of coordinates where to
30evaluate the function; and it must return either a tuple, whose first element is the
31value of the function, and whose second argument is the gradient of the function
32(as a list of values); or None, to abort the minimization.
33"""
import warnings

from scipy.optimize import _moduleTNC as moduleTNC
from ._optimize import (MemoizeJac, OptimizeResult, _check_unknown_options,
                        _prepare_scalar_function)
from ._constraints import old_bound_to_new

from numpy import inf, array, zeros, asfarray

__all__ = ['fmin_tnc']

MSG_NONE = 0  # No messages
MSG_ITER = 1  # One line per iteration
MSG_INFO = 2  # Informational messages
MSG_VERS = 4  # Version info
MSG_EXIT = 8  # Exit reasons
MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT

MSGS = {
    MSG_NONE: "No messages",
    MSG_ITER: "One line per iteration",
    MSG_INFO: "Informational messages",
    MSG_VERS: "Version info",
    MSG_EXIT: "Exit reasons",
    MSG_ALL: "All messages"
}
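
# Illustrative note (not original code): the MSG_* values are bit flags
# (powers of two), so MSG_ALL is equivalently the bitwise OR of them all:
#   MSG_ITER | MSG_INFO | MSG_VERS | MSG_EXIT == MSG_ALL == 15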

INFEASIBLE = -1  # Infeasible (lower bound > upper bound)
LOCALMINIMUM = 0  # Local minimum reached (|pg| ~= 0)
FCONVERGED = 1  # Converged (|f_n-f_(n-1)| ~= 0)
XCONVERGED = 2  # Converged (|x_n-x_(n-1)| ~= 0)
MAXFUN = 3  # Max. number of function evaluations reached
LSFAIL = 4  # Linear search failed
CONSTANT = 5  # All lower bounds are equal to the upper bounds
NOPROGRESS = 6  # Unable to progress
USERABORT = 7  # User requested end of minimization

RCSTRINGS = {
    INFEASIBLE: "Infeasible (lower bound > upper bound)",
    LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)",
    FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)",
    XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)",
    MAXFUN: "Max. number of function evaluations reached",
    LSFAIL: "Linear search failed",
    CONSTANT: "All lower bounds are equal to the upper bounds",
    NOPROGRESS: "Unable to progress",
    USERABORT: "User requested end of minimization"
}
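
# Illustrative sketch (not original code): mapping a solver return code to
# its human-readable description:
#   RCSTRINGS[FCONVERGED]  # -> "Converged (|f_n-f_(n-1)| ~= 0)"
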
# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in
# SciPy


def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
             bounds=None, epsilon=1e-8, scale=None, offset=None,
             messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
             stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
             rescale=-1, disp=None, callback=None):
94 """
95 Minimize a function with variables subject to bounds, using
96 gradient information in a truncated Newton algorithm. This
97 method wraps a C implementation of the algorithm.
99 Parameters
100 ----------
101 func : callable ``func(x, *args)``
102 Function to minimize. Must do one of:
104 1. Return f and g, where f is the value of the function and g its
105 gradient (a list of floats).
107 2. Return the function value but supply gradient function
108 separately as `fprime`.
110 3. Return the function value and set ``approx_grad=True``.
112 If the function returns None, the minimization
113 is aborted.
    x0 : array_like
        Initial estimate of minimum.
    fprime : callable ``fprime(x, *args)``, optional
        Gradient of `func`. If None, then either `func` must return the
        function value and the gradient (``f, g = func(x, *args)``)
        or `approx_grad` must be True.
    args : tuple, optional
        Arguments to pass to function.
    approx_grad : bool, optional
        If true, approximate the gradient numerically.
    bounds : list, optional
        (min, max) pairs for each element in x0, defining the
        bounds on that parameter. Use None or +/-inf for one of
        min or max when there is no bound in that direction.
    epsilon : float, optional
        Used if approx_grad is True. The stepsize in a finite
        difference approximation for fprime.
    scale : array_like, optional
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
    offset : array_like, optional
        Value to subtract from each variable. If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    messages : int, optional
        Bit mask used to select messages displayed during
        minimization; values are defined in the MSGS dict.
        Defaults to MSG_ALL.
    disp : int, optional
        Integer interface to messages. 0 = no message, 5 = all messages.
    maxCGit : int, optional
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient. If maxCGit < 0, maxCGit is set to
        max(1, min(50, n/2)). Defaults to -1.
    maxfun : int, optional
        Maximum number of function evaluations. If None, maxfun is
        set to max(100, 10*len(x0)). Defaults to None. Note that this
        function may exceed the limit when gradients are evaluated by
        numerical differentiation.
    eta : float, optional
        Severity of the line search. If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float, optional
        Maximum step for the line search. May be increased during
        call. If too small, it will be set to 10.0. Defaults to 0.
    accuracy : float, optional
        Relative precision for finite difference calculations. If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    fmin : float, optional
        Minimum function value estimate. Defaults to 0.
    ftol : float, optional
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
    xtol : float, optional
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors). If xtol <
        0.0, xtol is set to sqrt(machine_precision). Defaults to
        -1.
    pgtol : float, optional
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended. Defaults to -1.
    rescale : float, optional
        Scaling factor (in log10) used to trigger f value
        rescaling. If 0, rescale at each iteration. If a large
        value, never rescale. If < 0, rescale is set to 1.3.
    callback : callable, optional
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.

    Returns
    -------
    x : ndarray
        The solution.
    nfeval : int
        The number of function evaluations.
    rc : int
        Return code, see below.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'TNC' `method` in particular.

    Notes
    -----
    The underlying algorithm is truncated Newton, also called
    Newton Conjugate-Gradient. This method differs from
    scipy.optimize.fmin_ncg in that

    1. it wraps a C implementation of the algorithm
    2. it allows each variable to be given an upper and lower bound.

    The algorithm incorporates the bound constraints by determining
    the descent direction as in an unconstrained truncated Newton,
    but never taking a step-size large enough to leave the space
    of feasible x's. The algorithm keeps track of a set of
    currently active constraints, and ignores them when computing
    the minimum allowable step size. (The x's associated with the
    active constraint are kept fixed.) If the maximum allowable
    step size is zero then a new constraint is added. At the end
    of each iteration one of the constraints may be deemed no
    longer active and removed. A constraint is considered
    no longer active if it is currently active
    but the gradient for that variable points inward from the
    constraint. The specific constraint removed is the one
    associated with the variable of largest index whose
    constraint is no longer active.

    Return codes are defined as follows::

        -1 : Infeasible (lower bound > upper bound)
         0 : Local minimum reached (|pg| ~= 0)
         1 : Converged (|f_n-f_(n-1)| ~= 0)
         2 : Converged (|x_n-x_(n-1)| ~= 0)
         3 : Max. number of function evaluations reached
         4 : Linear search failed
         5 : All lower bounds are equal to the upper bounds
         6 : Unable to progress
         7 : User requested end of minimization

    References
    ----------
    Wright S., Nocedal J. (2006), 'Numerical Optimization'

    Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
    SIAM Journal of Numerical Analysis 21, pp. 770-778
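
    Examples
    --------
    A minimal sketch: minimize a smooth two-variable quadratic with a
    numerically approximated gradient, suppressing all messages.

    >>> from scipy.optimize import fmin_tnc
    >>> def f(x):
    ...     return (x[0] - 1.0)**2 + (x[1] - 2.5)**2
    >>> x, nfev, rc = fmin_tnc(f, [0.0, 0.0], approx_grad=True, messages=0)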
246 """
    # handle fprime/approx_grad
    if approx_grad:
        fun = func
        jac = None
    elif fprime is None:
        fun = MemoizeJac(func)
        jac = fun.derivative
    else:
        fun = func
        jac = fprime

    if disp is not None:  # disp takes precedence over messages
        mesg_num = disp
    else:
        mesg_num = {0: MSG_NONE, 1: MSG_ITER, 2: MSG_INFO, 3: MSG_VERS,
                    4: MSG_EXIT, 5: MSG_ALL}.get(messages, MSG_ALL)
    # build options
    opts = {'eps': epsilon,
            'scale': scale,
            'offset': offset,
            'mesg_num': mesg_num,
            'maxCGit': maxCGit,
            'maxfun': maxfun,
            'eta': eta,
            'stepmx': stepmx,
            'accuracy': accuracy,
            'minfev': fmin,
            'ftol': ftol,
            'xtol': xtol,
            'gtol': pgtol,
            'rescale': rescale,
            'disp': False}

    res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)

    return res['x'], res['nfev'], res['status']


def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
                  eps=1e-8, scale=None, offset=None, mesg_num=None,
                  maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0,
                  minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
                  callback=None, finite_diff_rel_step=None, maxfun=None,
                  **unknown_options):
    """
    Minimize a scalar function of one or more variables using a truncated
    Newton (TNC) algorithm.

    Options
    -------
    eps : float or ndarray
        If `jac is None` the absolute step size used for numerical
        approximation of the jacobian via forward differences.
    scale : list of floats
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
    offset : float
        Value to subtract from each variable. If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    disp : bool
        Set to True to print convergence messages.
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient. If maxCGit < 0, maxCGit is set to
        max(1, min(50, n/2)). Defaults to -1.
    maxiter : int, optional
        Maximum number of function evaluations. If `maxfun` is also provided
        then `maxiter` is ignored.
        Default is None.

        .. deprecated:: 1.9.0
           `maxiter` is deprecated in favor of `maxfun` and will be removed
           in SciPy 1.11.0.
    eta : float
        Severity of the line search. If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float
        Maximum step for the line search. May be increased during
        call. If too small, it will be set to 10.0. Defaults to 0.
    accuracy : float
        Relative precision for finite difference calculations. If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    minfev : float
        Minimum function value estimate. Defaults to 0.
    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
    xtol : float
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors). If xtol <
        0.0, xtol is set to sqrt(machine_precision). Defaults to
        -1.
    gtol : float
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended. Defaults to -1.
    rescale : float
        Scaling factor (in log10) used to trigger f value
        rescaling. If 0, rescale at each iteration. If a large
        value, never rescale. If < 0, rescale is set to 1.3.
    finite_diff_rel_step : None or array_like, optional
        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
        use for numerical approximation of the jacobian. The absolute step
        size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
        possibly adjusted to fit into the bounds. For ``method='3-point'``
        the sign of `h` is ignored. If None (default) then step is selected
        automatically.
    maxfun : int
        Maximum number of function evaluations. If None, `maxfun` is
        set to max(100, 10*len(x0)). Defaults to None.
    """
    _check_unknown_options(unknown_options)
    fmin = minfev
    pgtol = gtol

    x0 = asfarray(x0).flatten()
    n = len(x0)

    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    new_bounds = old_bound_to_new(bounds)

    if mesg_num is not None:
        messages = {0: MSG_NONE, 1: MSG_ITER, 2: MSG_INFO, 3: MSG_VERS,
                    4: MSG_EXIT, 5: MSG_ALL}.get(mesg_num, MSG_ALL)
    elif disp:
        messages = MSG_ALL
    else:
        messages = MSG_NONE

    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
                                  finite_diff_rel_step=finite_diff_rel_step,
                                  bounds=new_bounds)
    func_and_grad = sf.fun_and_grad

389 """
390 low, up : the bounds (lists of floats)
391 if low is None, the lower bounds are removed.
392 if up is None, the upper bounds are removed.
393 low and up defaults to None
394 """
    low = zeros(n)
    up = zeros(n)
    for i in range(n):
        if bounds[i] is None:
            l, u = -inf, inf
        else:
            l, u = bounds[i]
        if l is None:
            low[i] = -inf
        else:
            low[i] = l
        if u is None:
            up[i] = inf
        else:
            up[i] = u
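    # For illustration (not original code): bounds=[(0, None), (None, 5)]
    # yields low=[0., -inf] and up=[inf, 5.].
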
    if scale is None:
        scale = array([])

    if offset is None:
        offset = array([])

    if maxfun is None:
        if maxiter is not None:
            warnings.warn(
                "'maxiter' has been deprecated in favor of 'maxfun'"
                " and will be removed in SciPy 1.11.0.",
                DeprecationWarning, stacklevel=3
            )
            maxfun = maxiter
        else:
            maxfun = max(100, 10*len(x0))

    rc, nf, nit, x, funv, jacv = moduleTNC.tnc_minimize(
        func_and_grad, x0, low, up, scale,
        offset, messages, maxCGit, maxfun,
        eta, stepmx, accuracy, fmin, ftol,
        xtol, pgtol, rescale, callback
    )
    # the TNC documentation states: "On output, x, f and g may be very
    # slightly out of sync because of scaling". Therefore re-evaluate
    # func_and_grad so they are synced.
    funv, jacv = func_and_grad(x)

    return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev,
                          nit=nit, status=rc, message=RCSTRINGS[rc],
                          success=(-1 < rc < 3))