import warnings
from . import _minpack

import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
                   transpose, zeros, prod, greater,
                   asarray, inf,
                   finfo, inexact, issubdtype, dtype)
from scipy import linalg
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError, inv
from scipy._lib._util import _asarray_validated, _lazywhere
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from ._optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
# from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
from scipy.optimize._minimize import Bounds

error = _minpack.error

__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']


def _check_func(checker, argname, thefunc, x0, args, numinputs,
                output_shape=None):
    res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
    if (output_shape is not None) and (shape(res) != output_shape):
        if (output_shape[0] != 1):
            if len(output_shape) > 1:
                if output_shape[1] == 1:
                    return shape(res)
            msg = "%s: there is a mismatch between the input and output " \
                  "shape of the '%s' argument" % (checker, argname)
            func_name = getattr(thefunc, '__name__', None)
            if func_name:
                msg += " '%s'." % func_name
            else:
                msg += "."
            msg += ' Shape should be %s but it is %s.' % (output_shape,
                                                          shape(res))
            raise TypeError(msg)
    if issubdtype(res.dtype, inexact):
        dt = res.dtype
    else:
        dt = dtype(float)
    return shape(res), dt


def fsolve(func, x0, args=(), fprime=None, full_output=0,
           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
           epsfcn=None, factor=100, diag=None):
    """
    Find the roots of a function.

    Return the roots of the (non-linear) equations defined by
    ``func(x) = 0`` given a starting estimate.

    Parameters
    ----------
    func : callable ``f(x, *args)``
        A function that takes at least one (possibly vector) argument,
        and returns a value of the same length.
    x0 : ndarray
        The starting estimate for the roots of ``func(x) = 0``.
    args : tuple, optional
        Any extra arguments to `func`.
    fprime : callable ``f(x, *args)``, optional
        A function to compute the Jacobian of `func` with derivatives
        across the rows. By default, the Jacobian will be estimated.
    full_output : bool, optional
        If True, return optional outputs.
    col_deriv : bool, optional
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float, optional
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int, optional
        The maximum number of calls to the function. If zero, then
        ``200*(N+1)`` is the maximum where N is the number of elements
        in `x0` (``100*(N+1)`` if `fprime` is supplied).
    band : tuple, optional
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    epsfcn : float, optional
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `epsfcn` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the
        variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for
        an unsuccessful call).
    infodict : dict
        A dictionary of optional outputs with the keys:

        ``nfev``
            number of function calls
        ``njev``
            number of Jacobian calls
        ``fvec``
            function evaluated at the output
        ``fjac``
            the orthogonal matrix, q, produced by the QR
            factorization of the final approximate Jacobian
            matrix, stored column wise
        ``r``
            upper triangular matrix produced by QR factorization
            of the same matrix
        ``qtf``
            the vector ``(transpose(q) * fvec)``

    ier : int
        An integer flag. Set to 1 if a solution was found, otherwise refer
        to `mesg` for more information.
    mesg : str
        If no solution is found, `mesg` details the cause of failure.

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See the ``method='hybr'`` in particular.

    Notes
    -----
    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.

    Examples
    --------
    Find a solution to the system of equations:
    ``x0*cos(x1) = 4,  x1*x0 - x1 = 5``.

    >>> import numpy as np
    >>> from scipy.optimize import fsolve
    >>> def func(x):
    ...     return [x[0] * np.cos(x[1]) - 4,
    ...             x[1] * x[0] - x[1] - 5]
    >>> root = fsolve(func, [1, 1])
    >>> root
    array([6.50409711, 0.90841421])
    >>> np.isclose(func(root), [0.0, 0.0])  # func(root) should be almost 0.0.
    array([ True,  True])
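
    If the Jacobian is known, it can be supplied through `fprime`. A minimal
    sketch for the same system, with the partial derivatives written out by
    hand:

    >>> def jac(x):
    ...     return [[np.cos(x[1]), -x[0] * np.sin(x[1])],
    ...             [x[1], x[0] - 1]]
    >>> root2 = fsolve(func, [1, 1], fprime=jac)
    >>> np.allclose(root2, root)
    True
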
    """
    options = {'col_deriv': col_deriv,
               'xtol': xtol,
               'maxfev': maxfev,
               'band': band,
               'eps': epsfcn,
               'factor': factor,
               'diag': diag}

    res = _root_hybr(func, x0, args, jac=fprime, **options)
    if full_output:
        x = res['x']
        info = dict((k, res.get(k))
                    for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
        info['fvec'] = res['fun']
        return x, info, res['status'], res['message']
    else:
        status = res['status']
        msg = res['message']
        if status == 0:
            raise TypeError(msg)
        elif status == 1:
            pass
        elif status in [2, 3, 4, 5]:
            warnings.warn(msg, RuntimeWarning)
        else:
            raise TypeError(msg)
        return res['x']


def _root_hybr(func, x0, args=(), jac=None,
               col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
               factor=100, diag=None, **unknown_options):
    """
    Find the roots of a multivariate function using MINPACK's hybrd and
    hybrj routines (modified Powell method).

    Options
    -------
    col_deriv : bool
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int
        The maximum number of calls to the function. If zero, then
        ``200*(N+1)`` is the maximum where N is the number of elements
        in `x0` (``100*(N+1)`` if `jac` is supplied).
    band : tuple
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``jac=None``).
    eps : float
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``jac=None``). If
        `eps` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as scale factors for the
        variables.

    """
    _check_unknown_options(unknown_options)
    epsfcn = eps

    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    Dfun = jac
    if Dfun is None:
        if band is None:
            ml, mu = -10, -10
        else:
            ml, mu = band[:2]
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
                                 ml, mu, epsfcn, factor, diag)
    else:
        _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._hybrj(func, Dfun, x0, args, 1,
                                 col_deriv, xtol, maxfev, factor, diag)

    x, status = retval[0], retval[-1]

    errors = {0: "Improper input parameters were entered.",
              1: "The solution converged.",
              2: "The number of calls to function has "
                 "reached maxfev = %d." % maxfev,
              3: "xtol=%f is too small, no further improvement "
                 "in the approximate\n  solution "
                 "is possible." % xtol,
              4: "The iteration is not making good progress, as measured "
                 "by the \n  improvement from the last five "
                 "Jacobian evaluations.",
              5: "The iteration is not making good progress, "
                 "as measured by the \n  improvement from the last "
                 "ten iterations.",
              'unknown': "An error occurred."}

    info = retval[1]
    info['fun'] = info.pop('fvec')
    sol = OptimizeResult(x=x, success=(status == 1), status=status)
    sol.update(info)
    try:
        sol['message'] = errors[status]
    except KeyError:
        sol['message'] = errors['unknown']

    return sol


LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]


def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    """
    Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2, axis=0))
                 y

    Parameters
    ----------
    func : callable
        Should take at least one (possibly length ``N`` vector) argument and
        return ``M`` floating point numbers. It must not return NaNs or
        fitting might fail. ``M`` must be greater than or equal to ``N``.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool, optional
        If non-zero, return all optional outputs.
    col_deriv : bool, optional
        If non-zero, specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If `Dfun` is provided,
        then the default `maxfev` is 100*(N+1) where N is the number of
        elements in x0, otherwise the default `maxfev` is 200*(N+1).
    epsfcn : float, optional
        A variable used in determining a suitable step length for the forward-
        difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x.
        If epsfcn is less than the machine precision, it is assumed that the
        relative errors are of the order of the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
        estimate of the Hessian. A value of None indicates a singular matrix,
        which means the curvature in parameters `x` is numerically flat. To
        obtain the covariance matrix of the parameters `x`, `cov_x` must be
        multiplied by the variance of the residuals -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls
        ``fvec``
            The function evaluated at the output
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        ``qtf``
            The vector (transpose(q) * fvec).

    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    See Also
    --------
    least_squares : Newer interface to solve nonlinear least-squares problems
        with bounds on the variables. See ``method='lm'`` in particular.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

        func(params) = ydata - f(xdata, params)

    so that the objective function is ::

        min   sum((ydata - f(xdata, params))**2, axis=0)
        params

    The solution, `x`, is always a 1-D array, regardless of the shape of `x0`,
    or whether `x0` is a scalar.

    Examples
    --------
    >>> from scipy.optimize import leastsq
    >>> def func(x):
    ...     return 2*(x-3)**2+1
    >>> leastsq(func, 0)
    (array([2.99999999]), 1)
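
    A data-fitting sketch in the residual form described in the Notes section
    (the linear model and the noise-free data are made up for illustration):

    >>> import numpy as np
    >>> def residuals(params, xdata, ydata):
    ...     return ydata - (params[0] * xdata + params[1])
    >>> xdata = np.arange(5, dtype=float)
    >>> ydata = 2.0 * xdata + 1.0
    >>> p_opt, ier = leastsq(residuals, x0=[1.0, 0.0], args=(xdata, ydata))
    >>> np.allclose(p_opt, [2.0, 1.0])
    True
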
    """
    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]

    if n > m:
        raise TypeError(f"Improper input: func input vector length N={n} must"
                        f" not exceed func output vector length M={m}")

    if epsfcn is None:
        epsfcn = finfo(dtype).eps

    if Dfun is None:
        if maxfev == 0:
            maxfev = 200*(n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output,
                                 col_deriv, ftol, xtol, gtol, maxfev,
                                 factor, diag)

    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n  are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n  are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n  most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n  Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n  is possible." % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n  solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n  the Jacobian to machine "
                  "precision." % gtol, ValueError]}

    # The FORTRAN return value (possible return values are >= 0 and <= 8)
    info = retval[-1]

    if full_output:
        cov_x = None
        if info in LEASTSQ_SUCCESS:
            # This was
            # perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            # r = triu(transpose(retval[1]['fjac'])[:n, :])
            # R = dot(r, perm)
            # cov_x = inv(dot(transpose(R), R))
            # but the explicit dot product was not necessary and sometimes
            # the result was not symmetric positive definite. See gh-4555.
            perm = retval[1]['ipvt'] - 1
            n = len(perm)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            inv_triu = linalg.get_lapack_funcs('trtri', (r,))
            try:
                # inverse of permuted matrix is a permutation of matrix inverse
                invR, trtri_info = inv_triu(r)  # default: upper, non-unit diag
                if trtri_info != 0:  # explicit comparison for readability
                    raise LinAlgError(f'trtri returned info {trtri_info}')
                invR[perm] = invR.copy()
                cov_x = invR @ invR.T
            except (LinAlgError, ValueError):
                pass
        return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
    else:
        if info in LEASTSQ_FAILURE:
            warnings.warn(errors[info][0], RuntimeWarning)
        elif info == 0:
            raise errors[info][1](errors[info][0])
        return retval[0], info


def _wrap_func(func, xdata, ydata, transform):
    if transform is None:
        def func_wrapped(params):
            return func(xdata, *params) - ydata
    elif transform.ndim == 1:
        def func_wrapped(params):
            return transform * (func(xdata, *params) - ydata)
    else:
        # Chisq = (y - yd)^T C^{-1} (y-yd)
        # transform = L such that C = L L^T
        # C^{-1} = L^{-T} L^{-1}
        # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
        # Define (y-yd)' = L^{-1} (y-yd)
        # by solving
        # L (y-yd)' = (y-yd)
        # and minimize (y-yd)'^T (y-yd)'
        def func_wrapped(params):
            return solve_triangular(transform, func(xdata, *params) - ydata,
                                    lower=True)
    return func_wrapped


def _wrap_jac(jac, xdata, transform):
    if transform is None:
        def jac_wrapped(params):
            return jac(xdata, *params)
    elif transform.ndim == 1:
        def jac_wrapped(params):
            return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
    else:
        def jac_wrapped(params):
            return solve_triangular(transform,
                                    np.asarray(jac(xdata, *params)),
                                    lower=True)
    return jac_wrapped


def _initialize_feasible(lb, ub):
    p0 = np.ones_like(lb)
    lb_finite = np.isfinite(lb)
    ub_finite = np.isfinite(ub)

    mask = lb_finite & ub_finite
    p0[mask] = 0.5 * (lb[mask] + ub[mask])

    mask = lb_finite & ~ub_finite
    p0[mask] = lb[mask] + 1

    mask = ~lb_finite & ub_finite
    p0[mask] = ub[mask] - 1

    return p0


def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=True, bounds=(-np.inf, np.inf), method=None,
              jac=None, *, full_output=False, **kwargs):
    """
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``.

    Parameters
    ----------
    f : callable
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : array_like
        The independent variable where the data is measured.
        Should usually be an M-length sequence or a (k,M)-shaped array for
        functions with k predictors, and each element should be float
        convertible if it is an array-like object.
    ydata : array_like
        The dependent data, a length M array - nominally ``f(xdata, ...)``.
    p0 : array_like, optional
        Initial guess for the parameters (length N). If None, then the
        initial values will all be 1 (if the number of parameters for the
        function can be determined using introspection, otherwise a
        ValueError is raised).
    sigma : None or M-length sequence or MxM array, optional
        Determines the uncertainty in `ydata`. If we define residuals as
        ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
        depends on its number of dimensions:

        - A 1-D `sigma` should contain values of standard deviations of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = sum((r / sigma) ** 2)``.

        - A 2-D `sigma` should contain the covariance matrix of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = r.T @ inv(sigma) @ r``.

          .. versionadded:: 0.19

        None (default) is equivalent to a 1-D `sigma` filled with ones.
    absolute_sigma : bool, optional
        If True, `sigma` is used in an absolute sense and the estimated
        parameter covariance `pcov` reflects these absolute values.

        If False (default), only the relative magnitudes of the `sigma`
        values matter. The returned parameter covariance matrix `pcov` is
        based on scaling `sigma` by a constant factor. This constant is set
        by demanding that the reduced `chisq` for the optimal parameters
        `popt` when using the *scaled* `sigma` equals unity. In other words,
        `sigma` is scaled to match the sample variance of the residuals
        after the fit. Mathematically,
        ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
    check_finite : bool, optional
        If True, check that the input arrays do not contain NaNs or infs,
        and raise a ValueError if they do. Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain NaNs. Default is True.
    bounds : 2-tuple of array_like or `Bounds`, optional
        Lower and upper bounds on parameters. Defaults to no bounds.
        There are two ways to specify the bounds:

        - Instance of `Bounds` class.

        - 2-tuple of array_like: Each element of the tuple must be either
          an array with the length equal to the number of parameters, or a
          scalar (in which case the bound is taken to be the same for all
          parameters). Use ``np.inf`` with an appropriate sign to disable
          bounds on all or some parameters.

    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization. See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided. The method 'lm' won't work when the number of observations
        is less than the number of variables; use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    jac : callable, string or None, optional
        Function with signature ``jac(x, ...)`` which computes the Jacobian
        matrix of the model function with respect to parameters as a dense
        array_like structure. It will be scaled according to provided `sigma`.
        If None (default), the Jacobian will be estimated numerically.
        String keywords for 'trf' and 'dogbox' methods can be used to select
        a finite difference scheme, see `least_squares`.

        .. versionadded:: 0.18
    full_output : boolean, optional
        If True, this function returns additional information: `infodict`,
        `mesg`, and `ier`.

        .. versionadded:: 1.9
    **kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared
        residuals of ``f(xdata, *popt) - ydata`` is minimized.
    pcov : 2-D array
        The estimated covariance of popt. The diagonals provide the variance
        of the parameter estimate. To compute one standard deviation errors
        on the parameters, use ``perr = np.sqrt(np.diag(pcov))``.

        How the `sigma` parameter affects the estimated covariance
        depends on the `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution doesn't have a full rank, then
        the 'lm' method returns a matrix filled with ``np.inf``; the 'trf'
        and 'dogbox' methods, on the other hand, use the Moore-Penrose
        pseudoinverse to compute the covariance matrix.
    infodict : dict (returned only if `full_output` is True)
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls. Methods 'trf' and 'dogbox' do not
            count function calls for numerical Jacobian approximation,
            as opposed to the 'lm' method.
        ``fvec``
            The function values evaluated at the solution.
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
            Method 'lm' only provides this information.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
            Method 'lm' only provides this information.
        ``qtf``
            The vector (transpose(q) * fvec).
            Method 'lm' only provides this information.

        .. versionadded:: 1.9
    mesg : str (returned only if `full_output` is True)
        A string message giving information about the solution.

        .. versionadded:: 1.9
    ier : int (returned only if `full_output` is True)
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable `mesg` gives more information.

        .. versionadded:: 1.9

    Raises
    ------
    ValueError
        if either `ydata` or `xdata` contain NaNs, or if incompatible options
        are used.

    RuntimeError
        if the least-squares minimization fails.

    OptimizeWarning
        if covariance of the parameters can not be estimated.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    scipy.stats.linregress : Calculate a linear least squares regression for
                             two sets of measurements.

    Notes
    -----
    Users should ensure that inputs `xdata`, `ydata`, and the output of `f`
    are ``float64``, or else the optimization may return incorrect results.

    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
    through `leastsq`. Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
    the docstring of `least_squares` for more information.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.optimize import curve_fit

    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    Define the data to be fit with some noise:

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> rng = np.random.default_rng()
    >>> y_noise = 0.2 * rng.normal(size=xdata.size)
    >>> ydata = y + y_noise
    >>> plt.plot(xdata, ydata, 'b-', label='data')

    Fit for the parameters a, b, c of the function `func`:

    >>> popt, pcov = curve_fit(func, xdata, ydata)
    >>> popt
    array([2.56274217, 1.37268521, 0.47427475])
    >>> plt.plot(xdata, func(xdata, *popt), 'r-',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    Constrain the optimization to the region of ``0 <= a <= 3``,
    ``0 <= b <= 1`` and ``0 <= c <= 0.5``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
    >>> popt
    array([2.43736712, 1.        , 0.34463856])
    >>> plt.plot(xdata, func(xdata, *popt), 'g--',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend()
    >>> plt.show()
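
    When per-point measurement uncertainties are known, they can be passed
    via `sigma`; a sketch reusing the noisy data above (0.2 matches the
    noise level used to generate `ydata`):

    >>> sigma_y = np.full_like(ydata, 0.2)
    >>> popt, pcov = curve_fit(func, xdata, ydata, sigma=sigma_y,
    ...                        absolute_sigma=True)
    >>> perr = np.sqrt(np.diag(pcov))  # one-standard-deviation errors
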
    """
    if p0 is None:
        # determine number of parameters by inspecting the function
        sig = _getfullargspec(f)
        args = sig.args
        if len(args) < 2:
            raise ValueError("Unable to determine number of fit parameters.")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    if isinstance(bounds, Bounds):
        lb, ub = bounds.lb, bounds.ub
    else:
        lb, ub = prepare_bounds(bounds, n)
    if p0 is None:
        p0 = _initialize_feasible(lb, ub)

    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'

    if method == 'lm' and bounded_problem:
        raise ValueError("Method 'lm' only works for unconstrained problems. "
                         "Use 'trf' or 'dogbox' instead.")

    # optimization may produce garbage for float32 inputs, cast them to float64

    # NaNs cannot be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata, float)
    else:
        ydata = np.asarray(ydata, float)

    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata, float)
        else:
            xdata = np.asarray(xdata, float)

    if ydata.size == 0:
        raise ValueError("`ydata` must not be empty!")

    # Determine type of sigma
    if sigma is not None:
        sigma = np.asarray(sigma)

        # if 1-D, sigma are errors, define transform = 1/sigma
        if sigma.shape == (ydata.size, ):
            transform = 1.0 / sigma
        # if 2-D, sigma is the covariance matrix,
        # define transform = L such that L L^T = C
        elif sigma.shape == (ydata.size, ydata.size):
            try:
                # scipy.linalg.cholesky requires lower=True to return L L^T = A
                transform = cholesky(sigma, lower=True)
            except LinAlgError as e:
                raise ValueError("`sigma` must be positive definite.") from e
        else:
            raise ValueError("`sigma` has incorrect shape.")
    else:
        transform = None

    func = _wrap_func(f, xdata, ydata, transform)
    if callable(jac):
        jac = _wrap_jac(jac, xdata, transform)
    elif jac is None and method != 'lm':
        jac = '2-point'

    if 'args' in kwargs:
        # The specification for the model function `f` does not support
        # additional arguments. Refer to the `curve_fit` docstring for
        # acceptable call signatures of `f`.
        raise ValueError("'args' is not a supported keyword argument.")

    if method == 'lm':
        # if ydata.size == 1, this might be used for broadcasting.
        if ydata.size != 1 and n > ydata.size:
            raise TypeError(f"The number of func parameters={n} must not"
                            f" exceed the number of data points={ydata.size}")
        res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        ysize = len(infodict['fvec'])
        cost = np.sum(infodict['fvec'] ** 2)
        if ier not in [1, 2, 3, 4]:
            raise RuntimeError("Optimal parameters not found: " + errmsg)
    else:
        # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
        if 'max_nfev' not in kwargs:
            kwargs['max_nfev'] = kwargs.pop('maxfev', None)

        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
                            **kwargs)

        if not res.success:
            raise RuntimeError("Optimal parameters not found: " + res.message)

        infodict = dict(nfev=res.nfev, fvec=res.fun)
        ier = res.status
        errmsg = res.message

        ysize = len(res.fun)
        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x

        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(inf)
        warn_cov = True
    elif not absolute_sigma:
        if ysize > p0.size:
            s_sq = cost / (ysize - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(inf)
            warn_cov = True

    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning)

    if full_output:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov


def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
    """Perform a simple check on the gradient for correctness.
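
    A sketch of typical use, assuming `check_gradient` is in scope; entries
    of `err` near 1 indicate agreement between `Dfcn` and `fcn`:

    >>> def fcn(x):
    ...     return [x[0]**2, x[0] * x[1]]
    >>> def Dfcn(x):
    ...     return [[2 * x[0], 0.0], [x[1], x[0]]]
    >>> good, err = check_gradient(fcn, Dfcn, [1.0, 2.0])
    >>> bool(good)
    True
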
    """

    x = atleast_1d(x0)
    n = len(x)
    x = x.reshape((n,))
    fvec = atleast_1d(fcn(x, *args))
    m = len(fvec)
    fvec = fvec.reshape((m,))
    ldfjac = m
    fjac = atleast_1d(Dfcn(x, *args))
    fjac = fjac.reshape((m, n))
    if col_deriv == 0:
        fjac = transpose(fjac)

    xp = zeros((n,), float)
    err = zeros((m,), float)
    fvecp = None
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)

    fvecp = atleast_1d(fcn(xp, *args))
    fvecp = fvecp.reshape((m,))
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)

    good = (prod(greater(err, 0.5), axis=0))

    return (good, err)


def _del2(p0, p1, d):
    # Aitken's Del^2 extrapolation: with d = p2 - 2*p1 + p0 supplied by the
    # caller, this returns p0 - (p1 - p0)**2 / d.
    return p0 - np.square(p1 - p0) / d


def _relerr(actual, desired):
    return (actual - desired) / desired


def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
    p0 = x0
    for i in range(maxiter):
        p1 = func(p0, *args)
        if use_accel:
            p2 = func(p1, *args)
            d = p2 - 2.0 * p1 + p0
            p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
        else:
            p = p1
        relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
        if np.all(np.abs(relerr) < xtol):
            return p
        p0 = p
    msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
    raise RuntimeError(msg)


def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
    """
    Find a fixed point of the function.

    Given a function of one or more variables and a starting point, find a
    fixed point of the function: i.e., where ``func(x0) == x0``.

    Parameters
    ----------
    func : function
        Function to evaluate.
    x0 : array_like
        Initial guess for the fixed point of the function.
    args : tuple, optional
        Extra arguments to `func`.
    xtol : float, optional
        Convergence tolerance, defaults to 1e-08.
    maxiter : int, optional
        Maximum number of iterations, defaults to 500.
    method : {"del2", "iteration"}, optional
        Method of finding the fixed-point, defaults to "del2",
        which uses Steffensen's Method with Aitken's ``Del^2``
        convergence acceleration [1]_. The "iteration" method simply iterates
        the function until convergence is detected, without attempting to
        accelerate the convergence.

    References
    ----------
    .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import optimize
    >>> def func(x, c1, c2):
    ...     return np.sqrt(c1/(x+c2))
    >>> c1 = np.array([10, 12.])
    >>> c2 = np.array([3, 5.])
    >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1, c2))
    array([ 1.4920333 ,  1.37228132])
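
    The unaccelerated "iteration" method reaches the same fixed point here,
    only more slowly (a sketch; the reference values are those shown above):

    >>> x_it = optimize.fixed_point(func, [1.2, 1.3], args=(c1, c2),
    ...                             method='iteration')
    >>> np.allclose(x_it, [1.4920333, 1.37228132])
    True
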
    """
    use_accel = {'del2': True, 'iteration': False}[method]
    x0 = _asarray_validated(x0, as_inexact=True)
    return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)