import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
                   transpose, zeros, prod, greater,
                   asarray, inf,
                   finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError, inv
from scipy._lib._util import _asarray_validated, _lazywhere
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
# from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds

error = _minpack.error

__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']

def _check_func(checker, argname, thefunc, x0, args, numinputs,
                output_shape=None):
    res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
    if (output_shape is not None) and (shape(res) != output_shape):
        if (output_shape[0] != 1):
            if len(output_shape) > 1:
                if output_shape[1] == 1:
                    return shape(res)
            msg = "%s: there is a mismatch between the input and output " \
                  "shape of the '%s' argument" % (checker, argname)
            func_name = getattr(thefunc, '__name__', None)
            if func_name:
                msg += " '%s'." % func_name
            else:
                msg += "."
            msg += ' Shape should be %s but it is %s.' % (output_shape,
                                                          shape(res))
            raise TypeError(msg)
    if issubdtype(res.dtype, inexact):
        dt = res.dtype
    else:
        dt = dtype(float)
    return shape(res), dt

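
# A minimal sketch (hypothetical helper, not part of the original module):
# `_check_func` evaluates `thefunc` once at `x0` and compares the result's
# shape against `output_shape`; a mismatch raises the TypeError built above.
# Here a residual function that returns 3 values for a 2-vector problem
# trips the check.
def _demo_check_func_mismatch():
    x0 = np.ones(2)

    def bad(x):
        return np.ones(3)  # wrong: 3 outputs for a (2,)-shaped problem

    try:
        _check_func('fsolve', 'func', bad, x0, (), 2, output_shape=(2,))
    except TypeError as exc:
        return str(exc)  # "fsolve: there is a mismatch ..."
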
def fsolve(func, x0, args=(), fprime=None, full_output=0,
           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
           epsfcn=None, factor=100, diag=None):
    """
    Find the roots of a function.

    Return the roots of the (non-linear) equations defined by
    ``func(x) = 0`` given a starting estimate.

    Parameters
    ----------
    func : callable ``f(x, *args)``
        A function that takes at least one (possibly vector) argument,
        and returns a value of the same length.
    x0 : ndarray
        The starting estimate for the roots of ``func(x) = 0``.
    args : tuple, optional
        Any extra arguments to `func`.
    fprime : callable ``f(x, *args)``, optional
        A function to compute the Jacobian of `func` with derivatives
        across the rows. By default, the Jacobian will be estimated.
    full_output : bool, optional
        If True, return optional outputs.
    col_deriv : bool, optional
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float, optional
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int, optional
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple, optional
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    epsfcn : float, optional
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `epsfcn` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the
        variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for
        an unsuccessful call).
    infodict : dict
        A dictionary of optional outputs with the keys:

        ``nfev``
            number of function calls
        ``njev``
            number of Jacobian calls
        ``fvec``
            function evaluated at the output
        ``fjac``
            the orthogonal matrix, q, produced by the QR
            factorization of the final approximate Jacobian
            matrix, stored column wise
        ``r``
            upper triangular matrix produced by QR factorization
            of the same matrix
        ``qtf``
            the vector ``(transpose(q) * fvec)``

    ier : int
        An integer flag. Set to 1 if a solution was found, otherwise refer
        to `mesg` for more information.
    mesg : str
        If no solution is found, `mesg` details the cause of failure.

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='hybr'`` in particular.

    Notes
    -----
    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.

    Examples
    --------
    Find a solution to the system of equations:
    ``x0*cos(x1) = 4, x1*x0 - x1 = 5``.

    >>> import numpy as np
    >>> from scipy.optimize import fsolve
    >>> def func(x):
    ...     return [x[0] * np.cos(x[1]) - 4,
    ...             x[1] * x[0] - x[1] - 5]
    >>> root = fsolve(func, [1, 1])
    >>> root
    array([6.50409711, 0.90841421])
    >>> np.isclose(func(root), [0.0, 0.0])  # func(root) should be almost 0.0.
    array([ True,  True])

    """
    options = {'col_deriv': col_deriv,
               'xtol': xtol,
               'maxfev': maxfev,
               'band': band,
               'eps': epsfcn,
               'factor': factor,
               'diag': diag}

    res = _root_hybr(func, x0, args, jac=fprime, **options)
    if full_output:
        x = res['x']
        info = dict((k, res.get(k))
                    for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
        info['fvec'] = res['fun']
        return x, info, res['status'], res['message']
    else:
        status = res['status']
        msg = res['message']
        if status == 0:
            raise TypeError(msg)
        elif status == 1:
            pass
        elif status in [2, 3, 4, 5]:
            warnings.warn(msg, RuntimeWarning)
        else:
            raise TypeError(msg)
        return res['x']

def _root_hybr(func, x0, args=(), jac=None,
               col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
               factor=100, diag=None, **unknown_options):
    """
    Find the roots of a multivariate function using MINPACK's hybrd and
    hybrj routines (modified Powell method).

    Options
    -------
    col_deriv : bool
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    eps : float
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `eps` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as scale factors for the
        variables.

    """
    _check_unknown_options(unknown_options)
    epsfcn = eps

    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    Dfun = jac
    if Dfun is None:
        if band is None:
            ml, mu = -10, -10
        else:
            ml, mu = band[:2]
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
                                 ml, mu, epsfcn, factor, diag)
    else:
        _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._hybrj(func, Dfun, x0, args, 1,
                                 col_deriv, xtol, maxfev, factor, diag)

    x, status = retval[0], retval[-1]

    errors = {0: "Improper input parameters were entered.",
              1: "The solution converged.",
              2: "The number of calls to the function has "
                 "reached maxfev = %d." % maxfev,
              3: "xtol=%f is too small, no further improvement "
                 "in the approximate\n  solution "
                 "is possible." % xtol,
              4: "The iteration is not making good progress, as measured "
                 "by the \n  improvement from the last five "
                 "Jacobian evaluations.",
              5: "The iteration is not making good progress, "
                 "as measured by the \n  improvement from the last "
                 "ten iterations.",
              'unknown': "An error occurred."}

    info = retval[1]
    info['fun'] = info.pop('fvec')
    sol = OptimizeResult(x=x, success=(status == 1), status=status)
    sol.update(info)
    try:
        sol['message'] = errors[status]
    except KeyError:
        sol['message'] = errors['unknown']

    return sol

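
# A minimal sketch (hypothetical helper, not part of the original module):
# when no Jacobian is supplied, passing `band=(ml, mu)` tells hybrd that the
# Jacobian is banded, so the forward-difference estimate only touches the
# stated diagonals. The system below is tridiagonal (each equation couples
# x[i-1], x[i], x[i+1]), so one sub- and one super-diagonal suffice.
def _demo_banded_fsolve():
    def residual(x):
        r = 2.0 * x - np.cos(x)
        r[1:] -= 0.5 * x[:-1]   # coupling to the previous variable
        r[:-1] -= 0.5 * x[1:]   # coupling to the next variable
        return r

    return fsolve(residual, np.zeros(5), band=(1, 1))
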
LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]

def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    """
    Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2, axis=0))
                 y

    Parameters
    ----------
    func : callable
        Should take at least one (possibly length N vector) argument and
        return M floating point numbers. It must not return NaNs or
        fitting might fail.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool, optional
        If non-zero, return all optional outputs.
    col_deriv : bool, optional
        If non-zero, specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If `Dfun` is provided,
        then the default `maxfev` is 100*(N+1) where N is the number of
        elements in x0, otherwise the default `maxfev` is 200*(N+1).
    epsfcn : float, optional
        A variable used in determining a suitable step length for the
        forward-difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x.
        If epsfcn is less than the machine precision, it is assumed that the
        relative errors are of the order of the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
        estimate of the Hessian. A value of None indicates a singular matrix,
        which means the curvature in parameters `x` is numerically flat. To
        obtain the covariance matrix of the parameters `x`, `cov_x` must be
        multiplied by the variance of the residuals -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls
        ``fvec``
            The function evaluated at the output
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        ``qtf``
            The vector (transpose(q) * fvec).

    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    See Also
    --------
    least_squares : Newer interface to solve nonlinear least-squares problems
        with bounds on the variables. See ``method=='lm'`` in particular.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

        func(params) = ydata - f(xdata, params)

    so that the objective function is ::

          min   sum((ydata - f(xdata, params))**2, axis=0)
        params

    The solution, `x`, is always a 1-D array, regardless of the shape of `x0`,
    or whether `x0` is a scalar.

    Examples
    --------
    >>> from scipy.optimize import leastsq
    >>> def func(x):
    ...     return 2*(x-3)**2+1
    >>> leastsq(func, 0)
    (array([2.99999999]), 1)

    """
    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]

    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))

    if epsfcn is None:
        epsfcn = finfo(dtype).eps

    if Dfun is None:
        if maxfev == 0:
            maxfev = 200*(n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output,
                                 col_deriv, ftol, xtol, gtol, maxfev,
                                 factor, diag)

    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n  are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n  are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n  most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n  Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to the function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n  is possible." % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n  solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n  the Jacobian to machine "
                  "precision." % gtol, ValueError]}

    # The FORTRAN return value (possible return values are >= 0 and <= 8)
    info = retval[-1]

    if full_output:
        cov_x = None
        if info in LEASTSQ_SUCCESS:
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except (LinAlgError, ValueError):
                pass
        return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
    else:
        if info in LEASTSQ_FAILURE:
            warnings.warn(errors[info][0], RuntimeWarning)
        elif info == 0:
            raise errors[info][1](errors[info][0])
        return retval[0], info

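
# A minimal sketch (hypothetical helper, not part of the original module):
# as the docstring notes, `cov_x` must be multiplied by the variance of the
# residuals to obtain the covariance of the parameter estimates; this is
# exactly what `curve_fit` does internally with `absolute_sigma=False`.
def _demo_leastsq_covariance():
    xdata = np.linspace(0, 1, 20)
    ydata = 1.0 + 2.0 * xdata  # noise-free line: intercept 1, slope 2

    def residuals(p):
        return ydata - (p[0] + p[1] * xdata)

    p, cov_x, infodict, mesg, ier = leastsq(residuals, [0.0, 0.0],
                                            full_output=True)
    s_sq = (infodict['fvec'] ** 2).sum() / (len(xdata) - len(p))
    pcov = cov_x * s_sq  # parameter covariance, as computed by curve_fit
    return p, pcov
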
def _wrap_func(func, xdata, ydata, transform):
    if transform is None:
        def func_wrapped(params):
            return func(xdata, *params) - ydata
    elif transform.ndim == 1:
        def func_wrapped(params):
            return transform * (func(xdata, *params) - ydata)
    else:
        # Chisq = (y - yd)^T C^{-1} (y-yd)
        # transform = L such that C = L L^T
        # C^{-1} = L^{-T} L^{-1}
        # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
        # Define (y-yd)' = L^{-1} (y-yd)
        # by solving
        # L (y-yd)' = (y-yd)
        # and minimize (y-yd)'^T (y-yd)'
        def func_wrapped(params):
            return solve_triangular(transform, func(xdata, *params) - ydata,
                                    lower=True)
    return func_wrapped

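
# A minimal sketch (hypothetical helper, not part of the original module):
# numerical check of the comment block above. Whitening the residual with
# L^{-1} via solve_triangular reproduces r^T C^{-1} r when C = L L^T, without
# ever forming the inverse of C explicitly.
def _demo_whitening_equivalence():
    rng = np.random.RandomState(0)
    A = rng.rand(4, 4)
    C = A @ A.T + 4 * np.eye(4)          # a symmetric positive definite C
    L = cholesky(C, lower=True)          # C = L L^T
    r = rng.rand(4)                      # some residual vector
    w = solve_triangular(L, r, lower=True)   # w = L^{-1} r
    chisq_direct = r @ inv(C) @ r
    return np.allclose(w @ w, chisq_direct)  # True
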
def _wrap_jac(jac, xdata, transform):
    if transform is None:
        def jac_wrapped(params):
            return jac(xdata, *params)
    elif transform.ndim == 1:
        def jac_wrapped(params):
            return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
    else:
        def jac_wrapped(params):
            return solve_triangular(transform,
                                    np.asarray(jac(xdata, *params)),
                                    lower=True)
    return jac_wrapped

def _initialize_feasible(lb, ub):
    p0 = np.ones_like(lb)
    lb_finite = np.isfinite(lb)
    ub_finite = np.isfinite(ub)

    mask = lb_finite & ub_finite
    p0[mask] = 0.5 * (lb[mask] + ub[mask])

    mask = lb_finite & ~ub_finite
    p0[mask] = lb[mask] + 1

    mask = ~lb_finite & ub_finite
    p0[mask] = ub[mask] - 1

    return p0

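
# A minimal sketch (hypothetical helper, not part of the original module):
# `_initialize_feasible` picks a start point strictly inside the bounds --
# the midpoint for two-sided bounds, one unit inside a single finite bound,
# and 1.0 when both bounds are infinite.
def _demo_initialize_feasible():
    lb = np.array([0.0, 2.0, -np.inf, -np.inf])
    ub = np.array([4.0, np.inf, 3.0, np.inf])
    return _initialize_feasible(lb, ub)  # -> array([2., 3., 2., 1.])
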
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=True, bounds=(-np.inf, np.inf), method=None,
              jac=None, **kwargs):
    """
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``.

    Parameters
    ----------
    f : callable
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : array_like or object
        The independent variable where the data is measured.
        Should usually be an M-length sequence or a (k,M)-shaped array for
        functions with k predictors, but can actually be any object.
    ydata : array_like
        The dependent data, a length M array - nominally ``f(xdata, ...)``.
    p0 : array_like, optional
        Initial guess for the parameters (length N). If None, then the
        initial values will all be 1 (if the number of parameters for the
        function can be determined using introspection, otherwise a
        ValueError is raised).
    sigma : None or M-length sequence or MxM array, optional
        Determines the uncertainty in `ydata`. If we define residuals as
        ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
        depends on its number of dimensions:

        - A 1-D `sigma` should contain values of standard deviations of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = sum((r / sigma) ** 2)``.

        - A 2-D `sigma` should contain the covariance matrix of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = r.T @ inv(sigma) @ r``.

          .. versionadded:: 0.19

        None (default) is equivalent to 1-D `sigma` filled with ones.
    absolute_sigma : bool, optional
        If True, `sigma` is used in an absolute sense and the estimated
        parameter covariance `pcov` reflects these absolute values.

        If False (default), only the relative magnitudes of the `sigma`
        values matter. The returned parameter covariance matrix `pcov` is
        based on scaling `sigma` by a constant factor. This constant is set
        by demanding that the reduced `chisq` for the optimal parameters
        `popt` when using the *scaled* `sigma` equals unity. In other words,
        `sigma` is scaled to match the sample variance of the residuals
        after the fit. Mathematically,
        ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
    check_finite : bool, optional
        If True, check that the input arrays do not contain NaNs or infs,
        and raise a ValueError if they do. Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain NaNs. Default is True.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on parameters. Defaults to no bounds.
        Each element of the tuple must be either an array with the length
        equal to the number of parameters, or a scalar (in which case the
        bound is taken to be the same for all parameters). Use ``np.inf``
        with an appropriate sign to disable bounds on all or some parameters.

        .. versionadded:: 0.17
    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization. See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided. The method 'lm' won't work when the number of observations
        is less than the number of variables; use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    jac : callable, string or None, optional
        Function with signature ``jac(x, ...)`` which computes the Jacobian
        matrix of the model function with respect to parameters as a dense
        array_like structure. It will be scaled according to provided `sigma`.
        If None (default), the Jacobian will be estimated numerically.
        String keywords for 'trf' and 'dogbox' methods can be used to select
        a finite difference scheme, see `least_squares`.

        .. versionadded:: 0.18
    kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared
        residuals of ``f(xdata, *popt) - ydata`` is minimized.
    pcov : 2-D array
        The estimated covariance of popt. The diagonals provide the variance
        of the parameter estimate. To compute one standard deviation errors
        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.

        How the `sigma` parameter affects the estimated covariance
        depends on the `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution does not have full rank, then
        the 'lm' method returns a matrix filled with ``np.inf``; the 'trf'
        and 'dogbox' methods, on the other hand, use the Moore-Penrose
        pseudoinverse to compute the covariance matrix.

    Raises
    ------
    ValueError
        if either `ydata` or `xdata` contain NaNs, or if incompatible options
        are used.

    RuntimeError
        if the least-squares minimization fails.

    OptimizeWarning
        if covariance of the parameters can not be estimated.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    scipy.stats.linregress : Calculate a linear least squares regression for
                             two sets of measurements.

    Notes
    -----
    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
    through `leastsq`. Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
    the docstring of `least_squares` for more information.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.optimize import curve_fit

    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    Define the data to be fit with some noise:

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> np.random.seed(1729)
    >>> y_noise = 0.2 * np.random.normal(size=xdata.size)
    >>> ydata = y + y_noise
    >>> plt.plot(xdata, ydata, 'b-', label='data')

    Fit for the parameters a, b, c of the function `func`:

    >>> popt, pcov = curve_fit(func, xdata, ydata)
    >>> popt
    array([ 2.55423706,  1.35190947,  0.47450618])
    >>> plt.plot(xdata, func(xdata, *popt), 'r-',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    Constrain the optimization to the region of ``0 <= a <= 3``,
    ``0 <= b <= 1`` and ``0 <= c <= 0.5``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
    >>> popt
    array([ 2.43708906,  1.        ,  0.35015434])
    >>> plt.plot(xdata, func(xdata, *popt), 'g--',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend()
    >>> plt.show()

    """
    if p0 is None:
        # determine number of parameters by inspecting the function
        sig = _getfullargspec(f)
        args = sig.args
        if len(args) < 2:
            raise ValueError("Unable to determine number of fit parameters.")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    lb, ub = prepare_bounds(bounds, n)
    if p0 is None:
        p0 = _initialize_feasible(lb, ub)

    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'

    if method == 'lm' and bounded_problem:
        raise ValueError("Method 'lm' only works for unconstrained problems. "
                         "Use 'trf' or 'dogbox' instead.")

    # optimization may produce garbage for float32 inputs, cast them to float64

    # NaNs cannot be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata, float)
    else:
        ydata = np.asarray(ydata, float)

    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata, float)
        else:
            xdata = np.asarray(xdata, float)

    if ydata.size == 0:
        raise ValueError("`ydata` must not be empty!")

    # Determine type of sigma
    if sigma is not None:
        sigma = np.asarray(sigma)

        # if 1-D, sigma are errors, define transform = 1/sigma
        if sigma.shape == (ydata.size, ):
            transform = 1.0 / sigma
        # if 2-D, sigma is the covariance matrix,
        # define transform = L such that L L^T = C
        elif sigma.shape == (ydata.size, ydata.size):
            try:
                # scipy.linalg.cholesky requires lower=True to return L L^T = A
                transform = cholesky(sigma, lower=True)
            except LinAlgError:
                raise ValueError("`sigma` must be positive definite.")
        else:
            raise ValueError("`sigma` has incorrect shape.")
    else:
        transform = None

    func = _wrap_func(f, xdata, ydata, transform)
    if callable(jac):
        jac = _wrap_jac(jac, xdata, transform)
    elif jac is None and method != 'lm':
        jac = '2-point'

    if 'args' in kwargs:
        # The specification for the model function `f` does not support
        # additional arguments. Refer to the `curve_fit` docstring for
        # acceptable call signatures of `f`.
        raise ValueError("'args' is not a supported keyword argument.")

    if method == 'lm':
        # Remove full_output from kwargs, otherwise we're passing it in twice.
        return_full = kwargs.pop('full_output', False)
        res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        ysize = len(infodict['fvec'])
        cost = np.sum(infodict['fvec'] ** 2)
        if ier not in [1, 2, 3, 4]:
            raise RuntimeError("Optimal parameters not found: " + errmsg)
    else:
        # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
        if 'max_nfev' not in kwargs:
            kwargs['max_nfev'] = kwargs.pop('maxfev', None)

        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
                            **kwargs)

        if not res.success:
            raise RuntimeError("Optimal parameters not found: " + res.message)

        ysize = len(res.fun)
        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x

        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)
        return_full = False

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(inf)
        warn_cov = True
    elif not absolute_sigma:
        if ysize > p0.size:
            s_sq = cost / (ysize - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(inf)
            warn_cov = True

    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning)

    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov

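
# A minimal sketch (hypothetical helper, not part of the original module):
# numerical check of the `absolute_sigma` relation quoted in the docstring,
# pcov(absolute_sigma=False) == pcov(absolute_sigma=True) * chisq(popt)/(M-N).
def _demo_absolute_sigma():
    xdata = np.linspace(0, 4, 20)
    rng = np.random.RandomState(0)
    ydata = 2.0 * xdata + 1.0 + 0.1 * rng.normal(size=xdata.size)
    sigma = np.full_like(ydata, 0.1)

    def line(x, a, b):
        return a * x + b

    popt, pcov_abs = curve_fit(line, xdata, ydata, sigma=sigma,
                               absolute_sigma=True)
    _, pcov_rel = curve_fit(line, xdata, ydata, sigma=sigma,
                            absolute_sigma=False)
    chisq = (((ydata - line(xdata, *popt)) / sigma) ** 2).sum()
    factor = chisq / (xdata.size - popt.size)   # chisq(popt) / (M - N)
    return np.allclose(pcov_rel, pcov_abs * factor)  # True
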
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
    """Perform a simple check on the gradient for correctness.

    """

    x = atleast_1d(x0)
    n = len(x)
    x = x.reshape((n,))
    fvec = atleast_1d(fcn(x, *args))
    m = len(fvec)
    fvec = fvec.reshape((m,))
    ldfjac = m
    fjac = atleast_1d(Dfcn(x, *args))
    fjac = fjac.reshape((m, n))
    if col_deriv == 0:
        fjac = transpose(fjac)

    xp = zeros((n,), float)
    err = zeros((m,), float)
    fvecp = None
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)

    fvecp = atleast_1d(fcn(xp, *args))
    fvecp = fvecp.reshape((m,))
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)

    good = (prod(greater(err, 0.5), axis=0))

    return (good, err)

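
# A minimal sketch (hypothetical helper, not part of the original module):
# `check_gradient` returns (good, err), where each `err` component is near
# 1.0 when the analytic Jacobian agrees with the function and near 0.0 when
# it does not; `good` is nonzero only if every component exceeds 0.5. The
# Jacobian below is exact, so the check should pass.
def _demo_check_gradient():
    def fcn(x):
        return np.array([x[0] ** 2 + x[1], np.sin(x[1])])

    def dfcn(x):
        return np.array([[2 * x[0], 1.0],
                         [0.0, np.cos(x[1])]])

    good, err = check_gradient(fcn, dfcn, np.array([1.0, 0.5]))
    return good, err  # good nonzero, err components close to 1.0
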
def _del2(p0, p1, d):
    return p0 - np.square(p1 - p0) / d


def _relerr(actual, desired):
    return (actual - desired) / desired

def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
    p0 = x0
    for i in range(maxiter):
        p1 = func(p0, *args)
        if use_accel:
            p2 = func(p1, *args)
            d = p2 - 2.0 * p1 + p0
            p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
        else:
            p = p1
        relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
        if np.all(np.abs(relerr) < xtol):
            return p
        p0 = p
    msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
    raise RuntimeError(msg)

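
# A minimal sketch (hypothetical helper, not part of the original module):
# `_del2` applies Aitken's Del^2 extrapolation, p = p0 - (p1 - p0)**2 / d
# with d = p2 - 2*p1 + p0, which is what gives method='del2' its accelerated
# convergence. For the contraction below (fixed point at x = 0), a single
# accelerated step lands much closer to the fixed point than two plain
# iterations.
def _demo_del2_step():
    f = lambda x: 0.5 * np.sin(x)
    p0 = np.float64(1.0)
    p1 = f(p0)
    p2 = f(p1)
    accel = _del2(p0, p1, p2 - 2.0 * p1 + p0)
    return accel, p2  # |accel| is much smaller than |p2|
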
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
    """
    Find a fixed point of the function.

    Given a function of one or more variables and a starting point, find a
    fixed point of the function: i.e., where ``func(x0) == x0``.

    Parameters
    ----------
    func : function
        Function to evaluate.
    x0 : array_like
        Initial guess for the fixed point of the function.
    args : tuple, optional
        Extra arguments to `func`.
    xtol : float, optional
        Convergence tolerance, defaults to 1e-08.
    maxiter : int, optional
        Maximum number of iterations, defaults to 500.
    method : {"del2", "iteration"}, optional
        Method of finding the fixed-point, defaults to "del2",
        which uses Steffensen's Method with Aitken's ``Del^2``
        convergence acceleration [1]_. The "iteration" method simply iterates
        the function until convergence is detected, without attempting to
        accelerate the convergence.

    References
    ----------
    .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import optimize
    >>> def func(x, c1, c2):
    ...     return np.sqrt(c1/(x+c2))
    >>> c1 = np.array([10, 12.])
    >>> c2 = np.array([3, 5.])
    >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1, c2))
    array([ 1.4920333 ,  1.37228132])

    """
    use_accel = {'del2': True, 'iteration': False}[method]
    x0 = _asarray_validated(x0, as_inexact=True)
    return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)