import warnings
from collections import namedtuple
import operator
from . import _zeros
import numpy as np


_iter = 100
_xtol = 2e-12
_rtol = 4 * np.finfo(float).eps

__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748',
           'RootResults']

# Must agree with CONVERGED, SIGNERR, CONVERR, ... in zeros.h
_ECONVERGED = 0
_ESIGNERR = -1
_ECONVERR = -2
_EVALUEERR = -3
_EINPROGRESS = 1

CONVERGED = 'converged'
SIGNERR = 'sign error'
CONVERR = 'convergence error'
VALUEERR = 'value error'
INPROGRESS = 'No error'


flag_map = {_ECONVERGED: CONVERGED, _ESIGNERR: SIGNERR, _ECONVERR: CONVERR,
            _EVALUEERR: VALUEERR, _EINPROGRESS: INPROGRESS}


class RootResults(object):
    """Represents the root finding result.

    Attributes
    ----------
    root : float
        Estimated root location.
    iterations : int
        Number of iterations needed to find the root.
    function_calls : int
        Number of times the function was called.
    converged : bool
        True if the routine converged.
    flag : str
        Description of the cause of termination.

    """

    def __init__(self, root, iterations, function_calls, flag):
        self.root = root
        self.iterations = iterations
        self.function_calls = function_calls
        self.converged = flag == _ECONVERGED
        self.flag = None
        try:
            self.flag = flag_map[flag]
        except KeyError:
            self.flag = 'unknown error %d' % (flag,)

    def __repr__(self):
        attrs = ['converged', 'flag', 'function_calls',
                 'iterations', 'root']
        m = max(map(len, attrs)) + 1
        return '\n'.join([a.rjust(m) + ': ' + repr(getattr(self, a))
                          for a in attrs])


def results_c(full_output, r):
    if full_output:
        x, funcalls, iterations, flag = r
        results = RootResults(root=x,
                              iterations=iterations,
                              function_calls=funcalls,
                              flag=flag)
        return x, results
    else:
        return r


def _results_select(full_output, r):
    """Select from a tuple of (root, funccalls, iterations, flag)"""
    x, funcalls, iterations, flag = r
    if full_output:
        results = RootResults(root=x,
                              iterations=iterations,
                              function_calls=funcalls,
                              flag=flag)
        return x, results
    return x
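
# Illustrative note: with full_output=False, _results_select returns just the
# root; with full_output=True it returns (root, RootResults). For example,
#     _results_select(True, (1.0, 12, 10, _ECONVERGED))
# yields a pair whose second element reports converged=True.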


def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
           fprime2=None, x1=None, rtol=0.0,
           full_output=False, disp=True):
    """
    Find a zero of a real or complex function using the Newton-Raphson
    (or secant or Halley's) method.

    Find a zero of the function `func` given a nearby starting point `x0`.
    The Newton-Raphson method is used if the derivative `fprime` of `func`
    is provided, otherwise the secant method is used. If the second order
    derivative `fprime2` of `func` is also provided, then Halley's method is
    used.

    If `x0` is a sequence with more than one item, then `newton` returns an
    array, and `func` must be vectorized and return a sequence or array of the
    same shape as its first argument. If `fprime` or `fprime2` is given, then
    its return must also have the same shape.

    Parameters
    ----------
    func : callable
        The function whose zero is wanted. It must be a function of a
        single variable of the form ``f(x, a, b, c...)``, where ``a, b, c...``
        are extra arguments that can be passed in the `args` parameter.
    x0 : float, sequence, or ndarray
        An initial estimate of the zero that should be somewhere near the
        actual zero. If not scalar, then `func` must be vectorized and return
        a sequence or array of the same shape as its first argument.
    fprime : callable, optional
        The derivative of the function when available and convenient. If it
        is None (default), then the secant method is used.
    args : tuple, optional
        Extra arguments to be used in the function call.
    tol : float, optional
        The allowable error of the zero value. If `func` is complex-valued,
        a larger `tol` is recommended as both the real and imaginary parts
        of `x` contribute to ``|x - x0|``.
    maxiter : int, optional
        Maximum number of iterations.
    fprime2 : callable, optional
        The second order derivative of the function when available and
        convenient. If it is None (default), then the normal Newton-Raphson
        or the secant method is used. If it is not None, then Halley's method
        is used.
    x1 : float, optional
        Another estimate of the zero that should be somewhere near the
        actual zero. Used if `fprime` is not provided.
    rtol : float, optional
        Tolerance (relative) for termination.
    full_output : bool, optional
        If `full_output` is False (default), the root is returned.
        If True and `x0` is scalar, the return value is ``(x, r)``, where ``x``
        is the root and ``r`` is a `RootResults` object.
        If True and `x0` is non-scalar, the return value is ``(x, converged,
        zero_der)`` (see Returns section for details).
    disp : bool, optional
        If True, raise a RuntimeError if the algorithm didn't converge, with
        the error message containing the number of iterations and current
        function value. Otherwise, the convergence status is recorded in a
        `RootResults` return object.
        Ignored if `x0` is not scalar.
        *Note: this has little to do with displaying; however, the `disp`
        keyword cannot be renamed for backwards compatibility.*

    Returns
    -------
    root : float, sequence, or ndarray
        Estimated location where function is zero.
    r : `RootResults`, optional
        Present if ``full_output=True`` and `x0` is scalar.
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.
    converged : ndarray of bool, optional
        Present if ``full_output=True`` and `x0` is non-scalar.
        For vector functions, indicates which elements converged successfully.
    zero_der : ndarray of bool, optional
        Present if ``full_output=True`` and `x0` is non-scalar.
        For vector functions, indicates which elements had a zero derivative.

    See Also
    --------
    brentq, brenth, ridder, bisect
    fsolve : find zeros in N dimensions.

    Notes
    -----
    The convergence rate of the Newton-Raphson method is quadratic,
    the Halley method is cubic, and the secant method is
    sub-quadratic. This means that if the function is well-behaved
    the actual error in the estimated zero after the nth iteration
    is approximately the square (cube for Halley) of the error
    after the (n-1)th step. However, the stopping criterion used
    here is the step size, and there is no guarantee that a zero
    has been found. Consequently, the result should be verified.
    Safer algorithms are brentq, brenth, ridder, and bisect,
    but they all require that the root first be bracketed in an
    interval where the function changes sign. The brentq algorithm
    is recommended for general use in one dimensional problems
    when such an interval has been found.

    When `newton` is used with arrays, it is best suited for the following
    types of problems:

    * The initial guesses, `x0`, are all relatively the same distance from
      the roots.
    * Some or all of the extra arguments, `args`, are also arrays so that a
      class of similar problems can be solved together.
    * The size of the initial guesses, `x0`, is larger than O(100) elements.
      Otherwise, a naive loop may perform as well or better than a
      vectorized call.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import optimize
    >>> import matplotlib.pyplot as plt

    >>> def f(x):
    ...     return (x**3 - 1)  # only one real root at x = 1

    ``fprime`` is not provided, use the secant method:

    >>> root = optimize.newton(f, 1.5)
    >>> root
    1.0000000000000016
    >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x)
    >>> root
    1.0000000000000016

    Only ``fprime`` is provided, use the Newton-Raphson method:

    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2)
    >>> root
    1.0

    Both ``fprime2`` and ``fprime`` are provided, use Halley's method:

    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2,
    ...                        fprime2=lambda x: 6 * x)
    >>> root
    1.0

    When we want to find zeros for a set of related starting values and/or
    function parameters, we can provide both of those as an array of inputs:

    >>> f = lambda x, a: x**3 - a
    >>> fder = lambda x, a: 3 * x**2
    >>> np.random.seed(4321)
    >>> x = np.random.randn(100)
    >>> a = np.arange(-50, 50)
    >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ))

    The above is the equivalent of solving for each value in ``(x, a)``
    separately in a for-loop, just faster:

    >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,))
    ...             for x0, a0 in zip(x, a)]
    >>> np.allclose(vec_res, loop_res)
    True

    Plot the results found for all values of ``a``:

    >>> analytical_result = np.sign(a) * np.abs(a)**(1/3)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(a, analytical_result, 'o')
    >>> ax.plot(a, vec_res, '.')
    >>> ax.set_xlabel('$a$')
    >>> ax.set_ylabel('$x$ where $f(x, a)=0$')
    >>> plt.show()

    """
    if tol <= 0:
        raise ValueError("tol too small (%g <= 0)" % tol)
    maxiter = operator.index(maxiter)
    if maxiter < 1:
        raise ValueError("maxiter must be greater than 0")
    if np.size(x0) > 1:
        return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
                             full_output)

    # Convert to float (don't use float(x0); this works also for complex x0)
    p0 = 1.0 * x0
    funcalls = 0
    if fprime is not None:
        # Newton-Raphson method
        for itr in range(maxiter):
            # first evaluate fval
            fval = func(p0, *args)
            funcalls += 1
            # If fval is 0, a root has been found, then terminate
            if fval == 0:
                return _results_select(
                    full_output, (p0, funcalls, itr, _ECONVERGED))
            fder = fprime(p0, *args)
            funcalls += 1
            if fder == 0:
                msg = "Derivative was zero."
                if disp:
                    msg += (
                        " Failed to converge after %d iterations, value is %s."
                        % (itr + 1, p0))
                    raise RuntimeError(msg)
                warnings.warn(msg, RuntimeWarning)
                return _results_select(
                    full_output, (p0, funcalls, itr + 1, _ECONVERR))
            newton_step = fval / fder
            if fprime2:
                fder2 = fprime2(p0, *args)
                funcalls += 1
                # Halley's method:
                #   newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder)
                # Only do it if denominator stays close enough to 1
                # Rationale: If 1-adj < 0, then Halley sends x in the
                # opposite direction to Newton. Doesn't happen if x is close
                # enough to root.
                adj = newton_step * fder2 / fder / 2
                if np.abs(adj) < 1:
                    newton_step /= 1.0 - adj
            p = p0 - newton_step
            if np.isclose(p, p0, rtol=rtol, atol=tol):
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
            p0 = p
    else:
        # Secant method
        if x1 is not None:
            if x1 == x0:
                raise ValueError("x1 and x0 must be different")
            p1 = x1
        else:
            eps = 1e-4
            p1 = x0 * (1 + eps)
            p1 += (eps if p1 >= 0 else -eps)
        q0 = func(p0, *args)
        funcalls += 1
        q1 = func(p1, *args)
        funcalls += 1
        if abs(q1) < abs(q0):
            p0, p1, q0, q1 = p1, p0, q1, q0
        for itr in range(maxiter):
            if q1 == q0:
                if p1 != p0:
                    msg = "Tolerance of %s reached." % (p1 - p0)
                    if disp:
                        msg += (
                            " Failed to converge after %d iterations, value is %s."
                            % (itr + 1, p1))
                        raise RuntimeError(msg)
                    warnings.warn(msg, RuntimeWarning)
                p = (p1 + p0) / 2.0
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
            else:
                if abs(q1) > abs(q0):
                    p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1)
                else:
                    p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0)
            if np.isclose(p, p1, rtol=rtol, atol=tol):
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
            p0, q0 = p1, q1
            p1 = p
            q1 = func(p1, *args)
            funcalls += 1

    if disp:
        msg = ("Failed to converge after %d iterations, value is %s."
               % (itr + 1, p))
        raise RuntimeError(msg)

    return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR))


def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output):
    """
    A vectorized version of Newton, Halley, and secant methods for arrays.

    Do not use this method directly. This method is called from `newton`
    when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
    """
    # Explicitly copy `x0` as `p` will be modified inplace, but the
    # user's array should not be altered.
    p = np.array(x0, copy=True)

    failures = np.ones_like(p, dtype=bool)
    nz_der = np.ones_like(failures)
    if fprime is not None:
        # Newton-Raphson method
        for iteration in range(maxiter):
            # first evaluate fval
            fval = np.asarray(func(p, *args))
            # If all fval are 0, all roots have been found, then terminate
            if not fval.any():
                failures = fval.astype(bool)
                break
            fder = np.asarray(fprime(p, *args))
            nz_der = (fder != 0)
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                break
            # Newton step
            dp = fval[nz_der] / fder[nz_der]
            if fprime2 is not None:
                fder2 = np.asarray(fprime2(p, *args))
                dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der])
            # only update nonzero derivatives
            p = np.asarray(p, dtype=np.result_type(p, dp, np.float64))
            p[nz_der] -= dp
            failures[nz_der] = np.abs(dp) >= tol  # items not yet converged
            # stop iterating if there aren't any failures, not incl zero der
            if not failures[nz_der].any():
                break
    else:
        # Secant method
        dx = np.finfo(float).eps**0.33
        p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
        q0 = np.asarray(func(p, *args))
        q1 = np.asarray(func(p1, *args))
        active = np.ones_like(p, dtype=bool)
        for iteration in range(maxiter):
            nz_der = (q1 != q0)
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                p = (p1 + p) / 2.0
                break
            # Secant Step
            dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
            # only update nonzero derivatives
            p = np.asarray(p, dtype=np.result_type(p, p1, dp, np.float64))
            p[nz_der] = p1[nz_der] - dp
            active_zero_der = ~nz_der & active
            p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
            active &= nz_der  # don't assign zero derivatives again
            failures[nz_der] = np.abs(dp) >= tol  # not yet converged
            # stop iterating if there aren't any failures, not incl zero der
            if not failures[nz_der].any():
                break
            p1, p = p, p1
            q0 = q1
            q1 = np.asarray(func(p1, *args))

    zero_der = ~nz_der & failures  # don't include converged with zero-ders
    if zero_der.any():
        # Secant warnings
        if fprime is None:
            nonzero_dp = (p1 != p)
            # non-zero dp, but infinite newton step
            zero_der_nz_dp = (zero_der & nonzero_dp)
            if zero_der_nz_dp.any():
                rms = np.sqrt(
                    sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2)
                )
                warnings.warn(
                    'RMS of {:g} reached'.format(rms), RuntimeWarning)
        # Newton or Halley warnings
        else:
            all_or_some = 'all' if zero_der.all() else 'some'
            msg = '{:s} derivatives were zero'.format(all_or_some)
            warnings.warn(msg, RuntimeWarning)
    elif failures.any():
        all_or_some = 'all' if failures.all() else 'some'
        msg = '{0:s} failed to converge after {1:d} iterations'.format(
            all_or_some, maxiter
        )
        if failures.all():
            raise RuntimeError(msg)
        warnings.warn(msg, RuntimeWarning)

    if full_output:
        result = namedtuple('result', ('root', 'converged', 'zero_der'))
        p = result(p, ~failures, zero_der)

    return p
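
# Illustrative usage note: array-valued calls to newton() route here, e.g.
#     roots = newton(lambda x: x**2 - 1, np.linspace(0.5, 2.0, 5))
# returns an array of values close to 1.0 (the secant branch is used here,
# since fprime is None).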


def bisect(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find root of a function within an interval using bisection.

    Basic bisection routine to find a zero of the function `f` between the
    arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
    Slow but sure.

    Parameters
    ----------
    f : function
        Python function returning a number. `f` must be continuous, and
        f(a) and f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in a `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    Examples
    --------

    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.bisect(f, 0, 2)
    >>> root
    1.0

    >>> root = optimize.bisect(f, -2, 0)
    >>> root
    -1.0

    See Also
    --------
    brentq, brenth, ridder, newton
    fixed_point : scalar fixed-point finder
    fsolve : n-dimensional root-finding

    """
    if not isinstance(args, tuple):
        args = (args,)
    maxiter = operator.index(maxiter)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


def ridder(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in an interval using Ridders' method.

    Parameters
    ----------
    f : function
        Python function returning a number. f must be continuous, and f(a) and
        f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in the `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence.
        In particular, ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, bisect, newton : 1-D root-finding
    fixed_point : scalar fixed-point finder

    Notes
    -----
    Uses [Ridders1979]_ method to find a zero of the function `f` between the
    arguments `a` and `b`. Ridders' method is faster than bisection, but not
    generally as fast as the Brent routines. [Ridders1979]_ provides the
    classic description and source of the algorithm. A description can also be
    found in any recent edition of Numerical Recipes.

    The routine used here diverges slightly from standard presentations in
    order to be a bit more careful of tolerance.

    References
    ----------
    .. [Ridders1979]
       Ridders, C. F. J. "A New Algorithm for Computing a
       Single Root of a Real Continuous Function."
       IEEE Trans. Circuits Systems 26, 979-980, 1979.

    Examples
    --------

    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.ridder(f, 0, 2)
    >>> root
    1.0

    >>> root = optimize.ridder(f, -2, 0)
    >>> root
    -1.0
    """
    if not isinstance(args, tuple):
        args = (args,)
    maxiter = operator.index(maxiter)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


def brentq(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in a bracketing interval using Brent's method.

    Uses the classic Brent's method to find a zero of the function `f` on
    the sign changing interval [a, b]. Generally considered the best of the
    rootfinding routines here. It is a safe version of the secant method that
    uses inverse quadratic extrapolation. Brent's method combines root
    bracketing, interval bisection, and inverse quadratic interpolation. It is
    sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973)
    claims convergence is guaranteed for functions computable within [a,b].

    [Brent1973]_ provides the classic description of the algorithm. Another
    description can be found in a recent edition of Numerical Recipes,
    including [PressEtal1992]_. A third description is at
    http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to
    understand the algorithm just by reading our code. Our code diverges a bit
    from standard presentations: we choose a different formula for the
    extrapolation step.

    Parameters
    ----------
    f : function
        Python function returning a number. The function :math:`f`
        must be continuous, and :math:`f(a)` and :math:`f(b)` must
        have opposite signs.
    a : scalar
        One end of the bracketing interval :math:`[a, b]`.
    b : scalar
        The other end of the bracketing interval :math:`[a, b]`.
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
        and ``rtol/2``. [Brent1973]_
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
        and ``rtol/2``. [Brent1973]_
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in the `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    Notes
    -----
    `f` must be continuous. f(a) and f(b) must have opposite signs.

    Related functions fall into several classes:

    multivariate local optimizers
      `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
    nonlinear least squares minimizer
      `leastsq`
    constrained multivariate optimizers
      `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
    global optimizers
      `basinhopping`, `brute`, `differential_evolution`
    local scalar minimizers
      `fminbound`, `brent`, `golden`, `bracket`
    N-D root-finding
      `fsolve`
    1-D root-finding
      `brenth`, `ridder`, `bisect`, `newton`
    scalar fixed-point finder
      `fixed_point`

    References
    ----------
    .. [Brent1973]
       Brent, R. P.,
       *Algorithms for Minimization Without Derivatives*.
       Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.

    .. [PressEtal1992]
       Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
       *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
       Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
       Section 9.3: "Van Wijngaarden-Dekker-Brent Method."

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.brentq(f, -2, 0)
    >>> root
    -1.0

    >>> root = optimize.brentq(f, 0, 2)
    >>> root
    1.0
    """
    if not isinstance(args, tuple):
        args = (args,)
    maxiter = operator.index(maxiter)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


def brenth(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """Find a root of a function in a bracketing interval using Brent's
    method with hyperbolic extrapolation.

    A variation on the classic Brent routine to find a zero of the function f
    between the arguments a and b that uses hyperbolic extrapolation instead
    of inverse quadratic extrapolation. There was a paper back in the 1980's
    ... f(a) and f(b) cannot have the same signs. Generally on a par with the
    `brentq` routine, but not as heavily tested. It is a safe version of the
    secant method that uses hyperbolic extrapolation. The version here is by
    Chuck Harris.

    Parameters
    ----------
    f : function
        Python function returning a number. f must be continuous, and f(a) and
        f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative. As with `brentq`, for nice
        functions the method will often satisfy the above condition
        with ``xtol/2`` and ``rtol/2``.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``. As with `brentq`, for nice functions
        the method will often satisfy the above condition with
        ``xtol/2`` and ``rtol/2``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in the `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.brenth(f, -2, 0)
    >>> root
    -1.0

    >>> root = optimize.brenth(f, 0, 2)
    >>> root
    1.0

    See Also
    --------
    fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers

    leastsq : nonlinear least squares minimizer

    fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers

    basinhopping, differential_evolution, brute : global optimizers

    fminbound, brent, golden, bracket : local scalar minimizers

    fsolve : N-D root-finding

    brentq, brenth, ridder, bisect, newton : 1-D root-finding

    fixed_point : scalar fixed-point finder

    """
    if not isinstance(args, tuple):
        args = (args,)
    maxiter = operator.index(maxiter)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


################################
# TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by
#  Alefeld, G. E. and Potra, F. A. and Shi, Yixun.
# See [APS1995] in the `toms748` docstring below.


def _within_tolerance(x, y, rtol, atol):
    diff = np.abs(x - y)
    z = np.abs(y)
    result = (diff <= (atol + rtol * z))
    return result
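
# Note: for scalars this is the same test as np.isclose(x, y, rtol=rtol,
# atol=atol): the tolerance is absolute (atol) plus relative to |y|.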


def _notclose(fs, rtol=_rtol, atol=_xtol):
    # Ensure not None, not 0, all finite, and not very close to each other
    notclosefvals = (
        all(fs) and all(np.isfinite(fs)) and
        not any(any(np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol))
                for i, _f in enumerate(fs[:-1])))
    return notclosefvals
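
# For example (illustrative): _notclose([1.0, 2.0, 3.0]) is True, while
# _notclose([1.0, 1.0 + 1e-15, 3.0]) is False because the first two values
# are within the default tolerances of each other.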


def _secant(xvals, fvals):
    """Perform a secant step, taking a little care"""
    # Secant has many "mathematically" equivalent formulations
    # x2 = x0 - (x1 - x0)/(f1 - f0) * f0
    #    = x1 - (x1 - x0)/(f1 - f0) * f1
    #    = (-x1 * f0 + x0 * f1) / (f1 - f0)
    #    = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
    #    = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
    x0, x1 = xvals[:2]
    f0, f1 = fvals[:2]
    if f0 == f1:
        return np.nan
    if np.abs(f1) > np.abs(f0):
        x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
    else:
        x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
    return x2
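
# Worked example (illustrative): for f(x) = x**2 - 1 sampled at x = 0 and
# x = 2, so fvals = [-1, 3], the secant line crosses zero at x = 0.5:
#     _secant([0, 2], [-1, 3])  # -> 0.5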


def _update_bracket(ab, fab, c, fc):
    """Update a bracket given (c, fc), return the discarded endpoint."""
    fa, fb = fab
    idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1)
    rx, rfx = ab[idx], fab[idx]
    fab[idx] = fc
    ab[idx] = c
    return rx, rfx
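
# Illustrative sketch: with ab = [0, 2], fab = [-1, 3] (a sign change) and a
# trial point c = 0.5 where fc = -0.75, fc shares the sign of f(a), so the
# left endpoint is replaced in place: ab becomes [0.5, 2] and the discarded
# pair (0, -1) is returned.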


def _compute_divided_differences(xvals, fvals, N=None, full=True,
                                 forward=True):
    """Return a matrix of divided differences for the xvals, fvals pairs

    DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i

    If full is False, just return the main diagonal (or last row):
      f[a], f[a, b] and f[a, b, c].
    If forward is False, return f[c], f[b, c], f[a, b, c]."""
    if full:
        if forward:
            xvals = np.asarray(xvals)
        else:
            xvals = np.array(xvals)[::-1]
        M = len(xvals)
        N = M if N is None else min(N, M)
        DD = np.zeros([M, N])
        DD[:, 0] = fvals[:]
        for i in range(1, N):
            DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) /
                         (xvals[i:] - xvals[:M - i]))
        return DD

    xvals = np.asarray(xvals)
    dd = np.array(fvals)
    row = np.array(fvals)
    idx2Use = (0 if forward else -1)
    dd[0] = fvals[idx2Use]
    for i in range(1, len(xvals)):
        denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]
        row = np.diff(row)[:] / denom
        dd[i] = row[idx2Use]
    return dd
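
# Worked example (illustrative): for f(x) = x**2 sampled at xvals = [0, 1, 3]
# (fvals = [0, 1, 9]), the forward divided differences are f[0] = 0,
# f[0, 1] = 1 and f[0, 1, 3] = 1 (the leading coefficient of x**2), so
#     _compute_divided_differences([0, 1, 3], [0.0, 1.0, 9.0], full=False)
# returns array([0., 1., 1.]).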


def _interpolated_poly(xvals, fvals, x):
    """Compute p(x) for the polynomial passing through the specified locations.

    Use Neville's algorithm to compute p(x) where p is the minimal degree
    polynomial passing through the points xvals, fvals"""
    xvals = np.asarray(xvals)
    N = len(xvals)
    Q = np.zeros([N, N])
    D = np.zeros([N, N])
    Q[:, 0] = fvals[:]
    D[:, 0] = fvals[:]
    for k in range(1, N):
        alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1]
        diffik = xvals[0:N - k] - xvals[k:N]
        Q[k:, k] = (xvals[k:] - x) / diffik * alpha
        D[k:, k] = (xvals[:N - k] - x) / diffik * alpha
    # Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root
    return np.sum(Q[-1, 1:]) + Q[-1, 0]
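
# Worked example (illustrative): the minimal-degree polynomial through
# (0, 0), (1, 1), (2, 4) is p(x) = x**2, so
#     _interpolated_poly([0, 1, 2], [0, 1, 4], 3)  # -> 9.0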


def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):
    """Inverse cubic interpolation f-values -> x-values

    Given four points (fa, a), (fb, b), (fc, c), (fd, d) with
    fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points
    and compute x=IP(0).
    """
    return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0)
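
# Illustrative check: for f(x) = x - 2 sampled at x = 0, 1, 3, 4 the f-values
# are -2, -1, 1, 2, the inverse interpolant is IP(y) = y + 2, and
#     _inverse_poly_zero(0, 1, 3, 4, -2, -1, 1, 2)  # -> 2.0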


def _newton_quadratic(ab, fab, d, fd, k):
    """Apply Newton-Raphson like steps, using divided differences to approximate f'

    ab is a real interval [a, b] containing a root,
    fab holds the real values of f(a), f(b),
    d is a real number outside [a, b],
    k is the number of steps to apply.
    """
    a, b = ab
    fa, fb = fab
    _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd],
                                           forward=True, full=False)

    # _P is the quadratic polynomial through the 3 points
    def _P(x):
        # Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b)
        return (A * (x - b) + B) * (x - a) + fa

    if A == 0:
        r = a - fa / B
    else:
        r = (a if np.sign(A) * np.sign(fa) > 0 else b)
    # Apply k Newton-Raphson steps to _P(x), starting from x=r
    for i in range(k):
        r1 = r - _P(r) / (B + A * (2 * r - a - b))
        if not (ab[0] < r1 < ab[1]):
            if (ab[0] < r < ab[1]):
                return r
            r = sum(ab) / 2.0
            break
        r = r1

    return r
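
# Sketch of the step above: the derivative of _P is
#     _P'(x) = B + A*((x - a) + (x - b)) = B + A*(2*x - a - b),
# which is the denominator used in each Newton iteration on _P.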


class TOMS748Solver(object):
    """Solve f(x, *args) == 0 using Algorithm 748 of Alefeld, Potra & Shi.
    """
    _MU = 0.5
    _K_MIN = 1
    _K_MAX = 100  # A very high value for real usage. Expect 1, 2, maybe 3.

    def __init__(self):
        self.f = None
        self.args = None
        self.function_calls = 0
        self.iterations = 0
        self.k = 2
        # ab=[a,b] is a global interval containing a root
        self.ab = [np.nan, np.nan]
        # fab is function values at a, b
        self.fab = [np.nan, np.nan]
        self.d = None
        self.fd = None
        self.e = None
        self.fe = None
        self.disp = False
        self.xtol = _xtol
        self.rtol = _rtol
        self.maxiter = _iter

    def configure(self, xtol, rtol, maxiter, disp, k):
        self.disp = disp
        self.xtol = xtol
        self.rtol = rtol
        self.maxiter = maxiter
        # Silently replace a low value of k with 1
        self.k = max(k, self._K_MIN)
        # Noisily replace a high value of k with self._K_MAX
        if self.k > self._K_MAX:
            msg = "toms748: Overriding k: ->%d" % self._K_MAX
            warnings.warn(msg, RuntimeWarning)
            self.k = self._K_MAX

    def _callf(self, x, error=True):
        """Call the user-supplied function, update book-keeping"""
        fx = self.f(x, *self.args)
        self.function_calls += 1
        if not np.isfinite(fx) and error:
            raise ValueError("Invalid function value: f(%f) -> %s " % (x, fx))
        return fx

    def get_result(self, x, flag=_ECONVERGED):
        r"""Package the result and statistics into a tuple."""
        return (x, self.function_calls, self.iterations, flag)

    def _update_bracket(self, c, fc):
        return _update_bracket(self.ab, self.fab, c, fc)

    def start(self, f, a, b, args=()):
        r"""Prepare for the iterations."""
        self.function_calls = 0
        self.iterations = 0

        self.f = f
        self.args = args
        self.ab[:] = [a, b]
        if not np.isfinite(a) or np.imag(a) != 0:
            raise ValueError("Invalid x value: %s " % (a))
        if not np.isfinite(b) or np.imag(b) != 0:
            raise ValueError("Invalid x value: %s " % (b))

        fa = self._callf(a)
        if not np.isfinite(fa) or np.imag(fa) != 0:
            raise ValueError("Invalid function value: f(%f) -> %s " % (a, fa))
        if fa == 0:
            return _ECONVERGED, a
        fb = self._callf(b)
        if not np.isfinite(fb) or np.imag(fb) != 0:
            raise ValueError("Invalid function value: f(%f) -> %s " % (b, fb))
        if fb == 0:
            return _ECONVERGED, b

        if np.sign(fb) * np.sign(fa) > 0:
            raise ValueError("a, b must bracket a root f(%e)=%e, f(%e)=%e " %
                             (a, fa, b, fb))
        self.fab[:] = [fa, fb]

        return _EINPROGRESS, sum(self.ab) / 2.0

    def get_status(self):
        """Determine the current status."""
        a, b = self.ab[:2]
        if _within_tolerance(a, b, self.rtol, self.xtol):
            return _ECONVERGED, sum(self.ab) / 2.0
        if self.iterations >= self.maxiter:
            return _ECONVERR, sum(self.ab) / 2.0
        return _EINPROGRESS, sum(self.ab) / 2.0

    def iterate(self):
        """Perform one step in the algorithm.

        Implements Algorithm 4.1 (k=1) or 4.2 (k=2) in [APS1995]
        """
        self.iterations += 1
        eps = np.finfo(float).eps
        d, fd, e, fe = self.d, self.fd, self.e, self.fe
        ab_width = self.ab[1] - self.ab[0]  # Need the start width below
        c = None

        for nsteps in range(2, self.k + 2):
            # If the f-values are sufficiently separated, perform an inverse
            # polynomial interpolation step. Otherwise, nsteps repeats of
            # an approximate Newton-Raphson step.
            if _notclose(self.fab + [fd, fe], rtol=0, atol=32 * eps):
                c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e,
                                        self.fab[0], self.fab[1], fd, fe)
                if self.ab[0] < c0 < self.ab[1]:
                    c = c0
            if c is None:
                c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps)

            fc = self._callf(c)
            if fc == 0:
                return _ECONVERGED, c

            # re-bracket
            e, fe = d, fd
            d, fd = self._update_bracket(c, fc)

        # u is the endpoint with the smallest f-value
        uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1)
        u, fu = self.ab[uix], self.fab[uix]

        _, A = _compute_divided_differences(self.ab, self.fab,
                                            forward=(uix == 0), full=False)
        c = u - 2 * fu / A
        if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]):
            c = sum(self.ab) / 2.0
        else:
            if np.isclose(c, u, rtol=eps, atol=0):
                # c didn't change (much).
                # Either because the f-values at the endpoints have vastly
                # differing magnitudes, or because the root is very close to
                # that endpoint
                frs = np.frexp(self.fab)[1]
                if frs[uix] < frs[1 - uix] - 50:  # Differ by more than 2**50
                    c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32
                else:
                    # Make a bigger adjustment, about the
                    # size of the requested tolerance.
                    mm = (1 if uix == 0 else -1)
                    adj = mm * np.abs(c) * self.rtol + mm * self.xtol
                    c = u + adj
                if not self.ab[0] < c < self.ab[1]:
                    c = sum(self.ab) / 2.0

        fc = self._callf(c)
        if fc == 0:
            return _ECONVERGED, c

        e, fe = d, fd
        d, fd = self._update_bracket(c, fc)

        # If the width of the new interval did not decrease enough, bisect
        if self.ab[1] - self.ab[0] > self._MU * ab_width:
            e, fe = d, fd
            z = sum(self.ab) / 2.0
            fz = self._callf(z)
            if fz == 0:
                return _ECONVERGED, z
            d, fd = self._update_bracket(z, fz)

        # Record d and e for next iteration
        self.d, self.fd = d, fd
        self.e, self.fe = e, fe

        status, xn = self.get_status()
        return status, xn

    def solve(self, f, a, b, args=(),
              xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True):
        r"""Solve f(x) = 0 given an interval containing a zero."""
        self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k)
        status, xn = self.start(f, a, b, args)
        if status == _ECONVERGED:
            return self.get_result(xn)

        # The first step only has two x-values.
        c = _secant(self.ab, self.fab)
        if not self.ab[0] < c < self.ab[1]:
            c = sum(self.ab) / 2.0
        fc = self._callf(c)
        if fc == 0:
            return self.get_result(c)

        self.d, self.fd = self._update_bracket(c, fc)
        self.e, self.fe = None, None
        self.iterations += 1

        while True:
            status, xn = self.iterate()
            if status == _ECONVERGED:
                return self.get_result(xn)
            if status == _ECONVERR:
                fmt = "Failed to converge after %d iterations, bracket is %s"
                if disp:
                    msg = fmt % (self.iterations + 1, self.ab)
                    raise RuntimeError(msg)
                return self.get_result(xn, _ECONVERR)
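
# A minimal usage sketch (illustrative; the solver class is internal, and
# toms748() below is the public entry point):
#     solver = TOMS748Solver()
#     x, function_calls, iterations, flag = solver.solve(
#         lambda x: x**2 - 1, 0, 2)
#     # x is close to 1.0 and flag == _ECONVERGED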


def toms748(f, a, b, args=(), k=1,
            xtol=_xtol, rtol=_rtol, maxiter=_iter,
            full_output=False, disp=True):
    """
    Find a zero using TOMS Algorithm 748 method.

    Implements the Algorithm 748 method of Alefeld, Potra and Shi to find a
    zero of the function `f` on the interval `[a, b]`, where `f(a)` and
    `f(b)` must have opposite signs.

    It uses a mixture of inverse cubic interpolation and
    "Newton-quadratic" steps [APS1995]_.

    Parameters
    ----------
    f : function
        Python function returning a scalar. The function :math:`f`
        must be continuous, and :math:`f(a)` and :math:`f(b)`
        have opposite signs.
    a : scalar
        lower boundary of the search interval
    b : scalar
        upper boundary of the search interval
    args : tuple, optional
        containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    k : int, optional
        The number of Newton quadratic steps to perform each
        iteration. ``k>=1``.
    xtol : scalar, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : scalar, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in the `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Approximate zero of `f`
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, ridder, bisect, newton
    fsolve : find zeros in N dimensions.

    Notes
    -----
    `f` must be continuous.
    Algorithm 748 with ``k=2`` is asymptotically the most efficient
    algorithm known for finding roots of a four times continuously
    differentiable function.
    In contrast with Brent's algorithm, which may only decrease the length of
    the enclosing bracket on the last step, Algorithm 748 decreases it each
    iteration with the same asymptotic efficiency as it finds the root.

    For easy statement of efficiency indices, assume that `f` has 4
    continuous derivatives.
    For ``k=1``, the convergence order is at least 2.7, and with about
    asymptotically 2 function evaluations per iteration, the efficiency
    index is approximately 1.65.
    For ``k=2``, the order is about 4.6 with asymptotically 3 function
    evaluations per iteration, and the efficiency index 1.66.
    For higher values of `k`, the efficiency index approaches
    the kth root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are
    usually appropriate.

    References
    ----------
    .. [APS1995]
       Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
       *Algorithm 748: Enclosing Zeros of Continuous Functions*,
       ACM Trans. Math. Softw. Volume 21(1995),
       doi = {10.1145/210089.210111}

    Examples
    --------
    >>> def f(x):
    ...     return (x**3 - 1)  # only one real root at x = 1

    >>> from scipy import optimize
    >>> root, results = optimize.toms748(f, 0, 2, full_output=True)
    >>> root
    1.0
    >>> results
          converged: True
               flag: 'converged'
     function_calls: 11
         iterations: 5
               root: 1.0
    """
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol / 4:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol / 4))
    maxiter = operator.index(maxiter)
    if maxiter < 1:
        raise ValueError("maxiter must be greater than 0")
    if not np.isfinite(a):
        raise ValueError("a is not finite %s" % a)
    if not np.isfinite(b):
        raise ValueError("b is not finite %s" % b)
    if a >= b:
        raise ValueError("a and b are not an interval [{}, {}]".format(a, b))
    if not k >= 1:
        raise ValueError("k too small (%s < 1)" % k)

    if not isinstance(args, tuple):
        args = (args,)
    solver = TOMS748Solver()
    result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol,
                          maxiter=maxiter, disp=disp)
    x, function_calls, iterations, flag = result
    return _results_select(full_output, (x, function_calls, iterations, flag))