# scipy/optimize/_zeros_py.py

import warnings
from collections import namedtuple
import operator
from . import _zeros
import numpy as np


_iter = 100
_xtol = 2e-12
_rtol = 4 * np.finfo(float).eps

__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748',
           'RootResults']

# Must agree with CONVERGED, SIGNERR, CONVERR, ... in zeros.h
_ECONVERGED = 0
_ESIGNERR = -1
_ECONVERR = -2
_EVALUEERR = -3
_EINPROGRESS = 1

CONVERGED = 'converged'
SIGNERR = 'sign error'
CONVERR = 'convergence error'
VALUEERR = 'value error'
INPROGRESS = 'No error'


flag_map = {_ECONVERGED: CONVERGED, _ESIGNERR: SIGNERR, _ECONVERR: CONVERR,
            _EVALUEERR: VALUEERR, _EINPROGRESS: INPROGRESS}


class RootResults:
    """Represents the root finding result.

    Attributes
    ----------
    root : float
        Estimated root location.
    iterations : int
        Number of iterations needed to find the root.
    function_calls : int
        Number of times the function was called.
    converged : bool
        True if the routine converged.
    flag : str
        Description of the cause of termination.

    """

    def __init__(self, root, iterations, function_calls, flag):
        self.root = root
        self.iterations = iterations
        self.function_calls = function_calls
        self.converged = flag == _ECONVERGED
        self.flag = None
        try:
            self.flag = flag_map[flag]
        except KeyError:
            self.flag = 'unknown error %d' % (flag,)

    def __repr__(self):
        attrs = ['converged', 'flag', 'function_calls',
                 'iterations', 'root']
        m = max(map(len, attrs)) + 1
        return '\n'.join([a.rjust(m) + ': ' + repr(getattr(self, a))
                          for a in attrs])
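

# A minimal sketch (illustrative only; ``_demo_root_results`` is a
# hypothetical name, not part of the scipy API) of how a numeric flag code
# is mapped onto the public fields:
def _demo_root_results():
    r = RootResults(root=1.0, iterations=12, function_calls=14,
                    flag=_ECONVERGED)
    # flag_map translates _ECONVERGED (0) into the string 'converged'
    assert r.converged and r.flag == 'converged'
    return repr(r)  # right-aligned attribute listing built by __repr__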

def results_c(full_output, r):
    if full_output:
        x, funcalls, iterations, flag = r
        results = RootResults(root=x,
                              iterations=iterations,
                              function_calls=funcalls,
                              flag=flag)
        return x, results
    else:
        return r


def _results_select(full_output, r):
    """Select from a tuple of (root, funccalls, iterations, flag)"""
    x, funcalls, iterations, flag = r
    if full_output:
        results = RootResults(root=x,
                              iterations=iterations,
                              function_calls=funcalls,
                              flag=flag)
        return x, results
    return x

def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
           fprime2=None, x1=None, rtol=0.0,
           full_output=False, disp=True):
    """
    Find a zero of a real or complex function using the Newton-Raphson
    (or secant or Halley's) method.

    Find a zero of the scalar-valued function `func` given a nearby scalar
    starting point `x0`.
    The Newton-Raphson method is used if the derivative `fprime` of `func`
    is provided, otherwise the secant method is used. If the second order
    derivative `fprime2` of `func` is also provided, then Halley's method is
    used.

    If `x0` is a sequence with more than one item, `newton` returns an array:
    the zeros of the function from each (scalar) starting point in `x0`.
    In this case, `func` must be vectorized to return a sequence or array of
    the same shape as its first argument. If `fprime` (`fprime2`) is given,
    then its return must also have the same shape: each element is the first
    (second) derivative of `func` with respect to its only variable evaluated
    at each element of its first argument.

    `newton` is for finding roots of a scalar-valued function of a single
    variable. For problems involving several variables, see `root`.

    Parameters
    ----------
    func : callable
        The function whose zero is wanted. It must be a function of a
        single variable of the form ``f(x,a,b,c...)``, where ``a,b,c...``
        are extra arguments that can be passed in the `args` parameter.
    x0 : float, sequence, or ndarray
        An initial estimate of the zero that should be somewhere near the
        actual zero. If not scalar, then `func` must be vectorized and return
        a sequence or array of the same shape as its first argument.
    fprime : callable, optional
        The derivative of the function when available and convenient. If it
        is None (default), then the secant method is used.
    args : tuple, optional
        Extra arguments to be used in the function call.
    tol : float, optional
        The allowable error of the zero value. If `func` is complex-valued,
        a larger `tol` is recommended as both the real and imaginary parts
        of `x` contribute to ``|x - x0|``.
    maxiter : int, optional
        Maximum number of iterations.
    fprime2 : callable, optional
        The second order derivative of the function when available and
        convenient. If it is None (default), then the normal Newton-Raphson
        or the secant method is used. If it is not None, then Halley's method
        is used.
    x1 : float, optional
        Another estimate of the zero that should be somewhere near the
        actual zero. Used if `fprime` is not provided.
    rtol : float, optional
        Tolerance (relative) for termination.
    full_output : bool, optional
        If `full_output` is False (default), the root is returned.
        If True and `x0` is scalar, the return value is ``(x, r)``, where ``x``
        is the root and ``r`` is a `RootResults` object.
        If True and `x0` is non-scalar, the return value is ``(x, converged,
        zero_der)`` (see Returns section for details).
    disp : bool, optional
        If True, raise a RuntimeError if the algorithm didn't converge, with
        the error message containing the number of iterations and current
        function value. Otherwise, the convergence status is recorded in a
        `RootResults` return object.
        Ignored if `x0` is not scalar.
        *Note: this has little to do with displaying; however, the `disp`
        keyword cannot be renamed for backwards compatibility.*

    Returns
    -------
    root : float, sequence, or ndarray
        Estimated location where the function is zero.
    r : `RootResults`, optional
        Present if ``full_output=True`` and `x0` is scalar.
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.
    converged : ndarray of bool, optional
        Present if ``full_output=True`` and `x0` is non-scalar.
        For vector functions, indicates which elements converged successfully.
    zero_der : ndarray of bool, optional
        Present if ``full_output=True`` and `x0` is non-scalar.
        For vector functions, indicates which elements had a zero derivative.

    See Also
    --------
    root_scalar : interface to root solvers for scalar functions
    root : interface to root solvers for multi-input, multi-output functions

    Notes
    -----
    The convergence rate of the Newton-Raphson method is quadratic,
    the Halley method is cubic, and the secant method is
    sub-quadratic. This means that if the function is well-behaved
    the actual error in the estimated zero after the nth iteration
    is approximately the square (cube for Halley) of the error
    after the (n-1)th step. However, the stopping criterion used
    here is the step size and there is no guarantee that a zero
    has been found. Consequently, the result should be verified.
    Safer algorithms are brentq, brenth, ridder, and bisect,
    but they all require that the root first be bracketed in an
    interval where the function changes sign. The brentq algorithm
    is recommended for general use in one dimensional problems
    when such an interval has been found.

    When `newton` is used with arrays, it is best suited for the following
    types of problems:

    * The initial guesses, `x0`, are all relatively the same distance from
      the roots.
    * Some or all of the extra arguments, `args`, are also arrays so that a
      class of similar problems can be solved together.
    * The size of the initial guesses, `x0`, is larger than O(100) elements.
      Otherwise, a naive loop may perform as well as or better than the
      vectorized call.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import optimize

    >>> def f(x):
    ...     return (x**3 - 1)  # only one real root at x = 1

    ``fprime`` is not provided, use the secant method:

    >>> root = optimize.newton(f, 1.5)
    >>> root
    1.0000000000000016
    >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x)
    >>> root
    1.0000000000000016

    Only ``fprime`` is provided, use the Newton-Raphson method:

    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2)
    >>> root
    1.0

    Both ``fprime2`` and ``fprime`` are provided, use Halley's method:

    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2,
    ...                        fprime2=lambda x: 6 * x)
    >>> root
    1.0

    When we want to find zeros for a set of related starting values and/or
    function parameters, we can provide both of those as an array of inputs:

    >>> f = lambda x, a: x**3 - a
    >>> fder = lambda x, a: 3 * x**2
    >>> rng = np.random.default_rng()
    >>> x = rng.standard_normal(100)
    >>> a = np.arange(-50, 50)
    >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ), maxiter=200)

    The above is the equivalent of solving for each value in ``(x, a)``
    separately in a for-loop, just faster:

    >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,),
    ...                             maxiter=200)
    ...             for x0, a0 in zip(x, a)]
    >>> np.allclose(vec_res, loop_res)
    True

    Plot the results found for all values of ``a``:

    >>> analytical_result = np.sign(a) * np.abs(a)**(1/3)
    >>> fig, ax = plt.subplots()
    >>> ax.plot(a, analytical_result, 'o')
    >>> ax.plot(a, vec_res, '.')
    >>> ax.set_xlabel('$a$')
    >>> ax.set_ylabel('$x$ where $f(x, a)=0$')
    >>> plt.show()

    """

    if tol <= 0:
        raise ValueError("tol too small (%g <= 0)" % tol)
    maxiter = operator.index(maxiter)
    if maxiter < 1:
        raise ValueError("maxiter must be greater than 0")
    if np.size(x0) > 1:
        return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
                             full_output)

    # Convert to float (don't use float(x0); this works also for complex x0)
    p0 = 1.0 * x0
    funcalls = 0
    if fprime is not None:
        # Newton-Raphson method
        for itr in range(maxiter):
            # first evaluate fval
            fval = func(p0, *args)
            funcalls += 1
            # If fval is 0, a root has been found, then terminate
            if fval == 0:
                return _results_select(
                    full_output, (p0, funcalls, itr, _ECONVERGED))
            fder = fprime(p0, *args)
            funcalls += 1
            if fder == 0:
                msg = "Derivative was zero."
                if disp:
                    msg += (
                        " Failed to converge after %d iterations, value is %s."
                        % (itr + 1, p0))
                    raise RuntimeError(msg)
                warnings.warn(msg, RuntimeWarning)
                return _results_select(
                    full_output, (p0, funcalls, itr + 1, _ECONVERR))
            newton_step = fval / fder
            if fprime2:
                fder2 = fprime2(p0, *args)
                funcalls += 1
                # Halley's method:
                #   newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder)
                # Only do it if the denominator stays close enough to 1.
                # Rationale: If 1-adj < 0, then Halley sends x in the
                # opposite direction to Newton. Doesn't happen if x is close
                # enough to the root.
                adj = newton_step * fder2 / fder / 2
                if np.abs(adj) < 1:
                    newton_step /= 1.0 - adj
            p = p0 - newton_step
            if np.isclose(p, p0, rtol=rtol, atol=tol):
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
            p0 = p
    else:
        # Secant method
        if x1 is not None:
            if x1 == x0:
                raise ValueError("x1 and x0 must be different")
            p1 = x1
        else:
            eps = 1e-4
            p1 = x0 * (1 + eps)
            p1 += (eps if p1 >= 0 else -eps)
        q0 = func(p0, *args)
        funcalls += 1
        q1 = func(p1, *args)
        funcalls += 1
        if abs(q1) < abs(q0):
            p0, p1, q0, q1 = p1, p0, q1, q0
        for itr in range(maxiter):
            if q1 == q0:
                if p1 != p0:
                    msg = "Tolerance of %s reached." % (p1 - p0)
                    if disp:
                        msg += (
                            " Failed to converge after %d iterations, value is %s."
                            % (itr + 1, p1))
                        raise RuntimeError(msg)
                    warnings.warn(msg, RuntimeWarning)
                p = (p1 + p0) / 2.0
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
            else:
                if abs(q1) > abs(q0):
                    p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1)
                else:
                    p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0)
            if np.isclose(p, p1, rtol=rtol, atol=tol):
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
            p0, q0 = p1, q1
            p1 = p
            q1 = func(p1, *args)
            funcalls += 1

    if disp:
        msg = ("Failed to converge after %d iterations, value is %s."
               % (itr + 1, p))
        raise RuntimeError(msg)

    return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR))
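

# A small sketch (``_demo_newton_full_output`` is a hypothetical name, not
# part of the module) of the scalar ``full_output`` path described above: a
# converged run returns the root together with a RootResults object.
def _demo_newton_full_output():
    root, r = newton(lambda x: x**2 - 2, x0=1.5,
                     fprime=lambda x: 2 * x, full_output=True)
    assert r.converged and abs(root - 2**0.5) < 1e-8
    return r.iterations, r.function_calls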

def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output):
    """
    A vectorized version of Newton, Halley, and secant methods for arrays.

    Do not use this method directly. This method is called from `newton`
    when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
    """
    # Explicitly copy `x0` as `p` will be modified inplace, but the
    # user's array should not be altered.
    p = np.array(x0, copy=True)

    failures = np.ones_like(p, dtype=bool)
    nz_der = np.ones_like(failures)
    if fprime is not None:
        # Newton-Raphson method
        for iteration in range(maxiter):
            # first evaluate fval
            fval = np.asarray(func(p, *args))
            # If all fval are 0, all roots have been found, then terminate
            if not fval.any():
                failures = fval.astype(bool)
                break
            fder = np.asarray(fprime(p, *args))
            nz_der = (fder != 0)
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                break
            # Newton step
            dp = fval[nz_der] / fder[nz_der]
            if fprime2 is not None:
                fder2 = np.asarray(fprime2(p, *args))
                dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der])
            # only update nonzero derivatives
            p = np.asarray(p, dtype=np.result_type(p, dp, np.float64))
            p[nz_der] -= dp
            failures[nz_der] = np.abs(dp) >= tol  # items not yet converged
            # stop iterating if there aren't any failures, not incl zero der
            if not failures[nz_der].any():
                break
    else:
        # Secant method
        dx = np.finfo(float).eps**0.33
        p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
        q0 = np.asarray(func(p, *args))
        q1 = np.asarray(func(p1, *args))
        active = np.ones_like(p, dtype=bool)
        for iteration in range(maxiter):
            nz_der = (q1 != q0)
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                p = (p1 + p) / 2.0
                break
            # Secant Step
            dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
            # only update nonzero derivatives
            p = np.asarray(p, dtype=np.result_type(p, p1, dp, np.float64))
            p[nz_der] = p1[nz_der] - dp
            active_zero_der = ~nz_der & active
            p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
            active &= nz_der  # don't assign zero derivatives again
            failures[nz_der] = np.abs(dp) >= tol  # not yet converged
            # stop iterating if there aren't any failures, not incl zero der
            if not failures[nz_der].any():
                break
            p1, p = p, p1
            q0 = q1
            q1 = np.asarray(func(p1, *args))

    zero_der = ~nz_der & failures  # don't include converged with zero-ders
    if zero_der.any():
        # Secant warnings
        if fprime is None:
            nonzero_dp = (p1 != p)
            # non-zero dp, but infinite newton step
            zero_der_nz_dp = (zero_der & nonzero_dp)
            if zero_der_nz_dp.any():
                rms = np.sqrt(
                    sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2)
                )
                warnings.warn(
                    'RMS of {:g} reached'.format(rms), RuntimeWarning)
        # Newton or Halley warnings
        else:
            all_or_some = 'all' if zero_der.all() else 'some'
            msg = '{:s} derivatives were zero'.format(all_or_some)
            warnings.warn(msg, RuntimeWarning)
    elif failures.any():
        all_or_some = 'all' if failures.all() else 'some'
        msg = '{0:s} failed to converge after {1:d} iterations'.format(
            all_or_some, maxiter
        )
        if failures.all():
            raise RuntimeError(msg)
        warnings.warn(msg, RuntimeWarning)

    if full_output:
        result = namedtuple('result', ('root', 'converged', 'zero_der'))
        p = result(p, ~failures, zero_der)

    return p
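

# Illustrative sketch (``_demo_array_newton`` is a hypothetical name): with
# a non-scalar `x0`, `newton` dispatches here, and ``full_output=True``
# yields the (root, converged, zero_der) namedtuple built above.
def _demo_array_newton():
    x0 = np.array([1.0, 2.0, 3.0])
    res = newton(lambda x: x**2 - 2.0, x0, fprime=lambda x: 2 * x,
                 full_output=True)
    assert res.converged.all() and not res.zero_der.any()
    return res.root  # every entry close to sqrt(2)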

def bisect(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find root of a function within an interval using bisection.

    Basic bisection routine to find a zero of the function `f` between the
    arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
    Slow but sure.

    Parameters
    ----------
    f : function
        Python function returning a number. `f` must be continuous, and
        f(a) and f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where x is the root, and r is
        a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in a `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    Examples
    --------

    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.bisect(f, 0, 2)
    >>> root
    1.0

    >>> root = optimize.bisect(f, -2, 0)
    >>> root
    -1.0

    See Also
    --------
    brentq, brenth, ridder, newton
    fixed_point : scalar fixed-point finder
    fsolve : n-dimensional root-finding

    """
    if not isinstance(args, tuple):
        args = (args,)
    maxiter = operator.index(maxiter)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)
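

# Sketch (illustrative; ``_demo_bisect_args`` is a hypothetical name) of the
# `args` mechanism documented above: the extras are appended after the root
# variable on every call, i.e. ``f(x, *args)``.
def _demo_bisect_args():
    def f(x, a):
        return x**2 - a
    root = bisect(f, 0, 2, args=(2.0,))
    assert abs(root - 2.0**0.5) < 1e-10
    return root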

def ridder(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in an interval using Ridder's method.

    Parameters
    ----------
    f : function
        Python function returning a number. f must be continuous, and f(a) and
        f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
        a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in any `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence.
        In particular, ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, bisect, newton : 1-D root-finding
    fixed_point : scalar fixed-point finder

    Notes
    -----
    Uses [Ridders1979]_ method to find a zero of the function `f` between the
    arguments `a` and `b`. Ridders' method is faster than bisection, but not
    generally as fast as the Brent routines. [Ridders1979]_ provides the
    classic description and source of the algorithm. A description can also be
    found in any recent edition of Numerical Recipes.

    The routine used here diverges slightly from standard presentations in
    order to be a bit more careful of tolerance.

    References
    ----------
    .. [Ridders1979]
       Ridders, C. F. J. "A New Algorithm for Computing a
       Single Root of a Real Continuous Function."
       IEEE Trans. Circuits Systems 26, 979-980, 1979.

    Examples
    --------

    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.ridder(f, 0, 2)
    >>> root
    1.0

    >>> root = optimize.ridder(f, -2, 0)
    >>> root
    -1.0
    """
    if not isinstance(args, tuple):
        args = (args,)
    maxiter = operator.index(maxiter)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)

def brentq(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in a bracketing interval using Brent's method.

    Uses the classic Brent's method to find a zero of the function `f` on
    the sign changing interval [a, b]. Generally considered the best of the
    rootfinding routines here. It is a safe version of the secant method that
    uses inverse quadratic extrapolation. Brent's method combines root
    bracketing, interval bisection, and inverse quadratic interpolation. It is
    sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973)
    claims convergence is guaranteed for functions computable within [a, b].

    [Brent1973]_ provides the classic description of the algorithm. Another
    description can be found in a recent edition of Numerical Recipes,
    including [PressEtal1992]_. A third description is at
    http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to
    understand the algorithm just by reading our code. Our code diverges a bit
    from standard presentations: we choose a different formula for the
    extrapolation step.

    Parameters
    ----------
    f : function
        Python function returning a number. The function :math:`f`
        must be continuous, and :math:`f(a)` and :math:`f(b)` must
        have opposite signs.
    a : scalar
        One end of the bracketing interval :math:`[a, b]`.
    b : scalar
        The other end of the bracketing interval :math:`[a, b]`.
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
        and ``rtol/2``. [Brent1973]_
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
        and ``rtol/2``. [Brent1973]_
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
        a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in any `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    Notes
    -----
    `f` must be continuous. f(a) and f(b) must have opposite signs.

    Related functions fall into several classes:

    multivariate local optimizers
      `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
    nonlinear least squares minimizer
      `leastsq`
    constrained multivariate optimizers
      `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
    global optimizers
      `basinhopping`, `brute`, `differential_evolution`
    local scalar minimizers
      `fminbound`, `brent`, `golden`, `bracket`
    N-D root-finding
      `fsolve`
    1-D root-finding
      `brenth`, `ridder`, `bisect`, `newton`
    scalar fixed-point finder
      `fixed_point`

    References
    ----------
    .. [Brent1973]
       Brent, R. P.,
       *Algorithms for Minimization Without Derivatives*.
       Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.

    .. [PressEtal1992]
       Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
       *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
       Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
       Section 9.3: "Van Wijngaarden-Dekker-Brent Method."

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.brentq(f, -2, 0)
    >>> root
    -1.0

    >>> root = optimize.brentq(f, 0, 2)
    >>> root
    1.0
    """
    if not isinstance(args, tuple):
        args = (args,)
    maxiter = operator.index(maxiter)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)
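

# Sketch (illustrative; ``_demo_brentq_tol`` is a hypothetical name) of the
# xtol/rtol termination criterion documented above: the returned root
# satisfies ``np.allclose(x, x0, atol=xtol, rtol=rtol)`` against the exact
# root.
def _demo_brentq_tol():
    exact = 2.0 ** 0.5
    x0 = brentq(lambda x: x**2 - 2, 0, 2, xtol=1e-6, rtol=1e-10)
    assert np.allclose(exact, x0, atol=1e-6, rtol=1e-10)
    return x0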

def brenth(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """Find a root of a function in a bracketing interval using Brent's
    method with hyperbolic extrapolation.

    A variation on the classic Brent routine to find a zero of the function f
    between the arguments a and b that uses hyperbolic extrapolation instead of
    inverse quadratic extrapolation. Bus & Dekker (1975) guarantee convergence
    for this method, claiming that the upper bound of function evaluations here
    is 4 or 5 times less than that for bisection.
    f(a) and f(b) cannot have the same signs. Generally, on a par with the
    brent routine, but not as heavily tested. It is a safe version of the
    secant method that uses hyperbolic extrapolation.
    The version here is by Chuck Harris, and implements Algorithm M of
    [BusAndDekker1975]_, where further details (convergence properties,
    additional remarks and such) can be found.

    Parameters
    ----------
    f : function
        Python function returning a number. f must be continuous, and f(a) and
        f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative. As with `brentq`, for nice
        functions the method will often satisfy the above condition
        with ``xtol/2`` and ``rtol/2``.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``. As with `brentq`, for nice functions
        the method will often satisfy the above condition with
        ``xtol/2`` and ``rtol/2``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
        a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in any `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers
    leastsq : nonlinear least squares minimizer
    fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers
    basinhopping, differential_evolution, brute : global optimizers
    fminbound, brent, golden, bracket : local scalar minimizers
    fsolve : N-D root-finding
    brentq, brenth, ridder, bisect, newton : 1-D root-finding
    fixed_point : scalar fixed-point finder

    References
    ----------
    .. [BusAndDekker1975]
       Bus, J. C. P., Dekker, T. J.,
       "Two Efficient Algorithms with Guaranteed Convergence for Finding a Zero
       of a Function", ACM Transactions on Mathematical Software, Vol. 1, Issue
       4, Dec. 1975, pp. 330-345. Section 3: "Algorithm M".
       :doi:`10.1145/355656.355659`

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.brenth(f, -2, 0)
    >>> root
    -1.0

    >>> root = optimize.brenth(f, 0, 2)
    >>> root
    1.0

    """
    if not isinstance(args, tuple):
        args = (args,)
    maxiter = operator.index(maxiter)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)

################################
# TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by
# Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
# See [1]


def _notclose(fs, rtol=_rtol, atol=_xtol):
    # Ensure not None, not 0, all finite, and not very close to each other
    notclosefvals = (
            all(fs) and all(np.isfinite(fs)) and
            not any(any(np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol))
                    for i, _f in enumerate(fs[:-1])))
    return notclosefvals

def _secant(xvals, fvals):
    """Perform a secant step, taking a little care"""
    # Secant has many "mathematically" equivalent formulations
    # x2 = x0 - (x1 - x0)/(f1 - f0) * f0
    #    = x1 - (x1 - x0)/(f1 - f0) * f1
    #    = (-x1 * f0 + x0 * f1) / (f1 - f0)
    #    = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
    #    = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
    x0, x1 = xvals[:2]
    f0, f1 = fvals[:2]
    if f0 == f1:
        return np.nan
    if np.abs(f1) > np.abs(f0):
        x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
    else:
        x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
    return x2
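

# Small check (illustrative; ``_demo_secant`` is a hypothetical name) of the
# branch logic above: both algebraic forms of the secant step give the same
# x2; the helper picks the one that divides by the larger |f| for safety.
def _demo_secant():
    x0, x1, f0, f1 = 1.0, 2.0, -1.0, 3.0
    x2 = _secant([x0, x1], [f0, f1])
    # classic form: x1 - f1 * (x1 - x0) / (f1 - f0)
    assert np.isclose(x2, x1 - f1 * (x1 - x0) / (f1 - f0))
    return x2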

def _update_bracket(ab, fab, c, fc):
    """Update a bracket given (c, fc), return the discarded endpoints."""
    fa, fb = fab
    idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1)
    rx, rfx = ab[idx], fab[idx]
    fab[idx] = fc
    ab[idx] = c
    return rx, rfx
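

# Sketch of the bracket update (illustrative; ``_demo_update_bracket`` is a
# hypothetical name): the endpoint whose f-value shares the sign of fc is
# replaced, and the discarded endpoint is returned.
def _demo_update_bracket():
    ab, fab = [0.0, 2.0], [-2.0, 2.0]   # f(x) = x**2 - 2 on [0, 2]
    c, fc = 1.0, -1.0                   # fc < 0, same sign as f(a)
    old = _update_bracket(ab, fab, c, fc)
    assert ab == [1.0, 2.0] and old == (0.0, -2.0)
    return ab, fab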

def _compute_divided_differences(xvals, fvals, N=None, full=True,
                                 forward=True):
    """Return a matrix of divided differences for the xvals, fvals pairs

    DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i

    If full is False, just return the main diagonal (or last row):
    f[a], f[a, b] and f[a, b, c].
    If forward is False, return f[c], f[b, c], f[a, b, c]."""
    if full:
        if forward:
            xvals = np.asarray(xvals)
        else:
            xvals = np.array(xvals)[::-1]
        M = len(xvals)
        N = M if N is None else min(N, M)
        DD = np.zeros([M, N])
        DD[:, 0] = fvals[:]
        for i in range(1, N):
            DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) /
                         (xvals[i:] - xvals[:M - i]))
        return DD

    xvals = np.asarray(xvals)
    dd = np.array(fvals)
    row = np.array(fvals)
    idx2Use = (0 if forward else -1)
    dd[0] = fvals[idx2Use]
    for i in range(1, len(xvals)):
        denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]
        row = np.diff(row)[:] / denom
        dd[i] = row[idx2Use]
    return dd
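

# Quick check (illustrative; ``_demo_divided_differences`` is a hypothetical
# name) of the helper above: for f(x) = x**2, the divided differences are
# f[a] = a**2, f[a, b] = a + b, and f[a, b, c] = 1 exactly.
def _demo_divided_differences():
    xvals = [1.0, 2.0, 4.0]
    fvals = [x**2 for x in xvals]
    f0, f01, f012 = _compute_divided_differences(xvals, fvals,
                                                 forward=True, full=False)
    assert (f0, f01, f012) == (1.0, 3.0, 1.0)
    return f0, f01, f012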

def _interpolated_poly(xvals, fvals, x):
    """Compute p(x) for the polynomial passing through the specified locations.

    Use Neville's algorithm to compute p(x) where p is the minimal degree
    polynomial passing through the points xvals, fvals"""
    xvals = np.asarray(xvals)
    N = len(xvals)
    Q = np.zeros([N, N])
    D = np.zeros([N, N])
    Q[:, 0] = fvals[:]
    D[:, 0] = fvals[:]
    for k in range(1, N):
        alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1]
        diffik = xvals[0:N - k] - xvals[k:N]
        Q[k:, k] = (xvals[k:] - x) / diffik * alpha
        D[k:, k] = (xvals[:N - k] - x) / diffik * alpha
    # Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root
    return np.sum(Q[-1, 1:]) + Q[-1, 0]
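

# Sketch verifying Neville's algorithm above (``_demo_interpolated_poly`` is
# a hypothetical name): four samples determine a cubic exactly, so p(x)
# reproduces the cubic at any point.
def _demo_interpolated_poly():
    poly = lambda x: x**3 - 2 * x + 1
    xvals = [0.0, 1.0, 2.0, 3.0]
    fvals = [poly(x) for x in xvals]
    assert np.isclose(_interpolated_poly(xvals, fvals, 1.5), poly(1.5))
    return _interpolated_poly(xvals, fvals, 1.5)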

def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):
    """Inverse cubic interpolation f-values -> x-values

    Given four points (fa, a), (fb, b), (fc, c), (fd, d) with
    fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points
    and compute x=IP(0).
    """
    return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0)

def _newton_quadratic(ab, fab, d, fd, k):
    """Apply Newton-Raphson like steps, using divided differences to approximate f'

    ab is a real interval [a, b] containing a root,
    fab holds the real values of f(a), f(b),
    d is a real number outside [a, b],
    k is the number of steps to apply
    """
    a, b = ab
    fa, fb = fab
    _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd],
                                           forward=True, full=False)

    # _P is the quadratic polynomial through the 3 points
    def _P(x):
        # Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b)
        return (A * (x - b) + B) * (x - a) + fa

    if A == 0:
        r = a - fa / B
    else:
        r = (a if np.sign(A) * np.sign(fa) > 0 else b)
    # Apply k Newton-Raphson steps to _P(x), starting from x=r
    for i in range(k):
        r1 = r - _P(r) / (B + A * (2 * r - a - b))
        if not (ab[0] < r1 < ab[1]):
            if (ab[0] < r < ab[1]):
                return r
            r = sum(ab) / 2.0
            break
        r = r1

    return r
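

# Sketch of the Newton-quadratic step above (``_demo_newton_quadratic`` is a
# hypothetical name): when f itself is a quadratic, the interpolating
# polynomial _P coincides with f, so a couple of steps land near the
# bracketed root.
def _demo_newton_quadratic():
    f = lambda x: x**2 - 2
    ab, fab = [1.0, 2.0], [f(1.0), f(2.0)]
    d = 3.0                       # point outside [a, b]
    r = _newton_quadratic(ab, fab, d, f(d), k=2)
    assert abs(r - 2**0.5) < 1e-2
    return r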

class TOMS748Solver:
    """Solve f(x, *args) == 0 using Algorithm748 of Alefeld, Potra & Shi.
    """
    _MU = 0.5
    _K_MIN = 1
    _K_MAX = 100  # A very high value for real usage. Expect 1, 2, maybe 3.

    def __init__(self):
        self.f = None
        self.args = None
        self.function_calls = 0
        self.iterations = 0
        self.k = 2
        # ab=[a,b] is a global interval containing a root
        self.ab = [np.nan, np.nan]
        # fab is function values at a, b
        self.fab = [np.nan, np.nan]
        self.d = None
        self.fd = None
        self.e = None
        self.fe = None
        self.disp = False
        self.xtol = _xtol
        self.rtol = _rtol
        self.maxiter = _iter

    def configure(self, xtol, rtol, maxiter, disp, k):
        self.disp = disp
        self.xtol = xtol
        self.rtol = rtol
        self.maxiter = maxiter
        # Silently replace a low value of k with 1
        self.k = max(k, self._K_MIN)
        # Noisily replace a high value of k with self._K_MAX
        if self.k > self._K_MAX:
            msg = "toms748: Overriding k: ->%d" % self._K_MAX
            warnings.warn(msg, RuntimeWarning)
            self.k = self._K_MAX

    def _callf(self, x, error=True):
        """Call the user-supplied function, update book-keeping"""
        fx = self.f(x, *self.args)
        self.function_calls += 1
        if not np.isfinite(fx) and error:
            raise ValueError("Invalid function value: f(%f) -> %s " % (x, fx))
        return fx

    def get_result(self, x, flag=_ECONVERGED):
        r"""Package the result and statistics into a tuple."""
        return (x, self.function_calls, self.iterations, flag)

    def _update_bracket(self, c, fc):
        return _update_bracket(self.ab, self.fab, c, fc)

    def start(self, f, a, b, args=()):
        r"""Prepare for the iterations."""
        self.function_calls = 0
        self.iterations = 0

        self.f = f
        self.args = args
        self.ab[:] = [a, b]
        if not np.isfinite(a) or np.imag(a) != 0:
            raise ValueError("Invalid x value: %s " % (a))
        if not np.isfinite(b) or np.imag(b) != 0:
            raise ValueError("Invalid x value: %s " % (b))

        fa = self._callf(a)
        if not np.isfinite(fa) or np.imag(fa) != 0:
            raise ValueError("Invalid function value: f(%f) -> %s " % (a, fa))
        if fa == 0:
            return _ECONVERGED, a
        fb = self._callf(b)
        if not np.isfinite(fb) or np.imag(fb) != 0:
            raise ValueError("Invalid function value: f(%f) -> %s " % (b, fb))
        if fb == 0:
            return _ECONVERGED, b

        if np.sign(fb) * np.sign(fa) > 0:
            raise ValueError("a, b must bracket a root f(%e)=%e, f(%e)=%e " %
                             (a, fa, b, fb))
        self.fab[:] = [fa, fb]

        return _EINPROGRESS, sum(self.ab) / 2.0

    def get_status(self):
        """Determine the current status."""
        a, b = self.ab[:2]
        if np.isclose(a, b, rtol=self.rtol, atol=self.xtol):
            return _ECONVERGED, sum(self.ab) / 2.0
        if self.iterations >= self.maxiter:
            return _ECONVERR, sum(self.ab) / 2.0
        return _EINPROGRESS, sum(self.ab) / 2.0

    def iterate(self):
        """Perform one step in the algorithm.

        Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995]
        """
        self.iterations += 1
        eps = np.finfo(float).eps
        d, fd, e, fe = self.d, self.fd, self.e, self.fe
        ab_width = self.ab[1] - self.ab[0]  # Need the start width below
        c = None

        for nsteps in range(2, self.k + 2):
            # If the f-values are sufficiently separated, perform an inverse
            # polynomial interpolation step. Otherwise, nsteps repeats of
            # an approximate Newton-Raphson step.
            if _notclose(self.fab + [fd, fe], rtol=0, atol=32 * eps):
                c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e,
                                        self.fab[0], self.fab[1], fd, fe)
                if self.ab[0] < c0 < self.ab[1]:
                    c = c0
            if c is None:
                c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps)

            fc = self._callf(c)
            if fc == 0:
                return _ECONVERGED, c

            # re-bracket
            e, fe = d, fd
            d, fd = self._update_bracket(c, fc)

        # u is the endpoint with the smallest f-value
        uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1)
        u, fu = self.ab[uix], self.fab[uix]

        _, A = _compute_divided_differences(self.ab, self.fab,
                                            forward=(uix == 0), full=False)
        c = u - 2 * fu / A
        if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]):
            c = sum(self.ab) / 2.0
        else:
            if np.isclose(c, u, rtol=eps, atol=0):
                # c didn't change (much).
                # Either because the f-values at the endpoints have vastly
                # differing magnitudes, or because the root is very close to
                # that endpoint
                frs = np.frexp(self.fab)[1]
                if frs[uix] < frs[1 - uix] - 50:  # Differ by more than 2**50
                    c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32
                else:
                    # Make a bigger adjustment, about the
                    # size of the requested tolerance.
                    mm = (1 if uix == 0 else -1)
                    adj = mm * np.abs(c) * self.rtol + mm * self.xtol
                    c = u + adj
                if not self.ab[0] < c < self.ab[1]:
                    c = sum(self.ab) / 2.0

        fc = self._callf(c)
        if fc == 0:
            return _ECONVERGED, c

        e, fe = d, fd
        d, fd = self._update_bracket(c, fc)

        # If the width of the new interval did not decrease enough, bisect
        if self.ab[1] - self.ab[0] > self._MU * ab_width:
            e, fe = d, fd
            z = sum(self.ab) / 2.0
            fz = self._callf(z)
            if fz == 0:
                return _ECONVERGED, z
            d, fd = self._update_bracket(z, fz)

        # Record d and e for next iteration
        self.d, self.fd = d, fd
        self.e, self.fe = e, fe

        status, xn = self.get_status()
        return status, xn

    def solve(self, f, a, b, args=(),
              xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True):
        r"""Solve f(x) = 0 given an interval containing a zero."""
        self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k)
        status, xn = self.start(f, a, b, args)
        if status == _ECONVERGED:
            return self.get_result(xn)

        # The first step only has two x-values.
        c = _secant(self.ab, self.fab)
        if not self.ab[0] < c < self.ab[1]:
            c = sum(self.ab) / 2.0
        fc = self._callf(c)
        if fc == 0:
            return self.get_result(c)

        self.d, self.fd = self._update_bracket(c, fc)
        self.e, self.fe = None, None
        self.iterations += 1

        while True:
            status, xn = self.iterate()
            if status == _ECONVERGED:
                return self.get_result(xn)
            if status == _ECONVERR:
                fmt = "Failed to converge after %d iterations, bracket is %s"
                if disp:
                    msg = fmt % (self.iterations + 1, self.ab)
                    raise RuntimeError(msg)
                return self.get_result(xn, _ECONVERR)
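

# Sketch of driving the solver object directly, as the `toms748` wrapper
# below does (``_demo_toms748_solver`` is a hypothetical name; prefer the
# public `toms748` function): solve() configures, seeds with a secant step,
# then calls iterate() until convergence.
def _demo_toms748_solver():
    solver = TOMS748Solver()
    result = solver.solve(lambda x: x**2 - 2, 1, 2, k=2)
    x, function_calls, iterations, flag = result
    assert flag == _ECONVERGED and abs(x - 2**0.5) < 1e-10
    return x, function_calls, iterations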

def toms748(f, a, b, args=(), k=1,
            xtol=_xtol, rtol=_rtol, maxiter=_iter,
            full_output=False, disp=True):
    """
    Find a zero using TOMS Algorithm 748 method.

    Implements the Algorithm 748 method of Alefeld, Potra and Shi to find a
    zero of the function `f` on the interval `[a, b]`, where `f(a)` and
    `f(b)` must have opposite signs.

    It uses a mixture of inverse cubic interpolation and
    "Newton-quadratic" steps [APS1995]_.

    Parameters
    ----------
    f : function
        Python function returning a scalar. The function :math:`f`
        must be continuous, and :math:`f(a)` and :math:`f(b)`
        have opposite signs.
    a : scalar
        Lower boundary of the search interval.
    b : scalar
        Upper boundary of the search interval.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    k : int, optional
        The number of Newton quadratic steps to perform each
        iteration. ``k>=1``.
    xtol : scalar, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : scalar, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
        a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in the `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Approximate zero of `f`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, ridder, bisect, newton
    fsolve : find zeros in N dimensions.

    Notes
    -----
    `f` must be continuous.
    Algorithm 748 with ``k=2`` is asymptotically the most efficient
    algorithm known for finding roots of a four times continuously
    differentiable function.
    In contrast with Brent's algorithm, which may only decrease the length of
    the enclosing bracket on the last step, Algorithm 748 decreases it each
    iteration with the same asymptotic efficiency as it finds the root.

    For easy statement of efficiency indices, assume that `f` has 4
    continuous derivatives.
    For ``k=1``, the convergence order is at least 2.7, and with about
    asymptotically 2 function evaluations per iteration, the efficiency
    index is approximately 1.65.
    For ``k=2``, the order is about 4.6 with asymptotically 3 function
    evaluations per iteration, and the efficiency index 1.66.
    For higher values of `k`, the efficiency index approaches
    the kth root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are
    usually appropriate.

    References
    ----------
    .. [APS1995]
       Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
       *Algorithm 748: Enclosing Zeros of Continuous Functions*,
       ACM Trans. Math. Softw. Vol. 21, No. 3 (1995), pp. 327-344.
       :doi:`10.1145/210089.210111`

    Examples
    --------
    >>> def f(x):
    ...     return (x**3 - 1)  # only one real root at x = 1

    >>> from scipy import optimize
    >>> root, results = optimize.toms748(f, 0, 2, full_output=True)
    >>> root
    1.0
    >>> results
          converged: True
               flag: 'converged'
     function_calls: 11
         iterations: 5
               root: 1.0
    """
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol / 4:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol / 4))
    maxiter = operator.index(maxiter)
    if maxiter < 1:
        raise ValueError("maxiter must be greater than 0")
    if not np.isfinite(a):
        raise ValueError("a is not finite %s" % a)
    if not np.isfinite(b):
        raise ValueError("b is not finite %s" % b)
    if a >= b:
        raise ValueError("a and b are not an interval [{}, {}]".format(a, b))
    if not k >= 1:
        raise ValueError("k too small (%s < 1)" % k)

    if not isinstance(args, tuple):
        args = (args,)
    solver = TOMS748Solver()
    result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol,
                          maxiter=maxiter, disp=disp)
    x, function_calls, iterations, flag = result
    return _results_select(full_output, (x, function_calls, iterations, flag))
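

# Sketch of the `k` trade-off discussed in the Notes above
# (``_demo_toms748_k`` is a hypothetical name): higher k does more
# Newton-quadratic work per iteration, typically lowering the iteration
# count at the cost of extra function evaluations per step.
def _demo_toms748_k():
    f = lambda x: x**3 - 1
    _, r1 = toms748(f, 0, 2, k=1, full_output=True)
    _, r2 = toms748(f, 0, 2, k=2, full_output=True)
    assert r1.converged and r2.converged
    return r1.iterations, r2.iterations  # both converge; counts may differ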