Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/scipy/integrate/_ivp/radau.py: 12%

261 statements  

coverage.py v7.3.2, created at 2023-12-12 06:31 +0000

import numpy as np
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse import csc_matrix, issparse, eye
from scipy.sparse.linalg import splu
from scipy.optimize._numdiff import group_columns
from .common import (validate_max_step, validate_tol, select_initial_step,
                     norm, num_jac, EPS, warn_extraneous,
                     validate_first_step)
from .base import OdeSolver, DenseOutput

S6 = 6 ** 0.5

# Butcher tableau. A is not used directly, see below.
C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1])
E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3

# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue
# and a complex conjugate pair. They are written below.
MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3)
MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3))
              - 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6)))

# These are transformation matrices.
T = np.array([
    [0.09443876248897524, -0.14125529502095421, 0.03002919410514742],
    [0.25021312296533332, 0.20412935229379994, -0.38294211275726192],
    [1, 1, 0]])
TI = np.array([
    [4.17871859155190428, 0.32768282076106237, 0.52337644549944951],
    [-4.17871859155190428, -0.32768282076106237, 0.47662355450055044],
    [0.50287263494578682, -2.57192694985560522, 0.59603920482822492]])
# These linear combinations are used in the algorithm.
TI_REAL = TI[0]
TI_COMPLEX = TI[1] + 1j * TI[2]
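

# Added illustration (not part of the original source): a quick sanity check
# that TI is (numerically) the inverse of T, so the changes of variables
# W = TI.dot(Z) and Z = T.dot(W) used below are consistent. The helper name is
# hypothetical and the function is never called by the solver.
def _check_transformation_matrices():
    assert np.allclose(T.dot(TI), np.eye(3))
    assert np.allclose(TI.dot(T), np.eye(3))
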


# Interpolator coefficients.
P = np.array([
    [13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6],
    [13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6],
    [1/3, -8/3, 10/3]])


NEWTON_MAXITER = 6  # Maximum number of Newton iterations.
MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.


def solve_collocation_system(fun, t, y, h, Z0, scale, tol,
                             LU_real, LU_complex, solve_lu):
    """Solve the collocation system.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    h : float
        Step to try.
    Z0 : ndarray, shape (3, n)
        Initial guess for the solution. It determines new values of `y` at
        ``t + h * C`` as ``y + Z0``, where ``C`` are the Radau method
        constants.
    scale : ndarray, shape (n,)
        Problem tolerance scale, i.e. ``rtol * abs(y) + atol``.
    tol : float
        Tolerance to which the system must be solved. This value is compared
        with the error normalized by `scale`.
    LU_real, LU_complex
        LU decompositions of the system Jacobians.
    solve_lu : callable
        Callable which solves a linear system given a LU decomposition. The
        signature is ``solve_lu(LU, b)``.

    Returns
    -------
    converged : bool
        Whether iterations converged.
    n_iter : int
        Number of completed iterations.
    Z : ndarray, shape (3, n)
        Found solution.
    rate : float
        The rate of convergence.
    """
    n = y.shape[0]
    M_real = MU_REAL / h
    M_complex = MU_COMPLEX / h

    W = TI.dot(Z0)
    Z = Z0

    F = np.empty((3, n))
    ch = h * C

    dW_norm_old = None
    dW = np.empty_like(W)
    converged = False
    rate = None
    for k in range(NEWTON_MAXITER):
        for i in range(3):
            F[i] = fun(t + ch[i], y + Z[i])

        if not np.all(np.isfinite(F)):
            break

        f_real = F.T.dot(TI_REAL) - M_real * W[0]
        f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2])

        dW_real = solve_lu(LU_real, f_real)
        dW_complex = solve_lu(LU_complex, f_complex)

        dW[0] = dW_real
        dW[1] = dW_complex.real
        dW[2] = dW_complex.imag

        dW_norm = norm(dW / scale)
        if dW_norm_old is not None:
            rate = dW_norm / dW_norm_old

        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)):
            break

        W += dW
        Z = T.dot(W)

        if (dW_norm == 0 or
                rate is not None and rate / (1 - rate) * dW_norm < tol):
            converged = True
            break

        dW_norm_old = dW_norm

    return converged, k + 1, Z, rate
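

# Added illustration (not part of the original source): the non-convergence
# break in solve_collocation_system uses a geometric-series bound. If the
# Newton iteration contracts with rate ``rate``, then the error left after the
# remaining (NEWTON_MAXITER - k) iterations is roughly
# rate**(NEWTON_MAXITER - k) / (1 - rate) * dW_norm; once that estimate
# already exceeds ``tol`` there is no point in iterating further. A hedged
# numeric sketch (names are hypothetical):
def _newton_bound_example(rate=0.5, dW_norm=1e-2, iters_left=4, tol=1e-3):
    bound = rate ** iters_left / (1 - rate) * dW_norm
    return bound > tol  # 0.5**4 / 0.5 * 1e-2 = 1.25e-3 > 1e-3, so give up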



def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old):
    """Predict by which factor to increase/decrease the step size.

    The algorithm is described in [1]_.

    Parameters
    ----------
    h_abs, h_abs_old : float
        Current and previous values of the step size, `h_abs_old` can be None
        (see Notes).
    error_norm, error_norm_old : float
        Current and previous values of the error norm, `error_norm_old` can
        be None (see Notes).

    Returns
    -------
    factor : float
        Predicted factor.

    Notes
    -----
    If `h_abs_old` and `error_norm_old` are both not None then a two-step
    algorithm is used, otherwise a one-step algorithm is used.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett and G. Wanner, "Solving Ordinary
           Differential Equations II: Stiff and Differential-Algebraic
           Problems", Sec. IV.8.
    """
    if error_norm_old is None or h_abs_old is None or error_norm == 0:
        multiplier = 1
    else:
        multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25

    with np.errstate(divide='ignore'):
        factor = min(1, multiplier) * error_norm ** -0.25

    return factor
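

# Added illustration (not part of the original source): with no step history
# (the one-step branch), predict_factor reduces to ``error_norm ** -0.25``.
# For example, an error norm of 16 suggests halving the step, before the
# caller applies the safety factor and the MIN_FACTOR/MAX_FACTOR clips.
def _predict_factor_example():
    # 16 ** -0.25 == 0.5
    return predict_factor(h_abs=0.1, h_abs_old=None,
                          error_norm=16.0, error_norm_old=None)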



class Radau(OdeSolver):
    """Implicit Runge-Kutta method of Radau IIA family of order 5.

    The implementation follows [1]_. The error is controlled with a
    third-order accurate embedded formula. A cubic polynomial which satisfies
    the collocation conditions is used for the dense output.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar, and there are two options for the ndarray
        ``y``: It can either have shape (n,); then ``fun`` must return
        array_like with shape (n,). Alternatively it can have shape (n, k);
        then ``fun`` must return an array_like with shape (n, k), i.e., each
        column corresponds to a single column in ``y``. The choice between the
        two options is determined by the `vectorized` argument (see below).
        The vectorized implementation allows a faster approximation of the
        Jacobian by finite differences (required for this solver).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is
        not bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : {None, array_like, sparse_matrix, callable}, optional
        Jacobian matrix of the right-hand side of the system with respect to
        y, required by this method. The Jacobian matrix has shape (n, n) and
        its element (i, j) is equal to ``d f_i / d y_j``.
        There are three ways to define the Jacobian:

            * If array_like or sparse_matrix, the Jacobian is assumed to
              be constant.
            * If callable, the Jacobian is assumed to depend on both
              t and y; it will be called as ``jac(t, y)`` as necessary.
              For the 'Radau' and 'BDF' methods, the return value might be a
              sparse matrix.
            * If None (default), the Jacobian will be approximated by
              finite differences.

        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines a sparsity structure of the Jacobian matrix for a
        finite-difference approximation. Its shape must be (n, n). This
        argument is ignored if `jac` is not `None`. If the Jacobian has only a
        few non-zero elements in *each* row, providing the sparsity structure
        will greatly speed up the computations [2]_. A zero entry means that a
        corresponding element in the Jacobian is always zero. If None
        (default), the Jacobian is assumed to be dense.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.

    References
    ----------
    .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
           Stiff and Differential-Algebraic Problems", Sec. IV.8.
    .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13, pp. 117-120, 1974.
    """
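    # Added usage note (not part of the original source): in practice this
    # class is driven through ``scipy.integrate.solve_ivp`` with
    # ``method='Radau'`` rather than instantiated directly. A minimal, hedged
    # sketch with an analytic Jacobian might look like:
    #
    #     from scipy.integrate import solve_ivp
    #
    #     def rhs(t, y):
    #         return [-51 * y[0] + 50 * y[1], 50 * y[0] - 51 * y[1]]
    #
    #     def jac(t, y):
    #         return [[-51, 50], [50, -51]]
    #
    #     sol = solve_ivp(rhs, (0, 10), [1.0, 0.0], method='Radau', jac=jac,
    #                     rtol=1e-6, atol=1e-9, dense_output=True)
    #     y_mid = sol.sol(5.0)  # interpolant backed by RadauDenseOutput
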

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
                 vectorized=False, first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized)
        self.y_old = None
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        self.f = self.fun(self.t, self.y)
        # Select initial step assuming the same order which is used to control
        # the error.
        if first_step is None:
            self.h_abs = select_initial_step(
                self.fun, self.t, self.y, self.f, self.direction,
                3, self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.h_abs_old = None
        self.error_norm_old = None

        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
        self.sol = None

        self.jac_factor = None
        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
        if issparse(self.J):
            def lu(A):
                self.nlu += 1
                return splu(A)

            def solve_lu(LU, b):
                return LU.solve(b)

            I = eye(self.n, format='csc')
        else:
            def lu(A):
                self.nlu += 1
                return lu_factor(A, overwrite_a=True)

            def solve_lu(LU, b):
                return lu_solve(LU, b, overwrite_b=True)

            I = np.identity(self.n)

        self.lu = lu
        self.solve_lu = solve_lu
        self.I = I

        self.current_jac = True
        self.LU_real = None
        self.LU_complex = None
        self.Z = None


    def _validate_jac(self, jac, sparsity):
        t0 = self.t
        y0 = self.y

        if jac is None:
            if sparsity is not None:
                if issparse(sparsity):
                    sparsity = csc_matrix(sparsity)
                groups = group_columns(sparsity)
                sparsity = (sparsity, groups)

            def jac_wrapped(t, y, f):
                self.njev += 1
                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
                                             self.atol, self.jac_factor,
                                             sparsity)
                return J
            J = jac_wrapped(t0, y0, self.f)
        elif callable(jac):
            J = jac(t0, y0)
            self.njev = 1
            if issparse(J):
                J = csc_matrix(J)

                def jac_wrapped(t, y, _=None):
                    self.njev += 1
                    return csc_matrix(jac(t, y), dtype=float)

            else:
                J = np.asarray(J, dtype=float)

                def jac_wrapped(t, y, _=None):
                    self.njev += 1
                    return np.asarray(jac(t, y), dtype=float)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
        else:
            if issparse(jac):
                J = csc_matrix(jac)
            else:
                J = np.asarray(jac, dtype=float)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
            jac_wrapped = None

        return jac_wrapped, J


    def _step_impl(self):
        t = self.t
        y = self.y
        f = self.f

        max_step = self.max_step
        atol = self.atol
        rtol = self.rtol

        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        if self.h_abs > max_step:
            h_abs = max_step
            h_abs_old = None
            error_norm_old = None
        elif self.h_abs < min_step:
            h_abs = min_step
            h_abs_old = None
            error_norm_old = None
        else:
            h_abs = self.h_abs
            h_abs_old = self.h_abs_old
            error_norm_old = self.error_norm_old

        J = self.J
        LU_real = self.LU_real
        LU_complex = self.LU_complex

        current_jac = self.current_jac
        jac = self.jac

        rejected = False
        step_accepted = False
        message = None
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound

            h = t_new - t
            h_abs = np.abs(h)

            if self.sol is None:
                Z0 = np.zeros((3, y.shape[0]))
            else:
                Z0 = self.sol(t + h * C).T - y

            scale = atol + np.abs(y) * rtol

            converged = False
            while not converged:
                if LU_real is None or LU_complex is None:
                    LU_real = self.lu(MU_REAL / h * self.I - J)
                    LU_complex = self.lu(MU_COMPLEX / h * self.I - J)

                converged, n_iter, Z, rate = solve_collocation_system(
                    self.fun, t, y, h, Z0, scale, self.newton_tol,
                    LU_real, LU_complex, self.solve_lu)

                if not converged:
                    if current_jac:
                        break

                    J = self.jac(t, y, f)
                    current_jac = True
                    LU_real = None
                    LU_complex = None

            if not converged:
                h_abs *= 0.5
                LU_real = None
                LU_complex = None
                continue

            y_new = y + Z[-1]
            ZE = Z.T.dot(E) / h
            error = self.solve_lu(LU_real, f + ZE)
            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
            error_norm = norm(error / scale)
            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
                                                       + n_iter)

            if rejected and error_norm > 1:
                error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE)
                error_norm = norm(error / scale)

            if error_norm > 1:
                factor = predict_factor(h_abs, h_abs_old,
                                        error_norm, error_norm_old)
                h_abs *= max(MIN_FACTOR, safety * factor)

                LU_real = None
                LU_complex = None
                rejected = True
            else:
                step_accepted = True

        recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3

        factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old)
        factor = min(MAX_FACTOR, safety * factor)

        if not recompute_jac and factor < 1.2:
            factor = 1
        else:
            LU_real = None
            LU_complex = None

        f_new = self.fun(t_new, y_new)
        if recompute_jac:
            J = jac(t_new, y_new, f_new)
            current_jac = True
        elif jac is not None:
            current_jac = False

        self.h_abs_old = self.h_abs
        self.error_norm_old = error_norm

        self.h_abs = h_abs * factor

        self.y_old = y

        self.t = t_new
        self.y = y_new
        self.f = f_new

        self.Z = Z

        self.LU_real = LU_real
        self.LU_complex = LU_complex
        self.current_jac = current_jac
        self.J = J

        self.t_old = t
        self.sol = self._compute_dense_output()

        return step_accepted, message

    def _compute_dense_output(self):
        Q = np.dot(self.Z.T, P)
        return RadauDenseOutput(self.t_old, self.t, self.y_old, Q)

    def _dense_output_impl(self):
        return self.sol



class RadauDenseOutput(DenseOutput):
    def __init__(self, t_old, t, y_old, Q):
        super().__init__(t_old, t)
        self.h = t - t_old
        self.Q = Q
        self.order = Q.shape[1] - 1
        self.y_old = y_old

    def _call_impl(self, t):
        x = (t - self.t_old) / self.h
        if t.ndim == 0:
            p = np.tile(x, self.order + 1)
            p = np.cumprod(p)
        else:
            p = np.tile(x, (self.order + 1, 1))
            p = np.cumprod(p, axis=0)
        # Here we don't multiply by h, not a mistake.
        y = np.dot(self.Q, p)
        if y.ndim == 2:
            y += self.y_old[:, None]
        else:
            y += self.y_old

        return y
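

# Added illustration (not part of the original source): the cumprod above
# builds the powers [x, x**2, ..., x**(order + 1)] of the normalized time
# x = (t - t_old) / h, so the interpolant is evaluated as
# y(t) ~= y_old + Q.dot([x, x**2, x**3]) with Q = Z.T.dot(P). A standalone
# sketch (the helper name is hypothetical):
def _dense_output_powers_example(x=0.5, order=2):
    p = np.cumprod(np.tile(x, order + 1))  # array([0.5, 0.25, 0.125])
    return p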