import numpy as np
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse import issparse, csc_matrix, eye
from scipy.sparse.linalg import splu
from scipy.optimize._numdiff import group_columns
from .common import (validate_max_step, validate_tol, select_initial_step,
                     norm, EPS, num_jac, validate_first_step,
                     warn_extraneous)
from .base import OdeSolver, DenseOutput


MAX_ORDER = 5
NEWTON_MAXITER = 4
MIN_FACTOR = 0.2
MAX_FACTOR = 10


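# When the step size changes from h to factor * h, the backward differences
# stored in D must be rescaled so that they still represent the same
# interpolating polynomial on the new grid. compute_R builds the
# transformation matrix with entries R[i, j] = prod_{m=1..i}
# (m - 1 - factor * j) / m for i, j >= 1, and change_D applies
# D <- (R @ U).T @ D with U = compute_R(order, 1). For example, at order 1
# with factor = 0.5, (R @ U).T == diag(1, 0.5), so D[1] (which is roughly
# h * y') is simply halved.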

def compute_R(order, factor):
    """Compute the matrix for changing the differences array."""
    I = np.arange(1, order + 1)[:, None]
    J = np.arange(1, order + 1)
    M = np.zeros((order + 1, order + 1))
    M[1:, 1:] = (I - 1 - factor * J) / I
    M[0] = 1
    return np.cumprod(M, axis=0)


def change_D(D, order, factor):
    """Change differences array in-place when step size is changed."""
    R = compute_R(order, factor)
    U = compute_R(order, 1)
    RU = R.dot(U)
    D[:order + 1] = np.dot(RU.T, D[:order + 1])


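# solve_bdf_system performs a simplified (chord) Newton iteration for the BDF
# algebraic equation. Each pass solves (I - c * J) dy = c * f(t_new, y) - psi - d
# using the LU factorization supplied by the caller, where c = h / alpha[order]
# and psi collects the contribution of the stored differences. The accumulated
# correction d = y - y_predict is returned so the caller can form the local
# error estimate, and the observed convergence rate is used to abort early when
# the remaining iterations cannot plausibly reach the tolerance.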

def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol):
    """Solve the algebraic system resulting from BDF method."""
    d = 0
    y = y_predict.copy()
    dy_norm_old = None
    converged = False
    for k in range(NEWTON_MAXITER):
        f = fun(t_new, y)
        if not np.all(np.isfinite(f)):
            break

        dy = solve_lu(LU, c * f - psi - d)
        dy_norm = norm(dy / scale)

        if dy_norm_old is None:
            rate = None
        else:
            rate = dy_norm / dy_norm_old

        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)):
            break

        y += dy
        d += dy

        if (dy_norm == 0 or
                rate is not None and rate / (1 - rate) * dy_norm < tol):
            converged = True
            break

        dy_norm_old = dy_norm

    return converged, k + 1, y, d


class BDF(OdeSolver):
    """Implicit method based on backward-differentiation formulas.

    This is a variable order method with the order varying automatically from
    1 to 5. The general framework of the BDF algorithm is described in [1]_.
    This class implements a quasi-constant step size as explained in [2]_.
    The error estimation strategy for the constant-step BDF is derived in [3]_.
    An accuracy enhancement using modified formulas (NDF) [2]_ is also
    implemented.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
        It can either have shape (n,); then ``fun`` must return array_like with
        shape (n,). Alternatively it can have shape (n, k); then ``fun``
        must return an array_like with shape (n, k), i.e. each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by `vectorized` argument (see below). The
        vectorized implementation allows a faster approximation of the Jacobian
        by finite differences (required for this solver).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is
        not bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : {None, array_like, sparse_matrix, callable}, optional
        Jacobian matrix of the right-hand side of the system with respect to y,
        required by this method. The Jacobian matrix has shape (n, n) and its
        element (i, j) is equal to ``d f_i / d y_j``.
        There are three ways to define the Jacobian:

            * If array_like or sparse_matrix, the Jacobian is assumed to
              be constant.
            * If callable, the Jacobian is assumed to depend on both
              t and y; it will be called as ``jac(t, y)`` as necessary.
              For the 'Radau' and 'BDF' methods, the return value might be a
              sparse matrix.
            * If None (default), the Jacobian will be approximated by
              finite differences.

        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines a sparsity structure of the Jacobian matrix for a
        finite-difference approximation. Its shape must be (n, n). This
        argument is ignored if `jac` is not `None`. If the Jacobian has only
        few non-zero elements in *each* row, providing the sparsity structure
        will greatly speed up the computations [4]_. A zero entry means that
        a corresponding element in the Jacobian is always zero. If None
        (default), the Jacobian is assumed to be dense.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.

    References
    ----------
    .. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical
           Solution of Ordinary Differential Equations", ACM Transactions on
           Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975.
    .. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
           COMPUT., Vol. 18, No. 1, pp. 1-22, January 1997.
    .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I:
           Nonstiff Problems", Sec. III.2.
    .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13, pp. 117-120, 1974.
    """

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
                 vectorized=False, first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized,
                         support_complex=True)
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        f = self.fun(self.t, self.y)
        if first_step is None:
            self.h_abs = select_initial_step(self.fun, self.t, self.y, f,
                                             self.direction, 1,
                                             self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.h_abs_old = None
        self.error_norm_old = None

        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))

        self.jac_factor = None
        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
        if issparse(self.J):
            def lu(A):
                self.nlu += 1
                return splu(A)

            def solve_lu(LU, b):
                return LU.solve(b)

            I = eye(self.n, format='csc', dtype=self.y.dtype)
        else:
            def lu(A):
                self.nlu += 1
                return lu_factor(A, overwrite_a=True)

            def solve_lu(LU, b):
                return lu_solve(LU, b, overwrite_b=True)

            I = np.identity(self.n, dtype=self.y.dtype)

        self.lu = lu
        self.solve_lu = solve_lu
        self.I = I

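        # NDF coefficients in the spirit of the modified formulas of [2]_:
        # kappa holds the NDF tuning constants (kappa = 0 would give plain
        # BDF), gamma[k] = sum_{j=1..k} 1 / j, alpha = (1 - kappa) * gamma,
        # and error_const multiplies the correction d to estimate the local
        # error at each order.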

        kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0])
        self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1))))
        self.alpha = (1 - kappa) * self.gamma
        self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2)

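        # D stores scaled backward differences of the interpolating
        # polynomial: initially D[0] = y_n and D[1] = h * y'_n. Two extra rows
        # beyond MAX_ORDER are kept for the order-increase error estimate.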

        D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype)
        D[0] = self.y
        D[1] = f * self.h_abs * self.direction
        self.D = D

        self.order = 1
        self.n_equal_steps = 0
        self.LU = None

    def _validate_jac(self, jac, sparsity):
        t0 = self.t
        y0 = self.y

        if jac is None:
            if sparsity is not None:
                if issparse(sparsity):
                    sparsity = csc_matrix(sparsity)
                groups = group_columns(sparsity)
                sparsity = (sparsity, groups)

            def jac_wrapped(t, y):
                self.njev += 1
                f = self.fun_single(t, y)
                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
                                             self.atol, self.jac_factor,
                                             sparsity)
                return J
            J = jac_wrapped(t0, y0)
        elif callable(jac):
            J = jac(t0, y0)
            self.njev += 1
            if issparse(J):
                J = csc_matrix(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return csc_matrix(jac(t, y), dtype=y0.dtype)
            else:
                J = np.asarray(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return np.asarray(jac(t, y), dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
        else:
            if issparse(jac):
                J = csc_matrix(jac, dtype=y0.dtype)
            else:
                J = np.asarray(jac, dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
            jac_wrapped = None

        return jac_wrapped, J

    def _step_impl(self):
        t = self.t
        D = self.D

        max_step = self.max_step
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        if self.h_abs > max_step:
            h_abs = max_step
            change_D(D, self.order, max_step / self.h_abs)
            self.n_equal_steps = 0
        elif self.h_abs < min_step:
            h_abs = min_step
            change_D(D, self.order, min_step / self.h_abs)
            self.n_equal_steps = 0
        else:
            h_abs = self.h_abs

        atol = self.atol
        rtol = self.rtol
        order = self.order

        alpha = self.alpha
        gamma = self.gamma
        error_const = self.error_const

        J = self.J
        LU = self.LU
        current_jac = self.jac is None

        step_accepted = False
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound
                change_D(D, order, np.abs(t_new - t) / h_abs)
                self.n_equal_steps = 0
                LU = None

            h = t_new - t
            h_abs = np.abs(h)

            y_predict = np.sum(D[:order + 1], axis=0)

            scale = atol + rtol * np.abs(y_predict)
            psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order]

            converged = False
            c = h / alpha[order]
            while not converged:
                if LU is None:
                    LU = self.lu(self.I - c * J)

                converged, n_iter, y_new, d = solve_bdf_system(
                    self.fun, t_new, y_predict, c, psi, LU, self.solve_lu,
                    scale, self.newton_tol)

                if not converged:
                    if current_jac:
                        break
                    J = self.jac(t_new, y_predict)
                    LU = None
                    current_jac = True

            if not converged:
                factor = 0.5
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                LU = None
                continue

            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
                                                       + n_iter)

            scale = atol + rtol * np.abs(y_new)
            error = error_const[order] * d
            error_norm = norm(error / scale)

            if error_norm > 1:
                factor = max(MIN_FACTOR,
                             safety * error_norm ** (-1 / (order + 1)))
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                # As we didn't have problems with convergence, we don't
                # reset LU here.
            else:
                step_accepted = True

        self.n_equal_steps += 1

        self.t = t_new
        self.y = y_new

        self.h_abs = h_abs
        self.J = J
        self.LU = LU

        # Update differences. The principal relation here is
        # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D
        # contained difference for previous interpolating polynomial and
        # d = D^{k + 1} y_n. Thus this elegant code follows.
        D[order + 2] = d - D[order + 1]
        D[order + 1] = d
        for i in reversed(range(order + 1)):
            D[i] += D[i + 1]

        if self.n_equal_steps < order + 1:
            return True, None

        if order > 1:
            error_m = error_const[order - 1] * D[order]
            error_m_norm = norm(error_m / scale)
        else:
            error_m_norm = np.inf

        if order < MAX_ORDER:
            error_p = error_const[order + 1] * D[order + 2]
            error_p_norm = norm(error_p / scale)
        else:
            error_p_norm = np.inf

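        # After order + 1 equally sized steps, compare the error estimates for
        # orders order - 1, order and order + 1 and switch to the order whose
        # estimate allows the largest step-size factor.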

        error_norms = np.array([error_m_norm, error_norm, error_p_norm])
        with np.errstate(divide='ignore'):
            factors = error_norms ** (-1 / np.arange(order, order + 3))

        delta_order = np.argmax(factors) - 1
        order += delta_order
        self.order = order

        factor = min(MAX_FACTOR, safety * np.max(factors))
        self.h_abs *= factor
        change_D(D, order, factor)
        self.n_equal_steps = 0
        self.LU = None

        return True, None

    def _dense_output_impl(self):
        return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction,
                              self.order, self.D[:self.order + 1].copy())

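# BdfDenseOutput evaluates the interpolating polynomial in Newton-like form
# built from the backward differences:
#     y(t) = D[0] + sum_{j=1..order} D[j] * prod_{m=0..j-1}
#            (t - t_shift[m]) / denom[m],
# where t_shift[m] = t_n - m * h and denom[m] = (m + 1) * h.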

class BdfDenseOutput(DenseOutput):
    def __init__(self, t_old, t, h, order, D):
        super().__init__(t_old, t)
        self.order = order
        self.t_shift = self.t - h * np.arange(self.order)
        self.denom = h * (1 + np.arange(self.order))
        self.D = D

    def _call_impl(self, t):
        if t.ndim == 0:
            x = (t - self.t_shift) / self.denom
            p = np.cumprod(x)
        else:
            x = (t - self.t_shift[:, None]) / self.denom[:, None]
            p = np.cumprod(x, axis=0)

        y = np.dot(self.D[1:].T, p)
        if y.ndim == 1:
            y += self.D[0]
        else:
            y += self.D[0, :, None]

        return y
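

if __name__ == "__main__":
    # Minimal usage sketch: in practice this solver is driven through the
    # public ``scipy.integrate.solve_ivp`` interface with ``method="BDF"``.
    # The right-hand side below is only an illustrative mildly stiff problem;
    # run as ``python -m scipy.integrate._ivp.bdf`` so the relative imports
    # above resolve.
    from scipy.integrate import solve_ivp

    def stiff_rhs(t, y):
        # Fast relaxation towards cos(t); stiffness favours an implicit method.
        return -1000 * (y - np.cos(t))

    sol = solve_ivp(stiff_rhs, (0.0, 1.0), [0.0], method="BDF",
                    rtol=1e-6, atol=1e-9)
    print("status:", sol.status, "accepted steps:", sol.t.size - 1)
    print("y(1.0) =", sol.y[0, -1])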