
"""Compute the action of the matrix exponential."""
from warnings import warn

import numpy as np

import scipy.linalg
import scipy.sparse.linalg
from scipy.linalg._decomp_qr import qr
from scipy.sparse._sputils import is_pydata_spmatrix
from scipy.sparse.linalg import aslinearoperator
from scipy.sparse.linalg._interface import IdentityOperator
from scipy.sparse.linalg._onenormest import onenormest

__all__ = ['expm_multiply']


def _exact_inf_norm(A):
    # A compatibility function which should eventually disappear.
    if scipy.sparse.issparse(A):
        return max(abs(A).sum(axis=1).flat)
    elif is_pydata_spmatrix(A):
        return max(abs(A).sum(axis=1))
    else:
        return np.linalg.norm(A, np.inf)


def _exact_1_norm(A):
    # A compatibility function which should eventually disappear.
    if scipy.sparse.issparse(A):
        return max(abs(A).sum(axis=0).flat)
    elif is_pydata_spmatrix(A):
        return max(abs(A).sum(axis=0))
    else:
        return np.linalg.norm(A, 1)


def _trace(A):
    # A compatibility function which should eventually disappear.
    if is_pydata_spmatrix(A):
        return A.to_scipy_sparse().trace()
    else:
        return A.trace()


def traceest(A, m3, seed=None):
    """Estimate `np.trace(A)` using `3*m3` matrix-vector products.

    The result is not deterministic.

    Parameters
    ----------
    A : LinearOperator
        Linear operator whose trace will be estimated. Has to be square.
    m3 : int
        Number of matrix-vector products divided by 3 used to estimate the
        trace.
    seed : optional
        Seed for `numpy.random.default_rng`.
        Can be provided to obtain deterministic results.

    Returns
    -------
    trace : LinearOperator.dtype
        Estimate of the trace

    Notes
    -----
    This is the Hutch++ algorithm given in [1]_.

    References
    ----------
    .. [1] Meyer, Raphael A., Cameron Musco, Christopher Musco, and David P.
           Woodruff. "Hutch++: Optimal Stochastic Trace Estimation." In
           Symposium on Simplicity in Algorithms (SOSA), pp. 142-155. Society
           for Industrial and Applied Mathematics, 2021
           https://doi.org/10.1137/1.9781611976496.16

    """
    rng = np.random.default_rng(seed)
    if len(A.shape) != 2 or A.shape[-1] != A.shape[-2]:
        raise ValueError("Expected A to be like a square matrix.")
    n = A.shape[-1]
    S = rng.choice([-1.0, +1.0], [n, m3])
    Q, _ = qr(A.matmat(S), overwrite_a=True, mode='economic')
    trQAQ = np.trace(Q.conj().T @ A.matmat(Q))
    G = rng.choice([-1, +1], [n, m3])
    right = G - Q@(Q.conj().T @ G)
    trGAG = np.trace(right.conj().T @ A.matmat(right))
    return trQAQ + trGAG/m3
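# An illustrative sketch of `traceest` (editor's note; the matrix M below is a
# made-up example, not part of the module):
#
#     M = np.diag(np.arange(100.0))
#     est = traceest(aslinearoperator(M), m3=20, seed=0)
#
# uses 3*20 = 60 matrix-vector products and returns a randomized estimate of
# np.trace(M) == 4950.0; a larger `m3` tightens the estimate at the cost of
# more matrix-vector products.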


def _ident_like(A):
    # A compatibility function which should eventually disappear.
    if scipy.sparse.issparse(A):
        # Creates a sparse matrix in dia format
        out = scipy.sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
        if isinstance(A, scipy.sparse.spmatrix):
            return out.asformat(A.format)
        return scipy.sparse.dia_array(out).asformat(A.format)
    elif is_pydata_spmatrix(A):
        import sparse
        return sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
    elif isinstance(A, scipy.sparse.linalg.LinearOperator):
        return IdentityOperator(A.shape, dtype=A.dtype)
    else:
        return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)


def expm_multiply(A, B, start=None, stop=None, num=None,
                  endpoint=None, traceA=None):
    """
    Compute the action of the matrix exponential of A on B.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix or vector to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.
    traceA : scalar, optional
        Trace of `A`. If not given, the trace is estimated for linear
        operators, or calculated exactly for sparse matrices. It is used to
        precondition `A`, so an approximate trace is acceptable.
        For linear operators, `traceA` should be provided to ensure
        performance, as the estimation is not guaranteed to be reliable for
        all cases.

        .. versionadded:: 1.9.0

    Returns
    -------
    expm_A_B : ndarray
        The result of the action :math:`e^{t_k A} B`.

    Warns
    -----
    UserWarning
        If `A` is a linear operator and ``traceA=None`` (default).

    Notes
    -----
    The optional arguments defining the sequence of evenly spaced time points
    are compatible with the arguments of `numpy.linspace`.

    The output ndarray shape is somewhat complicated, so it is explained here.
    The ndim of the output could be either 1, 2, or 3.
    It would be 1 if you are computing the expm action on a single vector
    at a single time point.
    It would be 2 if you are computing the expm action on a vector
    at multiple time points, or if you are computing the expm action
    on a matrix at a single time point.
    It would be 3 if you want the action on a matrix with multiple
    columns at multiple time points.
    If multiple time points are requested, expm_A_B[0] will always
    be the action of the expm at the first time point,
    regardless of whether the action is on a vector or a matrix.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
           "Computing the Action of the Matrix Exponential,
           with an Application to Exponential Integrators."
           SIAM Journal on Scientific Computing,
           33 (2). pp. 488-511. ISSN 1064-8275
           http://eprints.ma.man.ac.uk/1591/

    .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
           "Computing Matrix Functions."
           Acta Numerica,
           19. 159-208. ISSN 0962-4929
           http://eprints.ma.man.ac.uk/1451/

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm, expm_multiply
    >>> A = csc_matrix([[1, 0], [0, 1]])
    >>> A.toarray()
    array([[1, 0],
           [0, 1]], dtype=int64)
    >>> B = np.array([np.exp(-1.), np.exp(-2.)])
    >>> B
    array([ 0.36787944,  0.13533528])
    >>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
    array([[ 1.        ,  0.36787944],
           [ 1.64872127,  0.60653066],
           [ 2.71828183,  1.        ]])
    >>> expm(A).dot(B)      # Verify 1st timestep
    array([ 1.        ,  0.36787944])
    >>> expm(1.5*A).dot(B)  # Verify 2nd timestep
    array([ 1.64872127,  0.60653066])
    >>> expm(2*A).dot(B)    # Verify 3rd timestep
    array([ 2.71828183,  1.        ])
    """
    if all(arg is None for arg in (start, stop, num, endpoint)):
        X = _expm_multiply_simple(A, B, traceA=traceA)
    else:
        X, status = _expm_multiply_interval(A, B, start, stop, num,
                                            endpoint, traceA=traceA)
    return X
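# A small usage sketch (editor's note, not part of the module): called without
# the time-grid arguments, e.g.
#
#     expm_multiply(A, B)
#
# the function returns the action of expm(1*A) on B, i.e. the same values as
# scipy.sparse.linalg.expm(A) @ B, but without forming the dense expm(A).
# With the A and B from the docstring example above this is [1.0, exp(-1)].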


def _expm_multiply_simple(A, B, t=1.0, traceA=None, balance=False):
    """
    Compute the action of the matrix exponential at a single time point.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    t : float
        A time point.
    traceA : scalar, optional
        Trace of `A`. If not given, the trace is estimated for linear
        operators, or calculated exactly for sparse matrices. It is used to
        precondition `A`, so an approximate trace is acceptable.
    balance : bool
        Indicates whether or not to apply balancing.

    Returns
    -------
    F : ndarray
        :math:`e^{t A} B`

    Notes
    -----
    This is algorithm (3.2) in Al-Mohy and Higham (2011).

    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('shapes of matrices A {} and B {} are incompatible'
                         .format(A.shape, B.shape))
    ident = _ident_like(A)
    is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
    n = A.shape[0]
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    u_d = 2**-53
    tol = u_d
    if traceA is None:
        if is_linear_operator:
            warn("Trace of LinearOperator not available, it will be estimated."
                 " Provide `traceA` to ensure performance.", stacklevel=3)
        # m3=1 is a somewhat arbitrary choice; a more accurate trace (larger
        # m3) might speed up the exponential calculation, but trace estimation
        # is more costly.
        traceA = traceest(A, m3=1) if is_linear_operator else _trace(A)
    mu = traceA / float(n)
    A = A - mu * ident
    A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A)
    if t*A_1_norm == 0:
        m_star, s = 0, 1
    else:
        ell = 2
        norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
    return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)


def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
    """
    A helper function.
    """
    if balance:
        raise NotImplementedError
    if tol is None:
        u_d = 2 ** -53
        tol = u_d
    F = B
    eta = np.exp(t*mu / float(s))
    for i in range(s):
        c1 = _exact_inf_norm(B)
        for j in range(m_star):
            coeff = t / float(s*(j+1))
            B = coeff * A.dot(B)
            c2 = _exact_inf_norm(B)
            F = F + B
            if c1 + c2 <= tol * _exact_inf_norm(F):
                break
            c1 = c2
        F = eta * F
        B = F
    return F
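# Background note (editorial sketch of what the core loop above computes):
# since the callers have already shifted A by mu = trace(A)/n, the identity
#     exp(t*A_original) = exp(t*mu) * exp(t*(A_original - mu*I))
# lets each of the s stages apply a truncated Taylor series of the shifted
# operator (the inner loop over j) and then scale the result by
# eta = exp(t*mu/s).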


# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
_theta = {
    # The first 30 values are from table A.3 of Computing Matrix Functions.
    1: 2.29e-16,
    2: 2.58e-8,
    3: 1.39e-5,
    4: 3.40e-4,
    5: 2.40e-3,
    6: 9.07e-3,
    7: 2.38e-2,
    8: 5.00e-2,
    9: 8.96e-2,
    10: 1.44e-1,
    # 11
    11: 2.14e-1,
    12: 3.00e-1,
    13: 4.00e-1,
    14: 5.14e-1,
    15: 6.41e-1,
    16: 7.81e-1,
    17: 9.31e-1,
    18: 1.09,
    19: 1.26,
    20: 1.44,
    # 21
    21: 1.62,
    22: 1.82,
    23: 2.01,
    24: 2.22,
    25: 2.43,
    26: 2.64,
    27: 2.86,
    28: 3.08,
    29: 3.31,
    30: 3.54,
    # The rest are from table 3.1 of
    # Computing the Action of the Matrix Exponential.
    35: 4.7,
    40: 6.0,
    45: 7.2,
    50: 8.5,
    55: 9.9,
    }
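# Rough illustration of how the table is read (editor's note, values worked
# out from the code in _fragment_3_1 below): in the "small norm" branch the
# scaling parameter for a given m is s = ceil(||t*A||_1 / _theta[m]), so for
# ||t*A||_1 = 1.0 one gets s = ceil(1.0/1.44) = 1 with m = 20, but
# s = ceil(1.0/0.144) = 7 with m = 10, and the (m, s) pair minimizing m*s wins.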


def _onenormest_matrix_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False):
    """
    Efficiently estimate the 1-norm of A^p.

    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    """
    #XXX Eventually turn this into an API function in the _onenormest module,
    #XXX and remove its underscore,
    #XXX but wait until expm_multiply goes into scipy.
    from scipy.sparse.linalg._onenormest import onenormest
    return onenormest(aslinearoperator(A) ** p)


class LazyOperatorNormInfo:
    """
    Information about an operator is lazily computed.

    The information includes the exact 1-norm of the operator,
    in addition to estimates of 1-norms of powers of the operator.
    This uses the notation of Computing the Action (2011).
    This class is specialized enough to probably not be of general interest
    outside of this module.

    """

    def __init__(self, A, A_1_norm=None, ell=2, scale=1):
        """
        Provide the operator and some norm-related information.

        Parameters
        ----------
        A : linear operator
            The operator of interest.
        A_1_norm : float, optional
            The exact 1-norm of A.
        ell : int, optional
            A technical parameter controlling norm estimation quality.
        scale : int, optional
            If specified, return the norms of scale*A instead of A.

        """
        self._A = A
        self._A_1_norm = A_1_norm
        self._ell = ell
        self._d = {}
        self._scale = scale

    def set_scale(self, scale):
        """
        Set the scale parameter.
        """
        self._scale = scale

    def onenorm(self):
        """
        Compute the exact 1-norm.
        """
        if self._A_1_norm is None:
            self._A_1_norm = _exact_1_norm(self._A)
        return self._scale*self._A_1_norm

    def d(self, p):
        """
        Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
        """
        if p not in self._d:
            est = _onenormest_matrix_power(self._A, p, self._ell)
            self._d[p] = est ** (1.0 / p)
        return self._scale*self._d[p]

    def alpha(self, p):
        """
        Lazily compute max(d(p), d(p+1)).
        """
        return max(self.d(p), self.d(p+1))
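# How this class is used in this module (editorial summary grounded in the
# _expm_multiply_* callers): they build
#     norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=2)
# and _fragment_3_1 then queries norm_info.onenorm() and norm_info.alpha(p),
# so the relatively expensive 1-norm estimates of powers (t*A)**p are only
# computed when the large-norm branch of the bound selection needs them.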


def _compute_cost_div_m(m, p, norm_info):
    """
    A helper function for computing bounds.

    This is equation (3.10).
    It measures cost in terms of the number of required matrix products.

    Parameters
    ----------
    m : int
        A valid key of _theta.
    p : int
        A matrix power.
    norm_info : LazyOperatorNormInfo
        Information about 1-norms of related operators.

    Returns
    -------
    cost_div_m : int
        Required number of matrix products divided by m.

    """
    return int(np.ceil(norm_info.alpha(p) / _theta[m]))


def _compute_p_max(m_max):
    """
    Compute the largest positive integer p such that p*(p-1) <= m_max + 1.

    Do this in a slightly dumb way, but safe and not too slow.

    Parameters
    ----------
    m_max : int
        A count related to bounds.

    """
    sqrt_m_max = np.sqrt(m_max)
    p_low = int(np.floor(sqrt_m_max))
    p_high = int(np.ceil(sqrt_m_max + 1))
    return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
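# Editorial note: for the default m_max = 55 used in _fragment_3_1 below,
# this gives p_max = 8, since 8*7 = 56 <= 56 while 9*8 = 72 > 56; that value
# enters condition (3.13).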


def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
    """
    A helper function for the _expm_multiply_* functions.

    Parameters
    ----------
    norm_info : LazyOperatorNormInfo
        Information about norms of certain linear operators of interest.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    tol : float
        Expected to be
        :math:`2^{-24}` for single precision or
        :math:`2^{-53}` for double precision.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    best_m : int
        Related to bounds for error control.
    best_s : int
        Amount of scaling.

    Notes
    -----
    This is code fragment (3.1) in Al-Mohy and Higham (2011).
    The discussion of default values for m_max and ell
    is given between the definitions of equation (3.11)
    and the definition of equation (3.12).

    """
    if ell < 1:
        raise ValueError('expected ell to be a positive integer')
    best_m = None
    best_s = None
    if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
        for m, theta in _theta.items():
            s = int(np.ceil(norm_info.onenorm() / theta))
            if best_m is None or m * s < best_m * best_s:
                best_m = m
                best_s = s
    else:
        # Equation (3.11).
        for p in range(2, _compute_p_max(m_max) + 1):
            for m in range(p*(p-1)-1, m_max+1):
                if m in _theta:
                    s = _compute_cost_div_m(m, p, norm_info)
                    if best_m is None or m * s < best_m * best_s:
                        best_m = m
                        best_s = s
        best_s = max(best_s, 1)
    return best_m, best_s


def _condition_3_13(A_1_norm, n0, m_max, ell):
    """
    A helper function for the _expm_multiply_* functions.

    Parameters
    ----------
    A_1_norm : float
        The precomputed 1-norm of A.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    value : bool
        Indicates whether or not the condition has been met.

    Notes
    -----
    This is condition (3.13) in Al-Mohy and Higham (2011).

    """

    # This is the rhs of equation (3.12).
    p_max = _compute_p_max(m_max)
    a = 2 * ell * p_max * (p_max + 3)

    # Evaluate the condition (3.13).
    b = _theta[m_max] / float(n0 * m_max)
    return A_1_norm <= a * b
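# Worked example (editor's note, using the defaults m_max=55, ell=2 and a
# single right-hand side n0=1): p_max = 8, a = 2*2*8*11 = 352 and
# b = _theta[55]/55 = 9.9/55 = 0.18, so the "small norm" branch of
# _fragment_3_1 above is taken whenever ||t*A||_1 <= 352*0.18 = 63.36.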


def _expm_multiply_interval(A, B, start=None, stop=None, num=None,
                            endpoint=None, traceA=None, balance=False,
                            status_only=False):
    """
    Compute the action of the matrix exponential at multiple time points.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    traceA : scalar, optional
        Trace of `A`. If not given, the trace is estimated for linear
        operators, or calculated exactly for sparse matrices. It is used to
        precondition `A`, so an approximate trace is acceptable.
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.
    balance : bool
        Indicates whether or not to apply balancing.
    status_only : bool
        A flag that is set to True for some debugging and testing operations.

    Returns
    -------
    F : ndarray
        :math:`e^{t_k A} B`
    status : int
        An integer status for testing and debugging.

    Notes
    -----
    This is algorithm (5.2) in Al-Mohy and Higham (2011).

    There seems to be a typo, where line 15 of the algorithm should be
    moved to line 6.5 (between lines 6 and 7).

    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('shapes of matrices A {} and B {} are incompatible'
                         .format(A.shape, B.shape))
    ident = _ident_like(A)
    is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
    n = A.shape[0]
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    u_d = 2**-53
    tol = u_d
    if traceA is None:
        if is_linear_operator:
            warn("Trace of LinearOperator not available, it will be estimated."
                 " Provide `traceA` to ensure performance.", stacklevel=3)
        # m3=5 is a somewhat arbitrary choice; a more accurate trace (larger
        # m3) might speed up the exponential calculation, but trace estimation
        # is also costly, and an educated guess would need to consider the
        # number of time points.
        traceA = traceest(A, m3=5) if is_linear_operator else _trace(A)
    mu = traceA / float(n)

    # Get the linspace samples, attempting to preserve the linspace defaults.
    linspace_kwargs = {'retstep': True}
    if num is not None:
        linspace_kwargs['num'] = num
    if endpoint is not None:
        linspace_kwargs['endpoint'] = endpoint
    samples, step = np.linspace(start, stop, **linspace_kwargs)

    # Convert the linspace output to the notation used by the publication.
    nsamples = len(samples)
    if nsamples < 2:
        raise ValueError('at least two time points are required')
    q = nsamples - 1
    h = step
    t_0 = samples[0]
    t_q = samples[q]

    # Define the output ndarray.
    # Use an ndim=3 shape, such that the last two indices
    # are the ones that may be involved in level 3 BLAS operations.
    X_shape = (nsamples,) + B.shape
    X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
    t = t_q - t_0
    A = A - mu * ident
    A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A)
    ell = 2
    norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
    if t*A_1_norm == 0:
        m_star, s = 0, 1
    else:
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)

    # Compute the expm action up to the initial time point.
    X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)

    # Compute the expm action at the rest of the time points.
    if q <= s:
        if status_only:
            return 0
        else:
            return _expm_multiply_interval_core_0(A, X,
                    h, mu, q, norm_info, tol, ell, n0)
    elif not (q % s):
        if status_only:
            return 1
        else:
            return _expm_multiply_interval_core_1(A, X,
                    h, mu, m_star, s, q, tol)
    elif (q % s):
        if status_only:
            return 2
        else:
            return _expm_multiply_interval_core_2(A, X,
                    h, mu, m_star, s, q, tol)
    else:
        raise Exception('internal error')
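# Background for the interval helpers below (editorial summary of how the code
# works, not text from the paper): _core_0 simply re-runs the single-point
# routine over q sub-intervals of length h, while _core_1 and _core_2 keep the
# terms K[p] = (h*A)**p @ Z / p! with Z the value at the start of a scaling
# interval, so the truncated sum F = sum_p k**p * K[p] approximates
# exp(k*h*A) @ Z; multiplying by exp(k*h*mu) then undoes the trace shift
# applied to A above.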


def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
    """
    A helper function, for the case q <= s.
    """

    # Compute the new values of m_star and s which should be applied
    # over intervals of size t/q
    if norm_info.onenorm() == 0:
        m_star, s = 0, 1
    else:
        norm_info.set_scale(1./q)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
        norm_info.set_scale(1)

    for k in range(q):
        X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)
    return X, 0


def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s == 0.
    """
    d = q // s
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(s):
        Z = X[i*d]
        K[0] = Z
        high_p = 0
        for k in range(1, d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p > high_p:
                    K[p] = h * A.dot(K[p-1]) / float(p)
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 1


def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s > 0.
    """
    d = q // s
    j = q // d
    r = q - d * j
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(j + 1):
        Z = X[i*d]
        K[0] = Z
        high_p = 0
        if i < j:
            effective_d = d
        else:
            effective_d = r
        for k in range(1, effective_d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p == high_p + 1:
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 2