
"""
Sparse matrix functions
"""

#
# Authors: Travis Oliphant, March 2002
#          Anthony Scopatz, August 2012 (Sparse Updates)
#          Jake Vanderplas, August 2012 (Sparse Updates)
#

__all__ = ['expm', 'inv']

import numpy as np
from scipy.linalg._basic import solve, solve_triangular

from scipy.sparse._base import isspmatrix
from scipy.sparse.linalg import spsolve
from scipy.sparse._sputils import is_pydata_spmatrix

import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg._interface import LinearOperator

from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm


UPPER_TRIANGULAR = 'upper_triangular'


def inv(A):
    """
    Compute the inverse of a sparse matrix

    Parameters
    ----------
    A : (M, M) sparse matrix
        square matrix to be inverted

    Returns
    -------
    Ainv : (M, M) sparse matrix
        inverse of `A`

    Notes
    -----
    This computes the sparse inverse of `A`. If the inverse of `A` is expected
    to be non-sparse, it will likely be faster to convert `A` to dense and use
    `scipy.linalg.inv`.

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import inv
    >>> A = csc_matrix([[1., 0.], [1., 2.]])
    >>> Ainv = inv(A)
    >>> Ainv
    <2x2 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in Compressed Sparse Column format>
    >>> A.dot(Ainv)
    <2x2 sparse matrix of type '<class 'numpy.float64'>'
        with 2 stored elements in Compressed Sparse Column format>
    >>> A.dot(Ainv).toarray()
    array([[ 1.,  0.],
           [ 0.,  1.]])

    .. versionadded:: 0.12.0

    """
    # Check input
    if not (scipy.sparse.isspmatrix(A) or is_pydata_spmatrix(A)):
        raise TypeError('Input must be a sparse matrix')

    # Use sparse direct solver to solve "AX = I" accurately
    I = _ident_like(A)
    Ainv = spsolve(A, I)
    return Ainv


def _onenorm_matrix_power_nnm(A, p):
    """
    Compute the 1-norm of a non-negative integer power of a non-negative matrix.

    Parameters
    ----------
    A : a square ndarray or matrix or sparse matrix
        Input matrix with non-negative entries.
    p : non-negative integer
        The power to which the matrix is to be raised.

    Returns
    -------
    out : float
        The 1-norm of the matrix power p of A.

    """
    # Check input
    if int(p) != p or p < 0:
        raise ValueError('expected non-negative integer p')
    p = int(p)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')

    # Explicitly make a column vector so that this works when A is a
    # numpy matrix (in addition to ndarray and sparse matrix).
    v = np.ones((A.shape[0], 1), dtype=float)
    M = A.T
    for i in range(p):
        v = M.dot(v)
    return np.max(v)

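# Illustrative usage sketch (editorial addition, not part of the original
# module): for a non-negative matrix, the loop above computes ||A**p||_1 as
# the largest entry of (A.T)**p applied to a vector of ones, i.e. the maximum
# column sum of A**p, without forming the dense matrix power.
#
#     >>> A = np.array([[1., 2.], [3., 4.]])
#     >>> float(_onenorm_matrix_power_nnm(A, 2))
#     32.0
#     >>> float(np.linalg.norm(np.linalg.matrix_power(A, 2), 1))  # same value
#     32.0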

def _is_upper_triangular(A):
    # This function could possibly be of wider interest.
    if isspmatrix(A):
        lower_part = scipy.sparse.tril(A, -1)
        # Check structural upper triangularity,
        # then coincidental upper triangularity if needed.
        return lower_part.nnz == 0 or lower_part.count_nonzero() == 0
    elif is_pydata_spmatrix(A):
        import sparse
        lower_part = sparse.tril(A, -1)
        return lower_part.nnz == 0
    else:
        return not np.tril(A, -1).any()

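# Usage sketch (added for illustration, not in the original source): only the
# strictly lower triangle is inspected, so explicitly stored zeros below the
# diagonal of a sparse matrix still count as upper triangular.
#
#     >>> _is_upper_triangular(np.array([[1., 5.], [0., 2.]]))
#     True
#     >>> _is_upper_triangular(np.array([[1., 0.], [3., 2.]]))
#     False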

def _smart_matrix_product(A, B, alpha=None, structure=None):
    """
    A matrix product that knows about sparse and structured matrices.

    Parameters
    ----------
    A : 2d ndarray
        First matrix.
    B : 2d ndarray
        Second matrix.
    alpha : float
        The matrix product will be scaled by this constant.
    structure : str, optional
        A string describing the structure of both matrices `A` and `B`.
        Only `upper_triangular` is currently supported.

    Returns
    -------
    M : 2d ndarray
        Matrix product of A and B.

    """
    if len(A.shape) != 2:
        raise ValueError('expected A to be a rectangular matrix')
    if len(B.shape) != 2:
        raise ValueError('expected B to be a rectangular matrix')
    f = None
    if structure == UPPER_TRIANGULAR:
        if (not isspmatrix(A) and not isspmatrix(B)
                and not is_pydata_spmatrix(A) and not is_pydata_spmatrix(B)):
            f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
    if f is not None:
        if alpha is None:
            alpha = 1.
        out = f(alpha, A, B)
    else:
        if alpha is None:
            out = A.dot(B)
        else:
            out = alpha * A.dot(B)
    return out

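# Illustrative sketch (editorial addition, not part of the original module):
# when both operands are dense and declared upper triangular, the product is
# dispatched to the BLAS routine trmm; either way the result agrees with a
# plain dot product, up to the optional alpha scaling.
#
#     >>> A = np.triu(np.arange(1., 10.).reshape(3, 3))
#     >>> B = np.triu(np.ones((3, 3)))
#     >>> C = _smart_matrix_product(A, B, structure=UPPER_TRIANGULAR)
#     >>> bool(np.allclose(C, A.dot(B)))
#     True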

class MatrixPowerOperator(LinearOperator):

    def __init__(self, A, p, structure=None):
        if A.ndim != 2 or A.shape[0] != A.shape[1]:
            raise ValueError('expected A to be like a square matrix')
        if p < 0:
            raise ValueError('expected p to be a non-negative integer')
        self._A = A
        self._p = p
        self._structure = structure
        self.dtype = A.dtype
        self.ndim = A.ndim
        self.shape = A.shape

    def _matvec(self, x):
        for i in range(self._p):
            x = self._A.dot(x)
        return x

    def _rmatvec(self, x):
        A_T = self._A.T
        x = x.ravel()
        for i in range(self._p):
            x = A_T.dot(x)
        return x

    def _matmat(self, X):
        for i in range(self._p):
            X = _smart_matrix_product(self._A, X, structure=self._structure)
        return X

    @property
    def T(self):
        return MatrixPowerOperator(self._A.T, self._p)

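# Usage sketch (editorial addition, not part of the original module): the
# operator applies A repeatedly instead of forming A**p, so a matrix-vector
# product with A**p costs p ordinary matrix-vector products.
#
#     >>> A = np.array([[2., 1.], [0., 3.]])
#     >>> x = np.array([1., 1.])
#     >>> op = MatrixPowerOperator(A, 3)
#     >>> bool(np.allclose(op.matvec(x), np.linalg.matrix_power(A, 3).dot(x)))
#     True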

class ProductOperator(LinearOperator):
    """
    For now, this is limited to products of multiple square matrices.
    """

    def __init__(self, *args, **kwargs):
        self._structure = kwargs.get('structure', None)
        for A in args:
            if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
                raise ValueError(
                        'For now, the ProductOperator implementation is '
                        'limited to the product of multiple square matrices.')
        if args:
            n = args[0].shape[0]
            for A in args:
                for d in A.shape:
                    if d != n:
                        raise ValueError(
                                'The square matrices of the ProductOperator '
                                'must all have the same shape.')
            self.shape = (n, n)
            self.ndim = len(self.shape)
        self.dtype = np.result_type(*[x.dtype for x in args])
        self._operator_sequence = args

    def _matvec(self, x):
        for A in reversed(self._operator_sequence):
            x = A.dot(x)
        return x

    def _rmatvec(self, x):
        x = x.ravel()
        for A in self._operator_sequence:
            x = A.T.dot(x)
        return x

    def _matmat(self, X):
        for A in reversed(self._operator_sequence):
            X = _smart_matrix_product(A, X, structure=self._structure)
        return X

    @property
    def T(self):
        T_args = [A.T for A in reversed(self._operator_sequence)]
        return ProductOperator(*T_args)

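# Usage sketch (editorial addition, not part of the original module): the
# factors are applied right-to-left, so ProductOperator(A, B) acts like the
# matrix A @ B without ever multiplying the factors together.
#
#     >>> A = np.array([[1., 2.], [0., 1.]])
#     >>> B = np.array([[3., 0.], [1., 1.]])
#     >>> op = ProductOperator(A, B)
#     >>> x = np.array([1., 2.])
#     >>> bool(np.allclose(op.matvec(x), A.dot(B).dot(x)))
#     True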

def _onenormest_matrix_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
    """
    Efficiently estimate the 1-norm of A^p.

    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.
    structure : str, optional
        A string describing the structure of matrix `A`.
        Only `upper_triangular` is currently supported.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    """
    return scipy.sparse.linalg.onenormest(
            MatrixPowerOperator(A, p, structure=structure))

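# Illustrative sketch (editorial addition, not part of the original module):
# the estimate is produced by running onenormest on a MatrixPowerOperator, so
# A**p is never formed; per its own docstring the returned value is an
# underestimate, i.e. it does not exceed the true 1-norm.
#
#     >>> A = np.array([[1., -2.], [3., 4.]])
#     >>> est = _onenormest_matrix_power(A, 3)
#     >>> exact = np.linalg.norm(np.linalg.matrix_power(A, 3), 1)
#     >>> bool(est <= exact + 1e-8)
#     True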

def _onenormest_product(operator_seq,
        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
    """
    Efficiently estimate the 1-norm of the matrix product of the args.

    Parameters
    ----------
    operator_seq : linear operator sequence
        Matrices whose 1-norm of product is to be computed.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.
    structure : str, optional
        A string describing the structure of all operators.
        Only `upper_triangular` is currently supported.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    """
    return scipy.sparse.linalg.onenormest(
            ProductOperator(*operator_seq, structure=structure))


class _ExpmPadeHelper:
    """
    Help lazily evaluate a matrix exponential.

    The idea is to avoid doing more work than we need for high expm precision,
    so we lazily compute matrix powers and store or precompute
    other properties of the matrix.

    """

    def __init__(self, A, structure=None, use_exact_onenorm=False):
        """
        Initialize the object.

        Parameters
        ----------
        A : a dense or sparse square numpy matrix or ndarray
            The matrix to be exponentiated.
        structure : str, optional
            A string describing the structure of matrix `A`.
            Only `upper_triangular` is currently supported.
        use_exact_onenorm : bool, optional
            If True then only the exact one-norm of matrix powers and products
            will be used. Otherwise, the one-norm of powers and products
            may initially be estimated.
        """
        self.A = A
        self._A2 = None
        self._A4 = None
        self._A6 = None
        self._A8 = None
        self._A10 = None
        self._d4_exact = None
        self._d6_exact = None
        self._d8_exact = None
        self._d10_exact = None
        self._d4_approx = None
        self._d6_approx = None
        self._d8_approx = None
        self._d10_approx = None
        self.ident = _ident_like(A)
        self.structure = structure
        self.use_exact_onenorm = use_exact_onenorm

    @property
    def A2(self):
        if self._A2 is None:
            self._A2 = _smart_matrix_product(
                    self.A, self.A, structure=self.structure)
        return self._A2

    @property
    def A4(self):
        if self._A4 is None:
            self._A4 = _smart_matrix_product(
                    self.A2, self.A2, structure=self.structure)
        return self._A4

    @property
    def A6(self):
        if self._A6 is None:
            self._A6 = _smart_matrix_product(
                    self.A4, self.A2, structure=self.structure)
        return self._A6

    @property
    def A8(self):
        if self._A8 is None:
            self._A8 = _smart_matrix_product(
                    self.A6, self.A2, structure=self.structure)
        return self._A8

    @property
    def A10(self):
        if self._A10 is None:
            self._A10 = _smart_matrix_product(
                    self.A4, self.A6, structure=self.structure)
        return self._A10

    @property
    def d4_tight(self):
        if self._d4_exact is None:
            self._d4_exact = _onenorm(self.A4)**(1/4.)
        return self._d4_exact

    @property
    def d6_tight(self):
        if self._d6_exact is None:
            self._d6_exact = _onenorm(self.A6)**(1/6.)
        return self._d6_exact

    @property
    def d8_tight(self):
        if self._d8_exact is None:
            self._d8_exact = _onenorm(self.A8)**(1/8.)
        return self._d8_exact

    @property
    def d10_tight(self):
        if self._d10_exact is None:
            self._d10_exact = _onenorm(self.A10)**(1/10.)
        return self._d10_exact

    @property
    def d4_loose(self):
        if self.use_exact_onenorm:
            return self.d4_tight
        if self._d4_exact is not None:
            return self._d4_exact
        else:
            if self._d4_approx is None:
                self._d4_approx = _onenormest_matrix_power(self.A2, 2,
                        structure=self.structure)**(1/4.)
            return self._d4_approx

    @property
    def d6_loose(self):
        if self.use_exact_onenorm:
            return self.d6_tight
        if self._d6_exact is not None:
            return self._d6_exact
        else:
            if self._d6_approx is None:
                self._d6_approx = _onenormest_matrix_power(self.A2, 3,
                        structure=self.structure)**(1/6.)
            return self._d6_approx

    @property
    def d8_loose(self):
        if self.use_exact_onenorm:
            return self.d8_tight
        if self._d8_exact is not None:
            return self._d8_exact
        else:
            if self._d8_approx is None:
                self._d8_approx = _onenormest_matrix_power(self.A4, 2,
                        structure=self.structure)**(1/8.)
            return self._d8_approx

    @property
    def d10_loose(self):
        if self.use_exact_onenorm:
            return self.d10_tight
        if self._d10_exact is not None:
            return self._d10_exact
        else:
            if self._d10_approx is None:
                self._d10_approx = _onenormest_product((self.A4, self.A6),
                        structure=self.structure)**(1/10.)
            return self._d10_approx

    def pade3(self):
        b = (120., 60., 12., 1.)
        U = _smart_matrix_product(self.A,
                b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade5(self):
        b = (30240., 15120., 3360., 420., 30., 1.)
        U = _smart_matrix_product(self.A,
                b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade7(self):
        b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
        U = _smart_matrix_product(self.A,
                b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade9(self):
        b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
                2162160., 110880., 3960., 90., 1.)
        U = _smart_matrix_product(self.A,
                (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
                    b[3]*self.A2 + b[1]*self.ident),
                structure=self.structure)
        V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
                b[2]*self.A2 + b[0]*self.ident)
        return U, V

    def pade13_scaled(self, s):
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
                1187353796428800., 129060195264000., 10559470521600.,
                670442572800., 33522128640., 1323241920., 40840800., 960960.,
                16380., 182., 1.)
        B = self.A * 2**-s
        B2 = self.A2 * 2**(-2*s)
        B4 = self.A4 * 2**(-4*s)
        B6 = self.A6 * 2**(-6*s)
        U2 = _smart_matrix_product(B6,
                b[13]*B6 + b[11]*B4 + b[9]*B2,
                structure=self.structure)
        U = _smart_matrix_product(B,
                (U2 + b[7]*B6 + b[5]*B4 +
                    b[3]*B2 + b[1]*self.ident),
                structure=self.structure)
        V2 = _smart_matrix_product(B6,
                b[12]*B6 + b[10]*B4 + b[8]*B2,
                structure=self.structure)
        V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
        return U, V

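# Illustrative sketch (editorial addition, not part of the original module):
# the power properties are cached, so A2 is formed at most once and the
# higher powers reuse it (A4 = A2*A2, A6 = A4*A2, ...); the d*_loose
# properties fall back to the cheaper norm estimate only when the exact
# value has not already been computed.
#
#     >>> A = np.diag([1., 2., 3.])
#     >>> h = _ExpmPadeHelper(A)
#     >>> h.A2 is h.A2          # second access returns the cached product
#     True
#     >>> bool(np.allclose(h.A6, np.linalg.matrix_power(A, 6)))
#     True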

def expm(A):
    """
    Compute the matrix exponential using Pade approximation.

    Parameters
    ----------
    A : (M,M) array_like or sparse matrix
        2D Array or Matrix (sparse or dense) to be exponentiated

    Returns
    -------
    expA : (M,M) ndarray
        Matrix exponential of `A`

    Notes
    -----
    This is algorithm (6.1) of [1]_, which is a simplification of
    algorithm (5.1).

    .. versionadded:: 0.12.0

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           "A New Scaling and Squaring Algorithm for the Matrix Exponential."
           SIAM Journal on Matrix Analysis and Applications.
           31 (3). pp. 970-989. ISSN 1095-7162

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm
    >>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
    >>> A.toarray()
    array([[1, 0, 0],
           [0, 2, 0],
           [0, 0, 3]], dtype=int64)
    >>> Aexp = expm(A)
    >>> Aexp
    <3x3 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in Compressed Sparse Column format>
    >>> Aexp.toarray()
    array([[  2.71828183,   0.        ,   0.        ],
           [  0.        ,   7.3890561 ,   0.        ],
           [  0.        ,   0.        ,  20.08553692]])
    """
    return _expm(A, use_exact_onenorm='auto')


def _expm(A, use_exact_onenorm):
    # Core of expm, separated to allow testing exact and approximate
    # algorithms.

    # Avoid indiscriminate asarray() to allow sparse or other strange arrays.
    if isinstance(A, (list, tuple, np.matrix)):
        A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')

    # gracefully handle size-0 input,
    # carefully handling sparse scenario
    if A.shape == (0, 0):
        out = np.zeros([0, 0], dtype=A.dtype)
        if isspmatrix(A) or is_pydata_spmatrix(A):
            return A.__class__(out)
        return out

    # Trivial case
    if A.shape == (1, 1):
        out = [[np.exp(A[0, 0])]]

        # Avoid indiscriminate casting to ndarray to
        # allow for sparse or other strange arrays
        if isspmatrix(A) or is_pydata_spmatrix(A):
            return A.__class__(out)

        return np.array(out)

    # Ensure input is of float type, to avoid integer overflows etc.
    if ((isinstance(A, np.ndarray) or isspmatrix(A) or is_pydata_spmatrix(A))
            and not np.issubdtype(A.dtype, np.inexact)):
        A = A.astype(float)

    # Detect upper triangularity.
    structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None

    if use_exact_onenorm == "auto":
        # Hardcode a matrix order threshold for exact vs. estimated one-norms.
        use_exact_onenorm = A.shape[0] < 200

    # Track functions of A to help compute the matrix exponential.
    h = _ExpmPadeHelper(
            A, structure=structure, use_exact_onenorm=use_exact_onenorm)

    # Try Pade order 3.
    eta_1 = max(h.d4_loose, h.d6_loose)
    if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
        U, V = h.pade3()
        return _solve_P_Q(U, V, structure=structure)

    # Try Pade order 5.
    eta_2 = max(h.d4_tight, h.d6_loose)
    if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
        U, V = h.pade5()
        return _solve_P_Q(U, V, structure=structure)

    # Try Pade orders 7 and 9.
    eta_3 = max(h.d6_tight, h.d8_loose)
    if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
        U, V = h.pade7()
        return _solve_P_Q(U, V, structure=structure)
    if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
        U, V = h.pade9()
        return _solve_P_Q(U, V, structure=structure)

    # Use Pade order 13.
    eta_4 = max(h.d8_loose, h.d10_loose)
    eta_5 = min(eta_3, eta_4)
    theta_13 = 4.25

    # Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13
    if eta_5 == 0:
        # Nilpotent special case
        s = 0
    else:
        s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
    s = s + _ell(2**-s * h.A, 13)
    U, V = h.pade13_scaled(s)
    X = _solve_P_Q(U, V, structure=structure)
    if structure == UPPER_TRIANGULAR:
        # Invoke Code Fragment 2.1.
        X = _fragment_2_1(X, h.A, s)
    else:
        # X = r_13(A)^(2^s) by repeated squaring.
        for i in range(s):
            X = X.dot(X)
    return X

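# Illustrative sketch (editorial addition, not part of the original module):
# for a nilpotent upper triangular matrix all of the eta values are zero, so
# the order-3 Pade approximant is selected and is exact here, since
# exp(N) == I + N when N*N == 0.
#
#     >>> N = np.array([[0., 1.], [0., 0.]])
#     >>> _expm(N, use_exact_onenorm=True)
#     array([[1., 1.],
#            [0., 1.]])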

def _solve_P_Q(U, V, structure=None):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    U : ndarray
        Pade numerator.
    V : ndarray
        Pade denominator.
    structure : str, optional
        A string describing the structure of both matrices `U` and `V`.
        Only `upper_triangular` is currently supported.

    Notes
    -----
    The `structure` argument is inspired by similar args
    for theano and cvxopt functions.

    """
    P = U + V
    Q = -U + V
    if isspmatrix(U) or is_pydata_spmatrix(U):
        return spsolve(Q, P)
    elif structure is None:
        return solve(Q, P)
    elif structure == UPPER_TRIANGULAR:
        return solve_triangular(Q, P)
    else:
        raise ValueError('unsupported matrix structure: ' + str(structure))

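# Usage sketch (editorial addition, not part of the original module): U and V
# are the odd and even halves of the Pade polynomial evaluated at A, so
# P = U + V and Q = -U + V are the numerator and denominator, and the
# approximant is obtained by solving Q X = P.  For a 1x1 matrix this reduces
# to the scalar [3/3] approximant of exp:
#
#     >>> h = _ExpmPadeHelper(np.array([[0.1]]))
#     >>> U, V = h.pade3()
#     >>> bool(np.allclose(_solve_P_Q(U, V), np.exp(0.1)))
#     True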

def _exp_sinch(a, x):
    """
    Stably evaluate exp(a)*sinh(x)/x

    Notes
    -----
    The strategy of falling back to a sixth order Taylor expansion
    was suggested by the Spallation Neutron Source docs
    which were found on the internet by a Google search.
    http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
    The details of the cutoff point and the Horner-like evaluation
    were picked without reference to anything in particular.

    Note that sinch is not currently implemented in scipy.special,
    whereas the "engineer's" definition of sinc is implemented.
    The implementation of sinc involves a scaling factor of pi
    that distinguishes it from the "mathematician's" version of sinc.

    """

    # If x is small then use sixth order Taylor expansion.
    # How small is small? I am using the point where the relative error
    # of the approximation is less than 1e-14.
    # If x is large then directly evaluate sinh(x) / x.
    if abs(x) < 0.0135:
        x2 = x*x
        return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.))))
    else:
        return (np.exp(a + x) - np.exp(a - x)) / (2*x)

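# Illustrative sketch (editorial addition, not part of the original module):
# both branches agree to near machine precision around the cutoff, while the
# Taylor branch avoids the cancellation in exp(a + x) - exp(a - x) for tiny x.
#
#     >>> a, x = 0.3, 0.0134        # just below the Taylor-branch cutoff
#     >>> bool(abs(_exp_sinch(a, x) - np.exp(a) * np.sinh(x) / x) < 1e-13)
#     True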

def _eq_10_42(lam_1, lam_2, t_12):
    """
    Equation (10.42) of Functions of Matrices: Theory and Computation.

    Notes
    -----
    This is a helper function for _fragment_2_1 of expm_2009.
    Equation (10.42) is on page 251 in the section on Schur algorithms.
    In particular, section 10.4.3 explains the Schur-Parlett algorithm.
    expm([[lam_1, t_12], [0, lam_2]])
    =
    [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
     [0, exp(lam_2)]]
    """

    # The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
    # apparently suffers from cancellation, according to Higham's textbook.
    # A nice implementation of sinch, defined as sinh(x)/x,
    # will apparently work around the cancellation.
    a = 0.5 * (lam_1 + lam_2)
    b = 0.5 * (lam_1 - lam_2)
    return t_12 * _exp_sinch(a, b)

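# Illustrative sketch (editorial addition, not part of the original module):
# this is the off-diagonal entry of the exponential of a 2x2 upper triangular
# block.  For the nilpotent block [[0, 1], [0, 0]] the exact value is 1,
# since sinch(0) == 1.
#
#     >>> float(_eq_10_42(0.0, 0.0, 1.0))
#     1.0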

def _fragment_2_1(X, T, s):
    """
    A helper function for expm_2009.

    Notes
    -----
    The argument X is modified in place, but this modification is not the same
    as the returned value of the function.
    This function also takes pains to do things in ways that are compatible
    with sparse matrices, for example by avoiding fancy indexing
    and by using methods of the matrices whenever possible instead of
    using functions of the numpy or scipy libraries themselves.

    """
    # Form X = r_m(2^-s T)
    # Replace diag(X) by exp(2^-s diag(T)).
    n = X.shape[0]
    diag_T = np.ravel(T.diagonal().copy())

    # Replace diag(X) by exp(2^-s diag(T)).
    scale = 2 ** -s
    exp_diag = np.exp(scale * diag_T)
    for k in range(n):
        X[k, k] = exp_diag[k]

    for i in range(s-1, -1, -1):
        X = X.dot(X)

        # Replace diag(X) by exp(2^-i diag(T)).
        scale = 2 ** -i
        exp_diag = np.exp(scale * diag_T)
        for k in range(n):
            X[k, k] = exp_diag[k]

        # Replace (first) superdiagonal of X by explicit formula
        # for superdiagonal of exp(2^-i T) from Eq (10.42) of
        # the author's 2008 textbook
        # Functions of Matrices: Theory and Computation.
        for k in range(n-1):
            lam_1 = scale * diag_T[k]
            lam_2 = scale * diag_T[k+1]
            t_12 = scale * T[k, k+1]
            value = _eq_10_42(lam_1, lam_2, t_12)
            X[k, k+1] = value

    # Return the updated X matrix.
    return X


def _ell(A, m):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    A : linear operator
        A linear operator whose norm of power we care about.
    m : int
        The power of the linear operator.

    Returns
    -------
    value : int
        A non-negative integer related to a truncation-error bound; the
        calling code requires it to be zero before accepting a Pade order,
        or adds it to the scaling parameter s.

    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')

    # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
    # They are coefficients of terms of a generating function series expansion.
    c_i = {3: 100800.,
           5: 10059033600.,
           7: 4487938430976000.,
           9: 5914384781877411840000.,
           13: 113250775606021113483283660800000000.
           }
    abs_c_recip = c_i[m]

    # This is explained after Eq. (1.2) of the 2009 expm paper.
    # It is the "unit roundoff" of IEEE double precision arithmetic.
    u = 2**-53

    # Compute the one-norm of matrix power 2*m + 1 of abs(A).
    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2*m + 1)

    # Treat zero norm as a special case.
    if not A_abs_onenorm:
        return 0

    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
    log2_alpha_div_u = np.log2(alpha/u)
    value = int(np.ceil(log2_alpha_div_u / (2 * m)))
    return max(value, 0)
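# Illustrative sketch (editorial addition, not part of the original module):
# in _expm above, a candidate Pade order m is accepted only if _ell(A, m)
# returns 0, and for order 13 the return value is added to the scaling
# parameter s; the zero matrix is the trivial case.
#
#     >>> _ell(np.zeros((3, 3)), 5)
#     0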