Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py: 3%

430 statements  

« prev     ^ index     » next       coverage.py v7.3.2, created at 2023-12-12 06:31 +0000

1""" 

2Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG). 

3 

4References 

5---------- 

6.. [1] A. V. Knyazev (2001), 

7 Toward the Optimal Preconditioned Eigensolver: Locally Optimal 

8 Block Preconditioned Conjugate Gradient Method. 

9 SIAM Journal on Scientific Computing 23, no. 2, 

10 pp. 517-541. :doi:`10.1137/S1064827500366124` 

11 

12.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007), 

13 Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX) 

14 in hypre and PETSc. :arxiv:`0705.2626` 

15 

16.. [3] A. V. Knyazev's C and MATLAB implementations: 

17 https://github.com/lobpcg/blopex 

18""" 

19 

20import warnings 

21import numpy as np 

22from scipy.linalg import (inv, eigh, cho_factor, cho_solve, 

23 cholesky, LinAlgError) 

24from scipy.sparse.linalg import LinearOperator 

25from scipy.sparse import isspmatrix 

26from numpy import block as bmat 

27 

28__all__ = ["lobpcg"] 

29 

30 

31def _report_nonhermitian(M, name): 

32 """ 

33 Report if `M` is not a Hermitian matrix given its type. 

34 """ 

35 from scipy.linalg import norm 

36 

37 md = M - M.T.conj() 

38 nmd = norm(md, 1) 

39 tol = 10 * np.finfo(M.dtype).eps 

40 tol = max(tol, tol * norm(M, 1)) 

41 if nmd > tol: 

42 warnings.warn( 

43 f"Matrix {name} of the type {M.dtype} is not Hermitian: " 

44 f"condition: {nmd} < {tol} fails.", 

45 UserWarning, stacklevel=4 

46 ) 

47 

48def _as2d(ar): 

49 """ 

50 If the input array is 2D return it, if it is 1D, append a dimension, 

51 making it a column vector. 

52 """ 

53 if ar.ndim == 2: 

54 return ar 

55 else: # Assume 1! 

56 aux = np.array(ar, copy=False) 

57 aux.shape = (ar.shape[0], 1) 

58 return aux 

59 

60 

61def _makeMatMat(m): 

62 if m is None: 

63 return None 

64 elif callable(m): 

65 return lambda v: m(v) 

66 else: 

67 return lambda v: m @ v 

68 

69 

70def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY): 

71 """Changes blockVectorV in place.""" 

72 YBV = np.dot(blockVectorBY.T.conj(), blockVectorV) 

73 tmp = cho_solve(factYBY, YBV) 

74 blockVectorV -= np.dot(blockVectorY, tmp) 

75 

76 

77def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, 

78 verbosityLevel=0): 

79 """in-place B-orthonormalize the given block vector using Cholesky.""" 

80 normalization = blockVectorV.max(axis=0) + np.finfo(blockVectorV.dtype).eps 

81 blockVectorV = blockVectorV / normalization 

82 if blockVectorBV is None: 

83 if B is not None: 

84 try: 

85 blockVectorBV = B(blockVectorV) 

86 except Exception as e: 

87 if verbosityLevel: 

88 warnings.warn( 

89 f"Secondary MatMul call failed with error\n" 

90 f"{e}\n", 

91 UserWarning, stacklevel=3 

92 ) 

93 return None, None, None, normalization 

94 if blockVectorBV.shape != blockVectorV.shape: 

95 raise ValueError( 

96 f"The shape {blockVectorV.shape} " 

97 f"of the orthogonalized matrix not preserved\n" 

98 f"and changed to {blockVectorBV.shape} " 

99 f"after multiplying by the secondary matrix.\n" 

100 ) 

101 else: 

102 blockVectorBV = blockVectorV # Shared data!!! 

103 else: 

104 blockVectorBV = blockVectorBV / normalization 

105 VBV = blockVectorV.T.conj() @ blockVectorBV 

106 try: 

107 # VBV is a Cholesky factor from now on... 

108 VBV = cholesky(VBV, overwrite_a=True) 

109 VBV = inv(VBV, overwrite_a=True) 

110 blockVectorV = blockVectorV @ VBV 

111 # blockVectorV = (cho_solve((VBV.T, True), blockVectorV.T)).T 

112 if B is not None: 

113 blockVectorBV = blockVectorBV @ VBV 

114 # blockVectorBV = (cho_solve((VBV.T, True), blockVectorBV.T)).T 

115 return blockVectorV, blockVectorBV, VBV, normalization 

116 except LinAlgError: 

117 if verbosityLevel: 

118 warnings.warn( 

119 "Cholesky has failed.", 

120 UserWarning, stacklevel=3 

121 ) 

122 return None, None, None, normalization 

123 

124 

125def _get_indx(_lambda, num, largest): 

126 """Get `num` indices into `_lambda` depending on `largest` option.""" 

127 ii = np.argsort(_lambda) 

128 if largest: 

129 ii = ii[:-num - 1:-1] 

130 else: 

131 ii = ii[:num] 

132 

133 return ii 

134 

135 

136def _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel): 

137 if verbosityLevel: 

138 _report_nonhermitian(gramA, "gramA") 

139 _report_nonhermitian(gramB, "gramB") 

140 

141 

142def lobpcg( 

143 A, 

144 X, 

145 B=None, 

146 M=None, 

147 Y=None, 

148 tol=None, 

149 maxiter=None, 

150 largest=True, 

151 verbosityLevel=0, 

152 retLambdaHistory=False, 

153 retResidualNormsHistory=False, 

154 restartControl=20, 

155): 

156 """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG). 

157 

158 LOBPCG is a preconditioned eigensolver for large symmetric positive 

159 definite (SPD) generalized eigenproblems. 

160 

161 Parameters 

162 ---------- 

163 A : {sparse matrix, dense matrix, LinearOperator, callable object} 

164 The symmetric linear operator of the problem, usually a 

165 sparse matrix. Often called the "stiffness matrix". 

166 X : ndarray, float32 or float64 

167 Initial approximation to the ``k`` eigenvectors (non-sparse). If `A` 

168 has ``shape=(n,n)`` then `X` should have shape ``shape=(n,k)``. 

169 B : {dense matrix, sparse matrix, LinearOperator, callable object} 

170 Optional. 

171 The right hand side operator in a generalized eigenproblem. 

172 By default, ``B = Identity``. Often called the "mass matrix". 

173 M : {dense matrix, sparse matrix, LinearOperator, callable object} 

174 Optional. 

175 Preconditioner to `A`; by default ``M = Identity``. 

176 `M` should approximate the inverse of `A`. 

177 Y : ndarray, float32 or float64, optional. 

178 An n-by-sizeY matrix of constraints (non-sparse), sizeY < n. 

179 The iterations will be performed in the B-orthogonal complement 

180 of the column-space of Y. Y must be full rank. 

181 tol : scalar, optional. 

182 Solver tolerance (stopping criterion). 

183 The default is ``tol=n*sqrt(eps)``. 

184 maxiter : int, optional. 

185 Maximum number of iterations. The default is ``maxiter=20``. 

186 largest : bool, optional. 

187 When True, solve for the largest eigenvalues, otherwise the smallest. 

188 verbosityLevel : int, optional 

189 Controls solver output. The default is ``verbosityLevel=0``. 

190 retLambdaHistory : bool, optional. 

191 Whether to return eigenvalue history. Default is False. 

192 retResidualNormsHistory : bool, optional. 

193 Whether to return history of residual norms. Default is False. 

194 restartControl : int, optional. 

195 Iterations restart if the residuals jump up 2**restartControl times 

196 compared to the smallest ones recorded in retResidualNormsHistory. 

197 The default is ``restartControl=20``, making the restarts rare for 

198 backward compatibility. 

199 

200 Returns 

201 ------- 

202 w : ndarray 

203 Array of ``k`` eigenvalues. 

204 v : ndarray 

205 An array of ``k`` eigenvectors. `v` has the same shape as `X`. 

206 lambdas : ndarray, optional 

207 The eigenvalue history, if `retLambdaHistory` is True. 

208 rnorms : ndarray, optional 

209 The history of residual norms, if `retResidualNormsHistory` is True. 

210 

211 Notes 

212 ----- 

213 The iterative loop in lobpcg runs maxit=maxiter (or 20 if maxit=None) 

214 iterations at most and finishes earler if the tolerance is met. 

215 Breaking backward compatibility with the previous version, lobpcg 

216 now returns the block of iterative vectors with the best accuracy rather 

217 than the last one iterated, as a cure for possible divergence. 

218 

219 The size of the iteration history output equals to the number of the best 

220 (limited by maxit) iterations plus 3 (initial, final, and postprocessing). 

221 

222 If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are True, 

223 the return tuple has the following format 

224 ``(lambda, V, lambda history, residual norms history)``. 

225 

226 In the following ``n`` denotes the matrix size and ``k`` the number 

227 of required eigenvalues (smallest or largest). 

228 

229 The LOBPCG code internally solves eigenproblems of the size ``3k`` on every 

230 iteration by calling the dense eigensolver `eigh`, so if ``k`` is not 

231 small enough compared to ``n``, it makes no sense to call the LOBPCG code. 

232 Moreover, if one calls the LOBPCG algorithm for ``5k > n``, it would likely 

233 break internally, so the code calls the standard function `eigh` instead. 

234 It is not that ``n`` should be large for the LOBPCG to work, but rather the 

235 ratio ``n / k`` should be large. It you call LOBPCG with ``k=1`` 

236 and ``n=10``, it works though ``n`` is small. The method is intended 

237 for extremely large ``n / k``. 

238 

239 The convergence speed depends basically on two factors: 

240 

241 1. Relative separation of the seeking eigenvalues from the rest 

242 of the eigenvalues. One can vary ``k`` to improve the absolute 

243 separation and use proper preconditioning to shrink the spectral spread. 

244 For example, a rod vibration test problem (under tests 

245 directory) is ill-conditioned for large ``n``, so convergence will be 

246 slow, unless efficient preconditioning is used. For this specific 

247 problem, a good simple preconditioner function would be a linear solve 

248 for `A`, which is easy to code since `A` is tridiagonal. 

249 

250 2. Quality of the initial approximations `X` to the seeking eigenvectors. 

251 Randomly distributed around the origin vectors work well if no better 

252 choice is known. 

253 

254 References 

255 ---------- 

256 .. [1] A. V. Knyazev (2001), 

257 Toward the Optimal Preconditioned Eigensolver: Locally Optimal 

258 Block Preconditioned Conjugate Gradient Method. 

259 SIAM Journal on Scientific Computing 23, no. 2, 

260 pp. 517-541. :doi:`10.1137/S1064827500366124` 

261 

262 .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov 

263 (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers 

264 (BLOPEX) in hypre and PETSc. :arxiv:`0705.2626` 

265 

266 .. [3] A. V. Knyazev's C and MATLAB implementations: 

267 https://github.com/lobpcg/blopex 

268 

269 Examples 

270 -------- 

271 Solve ``A x = lambda x`` with constraints and preconditioning. 

272 

273 >>> import numpy as np 

274 >>> from scipy.sparse import spdiags, issparse 

275 >>> from scipy.sparse.linalg import lobpcg, LinearOperator 

276 

277 The square matrix size: 

278 

279 >>> n = 100 

280 >>> vals = np.arange(1, n + 1) 

281 

282 The first mandatory input parameter, in this test 

283 a sparse 2D array representing the square matrix 

284 of the eigenvalue problem to solve: 

285 

286 >>> A = spdiags(vals, 0, n, n) 

287 >>> A.toarray() 

288 array([[ 1, 0, 0, ..., 0, 0, 0], 

289 [ 0, 2, 0, ..., 0, 0, 0], 

290 [ 0, 0, 3, ..., 0, 0, 0], 

291 ..., 

292 [ 0, 0, 0, ..., 98, 0, 0], 

293 [ 0, 0, 0, ..., 0, 99, 0], 

294 [ 0, 0, 0, ..., 0, 0, 100]]) 

295 

296 Initial guess for eigenvectors, should have linearly independent 

297 columns. The second mandatory input parameter, a 2D array with the 

298 row dimension determining the number of requested eigenvalues. 

299 If no initial approximations available, randomly oriented vectors 

300 commonly work best, e.g., with components normally disrtibuted 

301 around zero or uniformly distributed on the interval [-1 1]. 

302 

303 >>> rng = np.random.default_rng() 

304 >>> X = rng.normal(size=(n, 3)) 

305 

306 Constraints - an optional input parameter is a 2D array comprising 

307 of column vectors that the eigenvectors must be orthogonal to: 

308 

309 >>> Y = np.eye(n, 3) 

310 

311 Preconditioner in the inverse of A in this example: 

312 

313 >>> invA = spdiags([1./vals], 0, n, n) 

314 

315 The preconditiner must be defined by a function: 

316 

317 >>> def precond( x ): 

318 ... return invA @ x 

319 

320 The argument x of the preconditioner function is a matrix inside `lobpcg`, 

321 thus the use of matrix-matrix product ``@``. 

322 

323 The preconditioner function is passed to lobpcg as a `LinearOperator`: 

324 

325 >>> M = LinearOperator(matvec=precond, matmat=precond, 

326 ... shape=(n, n), dtype=np.float64) 

327 

328 Let us now solve the eigenvalue problem for the matrix A: 

329 

330 >>> eigenvalues, _ = lobpcg(A, X, Y=Y, M=M, largest=False) 

331 >>> eigenvalues 

332 array([4., 5., 6.]) 

333 

334 Note that the vectors passed in Y are the eigenvectors of the 3 smallest 

335 eigenvalues. The results returned are orthogonal to those. 

336 """ 

337 blockVectorX = X 

338 bestblockVectorX = blockVectorX 

339 blockVectorY = Y 

340 residualTolerance = tol 

341 if maxiter is None: 

342 maxiter = 20 

343 

344 bestIterationNumber = maxiter 

345 

346 sizeY = 0 

347 if blockVectorY is not None: 

348 if len(blockVectorY.shape) != 2: 

349 warnings.warn( 

350 f"Expected rank-2 array for argument Y, instead got " 

351 f"{len(blockVectorY.shape)}, " 

352 f"so ignore it and use no constraints.", 

353 UserWarning, stacklevel=2 

354 ) 

355 blockVectorY = None 

356 else: 

357 sizeY = blockVectorY.shape[1] 

358 

359 # Block size. 

360 if blockVectorX is None: 

361 raise ValueError("The mandatory initial matrix X cannot be None") 

362 if len(blockVectorX.shape) != 2: 

363 raise ValueError("expected rank-2 array for argument X") 

364 

365 n, sizeX = blockVectorX.shape 

366 

367 # Data type of iterates, determined by X, must be inexact 

368 if not np.issubdtype(blockVectorX.dtype, np.inexact): 

369 warnings.warn( 

370 f"Data type for argument X is {blockVectorX.dtype}, " 

371 f"which is not inexact, so casted to np.float32.", 

372 UserWarning, stacklevel=2 

373 ) 

374 blockVectorX = np.asarray(blockVectorX, dtype=np.float32) 

375 

376 if retLambdaHistory: 

377 lambdaHistory = np.zeros((maxiter + 3, sizeX), 

378 dtype=blockVectorX.dtype) 

379 if retResidualNormsHistory: 

380 residualNormsHistory = np.zeros((maxiter + 3, sizeX), 

381 dtype=blockVectorX.dtype) 

382 

383 if verbosityLevel: 

384 aux = "Solving " 

385 if B is None: 

386 aux += "standard" 

387 else: 

388 aux += "generalized" 

389 aux += " eigenvalue problem with" 

390 if M is None: 

391 aux += "out" 

392 aux += " preconditioning\n\n" 

393 aux += "matrix size %d\n" % n 

394 aux += "block size %d\n\n" % sizeX 

395 if blockVectorY is None: 

396 aux += "No constraints\n\n" 

397 else: 

398 if sizeY > 1: 

399 aux += "%d constraints\n\n" % sizeY 

400 else: 

401 aux += "%d constraint\n\n" % sizeY 

402 print(aux) 

403 

404 if (n - sizeY) < (5 * sizeX): 

405 warnings.warn( 

406 f"The problem size {n} minus the constraints size {sizeY} " 

407 f"is too small relative to the block size {sizeX}. " 

408 f"Using a dense eigensolver instead of LOBPCG iterations." 

409 f"No output of the history of the iterations.", 

410 UserWarning, stacklevel=2 

411 ) 

412 

413 sizeX = min(sizeX, n) 

414 

415 if blockVectorY is not None: 

416 raise NotImplementedError( 

417 "The dense eigensolver does not support constraints." 

418 ) 

419 

420 # Define the closed range of indices of eigenvalues to return. 

421 if largest: 

422 eigvals = (n - sizeX, n - 1) 

423 else: 

424 eigvals = (0, sizeX - 1) 

425 

426 try: 

427 if isinstance(A, LinearOperator): 

428 A = A(np.eye(n, dtype=int)) 

429 elif callable(A): 

430 A = A(np.eye(n, dtype=int)) 

431 if A.shape != (n, n): 

432 raise ValueError( 

433 f"The shape {A.shape} of the primary matrix\n" 

434 f"defined by a callable object is wrong.\n" 

435 ) 

436 elif isspmatrix(A): 

437 A = A.toarray() 

438 else: 

439 A = np.asarray(A) 

440 except Exception as e: 

441 raise Exception( 

442 f"Primary MatMul call failed with error\n" 

443 f"{e}\n") 

444 

445 if B is not None: 

446 try: 

447 if isinstance(B, LinearOperator): 

448 B = B(np.eye(n, dtype=int)) 

449 elif callable(B): 

450 B = B(np.eye(n, dtype=int)) 

451 if B.shape != (n, n): 

452 raise ValueError( 

453 f"The shape {B.shape} of the secondary matrix\n" 

454 f"defined by a callable object is wrong.\n" 

455 ) 

456 elif isspmatrix(B): 

457 B = B.toarray() 

458 else: 

459 B = np.asarray(B) 

460 except Exception as e: 

461 raise Exception( 

462 f"Secondary MatMul call failed with error\n" 

463 f"{e}\n") 

464 

465 try: 

466 vals, vecs = eigh(A, 

467 B, 

468 subset_by_index=eigvals, 

469 check_finite=False) 

470 if largest: 

471 # Reverse order to be compatible with eigs() in 'LM' mode. 

472 vals = vals[::-1] 

473 vecs = vecs[:, ::-1] 

474 

475 return vals, vecs 

476 except Exception as e: 

477 raise Exception( 

478 f"Dense eigensolver failed with error\n" 

479 f"{e}\n" 

480 ) 

481 

482 if (residualTolerance is None) or (residualTolerance <= 0.0): 

483 residualTolerance = np.sqrt(np.finfo(blockVectorX.dtype).eps) * n 

484 

485 A = _makeMatMat(A) 

486 B = _makeMatMat(B) 

487 M = _makeMatMat(M) 

488 

489 # Apply constraints to X. 

490 if blockVectorY is not None: 

491 

492 if B is not None: 

493 blockVectorBY = B(blockVectorY) 

494 if blockVectorBY.shape != blockVectorY.shape: 

495 raise ValueError( 

496 f"The shape {blockVectorY.shape} " 

497 f"of the constraint not preserved\n" 

498 f"and changed to {blockVectorBY.shape} " 

499 f"after multiplying by the secondary matrix.\n" 

500 ) 

501 else: 

502 blockVectorBY = blockVectorY 

503 

504 # gramYBY is a dense array. 

505 gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY) 

506 try: 

507 # gramYBY is a Cholesky factor from now on... 

508 gramYBY = cho_factor(gramYBY) 

509 except LinAlgError as e: 

510 raise ValueError("Linearly dependent constraints") from e 

511 

512 _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY) 

513 

514 ## 

515 # B-orthonormalize X. 

516 blockVectorX, blockVectorBX, _, _ = _b_orthonormalize( 

517 B, blockVectorX, verbosityLevel=verbosityLevel) 

518 if blockVectorX is None: 

519 raise ValueError("Linearly dependent initial approximations") 

520 

521 ## 

522 # Compute the initial Ritz vectors: solve the eigenproblem. 

523 blockVectorAX = A(blockVectorX) 

524 if blockVectorAX.shape != blockVectorX.shape: 

525 raise ValueError( 

526 f"The shape {blockVectorX.shape} " 

527 f"of the initial approximations not preserved\n" 

528 f"and changed to {blockVectorAX.shape} " 

529 f"after multiplying by the primary matrix.\n" 

530 ) 

531 

532 gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX) 

533 

534 _lambda, eigBlockVector = eigh(gramXAX, check_finite=False) 

535 ii = _get_indx(_lambda, sizeX, largest) 

536 _lambda = _lambda[ii] 

537 if retLambdaHistory: 

538 lambdaHistory[0, :] = _lambda 

539 

540 eigBlockVector = np.asarray(eigBlockVector[:, ii]) 

541 blockVectorX = np.dot(blockVectorX, eigBlockVector) 

542 blockVectorAX = np.dot(blockVectorAX, eigBlockVector) 

543 if B is not None: 

544 blockVectorBX = np.dot(blockVectorBX, eigBlockVector) 

545 

546 ## 

547 # Active index set. 

548 activeMask = np.ones((sizeX,), dtype=bool) 

549 

550 ## 

551 # Main iteration loop. 

552 

553 blockVectorP = None # set during iteration 

554 blockVectorAP = None 

555 blockVectorBP = None 

556 

557 smallestResidualNorm = np.abs(np.finfo(blockVectorX.dtype).max) 

558 

559 iterationNumber = -1 

560 restart = True 

561 forcedRestart = False 

562 explicitGramFlag = False 

563 while iterationNumber < maxiter: 

564 iterationNumber += 1 

565 

566 if B is not None: 

567 aux = blockVectorBX * _lambda[np.newaxis, :] 

568 else: 

569 aux = blockVectorX * _lambda[np.newaxis, :] 

570 

571 blockVectorR = blockVectorAX - aux 

572 

573 aux = np.sum(blockVectorR.conj() * blockVectorR, 0) 

574 residualNorms = np.sqrt(np.abs(aux)) 

575 if retResidualNormsHistory: 

576 residualNormsHistory[iterationNumber, :] = residualNorms 

577 residualNorm = np.sum(np.abs(residualNorms)) / sizeX 

578 

579 if residualNorm < smallestResidualNorm: 

580 smallestResidualNorm = residualNorm 

581 bestIterationNumber = iterationNumber 

582 bestblockVectorX = blockVectorX 

583 elif residualNorm > 2**restartControl * smallestResidualNorm: 

584 forcedRestart = True 

585 blockVectorAX = A(blockVectorX) 

586 if blockVectorAX.shape != blockVectorX.shape: 

587 raise ValueError( 

588 f"The shape {blockVectorX.shape} " 

589 f"of the restarted iterate not preserved\n" 

590 f"and changed to {blockVectorAX.shape} " 

591 f"after multiplying by the primary matrix.\n" 

592 ) 

593 if B is not None: 

594 blockVectorBX = B(blockVectorX) 

595 if blockVectorBX.shape != blockVectorX.shape: 

596 raise ValueError( 

597 f"The shape {blockVectorX.shape} " 

598 f"of the restarted iterate not preserved\n" 

599 f"and changed to {blockVectorBX.shape} " 

600 f"after multiplying by the secondary matrix.\n" 

601 ) 

602 

603 ii = np.where(residualNorms > residualTolerance, True, False) 

604 activeMask = activeMask & ii 

605 currentBlockSize = activeMask.sum() 

606 

607 if verbosityLevel: 

608 print(f"iteration {iterationNumber}") 

609 print(f"current block size: {currentBlockSize}") 

610 print(f"eigenvalue(s):\n{_lambda}") 

611 print(f"residual norm(s):\n{residualNorms}") 

612 

613 if currentBlockSize == 0: 

614 break 

615 

616 activeBlockVectorR = _as2d(blockVectorR[:, activeMask]) 

617 

618 if iterationNumber > 0: 

619 activeBlockVectorP = _as2d(blockVectorP[:, activeMask]) 

620 activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask]) 

621 if B is not None: 

622 activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask]) 

623 

624 if M is not None: 

625 # Apply preconditioner T to the active residuals. 

626 activeBlockVectorR = M(activeBlockVectorR) 

627 

628 ## 

629 # Apply constraints to the preconditioned residuals. 

630 if blockVectorY is not None: 

631 _applyConstraints(activeBlockVectorR, 

632 gramYBY, 

633 blockVectorBY, 

634 blockVectorY) 

635 

636 ## 

637 # B-orthogonalize the preconditioned residuals to X. 

638 if B is not None: 

639 activeBlockVectorR = activeBlockVectorR - ( 

640 blockVectorX @ 

641 (blockVectorBX.T.conj() @ activeBlockVectorR) 

642 ) 

643 else: 

644 activeBlockVectorR = activeBlockVectorR - ( 

645 blockVectorX @ 

646 (blockVectorX.T.conj() @ activeBlockVectorR) 

647 ) 

648 

649 ## 

650 # B-orthonormalize the preconditioned residuals. 

651 aux = _b_orthonormalize( 

652 B, activeBlockVectorR, verbosityLevel=verbosityLevel) 

653 activeBlockVectorR, activeBlockVectorBR, _, _ = aux 

654 

655 if activeBlockVectorR is None: 

656 warnings.warn( 

657 f"Failed at iteration {iterationNumber} with accuracies " 

658 f"{residualNorms}\n not reaching the requested " 

659 f"tolerance {residualTolerance}.", 

660 UserWarning, stacklevel=2 

661 ) 

662 break 

663 activeBlockVectorAR = A(activeBlockVectorR) 

664 

665 if iterationNumber > 0: 

666 if B is not None: 

667 aux = _b_orthonormalize( 

668 B, activeBlockVectorP, activeBlockVectorBP, 

669 verbosityLevel=verbosityLevel 

670 ) 

671 activeBlockVectorP, activeBlockVectorBP, invR, normal = aux 

672 else: 

673 aux = _b_orthonormalize(B, activeBlockVectorP, 

674 verbosityLevel=verbosityLevel) 

675 activeBlockVectorP, _, invR, normal = aux 

676 # Function _b_orthonormalize returns None if Cholesky fails 

677 if activeBlockVectorP is not None: 

678 activeBlockVectorAP = activeBlockVectorAP / normal 

679 activeBlockVectorAP = np.dot(activeBlockVectorAP, invR) 

680 restart = forcedRestart 

681 else: 

682 restart = True 

683 

684 ## 

685 # Perform the Rayleigh Ritz Procedure: 

686 # Compute symmetric Gram matrices: 

687 

688 if activeBlockVectorAR.dtype == "float32": 

689 myeps = 1 

690 else: 

691 myeps = np.sqrt(np.finfo(activeBlockVectorR.dtype).eps) 

692 

693 if residualNorms.max() > myeps and not explicitGramFlag: 

694 explicitGramFlag = False 

695 else: 

696 # Once explicitGramFlag, forever explicitGramFlag. 

697 explicitGramFlag = True 

698 

699 # Shared memory assingments to simplify the code 

700 if B is None: 

701 blockVectorBX = blockVectorX 

702 activeBlockVectorBR = activeBlockVectorR 

703 if not restart: 

704 activeBlockVectorBP = activeBlockVectorP 

705 

706 # Common submatrices: 

707 gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR) 

708 gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR) 

709 

710 gramDtype = activeBlockVectorAR.dtype 

711 if explicitGramFlag: 

712 gramRAR = (gramRAR + gramRAR.T.conj()) / 2 

713 gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX) 

714 gramXAX = (gramXAX + gramXAX.T.conj()) / 2 

715 gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX) 

716 gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR) 

717 gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR) 

718 else: 

719 gramXAX = np.diag(_lambda).astype(gramDtype) 

720 gramXBX = np.eye(sizeX, dtype=gramDtype) 

721 gramRBR = np.eye(currentBlockSize, dtype=gramDtype) 

722 gramXBR = np.zeros((sizeX, currentBlockSize), dtype=gramDtype) 

723 

724 if not restart: 

725 gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP) 

726 gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP) 

727 gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP) 

728 gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP) 

729 gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP) 

730 if explicitGramFlag: 

731 gramPAP = (gramPAP + gramPAP.T.conj()) / 2 

732 gramPBP = np.dot(activeBlockVectorP.T.conj(), 

733 activeBlockVectorBP) 

734 else: 

735 gramPBP = np.eye(currentBlockSize, dtype=gramDtype) 

736 

737 gramA = bmat( 

738 [ 

739 [gramXAX, gramXAR, gramXAP], 

740 [gramXAR.T.conj(), gramRAR, gramRAP], 

741 [gramXAP.T.conj(), gramRAP.T.conj(), gramPAP], 

742 ] 

743 ) 

744 gramB = bmat( 

745 [ 

746 [gramXBX, gramXBR, gramXBP], 

747 [gramXBR.T.conj(), gramRBR, gramRBP], 

748 [gramXBP.T.conj(), gramRBP.T.conj(), gramPBP], 

749 ] 

750 ) 

751 

752 _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel) 

753 

754 try: 

755 _lambda, eigBlockVector = eigh(gramA, 

756 gramB, 

757 check_finite=False) 

758 except LinAlgError as e: 

759 # raise ValueError("eigh failed in lobpcg iterations") from e 

760 if verbosityLevel: 

761 warnings.warn( 

762 f"eigh failed at iteration {iterationNumber} \n" 

763 f"with error {e} causing a restart.\n", 

764 UserWarning, stacklevel=2 

765 ) 

766 # try again after dropping the direction vectors P from RR 

767 restart = True 

768 

769 if restart: 

770 gramA = bmat([[gramXAX, gramXAR], [gramXAR.T.conj(), gramRAR]]) 

771 gramB = bmat([[gramXBX, gramXBR], [gramXBR.T.conj(), gramRBR]]) 

772 

773 _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel) 

774 

775 try: 

776 _lambda, eigBlockVector = eigh(gramA, 

777 gramB, 

778 check_finite=False) 

779 except LinAlgError as e: 

780 # raise ValueError("eigh failed in lobpcg iterations") from e 

781 warnings.warn( 

782 f"eigh failed at iteration {iterationNumber} with error\n" 

783 f"{e}\n", 

784 UserWarning, stacklevel=2 

785 ) 

786 break 

787 

788 ii = _get_indx(_lambda, sizeX, largest) 

789 _lambda = _lambda[ii] 

790 eigBlockVector = eigBlockVector[:, ii] 

791 if retLambdaHistory: 

792 lambdaHistory[iterationNumber + 1, :] = _lambda 

793 

794 # Compute Ritz vectors. 

795 if B is not None: 

796 if not restart: 

797 eigBlockVectorX = eigBlockVector[:sizeX] 

798 eigBlockVectorR = eigBlockVector[sizeX: 

799 sizeX + currentBlockSize] 

800 eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:] 

801 

802 pp = np.dot(activeBlockVectorR, eigBlockVectorR) 

803 pp += np.dot(activeBlockVectorP, eigBlockVectorP) 

804 

805 app = np.dot(activeBlockVectorAR, eigBlockVectorR) 

806 app += np.dot(activeBlockVectorAP, eigBlockVectorP) 

807 

808 bpp = np.dot(activeBlockVectorBR, eigBlockVectorR) 

809 bpp += np.dot(activeBlockVectorBP, eigBlockVectorP) 

810 else: 

811 eigBlockVectorX = eigBlockVector[:sizeX] 

812 eigBlockVectorR = eigBlockVector[sizeX:] 

813 

814 pp = np.dot(activeBlockVectorR, eigBlockVectorR) 

815 app = np.dot(activeBlockVectorAR, eigBlockVectorR) 

816 bpp = np.dot(activeBlockVectorBR, eigBlockVectorR) 

817 

818 blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp 

819 blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app 

820 blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp 

821 

822 blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp 

823 

824 else: 

825 if not restart: 

826 eigBlockVectorX = eigBlockVector[:sizeX] 

827 eigBlockVectorR = eigBlockVector[sizeX: 

828 sizeX + currentBlockSize] 

829 eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:] 

830 

831 pp = np.dot(activeBlockVectorR, eigBlockVectorR) 

832 pp += np.dot(activeBlockVectorP, eigBlockVectorP) 

833 

834 app = np.dot(activeBlockVectorAR, eigBlockVectorR) 

835 app += np.dot(activeBlockVectorAP, eigBlockVectorP) 

836 else: 

837 eigBlockVectorX = eigBlockVector[:sizeX] 

838 eigBlockVectorR = eigBlockVector[sizeX:] 

839 

840 pp = np.dot(activeBlockVectorR, eigBlockVectorR) 

841 app = np.dot(activeBlockVectorAR, eigBlockVectorR) 

842 

843 blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp 

844 blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app 

845 

846 blockVectorP, blockVectorAP = pp, app 

847 

848 if B is not None: 

849 aux = blockVectorBX * _lambda[np.newaxis, :] 

850 else: 

851 aux = blockVectorX * _lambda[np.newaxis, :] 

852 

853 blockVectorR = blockVectorAX - aux 

854 

855 aux = np.sum(blockVectorR.conj() * blockVectorR, 0) 

856 residualNorms = np.sqrt(np.abs(aux)) 

857 # Use old lambda in case of early loop exit. 

858 if retLambdaHistory: 

859 lambdaHistory[iterationNumber + 1, :] = _lambda 

860 if retResidualNormsHistory: 

861 residualNormsHistory[iterationNumber + 1, :] = residualNorms 

862 residualNorm = np.sum(np.abs(residualNorms)) / sizeX 

863 if residualNorm < smallestResidualNorm: 

864 smallestResidualNorm = residualNorm 

865 bestIterationNumber = iterationNumber + 1 

866 bestblockVectorX = blockVectorX 

867 

868 if np.max(np.abs(residualNorms)) > residualTolerance: 

869 warnings.warn( 

870 f"Exited at iteration {iterationNumber} with accuracies \n" 

871 f"{residualNorms}\n" 

872 f"not reaching the requested tolerance {residualTolerance}.\n" 

873 f"Use iteration {bestIterationNumber} instead with accuracy \n" 

874 f"{smallestResidualNorm}.\n", 

875 UserWarning, stacklevel=2 

876 ) 

877 

878 if verbosityLevel: 

879 print(f"Final iterative eigenvalue(s):\n{_lambda}") 

880 print(f"Final iterative residual norm(s):\n{residualNorms}") 

881 

882 blockVectorX = bestblockVectorX 

883 # Making eigenvectors "exactly" satisfy the blockVectorY constrains 

884 if blockVectorY is not None: 

885 _applyConstraints(blockVectorX, 

886 gramYBY, 

887 blockVectorBY, 

888 blockVectorY) 

889 

890 # Making eigenvectors "exactly" othonormalized by final "exact" RR 

891 blockVectorAX = A(blockVectorX) 

892 if blockVectorAX.shape != blockVectorX.shape: 

893 raise ValueError( 

894 f"The shape {blockVectorX.shape} " 

895 f"of the postprocessing iterate not preserved\n" 

896 f"and changed to {blockVectorAX.shape} " 

897 f"after multiplying by the primary matrix.\n" 

898 ) 

899 gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX) 

900 

901 blockVectorBX = blockVectorX 

902 if B is not None: 

903 blockVectorBX = B(blockVectorX) 

904 if blockVectorBX.shape != blockVectorX.shape: 

905 raise ValueError( 

906 f"The shape {blockVectorX.shape} " 

907 f"of the postprocessing iterate not preserved\n" 

908 f"and changed to {blockVectorBX.shape} " 

909 f"after multiplying by the secondary matrix.\n" 

910 ) 

911 

912 gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX) 

913 _handle_gramA_gramB_verbosity(gramXAX, gramXBX, verbosityLevel) 

914 gramXAX = (gramXAX + gramXAX.T.conj()) / 2 

915 gramXBX = (gramXBX + gramXBX.T.conj()) / 2 

916 try: 

917 _lambda, eigBlockVector = eigh(gramXAX, 

918 gramXBX, 

919 check_finite=False) 

920 except LinAlgError as e: 

921 raise ValueError("eigh has failed in lobpcg postprocessing") from e 

922 

923 ii = _get_indx(_lambda, sizeX, largest) 

924 _lambda = _lambda[ii] 

925 eigBlockVector = np.asarray(eigBlockVector[:, ii]) 

926 

927 blockVectorX = np.dot(blockVectorX, eigBlockVector) 

928 blockVectorAX = np.dot(blockVectorAX, eigBlockVector) 

929 

930 if B is not None: 

931 blockVectorBX = np.dot(blockVectorBX, eigBlockVector) 

932 aux = blockVectorBX * _lambda[np.newaxis, :] 

933 else: 

934 aux = blockVectorX * _lambda[np.newaxis, :] 

935 

936 blockVectorR = blockVectorAX - aux 

937 

938 aux = np.sum(blockVectorR.conj() * blockVectorR, 0) 

939 residualNorms = np.sqrt(np.abs(aux)) 

940 

941 if retLambdaHistory: 

942 lambdaHistory[bestIterationNumber + 1, :] = _lambda 

943 if retResidualNormsHistory: 

944 residualNormsHistory[bestIterationNumber + 1, :] = residualNorms 

945 

946 if retLambdaHistory: 

947 lambdaHistory = lambdaHistory[ 

948 : bestIterationNumber + 2, :] 

949 if retResidualNormsHistory: 

950 residualNormsHistory = residualNormsHistory[ 

951 : bestIterationNumber + 2, :] 

952 

953 if np.max(np.abs(residualNorms)) > residualTolerance: 

954 warnings.warn( 

955 f"Exited postprocessing with accuracies \n" 

956 f"{residualNorms}\n" 

957 f"not reaching the requested tolerance {residualTolerance}.", 

958 UserWarning, stacklevel=2 

959 ) 

960 

961 if verbosityLevel: 

962 print(f"Final postprocessing eigenvalue(s):\n{_lambda}") 

963 print(f"Final residual norm(s):\n{residualNorms}") 

964 

965 if retLambdaHistory: 

966 lambdaHistory = np.vsplit(lambdaHistory, np.shape(lambdaHistory)[0]) 

967 lambdaHistory = [np.squeeze(i) for i in lambdaHistory] 

968 if retResidualNormsHistory: 

969 residualNormsHistory = np.vsplit(residualNormsHistory, 

970 np.shape(residualNormsHistory)[0]) 

971 residualNormsHistory = [np.squeeze(i) for i in residualNormsHistory] 

972 

973 if retLambdaHistory: 

974 if retResidualNormsHistory: 

975 return _lambda, blockVectorX, lambdaHistory, residualNormsHistory 

976 else: 

977 return _lambda, blockVectorX, lambdaHistory 

978 else: 

979 if retResidualNormsHistory: 

980 return _lambda, blockVectorX, residualNormsHistory 

981 else: 

982 return _lambda, blockVectorX