1"""
2Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
4References
5----------
6.. [1] A. V. Knyazev (2001),
7 Toward the Optimal Preconditioned Eigensolver: Locally Optimal
8 Block Preconditioned Conjugate Gradient Method.
9 SIAM Journal on Scientific Computing 23, no. 2,
10 pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
12.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
13 Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
14 in hypre and PETSc. https://arxiv.org/abs/0705.2626
16.. [3] A. V. Knyazev's C and MATLAB implementations:
17 https://github.com/lobpcg/blopex
18"""

import numpy as np
from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky,
                          LinAlgError)
from scipy.sparse.linalg import aslinearoperator
from numpy import block as bmat

__all__ = ['lobpcg']


def _report_nonhermitian(M, name):
    """
    Report if `M` is not a Hermitian matrix given its type.
    """
    from scipy.linalg import norm

    md = M - M.T.conj()

    nmd = norm(md, 1)
    tol = 10 * np.finfo(M.dtype).eps
    tol = max(tol, tol * norm(M, 1))
    if nmd > tol:
        print('matrix %s of the type %s is not sufficiently Hermitian:'
              % (name, M.dtype))
        print('condition: %e < %e' % (tol, nmd))


def _as2d(ar):
    """
    If the input array is 2D return it, if it is 1D, append a dimension,
    making it a column vector.
    """
    if ar.ndim == 2:
        return ar
    else:  # Assume 1!
        aux = np.array(ar, copy=False)
        aux.shape = (ar.shape[0], 1)
        return aux
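

# Illustration only (not part of the public API): _as2d promotes a 1-D array
# to a column vector and returns 2-D input unchanged:
#
#     >>> _as2d(np.arange(3.0)).shape
#     (3, 1)
#     >>> _as2d(np.ones((4, 2))).shape
#     (4, 2)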


def _makeOperator(operatorInput, expectedShape):
    """Takes a dense numpy array or a sparse matrix or
    a function and makes an operator performing matrix * blockvector
    products."""
    if operatorInput is None:
        return None
    else:
        operator = aslinearoperator(operatorInput)

    if operator.shape != expectedShape:
        raise ValueError('operator has invalid shape')

    return operator
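

# Illustration only: every accepted input (dense array, sparse matrix, or
# LinearOperator) is normalized to a scipy LinearOperator, so calling the
# result applies the operator to a whole block of vectors at once:
#
#     >>> op = _makeOperator(np.eye(4), (4, 4))
#     >>> op(np.ones((4, 2))).shape
#     (4, 2)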


def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
    """Changes blockVectorV in place."""
    YBV = np.dot(blockVectorBY.T.conj(), blockVectorV)
    tmp = cho_solve(factYBY, YBV)
    blockVectorV -= np.dot(blockVectorY, tmp)
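

# In matrix terms, _applyConstraints computes
#     V -= Y @ inv(Y^H B Y) @ (B Y)^H @ V,
# i.e. it makes V B-orthogonal to span(Y); factYBY is the cho_factor
# Cholesky factorization of Y^H B Y.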


def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
    """B-orthonormalize the given block vector using Cholesky."""
    normalization = blockVectorV.max(axis=0) + np.finfo(blockVectorV.dtype).eps
    blockVectorV = blockVectorV / normalization
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    else:
        blockVectorBV = blockVectorBV / normalization
    VBV = np.matmul(blockVectorV.T.conj(), blockVectorBV)
    try:
        # VBV is a Cholesky factor from now on...
        VBV = cholesky(VBV, overwrite_a=True)
        VBV = inv(VBV, overwrite_a=True)
        blockVectorV = np.matmul(blockVectorV, VBV)
        # blockVectorV = (cho_solve((VBV.T, True), blockVectorV.T)).T
        if B is not None:
            blockVectorBV = np.matmul(blockVectorBV, VBV)
            # blockVectorBV = (cho_solve((VBV.T, True), blockVectorBV.T)).T
        else:
            blockVectorBV = None
    except LinAlgError:
        # raise ValueError('Cholesky has failed')
        blockVectorV = None
        blockVectorBV = None
        VBV = None

    if retInvR:
        return blockVectorV, blockVectorBV, VBV, normalization
    else:
        return blockVectorV, blockVectorBV
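

# A minimal sketch of the postcondition in the standard case (B is None):
# the returned block satisfies V^H V = I up to round-off:
#
#     >>> rng = np.random.RandomState(0)
#     >>> V, _ = _b_orthonormalize(None, rng.rand(10, 3))
#     >>> np.allclose(V.T.conj() @ V, np.eye(3))
#     True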


def _get_indx(_lambda, num, largest):
    """Get `num` indices into `_lambda` depending on `largest` option."""
    ii = np.argsort(_lambda)
    if largest:
        ii = ii[:-num-1:-1]
    else:
        ii = ii[:num]

    return ii
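

# Illustration only: indices of the `num` largest values come out in
# descending order of value, those of the smallest in ascending order:
#
#     >>> _get_indx(np.array([3.0, 1.0, 2.0]), 2, largest=True)
#     array([0, 2])
#     >>> _get_indx(np.array([3.0, 1.0, 2.0]), 2, largest=False)
#     array([1, 2])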


def lobpcg(A, X,
           B=None, M=None, Y=None,
           tol=None, maxiter=None,
           largest=True, verbosityLevel=0,
           retLambdaHistory=False, retResidualNormsHistory=False):
    """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)

    LOBPCG is a preconditioned eigensolver for large symmetric positive
    definite (SPD) generalized eigenproblems.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The symmetric linear operator of the problem, usually a
        sparse matrix. Often called the "stiffness matrix".
    X : ndarray, float32 or float64
        Initial approximation to the ``k`` eigenvectors (non-sparse). If `A`
        has ``shape=(n,n)`` then `X` should have shape ``shape=(n,k)``.
    B : {dense matrix, sparse matrix, LinearOperator}, optional
        The right hand side operator in a generalized eigenproblem.
        By default, ``B = Identity``. Often called the "mass matrix".
    M : {dense matrix, sparse matrix, LinearOperator}, optional
        Preconditioner to `A`; by default ``M = Identity``.
        `M` should approximate the inverse of `A`.
    Y : ndarray, float32 or float64, optional
        An n-by-sizeY matrix of constraints (non-sparse), sizeY < n.
        The iterations will be performed in the B-orthogonal complement
        of the column-space of Y. Y must be full rank.
    tol : scalar, optional
        Solver tolerance (stopping criterion).
        The default is ``tol=n*sqrt(eps)``.
    maxiter : int, optional
        Maximum number of iterations. The default is ``maxiter = 20``.
    largest : bool, optional
        When True, solve for the largest eigenvalues, otherwise the smallest.
    verbosityLevel : int, optional
        Controls solver output. The default is ``verbosityLevel=0``.
    retLambdaHistory : bool, optional
        Whether to return eigenvalue history. Default is False.
    retResidualNormsHistory : bool, optional
        Whether to return history of residual norms. Default is False.

    Returns
    -------
    w : ndarray
        Array of ``k`` eigenvalues.
    v : ndarray
        An array of ``k`` eigenvectors. `v` has the same shape as `X`.
    lambdas : list of ndarray, optional
        The eigenvalue history, if `retLambdaHistory` is True.
    rnorms : list of ndarray, optional
        The history of residual norms, if `retResidualNormsHistory` is True.

    Notes
    -----
    If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are True,
    the return tuple has the following format
    ``(lambda, V, lambda history, residual norms history)``.

    In the following ``n`` denotes the matrix size and ``m`` the number
    of required eigenvalues (smallest or largest).

    The LOBPCG code internally solves eigenproblems of the size ``3m`` on
    every iteration by calling the "standard" dense eigensolver, so if ``m``
    is not small enough compared to ``n``, it does not make sense to call the
    LOBPCG code, but rather one should use the "standard" eigensolver, e.g.
    a numpy or scipy function in this case.
    If one calls the LOBPCG algorithm for ``5m > n``, it will most likely
    break internally, so the code tries to call the standard function instead.

    It is not that ``n`` should be large for the LOBPCG to work, but rather
    the ratio ``n / m`` should be large. If you call LOBPCG with ``m=1``
    and ``n=10``, it works, though ``n`` is small. The method is intended
    for extremely large ``n / m``, see e.g., reference [28] in
    https://arxiv.org/abs/0705.2626

    The convergence speed depends basically on two factors:

    1. How well relatively separated the sought eigenvalues are from the rest
       of the eigenvalues. One can try to vary ``m`` to make this better.

    2. How well conditioned the problem is. This can be changed by using
       proper preconditioning. For example, a rod vibration test problem
       (under the tests directory) is ill-conditioned for large ``n``, so
       convergence will be slow, unless efficient preconditioning is used.
       For this specific problem, a good simple preconditioner function would
       be a linear solve for `A`, which is easy to code since `A` is
       tridiagonal.

    References
    ----------
    .. [1] A. V. Knyazev (2001),
           Toward the Optimal Preconditioned Eigensolver: Locally Optimal
           Block Preconditioned Conjugate Gradient Method.
           SIAM Journal on Scientific Computing 23, no. 2,
           pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124

    .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
           (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
           (BLOPEX) in hypre and PETSc. https://arxiv.org/abs/0705.2626

    .. [3] A. V. Knyazev's C and MATLAB implementations:
           https://bitbucket.org/joseroman/blopex

    Examples
    --------

    Solve ``A x = lambda x`` with constraints and preconditioning.

    >>> import numpy as np
    >>> from scipy.sparse import spdiags, issparse
    >>> from scipy.sparse.linalg import lobpcg, LinearOperator
    >>> n = 100
    >>> vals = np.arange(1, n + 1)
    >>> A = spdiags(vals, 0, n, n)
    >>> A.toarray()
    array([[  1.,   0.,   0., ...,   0.,   0.,   0.],
           [  0.,   2.,   0., ...,   0.,   0.,   0.],
           [  0.,   0.,   3., ...,   0.,   0.,   0.],
           ...,
           [  0.,   0.,   0., ...,  98.,   0.,   0.],
           [  0.,   0.,   0., ...,   0.,  99.,   0.],
           [  0.,   0.,   0., ...,   0.,   0., 100.]])

    Constraints:

    >>> Y = np.eye(n, 3)

    Initial guess for eigenvectors, should have linearly independent
    columns. Column dimension = number of requested eigenvalues.

    >>> X = np.random.rand(n, 3)

    The preconditioner is the inverse of `A` in this example:

    >>> invA = spdiags([1./vals], 0, n, n)

    The preconditioner must be defined by a function:

    >>> def precond( x ):
    ...     return invA @ x

    The argument x of the preconditioner function is a matrix inside `lobpcg`,
    thus the use of the matrix-matrix product ``@``.

    The preconditioner function is passed to lobpcg as a `LinearOperator`:

    >>> M = LinearOperator(matvec=precond, matmat=precond,
    ...                    shape=(n, n), dtype=float)

    Let us now solve the eigenvalue problem for the matrix A:

    >>> eigenvalues, _ = lobpcg(A, X, Y=Y, M=M, largest=False)
    >>> eigenvalues
    array([4., 5., 6.])

    Note that the vectors passed in Y are the eigenvectors of the 3 smallest
    eigenvalues. The results returned are orthogonal to those.
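
    The history of residual norms can also be requested; the exact norms
    depend on the random initial guess, so only the presence of the history
    is checked in this sketch:

    >>> _, _, rnorms = lobpcg(A, X, Y=Y, M=M, largest=False,
    ...                       retResidualNormsHistory=True)
    >>> len(rnorms) > 0
    True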
285 """
    blockVectorX = X
    blockVectorY = Y
    residualTolerance = tol
    if maxiter is None:
        maxiter = 20

    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
    else:
        sizeY = 0

    # Block size.
    if len(blockVectorX.shape) != 2:
        raise ValueError('expected rank-2 array for argument X')

    n, sizeX = blockVectorX.shape

    if verbosityLevel:
        aux = "Solving "
        if B is None:
            aux += "standard"
        else:
            aux += "generalized"
        aux += " eigenvalue problem with"
        if M is None:
            aux += "out"
        aux += " preconditioning\n\n"
        aux += "matrix size %d\n" % n
        aux += "block size %d\n\n" % sizeX
        if blockVectorY is None:
            aux += "No constraints\n\n"
        else:
            if sizeY > 1:
                aux += "%d constraints\n\n" % sizeY
            else:
                aux += "%d constraint\n\n" % sizeY
        print(aux)

    A = _makeOperator(A, (n, n))
    B = _makeOperator(B, (n, n))
    M = _makeOperator(M, (n, n))

    if (n - sizeY) < (5 * sizeX):
        # warn('The problem size is small compared to the block size.'
        #      ' Using dense eigensolver instead of LOBPCG.')

        sizeX = min(sizeX, n)

        if blockVectorY is not None:
            raise NotImplementedError('The dense eigensolver '
                                      'does not support constraints.')

        # Define the closed range of indices of eigenvalues to return.
        if largest:
            eigvals = (n - sizeX, n - 1)
        else:
            eigvals = (0, sizeX - 1)

        A_dense = A(np.eye(n, dtype=A.dtype))
        B_dense = None if B is None else B(np.eye(n, dtype=B.dtype))

        vals, vecs = eigh(A_dense, B_dense, eigvals=eigvals,
                          check_finite=False)
        if largest:
            # Reverse order to be compatible with eigs() in 'LM' mode.
            vals = vals[::-1]
            vecs = vecs[:, ::-1]

        return vals, vecs

    if (residualTolerance is None) or (residualTolerance <= 0.0):
        residualTolerance = np.sqrt(1e-15) * n

    # Apply constraints to X.
    if blockVectorY is not None:

        if B is not None:
            blockVectorBY = B(blockVectorY)
        else:
            blockVectorBY = blockVectorY

        # gramYBY is a dense array.
        gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
        try:
            # gramYBY is a Cholesky factor from now on...
            gramYBY = cho_factor(gramYBY)
        except LinAlgError:
            raise ValueError('cannot handle linearly dependent constraints')

        _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)

    ##
    # B-orthonormalize X.
    blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)

    ##
    # Compute the initial Ritz vectors: solve the eigenproblem.
    blockVectorAX = A(blockVectorX)
    gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)

    _lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
    ii = _get_indx(_lambda, sizeX, largest)
    _lambda = _lambda[ii]

    eigBlockVector = np.asarray(eigBlockVector[:, ii])
    blockVectorX = np.dot(blockVectorX, eigBlockVector)
    blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
    if B is not None:
        blockVectorBX = np.dot(blockVectorBX, eigBlockVector)

    ##
    # Active index set.
    activeMask = np.ones((sizeX,), dtype=bool)

    lambdaHistory = [_lambda]
    residualNormsHistory = []

    previousBlockSize = sizeX
    ident = np.eye(sizeX, dtype=A.dtype)
    ident0 = np.eye(sizeX, dtype=A.dtype)

    ##
    # Main iteration loop.

    blockVectorP = None  # set during iteration
    blockVectorAP = None
    blockVectorBP = None

    iterationNumber = -1
    restart = True
    explicitGramFlag = False
    while iterationNumber < maxiter:
        iterationNumber += 1
        if verbosityLevel > 0:
            print('iteration %d' % iterationNumber)

        if B is not None:
            aux = blockVectorBX * _lambda[np.newaxis, :]
        else:
            aux = blockVectorX * _lambda[np.newaxis, :]

        blockVectorR = blockVectorAX - aux

        aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
        residualNorms = np.sqrt(aux)

        residualNormsHistory.append(residualNorms)

        ii = np.where(residualNorms > residualTolerance, True, False)
        activeMask = activeMask & ii
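        # Soft locking: a column whose residual norm has dropped below
        # residualTolerance leaves the active set, so its residual and
        # conjugate direction are excluded from the expanding search
        # subspace, shrinking the Rayleigh-Ritz problem below.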
        if verbosityLevel > 2:
            print(activeMask)

        currentBlockSize = activeMask.sum()
        if currentBlockSize != previousBlockSize:
            previousBlockSize = currentBlockSize
            ident = np.eye(currentBlockSize, dtype=A.dtype)

        if currentBlockSize == 0:
            break

        if verbosityLevel > 0:
            print('current block size:', currentBlockSize)
            print('eigenvalue:', _lambda)
            print('residual norms:', residualNorms)
        if verbosityLevel > 10:
            print(eigBlockVector)

        activeBlockVectorR = _as2d(blockVectorR[:, activeMask])

        if iterationNumber > 0:
            activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
            activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
            if B is not None:
                activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])

        if M is not None:
            # Apply preconditioner T to the active residuals.
            activeBlockVectorR = M(activeBlockVectorR)

        ##
        # Apply constraints to the preconditioned residuals.
        if blockVectorY is not None:
            _applyConstraints(activeBlockVectorR,
                              gramYBY, blockVectorBY, blockVectorY)

        ##
        # B-orthogonalize the preconditioned residuals to X.
        if B is not None:
            activeBlockVectorR = activeBlockVectorR - np.matmul(
                blockVectorX,
                np.matmul(blockVectorBX.T.conj(), activeBlockVectorR))
        else:
            activeBlockVectorR = activeBlockVectorR - np.matmul(
                blockVectorX,
                np.matmul(blockVectorX.T.conj(), activeBlockVectorR))

        ##
        # B-orthonormalize the preconditioned residuals.
        aux = _b_orthonormalize(B, activeBlockVectorR)
        activeBlockVectorR, activeBlockVectorBR = aux

        activeBlockVectorAR = A(activeBlockVectorR)

        if iterationNumber > 0:
            if B is not None:
                aux = _b_orthonormalize(B, activeBlockVectorP,
                                        activeBlockVectorBP, retInvR=True)
                activeBlockVectorP, activeBlockVectorBP, invR, normal = aux
            else:
                aux = _b_orthonormalize(B, activeBlockVectorP, retInvR=True)
                activeBlockVectorP, _, invR, normal = aux
            # Function _b_orthonormalize returns None if Cholesky fails
            if activeBlockVectorP is not None:
                activeBlockVectorAP = activeBlockVectorAP / normal
                activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)
                restart = False
            else:
                restart = True

        ##
        # Perform the Rayleigh Ritz Procedure:
        # Compute symmetric Gram matrices:

        if activeBlockVectorAR.dtype == 'float32':
            myeps = 1
        elif activeBlockVectorR.dtype == 'float32':
            myeps = 1e-4
        else:
            myeps = 1e-8

        if residualNorms.max() > myeps and not explicitGramFlag:
            explicitGramFlag = False
        else:
            # Once explicitGramFlag, forever explicitGramFlag.
            explicitGramFlag = True

        # Shared memory assignments to simplify the code
        if B is None:
            blockVectorBX = blockVectorX
            activeBlockVectorBR = activeBlockVectorR
            if not restart:
                activeBlockVectorBP = activeBlockVectorP

        # Common submatrices:
        gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
        gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)

        if explicitGramFlag:
            gramRAR = (gramRAR + gramRAR.T.conj()) / 2
            gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
            gramXAX = (gramXAX + gramXAX.T.conj()) / 2
            gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
            gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
            gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
        else:
            gramXAX = np.diag(_lambda)
            gramXBX = ident0
            gramRBR = ident
            gramXBR = np.zeros((sizeX, currentBlockSize), dtype=A.dtype)

        def _handle_gramA_gramB_verbosity(gramA, gramB):
            if verbosityLevel > 0:
                _report_nonhermitian(gramA, 'gramA')
                _report_nonhermitian(gramB, 'gramB')
            if verbosityLevel > 10:
                # Note: not documented, but leave it in here for now
                np.savetxt('gramA.txt', gramA)
                np.savetxt('gramB.txt', gramB)
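
        # The trial subspace is [X, R, P] (or [X, R] on restart); gramA and
        # gramB below are its projections of A and B, assembled block-wise
        # with numpy's block() and kept symmetric by construction.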
        if not restart:
            gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
            gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
            gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
            gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
            gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
            if explicitGramFlag:
                gramPAP = (gramPAP + gramPAP.T.conj()) / 2
                gramPBP = np.dot(activeBlockVectorP.T.conj(),
                                 activeBlockVectorBP)
            else:
                gramPBP = ident

            gramA = bmat([[gramXAX, gramXAR, gramXAP],
                          [gramXAR.T.conj(), gramRAR, gramRAP],
                          [gramXAP.T.conj(), gramRAP.T.conj(), gramPAP]])
            gramB = bmat([[gramXBX, gramXBR, gramXBP],
                          [gramXBR.T.conj(), gramRBR, gramRBP],
                          [gramXBP.T.conj(), gramRBP.T.conj(), gramPBP]])

            _handle_gramA_gramB_verbosity(gramA, gramB)

            try:
                _lambda, eigBlockVector = eigh(gramA, gramB,
                                               check_finite=False)
            except LinAlgError:
                # try again after dropping the direction vectors P from RR
                restart = True

        if restart:
            gramA = bmat([[gramXAX, gramXAR],
                          [gramXAR.T.conj(), gramRAR]])
            gramB = bmat([[gramXBX, gramXBR],
                          [gramXBR.T.conj(), gramRBR]])

            _handle_gramA_gramB_verbosity(gramA, gramB)

            try:
                _lambda, eigBlockVector = eigh(gramA, gramB,
                                               check_finite=False)
            except LinAlgError:
                raise ValueError('eigh has failed in lobpcg iterations')

        ii = _get_indx(_lambda, sizeX, largest)
        if verbosityLevel > 10:
            print(ii)
            print(_lambda)

        _lambda = _lambda[ii]
        eigBlockVector = eigBlockVector[:, ii]

        lambdaHistory.append(_lambda)

        if verbosityLevel > 10:
            print('lambda:', _lambda)
#        # Normalize eigenvectors!
#        aux = np.sum( eigBlockVector.conj() * eigBlockVector, 0 )
#        eigVecNorms = np.sqrt( aux )
#        eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis, :]
#        eigBlockVector, aux = _b_orthonormalize( B, eigBlockVector )

        if verbosityLevel > 10:
            print(eigBlockVector)

        # Compute Ritz vectors.
        if B is not None:
            if not restart:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
                eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                pp += np.dot(activeBlockVectorP, eigBlockVectorP)

                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                app += np.dot(activeBlockVectorAP, eigBlockVectorP)

                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
                bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
            else:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)

            if verbosityLevel > 10:
                print(pp)
                print(app)
                print(bpp)

            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
            blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp

            blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp

        else:
            if not restart:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
                eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                pp += np.dot(activeBlockVectorP, eigBlockVectorP)

                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                app += np.dot(activeBlockVectorAP, eigBlockVectorP)
            else:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                app = np.dot(activeBlockVectorAR, eigBlockVectorR)

            if verbosityLevel > 10:
                print(pp)
                print(app)

            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app

            blockVectorP, blockVectorAP = pp, app

    if B is not None:
        aux = blockVectorBX * _lambda[np.newaxis, :]

    else:
        aux = blockVectorX * _lambda[np.newaxis, :]

    blockVectorR = blockVectorAX - aux

    aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
    residualNorms = np.sqrt(aux)

    # Future work: Need to add postprocessing here:
    # Making sure eigenvectors "exactly" satisfy the blockVectorY constraints?
    # Making sure eigenvectors are "exactly" orthonormalized by a final
    # "exact" RR
    # Computing the actual true residuals

    if verbosityLevel > 0:
        print('final eigenvalue:', _lambda)
        print('final residual norms:', residualNorms)

    if retLambdaHistory:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
        else:
            return _lambda, blockVectorX, lambdaHistory
    else:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX
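

# A minimal smoke test, assuming this file is executed directly; the names
# below (A_test, X_test) are illustrative and not part of the module API.
# For this diagonal matrix the three largest eigenvalues are known to be
# 100, 99 and 98, which lobpcg should approximate closely.
if __name__ == '__main__':
    from scipy.sparse import spdiags

    n = 100
    vals = np.arange(1, n + 1, dtype=np.float64)
    A_test = spdiags(vals, 0, n, n)
    X_test = np.random.rand(n, 3)

    w, v = lobpcg(A_test, X_test, tol=1e-8, maxiter=200, largest=True)
    print('computed eigenvalues:', w)  # expected close to [100, 99, 98]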