/src/quantlib/ql/math/matrixutilities/pseudosqrt.cpp
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/*
 Copyright (C) 2003, 2004, 2007 Ferdinando Ametrano
 Copyright (C) 2006 Yiping Chen
 Copyright (C) 2007 Neil Firth

 This file is part of QuantLib, a free-software/open-source library
 for financial quantitative analysts and developers - http://quantlib.org/

 QuantLib is free software: you can redistribute it and/or modify it
 under the terms of the QuantLib license. You should have received a
 copy of the license along with this program; if not, please email
 <quantlib-dev@lists.sf.net>. The license is also available online at
 <https://www.quantlib.org/license.shtml>.

 This program is distributed in the hope that it will be useful, but WITHOUT
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE. See the license for more details.
*/

#include <algorithm>
#include <numeric>  // std::accumulate, used in rankReducedSqrt
#include <ql/math/comparison.hpp>
#include <ql/math/matrixutilities/choleskydecomposition.hpp>
#include <ql/math/matrixutilities/pseudosqrt.hpp>
#include <ql/math/matrixutilities/symmetricschurdecomposition.hpp>
#include <ql/math/optimization/conjugategradient.hpp>
#include <ql/math/optimization/constraint.hpp>
#include <ql/math/optimization/problem.hpp>
#include <utility>

namespace QuantLib {

    namespace {

        #if defined(QL_EXTRA_SAFETY_CHECKS)
        void checkSymmetry(const Matrix& matrix) {
            Size size = matrix.rows();
            QL_REQUIRE(size == matrix.columns(),
                       "non square matrix: " << size << " rows, " <<
                       matrix.columns() << " columns");
            for (Size i=0; i<size; ++i)
                for (Size j=0; j<i; ++j)
                    QL_REQUIRE(close(matrix[i][j], matrix[j][i]),
                               "non symmetric matrix: " <<
                               "[" << i << "][" << j << "]=" << matrix[i][j] <<
                               ", [" << j << "][" << i << "]=" << matrix[j][i]);
        }
        #endif

        void normalizePseudoRoot(const Matrix& matrix,
                                 Matrix& pseudo) {
            Size size = matrix.rows();
            QL_REQUIRE(size == pseudo.rows(),
                       "matrix/pseudo mismatch: matrix rows are " << size <<
                       " while pseudo rows are " << pseudo.rows());
            Size pseudoCols = pseudo.columns();

            // row normalization
            for (Size i=0; i<size; ++i) {
                Real norm = 0.0;
                for (Size j=0; j<pseudoCols; ++j)
                    norm += pseudo[i][j]*pseudo[i][j];
                if (norm>0.0) {
                    Real normAdj = std::sqrt(matrix[i][i]/norm);
                    for (Size j=0; j<pseudoCols; ++j)
                        pseudo[i][j] *= normAdj;
                }
            }
        }
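
        // Illustrative note (added commentary, not part of the original
        // file): after the call, each nonzero row of `pseudo` is rescaled
        // so that (pseudo*transpose(pseudo))[i][i] == matrix[i][i].
        // A minimal usage sketch, assuming `m` is a 2x2 covariance matrix:
        //
        //     Matrix m(2, 2);
        //     m[0][0] = 4.0; m[0][1] = m[1][0] = 1.2; m[1][1] = 9.0;
        //     Matrix p = pseudoSqrt(m, SalvagingAlgorithm::Spectral);
        //     // pseudoSqrt calls normalizePseudoRoot internally, so
        //     // diag(p*transpose(p)) reproduces diag(m) up to rounding.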

        // cost function for the hypersphere and lower-diagonal algorithms
        class HypersphereCostFunction : public CostFunction {
          private:
            Size size_;
            bool lowerDiagonal_;
            Matrix targetMatrix_;
            Array targetVariance_;
            mutable Matrix currentRoot_, tempMatrix_, currentMatrix_;
          public:
            HypersphereCostFunction(const Matrix& targetMatrix,
                                    Array targetVariance,
                                    bool lowerDiagonal)
            : size_(targetMatrix.rows()), lowerDiagonal_(lowerDiagonal),
              targetMatrix_(targetMatrix), targetVariance_(std::move(targetVariance)),
              currentRoot_(size_, size_), tempMatrix_(size_, size_),
              currentMatrix_(size_, size_) {}
            Array values(const Array&) const override {
                QL_FAIL("values method not implemented");
            }
            Real value(const Array& x) const override {
                Size i,j,k;
                std::fill(currentRoot_.begin(), currentRoot_.end(), 1.0);
                if (lowerDiagonal_) {
                    for (i=0; i<size_; i++) {
                        for (k=0; k<size_; k++) {
                            if (k>i) {
                                currentRoot_[i][k]=0;
                            } else {
                                for (j=0; j<=k; j++) {
                                    if (j == k && k!=i)
                                        currentRoot_[i][k] *=
                                            std::cos(x[i*(i-1)/2+j]);
                                    else if (j!=i)
                                        currentRoot_[i][k] *=
                                            std::sin(x[i*(i-1)/2+j]);
                                }
                            }
                        }
                    }
                } else {
                    for (i=0; i<size_; i++) {
                        for (k=0; k<size_; k++) {
                            for (j=0; j<=k; j++) {
                                if (j == k && k!=size_-1)
                                    currentRoot_[i][k] *=
                                        std::cos(x[j*size_+i]);
                                else if (j!=size_-1)
                                    currentRoot_[i][k] *=
                                        std::sin(x[j*size_+i]);
                            }
                        }
                    }
                }
                // squared Frobenius distance between the candidate
                // covariance and the target
                Real temp, error=0;
                tempMatrix_ = transpose(currentRoot_);
                currentMatrix_ = currentRoot_ * tempMatrix_;
                for (i=0;i<size_;i++) {
                    for (j=0;j<size_;j++) {
                        temp = currentMatrix_[i][j]*targetVariance_[i]
                            *targetVariance_[j]-targetMatrix_[i][j];
                        error += temp*temp;
                    }
                }
                return error;
            }
        };
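
        // Background note (added commentary): the cost function relies on
        // the spherical-coordinates ("angles") parameterization of a
        // unit-norm row, cf. Rebonato and Jaeckel (1999):
        //
        //     b[0]   = cos(theta[0])
        //     b[k]   = cos(theta[k]) * sin(theta[k-1]) * ... * sin(theta[0])
        //     b[n-1] = sin(theta[n-2]) * ... * sin(theta[0])
        //
        // so every row of the candidate root has unit Euclidean norm by
        // construction and the angles can be optimized without constraints.
        // A minimal sketch of one such row (hypothetical helper, not part
        // of this file):
        //
        //     Array hypersphereRow(const Array& theta) {
        //         Size n = theta.size() + 1;
        //         Array b(n, 1.0);
        //         for (Size k = 0; k < n; ++k)
        //             for (Size j = 0; j < std::min(k + 1, n - 1); ++j)
        //                 b[k] *= (j == k ? std::cos(theta[j])
        //                                 : std::sin(theta[j]));
        //         return b;  // sum of squares telescopes to 1
        //     }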

        // Optimization function for hypersphere and lower-diagonal algorithm
        Matrix hypersphereOptimize(const Matrix& targetMatrix,
                                   const Matrix& currentRoot,
                                   const bool lowerDiagonal) {
            Size i,j,k,size = targetMatrix.rows();
            Matrix result(currentRoot);
            Array variance(size, 0);
            for (i=0; i<size; i++){
                variance[i]=std::sqrt(targetMatrix[i][i]);
            }
            if (lowerDiagonal) {
                Matrix approxMatrix(result*transpose(result));
                result = CholeskyDecomposition(approxMatrix, true);
                for (i=0; i<size; i++) {
                    for (j=0; j<size; j++) {
                        result[i][j]/=std::sqrt(approxMatrix[i][i]);
                    }
                }
            } else {
                for (i=0; i<size; i++) {
                    for (j=0; j<size; j++) {
                        result[i][j]/=variance[i];
                    }
                }
            }

            ConjugateGradient optimize;
            EndCriteria endCriteria(100, 10, 1e-8, 1e-8, 1e-8);
            HypersphereCostFunction costFunction(targetMatrix, variance,
                                                 lowerDiagonal);
            NoConstraint constraint;

            // hypersphere vector optimization

            if (lowerDiagonal) {
                Array theta(size * (size-1)/2);
                const Real eps=1e-16;
                for (i=1; i<size; i++) {
                    for (j=0; j<i; j++) {
                        theta[i*(i-1)/2+j]=result[i][j];
                        if (theta[i*(i-1)/2+j]>1-eps)
                            theta[i*(i-1)/2+j]=1-eps;
                        if (theta[i*(i-1)/2+j]<-1+eps)
                            theta[i*(i-1)/2+j]=-1+eps;
                        for (k=0; k<j; k++) {
                            theta[i*(i-1)/2+j] /= std::sin(theta[i*(i-1)/2+k]);
                            if (theta[i*(i-1)/2+j]>1-eps)
                                theta[i*(i-1)/2+j]=1-eps;
                            if (theta[i*(i-1)/2+j]<-1+eps)
                                theta[i*(i-1)/2+j]=-1+eps;
                        }
                        theta[i*(i-1)/2+j] = std::acos(theta[i*(i-1)/2+j]);
                        if (j==i-1) {
                            if (result[i][i]<0)
                                theta[i*(i-1)/2+j]=-theta[i*(i-1)/2+j];
                        }
                    }
                }
                Problem p(costFunction, constraint, theta);
                optimize.minimize(p, endCriteria);
                theta = p.currentValue();
                std::fill(result.begin(),result.end(),1.0);
                for (i=0; i<size; i++) {
                    for (k=0; k<size; k++) {
                        if (k>i) {
                            result[i][k]=0;
                        } else {
                            for (j=0; j<=k; j++) {
                                if (j == k && k!=i)
                                    result[i][k] *=
                                        std::cos(theta[i*(i-1)/2+j]);
                                else if (j!=i)
                                    result[i][k] *=
                                        std::sin(theta[i*(i-1)/2+j]);
                            }
                        }
                    }
                }
            } else {
                Array theta(size * (size-1));
                const Real eps=1e-16;
                for (i=0; i<size; i++) {
                    for (j=0; j<size-1; j++) {
                        theta[j*size+i]=result[i][j];
                        if (theta[j*size+i]>1-eps)
                            theta[j*size+i]=1-eps;
                        if (theta[j*size+i]<-1+eps)
                            theta[j*size+i]=-1+eps;
                        for (k=0;k<j;k++) {
                            theta[j*size+i] /= std::sin(theta[k*size+i]);
                            if (theta[j*size+i]>1-eps)
                                theta[j*size+i]=1-eps;
                            if (theta[j*size+i]<-1+eps)
                                theta[j*size+i]=-1+eps;
                        }
                        theta[j*size+i] = std::acos(theta[j*size+i]);
                        if (j==size-2) {
                            if (result[i][j+1]<0)
                                theta[j*size+i]=-theta[j*size+i];
                        }
                    }
                }
                Problem p(costFunction, constraint, theta);
                optimize.minimize(p, endCriteria);
                theta=p.currentValue();
                std::fill(result.begin(),result.end(),1.0);
                for (i=0; i<size; i++) {
                    for (k=0; k<size; k++) {
                        for (j=0; j<=k; j++) {
                            if (j == k && k!=size-1)
                                result[i][k] *= std::cos(theta[j*size+i]);
                            else if (j!=size-1)
                                result[i][k] *= std::sin(theta[j*size+i]);
                        }
                    }
                }
            }

            for (i=0; i<size; i++) {
                for (j=0; j<size; j++) {
                    result[i][j]*=variance[i];
                }
            }
            return result;
        }
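
        // Usage note (added commentary): the caller is expected to pass a
        // pseudo-root already rescaled by normalizePseudoRoot, e.g.
        //
        //     Matrix root = jd.eigenvectors() * diagonal;
        //     normalizePseudoRoot(target, root);
        //     root = hypersphereOptimize(target, root, false);
        //     // root*transpose(root) now matches the diagonal of target
        //     // exactly; its off-diagonal entries are as close to target
        //     // as the local angle optimization managed to get.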

        // Matrix infinity norm, i.e. the maximum absolute row sum.
        // See Golub and van Loan (2.3.10) or
        // <http://en.wikipedia.org/wiki/Matrix_norm>
        Real normInf(const Matrix& M) {
            Size rows = M.rows();
            Size cols = M.columns();
            Real norm = 0.0;
            for (Size i=0; i<rows; ++i) {
                Real rowSum = 0.0;
                for (Size j=0; j<cols; ++j)
                    rowSum += std::fabs(M[i][j]);
                norm = std::max(norm, rowSum);
            }
            return norm;
        }
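
        // e.g. for M = [1 -2; 3 4], normInf(M) = max(1+2, 3+4) = 7.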

        // Take a matrix and make all the diagonal entries 1.
        Matrix projectToUnitDiagonalMatrix(const Matrix& M) {
            Size size = M.rows();
            QL_REQUIRE(size == M.columns(),
                       "matrix not square");

            Matrix result(M);
            for (Size i=0; i<size; ++i)
                result[i][i] = 1.0;

            return result;
        }

        // Take a matrix and make all the eigenvalues non-negative
        Matrix projectToPositiveSemidefiniteMatrix(const Matrix& M) {
            Size size = M.rows();
            QL_REQUIRE(size == M.columns(),
                       "matrix not square");

            Matrix diagonal(size, size, 0.0);
            SymmetricSchurDecomposition jd(M);
            for (Size i=0; i<size; ++i)
                diagonal[i][i] = std::max<Real>(jd.eigenvalues()[i], 0.0);

            Matrix result =
                jd.eigenvectors()*diagonal*transpose(jd.eigenvectors());
            return result;
        }
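
        // Note (added commentary): for a symmetric M = Q*Lambda*transpose(Q),
        // Q*max(Lambda,0)*transpose(Q) is the nearest positive-semidefinite
        // matrix to M in the Frobenius norm.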

        // Implementation of the Higham (2002) alternating-projections
        // algorithm to find the nearest correlation matrix; deltaS is
        // Dykstra's correction term.
        Matrix highamImplementation(const Matrix& A, const Size maxIterations, const Real& tolerance) {

            Size size = A.rows();
            Matrix R, Y(A), X(A), deltaS(size, size, 0.0);

            Matrix lastX(X);
            Matrix lastY(Y);

            for (Size i=0; i<maxIterations; ++i) {
                R = Y - deltaS;
                X = projectToPositiveSemidefiniteMatrix(R);
                deltaS = X - R;
                Y = projectToUnitDiagonalMatrix(X);

                // convergence test
                if (std::max({
                        normInf(X-lastX)/normInf(X),
                        normInf(Y-lastY)/normInf(Y),
                        normInf(Y-X)/normInf(Y)
                    })
                    <= tolerance)
                {
                    break;
                }
                lastX = X;
                lastY = Y;
            }

            // ensure we return a symmetric matrix
            for (Size i=0; i<size; ++i)
                for (Size j=0; j<i; ++j)
                    Y[i][j] = Y[j][i];

            return Y;
        }
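
        // A minimal usage sketch (the constants match the hard-coded
        // values used by pseudoSqrt below):
        //
        //     Matrix nearestCorr = highamImplementation(matrix, 40, 1e-6);
        //     Matrix root = CholeskyDecomposition(nearestCorr, true);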
    }


    Matrix pseudoSqrt(const Matrix& matrix, SalvagingAlgorithm::Type sa) {
        Size size = matrix.rows();

        #if defined(QL_EXTRA_SAFETY_CHECKS)
        checkSymmetry(matrix);
        #else
        QL_REQUIRE(size == matrix.columns(),
                   "non square matrix: " << size << " rows, " <<
                   matrix.columns() << " columns");
        #endif

        // spectral (a.k.a. principal component) analysis
        SymmetricSchurDecomposition jd(matrix);
        Matrix diagonal(size, size, 0.0);

        // salvaging algorithm
        Matrix result(size, size);
        bool negative;
        switch (sa) {
          case SalvagingAlgorithm::None:
            // eigenvalues are sorted in decreasing order
            QL_REQUIRE(jd.eigenvalues()[size-1]>=-1e-16,
                       "negative eigenvalue(s) ("
                       << std::scientific << jd.eigenvalues()[size-1]
                       << ")");
            result = CholeskyDecomposition(matrix, true);
            break;
          case SalvagingAlgorithm::Spectral:
            // negative eigenvalues set to zero
            for (Size i=0; i<size; i++)
                diagonal[i][i] =
                    std::sqrt(std::max<Real>(jd.eigenvalues()[i], 0.0));

            result = jd.eigenvectors() * diagonal;
            normalizePseudoRoot(matrix, result);
            break;
          case SalvagingAlgorithm::Hypersphere:
            // negative eigenvalues set to zero
            negative=false;
            for (Size i=0; i<size; ++i){
                diagonal[i][i] =
                    std::sqrt(std::max<Real>(jd.eigenvalues()[i], 0.0));
                if (jd.eigenvalues()[i]<0.0) negative=true;
            }
            result = jd.eigenvectors() * diagonal;
            normalizePseudoRoot(matrix, result);

            if (negative)
                result = hypersphereOptimize(matrix, result, false);
            break;
          case SalvagingAlgorithm::LowerDiagonal:
            // negative eigenvalues set to zero
            negative=false;
            for (Size i=0; i<size; ++i){
                diagonal[i][i] =
                    std::sqrt(std::max<Real>(jd.eigenvalues()[i], 0.0));
                if (jd.eigenvalues()[i]<0.0) negative=true;
            }
            result = jd.eigenvectors() * diagonal;

            normalizePseudoRoot(matrix, result);

            if (negative)
                result = hypersphereOptimize(matrix, result, true);
            break;
          case SalvagingAlgorithm::Higham: {
              int maxIterations = 40;
              Real tol = 1e-6;
              result = highamImplementation(matrix, maxIterations, tol);
              result = CholeskyDecomposition(result, true);
            }
            break;
          case SalvagingAlgorithm::Principal: {
              QL_REQUIRE(jd.eigenvalues().back()>=-10*QL_EPSILON,
                         "negative eigenvalue(s) ("
                         << std::scientific << jd.eigenvalues().back()
                         << ")");

              Array sqrtEigenvalues(size);
              std::transform(
                  jd.eigenvalues().begin(), jd.eigenvalues().end(),
                  sqrtEigenvalues.begin(),
                  [](Real lambda) -> Real {
                      return std::sqrt(std::max<Real>(lambda, 0.0));
                  }
              );

              for (Size i=0; i < size; ++i)
                  std::transform(
                      sqrtEigenvalues.begin(), sqrtEigenvalues.end(),
                      jd.eigenvectors().row_begin(i),
                      diagonal.column_begin(i),
                      std::multiplies<>()
                  );

              result = jd.eigenvectors()*diagonal;
              result = 0.5*(result + transpose(result));
            }
            break;
          default:
            QL_FAIL("unknown salvaging algorithm");
        }

        return result;
    }
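
    // Example (added commentary): salvaging an indefinite "correlation"
    // matrix, as can arise from pairwise estimation:
    //
    //     Matrix corr(3, 3);
    //     corr[0][0] = corr[1][1] = corr[2][2] = 1.0;
    //     corr[0][1] = corr[1][0] = 0.9;
    //     corr[0][2] = corr[2][0] = 0.7;
    //     corr[1][2] = corr[2][1] = -0.3;  // makes corr indefinite
    //     Matrix root = pseudoSqrt(corr, SalvagingAlgorithm::Spectral);
    //     // root*transpose(root) is positive semidefinite and keeps
    //     // the unit diagonal of corr.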


    Matrix rankReducedSqrt(const Matrix& matrix,
                           Size maxRank,
                           Real componentRetainedPercentage,
                           SalvagingAlgorithm::Type sa) {
        Size size = matrix.rows();

        #if defined(QL_EXTRA_SAFETY_CHECKS)
        checkSymmetry(matrix);
        #else
        QL_REQUIRE(size == matrix.columns(),
                   "non square matrix: " << size << " rows, " <<
                   matrix.columns() << " columns");
        #endif

        QL_REQUIRE(componentRetainedPercentage>0.0,
                   "no eigenvalues retained");

        QL_REQUIRE(componentRetainedPercentage<=1.0,
                   "percentage to be retained > 100%");

        QL_REQUIRE(maxRank>=1,
                   "max rank required < 1");

        // spectral (a.k.a. principal component) analysis
        SymmetricSchurDecomposition jd(matrix);
        Array eigenValues = jd.eigenvalues();

        // salvaging algorithm
        switch (sa) {
          case SalvagingAlgorithm::None:
            // eigenvalues are sorted in decreasing order
            QL_REQUIRE(eigenValues[size-1]>=-1e-16,
                       "negative eigenvalue(s) ("
                       << std::scientific << eigenValues[size-1]
                       << ")");
            break;
          case SalvagingAlgorithm::Spectral:
            // negative eigenvalues set to zero
            for (Size i=0; i<size; ++i)
                eigenValues[i] = std::max<Real>(eigenValues[i], 0.0);
            break;
          case SalvagingAlgorithm::Higham:
            {
                int maxIterations = 40;
                Real tolerance = 1e-6;
                Matrix adjustedMatrix = highamImplementation(matrix, maxIterations, tolerance);
                jd = SymmetricSchurDecomposition(adjustedMatrix);
                eigenValues = jd.eigenvalues();
            }
            break;
          default:
            QL_FAIL("unknown or invalid salvaging algorithm");
        }

        // factor reduction
        Real enough = componentRetainedPercentage *
                      std::accumulate(eigenValues.begin(),
                                      eigenValues.end(), Real(0.0));
        if (componentRetainedPercentage == 1.0) {
            // numerical glitches might cause some factors to be discarded
            enough *= 1.1;
        }
        // retain at least one factor
        Real components = eigenValues[0];
        Size retainedFactors = 1;
        for (Size i=1; components<enough && i<size; ++i) {
            components += eigenValues[i];
            retainedFactors++;
        }
        // the output is guaranteed to have rank <= maxRank
        retainedFactors=std::min(retainedFactors, maxRank);

        Matrix diagonal(size, retainedFactors, 0.0);
        for (Size i=0; i<retainedFactors; ++i)
            diagonal[i][i] = std::sqrt(eigenValues[i]);
        Matrix result = jd.eigenvectors() * diagonal;

        normalizePseudoRoot(matrix, result);
        return result;
    }
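
    // Example (added commentary): a two-factor approximation of a full
    // correlation matrix, retaining as much of the trace as possible:
    //
    //     Matrix root = rankReducedSqrt(corr, 2, 1.0,
    //                                   SalvagingAlgorithm::Spectral);
    //     // root has at most 2 columns; root*transpose(root) reproduces
    //     // the diagonal of corr and has rank at most 2.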
}