1"""Basic linear factorizations needed by the solver."""
3from scipy.sparse import (bmat, csc_matrix, eye, issparse)
4from scipy.sparse.linalg import LinearOperator
5import scipy.linalg
6import scipy.sparse.linalg
7try:
8 from sksparse.cholmod import cholesky_AAt
9 sksparse_available = True
10except ImportError:
11 import warnings
12 sksparse_available = False
13import numpy as np
14from warnings import warn
16__all__ = [
17 'orthogonality',
18 'projections',
19]


def orthogonality(A, g):
    """Measure orthogonality between a vector and the null space of a matrix.

    Compute a measure of orthogonality between the null space
    of the (possibly sparse) matrix ``A`` and a given vector ``g``.

    The formula is a simplified (and cheaper) version of formula (3.13)
    from [1]_:

    ``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
           "On the solution of equality constrained quadratic
           programming problems arising in optimization."
           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
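
    Examples
    --------
    A small worked illustration: a vector in the null space of ``A``
    yields a measure of zero, while a row-space vector yields a
    positive value:

    >>> import numpy as np
    >>> from scipy.optimize._trustregion_constr.projections import orthogonality
    >>> A = np.array([[1., 0., 0.],
    ...               [0., 1., 0.]])
    >>> float(orthogonality(A, np.array([0., 0., 1.])))
    0.0
    >>> round(float(orthogonality(A, np.array([1., 0., 0.]))), 3)
    0.707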
38 """
39 # Compute vector norms
40 norm_g = np.linalg.norm(g)
41 # Compute Froebnius norm of the matrix A
42 if issparse(A):
43 norm_A = scipy.sparse.linalg.norm(A, ord='fro')
44 else:
45 norm_A = np.linalg.norm(A, ord='fro')
47 # Check if norms are zero
48 if norm_g == 0 or norm_A == 0:
49 return 0
51 norm_A_g = np.linalg.norm(A.dot(g))
52 # Orthogonality measure
53 orth = norm_A_g / (norm_A*norm_g)
54 return orth


def normal_equation_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``NormalEquation`` approach.
    """
    # Cholesky factorization of (A A.T)
    factor = cholesky_AAt(A)
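    # ``factor`` is a CHOLMOD factor object; calling ``factor(b)``
    # solves the system (A A.T) v = b.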

    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        v = factor(A.dot(x))
        z = x - A.T.dot(v)

        # Iterative refinement to improve roundoff
        # errors described in [1]_, algorithm 5.1.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            # z_next = z - A.T inv(A A.T) A z
            v = factor(A.dot(z))
            z = z - A.T.dot(v)
            k += 1

        return z

    # z = inv(A A.T) A x
    def least_squares(x):
        return factor(A.dot(x))

    # z = A.T inv(A A.T) x
    def row_space(x):
        return A.T.dot(factor(x))

    return null_space, least_squares, row_space


def augmented_system_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``AugmentedSystem`` approach.
    """
    # Form augmented system
    K = csc_matrix(bmat([[eye(n), A.T], [A, None]]))
    # LU factorization
    # TODO: Use a symmetric indefinite factorization
    #       to solve the system twice as fast (because
    #       of the symmetry).
    try:
        solve = scipy.sparse.linalg.factorized(K)
    except RuntimeError:
        warn("Singular Jacobian matrix. Using dense SVD decomposition to "
             "perform the factorizations.")
        return svd_factorization_projections(A.toarray(),
                                             m, n, orth_tol,
                                             max_refin, tol)

    # z = x - A.T inv(A A.T) A x
    # is computed solving the extended system:
    # [I A.T] * [ z ] = [x]
    # [A  O ]   [aux]   [0]
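    #
    # The second block row enforces A z = 0 and the first block row
    # gives z = x - A.T aux; eliminating aux yields
    # aux = inv(A A.T) A x, so the leading n components of the
    # solution are exactly the null-space projection of x.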
    def null_space(x):
        # v = [x]
        #     [0]
        v = np.hstack([x, np.zeros(m)])
        # lu_sol = [ z ]
        #          [aux]
        lu_sol = solve(v)
        z = lu_sol[:n]

        # Iterative refinement to improve roundoff
        # errors described in [1]_, algorithm 5.2.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            # new_v = [x] - [I A.T] * [ z ]
            #         [0]   [A  O ]   [aux]
            new_v = v - K.dot(lu_sol)
            # [I A.T] * [delta  z ] = new_v
            # [A  O ]   [delta aux]
            lu_update = solve(new_v)
            # [ z ] += [delta  z ]
            # [aux]    [delta aux]
            lu_sol += lu_update
            z = lu_sol[:n]
            k += 1

        # return z = x - A.T inv(A A.T) A x
        return z

    # z = inv(A A.T) A x
    # is computed solving the extended system:
    # [I A.T] * [aux] = [x]
    # [A  O ]   [ z ]   [0]
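    #
    # Multiplying the first block row by A and using A aux = 0 gives
    # A A.T z = A x, so the trailing m components of the solution are
    # inv(A A.T) A x.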
    def least_squares(x):
        # v = [x]
        #     [0]
        v = np.hstack([x, np.zeros(m)])
        # lu_sol = [aux]
        #          [ z ]
        lu_sol = solve(v)
        # return z = inv(A A.T) A x
        return lu_sol[n:m+n]

    # z = A.T inv(A A.T) x
    # is computed solving the extended system:
    # [I A.T] * [ z ] = [0]
    # [A  O ]   [aux]   [x]
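    #
    # The first block row gives z = -A.T aux and the second enforces
    # A z = x, so z is the minimum norm solution of A y = x.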
    def row_space(x):
        # v = [0]
        #     [x]
        v = np.hstack([np.zeros(n), x])
        # lu_sol = [ z ]
        #          [aux]
        lu_sol = solve(v)
        # return z = A.T inv(A A.T) x
        return lu_sol[:n]

    return null_space, least_squares, row_space


def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``QRFactorization`` approach.
    """
    # QR factorization with column pivoting
    Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')
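    # Here A.T[:, P] == Q @ R, so a solution of ``A.T v = x`` can be
    # recovered as ``v[P] = inv(R) (Q.T x)`` whenever R is nonsingular.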

    if np.linalg.norm(R[-1, :], np.inf) < tol:
        warn('Singular Jacobian matrix. Using SVD decomposition to '
             'perform the factorizations.')
        return svd_factorization_projections(A, m, n,
                                             orth_tol,
                                             max_refin,
                                             tol)

    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        # v = P inv(R) Q.T x
        aux1 = Q.T.dot(x)
        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
        v = np.zeros(m)
        v[P] = aux2
        z = x - A.T.dot(v)

        # Iterative refinement to improve roundoff
        # errors described in [1]_, algorithm 5.1.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            # v = P inv(R) Q.T z
            aux1 = Q.T.dot(z)
            aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
            v[P] = aux2
            # z_next = z - A.T v
            z = z - A.T.dot(v)
            k += 1

        return z

    # z = inv(A A.T) A x
    def least_squares(x):
        # z = P inv(R) Q.T x
        aux1 = Q.T.dot(x)
        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
        z = np.zeros(m)
        z[P] = aux2
        return z

    # z = A.T inv(A A.T) x
    def row_space(x):
        # z = Q inv(R.T) P.T x
        aux1 = x[P]
        aux2 = scipy.linalg.solve_triangular(R, aux1,
                                             lower=False,
                                             trans='T')
        z = Q.dot(aux2)
        return z

    return null_space, least_squares, row_space


def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``SVDFactorization`` approach.
    """
    # SVD factorization
    U, s, Vt = scipy.linalg.svd(A, full_matrices=False)

    # Remove dimensions related to very small singular values
    U = U[:, s > tol]
    Vt = Vt[s > tol, :]
    s = s[s > tol]
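    # Since A = U diag(s) Vt, we have inv(A A.T) A = U diag(1/s) Vt
    # (in the pseudoinverse sense once small singular values are
    # dropped), so A A.T never needs to be formed explicitly.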

    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        # v = U 1/s V.T x = inv(A A.T) A x
        aux1 = Vt.dot(x)
        aux2 = 1/s*aux1
        v = U.dot(aux2)
        z = x - A.T.dot(v)

        # Iterative refinement to improve roundoff
        # errors described in [1]_, algorithm 5.1.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            # v = U 1/s V.T z = inv(A A.T) A z
            aux1 = Vt.dot(z)
            aux2 = 1/s*aux1
            v = U.dot(aux2)
            # z_next = z - A.T v
            z = z - A.T.dot(v)
            k += 1

        return z

    # z = inv(A A.T) A x
    def least_squares(x):
        # z = U 1/s V.T x = inv(A A.T) A x
        aux1 = Vt.dot(x)
        aux2 = 1/s*aux1
        z = U.dot(aux2)
        return z

    # z = A.T inv(A A.T) x
    def row_space(x):
        # z = V 1/s U.T x
        aux1 = U.T.dot(x)
        aux2 = 1/s*aux1
        z = Vt.T.dot(aux2)
        return z

    return null_space, least_squares, row_space


def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15):
    """Return three linear operators related to a given matrix A.

    Parameters
    ----------
    A : sparse matrix (or ndarray), shape (m, n)
        Matrix ``A`` used in the projection.
    method : string, optional
        Method used to compute the given linear
        operators. Should be one of:

        - 'NormalEquation': The operators
          will be computed using the
          so-called normal equation approach
          explained in [1]_. In order to do
          so the Cholesky factorization of
          ``(A A.T)`` is computed. Exclusive
          for sparse matrices.
        - 'AugmentedSystem': The operators
          will be computed using the
          so-called augmented system approach
          explained in [1]_. Exclusive
          for sparse matrices.
        - 'QRFactorization': Compute projections
          using QR factorization. Exclusive for
          dense matrices.
        - 'SVDFactorization': Compute projections
          using SVD factorization. Exclusive for
          dense matrices.

    orth_tol : float, optional
        Tolerance for iterative refinements.
    max_refin : int, optional
        Maximum number of iterative refinements.
    tol : float, optional
        Tolerance for singular values.

    Returns
    -------
    Z : LinearOperator, shape (n, n)
        Null-space operator. For a given vector ``x``,
        the null-space operator is equivalent to applying
        a projection matrix ``P = I - A.T inv(A A.T) A``
        to the vector. It can be shown that this is
        equivalent to projecting ``x`` onto the null space
        of ``A``.
    LS : LinearOperator, shape (m, n)
        Least-squares operator. For a given vector ``x``,
        the least-squares operator is equivalent to applying a
        pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A``
        to the vector. It can be shown that this vector
        ``pinv(A.T) x`` is the least-squares solution of
        ``A.T y = x``.
    Y : LinearOperator, shape (n, m)
        Row-space operator. For a given vector ``x``,
        the row-space operator is equivalent to applying
        the matrix ``Q = A.T inv(A A.T)``
        to the vector. It can be shown that the
        vector ``y = Q x`` is the minimum-norm solution
        of ``A y = x``.

    Notes
    -----
    Uses iterative refinements described in [1]_
    during the computation of ``Z`` in order to
    cope with the possibility of large roundoff errors.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
           "On the solution of equality constrained quadratic
           programming problems arising in optimization."
           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
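
    Examples
    --------
    A small dense example showing that the returned operators satisfy
    the identities above (``LinearOperator`` supports ``dot`` to apply
    an operator to a vector):

    >>> import numpy as np
    >>> from scipy.optimize._trustregion_constr.projections import projections
    >>> A = np.array([[1., 2., 0.],
    ...               [0., 1., 1.]])
    >>> Z, LS, Y = projections(A)  # dense input, so 'QRFactorization'
    >>> x = np.array([1., 1., 1.])
    >>> bool(np.allclose(A.dot(Z.dot(x)), 0))  # Z x lies in null(A)
    True
    >>> bool(np.allclose(Z.dot(x) + A.T.dot(LS.dot(x)), x))
    True
    >>> b = np.array([1., 2.])
    >>> bool(np.allclose(A.dot(Y.dot(b)), b))  # Y b solves A y = b
    True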
362 """
363 m, n = np.shape(A)
365 # The factorization of an empty matrix
366 # only works for the sparse representation.
367 if m*n == 0:
368 A = csc_matrix(A)
370 # Check Argument
371 if issparse(A):
372 if method is None:
373 method = "AugmentedSystem"
374 if method not in ("NormalEquation", "AugmentedSystem"):
375 raise ValueError("Method not allowed for sparse matrix.")
376 if method == "NormalEquation" and not sksparse_available:
377 warnings.warn(("Only accepts 'NormalEquation' option when"
378 " scikit-sparse is available. Using "
379 "'AugmentedSystem' option instead."),
380 ImportWarning)
381 method = 'AugmentedSystem'
382 else:
383 if method is None:
384 method = "QRFactorization"
385 if method not in ("QRFactorization", "SVDFactorization"):
386 raise ValueError("Method not allowed for dense array.")
388 if method == 'NormalEquation':
389 null_space, least_squares, row_space \
390 = normal_equation_projections(A, m, n, orth_tol, max_refin, tol)
391 elif method == 'AugmentedSystem':
392 null_space, least_squares, row_space \
393 = augmented_system_projections(A, m, n, orth_tol, max_refin, tol)
394 elif method == "QRFactorization":
395 null_space, least_squares, row_space \
396 = qr_factorization_projections(A, m, n, orth_tol, max_refin, tol)
397 elif method == "SVDFactorization":
398 null_space, least_squares, row_space \
399 = svd_factorization_projections(A, m, n, orth_tol, max_refin, tol)
401 Z = LinearOperator((n, n), null_space)
402 LS = LinearOperator((m, n), least_squares)
403 Y = LinearOperator((n, m), row_space)
405 return Z, LS, Y