#
# Author: Travis Oliphant, March 2002
#
from itertools import product

import numpy as np
from numpy import (Inf, dot, diag, prod, logical_not, ravel, transpose,
                   conjugate, absolute, amax, sign, isfinite)
from numpy.lib.scimath import sqrt as csqrt

# Local imports
from scipy.linalg import LinAlgError, bandwidth
from ._misc import norm
from ._basic import solve, inv
from ._special_matrices import triu
from ._decomp_svd import svd
from ._decomp_schur import schur, rsf2csf
from ._expm_frechet import expm_frechet, expm_cond
from ._matfuncs_sqrtm import sqrtm
from ._matfuncs_expm import pick_pade_structure, pade_UV_calc

__all__ = ['expm', 'cosm', 'sinm', 'tanm', 'coshm', 'sinhm', 'tanhm', 'logm',
           'funm', 'signm', 'sqrtm', 'fractional_matrix_power', 'expm_frechet',
           'expm_cond', 'khatri_rao']

eps = np.finfo('d').eps
feps = np.finfo('f').eps

_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}


###############################################################################
# Utility functions.

def _asarray_square(A):
    """
    Wraps asarray with the extra requirement that the input be a square matrix.

    The motivation is that the matfuncs module has real functions that have
    been lifted to square matrix functions.

    Parameters
    ----------
    A : array_like
        A square matrix.

    Returns
    -------
    out : ndarray
        An ndarray copy or view or other representation of A.

    """
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square array_like input')
    return A

def _maybe_real(A, B, tol=None):
    """
    Return either B or the real part of B, depending on properties of A and B.

    The motivation is that B has been computed as a complicated function of A,
    and B may be perturbed by negligible imaginary components.
    If A is real and B is complex with small imaginary components,
    then return a real copy of B. The assumption in that case would be that
    the imaginary components of B are numerical artifacts.

    Parameters
    ----------
    A : ndarray
        Input array whose type is to be checked as real vs. complex.
    B : ndarray
        Array to be returned, possibly without its imaginary part.
    tol : float
        Absolute tolerance.

    Returns
    -------
    out : real or complex array
        Either the input array B or only the real part of the input array B.

    """
    # Note that booleans and integers compare as real.
    if np.isrealobj(A) and np.iscomplexobj(B):
        if tol is None:
            tol = {0: feps*1e3, 1: eps*1e6}[_array_precision[B.dtype.char]]
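            # Default absolute tolerance: roughly 1e3 * machine eps for
            # single-precision results and 1e6 * machine eps for double,
            # selected via _array_precision from B's dtype character.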
        if np.allclose(B.imag, 0.0, atol=tol):
            B = B.real
    return B

###############################################################################
# Matrix functions.

def fractional_matrix_power(A, t):
    """
    Compute the fractional power of a matrix.

    Proceeds according to the discussion in section (6) of [1]_.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose fractional power to evaluate.
    t : float
        Fractional power.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import fractional_matrix_power
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = fractional_matrix_power(a, 0.5)
    >>> b
    array([[ 0.75592895,  1.13389342],
           [ 0.37796447,  1.88982237]])
    >>> np.dot(b, b)      # Verify square root
    array([[ 1.,  3.],
           [ 1.,  4.]])

    """
    # This fixes some issue with imports;
    # this function calls onenormest which is in scipy.sparse.
    A = _asarray_square(A)
    import scipy.linalg._matfuncs_inv_ssq
    return scipy.linalg._matfuncs_inv_ssq._fractional_matrix_power(A, t)

def logm(A, disp=True):
    """
    Compute matrix logarithm.

    The matrix logarithm is the inverse of
    expm: expm(logm(`A`)) == `A`

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose logarithm to evaluate
    disp : bool, optional
        Print a warning if the estimated error in the result is large,
        instead of returning the estimated error. (Default: True)

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `A`
    errest : float
        (if disp == False)

        1-norm of the estimated error, ||err||_1 / ||A||_1

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197

    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7

    .. [3] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import logm, expm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = logm(a)
    >>> b
    array([[-1.02571087,  2.05142174],
           [ 0.68380725,  1.02571087]])
    >>> expm(b)         # Verify expm(logm(a)) returns a
    array([[ 1.,  3.],
           [ 1.,  4.]])

    """
    A = _asarray_square(A)
    # Avoid circular import ... this is OK, right?
    import scipy.linalg._matfuncs_inv_ssq
    F = scipy.linalg._matfuncs_inv_ssq._logm(A)
    F = _maybe_real(A, F)
    errtol = 1000*eps
    # TODO use a better error approximation
    errest = norm(expm(F)-A, 1) / norm(A, 1)
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("logm result may be inaccurate, approximate err =", errest)
        return F
    else:
        return F, errest

def expm(A):
    """Compute the matrix exponential of an array.

    Parameters
    ----------
    A : ndarray
        Input whose last two dimensions are square, ``(..., n, n)``.

    Returns
    -------
    eA : ndarray
        The resulting matrix exponential with the same shape as ``A``

    Notes
    -----
    Implements the algorithm given in [1]_, which is essentially a Pade
    approximation with a variable order that is decided based on the array
    data.

    For input with size ``n``, the memory usage is in the worst case on the
    order of ``8*(n**2)``. If the input data is not of single and double
    precision of real and complex dtypes, it is copied to a new array.

    For cases ``n >= 400``, the cost of an exact 1-norm computation breaks
    even with 1-norm estimation, and from that point on the estimation scheme
    given in [2]_ is used to decide on the approximation order.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham, (2009), "A New Scaling
           and Squaring Algorithm for the Matrix Exponential", SIAM J. Matrix
           Anal. Appl. 31(3):970-989, :doi:`10.1137/09074721X`

    .. [2] Nicholas J. Higham and Francoise Tisseur (2000), "A Block Algorithm
           for Matrix 1-Norm Estimation, with an Application to 1-Norm
           Pseudospectra." SIAM J. Matrix Anal. Appl. 21(4):1185-1201,
           :doi:`10.1137/S0895479899356080`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Matrix version of the formula exp(0) = 1:

    >>> expm(np.zeros((3, 2, 2)))
    array([[[1., 0.],
            [0., 1.]],
    <BLANKLINE>
           [[1., 0.],
            [0., 1.]],
    <BLANKLINE>
           [[1., 0.],
            [0., 1.]]])

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])

    """
    a = np.asarray(A)
    if a.size == 1 and a.ndim < 2:
        return np.array([[np.exp(a.item())]])

    if a.ndim < 2:
        raise LinAlgError('The input array must be at least two-dimensional')
    if a.shape[-1] != a.shape[-2]:
        raise LinAlgError('Last 2 dimensions of the array must be square')
    n = a.shape[-1]
    # Empty array
    if min(*a.shape) == 0:
        return np.empty_like(a)

    # Scalar case
    if a.shape[-2:] == (1, 1):
        return np.exp(a)

    if not np.issubdtype(a.dtype, np.inexact):
        a = a.astype(float)
    elif a.dtype == np.float16:
        a = a.astype(np.float32)

    # Explicit formula for the 2x2 case, formula (2.2) in [1];
    # without Kahan's method, numerical instabilities can occur.
    if a.shape[-2:] == (2, 2):
        a1, a2, a3, a4 = (a[..., [0], [0]],
                          a[..., [0], [1]],
                          a[..., [1], [0]],
                          a[..., [1], [1]])
        mu = csqrt((a1-a4)**2 + 4*a2*a3)/2.  # csqrt slow but handles neg. vals

        eApD2 = np.exp((a1+a4)/2.)
        AmD2 = (a1 - a4)/2.
        coshMu = np.cosh(mu)
        sinchMu = np.ones_like(coshMu)
        mask = mu != 0
        sinchMu[mask] = np.sinh(mu[mask]) / mu[mask]
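        # sinch(mu) = sinh(mu)/mu has a removable singularity at mu = 0 with
        # limit 1, so entries where mu == 0 keep the value 1 set by ones_like.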
        eA = np.empty(a.shape, dtype=mu.dtype)
        eA[..., [0], [0]] = eApD2 * (coshMu + AmD2*sinchMu)
        eA[..., [0], [1]] = eApD2 * a2 * sinchMu
        eA[..., [1], [0]] = eApD2 * a3 * sinchMu
        eA[..., [1], [1]] = eApD2 * (coshMu - AmD2*sinchMu)
        if np.isrealobj(a):
            return eA.real
        return eA

    # larger problem with unspecified stacked dimensions.
    n = a.shape[-1]
    eA = np.empty(a.shape, dtype=a.dtype)
    # working memory to hold intermediate arrays
    Am = np.empty((5, n, n), dtype=a.dtype)

    # Main loop to go through the slices of an ndarray and pass them to expm
    for ind in product(*[range(x) for x in a.shape[:-2]]):
        aw = a[ind]

        lu = bandwidth(aw)
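        # bandwidth() returns the (lower, upper) bandwidths of the slice;
        # (0, 0) means all off-diagonal entries are zero, i.e. the slice is
        # diagonal and its exponential is just exp() applied to the diagonal.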
        if not any(lu):  # a is diagonal?
            eA[ind] = np.diag(np.exp(np.diag(aw)))
            continue

        # Generic/triangular case; copy the slice into scratch and send.
        # Am will be mutated by pick_pade_structure
        Am[0, :, :] = aw
        m, s = pick_pade_structure(Am)
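        # m is the selected Pade approximation order; s is the scaling
        # exponent, i.e. the number of squarings applied further below.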

        if s != 0:  # scaling needed
            Am[:4] *= [[[2**(-s)]], [[4**(-s)]], [[16**(-s)]], [[64**(-s)]]]
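            # The per-slot factors are (2**-s)**1, **2, **4 and **6,
            # consistent with Am[1:4] holding the 2nd, 4th and 6th powers
            # of the slice alongside the slice itself in Am[0].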

        pade_UV_calc(Am, n, m)
        eAw = Am[0]

        if s != 0:  # squaring needed

            if (lu[1] == 0) or (lu[0] == 0):  # lower/upper triangular
                # This branch implements Code Fragment 2.1 of [1]

                diag_aw = np.diag(aw)
                # einsum returns a writable view
                np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2**(-s))
                # super/sub diagonal
                sd = np.diag(aw, k=-1 if lu[1] == 0 else 1)
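                # At each squaring step below, the diagonal and the first
                # super/sub-diagonal of the triangular result are overwritten
                # with values computed directly from scalar formulas, which is
                # more accurate than obtaining them through repeated squaring.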
                for i in range(s-1, -1, -1):
                    eAw = eAw @ eAw

                    # diagonal
                    np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2.**(-i))
                    exp_sd = _exp_sinch(diag_aw * (2.**(-i))) * (sd * 2**(-i))
                    if lu[1] == 0:  # lower
                        np.einsum('ii->i', eAw[1:, :-1])[:] = exp_sd
                    else:  # upper
                        np.einsum('ii->i', eAw[:-1, 1:])[:] = exp_sd

            else:  # generic
                for _ in range(s):
                    eAw = eAw @ eAw

        # Zero out the entries from np.empty in case of triangular input
        if (lu[0] == 0) or (lu[1] == 0):
            eA[ind] = np.triu(eAw) if lu[0] == 0 else np.tril(eAw)
        else:
            eA[ind] = eAw

    return eA

def _exp_sinch(x):
    # Higham's formula (10.42), might overflow, see GH-11839
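    # For each consecutive pair x[i], x[i+1] this returns the divided
    # difference (exp(x[i+1]) - exp(x[i])) / (x[i+1] - x[i]), substituting
    # the limiting value exp(x[i]) when the two points coincide.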
    lexp_diff = np.diff(np.exp(x))
    l_diff = np.diff(x)
    mask_z = l_diff == 0.
    lexp_diff[~mask_z] /= l_diff[~mask_z]
    lexp_diff[mask_z] = np.exp(x[:-1][mask_z])
    return lexp_diff

def cosm(A):
    """
    Compute the matrix cosine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    cosm : (N, N) ndarray
        Matrix cosine of A

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])

    """
    A = _asarray_square(A)
    if np.iscomplexobj(A):
        return 0.5*(expm(1j*A) + expm(-1j*A))
    else:
        return expm(1j*A).real

def sinm(A):
    """
    Compute the matrix sine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinm : (N, N) ndarray
        Matrix sine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])

    """
    A = _asarray_square(A)
    if np.iscomplexobj(A):
        return -0.5j*(expm(1j*A) - expm(-1j*A))
    else:
        return expm(1j*A).imag

def tanm(A):
    """
    Compute the matrix tangent.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    tanm : (N, N) ndarray
        Matrix tangent of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanm, sinm, cosm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> t = tanm(a)
    >>> t
    array([[ -2.00876993,  -8.41880636],
           [ -2.80626879, -10.42757629]])

    Verify tanm(a) = sinm(a).dot(inv(cosm(a)))

    >>> s = sinm(a)
    >>> c = cosm(a)
    >>> s.dot(np.linalg.inv(c))
    array([[ -2.00876993,  -8.41880636],
           [ -2.80626879, -10.42757629]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, solve(cosm(A), sinm(A)))

def coshm(A):
    """
    Compute the hyperbolic matrix cosine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    coshm : (N, N) ndarray
        Hyperbolic matrix cosine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> c = coshm(a)
    >>> c
    array([[ 11.24592233,  38.76236492],
           [ 12.92078831,  50.00828725]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> t = tanhm(a)
    >>> s = sinhm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, 0.5 * (expm(A) + expm(-A)))

def sinhm(A):
    """
    Compute the hyperbolic matrix sine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinhm : (N, N) ndarray
        Hyperbolic matrix sine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> s = sinhm(a)
    >>> s
    array([[ 10.57300653,  39.28826594],
           [ 13.09608865,  49.86127247]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> t = tanhm(a)
    >>> c = coshm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, 0.5 * (expm(A) - expm(-A)))

def tanhm(A):
    """
    Compute the hyperbolic matrix tangent.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    tanhm : (N, N) ndarray
        Hyperbolic matrix tangent of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> t = tanhm(a)
    >>> t
    array([[ 0.3428582 ,  0.51987926],
           [ 0.17329309,  0.86273746]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> s = sinhm(a)
    >>> c = coshm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, solve(coshm(A), sinhm(A)))

def funm(A, func, disp=True):
    """
    Evaluate a matrix function specified by a callable.

    Returns the value of matrix-valued function ``f`` at `A`. The
    function ``f`` is an extension of the scalar-valued function `func`
    to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the function
    func : callable
        Callable object that evaluates a scalar function f.
        Must be vectorized (e.g., using ``numpy.vectorize``).
    disp : bool, optional
        Print a warning if the estimated error in the result is large,
        instead of returning the estimated error. (Default: True)

    Returns
    -------
    funm : (N, N) ndarray
        Value of the matrix function specified by func evaluated at `A`
    errest : float
        (if disp == False)

        1-norm of the estimated error, ||err||_1 / ||A||_1

    Notes
    -----
    This function implements the general algorithm based on Schur decomposition
    (Algorithm 9.1.1. in [1]_).

    If the input matrix is known to be diagonalizable, then relying on the
    eigendecomposition is likely to be faster. For example, if your matrix is
    Hermitian, you can do

    >>> from scipy.linalg import eigh
    >>> def funm_herm(a, func, check_finite=False):
    ...     w, v = eigh(a, check_finite=check_finite)
    ...     ## if you further know that your matrix is positive semidefinite,
    ...     ## you can optionally guard against precision errors by doing
    ...     # w = np.maximum(w, 0)
    ...     w = func(w)
    ...     return (v * w).dot(v.conj().T)

    References
    ----------
    .. [1] Gene H. Golub, Charles F. van Loan, Matrix Computations 4th ed.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import funm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> funm(a, lambda x: x*x)
    array([[  4.,  15.],
           [  5.,  19.]])
    >>> a.dot(a)
    array([[  4.,  15.],
           [  5.,  19.]])

    """
    A = _asarray_square(A)
    # Perform Schur decomposition (lapack ?gees)
    T, Z = schur(A)
    T, Z = rsf2csf(T, Z)
    n, n = T.shape
    F = diag(func(diag(T)))  # apply function to diagonal elements
    F = F.astype(T.dtype.char)  # e.g., when F is real but T is complex

    minden = abs(T[0, 0])

    # implement Algorithm 11.1.1 from Golub and Van Loan
    # "Matrix Computations."
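    # Since F = func(T) commutes with T, equating the (i, j) entries of T@F
    # and F@T gives a recurrence (the Parlett recurrence) that determines
    # each entry above the diagonal from entries closer to the diagonal;
    # the loops below sweep one superdiagonal at a time.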
    for p in range(1, n):
        for i in range(1, n-p+1):
            j = i + p
            s = T[i-1, j-1] * (F[j-1, j-1] - F[i-1, i-1])
            ksl = slice(i, j-1)
            val = dot(T[i-1, ksl], F[ksl, j-1]) - dot(F[i-1, ksl], T[ksl, j-1])
            s = s + val
            den = T[j-1, j-1] - T[i-1, i-1]
            if den != 0.0:
                s = s / den
            F[i-1, j-1] = s
            minden = min(minden, abs(den))

    F = dot(dot(Z, F), transpose(conjugate(Z)))
    F = _maybe_real(A, F)

    tol = {0: feps, 1: eps}[_array_precision[F.dtype.char]]
    if minden == 0.0:
        minden = tol
    err = min(1, max(tol, (tol/minden)*norm(triu(T, 1), 1)))
    if prod(ravel(logical_not(isfinite(F))), axis=0):
        err = Inf
    if disp:
        if err > 1000*tol:
            print("funm result may be inaccurate, approximate err =", err)
        return F
    else:
        return F, err

def signm(A, disp=True):
    """
    Matrix sign function.

    Extension of the scalar sign(x) to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the sign function
    disp : bool, optional
        Print a warning if the estimated error in the result is large,
        instead of returning the estimated error. (Default: True)

    Returns
    -------
    signm : (N, N) ndarray
        Value of the sign function at `A`
    errest : float
        (if disp == False)

        1-norm of the estimated error, ||err||_1 / ||A||_1

    Examples
    --------
    >>> from scipy.linalg import signm, eigvals
    >>> a = [[1,2,3], [1,2,1], [1,1,1]]
    >>> eigvals(a)
    array([ 4.12488542+0.j, -0.76155718+0.j,  0.63667176+0.j])
    >>> eigvals(signm(a))
    array([-1.+0.j,  1.+0.j,  1.+0.j])

    """
    A = _asarray_square(A)

    def rounded_sign(x):
        rx = np.real(x)
        if rx.dtype.char == 'f':
            c = 1e3*feps*amax(x)
        else:
            c = 1e3*eps*amax(x)
        return sign((absolute(rx) > c) * rx)
    result, errest = funm(A, rounded_sign, disp=0)
    errtol = {0: 1e3*feps, 1: 1e3*eps}[_array_precision[result.dtype.char]]
    if errest < errtol:
        return result

    # Handle signm of defective matrices:

    # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,
    # 8:237-250,1981" for how to improve the following (currently a
    # rather naive) iteration process:

    # a = result # sometimes iteration converges faster but where??

    # Shifting to avoid zero eigenvalues. How to ensure that shifting does
    # not change the spectrum too much?
    vals = svd(A, compute_uv=False)
    max_sv = np.amax(vals)
    # min_nonzero_sv = vals[(vals > max_sv*errtol).tolist().count(1)-1]
    # c = 0.5/min_nonzero_sv
    c = 0.5/max_sv
    S0 = A + c*np.identity(A.shape[0])
    prev_errest = errest
    for i in range(100):
        iS0 = inv(S0)
        S0 = 0.5*(S0 + iS0)
        Pp = 0.5*(dot(S0, S0) + S0)
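        # S0 is updated by the Newton iteration for the matrix sign,
        # S0 <- (S0 + inv(S0))/2.  Pp approximates the spectral projector
        # onto the eigenvalues with positive real part; at convergence
        # S0 @ S0 = I, so Pp is idempotent and ||Pp@Pp - Pp||_1 below
        # serves as the error estimate for the iteration.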
        errest = norm(dot(Pp, Pp) - Pp, 1)
        if errest < errtol or prev_errest == errest:
            break
        prev_errest = errest
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("signm result may be inaccurate, approximate err =", errest)
        return S0
    else:
        return S0, errest

def khatri_rao(a, b):
    r"""
    Khatri-Rao product

    A column-wise Kronecker product of two matrices

    Parameters
    ----------
    a : (n, k) array_like
        Input array
    b : (m, k) array_like
        Input array

    Returns
    -------
    c : (n*m, k) ndarray
        Khatri-Rao product of `a` and `b`.

    See Also
    --------
    kron : Kronecker product

    Notes
    -----
    The mathematical definition of the Khatri-Rao product is:

    .. math::

        c_{:, k} = a_{:, k} \otimes b_{:, k}

    i.e., each column of the result is the Kronecker product of the
    corresponding columns of `a` and `b`, e.g.::

        c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
    >>> b = np.array([[3, 4, 5], [6, 7, 8], [2, 3, 9]])
    >>> linalg.khatri_rao(a, b)
    array([[ 3,  8, 15],
           [ 6, 14, 24],
           [ 2,  6, 27],
           [12, 20, 30],
           [24, 35, 48],
           [ 8, 15, 54]])

    """
    a = np.asarray(a)
    b = np.asarray(b)

    if not (a.ndim == 2 and b.ndim == 2):
        raise ValueError("Both arrays should be 2-dimensional.")

    if not a.shape[1] == b.shape[1]:
        raise ValueError("The number of columns for both arrays "
                         "should be equal.")

    # c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T
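    # Broadcasting form of the commented-out vstack above: the product has
    # shape (n, m, k) with entries a[i, k]*b[j, k], and the reshape collapses
    # the first two axes so each column becomes kron(a[:, k], b[:, k]).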
    c = a[..., :, np.newaxis, :] * b[..., np.newaxis, :, :]
    return c.reshape((-1,) + c.shape[2:])