Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.9/dist-packages/scipy/linalg/_matfuncs.py: 15% of 213 statements (coverage.py v7.3.1, created at 2023-09-23 06:43 +0000)
#
# Author: Travis Oliphant, March 2002
#
from itertools import product

import numpy as np
from numpy import (dot, diag, prod, logical_not, ravel, transpose,
                   conjugate, absolute, amax, sign, isfinite, triu)
from numpy.lib.scimath import sqrt as csqrt

# Local imports
from scipy.linalg import LinAlgError, bandwidth
from ._misc import norm
from ._basic import solve, inv
from ._decomp_svd import svd
from ._decomp_schur import schur, rsf2csf
from ._expm_frechet import expm_frechet, expm_cond
from ._matfuncs_sqrtm import sqrtm
from ._matfuncs_expm import pick_pade_structure, pade_UV_calc

# deprecated imports to be removed in SciPy 1.13.0
from numpy import single  # noqa

__all__ = ['expm', 'cosm', 'sinm', 'tanm', 'coshm', 'sinhm', 'tanhm', 'logm',
           'funm', 'signm', 'sqrtm', 'fractional_matrix_power', 'expm_frechet',
           'expm_cond', 'khatri_rao']

eps = np.finfo('d').eps
feps = np.finfo('f').eps

_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
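# The values of _array_precision select which machine epsilon the tolerance
# checks below are scaled from: 0 maps to single-precision ``feps`` and 1 to
# double-precision ``eps`` (see the ``{0: feps, 1: eps}`` lookups further down).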


###############################################################################
# Utility functions.


def _asarray_square(A):
    """
    Wraps asarray with the extra requirement that the input be a square matrix.

    The motivation is that the matfuncs module has real functions that have
    been lifted to square matrix functions.

    Parameters
    ----------
    A : array_like
        A square matrix.

    Returns
    -------
    out : ndarray
        An ndarray copy or view or other representation of A.

    """
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square array_like input')
    return A


def _maybe_real(A, B, tol=None):
    """
    Return either B or the real part of B, depending on properties of A and B.

    The motivation is that B has been computed as a complicated function of A,
    and B may be perturbed by negligible imaginary components.
    If A is real and B is complex with small imaginary components,
    then return a real copy of B. The assumption in that case would be that
    the imaginary components of B are numerical artifacts.

    Parameters
    ----------
    A : ndarray
        Input array whose type is to be checked as real vs. complex.
    B : ndarray
        Array to be returned, possibly without its imaginary part.
    tol : float
        Absolute tolerance.

    Returns
    -------
    out : real or complex array
        Either the input array B or only the real part of the input array B.

    """
    # Note that booleans and integers compare as real.
    if np.isrealobj(A) and np.iscomplexobj(B):
        if tol is None:
            tol = {0: feps*1e3, 1: eps*1e6}[_array_precision[B.dtype.char]]
        if np.allclose(B.imag, 0.0, atol=tol):
            B = B.real
    return B
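
# Illustrative sketch (not part of the library source): for a real ``A`` and a
# ``B`` whose imaginary part is everywhere below the tolerance, ``_maybe_real``
# drops the imaginary part, e.g.
#
#     >>> _maybe_real(np.eye(2), np.eye(2) + 1e-20j).dtype
#     dtype('float64')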


###############################################################################
# Matrix functions.


def fractional_matrix_power(A, t):
    """
    Compute the fractional power of a matrix.

    Proceeds according to the discussion in section (6) of [1]_.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose fractional power to evaluate.
    t : float
        Fractional power.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import fractional_matrix_power
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = fractional_matrix_power(a, 0.5)
    >>> b
    array([[ 0.75592895,  1.13389342],
           [ 0.37796447,  1.88982237]])
    >>> np.dot(b, b)      # Verify square root
    array([[ 1.,  3.],
           [ 1.,  4.]])

    """
    # This fixes some issue with imports;
    # this function calls onenormest which is in scipy.sparse.
    A = _asarray_square(A)
    import scipy.linalg._matfuncs_inv_ssq
    return scipy.linalg._matfuncs_inv_ssq._fractional_matrix_power(A, t)


def logm(A, disp=True):
    """
    Compute matrix logarithm.

    The matrix logarithm is the inverse of
    expm: expm(logm(`A`)) == `A`

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose logarithm to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `A`
    errest : float
        (if disp == False)

        1-norm of the estimated error, ||err||_1 / ||A||_1

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197

    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7

    .. [3] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import logm, expm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = logm(a)
    >>> b
    array([[-1.02571087,  2.05142174],
           [ 0.68380725,  1.02571087]])
    >>> expm(b)         # Verify expm(logm(a)) returns a
    array([[ 1.,  3.],
           [ 1.,  4.]])

    """
    A = _asarray_square(A)
    # Avoid circular import ... this is OK, right?
    import scipy.linalg._matfuncs_inv_ssq
    F = scipy.linalg._matfuncs_inv_ssq._logm(A)
    F = _maybe_real(A, F)
    errtol = 1000*eps
    # TODO use a better error approximation
    errest = norm(expm(F)-A, 1) / norm(A, 1)
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("logm result may be inaccurate, approximate err =", errest)
        return F
    else:
        return F, errest


def expm(A):
    """Compute the matrix exponential of an array.

    Parameters
    ----------
    A : ndarray
        Input with the last two dimensions being square, i.e. ``(..., n, n)``.

    Returns
    -------
    eA : ndarray
        The resulting matrix exponential with the same shape as ``A``.

    Notes
    -----
    Implements the algorithm given in [1], which is essentially a Pade
    approximation with a variable order that is decided based on the array
    data.

    For input with size ``n``, the memory usage is in the worst case in the
    order of ``8*(n**2)``. If the input data is not of single or double
    precision real or complex dtype, it is copied to a new array.

    For cases ``n >= 400``, the cost of the exact 1-norm computation breaks
    even with that of 1-norm estimation, and from that point on the estimation
    scheme given in [2] is used to decide on the approximation order.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham, (2009), "A New Scaling
           and Squaring Algorithm for the Matrix Exponential", SIAM J. Matrix
           Anal. Appl. 31(3):970-989, :doi:`10.1137/09074721X`

    .. [2] Nicholas J. Higham and Francoise Tisseur (2000), "A Block Algorithm
           for Matrix 1-Norm Estimation, with an Application to 1-Norm
           Pseudospectra." SIAM J. Matrix Anal. Appl. 21(4):1185-1201,
           :doi:`10.1137/S0895479899356080`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Matrix version of the formula exp(0) = 1:

    >>> expm(np.zeros((3, 2, 2)))
    array([[[1., 0.],
            [0., 1.]],
    <BLANKLINE>
           [[1., 0.],
            [0., 1.]],
    <BLANKLINE>
           [[1., 0.],
            [0., 1.]]])

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])

    """
    a = np.asarray(A)
    if a.size == 1 and a.ndim < 2:
        return np.array([[np.exp(a.item())]])

    if a.ndim < 2:
        raise LinAlgError('The input array must be at least two-dimensional')
    if a.shape[-1] != a.shape[-2]:
        raise LinAlgError('Last 2 dimensions of the array must be square')
    n = a.shape[-1]
    # Empty array
    if min(*a.shape) == 0:
        return np.empty_like(a)

    # Scalar case
    if a.shape[-2:] == (1, 1):
        return np.exp(a)

    if not np.issubdtype(a.dtype, np.inexact):
        a = a.astype(float)
    elif a.dtype == np.float16:
        a = a.astype(np.float32)

    # Explicit formula for 2x2 case, formula (2.2) in [1];
    # without Kahan's method, numerical instabilities can occur.
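    # Spelled out, the closed form evaluated below is (up to the common factor
    # exp((a1 + a4)/2), and with sinch(x) = sinh(x)/x, taken as 1 at x = 0):
    #
    #     [[cosh(mu) + (a1 - a4)/2 * sinch(mu),   a2 * sinch(mu)],
    #      [a3 * sinch(mu),   cosh(mu) - (a1 - a4)/2 * sinch(mu)]]
    #
    # where mu = sqrt((a1 - a4)**2 + 4*a2*a3)/2 and a1, a2, a3, a4 are the
    # entries of each 2x2 slice in row-major order, as unpacked just below.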
    if a.shape[-2:] == (2, 2):
        a1, a2, a3, a4 = (a[..., [0], [0]],
                          a[..., [0], [1]],
                          a[..., [1], [0]],
                          a[..., [1], [1]])
        mu = csqrt((a1-a4)**2 + 4*a2*a3)/2.  # csqrt slow but handles neg.vals

        eApD2 = np.exp((a1+a4)/2.)
        AmD2 = (a1 - a4)/2.
        coshMu = np.cosh(mu)
        sinchMu = np.ones_like(coshMu)
        mask = mu != 0
        sinchMu[mask] = np.sinh(mu[mask]) / mu[mask]
        eA = np.empty(a.shape, dtype=mu.dtype)
        eA[..., [0], [0]] = eApD2 * (coshMu + AmD2*sinchMu)
        eA[..., [0], [1]] = eApD2 * a2 * sinchMu
        eA[..., [1], [0]] = eApD2 * a3 * sinchMu
        eA[..., [1], [1]] = eApD2 * (coshMu - AmD2*sinchMu)
        if np.isrealobj(a):
            return eA.real
        return eA

    # larger problem with unspecified stacked dimensions.
    n = a.shape[-1]
    eA = np.empty(a.shape, dtype=a.dtype)
    # working memory to hold intermediate arrays
    Am = np.empty((5, n, n), dtype=a.dtype)

    # Main loop to go through the slices of an ndarray and passing to expm
    for ind in product(*[range(x) for x in a.shape[:-2]]):
        aw = a[ind]

        lu = bandwidth(aw)
        if not any(lu):  # a is diagonal?
            eA[ind] = np.diag(np.exp(np.diag(aw)))
            continue

        # Generic/triangular case; copy the slice into scratch and send.
        # Am will be mutated by pick_pade_structure
        Am[0, :, :] = aw
        m, s = pick_pade_structure(Am)

        if s != 0:  # scaling needed
            Am[:4] *= [[[2**(-s)]], [[4**(-s)]], [[16**(-s)]], [[64**(-s)]]]
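            # (Assumption, inferred from the scaling factors used: Am appears
            # to hold A together with its 2nd, 4th and 6th powers, so dividing
            # A by 2**s divides those stored powers by 4**s, 16**s and 64**s.)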
        pade_UV_calc(Am, n, m)
        eAw = Am[0]

        if s != 0:  # squaring needed

            if (lu[1] == 0) or (lu[0] == 0):  # lower/upper triangular
                # This branch implements Code Fragment 2.1 of [1]

                diag_aw = np.diag(aw)
                # einsum returns a writable view
                np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2**(-s))
                # super/sub diagonal
                sd = np.diag(aw, k=-1 if lu[1] == 0 else 1)

                for i in range(s-1, -1, -1):
                    eAw = eAw @ eAw

                    # diagonal
                    np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2.**(-i))
                    exp_sd = _exp_sinch(diag_aw * (2.**(-i))) * (sd * 2**(-i))
                    if lu[1] == 0:  # lower
                        np.einsum('ii->i', eAw[1:, :-1])[:] = exp_sd
                    else:  # upper
                        np.einsum('ii->i', eAw[:-1, 1:])[:] = exp_sd

            else:  # generic
                for _ in range(s):
                    eAw = eAw @ eAw

        # Zero out the entries from np.empty in case of triangular input
        if (lu[0] == 0) or (lu[1] == 0):
            eA[ind] = np.triu(eAw) if lu[0] == 0 else np.tril(eAw)
        else:
            eA[ind] = eAw

    return eA


def _exp_sinch(x):
    # Higham's formula (10.42), might overflow, see GH-11839
    lexp_diff = np.diff(np.exp(x))
    l_diff = np.diff(x)
    mask_z = l_diff == 0.
    lexp_diff[~mask_z] /= l_diff[~mask_z]
    lexp_diff[mask_z] = np.exp(x[:-1][mask_z])
    return lexp_diff
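
# Put differently, _exp_sinch returns the first divided differences of exp over
# the points in ``x``: (exp(x[k+1]) - exp(x[k])) / (x[k+1] - x[k]), with the
# limiting value exp(x[k]) used for repeated points. Illustrative sketch only:
#
#     >>> _exp_sinch(np.array([0.0, 0.0, 1.0]))
#     array([1.        , 1.71828183])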


def cosm(A):
    """
    Compute the matrix cosine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    cosm : (N, N) ndarray
        Matrix cosine of A

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])

    """
    A = _asarray_square(A)
    if np.iscomplexobj(A):
        return 0.5*(expm(1j*A) + expm(-1j*A))
    else:
        return expm(1j*A).real


def sinm(A):
    """
    Compute the matrix sine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinm : (N, N) ndarray
        Matrix sine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])

    """
    A = _asarray_square(A)
    if np.iscomplexobj(A):
        return -0.5j*(expm(1j*A) - expm(-1j*A))
    else:
        return expm(1j*A).imag


def tanm(A):
    """
    Compute the matrix tangent.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    tanm : (N, N) ndarray
        Matrix tangent of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanm, sinm, cosm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> t = tanm(a)
    >>> t
    array([[ -2.00876993,  -8.41880636],
           [ -2.80626879, -10.42757629]])

    Verify tanm(a) = sinm(a).dot(inv(cosm(a)))

    >>> s = sinm(a)
    >>> c = cosm(a)
    >>> s.dot(np.linalg.inv(c))
    array([[ -2.00876993,  -8.41880636],
           [ -2.80626879, -10.42757629]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, solve(cosm(A), sinm(A)))


def coshm(A):
    """
    Compute the hyperbolic matrix cosine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    coshm : (N, N) ndarray
        Hyperbolic matrix cosine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> c = coshm(a)
    >>> c
    array([[ 11.24592233,  38.76236492],
           [ 12.92078831,  50.00828725]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> t = tanhm(a)
    >>> s = sinhm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, 0.5 * (expm(A) + expm(-A)))


def sinhm(A):
    """
    Compute the hyperbolic matrix sine.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinhm : (N, N) ndarray
        Hyperbolic matrix sine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> s = sinhm(a)
    >>> s
    array([[ 10.57300653,  39.28826594],
           [ 13.09608865,  49.86127247]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> t = tanhm(a)
    >>> c = coshm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, 0.5 * (expm(A) - expm(-A)))


def tanhm(A):
    """
    Compute the hyperbolic matrix tangent.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    tanhm : (N, N) ndarray
        Hyperbolic matrix tangent of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> t = tanhm(a)
    >>> t
    array([[ 0.3428582 ,  0.51987926],
           [ 0.17329309,  0.86273746]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> s = sinhm(a)
    >>> c = coshm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])

    """
    A = _asarray_square(A)
    return _maybe_real(A, solve(coshm(A), sinhm(A)))


def funm(A, func, disp=True):
    """
    Evaluate a matrix function specified by a callable.

    Returns the value of matrix-valued function ``f`` at `A`. The
    function ``f`` is an extension of the scalar-valued function `func`
    to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the function
    func : callable
        Callable object that evaluates a scalar function f.
        Must be vectorized (e.g., using vectorize).
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    funm : (N, N) ndarray
        Value of the matrix function specified by func evaluated at `A`
    errest : float
        (if disp == False)

        1-norm of the estimated error, ||err||_1 / ||A||_1

    Notes
    -----
    This function implements the general algorithm based on Schur decomposition
    (Algorithm 9.1.1. in [1]_).

    If the input matrix is known to be diagonalizable, then relying on the
    eigendecomposition is likely to be faster. For example, if your matrix is
    Hermitian, you can do

    >>> from scipy.linalg import eigh
    >>> def funm_herm(a, func, check_finite=False):
    ...     w, v = eigh(a, check_finite=check_finite)
    ...     ## if you further know that your matrix is positive semidefinite,
    ...     ## you can optionally guard against precision errors by doing
    ...     # w = np.maximum(w, 0)
    ...     w = func(w)
    ...     return (v * w).dot(v.conj().T)

    References
    ----------
    .. [1] Gene H. Golub, Charles F. van Loan, Matrix Computations 4th ed.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import funm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> funm(a, lambda x: x*x)
    array([[  4.,  15.],
           [  5.,  19.]])
    >>> a.dot(a)
    array([[  4.,  15.],
           [  5.,  19.]])

    """
    A = _asarray_square(A)
    # Perform Schur decomposition (lapack ?gees)
    T, Z = schur(A)
    T, Z = rsf2csf(T, Z)
    n, n = T.shape
    F = diag(func(diag(T)))  # apply function to diagonal elements
    F = F.astype(T.dtype.char)  # e.g., when F is real but T is complex

    minden = abs(T[0, 0])

    # implement Algorithm 11.1.1 from Golub and Van Loan
    # "Matrix Computations."
    for p in range(1, n):
        for i in range(1, n-p+1):
            j = i + p
            s = T[i-1, j-1] * (F[j-1, j-1] - F[i-1, i-1])
            ksl = slice(i, j-1)
            val = dot(T[i-1, ksl], F[ksl, j-1]) - dot(F[i-1, ksl], T[ksl, j-1])
            s = s + val
            den = T[j-1, j-1] - T[i-1, i-1]
            if den != 0.0:
                s = s / den
            F[i-1, j-1] = s
            minden = min(minden, abs(den))

    F = dot(dot(Z, F), transpose(conjugate(Z)))
    F = _maybe_real(A, F)

    tol = {0: feps, 1: eps}[_array_precision[F.dtype.char]]
    if minden == 0.0:
        minden = tol
    err = min(1, max(tol, (tol/minden)*norm(triu(T, 1), 1)))
    if prod(ravel(logical_not(isfinite(F))), axis=0):
        err = np.inf
    if disp:
        if err > 1000*tol:
            print("funm result may be inaccurate, approximate err =", err)
        return F
    else:
        return F, err


def signm(A, disp=True):
    """
    Matrix sign function.

    Extension of the scalar sign(x) to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the sign function
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    signm : (N, N) ndarray
        Value of the sign function at `A`
    errest : float
        (if disp == False)

        1-norm of the estimated error, ||err||_1 / ||A||_1

    Examples
    --------
    >>> from scipy.linalg import signm, eigvals
    >>> a = [[1,2,3], [1,2,1], [1,1,1]]
    >>> eigvals(a)
    array([ 4.12488542+0.j, -0.76155718+0.j,  0.63667176+0.j])
    >>> eigvals(signm(a))
    array([-1.+0.j,  1.+0.j,  1.+0.j])

    """
    A = _asarray_square(A)

    def rounded_sign(x):
        rx = np.real(x)
        if rx.dtype.char == 'f':
            c = 1e3*feps*amax(x)
        else:
            c = 1e3*eps*amax(x)
        return sign((absolute(rx) > c) * rx)
    result, errest = funm(A, rounded_sign, disp=0)
    errtol = {0: 1e3*feps, 1: 1e3*eps}[_array_precision[result.dtype.char]]
    if errest < errtol:
        return result

    # Handle signm of defective matrices:

    # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,
    # 8:237-250,1981" for how to improve the following (currently a
    # rather naive) iteration process:

    # a = result   # sometimes iteration converges faster but where??

    # Shifting to avoid zero eigenvalues. How to ensure that shifting does
    # not change the spectrum too much?
    vals = svd(A, compute_uv=False)
    max_sv = np.amax(vals)
    # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1]
    # c = 0.5/min_nonzero_sv
    c = 0.5/max_sv
    S0 = A + c*np.identity(A.shape[0])
    prev_errest = errest
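    # Newton iteration for the matrix sign function, S <- (S + inv(S))/2,
    # applied to the shifted matrix S0. At convergence S0 @ S0 is (close to)
    # the identity, so Pp = (S0@S0 + S0)/2 should behave like a projector, and
    # ||Pp@Pp - Pp||_1 serves as the stopping/error estimate.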
    for i in range(100):
        iS0 = inv(S0)
        S0 = 0.5*(S0 + iS0)
        Pp = 0.5*(dot(S0, S0) + S0)
        errest = norm(dot(Pp, Pp) - Pp, 1)
        if errest < errtol or prev_errest == errest:
            break
        prev_errest = errest
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("signm result may be inaccurate, approximate err =", errest)
        return S0
    else:
        return S0, errest


def khatri_rao(a, b):
    r"""
    Khatri-Rao product

    A column-wise Kronecker product of two matrices

    Parameters
    ----------
    a : (n, k) array_like
        Input array
    b : (m, k) array_like
        Input array

    Returns
    -------
    c : (n*m, k) ndarray
        Khatri-Rao product of `a` and `b`.

    See Also
    --------
    kron : Kronecker product

    Notes
    -----
    The mathematical definition of the Khatri-Rao product is:

    .. math::

        (A_{ij}  \bigotimes B_{ij})_{ij}

    which is the Kronecker product of every column of A and B, e.g.::

        c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
    >>> b = np.array([[3, 4, 5], [6, 7, 8], [2, 3, 9]])
    >>> linalg.khatri_rao(a, b)
    array([[ 3,  8, 15],
           [ 6, 14, 24],
           [ 2,  6, 27],
           [12, 20, 30],
           [24, 35, 48],
           [ 8, 15, 54]])

    """
    a = np.asarray(a)
    b = np.asarray(b)

    if not (a.ndim == 2 and b.ndim == 2):
        raise ValueError("Both arrays should be 2-dimensional.")

    if not a.shape[1] == b.shape[1]:
        raise ValueError("The number of columns for both arrays "
                         "should be equal.")

    # c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T
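    # Broadcasting equivalent of the commented loop above: inserting singleton
    # axes makes column k of ``a`` multiply column k of ``b`` as an (n, m)
    # outer-product block, and the reshape collapses the first two axes into
    # rows of length n*m.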
    c = a[..., :, np.newaxis, :] * b[..., np.newaxis, :, :]
    return c.reshape((-1,) + c.shape[2:])