summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAaron Meurer <asmeurer@gmail.com>2022-12-12 18:23:04 -0700
committerAaron Meurer <asmeurer@gmail.com>2022-12-12 18:23:04 -0700
commit18e88b4a89a4cbc59bbb00423c957537fa9cdb58 (patch)
tree142f5fb189ae06e46420ce9ee332edfba59baa29
parent4ca8204c5c13b3a5c9482772813e67184f3f47c8 (diff)
downloadnumpy-18e88b4a89a4cbc59bbb00423c957537fa9cdb58.tar.gz
Add namedtuple return types to linalg functions that return tuples
That is, eig(), eigh(), qr(), slogdet(), and svd(). For those functions that return non-tuples with certain keyword arguments, the return type is unchanged. This change should be completely backwards compatible. The namedtuple attribute names come from the array API specification (see, e.g., https://data-apis.org/array-api/latest/extensions/generated/signatures.linalg.eigh.html), with the exception of eig() which is just the same as eigh(). The name of the namedtuple object itself is not part of the specification or the public API. I have not used a namedtuple for the tuple output for qr(mode='raw'), which returns (h, tau). This updates the docstrings to use the updated namedtuple return names, and also the examples to use those names more consistently. This also updates the tests to check each function for the namedtuple attributes at least once.
-rw-r--r--numpy/linalg/linalg.py255
-rw-r--r--numpy/linalg/tests/test_linalg.py57
2 files changed, 179 insertions, 133 deletions
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index ee0fe6166..6373966e3 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -17,6 +17,7 @@ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
import functools
import operator
import warnings
+from typing import NamedTuple
from .._utils import set_module
from numpy.core import (
@@ -33,6 +34,28 @@ from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import _umath_linalg
+from numpy._typing import NDArray
+
+class EigResult(NamedTuple):
+ eigenvalues: NDArray
+ eigenvectors: NDArray
+
+class EighResult(NamedTuple):
+ eigenvalues: NDArray
+ eigenvectors: NDArray
+
+class QRResult(NamedTuple):
+ Q: NDArray
+ R: NDArray
+
+class SlogdetResult(NamedTuple):
+ sign: NDArray
+ logabsdet: NDArray
+
+class SVDResult(NamedTuple):
+ U: NDArray
+ S: NDArray
+ Vh: NDArray
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.linalg')
@@ -778,10 +801,9 @@ def qr(a, mode='reduced'):
mode : {'reduced', 'complete', 'r', 'raw'}, optional
If K = min(M, N), then
- * 'reduced' : returns q, r with dimensions
- (..., M, K), (..., K, N) (default)
- * 'complete' : returns q, r with dimensions (..., M, M), (..., M, N)
- * 'r' : returns r only with dimensions (..., K, N)
+ * 'reduced' : returns Q, R with dimensions (..., M, K), (..., K, N) (default)
+ * 'complete' : returns Q, R with dimensions (..., M, M), (..., M, N)
+ * 'r' : returns R only with dimensions (..., K, N)
* 'raw' : returns h, tau with dimensions (..., N, M), (..., K,)
The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,
@@ -797,14 +819,17 @@ def qr(a, mode='reduced'):
Returns
-------
- q : ndarray of float or complex, optional
+ When mode is 'reduced' or 'complete', the result will be a namedtuple with
+ the attributes `Q` and `R`.
+
+ Q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case. In case the number of dimensions in the input array is
greater than 2 then a stack of the matrices with above properties
is returned.
- r : ndarray of float or complex, optional
+ R : ndarray of float or complex, optional
The upper-triangular matrix or a stack of upper-triangular
matrices if the number of dimensions in the input array is greater
than 2.
@@ -849,19 +874,19 @@ def qr(a, mode='reduced'):
Examples
--------
>>> a = np.random.randn(9, 6)
- >>> q, r = np.linalg.qr(a)
- >>> np.allclose(a, np.dot(q, r)) # a does equal qr
+ >>> Q, R = np.linalg.qr(a)
+ >>> np.allclose(a, np.dot(Q, R)) # a does equal QR
True
- >>> r2 = np.linalg.qr(a, mode='r')
- >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
+ >>> R2 = np.linalg.qr(a, mode='r')
+ >>> np.allclose(R, R2) # mode='r' returns the same R as mode='full'
True
>>> a = np.random.normal(size=(3, 2, 2)) # Stack of 2 x 2 matrices as input
- >>> q, r = np.linalg.qr(a)
- >>> q.shape
+ >>> Q, R = np.linalg.qr(a)
+ >>> Q.shape
(3, 2, 2)
- >>> r.shape
+ >>> R.shape
(3, 2, 2)
- >>> np.allclose(a, np.matmul(q, r))
+ >>> np.allclose(a, np.matmul(Q, R))
True
Example illustrating a common use of `qr`: solving of least squares
@@ -876,8 +901,8 @@ def qr(a, mode='reduced'):
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
- If A = qr such that q is orthonormal (which is always possible via
- Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
+ If A = QR such that Q is orthonormal (which is always possible via
+ Gram-Schmidt), then ``x = inv(R) * (Q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
@@ -887,9 +912,9 @@ def qr(a, mode='reduced'):
[1, 1],
[2, 1]])
>>> b = np.array([1, 2, 2, 3])
- >>> q, r = np.linalg.qr(A)
- >>> p = np.dot(q.T, b)
- >>> np.dot(np.linalg.inv(r), p)
+ >>> Q, R = np.linalg.qr(A)
+ >>> p = np.dot(Q.T, b)
+ >>> np.dot(np.linalg.inv(R), p)
array([ 1., 1.])
"""
@@ -961,7 +986,7 @@ def qr(a, mode='reduced'):
q = q.astype(result_t, copy=False)
r = r.astype(result_t, copy=False)
- return wrap(q), wrap(r)
+ return QRResult(wrap(q), wrap(r))
# Eigenvalues
@@ -1178,7 +1203,9 @@ def eig(a):
Returns
-------
- w : (..., M) array
+ A namedtuple with the following attributes:
+
+ eigenvalues : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
@@ -1186,10 +1213,10 @@ def eig(a):
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
- v : (..., M, M) array
+ eigenvectors : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
- column ``v[:,i]`` is the eigenvector corresponding to the
- eigenvalue ``w[i]``.
+ column ``eigenvectors[:,i]`` is the eigenvector corresponding to the
+ eigenvalue ``eigenvalues[i]``.
Raises
------
@@ -1219,30 +1246,30 @@ def eig(a):
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
- The number `w` is an eigenvalue of `a` if there exists a vector
- `v` such that ``a @ v = w * v``. Thus, the arrays `a`, `w`, and
- `v` satisfy the equations ``a @ v[:,i] = w[i] * v[:,i]``
- for :math:`i \\in \\{0,...,M-1\\}`.
+ The number `w` is an eigenvalue of `a` if there exists a vector `v` such
+ that ``a @ v = w * v``. Thus, the arrays `a`, `eigenvalues`, and
+ `eigenvectors` satisfy the equations ``a @ eigenvectors[:,i] =
+ eigenvalues[i] * eigenvectors[:,i]`` for :math:`i \\in \\{0,...,M-1\\}`.
- The array `v` of eigenvectors may not be of maximum rank, that is, some
- of the columns may be linearly dependent, although round-off error may
- obscure that fact. If the eigenvalues are all different, then theoretically
- the eigenvectors are linearly independent and `a` can be diagonalized by
- a similarity transformation using `v`, i.e, ``inv(v) @ a @ v`` is diagonal.
+ The array `eigenvectors` may not be of maximum rank, that is, some of the
+ columns may be linearly dependent, although round-off error may obscure
+ that fact. If the eigenvalues are all different, then theoretically the
+ eigenvectors are linearly independent and `a` can be diagonalized by a
+ similarity transformation using `eigenvectors`, i.e, ``inv(eigenvectors) @
+ a @ eigenvectors`` is diagonal.
For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur`
- is preferred because the matrix `v` is guaranteed to be unitary, which is
- not the case when using `eig`. The Schur factorization produces an
- upper triangular matrix rather than a diagonal matrix, but for normal
- matrices only the diagonal of the upper triangular matrix is needed, the
- rest is roundoff error.
-
- Finally, it is emphasized that `v` consists of the *right* (as in
- right-hand side) eigenvectors of `a`. A vector `y` satisfying
- ``y.T @ a = z * y.T`` for some number `z` is called a *left*
- eigenvector of `a`, and, in general, the left and right eigenvectors
- of a matrix are not necessarily the (perhaps conjugate) transposes
- of each other.
+ is preferred because the matrix `eigenvectors` is guaranteed to be
+ unitary, which is not the case when using `eig`. The Schur factorization
+ produces an upper triangular matrix rather than a diagonal matrix, but for
+ normal matrices only the diagonal of the upper triangular matrix is
+ needed, the rest is roundoff error.
+
+ Finally, it is emphasized that `eigenvectors` consists of the *right* (as
+ in right-hand side) eigenvectors of `a`. A vector `y` satisfying ``y.T @ a
+ = z * y.T`` for some number `z` is called a *left* eigenvector of `a`,
+ and, in general, the left and right eigenvectors of a matrix are not
+ necessarily the (perhaps conjugate) transposes of each other.
References
----------
@@ -1253,41 +1280,45 @@ def eig(a):
--------
>>> from numpy import linalg as LA
- (Almost) trivial example with real e-values and e-vectors.
+ (Almost) trivial example with real eigenvalues and eigenvectors.
- >>> w, v = LA.eig(np.diag((1, 2, 3)))
- >>> w; v
+ >>> eigenvalues, eigenvectors = LA.eig(np.diag((1, 2, 3)))
+ >>> eigenvalues
array([1., 2., 3.])
+ >>> eigenvectors
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
- Real matrix possessing complex e-values and e-vectors; note that the
- e-values are complex conjugates of each other.
+ Real matrix possessing complex eigenvalues and eigenvectors; note that the
+ eigenvalues are complex conjugates of each other.
- >>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
- >>> w; v
+ >>> eigenvalues, eigenvectors = LA.eig(np.array([[1, -1], [1, 1]]))
+ >>> eigenvalues
array([1.+1.j, 1.-1.j])
+ >>> eigenvectors
array([[0.70710678+0.j , 0.70710678-0.j ],
[0. -0.70710678j, 0. +0.70710678j]])
- Complex-valued matrix with real e-values (but complex-valued e-vectors);
+ Complex-valued matrix with real eigenvalues (but complex-valued eigenvectors);
note that ``a.conj().T == a``, i.e., `a` is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
- >>> w, v = LA.eig(a)
- >>> w; v
+ >>> eigenvalues, eigenvectors = LA.eig(a)
+ >>> eigenvalues
array([2.+0.j, 0.+0.j])
+ >>> eigenvectors
array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary
[ 0.70710678+0.j , -0. +0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
- >>> # Theor. e-values are 1 +/- 1e-9
- >>> w, v = LA.eig(a)
- >>> w; v
+ >>> # Theor. eigenvalues are 1 +/- 1e-9
+ >>> eigenvalues, eigenvectors = LA.eig(a)
+ >>> eigenvalues
array([1., 1.])
+ >>> eigenvectors
array([[1., 0.],
[0., 1.]])
@@ -1311,7 +1342,7 @@ def eig(a):
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
- return w.astype(result_t, copy=False), wrap(vt)
+ return EigResult(w.astype(result_t, copy=False), wrap(vt))
@array_function_dispatch(_eigvalsh_dispatcher)
@@ -1339,13 +1370,15 @@ def eigh(a, UPLO='L'):
Returns
-------
- w : (..., M) ndarray
+ A namedtuple with the following attributes:
+
+ eigenvalues : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
- v : {(..., M, M) ndarray, (..., M, M) matrix}
- The column ``v[:, i]`` is the normalized eigenvector corresponding
- to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
- a matrix object.
+ eigenvectors : {(..., M, M) ndarray, (..., M, M) matrix}
+ The column ``eigenvectors[:, i]`` is the normalized eigenvector
+ corresponding to the eigenvalue ``eigenvalues[i]``. Will return a
+ matrix object if `a` is a matrix object.
Raises
------
@@ -1372,10 +1405,10 @@ def eigh(a, UPLO='L'):
The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``,
``_heevd``.
- The eigenvalues of real symmetric or complex Hermitian matrices are
- always real. [1]_ The array `v` of (column) eigenvectors is unitary
- and `a`, `w`, and `v` satisfy the equations
- ``dot(a, v[:, i]) = w[i] * v[:, i]``.
+ The eigenvalues of real symmetric or complex Hermitian matrices are always
+ real. [1]_ The array `eigenvectors` of (column) eigenvectors is unitary and
+ `a`, `eigenvalues`, and `eigenvectors` satisfy the equations ``dot(a,
+ eigenvectors[:, i]) = eigenvalues[i] * eigenvectors[:, i]``.
References
----------
@@ -1389,24 +1422,26 @@ def eigh(a, UPLO='L'):
>>> a
array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
- >>> w, v = LA.eigh(a)
- >>> w; v
+ >>> eigenvalues, eigenvectors = LA.eigh(a)
+ >>> eigenvalues
array([0.17157288, 5.82842712])
+ >>> eigenvectors
array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
- >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
+ >>> np.dot(a, eigenvectors[:, 0]) - eigenvalues[0] * eigenvectors[:, 0] # verify 1st eigenval/vec pair
array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j])
- >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
+ >>> np.dot(a, eigenvectors[:, 1]) - eigenvalues[1] * eigenvectors[:, 1] # verify 2nd eigenval/vec pair
array([0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
- >>> w, v = LA.eigh(A)
- >>> w; v
+ >>> eigenvalues, eigenvectors = LA.eigh(A)
+ >>> eigenvalues
array([0.17157288, 5.82842712])
+ >>> eigenvectors
matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
@@ -1430,6 +1465,7 @@ def eigh(a, UPLO='L'):
[ 0. +0.89442719j, 0. -0.4472136j ]])
array([[ 0.89442719+0.j , -0. +0.4472136j],
[-0. +0.4472136j, 0.89442719+0.j ]])
+
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
@@ -1451,7 +1487,7 @@ def eigh(a, UPLO='L'):
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
- return w, wrap(vt)
+ return EighResult(w, wrap(vt))
# Singular value decomposition
@@ -1493,16 +1529,19 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
Returns
-------
- u : { (..., M, M), (..., M, K) } array
+ When `compute_uv` is True, the result is a namedtuple with the following
+ attribute names:
+
+ U : { (..., M, M), (..., M, K) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
- s : (..., K) array
+ S : (..., K) array
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
- vh : { (..., N, N), (..., K, N) } array
+ Vh : { (..., N, N), (..., K, N) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
@@ -1555,45 +1594,45 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
Reconstruction based on full SVD, 2D case:
- >>> u, s, vh = np.linalg.svd(a, full_matrices=True)
- >>> u.shape, s.shape, vh.shape
+ >>> U, S, Vh = np.linalg.svd(a, full_matrices=True)
+ >>> U.shape, S.shape, Vh.shape
((9, 9), (6,), (6, 6))
- >>> np.allclose(a, np.dot(u[:, :6] * s, vh))
+ >>> np.allclose(a, np.dot(U[:, :6] * S, Vh))
True
>>> smat = np.zeros((9, 6), dtype=complex)
- >>> smat[:6, :6] = np.diag(s)
- >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
+ >>> smat[:6, :6] = np.diag(S)
+ >>> np.allclose(a, np.dot(U, np.dot(smat, Vh)))
True
Reconstruction based on reduced SVD, 2D case:
- >>> u, s, vh = np.linalg.svd(a, full_matrices=False)
- >>> u.shape, s.shape, vh.shape
+ >>> U, S, Vh = np.linalg.svd(a, full_matrices=False)
+ >>> U.shape, S.shape, Vh.shape
((9, 6), (6,), (6, 6))
- >>> np.allclose(a, np.dot(u * s, vh))
+ >>> np.allclose(a, np.dot(U * S, Vh))
True
- >>> smat = np.diag(s)
- >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
+ >>> smat = np.diag(S)
+ >>> np.allclose(a, np.dot(U, np.dot(smat, Vh)))
True
Reconstruction based on full SVD, 4D case:
- >>> u, s, vh = np.linalg.svd(b, full_matrices=True)
- >>> u.shape, s.shape, vh.shape
+ >>> U, S, Vh = np.linalg.svd(b, full_matrices=True)
+ >>> U.shape, S.shape, Vh.shape
((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
- >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
+ >>> np.allclose(b, np.matmul(U[..., :3] * S[..., None, :], Vh))
True
- >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
+ >>> np.allclose(b, np.matmul(U[..., :3], S[..., None] * Vh))
True
Reconstruction based on reduced SVD, 4D case:
- >>> u, s, vh = np.linalg.svd(b, full_matrices=False)
- >>> u.shape, s.shape, vh.shape
+ >>> U, S, Vh = np.linalg.svd(b, full_matrices=False)
+ >>> U.shape, S.shape, Vh.shape
((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
- >>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
+ >>> np.allclose(b, np.matmul(U * S[..., None, :], Vh))
True
- >>> np.allclose(b, np.matmul(u, s[..., None] * vh))
+ >>> np.allclose(b, np.matmul(U, S[..., None] * Vh))
True
"""
@@ -1614,7 +1653,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)
# singular values are unsigned, move the sign into v
vt = transpose(u * sgn[..., None, :]).conjugate()
- return wrap(u), s, wrap(vt)
+ return SVDResult(wrap(u), s, wrap(vt))
else:
s = eigvalsh(a)
s = abs(s)
@@ -1643,7 +1682,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vh = vh.astype(result_t, copy=False)
- return wrap(u), s, wrap(vh)
+ return SVDResult(wrap(u), s, wrap(vh))
else:
if m < n:
gufunc = _umath_linalg.svd_m
@@ -2012,15 +2051,17 @@ def slogdet(a):
Returns
-------
+ A namedtuple with the following attributes:
+
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
- logdet : (...) array_like
+ logabsdet : (...) array_like
The natural log of the absolute value of the determinant.
- If the determinant is zero, then `sign` will be 0 and `logdet` will be
- -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
+ If the determinant is zero, then `sign` will be 0 and `logabsdet` will be
+ -Inf. In all cases, the determinant is equal to ``sign * np.exp(logabsdet)``.
See Also
--------
@@ -2045,10 +2086,10 @@ def slogdet(a):
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
- >>> (sign, logdet) = np.linalg.slogdet(a)
- >>> (sign, logdet)
+ >>> (sign, logabsdet) = np.linalg.slogdet(a)
+ >>> (sign, logabsdet)
(-1, 0.69314718055994529) # may vary
- >>> sign * np.exp(logdet)
+ >>> sign * np.exp(logabsdet)
-2.0
Computing log-determinants for a stack of matrices:
@@ -2056,8 +2097,8 @@ def slogdet(a):
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
- >>> sign, logdet = np.linalg.slogdet(a)
- >>> (sign, logdet)
+ >>> sign, logabsdet = np.linalg.slogdet(a)
+ >>> (sign, logabsdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
- >>> sign * np.exp(logdet)
+ >>> sign * np.exp(logabsdet)
array([-2., -3., -8.])
@@ -2079,7 +2120,7 @@ def slogdet(a):
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
sign = sign.astype(result_t, copy=False)
logdet = logdet.astype(real_t, copy=False)
- return sign, logdet
+ return SlogdetResult(sign, logdet)
@array_function_dispatch(_unary_dispatcher)
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index b1dbd4c22..17ee40042 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -589,11 +589,12 @@ class TestEigvals(EigvalsCases):
class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
- evalues, evectors = linalg.eig(a)
- assert_allclose(dot_generalized(a, evectors),
- np.asarray(evectors) * np.asarray(evalues)[..., None, :],
- rtol=get_rtol(evalues.dtype))
- assert_(consistent_subclass(evectors, a))
+ res = linalg.eig(a)
+ eigenvalues, eigenvectors = res.eigenvalues, res.eigenvectors
+ assert_allclose(dot_generalized(a, eigenvectors),
+ np.asarray(eigenvectors) * np.asarray(eigenvalues)[..., None, :],
+ rtol=get_rtol(eigenvalues.dtype))
+ assert_(consistent_subclass(eigenvectors, a))
class TestEig(EigCases):
@@ -638,10 +639,11 @@ class SVDBaseTests:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
- u, s, vh = linalg.svd(x)
- assert_equal(u.dtype, dtype)
- assert_equal(s.dtype, get_real_dtype(dtype))
- assert_equal(vh.dtype, dtype)
+ res = linalg.svd(x)
+ U, S, Vh = res.U, res.S, res.Vh
+ assert_equal(U.dtype, dtype)
+ assert_equal(S.dtype, get_real_dtype(dtype))
+ assert_equal(Vh.dtype, dtype)
s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)
assert_equal(s.dtype, get_real_dtype(dtype))
@@ -844,7 +846,8 @@ class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
d = linalg.det(a)
- (s, ld) = linalg.slogdet(a)
+ res = linalg.slogdet(a)
+ s, ld = res.sign, res.logabsdet
if asarray(a).dtype.type in (single, double):
ad = asarray(a).astype(double)
else:
@@ -1144,7 +1147,8 @@ class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
- ev, evc = linalg.eigh(a)
+ res = linalg.eigh(a)
+ ev, evc = res.eigenvalues, res.eigenvectors
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_almost_equal(ev, evalues)
@@ -1632,16 +1636,17 @@ class TestQR:
k = min(m, n)
# mode == 'complete'
- q, r = linalg.qr(a, mode='complete')
- assert_(q.dtype == a_dtype)
- assert_(r.dtype == a_dtype)
- assert_(isinstance(q, a_type))
- assert_(isinstance(r, a_type))
- assert_(q.shape == (m, m))
- assert_(r.shape == (m, n))
- assert_almost_equal(dot(q, r), a)
- assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
- assert_almost_equal(np.triu(r), r)
+ res = linalg.qr(a, mode='complete')
+ Q, R = res.Q, res.R
+ assert_(Q.dtype == a_dtype)
+ assert_(R.dtype == a_dtype)
+ assert_(isinstance(Q, a_type))
+ assert_(isinstance(R, a_type))
+ assert_(Q.shape == (m, m))
+ assert_(R.shape == (m, n))
+ assert_almost_equal(dot(Q, R), a)
+ assert_almost_equal(dot(Q.T.conj(), Q), np.eye(m))
+ assert_almost_equal(np.triu(R), R)
# mode == 'reduced'
q1, r1 = linalg.qr(a, mode='reduced')
@@ -1736,7 +1741,7 @@ class TestQR:
assert_(r.shape[-2:] == (m, n))
assert_almost_equal(matmul(q, r), a)
I_mat = np.identity(q.shape[-1])
- stack_I_mat = np.broadcast_to(I_mat,
+ stack_I_mat = np.broadcast_to(I_mat,
q.shape[:-2] + (q.shape[-1],)*2)
assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat)
assert_almost_equal(np.triu(r[..., :, :]), r)
@@ -1751,9 +1756,9 @@ class TestQR:
assert_(r1.shape[-2:] == (k, n))
assert_almost_equal(matmul(q1, r1), a)
I_mat = np.identity(q1.shape[-1])
- stack_I_mat = np.broadcast_to(I_mat,
+ stack_I_mat = np.broadcast_to(I_mat,
q1.shape[:-2] + (q1.shape[-1],)*2)
- assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1),
+ assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1),
stack_I_mat)
assert_almost_equal(np.triu(r1[..., :, :]), r1)
@@ -1764,12 +1769,12 @@ class TestQR:
assert_almost_equal(r2, r1)
@pytest.mark.parametrize("size", [
- (3, 4), (4, 3), (4, 4),
+ (3, 4), (4, 3), (4, 4),
(3, 0), (0, 3)])
@pytest.mark.parametrize("outer_size", [
(2, 2), (2,), (2, 3, 4)])
@pytest.mark.parametrize("dt", [
- np.single, np.double,
+ np.single, np.double,
np.csingle, np.cdouble])
def test_stacked_inputs(self, outer_size, size, dt):