Diffstat (limited to 'numpy')
-rw-r--r--   numpy/linalg/__init__.pyi |  2
-rw-r--r--   numpy/linalg/linalg.py    | 44
2 files changed, 23 insertions, 23 deletions
diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi
index 5080019f4..00db05079 100644
--- a/numpy/linalg/__init__.pyi
+++ b/numpy/linalg/__init__.pyi
@@ -17,7 +17,7 @@ def eig(a): ...
 def eigh(a, UPLO=...): ...
 def svd(a, full_matrices=..., compute_uv=..., hermitian=...): ...
 def cond(x, p=...): ...
-def matrix_rank(M, tol=..., hermitian=...): ...
+def matrix_rank(A, tol=..., hermitian=...): ...
 def pinv(a, rcond=..., hermitian=...): ...
 def slogdet(a): ...
 def det(a): ...
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 46fb2502e..e150952e3 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -1797,12 +1797,12 @@ def cond(x, p=None):
     return r
 
 
-def _matrix_rank_dispatcher(M, tol=None, hermitian=None):
-    return (M,)
+def _matrix_rank_dispatcher(A, tol=None, hermitian=None):
+    return (A,)
 
 
 @array_function_dispatch(_matrix_rank_dispatcher)
-def matrix_rank(M, tol=None, hermitian=False):
+def matrix_rank(A, tol=None, hermitian=False):
     """
     Return matrix rank of array using SVD method
 
@@ -1814,18 +1814,18 @@ def matrix_rank(M, tol=None, hermitian=False):
 
     Parameters
     ----------
-    M : {(M,), (..., M, N)} array_like
+    A : {(M,), (..., M, N)} array_like
         Input vector or stack of matrices.
     tol : (...) array_like, float, optional
         Threshold below which SVD values are considered zero. If `tol` is
         None, and ``S`` is an array with singular values for `M`, and
         ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
-        set to ``S.max() * max(M.shape) * eps``.
+        set to ``S.max() * max(M, N) * eps``.
 
         .. versionchanged:: 1.14
            Broadcasted against the stack of matrices
     hermitian : bool, optional
-        If True, `M` is assumed to be Hermitian (symmetric if real-valued),
+        If True, `A` is assumed to be Hermitian (symmetric if real-valued),
         enabling a more efficient method for finding singular values.
         Defaults to False.
 
@@ -1834,39 +1834,39 @@ def matrix_rank(M, tol=None, hermitian=False):
     Returns
     -------
     rank : (...) array_like
-        Rank of M.
+        Rank of A.
 
     Notes
     -----
     The default threshold to detect rank deficiency is a test on the magnitude
-    of the singular values of `M`. By default, we identify singular values less
-    than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
+    of the singular values of `A`. By default, we identify singular values less
+    than ``S.max() * max(M, N) * eps`` as indicating rank deficiency (with
     the symbols defined above). This is the algorithm MATLAB uses [1]. It also
     appears in *Numerical recipes* in the discussion of SVD solutions for linear
     least squares [2].
 
     This default threshold is designed to detect rank deficiency accounting for
     the numerical errors of the SVD computation. Imagine that there is a column
-    in `M` that is an exact (in floating point) linear combination of other
-    columns in `M`. Computing the SVD on `M` will not produce a singular value
+    in `A` that is an exact (in floating point) linear combination of other
+    columns in `A`. Computing the SVD on `A` will not produce a singular value
     exactly equal to 0 in general: any difference of the smallest SVD value from
     0 will be caused by numerical imprecision in the calculation of the SVD.
     Our threshold for small SVD values takes this numerical imprecision into
     account, and the default threshold will detect such numerical rank
-    deficiency. The threshold may declare a matrix `M` rank deficient even if
-    the linear combination of some columns of `M` is not exactly equal to
-    another column of `M` but only numerically very close to another column of
-    `M`.
+    deficiency. The threshold may declare a matrix `A` rank deficient even if
+    the linear combination of some columns of `A` is not exactly equal to
+    another column of `A` but only numerically very close to another column of
+    `A`.
 
     We chose our default threshold because it is in wide use. Other thresholds
     are possible.  For example, elsewhere in the 2007 edition of *Numerical
     recipes* there is an alternative threshold of ``S.max() *
-    np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
+    np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
     this threshold as being based on "expected roundoff error" (p 71).
 
     The thresholds above deal with floating point roundoff error in the
     calculation of the SVD.  However, you may have more information about the
-    sources of error in `M` that would make you consider other tolerance values
+    sources of error in `A` that would make you consider other tolerance values
     to detect *effective* rank deficiency. The most useful measure of the
     tolerance depends on the operations you intend to use on your matrix. For
     example, if your data come from uncertain measurements with uncertainties
@@ -1895,12 +1895,12 @@ def matrix_rank(M, tol=None, hermitian=False):
     >>> matrix_rank(np.zeros((4,)))
     0
     """
-    M = asarray(M)
-    if M.ndim < 2:
-        return int(not all(M==0))
-    S = svd(M, compute_uv=False, hermitian=hermitian)
+    A = asarray(A)
+    if A.ndim < 2:
+        return int(not all(A==0))
+    S = svd(A, compute_uv=False, hermitian=hermitian)
     if tol is None:
-        tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
+        tol = S.max(axis=-1, keepdims=True) * max(A.shape[-2:]) * finfo(S.dtype).eps
     else:
         tol = asarray(tol)[..., newaxis]
     return count_nonzero(S > tol, axis=-1)
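For reference, a minimal sketch (not part of the commit) of what the renamed function computes with its default tolerance: the rank is the number of singular values of `A` above ``S.max() * max(M, N) * eps``. The example matrix, the RNG seed, and the variable names below are illustrative assumptions, not taken from NumPy's test suite.

import numpy as np

# Illustrative only: a 4x4 matrix whose last column is an exact (in floating
# point) linear combination of the first two, the situation described in the
# Notes section of the docstring above.
rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
A[:, -1] = A[:, 0] + A[:, 1]

# Reproduce the default tolerance from the final hunk:
# tol = S.max() * max(M, N) * eps, with eps the machine epsilon of S.dtype.
S = np.linalg.svd(A, compute_uv=False)
tol = S.max(axis=-1, keepdims=True) * max(A.shape[-2:]) * np.finfo(S.dtype).eps
manual_rank = np.count_nonzero(S > tol, axis=-1)

print(manual_rank)               # expected 3: the smallest singular value falls below tol
print(np.linalg.matrix_rank(A))  # should agree with the manual computation

Note that, at least as far as the hunks shown here go, this is a plain parameter rename, so callers that passed the argument by keyword as ``matrix_rank(M=...)`` would need to switch to ``matrix_rank(A=...)`` or pass it positionally.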