Diffstat (limited to 'numpy/lib')
-rw-r--r--  numpy/lib/__init__.py                    19
-rw-r--r--  numpy/lib/__init__.pyi                    2
-rw-r--r--  numpy/lib/_datasource.py                  2
-rw-r--r--  numpy/lib/_iotools.py                     4
-rw-r--r--  numpy/lib/arraypad.py                    66
-rw-r--r--  numpy/lib/arraysetops.py                 20
-rw-r--r--  numpy/lib/format.py                      36
-rw-r--r--  numpy/lib/function_base.py              291
-rw-r--r--  numpy/lib/function_base.pyi               8
-rw-r--r--  numpy/lib/histograms.py                   4
-rw-r--r--  numpy/lib/index_tricks.py                51
-rw-r--r--  numpy/lib/index_tricks.pyi                2
-rw-r--r--  numpy/lib/mixins.py                       1
-rw-r--r--  numpy/lib/nanfunctions.py                45
-rw-r--r--  numpy/lib/npyio.py                       79
-rw-r--r--  numpy/lib/npyio.pyi                       3
-rw-r--r--  numpy/lib/polynomial.py                   7
-rw-r--r--  numpy/lib/shape_base.py                  32
-rw-r--r--  numpy/lib/tests/test_arraypad.py         17
-rw-r--r--  numpy/lib/tests/test_arraysetops.py      39
-rw-r--r--  numpy/lib/tests/test_format.py            9
-rw-r--r--  numpy/lib/tests/test_function_base.py   264
-rw-r--r--  numpy/lib/tests/test_histograms.py       11
-rw-r--r--  numpy/lib/tests/test_io.py               68
-rw-r--r--  numpy/lib/tests/test_loadtxt.py          37
-rw-r--r--  numpy/lib/tests/test_nanfunctions.py     98
-rw-r--r--  numpy/lib/tests/test_shape_base.py        4
-rw-r--r--  numpy/lib/tests/test_twodim_base.py      11
-rw-r--r--  numpy/lib/tests/test_type_check.py        2
-rw-r--r--  numpy/lib/tests/test_ufunclike.py         6
-rw-r--r--  numpy/lib/twodim_base.py                 93
-rw-r--r--  numpy/lib/type_check.py                  14
-rw-r--r--  numpy/lib/ufunclike.py                   60
-rw-r--r--  numpy/lib/utils.py                       69
-rw-r--r--  numpy/lib/utils.pyi                       2
35 files changed, 1067 insertions, 409 deletions
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index 58166d4b1..d3cc9fee4 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -11,7 +11,6 @@ Most contains basic functions that are used by several submodules and are
useful to have in the main name-space.
"""
-import math
from numpy.version import version as __version__
@@ -58,7 +57,7 @@ from .arraypad import *
from ._version import *
from numpy.core._multiarray_umath import tracemalloc_domain
-__all__ = ['emath', 'math', 'tracemalloc_domain', 'Arrayterator']
+__all__ = ['emath', 'tracemalloc_domain', 'Arrayterator']
__all__ += type_check.__all__
__all__ += index_tricks.__all__
__all__ += function_base.__all__
@@ -77,3 +76,19 @@ __all__ += histograms.__all__
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
+
+def __getattr__(attr):
+ # Warn for deprecated attributes
+ import math
+ import warnings
+
+ if attr == 'math':
+ warnings.warn(
+ "`np.lib.math` is a deprecated alias for the standard library "
+ "`math` module (Deprecated Numpy 1.25). Replace usages of "
+ "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2)
+ return math
+ else:
+ raise AttributeError("module {!r} has no attribute "
+ "{!r}".format(__name__, attr))
+
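With the `__getattr__` hook above, the alias still resolves but now emits a `DeprecationWarning`; a minimal sketch of the expected behavior (assuming NumPy 1.25 with this change applied):

    import warnings
    import numpy as np

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        m = np.lib.math          # resolved through numpy.lib.__getattr__
        assert m.sqrt(4) == 2.0  # it is just the stdlib math module
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)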
diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi
index 0e3da5b41..d3553bbcc 100644
--- a/numpy/lib/__init__.pyi
+++ b/numpy/lib/__init__.pyi
@@ -64,7 +64,6 @@ from numpy.lib.function_base import (
digitize as digitize,
cov as cov,
corrcoef as corrcoef,
- msort as msort,
median as median,
sinc as sinc,
hamming as hamming,
@@ -231,6 +230,7 @@ from numpy.lib.utils import (
lookfor as lookfor,
byte_bounds as byte_bounds,
safe_eval as safe_eval,
+ show_runtime as show_runtime,
)
from numpy.core.multiarray import (
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index b7778234e..613733fa5 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -37,7 +37,7 @@ Example::
import os
import io
-from numpy.core.overrides import set_module
+from .._utils import set_module
_open = open
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index 4a5ac1285..534d1b3ee 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -513,8 +513,8 @@ class StringConverter:
(nx.complexfloating, complex, nx.nan + 0j),
# Last, try with the string types (must be last, because
# `_mapper[-1]` is used as default in some cases)
- (nx.unicode_, asunicode, '???'),
- (nx.string_, asbytes, '???'),
+ (nx.str_, asunicode, '???'),
+ (nx.bytes_, asbytes, '???'),
])
@classmethod
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index 8830b8147..b06a645d8 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -378,7 +378,7 @@ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
return left_pad, right_pad
-def _set_wrap_both(padded, axis, width_pair):
+def _set_wrap_both(padded, axis, width_pair, original_period):
"""
Pad `axis` of `arr` with wrapped values.
@@ -391,6 +391,8 @@ def _set_wrap_both(padded, axis, width_pair):
width_pair : (int, int)
Pair of widths that mark the pad area on both sides in the given
dimension.
+ original_period : int
+ Original length of data on `axis` of `arr`.
Returns
-------
@@ -400,6 +402,9 @@ def _set_wrap_both(padded, axis, width_pair):
"""
left_pad, right_pad = width_pair
period = padded.shape[axis] - right_pad - left_pad
+ # Avoid wrapping with only a subset of the original area by ensuring period
+ # can only be a multiple of the original area's length.
+ period = period // original_period * original_period
# If the current dimension of `arr` doesn't contain enough valid values
# (not part of the undefined pad area) we need to pad multiple times.
@@ -410,14 +415,12 @@ def _set_wrap_both(padded, axis, width_pair):
if left_pad > 0:
# Pad with wrapped values on left side
- # First slice chunk from right side of the non-pad area.
+ # First slice chunk from left side of the non-pad area.
# Use min(period, left_pad) to ensure that chunk is not larger than
- # pad area
- right_slice = _slice_at_axis(
- slice(-right_pad - min(period, left_pad),
- -right_pad if right_pad != 0 else None),
- axis
- )
+ # pad area.
+ slice_end = left_pad + period
+ slice_start = slice_end - min(period, left_pad)
+ right_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
right_chunk = padded[right_slice]
if left_pad > period:
@@ -431,11 +434,12 @@ def _set_wrap_both(padded, axis, width_pair):
if right_pad > 0:
# Pad with wrapped values on right side
- # First slice chunk from left side of the non-pad area.
+ # First slice chunk from right side of the non-pad area.
# Use min(period, right_pad) to ensure that chunk is not larger than
- # pad area
- left_slice = _slice_at_axis(
- slice(left_pad, left_pad + min(period, right_pad),), axis)
+ # pad area.
+ slice_start = -right_pad - period
+ slice_end = slice_start + min(period, right_pad)
+ left_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
left_chunk = padded[left_slice]
if right_pad > period:
@@ -537,11 +541,12 @@ def pad(array, pad_width, mode='constant', **kwargs):
The array to pad.
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
- ((before_1, after_1), ... (before_N, after_N)) unique pad widths
+ ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths
for each axis.
- ((before, after),) yields same before and after pad for each axis.
- (pad,) or int is a shortcut for before = after = pad width for all
- axes.
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after pad for each axis.
+ ``(pad,)`` or ``int`` is a shortcut for before = after = pad width
+ for all axes.
mode : str or function, optional
One of the following string values or a user supplied function.
@@ -586,14 +591,14 @@ def pad(array, pad_width, mode='constant', **kwargs):
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
- ((before_1, after_1), ... (before_N, after_N)) unique statistic
+ ``((before_1, after_1), ... (before_N, after_N))`` unique statistic
lengths for each axis.
- ((before, after),) yields same before and after statistic lengths
- for each axis.
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after statistic lengths for each axis.
- (stat_length,) or int is a shortcut for before = after = statistic
- length for all axes.
+ ``(stat_length,)`` or ``int`` is a shortcut for
+ ``before = after = statistic`` length for all axes.
Default is ``None``, to use the entire axis.
constant_values : sequence or scalar, optional
@@ -603,11 +608,11 @@ def pad(array, pad_width, mode='constant', **kwargs):
``((before_1, after_1), ... (before_N, after_N))`` unique pad constants
for each axis.
- ``((before, after),)`` yields same before and after constants for each
- axis.
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after constants for each axis.
- ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for
- all axes.
+ ``(constant,)`` or ``constant`` is a shortcut for
+ ``before = after = constant`` for all axes.
Default is 0.
end_values : sequence or scalar, optional
@@ -617,11 +622,11 @@ def pad(array, pad_width, mode='constant', **kwargs):
``((before_1, after_1), ... (before_N, after_N))`` unique end values
for each axis.
- ``((before, after),)`` yields same before and after end values for each
- axis.
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after end values for each axis.
- ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for
- all axes.
+ ``(constant,)`` or ``constant`` is a shortcut for
+ ``before = after = constant`` for all axes.
Default is 0.
reflect_type : {'even', 'odd'}, optional
@@ -866,11 +871,12 @@ def pad(array, pad_width, mode='constant', **kwargs):
elif mode == "wrap":
for axis, (left_index, right_index) in zip(axes, pad_width):
roi = _view_roi(padded, original_area_slice, axis)
+ original_period = padded.shape[axis] - right_index - left_index
while left_index > 0 or right_index > 0:
# Iteratively pad until dimension is filled with wrapped
# values. This is necessary if the pad area is larger than
# the length of the original values in the current dimension.
left_index, right_index = _set_wrap_both(
- roi, axis, (left_index, right_index))
+ roi, axis, (left_index, right_index), original_period)
return padded
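A minimal sketch of the behavioral fix, reusing the values from the new test_repeated_wrapping_multiple_origin test further down: when the pad area exceeds the original length, "wrap" now tiles whole periods of the original data rather than a shifted subset.

    import numpy as np

    a = np.arange(4).reshape(2, 2)                     # [[0, 1], [2, 3]]
    padded = np.pad(a, [(1, 3), (3, 1)], mode="wrap")
    # Every 2x2 tile of the result is an exact copy of the original period.
    print(padded)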
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index cf5f47a82..300bbda26 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -649,8 +649,24 @@ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
ar2_range = int(ar2_max) - int(ar2_min)
# Constraints on whether we can actually use the table method:
- range_safe_from_overflow = ar2_range < np.iinfo(ar2.dtype).max
+ # 1. Assert memory usage is not too large
below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)
+ # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype
+ range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max
+ # 3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype
+ if ar1.size > 0:
+ ar1_min = np.min(ar1)
+ ar1_max = np.max(ar1)
+
+ # After masking, the range of ar1 is guaranteed to be
+ # within the range of ar2:
+ ar1_upper = min(int(ar1_max), int(ar2_max))
+ ar1_lower = max(int(ar1_min), int(ar2_min))
+
+ range_safe_from_overflow &= all((
+ ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max,
+ ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min
+ ))
# Optimal performance is for approximately
# log10(size) > (log10(range) - 2.27) / 0.927.
@@ -687,7 +703,7 @@ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
elif kind == 'table': # not range_safe_from_overflow
raise RuntimeError(
"You have specified kind='table', "
- "but the range of values in `ar2` exceeds the "
+ "but the range of values in `ar2` or `ar1` exceed the "
"maximum integer of the datatype. "
"Please set `kind` to None or 'sort'."
)
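A minimal sketch of the new guard, mirroring the mixed-dtype test added below: when offsetting by `ar2_min` cannot be represented in the input dtypes, kind='table' now raises instead of silently producing wrong results (here with int16/int8 inputs):

    import numpy as np

    ar1 = np.array([0, 0, 1, 1], dtype=np.int16)
    ar2 = np.array([-128, 0, 127], dtype=np.int8)
    try:
        np.in1d(ar1, ar2, kind="table")
    except RuntimeError as e:
        print(e)  # ... range of values in `ar2` or `ar1` exceeds ...
    print(np.in1d(ar1, ar2, kind="sort"))  # [ True  True False False]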
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 54fd0b0bc..ef50fb19d 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -437,15 +437,15 @@ def _write_array_header(fp, d, version=None):
header.append("'%s': %s, " % (key, repr(value)))
header.append("}")
header = "".join(header)
-
+
# Add some spare space so that the array header can be modified in-place
# when changing the array size, e.g. when growing it by appending data at
- # the end.
+ # the end.
shape = d['shape']
header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
shape[-1 if d['fortran_order'] else 0]
))) if len(shape) > 0 else 0)
-
+
if version is None:
header = _wrap_header_guess_version(header)
else:
@@ -505,7 +505,7 @@ def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
Raises
------
@@ -532,7 +532,7 @@ def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
Returns
-------
@@ -623,13 +623,27 @@ def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE):
# "descr" : dtype.descr
# Versions (2, 0) and (1, 0) could have been created by a Python 2
# implementation before header filtering was implemented.
- if version <= (2, 0):
- header = _filter_header(header)
+ #
+ # For performance reasons, we try without _filter_header first though
try:
d = safe_eval(header)
except SyntaxError as e:
- msg = "Cannot parse header: {!r}"
- raise ValueError(msg.format(header)) from e
+ if version <= (2, 0):
+ header = _filter_header(header)
+ try:
+ d = safe_eval(header)
+ except SyntaxError as e2:
+ msg = "Cannot parse header: {!r}"
+ raise ValueError(msg.format(header)) from e2
+ else:
+ warnings.warn(
+ "Reading `.npy` or `.npz` file required additional "
+ "header parsing as it was created on Python 2. Save the "
+ "file again to speed up loading and avoid this warning.",
+ UserWarning, stacklevel=4)
+ else:
+ msg = "Cannot parse header: {!r}"
+ raise ValueError(msg.format(header)) from e
if not isinstance(d, dict):
msg = "Header is not a dictionary: {!r}"
raise ValueError(msg.format(d))
@@ -750,7 +764,7 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
@@ -869,7 +883,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
Returns
-------
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 6065dd0d3..02e141920 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -4,6 +4,7 @@ import re
import sys
import warnings
+from .._utils import set_module
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import transpose
@@ -19,12 +20,11 @@ from numpy.core.fromnumeric import (
ravel, nonzero, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes
-from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.core.function_base import add_newdoc
from numpy.lib.twodim_base import diag
from numpy.core.multiarray import (
- _insert, add_docstring, bincount, normalize_axis_index, _monotonicity,
+ _place, add_docstring, bincount, normalize_axis_index, _monotonicity,
interp as compiled_interp, interp_complex as compiled_interp_complex
)
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
@@ -161,6 +161,8 @@ def rot90(m, k=1, axes=(0, 1)):
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
+ This means for a 2D array with the default `k` and `axes`, the
+ rotation will be counterclockwise.
Parameters
----------
@@ -1309,6 +1311,8 @@ def gradient(f, *varargs, axis=None, edge_order=1):
if len_axes == 1:
return outvals[0]
+ elif np._using_numpy2_behavior():
+ return tuple(outvals)
else:
return outvals
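A hedged illustration of the new branch: with the NumPy-2 behavior opt-in active, a multi-axis gradient is returned as a tuple rather than a list (legacy behavior keeps the list):

    import numpy as np

    f = np.arange(12, dtype=float).reshape(3, 4)
    out = np.gradient(f, axis=(0, 1))
    # type(out) is list under legacy behavior, and tuple once
    # np._using_numpy2_behavior() reports True.
    print(type(out), len(out))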
@@ -1947,11 +1951,7 @@ def place(arr, mask, vals):
[44, 55, 44]])
"""
- if not isinstance(arr, np.ndarray):
- raise TypeError("argument 1 must be numpy.ndarray, "
- "not {name}".format(name=type(arr).__name__))
-
- return _insert(arr, mask, vals)
+ return _place(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
@@ -2117,10 +2117,10 @@ def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes,
@set_module('numpy')
class vectorize:
"""
- vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,
- signature=None)
+ vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None,
+ cache=False, signature=None)
- Generalized function class.
+ Returns an object that acts like pyfunc, but takes arrays as input.
Define a vectorized function which takes a nested sequence of objects or
numpy arrays as inputs and returns a single numpy array or a tuple of numpy
@@ -2134,8 +2134,9 @@ class vectorize:
Parameters
----------
- pyfunc : callable
+ pyfunc : callable, optional
A python function or method.
+ Can be omitted to produce a decorator with keyword arguments.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
@@ -2167,8 +2168,9 @@ class vectorize:
Returns
-------
- vectorized : callable
- Vectorized function.
+ out : callable
+ A vectorized function if ``pyfunc`` was provided,
+ a decorator otherwise.
See Also
--------
@@ -2265,18 +2267,44 @@ class vectorize:
[0., 0., 1., 2., 1., 0.],
[0., 0., 0., 1., 2., 1.]])
+ Decorator syntax is supported. The decorator can be called as
+ a function to provide keyword arguments.
+
+ >>> @np.vectorize
+ ... def identity(x):
+ ...     return x
+ ...
+ >>> identity([0, 1, 2])
+ array([0, 1, 2])
+
+ >>> @np.vectorize(otypes=[float])
+ ... def as_float(x):
+ ...     return x
+ ...
+ >>> as_float([0, 1, 2])
+ array([0., 1., 2.])
"""
- def __init__(self, pyfunc, otypes=None, doc=None, excluded=None,
- cache=False, signature=None):
+ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None,
+ excluded=None, cache=False, signature=None):
+
+ if pyfunc is not np._NoValue and not callable(pyfunc):
+ # Splitting the error message to keep
+ # the length below 79 characters.
+ part1 = "When used as a decorator, "
+ part2 = "only accepts keyword arguments."
+ raise TypeError(part1 + part2)
+
self.pyfunc = pyfunc
self.cache = cache
self.signature = signature
- self._ufunc = {} # Caching to improve default performance
+ if pyfunc is not np._NoValue and hasattr(pyfunc, '__name__'):
+ self.__name__ = pyfunc.__name__
- if doc is None:
+ self._ufunc = {} # Caching to improve default performance
+ self._doc = None
+ self.__doc__ = doc
+ if doc is None and hasattr(pyfunc, '__doc__'):
self.__doc__ = pyfunc.__doc__
else:
- self.__doc__ = doc
+ self._doc = doc
if isinstance(otypes, str):
for char in otypes:
@@ -2298,7 +2326,15 @@ class vectorize:
else:
self._in_and_out_core_dims = None
- def __call__(self, *args, **kwargs):
+ def _init_stage_2(self, pyfunc, *args, **kwargs):
+ self.__name__ = pyfunc.__name__
+ self.pyfunc = pyfunc
+ if self._doc is None:
+ self.__doc__ = pyfunc.__doc__
+ else:
+ self.__doc__ = self._doc
+
+ def _call_as_normal(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
@@ -2328,6 +2364,13 @@ class vectorize:
return self._vectorize_call(func=func, args=vargs)
+ def __call__(self, *args, **kwargs):
+ if self.pyfunc is np._NoValue:
+ self._init_stage_2(*args, **kwargs)
+ return self
+
+ return self._call_as_normal(*args, **kwargs)
+
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
@@ -2693,7 +2736,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice",
- RuntimeWarning, stacklevel=3)
+ RuntimeWarning, stacklevel=2)
fact = 0.0
X -= avg[:, None]
@@ -2842,7 +2885,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *,
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
- DeprecationWarning, stacklevel=3)
+ DeprecationWarning, stacklevel=2)
c = cov(x, y, rowvar, dtype=dtype)
try:
d = diag(c)
@@ -2956,10 +2999,15 @@ def blackman(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for a range.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1))
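A small sketch of what the `values = np.array([0.0, M])` conversion buys for the window functions touched in this diff: the window is computed in at least float64 even when `M` arrives as a lower-precision scalar.

    import numpy as np

    w = np.blackman(np.float32(12))
    print(w.dtype)  # float64, regardless of the scalar type of M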
@@ -3064,10 +3112,15 @@ def bartlett(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for a range.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1))
@@ -3168,10 +3221,15 @@ def hanning(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for a range.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return 0.5 + 0.5*cos(pi*n/(M-1))
@@ -3268,10 +3326,15 @@ def hamming(M):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for a range.
+ values = np.array([0.0, M])
+ M = values[1]
+
if M < 1:
- return array([], dtype=np.result_type(M, 0.0))
+ return array([], dtype=values.dtype)
if M == 1:
- return ones(1, dtype=np.result_type(M, 0.0))
+ return ones(1, dtype=values.dtype)
n = arange(1-M, M, 2)
return 0.54 + 0.46*cos(pi*n/(M-1))
@@ -3547,11 +3610,19 @@ def kaiser(M, beta):
>>> plt.show()
"""
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for a range. (This simplifies result_type with a
+ # strongly typed 0.0; result_type is less order sensitive, but that mainly
+ # matters for integers anyway.)
+ values = np.array([0.0, M, beta])
+ M = values[1]
+ beta = values[2]
+
if M == 1:
- return np.ones(1, dtype=np.result_type(M, 0.0))
+ return np.ones(1, dtype=values.dtype)
n = arange(0, M)
alpha = (M-1)/2.0
- return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
+ return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(beta)
def _sinc_dispatcher(x):
@@ -3682,14 +3753,14 @@ def msort(a):
warnings.warn(
"msort is deprecated, use np.sort(a, axis=0) instead",
DeprecationWarning,
- stacklevel=3,
+ stacklevel=2,
)
b = array(a, subok=True, copy=True)
b.sort(0)
return b
-def _ureduce(a, func, **kwargs):
+def _ureduce(a, func, keepdims=False, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
@@ -3717,13 +3788,20 @@ def _ureduce(a, func, **kwargs):
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
+ out = kwargs.get('out', None)
+
+ if keepdims is np._NoValue:
+ keepdims = False
+
+ nd = a.ndim
if axis is not None:
- keepdim = list(a.shape)
- nd = a.ndim
axis = _nx.normalize_axis_tuple(axis, nd)
- for ax in axis:
- keepdim[ax] = 1
+ if keepdims:
+ if out is not None:
+ index_out = tuple(
+ 0 if i in axis else slice(None) for i in range(nd))
+ kwargs['out'] = out[(Ellipsis, ) + index_out]
if len(axis) == 1:
kwargs['axis'] = axis[0]
@@ -3736,12 +3814,27 @@ def _ureduce(a, func, **kwargs):
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
- keepdim = tuple(keepdim)
else:
- keepdim = (1,) * a.ndim
+ if keepdims:
+ if out is not None:
+ index_out = (0, ) * nd
+ kwargs['out'] = out[(Ellipsis, ) + index_out]
r = func(a, **kwargs)
- return r, keepdim
+
+ if out is not None:
+ return out
+
+ if keepdims:
+ if axis is None:
+ index_r = (np.newaxis, ) * nd
+ else:
+ index_r = tuple(
+ np.newaxis if i in axis else slice(None)
+ for i in range(nd))
+ r = r[(Ellipsis, ) + index_r]
+
+ return r
def _median_dispatcher(
@@ -3831,12 +3924,8 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
>>> assert not np.all(a==b)
"""
- r, k = _ureduce(a, func=_median, axis=axis, out=out,
+ return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out,
overwrite_input=overwrite_input)
- if keepdims:
- return r.reshape(k)
- else:
- return r
def _median(a, axis=None, out=None, overwrite_input=False):
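Since `_ureduce` now applies `keepdims` (and slices `out` to match) itself, `median` simply forwards the flag; the visible behavior is unchanged:

    import numpy as np

    a = np.arange(12).reshape(3, 4)
    m = np.median(a, axis=1, keepdims=True)
    print(m.shape)  # (3, 1): the reduced axis is kept with length 1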
@@ -3916,11 +4005,11 @@ def percentile(a,
Parameters
----------
- a : array_like
+ a : array_like of real numbers
Input array or object that can be converted to an array.
q : array_like of float
- Percentile or sequence of percentiles to compute, which must be between
- 0 and 100 inclusive.
+ Percentage or sequence of percentages for the percentiles to compute.
+ Values must be between 0 and 100 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the percentiles are computed. The
default is to compute the percentile(s) along a flattened
@@ -4017,7 +4106,7 @@ def percentile(a,
since Python uses 0-based indexing, the code subtracts another 1 from the
index internally.
- The following formula determines the virtual index ``i + g``, the location
- The following formula determines the virtual index ``i + g``, the location
+ The following formula determines the virtual index ``i + g``, the location
of the percentile in the sorted sample:
.. math::
@@ -4167,7 +4256,8 @@ def percentile(a,
xlabel='Percentile',
ylabel='Estimated percentile value',
yticks=a)
- ax.legend()
+ ax.legend(bbox_to_anchor=(1.03, 1))
+ plt.tight_layout()
plt.show()
References
@@ -4180,6 +4270,11 @@ def percentile(a,
if interpolation is not None:
method = _check_interpolation_as_method(
method, interpolation, "percentile")
+
+ a = np.asanyarray(a)
+ if a.dtype.kind == "c":
+ raise TypeError("a must be an array of real numbers")
+
q = np.true_divide(q, 100)
q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105)
if not _quantile_is_valid(q):
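With the new dtype check, complex input to `percentile` (and `quantile`, below) now fails fast; a minimal sketch:

    import numpy as np

    try:
        np.percentile(np.array([1 + 1j, 2 + 2j]), 50)
    except TypeError as e:
        print(e)  # a must be an array of real numbers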
@@ -4210,11 +4305,11 @@ def quantile(a,
Parameters
----------
- a : array_like
+ a : array_like of real numbers
Input array or object that can be converted to an array.
q : array_like of float
- Quantile or sequence of quantiles to compute, which must be between
- 0 and 1 inclusive.
+ Probability or sequence of probabilities for the quantiles to compute.
+ Values must be between 0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed. The default is
to compute the quantile(s) along a flattened version of the array.
@@ -4268,8 +4363,8 @@ def quantile(a,
Returns
-------
quantile : scalar or ndarray
- If `q` is a single quantile and `axis=None`, then the result
- is a scalar. If multiple quantiles are given, first axis of
+ If `q` is a single probability and `axis=None`, then the result
is a scalar. If multiple probability levels are given, first axis of
the result corresponds to the quantiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
@@ -4306,7 +4401,7 @@ def quantile(a,
since Python uses 0-based indexing, the code subtracts another 1 from the
index internally.
- The following formula determines the virtual index ``i + g``, the location
+ The following formula determines the virtual index ``i + g``, the location
of the quantile in the sorted sample:
.. math::
@@ -4437,6 +4532,10 @@ def quantile(a,
method = _check_interpolation_as_method(
method, interpolation, "quantile")
+ a = np.asanyarray(a)
+ if a.dtype.kind == "c":
+ raise TypeError("a must be an array of real numbers")
+
q = np.asanyarray(q)
if not _quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
@@ -4452,17 +4551,14 @@ def _quantile_unchecked(a,
method="linear",
keepdims=False):
"""Assumes that q is in [0, 1], and is an ndarray"""
- r, k = _ureduce(a,
+ return _ureduce(a,
func=_quantile_ureduce_func,
q=q,
+ keepdims=keepdims,
axis=axis,
out=out,
overwrite_input=overwrite_input,
method=method)
- if keepdims:
- return r.reshape(q.shape + k)
- else:
- return r
def _quantile_is_valid(q):
@@ -4812,26 +4908,42 @@ def trapz(y, x=None, dx=1.0, axis=-1):
Examples
--------
- >>> np.trapz([1,2,3])
+ Use the trapezoidal rule on evenly spaced points:
+
+ >>> np.trapz([1, 2, 3])
4.0
- >>> np.trapz([1,2,3], x=[4,6,8])
+
+ The spacing between sample points can be selected by either the
+ ``x`` or ``dx`` arguments:
+
+ >>> np.trapz([1, 2, 3], x=[4, 6, 8])
8.0
- >>> np.trapz([1,2,3], dx=2)
+ >>> np.trapz([1, 2, 3], dx=2)
8.0
- Using a decreasing `x` corresponds to integrating in reverse:
+ Using a decreasing ``x`` corresponds to integrating in reverse:
- >>> np.trapz([1,2,3], x=[8,6,4])
+ >>> np.trapz([1, 2, 3], x=[8, 6, 4])
-8.0
- More generally `x` is used to integrate along a parametric curve.
- This finds the area of a circle, noting we repeat the sample which closes
+ More generally ``x`` is used to integrate along a parametric curve. We can
+ estimate the integral :math:`\int_0^1 x^2 \, dx = 1/3` using:
+
+ >>> x = np.linspace(0, 1, num=50)
+ >>> y = x**2
+ >>> np.trapz(y, x)
+ 0.33340274885464394
+
+ Or estimate the area of a circle, noting we repeat the sample which closes
the curve:
>>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True)
>>> np.trapz(np.cos(theta), x=np.sin(theta))
3.141571941375841
+ ``np.trapz`` can be applied along a specified axis to do multiple
+ computations in one call:
+
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
@@ -4869,6 +4981,24 @@ def trapz(y, x=None, dx=1.0, axis=-1):
return ret
+# __array_function__ wraps everything into a C callable, so `trapz` has no
+# __code__ or other attributes that normal Python funcs have. SciPy, however,
+# tries to "clone" `trapz` into a new Python function, which requires
+# `__code__` and a few other attributes. So we create a dummy clone and copy
+# over its attributes, allowing SciPy <= 1.10 to work:
+# https://github.com/scipy/scipy/issues/17811
+assert not hasattr(trapz, "__code__")
+
+def _fake_trapz(y, x=None, dx=1.0, axis=-1):
+ return trapz(y, x=x, dx=dx, axis=axis)
+
+
+trapz.__code__ = _fake_trapz.__code__
+trapz.__globals__ = _fake_trapz.__globals__
+trapz.__defaults__ = _fake_trapz.__defaults__
+trapz.__closure__ = _fake_trapz.__closure__
+trapz.__kwdefaults__ = _fake_trapz.__kwdefaults__
+
+
def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
return xi
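For context on the attribute copying above, a hedged sketch of the SciPy-style cloning it is meant to support (illustrative only, not SciPy's actual code):

    import types
    import numpy as np

    clone = types.FunctionType(
        np.trapz.__code__, np.trapz.__globals__, name="trapz",
        argdefs=np.trapz.__defaults__, closure=np.trapz.__closure__)
    print(clone([1, 2, 3]))  # 4.0, same as np.trapz([1, 2, 3])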
@@ -4877,7 +5007,7 @@ def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
@array_function_dispatch(_meshgrid_dispatcher)
def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
"""
- Return coordinate matrices from coordinate vectors.
+ Return a list of coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
@@ -4918,7 +5048,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
Returns
-------
- X1, X2,..., XN : ndarray
+ X1, X2,..., XN : list of ndarrays
For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy'
@@ -4953,6 +5083,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
mgrid : Construct a multi-dimensional "meshgrid" using indexing notation.
ogrid : Construct an open multi-dimensional "meshgrid" using indexing
notation.
+ how-to-index
Examples
--------
@@ -4966,16 +5097,25 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
>>> yv
array([[0., 0., 0.],
[1., 1., 1.]])
- >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays
+
+ The result of `meshgrid` is a coordinate grid:
+
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot(xv, yv, marker='o', color='k', linestyle='none')
+ >>> plt.show()
+
+ You can create sparse output arrays to save memory and computation time.
+
+ >>> xv, yv = np.meshgrid(x, y, sparse=True)
>>> xv
array([[0. , 0.5, 1. ]])
>>> yv
array([[0.],
[1.]])
- `meshgrid` is very useful to evaluate functions on a grid. If the
- function depends on all coordinates, you can use the parameter
- ``sparse=True`` to save memory and computation time.
+ `meshgrid` is very useful to evaluate functions on a grid. If the
+ function depends on all coordinates, both dense and sparse outputs can be
+ used.
>>> x = np.linspace(-5, 5, 101)
>>> y = np.linspace(-5, 5, 101)
@@ -4992,7 +5132,6 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
>>> np.array_equal(zz, zs)
True
- >>> import matplotlib.pyplot as plt
>>> h = plt.contourf(x, y, zs)
>>> plt.axis('scaled')
>>> plt.colorbar()
@@ -5346,7 +5485,7 @@ def insert(arr, obj, values, axis=None):
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
- "integer", FutureWarning, stacklevel=3)
+ "integer", FutureWarning, stacklevel=2)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
diff --git a/numpy/lib/function_base.pyi b/numpy/lib/function_base.pyi
index c14a54c60..687e4ab17 100644
--- a/numpy/lib/function_base.pyi
+++ b/numpy/lib/function_base.pyi
@@ -441,12 +441,8 @@ def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
@overload
def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
-@overload
-def msort(a: _ArrayType) -> _ArrayType: ...
-@overload
-def msort(a: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
-@overload
-def msort(a: ArrayLike) -> NDArray[Any]: ...
+# NOTE: Deprecated
+# def msort(a: ArrayLike) -> NDArray[Any]: ...
@overload
def median(
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 704f69dce..35745e6dd 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -970,7 +970,7 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None):
sample = np.atleast_2d(sample).T
N, D = sample.shape
- nbin = np.empty(D, int)
+ nbin = np.empty(D, np.intp)
edges = D*[None]
dedges = D*[None]
if weights is not None:
@@ -981,7 +981,7 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None):
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
- ' sample x.')
+ 'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 3daacaff6..1da73dee5 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -3,16 +3,15 @@ import sys
import math
import warnings
+import numpy as np
+from .._utils import set_module
import numpy.core.numeric as _nx
-from numpy.core.numeric import (
- asarray, ScalarType, array, alltrue, cumprod, arange, ndim
-)
+from numpy.core.numeric import ScalarType, array
from numpy.core.numerictypes import issubdtype
import numpy.matrixlib as matrixlib
from .function_base import diff
from numpy.core.multiarray import ravel_multi_index, unravel_index
-from numpy.core.overrides import set_module
from numpy.core import overrides, linspace
from numpy.lib.stride_tricks import as_strided
@@ -94,7 +93,7 @@ def ix_(*args):
nd = len(args)
for k, new in enumerate(args):
if not isinstance(new, _nx.ndarray):
- new = asarray(new)
+ new = np.asarray(new)
if new.size == 0:
# Explicitly type empty arrays to avoid float default
new = new.astype(_nx.intp)
@@ -232,7 +231,9 @@ class MGridClass(nd_grid):
--------
lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
ogrid : like mgrid but returns open (not fleshed out) mesh grids
+ meshgrid: return coordinate matrices from coordinate vectors
r_ : array concatenator
+ :ref:`how-to-partition`
Examples
--------
@@ -283,7 +284,9 @@ class OGridClass(nd_grid):
--------
np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
+ meshgrid: return coordinate matrices from coordinate vectors
r_ : array concatenator
+ :ref:`how-to-partition`
Examples
--------
@@ -389,7 +392,7 @@ class AxisConcatenator:
scalar = True
newobj = item
else:
- item_ndim = ndim(item)
+ item_ndim = np.ndim(item)
newobj = array(item, copy=False, subok=True, ndmin=ndmin)
if trans1d != -1 and item_ndim < ndmin:
k2 = ndmin - item_ndim
@@ -594,7 +597,7 @@ class ndenumerate:
"""
def __init__(self, arr):
- self.iter = asarray(arr).flat
+ self.iter = np.asarray(arr).flat
def __next__(self):
"""
@@ -907,9 +910,9 @@ def fill_diagonal(a, val, wrap=False):
else:
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
- if not alltrue(diff(a.shape) == 0):
+ if not np.all(diff(a.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
- step = 1 + (cumprod(a.shape[:-1])).sum()
+ step = 1 + (np.cumprod(a.shape[:-1])).sum()
# Write the value out into the diagonal.
a.flat[:end:step] = val
@@ -980,7 +983,7 @@ def diag_indices(n, ndim=2):
[0, 1]]])
"""
- idx = arange(n)
+ idx = np.arange(n)
return (idx,) * ndim
@@ -1007,13 +1010,39 @@ def diag_indices_from(arr):
-----
.. versionadded:: 1.4.0
+ Examples
+ --------
+
+ Create a 4 by 4 array.
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Get the indices of the diagonal elements.
+
+ >>> di = np.diag_indices_from(a)
+ >>> di
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+
+ >>> a[di]
+ array([ 0, 5, 10, 15])
+
+ This is simply syntactic sugar for diag_indices.
+
+ >>> np.diag_indices(a.shape[0])
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+
"""
if not arr.ndim >= 2:
raise ValueError("input array must be at least 2-d")
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
- if not alltrue(diff(arr.shape) == 0):
+ if not np.all(diff(arr.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
return diag_indices(arr.shape[0], arr.ndim)
diff --git a/numpy/lib/index_tricks.pyi b/numpy/lib/index_tricks.pyi
index c9251abd1..29a6b9e2b 100644
--- a/numpy/lib/index_tricks.pyi
+++ b/numpy/lib/index_tricks.pyi
@@ -119,7 +119,7 @@ class AxisConcatenator:
@staticmethod
def makemat(
data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ...
- ) -> _Matrix: ...
+ ) -> _Matrix[Any, Any]: ...
# TODO: Sort out this `__getitem__` method
def __getitem__(self, key: Any) -> Any: ...
diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py
index c81239f6b..117cc7851 100644
--- a/numpy/lib/mixins.py
+++ b/numpy/lib/mixins.py
@@ -133,6 +133,7 @@ class NDArrayOperatorsMixin:
.. versionadded:: 1.13
"""
+ __slots__ = ()
# Like np.ndarray, this mixin class implements "Option 1" from the ufunc
# overrides NEP.
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 3814c0727..b3b570860 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -169,7 +169,7 @@ def _remove_nan_1d(arr1d, overwrite_input=False):
s = np.nonzero(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning,
- stacklevel=5)
+ stacklevel=6)
return arr1d[:0], True
elif s.size == 0:
return arr1d, overwrite_input
@@ -343,7 +343,7 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN slice encountered", RuntimeWarning,
- stacklevel=3)
+ stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
@@ -357,7 +357,7 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning,
- stacklevel=3)
+ stacklevel=2)
return res
@@ -476,7 +476,7 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN slice encountered", RuntimeWarning,
- stacklevel=3)
+ stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, -np.inf)
@@ -490,7 +490,7 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning,
- stacklevel=3)
+ stacklevel=2)
return res
@@ -1049,7 +1049,7 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
isbad = (cnt == 0)
if isbad.any():
- warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=3)
+ warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
# NaN is the only possible bad value, so no further
# action is needed to handle bad results.
return avg
@@ -1109,7 +1109,7 @@ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
for i in range(np.count_nonzero(m.mask.ravel())):
warnings.warn("All-NaN slice encountered", RuntimeWarning,
- stacklevel=4)
+ stacklevel=5)
fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan
if out is not None:
@@ -1214,12 +1214,9 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu
if a.size == 0:
return np.nanmean(a, axis, out=out, keepdims=keepdims)
- r, k = function_base._ureduce(a, func=_nanmedian, axis=axis, out=out,
+ return function_base._ureduce(a, func=_nanmedian, keepdims=keepdims,
+ axis=axis, out=out,
overwrite_input=overwrite_input)
- if keepdims and keepdims is not np._NoValue:
- return r.reshape(k)
- else:
- return r
def _nanpercentile_dispatcher(
@@ -1376,6 +1373,9 @@ def nanpercentile(
method, interpolation, "nanpercentile")
a = np.asanyarray(a)
+ if a.dtype.kind == "c":
+ raise TypeError("a must be an array of real numbers")
+
q = np.true_divide(q, 100.0)
# undo any decay that the ufunc performed (see gh-13105)
q = np.asanyarray(q)
@@ -1415,8 +1415,8 @@ def nanquantile(
Input array or object that can be converted to an array, containing
nan values to be ignored
q : array_like of float
- Quantile or sequence of quantiles to compute, which must be between
- 0 and 1 inclusive.
+ Probability or sequence of probabilities for the quantiles to compute.
+ Values must be between 0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed. The
default is to compute the quantile(s) along a flattened
@@ -1476,8 +1476,8 @@ def nanquantile(
Returns
-------
quantile : scalar or ndarray
- If `q` is a single percentile and `axis=None`, then the result
- is a scalar. If multiple quantiles are given, first axis of
+ If `q` is a single probability and `axis=None`, then the result
+ is a scalar. If multiple probability levels are given, first axis of
the result corresponds to the quantiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
@@ -1530,11 +1530,15 @@ def nanquantile(
The American Statistician, 50(4), pp. 361-365, 1996
"""
+
if interpolation is not None:
method = function_base._check_interpolation_as_method(
method, interpolation, "nanquantile")
a = np.asanyarray(a)
+ if a.dtype.kind == "c":
+ raise TypeError("a must be an array of real numbers")
+
q = np.asanyarray(q)
if not function_base._quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
@@ -1556,17 +1560,14 @@ def _nanquantile_unchecked(
# so deal them upfront
if a.size == 0:
return np.nanmean(a, axis, out=out, keepdims=keepdims)
- r, k = function_base._ureduce(a,
+ return function_base._ureduce(a,
func=_nanquantile_ureduce_func,
q=q,
+ keepdims=keepdims,
axis=axis,
out=out,
overwrite_input=overwrite_input,
method=method)
- if keepdims and keepdims is not np._NoValue:
- return r.reshape(q.shape + k)
- else:
- return r
def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
@@ -1762,7 +1763,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
isbad = (dof <= 0)
if np.any(isbad):
warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,
- stacklevel=3)
+ stacklevel=2)
# NaN, inf, or negative numbers are all possible bad
# values, so explicitly replace them with NaN.
var = _copyto(var, np.nan, isbad)
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 4a27c7898..339b1dc62 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -142,7 +142,7 @@ class NpzFile(Mapping):
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
@@ -167,6 +167,8 @@ class NpzFile(Mapping):
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.npyio.NpzFile)
True
+ >>> npz
+ NpzFile 'object' with keys: x, y
>>> sorted(npz.files)
['x', 'y']
>>> npz['x'] # getitem access
@@ -178,6 +180,7 @@ class NpzFile(Mapping):
# Make __exit__ safe if zipfile_factory raises an exception
zip = None
fid = None
+ _MAX_REPR_ARRAY_COUNT = 5
def __init__(self, fid, own_fid=False, allow_pickle=False,
pickle_kwargs=None, *,
@@ -257,7 +260,23 @@ class NpzFile(Mapping):
else:
return self.zip.read(key)
else:
- raise KeyError("%s is not a file in the archive" % key)
+ raise KeyError(f"{key} is not a file in the archive")
+
+ def __contains__(self, key):
+ return (key in self._files or key in self.files)
+
+ def __repr__(self):
+ # Get filename or default to `object`
+ if isinstance(self.fid, str):
+ filename = self.fid
+ else:
+ filename = getattr(self.fid, "name", "object")
+
+ # Get the name of arrays
+ array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])
+ if len(self.files) > self._MAX_REPR_ARRAY_COUNT:
+ array_names += "..."
+ return f"NpzFile {filename!r} with keys: {array_names}"
@set_module('numpy')
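A short sketch of the new mapping behavior, combining the `__contains__` and `__repr__` additions above:

    import numpy as np
    from io import BytesIO

    buf = BytesIO()
    np.savez(buf, x=np.arange(3), y=np.ones(2))
    buf.seek(0)
    npz = np.load(buf)
    print('x' in npz)  # True, via the new __contains__
    print(repr(npz))   # NpzFile 'object' with keys: x, y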
@@ -309,7 +328,7 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
- See :py:meth:`ast.literal_eval()` for details.
+ See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
@@ -327,6 +346,9 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
ValueError
The file contains an object array, but ``allow_pickle=False`` given.
+ EOFError
+ When calling ``np.load`` multiple times on the same file handle,
+ once all data has already been read.
See Also
--------
@@ -410,6 +432,8 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
_ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
+ if not magic:
+ raise EOFError("No data left in file")
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
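The new guard turns reading an exhausted handle into an explicit error; a minimal sketch:

    import numpy as np

    with open("arr.npy", "wb") as f:
        np.save(f, np.arange(3))
    with open("arr.npy", "rb") as f:
        first = np.load(f)  # consumes the single array
        try:
            np.load(f)      # no bytes left in the handle
        except EOFError as e:
            print(e)        # No data left in file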
@@ -760,13 +784,6 @@ def _ensure_ndmin_ndarray(a, *, ndmin: int):
_loadtxt_chunksize = 50000
-def _loadtxt_dispatcher(
- fname, dtype=None, comments=None, delimiter=None,
- converters=None, skiprows=None, usecols=None, unpack=None,
- ndmin=None, encoding=None, max_rows=None, *, like=None):
- return (like,)
-
-
def _check_nonneg_int(value, name="argument"):
try:
operator.index(value)
@@ -1161,10 +1178,10 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
while such lines are counted in `skiprows`.
.. versionadded:: 1.16.0
-
+
.. versionchanged:: 1.23.0
- Lines containing no data, including comment lines (e.g., lines
- starting with '#' or as specified via `comments`) are not counted
+ Lines containing no data, including comment lines (e.g., lines
+ starting with '#' or as specified via `comments`) are not counted
towards `max_rows`.
quotechar : unicode character or None, optional
The character used to denote the start and end of a quoted item.
@@ -1303,6 +1320,14 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
array([('alpha, #42', 10.), ('beta, #64', 2.)],
dtype=[('label', '<U12'), ('value', '<f8')])
+ Quoted fields can be separated by multiple whitespace characters:
+
+ >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
+ >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+ >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
+ array([('alpha, #42', 10.), ('beta, #64', 2.)],
+ dtype=[('label', '<U12'), ('value', '<f8')])
+
Two consecutive quote characters within a quoted field are treated as a
single escaped character:
@@ -1323,10 +1348,10 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
if like is not None:
return _loadtxt_with_like(
- fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
converters=converters, skiprows=skiprows, usecols=usecols,
unpack=unpack, ndmin=ndmin, encoding=encoding,
- max_rows=max_rows, like=like
+ max_rows=max_rows
)
if isinstance(delimiter, bytes):
@@ -1353,9 +1378,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
return arr
-_loadtxt_with_like = array_function_dispatch(
- _loadtxt_dispatcher
-)(loadtxt)
+_loadtxt_with_like = array_function_dispatch()(loadtxt)
def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
@@ -1716,17 +1739,6 @@ def fromregex(file, regexp, dtype, encoding=None):
#####--------------------------------------------------------------------------
-def _genfromtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None,
- skip_header=None, skip_footer=None, converters=None,
- missing_values=None, filling_values=None, usecols=None,
- names=None, excludelist=None, deletechars=None,
- replace_space=None, autostrip=None, case_sensitive=None,
- defaultfmt=None, unpack=None, usemask=None, loose=None,
- invalid_raise=None, max_rows=None, encoding=None,
- *, ndmin=None, like=None):
- return (like,)
-
-
@set_array_function_like_doc
@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
@@ -1924,7 +1936,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
if like is not None:
return _genfromtxt_with_like(
- fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
skip_header=skip_header, skip_footer=skip_footer,
converters=converters, missing_values=missing_values,
filling_values=filling_values, usecols=usecols, names=names,
@@ -1934,7 +1946,6 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
unpack=unpack, usemask=usemask, loose=loose,
invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
ndmin=ndmin,
- like=like
)
_ensure_ndmin_ndarray_check_param(ndmin)
@@ -2327,7 +2338,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
- if v == np.unicode_]
+ if v == np.str_]
if byte_converters and strcolidx:
# convert strings back to bytes for backward compatibility
@@ -2463,9 +2474,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
return output
-_genfromtxt_with_like = array_function_dispatch(
- _genfromtxt_dispatcher
-)(genfromtxt)
+_genfromtxt_with_like = array_function_dispatch()(genfromtxt)
def recfromtxt(fname, **kwargs):
diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi
index 8007b2dc7..cc81e82b7 100644
--- a/numpy/lib/npyio.pyi
+++ b/numpy/lib/npyio.pyi
@@ -72,6 +72,7 @@ class NpzFile(Mapping[str, NDArray[Any]]):
files: list[str]
allow_pickle: bool
pickle_kwargs: None | Mapping[str, Any]
+ _MAX_REPR_ARRAY_COUNT: int
# Represent `f` as a mutable property so we can access the type of `self`
@property
def f(self: _T) -> BagObj[_T]: ...
@@ -97,6 +98,8 @@ class NpzFile(Mapping[str, NDArray[Any]]):
def __iter__(self) -> Iterator[str]: ...
def __len__(self) -> int: ...
def __getitem__(self, key: str) -> NDArray[Any]: ...
+ def __contains__(self, key: str) -> bool: ...
+ def __repr__(self) -> str: ...
# NOTE: Returns a `NpzFile` if file is a zip file;
# returns an `ndarray`/`memmap` otherwise
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 6aa708861..3b8db2a95 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -9,12 +9,13 @@ __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
import functools
import re
import warnings
+
+from .._utils import set_module
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.core import overrides
-from numpy.core.overrides import set_module
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
@@ -103,7 +104,7 @@ def poly(seq_of_zeros):
References
----------
- .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
+ .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
@@ -671,7 +672,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
- warnings.warn(msg, RankWarning, stacklevel=4)
+ warnings.warn(msg, RankWarning, stacklevel=2)
if full:
return c, resids, rank, s, rcond
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index ab91423d9..5d8a41bfe 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -1,9 +1,7 @@
import functools
import numpy.core.numeric as _nx
-from numpy.core.numeric import (
- asarray, zeros, outer, concatenate, array, asanyarray
- )
+from numpy.core.numeric import asarray, zeros, array, asanyarray
from numpy.core.fromnumeric import reshape, transpose
from numpy.core.multiarray import normalize_axis_index
from numpy.core import overrides
@@ -124,19 +122,21 @@ def take_along_axis(arr, indices, axis):
>>> np.sort(a, axis=1)
array([[10, 20, 30],
[40, 50, 60]])
- >>> ai = np.argsort(a, axis=1); ai
+ >>> ai = np.argsort(a, axis=1)
+ >>> ai
array([[0, 2, 1],
[1, 2, 0]])
>>> np.take_along_axis(a, ai, axis=1)
array([[10, 20, 30],
[40, 50, 60]])
- The same works for max and min, if you expand the dimensions:
+ The same works for max and min, if you maintain the trivial dimension
+ with ``keepdims``:
- >>> np.expand_dims(np.max(a, axis=1), axis=1)
+ >>> np.max(a, axis=1, keepdims=True)
array([[30],
[60]])
- >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai = np.argmax(a, axis=1, keepdims=True)
>>> ai
array([[1],
[0]])
@@ -147,8 +147,8 @@ def take_along_axis(arr, indices, axis):
If we want to get the max and min at the same time, we can stack the
indices first
- >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1)
- >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai_min = np.argmin(a, axis=1, keepdims=True)
+ >>> ai_max = np.argmax(a, axis=1, keepdims=True)
>>> ai = np.concatenate([ai_min, ai_max], axis=1)
>>> ai
array([[0, 1],
@@ -237,7 +237,7 @@ def put_along_axis(arr, indices, values, axis):
We can replace the maximum values with:
- >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai = np.argmax(a, axis=1, keepdims=True)
>>> ai
array([[1],
[0]])
@@ -372,7 +372,7 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
# invoke the function on the first item
try:
ind0 = next(inds)
- except StopIteration as e:
+ except StopIteration:
raise ValueError(
'Cannot apply_along_axis when any iteration dimensions are 0'
) from None
@@ -643,10 +643,6 @@ def column_stack(tup):
[3, 4]])
"""
- if not overrides.ARRAY_FUNCTION_ENABLED:
- # raise warning if necessary
- _arrays_for_stack_dispatcher(tup, stacklevel=2)
-
arrays = []
for v in tup:
arr = asanyarray(v)
@@ -713,10 +709,6 @@ def dstack(tup):
[[3, 4]]])
"""
- if not overrides.ARRAY_FUNCTION_ENABLED:
- # raise warning if necessary
- _arrays_for_stack_dispatcher(tup, stacklevel=2)
-
arrs = atleast_3d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
@@ -1041,6 +1033,7 @@ def dsplit(ary, indices_or_sections):
raise ValueError('dsplit only works on arrays of 3 or more dimensions')
return split(ary, indices_or_sections, 2)
+
def get_array_prepare(*args):
"""Find the wrapper for the array with the highest priority.
@@ -1053,6 +1046,7 @@ def get_array_prepare(*args):
return wrappers[-1][-1]
return None
+
def get_array_wrap(*args):
"""Find the wrapper for the array with the highest priority.
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index a59681573..0bebe3693 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -1139,6 +1139,23 @@ class TestWrap:
a = np.arange(5)
b = np.pad(a, (0, 12), mode="wrap")
assert_array_equal(np.r_[a, a, a, a][:-3], b)
+
+ def test_repeated_wrapping_multiple_origin(self):
+ """
+ Assert that 'wrap' pads only with multiples of the original area if
+ the pad width is larger than the original array.
+ """
+ a = np.arange(4).reshape(2, 2)
+ a = np.pad(a, [(1, 3), (3, 1)], mode='wrap')
+ b = np.array(
+ [[3, 2, 3, 2, 3, 2],
+ [1, 0, 1, 0, 1, 0],
+ [3, 2, 3, 2, 3, 2],
+ [1, 0, 1, 0, 1, 0],
+ [3, 2, 3, 2, 3, 2],
+ [1, 0, 1, 0, 1, 0]]
+ )
+ assert_array_equal(a, b)
class TestEdge:
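
For readers following along, a minimal sketch of the behavior the new test pins down: with ``mode='wrap'`` a pad width larger than the array tiles whole copies of the original, rather than repeating only an edge region.

    import numpy as np

    # Pad a 2x2 block with widths exceeding its size; 'wrap' tiles the
    # original array, so the 6x6 result is a periodic repetition of it.
    a = np.arange(4).reshape(2, 2)
    padded = np.pad(a, [(1, 3), (3, 1)], mode='wrap')
    print(padded.shape)  # (6, 6)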
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index bb07e25a9..a180accbe 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -414,13 +414,48 @@ class TestSetOps:
with pytest.raises(ValueError):
in1d(a, b, kind="table")
+ @pytest.mark.parametrize(
+ "dtype1,dtype2",
+ [
+ (np.int8, np.int16),
+ (np.int16, np.int8),
+ (np.uint8, np.uint16),
+ (np.uint16, np.uint8),
+ (np.uint8, np.int16),
+ (np.int16, np.uint8),
+ ]
+ )
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_in1d_mixed_dtype(self, dtype1, dtype2, kind):
+ """Test that in1d works as expected for mixed dtype input."""
+ is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger)
+ ar1 = np.array([0, 0, 1, 1], dtype=dtype1)
+
+ if is_dtype2_signed:
+ ar2 = np.array([-128, 0, 127], dtype=dtype2)
+ else:
+ ar2 = np.array([127, 0, 255], dtype=dtype2)
+
+ expected = np.array([True, True, False, False])
+
+ expect_failure = kind == "table" and any((
+ dtype1 == np.int8 and dtype2 == np.int16,
+ dtype1 == np.int16 and dtype2 == np.int8
+ ))
+
+ if expect_failure:
+ with pytest.raises(RuntimeError, match="exceed the maximum"):
+ in1d(ar1, ar2, kind=kind)
+ else:
+ assert_array_equal(in1d(ar1, ar2, kind=kind), expected)
+
@pytest.mark.parametrize("kind", [None, "sort", "table"])
def test_in1d_mixed_boolean(self, kind):
"""Test that in1d works as expected for bool/int input."""
for dtype in np.typecodes["AllInteger"]:
a = np.array([True, False, False], dtype=bool)
- b = np.array([1, 1, 1, 1], dtype=dtype)
- expected = np.array([True, False, False], dtype=bool)
+ b = np.array([0, 0, 0, 0], dtype=dtype)
+ expected = np.array([False, True, True], dtype=bool)
assert_array_equal(in1d(a, b, kind=kind), expected)
a, b = b, a
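
A small sketch of the mixed-dtype cases covered above (error text as matched by the test): with ``kind="table"``, a signed int8/int16 combination whose combined value range exceeds the table limit raises ``RuntimeError``, while the sort-based kind still works.

    import numpy as np

    # Mixed signed dtypes: the combined range is too large for the
    # lookup-table strategy, so kind="table" refuses rather than guessing.
    ar1 = np.array([0, 0, 1, 1], dtype=np.int8)
    ar2 = np.array([-128, 0, 127], dtype=np.int16)
    try:
        np.in1d(ar1, ar2, kind="table")
    except RuntimeError as exc:
        print(exc)
    print(np.in1d(ar1, ar2, kind="sort"))  # [ True  True False False]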
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 08878a1f9..58d08f1e5 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -283,7 +283,7 @@ from io import BytesIO
import numpy as np
from numpy.testing import (
assert_, assert_array_equal, assert_raises, assert_raises_regex,
- assert_warns, IS_PYPY,
+ assert_warns, IS_PYPY, IS_WASM
)
from numpy.testing._private.utils import requires_memory
from numpy.lib import format
@@ -459,6 +459,7 @@ def test_long_str():
assert_array_equal(long_str_arr, long_str_arr2)
+@pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly")
@pytest.mark.slow
def test_memmap_roundtrip(tmpdir):
for i, arr in enumerate(basic_arrays + record_arrays):
@@ -526,10 +527,12 @@ def test_load_padded_dtype(tmpdir, dt):
assert_array_equal(arr, arr1)
+@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup")
def test_python2_python3_interoperability():
fname = 'win64python2.npy'
path = os.path.join(os.path.dirname(__file__), 'data', fname)
- data = np.load(path)
+ with pytest.warns(UserWarning, match="Reading.*this warning\\."):
+ data = np.load(path)
assert_array_equal(data, np.ones(2))
def test_pickle_python2_python3():
@@ -675,6 +678,7 @@ def test_version_2_0():
assert_raises(ValueError, format.write_array, f, d, (1, 0))
+@pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly")
def test_version_2_0_memmap(tmpdir):
# requires more than 2 byte for header
dt = [(("%d" % i) * 100, float) for i in range(500)]
@@ -920,6 +924,7 @@ def test_large_file_support(tmpdir):
assert_array_equal(r, d)
+@pytest.mark.skipif(IS_PYPY, reason="flaky on PyPy")
@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8,
reason="test requires 64-bit system")
@pytest.mark.slow
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 88d4987e6..b0944ec85 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -8,14 +8,14 @@ import pytest
import hypothesis
from hypothesis.extra.numpy import arrays
import hypothesis.strategies as st
-
+from functools import partial
import numpy as np
from numpy import ma
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_raises, assert_allclose, IS_PYPY,
- assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT,
+ assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT, IS_WASM
)
import numpy.lib.function_base as nfb
from numpy.random import rand
@@ -25,6 +25,7 @@ from numpy.lib import (
i0, insert, interp, kaiser, meshgrid, msort, piecewise, place, rot90,
select, setxor1d, sinc, trapz, trim_zeros, unwrap, unique, vectorize
)
+from numpy.core.numeric import normalize_axis_tuple
def get_mat(n):
@@ -228,8 +229,8 @@ class TestAny:
def test_nd(self):
y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]
assert_(np.any(y1))
- assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0])
- assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
+ assert_array_equal(np.any(y1, axis=0), [1, 1, 0])
+ assert_array_equal(np.any(y1, axis=1), [0, 1, 1])
class TestAll:
@@ -246,8 +247,8 @@ class TestAll:
def test_nd(self):
y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
assert_(not np.all(y1))
- assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1])
- assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
+ assert_array_equal(np.all(y1, axis=0), [0, 0, 1])
+ assert_array_equal(np.all(y1, axis=1), [0, 0, 1])
class TestCopy:
@@ -1216,6 +1217,13 @@ class TestGradient:
dfdx = gradient(f, x)
assert_array_equal(dfdx, [0.5, 0.5])
+ def test_return_type(self):
+ res = np.gradient(([1, 2], [2, 3]))
+ if np._using_numpy2_behavior():
+ assert type(res) is tuple
+ else:
+ assert type(res) is list
+
class TestAngle:
@@ -1779,6 +1787,70 @@ class TestVectorize:
assert_equal(type(r), subclass)
assert_equal(r, m * v)
+ def test_name(self):
+ # See gh-23021
+ @np.vectorize
+ def f2(a, b):
+ return a + b
+
+ assert f2.__name__ == 'f2'
+
+ def test_decorator(self):
+ @vectorize
+ def addsubtract(a, b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+
+ r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7])
+ assert_array_equal(r, [1, 6, 1, 2])
+
+ def test_docstring(self):
+ @vectorize
+ def f(x):
+ """Docstring"""
+ return x
+
+ if sys.flags.optimize < 2:
+ assert f.__doc__ == "Docstring"
+
+ def test_partial(self):
+ def foo(x, y):
+ return x + y
+
+ bar = partial(foo, 3)
+ vbar = np.vectorize(bar)
+ assert vbar(1) == 4
+
+ def test_signature_otypes_decorator(self):
+ @vectorize(signature='(n)->(n)', otypes=['float64'])
+ def f(x):
+ return x
+
+ r = f([1, 2, 3])
+ assert_equal(r.dtype, np.dtype('float64'))
+ assert_array_equal(r, [1, 2, 3])
+ assert f.__name__ == 'f'
+
+ def test_bad_input(self):
+ with assert_raises(TypeError):
+ A = np.vectorize(pyfunc=3)
+
+ def test_no_keywords(self):
+ with assert_raises(TypeError):
+ @np.vectorize("string")
+ def foo():
+ return "bar"
+
+ def test_positional_regression_9477(self):
+ # This supplies the first keyword argument as a positional,
+ # to ensure that they are still properly forwarded after the
+ # enhancement for #9477
+ f = vectorize((lambda x: x), ['float64'])
+ r = f([2])
+ assert_equal(r.dtype, np.dtype('float64'))
+
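
Taken together, the new tests document that `np.vectorize` now works as a decorator, with or without keyword arguments. A compact sketch of the bare-decorator form:

    import numpy as np

    # The wrapped function keeps its name and docstring (see test_name
    # and test_docstring above).
    @np.vectorize
    def addsubtract(a, b):
        """Return a - b when a > b, else a + b."""
        return a - b if a > b else a + b

    print(addsubtract([0, 3, 6, 9], [1, 3, 5, 7]))  # [1 6 1 2]
    print(addsubtract.__name__)                     # addsubtract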
class TestLeaks:
class A:
@@ -2972,6 +3044,14 @@ class TestPercentile:
o = np.ones((1,))
np.percentile(d, 5, None, o, False, 'linear')
+ def test_complex(self):
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G')
+ assert_raises(TypeError, np.percentile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D')
+ assert_raises(TypeError, np.percentile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F')
+ assert_raises(TypeError, np.percentile, arr_c, 0.5)
+
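
The rejection asserted here can be seen directly; as the new tests state, complex inputs now raise instead of silently ordering complex values:

    import numpy as np

    # np.percentile (and np.quantile, tested below) reject complex input.
    arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j])
    try:
        np.percentile(arr_c, 50)
    except TypeError as exc:
        print(exc)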
def test_2D(self):
x = np.array([[1, 1, 1],
[1, 1, 1],
@@ -2980,7 +3060,7 @@ class TestPercentile:
[1, 1, 1]])
assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])
- @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("dtype", np.typecodes["Float"])
def test_linear_nan_1D(self, dtype):
# METHOD 1 of H&F
arr = np.asarray([15.0, np.NAN, 35.0, 40.0, 50.0], dtype=dtype)
@@ -2997,9 +3077,6 @@ class TestPercentile:
(np.float32, np.float32),
(np.float64, np.float64),
(np.longdouble, np.longdouble),
- (np.complex64, np.complex64),
- (np.complex128, np.complex128),
- (np.clongdouble, np.clongdouble),
(np.dtype("O"), np.float64)]
@pytest.mark.parametrize(["input_dtype", "expected_dtype"], H_F_TYPE_CODES)
@@ -3039,7 +3116,7 @@ class TestPercentile:
np.testing.assert_equal(np.asarray(actual).dtype,
np.dtype(expected_dtype))
- TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O"
+ TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["Float"] + "O"
@pytest.mark.parametrize("dtype", TYPE_CODES)
def test_lower_higher(self, dtype):
@@ -3331,6 +3408,32 @@ class TestPercentile:
assert_equal(np.percentile(d, [1, 7], axis=(0, 3),
keepdims=True).shape, (2, 1, 5, 7, 1))
+ @pytest.mark.parametrize('q', [7, [1, 7]])
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1,),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ def test_keepdims_out(self, q, axis):
+ d = np.ones((3, 5, 7, 11))
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ shape_out = np.shape(q) + shape_out
+
+ out = np.empty(shape_out)
+ result = np.percentile(d, q, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
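
A worked instance of the shape contract the parametrized test checks: with ``keepdims=True``, the ``out`` array must have the reduced axes kept as length 1, prefixed by the shape of ``q``. A sketch under those assumptions:

    import numpy as np

    # d has shape (3, 5, 7, 11); reducing over axes (1, 3) with keepdims
    # gives (3, 1, 7, 1), and q=[1, 7] prepends a leading 2.
    d = np.ones((3, 5, 7, 11))
    out = np.empty((2, 3, 1, 7, 1))
    res = np.percentile(d, [1, 7], axis=(1, 3), keepdims=True, out=out)
    assert res is out and res.shape == (2, 3, 1, 7, 1)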
def test_out(self):
o = np.zeros((4,))
d = np.ones((3, 4))
@@ -3435,9 +3538,20 @@ class TestPercentile:
np.percentile([1, 2, 3, 4.0], q)
+quantile_methods = [
+ 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation',
+ 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear',
+ 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher',
+ 'midpoint']
+
+
class TestQuantile:
# most of this is already tested by TestPercentile
+ def V(self, x, y, alpha):
+ # Identification function used in several tests.
+ return (x >= y) - alpha
+
def test_max_ulp(self):
x = [0.0, 0.2, 0.4]
a = np.quantile(x, 0.45)
@@ -3452,7 +3566,6 @@ class TestQuantile:
assert_equal(np.quantile(x, 1), 3.5)
assert_equal(np.quantile(x, 0.5), 1.75)
- @pytest.mark.xfail(reason="See gh-19154")
def test_correct_quantile_value(self):
a = np.array([True])
tf_quant = np.quantile(True, False)
@@ -3490,6 +3603,15 @@ class TestQuantile:
x = np.arange(8)
assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2))
+ def test_complex(self):
+ #See gh-22652
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G')
+ assert_raises(TypeError, np.quantile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D')
+ assert_raises(TypeError, np.quantile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F')
+ assert_raises(TypeError, np.quantile, arr_c, 0.5)
+
def test_no_p_overwrite(self):
# this is worth retesting, because quantile does not make a copy
p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
@@ -3508,11 +3630,7 @@ class TestQuantile:
method="nearest")
assert res.dtype == dtype
- @pytest.mark.parametrize("method",
- ['inverted_cdf', 'averaged_inverted_cdf', 'closest_observation',
- 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear',
- 'median_unbiased', 'normal_unbiased',
- 'nearest', 'lower', 'higher', 'midpoint'])
+ @pytest.mark.parametrize("method", quantile_methods)
def test_quantile_monotonic(self, method):
# GH 14685
# test that the return value of quantile is monotonic if p0 is ordered
@@ -3543,6 +3661,94 @@ class TestQuantile:
assert np.isscalar(actual)
assert_equal(np.quantile(a, 0.5), np.nan)
+ @pytest.mark.parametrize("method", quantile_methods)
+ @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+ def test_quantile_identification_equation(self, method, alpha):
+ # Test that the identification equation holds for the empirical
+ # CDF:
+ # E[V(x, Y)] = 0 <=> x is quantile
+ # with Y the random variable for which we have observed values and
+ # V(x, y) the canonical identification function for the quantile (at
+ # level alpha), see
+ # https://doi.org/10.48550/arXiv.0912.0902
+ rng = np.random.default_rng(4321)
+ # We choose n and alpha such that we cover 3 cases:
+ # - n * alpha is an integer
+ # - n * alpha is a float that gets rounded down
+ # - n * alpha is a float that gets rounded up
+ n = 102 # n * alpha = 20.4, 51. , 91.8
+ y = rng.random(n)
+ x = np.quantile(y, alpha, method=method)
+ if method in ("higher",):
+ # These methods do not fulfill the identification equation.
+ assert np.abs(np.mean(self.V(x, y, alpha))) > 0.1 / n
+ elif int(n * alpha) == n * alpha:
+ # We can expect exact results, up to machine precision.
+ assert_allclose(np.mean(self.V(x, y, alpha)), 0, atol=1e-14)
+ else:
+ # V = (x >= y) - alpha cannot sum to zero exactly but within
+ # "sample precision".
+ assert_allclose(np.mean(self.V(x, y, alpha)), 0,
+ atol=1 / n / np.amin([alpha, 1 - alpha]))
+
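
Numerically, the identification property is easy to see for a large sample; a sketch, not part of the test suite:

    import numpy as np

    # At the empirical alpha-quantile x, the mean of V(x, y) = (x >= y) - alpha
    # is close to zero, which is what the test above asserts within tolerance.
    rng = np.random.default_rng(0)
    y = rng.random(10_000)
    alpha = 0.2
    x = np.quantile(y, alpha)
    print(np.mean((x >= y) - alpha))  # approximately 0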
+ @pytest.mark.parametrize("method", quantile_methods)
+ @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+ def test_quantile_add_and_multiply_constant(self, method, alpha):
+ # Test that
+ # 1. quantile(c + x) = c + quantile(x)
+ # 2. quantile(c * x) = c * quantile(x)
+ # 3. quantile(-x) = -quantile(x, 1 - alpha)
+ # On empirical quantiles, this equation does not hold exactly.
+ # Koenker (2005) "Quantile Regression" Chapter 2.2.3 calls these
+ # properties equivariance.
+ rng = np.random.default_rng(4321)
+ # We choose n and alpha such that we cover 3 cases:
+ # - n * alpha is an integer
+ # - n * alpha is a float that gets rounded down
+ # - n * alpha is a float that gets rounded up
+ n = 102 # n * alpha = 20.4, 51. , 91.8
+ y = rng.random(n)
+ q = np.quantile(y, alpha, method=method)
+ c = 13.5
+
+ # 1
+ assert_allclose(np.quantile(c + y, alpha, method=method), c + q)
+ # 2
+ assert_allclose(np.quantile(c * y, alpha, method=method), c * q)
+ # 3
+ q = -np.quantile(-y, 1 - alpha, method=method)
+ if method == "inverted_cdf":
+ if (
+ n * alpha == int(n * alpha)
+ or np.round(n * alpha) == int(n * alpha) + 1
+ ):
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ elif method == "closest_observation":
+ if n * alpha == int(n * alpha):
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ elif np.round(n * alpha) == int(n * alpha) + 1:
+ assert_allclose(
+ q, np.quantile(y, alpha + 1/n, method="higher"))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ elif method == "interpolated_inverted_cdf":
+ assert_allclose(q, np.quantile(y, alpha + 1/n, method=method))
+ elif method == "nearest":
+ if n * alpha == int(n * alpha):
+ assert_allclose(q, np.quantile(y, alpha + 1/n, method=method))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method=method))
+ elif method == "lower":
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ elif method == "higher":
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ else:
+ # "averaged_inverted_cdf", "hazen", "weibull", "linear",
+ # "median_unbiased", "normal_unbiased", "midpoint"
+ assert_allclose(q, np.quantile(y, alpha, method=method))
+
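
The first two equivariance properties can be checked directly for the default method; a minimal sketch:

    import numpy as np

    # quantile(c + y) == c + quantile(y) and quantile(c * y) == c * quantile(y)
    # hold up to floating-point error for the default 'linear' method.
    rng = np.random.default_rng(1)
    y = rng.random(102)
    c, alpha = 13.5, 0.9
    q = np.quantile(y, alpha)
    assert np.allclose(np.quantile(c + y, alpha), c + q)
    assert np.allclose(np.quantile(c * y, alpha), c * q)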
class TestLerp:
@hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False,
@@ -3754,6 +3960,7 @@ class TestMedian:
b[2] = np.nan
assert_equal(np.median(a, (0, 2)), b)
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work correctly")
def test_empty(self):
# mean(empty array) emits two warnings: empty slice and divide by 0
a = np.array([], dtype=float)
@@ -3842,6 +4049,29 @@ class TestMedian:
assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape,
(1, 1, 7, 1))
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1, ),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ def test_keepdims_out(self, axis):
+ d = np.ones((3, 5, 7, 11))
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ out = np.empty(shape_out)
+ result = np.median(d, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
class TestAdd_newdoc_ufunc:
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index 87643169b..87e6e1d41 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -6,6 +6,7 @@ from numpy.testing import (
assert_array_almost_equal, assert_raises, assert_allclose,
assert_array_max_ulp, assert_raises_regex, suppress_warnings,
)
+from numpy.testing._private.utils import requires_memory
import pytest
@@ -397,6 +398,16 @@ class TestHistogram:
edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))
assert_array_equal(edges, e)
+ @requires_memory(free_bytes=1e10)
+ @pytest.mark.slow
+ def test_big_arrays(self):
+ sample = np.zeros([100000000, 3])
+ xbins = 400
+ ybins = 400
+ zbins = np.arange(16000)
+ hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins))
+ assert_equal(type(hist), type((1, 2)))
+
class TestHistogramOptimBinNums:
"""
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index a5462749f..c1032df8e 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -25,7 +25,7 @@ from numpy.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings,
- break_cycles
+ break_cycles, IS_WASM
)
from numpy.testing._private.utils import requires_memory
@@ -232,6 +232,17 @@ class TestSavezLoad(RoundtripTest):
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
+
+ def test_tuple_getitem_raises(self):
+ # gh-23748
+ a = np.array([1, 2, 3])
+ f = BytesIO()
+ np.savez(f, a=a)
+ f.seek(0)
+ l = np.load(f)
+ with pytest.raises(KeyError, match="(1, 2)"):
+ l[1, 2]
+
def test_BagObj(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
@@ -243,6 +254,7 @@ class TestSavezLoad(RoundtripTest):
assert_equal(a, l.f.file_a)
assert_equal(b, l.f.file_b)
+ @pytest.mark.skipif(IS_WASM, reason="Cannot start thread")
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and savez functions in multithreaded environment
@@ -320,6 +332,21 @@ class TestSavezLoad(RoundtripTest):
data.close()
assert_(fp.closed)
+ @pytest.mark.parametrize("count, expected_repr", [
+ (1, "NpzFile {fname!r} with keys: arr_0"),
+ (5, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4"),
+ # _MAX_REPR_ARRAY_COUNT is 5, so files with more than 5 keys are
+ # expected to end in '...'
+ (6, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4..."),
+ ])
+ def test_repr_lists_keys(self, count, expected_repr):
+ a = np.array([[1, 2], [3, 4]], float)
+ with temppath(suffix='.npz') as tmp:
+ np.savez(tmp, *[a]*count)
+ l = np.load(tmp)
+ assert repr(l) == expected_repr.format(fname=tmp)
+ l.close()
+
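
A hedged sketch of the new repr (file name and key listing follow the expected strings in the test; listings are truncated after five keys):

    import os
    import tempfile

    import numpy as np

    # Saving six unnamed arrays produces keys arr_0..arr_5; the repr lists
    # only the first five and ends with '...'.
    path = os.path.join(tempfile.mkdtemp(), "demo.npz")
    np.savez(path, *[np.zeros(1)] * 6)
    with np.load(path) as npz:
        print(repr(npz))  # NpzFile '...demo.npz' with keys: arr_0, ..., arr_4...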
class TestSaveTxt:
def test_array(self):
@@ -521,7 +548,7 @@ class TestSaveTxt:
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
with tempdir() as tmpdir:
# set encoding as on windows it may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
@@ -529,7 +556,7 @@ class TestSaveTxt:
def test_unicode_roundtrip(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
# our gz wrapper support encoding
suffixes = ['', '.gz']
if HAS_BZ2:
@@ -541,12 +568,12 @@ class TestSaveTxt:
np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
fmt=['%s'], encoding='UTF-16-LE')
b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
- encoding='UTF-16-LE', dtype=np.unicode_)
+ encoding='UTF-16-LE', dtype=np.str_)
assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
@@ -554,7 +581,7 @@ class TestSaveTxt:
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
@@ -596,8 +623,8 @@ class TestSaveTxt:
# in our process if needed, see gh-16889
memoryerror_raised = Value(c_bool)
- # Since Python 3.8, the default start method for multiprocessing has
- # been changed from 'fork' to 'spawn' on macOS, causing inconsistency
+ # Since Python 3.8, the default start method for multiprocessing has
+ # been changed from 'fork' to 'spawn' on macOS, causing inconsistency
on the memory sharing model, leading to a failed check_large_zip test
ctx = get_context('fork')
p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,))
@@ -651,12 +678,12 @@ class LoadTxtBase:
with temppath() as path:
with open(path, "wb") as f:
f.write(nonascii.encode("UTF-16"))
- x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
+ x = self.loadfunc(path, encoding="UTF-16", dtype=np.str_)
assert_array_equal(x, nonascii)
def test_binary_decode(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
- v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
+ v = self.loadfunc(BytesIO(utf16), dtype=np.str_, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_converters_decode(self):
@@ -664,7 +691,7 @@ class LoadTxtBase:
c = TextIO()
c.write(b'\xcf\x96')
c.seek(0)
- x = self.loadfunc(c, dtype=np.unicode_,
+ x = self.loadfunc(c, dtype=np.str_,
converters={0: lambda x: x.decode('UTF-8')})
a = np.array([b'\xcf\x96'.decode('UTF-8')])
assert_array_equal(x, a)
@@ -675,7 +702,7 @@ class LoadTxtBase:
with temppath() as path:
with io.open(path, 'wt', encoding='UTF-8') as f:
f.write(utf8)
- x = self.loadfunc(path, dtype=np.unicode_,
+ x = self.loadfunc(path, dtype=np.str_,
converters={0: lambda x: x + 't'},
encoding='UTF-8')
a = np.array([utf8 + 't'])
@@ -1160,7 +1187,7 @@ class TestLoadTxt(LoadTxtBase):
with open(path, "wb") as f:
f.write(butf8)
with open(path, "rb") as f:
- x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
+ x = np.loadtxt(f, encoding="UTF-8", dtype=np.str_)
assert_array_equal(x, sutf8)
# test broken latin1 conversion people now rely on
with open(path, "rb") as f:
@@ -2218,7 +2245,7 @@ M 33 21.99
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
- dtype=np.unicode_)
+ dtype=np.str_)
assert_array_equal(test, ctl)
# test a mixed dtype
@@ -2261,7 +2288,7 @@ M 33 21.99
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
- dtype=np.unicode_)
+ dtype=np.str_)
assert_array_equal(test, ctl)
def test_recfromtxt(self):
@@ -2539,6 +2566,7 @@ class TestPathUsage:
break_cycles()
break_cycles()
+ @pytest.mark.xfail(IS_WASM, reason="memmap doesn't work correctly")
def test_save_load_memmap_readwrite(self):
# Test that pathlib.Path instances can be written mem-mapped.
with temppath(suffix='.npy') as path:
@@ -2735,3 +2763,13 @@ def test_load_refcount():
with assert_no_gc_cycles():
x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
+
+def test_load_multiple_arrays_until_eof():
+ f = BytesIO()
+ np.save(f, 1)
+ np.save(f, 2)
+ f.seek(0)
+ assert np.load(f) == 1
+ assert np.load(f) == 2
+ with pytest.raises(EOFError):
+ np.load(f)
diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py
index 0b8fe3c47..2d805e434 100644
--- a/numpy/lib/tests/test_loadtxt.py
+++ b/numpy/lib/tests/test_loadtxt.py
@@ -244,6 +244,14 @@ def test_converters_negative_indices_with_usecols():
usecols=[0, -1], converters={-1: (lambda x: -1)})
assert_array_equal(res, [[0, -1], [0, -1]])
+
+def test_ragged_error():
+ rows = ["1,2,3", "1,2,3", "4,3,2,1"]
+ with pytest.raises(ValueError,
+ match="the number of columns changed from 3 to 4 at row 3"):
+ np.loadtxt(rows, delimiter=",")
+
+
def test_ragged_usecols():
# usecols, and negative ones, work even with varying number of columns.
txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n")
@@ -534,12 +542,27 @@ def test_quoted_field(q):
assert_array_equal(res, expected)
+@pytest.mark.parametrize("q", ('"', "'", "`"))
+def test_quoted_field_with_whitespace_delimiter(q):
+ txt = StringIO(
+ f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n"
+ )
+ dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)])
+ expected = np.array(
+ [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype
+ )
+
+ res = np.loadtxt(txt, dtype=dtype, delimiter=None, quotechar=q)
+ assert_array_equal(res, expected)
+
+
def test_quote_support_default():
"""Support for quoted fields is disabled by default."""
txt = StringIO('"lat,long", 45, 30\n')
dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)])
- with pytest.raises(ValueError, match="the number of columns changed"):
+ with pytest.raises(ValueError,
+ match="the dtype passed requires 3 columns but 4 were"):
np.loadtxt(txt, dtype=dtype, delimiter=",")
# Enable quoting support with non-None value for quotechar param
@@ -1011,3 +1034,15 @@ def test_control_characters_as_bytes():
"""Byte control characters (comments, delimiter) are supported."""
a = np.loadtxt(StringIO("#header\n1,2,3"), comments=b"#", delimiter=b",")
assert_equal(a, [1, 2, 3])
+
+
+@pytest.mark.filterwarnings('ignore::UserWarning')
+def test_field_growing_cases():
+ # Test empty-field appending/growing (each field still takes 1 character)
+ # to check that appending the final field does not create issues.
+ res = np.loadtxt([""], delimiter=",", dtype=bytes)
+ assert len(res) == 0
+
+ for i in range(1, 1024):
+ res = np.loadtxt(["," * i], delimiter=",", dtype=bytes)
+ assert len(res) == i+1
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index 733a077ea..257de381b 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -3,6 +3,7 @@ import pytest
import inspect
import numpy as np
+from numpy.core.numeric import normalize_axis_tuple
from numpy.lib.nanfunctions import _nan_mask, _replace_nan
from numpy.testing import (
assert_, assert_equal, assert_almost_equal, assert_raises,
@@ -403,14 +404,20 @@ class TestNanFunctions_NumberTypes:
)
def test_nanfunc_q(self, mat, dtype, nanfunc, func):
mat = mat.astype(dtype)
- tgt = func(mat, q=1)
- out = nanfunc(mat, q=1)
+ if mat.dtype.kind == "c":
+ assert_raises(TypeError, func, mat, q=1)
+ assert_raises(TypeError, nanfunc, mat, q=1)
- assert_almost_equal(out, tgt)
- if dtype == "O":
- assert type(out) is type(tgt)
else:
- assert out.dtype == tgt.dtype
+ tgt = func(mat, q=1)
+ out = nanfunc(mat, q=1)
+
+ assert_almost_equal(out, tgt)
+
+ if dtype == "O":
+ assert type(out) is type(tgt)
+ else:
+ assert out.dtype == tgt.dtype
@pytest.mark.parametrize(
"nanfunc,func",
@@ -807,6 +814,34 @@ class TestNanFunctions_Median:
res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1, ),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning")
+ def test_keepdims_out(self, axis):
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+ w = w.astype(np.intp)
+ d[tuple(w)] = np.nan
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ out = np.empty(shape_out)
+ result = np.nanmedian(d, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
@@ -982,6 +1017,37 @@ class TestNanFunctions_Percentile:
res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
+ @pytest.mark.parametrize('q', [7, [1, 7]])
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1,),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning")
+ def test_keepdims_out(self, q, axis):
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+ w = w.astype(np.intp)
+ d[tuple(w)] = np.nan
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ shape_out = np.shape(q) + shape_out
+
+ out = np.empty(shape_out)
+ result = np.nanpercentile(d, q, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
@@ -1000,6 +1066,14 @@ class TestNanFunctions_Percentile:
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
+ def test_complex(self):
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G')
+ assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D')
+ assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F')
+ assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)
+
def test_result_values(self):
tgt = [np.percentile(d, 28) for d in _rdat]
res = np.nanpercentile(_ndat, 28, axis=1)
@@ -1010,7 +1084,7 @@ class TestNanFunctions_Percentile:
assert_almost_equal(res, tgt)
@pytest.mark.parametrize("axis", [None, 0, 1])
- @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("dtype", np.typecodes["Float"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan),
@@ -1104,6 +1178,14 @@ class TestNanFunctions_Quantile:
assert_equal(np.nanquantile(x, 1), 3.5)
assert_equal(np.nanquantile(x, 0.5), 1.75)
+ def test_complex(self):
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G')
+ assert_raises(TypeError, np.nanquantile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D')
+ assert_raises(TypeError, np.nanquantile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F')
+ assert_raises(TypeError, np.nanquantile, arr_c, 0.5)
+
def test_no_p_overwrite(self):
# this is worth retesting, because quantile does not make a copy
p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
@@ -1117,7 +1199,7 @@ class TestNanFunctions_Quantile:
assert_array_equal(p, p0)
@pytest.mark.parametrize("axis", [None, 0, 1])
- @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("dtype", np.typecodes["Float"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan),
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 76058cf20..eb6628904 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -492,7 +492,7 @@ class TestColumnStack:
assert_equal(actual, expected)
def test_generator(self):
- with assert_warns(FutureWarning):
+ with pytest.raises(TypeError, match="arrays to stack must be"):
column_stack((np.arange(3) for _ in range(2)))
@@ -529,7 +529,7 @@ class TestDstack:
assert_array_equal(res, desired)
def test_generator(self):
- with assert_warns(FutureWarning):
+ with pytest.raises(TypeError, match="arrays to stack must be"):
dstack((np.arange(3) for _ in range(2)))
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index 141f508fd..eb008c600 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -4,20 +4,14 @@
from numpy.testing import (
assert_equal, assert_array_equal, assert_array_max_ulp,
assert_array_almost_equal, assert_raises, assert_
- )
-
+)
from numpy import (
arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d,
tri, mask_indices, triu_indices, triu_indices_from, tril_indices,
tril_indices_from, vander,
- )
-
+)
import numpy as np
-
-from numpy.core.tests.test_overrides import requires_array_function
-
-
import pytest
@@ -283,7 +277,6 @@ class TestHistogram2d:
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
- @requires_array_function
def test_dispatch(self):
class ShouldDispatch:
def __array_function__(self, function, types, args, kwargs):
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index 3f4ca6309..ea0326139 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -155,7 +155,7 @@ class TestIscomplex:
def test_fail(self):
z = np.array([-1, 0, 1])
res = iscomplex(z)
- assert_(not np.sometrue(res, axis=0))
+ assert_(not np.any(res, axis=0))
def test_pass(self):
z = np.array([-1j, 1, 0])
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index c280b6969..fac4f41d0 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -80,12 +80,6 @@ class TestUfunclike:
assert_(isinstance(f0d, MyArray))
assert_equal(f0d.metadata, 'bar')
- def test_deprecated(self):
- # NumPy 1.13.0, 2017-04-26
- assert_warns(DeprecationWarning, ufl.fix, [1, 2], y=nx.empty(2))
- assert_warns(DeprecationWarning, ufl.isposinf, [1, 2], y=nx.empty(2))
- assert_warns(DeprecationWarning, ufl.isneginf, [1, 2], y=nx.empty(2))
-
def test_scalar(self):
x = np.inf
actual = np.isposinf(x)
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 654ee4cf5..6dcb65651 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -155,10 +155,6 @@ def flipud(m):
return m[::-1, ...]
-def _eye_dispatcher(N, M=None, k=None, dtype=None, order=None, *, like=None):
- return (like,)
-
-
@set_array_function_like_doc
@set_module('numpy')
def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
@@ -209,7 +205,7 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
"""
if like is not None:
- return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like)
+ return _eye_with_like(like, N, M=M, k=k, dtype=dtype, order=order)
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
@@ -228,9 +224,7 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
return m
-_eye_with_like = array_function_dispatch(
- _eye_dispatcher
-)(eye)
+_eye_with_like = array_function_dispatch()(eye)
def _diag_dispatcher(v, k=None):
@@ -369,10 +363,6 @@ def diagflat(v, k=0):
return wrap(res)
-def _tri_dispatcher(N, M=None, k=None, dtype=None, *, like=None):
- return (like,)
-
-
@set_array_function_like_doc
@set_module('numpy')
def tri(N, M=None, k=0, dtype=float, *, like=None):
@@ -416,7 +406,7 @@ def tri(N, M=None, k=0, dtype=float, *, like=None):
"""
if like is not None:
- return _tri_with_like(N, M=M, k=k, dtype=dtype, like=like)
+ return _tri_with_like(like, N, M=M, k=k, dtype=dtype)
if M is None:
M = N
@@ -430,9 +420,7 @@ def tri(N, M=None, k=0, dtype=float, *, like=None):
return m
-_tri_with_like = array_function_dispatch(
- _tri_dispatcher
-)(tri)
+_tri_with_like = array_function_dispatch()(tri)
def _trilu_dispatcher(m, k=None):
@@ -766,7 +754,7 @@ def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
- >>> ax.images.append(im)
+ >>> ax.add_image(im)
>>> plt.show()
It is also possible to construct a 2-D histogram without specifying bin
@@ -995,9 +983,42 @@ def tril_indices_from(arr, k=0):
k : int, optional
Diagonal offset (see `tril` for details).
+ Examples
+ --------
+
+ Create a 4 by 4 array.
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Pass the array to get the indices of the lower triangular elements.
+
+ >>> trili = np.tril_indices_from(a)
+ >>> trili
+ (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+ >>> a[trili]
+ array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
+
+ This is syntactic sugar for tril_indices().
+
+ >>> np.tril_indices(a.shape[0])
+ (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+ Use the `k` parameter to return the indices for the lower triangular array
+ up to the k-th diagonal.
+
+ >>> trili1 = np.tril_indices_from(a, k=1)
+ >>> a[trili1]
+ array([ 0, 1, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15])
+
See Also
--------
- tril_indices, tril
+ tril_indices, tril, triu_indices_from
Notes
-----
@@ -1114,9 +1135,43 @@ def triu_indices_from(arr, k=0):
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
+ Examples
+ --------
+
+ Create a 4 by 4 array.
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Pass the array to get the indices of the upper triangular elements.
+
+ >>> triui = np.triu_indices_from(a)
+ >>> triui
+ (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+ >>> a[triui]
+ array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
+
+ This is syntactic sugar for triu_indices().
+
+ >>> np.triu_indices(a.shape[0])
+ (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+ Use the `k` parameter to return the indices for the upper triangular array
+ from the k-th diagonal.
+
+ >>> triuim1 = np.triu_indices_from(a, k=1)
+ >>> a[triuim1]
+ array([ 1, 2, 3, 6, 7, 11])
+
+
See Also
--------
- triu_indices, triu
+ triu_indices, triu, tril_indices_from
Notes
-----
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 94d525f51..3f84b80e5 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -2,17 +2,16 @@
"""
import functools
-import warnings
__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'isreal', 'nan_to_num', 'real', 'real_if_close',
'typename', 'asfarray', 'mintypecode',
'common_type']
+from .._utils import set_module
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, asanyarray, isnan, zeros
-from numpy.core.overrides import set_module
-from numpy.core import overrides
+from numpy.core import overrides, getlimits
from .ufunclike import isneginf, isposinf
@@ -541,7 +540,8 @@ def real_if_close(a, tol=100):
Input array.
tol : float
Tolerance in machine epsilons for the complex part of the elements
- in the array.
+ in the array. If the tolerance is <=1, then the absolute tolerance
+ is used.
Returns
-------
@@ -572,11 +572,11 @@ def real_if_close(a, tol=100):
"""
a = asanyarray(a)
- if not issubclass(a.dtype.type, _nx.complexfloating):
+ type_ = a.dtype.type
+ if not issubclass(type_, _nx.complexfloating):
return a
if tol > 1:
- from numpy.core import getlimits
- f = getlimits.finfo(a.dtype.type)
+ f = getlimits.finfo(type_)
tol = f.eps * tol
if _nx.all(_nx.absolute(a.imag) < tol):
a = a.real
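
A short sketch of the documented `tol` semantics: values above 1 are scaled by machine epsilon, while values at or below 1 act as an absolute tolerance.

    import numpy as np

    # tol=1000 means 1000 * eps (~2.2e-13 for float64): the tiny imaginary
    # parts are dropped.  tol=1e-20 is absolute and keeps them.
    a = np.array([2.1 + 4e-14j, 5.2 + 3e-15j])
    print(np.real_if_close(a, tol=1000))   # real-valued result
    print(np.real_if_close(a, tol=1e-20))  # unchanged complex array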
diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py
index a93c4773b..05fe60c5b 100644
--- a/numpy/lib/ufunclike.py
+++ b/numpy/lib/ufunclike.py
@@ -6,72 +6,16 @@ storing results in an output array.
__all__ = ['fix', 'isneginf', 'isposinf']
import numpy.core.numeric as nx
-from numpy.core.overrides import (
- array_function_dispatch, ARRAY_FUNCTION_ENABLED,
-)
+from numpy.core.overrides import array_function_dispatch
import warnings
import functools
-def _deprecate_out_named_y(f):
- """
- Allow the out argument to be passed as the name `y` (deprecated)
-
- In future, this decorator should be removed.
- """
- @functools.wraps(f)
- def func(x, out=None, **kwargs):
- if 'y' in kwargs:
- if 'out' in kwargs:
- raise TypeError(
- "{} got multiple values for argument 'out'/'y'"
- .format(f.__name__)
- )
- out = kwargs.pop('y')
- # NumPy 1.13.0, 2017-04-26
- warnings.warn(
- "The name of the out argument to {} has changed from `y` to "
- "`out`, to match other ufuncs.".format(f.__name__),
- DeprecationWarning, stacklevel=3)
- return f(x, out=out, **kwargs)
-
- return func
-
-
-def _fix_out_named_y(f):
- """
- Allow the out argument to be passed as the name `y` (deprecated)
-
- This decorator should only be used if _deprecate_out_named_y is used on
- a corresponding dispatcher function.
- """
- @functools.wraps(f)
- def func(x, out=None, **kwargs):
- if 'y' in kwargs:
- # we already did error checking in _deprecate_out_named_y
- out = kwargs.pop('y')
- return f(x, out=out, **kwargs)
-
- return func
-
-
-def _fix_and_maybe_deprecate_out_named_y(f):
- """
- Use the appropriate decorator, depending upon if dispatching is being used.
- """
- if ARRAY_FUNCTION_ENABLED:
- return _fix_out_named_y(f)
- else:
- return _deprecate_out_named_y(f)
-
-
-@_deprecate_out_named_y
def _dispatcher(x, out=None):
return (x, out)
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
-@_fix_and_maybe_deprecate_out_named_y
def fix(x, out=None):
"""
Round to nearest integer towards zero.
@@ -125,7 +69,6 @@ def fix(x, out=None):
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
-@_fix_and_maybe_deprecate_out_named_y
def isposinf(x, out=None):
"""
Test element-wise for positive infinity, return result as bool array.
@@ -197,7 +140,6 @@ def isposinf(x, out=None):
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
-@_fix_and_maybe_deprecate_out_named_y
def isneginf(x, out=None):
"""
Test element-wise for negative infinity, return result as bool array.
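
With the long-deprecated ``y=`` alias removed above, only the standard ``out=`` keyword remains. A quick sketch:

    import numpy as np

    # fix/isposinf/isneginf now take their output array only via out=.
    x = np.array([2.1, -1.7, np.inf])
    out = np.empty_like(x)
    np.fix(x, out=out)
    print(out)             # [ 2. -1. inf]
    print(np.isposinf(x))  # [False False  True]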
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index afde8cc60..095c914db 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -5,9 +5,10 @@ import types
import re
import warnings
import functools
+import platform
+from .._utils import set_module
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
-from numpy.core.overrides import set_module
from numpy.core import ndarray, ufunc, asarray
import numpy as np
@@ -24,6 +25,8 @@ def show_runtime():
including available intrinsic support and BLAS/LAPACK library
in use
+ .. versionadded:: 1.24.0
+
See Also
--------
show_config : Show libraries in the system on which NumPy was built.
@@ -31,45 +34,20 @@ def show_runtime():
Notes
-----
1. Information is derived with the help of `threadpoolctl <https://pypi.org/project/threadpoolctl/>`_
- library.
+ library, if it is available.
2. SIMD related information is derived from ``__cpu_features__``,
``__cpu_baseline__`` and ``__cpu_dispatch__``
- Examples
- --------
- >>> import numpy as np
- >>> np.show_runtime()
- [{'simd_extensions': {'baseline': ['SSE', 'SSE2', 'SSE3'],
- 'found': ['SSSE3',
- 'SSE41',
- 'POPCNT',
- 'SSE42',
- 'AVX',
- 'F16C',
- 'FMA3',
- 'AVX2'],
- 'not_found': ['AVX512F',
- 'AVX512CD',
- 'AVX512_KNL',
- 'AVX512_KNM',
- 'AVX512_SKX',
- 'AVX512_CLX',
- 'AVX512_CNL',
- 'AVX512_ICL']}},
- {'architecture': 'Zen',
- 'filepath': '/usr/lib/x86_64-linux-gnu/openblas-pthread/libopenblasp-r0.3.20.so',
- 'internal_api': 'openblas',
- 'num_threads': 12,
- 'prefix': 'libopenblas',
- 'threading_layer': 'pthreads',
- 'user_api': 'blas',
- 'version': '0.3.20'}]
"""
from numpy.core._multiarray_umath import (
__cpu_features__, __cpu_baseline__, __cpu_dispatch__
)
from pprint import pprint
- config_found = []
+ config_found = [{
+ "numpy_version": np.__version__,
+ "python": sys.version,
+ "uname": platform.uname(),
+ }]
features_found, features_not_found = [], []
for feature in __cpu_dispatch__:
if __cpu_features__[feature]:
@@ -550,15 +528,16 @@ def _info(obj, output=None):
@set_module('numpy')
def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
"""
- Get help information for a function, class, or module.
+ Get help information for an array, function, class, or module.
Parameters
----------
object : object or str, optional
- Input object or name to get information about. If `object` is a
- numpy object, its docstring is given. If it is a string, available
- modules are searched for matching objects. If None, information
- about `info` itself is returned.
+ Input object or name to get information about. If `object` is
+ an `ndarray` instance, information about the array is printed.
+ If `object` is a numpy object, its docstring is given. If it is
+ a string, available modules are searched for matching objects.
+ If None, information about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
@@ -597,6 +576,22 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
+ When the argument is an array, information about the array is printed.
+
+ >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64)
+ >>> np.info(a)
+ class: ndarray
+ shape: (2, 3)
+ strides: (24, 8)
+ itemsize: 8
+ aligned: True
+ contiguous: True
+ fortran: False
+ data pointer: 0x562b6e0d2860 # may vary
+ byteorder: little
+ byteswap: False
+ type: complex64
+
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
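
Usage of `show_runtime` is unchanged; per the edits above, the report now opens with a runtime-environment entry (NumPy version, Python version, ``platform.uname()``) before the SIMD and BLAS information:

    import numpy as np

    # Prints a list of dicts; the first entry identifies the runtime
    # environment, followed by SIMD extensions and (if threadpoolctl is
    # installed) BLAS/LAPACK details.
    np.show_runtime()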
diff --git a/numpy/lib/utils.pyi b/numpy/lib/utils.pyi
index 407ce1120..52ca92774 100644
--- a/numpy/lib/utils.pyi
+++ b/numpy/lib/utils.pyi
@@ -87,3 +87,5 @@ def lookfor(
) -> None: ...
def safe_eval(source: str | AST) -> Any: ...
+
+def show_runtime() -> None: ...