Diffstat (limited to 'numpy/core')
-rw-r--r--  numpy/core/_add_newdocs.py | 100
-rw-r--r--  numpy/core/_asarray.py | 2
-rw-r--r--  numpy/core/_asarray.pyi | 9
-rw-r--r--  numpy/core/_internal.pyi | 10
-rw-r--r--  numpy/core/_type_aliases.pyi | 14
-rw-r--r--  numpy/core/_ufunc_config.py | 2
-rw-r--r--  numpy/core/_ufunc_config.pyi | 29
-rw-r--r--  numpy/core/arrayprint.pyi | 97
-rw-r--r--  numpy/core/code_generators/generate_umath.py | 33
-rw-r--r--  numpy/core/code_generators/generate_umath_doc.py | 30
-rw-r--r--  numpy/core/code_generators/ufunc_docstrings.py | 72
-rw-r--r--  numpy/core/defchararray.pyi | 3
-rw-r--r--  numpy/core/einsumfunc.pyi | 27
-rw-r--r--  numpy/core/fromnumeric.py | 59
-rw-r--r--  numpy/core/fromnumeric.pyi | 118
-rw-r--r--  numpy/core/function_base.pyi | 185
-rw-r--r--  numpy/core/getlimits.pyi | 4
-rw-r--r--  numpy/core/include/numpy/experimental_dtype_api.h | 28
-rw-r--r--  numpy/core/include/numpy/ndarrayobject.h | 19
-rw-r--r--  numpy/core/include/numpy/ndarraytypes.h | 14
-rw-r--r--  numpy/core/include/numpy/noprefix.h | 1
-rw-r--r--  numpy/core/include/numpy/npy_1_7_deprecated_api.h | 1
-rw-r--r--  numpy/core/include/numpy/numpyconfig.h | 14
-rw-r--r--  numpy/core/memmap.pyi | 4
-rw-r--r--  numpy/core/multiarray.pyi | 157
-rw-r--r--  numpy/core/numeric.pyi | 34
-rw-r--r--  numpy/core/numerictypes.py | 2
-rw-r--r--  numpy/core/numerictypes.pyi | 101
-rw-r--r--  numpy/core/records.pyi | 65
-rw-r--r--  numpy/core/setup.py | 37
-rw-r--r--  numpy/core/shape_base.pyi | 11
-rw-r--r--  numpy/core/src/_simd/_simd_convert.inc | 1
-rw-r--r--  numpy/core/src/common/npy_binsearch.h | 31
-rw-r--r--  numpy/core/src/common/npy_binsearch.h.src | 144
-rw-r--r--  numpy/core/src/common/npy_config.h | 12
-rw-r--r--  numpy/core/src/common/npy_cpu_features.c.src | 37
-rw-r--r--  numpy/core/src/common/npy_cpu_features.h | 11
-rw-r--r--  numpy/core/src/common/npy_sort.h.src | 9
-rw-r--r--  numpy/core/src/common/numpy_tag.h | 147
-rw-r--r--  numpy/core/src/multiarray/_multiarray_tests.c.src | 30
-rw-r--r--  numpy/core/src/multiarray/array_method.c | 60
-rw-r--r--  numpy/core/src/multiarray/array_method.h | 3
-rw-r--r--  numpy/core/src/multiarray/arrayobject.c | 98
-rw-r--r--  numpy/core/src/multiarray/calculation.c | 2
-rw-r--r--  numpy/core/src/multiarray/common.h | 29
-rw-r--r--  numpy/core/src/multiarray/conversion_utils.c | 3
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.c | 312
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.h | 11
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 90
-rw-r--r--  numpy/core/src/multiarray/datetime.c | 33
-rw-r--r--  numpy/core/src/multiarray/dragon4.c | 13
-rw-r--r--  numpy/core/src/multiarray/dtype_transfer.c | 26
-rw-r--r--  numpy/core/src/multiarray/experimental_public_dtype_api.c | 2
-rw-r--r--  numpy/core/src/multiarray/flagsobject.c | 68
-rw-r--r--  numpy/core/src/multiarray/getset.c | 64
-rw-r--r--  numpy/core/src/multiarray/mapping.c | 2
-rw-r--r--  numpy/core/src/multiarray/methods.c | 80
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c | 22
-rw-r--r--  numpy/core/src/multiarray/scalartypes.c.src | 4
-rw-r--r--  numpy/core/src/multiarray/temp_elide.c | 2
-rw-r--r--  numpy/core/src/npymath/npy_math_internal.h.src | 3
-rw-r--r--  numpy/core/src/npysort/binsearch.c.src | 250
-rw-r--r--  numpy/core/src/npysort/binsearch.cpp | 387
-rw-r--r--  numpy/core/src/npysort/npysort_common.h | 18
-rw-r--r--  numpy/core/src/npysort/radixsort.cpp | 82
-rw-r--r--  numpy/core/src/umath/_operand_flag_tests.c (renamed from numpy/core/src/umath/_operand_flag_tests.c.src) | 0
-rw-r--r--  numpy/core/src/umath/_scaled_float_dtype.c | 21
-rw-r--r--  numpy/core/src/umath/dispatching.c | 443
-rw-r--r--  numpy/core/src/umath/dispatching.h | 14
-rw-r--r--  numpy/core/src/umath/legacy_array_method.c | 44
-rw-r--r--  numpy/core/src/umath/legacy_array_method.h | 2
-rw-r--r--  numpy/core/src/umath/loops_exponent_log.dispatch.c.src | 18
-rw-r--r--  numpy/core/src/umath/reduction.c | 34
-rw-r--r--  numpy/core/src/umath/ufunc_object.c | 89
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c | 2
-rw-r--r--  numpy/core/src/umath/umathmodule.c | 6
-rw-r--r--  numpy/core/tests/test_casting_unittests.py | 180
-rw-r--r--  numpy/core/tests/test_cpu_features.py | 11
-rw-r--r--  numpy/core/tests/test_datetime.py | 16
-rw-r--r--  numpy/core/tests/test_deprecations.py | 42
-rw-r--r--  numpy/core/tests/test_multiarray.py | 149
-rw-r--r--  numpy/core/tests/test_nditer.py | 2
-rw-r--r--  numpy/core/tests/test_scalarmath.py | 21
-rw-r--r--  numpy/core/tests/test_scalarprint.py | 1
-rw-r--r--  numpy/core/tests/test_ufunc.py | 74
-rw-r--r--  numpy/core/tests/test_umath.py | 23
-rw-r--r--  numpy/core/tests/test_umath_accuracy.py | 5
87 files changed, 2799 insertions, 1766 deletions
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 078c58976..219383e1e 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -2265,10 +2265,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
-add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
- """None."""))
-
-
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
@@ -2278,12 +2274,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack__',
"""a.__dlpack__(*, stream=None)
-
+
DLPack Protocol: Part of the Array API."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack_device__',
"""a.__dlpack_device__()
-
+
DLPack Protocol: Part of the Array API."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
@@ -2392,6 +2388,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
+ .. warning::
+
+ Setting ``arr.dtype`` is discouraged and may be deprecated in the
+ future. Setting will replace the ``dtype`` without modifying the
+ memory (see also `ndarray.view` and `ndarray.astype`).
+
Parameters
----------
None
@@ -2402,6 +2404,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
See Also
--------
+ ndarray.astype : Cast the values contained in the array to a new data-type.
+ ndarray.view : Create a view of the same data but a different data-type.
numpy.dtype
Examples
@@ -2477,11 +2481,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
This array is a copy of some other array. The C-API function
PyArray_ResolveWritebackIfCopy must be called before deallocating
the array, so that the base array will be updated with the contents of this array.
- UPDATEIFCOPY (U)
- (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
- When this array is
- deallocated, the base array will be updated with the contents of
- this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
@@ -2499,13 +2498,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
- Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
+ Only the WRITEBACKIFCOPY, WRITEABLE, and ALIGNED flags can be
changed by the user, via direct assignment to the attribute or dictionary
entry, or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- - UPDATEIFCOPY can only be set ``False``.
- WRITEBACKIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
@@ -2633,6 +2631,11 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
the array and the remaining dimensions. Reshaping an array in-place will
fail if a copy is required.
+ .. warning::
+
+ Setting ``arr.shape`` is discouraged and may be deprecated in the
+ future. Using `ndarray.reshape` is the preferred approach.
+
Examples
--------
>>> x = np.array([1, 2, 3, 4])
@@ -2658,8 +2661,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
See Also
--------
- numpy.reshape : similar function
- ndarray.reshape : similar method
+ numpy.shape : Equivalent getter function.
+ numpy.reshape : Function similar to setting ``shape``.
+ ndarray.reshape : Method similar to setting ``shape``.
"""))
@@ -2702,6 +2706,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
+ .. warning::
+
+ Setting ``arr.strides`` is discouraged and may be deprecated in the
+ future. `numpy.lib.stride_tricks.as_strided` should be preferred
+ to create a new view of the same data in a safer way.
+
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
@@ -2797,6 +2807,14 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
"""))
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
+ """a.__array_finalize__(obj, /)
+
+ Present so subclasses can call super. Does nothing.
+
+ """))
+
+
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(array[, context], /)
@@ -3906,13 +3924,13 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
- Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY),
+ Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY,
respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
- The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set
+ The WRITEBACKIFCOPY flag can never be set
to True. The flag WRITEABLE can only be set to True if the array owns its
own memory, or the ultimate owner of the memory exposes a writeable buffer
interface, or is a string. (The exception for string is made so that
@@ -3932,15 +3950,13 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 7 Boolean flags
in use, only four of which can be changed by the user:
- WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED.
+ WRITEBACKIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
- UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY;
-
WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
called, the base array will be updated with the contents of this array.
@@ -3964,7 +3980,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
- UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
@@ -3973,7 +3988,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
WRITEABLE : False
ALIGNED : False
WRITEBACKIFCOPY : False
- UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
@@ -4459,14 +4473,13 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
- bytes per entry than the previous dtype (for example, converting a
- regular array to a structured array), then the behavior of the view
- cannot be predicted just from the superficial appearance of ``a`` (shown
- by ``print(a)``). It also depends on exactly how ``a`` is stored in
- memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
- defined as a slice or transpose, etc., the view may give different
- results.
+ bytes per entry than the previous dtype (for example, converting a regular
+ array to a structured array), then the last axis of ``a`` must be
+ contiguous. This axis will be resized in the result.
+ .. versionchanged:: 1.23.0
+ Only the last axis needs to be contiguous. Previously, the entire array
+ had to be C-contiguous.
Examples
--------
@@ -4511,19 +4524,34 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
- >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
- >>> y = x[:, 0:2]
+ >>> x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16)
+ >>> y = x[:, ::2]
>>> y
- array([[1, 2],
- [4, 5]], dtype=int16)
+ array([[1, 3],
+ [4, 6]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
...
- ValueError: To change to a dtype of a different size, the array must be C-contiguous
+ ValueError: To change to a dtype of a different size, the last axis must be contiguous
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
- array([[(1, 2)],
- [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
+ array([[(1, 3)],
+ [(4, 6)]], dtype=[('width', '<i2'), ('length', '<i2')])
+
+ However, views that change dtype are totally fine for arrays with a
+ contiguous last axis, even if the rest of the axes are not C-contiguous:
+
+ >>> x = np.arange(2 * 3 * 4, dtype=np.int8).reshape(2, 3, 4)
+ >>> x.transpose(1, 0, 2).view(np.int16)
+ array([[[ 256, 770],
+ [3340, 3854]],
+ <BLANKLINE>
+ [[1284, 1798],
+ [4368, 4882]],
+ <BLANKLINE>
+ [[2312, 2826],
+ [5396, 5910]]], dtype=int16)
+
"""))
diff --git a/numpy/core/_asarray.py b/numpy/core/_asarray.py
index ecb4e7c39..89d422e99 100644
--- a/numpy/core/_asarray.py
+++ b/numpy/core/_asarray.py
@@ -78,7 +78,6 @@ def require(a, dtype=None, requirements=None, *, like=None):
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
- UPDATEIFCOPY : False
>>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
>>> y.flags
@@ -88,7 +87,6 @@ def require(a, dtype=None, requirements=None, *, like=None):
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
- UPDATEIFCOPY : False
"""
if like is not None:
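
For reference, a small ``np.require`` sketch matching the updated example output; the only visible difference after this change is that the flags listing no longer shows an UPDATEIFCOPY entry:

    import numpy as np

    x = np.arange(6).reshape(2, 3)
    y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
    print(y.flags['F_CONTIGUOUS'], y.flags['OWNDATA'], y.flags['WRITEABLE'])
    # True True True
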
diff --git a/numpy/core/_asarray.pyi b/numpy/core/_asarray.pyi
index fee9b7b6e..0da2de912 100644
--- a/numpy/core/_asarray.pyi
+++ b/numpy/core/_asarray.pyi
@@ -1,4 +1,5 @@
-from typing import TypeVar, Union, Iterable, overload, Literal
+from collections.abc import Iterable
+from typing import TypeVar, Union, overload, Literal
from numpy import ndarray
from numpy.typing import ArrayLike, DTypeLike
@@ -19,7 +20,7 @@ _RequirementsWithE = Union[_Requirements, _E]
def require(
a: _ArrayType,
dtype: None = ...,
- requirements: Union[None, _Requirements, Iterable[_Requirements]] = ...,
+ requirements: None | _Requirements | Iterable[_Requirements] = ...,
*,
like: ArrayLike = ...
) -> _ArrayType: ...
@@ -27,7 +28,7 @@ def require(
def require(
a: object,
dtype: DTypeLike = ...,
- requirements: Union[_E, Iterable[_RequirementsWithE]] = ...,
+ requirements: _E | Iterable[_RequirementsWithE] = ...,
*,
like: ArrayLike = ...
) -> ndarray: ...
@@ -35,7 +36,7 @@ def require(
def require(
a: object,
dtype: DTypeLike = ...,
- requirements: Union[None, _Requirements, Iterable[_Requirements]] = ...,
+ requirements: None | _Requirements | Iterable[_Requirements] = ...,
*,
like: ArrayLike = ...
) -> ndarray: ...
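
The stub changes here and in the .pyi files below all follow one pattern: ``typing.Optional``/``typing.Union`` become PEP 604 unions (``None | X``), ``typing.List``/``typing.Type`` become the PEP 585 builtin generics (``list``, ``type``), and abstract collections move to ``collections.abc``. A hedged sketch of the pattern, using a hypothetical simplified signature modeled on ``np.require``:

    from __future__ import annotations  # lets the new syntax run on Python < 3.10

    from collections.abc import Iterable
    from typing import Optional, Union

    # Old spelling, as removed in these stubs:
    def require_old(requirements: Optional[Union[str, Iterable[str]]] = None) -> None: ...

    # New spelling used throughout the stubs; in .pyi files type checkers
    # accept it regardless of the runtime Python version:
    def require_new(requirements: None | str | Iterable[str] = None) -> None: ...
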
diff --git a/numpy/core/_internal.pyi b/numpy/core/_internal.pyi
index f4bfd770f..8a25ef2cb 100644
--- a/numpy/core/_internal.pyi
+++ b/numpy/core/_internal.pyi
@@ -1,4 +1,4 @@
-from typing import Any, TypeVar, Type, overload, Optional, Generic
+from typing import Any, TypeVar, overload, Generic
import ctypes as ct
from numpy import ndarray
@@ -6,7 +6,7 @@ from numpy.ctypeslib import c_intp
_CastT = TypeVar("_CastT", bound=ct._CanCastTo) # Copied from `ctypes.cast`
_CT = TypeVar("_CT", bound=ct._CData)
-_PT = TypeVar("_PT", bound=Optional[int])
+_PT = TypeVar("_PT", bound=None | int)
# TODO: Let the likes of `shape_as` and `strides_as` return `None`
# for 0D arrays once we've got shape-support
@@ -25,6 +25,6 @@ class _ctypes(Generic[_PT]):
@property
def _as_parameter_(self) -> ct.c_void_p: ...
- def data_as(self, obj: Type[_CastT]) -> _CastT: ...
- def shape_as(self, obj: Type[_CT]) -> ct.Array[_CT]: ...
- def strides_as(self, obj: Type[_CT]) -> ct.Array[_CT]: ...
+ def data_as(self, obj: type[_CastT]) -> _CastT: ...
+ def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
+ def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
diff --git a/numpy/core/_type_aliases.pyi b/numpy/core/_type_aliases.pyi
index c10d072f9..bbead0cb5 100644
--- a/numpy/core/_type_aliases.pyi
+++ b/numpy/core/_type_aliases.pyi
@@ -1,13 +1,13 @@
-from typing import Dict, Union, Type, List, TypedDict
+from typing import TypedDict
from numpy import generic, signedinteger, unsignedinteger, floating, complexfloating
class _SCTypes(TypedDict):
- int: List[Type[signedinteger]]
- uint: List[Type[unsignedinteger]]
- float: List[Type[floating]]
- complex: List[Type[complexfloating]]
- others: List[type]
+ int: list[type[signedinteger]]
+ uint: list[type[unsignedinteger]]
+ float: list[type[floating]]
+ complex: list[type[complexfloating]]
+ others: list[type]
-sctypeDict: Dict[Union[int, str], Type[generic]]
+sctypeDict: dict[int | str, type[generic]]
sctypes: _SCTypes
diff --git a/numpy/core/_ufunc_config.py b/numpy/core/_ufunc_config.py
index b40e7445e..a731f6bf7 100644
--- a/numpy/core/_ufunc_config.py
+++ b/numpy/core/_ufunc_config.py
@@ -290,7 +290,7 @@ def seterrcall(func):
>>> save_err = np.seterr(all='log')
>>> np.array([1, 2, 3]) / 0.0
- LOG: Warning: divide by zero encountered in true_divide
+ LOG: Warning: divide by zero encountered in divide
array([inf, inf, inf])
>>> np.seterrcall(saved_handler)
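
The one-line change above just tracks the ufunc rename: messages written to a log object now read "encountered in divide" rather than "encountered in true_divide". For reference, a minimal sketch of the related ``'call'`` mode of the same error machinery:

    import numpy as np

    def err_handler(err_type, flag):
        # Invoked for floating-point errors routed to the 'call' mode.
        print("Floating point error (%s), with flag %s" % (err_type, flag))

    saved_handler = np.seterrcall(err_handler)
    old_err_state = np.seterr(divide='call')
    np.array([1.0, 2.0, 3.0]) / 0.0
    # Floating point error (divide by zero), with flag 1
    np.seterr(**old_err_state)
    np.seterrcall(saved_handler)
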
diff --git a/numpy/core/_ufunc_config.pyi b/numpy/core/_ufunc_config.pyi
index cd7129bcb..b7c2ebefc 100644
--- a/numpy/core/_ufunc_config.pyi
+++ b/numpy/core/_ufunc_config.pyi
@@ -1,4 +1,5 @@
-from typing import Optional, Union, Callable, Any, Literal, TypedDict
+from collections.abc import Callable
+from typing import Any, Literal, TypedDict
from numpy import _SupportsWrite
@@ -12,25 +13,25 @@ class _ErrDict(TypedDict):
invalid: _ErrKind
class _ErrDictOptional(TypedDict, total=False):
- all: Optional[_ErrKind]
- divide: Optional[_ErrKind]
- over: Optional[_ErrKind]
- under: Optional[_ErrKind]
- invalid: Optional[_ErrKind]
+ all: None | _ErrKind
+ divide: None | _ErrKind
+ over: None | _ErrKind
+ under: None | _ErrKind
+ invalid: None | _ErrKind
def seterr(
- all: Optional[_ErrKind] = ...,
- divide: Optional[_ErrKind] = ...,
- over: Optional[_ErrKind] = ...,
- under: Optional[_ErrKind] = ...,
- invalid: Optional[_ErrKind] = ...,
+ all: None | _ErrKind = ...,
+ divide: None | _ErrKind = ...,
+ over: None | _ErrKind = ...,
+ under: None | _ErrKind = ...,
+ invalid: None | _ErrKind = ...,
) -> _ErrDict: ...
def geterr() -> _ErrDict: ...
def setbufsize(size: int) -> int: ...
def getbufsize() -> int: ...
def seterrcall(
- func: Union[None, _ErrFunc, _SupportsWrite[str]]
-) -> Union[None, _ErrFunc, _SupportsWrite[str]]: ...
-def geterrcall() -> Union[None, _ErrFunc, _SupportsWrite[str]]: ...
+ func: None | _ErrFunc | _SupportsWrite[str]
+) -> None | _ErrFunc | _SupportsWrite[str]: ...
+def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]: ...
# See `numpy/__init__.pyi` for the `errstate` class
diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi
index 0d338206f..996d4c782 100644
--- a/numpy/core/arrayprint.pyi
+++ b/numpy/core/arrayprint.pyi
@@ -1,5 +1,6 @@
from types import TracebackType
-from typing import Any, Optional, Callable, Union, Type, Literal, TypedDict, SupportsIndex
+from collections.abc import Callable
+from typing import Any, Literal, TypedDict, SupportsIndex
# Using a private class is by no means ideal, but it is simply a consequence
# of a `contextlib.context` returning an instance of aforementioned class
@@ -50,92 +51,92 @@ class _FormatOptions(TypedDict):
suppress: bool
nanstr: str
infstr: str
- formatter: Optional[_FormatDict]
+ formatter: None | _FormatDict
sign: Literal["-", "+", " "]
floatmode: _FloatMode
legacy: Literal[False, "1.13", "1.21"]
def set_printoptions(
- precision: Optional[SupportsIndex] = ...,
- threshold: Optional[int] = ...,
- edgeitems: Optional[int] = ...,
- linewidth: Optional[int] = ...,
- suppress: Optional[bool] = ...,
- nanstr: Optional[str] = ...,
- infstr: Optional[str] = ...,
- formatter: Optional[_FormatDict] = ...,
- sign: Optional[Literal["-", "+", " "]] = ...,
- floatmode: Optional[_FloatMode] = ...,
+ precision: None | SupportsIndex = ...,
+ threshold: None | int = ...,
+ edgeitems: None | int = ...,
+ linewidth: None | int = ...,
+ suppress: None | bool = ...,
+ nanstr: None | str = ...,
+ infstr: None | str = ...,
+ formatter: None | _FormatDict = ...,
+ sign: Literal[None, "-", "+", " "] = ...,
+ floatmode: None | _FloatMode = ...,
*,
- legacy: Optional[Literal[False, "1.13", "1.21"]] = ...
+ legacy: Literal[None, False, "1.13", "1.21"] = ...
) -> None: ...
def get_printoptions() -> _FormatOptions: ...
def array2string(
a: ndarray[Any, Any],
- max_line_width: Optional[int] = ...,
- precision: Optional[SupportsIndex] = ...,
- suppress_small: Optional[bool] = ...,
+ max_line_width: None | int = ...,
+ precision: None | SupportsIndex = ...,
+ suppress_small: None | bool = ...,
separator: str = ...,
prefix: str = ...,
# NOTE: With the `style` argument being deprecated,
# all arguments between `formatter` and `suffix` are de facto
# keyword-only arguments
*,
- formatter: Optional[_FormatDict] = ...,
- threshold: Optional[int] = ...,
- edgeitems: Optional[int] = ...,
- sign: Optional[Literal["-", "+", " "]] = ...,
- floatmode: Optional[_FloatMode] = ...,
+ formatter: None | _FormatDict = ...,
+ threshold: None | int = ...,
+ edgeitems: None | int = ...,
+ sign: Literal[None, "-", "+", " "] = ...,
+ floatmode: None | _FloatMode = ...,
suffix: str = ...,
- legacy: Optional[Literal[False, "1.13", "1.21"]] = ...,
+ legacy: Literal[None, False, "1.13", "1.21"] = ...,
) -> str: ...
def format_float_scientific(
x: _FloatLike_co,
- precision: Optional[int] = ...,
+ precision: None | int = ...,
unique: bool = ...,
trim: Literal["k", ".", "0", "-"] = ...,
sign: bool = ...,
- pad_left: Optional[int] = ...,
- exp_digits: Optional[int] = ...,
- min_digits: Optional[int] = ...,
+ pad_left: None | int = ...,
+ exp_digits: None | int = ...,
+ min_digits: None | int = ...,
) -> str: ...
def format_float_positional(
x: _FloatLike_co,
- precision: Optional[int] = ...,
+ precision: None | int = ...,
unique: bool = ...,
fractional: bool = ...,
trim: Literal["k", ".", "0", "-"] = ...,
sign: bool = ...,
- pad_left: Optional[int] = ...,
- pad_right: Optional[int] = ...,
- min_digits: Optional[int] = ...,
+ pad_left: None | int = ...,
+ pad_right: None | int = ...,
+ min_digits: None | int = ...,
) -> str: ...
def array_repr(
arr: ndarray[Any, Any],
- max_line_width: Optional[int] = ...,
- precision: Optional[SupportsIndex] = ...,
- suppress_small: Optional[bool] = ...,
+ max_line_width: None | int = ...,
+ precision: None | SupportsIndex = ...,
+ suppress_small: None | bool = ...,
) -> str: ...
def array_str(
a: ndarray[Any, Any],
- max_line_width: Optional[int] = ...,
- precision: Optional[SupportsIndex] = ...,
- suppress_small: Optional[bool] = ...,
+ max_line_width: None | int = ...,
+ precision: None | SupportsIndex = ...,
+ suppress_small: None | bool = ...,
) -> str: ...
def set_string_function(
- f: Optional[Callable[[ndarray[Any, Any]], str]], repr: bool = ...
+ f: None | Callable[[ndarray[Any, Any]], str], repr: bool = ...
) -> None: ...
def printoptions(
- precision: Optional[SupportsIndex] = ...,
- threshold: Optional[int] = ...,
- edgeitems: Optional[int] = ...,
- linewidth: Optional[int] = ...,
- suppress: Optional[bool] = ...,
- nanstr: Optional[str] = ...,
- infstr: Optional[str] = ...,
- formatter: Optional[_FormatDict] = ...,
- sign: Optional[Literal["-", "+", " "]] = ...,
- floatmode: Optional[_FloatMode] = ...,
+ precision: None | SupportsIndex = ...,
+ threshold: None | int = ...,
+ edgeitems: None | int = ...,
+ linewidth: None | int = ...,
+ suppress: None | bool = ...,
+ nanstr: None | str = ...,
+ infstr: None | str = ...,
+ formatter: None | _FormatDict = ...,
+ sign: Literal[None, "-", "+", " "] = ...,
+ floatmode: None | _FloatMode = ...,
*,
- legacy: Optional[Literal[False, "1.13", "1.21"]] = ...
+ legacy: Literal[None, False, "1.13", "1.21"] = ...
) -> _GeneratorContextManager[_FormatOptions]: ...
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index dc71fc5c9..054150b28 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -4,10 +4,6 @@ import struct
import sys
import textwrap
-sys.path.insert(0, os.path.dirname(__file__))
-import ufunc_docstrings as docstrings
-sys.path.pop(0)
-
Zero = "PyLong_FromLong(0)"
One = "PyLong_FromLong(1)"
True_ = "(Py_INCREF(Py_True), Py_True)"
@@ -17,6 +13,16 @@ AllOnes = "PyLong_FromLong(-1)"
MinusInfinity = 'PyFloat_FromDouble(-NPY_INFINITY)'
ReorderableNone = "(Py_INCREF(Py_None), Py_None)"
+class docstrings:
+ @staticmethod
+ def get(place):
+ """
+ Returns the C #define name for the docstring of the given ufunc
+ place. The #defines are generated by generate_umath_doc.py into a
+ separate C header.
+ """
+ return 'DOC_' + place.upper().replace('.', '_')
+
# Sentinel value to specify using the full type description in the
# function name
class FullTypeDescr:
@@ -322,7 +328,7 @@ defdict = {
],
TD(O, f='PyNumber_Multiply'),
),
-#'divide' : aliased to true_divide in umathmodule.c:initumath
+#'true_divide' : aliased to divide in umathmodule.c:initumath
'floor_divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.floor_divide'),
@@ -336,9 +342,9 @@ defdict = {
],
TD(O, f='PyNumber_FloorDivide'),
),
-'true_divide':
+'divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
- docstrings.get('numpy.core.umath.true_divide'),
+ docstrings.get('numpy.core.umath.divide'),
'PyUFunc_TrueDivisionTypeResolver',
TD(flts+cmplx, cfunc_alias='divide', dispatch=[('loops_arithm_fp', 'fd')]),
[TypeDescription('m', FullTypeDescr, 'mq', 'm', cfunc_alias='divide'),
@@ -1153,14 +1159,6 @@ def make_ufuncs(funcdict):
for name in names:
uf = funcdict[name]
mlist = []
- docstring = textwrap.dedent(uf.docstring).strip()
- docstring = docstring.encode('unicode-escape').decode('ascii')
- docstring = docstring.replace(r'"', r'\"')
- docstring = docstring.replace(r"'", r"\'")
- # Split the docstring because some compilers (like MS) do not like big
- # string literal in C code. We split at endlines because textwrap.wrap
- # do not play well with \n
- docstring = '\\n\"\"'.join(docstring.split(r"\n"))
if uf.signature is None:
sig = "NULL"
else:
@@ -1173,7 +1171,7 @@ def make_ufuncs(funcdict):
f = PyUFunc_FromFuncAndDataAndSignatureAndIdentity(
{name}_functions, {name}_data, {name}_signatures, {nloops},
{nin}, {nout}, {identity}, "{name}",
- "{doc}", 0, {sig}, identity
+ {doc}, 0, {sig}, identity
);
if ({has_identity}) {{
Py_DECREF(identity);
@@ -1188,7 +1186,7 @@ def make_ufuncs(funcdict):
has_identity='0' if uf.identity is None_ else '1',
identity='PyUFunc_IdentityValue',
identity_expr=uf.identity,
- doc=docstring,
+ doc=uf.docstring,
sig=sig,
)
@@ -1224,6 +1222,7 @@ def make_code(funcdict, filename):
#include "loops.h"
#include "matmul.h"
#include "clip.h"
+ #include "_umath_doc_generated.h"
%s
static int
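
Instead of embedding the escaped docstring text, ``make_ufuncs`` now emits the name of a C #define; the small ``docstrings.get`` shim added above only builds that name. A standalone sketch of the mapping it performs:

    def get(place):
        # Same transformation as the ``docstrings.get`` shim above.
        return 'DOC_' + place.upper().replace('.', '_')

    print(get('numpy.core.umath.divide'))   # DOC_NUMPY_CORE_UMATH_DIVIDE
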
diff --git a/numpy/core/code_generators/generate_umath_doc.py b/numpy/core/code_generators/generate_umath_doc.py
new file mode 100644
index 000000000..9888730fd
--- /dev/null
+++ b/numpy/core/code_generators/generate_umath_doc.py
@@ -0,0 +1,30 @@
+import sys
+import os
+import textwrap
+
+sys.path.insert(0, os.path.dirname(__file__))
+import ufunc_docstrings as docstrings
+sys.path.pop(0)
+
+def normalize_doc(docstring):
+ docstring = textwrap.dedent(docstring).strip()
+ docstring = docstring.encode('unicode-escape').decode('ascii')
+ docstring = docstring.replace(r'"', r'\"')
+ docstring = docstring.replace(r"'", r"\'")
+ # Split the docstring because some compilers (like MS) do not like big
+ # string literal in C code. We split at endlines because textwrap.wrap
+ # do not play well with \n
+ docstring = '\\n\"\"'.join(docstring.split(r"\n"))
+ return docstring
+
+def write_code(target):
+ with open(target, 'w') as fid:
+ fid.write(
+ "#ifndef NUMPY_CORE_INCLUDE__UMATH_DOC_GENERATED_H_\n"
+ "#define NUMPY_CORE_INCLUDE__UMATH_DOC_GENERATED_H_\n"
+ )
+ for place, string in docstrings.docdict.items():
+ cdef_name = f"DOC_{place.upper().replace('.', '_')}"
+ cdef_str = normalize_doc(string)
+ fid.write(f"#define {cdef_name} \"{cdef_str}\"\n")
+ fid.write("#endif //NUMPY_CORE_INCLUDE__UMATH_DOC_GENERATED_H\n")
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index cd584eea7..24e2eef3f 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -4,18 +4,15 @@ Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
-numpy/core/code_generators/generate_umath.py to generate the docstrings
-for the ufuncs in numpy.core at the C level when the ufuncs are created
-at compile time.
+numpy/core/code_generators/generate_umath_doc.py to generate the docstrings
+as C #definitions for the ufuncs in numpy.core at the C level when the
+ufuncs are created at compile time.
"""
import textwrap
docdict = {}
-def get(name):
- return docdict.get(name)
-
# common parameter text to all ufuncs
subst = {
'PARAMS': textwrap.dedent("""
@@ -1089,9 +1086,8 @@ add_newdoc('numpy.core.umath', 'divide',
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
- Behavior on division by zero can be changed using ``seterr``.
-
- Behaves like ``true_divide``.
+ The ``true_divide(x1, x2)`` function is an alias for
+ ``divide(x1, x2)``.
Examples
--------
@@ -1100,13 +1096,9 @@ add_newdoc('numpy.core.umath', 'divide',
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
- array([[ NaN, 1. , 1. ],
- [ Inf, 4. , 2.5],
- [ Inf, 7. , 4. ]])
-
- >>> ignored_states = np.seterr(**old_err_state)
- >>> np.divide(1, 0)
- 0
+ array([[nan, 1. , 1. ],
+ [inf, 4. , 2.5],
+ [inf, 7. , 4. ]])
The ``/`` operator can be used as a shorthand for ``np.divide`` on
ndarrays.
@@ -4052,54 +4044,6 @@ add_newdoc('numpy.core.umath', 'tanh',
""")
-add_newdoc('numpy.core.umath', 'true_divide',
- """
- Returns a true division of the inputs, element-wise.
-
- Unlike 'floor division', true division adjusts the output type
- to present the best answer, regardless of input types.
-
- Parameters
- ----------
- x1 : array_like
- Dividend array.
- x2 : array_like
- Divisor array.
- $BROADCASTABLE_2
- $PARAMS
-
- Returns
- -------
- out : ndarray or scalar
- $OUT_SCALAR_2
-
- Notes
- -----
- In Python, ``//`` is the floor division operator and ``/`` the
- true division operator. The ``true_divide(x1, x2)`` function is
- equivalent to true division in Python.
-
- Examples
- --------
- >>> x = np.arange(5)
- >>> np.true_divide(x, 4)
- array([ 0. , 0.25, 0.5 , 0.75, 1. ])
-
- >>> x/4
- array([ 0. , 0.25, 0.5 , 0.75, 1. ])
-
- >>> x//4
- array([0, 0, 0, 0, 1])
-
- The ``/`` operator can be used as a shorthand for ``np.true_divide`` on
- ndarrays.
-
- >>> x = np.arange(5)
- >>> x / 4
- array([0. , 0.25, 0.5 , 0.75, 1. ])
-
- """)
-
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
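
With the dedicated ``true_divide`` docstring dropped, ``divide`` now carries the documentation and ``true_divide`` survives only as an alias. A quick check, assuming a NumPy release containing this change:

    import numpy as np

    x = np.arange(5)
    print(np.divide(x, 4))               # [0.   0.25 0.5  0.75 1.  ]
    print(x / 4)                         # same result via the operator
    print(np.true_divide is np.divide)   # True: one ufunc, two names
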
diff --git a/numpy/core/defchararray.pyi b/numpy/core/defchararray.pyi
index 28d247b05..250706eb1 100644
--- a/numpy/core/defchararray.pyi
+++ b/numpy/core/defchararray.pyi
@@ -3,7 +3,6 @@ from typing import (
overload,
TypeVar,
Any,
- List,
)
from numpy import (
@@ -30,7 +29,7 @@ from numpy.core.multiarray import compare_chararrays as compare_chararrays
_SCT = TypeVar("_SCT", str_, bytes_)
_CharArray = chararray[Any, dtype[_SCT]]
-__all__: List[str]
+__all__: list[str]
# Comparison
@overload
diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi
index aabb04c47..278fa2044 100644
--- a/numpy/core/einsumfunc.pyi
+++ b/numpy/core/einsumfunc.pyi
@@ -1,4 +1,5 @@
-from typing import List, TypeVar, Optional, Any, overload, Union, Tuple, Sequence, Literal
+from collections.abc import Sequence
+from typing import TypeVar, Any, overload, Union, Literal
from numpy import (
ndarray,
@@ -30,13 +31,11 @@ _ArrayType = TypeVar(
bound=ndarray[Any, dtype[Union[bool_, number[Any]]]],
)
-_OptimizeKind = Union[
- None, bool, Literal["greedy", "optimal"], Sequence[Any]
-]
+_OptimizeKind = None | bool | Literal["greedy", "optimal"] | Sequence[Any]
_CastingSafe = Literal["no", "equiv", "safe", "same_kind"]
_CastingUnsafe = Literal["unsafe"]
-__all__: List[str]
+__all__: list[str]
# TODO: Properly handle the `casting`-based combinatorics
# TODO: We need to evaluate the content `__subscripts` in order
@@ -50,7 +49,7 @@ def einsum(
/,
*operands: _ArrayLikeBool_co,
out: None = ...,
- dtype: Optional[_DTypeLikeBool] = ...,
+ dtype: None | _DTypeLikeBool = ...,
order: _OrderKACF = ...,
casting: _CastingSafe = ...,
optimize: _OptimizeKind = ...,
@@ -61,7 +60,7 @@ def einsum(
/,
*operands: _ArrayLikeUInt_co,
out: None = ...,
- dtype: Optional[_DTypeLikeUInt] = ...,
+ dtype: None | _DTypeLikeUInt = ...,
order: _OrderKACF = ...,
casting: _CastingSafe = ...,
optimize: _OptimizeKind = ...,
@@ -72,7 +71,7 @@ def einsum(
/,
*operands: _ArrayLikeInt_co,
out: None = ...,
- dtype: Optional[_DTypeLikeInt] = ...,
+ dtype: None | _DTypeLikeInt = ...,
order: _OrderKACF = ...,
casting: _CastingSafe = ...,
optimize: _OptimizeKind = ...,
@@ -83,7 +82,7 @@ def einsum(
/,
*operands: _ArrayLikeFloat_co,
out: None = ...,
- dtype: Optional[_DTypeLikeFloat] = ...,
+ dtype: None | _DTypeLikeFloat = ...,
order: _OrderKACF = ...,
casting: _CastingSafe = ...,
optimize: _OptimizeKind = ...,
@@ -94,7 +93,7 @@ def einsum(
/,
*operands: _ArrayLikeComplex_co,
out: None = ...,
- dtype: Optional[_DTypeLikeComplex] = ...,
+ dtype: None | _DTypeLikeComplex = ...,
order: _OrderKACF = ...,
casting: _CastingSafe = ...,
optimize: _OptimizeKind = ...,
@@ -105,7 +104,7 @@ def einsum(
/,
*operands: Any,
casting: _CastingUnsafe,
- dtype: Optional[_DTypeLikeComplex_co] = ...,
+ dtype: None | _DTypeLikeComplex_co = ...,
out: None = ...,
order: _OrderKACF = ...,
optimize: _OptimizeKind = ...,
@@ -116,7 +115,7 @@ def einsum(
/,
*operands: _ArrayLikeComplex_co,
out: _ArrayType,
- dtype: Optional[_DTypeLikeComplex_co] = ...,
+ dtype: None | _DTypeLikeComplex_co = ...,
order: _OrderKACF = ...,
casting: _CastingSafe = ...,
optimize: _OptimizeKind = ...,
@@ -128,7 +127,7 @@ def einsum(
*operands: Any,
out: _ArrayType,
casting: _CastingUnsafe,
- dtype: Optional[_DTypeLikeComplex_co] = ...,
+ dtype: None | _DTypeLikeComplex_co = ...,
order: _OrderKACF = ...,
optimize: _OptimizeKind = ...,
) -> _ArrayType: ...
@@ -142,4 +141,4 @@ def einsum_path(
/,
*operands: _ArrayLikeComplex_co,
optimize: _OptimizeKind = ...,
-) -> Tuple[List[Any], str]: ...
+) -> tuple[list[Any], str]: ...
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 3242124ac..f26f306fa 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -17,7 +17,7 @@ _dt_ = nt.sctype2char
# functions that are methods
__all__ = [
- 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
+ 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
@@ -1980,25 +1980,27 @@ def shape(a):
See Also
--------
- len
+ len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with
+ ``N>=1``.
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
- >>> np.shape([[1, 2]])
+ >>> np.shape([[1, 3]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
- >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ >>> a = np.array([(1, 2), (3, 4), (5, 6)],
+ ... dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
- (2,)
+ (3,)
>>> a.shape
- (2,)
+ (3,)
"""
try:
@@ -2917,51 +2919,6 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
keepdims=keepdims, initial=initial, where=where)
-def _alen_dispathcer(a):
- return (a,)
-
-
-@array_function_dispatch(_alen_dispathcer)
-def alen(a):
- """
- Return the length of the first dimension of the input array.
-
- .. deprecated:: 1.18
- `numpy.alen` is deprecated, use `len` instead.
-
- Parameters
- ----------
- a : array_like
- Input array.
-
- Returns
- -------
- alen : int
- Length of the first dimension of `a`.
-
- See Also
- --------
- shape, size
-
- Examples
- --------
- >>> a = np.zeros((7,4,5))
- >>> a.shape[0]
- 7
- >>> np.alen(a)
- 7
-
- """
- # NumPy 1.18.0, 2019-08-02
- warnings.warn(
- "`np.alen` is deprecated, use `len` instead",
- DeprecationWarning, stacklevel=2)
- try:
- return len(a)
- except TypeError:
- return len(array(a, ndmin=1))
-
-
def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
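
Two documentation-level points from this hunk, illustrated below: ``np.shape`` of a structured array counts records (not fields), and plain ``len`` replaces the removed ``np.alen``:

    import numpy as np

    a = np.array([(1, 2), (3, 4), (5, 6)], dtype=[('x', 'i4'), ('y', 'i4')])
    print(np.shape(a), len(a))        # (3,) 3 -- len(a) == np.shape(a)[0]

    # np.alen (deprecated since 1.18) is gone; use len instead.
    print(len(np.zeros((7, 4, 5))))   # 7
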
diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi
index 3cbe1d5c5..472c4ded5 100644
--- a/numpy/core/fromnumeric.pyi
+++ b/numpy/core/fromnumeric.pyi
@@ -1,5 +1,6 @@
import datetime as dt
-from typing import Optional, Union, Sequence, Tuple, Any, overload, TypeVar, Literal
+from collections.abc import Sequence
+from typing import Union, Any, overload, TypeVar, Literal
from numpy import (
ndarray,
@@ -18,6 +19,7 @@ from numpy import (
from numpy.typing import (
DTypeLike,
ArrayLike,
+ NDArray,
_ShapeLike,
_Shape,
_ArrayLikeBool_co,
@@ -47,8 +49,8 @@ _Number = TypeVar("_Number", bound=number)
def take(
a: ArrayLike,
indices: _ArrayLikeInt_co,
- axis: Optional[int] = ...,
- out: Optional[ndarray] = ...,
+ axis: None | int = ...,
+ out: None | ndarray = ...,
mode: _ModeKind = ...,
) -> Any: ...
@@ -61,14 +63,14 @@ def reshape(
def choose(
a: _ArrayLikeInt_co,
choices: ArrayLike,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
mode: _ModeKind = ...,
) -> Any: ...
def repeat(
a: ArrayLike,
repeats: _ArrayLikeInt_co,
- axis: Optional[int] = ...,
+ axis: None | int = ...,
) -> ndarray: ...
def put(
@@ -86,52 +88,52 @@ def swapaxes(
def transpose(
a: ArrayLike,
- axes: Union[None, Sequence[int], ndarray] = ...
+ axes: None | Sequence[int] | NDArray[Any] = ...
) -> ndarray: ...
def partition(
a: ArrayLike,
kth: _ArrayLikeInt_co,
- axis: Optional[int] = ...,
+ axis: None | int = ...,
kind: _PartitionKind = ...,
- order: Union[None, str, Sequence[str]] = ...,
+ order: None | str | Sequence[str] = ...,
) -> ndarray: ...
def argpartition(
a: ArrayLike,
kth: _ArrayLikeInt_co,
- axis: Optional[int] = ...,
+ axis: None | int = ...,
kind: _PartitionKind = ...,
- order: Union[None, str, Sequence[str]] = ...,
+ order: None | str | Sequence[str] = ...,
) -> Any: ...
def sort(
a: ArrayLike,
- axis: Optional[int] = ...,
- kind: Optional[_SortKind] = ...,
- order: Union[None, str, Sequence[str]] = ...,
+ axis: None | int = ...,
+ kind: None | _SortKind = ...,
+ order: None | str | Sequence[str] = ...,
) -> ndarray: ...
def argsort(
a: ArrayLike,
- axis: Optional[int] = ...,
- kind: Optional[_SortKind] = ...,
- order: Union[None, str, Sequence[str]] = ...,
+ axis: None | int = ...,
+ kind: None | _SortKind = ...,
+ order: None | str | Sequence[str] = ...,
) -> ndarray: ...
@overload
def argmax(
a: ArrayLike,
axis: None = ...,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
*,
keepdims: Literal[False] = ...,
) -> intp: ...
@overload
def argmax(
a: ArrayLike,
- axis: Optional[int] = ...,
- out: Optional[ndarray] = ...,
+ axis: None | int = ...,
+ out: None | ndarray = ...,
*,
keepdims: bool = ...,
) -> Any: ...
@@ -140,15 +142,15 @@ def argmax(
def argmin(
a: ArrayLike,
axis: None = ...,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
*,
keepdims: Literal[False] = ...,
) -> intp: ...
@overload
def argmin(
a: ArrayLike,
- axis: Optional[int] = ...,
- out: Optional[ndarray] = ...,
+ axis: None | int = ...,
+ out: None | ndarray = ...,
*,
keepdims: bool = ...,
) -> Any: ...
@@ -158,14 +160,14 @@ def searchsorted(
a: ArrayLike,
v: _Scalar,
side: _SortSide = ...,
- sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array
+ sorter: None | _ArrayLikeInt_co = ..., # 1D int array
) -> intp: ...
@overload
def searchsorted(
a: ArrayLike,
v: ArrayLike,
side: _SortSide = ...,
- sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array
+ sorter: None | _ArrayLikeInt_co = ..., # 1D int array
) -> ndarray: ...
def resize(
@@ -176,12 +178,12 @@ def resize(
@overload
def squeeze(
a: _ScalarGeneric,
- axis: Optional[_ShapeLike] = ...,
+ axis: None | _ShapeLike = ...,
) -> _ScalarGeneric: ...
@overload
def squeeze(
a: ArrayLike,
- axis: Optional[_ShapeLike] = ...,
+ axis: None | _ShapeLike = ...,
) -> ndarray: ...
def diagonal(
@@ -197,28 +199,28 @@ def trace(
axis1: int = ...,
axis2: int = ...,
dtype: DTypeLike = ...,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
) -> Any: ...
def ravel(a: ArrayLike, order: _OrderKACF = ...) -> ndarray: ...
-def nonzero(a: ArrayLike) -> Tuple[ndarray, ...]: ...
+def nonzero(a: ArrayLike) -> tuple[ndarray, ...]: ...
def shape(a: ArrayLike) -> _Shape: ...
def compress(
condition: ArrayLike, # 1D bool array
a: ArrayLike,
- axis: Optional[int] = ...,
- out: Optional[ndarray] = ...,
+ axis: None | int = ...,
+ out: None | ndarray = ...,
) -> ndarray: ...
@overload
def clip(
a: ArrayLike,
a_min: ArrayLike,
- a_max: Optional[ArrayLike],
- out: Optional[ndarray] = ...,
+ a_max: None | ArrayLike,
+ out: None | ndarray = ...,
**kwargs: Any,
) -> Any: ...
@overload
@@ -226,7 +228,7 @@ def clip(
a: ArrayLike,
a_min: None,
a_max: ArrayLike,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
**kwargs: Any,
) -> Any: ...
@@ -234,7 +236,7 @@ def sum(
a: ArrayLike,
axis: _ShapeLike = ...,
dtype: DTypeLike = ...,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
where: _ArrayLikeBool_co = ...,
@@ -250,8 +252,8 @@ def all(
@overload
def all(
a: ArrayLike,
- axis: Optional[_ShapeLike] = ...,
- out: Optional[ndarray] = ...,
+ axis: None | _ShapeLike = ...,
+ out: None | ndarray = ...,
keepdims: bool = ...,
) -> Any: ...
@@ -265,29 +267,29 @@ def any(
@overload
def any(
a: ArrayLike,
- axis: Optional[_ShapeLike] = ...,
- out: Optional[ndarray] = ...,
+ axis: None | _ShapeLike = ...,
+ out: None | ndarray = ...,
keepdims: bool = ...,
) -> Any: ...
def cumsum(
a: ArrayLike,
- axis: Optional[int] = ...,
+ axis: None | int = ...,
dtype: DTypeLike = ...,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
) -> ndarray: ...
def ptp(
a: ArrayLike,
- axis: Optional[_ShapeLike] = ...,
- out: Optional[ndarray] = ...,
+ axis: None | _ShapeLike = ...,
+ out: None | ndarray = ...,
keepdims: bool = ...,
) -> Any: ...
def amax(
a: ArrayLike,
- axis: Optional[_ShapeLike] = ...,
- out: Optional[ndarray] = ...,
+ axis: None | _ShapeLike = ...,
+ out: None | ndarray = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
where: _ArrayLikeBool_co = ...,
@@ -295,8 +297,8 @@ def amax(
def amin(
a: ArrayLike,
- axis: Optional[_ShapeLike] = ...,
- out: Optional[ndarray] = ...,
+ axis: None | _ShapeLike = ...,
+ out: None | ndarray = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
where: _ArrayLikeBool_co = ...,
@@ -311,9 +313,9 @@ def amin(
# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`).
def prod(
a: ArrayLike,
- axis: Optional[_ShapeLike] = ...,
+ axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
keepdims: bool = ...,
initial: _NumberLike_co = ...,
where: _ArrayLikeBool_co = ...,
@@ -321,43 +323,43 @@ def prod(
def cumprod(
a: ArrayLike,
- axis: Optional[int] = ...,
+ axis: None | int = ...,
dtype: DTypeLike = ...,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
) -> ndarray: ...
def ndim(a: ArrayLike) -> int: ...
-def size(a: ArrayLike, axis: Optional[int] = ...) -> int: ...
+def size(a: ArrayLike, axis: None | int = ...) -> int: ...
def around(
a: ArrayLike,
decimals: int = ...,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
) -> Any: ...
def mean(
a: ArrayLike,
- axis: Optional[_ShapeLike] = ...,
+ axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
keepdims: bool = ...,
) -> Any: ...
def std(
a: ArrayLike,
- axis: Optional[_ShapeLike] = ...,
+ axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
ddof: int = ...,
keepdims: bool = ...,
) -> Any: ...
def var(
a: ArrayLike,
- axis: Optional[_ShapeLike] = ...,
+ axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
- out: Optional[ndarray] = ...,
+ out: None | ndarray = ...,
ddof: int = ...,
keepdims: bool = ...,
) -> Any: ...
diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi
index 68d3b3a98..b21892177 100644
--- a/numpy/core/function_base.pyi
+++ b/numpy/core/function_base.pyi
@@ -1,60 +1,197 @@
-from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal, List
+from typing import (
+ Literal as L,
+ overload,
+ Union,
+ Any,
+ SupportsIndex,
+ TypeVar,
+)
-from numpy import ndarray
-from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co
+from numpy import floating, complexfloating, generic, dtype
+from numpy.typing import (
+ NDArray,
+ ArrayLike,
+ DTypeLike,
+ _SupportsDType,
+ _SupportsArray,
+ _NumberLike_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+)
-# TODO: wait for support for recursive types
-_ArrayLikeNested = Sequence[Sequence[Any]]
-_ArrayLikeNumber = Union[
- _NumberLike_co, Sequence[_NumberLike_co], ndarray, _SupportsArray, _ArrayLikeNested
+_SCT = TypeVar("_SCT", bound=generic)
+
+_DTypeLike = Union[
+ dtype[_SCT],
+ type[_SCT],
+ _SupportsDType[dtype[_SCT]],
]
-__all__: List[str]
+__all__: list[str]
@overload
def linspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
- retstep: Literal[False] = ...,
+ retstep: L[False] = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> ndarray: ...
+) -> NDArray[Any]: ...
@overload
def linspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
- retstep: Literal[True] = ...,
+ retstep: L[True] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> tuple[NDArray[floating[Any]], floating[Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> tuple[NDArray[_SCT], _SCT]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> Tuple[ndarray, Any]: ...
+) -> tuple[NDArray[Any], Any]: ...
+@overload
def logspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
- base: _ArrayLikeNumber = ...,
+ base: _ArrayLikeFloat_co = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> ndarray: ...
+) -> NDArray[Any]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
def geomspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> ndarray: ...
+) -> NDArray[Any]: ...
# Re-exported to `np.lib.function_base`
def add_newdoc(
place: str,
obj: str,
- doc: str | Tuple[str, str] | List[Tuple[str, str]],
+ doc: str | tuple[str, str] | list[tuple[str, str]],
warn_on_python: bool = ...,
) -> None: ...
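
The new overloads encode how the result dtype of ``linspace``/``logspace``/``geomspace`` follows the inputs: float inputs give a floating array, complex inputs a complex one, and an explicit ``dtype`` takes precedence; with ``retstep=True`` a tuple is returned. A sketch of the runtime behaviour the stubs describe:

    import numpy as np

    print(np.linspace(0, 1, 5).dtype)             # float64
    print(np.linspace(0, 1 + 1j, 5).dtype)        # complex128
    print(np.linspace(0, 1, 5, dtype=np.int64))   # [0 0 0 0 1]
    print(np.linspace(0, 1, 5, retstep=True))     # (array([0.  , 0.25, 0.5 , 0.75, 1.  ]), 0.25)
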
diff --git a/numpy/core/getlimits.pyi b/numpy/core/getlimits.pyi
index 66d062995..da5e3c23e 100644
--- a/numpy/core/getlimits.pyi
+++ b/numpy/core/getlimits.pyi
@@ -1,8 +1,6 @@
-from typing import List
-
from numpy import (
finfo as finfo,
iinfo as iinfo,
)
-__all__: List[str]
+__all__: list[str]
diff --git a/numpy/core/include/numpy/experimental_dtype_api.h b/numpy/core/include/numpy/experimental_dtype_api.h
index 554c7fb6c..83639f186 100644
--- a/numpy/core/include/numpy/experimental_dtype_api.h
+++ b/numpy/core/include/numpy/experimental_dtype_api.h
@@ -82,6 +82,15 @@
* The new DType API is designed in a way to make it potentially useful for
* alternative "array-like" implementations. This will require careful
* exposure of details and functions and is not part of this experimental API.
+ *
+ * Brief (incompatibility) changelog
+ * =================================
+ *
+ * 2. None (only additions).
+ * 3. New `npy_intp *view_offset` argument for `resolve_descriptors`.
+ * This replaces the `NPY_CAST_IS_VIEW` flag. It can be set to 0 if the
+ * operation is a view, and is pre-initialized to `NPY_MIN_INTP` indicating
+ * that the operation is not a view.
*/
#ifndef NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_
@@ -181,6 +190,12 @@ typedef PyObject *_ufunc_addloop_fromspec_func(
/*
* Type of the C promoter function, which must be wrapped into a
* PyCapsule with name "numpy._ufunc_promoter".
+ *
+ * Note that currently the output dtypes are always NULL unless they are
+ * also part of the signature. This is an implementation detail and could
+ * change in the future. However, in general promoters should not have a
+ * need for output dtypes.
+ * (There are potential use-cases, these are currently unsupported.)
*/
typedef int promoter_function(PyObject *ufunc,
PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
@@ -200,16 +215,6 @@ typedef int _ufunc_addpromoter_func(
#define PyUFunc_AddPromoter \
(*(_ufunc_addpromoter_func *)(__experimental_dtype_api_table[1]))
-/*
- * In addition to the normal casting levels, NPY_CAST_IS_VIEW indicates
- * that no cast operation is necessary at all (although a copy usually will be)
- *
- * NOTE: The most likely modification here is to add an additional
- * `view_offset` output to resolve_descriptors. If set, it would
- * indicate both that it is a view and what offset to use. This means that
- * e.g. `arr.imag` could be implemented by an ArrayMethod.
- */
-#define NPY_CAST_IS_VIEW _NPY_CAST_IS_VIEW
/*
* The resolve descriptors function, must be able to handle NULL values for
@@ -230,7 +235,8 @@ typedef NPY_CASTING (resolve_descriptors_function)(
/* Input descriptors (instances). Outputs may be NULL. */
PyArray_Descr **given_descrs,
/* Exact loop descriptors to use, must not hold references on error */
- PyArray_Descr **loop_descrs);
+ PyArray_Descr **loop_descrs,
+ npy_intp *view_offset);
/* NOT public yet: Signature needs adapting as external API. */
#define _NPY_METH_get_loop 2
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index 2eb951486..aaaefd7de 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -152,19 +152,16 @@ extern "C" {
(k)*PyArray_STRIDES(obj)[2] + \
(l)*PyArray_STRIDES(obj)[3]))
-/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */
static NPY_INLINE void
PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
{
PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
if (fa && fa->base) {
- if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) ||
- (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) {
+ if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) {
PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE);
Py_DECREF(fa->base);
fa->base = NULL;
PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);
- PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
}
}
}
@@ -246,20 +243,6 @@ NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)
#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)
-#if !defined(NPY_NO_DEPRECATED_API) || \
- (NPY_NO_DEPRECATED_API < NPY_1_14_API_VERSION)
-static NPY_INLINE void
-PyArray_XDECREF_ERR(PyArrayObject *arr)
-{
- /* 2017-Nov-10 1.14 */
- DEPRECATE("PyArray_XDECREF_ERR is deprecated, call "
- "PyArray_DiscardWritebackIfCopy then Py_XDECREF instead");
- PyArray_DiscardWritebackIfCopy(arr);
- Py_XDECREF(arr);
-}
-#endif
-
-
#ifdef __cplusplus
}
#endif
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index 6240adc0c..47d063178 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -221,13 +221,6 @@ typedef enum {
NPY_SAME_KIND_CASTING=3,
/* Allow any casts */
NPY_UNSAFE_CASTING=4,
- /*
- * Flag to allow signalling that a cast is a view, this flag is not
- * valid when requesting a cast of specific safety.
- * _NPY_CAST_IS_VIEW|NPY_EQUIV_CASTING means the same as NPY_NO_CASTING.
- */
- // TODO-DTYPES: Needs to be documented.
- _NPY_CAST_IS_VIEW = 1 << 16,
} NPY_CASTING;
typedef enum {
@@ -934,7 +927,6 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
* This flag may be requested in constructor functions.
* This flag may be tested for in PyArray_FLAGS(arr).
*/
-#define NPY_ARRAY_UPDATEIFCOPY 0x1000 /* Deprecated in 1.14 */
#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000
/*
@@ -965,14 +957,12 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY)
#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO)
#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY)
-#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \
- NPY_ARRAY_UPDATEIFCOPY)
+#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY)
#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \
NPY_ARRAY_WRITEBACKIFCOPY)
#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO)
#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY)
-#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \
- NPY_ARRAY_UPDATEIFCOPY)
+#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY)
#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \
NPY_ARRAY_WRITEBACKIFCOPY)
diff --git a/numpy/core/include/numpy/noprefix.h b/numpy/core/include/numpy/noprefix.h
index 2c0ce1420..cea5b0d46 100644
--- a/numpy/core/include/numpy/noprefix.h
+++ b/numpy/core/include/numpy/noprefix.h
@@ -165,7 +165,6 @@
#define ALIGNED NPY_ALIGNED
#define NOTSWAPPED NPY_NOTSWAPPED
#define WRITEABLE NPY_WRITEABLE
-#define UPDATEIFCOPY NPY_UPDATEIFCOPY
#define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY
#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR
#define BEHAVED NPY_BEHAVED
diff --git a/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/numpy/core/include/numpy/npy_1_7_deprecated_api.h
index 4fd4015a9..6455d40d2 100644
--- a/numpy/core/include/numpy/npy_1_7_deprecated_api.h
+++ b/numpy/core/include/numpy/npy_1_7_deprecated_api.h
@@ -48,7 +48,6 @@
#define NPY_ALIGNED NPY_ARRAY_ALIGNED
#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED
#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE
-#define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY
#define NPY_BEHAVED NPY_ARRAY_BEHAVED
#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS
#define NPY_CARRAY NPY_ARRAY_CARRAY
diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h
index 4eac083e7..b2e7c458e 100644
--- a/numpy/core/include/numpy/numpyconfig.h
+++ b/numpy/core/include/numpy/numpyconfig.h
@@ -23,12 +23,18 @@
#undef NPY_SIZEOF_LONGDOUBLE
#undef NPY_SIZEOF_COMPLEX_LONGDOUBLE
- #ifdef __x86_64
- #define NPY_SIZEOF_LONGDOUBLE 16
- #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
- #elif defined(__arm64__)
+ #if defined(__arm64__)
#define NPY_SIZEOF_LONGDOUBLE 8
#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
+ #elif defined(__x86_64)
+ #define NPY_SIZEOF_LONGDOUBLE 16
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+ #elif defined (__i386)
+ #define NPY_SIZEOF_LONGDOUBLE 12
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24
+ #elif defined(__ppc__) || defined (__ppc64__)
+ #define NPY_SIZEOF_LONGDOUBLE 16
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
#else
#error "unknown architecture"
#endif
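
Editor's note: the hunk above hard-codes the per-architecture long double sizes that a configure probe cannot supply when one header has to serve several architectures (e.g. universal builds). A quick way to see what a given interpreter actually uses is a one-liner sketch; the printed value depends on the platform, matching the sizes listed in the hunk (16 on x86_64 and ppc/ppc64, 12 on 32-bit x86, 8 on arm64 where long double is just double):

    import numpy as np
    print(np.dtype(np.longdouble).itemsize)
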
diff --git a/numpy/core/memmap.pyi b/numpy/core/memmap.pyi
index ba595bf1e..03c6b772d 100644
--- a/numpy/core/memmap.pyi
+++ b/numpy/core/memmap.pyi
@@ -1,5 +1,3 @@
-from typing import List
-
from numpy import memmap as memmap
-__all__: List[str]
+__all__: list[str]
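
Editor's note: this and the following stub files replace the `typing.List`/`Tuple`/`Type`/`Optional`/`Union` spellings with PEP 585 builtin generics and PEP 604 `X | Y` unions. In `.pyi` stubs the newer spellings are understood by type checkers regardless of the Python version used at runtime. Illustrative before/after sketch (the redefinitions are for comparison only):

    # old spellings
    from typing import List, Optional
    x: List[str]
    y: Optional[int]

    # new spellings used throughout these stubs
    x: list[str]
    y: int | None
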
diff --git a/numpy/core/multiarray.pyi b/numpy/core/multiarray.pyi
index a9f68e181..423aed85e 100644
--- a/numpy/core/multiarray.pyi
+++ b/numpy/core/multiarray.pyi
@@ -2,19 +2,13 @@
import os
import datetime as dt
+from collections.abc import Sequence, Callable, Iterable
from typing import (
Literal as L,
Any,
- Callable,
- Iterable,
- Optional,
overload,
TypeVar,
- List,
- Type,
Union,
- Sequence,
- Tuple,
SupportsIndex,
final,
Final,
@@ -67,6 +61,7 @@ from numpy.typing import (
NDArray,
ArrayLike,
_SupportsArray,
+ _NestedSequence,
_FiniteNestedSequence,
_ArrayLikeBool_co,
_ArrayLikeUInt_co,
@@ -90,7 +85,7 @@ _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
# Subscriptable subsets of `npt.DTypeLike` and `npt.ArrayLike`
_DTypeLike = Union[
dtype[_SCT],
- Type[_SCT],
+ type[_SCT],
_SupportsDType[dtype[_SCT]],
]
_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]]
@@ -120,7 +115,7 @@ _RollKind = L[ # `raise` is deliberately excluded
"modifiedpreceding",
]
-__all__: List[str]
+__all__: list[str]
ALLOW_THREADS: Final[int] # 0 or 1 (system-specific)
BUFSIZE: L[8192]
@@ -138,7 +133,7 @@ def empty_like(
dtype: None = ...,
order: _OrderKACF = ...,
subok: bool = ...,
- shape: Optional[_ShapeLike] = ...,
+ shape: None | _ShapeLike = ...,
) -> _ArrayType: ...
@overload
def empty_like(
@@ -146,7 +141,7 @@ def empty_like(
dtype: None = ...,
order: _OrderKACF = ...,
subok: bool = ...,
- shape: Optional[_ShapeLike] = ...,
+ shape: None | _ShapeLike = ...,
) -> NDArray[_SCT]: ...
@overload
def empty_like(
@@ -154,7 +149,7 @@ def empty_like(
dtype: None = ...,
order: _OrderKACF = ...,
subok: bool = ...,
- shape: Optional[_ShapeLike] = ...,
+ shape: None | _ShapeLike = ...,
) -> NDArray[Any]: ...
@overload
def empty_like(
@@ -162,7 +157,7 @@ def empty_like(
dtype: _DTypeLike[_SCT],
order: _OrderKACF = ...,
subok: bool = ...,
- shape: Optional[_ShapeLike] = ...,
+ shape: None | _ShapeLike = ...,
) -> NDArray[_SCT]: ...
@overload
def empty_like(
@@ -170,7 +165,7 @@ def empty_like(
dtype: DTypeLike,
order: _OrderKACF = ...,
subok: bool = ...,
- shape: Optional[_ShapeLike] = ...,
+ shape: None | _ShapeLike = ...,
) -> NDArray[Any]: ...
@overload
@@ -284,26 +279,26 @@ def unravel_index( # type: ignore[misc]
indices: _IntLike_co,
shape: _ShapeLike,
order: _OrderCF = ...,
-) -> Tuple[intp, ...]: ...
+) -> tuple[intp, ...]: ...
@overload
def unravel_index(
indices: _ArrayLikeInt_co,
shape: _ShapeLike,
order: _OrderCF = ...,
-) -> Tuple[NDArray[intp], ...]: ...
+) -> tuple[NDArray[intp], ...]: ...
@overload
def ravel_multi_index( # type: ignore[misc]
multi_index: Sequence[_IntLike_co],
dims: Sequence[SupportsIndex],
- mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ...,
+ mode: _ModeKind | tuple[_ModeKind, ...] = ...,
order: _OrderCF = ...,
) -> intp: ...
@overload
def ravel_multi_index(
multi_index: Sequence[_ArrayLikeInt_co],
dims: Sequence[SupportsIndex],
- mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ...,
+ mode: _ModeKind | tuple[_ModeKind, ...] = ...,
order: _OrderCF = ...,
) -> NDArray[intp]: ...
@@ -311,51 +306,51 @@ def ravel_multi_index(
def concatenate( # type: ignore[misc]
arrays: _ArrayLike[_SCT],
/,
- axis: Optional[SupportsIndex] = ...,
+ axis: None | SupportsIndex = ...,
out: None = ...,
*,
dtype: None = ...,
- casting: Optional[_CastingKind] = ...
+ casting: None | _CastingKind = ...
) -> NDArray[_SCT]: ...
@overload
def concatenate( # type: ignore[misc]
arrays: ArrayLike,
/,
- axis: Optional[SupportsIndex] = ...,
+ axis: None | SupportsIndex = ...,
out: None = ...,
*,
dtype: None = ...,
- casting: Optional[_CastingKind] = ...
+ casting: None | _CastingKind = ...
) -> NDArray[Any]: ...
@overload
def concatenate( # type: ignore[misc]
arrays: ArrayLike,
/,
- axis: Optional[SupportsIndex] = ...,
+ axis: None | SupportsIndex = ...,
out: None = ...,
*,
dtype: _DTypeLike[_SCT],
- casting: Optional[_CastingKind] = ...
+ casting: None | _CastingKind = ...
) -> NDArray[_SCT]: ...
@overload
def concatenate( # type: ignore[misc]
arrays: ArrayLike,
/,
- axis: Optional[SupportsIndex] = ...,
+ axis: None | SupportsIndex = ...,
out: None = ...,
*,
dtype: DTypeLike,
- casting: Optional[_CastingKind] = ...
+ casting: None | _CastingKind = ...
) -> NDArray[Any]: ...
@overload
def concatenate(
arrays: ArrayLike,
/,
- axis: Optional[SupportsIndex] = ...,
+ axis: None | SupportsIndex = ...,
out: _ArrayType = ...,
*,
dtype: DTypeLike = ...,
- casting: Optional[_CastingKind] = ...
+ casting: None | _CastingKind = ...
) -> _ArrayType: ...
def inner(
@@ -368,7 +363,7 @@ def inner(
def where(
condition: ArrayLike,
/,
-) -> Tuple[NDArray[intp], ...]: ...
+) -> tuple[NDArray[intp], ...]: ...
@overload
def where(
condition: ArrayLike,
@@ -379,13 +374,13 @@ def where(
def lexsort(
keys: ArrayLike,
- axis: Optional[SupportsIndex] = ...,
+ axis: None | SupportsIndex = ...,
) -> Any: ...
def can_cast(
- from_: Union[ArrayLike, DTypeLike],
+ from_: ArrayLike | DTypeLike,
to: DTypeLike,
- casting: Optional[_CastingKind] = ...,
+ casting: None | _CastingKind = ...,
) -> bool: ...
def min_scalar_type(
@@ -393,7 +388,7 @@ def min_scalar_type(
) -> dtype[Any]: ...
def result_type(
- *arrays_and_dtypes: Union[ArrayLike, DTypeLike],
+ *arrays_and_dtypes: ArrayLike | DTypeLike,
) -> dtype[Any]: ...
@overload
@@ -421,15 +416,15 @@ def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ...
def bincount(
x: ArrayLike,
/,
- weights: Optional[ArrayLike] = ...,
+ weights: None | ArrayLike = ...,
minlength: SupportsIndex = ...,
) -> NDArray[intp]: ...
def copyto(
dst: NDArray[Any],
src: ArrayLike,
- casting: Optional[_CastingKind] = ...,
- where: Optional[_ArrayLikeBool_co] = ...,
+ casting: None | _CastingKind = ...,
+ where: None | _ArrayLikeBool_co = ...,
) -> None: ...
def putmask(
@@ -441,15 +436,15 @@ def putmask(
def packbits(
a: _ArrayLikeInt_co,
/,
- axis: Optional[SupportsIndex] = ...,
+ axis: None | SupportsIndex = ...,
bitorder: L["big", "little"] = ...,
) -> NDArray[uint8]: ...
def unpackbits(
a: _ArrayLike[uint8],
/,
- axis: Optional[SupportsIndex] = ...,
- count: Optional[SupportsIndex] = ...,
+ axis: None | SupportsIndex = ...,
+ count: None | SupportsIndex = ...,
bitorder: L["big", "little"] = ...,
) -> NDArray[uint8]: ...
@@ -457,14 +452,14 @@ def shares_memory(
a: object,
b: object,
/,
- max_work: Optional[int] = ...,
+ max_work: None | int = ...,
) -> bool: ...
def may_share_memory(
a: object,
b: object,
/,
- max_work: Optional[int] = ...,
+ max_work: None | int = ...,
) -> bool: ...
@overload
@@ -599,10 +594,10 @@ def asfortranarray(
like: ArrayLike = ...,
) -> NDArray[Any]: ...
-# In practice `List[Any]` is list with an int, int and a valid
+# In practice `list[Any]` is list with an int, int and a valid
# `np.seterrcall()` object
-def geterrobj() -> List[Any]: ...
-def seterrobj(errobj: List[Any], /) -> None: ...
+def geterrobj() -> list[Any]: ...
+def seterrobj(errobj: list[Any], /) -> None: ...
def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]: ...
@@ -811,35 +806,35 @@ def arange(
def datetime_data(
dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /,
-) -> Tuple[str, int]: ...
+) -> tuple[str, int]: ...
# The datetime functions perform unsafe casts to `datetime64[D]`,
# so a lot of different argument types are allowed here
@overload
def busday_count( # type: ignore[misc]
- begindates: _ScalarLike_co,
- enddates: _ScalarLike_co,
+ begindates: _ScalarLike_co | dt.date,
+ enddates: _ScalarLike_co | dt.date,
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> int_: ...
@overload
def busday_count( # type: ignore[misc]
- begindates: ArrayLike,
- enddates: ArrayLike,
+ begindates: ArrayLike | dt.date | _NestedSequence[dt.date],
+ enddates: ArrayLike | dt.date | _NestedSequence[dt.date],
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> NDArray[int_]: ...
@overload
def busday_count(
- begindates: ArrayLike,
- enddates: ArrayLike,
+ begindates: ArrayLike | dt.date | _NestedSequence[dt.date],
+ enddates: ArrayLike | dt.date | _NestedSequence[dt.date],
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: _ArrayType = ...,
) -> _ArrayType: ...
@@ -847,100 +842,100 @@ def busday_count(
# `roll="raise"` is (more or less?) equivalent to `casting="safe"`
@overload
def busday_offset( # type: ignore[misc]
- dates: datetime64,
- offsets: _TD64Like_co,
+ dates: datetime64 | dt.date,
+ offsets: _TD64Like_co | dt.timedelta,
roll: L["raise"] = ...,
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> datetime64: ...
@overload
def busday_offset( # type: ignore[misc]
- dates: _ArrayLike[datetime64],
- offsets: _ArrayLikeTD64_co,
+ dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],
+ offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],
roll: L["raise"] = ...,
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> NDArray[datetime64]: ...
@overload
def busday_offset( # type: ignore[misc]
- dates: _ArrayLike[datetime64],
- offsets: _ArrayLike[timedelta64],
+ dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],
+ offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],
roll: L["raise"] = ...,
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: _ArrayType = ...,
) -> _ArrayType: ...
@overload
def busday_offset( # type: ignore[misc]
- dates: _ScalarLike_co,
- offsets: _ScalarLike_co,
+ dates: _ScalarLike_co | dt.date,
+ offsets: _ScalarLike_co | dt.timedelta,
roll: _RollKind,
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> datetime64: ...
@overload
def busday_offset( # type: ignore[misc]
- dates: ArrayLike,
- offsets: ArrayLike,
+ dates: ArrayLike | dt.date | _NestedSequence[dt.date],
+ offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],
roll: _RollKind,
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> NDArray[datetime64]: ...
@overload
def busday_offset(
- dates: ArrayLike,
- offsets: ArrayLike,
+ dates: ArrayLike | dt.date | _NestedSequence[dt.date],
+ offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],
roll: _RollKind,
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: _ArrayType = ...,
) -> _ArrayType: ...
@overload
def is_busday( # type: ignore[misc]
- dates: _ScalarLike_co,
+ dates: _ScalarLike_co | dt.date,
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> bool_: ...
@overload
def is_busday( # type: ignore[misc]
- dates: ArrayLike,
+ dates: ArrayLike | _NestedSequence[dt.date],
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> NDArray[bool_]: ...
@overload
def is_busday(
- dates: ArrayLike,
+ dates: ArrayLike | _NestedSequence[dt.date],
weekmask: ArrayLike = ...,
- holidays: None | ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: _ArrayType = ...,
) -> _ArrayType: ...
@overload
def datetime_as_string( # type: ignore[misc]
- arr: datetime64,
+ arr: datetime64 | dt.date,
unit: None | L["auto"] | _UnitKind = ...,
timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,
casting: _CastingKind = ...,
) -> str_: ...
@overload
def datetime_as_string(
- arr: _ArrayLikeDT64_co,
+ arr: _ArrayLikeDT64_co | _NestedSequence[dt.date],
unit: None | L["auto"] | _UnitKind = ...,
timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,
casting: _CastingKind = ...,
@@ -1024,4 +1019,4 @@ def nested_iters(
order: _OrderKACF = ...,
casting: _CastingKind = ...,
buffersize: SupportsIndex = ...,
-) -> Tuple[nditer, ...]: ...
+) -> tuple[nditer, ...]: ...
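
Editor's note: the `busday_*`, `is_busday` and `datetime_as_string` overloads above now also advertise plain `datetime.date`/`datetime.timedelta` inputs (and nested sequences of them), which the functions already accept at runtime by casting to `datetime64[D]`. A minimal usage sketch, with arbitrarily chosen dates:

    import datetime as dt
    import numpy as np

    # scalar date inputs -> integer result
    n = np.busday_count(dt.date(2021, 12, 20), dt.date(2022, 1, 3))

    # sequences of dates -> array result
    mask = np.is_busday([dt.date(2021, 12, 25), dt.date(2021, 12, 27)])
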
diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi
index d7ec30351..8b92abab4 100644
--- a/numpy/core/numeric.pyi
+++ b/numpy/core/numeric.pyi
@@ -1,14 +1,10 @@
+from collections.abc import Callable, Sequence
from typing import (
Any,
Union,
- Sequence,
- Tuple,
- Callable,
- List,
overload,
TypeVar,
Literal,
- Type,
SupportsAbs,
SupportsIndex,
NoReturn,
@@ -57,13 +53,13 @@ _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
_DTypeLike = Union[
dtype[_SCT],
- Type[_SCT],
+ type[_SCT],
_SupportsDType[dtype[_SCT]],
]
_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]]
_CorrelateMode = Literal["valid", "same", "full"]
-__all__: List[str]
+__all__: list[str]
@overload
def zeros_like(
@@ -406,43 +402,43 @@ def outer(
def tensordot(
a: _ArrayLikeBool_co,
b: _ArrayLikeBool_co,
- axes: int | Tuple[_ShapeLike, _ShapeLike] = ...,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
) -> NDArray[bool_]: ...
@overload
def tensordot(
a: _ArrayLikeUInt_co,
b: _ArrayLikeUInt_co,
- axes: int | Tuple[_ShapeLike, _ShapeLike] = ...,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
) -> NDArray[unsignedinteger[Any]]: ...
@overload
def tensordot(
a: _ArrayLikeInt_co,
b: _ArrayLikeInt_co,
- axes: int | Tuple[_ShapeLike, _ShapeLike] = ...,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
) -> NDArray[signedinteger[Any]]: ...
@overload
def tensordot(
a: _ArrayLikeFloat_co,
b: _ArrayLikeFloat_co,
- axes: int | Tuple[_ShapeLike, _ShapeLike] = ...,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
) -> NDArray[floating[Any]]: ...
@overload
def tensordot(
a: _ArrayLikeComplex_co,
b: _ArrayLikeComplex_co,
- axes: int | Tuple[_ShapeLike, _ShapeLike] = ...,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def tensordot(
a: _ArrayLikeTD64_co,
b: _ArrayLikeTD64_co,
- axes: int | Tuple[_ShapeLike, _ShapeLike] = ...,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
) -> NDArray[timedelta64]: ...
@overload
def tensordot(
a: _ArrayLikeObject_co,
b: _ArrayLikeObject_co,
- axes: int | Tuple[_ShapeLike, _ShapeLike] = ...,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
) -> NDArray[object_]: ...
@overload
@@ -528,15 +524,15 @@ def cross(
@overload
def indices(
dimensions: Sequence[int],
- dtype: Type[int] = ...,
+ dtype: type[int] = ...,
sparse: Literal[False] = ...,
) -> NDArray[int_]: ...
@overload
def indices(
dimensions: Sequence[int],
- dtype: Type[int] = ...,
+ dtype: type[int] = ...,
sparse: Literal[True] = ...,
-) -> Tuple[NDArray[int_], ...]: ...
+) -> tuple[NDArray[int_], ...]: ...
@overload
def indices(
dimensions: Sequence[int],
@@ -548,7 +544,7 @@ def indices(
dimensions: Sequence[int],
dtype: _DTypeLike[_SCT],
sparse: Literal[True],
-) -> Tuple[NDArray[_SCT], ...]: ...
+) -> tuple[NDArray[_SCT], ...]: ...
@overload
def indices(
dimensions: Sequence[int],
@@ -560,7 +556,7 @@ def indices(
dimensions: Sequence[int],
dtype: DTypeLike,
sparse: Literal[True],
-) -> Tuple[NDArray[Any], ...]: ...
+) -> tuple[NDArray[Any], ...]: ...
def fromfunction(
function: Callable[..., _T],
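
Editor's note: the `indices` overloads above encode that the return type depends on `sparse`: one stacked array by default, a tuple of broadcastable arrays when `sparse=True`. Illustrative check:

    import numpy as np
    grid = np.indices((2, 3))                      # single ndarray, shape (2, 2, 3)
    rows, cols = np.indices((2, 3), sparse=True)   # tuple of arrays, shapes (2, 1) and (1, 3)
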
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 8e5de852b..3d1cb6fd1 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -516,7 +516,7 @@ def _scalar_type_key(typ):
return (dt.kind.lower(), dt.itemsize)
-ScalarType = [int, float, complex, int, bool, bytes, str, memoryview]
+ScalarType = [int, float, complex, bool, bytes, str, memoryview]
ScalarType += sorted(_concrete_types, key=_scalar_type_key)
ScalarType = tuple(ScalarType)
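
Editor's note: the change above drops the duplicate `int` entry at the start of `ScalarType` (apparently a leftover of the Python 2 `long` slot). With it applied, each builtin scalar type appears exactly once, e.g.:

    import numpy as np
    # with the duplicate removed, int occurs once in np.ScalarType
    assert np.ScalarType.count(int) == 1
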
diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi
index 1d3ff773b..09c180a0f 100644
--- a/numpy/core/numerictypes.pyi
+++ b/numpy/core/numerictypes.pyi
@@ -1,16 +1,12 @@
import sys
import types
+from collections.abc import Iterable
from typing import (
Literal as L,
- Type,
Union,
- Tuple,
overload,
Any,
TypeVar,
- Dict,
- List,
- Iterable,
Protocol,
TypedDict,
)
@@ -57,7 +53,7 @@ _SCT = TypeVar("_SCT", bound=generic)
# A paramtrizable subset of `npt.DTypeLike`
_DTypeLike = Union[
- Type[_SCT],
+ type[_SCT],
dtype[_SCT],
_SupportsDType[dtype[_SCT]],
]
@@ -78,48 +74,48 @@ class _TypeCodes(TypedDict):
Datetime: L['Mm']
All: L['?bhilqpBHILQPefdgFDGSUVOMm']
-class _typedict(Dict[Type[generic], _T]):
+class _typedict(dict[type[generic], _T]):
def __getitem__(self, key: DTypeLike) -> _T: ...
if sys.version_info >= (3, 10):
_TypeTuple = Union[
- Type[Any],
+ type[Any],
types.UnionType,
- Tuple[Union[Type[Any], types.UnionType, Tuple[Any, ...]], ...],
+ tuple[Union[type[Any], types.UnionType, tuple[Any, ...]], ...],
]
else:
_TypeTuple = Union[
- Type[Any],
- Tuple[Union[Type[Any], Tuple[Any, ...]], ...],
+ type[Any],
+ tuple[Union[type[Any], tuple[Any, ...]], ...],
]
-__all__: List[str]
+__all__: list[str]
@overload
-def maximum_sctype(t: _DTypeLike[_SCT]) -> Type[_SCT]: ...
+def maximum_sctype(t: _DTypeLike[_SCT]) -> type[_SCT]: ...
@overload
-def maximum_sctype(t: DTypeLike) -> Type[Any]: ...
+def maximum_sctype(t: DTypeLike) -> type[Any]: ...
@overload
-def issctype(rep: dtype[Any] | Type[Any]) -> bool: ...
+def issctype(rep: dtype[Any] | type[Any]) -> bool: ...
@overload
def issctype(rep: object) -> L[False]: ...
@overload
-def obj2sctype(rep: _DTypeLike[_SCT], default: None = ...) -> None | Type[_SCT]: ...
+def obj2sctype(rep: _DTypeLike[_SCT], default: None = ...) -> None | type[_SCT]: ...
@overload
-def obj2sctype(rep: _DTypeLike[_SCT], default: _T) -> _T | Type[_SCT]: ...
+def obj2sctype(rep: _DTypeLike[_SCT], default: _T) -> _T | type[_SCT]: ...
@overload
-def obj2sctype(rep: DTypeLike, default: None = ...) -> None | Type[Any]: ...
+def obj2sctype(rep: DTypeLike, default: None = ...) -> None | type[Any]: ...
@overload
-def obj2sctype(rep: DTypeLike, default: _T) -> _T | Type[Any]: ...
+def obj2sctype(rep: DTypeLike, default: _T) -> _T | type[Any]: ...
@overload
def obj2sctype(rep: object, default: None = ...) -> None: ...
@overload
def obj2sctype(rep: object, default: _T) -> _T: ...
@overload
-def issubclass_(arg1: Type[Any], arg2: _TypeTuple) -> bool: ...
+def issubclass_(arg1: type[Any], arg2: _TypeTuple) -> bool: ...
@overload
def issubclass_(arg1: object, arg2: object) -> L[False]: ...
@@ -137,37 +133,36 @@ def find_common_type(
cast: _typedict[_CastFunc]
nbytes: _typedict[int]
typecodes: _TypeCodes
-ScalarType: Tuple[
- Type[int],
- Type[float],
- Type[complex],
- Type[int],
- Type[bool],
- Type[bytes],
- Type[str],
- Type[memoryview],
- Type[bool_],
- Type[csingle],
- Type[cdouble],
- Type[clongdouble],
- Type[half],
- Type[single],
- Type[double],
- Type[longdouble],
- Type[byte],
- Type[short],
- Type[intc],
- Type[int_],
- Type[longlong],
- Type[timedelta64],
- Type[datetime64],
- Type[object_],
- Type[bytes_],
- Type[str_],
- Type[ubyte],
- Type[ushort],
- Type[uintc],
- Type[uint],
- Type[ulonglong],
- Type[void],
+ScalarType: tuple[
+ type[int],
+ type[float],
+ type[complex],
+ type[bool],
+ type[bytes],
+ type[str],
+ type[memoryview],
+ type[bool_],
+ type[csingle],
+ type[cdouble],
+ type[clongdouble],
+ type[half],
+ type[single],
+ type[double],
+ type[longdouble],
+ type[byte],
+ type[short],
+ type[intc],
+ type[int_],
+ type[longlong],
+ type[timedelta64],
+ type[datetime64],
+ type[object_],
+ type[bytes_],
+ type[str_],
+ type[ubyte],
+ type[ushort],
+ type[uintc],
+ type[uint],
+ type[ulonglong],
+ type[void],
]
diff --git a/numpy/core/records.pyi b/numpy/core/records.pyi
index fda118276..4eee93d87 100644
--- a/numpy/core/records.pyi
+++ b/numpy/core/records.pyi
@@ -1,12 +1,9 @@
import os
+from collections.abc import Sequence, Iterable
from typing import (
- List,
- Sequence,
Any,
TypeVar,
- Iterable,
overload,
- Tuple,
Protocol,
)
@@ -39,7 +36,7 @@ class _SupportsReadInto(Protocol):
def tell(self, /) -> int: ...
def readinto(self, buffer: memoryview, /) -> int: ...
-__all__: List[str]
+__all__: list[str]
@overload
def fromarrays(
@@ -67,7 +64,7 @@ def fromarrays(
@overload
def fromrecords(
- recList: _ArrayLikeVoid_co | Tuple[Any, ...] | _NestedSequence[Tuple[Any, ...]],
+ recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]],
dtype: DTypeLike = ...,
shape: None | _ShapeLike = ...,
formats: None = ...,
@@ -78,7 +75,7 @@ def fromrecords(
) -> _RecArray[record]: ...
@overload
def fromrecords(
- recList: _ArrayLikeVoid_co | Tuple[Any, ...] | _NestedSequence[Tuple[Any, ...]],
+ recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]],
dtype: None = ...,
shape: None | _ShapeLike = ...,
*,
@@ -181,3 +178,57 @@ def array(
byteorder: None | _ByteOrder = ...,
copy: bool = ...,
) -> _RecArray[record]: ...
+@overload
+def array(
+ obj: None,
+ dtype: DTypeLike,
+ shape: _ShapeLike,
+ offset: int = ...,
+ formats: None = ...,
+ names: None = ...,
+ titles: None = ...,
+ aligned: bool = ...,
+ byteorder: None = ...,
+ copy: bool = ...,
+) -> _RecArray[Any]: ...
+@overload
+def array(
+ obj: None,
+ dtype: None = ...,
+ *,
+ shape: _ShapeLike,
+ offset: int = ...,
+ formats: DTypeLike,
+ names: None | str | Sequence[str] = ...,
+ titles: None | str | Sequence[str] = ...,
+ aligned: bool = ...,
+ byteorder: None | _ByteOrder = ...,
+ copy: bool = ...,
+) -> _RecArray[record]: ...
+@overload
+def array(
+ obj: _SupportsReadInto,
+ dtype: DTypeLike,
+ shape: None | _ShapeLike = ...,
+ offset: int = ...,
+ formats: None = ...,
+ names: None = ...,
+ titles: None = ...,
+ aligned: bool = ...,
+ byteorder: None = ...,
+ copy: bool = ...,
+) -> _RecArray[Any]: ...
+@overload
+def array(
+ obj: _SupportsReadInto,
+ dtype: None = ...,
+ shape: None | _ShapeLike = ...,
+ offset: int = ...,
+ *,
+ formats: DTypeLike,
+ names: None | str | Sequence[str] = ...,
+ titles: None | str | Sequence[str] = ...,
+ aligned: bool = ...,
+ byteorder: None | _ByteOrder = ...,
+ copy: bool = ...,
+) -> _RecArray[record]: ...
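
Editor's note: the new overloads cover `np.core.records.array` calls where `obj` is `None` (allocate from `shape` plus `dtype` or `formats`) or a readable file-like object. A hedged usage sketch of the `obj=None` form, which should allocate an uninitialized record array:

    import numpy as np
    r = np.rec.array(None, dtype=[("a", "i4"), ("b", "f8")], shape=3)
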
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 1ec178445..22cac1e9a 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -24,6 +24,11 @@ NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "
NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0")
NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING
+# Set NPY_DISABLE_SVML=1 in the environment to disable the vendored SVML
+# library. This option only has significance on a Linux x86_64 host and is most
+# useful to avoid improperly requiring SVML when cross compiling.
+NPY_DISABLE_SVML = (os.environ.get('NPY_DISABLE_SVML', "0") == "1")
+
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils force
# config.h generation inside an Extension class, and as such sharing
@@ -68,6 +73,8 @@ def can_link_svml():
"""SVML library is supported only on x86_64 architecture and currently
only on linux
"""
+ if NPY_DISABLE_SVML:
+ return False
machine = platform.machine()
system = platform.system()
return "x86_64" in machine and system == "Linux"
@@ -411,7 +418,8 @@ def visibility_define(config):
return ''
def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration, dot_join
+ from numpy.distutils.misc_util import (Configuration, dot_join,
+ exec_mod_from_location)
from numpy.distutils.system_info import (get_info, blas_opt_info,
lapack_opt_info)
@@ -428,8 +436,8 @@ def configuration(parent_package='',top_path=None):
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
- generate_umath = npy_load_module('_'.join(n.split('.')),
- generate_umath_py, ('.py', 'U', 1))
+ generate_umath = exec_mod_from_location('_'.join(n.split('.')),
+ generate_umath_py)
header_dir = 'include/numpy' # this is relative to config.path_in_package
@@ -945,8 +953,8 @@ def configuration(parent_package='',top_path=None):
join('src', 'npysort', 'radixsort.cpp'),
join('src', 'common', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
- join('src', 'common', 'npy_binsearch.h.src'),
- join('src', 'npysort', 'binsearch.c.src'),
+ join('src', 'common', 'npy_binsearch.h'),
+ join('src', 'npysort', 'binsearch.cpp'),
]
#######################################################################
@@ -965,6 +973,21 @@ def configuration(parent_package='',top_path=None):
generate_umath.__file__))
return []
+ def generate_umath_doc_header(ext, build_dir):
+ from numpy.distutils.misc_util import exec_mod_from_location
+
+ target = join(build_dir, header_dir, '_umath_doc_generated.h')
+ dir = os.path.dirname(target)
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+
+ generate_umath_doc_py = join(codegen_dir, 'generate_umath_doc.py')
+ if newer(generate_umath_doc_py, target):
+ n = dot_join(config.name, 'generate_umath_doc')
+ generate_umath_doc = exec_mod_from_location(
+ '_'.join(n.split('.')), generate_umath_doc_py)
+ generate_umath_doc.write_code(target)
+
umath_src = [
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
@@ -1005,6 +1028,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'override.h'),
join(codegen_dir, 'generate_ufunc_api.py'),
+ join(codegen_dir, 'ufunc_docstrings.py'),
]
svml_path = join('numpy', 'core', 'src', 'umath', 'svml')
@@ -1024,6 +1048,7 @@ def configuration(parent_package='',top_path=None):
join(codegen_dir, 'generate_numpy_api.py'),
join('*.py'),
generate_umath_c,
+ generate_umath_doc_header,
generate_ufunc_api,
],
depends=deps + multiarray_deps + umath_deps +
@@ -1066,7 +1091,7 @@ def configuration(parent_package='',top_path=None):
#######################################################################
config.add_extension('_operand_flag_tests',
- sources=[join('src', 'umath', '_operand_flag_tests.c.src')])
+ sources=[join('src', 'umath', '_operand_flag_tests.c')])
#######################################################################
# SIMD module #
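
Editor's note: the new `NPY_DISABLE_SVML` environment variable lets the SVML sources be excluded even on a Linux x86_64 build host, which matters when cross compiling for a target that must not link SVML. The gate reduces to roughly this sketch (names follow the diff):

    import os
    import platform

    NPY_DISABLE_SVML = os.environ.get("NPY_DISABLE_SVML", "0") == "1"

    def can_link_svml():
        # SVML is only bundled for Linux x86_64, and only when not explicitly disabled
        if NPY_DISABLE_SVML:
            return False
        return "x86_64" in platform.machine() and platform.system() == "Linux"
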
diff --git a/numpy/core/shape_base.pyi b/numpy/core/shape_base.pyi
index 159ad2781..63cbd773c 100644
--- a/numpy/core/shape_base.pyi
+++ b/numpy/core/shape_base.pyi
@@ -1,4 +1,5 @@
-from typing import TypeVar, overload, List, Sequence, Any, SupportsIndex
+from collections.abc import Sequence
+from typing import TypeVar, overload, Any, SupportsIndex
from numpy import generic, dtype
from numpy.typing import ArrayLike, NDArray, _FiniteNestedSequence, _SupportsArray
@@ -8,28 +9,28 @@ _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]]
-__all__: List[str]
+__all__: list[str]
@overload
def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
-def atleast_1d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
+def atleast_1d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
@overload
def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
-def atleast_2d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
+def atleast_2d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
@overload
def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
-def atleast_3d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
+def atleast_3d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
@overload
def vstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
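
Editor's note: the `atleast_1d`/`atleast_2d`/`atleast_3d` overloads distinguish the single-argument case (one array returned) from the variadic case (a `list` of arrays), which matches the runtime behaviour:

    import numpy as np
    a = np.atleast_1d(1.0)             # single input   -> ndarray
    b, c = np.atleast_1d(1.0, [1, 2])  # several inputs -> list of ndarrays
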
diff --git a/numpy/core/src/_simd/_simd_convert.inc b/numpy/core/src/_simd/_simd_convert.inc
index 73869ef1f..46e044479 100644
--- a/numpy/core/src/_simd/_simd_convert.inc
+++ b/numpy/core/src/_simd/_simd_convert.inc
@@ -94,6 +94,7 @@ simd_sequence_from_iterable(PyObject *obj, simd_data_type dtype, Py_ssize_t min_
"minimum acceptable size of the required sequence is %d, given(%d)",
min_size, seq_size
);
+ Py_DECREF(seq_obj);
return NULL;
}
npyv_lanetype_u8 *dst = simd_sequence_new(seq_size, dtype);
diff --git a/numpy/core/src/common/npy_binsearch.h b/numpy/core/src/common/npy_binsearch.h
new file mode 100644
index 000000000..8d2f0714d
--- /dev/null
+++ b/numpy/core/src/common/npy_binsearch.h
@@ -0,0 +1,31 @@
+#ifndef __NPY_BINSEARCH_H__
+#define __NPY_BINSEARCH_H__
+
+#include "npy_sort.h"
+#include <numpy/npy_common.h>
+#include <numpy/ndarraytypes.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void (PyArray_BinSearchFunc)(const char*, const char*, char*,
+ npy_intp, npy_intp,
+ npy_intp, npy_intp, npy_intp,
+ PyArrayObject*);
+
+typedef int (PyArray_ArgBinSearchFunc)(const char*, const char*,
+ const char*, char*,
+ npy_intp, npy_intp, npy_intp,
+ npy_intp, npy_intp, npy_intp,
+ PyArrayObject*);
+
+NPY_NO_EXPORT PyArray_BinSearchFunc* get_binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side);
+NPY_NO_EXPORT PyArray_ArgBinSearchFunc* get_argbinsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/numpy/core/src/common/npy_binsearch.h.src b/numpy/core/src/common/npy_binsearch.h.src
deleted file mode 100644
index 052c44482..000000000
--- a/numpy/core/src/common/npy_binsearch.h.src
+++ /dev/null
@@ -1,144 +0,0 @@
-#ifndef __NPY_BINSEARCH_H__
-#define __NPY_BINSEARCH_H__
-
-#include "npy_sort.h"
-#include <numpy/npy_common.h>
-#include <numpy/ndarraytypes.h>
-
-#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
-
-typedef void (PyArray_BinSearchFunc)(const char*, const char*, char*,
- npy_intp, npy_intp,
- npy_intp, npy_intp, npy_intp,
- PyArrayObject*);
-
-typedef int (PyArray_ArgBinSearchFunc)(const char*, const char*,
- const char*, char*,
- npy_intp, npy_intp, npy_intp,
- npy_intp, npy_intp, npy_intp,
- PyArrayObject*);
-
-typedef struct {
- int typenum;
- PyArray_BinSearchFunc *binsearch[NPY_NSEARCHSIDES];
-} binsearch_map;
-
-typedef struct {
- int typenum;
- PyArray_ArgBinSearchFunc *argbinsearch[NPY_NSEARCHSIDES];
-} argbinsearch_map;
-
-/**begin repeat
- *
- * #side = left, right#
- */
-
-/**begin repeat1
- *
- * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
- * longlong, ulonglong, half, float, double, longdouble,
- * cfloat, cdouble, clongdouble, datetime, timedelta#
- */
-
-NPY_NO_EXPORT void
-binsearch_@side@_@suff@(const char *arr, const char *key, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
- PyArrayObject *unused);
-NPY_NO_EXPORT int
-argbinsearch_@side@_@suff@(const char *arr, const char *key,
- const char *sort, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str,
- npy_intp sort_str, npy_intp ret_str,
- PyArrayObject *unused);
-/**end repeat1**/
-
-NPY_NO_EXPORT void
-npy_binsearch_@side@(const char *arr, const char *key, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str,
- npy_intp ret_str, PyArrayObject *cmp);
-NPY_NO_EXPORT int
-npy_argbinsearch_@side@(const char *arr, const char *key,
- const char *sort, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str,
- npy_intp sort_str, npy_intp ret_str,
- PyArrayObject *cmp);
-/**end repeat**/
-
-/**begin repeat
- *
- * #arg = , arg#
- * #Arg = , Arg#
- */
-
-static @arg@binsearch_map _@arg@binsearch_map[] = {
- /* If adding new types, make sure to keep them ordered by type num */
- /**begin repeat1
- *
- * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
- * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE, DATETIME, TIMEDELTA, HALF#
- * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
- * longlong, ulonglong, float, double, longdouble,
- * cfloat, cdouble, clongdouble, datetime, timedelta, half#
- */
- {NPY_@TYPE@,
- {
- &@arg@binsearch_left_@suff@,
- &@arg@binsearch_right_@suff@,
- },
- },
- /**end repeat1**/
-};
-
-static PyArray_@Arg@BinSearchFunc *gen@arg@binsearch_map[] = {
- &npy_@arg@binsearch_left,
- &npy_@arg@binsearch_right,
-};
-
-static NPY_INLINE PyArray_@Arg@BinSearchFunc*
-get_@arg@binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side)
-{
- npy_intp nfuncs = ARRAY_SIZE(_@arg@binsearch_map);
- npy_intp min_idx = 0;
- npy_intp max_idx = nfuncs;
- int type = dtype->type_num;
-
- if (side >= NPY_NSEARCHSIDES) {
- return NULL;
- }
-
- /*
- * It seems only fair that a binary search function be searched for
- * using a binary search...
- */
- while (min_idx < max_idx) {
- npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
-
- if (_@arg@binsearch_map[mid_idx].typenum < type) {
- min_idx = mid_idx + 1;
- }
- else {
- max_idx = mid_idx;
- }
- }
-
- if (min_idx < nfuncs &&
- _@arg@binsearch_map[min_idx].typenum == type) {
- return _@arg@binsearch_map[min_idx].@arg@binsearch[side];
- }
-
- if (dtype->f->compare) {
- return gen@arg@binsearch_map[side];
- }
-
- return NULL;
-}
-/**end repeat**/
-
-#undef ARRAY_SIZE
-
-#endif
diff --git a/numpy/core/src/common/npy_config.h b/numpy/core/src/common/npy_config.h
index fd0f1855c..b01eca5ab 100644
--- a/numpy/core/src/common/npy_config.h
+++ b/numpy/core/src/common/npy_config.h
@@ -136,11 +136,23 @@
#undef HAVE_CPOWL
#undef HAVE_CEXPL
+#include <cygwin/version.h>
+#if CYGWIN_VERSION_DLL_MAJOR < 3003
+/* https://cygwin.com/pipermail/cygwin-announce/2021-October/010268.html */
/* Builtin abs reports overflow */
#undef HAVE_CABSL
#undef HAVE_HYPOTL
#endif
+#if CYGWIN_VERSION_DLL_MAJOR < 3002
+/* https://cygwin.com/pipermail/cygwin-announce/2021-March/009987.html */
+/* Segfault */
+#undef HAVE_MODFL
+/* sqrt(-inf) returns -inf instead of -nan */
+#undef HAVE_SQRTL
+#endif
+#endif
+
/* Disable broken gnu trig functions */
#if defined(HAVE_FEATURES_H)
#include <features.h>
diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src
index a2383c45f..1385220f9 100644
--- a/numpy/core/src/common/npy_cpu_features.c.src
+++ b/numpy/core/src/common/npy_cpu_features.c.src
@@ -62,6 +62,7 @@ npy_cpu_features_dict(void)
* AVX512IFMA, AVX512VBMI, AVX512VBMI2, AVX512BITALG,
* AVX512_KNL, AVX512_KNM, AVX512_SKX, AVX512_CLX, AVX512_CNL, AVX512_ICL,
* VSX, VSX2, VSX3,
+ * VX, VXE, VXE2,
* NEON, NEON_FP16, NEON_VFPV4, ASIMD, FPHP, ASIMDHP, ASIMDDP, ASIMDFHM#
*/
if (PyDict_SetItemString(dict, "@feature@",
@@ -509,6 +510,42 @@ npy__cpu_init_features(void)
#endif
}
+/***************** ZARCH ******************/
+
+#elif defined(__s390x__)
+
+#include <sys/auxv.h>
+#ifndef HWCAP_S390_VXE
+ #define HWCAP_S390_VXE 8192
+#endif
+
+#ifndef HWCAP_S390_VXRS_EXT2
+ #define HWCAP_S390_VXRS_EXT2 32768
+#endif
+
+static void
+npy__cpu_init_features(void)
+{
+ memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX);
+
+ unsigned int hwcap = getauxval(AT_HWCAP);
+ if ((hwcap & HWCAP_S390_VX) == 0) {
+ return;
+ }
+
+ if (hwcap & HWCAP_S390_VXRS_EXT2) {
+ npy__cpu_have[NPY_CPU_FEATURE_VX] =
+ npy__cpu_have[NPY_CPU_FEATURE_VXE] =
+ npy__cpu_have[NPY_CPU_FEATURE_VXE2] = 1;
+ return;
+ }
+
+ npy__cpu_have[NPY_CPU_FEATURE_VXE] = (hwcap & HWCAP_S390_VXE) != 0;
+
+ npy__cpu_have[NPY_CPU_FEATURE_VX] = 1;
+}
+
+
/***************** ARM ******************/
#elif defined(__arm__) || defined(__aarch64__)
diff --git a/numpy/core/src/common/npy_cpu_features.h b/numpy/core/src/common/npy_cpu_features.h
index ce1fc822a..1f52a445d 100644
--- a/numpy/core/src/common/npy_cpu_features.h
+++ b/numpy/core/src/common/npy_cpu_features.h
@@ -82,6 +82,15 @@ enum npy_cpu_features
// ARMv8.2 single&half-precision multiply
NPY_CPU_FEATURE_ASIMDFHM = 307,
+ // IBM/ZARCH
+ NPY_CPU_FEATURE_VX = 350,
+
+ // Vector-Enhancements Facility 1
+ NPY_CPU_FEATURE_VXE = 351,
+
+ // Vector-Enhancements Facility 2
+ NPY_CPU_FEATURE_VXE2 = 352,
+
NPY_CPU_FEATURE_MAX
};
@@ -138,6 +147,7 @@ npy_cpu_features_dict(void);
* On aarch64: ['NEON', 'NEON_FP16', 'NEON_VPFV4', 'ASIMD']
* On ppc64: []
* On ppc64le: ['VSX', 'VSX2']
+ * On s390x: []
* On any other arch or if the optimization is disabled: []
*/
NPY_VISIBILITY_HIDDEN PyObject *
@@ -159,6 +169,7 @@ npy_cpu_baseline_list(void);
* On aarch64: ['ASIMDHP', 'ASIMDDP', 'ASIMDFHM']
* On ppc64: ['VSX', 'VSX2', 'VSX3']
* On ppc64le: ['VSX3']
+ * On s390x: ['VX', 'VXE', 'VXE2']
* On any other arch or if the optimization is disabled: []
*/
NPY_VISIBILITY_HIDDEN PyObject *
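
Editor's note: the new IBM Z (s390x) flags VX/VXE/VXE2 are reported through the same runtime feature dictionary as the other SIMD features, so their detection can be checked from Python. Sketch using the internal feature dict (a private attribute, shown only for illustration):

    import numpy as np
    feats = np.core._multiarray_umath.__cpu_features__
    print({k: feats[k] for k in ("VX", "VXE", "VXE2") if k in feats})
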
diff --git a/numpy/core/src/common/npy_sort.h.src b/numpy/core/src/common/npy_sort.h.src
index b4a1e9b0c..a3f556f56 100644
--- a/numpy/core/src/common/npy_sort.h.src
+++ b/numpy/core/src/common/npy_sort.h.src
@@ -18,6 +18,11 @@ static NPY_INLINE int npy_get_msb(npy_uintp unum)
return depth_limit;
}
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
/*
*****************************************************************************
@@ -102,4 +107,8 @@ NPY_NO_EXPORT int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *ar
NPY_NO_EXPORT int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
NPY_NO_EXPORT int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/numpy/core/src/common/numpy_tag.h b/numpy/core/src/common/numpy_tag.h
index dc8d5286b..60e9b02cd 100644
--- a/numpy/core/src/common/numpy_tag.h
+++ b/numpy/core/src/common/numpy_tag.h
@@ -1,8 +1,15 @@
#ifndef _NPY_COMMON_TAG_H_
#define _NPY_COMMON_TAG_H_
+#include "../npysort/npysort_common.h"
+
namespace npy {
+template<typename... tags>
+struct taglist {
+ static constexpr unsigned size = sizeof...(tags);
+};
+
struct integral_tag {
};
struct floating_point_tag {
@@ -14,63 +21,203 @@ struct date_tag {
struct bool_tag : integral_tag {
using type = npy_bool;
+ static constexpr NPY_TYPES type_value = NPY_BOOL;
+ static int less(type const& a, type const& b) {
+ return BOOL_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct byte_tag : integral_tag {
using type = npy_byte;
+ static constexpr NPY_TYPES type_value = NPY_BYTE;
+ static int less(type const& a, type const& b) {
+ return BYTE_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct ubyte_tag : integral_tag {
using type = npy_ubyte;
+ static constexpr NPY_TYPES type_value = NPY_UBYTE;
+ static int less(type const& a, type const& b) {
+ return UBYTE_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct short_tag : integral_tag {
using type = npy_short;
+ static constexpr NPY_TYPES type_value = NPY_SHORT;
+ static int less(type const& a, type const& b) {
+ return SHORT_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct ushort_tag : integral_tag {
using type = npy_ushort;
+ static constexpr NPY_TYPES type_value = NPY_USHORT;
+ static int less(type const& a, type const& b) {
+ return USHORT_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct int_tag : integral_tag {
using type = npy_int;
+ static constexpr NPY_TYPES type_value = NPY_INT;
+ static int less(type const& a, type const& b) {
+ return INT_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct uint_tag : integral_tag {
using type = npy_uint;
+ static constexpr NPY_TYPES type_value = NPY_UINT;
+ static int less(type const& a, type const& b) {
+ return UINT_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct long_tag : integral_tag {
using type = npy_long;
+ static constexpr NPY_TYPES type_value = NPY_LONG;
+ static int less(type const& a, type const& b) {
+ return LONG_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct ulong_tag : integral_tag {
using type = npy_ulong;
+ static constexpr NPY_TYPES type_value = NPY_ULONG;
+ static int less(type const& a, type const& b) {
+ return ULONG_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct longlong_tag : integral_tag {
using type = npy_longlong;
+ static constexpr NPY_TYPES type_value = NPY_LONGLONG;
+ static int less(type const& a, type const& b) {
+ return LONGLONG_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct ulonglong_tag : integral_tag {
using type = npy_ulonglong;
+ static constexpr NPY_TYPES type_value = NPY_ULONGLONG;
+ static int less(type const& a, type const& b) {
+ return ULONGLONG_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct half_tag {
using type = npy_half;
+ static constexpr NPY_TYPES type_value = NPY_HALF;
+ static int less(type const& a, type const& b) {
+ return HALF_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct float_tag : floating_point_tag {
using type = npy_float;
+ static constexpr NPY_TYPES type_value = NPY_FLOAT;
+ static int less(type const& a, type const& b) {
+ return FLOAT_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct double_tag : floating_point_tag {
using type = npy_double;
+ static constexpr NPY_TYPES type_value = NPY_DOUBLE;
+ static int less(type const& a, type const& b) {
+ return DOUBLE_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct longdouble_tag : floating_point_tag {
using type = npy_longdouble;
+ static constexpr NPY_TYPES type_value = NPY_LONGDOUBLE;
+ static int less(type const& a, type const& b) {
+ return LONGDOUBLE_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct cfloat_tag : complex_tag {
using type = npy_cfloat;
+ static constexpr NPY_TYPES type_value = NPY_CFLOAT;
+ static int less(type const& a, type const& b) {
+ return CFLOAT_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct cdouble_tag : complex_tag {
using type = npy_cdouble;
+ static constexpr NPY_TYPES type_value = NPY_CDOUBLE;
+ static int less(type const& a, type const& b) {
+ return CDOUBLE_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct clongdouble_tag : complex_tag {
using type = npy_clongdouble;
+ static constexpr NPY_TYPES type_value = NPY_CLONGDOUBLE;
+ static int less(type const& a, type const& b) {
+ return CLONGDOUBLE_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct datetime_tag : date_tag {
using type = npy_datetime;
+ static constexpr NPY_TYPES type_value = NPY_DATETIME;
+ static int less(type const& a, type const& b) {
+ return DATETIME_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
struct timedelta_tag : date_tag {
using type = npy_timedelta;
+ static constexpr NPY_TYPES type_value = NPY_TIMEDELTA;
+ static int less(type const& a, type const& b) {
+ return TIMEDELTA_LT(a, b);
+ }
+ static int less_equal(type const& a, type const& b) {
+ return !less(b, a);
+ }
};
} // namespace npy
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index 9486b7cff..36937629d 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -797,25 +797,6 @@ npy_char_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
return (PyObject *)descr;
}
-/* used to test UPDATEIFCOPY usage emits deprecation warning */
-static PyObject*
-npy_updateifcopy_deprecation(PyObject* NPY_UNUSED(self), PyObject* args)
-{
- int flags;
- PyObject* array;
- if (!PyArray_Check(args)) {
- PyErr_SetString(PyExc_TypeError, "test needs ndarray input");
- return NULL;
- }
- flags = NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY;
- array = PyArray_FromArray((PyArrayObject*)args, NULL, flags);
- if (array == NULL)
- return NULL;
- PyArray_ResolveWritebackIfCopy((PyArrayObject*)array);
- Py_DECREF(array);
- Py_RETURN_NONE;
-}
-
/* used to test PyArray_As1D usage emits not implemented error */
static PyObject*
npy_pyarrayas1d_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
@@ -1086,20 +1067,18 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args))
PyArrayMethodObject *cast = (PyArrayMethodObject *)cast_obj;
/* Pass some information about this cast out! */
- PyObject *cast_info = Py_BuildValue("{sOsOsisisisisisssi}",
+ PyObject *cast_info = Py_BuildValue("{sOsOsisisisisiss}",
"from", from_dtype,
"to", to_dtype,
"legacy", (cast->name != NULL &&
strncmp(cast->name, "legacy_", 7) == 0),
- "casting", cast->casting & ~_NPY_CAST_IS_VIEW,
+ "casting", cast->casting,
"requires_pyapi", cast->flags & NPY_METH_REQUIRES_PYAPI,
"supports_unaligned",
cast->flags & NPY_METH_SUPPORTS_UNALIGNED,
"no_floatingpoint_errors",
cast->flags & NPY_METH_NO_FLOATINGPOINT_ERRORS,
- "name", cast->name,
- "cast_is_view",
- cast->casting & _NPY_CAST_IS_VIEW);
+ "name", cast->name);
if (cast_info == NULL) {
goto fail;
}
@@ -2414,9 +2393,6 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"npy_char_deprecation",
npy_char_deprecation,
METH_NOARGS, NULL},
- {"npy_updateifcopy_deprecation",
- npy_updateifcopy_deprecation,
- METH_O, NULL},
{"npy_pyarrayas1d_deprecation",
npy_pyarrayas1d_deprecation,
METH_NOARGS, NULL},
diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c
index d93dac506..b421d9e4f 100644
--- a/numpy/core/src/multiarray/array_method.c
+++ b/numpy/core/src/multiarray/array_method.c
@@ -48,13 +48,19 @@
*
* We could allow setting the output descriptors specifically to simplify
* this step.
+ *
+ * Note that the default version will indicate that the cast can be done
+ * as using `arr.view(new_dtype)` if the default cast-safety is
+ * set to "no-cast". This default function cannot be used if a view may
+ * be sufficient for casting but the cast is not always "no-cast".
*/
static NPY_CASTING
default_resolve_descriptors(
PyArrayMethodObject *method,
PyArray_DTypeMeta **dtypes,
PyArray_Descr **input_descrs,
- PyArray_Descr **output_descrs)
+ PyArray_Descr **output_descrs,
+ npy_intp *view_offset)
{
int nin = method->nin;
int nout = method->nout;
@@ -76,6 +82,13 @@ default_resolve_descriptors(
* abstract ones or unspecified outputs). We can use the common-dtype
* operation to provide a default here.
*/
+ if (method->casting == NPY_NO_CASTING) {
+ /*
+ * By (current) definition no-casting should imply viewable. This
+ * is currently indicated for example for object to object cast.
+ */
+ *view_offset = 0;
+ }
return method->casting;
fail:
@@ -102,9 +115,10 @@ is_contiguous(
/**
* The default method to fetch the correct loop for a cast or ufunc
* (at the time of writing only casts).
- * The default version can return loops explicitly registered during method
- * creation. It does specialize contiguous loops, although has to check
- * all descriptors itemsizes for this.
+ * Note that the default function provided here will only indicate that a cast
+ * can be done as a view (i.e., arr.view(new_dtype)) when this is trivially
+ * true, i.e., for cast safety "no-cast". It will not recognize view as an
+ * option for other casts (e.g., viewing '>i8' as '>i4' with an offset of 4).
*
* @param context
* @param aligned
@@ -166,7 +180,7 @@ validate_spec(PyArrayMethod_Spec *spec)
"not exceed %d. (method: %s)", NPY_MAXARGS, spec->name);
return -1;
}
- switch (spec->casting & ~_NPY_CAST_IS_VIEW) {
+ switch (spec->casting) {
case NPY_NO_CASTING:
case NPY_EQUIV_CASTING:
case NPY_SAFE_CASTING:
@@ -495,8 +509,9 @@ boundarraymethod_dealloc(PyObject *self)
/*
- * Calls resolve_descriptors() and returns the casting level and the resolved
- * descriptors as a tuple. If the operation is impossible returns (-1, None).
+ * Calls resolve_descriptors() and returns the casting level, the resolved
+ * descriptors as a tuple, and a possible view-offset (integer or None).
+ * If the operation is impossible returns (-1, None, None).
* May raise an error, but usually should not.
* The function validates the casting attribute compared to the returned
* casting level.
@@ -551,14 +566,15 @@ boundarraymethod__resolve_descripors(
}
}
+ npy_intp view_offset = NPY_MIN_INTP;
NPY_CASTING casting = self->method->resolve_descriptors(
- self->method, self->dtypes, given_descrs, loop_descrs);
+ self->method, self->dtypes, given_descrs, loop_descrs, &view_offset);
if (casting < 0 && PyErr_Occurred()) {
return NULL;
}
else if (casting < 0) {
- return Py_BuildValue("iO", casting, Py_None);
+ return Py_BuildValue("iO", casting, Py_None, Py_None);
}
PyObject *result_tuple = PyTuple_New(nin + nout);
@@ -570,9 +586,22 @@ boundarraymethod__resolve_descripors(
PyTuple_SET_ITEM(result_tuple, i, (PyObject *)loop_descrs[i]);
}
+ PyObject *view_offset_obj;
+ if (view_offset == NPY_MIN_INTP) {
+ Py_INCREF(Py_None);
+ view_offset_obj = Py_None;
+ }
+ else {
+ view_offset_obj = PyLong_FromSsize_t(view_offset);
+ if (view_offset_obj == NULL) {
+ Py_DECREF(result_tuple);
+ return NULL;
+ }
+ }
+
/*
- * The casting flags should be the most generic casting level (except the
- * cast-is-view flag. If no input is parametric, it must match exactly.
+ * The casting flags should be the most generic casting level.
+ * If no input is parametric, it must match exactly.
*
* (Note that these checks are only debugging checks.)
*/
@@ -584,7 +613,7 @@ boundarraymethod__resolve_descripors(
}
}
if (self->method->casting != -1) {
- NPY_CASTING cast = casting & ~_NPY_CAST_IS_VIEW;
+ NPY_CASTING cast = casting;
if (self->method->casting !=
PyArray_MinCastSafety(cast, self->method->casting)) {
PyErr_Format(PyExc_RuntimeError,
@@ -592,6 +621,7 @@ boundarraymethod__resolve_descripors(
"(set level is %d, got %d for method %s)",
self->method->casting, cast, self->method->name);
Py_DECREF(result_tuple);
+ Py_DECREF(view_offset_obj);
return NULL;
}
if (!parametric) {
@@ -608,12 +638,13 @@ boundarraymethod__resolve_descripors(
"(set level is %d, got %d for method %s)",
self->method->casting, cast, self->method->name);
Py_DECREF(result_tuple);
+ Py_DECREF(view_offset_obj);
return NULL;
}
}
}
- return Py_BuildValue("iN", casting, result_tuple);
+ return Py_BuildValue("iNN", casting, result_tuple, view_offset_obj);
}
@@ -694,8 +725,9 @@ boundarraymethod__simple_strided_call(
return NULL;
}
+ npy_intp view_offset = NPY_MIN_INTP;
NPY_CASTING casting = self->method->resolve_descriptors(
- self->method, self->dtypes, descrs, out_descrs);
+ self->method, self->dtypes, descrs, out_descrs, &view_offset);
if (casting < 0) {
PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL;
diff --git a/numpy/core/src/multiarray/array_method.h b/numpy/core/src/multiarray/array_method.h
index 7b7372bd0..35b9033e0 100644
--- a/numpy/core/src/multiarray/array_method.h
+++ b/numpy/core/src/multiarray/array_method.h
@@ -70,7 +70,8 @@ typedef NPY_CASTING (resolve_descriptors_function)(
struct PyArrayMethodObject_tag *method,
PyArray_DTypeMeta **dtypes,
PyArray_Descr **given_descrs,
- PyArray_Descr **loop_descrs);
+ PyArray_Descr **loop_descrs,
+ npy_intp *view_offset);
typedef int (get_loop_function)(
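
Editor's note: with the added `view_offset` output parameter, `resolve_descriptors` now reports not only the cast safety and the resolved descriptors but also whether the cast can be performed as a view (offset 0, or `NPY_MIN_INTP` for "not a view"), replacing the removed `_NPY_CAST_IS_VIEW` flag. On the Python side the bound method returns a 3-tuple; a hedged sketch using the private casting test API (`_get_castingimpl` and `_resolve_descriptors` are internal names and may change):

    import numpy as np
    from numpy.core._multiarray_umath import _get_castingimpl

    cast = _get_castingimpl(type(np.dtype("i8")), type(np.dtype("i8")))
    casting, (src, dst), view_offset = cast._resolve_descriptors(
        (np.dtype("i8"), np.dtype("i8")))
    # an identical-dtype cast is expected to report view_offset == 0
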
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 1b197d0f2..3f080d902 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -75,36 +75,19 @@ PyArray_Size(PyObject *op)
}
}
-/*NUMPY_API
- *
- * Precondition: 'arr' is a copy of 'base' (though possibly with different
- * strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the
- * ->base pointer on 'arr', so that when 'arr' is destructed, it will copy any
- * changes back to 'base'. DEPRECATED, use PyArray_SetWritebackIfCopyBase
- *
- * Steals a reference to 'base'.
- *
- * Returns 0 on success, -1 on failure.
- */
+/*NUMPY_API */
NPY_NO_EXPORT int
PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
{
- int ret;
- /* 2017-Nov -10 1.14 (for PyPy only) */
- /* 2018-April-21 1.15 (all Python implementations) */
- if (DEPRECATE("PyArray_SetUpdateIfCopyBase is deprecated, use "
- "PyArray_SetWritebackIfCopyBase instead, and be sure to call "
- "PyArray_ResolveWritebackIfCopy before the array is deallocated, "
- "i.e. before the last call to Py_DECREF. If cleaning up from an "
- "error, PyArray_DiscardWritebackIfCopy may be called instead to "
- "throw away the scratch buffer.") < 0)
- return -1;
- ret = PyArray_SetWritebackIfCopyBase(arr, base);
- if (ret >=0) {
- PyArray_ENABLEFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
- PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);
- }
- return ret;
+ /* 2021-Dec-15 1.23*/
+ PyErr_SetString(PyExc_RuntimeError,
+ "PyArray_SetUpdateIfCopyBase is disabled, use "
+ "PyArray_SetWritebackIfCopyBase instead, and be sure to call "
+ "PyArray_ResolveWritebackIfCopy before the array is deallocated, "
+ "i.e. before the last call to Py_DECREF. If cleaning up from an "
+ "error, PyArray_DiscardWritebackIfCopy may be called instead to "
+ "throw away the scratch buffer.");
+ return -1;
}
/*NUMPY_API
@@ -377,9 +360,9 @@ PyArray_ResolveWritebackIfCopy(PyArrayObject * self)
{
PyArrayObject_fields *fa = (PyArrayObject_fields *)self;
if (fa && fa->base) {
- if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) || (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) {
+ if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) {
/*
- * UPDATEIFCOPY or WRITEBACKIFCOPY means that fa->base's data
+ * WRITEBACKIFCOPY means that fa->base's data
* should be updated with the contents
* of self.
* fa->base->flags is not WRITEABLE to protect the relationship
@@ -388,7 +371,6 @@ PyArray_ResolveWritebackIfCopy(PyArrayObject * self)
int retval = 0;
PyArray_ENABLEFLAGS(((PyArrayObject *)fa->base),
NPY_ARRAY_WRITEABLE);
- PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY);
PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY);
retval = PyArray_CopyAnyInto((PyArrayObject *)fa->base, self);
Py_DECREF(fa->base);
@@ -462,25 +444,6 @@ array_dealloc(PyArrayObject *self)
PyErr_Clear();
}
}
- if (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY) {
- /* DEPRECATED, remove once the flag is removed */
- char const * msg = "UPDATEIFCOPY detected in array_dealloc. "
- " Required call to PyArray_ResolveWritebackIfCopy or "
- "PyArray_DiscardWritebackIfCopy is missing";
- /*
- * prevent reaching 0 twice and thus recursing into dealloc.
- * Increasing sys.gettotalrefcount, but path should not be taken.
- */
- Py_INCREF(self);
- /* 2017-Nov-10 1.14 */
- WARN_IN_DEALLOC(PyExc_DeprecationWarning, msg);
- retval = PyArray_ResolveWritebackIfCopy(self);
- if (retval < 0)
- {
- PyErr_Print();
- PyErr_Clear();
- }
- }
/*
* If fa->base is non-NULL, it is something
* to DECREF -- either a view or a buffer object
@@ -493,14 +456,6 @@ array_dealloc(PyArrayObject *self)
if (PyDataType_FLAGCHK(fa->descr, NPY_ITEM_REFCOUNT)) {
PyArray_XDECREF(self);
}
- /*
- * Allocation will never be 0, see comment in ctors.c
- * line 820
- */
- size_t nbytes = PyArray_NBYTES(self);
- if (nbytes == 0) {
- nbytes = fa->descr->elsize ? fa->descr->elsize : 1;
- }
if (fa->mem_handler == NULL) {
char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY");
if ((env != NULL) && (strncmp(env, "1", 1) == 0)) {
@@ -511,7 +466,16 @@ array_dealloc(PyArrayObject *self)
}
// Guess at malloc/free ???
free(fa->data);
- } else {
+ }
+ else {
+ /*
+ * In theory this should use `PyArray_NBYTES_ALLOCATED`, but that seems to
+ * differ from the actual allocation somewhere. So instead just use the
+ * knowledge that an allocation of 0 bytes is impossible.
+ */
+ size_t nbytes = PyArray_NBYTES(self);
+ if (nbytes == 0) {
+ nbytes = 1;
+ }
PyDataMem_UserFREE(fa->data, nbytes, fa->mem_handler);
Py_DECREF(fa->mem_handler);
}
@@ -571,8 +535,6 @@ PyArray_DebugPrint(PyArrayObject *obj)
printf(" NPY_ALIGNED");
if (fobj->flags & NPY_ARRAY_WRITEABLE)
printf(" NPY_WRITEABLE");
- if (fobj->flags & NPY_ARRAY_UPDATEIFCOPY)
- printf(" NPY_UPDATEIFCOPY");
if (fobj->flags & NPY_ARRAY_WRITEBACKIFCOPY)
printf(" NPY_WRITEBACKIFCOPY");
printf("\n");
@@ -660,15 +622,15 @@ array_might_be_written(PyArrayObject *obj)
/*NUMPY_API
*
- * This function does nothing if obj is writeable, and raises an exception
- * (and returns -1) if obj is not writeable. It may also do other
- * house-keeping, such as issuing warnings on arrays which are transitioning
- * to become views. Always call this function at some point before writing to
- * an array.
+ * This function does nothing and returns 0 if *obj* is writeable.
+ * It raises an exception and returns -1 if *obj* is not writeable.
+ * It may also do other house-keeping, such as issuing warnings on
+ * arrays which are transitioning to become views. Always call this
+ * function at some point before writing to an array.
*
- * 'name' is a name for the array, used to give better error
- * messages. Something like "assignment destination", "output array", or even
- * just "array".
+ * *name* is a name for the array, used to give better error messages.
+ * It can be something like "assignment destination", "output array",
+ * or even just "array".
*/
NPY_NO_EXPORT int
PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name)
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index 327f685d4..a985a2308 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -175,7 +175,7 @@ _PyArray_ArgMinMaxCommon(PyArrayObject *op,
NPY_END_THREADS_DESCR(PyArray_DESCR(ap));
Py_DECREF(ap);
- /* Trigger the UPDATEIFCOPY/WRITEBACKIFCOPY if necessary */
+ /* Trigger the WRITEBACKIFCOPY if necessary */
if (out != NULL && out != rp) {
PyArray_ResolveWritebackIfCopy(rp);
Py_DECREF(rp);
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index b3526c4c1..85fd3aab1 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -292,6 +292,35 @@ npy_memchr(char * haystack, char needle,
return p;
}
+/*
+ * Helper to work around the current allocation strategy: empty arrays are
+ * not allocated a single byte, but enough space for an array in which all
+ * 0-length dimensions are replaced with size 1 (if the itemsize is not 0).
+ *
+ * This means that we can fill in nice (nonzero) strides and still handle
+ * slicing direct math without being in danger of leaving the allocated byte
+ * bounds.
+ * In practice, that probably does not matter, but in principle this would be
+ * undefined behaviour in C. Another solution may be to force the strides
+ * to 0 in these cases. See also gh-15788.
+ *
+ * Unlike the code in `PyArray_NewFromDescr`, this does no overflow checks.
+ */
+static NPY_INLINE npy_intp
+PyArray_NBYTES_ALLOCATED(PyArrayObject *arr)
+{
+ if (PyArray_ITEMSIZE(arr) == 0) {
+ return 1;
+ }
+ npy_intp nbytes = PyArray_ITEMSIZE(arr);
+ for (int i = 0; i < PyArray_NDIM(arr); i++) {
+ if (PyArray_DIMS(arr)[i] != 0) {
+ nbytes *= PyArray_DIMS(arr)[i];
+ }
+ }
+ return nbytes;
+}
+
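A standalone sketch of what the helper computes, with hypothetical names and plain C types (it mirrors the skip-zero-dimensions rule above, outside the NumPy API):

#include <stdio.h>
#include <stddef.h>

/* Compute the allocated size by skipping 0-length dimensions, never 0 bytes. */
static size_t
nbytes_allocated(size_t itemsize, int ndim, const size_t *dims)
{
    if (itemsize == 0) {
        return 1;
    }
    size_t nbytes = itemsize;
    for (int i = 0; i < ndim; i++) {
        if (dims[i] != 0) {
            nbytes *= dims[i];
        }
    }
    return nbytes;
}

int main(void)
{
    size_t dims[] = {3, 0, 2};
    /* An "empty" (3, 0, 2) array of 8-byte items still owns 3 * 2 * 8 = 48 bytes. */
    printf("%zu\n", nbytes_allocated(8, 3, dims));
    return 0;
}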
/*
* Simple helper to create a tuple from an array of items. The `make_null_none`
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index ef101a78b..a1de580d9 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -172,7 +172,7 @@ PyArray_CopyConverter(PyObject *obj, _PyArray_CopyMode *copymode) {
}
int int_copymode;
- PyObject* numpy_CopyMode = NULL;
+ static PyObject* numpy_CopyMode = NULL;
npy_cache_import("numpy", "_CopyMode", &numpy_CopyMode);
if (numpy_CopyMode != NULL && (PyObject *)Py_TYPE(obj) == numpy_CopyMode) {
@@ -182,6 +182,7 @@ PyArray_CopyConverter(PyObject *obj, _PyArray_CopyMode *copymode) {
}
int_copymode = (int)PyLong_AsLong(mode_value);
+ Py_DECREF(mode_value);
if (error_converting(int_copymode)) {
return NPY_FAIL;
}
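The two-line fix above makes the cached `_CopyMode` pointer `static` (so `npy_cache_import` actually caches it across calls) and releases the temporary `mode_value` reference. A standalone sketch of the same caching-plus-DECREF pattern using only the stock CPython API; the `.value` attribute lookup is illustrative, and the function assumes an already initialized interpreter:

#include <Python.h>

/* Look up an enum member's integer value, caching the class lookup in a
 * `static` pointer so the import only happens once per process. */
static long
enum_member_value(PyObject *member)
{
    static PyObject *enum_cls = NULL;      /* survives across calls */
    if (enum_cls == NULL) {
        PyObject *mod = PyImport_ImportModule("numpy");
        if (mod == NULL) {
            return -1;
        }
        enum_cls = PyObject_GetAttrString(mod, "_CopyMode");
        Py_DECREF(mod);
        if (enum_cls == NULL) {
            return -1;
        }
    }
    if ((PyObject *)Py_TYPE(member) != enum_cls) {
        PyErr_SetString(PyExc_TypeError, "expected a _CopyMode member");
        return -1;
    }
    PyObject *value = PyObject_GetAttrString(member, "value");
    if (value == NULL) {
        return -1;
    }
    long result = PyLong_AsLong(value);
    Py_DECREF(value);                      /* the reference the fix releases */
    /* NOTE: -1 is ambiguous here; a real caller would check PyErr_Occurred(). */
    return result;
}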
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 3135d6989..b21fc3cfa 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -223,14 +223,11 @@ PyArray_MinCastSafety(NPY_CASTING casting1, NPY_CASTING casting2)
if (casting1 < 0 || casting2 < 0) {
return -1;
}
- NPY_CASTING view = casting1 & casting2 & _NPY_CAST_IS_VIEW;
- casting1 = casting1 & ~_NPY_CAST_IS_VIEW;
- casting2 = casting2 & ~_NPY_CAST_IS_VIEW;
/* larger casting values are less safe */
if (casting1 > casting2) {
- return casting1 | view;
+ return casting1;
}
- return casting2 | view;
+ return casting2;
}
@@ -363,29 +360,41 @@ PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp)
static NPY_CASTING
_get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl,
- PyArray_DTypeMeta *dtypes[2], PyArray_Descr *from, PyArray_Descr *to)
+ PyArray_DTypeMeta *dtypes[2], PyArray_Descr *from, PyArray_Descr *to,
+ npy_intp *view_offset)
{
PyArray_Descr *descrs[2] = {from, to};
PyArray_Descr *out_descrs[2];
+ *view_offset = NPY_MIN_INTP;
NPY_CASTING casting = castingimpl->resolve_descriptors(
- castingimpl, dtypes, descrs, out_descrs);
+ castingimpl, dtypes, descrs, out_descrs, view_offset);
if (casting < 0) {
return -1;
}
/* The returned descriptors may not match, requiring a second check */
if (out_descrs[0] != descrs[0]) {
- NPY_CASTING from_casting = PyArray_GetCastSafety(
- descrs[0], out_descrs[0], NULL);
+ npy_intp from_offset = NPY_MIN_INTP;
+ NPY_CASTING from_casting = PyArray_GetCastInfo(
+ descrs[0], out_descrs[0], NULL, &from_offset);
casting = PyArray_MinCastSafety(casting, from_casting);
+ if (from_offset != *view_offset) {
+ /* `view_offset` differs: The multi-step cast cannot be a view. */
+ *view_offset = NPY_MIN_INTP;
+ }
if (casting < 0) {
goto finish;
}
}
if (descrs[1] != NULL && out_descrs[1] != descrs[1]) {
- NPY_CASTING from_casting = PyArray_GetCastSafety(
- descrs[1], out_descrs[1], NULL);
+ npy_intp from_offset = NPY_MIN_INTP;
+ NPY_CASTING from_casting = PyArray_GetCastInfo(
+ descrs[1], out_descrs[1], NULL, &from_offset);
casting = PyArray_MinCastSafety(casting, from_casting);
+ if (from_offset != *view_offset) {
+ /* `view_offset` differs: The multi-step cast cannot be a view. */
+ *view_offset = NPY_MIN_INTP;
+ }
if (casting < 0) {
goto finish;
}
@@ -396,15 +405,21 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl,
Py_DECREF(out_descrs[1]);
/*
* Check for less harmful non-standard returns. The following two returns
- * should never happen. They would be roughly equivalent, but less precise,
- * versions of `(NPY_NO_CASTING|_NPY_CAST_IS_VIEW)`.
- * 1. No-casting must imply cast-is-view.
- * 2. Equivalent-casting + cast-is-view is (currently) the definition
- * of a "no" cast (there may be reasons to relax this).
- * Note that e.g. `(NPY_UNSAFE_CASTING|_NPY_CAST_IS_VIEW)` is valid.
+ * should never happen:
+ * 1. No-casting must imply a view offset of 0.
+ * 2. Equivalent-casting + 0 view offset is (usually) the definition
+ * of a "no" cast. However, changing the order of fields can also
+ * create descriptors that are not equivalent but views.
+ * Note that unsafe casts can have a view offset. For example, in
+ * principle, casting `<i8` to `<i4` is a cast with 0 offset.
*/
- assert(casting != NPY_NO_CASTING);
- assert(casting != (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW));
+ if (*view_offset != 0) {
+ assert(casting != NPY_NO_CASTING);
+ }
+ else {
+ assert(casting != NPY_EQUIV_CASTING
+ || (PyDataType_HASFIELDS(from) && PyDataType_HASFIELDS(to)));
+ }
return casting;
}
@@ -420,11 +435,13 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl,
* @param to The descriptor to cast to (may be NULL)
* @param to_dtype If `to` is NULL, must pass the to_dtype (otherwise this
* is ignored).
+ * @param[out] view_offset
* @return NPY_CASTING or -1 on error or if the cast is not possible.
*/
NPY_NO_EXPORT NPY_CASTING
-PyArray_GetCastSafety(
- PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype)
+PyArray_GetCastInfo(
+ PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype,
+ npy_intp *view_offset)
{
if (to != NULL) {
to_dtype = NPY_DTYPE(to);
@@ -441,7 +458,7 @@ PyArray_GetCastSafety(
PyArrayMethodObject *castingimpl = (PyArrayMethodObject *)meth;
PyArray_DTypeMeta *dtypes[2] = {NPY_DTYPE(from), to_dtype};
NPY_CASTING casting = _get_cast_safety_from_castingimpl(castingimpl,
- dtypes, from, to);
+ dtypes, from, to, view_offset);
Py_DECREF(meth);
return casting;
@@ -449,8 +466,8 @@ PyArray_GetCastSafety(
/**
- * Check whether a cast is safe, see also `PyArray_GetCastSafety` for
- * a similar function. Unlike GetCastSafety, this function checks the
+ * Check whether a cast is safe, see also `PyArray_GetCastInfo` for
+ * a similar function. Unlike GetCastInfo, this function checks the
* `castingimpl->casting` when available. This allows for two things:
*
* 1. It avoids calling `resolve_descriptors` in some cases.
@@ -493,8 +510,9 @@ PyArray_CheckCastSafety(NPY_CASTING casting,
}
PyArray_DTypeMeta *dtypes[2] = {NPY_DTYPE(from), to_dtype};
+ npy_intp view_offset;
NPY_CASTING safety = _get_cast_safety_from_castingimpl(castingimpl,
- dtypes, from, to);
+ dtypes, from, to, &view_offset);
Py_DECREF(meth);
/* If casting is the smaller (or equal) safety we match */
if (safety < 0) {
@@ -971,8 +989,9 @@ PyArray_CastDescrToDType(PyArray_Descr *descr, PyArray_DTypeMeta *given_DType)
PyArray_Descr *loop_descrs[2];
PyArrayMethodObject *meth = (PyArrayMethodObject *)tmp;
+ npy_intp view_offset = NPY_MIN_INTP;
NPY_CASTING casting = meth->resolve_descriptors(
- meth, dtypes, given_descrs, loop_descrs);
+ meth, dtypes, given_descrs, loop_descrs, &view_offset);
Py_DECREF(tmp);
if (casting < 0) {
goto error;
@@ -2289,7 +2308,8 @@ legacy_same_dtype_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]),
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *view_offset)
{
Py_INCREF(given_descrs[0]);
loop_descrs[0] = given_descrs[0];
@@ -2315,7 +2335,8 @@ legacy_same_dtype_resolve_descriptors(
*/
if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) ==
PyDataType_ISNOTSWAPPED(loop_descrs[1])) {
- return NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
+ *view_offset = 0;
+ return NPY_NO_CASTING;
}
return NPY_EQUIV_CASTING;
}
@@ -2354,7 +2375,8 @@ simple_cast_resolve_descriptors(
PyArrayMethodObject *self,
PyArray_DTypeMeta *dtypes[2],
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *view_offset)
{
assert(NPY_DT_is_legacy(dtypes[0]) && NPY_DT_is_legacy(dtypes[1]));
@@ -2378,7 +2400,8 @@ simple_cast_resolve_descriptors(
}
if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) ==
PyDataType_ISNOTSWAPPED(loop_descrs[1])) {
- return NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
+ *view_offset = 0;
+ return NPY_NO_CASTING;
}
return NPY_EQUIV_CASTING;
}
@@ -2572,7 +2595,8 @@ cast_to_string_resolve_descriptors(
PyArrayMethodObject *self,
PyArray_DTypeMeta *dtypes[2],
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *NPY_UNUSED(view_offset))
{
/*
* NOTE: The following code used to be part of PyArray_AdaptFlexibleDType
@@ -2723,7 +2747,8 @@ string_to_string_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]),
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *view_offset)
{
Py_INCREF(given_descrs[0]);
loop_descrs[0] = given_descrs[0];
@@ -2739,19 +2764,29 @@ string_to_string_resolve_descriptors(
loop_descrs[1] = given_descrs[1];
}
- if (loop_descrs[0]->elsize == loop_descrs[1]->elsize) {
- if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) ==
- PyDataType_ISNOTSWAPPED(loop_descrs[1])) {
- return NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
+ if (loop_descrs[0]->elsize < loop_descrs[1]->elsize) {
+ /* New string is longer: safe but cannot be a view */
+ return NPY_SAFE_CASTING;
+ }
+ else {
+ /* New string fits into old: if the byte-order matches, it can be a view */
+ int not_swapped = (PyDataType_ISNOTSWAPPED(loop_descrs[0])
+ == PyDataType_ISNOTSWAPPED(loop_descrs[1]));
+ if (not_swapped) {
+ *view_offset = 0;
+ }
+
+ if (loop_descrs[0]->elsize > loop_descrs[1]->elsize) {
+ return NPY_SAME_KIND_CASTING;
+ }
+ /* The strings have the same length: */
+ if (not_swapped) {
+ return NPY_NO_CASTING;
}
else {
return NPY_EQUIV_CASTING;
}
}
- else if (loop_descrs[0]->elsize <= loop_descrs[1]->elsize) {
- return NPY_SAFE_CASTING;
- }
- return NPY_SAME_KIND_CASTING;
}
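The restructured branches above amount to a small decision table for string-to-string casts: growing is safe but never a view, shrinking is same-kind, equal length is no/equiv casting, and an offset-0 view is possible whenever the byte order matches and the result fits. A standalone sketch of that table with illustrative names (not the NumPy enums):

#include <stdio.h>

typedef enum { NO, EQUIV, SAFE, SAME_KIND } safety_t;

/* Given source/destination string lengths and whether their byte order
 * matches, report the cast safety and whether an offset-0 view is possible. */
static safety_t
string_cast_safety(int from_len, int to_len, int same_order, int *is_view)
{
    *is_view = 0;
    if (from_len < to_len) {
        return SAFE;                   /* grows: safe, but never a view */
    }
    *is_view = same_order;             /* fits: view possible if order matches */
    if (from_len > to_len) {
        return SAME_KIND;              /* truncates */
    }
    return same_order ? NO : EQUIV;    /* same length */
}

int main(void)
{
    int view;
    safety_t s = string_cast_safety(8, 4, 1, &view);   /* e.g. "S8" -> "S4" */
    printf("safety=%d view=%d\n", (int)s, view);       /* same-kind, view OK */
    return 0;
}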
@@ -2866,7 +2901,8 @@ PyArray_InitializeStringCasts(void)
*/
static NPY_CASTING
cast_to_void_dtype_class(
- PyArray_Descr **given_descrs, PyArray_Descr **loop_descrs)
+ PyArray_Descr **given_descrs, PyArray_Descr **loop_descrs,
+ npy_intp *view_offset)
{
/* `dtype="V"` means unstructured currently (compare final path) */
loop_descrs[1] = PyArray_DescrNewFromType(NPY_VOID);
@@ -2876,11 +2912,13 @@ cast_to_void_dtype_class(
loop_descrs[1]->elsize = given_descrs[0]->elsize;
Py_INCREF(given_descrs[0]);
loop_descrs[0] = given_descrs[0];
+
+ *view_offset = 0;
if (loop_descrs[0]->type_num == NPY_VOID &&
loop_descrs[0]->subarray == NULL && loop_descrs[1]->names == NULL) {
- return NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
+ return NPY_NO_CASTING;
}
- return NPY_SAFE_CASTING | _NPY_CAST_IS_VIEW;
+ return NPY_SAFE_CASTING;
}
@@ -2889,12 +2927,13 @@ nonstructured_to_structured_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]),
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *view_offset)
{
NPY_CASTING casting;
if (given_descrs[1] == NULL) {
- return cast_to_void_dtype_class(given_descrs, loop_descrs);
+ return cast_to_void_dtype_class(given_descrs, loop_descrs, view_offset);
}
if (given_descrs[1]->subarray != NULL) {
@@ -2903,12 +2942,18 @@ nonstructured_to_structured_resolve_descriptors(
* possible to allow a view if the field has exactly one element.
*/
casting = NPY_SAFE_CASTING;
+ npy_intp sub_view_offset = NPY_MIN_INTP;
/* Subarray dtype */
- NPY_CASTING base_casting = PyArray_GetCastSafety(
- given_descrs[0], given_descrs[1]->subarray->base, NULL);
+ NPY_CASTING base_casting = PyArray_GetCastInfo(
+ given_descrs[0], given_descrs[1]->subarray->base, NULL,
+ &sub_view_offset);
if (base_casting < 0) {
return -1;
}
+ if (given_descrs[1]->elsize == given_descrs[1]->subarray->base->elsize) {
+ /* A single field, view is OK if sub-view is */
+ *view_offset = sub_view_offset;
+ }
casting = PyArray_MinCastSafety(casting, base_casting);
}
else if (given_descrs[1]->names != NULL) {
@@ -2920,21 +2965,32 @@ nonstructured_to_structured_resolve_descriptors(
else {
/* Considered at most unsafe casting (but this could be changed) */
casting = NPY_UNSAFE_CASTING;
- if (PyTuple_Size(given_descrs[1]->names) == 1) {
- /* A view may be acceptable */
- casting |= _NPY_CAST_IS_VIEW;
- }
Py_ssize_t pos = 0;
PyObject *key, *tuple;
while (PyDict_Next(given_descrs[1]->fields, &pos, &key, &tuple)) {
PyArray_Descr *field_descr = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0);
- NPY_CASTING field_casting = PyArray_GetCastSafety(
- given_descrs[0], field_descr, NULL);
+ npy_intp field_view_off = NPY_MIN_INTP;
+ NPY_CASTING field_casting = PyArray_GetCastInfo(
+ given_descrs[0], field_descr, NULL, &field_view_off);
casting = PyArray_MinCastSafety(casting, field_casting);
if (casting < 0) {
return -1;
}
+ if (field_view_off != NPY_MIN_INTP) {
+ npy_intp to_off = PyLong_AsSsize_t(PyTuple_GET_ITEM(tuple, 1));
+ if (error_converting(to_off)) {
+ return -1;
+ }
+ *view_offset = field_view_off - to_off;
+ }
+ }
+ if (PyTuple_Size(given_descrs[1]->names) != 1) {
+ /*
+ * Assume that a view is impossible when there is more than one
+ * field. (Fields could overlap, but that seems weird...)
+ */
+ *view_offset = NPY_MIN_INTP;
}
}
}
@@ -2944,15 +3000,20 @@ nonstructured_to_structured_resolve_descriptors(
!PyDataType_REFCHK(given_descrs[0])) {
/*
* A simple view, at the moment considered "safe" (the refcheck is
- * probably not necessary, but more future proof
+ * probably not necessary, but more future proof)
*/
- casting = NPY_SAFE_CASTING | _NPY_CAST_IS_VIEW;
+ *view_offset = 0;
+ casting = NPY_SAFE_CASTING;
}
else if (given_descrs[0]->elsize <= given_descrs[1]->elsize) {
casting = NPY_SAFE_CASTING;
}
else {
casting = NPY_UNSAFE_CASTING;
+ /* new elsize is smaller so a view is OK (reject refs for now) */
+ if (!PyDataType_REFCHK(given_descrs[0])) {
+ *view_offset = 0;
+ }
}
}
@@ -3048,6 +3109,8 @@ PyArray_GetGenericToVoidCastingImpl(void)
method->casting = -1;
method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors;
method->get_strided_loop = &nonstructured_to_structured_get_loop;
+ method->nin = 1;
+ method->nout = 1;
return (PyObject *)method;
}
@@ -3058,12 +3121,19 @@ structured_to_nonstructured_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *dtypes[2],
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *view_offset)
{
PyArray_Descr *base_descr;
+ /* The structured part may allow a view (and have its own offset): */
+ npy_intp struct_view_offset = NPY_MIN_INTP;
if (given_descrs[0]->subarray != NULL) {
base_descr = given_descrs[0]->subarray->base;
+ /* A view is possible if the subarray has exactly one element: */
+ if (given_descrs[0]->elsize == given_descrs[0]->subarray->base->elsize) {
+ struct_view_offset = 0;
+ }
}
else if (given_descrs[0]->names != NULL) {
if (PyTuple_Size(given_descrs[0]->names) != 1) {
@@ -3073,6 +3143,10 @@ structured_to_nonstructured_resolve_descriptors(
PyObject *key = PyTuple_GetItem(given_descrs[0]->names, 0);
PyObject *base_tup = PyDict_GetItem(given_descrs[0]->fields, key);
base_descr = (PyArray_Descr *)PyTuple_GET_ITEM(base_tup, 0);
+ struct_view_offset = PyLong_AsSsize_t(PyTuple_GET_ITEM(base_tup, 1));
+ if (error_converting(struct_view_offset)) {
+ return -1;
+ }
}
else {
/*
@@ -3080,20 +3154,29 @@ structured_to_nonstructured_resolve_descriptors(
* at this time they go back to legacy behaviour using getitem/setitem.
*/
base_descr = NULL;
+ struct_view_offset = 0;
}
/*
- * The cast is always considered unsafe, so the PyArray_GetCastSafety
- * result currently does not matter.
+ * The cast is always considered unsafe, so the PyArray_GetCastInfo
+ * result currently only matters for the view_offset.
*/
- if (base_descr != NULL && PyArray_GetCastSafety(
- base_descr, given_descrs[1], dtypes[1]) < 0) {
+ npy_intp base_view_offset = NPY_MIN_INTP;
+ if (base_descr != NULL && PyArray_GetCastInfo(
+ base_descr, given_descrs[1], dtypes[1], &base_view_offset) < 0) {
return -1;
}
+ if (base_view_offset != NPY_MIN_INTP
+ && struct_view_offset != NPY_MIN_INTP) {
+ *view_offset = base_view_offset + struct_view_offset;
+ }
/* Void dtypes always do the full cast. */
if (given_descrs[1] == NULL) {
loop_descrs[1] = NPY_DT_CALL_default_descr(dtypes[1]);
+ if (loop_descrs[1] == NULL) {
+ return -1;
+ }
/*
* Special case strings here, it should be useless (and only actually
* work for empty arrays). Possibly this should simply raise for
@@ -3187,6 +3270,8 @@ PyArray_GetVoidToGenericCastingImpl(void)
method->casting = -1;
method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors;
method->get_strided_loop = &structured_to_nonstructured_get_loop;
+ method->nin = 1;
+ method->nout = 1;
return (PyObject *)method;
}
@@ -3201,16 +3286,19 @@ PyArray_GetVoidToGenericCastingImpl(void)
* implementations on the dtype, to avoid duplicate work.
*/
static NPY_CASTING
-can_cast_fields_safety(PyArray_Descr *from, PyArray_Descr *to)
+can_cast_fields_safety(
+ PyArray_Descr *from, PyArray_Descr *to, npy_intp *view_offset)
{
- NPY_CASTING casting = NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
-
Py_ssize_t field_count = PyTuple_Size(from->names);
if (field_count != PyTuple_Size(to->names)) {
/* TODO: This should be rejected! */
return NPY_UNSAFE_CASTING;
}
+
+ NPY_CASTING casting = NPY_NO_CASTING;
+ *view_offset = 0; /* if there are no fields, a view is OK. */
for (Py_ssize_t i = 0; i < field_count; i++) {
+ npy_intp field_view_off = NPY_MIN_INTP;
PyObject *from_key = PyTuple_GET_ITEM(from->names, i);
PyObject *from_tup = PyDict_GetItemWithError(from->fields, from_key);
if (from_tup == NULL) {
@@ -3229,15 +3317,40 @@ can_cast_fields_safety(PyArray_Descr *from, PyArray_Descr *to)
}
PyArray_Descr *to_base = (PyArray_Descr*)PyTuple_GET_ITEM(to_tup, 0);
- NPY_CASTING field_casting = PyArray_GetCastSafety(from_base, to_base, NULL);
+ NPY_CASTING field_casting = PyArray_GetCastInfo(
+ from_base, to_base, NULL, &field_view_off);
if (field_casting < 0) {
return -1;
}
casting = PyArray_MinCastSafety(casting, field_casting);
+
+ /* Adjust the "view offset" by the field offsets: */
+ if (field_view_off != NPY_MIN_INTP) {
+ npy_intp to_off = PyLong_AsSsize_t(PyTuple_GET_ITEM(to_tup, 1));
+ if (error_converting(to_off)) {
+ return -1;
+ }
+ npy_intp from_off = PyLong_AsSsize_t(PyTuple_GET_ITEM(from_tup, 1));
+ if (error_converting(from_off)) {
+ return -1;
+ }
+ field_view_off = field_view_off - to_off + from_off;
+ }
+
+ /*
+ * Use the first field's offset as the candidate view offset. After
+ * that, propagate the view offset only while all fields agree and
+ * set it to "invalid" if they do not.
+ */
+ if (i == 0) {
+ *view_offset = field_view_off;
+ }
+ else if (*view_offset != field_view_off) {
+ *view_offset = NPY_MIN_INTP;
+ }
}
- if (!(casting & _NPY_CAST_IS_VIEW)) {
- assert((casting & ~_NPY_CAST_IS_VIEW) != NPY_NO_CASTING);
- return casting;
+ if (*view_offset != 0) {
+ /* If the calculated `view_offset` is not 0, it can only be "equiv" */
+ return PyArray_MinCastSafety(casting, NPY_EQUIV_CASTING);
}
/*
@@ -3277,38 +3390,42 @@ void_to_void_resolve_descriptors(
PyArrayMethodObject *self,
PyArray_DTypeMeta *dtypes[2],
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *view_offset)
{
NPY_CASTING casting;
if (given_descrs[1] == NULL) {
/* This is weird, since it doesn't return the original descr, but... */
- return cast_to_void_dtype_class(given_descrs, loop_descrs);
+ return cast_to_void_dtype_class(given_descrs, loop_descrs, view_offset);
}
if (given_descrs[0]->names != NULL && given_descrs[1]->names != NULL) {
/* From structured to structured, need to check fields */
- casting = can_cast_fields_safety(given_descrs[0], given_descrs[1]);
+ casting = can_cast_fields_safety(
+ given_descrs[0], given_descrs[1], view_offset);
}
else if (given_descrs[0]->names != NULL) {
return structured_to_nonstructured_resolve_descriptors(
- self, dtypes, given_descrs, loop_descrs);
+ self, dtypes, given_descrs, loop_descrs, view_offset);
}
else if (given_descrs[1]->names != NULL) {
return nonstructured_to_structured_resolve_descriptors(
- self, dtypes, given_descrs, loop_descrs);
+ self, dtypes, given_descrs, loop_descrs, view_offset);
}
else if (given_descrs[0]->subarray == NULL &&
given_descrs[1]->subarray == NULL) {
/* Both are plain void dtypes */
if (given_descrs[0]->elsize == given_descrs[1]->elsize) {
- casting = NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
+ casting = NPY_NO_CASTING;
+ *view_offset = 0;
}
else if (given_descrs[0]->elsize < given_descrs[1]->elsize) {
casting = NPY_SAFE_CASTING;
}
else {
casting = NPY_SAME_KIND_CASTING;
+ *view_offset = 0;
}
}
else {
@@ -3322,20 +3439,51 @@ void_to_void_resolve_descriptors(
/* If the shapes do not match, this is at most an unsafe cast */
casting = NPY_UNSAFE_CASTING;
+ /*
+ * We can use a view in two cases:
+ * 1. The shapes and elsizes match, so any view offset applies to
+ * each element of the subarray identically.
+ * (in practice this probably implies the `view_offset` will be 0)
+ * 2. There is exactly one element and the subarray has no effect
+ * (can be tested by checking whether the base itemsize matches the full itemsize)
+ */
+ npy_bool subarray_layout_supports_view = NPY_FALSE;
if (from_sub && to_sub) {
int res = PyObject_RichCompareBool(from_sub->shape, to_sub->shape, Py_EQ);
if (res < 0) {
return -1;
}
else if (res) {
- /* Both are subarrays and the shape matches */
- casting = NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
+ /* Both are subarrays and the shape matches, could be no cast */
+ casting = NPY_NO_CASTING;
+ /* May be a view if there is one element or elsizes match */
+ if (from_sub->base->elsize == to_sub->base->elsize
+ || given_descrs[0]->elsize == from_sub->base->elsize) {
+ subarray_layout_supports_view = NPY_TRUE;
+ }
+ }
+ }
+ else if (from_sub) {
+ /* May use a view if "from" has only a single element: */
+ if (given_descrs[0]->elsize == from_sub->base->elsize) {
+ subarray_layout_supports_view = NPY_TRUE;
+ }
+ }
+ else {
+ /* May use a view if "from" has only a single element: */
+ if (given_descrs[1]->elsize == to_sub->base->elsize) {
+ subarray_layout_supports_view = NPY_TRUE;
}
}
PyArray_Descr *from_base = (from_sub == NULL) ? given_descrs[0] : from_sub->base;
PyArray_Descr *to_base = (to_sub == NULL) ? given_descrs[1] : to_sub->base;
- NPY_CASTING field_casting = PyArray_GetCastSafety(from_base, to_base, NULL);
+ /* Determine the cast safety and view offset of the base-to-base cast: */
+ NPY_CASTING field_casting = PyArray_GetCastInfo(
+ from_base, to_base, NULL, view_offset);
+ if (!subarray_layout_supports_view) {
+ *view_offset = NPY_MIN_INTP;
+ }
if (field_casting < 0) {
return -1;
}
@@ -3443,7 +3591,8 @@ object_to_any_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *dtypes[2],
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *NPY_UNUSED(view_offset))
{
if (given_descrs[1] == NULL) {
/*
@@ -3513,7 +3662,8 @@ any_to_object_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *dtypes[2],
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *NPY_UNUSED(view_offset))
{
if (given_descrs[1] == NULL) {
loop_descrs[1] = NPY_DT_CALL_default_descr(dtypes[1]);
@@ -3591,10 +3741,6 @@ object_to_object_get_loop(
static int
PyArray_InitializeObjectToObjectCast(void)
{
- /*
- * The object dtype does not support byte order changes, so its cast
- * is always a direct view.
- */
PyArray_DTypeMeta *Object = PyArray_DTypeFromTypeNum(NPY_OBJECT);
PyArray_DTypeMeta *dtypes[2] = {Object, Object};
PyType_Slot slots[] = {
@@ -3602,7 +3748,7 @@ PyArray_InitializeObjectToObjectCast(void)
{0, NULL}};
PyArrayMethod_Spec spec = {
.name = "object_to_object_cast",
- .casting = NPY_NO_CASTING | _NPY_CAST_IS_VIEW,
+ .casting = NPY_NO_CASTING,
.nin = 1,
.nout = 1,
.flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_SUPPORTS_UNALIGNED,
diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h
index 5e0682f22..6b4413959 100644
--- a/numpy/core/src/multiarray/convert_datatype.h
+++ b/numpy/core/src/multiarray/convert_datatype.h
@@ -68,8 +68,9 @@ NPY_NO_EXPORT NPY_CASTING
PyArray_MinCastSafety(NPY_CASTING casting1, NPY_CASTING casting2);
NPY_NO_EXPORT NPY_CASTING
-PyArray_GetCastSafety(
- PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype);
+PyArray_GetCastInfo(
+ PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype,
+ npy_intp *view_offset);
NPY_NO_EXPORT int
PyArray_CheckCastSafety(NPY_CASTING casting,
@@ -80,7 +81,8 @@ legacy_same_dtype_resolve_descriptors(
PyArrayMethodObject *self,
PyArray_DTypeMeta *dtypes[2],
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2]);
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *view_offset);
NPY_NO_EXPORT int
legacy_cast_get_strided_loop(
@@ -94,7 +96,8 @@ simple_cast_resolve_descriptors(
PyArrayMethodObject *self,
PyArray_DTypeMeta *dtypes[2],
PyArray_Descr *input_descrs[2],
- PyArray_Descr *loop_descrs[2]);
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *view_offset);
NPY_NO_EXPORT int
PyArray_InitializeCasts(void);
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index b62426854..25eb91977 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -740,7 +740,6 @@ PyArray_NewFromDescr_int(
}
else {
fa->flags = (flags & ~NPY_ARRAY_WRITEBACKIFCOPY);
- fa->flags &= ~NPY_ARRAY_UPDATEIFCOPY;
}
fa->descr = descr;
fa->base = (PyObject *)NULL;
@@ -754,14 +753,20 @@ PyArray_NewFromDescr_int(
}
fa->strides = fa->dimensions + nd;
- /* Copy dimensions, check them, and find total array size `nbytes` */
+ /*
+ * Copy dimensions, check them, and find total array size `nbytes`
+ *
+ * Note that we ignore 0-length dimensions. To match this in the `free`
+ * calls, `PyArray_NBYTES_ALLOCATED` is a private helper that mirrors this
+ * behaviour, but without overflow checking.
+ */
for (int i = 0; i < nd; i++) {
fa->dimensions[i] = dims[i];
if (fa->dimensions[i] == 0) {
/*
* Compare to PyArray_OverflowMultiplyList that
- * returns 0 in this case.
+ * returns 0 in this case. See also `PyArray_NBYTES_ALLOCATED`.
*/
continue;
}
@@ -870,16 +875,39 @@ PyArray_NewFromDescr_int(
/*
* call the __array_finalize__ method if a subtype was requested.
* If obj is NULL use Py_None for the Python callback.
+ * For speed, we skip if __array_finalize__ is inherited from ndarray
+ * (since that function does nothing), or, for backward compatibility,
+ * if it is None.
*/
if (subtype != &PyArray_Type) {
PyObject *res, *func;
-
- func = PyObject_GetAttr((PyObject *)fa, npy_ma_str_array_finalize);
+ static PyObject *ndarray_array_finalize = NULL;
+ /* First time, cache ndarray's __array_finalize__ */
+ if (ndarray_array_finalize == NULL) {
+ ndarray_array_finalize = PyObject_GetAttr(
+ (PyObject *)&PyArray_Type, npy_ma_str_array_finalize);
+ }
+ func = PyObject_GetAttr((PyObject *)subtype, npy_ma_str_array_finalize);
if (func == NULL) {
goto fail;
}
+ else if (func == ndarray_array_finalize) {
+ Py_DECREF(func);
+ }
else if (func == Py_None) {
Py_DECREF(func);
+ /*
+ * 2022-01-08, NumPy 1.23; when deprecation period is over, remove this
+ * whole stanza so one gets a "NoneType object is not callable" TypeError.
+ */
+ if (DEPRECATE(
+ "Setting __array_finalize__ = None to indicate no finalization"
+ "should be done is deprecated. Instead, just inherit from "
+ "ndarray or, if that is not possible, explicitly set to "
+ "ndarray.__array_function__; this will raise a TypeError "
+ "in the future. (Deprecated since NumPy 1.23)") < 0) {
+ goto fail;
+ }
}
else {
if (PyCapsule_CheckExact(func)) {
@@ -898,7 +926,7 @@ PyArray_NewFromDescr_int(
if (obj == NULL) {
obj = Py_None;
}
- res = PyObject_CallFunctionObjArgs(func, obj, NULL);
+ res = PyObject_CallFunctionObjArgs(func, (PyObject *)fa, obj, NULL);
Py_DECREF(func);
if (res == NULL) {
goto fail;
@@ -1152,6 +1180,16 @@ _array_from_buffer_3118(PyObject *memoryview)
npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
view = PyMemoryView_GET_BUFFER(memoryview);
+
+ if (view->suboffsets != NULL) {
+ PyErr_SetString(PyExc_BufferError,
+ "NumPy currently does not support importing buffers which "
+ "include suboffsets as they are not compatible with the NumPy"
+ "memory layout without a copy. Consider copying the original "
+ "before trying to convert it to a NumPy array.");
+ return NULL;
+ }
+
nd = view->ndim;
descr = _dtype_from_buffer_3118(memoryview);
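The new guard rejects PEP 3118 buffers that use suboffsets (pointer-indirected dimensions), since NumPy's strided layout cannot describe them without a copy. A standalone sketch of how such buffers can be detected with the stock buffer-protocol API; it is illustrative only and assumes an initialized interpreter:

#include <Python.h>

/* Return 1 if `obj` exposes a buffer that strided code could use directly,
 * 0 if it would need a copy first (suboffsets present), -1 on error. */
static int
buffer_is_strided_compatible(PyObject *obj)
{
    Py_buffer view;
    if (PyObject_GetBuffer(obj, &view, PyBUF_FULL_RO) < 0) {
        return -1;
    }
    int ok = (view.suboffsets == NULL);    /* the condition checked above */
    PyBuffer_Release(&view);
    return ok;
}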
@@ -1301,9 +1339,10 @@ _array_from_array_like(PyObject *op,
* We skip bytes and unicode since they are considered scalars. Unicode
* would fail but bytes would be incorrectly converted to a uint8 array.
*/
- if (!PyBytes_Check(op) && !PyUnicode_Check(op)) {
+ if (PyObject_CheckBuffer(op) && !PyBytes_Check(op) && !PyUnicode_Check(op)) {
PyObject *memoryview = PyMemoryView_FromObject(op);
if (memoryview == NULL) {
+ /* TODO: Should probably not blanket ignore errors. */
PyErr_Clear();
}
else {
@@ -1710,10 +1749,12 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
if (flags & NPY_ARRAY_ENSURENOCOPY ) {
PyErr_SetString(PyExc_ValueError,
"Unable to avoid copy while creating an array.");
+ Py_DECREF(dtype);
+ npy_free_coercion_cache(cache);
return NULL;
}
- if (cache == 0 && newtype != NULL &&
+ if (cache == NULL && newtype != NULL &&
PyDataType_ISSIGNED(newtype) && PyArray_IsScalar(op, Generic)) {
assert(ndim == 0);
/*
@@ -1740,8 +1781,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
}
/* There was no array (or array-like) passed in directly. */
- if ((flags & NPY_ARRAY_WRITEBACKIFCOPY) ||
- (flags & NPY_ARRAY_UPDATEIFCOPY)) {
+ if (flags & NPY_ARRAY_WRITEBACKIFCOPY) {
PyErr_SetString(PyExc_TypeError,
"WRITEBACKIFCOPY used for non-array input.");
Py_DECREF(dtype);
@@ -1810,7 +1850,6 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
* NPY_ARRAY_WRITEABLE,
* NPY_ARRAY_NOTSWAPPED,
* NPY_ARRAY_ENSURECOPY,
- * NPY_ARRAY_UPDATEIFCOPY,
* NPY_ARRAY_WRITEBACKIFCOPY,
* NPY_ARRAY_FORCECAST,
* NPY_ARRAY_ENSUREARRAY,
@@ -1837,9 +1876,6 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
* Fortran arrays are always behaved (aligned,
* notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d).
*
- * NPY_ARRAY_UPDATEIFCOPY is deprecated in favor of
- * NPY_ARRAY_WRITEBACKIFCOPY in 1.14
-
* NPY_ARRAY_WRITEBACKIFCOPY flag sets this flag in the returned
* array if a copy is made and the base argument points to the (possibly)
* misbehaved array. Before returning to python, PyArray_ResolveWritebackIfCopy
@@ -1962,6 +1998,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
if (flags & NPY_ARRAY_ENSURENOCOPY ) {
PyErr_SetString(PyExc_ValueError,
"Unable to avoid copy while creating an array from given array.");
+ Py_DECREF(newtype);
return NULL;
}
@@ -1990,31 +2027,8 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
return NULL;
}
- if (flags & NPY_ARRAY_UPDATEIFCOPY) {
- /* This is the ONLY place the NPY_ARRAY_UPDATEIFCOPY flag
- * is still used.
- * Can be deleted once the flag itself is removed
- */
- /* 2017-Nov-10 1.14 */
- if (DEPRECATE(
- "NPY_ARRAY_UPDATEIFCOPY, NPY_ARRAY_INOUT_ARRAY, and "
- "NPY_ARRAY_INOUT_FARRAY are deprecated, use NPY_WRITEBACKIFCOPY, "
- "NPY_ARRAY_INOUT_ARRAY2, or NPY_ARRAY_INOUT_FARRAY2 respectively "
- "instead, and call PyArray_ResolveWritebackIfCopy before the "
- "array is deallocated, i.e. before the last call to Py_DECREF.") < 0) {
- Py_DECREF(ret);
- return NULL;
- }
- Py_INCREF(arr);
- if (PyArray_SetWritebackIfCopyBase(ret, arr) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
- PyArray_ENABLEFLAGS(ret, NPY_ARRAY_UPDATEIFCOPY);
- PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEBACKIFCOPY);
- }
- else if (flags & NPY_ARRAY_WRITEBACKIFCOPY) {
+ if (flags & NPY_ARRAY_WRITEBACKIFCOPY) {
Py_INCREF(arr);
if (PyArray_SetWritebackIfCopyBase(ret, arr) < 0) {
Py_DECREF(ret);
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index e0064c017..03ebaa7ce 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -3746,8 +3746,6 @@ find_object_datetime_type(PyObject *obj, int type_num)
}
-
-
/*
* Describes casting within datetimes or timedelta
*/
@@ -3756,7 +3754,8 @@ time_to_time_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]),
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *view_offset)
{
/* This is a within-dtype cast, which currently must handle byteswapping */
Py_INCREF(given_descrs[0]);
@@ -3772,14 +3771,14 @@ time_to_time_resolve_descriptors(
int is_timedelta = given_descrs[0]->type_num == NPY_TIMEDELTA;
if (given_descrs[0] == given_descrs[1]) {
- return NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
+ *view_offset = 0;
+ return NPY_NO_CASTING;
}
- NPY_CASTING byteorder_may_allow_view = 0;
- if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) ==
- PyDataType_ISNOTSWAPPED(loop_descrs[1])) {
- byteorder_may_allow_view = _NPY_CAST_IS_VIEW;
- }
+ npy_bool byteorder_may_allow_view = (
+ PyDataType_ISNOTSWAPPED(loop_descrs[0])
+ == PyDataType_ISNOTSWAPPED(loop_descrs[1]));
+
PyArray_DatetimeMetaData *meta1, *meta2;
meta1 = get_datetime_metadata_from_dtype(loop_descrs[0]);
assert(meta1 != NULL);
@@ -3798,12 +3797,16 @@ time_to_time_resolve_descriptors(
((meta2->base >= 7) && (meta1->base - meta2->base == 3)
&& ((meta1->num / meta2->num) == 1000000000))) {
if (byteorder_may_allow_view) {
- return NPY_NO_CASTING | byteorder_may_allow_view;
+ *view_offset = 0;
+ return NPY_NO_CASTING;
}
return NPY_EQUIV_CASTING;
}
else if (meta1->base == NPY_FR_GENERIC) {
- return NPY_SAFE_CASTING | byteorder_may_allow_view;
+ if (byteorder_may_allow_view) {
+ *view_offset = 0;
+ }
+ return NPY_SAFE_CASTING;
}
else if (meta2->base == NPY_FR_GENERIC) {
/* TODO: This is actually an invalid cast (casting will error) */
@@ -3931,10 +3934,11 @@ datetime_to_timedelta_resolve_descriptors(
/* In the current setup both strings and unicode casts support all outputs */
static NPY_CASTING
time_to_string_resolve_descriptors(
- PyArrayMethodObject *self,
+ PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *dtypes[2],
PyArray_Descr **given_descrs,
- PyArray_Descr **loop_descrs)
+ PyArray_Descr **loop_descrs,
+ npy_intp *NPY_UNUSED(view_offset))
{
if (given_descrs[1] != NULL && dtypes[0]->type_num == NPY_DATETIME) {
/*
@@ -4013,7 +4017,8 @@ string_to_datetime_cast_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *dtypes[2],
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *NPY_UNUSED(view_offset))
{
if (given_descrs[1] == NULL) {
/* NOTE: This doesn't actually work, and will error during the cast */
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index ce0293615..5d245b106 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -1809,9 +1809,16 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
pos--;
numFractionDigits--;
}
- if (trim_mode == TrimMode_LeaveOneZero && buffer[pos-1] == '.') {
- buffer[pos++] = '0';
- numFractionDigits++;
+ if (buffer[pos-1] == '.') {
+ /* in TrimMode_LeaveOneZero, add trailing 0 back */
+ if (trim_mode == TrimMode_LeaveOneZero){
+ buffer[pos++] = '0';
+ numFractionDigits++;
+ }
+ /* in TrimMode_DptZeros, remove trailing decimal point */
+ else if (trim_mode == TrimMode_DptZeros) {
+ pos--;
+ }
}
}
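The added branch restores the distinction between the two trim modes once all fractional zeros have been stripped: `TrimMode_LeaveOneZero` puts a single '0' back after a bare '.', while `TrimMode_DptZeros` also removes the '.'. A simplified standalone sketch of that behaviour (hypothetical helper, not the dragon4 code itself):

#include <stdio.h>
#include <string.h>

enum trim_mode { TRIM_LEAVE_ONE_ZERO, TRIM_DPT_ZEROS };

static void
trim_fraction(char *buf, enum trim_mode mode)
{
    size_t pos = strlen(buf);
    while (pos > 0 && buf[pos - 1] == '0') {
        pos--;                                /* strip trailing zeros */
    }
    if (pos > 0 && buf[pos - 1] == '.') {
        if (mode == TRIM_LEAVE_ONE_ZERO) {
            buf[pos++] = '0';                 /* "1." -> "1.0" */
        }
        else {                                /* TRIM_DPT_ZEROS */
            pos--;                            /* "1." -> "1" */
        }
    }
    buf[pos] = '\0';
}

int main(void)
{
    char a[16] = "1.000", b[16] = "1.000";
    trim_fraction(a, TRIM_LEAVE_ONE_ZERO);
    trim_fraction(b, TRIM_DPT_ZEROS);
    printf("%s %s\n", a, b);                  /* prints: 1.0 1 */
    return 0;
}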
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 8fb44c4f6..4877f8dfa 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -2991,7 +2991,8 @@ _strided_to_strided_multistep_cast(
* transferfunction and transferdata.
*/
static NPY_INLINE int
-init_cast_info(NPY_cast_info *cast_info, NPY_CASTING *casting,
+init_cast_info(
+ NPY_cast_info *cast_info, NPY_CASTING *casting, npy_intp *view_offset,
PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype, int main_step)
{
PyObject *meth = PyArray_GetCastingImpl(
@@ -3016,7 +3017,8 @@ init_cast_info(NPY_cast_info *cast_info, NPY_CASTING *casting,
PyArray_Descr *in_descr[2] = {src_dtype, dst_dtype};
*casting = cast_info->context.method->resolve_descriptors(
- cast_info->context.method, dtypes, in_descr, cast_info->descriptors);
+ cast_info->context.method, dtypes,
+ in_descr, cast_info->descriptors, view_offset);
if (NPY_UNLIKELY(*casting < 0)) {
if (!PyErr_Occurred()) {
PyErr_Format(PyExc_TypeError,
@@ -3071,6 +3073,9 @@ _clear_cast_info_after_get_loop_failure(NPY_cast_info *cast_info)
* transfer function from the each casting implementation (ArrayMethod).
* May set the transfer function to NULL when the cast can be achieved using
* a view.
+ * TODO: Expand the view functionality for general offsets, not just 0:
+ * Partial casts could be skipped also for `view_offset != 0`.
+ *
* The `out_needs_api` flag must be initialized.
*
* NOTE: In theory casting errors here could be slightly misleading in case
@@ -3101,9 +3106,12 @@ define_cast_for_descrs(
castdata.main.func = NULL;
castdata.to.func = NULL;
castdata.from.func = NULL;
+ /* `view_offset` passed to `init_cast_info` but unused for the main cast */
+ npy_intp view_offset = NPY_MIN_INTP;
NPY_CASTING casting = -1;
- if (init_cast_info(cast_info, &casting, src_dtype, dst_dtype, 1) < 0) {
+ if (init_cast_info(
+ cast_info, &casting, &view_offset, src_dtype, dst_dtype, 1) < 0) {
return -1;
}
@@ -3123,17 +3131,18 @@ define_cast_for_descrs(
*/
if (NPY_UNLIKELY(src_dtype != cast_info->descriptors[0] || must_wrap)) {
NPY_CASTING from_casting = -1;
+ npy_intp from_view_offset = NPY_MIN_INTP;
/* Cast function may not support the input, wrap if necessary */
if (init_cast_info(
- &castdata.from, &from_casting,
+ &castdata.from, &from_casting, &from_view_offset,
src_dtype, cast_info->descriptors[0], 0) < 0) {
goto fail;
}
casting = PyArray_MinCastSafety(casting, from_casting);
/* Prepare the actual cast (if necessary): */
- if (from_casting & _NPY_CAST_IS_VIEW && !must_wrap) {
- /* This step is not necessary and can be skipped. */
+ if (from_view_offset == 0 && !must_wrap) {
+ /* This step is not necessary and can be skipped */
castdata.from.func = &_dec_src_ref_nop; /* avoid NULL */
NPY_cast_info_xfree(&castdata.from);
}
@@ -3161,16 +3170,17 @@ define_cast_for_descrs(
*/
if (NPY_UNLIKELY(dst_dtype != cast_info->descriptors[1] || must_wrap)) {
NPY_CASTING to_casting = -1;
+ npy_intp to_view_offset = NPY_MIN_INTP;
/* Cast function may not support the output, wrap if necessary */
if (init_cast_info(
- &castdata.to, &to_casting,
+ &castdata.to, &to_casting, &to_view_offset,
cast_info->descriptors[1], dst_dtype, 0) < 0) {
goto fail;
}
casting = PyArray_MinCastSafety(casting, to_casting);
/* Prepare the actual cast (if necessary): */
- if (to_casting & _NPY_CAST_IS_VIEW && !must_wrap) {
+ if (to_view_offset == 0 && !must_wrap) {
/* This step is not necessary and can be skipped. */
castdata.to.func = &_dec_src_ref_nop; /* avoid NULL */
NPY_cast_info_xfree(&castdata.to);
diff --git a/numpy/core/src/multiarray/experimental_public_dtype_api.c b/numpy/core/src/multiarray/experimental_public_dtype_api.c
index 4b9c7199b..e9d191002 100644
--- a/numpy/core/src/multiarray/experimental_public_dtype_api.c
+++ b/numpy/core/src/multiarray/experimental_public_dtype_api.c
@@ -16,7 +16,7 @@
#include "common_dtype.h"
-#define EXPERIMENTAL_DTYPE_API_VERSION 2
+#define EXPERIMENTAL_DTYPE_API_VERSION 3
typedef struct{
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index 3b1b4f406..b5bd7c8c1 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -238,25 +238,6 @@ _define_get_warn(NPY_ARRAY_ALIGNED|
NPY_ARRAY_C_CONTIGUOUS, carray)
static PyObject *
-arrayflags_updateifcopy_get(PyArrayFlagsObject *self, void *NPY_UNUSED(ignored))
-{
- PyObject *item;
- /* 2017-Nov-10 1.14 */
- if(DEPRECATE("UPDATEIFCOPY deprecated, use WRITEBACKIFCOPY instead") < 0) {
- return NULL;
- }
- if ((self->flags & (NPY_ARRAY_UPDATEIFCOPY)) == (NPY_ARRAY_UPDATEIFCOPY)) {
- item = Py_True;
- }
- else {
- item = Py_False;
- }
- Py_INCREF(item);
- return item;
-}
-
-
-static PyObject *
arrayflags_forc_get(PyArrayFlagsObject *self, void *NPY_UNUSED(ignored))
{
PyObject *item;
@@ -314,36 +295,6 @@ arrayflags_num_get(PyArrayFlagsObject *self, void *NPY_UNUSED(ignored))
/* relies on setflags order being write, align, uic */
static int
-arrayflags_updateifcopy_set(
- PyArrayFlagsObject *self, PyObject *obj, void *NPY_UNUSED(ignored))
-{
- PyObject *res;
-
- if (obj == NULL) {
- PyErr_SetString(PyExc_AttributeError,
- "Cannot delete flags updateifcopy attribute");
- return -1;
- }
- if (self->arr == NULL) {
- PyErr_SetString(PyExc_ValueError,
- "Cannot set flags on array scalars.");
- return -1;
- }
- /* 2017-Nov-10 1.14 */
- if(DEPRECATE("UPDATEIFCOPY deprecated, use WRITEBACKIFCOPY instead") < 0) {
- return -1;
- }
- res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, Py_None,
- (PyObject_IsTrue(obj) ? Py_True : Py_False));
- if (res == NULL) {
- return -1;
- }
- Py_DECREF(res);
- return 0;
-}
-
-/* relies on setflags order being write, align, uic */
-static int
arrayflags_writebackifcopy_set(
PyArrayFlagsObject *self, PyObject *obj, void *NPY_UNUSED(ignored))
{
@@ -473,10 +424,6 @@ static PyGetSetDef arrayflags_getsets[] = {
(getter)arrayflags_fortran_get,
NULL,
NULL, NULL},
- {"updateifcopy",
- (getter)arrayflags_updateifcopy_get,
- (setter)arrayflags_updateifcopy_set,
- NULL, NULL},
{"writebackifcopy",
(getter)arrayflags_writebackifcopy_get,
(setter)arrayflags_writebackifcopy_set,
@@ -574,8 +521,6 @@ arrayflags_getitem(PyArrayFlagsObject *self, PyObject *ind)
return arrayflags_aligned_get(self, NULL);
case 'X':
return arrayflags_writebackifcopy_get(self, NULL);
- case 'U':
- return arrayflags_updateifcopy_get(self, NULL);
default:
goto fail;
}
@@ -631,9 +576,6 @@ arrayflags_getitem(PyArrayFlagsObject *self, PyObject *ind)
}
break;
case 12:
- if (strncmp(key, "UPDATEIFCOPY", n) == 0) {
- return arrayflags_updateifcopy_get(self, NULL);
- }
if (strncmp(key, "C_CONTIGUOUS", n) == 0) {
return arrayflags_contiguous_get(self, NULL);
}
@@ -684,10 +626,6 @@ arrayflags_setitem(PyArrayFlagsObject *self, PyObject *ind, PyObject *item)
((n==1) && (strncmp(key, "A", n) == 0))) {
return arrayflags_aligned_set(self, item, NULL);
}
- else if (((n==12) && (strncmp(key, "UPDATEIFCOPY", n) == 0)) ||
- ((n==1) && (strncmp(key, "U", n) == 0))) {
- return arrayflags_updateifcopy_set(self, item, NULL);
- }
else if (((n==15) && (strncmp(key, "WRITEBACKIFCOPY", n) == 0)) ||
((n==1) && (strncmp(key, "X", n) == 0))) {
return arrayflags_writebackifcopy_set(self, item, NULL);
@@ -721,16 +659,14 @@ arrayflags_print(PyArrayFlagsObject *self)
return PyUnicode_FromFormat(
" %s : %s\n %s : %s\n"
" %s : %s\n %s : %s%s\n"
- " %s : %s\n %s : %s\n"
- " %s : %s\n",
+ " %s : %s\n %s : %s\n",
"C_CONTIGUOUS", _torf_(fl, NPY_ARRAY_C_CONTIGUOUS),
"F_CONTIGUOUS", _torf_(fl, NPY_ARRAY_F_CONTIGUOUS),
"OWNDATA", _torf_(fl, NPY_ARRAY_OWNDATA),
"WRITEABLE", _torf_(fl, NPY_ARRAY_WRITEABLE),
_warn_on_write,
"ALIGNED", _torf_(fl, NPY_ARRAY_ALIGNED),
- "WRITEBACKIFCOPY", _torf_(fl, NPY_ARRAY_WRITEBACKIFCOPY),
- "UPDATEIFCOPY", _torf_(fl, NPY_ARRAY_UPDATEIFCOPY)
+ "WRITEBACKIFCOPY", _torf_(fl, NPY_ARRAY_WRITEBACKIFCOPY)
);
}
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index e81ca2947..a4f972ba4 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -384,15 +384,7 @@ array_data_set(PyArrayObject *self, PyObject *op, void *NPY_UNUSED(ignored))
}
if (PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA) {
PyArray_XDECREF(self);
- size_t nbytes = PyArray_NBYTES(self);
- /*
- * Allocation will never be 0, see comment in ctors.c
- * line 820
- */
- if (nbytes == 0) {
- PyArray_Descr *dtype = PyArray_DESCR(self);
- nbytes = dtype->elsize ? dtype->elsize : 1;
- }
+ size_t nbytes = PyArray_NBYTES_ALLOCATED(self);
PyObject *handler = PyArray_HANDLER(self);
if (handler == NULL) {
/* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */
@@ -401,14 +393,13 @@ array_data_set(PyArrayObject *self, PyObject *op, void *NPY_UNUSED(ignored))
return -1;
}
PyDataMem_UserFREE(PyArray_DATA(self), nbytes, handler);
+ Py_CLEAR(((PyArrayObject_fields *)self)->mem_handler);
}
if (PyArray_BASE(self)) {
- if ((PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) ||
- (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY)) {
+ if (PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) {
PyArray_ENABLEFLAGS((PyArrayObject *)PyArray_BASE(self),
NPY_ARRAY_WRITEABLE);
PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY);
- PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY);
}
Py_DECREF(PyArray_BASE(self));
((PyArrayObject_fields *)self)->base = NULL;
@@ -505,9 +496,6 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored))
/* Changing the size of the dtype results in a shape change */
if (newtype->elsize != PyArray_DESCR(self)->elsize) {
- int axis;
- npy_intp newdim;
-
/* forbidden cases */
if (PyArray_NDIM(self) == 0) {
PyErr_SetString(PyExc_ValueError,
@@ -522,31 +510,20 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored))
goto fail;
}
- /* determine which axis to resize */
- if (PyArray_IS_C_CONTIGUOUS(self)) {
- axis = PyArray_NDIM(self) - 1;
- }
- else if (PyArray_IS_F_CONTIGUOUS(self)) {
- /* 2015-11-27 1.11.0, gh-6747 */
- if (DEPRECATE(
- "Changing the shape of an F-contiguous array by "
- "descriptor assignment is deprecated. To maintain the "
- "Fortran contiguity of a multidimensional Fortran "
- "array, use 'a.T.view(...).T' instead") < 0) {
- goto fail;
- }
- axis = 0;
- }
- else {
- /* Don't mention the deprecated F-contiguous support */
+ /* resize on last axis only */
+ int axis = PyArray_NDIM(self) - 1;
+ if (PyArray_DIMS(self)[axis] != 1 &&
+ PyArray_STRIDES(self)[axis] != PyArray_DESCR(self)->elsize) {
PyErr_SetString(PyExc_ValueError,
- "To change to a dtype of a different size, the array must "
- "be C-contiguous");
+ "To change to a dtype of a different size, the last axis "
+ "must be contiguous");
goto fail;
}
+ npy_intp newdim;
+
if (newtype->elsize < PyArray_DESCR(self)->elsize) {
- /* if it is compatible, increase the size of the relevant axis */
+ /* if it is compatible, increase the size of the last axis */
if (newtype->elsize == 0 ||
PyArray_DESCR(self)->elsize % newtype->elsize != 0) {
PyErr_SetString(PyExc_ValueError,
@@ -558,7 +535,7 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored))
PyArray_DIMS(self)[axis] *= newdim;
PyArray_STRIDES(self)[axis] = newtype->elsize;
}
- else if (newtype->elsize > PyArray_DESCR(self)->elsize) {
+ else /* newtype->elsize > PyArray_DESCR(self)->elsize */ {
/* if it is compatible, decrease the size of the relevant axis */
newdim = PyArray_DIMS(self)[axis] * PyArray_DESCR(self)->elsize;
if ((newdim % newtype->elsize) != 0) {
@@ -633,7 +610,7 @@ array_struct_get(PyArrayObject *self, void *NPY_UNUSED(ignored))
inter->flags = inter->flags & ~NPY_ARRAY_WRITEABLE;
}
/* reset unused flags */
- inter->flags &= ~(NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_UPDATEIFCOPY |NPY_ARRAY_OWNDATA);
+ inter->flags &= ~(NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_OWNDATA);
if (PyArray_ISNOTSWAPPED(self)) inter->flags |= NPY_ARRAY_NOTSWAPPED;
/*
* Copy shape and strides over since these can be reset
@@ -949,15 +926,6 @@ array_transpose_get(PyArrayObject *self, void *NPY_UNUSED(ignored))
return PyArray_Transpose(self, NULL);
}
-/* If this is None, no function call is made
- --- default sub-class behavior
-*/
-static PyObject *
-array_finalize_get(PyArrayObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored))
-{
- Py_RETURN_NONE;
-}
-
NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = {
{"ndim",
(getter)array_ndim_get,
@@ -1031,10 +999,6 @@ NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = {
(getter)array_priority_get,
NULL,
NULL, NULL},
- {"__array_finalize__",
- (getter)array_finalize_get,
- NULL,
- NULL, NULL},
{NULL, NULL, NULL, NULL, NULL}, /* Sentinel */
};
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 014a863d5..5d515d013 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -3254,7 +3254,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
* If copy_if_overlap != 0, check if `a` has memory overlap with any of the
* arrays in `index` and with `extra_op`. If yes, make copies as appropriate
* to avoid problems if `a` is modified during the iteration.
- * `iter->array` may contain a copied array (UPDATEIFCOPY/WRITEBACKIFCOPY set).
+ * `iter->array` may contain a copied array (WRITEBACKIFCOPY set).
*/
NPY_NO_EXPORT PyObject *
PyArray_MapIterArrayCopyIfOverlap(PyArrayObject * a, PyObject * index,
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index b0b6f42f1..33f78dff2 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -859,7 +859,7 @@ array_astype(PyArrayObject *self,
* and it's not a subtype if subok is False, then we
* can skip the copy.
*/
- if (forcecopy != NPY_COPY_ALWAYS &&
+ if (forcecopy != NPY_COPY_ALWAYS &&
(order == NPY_KEEPORDER ||
(order == NPY_ANYORDER &&
(PyArray_IS_C_CONTIGUOUS(self) ||
@@ -881,7 +881,7 @@ array_astype(PyArrayObject *self,
Py_DECREF(dtype);
return NULL;
}
-
+
if (!PyArray_CanCastArrayTo(self, dtype, casting)) {
PyErr_Clear();
npy_set_invalid_cast_error(
@@ -926,6 +926,13 @@ array_astype(PyArrayObject *self,
static PyObject *
+array_finalizearray(PyArrayObject *self, PyObject *obj)
+{
+ Py_RETURN_NONE;
+}
+
+
+static PyObject *
array_wraparray(PyArrayObject *self, PyObject *args)
{
PyArrayObject *arr;
@@ -1934,7 +1941,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
PyObject *rawdata = NULL;
char *datastr;
Py_ssize_t len;
- npy_intp size, dimensions[NPY_MAXDIMS];
+ npy_intp dimensions[NPY_MAXDIMS];
int nd;
npy_intp nbytes;
int overflowed;
@@ -1976,11 +1983,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
* since fa could be a 0-d or scalar, and then
* PyDataMem_UserFREE will be confused
*/
- size_t n_tofree = PyArray_NBYTES(self);
- if (n_tofree == 0) {
- PyArray_Descr *dtype = PyArray_DESCR(self);
- n_tofree = dtype->elsize ? dtype->elsize : 1;
- }
+ size_t n_tofree = PyArray_NBYTES_ALLOCATED(self);
Py_XDECREF(PyArray_DESCR(self));
fa->descr = typecode;
Py_INCREF(typecode);
@@ -1988,17 +1991,39 @@ array_setstate(PyArrayObject *self, PyObject *args)
if (nd < 0) {
return NULL;
}
- size = PyArray_MultiplyList(dimensions, nd);
- if (size < 0) {
- /* More items than are addressable */
- return PyErr_NoMemory();
+ /*
+ * We should do two things here:
+ * 1. Validate the input, i.e. that it is neither invalid nor "too big"
+ * ("too big" ignores dimensions of size 0).
+ * 2. Find `PyArray_NBYTES` of the result, as this is what we may need to
+ * copy from the pickled data (may not match allocation currently if 0).
+ * Compare with `PyArray_NewFromDescr`, raise MemoryError for simplicity.
+ */
+ npy_bool empty = NPY_FALSE;
+ nbytes = 1;
+ for (int i = 0; i < nd; i++) {
+ if (dimensions[i] < 0) {
+ PyErr_SetString(PyExc_TypeError,
+ "impossible dimension while unpickling array");
+ return NULL;
+ }
+ if (dimensions[i] == 0) {
+ empty = NPY_TRUE;
+ }
+ overflowed = npy_mul_with_overflow_intp(
+ &nbytes, nbytes, dimensions[i]);
+ if (overflowed) {
+ return PyErr_NoMemory();
+ }
}
overflowed = npy_mul_with_overflow_intp(
- &nbytes, size, PyArray_DESCR(self)->elsize);
+ &nbytes, nbytes, PyArray_DESCR(self)->elsize);
if (overflowed) {
- /* More bytes than are addressable */
return PyErr_NoMemory();
}
+ if (empty) {
+ nbytes = 0;
+ }
if (PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) {
if (!PyList_Check(rawdata)) {
@@ -2039,8 +2064,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
if (len != nbytes) {
PyErr_SetString(PyExc_ValueError,
- "buffer size does not" \
- " match array size");
+ "buffer size does not match array size");
Py_DECREF(rawdata);
return NULL;
}
@@ -2065,7 +2089,6 @@ array_setstate(PyArrayObject *self, PyObject *args)
fa->base = NULL;
PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY);
- PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY);
if (PyArray_DIMS(self) != NULL) {
npy_free_cache_dim_array(self);
@@ -2097,21 +2120,18 @@ array_setstate(PyArrayObject *self, PyObject *args)
/* Bytes should always be considered immutable, but we just grab the
* pointer if they are large, to save memory. */
if (!IsAligned(self) || swap || (len <= 1000)) {
- npy_intp num = PyArray_NBYTES(self);
- if (num == 0) {
- Py_DECREF(rawdata);
- Py_RETURN_NONE;
- }
+ npy_intp num = PyArray_NBYTES_ALLOCATED(self);
/* Store the handler in case the default is modified */
Py_XDECREF(fa->mem_handler);
fa->mem_handler = PyDataMem_GetHandler();
if (fa->mem_handler == NULL) {
+ Py_CLEAR(fa->mem_handler);
Py_DECREF(rawdata);
return NULL;
}
fa->data = PyDataMem_UserNEW(num, PyArray_HANDLER(self));
if (PyArray_DATA(self) == NULL) {
- Py_DECREF(fa->mem_handler);
+ Py_CLEAR(fa->mem_handler);
Py_DECREF(rawdata);
return PyErr_NoMemory();
}
@@ -2158,11 +2178,8 @@ array_setstate(PyArrayObject *self, PyObject *args)
}
}
else {
- npy_intp num = PyArray_NBYTES(self);
- int elsize = PyArray_DESCR(self)->elsize;
- if (num == 0 || elsize == 0) {
- Py_RETURN_NONE;
- }
+ npy_intp num = PyArray_NBYTES_ALLOCATED(self);
+
/* Store the functions in case the default handler is modified */
Py_XDECREF(fa->mem_handler);
fa->mem_handler = PyDataMem_GetHandler();
@@ -2171,7 +2188,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
}
fa->data = PyDataMem_UserNEW(num, PyArray_HANDLER(self));
if (PyArray_DATA(self) == NULL) {
- Py_DECREF(fa->mem_handler);
+ Py_CLEAR(fa->mem_handler);
return PyErr_NoMemory();
}
if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_NEEDS_INIT)) {
@@ -2180,7 +2197,6 @@ array_setstate(PyArrayObject *self, PyObject *args)
PyArray_ENABLEFLAGS(self, NPY_ARRAY_OWNDATA);
fa->base = NULL;
if (_setlist_pkl(self, rawdata) < 0) {
- Py_DECREF(fa->mem_handler);
return NULL;
}
}
@@ -2621,7 +2637,6 @@ array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds)
}
else {
PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY);
- PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY);
Py_XDECREF(fa->base);
fa->base = NULL;
}
@@ -2769,6 +2784,9 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
{"__array_prepare__",
(PyCFunction)array_preparearray,
METH_VARARGS, NULL},
+ {"__array_finalize__",
+ (PyCFunction)array_finalizearray,
+ METH_O, NULL},
{"__array_wrap__",
(PyCFunction)array_wraparray,
METH_VARARGS, NULL},
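
The dimension-validation loop added to array_setstate above amounts to an overflow-checked product of the dimensions and the element size, with any size-0 dimension forcing the byte count to zero. The stand-alone sketch below illustrates that idea; it uses the GCC/Clang builtin __builtin_mul_overflow and int64_t as stand-ins for npy_mul_with_overflow_intp and npy_intp, so it shows the shape of the check rather than NumPy's actual helper.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Sketch: validate pickled dimensions and compute the byte count,
    // mirroring array_setstate: negative dims are rejected, overflow maps
    // to "no memory", and any size-0 dimension forces nbytes to 0.
    static bool compute_nbytes(const std::vector<int64_t> &dims,
                               int64_t elsize, int64_t *nbytes)
    {
        bool empty = false;
        int64_t total = 1;
        for (int64_t d : dims) {
            if (d < 0) {
                return false;           /* impossible dimension */
            }
            if (d == 0) {
                empty = true;           /* result is empty; forced to 0 below */
            }
            if (__builtin_mul_overflow(total, d, &total)) {
                return false;           /* more items than are addressable */
            }
        }
        if (__builtin_mul_overflow(total, elsize, &total)) {
            return false;               /* more bytes than are addressable */
        }
        *nbytes = empty ? 0 : total;
        return true;
    }

    int main()
    {
        int64_t nbytes = -1;
        if (compute_nbytes({3, 0, 4}, 8, &nbytes)) {
            std::printf("nbytes = %lld\n", (long long)nbytes);  /* prints 0 */
        }
        return 0;
    }
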
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index cf0160a2b..789446d0c 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -1478,11 +1478,30 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2)
if (type1 == type2) {
return 1;
}
+
+ if (Py_TYPE(Py_TYPE(type1)) == &PyType_Type) {
+ /*
+ * 2021-12-17: This case is nonsense and should be removed eventually!
+ *
+ * boost::python has/had a bug effectively using EquivTypes with
+ * `type(arbitrary_obj)`. That is clearly wrong as that cannot be a
+ * `PyArray_Descr *`. We assume that `type(type(type(arbitrary_obj)))`
+ * is always in practice `type` (this is the type of the metaclass),
+ * but for our descriptors, `type(type(descr))` is DTypeMeta.
+ *
+ * In that case, we just return False. There is a possibility that
+ * this actually _worked_ effectively (returning 1 sometimes).
+ * We ignore that possibility for simplicity; it really is not our bug.
+ */
+ return 0;
+ }
+
/*
* Do not use PyArray_CanCastTypeTo because it supports legacy flexible
* dtypes as input.
*/
- NPY_CASTING safety = PyArray_GetCastSafety(type1, type2, NULL);
+ npy_intp view_offset;
+ NPY_CASTING safety = PyArray_GetCastInfo(type1, type2, NULL, &view_offset);
if (safety < 0) {
PyErr_Clear();
return 0;
@@ -4621,7 +4640,6 @@ set_flaginfo(PyObject *d)
_addnew(FORTRAN, NPY_ARRAY_F_CONTIGUOUS, F);
_addnew(CONTIGUOUS, NPY_ARRAY_C_CONTIGUOUS, C);
_addnew(ALIGNED, NPY_ARRAY_ALIGNED, A);
- _addnew(UPDATEIFCOPY, NPY_ARRAY_UPDATEIFCOPY, U);
_addnew(WRITEBACKIFCOPY, NPY_ARRAY_WRITEBACKIFCOPY, X);
_addnew(WRITEABLE, NPY_ARRAY_WRITEABLE, W);
_addone(C_CONTIGUOUS, NPY_ARRAY_C_CONTIGUOUS);
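
The new guard in PyArray_EquivTypes relies on the fact that for a genuine descriptor the metaclass chain ends in DTypeMeta, whereas for a plain class (what the buggy boost::python path passes in) type(type(x)) is `type` itself. The embedding sketch below only illustrates that relationship; it assumes a CPython build with development headers and a NumPy recent enough to expose the DTypeMeta metaclass, and is not part of the patch.

    /* Build flags from `python3-config --includes --ldflags --embed`. */
    #include <Python.h>
    #include <cstdio>

    int main()
    {
        Py_Initialize();

        PyObject *np = PyImport_ImportModule("numpy");
        if (np == NULL) { PyErr_Print(); return 1; }
        PyObject *descr = PyObject_CallMethod(np, "dtype", "s", "float64");
        if (descr == NULL) { PyErr_Print(); return 1; }

        /* Guard from PyArray_EquivTypes: is type(type(x)) the plain `type`? */
        int cls_trips = (Py_TYPE(Py_TYPE((PyObject *)&PyLong_Type)) == &PyType_Type);
        int descr_trips = (Py_TYPE(Py_TYPE(descr)) == &PyType_Type);

        std::printf("plain class trips guard: %d\n", cls_trips);       /* 1 */
        std::printf("real descriptor trips guard: %d\n", descr_trips); /* 0 */

        Py_DECREF(descr);
        Py_DECREF(np);
        Py_FinalizeEx();
        return 0;
    }
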
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index db1e49db8..907761874 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -1203,8 +1203,7 @@ gentype_struct_get(PyObject *self, void *NPY_UNUSED(ignored))
inter->two = 2;
inter->nd = 0;
inter->flags = PyArray_FLAGS(arr);
- inter->flags &= ~(NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_WRITEBACKIFCOPY |
- NPY_ARRAY_OWNDATA);
+ inter->flags &= ~(NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_OWNDATA);
inter->flags |= NPY_ARRAY_NOTSWAPPED;
inter->typekind = PyArray_DESCR(arr)->kind;
inter->itemsize = PyArray_DESCR(arr)->elsize;
@@ -2585,6 +2584,7 @@ gentype_arrtype_getbuffer(PyObject *self, Py_buffer *view, int flags)
"user-defined scalar %R registered for built-in dtype %S? "
"This should be impossible.",
self, descr);
+ Py_DECREF(descr);
return -1;
}
view->ndim = 0;
diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c
index f615aa336..34248076c 100644
--- a/numpy/core/src/multiarray/temp_elide.c
+++ b/numpy/core/src/multiarray/temp_elide.c
@@ -286,7 +286,6 @@ can_elide_temp(PyObject *olhs, PyObject *orhs, int *cannot)
!PyArray_ISNUMBER(alhs) ||
!PyArray_CHKFLAGS(alhs, NPY_ARRAY_OWNDATA) ||
!PyArray_ISWRITEABLE(alhs) ||
- PyArray_CHKFLAGS(alhs, NPY_ARRAY_UPDATEIFCOPY) ||
PyArray_CHKFLAGS(alhs, NPY_ARRAY_WRITEBACKIFCOPY) ||
PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES) {
return 0;
@@ -365,7 +364,6 @@ can_elide_temp_unary(PyArrayObject * m1)
!PyArray_ISNUMBER(m1) ||
!PyArray_CHKFLAGS(m1, NPY_ARRAY_OWNDATA) ||
!PyArray_ISWRITEABLE(m1) ||
- PyArray_CHKFLAGS(m1, NPY_ARRAY_UPDATEIFCOPY) ||
PyArray_NBYTES(m1) < NPY_MIN_ELIDE_BYTES) {
return 0;
}
diff --git a/numpy/core/src/npymath/npy_math_internal.h.src b/numpy/core/src/npymath/npy_math_internal.h.src
index 5b418342f..15d35637f 100644
--- a/numpy/core/src/npymath/npy_math_internal.h.src
+++ b/numpy/core/src/npymath/npy_math_internal.h.src
@@ -54,6 +54,9 @@
* ====================================================
*/
#include "npy_math_private.h"
+#ifdef _MSC_VER
+# include <intrin.h> // for __popcnt
+#endif
/* Magic binary numbers used by bit_count
* For type T, the magic numbers are computed as follows:
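
The new <intrin.h> include is needed because MSVC only exposes the __popcnt intrinsics through that header, while GCC and Clang provide __builtin_popcount without one. A minimal wrapper showing the pattern follows; it only illustrates the portability issue, not NumPy's actual bit_count implementation (which also documents the magic-number fallback right after the include).

    #include <cstdint>
    #include <cstdio>
    #ifdef _MSC_VER
    #  include <intrin.h>   // __popcnt lives here on MSVC, hence the include above
    #endif

    // Illustrative popcount wrapper: MSVC needs <intrin.h> for __popcnt,
    // GCC/Clang provide __builtin_popcount directly.
    static inline unsigned popcount_u32(uint32_t x)
    {
    #ifdef _MSC_VER
        return __popcnt(x);
    #else
        return (unsigned)__builtin_popcount(x);
    #endif
    }

    int main()
    {
        std::printf("popcount(0xF0F0) = %u\n", popcount_u32(0xF0F0u));  // 8
        return 0;
    }
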
diff --git a/numpy/core/src/npysort/binsearch.c.src b/numpy/core/src/npysort/binsearch.c.src
deleted file mode 100644
index 41165897b..000000000
--- a/numpy/core/src/npysort/binsearch.c.src
+++ /dev/null
@@ -1,250 +0,0 @@
-/* -*- c -*- */
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-
-#include "npy_sort.h"
-#include "npysort_common.h"
-#include "npy_binsearch.h"
-
-#define NOT_USED NPY_UNUSED(unused)
-
-/*
- *****************************************************************************
- ** NUMERIC SEARCHES **
- *****************************************************************************
- */
-
-/**begin repeat
- *
- * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
- * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE, DATETIME, TIMEDELTA#
- * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
- * longlong, ulonglong, half, float, double, longdouble,
- * cfloat, cdouble, clongdouble, datetime, timedelta#
- * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int,
- * npy_uint, npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_ushort, npy_float, npy_double, npy_longdouble, npy_cfloat,
- * npy_cdouble, npy_clongdouble, npy_datetime, npy_timedelta#
- */
-
-#define @TYPE@_LTE(a, b) (!@TYPE@_LT((b), (a)))
-
-/**begin repeat1
- *
- * #side = left, right#
- * #CMP = LT, LTE#
- */
-
-NPY_NO_EXPORT void
-binsearch_@side@_@suff@(const char *arr, const char *key, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
- PyArrayObject *NOT_USED)
-{
- npy_intp min_idx = 0;
- npy_intp max_idx = arr_len;
- @type@ last_key_val;
-
- if (key_len == 0) {
- return;
- }
- last_key_val = *(const @type@ *)key;
-
- for (; key_len > 0; key_len--, key += key_str, ret += ret_str) {
- const @type@ key_val = *(const @type@ *)key;
- /*
- * Updating only one of the indices based on the previous key
- * gives the search a big boost when keys are sorted, but slightly
- * slows down things for purely random ones.
- */
- if (@TYPE@_LT(last_key_val, key_val)) {
- max_idx = arr_len;
- }
- else {
- min_idx = 0;
- max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len;
- }
-
- last_key_val = key_val;
-
- while (min_idx < max_idx) {
- const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
- const @type@ mid_val = *(const @type@ *)(arr + mid_idx*arr_str);
- if (@TYPE@_@CMP@(mid_val, key_val)) {
- min_idx = mid_idx + 1;
- }
- else {
- max_idx = mid_idx;
- }
- }
- *(npy_intp *)ret = min_idx;
- }
-}
-
-NPY_NO_EXPORT int
-argbinsearch_@side@_@suff@(const char *arr, const char *key,
- const char *sort, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str,
- npy_intp sort_str, npy_intp ret_str,
- PyArrayObject *NOT_USED)
-{
- npy_intp min_idx = 0;
- npy_intp max_idx = arr_len;
- @type@ last_key_val;
-
- if (key_len == 0) {
- return 0;
- }
- last_key_val = *(const @type@ *)key;
-
- for (; key_len > 0; key_len--, key += key_str, ret += ret_str) {
- const @type@ key_val = *(const @type@ *)key;
- /*
- * Updating only one of the indices based on the previous key
- * gives the search a big boost when keys are sorted, but slightly
- * slows down things for purely random ones.
- */
- if (@TYPE@_LT(last_key_val, key_val)) {
- max_idx = arr_len;
- }
- else {
- min_idx = 0;
- max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len;
- }
-
- last_key_val = key_val;
-
- while (min_idx < max_idx) {
- const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
- const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx*sort_str);
- @type@ mid_val;
-
- if (sort_idx < 0 || sort_idx >= arr_len) {
- return -1;
- }
-
- mid_val = *(const @type@ *)(arr + sort_idx*arr_str);
-
- if (@TYPE@_@CMP@(mid_val, key_val)) {
- min_idx = mid_idx + 1;
- }
- else {
- max_idx = mid_idx;
- }
- }
- *(npy_intp *)ret = min_idx;
- }
- return 0;
-}
-
-/**end repeat1**/
-/**end repeat**/
-
-/*
- *****************************************************************************
- ** GENERIC SEARCH **
- *****************************************************************************
- */
-
- /**begin repeat
- *
- * #side = left, right#
- * #CMP = <, <=#
- */
-
-NPY_NO_EXPORT void
-npy_binsearch_@side@(const char *arr, const char *key, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
- PyArrayObject *cmp)
-{
- PyArray_CompareFunc *compare = PyArray_DESCR(cmp)->f->compare;
- npy_intp min_idx = 0;
- npy_intp max_idx = arr_len;
- const char *last_key = key;
-
- for (; key_len > 0; key_len--, key += key_str, ret += ret_str) {
- /*
- * Updating only one of the indices based on the previous key
- * gives the search a big boost when keys are sorted, but slightly
- * slows down things for purely random ones.
- */
- if (compare(last_key, key, cmp) @CMP@ 0) {
- max_idx = arr_len;
- }
- else {
- min_idx = 0;
- max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len;
- }
-
- last_key = key;
-
- while (min_idx < max_idx) {
- const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
- const char *arr_ptr = arr + mid_idx*arr_str;
-
- if (compare(arr_ptr, key, cmp) @CMP@ 0) {
- min_idx = mid_idx + 1;
- }
- else {
- max_idx = mid_idx;
- }
- }
- *(npy_intp *)ret = min_idx;
- }
-}
-
-NPY_NO_EXPORT int
-npy_argbinsearch_@side@(const char *arr, const char *key,
- const char *sort, char *ret,
- npy_intp arr_len, npy_intp key_len,
- npy_intp arr_str, npy_intp key_str,
- npy_intp sort_str, npy_intp ret_str,
- PyArrayObject *cmp)
-{
- PyArray_CompareFunc *compare = PyArray_DESCR(cmp)->f->compare;
- npy_intp min_idx = 0;
- npy_intp max_idx = arr_len;
- const char *last_key = key;
-
- for (; key_len > 0; key_len--, key += key_str, ret += ret_str) {
- /*
- * Updating only one of the indices based on the previous key
- * gives the search a big boost when keys are sorted, but slightly
- * slows down things for purely random ones.
- */
- if (compare(last_key, key, cmp) @CMP@ 0) {
- max_idx = arr_len;
- }
- else {
- min_idx = 0;
- max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len;
- }
-
- last_key = key;
-
- while (min_idx < max_idx) {
- const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
- const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx*sort_str);
- const char *arr_ptr;
-
- if (sort_idx < 0 || sort_idx >= arr_len) {
- return -1;
- }
-
- arr_ptr = arr + sort_idx*arr_str;
-
- if (compare(arr_ptr, key, cmp) @CMP@ 0) {
- min_idx = mid_idx + 1;
- }
- else {
- max_idx = mid_idx;
- }
- }
- *(npy_intp *)ret = min_idx;
- }
- return 0;
-}
-
-/**end repeat**/
diff --git a/numpy/core/src/npysort/binsearch.cpp b/numpy/core/src/npysort/binsearch.cpp
new file mode 100644
index 000000000..cd5f03470
--- /dev/null
+++ b/numpy/core/src/npysort/binsearch.cpp
@@ -0,0 +1,387 @@
+/* -*- c -*- */
+
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "npy_sort.h"
+#include "numpy_tag.h"
+#include <numpy/npy_common.h>
+#include <numpy/ndarraytypes.h>
+
+#include "npy_binsearch.h"
+
+#include <array>
+#include <functional> // for std::less and std::less_equal
+
+// Enumerators for the variant of binsearch
+enum arg_t { noarg, arg};
+enum side_t { left, right};
+
+// Mapping from enumerators to comparators
+template<class Tag, side_t side>
+struct side_to_cmp;
+template<class Tag>
+struct side_to_cmp<Tag, left> { static constexpr auto value = Tag::less; };
+template<class Tag>
+struct side_to_cmp<Tag, right> { static constexpr auto value = Tag::less_equal; };
+
+template<side_t side>
+struct side_to_generic_cmp;
+template<>
+struct side_to_generic_cmp<left> { using type = std::less<int>; };
+template<>
+struct side_to_generic_cmp<right> { using type = std::less_equal<int>; };
+
+/*
+ *****************************************************************************
+ ** NUMERIC SEARCHES **
+ *****************************************************************************
+ */
+template<class Tag, side_t side>
+static void
+binsearch(const char *arr, const char *key, char *ret,
+ npy_intp arr_len, npy_intp key_len,
+ npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
+ PyArrayObject*)
+{
+ using T = typename Tag::type;
+ auto cmp = side_to_cmp<Tag, side>::value;
+ npy_intp min_idx = 0;
+ npy_intp max_idx = arr_len;
+ T last_key_val;
+
+ if (key_len == 0) {
+ return;
+ }
+ last_key_val = *(const T *)key;
+
+ for (; key_len > 0; key_len--, key += key_str, ret += ret_str) {
+ const T key_val = *(const T *)key;
+ /*
+ * Updating only one of the indices based on the previous key
+ * gives the search a big boost when keys are sorted, but slightly
+ * slows down things for purely random ones.
+ */
+ if (cmp(last_key_val, key_val)) {
+ max_idx = arr_len;
+ }
+ else {
+ min_idx = 0;
+ max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len;
+ }
+
+ last_key_val = key_val;
+
+ while (min_idx < max_idx) {
+ const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
+ const T mid_val = *(const T *)(arr + mid_idx*arr_str);
+ if (cmp(mid_val, key_val)) {
+ min_idx = mid_idx + 1;
+ }
+ else {
+ max_idx = mid_idx;
+ }
+ }
+ *(npy_intp *)ret = min_idx;
+ }
+}
+
+template<class Tag, side_t side>
+static int
+argbinsearch(const char *arr, const char *key,
+ const char *sort, char *ret,
+ npy_intp arr_len, npy_intp key_len,
+ npy_intp arr_str, npy_intp key_str,
+ npy_intp sort_str, npy_intp ret_str, PyArrayObject*)
+{
+ using T = typename Tag::type;
+ auto cmp = side_to_cmp<Tag, side>::value;
+ npy_intp min_idx = 0;
+ npy_intp max_idx = arr_len;
+ T last_key_val;
+
+ if (key_len == 0) {
+ return 0;
+ }
+ last_key_val = *(const T *)key;
+
+ for (; key_len > 0; key_len--, key += key_str, ret += ret_str) {
+ const T key_val = *(const T *)key;
+ /*
+ * Updating only one of the indices based on the previous key
+ * gives the search a big boost when keys are sorted, but slightly
+ * slows down things for purely random ones.
+ */
+ if (cmp(last_key_val, key_val)) {
+ max_idx = arr_len;
+ }
+ else {
+ min_idx = 0;
+ max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len;
+ }
+
+ last_key_val = key_val;
+
+ while (min_idx < max_idx) {
+ const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
+ const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx*sort_str);
+ T mid_val;
+
+ if (sort_idx < 0 || sort_idx >= arr_len) {
+ return -1;
+ }
+
+ mid_val = *(const T *)(arr + sort_idx*arr_str);
+
+ if (cmp(mid_val, key_val)) {
+ min_idx = mid_idx + 1;
+ }
+ else {
+ max_idx = mid_idx;
+ }
+ }
+ *(npy_intp *)ret = min_idx;
+ }
+ return 0;
+}
+
+/*
+ *****************************************************************************
+ ** GENERIC SEARCH **
+ *****************************************************************************
+ */
+
+template<side_t side>
+static void
+npy_binsearch(const char *arr, const char *key, char *ret,
+ npy_intp arr_len, npy_intp key_len,
+ npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
+ PyArrayObject *cmp)
+{
+ using Cmp = typename side_to_generic_cmp<side>::type;
+ PyArray_CompareFunc *compare = PyArray_DESCR(cmp)->f->compare;
+ npy_intp min_idx = 0;
+ npy_intp max_idx = arr_len;
+ const char *last_key = key;
+
+ for (; key_len > 0; key_len--, key += key_str, ret += ret_str) {
+ /*
+ * Updating only one of the indices based on the previous key
+ * gives the search a big boost when keys are sorted, but slightly
+ * slows down things for purely random ones.
+ */
+ if (Cmp{}(compare(last_key, key, cmp), 0)) {
+ max_idx = arr_len;
+ }
+ else {
+ min_idx = 0;
+ max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len;
+ }
+
+ last_key = key;
+
+ while (min_idx < max_idx) {
+ const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
+ const char *arr_ptr = arr + mid_idx*arr_str;
+
+ if (Cmp{}(compare(arr_ptr, key, cmp), 0)) {
+ min_idx = mid_idx + 1;
+ }
+ else {
+ max_idx = mid_idx;
+ }
+ }
+ *(npy_intp *)ret = min_idx;
+ }
+}
+
+template<side_t side>
+static int
+npy_argbinsearch(const char *arr, const char *key,
+ const char *sort, char *ret,
+ npy_intp arr_len, npy_intp key_len,
+ npy_intp arr_str, npy_intp key_str,
+ npy_intp sort_str, npy_intp ret_str,
+ PyArrayObject *cmp)
+{
+ using Cmp = typename side_to_generic_cmp<side>::type;
+ PyArray_CompareFunc *compare = PyArray_DESCR(cmp)->f->compare;
+ npy_intp min_idx = 0;
+ npy_intp max_idx = arr_len;
+ const char *last_key = key;
+
+ for (; key_len > 0; key_len--, key += key_str, ret += ret_str) {
+ /*
+ * Updating only one of the indices based on the previous key
+ * gives the search a big boost when keys are sorted, but slightly
+ * slows down things for purely random ones.
+ */
+ if (Cmp{}(compare(last_key, key, cmp), 0)) {
+ max_idx = arr_len;
+ }
+ else {
+ min_idx = 0;
+ max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len;
+ }
+
+ last_key = key;
+
+ while (min_idx < max_idx) {
+ const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
+ const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx*sort_str);
+ const char *arr_ptr;
+
+ if (sort_idx < 0 || sort_idx >= arr_len) {
+ return -1;
+ }
+
+ arr_ptr = arr + sort_idx*arr_str;
+
+ if (Cmp{}(compare(arr_ptr, key, cmp), 0)) {
+ min_idx = mid_idx + 1;
+ }
+ else {
+ max_idx = mid_idx;
+ }
+ }
+ *(npy_intp *)ret = min_idx;
+ }
+ return 0;
+}
+
+/*
+ *****************************************************************************
+ ** GENERATOR **
+ *****************************************************************************
+ */
+
+template<arg_t arg>
+struct binsearch_base;
+
+template<>
+struct binsearch_base<arg> {
+ using function_type = PyArray_ArgBinSearchFunc*;
+ struct value_type {
+ int typenum;
+ function_type binsearch[NPY_NSEARCHSIDES];
+ };
+ template<class... Tags>
+ static constexpr std::array<value_type, sizeof...(Tags)> make_binsearch_map(npy::taglist<Tags...>) {
+ return std::array<value_type, sizeof...(Tags)>{
+ value_type{
+ Tags::type_value,
+ {
+ (function_type)&argbinsearch<Tags, left>,
+ (function_type)argbinsearch<Tags, right>
+ }
+ }...
+ };
+ }
+ static constexpr std::array<function_type, 2> npy_map = {
+ (function_type)&npy_argbinsearch<left>,
+ (function_type)&npy_argbinsearch<right>
+ };
+};
+constexpr std::array<binsearch_base<arg>::function_type, 2> binsearch_base<arg>::npy_map;
+
+template<>
+struct binsearch_base<noarg> {
+ using function_type = PyArray_BinSearchFunc*;
+ struct value_type {
+ int typenum;
+ function_type binsearch[NPY_NSEARCHSIDES];
+ };
+ template<class... Tags>
+ static constexpr std::array<value_type, sizeof...(Tags)> make_binsearch_map(npy::taglist<Tags...>) {
+ return std::array<value_type, sizeof...(Tags)>{
+ value_type{
+ Tags::type_value,
+ {
+ (function_type)&binsearch<Tags, left>,
+ (function_type)binsearch<Tags, right>
+ }
+ }...
+ };
+ }
+ static constexpr std::array<function_type, 2> npy_map = {
+ (function_type)&npy_binsearch<left>,
+ (function_type)&npy_binsearch<right>
+ };
+};
+constexpr std::array<binsearch_base<noarg>::function_type, 2> binsearch_base<noarg>::npy_map;
+
+// Handle generation of all binsearch variants
+template<arg_t arg>
+struct binsearch_t : binsearch_base<arg> {
+ using binsearch_base<arg>::make_binsearch_map;
+ using value_type = typename binsearch_base<arg>::value_type;
+
+ using taglist = npy::taglist<
+ /* If adding new types, make sure to keep them ordered by type num */
+ npy::bool_tag, npy::byte_tag, npy::ubyte_tag, npy::short_tag,
+ npy::ushort_tag, npy::int_tag, npy::uint_tag, npy::long_tag,
+ npy::ulong_tag, npy::longlong_tag, npy::ulonglong_tag, npy::half_tag,
+ npy::float_tag, npy::double_tag, npy::longdouble_tag, npy::cfloat_tag,
+ npy::cdouble_tag, npy::clongdouble_tag, npy::datetime_tag,
+ npy::timedelta_tag>;
+
+ static constexpr std::array<value_type, taglist::size> map = make_binsearch_map(taglist());
+};
+template<arg_t arg>
+constexpr std::array<typename binsearch_t<arg>::value_type, binsearch_t<arg>::taglist::size> binsearch_t<arg>::map;
+
+
+template<arg_t arg>
+static NPY_INLINE typename binsearch_t<arg>::function_type
+_get_binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side)
+{
+ using binsearch = binsearch_t<arg>;
+ npy_intp nfuncs = binsearch::map.size();
+ npy_intp min_idx = 0;
+ npy_intp max_idx = nfuncs;
+ int type = dtype->type_num;
+
+ if ((int)side >= (int)NPY_NSEARCHSIDES) {
+ return NULL;
+ }
+
+ /*
+ * It seems only fair that a binary search function be searched for
+ * using a binary search...
+ */
+ while (min_idx < max_idx) {
+ npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1);
+
+ if (binsearch::map[mid_idx].typenum < type) {
+ min_idx = mid_idx + 1;
+ }
+ else {
+ max_idx = mid_idx;
+ }
+ }
+
+ if (min_idx < nfuncs &&
+ binsearch::map[min_idx].typenum == type) {
+ return binsearch::map[min_idx].binsearch[side];
+ }
+
+ if (dtype->f->compare) {
+ return binsearch::npy_map[side];
+ }
+
+ return NULL;
+}
+
+
+/*
+ *****************************************************************************
+ ** C INTERFACE **
+ *****************************************************************************
+ */
+extern "C" {
+ NPY_NO_EXPORT PyArray_BinSearchFunc* get_binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side) {
+ return _get_binsearch_func<noarg>(dtype, side);
+ }
+ NPY_NO_EXPORT PyArray_ArgBinSearchFunc* get_argbinsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side) {
+ return _get_binsearch_func<arg>(dtype, side);
+ }
+}
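
The C interface at the end of the new binsearch.cpp finds the per-type function with a binary search over a table sorted by type number (as the comment in _get_binsearch_func jokes). The self-contained sketch below shows that lookup pattern with placeholder type numbers and functions; the real tables are the constexpr maps generated from the tag list above.

    #include <array>
    #include <cstdio>

    // Sketch of the lookup strategy in _get_binsearch_func: entries are kept
    // sorted by type number, so the right function is itself found with a
    // binary search.  Typenums and functions here are placeholders.
    using search_fn = void (*)();

    struct entry { int typenum; search_fn fn; };

    static void search_int()    { std::puts("int binsearch"); }
    static void search_double() { std::puts("double binsearch"); }

    static constexpr std::array<entry, 2> table{{ {5, search_int}, {12, search_double} }};

    static search_fn lookup(int type)
    {
        std::size_t lo = 0, hi = table.size();
        while (lo < hi) {
            std::size_t mid = lo + ((hi - lo) >> 1);
            if (table[mid].typenum < type) {
                lo = mid + 1;
            }
            else {
                hi = mid;
            }
        }
        if (lo < table.size() && table[lo].typenum == type) {
            return table[lo].fn;
        }
        return nullptr;   // caller falls back to the generic compare-based search
    }

    int main()
    {
        if (search_fn fn = lookup(12)) {
            fn();
        }
        return 0;
    }
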
diff --git a/numpy/core/src/npysort/npysort_common.h b/numpy/core/src/npysort/npysort_common.h
index 2a6e4d421..a537fcd08 100644
--- a/numpy/core/src/npysort/npysort_common.h
+++ b/numpy/core/src/npysort/npysort_common.h
@@ -4,6 +4,10 @@
#include <stdlib.h>
#include <numpy/ndarraytypes.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/*
*****************************************************************************
** SWAP MACROS **
@@ -139,14 +143,14 @@ LONGDOUBLE_LT(npy_longdouble a, npy_longdouble b)
NPY_INLINE static int
-npy_half_isnan(npy_half h)
+_npy_half_isnan(npy_half h)
{
return ((h&0x7c00u) == 0x7c00u) && ((h&0x03ffu) != 0x0000u);
}
NPY_INLINE static int
-npy_half_lt_nonan(npy_half h1, npy_half h2)
+_npy_half_lt_nonan(npy_half h1, npy_half h2)
{
if (h1&0x8000u) {
if (h2&0x8000u) {
@@ -173,11 +177,11 @@ HALF_LT(npy_half a, npy_half b)
{
int ret;
- if (npy_half_isnan(b)) {
- ret = !npy_half_isnan(a);
+ if (_npy_half_isnan(b)) {
+ ret = !_npy_half_isnan(a);
}
else {
- ret = !npy_half_isnan(a) && npy_half_lt_nonan(a, b);
+ ret = !_npy_half_isnan(a) && _npy_half_lt_nonan(a, b);
}
return ret;
@@ -373,4 +377,8 @@ GENERIC_SWAP(char *a, char *b, size_t len)
}
}
+#ifdef __cplusplus
+}
+#endif
+
#endif
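
The extern "C" guards added to npysort_common.h are the standard idiom for letting a C header be included from the new C++ sources (binsearch.cpp, radixsort.cpp) without C++ name mangling. A generic sketch of the pattern follows; the header and function names are made up for illustration.

    /* mylib.h -- hypothetical header usable from both C and C++ sources.
     * The #ifdef __cplusplus / extern "C" pair mirrors the change above:
     * declarations keep C linkage when the header is pulled into a .cpp file. */
    #ifndef MYLIB_H
    #define MYLIB_H

    #ifdef __cplusplus
    extern "C" {
    #endif

    int mylib_compare(const void *a, const void *b);

    #ifdef __cplusplus
    }
    #endif

    #endif /* MYLIB_H */
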
diff --git a/numpy/core/src/npysort/radixsort.cpp b/numpy/core/src/npysort/radixsort.cpp
index 017ea43b6..5393869ee 100644
--- a/numpy/core/src/npysort/radixsort.cpp
+++ b/numpy/core/src/npysort/radixsort.cpp
@@ -14,9 +14,9 @@
*/
// Reference: https://github.com/eloj/radix-sorting#-key-derivation
-template <class T>
-T
-KEY_OF(T x)
+template <class T, class UT>
+UT
+KEY_OF(UT x)
{
// Floating-point is currently disabled.
// Floating-point tests succeed for double and float on macOS but not on
@@ -27,12 +27,12 @@ KEY_OF(T x)
// For floats, we invert the key if the sign bit is set, else we invert
// the sign bit.
return ((x) ^ (-((x) >> (sizeof(T) * 8 - 1)) |
- ((T)1 << (sizeof(T) * 8 - 1))));
+ ((UT)1 << (sizeof(T) * 8 - 1))));
}
else if (std::is_signed<T>::value) {
// For signed ints, we flip the sign bit so the negatives are below the
// positives.
- return ((x) ^ ((T)1 << (sizeof(T) * 8 - 1)));
+ return ((x) ^ ((UT)1 << (sizeof(UT) * 8 - 1)));
}
else {
return x;
@@ -46,24 +46,24 @@ nth_byte(T key, npy_intp l)
return (key >> (l << 3)) & 0xFF;
}
-template <class T>
-static T *
-radixsort0(T *start, T *aux, npy_intp num)
+template <class T, class UT>
+static UT *
+radixsort0(UT *start, UT *aux, npy_intp num)
{
- npy_intp cnt[sizeof(T)][1 << 8] = {{0}};
- T key0 = KEY_OF(start[0]);
+ npy_intp cnt[sizeof(UT)][1 << 8] = {{0}};
+ UT key0 = KEY_OF<T>(start[0]);
for (npy_intp i = 0; i < num; i++) {
- T k = KEY_OF(start[i]);
+ UT k = KEY_OF<T>(start[i]);
- for (size_t l = 0; l < sizeof(T); l++) {
+ for (size_t l = 0; l < sizeof(UT); l++) {
cnt[l][nth_byte(k, l)]++;
}
}
size_t ncols = 0;
- npy_ubyte cols[sizeof(T)];
- for (size_t l = 0; l < sizeof(T); l++) {
+ npy_ubyte cols[sizeof(UT)];
+ for (size_t l = 0; l < sizeof(UT); l++) {
if (cnt[l][nth_byte(key0, l)] != num) {
cols[ncols++] = l;
}
@@ -79,9 +79,9 @@ radixsort0(T *start, T *aux, npy_intp num)
}
for (size_t l = 0; l < ncols; l++) {
- T *temp;
+ UT *temp;
for (npy_intp i = 0; i < num; i++) {
- T k = KEY_OF(start[i]);
+ UT k = KEY_OF<T>(start[i]);
npy_intp dst = cnt[cols[l]][nth_byte(k, cols[l])]++;
aux[dst] = start[i];
}
@@ -94,18 +94,18 @@ radixsort0(T *start, T *aux, npy_intp num)
return start;
}
-template <class T>
+template <class T, class UT>
static int
-radixsort_(T *start, npy_intp num)
+radixsort_(UT *start, npy_intp num)
{
if (num < 2) {
return 0;
}
npy_bool all_sorted = 1;
- T k1 = KEY_OF(start[0]), k2;
+ UT k1 = KEY_OF<T>(start[0]);
for (npy_intp i = 1; i < num; i++) {
- k2 = KEY_OF(start[i]);
+ UT k2 = KEY_OF<T>(start[i]);
if (k1 > k2) {
all_sorted = 0;
break;
@@ -117,14 +117,14 @@ radixsort_(T *start, npy_intp num)
return 0;
}
- T *aux = (T *)malloc(num * sizeof(T));
+ UT *aux = (UT *)malloc(num * sizeof(UT));
if (aux == nullptr) {
return -NPY_ENOMEM;
}
- T *sorted = radixsort0(start, aux, num);
+ UT *sorted = radixsort0<T>(start, aux, num);
if (sorted != start) {
- memcpy(start, sorted, num * sizeof(T));
+ memcpy(start, sorted, num * sizeof(UT));
}
free(aux);
@@ -135,27 +135,28 @@ template <class T>
static int
radixsort(void *start, npy_intp num)
{
- return radixsort_((T *)start, num);
+ using UT = typename std::make_unsigned<T>::type;
+ return radixsort_<T>((UT *)start, num);
}
-template <class T>
+template <class T, class UT>
static npy_intp *
-aradixsort0(T *start, npy_intp *aux, npy_intp *tosort, npy_intp num)
+aradixsort0(UT *start, npy_intp *aux, npy_intp *tosort, npy_intp num)
{
- npy_intp cnt[sizeof(T)][1 << 8] = {{0}};
- T key0 = KEY_OF(start[0]);
+ npy_intp cnt[sizeof(UT)][1 << 8] = {{0}};
+ UT key0 = KEY_OF<T>(start[0]);
for (npy_intp i = 0; i < num; i++) {
- T k = KEY_OF(start[i]);
+ UT k = KEY_OF<T>(start[i]);
- for (size_t l = 0; l < sizeof(T); l++) {
+ for (size_t l = 0; l < sizeof(UT); l++) {
cnt[l][nth_byte(k, l)]++;
}
}
size_t ncols = 0;
- npy_ubyte cols[sizeof(T)];
- for (size_t l = 0; l < sizeof(T); l++) {
+ npy_ubyte cols[sizeof(UT)];
+ for (size_t l = 0; l < sizeof(UT); l++) {
if (cnt[l][nth_byte(key0, l)] != num) {
cols[ncols++] = l;
}
@@ -173,7 +174,7 @@ aradixsort0(T *start, npy_intp *aux, npy_intp *tosort, npy_intp num)
for (size_t l = 0; l < ncols; l++) {
npy_intp *temp;
for (npy_intp i = 0; i < num; i++) {
- T k = KEY_OF(start[tosort[i]]);
+ UT k = KEY_OF<T>(start[tosort[i]]);
npy_intp dst = cnt[cols[l]][nth_byte(k, cols[l])]++;
aux[dst] = tosort[i];
}
@@ -186,22 +187,22 @@ aradixsort0(T *start, npy_intp *aux, npy_intp *tosort, npy_intp num)
return tosort;
}
-template <class T>
+template <class T, class UT>
static int
-aradixsort_(T *start, npy_intp *tosort, npy_intp num)
+aradixsort_(UT *start, npy_intp *tosort, npy_intp num)
{
npy_intp *sorted;
npy_intp *aux;
- T k1, k2;
+ UT k1, k2;
npy_bool all_sorted = 1;
if (num < 2) {
return 0;
}
- k1 = KEY_OF(start[tosort[0]]);
+ k1 = KEY_OF<T>(start[tosort[0]]);
for (npy_intp i = 1; i < num; i++) {
- k2 = KEY_OF(start[tosort[i]]);
+ k2 = KEY_OF<T>(start[tosort[i]]);
if (k1 > k2) {
all_sorted = 0;
break;
@@ -218,7 +219,7 @@ aradixsort_(T *start, npy_intp *tosort, npy_intp num)
return -NPY_ENOMEM;
}
- sorted = aradixsort0(start, aux, tosort, num);
+ sorted = aradixsort0<T>(start, aux, tosort, num);
if (sorted != tosort) {
memcpy(tosort, sorted, num * sizeof(npy_intp));
}
@@ -231,7 +232,8 @@ template <class T>
static int
aradixsort(void *start, npy_intp *tosort, npy_intp num)
{
- return aradixsort_((T *)start, tosort, num);
+ using UT = typename std::make_unsigned<T>::type;
+ return aradixsort_<T>((UT *)start, tosort, num);
}
extern "C" {
diff --git a/numpy/core/src/umath/_operand_flag_tests.c.src b/numpy/core/src/umath/_operand_flag_tests.c
index c59e13baf..c59e13baf 100644
--- a/numpy/core/src/umath/_operand_flag_tests.c.src
+++ b/numpy/core/src/umath/_operand_flag_tests.c
diff --git a/numpy/core/src/umath/_scaled_float_dtype.c b/numpy/core/src/umath/_scaled_float_dtype.c
index b6c19362a..a214b32aa 100644
--- a/numpy/core/src/umath/_scaled_float_dtype.c
+++ b/numpy/core/src/umath/_scaled_float_dtype.c
@@ -325,7 +325,8 @@ sfloat_to_sfloat_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]),
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *view_offset)
{
loop_descrs[0] = given_descrs[0];
Py_INCREF(loop_descrs[0]);
@@ -341,7 +342,8 @@ sfloat_to_sfloat_resolve_descriptors(
if (((PyArray_SFloatDescr *)loop_descrs[0])->scaling
== ((PyArray_SFloatDescr *)loop_descrs[1])->scaling) {
/* same scaling is just a view */
- return NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
+ *view_offset = 0;
+ return NPY_NO_CASTING;
}
else if (-((PyArray_SFloatDescr *)loop_descrs[0])->scaling
== ((PyArray_SFloatDescr *)loop_descrs[1])->scaling) {
@@ -384,7 +386,8 @@ float_to_from_sfloat_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *dtypes[2],
PyArray_Descr *NPY_UNUSED(given_descrs[2]),
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *view_offset)
{
loop_descrs[0] = NPY_DT_CALL_default_descr(dtypes[0]);
if (loop_descrs[0] == NULL) {
@@ -394,7 +397,8 @@ float_to_from_sfloat_resolve_descriptors(
if (loop_descrs[1] == NULL) {
return -1;
}
- return NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
+ *view_offset = 0;
+ return NPY_NO_CASTING;
}
@@ -422,7 +426,8 @@ sfloat_to_bool_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]),
PyArray_Descr *given_descrs[2],
- PyArray_Descr *loop_descrs[2])
+ PyArray_Descr *loop_descrs[2],
+ npy_intp *NPY_UNUSED(view_offset))
{
Py_INCREF(given_descrs[0]);
loop_descrs[0] = given_descrs[0];
@@ -541,7 +546,8 @@ multiply_sfloats_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]),
PyArray_Descr *given_descrs[3],
- PyArray_Descr *loop_descrs[3])
+ PyArray_Descr *loop_descrs[3],
+ npy_intp *NPY_UNUSED(view_offset))
{
/*
* Multiply the scaling for the result. If the result was passed in we
@@ -602,7 +608,8 @@ add_sfloats_resolve_descriptors(
PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]),
PyArray_Descr *given_descrs[3],
- PyArray_Descr *loop_descrs[3])
+ PyArray_Descr *loop_descrs[3],
+ npy_intp *NPY_UNUSED(view_offset))
{
/*
* Here we accept an output descriptor (the inner loop can deal with it),
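
This file, like the other resolve_descriptors implementations touched in this diff, switches from returning NPY_NO_CASTING | _NPY_CAST_IS_VIEW to reporting "this cast is a view" through the new view_offset out-parameter. The sketch below only illustrates that calling convention; Descr and the casting enum are hypothetical stand-ins, not the real PyArray_Descr / NPY_CASTING definitions.

    #include <cstdio>

    // Stand-in types to illustrate the updated resolve_descriptors convention:
    // view-ness is reported by writing an offset (here 0) into view_offset
    // instead of OR-ing a flag into the returned casting level.
    struct Descr { double scaling; };
    enum casting { NO_CASTING = 0, SAME_KIND_CASTING = 1 };

    static casting
    resolve_descriptors(const Descr *given[2], const Descr *loop[2],
                        long *view_offset)
    {
        loop[0] = given[0];
        loop[1] = given[1] ? given[1] : given[0];
        if (loop[0]->scaling == loop[1]->scaling) {
            *view_offset = 0;        /* same scaling: the cast is just a view */
            return NO_CASTING;
        }
        return SAME_KIND_CASTING;    /* otherwise a real (value-changing) cast */
    }

    int main()
    {
        Descr a{2.0}, b{2.0};
        const Descr *given[2] = {&a, &b};
        const Descr *loop[2] = {nullptr, nullptr};
        long view_offset = -1;       /* "not a view" until the resolver says so */

        casting c = resolve_descriptors(given, loop, &view_offset);
        std::printf("casting=%d, view_offset=%ld\n", (int)c, view_offset);
        return 0;
    }
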
diff --git a/numpy/core/src/umath/dispatching.c b/numpy/core/src/umath/dispatching.c
index 8e99c0420..81d47a0e1 100644
--- a/numpy/core/src/umath/dispatching.c
+++ b/numpy/core/src/umath/dispatching.c
@@ -46,19 +46,23 @@
#include "dispatching.h"
#include "dtypemeta.h"
+#include "common_dtype.h"
#include "npy_hashtable.h"
#include "legacy_array_method.h"
#include "ufunc_object.h"
#include "ufunc_type_resolution.h"
+#define PROMOTION_DEBUG_TRACING 0
+
+
/* forward declaration */
static NPY_INLINE PyObject *
promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
PyArrayObject *const ops[],
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
- npy_bool allow_legacy_promotion, npy_bool cache);
+ npy_bool allow_legacy_promotion);
/**
@@ -147,6 +151,23 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate)
* (Based on `isinstance()`, the knowledge that non-abstract DTypes cannot
* be subclassed is used, however.)
*
+ * NOTE: This currently does not take into account output dtypes which do not
+ * have to match. The possible extension here is that if an output
+ * is given (and thus an output dtype), but not part of the signature
+ * we could ignore it for matching, but *prefer* a loop that matches
+ * better.
+ * Why is this not done currently? First, it seems a niche feature that
+ * loops can only be distinguished based on the output dtype. Second,
+ * there are some nasty theoretical things because:
+ *
+ * np.add(f4, f4, out=f8)
+ * np.add(f4, f4, out=f8, dtype=f8)
+ *
+ * are different, the first uses the f4 loop, the second the f8 loop.
+ * The problem is, that the current cache only uses the op_dtypes and
+ * both are `(f4, f4, f8)`. The cache would need to store also which
+ * output was provided by `dtype=`/`signature=`.
+ *
* @param ufunc
* @param op_dtypes The DTypes that are either passed in (defined by an
* operand) or defined by the `signature` as also passed in as
@@ -159,17 +180,35 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate)
*/
static int
resolve_implementation_info(PyUFuncObject *ufunc,
- PyArray_DTypeMeta *op_dtypes[], PyObject **out_info)
+ PyArray_DTypeMeta *op_dtypes[], npy_bool only_promoters,
+ PyObject **out_info)
{
int nin = ufunc->nin, nargs = ufunc->nargs;
Py_ssize_t size = PySequence_Length(ufunc->_loops);
PyObject *best_dtypes = NULL;
PyObject *best_resolver_info = NULL;
+#if PROMOTION_DEBUG_TRACING
+ printf("Promoting for '%s' promoters only: %d\n",
+ ufunc->name ? ufunc->name : "<unknown>", (int)only_promoters);
+ printf(" DTypes: ");
+ PyObject *tmp = PyArray_TupleFromItems(ufunc->nargs, op_dtypes, 1);
+ PyObject_Print(tmp, stdout, 0);
+ printf("\n");
+ Py_DECREF(tmp);
+#endif
+
for (Py_ssize_t res_idx = 0; res_idx < size; res_idx++) {
/* Test all resolvers */
PyObject *resolver_info = PySequence_Fast_GET_ITEM(
ufunc->_loops, res_idx);
+
+ if (only_promoters && PyObject_TypeCheck(
+ PyTuple_GET_ITEM(resolver_info, 1), &PyArrayMethod_Type)) {
+ continue;
+ }
+
PyObject *curr_dtypes = PyTuple_GET_ITEM(resolver_info, 0);
/*
* Test if the current resolver matches, it could make sense to
@@ -179,20 +218,31 @@ resolve_implementation_info(PyUFuncObject *ufunc,
npy_bool matches = NPY_TRUE;
/*
- * NOTE: We check also the output DType. In principle we do not
- * have to strictly match it (unless it is provided by the
- * `signature`). This assumes that a (fallback) promoter will
- * unset the output DType if no exact match is found.
+ * NOTE: We currently match the output dtype exactly here; this is
+ * actually only necessary if the output is part of the signature.
+ * Currently, we rely on op_dtypes[nin:nargs] being NULLed if it is not.
*/
for (Py_ssize_t i = 0; i < nargs; i++) {
PyArray_DTypeMeta *given_dtype = op_dtypes[i];
PyArray_DTypeMeta *resolver_dtype = (
(PyArray_DTypeMeta *)PyTuple_GET_ITEM(curr_dtypes, i));
assert((PyObject *)given_dtype != Py_None);
- if (given_dtype == NULL && i >= nin) {
- /* Unspecified out always matches (see below for inputs) */
- continue;
+ if (given_dtype == NULL) {
+ if (i >= nin) {
+ /* Unspecified out always matches (see below for inputs) */
+ continue;
+ }
+ /*
+ * This is a reduce-like operation, which always has the form
+ * `(res_DType, op_DType, res_DType)`. If the first and last
+ * dtype of the loops match, this should be reduce-compatible.
+ */
+ if (PyTuple_GET_ITEM(curr_dtypes, 0)
+ == PyTuple_GET_ITEM(curr_dtypes, 2)) {
+ continue;
+ }
}
+
if (resolver_dtype == (PyArray_DTypeMeta *)Py_None) {
/* always matches */
continue;
@@ -204,24 +254,7 @@ resolve_implementation_info(PyUFuncObject *ufunc,
matches = NPY_FALSE;
break;
}
- if (given_dtype == NULL) {
- /*
- * If an input was not specified, this is a reduce-like
- * operation: reductions use `(operand_DType, NULL, out_DType)`
- * as they only have a single operand. This allows special
- * reduce promotion rules useful for example for sum/product.
- * E.g. `np.add.reduce([True, True])` promotes to integer.
- *
- * Continuing here allows a promoter to handle reduce-like
- * promotions explicitly if necessary.
- * TODO: The `!NPY_DT_is_abstract(resolver_dtype)` currently
- * ensures that this is a promoter. If we allow
- * `ArrayMethods` to use abstract DTypes, we may have to
- * reject it here or the `ArrayMethod` has to implement
- * the reduce promotion.
- */
- continue;
- }
+
int subclass = PyObject_IsSubclass(
(PyObject *)given_dtype, (PyObject *)resolver_dtype);
if (subclass < 0) {
@@ -254,8 +287,12 @@ resolve_implementation_info(PyUFuncObject *ufunc,
* In all cases, we give up resolution, since it would be
* necessary to compare to two "best" cases.
*/
- int unambiguously_equally_good = 1;
for (Py_ssize_t i = 0; i < nargs; i++) {
+ if (i == ufunc->nin && current_best != -1) {
+ /* inputs prefer one loop and outputs have lower priority */
+ break;
+ }
+
int best;
PyObject *prev_dtype = PyTuple_GET_ITEM(best_dtypes, i);
@@ -265,50 +302,18 @@ resolve_implementation_info(PyUFuncObject *ufunc,
/* equivalent, so this entry does not matter */
continue;
}
- /*
- * TODO: Even if the input is not specified, if we have
- * abstract DTypes and one is a subclass of the other,
- * the subclass should be considered a better match
- * (subclasses are always more specific).
- */
- /* Whether this (normally output) dtype was specified at all */
if (op_dtypes[i] == NULL) {
/*
- * When DType is completely unspecified, prefer abstract
- * over concrete, assuming it will resolve.
- * Furthermore, we cannot decide which abstract/None
- * is "better", only concrete ones which are subclasses
- * of Abstract ones are defined as worse.
+ * If a dtype is NULL it always matches, so there is no
+ * point in defining one as more precise than the other.
*/
- npy_bool prev_is_concrete = NPY_FALSE;
- npy_bool new_is_concrete = NPY_FALSE;
- if ((prev_dtype != Py_None) &&
- !NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) {
- prev_is_concrete = NPY_TRUE;
- }
- if ((new_dtype != Py_None) &&
- !NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) {
- new_is_concrete = NPY_TRUE;
- }
- if (prev_is_concrete == new_is_concrete) {
- best = -1;
- }
- else if (prev_is_concrete) {
- unambiguously_equally_good = 0;
- best = 1;
- }
- else {
- unambiguously_equally_good = 0;
- best = 0;
- }
+ continue;
}
/* If either is None, the other is strictly more specific */
- else if (prev_dtype == Py_None) {
- unambiguously_equally_good = 0;
+ if (prev_dtype == Py_None) {
best = 1;
}
else if (new_dtype == Py_None) {
- unambiguously_equally_good = 0;
best = 0;
}
/*
@@ -318,20 +323,25 @@ resolve_implementation_info(PyUFuncObject *ufunc,
else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype) &&
!NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) {
/*
- * Ambiguous unless the are identical (checked above),
- * but since they are concrete it does not matter which
- * best to compare.
+ * Ambiguous unless they are identical (checked above),
+ * or one matches exactly.
*/
- best = -1;
+ if (prev_dtype == (PyObject *)op_dtypes[i]) {
+ best = 0;
+ }
+ else if (new_dtype == (PyObject *)op_dtypes[i]) {
+ best = 1;
+ }
+ else {
+ best = -1;
+ }
}
else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) {
/* old is not abstract, so better (both not possible) */
- unambiguously_equally_good = 0;
best = 0;
}
else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) {
/* new is not abstract, so better (both not possible) */
- unambiguously_equally_good = 0;
best = 1;
}
/*
@@ -349,6 +359,10 @@ resolve_implementation_info(PyUFuncObject *ufunc,
return -1;
}
+ if (best == -1) {
+ /* no new info, nothing to update */
+ continue;
+ }
if ((current_best != -1) && (current_best != best)) {
/*
* We need a clear best, this could be tricky, unless
@@ -367,15 +381,34 @@ resolve_implementation_info(PyUFuncObject *ufunc,
if (current_best == -1) {
/*
- * TODO: It would be nice to have a "diagnostic mode" that
- * informs if this happens! (An immediate error currently
- * blocks later legacy resolution, but may work in the
- * future.)
+ * We could not find a best loop, but promoters should be
+ * designed in a way to disambiguate such scenarios, so we
+ * retry the whole lookup using only promoters.
+ * (There is a small chance we already got two promoters.
+ * We just redo it anyway for simplicity.)
*/
- if (unambiguously_equally_good) {
- /* unset the best resolver to indicate this */
- best_resolver_info = NULL;
- continue;
+ if (!only_promoters) {
+ return resolve_implementation_info(ufunc,
+ op_dtypes, NPY_TRUE, out_info);
+ }
+ /*
+ * If this is already the retry, we are out of luck. Promoters
+ * should be designed in a way that this cannot happen!
+ * (It should be noted that the retry might not find anything
+ * and we still do a legacy lookup later.)
+ */
+ PyObject *given = PyArray_TupleFromItems(
+ ufunc->nargs, (PyObject **)op_dtypes, 1);
+ if (given != NULL) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Could not find a loop for the inputs:\n %S\n"
+ "The two promoters %S and %S matched the input "
+ "equally well. Promoters must be designed "
+ "to be unambiguous. NOTE: This indicates an error "
+ "in NumPy or an extending library and should be "
+ "reported.",
+ given, best_dtypes, curr_dtypes);
+ Py_DECREF(given);
}
*out_info = NULL;
return 0;
@@ -457,10 +490,9 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter,
if (Py_EnterRecursiveCall(" during ufunc promotion.") != 0) {
goto finish;
}
- /* TODO: The caching logic here may need revising: */
resolved_info = promote_and_get_info_and_ufuncimpl(ufunc,
operands, signature, new_op_dtypes,
- /* no legacy promotion */ NPY_FALSE, /* cache */ NPY_TRUE);
+ /* no legacy promotion */ NPY_FALSE);
Py_LeaveRecursiveCall();
@@ -551,6 +583,10 @@ legacy_promote_using_legacy_type_resolver(PyUFuncObject *ufunc,
NPY_UNSAFE_CASTING, (PyArrayObject **)ops, type_tuple,
out_descrs) < 0) {
Py_XDECREF(type_tuple);
+ /* Not all legacy resolvers clean up on failures: */
+ for (int i = 0; i < nargs; i++) {
+ Py_CLEAR(out_descrs[i]);
+ }
return -1;
}
Py_XDECREF(type_tuple);
@@ -560,17 +596,19 @@ legacy_promote_using_legacy_type_resolver(PyUFuncObject *ufunc,
Py_INCREF(operation_DTypes[i]);
Py_DECREF(out_descrs[i]);
}
- if (ufunc->type_resolver == &PyUFunc_SimpleBinaryComparisonTypeResolver) {
- /*
- * In this one case, the deprecation means that we actually override
- * the signature.
- */
- for (int i = 0; i < nargs; i++) {
- if (signature[i] != NULL && signature[i] != operation_DTypes[i]) {
- Py_INCREF(operation_DTypes[i]);
- Py_SETREF(signature[i], operation_DTypes[i]);
- *out_cacheable = 0;
- }
+ /*
+ * The PyUFunc_SimpleBinaryComparisonTypeResolver has a deprecation
+ * warning (ignoring `dtype=`) and cannot be cached.
+ * All datetime ones *should* have a warning, but currently don't; they
+ * also ignore any passed signature. So they cannot be cached either,
+ * and they mutate the signature, which is of course wrong, but not
+ * doing so would confuse the code later.
+ */
+ for (int i = 0; i < nargs; i++) {
+ if (signature[i] != NULL && signature[i] != operation_DTypes[i]) {
+ Py_INCREF(operation_DTypes[i]);
+ Py_SETREF(signature[i], operation_DTypes[i]);
+ *out_cacheable = 0;
}
}
return 0;
@@ -607,7 +645,7 @@ add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc,
Py_DECREF(info);
return NULL;
}
-
+ Py_DECREF(info); /* now borrowed from the ufunc's list of loops */
return info;
}
@@ -625,7 +663,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
PyArrayObject *const ops[],
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
- npy_bool allow_legacy_promotion, npy_bool cache)
+ npy_bool allow_legacy_promotion)
{
/*
* Fetch the dispatching info which consists of the implementation and
@@ -644,11 +682,12 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
}
/*
- * If `info == NULL`, the caching failed, repeat using the full resolution
- * in `resolve_implementation_info`.
+ * If `info == NULL`, loading from the cache failed; use the full resolution
+ * in `resolve_implementation_info` (which caches its result on success).
*/
if (info == NULL) {
- if (resolve_implementation_info(ufunc, op_dtypes, &info) < 0) {
+ if (resolve_implementation_info(ufunc,
+ op_dtypes, NPY_FALSE, &info) < 0) {
return NULL;
}
if (info != NULL && PyObject_TypeCheck(
@@ -657,41 +696,12 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
* Found the ArrayMethod and NOT promoter. Before returning it
* add it to the cache for faster lookup in the future.
*/
- if (cache && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
+ if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
(PyObject **)op_dtypes, info, 0) < 0) {
return NULL;
}
return info;
}
- else if (info == NULL && op_dtypes[0] == NULL) {
- /*
- * If we have a reduction, fill in the unspecified input/array
- * assuming it should have the same dtype as the operand input
- * (or the output one if given).
- * Then, try again. In some cases, this will choose different
- * paths, such as `ll->?` instead of an `??->?` loop for `np.equal`
- * when the input is `.l->.` (`.` meaning undefined). This will
- * then cause an error. But cast to `?` would always lose
- * information, and in many cases important information:
- *
- * ```python
- * from operator import eq
- * from functools import reduce
- *
- * reduce(eq, [1, 2, 3]) != reduce(eq, [True, True, True])
- * ```
- *
- * The special cases being `logical_(and|or|xor)` which can always
- * cast to boolean ahead of time and still give the right answer
- * (unsafe cast to bool is fine here). We special case these at
- * the time of this comment (NumPy 1.21).
- */
- assert(ufunc->nin == 2 && ufunc->nout == 1);
- op_dtypes[0] = op_dtypes[2] != NULL ? op_dtypes[2] : op_dtypes[1];
- Py_INCREF(op_dtypes[0]);
- return promote_and_get_info_and_ufuncimpl(ufunc,
- ops, signature, op_dtypes, allow_legacy_promotion, 1);
- }
}
/*
@@ -707,6 +717,11 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
return NULL;
}
else if (info != NULL) {
+ /* Add result to the cache using the original types: */
+ if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
+ (PyObject **)op_dtypes, info, 0) < 0) {
+ return NULL;
+ }
return info;
}
}
@@ -730,9 +745,15 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
return NULL;
}
info = promote_and_get_info_and_ufuncimpl(ufunc,
- ops, signature, new_op_dtypes, NPY_FALSE, cacheable);
+ ops, signature, new_op_dtypes, NPY_FALSE);
for (int i = 0; i < ufunc->nargs; i++) {
- Py_XDECREF(new_op_dtypes);
+ Py_XDECREF(new_op_dtypes[i]);
+ }
+
+ /* Add this to the cache using the original types: */
+ if (cacheable && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
+ (PyObject **)op_dtypes, info, 0) < 0) {
+ return NULL;
}
return info;
}
@@ -745,6 +766,14 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
* only work with DType (classes/types). This is because it has to ensure
* that legacy (value-based promotion) is used when necessary.
*
+ * NOTE: The machinery here currently ignores output arguments unless
+ * they are part of the signature. This slightly limits unsafe loop
+ * specializations, which is important for the `ensure_reduce_compatible`
+ * fallback mode.
+ * To fix this, the caching mechanism (and dispatching) can be extended.
+ * When/if that happens, the `ensure_reduce_compatible` could be
+ * deprecated (it should never kick in because promotion kicks in first).
+ *
* @param ufunc The ufunc object, used mainly for the fallback.
* @param ops The array operands (used only for the fallback).
* @param signature As input, the DType signature fixed explicitly by the user.
@@ -754,9 +783,16 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
* either by the `signature` or by an `operand`.
* (outputs and the second input can be NULL for reductions).
* NOTE: In some cases, the promotion machinery may currently modify
- * these.
+ * these, including clearing the output.
* @param force_legacy_promotion If set, we have to use the old type resolution
* to implement value-based promotion/casting.
+ * @param ensure_reduce_compatible Must be set for reductions, in which case
+ * the found implementation is checked for reduce-like compatibility.
+ * If it is *not* compatible and `signature[2] != NULL`, we assume its
+ * output DType is correct (see NOTE above).
+ * If removed, promotion may require information about whether this
+ * is a reduction, so the more likely path is to keep fixing this up
+ * when necessary, but to push the handling down so it can be cached.
*/
NPY_NO_EXPORT PyArrayMethodObject *
promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
@@ -764,9 +800,10 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
npy_bool force_legacy_promotion,
- npy_bool allow_legacy_promotion)
+ npy_bool allow_legacy_promotion,
+ npy_bool ensure_reduce_compatible)
{
- int nargs = ufunc->nargs;
+ int nin = ufunc->nin, nargs = ufunc->nargs;
/*
* Get the actual DTypes we operate with by mixing the operand array
@@ -782,6 +819,15 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
Py_XSETREF(op_dtypes[i], signature[i]);
assert(i >= ufunc->nin || !NPY_DT_is_abstract(signature[i]));
}
+ else if (i >= nin) {
+ /*
+ * We currently just ignore outputs if not in signature, this will
+ * always give the/a correct result (limits registering specialized
+ * loops which include the cast).
+ * (See also comment in resolve_implementation_info.)
+ */
+ Py_CLEAR(op_dtypes[i]);
+ }
}
if (force_legacy_promotion) {
@@ -798,7 +844,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
}
PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc,
- ops, signature, op_dtypes, allow_legacy_promotion, NPY_TRUE);
+ ops, signature, op_dtypes, allow_legacy_promotion);
if (info == NULL) {
if (!PyErr_Occurred()) {
@@ -809,8 +855,26 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1);
- /* Fill `signature` with final DTypes used by the ArrayMethod/inner-loop */
+ /*
+ * In certain cases (only the logical ufuncs really), the loop we found may
+ * not be reduce-compatible. Since the machinery can't distinguish a
+ * reduction with an output from a normal ufunc call, we have to assume
+ * the result DType is correct and force it for the input (if not forced
+ * already).
+ * NOTE: This does assume that all loops are "safe" (see the NOTE above).
+ * That could be relaxed, in which case we may need to cache
+ * whether a call was for a reduction.
+ */
PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0);
+ if (ensure_reduce_compatible && signature[0] == NULL &&
+ PyTuple_GET_ITEM(all_dtypes, 0) != PyTuple_GET_ITEM(all_dtypes, 2)) {
+ signature[0] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM(all_dtypes, 2);
+ Py_INCREF(signature[0]);
+ return promote_and_get_ufuncimpl(ufunc,
+ ops, signature, op_dtypes,
+ force_legacy_promotion, allow_legacy_promotion, NPY_FALSE);
+ }
+
for (int i = 0; i < nargs; i++) {
if (signature[i] == NULL) {
signature[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM(all_dtypes, i);
@@ -826,6 +890,112 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
/*
+ * Generic promoter used as a final fallback for ufuncs. Most operations are
+ * homogeneous, so we can try to find the homogeneous dtype of the inputs
+ * and use that.
+ * We need to special-case reductions, where op_dtypes[0] == NULL
+ * is possible.
+ */
+NPY_NO_EXPORT int
+default_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[])
+{
+ if (ufunc->type_resolver == &PyUFunc_SimpleBinaryComparisonTypeResolver
+ && signature[0] == NULL && signature[1] == NULL
+ && signature[2] != NULL && signature[2]->type_num != NPY_BOOL) {
+ /* bail out, this is _only_ to give future/deprecation warning! */
+ return -1;
+ }
+
+ /* If nin < 2 promotion is a no-op, so it should not be registered */
+ assert(ufunc->nin > 1);
+ if (op_dtypes[0] == NULL) {
+ assert(ufunc->nin == 2 && ufunc->nout == 1); /* must be reduction */
+ Py_INCREF(op_dtypes[1]);
+ new_op_dtypes[0] = op_dtypes[1];
+ Py_INCREF(op_dtypes[1]);
+ new_op_dtypes[1] = op_dtypes[1];
+ Py_INCREF(op_dtypes[1]);
+ new_op_dtypes[2] = op_dtypes[1];
+ return 0;
+ }
+ PyArray_DTypeMeta *common = NULL;
+ /*
+ * If a signature is used and is homogeneous in its outputs, use that.
+ * (This could likely be applied to the inputs as well, although using only
+ * the outputs may have some advantage, and input dtypes are rarely enforced.)
+ */
+ for (int i = ufunc->nin; i < ufunc->nargs; i++) {
+ if (signature[i] != NULL) {
+ if (common == NULL) {
+ Py_INCREF(signature[i]);
+ common = signature[i];
+ }
+ else if (common != signature[i]) {
+ Py_CLEAR(common); /* Not homogeneous, unset common */
+ break;
+ }
+ }
+ }
+ /* Otherwise, use the common DType of all input operands */
+ if (common == NULL) {
+ common = PyArray_PromoteDTypeSequence(ufunc->nin, op_dtypes);
+ if (common == NULL) {
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Clear(); /* Do not propagate normal promotion errors */
+ }
+ return -1;
+ }
+ }
+
+ for (int i = 0; i < ufunc->nargs; i++) {
+ PyArray_DTypeMeta *tmp = common;
+ if (signature[i]) {
+ tmp = signature[i]; /* never replace a fixed one. */
+ }
+ Py_INCREF(tmp);
+ new_op_dtypes[i] = tmp;
+ }
+ for (int i = ufunc->nin; i < ufunc->nargs; i++) {
+ Py_XINCREF(op_dtypes[i]);
+ new_op_dtypes[i] = op_dtypes[i];
+ }
+
+ Py_DECREF(common);
+ return 0;
+}
+
+
+/*
+ * In some cases, we assume that there will only ever be object loops,
+ * and the object loop should *always* be chosen.
+ * (in those cases more specific loops should not really be registered, but
+ * we do not check that.)
+ *
+ * We default to this for "old-style" ufuncs which have exactly one loop
+ * consisting only of objects (during registration time, numba mutates this
+ * but presumably).
+ */
+NPY_NO_EXPORT int
+object_only_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *NPY_UNUSED(op_dtypes[]),
+ PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[])
+{
+ PyArray_DTypeMeta *object_DType = PyArray_DTypeFromTypeNum(NPY_OBJECT);
+
+ for (int i = 0; i < ufunc->nargs; i++) {
+ if (signature[i] == NULL) {
+ Py_INCREF(object_DType);
+ new_op_dtypes[i] = object_DType;
+ }
+ }
+ Py_DECREF(object_DType);
+ return 0;
+}
+
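A hedged illustration of what the object-only promoter means in practice: a ufunc
with a single object loop, such as one created by np.frompyfunc, always dispatches
to that loop, so the result dtype is object regardless of the input dtypes.

import numpy as np

add_py = np.frompyfunc(lambda x, y: x + y, 2, 1)  # single object-only loop
res = add_py(np.arange(3), np.arange(3.0))
print(res.dtype)  # object
print(res)        # [0.0 2.0 4.0]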
+/*
* Special promoter for the logical ufuncs. The logical ufuncs can always
* use the ??->? and still get the correct output (as long as the output
* is not supposed to be `object`).
@@ -843,6 +1013,12 @@ logical_ufunc_promoter(PyUFuncObject *NPY_UNUSED(ufunc),
*/
int force_object = 0;
+ if (signature[0] == NULL && signature[1] == NULL
+ && signature[2] != NULL && signature[2]->type_num != NPY_BOOL) {
+        /* bail out, this is _only_ to give a future/deprecation warning! */
+ return -1;
+ }
+
for (int i = 0; i < 3; i++) {
PyArray_DTypeMeta *item;
if (signature[i] != NULL) {
@@ -913,4 +1089,3 @@ install_logical_ufunc_promoter(PyObject *ufunc)
return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0);
}
-
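A hedged illustration of the logical promoter's effect: the boolean ??->? loop is
used even for non-boolean inputs, so the result dtype is bool; explicitly requesting
a non-bool output dtype is what now goes through the bail-out added above to give a
future/deprecation warning.

import numpy as np

a = np.array([0.0, 1.5, np.nan])
b = np.array([1.0, 0.0, 2.0])

# float64 inputs, but the boolean loop is picked:
print(np.logical_and(a, b))        # [False False  True]  (NaN is truthy)
print(np.logical_and(a, b).dtype)  # bool
print(np.logical_and.reduce(a))    # False (0.0 is falsy)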
diff --git a/numpy/core/src/umath/dispatching.h b/numpy/core/src/umath/dispatching.h
index 2f314615d..a7e9e88d0 100644
--- a/numpy/core/src/umath/dispatching.h
+++ b/numpy/core/src/umath/dispatching.h
@@ -20,13 +20,25 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
npy_bool force_legacy_promotion,
- npy_bool allow_legacy_promotion);
+ npy_bool allow_legacy_promotion,
+ npy_bool ensure_reduce_compatible);
NPY_NO_EXPORT PyObject *
add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc,
PyArray_DTypeMeta *operation_dtypes[], int ignore_duplicate);
NPY_NO_EXPORT int
+default_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[]);
+
+NPY_NO_EXPORT int
+object_only_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *NPY_UNUSED(op_dtypes[]),
+ PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[]);
+
+NPY_NO_EXPORT int
install_logical_ufunc_promoter(PyObject *ufunc);
diff --git a/numpy/core/src/umath/legacy_array_method.c b/numpy/core/src/umath/legacy_array_method.c
index a423823d4..f4b2aed96 100644
--- a/numpy/core/src/umath/legacy_array_method.c
+++ b/numpy/core/src/umath/legacy_array_method.c
@@ -103,7 +103,8 @@ NPY_NO_EXPORT NPY_CASTING
wrapped_legacy_resolve_descriptors(PyArrayMethodObject *NPY_UNUSED(self),
PyArray_DTypeMeta *NPY_UNUSED(dtypes[]),
PyArray_Descr *NPY_UNUSED(given_descrs[]),
- PyArray_Descr *NPY_UNUSED(loop_descrs[]))
+ PyArray_Descr *NPY_UNUSED(loop_descrs[]),
+ npy_intp *NPY_UNUSED(view_offset))
{
PyErr_SetString(PyExc_RuntimeError,
"cannot use legacy wrapping ArrayMethod without calling the ufunc "
@@ -121,12 +122,43 @@ simple_legacy_resolve_descriptors(
PyArrayMethodObject *method,
PyArray_DTypeMeta **dtypes,
PyArray_Descr **given_descrs,
- PyArray_Descr **output_descrs)
+ PyArray_Descr **output_descrs,
+ npy_intp *NPY_UNUSED(view_offset))
{
+ int i = 0;
int nin = method->nin;
int nout = method->nout;
- for (int i = 0; i < nin + nout; i++) {
+ if (nin == 2 && nout == 1 && given_descrs[2] != NULL
+ && dtypes[0] == dtypes[2]) {
+ /*
+ * Could be a reduction, which requires `descr[0] is descr[2]`
+ * (identity) at least currently. This is because `op[0] is op[2]`.
+ * (If the output descriptor is not passed, the below works.)
+ */
+ output_descrs[2] = ensure_dtype_nbo(given_descrs[2]);
+ if (output_descrs[2] == NULL) {
+ Py_CLEAR(output_descrs[2]);
+ return -1;
+ }
+ Py_INCREF(output_descrs[2]);
+ output_descrs[0] = output_descrs[2];
+ if (dtypes[1] == dtypes[2]) {
+ /* Same for the second one (accumulation is stricter) */
+ Py_INCREF(output_descrs[2]);
+ output_descrs[1] = output_descrs[2];
+ }
+ else {
+ output_descrs[1] = ensure_dtype_nbo(given_descrs[1]);
+ if (output_descrs[1] == NULL) {
+ i = 2;
+ goto fail;
+ }
+ }
+ return NPY_NO_CASTING;
+ }
+
+ for (; i < nin + nout; i++) {
if (given_descrs[i] != NULL) {
output_descrs[i] = ensure_dtype_nbo(given_descrs[i]);
}
@@ -146,7 +178,7 @@ simple_legacy_resolve_descriptors(
return NPY_NO_CASTING;
fail:
- for (int i = 0; i < nin + nout; i++) {
+ for (; i >= 0; i--) {
Py_CLEAR(output_descrs[i]);
}
return -1;
@@ -194,6 +226,10 @@ get_wrapped_legacy_ufunc_loop(PyArrayMethod_Context *context,
*out_loop = &generic_wrapped_legacy_loop;
*out_transferdata = get_new_loop_data(
loop, user_data, (*flags & NPY_METH_REQUIRES_PYAPI) != 0);
+ if (*out_transferdata == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
return 0;
}
diff --git a/numpy/core/src/umath/legacy_array_method.h b/numpy/core/src/umath/legacy_array_method.h
index 0dec1fb3a..d20b4fb08 100644
--- a/numpy/core/src/umath/legacy_array_method.h
+++ b/numpy/core/src/umath/legacy_array_method.h
@@ -27,7 +27,7 @@ get_wrapped_legacy_ufunc_loop(PyArrayMethod_Context *context,
NPY_NO_EXPORT NPY_CASTING
wrapped_legacy_resolve_descriptors(PyArrayMethodObject *,
- PyArray_DTypeMeta **, PyArray_Descr **, PyArray_Descr **);
+ PyArray_DTypeMeta **, PyArray_Descr **, PyArray_Descr **, npy_intp *);
#endif /*_NPY_LEGACY_ARRAY_METHOD_H */
diff --git a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
index 95cce553a..2dd43fb85 100644
--- a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
+++ b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
@@ -386,7 +386,7 @@ avx512_permute_x8var_pd(__m512d t0, __m512d t1, __m512d t2, __m512d t3,
* #and_masks =_mm256_and_ps, _mm512_kand#
* #xor_masks =_mm256_xor_ps, _mm512_kxor#
* #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps#
- * #mask_to_int = _mm256_movemask_ps, #
+ * #mask_to_int = _mm256_movemask_ps, npyv_tobits_b32#
* #full_mask= 0xFF, 0xFFFF#
* #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps#
* #cvtps_epi32 = _mm256_cvtps_epi32, #
@@ -833,11 +833,19 @@ AVX512F_exp_DOUBLE(npy_double * op,
op += num_lanes;
num_remaining_elements -= num_lanes;
}
- if (overflow_mask) {
+ /*
+     * Don't count on the compiler to cast between mask and int registers.
+     * On gcc7 the flags -march>=nocona -O3 can cause an FP stack overflow,
+     * which may lead to putting NaN into certain HW/FP calculations.
+ *
+ * For more details, please check the comments in:
+ * - https://github.com/numpy/numpy/issues/20356
+ */
+ if (npyv_tobits_b64(overflow_mask)) {
npy_set_floatstatus_overflow();
}
- if (underflow_mask) {
+ if (npyv_tobits_b64(underflow_mask)) {
npy_set_floatstatus_underflow();
}
}
@@ -1062,10 +1070,10 @@ AVX512F_log_DOUBLE(npy_double * op,
num_remaining_elements -= num_lanes;
}
- if (invalid_mask) {
+ if (npyv_tobits_b64(invalid_mask)) {
npy_set_floatstatus_invalid();
}
- if (divide_by_zero_mask) {
+ if (npyv_tobits_b64(divide_by_zero_mask)) {
npy_set_floatstatus_divbyzero();
}
}
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index c28c8abd8..8cb44d433 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -301,20 +301,6 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context,
PyArrayMethod_StridedLoop *strided_loop;
NPY_ARRAYMETHOD_FLAGS flags = 0;
- npy_intp fixed_strides[3];
- NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
- if (wheremask != NULL) {
- if (PyArrayMethod_GetMaskedStridedLoop(context,
- 1, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
- goto fail;
- }
- }
- else {
- if (context->method->get_strided_loop(context,
- 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
- goto fail;
- }
- }
int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0;
needs_api |= NpyIter_IterationNeedsAPI(iter);
@@ -349,6 +335,25 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context,
goto fail;
}
+ /*
+ * Note that we need to ensure that the iterator is reset before getting
+     * the fixed strides. (The buffer information is uninitialized before.)
+ */
+ npy_intp fixed_strides[3];
+ NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
+ if (wheremask != NULL) {
+ if (PyArrayMethod_GetMaskedStridedLoop(context,
+ 1, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
+ goto fail;
+ }
+ }
+ else {
+ if (context->method->get_strided_loop(context,
+ 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) {
+ goto fail;
+ }
+ }
+
if (NpyIter_GetIterSize(iter) != 0) {
NpyIter_IterNextFunc *iternext;
char **dataptr;
@@ -382,6 +387,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context,
}
Py_INCREF(result);
+ NPY_AUXDATA_FREE(auxdata);
if (!NpyIter_Deallocate(iter)) {
Py_DECREF(result);
return NULL;
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 186f18a62..415ff0f07 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -998,10 +998,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
}
if (*allow_legacy_promotion && (!all_scalar && any_scalar)) {
*force_legacy_promotion = should_use_min_scalar(nin, out_op, 0, NULL);
- /*
- * TODO: if this is False, we end up in a "very slow" path that should
- * be avoided. This makes `int_arr + 0.` ~40% slower.
- */
}
/* Convert and fill in output arguments */
@@ -1077,13 +1073,15 @@ check_for_trivial_loop(PyArrayMethodObject *ufuncimpl,
int must_copy = !PyArray_ISALIGNED(op[i]);
if (dtypes[i] != PyArray_DESCR(op[i])) {
- NPY_CASTING safety = PyArray_GetCastSafety(
- PyArray_DESCR(op[i]), dtypes[i], NULL);
+ npy_intp view_offset;
+ NPY_CASTING safety = PyArray_GetCastInfo(
+ PyArray_DESCR(op[i]), dtypes[i], NULL, &view_offset);
if (safety < 0 && PyErr_Occurred()) {
/* A proper error during a cast check, should be rare */
return -1;
}
- if (!(safety & _NPY_CAST_IS_VIEW)) {
+ if (view_offset != 0) {
+ /* NOTE: Could possibly implement non-zero view offsets */
must_copy = 1;
}
@@ -2717,11 +2715,11 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
char *method)
{
/*
- * Note that the `ops` is not realy correct. But legacy resolution
+ * Note that the `ops` is not really correct. But legacy resolution
* cannot quite handle the correct ops (e.g. a NULL first item if `out`
- * is NULL), and it should only matter in very strange cases.
+ * is NULL) so we pass `arr` instead in that case.
*/
- PyArrayObject *ops[3] = {arr, arr, NULL};
+ PyArrayObject *ops[3] = {out ? out : arr, arr, out};
/*
* TODO: If `out` is not provided, arguably `initial` could define
* the first DType (and maybe also the out one), that way
@@ -2741,12 +2739,11 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
}
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
- ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE);
- Py_DECREF(operation_DTypes[1]);
- if (out != NULL) {
- Py_DECREF(operation_DTypes[0]);
- Py_DECREF(operation_DTypes[2]);
- }
+ ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE, NPY_TRUE);
+    /* DTypes may currently get filled in by fallbacks; XDECREF to handle errors: */
+ Py_XDECREF(operation_DTypes[0]);
+ Py_XDECREF(operation_DTypes[1]);
+ Py_XDECREF(operation_DTypes[2]);
if (ufuncimpl == NULL) {
return NULL;
}
@@ -2767,12 +2764,18 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
* The first operand and output should be the same array, so they should
* be identical. The second argument can be different for reductions,
* but is checked to be identical for accumulate and reduceat.
+ * Ideally, the type-resolver ensures that all are identical, but we do
+ * not enforce this here strictly. Otherwise correct handling of
+ * byte-order changes (or metadata) requires a lot of care; see gh-20699.
*/
- if (out_descrs[0] != out_descrs[2] || (
- enforce_uniform_args && out_descrs[0] != out_descrs[1])) {
+ if (!PyArray_EquivTypes(out_descrs[0], out_descrs[2]) || (
+ enforce_uniform_args && !PyArray_EquivTypes(
+ out_descrs[0], out_descrs[1]))) {
PyErr_Format(PyExc_TypeError,
- "the resolved dtypes are not compatible with %s.%s",
- ufunc_get_name_cstr(ufunc), method);
+ "the resolved dtypes are not compatible with %s.%s. "
+ "Resolved (%R, %R, %R)",
+ ufunc_get_name_cstr(ufunc), method,
+ out_descrs[0], out_descrs[1], out_descrs[2]);
goto fail;
}
/* TODO: This really should _not_ be unsafe casting (same above)! */
@@ -3029,8 +3032,12 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
return NULL;
}
- /* The below code assumes that all descriptors are identical: */
- assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]);
+ /*
+     * The below code assumes that all descriptors are interchangeable; we
+     * allow them not to be strictly identical (but they typically should be).
+ */
+ assert(PyArray_EquivTypes(descrs[0], descrs[1])
+ && PyArray_EquivTypes(descrs[0], descrs[2]));
if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) {
/* This can be removed, but the initial element copy needs fixing */
@@ -3442,8 +3449,12 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
return NULL;
}
- /* The below code assumes that all descriptors are identical: */
- assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]);
+ /*
+     * The below code assumes that all descriptors are interchangeable; we
+     * allow them not to be strictly identical (but they typically should be).
+ */
+ assert(PyArray_EquivTypes(descrs[0], descrs[1])
+ && PyArray_EquivTypes(descrs[0], descrs[2]));
if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) {
/* This can be removed, but the initial element copy needs fixing */
@@ -4511,8 +4522,10 @@ resolve_descriptors(int nop,
if (ufuncimpl->resolve_descriptors != &wrapped_legacy_resolve_descriptors) {
/* The default: use the `ufuncimpl` as nature intended it */
+ npy_intp view_offset = NPY_MIN_INTP; /* currently ignored */
+
NPY_CASTING safety = ufuncimpl->resolve_descriptors(ufuncimpl,
- signature, original_dtypes, dtypes);
+ signature, original_dtypes, dtypes, &view_offset);
if (safety < 0) {
goto finish;
}
@@ -4852,7 +4865,8 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
*/
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
operands, signature,
- operand_DTypes, force_legacy_promotion, allow_legacy_promotion);
+ operand_DTypes, force_legacy_promotion, allow_legacy_promotion,
+ NPY_FALSE);
if (ufuncimpl == NULL) {
goto fail;
}
@@ -4892,6 +4906,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
*/
Py_XDECREF(wheremask);
for (int i = 0; i < nop; i++) {
+ Py_DECREF(signature[i]);
Py_XDECREF(operand_DTypes[i]);
Py_DECREF(operation_descrs[i]);
if (i < nin) {
@@ -4915,6 +4930,7 @@ fail:
Py_XDECREF(wheremask);
for (int i = 0; i < ufunc->nargs; i++) {
Py_XDECREF(operands[i]);
+ Py_XDECREF(signature[i]);
Py_XDECREF(operand_DTypes[i]);
Py_XDECREF(operation_descrs[i]);
if (i < nout) {
@@ -5190,9 +5206,22 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi
info = add_and_return_legacy_wrapping_ufunc_loop(ufunc, op_dtypes, 1);
if (info == NULL) {
+ Py_DECREF(ufunc);
return NULL;
}
}
+ /*
+ * TODO: I tried adding a default promoter here (either all object for
+ * some special cases, or all homogeneous). Those are reasonable
+ * defaults, but short-cut a deprecated SciPy loop, where the
+ * homogeneous loop `ddd->d` was deprecated, but an inhomogeneous
+ * one `dld->d` should be picked.
+     *       The default promoter *is* a reasonable default, but it would
+     *       switch that behaviour.
+     *       Another problem appeared due to buggy type-resolution for
+     *       datetimes; this meant that `timedelta.sum(dtype="f8")` returned
+     *       datetimes (and not floats or an error), arguably wrong, but...
+ */
return (PyObject *)ufunc;
}
@@ -5963,7 +5992,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
operands, signature, operand_DTypes,
- force_legacy_promotion, allow_legacy_promotion);
+ force_legacy_promotion, allow_legacy_promotion, NPY_FALSE);
if (ufuncimpl == NULL) {
goto fail;
}
@@ -6141,7 +6170,9 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
Py_XDECREF(op2_array);
Py_XDECREF(iter);
Py_XDECREF(iter2);
- for (int i = 0; i < 3; i++) {
+ for (int i = 0; i < nop; i++) {
+ Py_DECREF(signature[i]);
+ Py_XDECREF(operand_DTypes[i]);
Py_XDECREF(operation_descrs[i]);
Py_XDECREF(array_operands[i]);
}
@@ -6167,6 +6198,8 @@ fail:
Py_XDECREF(iter);
Py_XDECREF(iter2);
for (int i = 0; i < 3; i++) {
+ Py_XDECREF(signature[i]);
+ Py_XDECREF(operand_DTypes[i]);
Py_XDECREF(operation_descrs[i]);
Py_XDECREF(array_operands[i]);
}
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 9ed923cf5..90846ca55 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -1528,7 +1528,7 @@ PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc,
}
if (j == nargs) {
*out_innerloop = ufunc->functions[i];
- *out_innerloopdata = ufunc->data[i];
+ *out_innerloopdata = (ufunc->data == NULL) ? NULL : ufunc->data[i];
return 0;
}
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 272555704..d79506000 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -56,7 +56,7 @@ object_ufunc_loop_selector(PyUFuncObject *ufunc,
int *out_needs_api)
{
*out_innerloop = ufunc->functions[0];
- *out_innerloopdata = ufunc->data[0];
+ *out_innerloopdata = (ufunc->data == NULL) ? NULL : ufunc->data[0];
*out_needs_api = 1;
return 0;
@@ -288,8 +288,8 @@ int initumath(PyObject *m)
PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(NPY_NZERO));
PyModule_AddObject(m, "NAN", PyFloat_FromDouble(NPY_NAN));
- s = PyDict_GetItemString(d, "true_divide");
- PyDict_SetItemString(d, "divide", s);
+ s = PyDict_GetItemString(d, "divide");
+ PyDict_SetItemString(d, "true_divide", s);
s = PyDict_GetItemString(d, "conjugate");
s2 = PyDict_GetItemString(d, "remainder");
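A hedged sanity check of the alias swap above: both names refer to the same ufunc
object, and only the canonical dictionary entry changes, so warnings and error
messages now mention "divide" rather than "true_divide" (as reflected in the
test_datetime.py update below).

import numpy as np

print(np.true_divide is np.divide)    # True, same ufunc object
print(np.divide(np.arange(3), 2.0))   # [0.  0.5 1. ]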
diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py
index cb4792090..a57e46fd0 100644
--- a/numpy/core/tests/test_casting_unittests.py
+++ b/numpy/core/tests/test_casting_unittests.py
@@ -76,7 +76,6 @@ class Casting(enum.IntEnum):
safe = 2
same_kind = 3
unsafe = 4
- cast_is_view = 1 << 16
def _get_cancast_table():
@@ -259,14 +258,14 @@ class TestCasting:
del default
for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
- casting, (from_res, to_res) = cast._resolve_descriptors(
- (from_dt, to_dt))
+ casting, (from_res, to_res), view_off = (
+ cast._resolve_descriptors((from_dt, to_dt)))
assert(type(from_res) == from_Dt)
assert(type(to_res) == to_Dt)
- if casting & Casting.cast_is_view:
+ if view_off is not None:
# If a view is acceptable, this is "no" casting
# and byte order must be matching.
- assert casting == Casting.no | Casting.cast_is_view
+ assert casting == Casting.no
# The above table lists this as "equivalent"
assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
# Note that to_res may not be the same as from_dt
@@ -299,7 +298,7 @@ class TestCasting:
to_dt = to_dt.values[0]
cast = get_castingimpl(type(from_dt), type(to_dt))
- casting, (from_res, to_res) = cast._resolve_descriptors(
+ casting, (from_res, to_res), view_off = cast._resolve_descriptors(
(from_dt, to_dt))
if from_res is not from_dt or to_res is not to_dt:
@@ -307,7 +306,7 @@ class TestCasting:
             # each of which should be tested individually.
return
- safe = (casting & ~Casting.cast_is_view) <= Casting.safe
+ safe = casting <= Casting.safe
del from_res, to_res, casting
arr1, arr2, values = self.get_data(from_dt, to_dt)
@@ -355,14 +354,15 @@ class TestCasting:
for time_dt in time_dtypes:
cast = get_castingimpl(type(from_dt), type(time_dt))
- casting, (from_res, to_res) = cast._resolve_descriptors(
+ casting, (from_res, to_res), view_off = cast._resolve_descriptors(
(from_dt, time_dt))
assert from_res is from_dt
assert to_res is time_dt
del from_res, to_res
- assert(casting & CAST_TABLE[from_Dt][type(time_dt)])
+ assert casting & CAST_TABLE[from_Dt][type(time_dt)]
+ assert view_off is None
int64_dt = np.dtype(np.int64)
arr1, arr2, values = self.get_data(from_dt, int64_dt)
@@ -391,31 +391,37 @@ class TestCasting:
assert arr2_o.tobytes() == arr2.tobytes()
@pytest.mark.parametrize(
- ["from_dt", "to_dt", "expected_casting", "nom", "denom"],
- [("M8[ns]", None,
- Casting.no | Casting.cast_is_view, 1, 1),
- (str(np.dtype("M8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
- ("M8", "M8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
- ("M8[ms]", "M8", Casting.unsafe, 1, 1), # should be invalid cast
- ("M8[5ms]", "M8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
- ("M8[ns]", "M8[ms]", Casting.same_kind, 1, 10**6),
- ("M8[ms]", "M8[ns]", Casting.safe, 10**6, 1),
- ("M8[ms]", "M8[7ms]", Casting.same_kind, 1, 7),
- ("M8[4D]", "M8[1M]", Casting.same_kind, None,
+ ["from_dt", "to_dt", "expected_casting", "expected_view_off",
+ "nom", "denom"],
+ [("M8[ns]", None, Casting.no, 0, 1, 1),
+ (str(np.dtype("M8[ns]").newbyteorder()), None,
+ Casting.equiv, None, 1, 1),
+ ("M8", "M8[ms]", Casting.safe, 0, 1, 1),
+ # should be invalid cast:
+ ("M8[ms]", "M8", Casting.unsafe, None, 1, 1),
+ ("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1),
+ ("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6),
+ ("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1),
+ ("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7),
+ ("M8[4D]", "M8[1M]", Casting.same_kind, None, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, -1, 1314, -1315, 564442610]),
- ("m8[ns]", None, Casting.no | Casting.cast_is_view, 1, 1),
- (str(np.dtype("m8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
- ("m8", "m8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
- ("m8[ms]", "m8", Casting.unsafe, 1, 1), # should be invalid cast
- ("m8[5ms]", "m8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
- ("m8[ns]", "m8[ms]", Casting.same_kind, 1, 10**6),
- ("m8[ms]", "m8[ns]", Casting.safe, 10**6, 1),
- ("m8[ms]", "m8[7ms]", Casting.same_kind, 1, 7),
- ("m8[4D]", "m8[1M]", Casting.unsafe, None,
+ ("m8[ns]", None, Casting.no, 0, 1, 1),
+ (str(np.dtype("m8[ns]").newbyteorder()), None,
+ Casting.equiv, None, 1, 1),
+ ("m8", "m8[ms]", Casting.safe, 0, 1, 1),
+ # should be invalid cast:
+ ("m8[ms]", "m8", Casting.unsafe, None, 1, 1),
+ ("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1),
+ ("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6),
+ ("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1),
+ ("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7),
+ ("m8[4D]", "m8[1M]", Casting.unsafe, None, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, 0, 1314, -1315, 564442610])])
- def test_time_to_time(self, from_dt, to_dt, expected_casting, nom, denom):
+ def test_time_to_time(self, from_dt, to_dt,
+ expected_casting, expected_view_off,
+ nom, denom):
from_dt = np.dtype(from_dt)
if to_dt is not None:
to_dt = np.dtype(to_dt)
@@ -428,10 +434,12 @@ class TestCasting:
DType = type(from_dt)
cast = get_castingimpl(DType, DType)
- casting, (from_res, to_res) = cast._resolve_descriptors((from_dt, to_dt))
+ casting, (from_res, to_res), view_off = cast._resolve_descriptors(
+ (from_dt, to_dt))
assert from_res is from_dt
assert to_res is to_dt or to_dt is None
assert casting == expected_casting
+ assert view_off == expected_view_off
if nom is not None:
expected_out = (values * nom // denom).view(to_res)
@@ -476,9 +484,11 @@ class TestCasting:
expected_length = get_expected_stringlength(other_dt)
string_dt = np.dtype(f"{string_char}{expected_length}")
- safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
+ safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
+ (other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert safety == Casting.safe # we consider to string casts "safe"
+ assert view_off is None
assert isinstance(res_dt, string_DT)
# These casts currently implement changing the string length, so
@@ -490,19 +500,24 @@ class TestCasting:
expected_safety = Casting.same_kind
to_dt = self.string_with_modified_length(string_dt, change_length)
- safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (other_dt, to_dt))
assert res_dt is to_dt
assert safety == expected_safety
+ assert view_off is None
# The opposite direction is always considered unsafe:
cast = get_castingimpl(string_DT, other_DT)
- safety, _ = cast._resolve_descriptors((string_dt, other_dt))
+ safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt))
assert safety == Casting.unsafe
+ assert view_off is None
cast = get_castingimpl(string_DT, other_DT)
- safety, (_, res_dt) = cast._resolve_descriptors((string_dt, None))
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (string_dt, None))
assert safety == Casting.unsafe
+ assert view_off is None
assert other_dt is res_dt # returns the singleton for simple dtypes
@pytest.mark.parametrize("string_char", ["S", "U"])
@@ -521,7 +536,8 @@ class TestCasting:
cast = get_castingimpl(type(other_dt), string_DT)
cast_back = get_castingimpl(string_DT, type(other_dt))
- _, (res_other_dt, string_dt) = cast._resolve_descriptors((other_dt, None))
+ _, (res_other_dt, string_dt), _ = cast._resolve_descriptors(
+ (other_dt, None))
if res_other_dt is not other_dt:
# do not support non-native byteorder, skip test in that case
@@ -580,13 +596,16 @@ class TestCasting:
expected_length = other_dt.itemsize // div
string_dt = np.dtype(f"{string_char}{expected_length}")
- safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
+ safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
+ (other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert isinstance(res_dt, string_DT)
+ expected_view_off = None
if other_dt.char == string_char:
if other_dt.isnative:
- expected_safety = Casting.no | Casting.cast_is_view
+ expected_safety = Casting.no
+ expected_view_off = 0
else:
expected_safety = Casting.equiv
elif string_char == "U":
@@ -594,13 +613,19 @@ class TestCasting:
else:
expected_safety = Casting.unsafe
+ assert view_off == expected_view_off
assert expected_safety == safety
for change_length in [-1, 0, 1]:
to_dt = self.string_with_modified_length(string_dt, change_length)
- safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (other_dt, to_dt))
assert res_dt is to_dt
+ if change_length <= 0:
+ assert view_off == expected_view_off
+ else:
+ assert view_off is None
if expected_safety == Casting.unsafe:
assert safety == expected_safety
elif change_length < 0:
@@ -655,12 +680,16 @@ class TestCasting:
object_dtype = type(np.dtype(object))
cast = get_castingimpl(object_dtype, type(dtype))
- safety, (_, res_dt) = cast._resolve_descriptors((np.dtype("O"), dtype))
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (np.dtype("O"), dtype))
assert safety == Casting.unsafe
+ assert view_off is None
assert res_dt is dtype
- safety, (_, res_dt) = cast._resolve_descriptors((np.dtype("O"), None))
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (np.dtype("O"), None))
assert safety == Casting.unsafe
+ assert view_off is None
assert res_dt == dtype.newbyteorder("=")
@pytest.mark.parametrize("dtype", simple_dtype_instances())
@@ -669,8 +698,10 @@ class TestCasting:
object_dtype = type(np.dtype(object))
cast = get_castingimpl(type(dtype), object_dtype)
- safety, (_, res_dt) = cast._resolve_descriptors((dtype, None))
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (dtype, None))
assert safety == Casting.safe
+ assert view_off is None
assert res_dt is np.dtype("O")
@pytest.mark.parametrize("casting", ["no", "unsafe"])
@@ -681,6 +712,71 @@ class TestCasting:
assert np.can_cast("V4", dtype, casting=casting) == expected
assert np.can_cast(dtype, "V4", casting=casting) == expected
+ @pytest.mark.parametrize(["to_dt", "expected_off"],
+ [ # Same as `from_dt` but with both fields shifted:
+ (np.dtype({"names": ["a", "b"], "formats": ["i4", "f4"],
+ "offsets": [2, 6]}), -2),
+ # Additional change of the names
+ # TODO: Tests will need changing for order vs. name based casting:
+ (np.dtype({"names": ["b", "a"], "formats": ["f4", "i4"],
+ "offsets": [6, 2]}), -2),
+ # Incompatible field offset change (offsets -2 and 0)
+ (np.dtype({"names": ["b", "a"], "formats": ["f4", "i4"],
+ "offsets": [6, 0]}), None)])
+ def test_structured_field_offsets(self, to_dt, expected_off):
+ # This checks the cast-safety and view offset for swapped and "shifted"
+ # fields which are viewable
+ from_dt = np.dtype({"names": ["a", "b"],
+ "formats": ["i4", "f4"],
+ "offsets": [0, 4]})
+ cast = get_castingimpl(type(from_dt), type(to_dt))
+ safety, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
+ assert safety == Casting.equiv
+ # Shifting the original data pointer by -2 will align both by
+ # effectively adding 2 bytes of spacing before `from_dt`.
+ assert view_off == expected_off
+
+ @pytest.mark.parametrize(("from_dt", "to_dt", "expected_off"), [
+ # Subarray cases:
+ ("i", "(1,1)i", 0),
+ ("(1,1)i", "i", 0),
+ ("(2,1)i", "(2,1)i", 0),
+ # field cases (field to field is tested explicitly also):
+ ("i", dict(names=["a"], formats=["i"], offsets=[2]), -2),
+ (dict(names=["a"], formats=["i"], offsets=[2]), "i", 2),
+ # Currently considered not viewable, due to multiple fields
+ # even though they overlap (maybe we should not allow that?)
+ ("i", dict(names=["a", "b"], formats=["i", "i"], offsets=[2, 2]),
+ None),
+ # different number of fields can't work, should probably just fail
+ # so it never reports "viewable":
+ ("i,i", "i,i,i", None),
+ # Unstructured void cases:
+ ("i4", "V3", 0), # void smaller or equal
+ ("i4", "V4", 0), # void smaller or equal
+ ("i4", "V10", None), # void is larger (no view)
+ ("O", "V4", None), # currently reject objects for view here.
+ ("O", "V8", None), # currently reject objects for view here.
+ ("V4", "V3", 0),
+ ("V4", "V4", 0),
+ ("V3", "V4", None),
+ # Note that currently void-to-other cast goes via byte-strings
+ # and is not a "view" based cast like the opposite direction:
+ ("V4", "i4", None),
+ # completely invalid/impossible cast:
+ ("i,i", "i,i,i", None),
+ ])
+    def test_structured_view_offsets_parametric(
+ self, from_dt, to_dt, expected_off):
+        # TODO: While this test is fairly thorough, right now it does not
+        # really test some paths that may have nonzero offsets (they don't
+        # really exist).
+ from_dt = np.dtype(from_dt)
+ to_dt = np.dtype(to_dt)
+ cast = get_castingimpl(type(from_dt), type(to_dt))
+ _, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
+ assert view_off == expected_off
+
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_object_casts_NULL_None_equivalence(self, dtype):
# None to <other> casts may succeed or fail, but a NULL'ed array must
diff --git a/numpy/core/tests/test_cpu_features.py b/numpy/core/tests/test_cpu_features.py
index 2ccbff41c..706cf7a7e 100644
--- a/numpy/core/tests/test_cpu_features.py
+++ b/numpy/core/tests/test_cpu_features.py
@@ -146,6 +146,17 @@ class Test_POWER_Features(AbstractTest):
def load_flags(self):
self.load_flags_auxv()
+
+is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
+@pytest.mark.skipif(not is_linux or not is_zarch,
+ reason="Only for Linux and IBM Z")
+class Test_ZARCH_Features(AbstractTest):
+ features = ["VX", "VXE", "VXE2"]
+
+ def load_flags(self):
+ self.load_flags_auxv()
+
+
is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
class Test_ARM_Features(AbstractTest):
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index b95d669a8..baae77a35 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -1437,7 +1437,7 @@ class TestDateTime:
# NaTs
with suppress_warnings() as sup:
- sup.filter(RuntimeWarning, r".*encountered in true\_divide")
+ sup.filter(RuntimeWarning, r".*encountered in divide")
nat = np.timedelta64('NaT')
for tp in (int, float):
assert_equal(np.timedelta64(1) / tp(0), nat)
@@ -2029,19 +2029,25 @@ class TestDateTime:
assert_equal(np.maximum.reduce(a),
np.timedelta64(7, 's'))
+ def test_timedelta_correct_mean(self):
+        # test mainly because this previously worked only via a bug that
+        # allowed `timedelta.sum(dtype="f8")` to ignore the dtype request.
+ a = np.arange(1000, dtype="m8[s]")
+ assert_array_equal(a.mean(), a.sum() / len(a))
+
def test_datetime_no_subtract_reducelike(self):
# subtracting two datetime64 works, but we cannot reduce it, since
# the result of that subtraction will have a different dtype.
arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]")
- msg = r"the resolved dtypes are not compatible with subtract\."
+ msg = r"the resolved dtypes are not compatible"
- with pytest.raises(TypeError, match=msg + "reduce"):
+ with pytest.raises(TypeError, match=msg):
np.subtract.reduce(arr)
- with pytest.raises(TypeError, match=msg + "accumulate"):
+ with pytest.raises(TypeError, match=msg):
np.subtract.accumulate(arr)
- with pytest.raises(TypeError, match=msg + "reduceat"):
+ with pytest.raises(TypeError, match=msg):
np.subtract.reduceat(arr, [0])
def test_datetime_busday_offset(self):
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index e0b66defc..d2a69d4cf 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -257,20 +257,6 @@ class TestDatetime64Timezone(_DeprecationTestCase):
self.assert_deprecated(np.datetime64, args=(dt,))
-class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
- """View of non-C-contiguous arrays deprecated in 1.11.0.
-
- The deprecation will not be raised for arrays that are both C and F
- contiguous, as C contiguous is dominant. There are more such arrays
- with relaxed stride checking than without so the deprecation is not
- as visible with relaxed stride checking in force.
- """
-
- def test_fortran_contiguous(self):
- self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,))
- self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
-
-
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
"""Assigning the 'data' attribute of an ndarray is unsafe as pointed
out in gh-7093. Eventually, such assignment should NOT be allowed, but
@@ -380,18 +366,6 @@ class TestPyArray_AS2D(_DeprecationTestCase):
assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
-class Test_UPDATEIFCOPY(_DeprecationTestCase):
- """
- v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use
- WRITEBACKIFCOPY instead
- """
- def test_npy_updateifcopy_deprecation(self):
- from numpy.core._multiarray_tests import npy_updateifcopy_deprecation
- arr = np.arange(9).reshape(3, 3)
- v = arr.T
- self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,))
-
-
class TestDatetimeEvent(_DeprecationTestCase):
# 2017-08-11, 1.14.0
def test_3_tuple(self):
@@ -427,11 +401,6 @@ class TestBincount(_DeprecationTestCase):
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
-class TestAlen(_DeprecationTestCase):
- # 2019-08-02, 1.18.0
- def test_alen(self):
- self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3])))
-
class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
@@ -1270,3 +1239,14 @@ class TestMemEventHook(_DeprecationTestCase):
with pytest.warns(DeprecationWarning,
match='PyDataMem_SetEventHook is deprecated'):
ma_tests.test_pydatamem_seteventhook_end()
+
+
+class TestArrayFinalizeNone(_DeprecationTestCase):
+ message = "Setting __array_finalize__ = None"
+
+ def test_use_none_is_deprecated(self):
+ # Deprecated way that ndarray itself showed nothing needs finalizing.
+ class NoFinalize(np.ndarray):
+ __array_finalize__ = None
+
+ self.assert_deprecated(lambda: np.array(1).view(NoFinalize))
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 23182470b..708e82910 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -234,11 +234,6 @@ class TestFlags:
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
- with assert_warns(DeprecationWarning):
- assert_equal(self.a.flags.updateifcopy, False)
- with assert_warns(DeprecationWarning):
- assert_equal(self.a.flags['U'], False)
- assert_equal(self.a.flags['UPDATEIFCOPY'], False)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
@@ -5381,19 +5376,8 @@ class TestFlat:
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
- # for 1.14 all are set to non-writeable on the way to replacing the
- # UPDATEIFCOPY array returned for non-contiguous arrays.
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is False)
- with assert_warns(DeprecationWarning):
- assert_(c.flags.updateifcopy is False)
- with assert_warns(DeprecationWarning):
- assert_(d.flags.updateifcopy is False)
- with assert_warns(DeprecationWarning):
- assert_(e.flags.updateifcopy is False)
- with assert_warns(DeprecationWarning):
- # UPDATEIFCOPY is removed.
- assert_(f.flags.updateifcopy is False)
assert_(c.flags.writebackifcopy is False)
assert_(d.flags.writebackifcopy is False)
assert_(e.flags.writebackifcopy is False)
@@ -6888,26 +6872,6 @@ class TestInner:
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
-class TestAlen:
- def test_basic(self):
- with pytest.warns(DeprecationWarning):
- m = np.array([1, 2, 3])
- assert_equal(np.alen(m), 3)
-
- m = np.array([[1, 2, 3], [4, 5, 7]])
- assert_equal(np.alen(m), 2)
-
- m = [1, 2, 3]
- assert_equal(np.alen(m), 3)
-
- m = [[1, 2, 3], [4, 5, 7]]
- assert_equal(np.alen(m), 2)
-
- def test_singleton(self):
- with pytest.warns(DeprecationWarning):
- assert_equal(np.alen(5), 1)
-
-
class TestChoose:
def setup(self):
self.x = 2*np.ones((3,), dtype=int)
@@ -7813,6 +7777,26 @@ class TestNewBufferProtocol:
# Fix buffer info again before we delete (or we lose the memory)
_multiarray_tests.corrupt_or_fix_bufferinfo(obj)
+ def test_no_suboffsets(self):
+ try:
+ import _testbuffer
+ except ImportError:
+ raise pytest.skip("_testbuffer is not available")
+
+ for shape in [(2, 3), (2, 3, 4)]:
+ data = list(range(np.prod(shape)))
+ buffer = _testbuffer.ndarray(data, shape, format='i',
+ flags=_testbuffer.ND_PIL)
+ msg = "NumPy currently does not support.*suboffsets"
+ with pytest.raises(BufferError, match=msg):
+ np.asarray(buffer)
+ with pytest.raises(BufferError, match=msg):
+ np.asarray([buffer])
+
+ # Also check (unrelated and more limited but similar) frombuffer:
+ with pytest.raises(BufferError):
+ np.frombuffer(buffer)
+
class TestArrayCreationCopyArgument(object):
@@ -7832,9 +7816,9 @@ class TestArrayCreationCopyArgument(object):
pyscalar = arr.item(0)
# Test never-copy raises error:
- assert_raises(ValueError, np.array, scalar,
+ assert_raises(ValueError, np.array, scalar,
copy=np._CopyMode.NEVER)
- assert_raises(ValueError, np.array, pyscalar,
+ assert_raises(ValueError, np.array, pyscalar,
copy=np._CopyMode.NEVER)
assert_raises(ValueError, np.array, pyscalar,
copy=self.RaiseOnBool())
@@ -8970,12 +8954,28 @@ class TestArrayFinalize:
a = np.array(1).view(SavesBase)
assert_(a.saved_base is a.base)
- def test_bad_finalize(self):
+ def test_bad_finalize1(self):
class BadAttributeArray(np.ndarray):
@property
def __array_finalize__(self):
raise RuntimeError("boohoo!")
+ with pytest.raises(TypeError, match="not callable"):
+ np.arange(10).view(BadAttributeArray)
+
+ def test_bad_finalize2(self):
+ class BadAttributeArray(np.ndarray):
+ def __array_finalize__(self):
+ raise RuntimeError("boohoo!")
+
+ with pytest.raises(TypeError, match="takes 1 positional"):
+ np.arange(10).view(BadAttributeArray)
+
+ def test_bad_finalize3(self):
+ class BadAttributeArray(np.ndarray):
+ def __array_finalize__(self, obj):
+ raise RuntimeError("boohoo!")
+
with pytest.raises(RuntimeError, match="boohoo!"):
np.arange(10).view(BadAttributeArray)
@@ -9013,6 +9013,14 @@ class TestArrayFinalize:
break_cycles()
assert_(obj_ref() is None, "no references should remain")
+ def test_can_use_super(self):
+ class SuperFinalize(np.ndarray):
+ def __array_finalize__(self, obj):
+ self.saved_result = super().__array_finalize__(obj)
+
+ a = np.array(1).view(SuperFinalize)
+ assert_(a.saved_result is None)
+
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
@@ -9207,3 +9215,66 @@ def test_getfield():
pytest.raises(ValueError, a.getfield, 'uint8', -1)
pytest.raises(ValueError, a.getfield, 'uint8', 16)
pytest.raises(ValueError, a.getfield, 'uint64', 0)
+
+
+class TestViewDtype:
+ """
+ Verify that making a view of a non-contiguous array works as expected.
+ """
+ def test_smaller_dtype_multiple(self):
+ # x is non-contiguous
+ x = np.arange(10, dtype='<i4')[::2]
+ with pytest.raises(ValueError,
+ match='the last axis must be contiguous'):
+ x.view('<i2')
+ expected = [[0, 0], [2, 0], [4, 0], [6, 0], [8, 0]]
+ assert_array_equal(x[:, np.newaxis].view('<i2'), expected)
+
+ def test_smaller_dtype_not_multiple(self):
+ # x is non-contiguous
+ x = np.arange(5, dtype='<i4')[::2]
+
+ with pytest.raises(ValueError,
+ match='the last axis must be contiguous'):
+ x.view('S3')
+ with pytest.raises(ValueError,
+ match='When changing to a smaller dtype'):
+ x[:, np.newaxis].view('S3')
+
+ # Make sure the problem is because of the dtype size
+ expected = [[b''], [b'\x02'], [b'\x04']]
+ assert_array_equal(x[:, np.newaxis].view('S4'), expected)
+
+ def test_larger_dtype_multiple(self):
+ # x is non-contiguous in the first dimension, contiguous in the last
+ x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
+ expected = np.array([[65536], [327684], [589832],
+ [851980], [1114128]], dtype='<i4')
+ assert_array_equal(x.view('<i4'), expected)
+
+ def test_larger_dtype_not_multiple(self):
+ # x is non-contiguous in the first dimension, contiguous in the last
+ x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
+ with pytest.raises(ValueError,
+ match='When changing to a larger dtype'):
+ x.view('S3')
+ # Make sure the problem is because of the dtype size
+ expected = [[b'\x00\x00\x01'], [b'\x04\x00\x05'], [b'\x08\x00\t'],
+ [b'\x0c\x00\r'], [b'\x10\x00\x11']]
+ assert_array_equal(x.view('S4'), expected)
+
+ def test_f_contiguous(self):
+ # x is F-contiguous
+ x = np.arange(4 * 3, dtype='<i4').reshape(4, 3).T
+ with pytest.raises(ValueError,
+ match='the last axis must be contiguous'):
+ x.view('<i2')
+
+ def test_non_c_contiguous(self):
+ # x is contiguous in axis=-1, but not C-contiguous in other axes
+ x = np.arange(2 * 3 * 4, dtype='i1').\
+ reshape(2, 3, 4).transpose(1, 0, 2)
+ expected = [[[256, 770], [3340, 3854]],
+ [[1284, 1798], [4368, 4882]],
+ [[2312, 2826], [5396, 5910]]]
+ assert_array_equal(x.view('<i2'), expected)
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index ed775cac6..d96c14e54 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -828,7 +828,7 @@ def test_iter_nbo_align_contig():
casting='equiv',
op_dtypes=[np.dtype('f4')])
with i:
- # context manager triggers UPDATEIFCOPY on i at exit
+ # context manager triggers WRITEBACKIFCOPY on i at exit
assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
assert_equal(i.operands[0], a)
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 90078a2ea..8a77eca00 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -4,6 +4,7 @@ import warnings
import itertools
import operator
import platform
+from distutils.version import LooseVersion as _LooseVersion
import pytest
from hypothesis import given, settings, Verbosity
from hypothesis.strategies import sampled_from
@@ -680,17 +681,29 @@ class TestAbs:
@pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
def test_builtin_abs(self, dtype):
- if sys.platform == "cygwin" and dtype == np.clongdouble:
+ if (
+ sys.platform == "cygwin" and dtype == np.clongdouble and
+ (
+ _LooseVersion(platform.release().split("-")[0])
+ < _LooseVersion("3.3.0")
+ )
+ ):
pytest.xfail(
- reason="absl is computed in double precision on cygwin"
+ reason="absl is computed in double precision on cygwin < 3.3"
)
self._test_abs_func(abs, dtype)
@pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
def test_numpy_abs(self, dtype):
- if sys.platform == "cygwin" and dtype == np.clongdouble:
+ if (
+ sys.platform == "cygwin" and dtype == np.clongdouble and
+ (
+ _LooseVersion(platform.release().split("-")[0])
+ < _LooseVersion("3.3.0")
+ )
+ ):
pytest.xfail(
- reason="absl is computed in double precision on cygwin"
+ reason="absl is computed in double precision on cygwin < 3.3"
)
self._test_abs_func(np.abs, dtype)
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index ee21d4aa5..4deb5a0a4 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -306,6 +306,7 @@ class TestRealScalars:
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
"1.2" if tp != np.float16 else "1.2002")
assert_equal(fpos(tp('1.'), trim='-'), "1")
+ assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1")
@pytest.mark.skipif(not platform.machine().startswith("ppc64"),
reason="only applies to ppc float128 values")
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index ef0bac957..9a9d46da0 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1762,12 +1762,15 @@ class TestUfunc:
result = _rational_tests.test_add(a, b)
assert_equal(result, target)
- # But since we use the old type resolver, this may not work
- # for dtype variations unless the output dtype is given:
+        # This works even more generally, so long as the default common-dtype
+        # promoter works out:
result = _rational_tests.test_add(a, b.astype(np.uint16), out=c)
assert_equal(result, target)
+
+        # But it can be fooled, e.g. by using scalars, which forces legacy
+        # type resolution to kick in (and then fails):
with assert_raises(TypeError):
- _rational_tests.test_add(a, b.astype(np.uint16))
+ _rational_tests.test_add(a, np.uint16(2))
def test_operand_flags(self):
a = np.arange(16, dtype='l').reshape(4, 4)
@@ -2123,6 +2126,17 @@ class TestUfunc:
c = np.array([1., 2.])
assert_array_equal(ufunc(a, c), ufunc([True, True], True))
assert ufunc.reduce(a) == True
+        # check that passing `out` does not affect the result:
+ out = np.zeros(2, dtype=np.int32)
+ expected = ufunc([True, True], True).astype(out.dtype)
+ assert_array_equal(ufunc(a, c, out=out), expected)
+ out = np.zeros((), dtype=np.int32)
+ assert ufunc.reduce(a, out=out) == True
+        # Last check: test reduction when `out` and `a` match (the complexity
+        # here is that "i,i->?" may seem right, but should not match).
+ a = np.array([3], dtype="i")
+ out = np.zeros((), dtype=a.dtype)
+ assert ufunc.reduce(a, out=out) == 1
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
@@ -2134,6 +2148,60 @@ class TestUfunc:
# It would be safe, but not equiv casting:
ufunc(a, c, out=out, casting="equiv")
+ def test_reducelike_byteorder_resolution(self):
+ # See gh-20699, byte-order changes need some extra care in the type
+ # resolution to make the following succeed:
+ arr_be = np.arange(10, dtype=">i8")
+ arr_le = np.arange(10, dtype="<i8")
+
+ assert np.add.reduce(arr_be) == np.add.reduce(arr_le)
+ assert_array_equal(np.add.accumulate(arr_be), np.add.accumulate(arr_le))
+ assert_array_equal(
+ np.add.reduceat(arr_be, [1]), np.add.reduceat(arr_le, [1]))
+
+ def test_reducelike_out_promotes(self):
+ # Check that the out argument to reductions is considered for
+ # promotion. See also gh-20455.
+ # Note that these paths could prefer `initial=` in the future and
+ # do not up-cast to the default integer for add and prod
+ arr = np.ones(1000, dtype=np.uint8)
+ out = np.zeros((), dtype=np.uint16)
+ assert np.add.reduce(arr, out=out) == 1000
+ arr[:10] = 2
+ assert np.multiply.reduce(arr, out=out) == 2**10
+
+ # For legacy dtypes, the signature currently has to be forced if `out=`
+ # is passed. The two paths below should differ, without `dtype=` the
+ # expected result should be: `np.prod(arr.astype("f8")).astype("f4")`!
+ arr = np.full(5, 2**25-1, dtype=np.int64)
+
+ # float32 and int64 promote to float64:
+ res = np.zeros((), dtype=np.float32)
+ # If `dtype=` is passed, the calculation is forced to float32:
+ single_res = np.zeros((), dtype=np.float32)
+ np.multiply.reduce(arr, out=single_res, dtype=np.float32)
+ assert single_res != res
+
+ def test_reducelike_output_needs_identical_cast(self):
+        # Checks that the case where we only need a simple byte-swap works;
+        # mainly tests that this is not rejected directly.
+ # (interesting because we require descriptor identity in reducelikes).
+ arr = np.ones(20, dtype="f8")
+ out = np.empty((), dtype=arr.dtype.newbyteorder())
+ expected = np.add.reduce(arr)
+ np.add.reduce(arr, out=out)
+ assert_array_equal(expected, out)
+ # Check reduceat:
+ out = np.empty(2, dtype=arr.dtype.newbyteorder())
+ expected = np.add.reduceat(arr, [0, 1])
+ np.add.reduceat(arr, [0, 1], out=out)
+ assert_array_equal(expected, out)
+ # And accumulate:
+ out = np.empty(arr.shape, dtype=arr.dtype.newbyteorder())
+ expected = np.add.accumulate(arr)
+ np.add.accumulate(arr, out=out)
+ assert_array_equal(expected, out)
+
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
# appropriately.
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index fc7c592f0..e7fee46b7 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -17,18 +17,8 @@ from numpy.testing import (
assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings,
_gen_alignment_data, assert_array_almost_equal_nulp
)
+from numpy.testing._private.utils import _glibc_older_than
-def get_glibc_version():
- try:
- ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1]
- except Exception as inst:
- ver = '0.0'
-
- return ver
-
-
-glibcver = get_glibc_version()
-glibc_older_than = lambda x: (glibcver != '0.0' and glibcver < x)
def on_powerpc():
""" True if we are running on a Power PC platform."""
@@ -1014,7 +1004,7 @@ class TestSpecialFloats:
# See: https://github.com/numpy/numpy/issues/19192
@pytest.mark.xfail(
- glibc_older_than("2.17"),
+ _glibc_older_than("2.17"),
reason="Older glibc versions may not raise appropriate FP exceptions"
)
def test_exp_exceptions(self):
@@ -1262,6 +1252,11 @@ class TestSpecialFloats:
assert_raises(FloatingPointError, np.arctanh,
np.array(value, dtype=dt))
+ # See: https://github.com/numpy/numpy/issues/20448
+ @pytest.mark.xfail(
+ _glibc_older_than("2.17"),
+ reason="Older glibc versions may not raise appropriate FP exceptions"
+ )
def test_exp2(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
@@ -1397,7 +1392,7 @@ class TestAVXFloat32Transcendental:
M = np.int_(N/20)
index = np.random.randint(low=0, high=N, size=M)
x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))
- if not glibc_older_than("2.17"):
+ if not _glibc_older_than("2.17"):
# test coverage for elements > 117435.992f for which glibc is used
# this is known to be problematic on old glibc, so skip it there
x_f32[index] = np.float32(10E+10*np.random.rand(M))
@@ -3433,7 +3428,7 @@ class TestComplexFunctions:
x_series = np.logspace(-20, -3.001, 200)
x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
- if glibc_older_than("2.19") and dtype is np.longcomplex:
+ if dtype is np.longcomplex:
if (platform.machine() == 'aarch64' and bad_arcsinh()):
pytest.skip("Trig functions of np.longcomplex values known "
"to be inaccurate on aarch64 for some compilation "
diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py
index 32e2dca66..3d4d5b5aa 100644
--- a/numpy/core/tests/test_umath_accuracy.py
+++ b/numpy/core/tests/test_umath_accuracy.py
@@ -5,11 +5,14 @@ import sys
import pytest
from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER
from numpy.testing import assert_array_max_ulp
+from numpy.testing._private.utils import _glibc_older_than
from numpy.core._multiarray_umath import __cpu_features__
IS_AVX = __cpu_features__.get('AVX512F', False) or \
(__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False))
-runtest = sys.platform.startswith('linux') and IS_AVX
+# only run on linux with AVX, also avoid old glibc (numpy/numpy#20448).
+runtest = (sys.platform.startswith('linux')
+ and IS_AVX and not _glibc_older_than("2.17"))
platform_skip = pytest.mark.skipif(not runtest,
reason="avoid testing inconsistent platform "
"library implementations")