Diffstat (limited to 'numpy')
-rw-r--r--  numpy/__init__.pyi | 501
-rw-r--r--  numpy/core/__init__.py | 18
-rw-r--r--  numpy/core/_add_newdocs.py | 2
-rw-r--r--  numpy/core/_asarray.pyi | 77
-rw-r--r--  numpy/core/_dtype.py | 2
-rw-r--r--  numpy/core/code_generators/ufunc_docstrings.py | 4
-rw-r--r--  numpy/core/einsumfunc.py | 11
-rw-r--r--  numpy/core/include/numpy/ndarraytypes.h | 6
-rw-r--r--  numpy/core/include/numpy/npy_3kcompat.h | 25
-rw-r--r--  numpy/core/multiarray.py | 2
-rw-r--r--  numpy/core/src/common/array_assign.c | 27
-rw-r--r--  numpy/core/src/multiarray/_datetime.h | 10
-rw-r--r--  numpy/core/src/multiarray/_multiarray_tests.c.src | 27
-rw-r--r--  numpy/core/src/multiarray/array_coercion.c | 77
-rw-r--r--  numpy/core/src/multiarray/arrayfunction_override.c | 4
-rw-r--r--  numpy/core/src/multiarray/common.c | 21
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.c | 413
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.h | 9
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 3
-rw-r--r--  numpy/core/src/multiarray/datetime.c | 138
-rw-r--r--  numpy/core/src/multiarray/descriptor.c | 23
-rw-r--r--  numpy/core/src/multiarray/dtype_transfer.c | 321
-rw-r--r--  numpy/core/src/multiarray/dtypemeta.c | 174
-rw-r--r--  numpy/core/src/multiarray/dtypemeta.h | 16
-rw-r--r--  numpy/core/src/multiarray/hashdescr.c | 4
-rw-r--r--  numpy/core/src/multiarray/iterators.c | 2
-rw-r--r--  numpy/core/src/multiarray/mapping.c | 72
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c | 8
-rw-r--r--  numpy/core/src/multiarray/nditer_constr.c | 234
-rw-r--r--  numpy/core/src/multiarray/number.c | 13
-rw-r--r--  numpy/core/src/multiarray/refcount.c | 24
-rw-r--r--  numpy/core/src/multiarray/scalarapi.c | 60
-rw-r--r--  numpy/core/src/multiarray/scalartypes.c.src | 94
-rw-r--r--  numpy/core/src/multiarray/shape.c | 61
-rw-r--r--  numpy/core/src/multiarray/shape.h | 7
-rw-r--r--  numpy/core/src/multiarray/usertypes.c | 121
-rw-r--r--  numpy/core/src/multiarray/usertypes.h | 4
-rw-r--r--  numpy/core/src/umath/_umath_tests.c.src | 9
-rw-r--r--  numpy/core/src/umath/ufunc_object.c | 9
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c | 15
-rw-r--r--  numpy/core/tests/test_array_coercion.py | 31
-rw-r--r--  numpy/core/tests/test_numeric.py | 57
-rw-r--r--  numpy/core/tests/test_ufunc.py | 13
-rw-r--r--  numpy/distutils/system_info.py | 125
-rw-r--r--  numpy/distutils/tests/test_system_info.py | 35
-rw-r--r--  numpy/f2py/cfuncs.py | 250
-rw-r--r--  numpy/f2py/src/fortranobject.c | 7
-rw-r--r--  numpy/f2py/tests/src/module_data/mod.mod | bin 0 -> 412 bytes
-rw-r--r--  numpy/f2py/tests/src/module_data/module_data_docstring.f90 | 12
-rw-r--r--  numpy/f2py/tests/test_block_docstring.py | 2
-rw-r--r--  numpy/f2py/tests/test_module_doc.py | 30
-rw-r--r--  numpy/lib/arraysetops.py | 21
-rw-r--r--  numpy/linalg/linalg.py | 6
-rw-r--r--  numpy/ma/core.py | 8
-rw-r--r--  numpy/random/_generator.pyx | 2
-rw-r--r--  numpy/typing/_callable.py | 136
-rw-r--r--  numpy/typing/tests/data/fail/arithmetic.py | 19
-rw-r--r--  numpy/typing/tests/data/fail/array_constructors.py (renamed from numpy/typing/tests/data/fail/linspace.py) | 13
-rw-r--r--  numpy/typing/tests/data/fail/modules.py | 1
-rw-r--r--  numpy/typing/tests/data/fail/scalars.py | 16
-rw-r--r--  numpy/typing/tests/data/fail/simple.py | 12
-rw-r--r--  numpy/typing/tests/data/pass/arithmetic.py | 257
-rw-r--r--  numpy/typing/tests/data/pass/array_constructors.py | 66
-rw-r--r--  numpy/typing/tests/data/pass/linspace.py | 22
-rw-r--r--  numpy/typing/tests/data/pass/literal.py | 2
-rw-r--r--  numpy/typing/tests/data/pass/scalars.py | 13
-rw-r--r--  numpy/typing/tests/data/pass/simple.py | 9
-rw-r--r--  numpy/typing/tests/data/pass/ufuncs.py | 5
-rw-r--r--  numpy/typing/tests/data/reveal/arithmetic.py | 256
-rw-r--r--  numpy/typing/tests/data/reveal/array_constructors.py | 42
-rw-r--r--  numpy/typing/tests/data/reveal/linspace.py | 6
-rw-r--r--  numpy/typing/tests/data/reveal/scalars.py | 17
-rw-r--r--  numpy/typing/tests/test_typing.py | 4
73 files changed, 2833 insertions, 1310 deletions
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 633aa0ca3..9966ef199 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -5,6 +5,18 @@ from abc import abstractmethod
from numpy.core._internal import _ctypes
from numpy.typing import ArrayLike, DtypeLike, _Shape, _ShapeLike
+from numpy.typing._callable import (
+ _BoolOp,
+ _BoolSub,
+ _BoolTrueDiv,
+ _TD64Div,
+ _IntTrueDiv,
+ _UnsignedIntOp,
+ _SignedIntOp,
+ _FloatOp,
+ _ComplexOp,
+ _NumberOp,
+)
from typing import (
Any,
@@ -107,6 +119,14 @@ from numpy.core.fromnumeric import (
var,
)
+from numpy.core._asarray import (
+ asarray as asarray,
+ asanyarray as asanyarray,
+ ascontiguousarray as ascontiguousarray,
+ asfortranarray as asfortranarray,
+ require as require,
+)
+
# Add an object to `__all__` if their stubs are defined in an external file;
# their stubs will not be recognized otherwise.
# NOTE: This is redundant for objects defined within this file.
@@ -154,8 +174,316 @@ __all__ = [
"var",
]
-# TODO: remove when the full numpy namespace is defined
-def __getattr__(name: str) -> Any: ...
+DataSource: Any
+False_: Any
+MachAr: Any
+ScalarType: Any
+True_: Any
+UFUNC_PYVALS_NAME: Any
+angle: Any
+append: Any
+apply_along_axis: Any
+apply_over_axes: Any
+arange: Any
+array2string: Any
+array_repr: Any
+array_split: Any
+array_str: Any
+asarray_chkfinite: Any
+asfarray: Any
+asmatrix: Any
+asscalar: Any
+atleast_1d: Any
+atleast_2d: Any
+atleast_3d: Any
+average: Any
+bartlett: Any
+bincount: Any
+bitwise_not: Any
+blackman: Any
+block: Any
+bmat: Any
+bool8: Any
+broadcast: Any
+broadcast_arrays: Any
+broadcast_to: Any
+busday_count: Any
+busday_offset: Any
+busdaycalendar: Any
+byte: Any
+byte_bounds: Any
+bytes0: Any
+c_: Any
+can_cast: Any
+cast: Any
+cdouble: Any
+cfloat: Any
+char: Any
+chararray: Any
+clongdouble: Any
+clongfloat: Any
+column_stack: Any
+common_type: Any
+compare_chararrays: Any
+compat: Any
+complex256: Any
+complex_: Any
+concatenate: Any
+conj: Any
+copy: Any
+copyto: Any
+corrcoef: Any
+cov: Any
+csingle: Any
+ctypeslib: Any
+cumproduct: Any
+datetime_as_string: Any
+datetime_data: Any
+delete: Any
+deprecate: Any
+deprecate_with_doc: Any
+diag: Any
+diag_indices: Any
+diag_indices_from: Any
+diagflat: Any
+diff: Any
+digitize: Any
+disp: Any
+divide: Any
+dot: Any
+double: Any
+dsplit: Any
+dstack: Any
+ediff1d: Any
+einsum: Any
+einsum_path: Any
+emath: Any
+errstate: Any
+expand_dims: Any
+extract: Any
+eye: Any
+fft: Any
+fill_diagonal: Any
+finfo: Any
+fix: Any
+flip: Any
+fliplr: Any
+flipud: Any
+float128: Any
+float_: Any
+format_float_positional: Any
+format_float_scientific: Any
+format_parser: Any
+frombuffer: Any
+fromfile: Any
+fromiter: Any
+frompyfunc: Any
+fromregex: Any
+fromstring: Any
+genfromtxt: Any
+geomspace: Any
+get_include: Any
+get_printoptions: Any
+getbufsize: Any
+geterr: Any
+geterrcall: Any
+geterrobj: Any
+gradient: Any
+half: Any
+hamming: Any
+hanning: Any
+histogram: Any
+histogram2d: Any
+histogram_bin_edges: Any
+histogramdd: Any
+hsplit: Any
+hstack: Any
+i0: Any
+iinfo: Any
+imag: Any
+in1d: Any
+index_exp: Any
+info: Any
+inner: Any
+insert: Any
+int0: Any
+int_: Any
+intc: Any
+interp: Any
+intersect1d: Any
+intp: Any
+is_busday: Any
+iscomplex: Any
+iscomplexobj: Any
+isin: Any
+isneginf: Any
+isposinf: Any
+isreal: Any
+isrealobj: Any
+iterable: Any
+ix_: Any
+kaiser: Any
+kron: Any
+lexsort: Any
+lib: Any
+linalg: Any
+linspace: Any
+load: Any
+loads: Any
+loadtxt: Any
+logspace: Any
+longcomplex: Any
+longdouble: Any
+longfloat: Any
+longlong: Any
+lookfor: Any
+ma: Any
+mafromtxt: Any
+mask_indices: Any
+mat: Any
+math: Any
+matrix: Any
+matrixlib: Any
+max: Any
+may_share_memory: Any
+median: Any
+memmap: Any
+meshgrid: Any
+mgrid: Any
+min: Any
+min_scalar_type: Any
+mintypecode: Any
+mod: Any
+msort: Any
+nan_to_num: Any
+nanargmax: Any
+nanargmin: Any
+nancumprod: Any
+nancumsum: Any
+nanmax: Any
+nanmean: Any
+nanmedian: Any
+nanmin: Any
+nanpercentile: Any
+nanprod: Any
+nanquantile: Any
+nanstd: Any
+nansum: Any
+nanvar: Any
+nbytes: Any
+ndenumerate: Any
+ndfromtxt: Any
+ndindex: Any
+nditer: Any
+nested_iters: Any
+newaxis: Any
+numarray: Any
+object0: Any
+ogrid: Any
+packbits: Any
+pad: Any
+percentile: Any
+piecewise: Any
+place: Any
+poly: Any
+poly1d: Any
+polyadd: Any
+polyder: Any
+polydiv: Any
+polyfit: Any
+polyint: Any
+polymul: Any
+polynomial: Any
+polysub: Any
+polyval: Any
+printoptions: Any
+product: Any
+promote_types: Any
+put_along_axis: Any
+putmask: Any
+quantile: Any
+r_: Any
+random: Any
+ravel_multi_index: Any
+real: Any
+real_if_close: Any
+rec: Any
+recarray: Any
+recfromcsv: Any
+recfromtxt: Any
+record: Any
+result_type: Any
+roots: Any
+rot90: Any
+round: Any
+round_: Any
+row_stack: Any
+s_: Any
+save: Any
+savetxt: Any
+savez: Any
+savez_compressed: Any
+sctypeDict: Any
+sctypes: Any
+select: Any
+set_printoptions: Any
+set_string_function: Any
+setbufsize: Any
+setdiff1d: Any
+seterr: Any
+seterrcall: Any
+seterrobj: Any
+setxor1d: Any
+shares_memory: Any
+short: Any
+show_config: Any
+sinc: Any
+single: Any
+singlecomplex: Any
+sort_complex: Any
+source: Any
+split: Any
+stack: Any
+str0: Any
+string_: Any
+sys: Any
+take_along_axis: Any
+testing: Any
+tile: Any
+trapz: Any
+tri: Any
+tril: Any
+tril_indices: Any
+tril_indices_from: Any
+trim_zeros: Any
+triu: Any
+triu_indices: Any
+triu_indices_from: Any
+typeDict: Any
+typecodes: Any
+typename: Any
+ubyte: Any
+uint: Any
+uint0: Any
+uintc: Any
+uintp: Any
+ulonglong: Any
+unicode_: Any
+union1d: Any
+unique: Any
+unpackbits: Any
+unravel_index: Any
+unwrap: Any
+ushort: Any
+vander: Any
+vdot: Any
+vectorize: Any
+version: Any
+void0: Any
+vsplit: Any
+vstack: Any
+where: Any
+who: Any
_NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray)
_ByteOrder = Literal["S", "<", ">", "=", "|", "L", "B", "N", "I"]
@@ -345,23 +673,10 @@ class _ArrayOrScalarCommon(
def __ne__(self, other): ...
def __gt__(self, other): ...
def __ge__(self, other): ...
- def __add__(self, other): ...
- def __radd__(self, other): ...
- def __sub__(self, other): ...
- def __rsub__(self, other): ...
- def __mul__(self, other): ...
- def __rmul__(self, other): ...
- def __truediv__(self, other): ...
- def __rtruediv__(self, other): ...
- def __floordiv__(self, other): ...
- def __rfloordiv__(self, other): ...
def __mod__(self, other): ...
def __rmod__(self, other): ...
def __divmod__(self, other): ...
def __rdivmod__(self, other): ...
- # NumPy's __pow__ doesn't handle a third argument
- def __pow__(self, other): ...
- def __rpow__(self, other): ...
def __lshift__(self, other): ...
def __rlshift__(self, other): ...
def __rshift__(self, other): ...
@@ -930,14 +1245,26 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
def __matmul__(self, other): ...
def __imatmul__(self, other): ...
def __rmatmul__(self, other): ...
+ def __add__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __radd__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __sub__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __rsub__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __mul__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __rmul__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __floordiv__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __rfloordiv__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __pow__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __rpow__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __truediv__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __rtruediv__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
# `np.generic` does not support inplace operations
- def __iadd__(self, other): ...
- def __isub__(self, other): ...
- def __imul__(self, other): ...
- def __itruediv__(self, other): ...
- def __ifloordiv__(self, other): ...
+ def __iadd__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
+ def __isub__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
+ def __imul__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
+ def __itruediv__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
+ def __ifloordiv__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
+ def __ipow__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
def __imod__(self, other): ...
- def __ipow__(self, other): ...
def __ilshift__(self, other): ...
def __irshift__(self, other): ...
def __iand__(self, other): ...
@@ -953,6 +1280,11 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
# See https://github.com/numpy/numpy-stubs/pull/80 for more details.
_CharLike = Union[str, bytes]
+_BoolLike = Union[bool, bool_]
+_IntLike = Union[int, integer]
+_FloatLike = Union[_IntLike, float, floating]
+_ComplexLike = Union[_FloatLike, complex, complexfloating]
+_NumberLike = Union[int, float, complex, number, bool_]
class generic(_ArrayOrScalarCommon):
@abstractmethod
@@ -965,6 +1297,19 @@ class number(generic): # type: ignore
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
def imag(self: _ArraySelf) -> _ArraySelf: ...
+ # Ensure that objects annotated as `number` support arithmetic operations
+ __add__: _NumberOp
+ __radd__: _NumberOp
+ __sub__: _NumberOp
+ __rsub__: _NumberOp
+ __mul__: _NumberOp
+ __rmul__: _NumberOp
+ __floordiv__: _NumberOp
+ __rfloordiv__: _NumberOp
+ __pow__: _NumberOp
+ __rpow__: _NumberOp
+ __truediv__: _NumberOp
+ __rtruediv__: _NumberOp
class bool_(generic):
def __init__(self, __value: object = ...) -> None: ...
@@ -972,6 +1317,18 @@ class bool_(generic):
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
def imag(self: _ArraySelf) -> _ArraySelf: ...
+ __add__: _BoolOp[bool_]
+ __radd__: _BoolOp[bool_]
+ __sub__: _BoolSub
+ __rsub__: _BoolSub
+ __mul__: _BoolOp[bool_]
+ __rmul__: _BoolOp[bool_]
+ __floordiv__: _BoolOp[int8]
+ __rfloordiv__: _BoolOp[int8]
+ __pow__: _BoolOp[int8]
+ __rpow__: _BoolOp[int8]
+ __truediv__: _BoolTrueDiv
+ __rtruediv__: _BoolTrueDiv
class object_(generic):
def __init__(self, __value: object = ...) -> None: ...
@@ -988,10 +1345,18 @@ class datetime64(generic):
__format: Union[_CharLike, Tuple[_CharLike, _IntLike]] = ...,
) -> None: ...
@overload
- def __init__(self, __value: int, __format: Union[_CharLike, Tuple[_CharLike, _IntLike]]) -> None: ...
- def __add__(self, other: Union[timedelta64, int]) -> datetime64: ...
- def __sub__(self, other: Union[timedelta64, datetime64, int]) -> timedelta64: ...
- def __rsub__(self, other: Union[datetime64, int]) -> timedelta64: ...
+ def __init__(
+ self,
+ __value: int,
+ __format: Union[_CharLike, Tuple[_CharLike, _IntLike]]
+ ) -> None: ...
+ def __add__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> datetime64: ...
+ def __radd__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> datetime64: ...
+ @overload
+ def __sub__(self, other: datetime64) -> timedelta64: ...
+ @overload
+ def __sub__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> datetime64: ...
+ def __rsub__(self, other: datetime64) -> timedelta64: ...
# Support for `__index__` was added in python 3.8 (bpo-20092)
if sys.version_info >= (3, 8):
@@ -1007,8 +1372,20 @@ class integer(number): # type: ignore
# NOTE: `__index__` is technically defined in the bottom-most
# sub-classes (`int64`, `uint32`, etc)
def __index__(self) -> int: ...
-
-class signedinteger(integer): ... # type: ignore
+ __truediv__: _IntTrueDiv
+ __rtruediv__: _IntTrueDiv
+
+class signedinteger(integer): # type: ignore
+ __add__: _SignedIntOp
+ __radd__: _SignedIntOp
+ __sub__: _SignedIntOp
+ __rsub__: _SignedIntOp
+ __mul__: _SignedIntOp
+ __rmul__: _SignedIntOp
+ __floordiv__: _SignedIntOp
+ __rfloordiv__: _SignedIntOp
+ __pow__: _SignedIntOp
+ __rpow__: _SignedIntOp
class int8(signedinteger):
def __init__(self, __value: _IntValue = ...) -> None: ...
@@ -1022,24 +1399,36 @@ class int32(signedinteger):
class int64(signedinteger):
def __init__(self, __value: _IntValue = ...) -> None: ...
-class timedelta64(signedinteger):
+class timedelta64(generic):
def __init__(
self,
__value: Union[None, int, _CharLike, dt.timedelta, timedelta64] = ...,
__format: Union[_CharLike, Tuple[_CharLike, _IntLike]] = ...,
) -> None: ...
- @overload
- def __add__(self, other: Union[timedelta64, int]) -> timedelta64: ...
- @overload
- def __add__(self, other: datetime64) -> datetime64: ...
- def __sub__(self, other: Union[timedelta64, int]) -> timedelta64: ...
- @overload
- def __truediv__(self, other: timedelta64) -> float: ...
- @overload
- def __truediv__(self, other: float) -> timedelta64: ...
+ def __add__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ...
+ def __radd__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ...
+ def __sub__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ...
+ def __rsub__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ...
+ def __mul__(self, other: Union[_FloatLike, _BoolLike]) -> timedelta64: ...
+ def __rmul__(self, other: Union[_FloatLike, _BoolLike]) -> timedelta64: ...
+ __truediv__: _TD64Div[float64]
+ __floordiv__: _TD64Div[signedinteger]
+ def __rtruediv__(self, other: timedelta64) -> float64: ...
+ def __rfloordiv__(self, other: timedelta64) -> signedinteger: ...
def __mod__(self, other: timedelta64) -> timedelta64: ...
-class unsignedinteger(integer): ... # type: ignore
+class unsignedinteger(integer): # type: ignore
+ # NOTE: `uint64 + signedinteger -> float64`
+ __add__: _UnsignedIntOp
+ __radd__: _UnsignedIntOp
+ __sub__: _UnsignedIntOp
+ __rsub__: _UnsignedIntOp
+ __mul__: _UnsignedIntOp
+ __rmul__: _UnsignedIntOp
+ __floordiv__: _UnsignedIntOp
+ __rfloordiv__: _UnsignedIntOp
+ __pow__: _UnsignedIntOp
+ __rpow__: _UnsignedIntOp
class uint8(unsignedinteger):
def __init__(self, __value: _IntValue = ...) -> None: ...
@@ -1054,7 +1443,20 @@ class uint64(unsignedinteger):
def __init__(self, __value: _IntValue = ...) -> None: ...
class inexact(number): ... # type: ignore
-class floating(inexact): ... # type: ignore
+
+class floating(inexact): # type: ignore
+ __add__: _FloatOp
+ __radd__: _FloatOp
+ __sub__: _FloatOp
+ __rsub__: _FloatOp
+ __mul__: _FloatOp
+ __rmul__: _FloatOp
+ __truediv__: _FloatOp
+ __rtruediv__: _FloatOp
+ __floordiv__: _FloatOp
+ __rfloordiv__: _FloatOp
+ __pow__: _FloatOp
+ __rpow__: _FloatOp
_FloatType = TypeVar('_FloatType', bound=floating)
@@ -1073,6 +1475,18 @@ class complexfloating(inexact, Generic[_FloatType]): # type: ignore
@property
def imag(self) -> _FloatType: ... # type: ignore[override]
def __abs__(self) -> _FloatType: ... # type: ignore[override]
+ __add__: _ComplexOp
+ __radd__: _ComplexOp
+ __sub__: _ComplexOp
+ __rsub__: _ComplexOp
+ __mul__: _ComplexOp
+ __rmul__: _ComplexOp
+ __truediv__: _ComplexOp
+ __rtruediv__: _ComplexOp
+ __floordiv__: _ComplexOp
+ __rfloordiv__: _ComplexOp
+ __pow__: _ComplexOp
+ __rpow__: _ComplexOp
class complex64(complexfloating[float32]):
def __init__(self, __value: _ComplexValue = ...) -> None: ...
@@ -1083,7 +1497,7 @@ class complex128(complexfloating[float64], complex):
class flexible(generic): ... # type: ignore
class void(flexible):
- def __init__(self, __value: Union[int, integer, bool_, bytes]): ...
+ def __init__(self, __value: Union[_IntLike, _BoolLike, bytes]): ...
@property
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
@@ -1094,6 +1508,9 @@ class void(flexible):
class character(flexible): ... # type: ignore
+# NOTE: Most `np.bytes_` / `np.str_` methods return their
+# builtin `bytes` / `str` counterpart
+
class bytes_(character, bytes):
@overload
def __init__(self, __value: object = ...) -> None: ...
@@ -1495,7 +1912,3 @@ def sctype2char(sctype: object) -> str: ...
def find_common_type(
array_types: Sequence[DtypeLike], scalar_types: Sequence[DtypeLike]
) -> dtype: ...
-
-_NumberLike = Union[int, float, complex, number, bool_]
-_IntLike = Union[int, integer]
-_BoolLike = Union[bool, bool_]
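
The arithmetic annotations added above come from the protocols in numpy/typing/_callable.py (changed in this diff but not shown here). A rough sketch of the runtime behaviour those annotations are meant to describe; the comments name the protocol that annotates each operation, and the exact static types a checker reports depend on those protocol definitions:

    import numpy as np

    np.ones(3) + 1                         # typed as Union[ndarray, generic] by the ndarray stub
    np.bool_(True) + True                  # bool_ at runtime; annotated via _BoolOp[bool_]
    np.bool_(True) / True                  # 1.0 (float64) at runtime; annotated via _BoolTrueDiv
    np.uint64(1) + np.int64(1)             # float64 at runtime (see the uint64 NOTE); _UnsignedIntOp
    np.timedelta64(1, "s") / np.timedelta64(2, "s")   # float64 at runtime; _TD64Div[float64]
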
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index c77885954..a0769cc89 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -113,10 +113,9 @@ __all__ += getlimits.__all__
__all__ += shape_base.__all__
__all__ += einsumfunc.__all__
-# Make it possible so that ufuncs can be pickled
-# Here are the loading and unloading functions
-# The name numpy.core._ufunc_reconstruct must be
-# available for unpickling to work.
+# We used to use `np.core._ufunc_reconstruct` to unpickle. This is unnecessary,
+# but old pickles saved before 1.20 will be using it, and there is no reason
+# to break loading them.
def _ufunc_reconstruct(module, name):
# The `fromlist` kwarg is required to ensure that `mod` points to the
# inner-most module rather than the parent package when module name is
@@ -126,14 +125,17 @@ def _ufunc_reconstruct(module, name):
return getattr(mod, name)
def _ufunc_reduce(func):
- from pickle import whichmodule
- name = func.__name__
- return _ufunc_reconstruct, (whichmodule(func, name), name)
+ # Report the `__name__`. pickle will try to find the module. Note that
+ # pickle allows this `__name__` to be a `__qualname__`. It may
+ # make sense to add a `__qualname__` to ufuncs, to allow this more
+ # explicitly (Numba has ufuncs as attributes).
+ # See also: https://github.com/dask/distributed/issues/3450
+ return func.__name__
import copyreg
-copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)
+copyreg.pickle(ufunc, _ufunc_reduce)
# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)
del copyreg
del _ufunc_reduce
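
With this change, pickling a ufunc stores only its `__name__`, and pickle resolves that name from the defining module on load; `_ufunc_reconstruct` is kept only so pickles written before 1.20 still load. A small round-trip illustration:

    import pickle
    import numpy as np

    payload = pickle.dumps(np.add)           # reduced to the name "add", found via its module
    assert pickle.loads(payload) is np.add   # same ufunc object after the round trip
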
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 879b3645d..b8cf12c60 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -2618,7 +2618,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
- Return indices of the minimum values along the given axis of `a`.
+ Return indices of the minimum values along the given axis.
Refer to `numpy.argmin` for detailed documentation.
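
For context, a short example of the method this docstring describes; with an `axis` the indices are taken along that axis, otherwise into the flattened array:

    import numpy as np

    a = np.array([[4, 2],
                  [1, 3]])
    print(a.argmin())        # 2, index into the flattened array
    print(a.argmin(axis=1))  # [1 0], per-row index of the minimum
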
diff --git a/numpy/core/_asarray.pyi b/numpy/core/_asarray.pyi
new file mode 100644
index 000000000..e074d69d2
--- /dev/null
+++ b/numpy/core/_asarray.pyi
@@ -0,0 +1,77 @@
+import sys
+from typing import TypeVar, Union, Iterable, overload
+
+from numpy import ndarray, _OrderKACF
+from numpy.typing import ArrayLike, DtypeLike
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from typing_extensions import Literal
+
+_ArrayType = TypeVar("_ArrayType", bound=ndarray)
+
+def asarray(
+ a: object,
+ dtype: DtypeLike = ...,
+ order: _OrderKACF = ...,
+ *,
+ like: ArrayLike = ...
+) -> ndarray: ...
+@overload
+def asanyarray(
+ a: _ArrayType,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ *,
+ like: ArrayLike = ...
+) -> _ArrayType: ...
+@overload
+def asanyarray(
+ a: object,
+ dtype: DtypeLike = ...,
+ order: _OrderKACF = ...,
+ *,
+ like: ArrayLike = ...
+) -> ndarray: ...
+def ascontiguousarray(
+ a: object, dtype: DtypeLike = ..., *, like: ArrayLike = ...
+) -> ndarray: ...
+def asfortranarray(
+ a: object, dtype: DtypeLike = ..., *, like: ArrayLike = ...
+) -> ndarray: ...
+
+_Requirements = Literal[
+ "C", "C_CONTIGUOUS", "CONTIGUOUS",
+ "F", "F_CONTIGUOUS", "FORTRAN",
+ "A", "ALIGNED",
+ "W", "WRITEABLE",
+ "O", "OWNDATA"
+]
+_E = Literal["E", "ENSUREARRAY"]
+_RequirementsWithE = Union[_Requirements, _E]
+
+@overload
+def require(
+ a: _ArrayType,
+ dtype: None = ...,
+ requirements: Union[None, _Requirements, Iterable[_Requirements]] = ...,
+ *,
+ like: ArrayLike = ...
+) -> _ArrayType: ...
+@overload
+def require(
+ a: object,
+ dtype: DtypeLike = ...,
+ requirements: Union[_E, Iterable[_RequirementsWithE]] = ...,
+ *,
+ like: ArrayLike = ...
+) -> ndarray: ...
+@overload
+def require(
+ a: object,
+ dtype: DtypeLike = ...,
+ requirements: Union[None, _Requirements, Iterable[_Requirements]] = ...,
+ *,
+ like: ArrayLike = ...
+) -> ndarray: ...
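
The `asanyarray`/`require` overloads above distinguish the call that can pass an ndarray subclass through unchanged (no `dtype` given) from the general case that returns a plain `ndarray`. A rough sketch of the runtime behaviour these stubs describe; `MyArray` is a throwaway subclass used only for illustration, and how precisely a type checker reports the results depends on the rest of the stubs:

    import numpy as np

    class MyArray(np.ndarray):   # trivial subclass, for illustration only
        pass

    m = np.zeros(3).view(MyArray)

    print(type(np.asanyarray(m)))                  # MyArray: subclass passes through
    print(type(np.asarray(m)))                     # ndarray: always the base class
    print(type(np.require(m, requirements="C")))   # MyArray: no dtype given, subclass kept
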
diff --git a/numpy/core/_dtype.py b/numpy/core/_dtype.py
index 50aeeb5bc..4249071ff 100644
--- a/numpy/core/_dtype.py
+++ b/numpy/core/_dtype.py
@@ -176,7 +176,7 @@ def _byte_order_str(dtype):
def _datetime_metadata_str(dtype):
- # TODO: this duplicates the C append_metastr_to_string
+ # TODO: this duplicates the C metastr_to_unicode functionality
unit, count = np.datetime_data(dtype)
if unit == 'generic':
return ''
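
The metadata string built here is the `[unit]` (or `[count unit]`) suffix of a datetime dtype name, driven by `np.datetime_data`; a quick illustration of the pieces involved:

    import numpy as np

    dt = np.dtype("datetime64[25s]")
    print(np.datetime_data(dt))          # ('s', 25)
    print(str(dt))                       # datetime64[25s]
    print(str(np.dtype("datetime64")))   # datetime64  (generic unit, no bracketed suffix)
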
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 82cd6fb27..1f8e4a6fb 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -2646,8 +2646,8 @@ add_newdoc('numpy.core.umath', 'matmul',
Raises
------
ValueError
- If the last dimension of `a` is not the same size as
- the second-to-last dimension of `b`.
+ If the last dimension of `x1` is not the same size as
+ the second-to-last dimension of `x2`.
If a scalar value is passed in.
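
The corrected wording ties the two error conditions to the actual parameter names `x1` and `x2`; roughly, they surface like this (error text abbreviated and version-dependent):

    import numpy as np

    np.matmul(np.ones((2, 3)), np.ones((3, 4)))      # fine: inner dimensions match

    try:
        np.matmul(np.ones((2, 3)), np.ones((4, 5)))  # last dim of x1 != second-to-last of x2
    except ValueError as exc:
        print(exc)

    try:
        np.matmul(np.ones((2, 3)), 2.0)              # scalar operand is rejected
    except ValueError as exc:
        print(exc)
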
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index f65f4015c..e0942beca 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -1062,6 +1062,17 @@ def einsum(*operands, out=None, optimize=False, **kwargs):
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+ einops:
+ a similar verbose interface is provided by the
+ `einops <https://github.com/arogozhnikov/einops>`_ package to cover
+ additional operations: transpose, reshape/flatten, repeat/tile,
+ squeeze/unsqueeze and reductions.
+
+ opt_einsum:
+ `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
+ optimizes contraction order for einsum-like expressions
+ in a backend-agnostic manner.
+
Notes
-----
.. versionadded:: 1.6.0
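
Since the new See Also entries are about contraction-order optimization, a brief reminder of the `optimize`/`einsum_path` hooks that `einsum` itself already provides (shapes chosen arbitrarily):

    import numpy as np

    a = np.random.rand(10, 30)
    b = np.random.rand(30, 5)
    c = np.random.rand(5, 60)

    # Ask for the contraction order up front, then reuse it.
    path, description = np.einsum_path("ij,jk,kl->il", a, b, c, optimize="optimal")
    result = np.einsum("ij,jk,kl->il", a, b, c, optimize=path)
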
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index 6eca4afdb..6bf54938f 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -1839,6 +1839,10 @@ typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size,
PyArray_DTypeMeta *cls, PyTypeObject *obj);
typedef PyArray_Descr *(default_descr_function)(PyArray_DTypeMeta *cls);
+ typedef PyArray_DTypeMeta *(common_dtype_function)(
+ PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2);
+ typedef PyArray_Descr *(common_instance_function)(
+ PyArray_Descr *dtype1, PyArray_Descr *dtype2);
/*
* While NumPy DTypes would not need to be heap types the plan is to
@@ -1894,6 +1898,8 @@ typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size,
discover_descr_from_pyobject_function *discover_descr_from_pyobject;
is_known_scalar_type_function *is_known_scalar_type;
default_descr_function *default_descr;
+ common_dtype_function *common_dtype;
+ common_instance_function *common_instance;
};
#endif /* NPY_INTERNAL_BUILD */
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 4bc06fc96..191cd244f 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -28,6 +28,30 @@ extern "C" {
* PyInt -> PyLong
*/
+
+/*
+ * This is a renamed copy of the Python non-limited API function _PyLong_AsInt. It is
+ * included here because it is missing from the PyPy API. It completes the PyLong_As*
+ * group of functions and can be useful in replacing PyInt_Check.
+ */
+static NPY_INLINE int
+Npy__PyLong_AsInt(PyObject *obj)
+{
+ int overflow;
+ long result = PyLong_AsLongAndOverflow(obj, &overflow);
+
+ /* INT_MAX and INT_MIN are defined in Python.h */
+ if (overflow || result > INT_MAX || result < INT_MIN) {
+ /* XXX: could be cute and give a different
+ message for overflow == -1 */
+ PyErr_SetString(PyExc_OverflowError,
+ "Python int too large to convert to C int");
+ return -1;
+ }
+ return (int)result;
+}
+
+
#if defined(NPY_PY3K)
/* Return True only if the long fits in a C long */
static NPY_INLINE int PyInt_Check(PyObject *op) {
@@ -39,6 +63,7 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
return (overflow == 0);
}
+
#define PyInt_FromLong PyLong_FromLong
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AsLong
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index 540d1ea9b..6b335f1a6 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -1100,7 +1100,7 @@ def putmask(a, mask, values):
Parameters
----------
- a : array_like
+ a : ndarray
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
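
The parameter is documented as `ndarray` because `putmask` writes into its first argument in place, so an arbitrary array_like such as a plain list is rejected with a `TypeError`. For example:

    import numpy as np

    a = np.arange(6)
    np.putmask(a, a > 3, a ** 2)   # modifies `a` in place where the mask is True
    print(a)                       # [ 0  1  2  3 16 25]

    # np.putmask([0, 1, 2], [True, False, True], 9) raises TypeError,
    # because a list cannot be modified in place.
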
diff --git a/numpy/core/src/common/array_assign.c b/numpy/core/src/common/array_assign.c
index 67abcae24..c55f6bdb4 100644
--- a/numpy/core/src/common/array_assign.c
+++ b/numpy/core/src/common/array_assign.c
@@ -64,19 +64,22 @@ broadcast_strides(int ndim, npy_intp const *shape,
return 0;
broadcast_error: {
- PyObject *errmsg;
-
- errmsg = PyUnicode_FromFormat("could not broadcast %s from shape ",
- strides_name);
- PyUString_ConcatAndDel(&errmsg,
- build_shape_string(strides_ndim, strides_shape));
- PyUString_ConcatAndDel(&errmsg,
- PyUnicode_FromString(" into shape "));
- PyUString_ConcatAndDel(&errmsg,
- build_shape_string(ndim, shape));
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
+ PyObject *shape1 = convert_shape_to_string(strides_ndim,
+ strides_shape, "");
+ if (shape1 == NULL) {
+ return -1;
+ }
+ PyObject *shape2 = convert_shape_to_string(ndim, shape, "");
+ if (shape2 == NULL) {
+ Py_DECREF(shape1);
+ return -1;
+ }
+ PyErr_Format(PyExc_ValueError,
+ "could not broadcast %s from shape %S into shape %S",
+ strides_name, shape1, shape2);
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
return -1;
}
}
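
The rewritten error path builds the familiar broadcasting message with `PyErr_Format` and `%S` instead of `PyUString_ConcatAndDel`; from Python it surfaces, for example, as:

    import numpy as np

    a = np.zeros((3, 4))
    try:
        a[...] = np.ones((2, 5))
    except ValueError as exc:
        print(exc)   # could not broadcast input array from shape (2,5) into shape (3,4)
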
diff --git a/numpy/core/src/multiarray/_datetime.h b/numpy/core/src/multiarray/_datetime.h
index 4e7ade5ed..421b03f93 100644
--- a/numpy/core/src/multiarray/_datetime.h
+++ b/numpy/core/src/multiarray/_datetime.h
@@ -200,17 +200,15 @@ convert_pyobject_to_datetime_metadata(PyObject *obj,
PyArray_DatetimeMetaData *out_meta);
/*
- * 'ret' is a PyUString containing the datetime string, and this
- * function appends the metadata string to it.
+ * Returns datetime metadata as a new reference to a Unicode object.
+ * Returns NULL on error.
*
* If 'skip_brackets' is true, skips the '[]'.
*
- * This function steals the reference 'ret'
*/
NPY_NO_EXPORT PyObject *
-append_metastr_to_string(PyArray_DatetimeMetaData *meta,
- int skip_brackets,
- PyObject *ret);
+metastr_to_unicode(PyArray_DatetimeMetaData *meta, int skip_brackets);
+
/*
* Tests for and converts a Python datetime.datetime or datetime.date
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index ea04c82bd..0bf6958cd 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -176,17 +176,20 @@ test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args)
/* Compute boundaries for the neighborhood iterator */
for (i = 0; i < 2 * PyArray_NDIM(ax); ++i) {
PyObject* bound;
+
bound = PySequence_GetItem(b, i);
if (bound == NULL) {
goto clean_itx;
}
- if (!PyInt_Check(bound)) {
+ /* PyLong_AsSsize checks for PyLong */
+ bounds[i] = PyLong_AsSsize_t(bound);
+ if (error_converting(bounds[i])) {
+ PyErr_Clear();
PyErr_SetString(PyExc_ValueError,
- "bound not long");
+ "bound is invalid");
Py_DECREF(bound);
goto clean_itx;
}
- bounds[i] = PyLong_AsSsize_t(bound);
Py_DECREF(bound);
}
@@ -335,17 +338,20 @@ test_neighborhood_iterator_oob(PyObject* NPY_UNUSED(self), PyObject* args)
/* Compute boundaries for the neighborhood iterator */
for (i = 0; i < 2 * PyArray_NDIM(ax); ++i) {
PyObject* bound;
+
bound = PySequence_GetItem(b1, i);
if (bound == NULL) {
goto clean_itx;
}
- if (!PyInt_Check(bound)) {
+ /* PyLong_AsSsize checks for PyLong */
+ bounds[i] = PyLong_AsSsize_t(bound);
+ if (error_converting(bounds[i])) {
+ PyErr_Clear();
PyErr_SetString(PyExc_ValueError,
- "bound not long");
+ "bound is invalid");
Py_DECREF(bound);
goto clean_itx;
}
- bounds[i] = PyLong_AsSsize_t(bound);
Py_DECREF(bound);
}
@@ -359,17 +365,20 @@ test_neighborhood_iterator_oob(PyObject* NPY_UNUSED(self), PyObject* args)
for (i = 0; i < 2 * PyArray_NDIM(ax); ++i) {
PyObject* bound;
+
bound = PySequence_GetItem(b2, i);
if (bound == NULL) {
goto clean_itx;
}
- if (!PyInt_Check(bound)) {
+ /* PyLong_AsSsize checks for PyLong */
+ bounds[i] = PyLong_AsSsize_t(bound);
+ if (error_converting(bounds[i])) {
+ PyErr_Clear();
PyErr_SetString(PyExc_ValueError,
- "bound not long");
+ "bound is invalid");
Py_DECREF(bound);
goto clean_itx;
}
- bounds[i] = PyLong_AsSsize_t(bound);
Py_DECREF(bound);
}
diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c
index 3f3fd1387..64a06d58b 100644
--- a/numpy/core/src/multiarray/array_coercion.c
+++ b/numpy/core/src/multiarray/array_coercion.c
@@ -306,51 +306,6 @@ discover_dtype_from_pyobject(
}
-/*
- * This function should probably become public API eventually. At this
- * time it is implemented by falling back to `PyArray_AdaptFlexibleDType`.
- * We will use `CastingImpl[from, to].adjust_descriptors(...)` to implement
- * this logic.
- */
-static NPY_INLINE PyArray_Descr *
-cast_descriptor_to_fixed_dtype(
- PyArray_Descr *descr, PyArray_DTypeMeta *fixed_DType)
-{
- if (fixed_DType == NULL) {
- /* Nothing to do, we only need to promote the new dtype */
- Py_INCREF(descr);
- return descr;
- }
-
- if (!fixed_DType->parametric) {
- /*
- * Don't actually do anything, the default is always the result
- * of any cast.
- */
- return fixed_DType->default_descr(fixed_DType);
- }
- if (PyObject_TypeCheck((PyObject *)descr, (PyTypeObject *)fixed_DType)) {
- Py_INCREF(descr);
- return descr;
- }
- /*
- * TODO: When this is implemented for all dtypes, the special cases
- * can be removed...
- */
- if (fixed_DType->legacy && fixed_DType->parametric &&
- NPY_DTYPE(descr)->legacy) {
- PyArray_Descr *flex_dtype = PyArray_DescrFromType(fixed_DType->type_num);
- return PyArray_AdaptFlexibleDType(descr, flex_dtype);
- }
-
- PyErr_SetString(PyExc_NotImplementedError,
- "Must use casting to find the correct dtype, this is "
- "not yet implemented! "
- "(It should not be possible to hit this code currently!)");
- return NULL;
-}
-
-
/**
* Discover the correct descriptor from a known DType class and scalar.
* If the fixed DType can discover a dtype instance/descr all is fine,
@@ -392,7 +347,7 @@ find_scalar_descriptor(
return descr;
}
- Py_SETREF(descr, cast_descriptor_to_fixed_dtype(descr, fixed_DType));
+ Py_SETREF(descr, PyArray_CastDescrToDType(descr, fixed_DType));
return descr;
}
@@ -583,7 +538,7 @@ npy_new_coercion_cache(
cache = _coercion_cache_cache[_coercion_cache_num];
}
else {
- cache = PyObject_MALLOC(sizeof(coercion_cache_obj));
+ cache = PyMem_Malloc(sizeof(coercion_cache_obj));
}
if (cache == NULL) {
PyErr_NoMemory();
@@ -615,7 +570,7 @@ npy_unlink_coercion_cache(coercion_cache_obj *current)
_coercion_cache_num++;
}
else {
- PyObject_FREE(current);
+ PyMem_Free(current);
}
return next;
}
@@ -727,8 +682,13 @@ find_descriptor_from_array(
enum _dtype_discovery_flags flags = 0;
*out_descr = NULL;
- if (NPY_UNLIKELY(DType != NULL && DType->parametric &&
- PyArray_ISOBJECT(arr))) {
+ if (DType == NULL) {
+ *out_descr = PyArray_DESCR(arr);
+ Py_INCREF(*out_descr);
+ return 0;
+ }
+
+ if (NPY_UNLIKELY(DType->parametric && PyArray_ISOBJECT(arr))) {
/*
* We have one special case, if (and only if) the input array is of
* object DType and the dtype is not fixed already but parametric.
@@ -777,7 +737,7 @@ find_descriptor_from_array(
}
Py_DECREF(iter);
}
- else if (DType != NULL && NPY_UNLIKELY(DType->type_num == NPY_DATETIME) &&
+ else if (NPY_UNLIKELY(DType->type_num == NPY_DATETIME) &&
PyArray_ISSTRING(arr)) {
/*
* TODO: This branch should be deprecated IMO, the workaround is
@@ -806,8 +766,7 @@ find_descriptor_from_array(
* If this is not an object array figure out the dtype cast,
* or simply use the returned DType.
*/
- *out_descr = cast_descriptor_to_fixed_dtype(
- PyArray_DESCR(arr), DType);
+ *out_descr = PyArray_CastDescrToDType(PyArray_DESCR(arr), DType);
if (*out_descr == NULL) {
return -1;
}
@@ -1325,15 +1284,9 @@ PyArray_DiscoverDTypeAndShape(
* the correct default.
*/
if (fixed_DType != NULL) {
- if (fixed_DType->default_descr == NULL) {
- Py_INCREF(fixed_DType->singleton);
- *out_descr = fixed_DType->singleton;
- }
- else {
- *out_descr = fixed_DType->default_descr(fixed_DType);
- if (*out_descr == NULL) {
- goto fail;
- }
+ *out_descr = fixed_DType->default_descr(fixed_DType);
+ if (*out_descr == NULL) {
+ goto fail;
}
}
}
diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c
index 613fe6b3f..8e3bde78f 100644
--- a/numpy/core/src/multiarray/arrayfunction_override.c
+++ b/numpy/core/src/multiarray/arrayfunction_override.c
@@ -388,15 +388,18 @@ array_implement_c_array_function_creation(
PyObject *numpy_module = PyImport_Import(npy_ma_str_numpy);
if (numpy_module == NULL) {
+ Py_DECREF(relevant_args);
return NULL;
}
PyObject *public_api = PyObject_GetAttrString(numpy_module, function_name);
Py_DECREF(numpy_module);
if (public_api == NULL) {
+ Py_DECREF(relevant_args);
return NULL;
}
if (!PyCallable_Check(public_api)) {
+ Py_DECREF(relevant_args);
Py_DECREF(public_api);
return PyErr_Format(PyExc_RuntimeError,
"numpy.%s is not callable.",
@@ -406,6 +409,7 @@ array_implement_c_array_function_creation(
PyObject* result = array_implement_array_function_internal(
public_api, relevant_args, args, kwargs);
+ Py_DECREF(relevant_args);
Py_DECREF(public_api);
return result;
}
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 6af71f351..841ed799d 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -233,7 +233,6 @@ NPY_NO_EXPORT PyObject *
convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
{
npy_intp i;
- PyObject *ret, *tmp;
/*
* Negative dimension indicates "newaxis", which can
@@ -245,14 +244,14 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
if (i == n) {
return PyUnicode_FromFormat("()%s", ending);
}
- else {
- ret = PyUnicode_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
- if (ret == NULL) {
- return NULL;
- }
- }
+ PyObject *ret = PyUnicode_FromFormat("%" NPY_INTP_FMT, vals[i++]);
+ if (ret == NULL) {
+ return NULL;
+ }
for (; i < n; ++i) {
+ PyObject *tmp;
+
if (vals[i] < 0) {
tmp = PyUnicode_FromString(",newaxis");
}
@@ -264,19 +263,19 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
return NULL;
}
- PyUString_ConcatAndDel(&ret, tmp);
+ Py_SETREF(ret, PyUnicode_Concat(ret, tmp));
+ Py_DECREF(tmp);
if (ret == NULL) {
return NULL;
}
}
if (i == 1) {
- tmp = PyUnicode_FromFormat(",)%s", ending);
+ Py_SETREF(ret, PyUnicode_FromFormat("(%S,)%s", ret, ending));
}
else {
- tmp = PyUnicode_FromFormat(")%s", ending);
+ Py_SETREF(ret, PyUnicode_FromFormat("(%S)%s", ret, ending));
}
- PyUString_ConcatAndDel(&ret, tmp);
return ret;
}
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index d9121707b..f700bdc99 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -1019,7 +1019,7 @@ promote_types(PyArray_Descr *type1, PyArray_Descr *type2,
* Returns a new reference to type if it is already NBO, otherwise
* returns a copy converted to NBO.
*/
-static PyArray_Descr *
+NPY_NO_EXPORT PyArray_Descr *
ensure_dtype_nbo(PyArray_Descr *type)
{
if (PyArray_ISNBO(type->byteorder)) {
@@ -1031,327 +1031,148 @@ ensure_dtype_nbo(PyArray_Descr *type)
}
}
-/*NUMPY_API
- * Produces the smallest size and lowest kind type to which both
- * input types can be cast.
+
+/**
+ * This function should possibly become public API eventually. At this
+ * time it is implemented by falling back to `PyArray_AdaptFlexibleDType`.
+ * We will use `CastingImpl[from, to].adjust_descriptors(...)` to implement
+ * this logic.
+ * Before that, the API needs to be reviewed though.
+ *
+ * WARNING: This function currently does not guarantee that `descr` can
+ * actually be cast to the given DType.
+ *
+ * @param descr The dtype instance to adapt "cast"
+ * @param given_DType The DType class for which we wish to find an instance able
+ * to represent `descr`.
+ * @returns Instance of `given_DType`. If `given_DType` is parametric the
+ * descr may be adapted to hold it.
*/
NPY_NO_EXPORT PyArray_Descr *
-PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2)
+PyArray_CastDescrToDType(PyArray_Descr *descr, PyArray_DTypeMeta *given_DType)
{
- int type_num1, type_num2, ret_type_num;
-
- /*
- * Fast path for identical dtypes.
- *
- * Non-native-byte-order types are converted to native ones below, so we
- * can't quit early.
- */
- if (type1 == type2 && PyArray_ISNBO(type1->byteorder)) {
- Py_INCREF(type1);
- return type1;
+ if (NPY_DTYPE(descr) == given_DType) {
+ Py_INCREF(descr);
+ return descr;
}
-
- type_num1 = type1->type_num;
- type_num2 = type2->type_num;
-
- /* If they're built-in types, use the promotion table */
- if (type_num1 < NPY_NTYPES && type_num2 < NPY_NTYPES) {
- ret_type_num = _npy_type_promotion_table[type_num1][type_num2];
+ if (!given_DType->parametric) {
/*
- * The table doesn't handle string/unicode/void/datetime/timedelta,
- * so check the result
+ * Don't actually do anything, the default is always the result
+ * of any cast.
*/
- if (ret_type_num >= 0) {
- return PyArray_DescrFromType(ret_type_num);
- }
+ return given_DType->default_descr(given_DType);
+ }
+ if (PyObject_TypeCheck((PyObject *)descr, (PyTypeObject *)given_DType)) {
+ Py_INCREF(descr);
+ return descr;
}
- /* If one or both are user defined, calculate it */
- else {
- int skind1 = NPY_NOSCALAR, skind2 = NPY_NOSCALAR, skind;
- if (PyArray_CanCastTo(type2, type1)) {
- /* Promoted types are always native byte order */
- return ensure_dtype_nbo(type1);
- }
- else if (PyArray_CanCastTo(type1, type2)) {
- /* Promoted types are always native byte order */
- return ensure_dtype_nbo(type2);
- }
+ if (!given_DType->legacy) {
+ PyErr_SetString(PyExc_NotImplementedError,
+ "Must use casting to find the correct DType for a parametric "
+ "user DType. This is not yet implemented (this error should be "
+ "unreachable).");
+ return NULL;
+ }
- /* Convert the 'kind' char into a scalar kind */
- switch (type1->kind) {
- case 'b':
- skind1 = NPY_BOOL_SCALAR;
- break;
- case 'u':
- skind1 = NPY_INTPOS_SCALAR;
- break;
- case 'i':
- skind1 = NPY_INTNEG_SCALAR;
- break;
- case 'f':
- skind1 = NPY_FLOAT_SCALAR;
- break;
- case 'c':
- skind1 = NPY_COMPLEX_SCALAR;
- break;
- }
- switch (type2->kind) {
- case 'b':
- skind2 = NPY_BOOL_SCALAR;
- break;
- case 'u':
- skind2 = NPY_INTPOS_SCALAR;
- break;
- case 'i':
- skind2 = NPY_INTNEG_SCALAR;
- break;
- case 'f':
- skind2 = NPY_FLOAT_SCALAR;
- break;
- case 'c':
- skind2 = NPY_COMPLEX_SCALAR;
- break;
- }
+ PyArray_Descr *flex_dtype = PyArray_DescrNew(given_DType->singleton);
+ return PyArray_AdaptFlexibleDType(descr, flex_dtype);
+}
- /* If both are scalars, there may be a promotion possible */
- if (skind1 != NPY_NOSCALAR && skind2 != NPY_NOSCALAR) {
- /* Start with the larger scalar kind */
- skind = (skind1 > skind2) ? skind1 : skind2;
- ret_type_num = _npy_smallest_type_of_kind_table[skind];
+/**
+ * This function defines the common DType operator.
+ *
+ * Note that the common DType will not be "object" (unless one of the dtypes
+ * is object), even though object can technically represent all values
+ * correctly.
+ *
+ * TODO: Before exposure, we should review the return value (e.g. no error
+ * when no common DType is found).
+ *
+ * @param dtype1 DType class to find the common type for.
+ * @param dtype2 Second DType class.
+ * @return The common DType or NULL with an error set
+ */
+NPY_NO_EXPORT PyArray_DTypeMeta *
+PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2)
+{
+ if (dtype1 == dtype2) {
+ Py_INCREF(dtype1);
+ return dtype1;
+ }
- for (;;) {
+ PyArray_DTypeMeta *common_dtype;
- /* If there is no larger type of this kind, try a larger kind */
- if (ret_type_num < 0) {
- ++skind;
- /* Use -1 to signal no promoted type found */
- if (skind < NPY_NSCALARKINDS) {
- ret_type_num = _npy_smallest_type_of_kind_table[skind];
- }
- else {
- break;
- }
- }
+ common_dtype = dtype1->common_dtype(dtype1, dtype2);
+ if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) {
+ Py_DECREF(common_dtype);
+ common_dtype = dtype2->common_dtype(dtype2, dtype1);
+ }
+ if (common_dtype == NULL) {
+ return NULL;
+ }
+ if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) {
+ Py_DECREF(Py_NotImplemented);
+ PyErr_Format(PyExc_TypeError,
+ "The DTypes %S and %S do not have a common DType. "
+ "For example they cannot be stored in a single array unless "
+ "the dtype is `object`.", dtype1, dtype2);
+ return NULL;
+ }
+ return common_dtype;
+}
- /* If we found a type to which we can promote both, done! */
- if (PyArray_CanCastSafely(type_num1, ret_type_num) &&
- PyArray_CanCastSafely(type_num2, ret_type_num)) {
- return PyArray_DescrFromType(ret_type_num);
- }
- /* Try the next larger type of this kind */
- ret_type_num = _npy_next_larger_type_table[ret_type_num];
- }
+/*NUMPY_API
+ * Produces the smallest size and lowest kind type to which both
+ * input types can be cast.
+ */
+NPY_NO_EXPORT PyArray_Descr *
+PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2)
+{
+ PyArray_DTypeMeta *common_dtype;
+ PyArray_Descr *res;
- }
+ /* Fast path for identical inputs (NOTE: This path preserves metadata!) */
+ if (type1 == type2 && PyArray_ISNBO(type1->byteorder)) {
+ Py_INCREF(type1);
+ return type1;
+ }
- PyErr_SetString(PyExc_TypeError,
- "invalid type promotion with custom data type");
+ common_dtype = PyArray_CommonDType(NPY_DTYPE(type1), NPY_DTYPE(type2));
+ if (common_dtype == NULL) {
return NULL;
}
- switch (type_num1) {
- /* BOOL can convert to anything except datetime/void */
- case NPY_BOOL:
- if (type_num2 == NPY_STRING || type_num2 == NPY_UNICODE) {
- int char_size = 1;
- if (type_num2 == NPY_UNICODE) {
- char_size = 4;
- }
- if (type2->elsize < 5 * char_size) {
- PyArray_Descr *ret = NULL;
- PyArray_Descr *temp = PyArray_DescrNew(type2);
- ret = ensure_dtype_nbo(temp);
- ret->elsize = 5 * char_size;
- Py_DECREF(temp);
- return ret;
- }
- return ensure_dtype_nbo(type2);
- }
- else if (type_num2 != NPY_DATETIME && type_num2 != NPY_VOID) {
- return ensure_dtype_nbo(type2);
- }
- break;
- /* For strings and unicodes, take the larger size */
- case NPY_STRING:
- if (type_num2 == NPY_STRING) {
- if (type1->elsize > type2->elsize) {
- return ensure_dtype_nbo(type1);
- }
- else {
- return ensure_dtype_nbo(type2);
- }
- }
- else if (type_num2 == NPY_UNICODE) {
- if (type2->elsize >= type1->elsize * 4) {
- return ensure_dtype_nbo(type2);
- }
- else {
- PyArray_Descr *d = PyArray_DescrNewFromType(NPY_UNICODE);
- if (d == NULL) {
- return NULL;
- }
- d->elsize = type1->elsize * 4;
- return d;
- }
- }
- /* Allow NUMBER -> STRING */
- else if (PyTypeNum_ISNUMBER(type_num2)) {
- PyArray_Descr *ret = NULL;
- PyArray_Descr *temp = PyArray_DescrNew(type1);
- PyDataType_MAKEUNSIZED(temp);
-
- temp = PyArray_AdaptFlexibleDType(type2, temp);
- if (temp == NULL) {
- return NULL;
- }
- if (temp->elsize > type1->elsize) {
- ret = ensure_dtype_nbo(temp);
- }
- else {
- ret = ensure_dtype_nbo(type1);
- }
- Py_DECREF(temp);
- return ret;
- }
- break;
- case NPY_UNICODE:
- if (type_num2 == NPY_UNICODE) {
- if (type1->elsize > type2->elsize) {
- return ensure_dtype_nbo(type1);
- }
- else {
- return ensure_dtype_nbo(type2);
- }
- }
- else if (type_num2 == NPY_STRING) {
- if (type1->elsize >= type2->elsize * 4) {
- return ensure_dtype_nbo(type1);
- }
- else {
- PyArray_Descr *d = PyArray_DescrNewFromType(NPY_UNICODE);
- if (d == NULL) {
- return NULL;
- }
- d->elsize = type2->elsize * 4;
- return d;
- }
- }
- /* Allow NUMBER -> UNICODE */
- else if (PyTypeNum_ISNUMBER(type_num2)) {
- PyArray_Descr *ret = NULL;
- PyArray_Descr *temp = PyArray_DescrNew(type1);
- PyDataType_MAKEUNSIZED(temp);
- temp = PyArray_AdaptFlexibleDType(type2, temp);
- if (temp == NULL) {
- return NULL;
- }
- if (temp->elsize > type1->elsize) {
- ret = ensure_dtype_nbo(temp);
- }
- else {
- ret = ensure_dtype_nbo(type1);
- }
- Py_DECREF(temp);
- return ret;
- }
- break;
- case NPY_DATETIME:
- case NPY_TIMEDELTA:
- if (type_num2 == NPY_DATETIME || type_num2 == NPY_TIMEDELTA) {
- return datetime_type_promotion(type1, type2);
- }
- break;
+ if (!common_dtype->parametric) {
+ res = common_dtype->default_descr(common_dtype);
+ Py_DECREF(common_dtype);
+ return res;
}
- switch (type_num2) {
- /* BOOL can convert to almost anything */
- case NPY_BOOL:
- if (type_num2 == NPY_STRING || type_num2 == NPY_UNICODE) {
- int char_size = 1;
- if (type_num2 == NPY_UNICODE) {
- char_size = 4;
- }
- if (type2->elsize < 5 * char_size) {
- PyArray_Descr *ret = NULL;
- PyArray_Descr *temp = PyArray_DescrNew(type2);
- ret = ensure_dtype_nbo(temp);
- ret->elsize = 5 * char_size;
- Py_DECREF(temp);
- return ret;
- }
- return ensure_dtype_nbo(type2);
- }
- else if (type_num1 != NPY_DATETIME && type_num1 != NPY_TIMEDELTA &&
- type_num1 != NPY_VOID) {
- return ensure_dtype_nbo(type1);
- }
- break;
- case NPY_STRING:
- /* Allow NUMBER -> STRING */
- if (PyTypeNum_ISNUMBER(type_num1)) {
- PyArray_Descr *ret = NULL;
- PyArray_Descr *temp = PyArray_DescrNew(type2);
- PyDataType_MAKEUNSIZED(temp);
- temp = PyArray_AdaptFlexibleDType(type1, temp);
- if (temp == NULL) {
- return NULL;
- }
- if (temp->elsize > type2->elsize) {
- ret = ensure_dtype_nbo(temp);
- }
- else {
- ret = ensure_dtype_nbo(type2);
- }
- Py_DECREF(temp);
- return ret;
- }
- break;
- case NPY_UNICODE:
- /* Allow NUMBER -> UNICODE */
- if (PyTypeNum_ISNUMBER(type_num1)) {
- PyArray_Descr *ret = NULL;
- PyArray_Descr *temp = PyArray_DescrNew(type2);
- PyDataType_MAKEUNSIZED(temp);
- temp = PyArray_AdaptFlexibleDType(type1, temp);
- if (temp == NULL) {
- return NULL;
- }
- if (temp->elsize > type2->elsize) {
- ret = ensure_dtype_nbo(temp);
- }
- else {
- ret = ensure_dtype_nbo(type2);
- }
- Py_DECREF(temp);
- return ret;
- }
- break;
- case NPY_TIMEDELTA:
- if (PyTypeNum_ISSIGNED(type_num1)) {
- return ensure_dtype_nbo(type2);
- }
- break;
+ /* Cast the input types to the common DType if necessary */
+ type1 = PyArray_CastDescrToDType(type1, common_dtype);
+ if (type1 == NULL) {
+ Py_DECREF(common_dtype);
+ return NULL;
}
-
- /* For types equivalent up to endianness, can return either */
- if (PyArray_CanCastTypeTo(type1, type2, NPY_EQUIV_CASTING)) {
- return ensure_dtype_nbo(type1);
+ type2 = PyArray_CastDescrToDType(type2, common_dtype);
+ if (type2 == NULL) {
+ Py_DECREF(type1);
+ Py_DECREF(common_dtype);
+ return NULL;
}
- /* TODO: Also combine fields, subarrays, strings, etc */
-
/*
- printf("invalid type promotion: ");
- PyObject_Print(type1, stdout, 0);
- printf(" ");
- PyObject_Print(type2, stdout, 0);
- printf("\n");
- */
- PyErr_SetString(PyExc_TypeError, "invalid type promotion");
- return NULL;
+ * And find the common instance of the two inputs
+ * NOTE: Common instance preserves metadata (normally that of one input)
+ */
+ res = common_dtype->common_instance(type1, type2);
+ Py_DECREF(type1);
+ Py_DECREF(type2);
+ Py_DECREF(common_dtype);
+ return res;
}
/*
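
A small self-contained model of the negotiation implemented by `PyArray_CommonDType` and the rewritten `PyArray_PromoteTypes`: each DType class is asked for a common DType, with a `NotImplemented` fallback to the other operand; a non-parametric result uses its default descriptor, a parametric one goes through `common_instance`. The class and function names below only mirror the C code, none of this is public NumPy API, and the toy DTypes exist purely for illustration:

    class Int32DType:
        parametric = False
        @classmethod
        def common_dtype(cls, other):
            # In this toy model, int32 only knows how to promote with float64.
            return Float64DType if other is Float64DType else NotImplemented
        @classmethod
        def default_descr(cls):
            return "int32"

    class Float64DType:
        parametric = False
        @classmethod
        def common_dtype(cls, other):
            return Float64DType
        @classmethod
        def default_descr(cls):
            return "float64"

    def common_dtype(DType1, DType2):
        # Mirrors PyArray_CommonDType, including the NotImplemented fallback.
        result = DType1.common_dtype(DType2)
        if result is NotImplemented:
            result = DType2.common_dtype(DType1)
        if result is NotImplemented:
            raise TypeError(f"{DType1} and {DType2} have no common DType")
        return result

    def promote(DType1, DType2):
        # Mirrors the non-parametric branch of the new PyArray_PromoteTypes.
        common = common_dtype(DType1, DType2)
        return common.default_descr()

    print(promote(Int32DType, Float64DType))   # float64

At the Python level the visible results are meant to be unchanged by the refactor; for example np.promote_types(np.uint8, np.int8) still gives dtype('int16') and np.promote_types(np.int32, np.float32) still gives dtype('float64').
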
diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h
index 9b7f39db2..a2b36b497 100644
--- a/numpy/core/src/multiarray/convert_datatype.h
+++ b/numpy/core/src/multiarray/convert_datatype.h
@@ -10,6 +10,9 @@ PyArray_ObjectType(PyObject *op, int minimum_type);
NPY_NO_EXPORT PyArrayObject **
PyArray_ConvertToCommonType(PyObject *op, int *retn);
+NPY_NO_EXPORT PyArray_DTypeMeta *
+PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2);
+
NPY_NO_EXPORT int
PyArray_ValidType(int type);
@@ -18,6 +21,9 @@ NPY_NO_EXPORT npy_bool
can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data,
PyArray_Descr *to, NPY_CASTING casting);
+NPY_NO_EXPORT PyArray_Descr *
+ensure_dtype_nbo(PyArray_Descr *type);
+
NPY_NO_EXPORT int
should_use_min_scalar(npy_intp narrs, PyArrayObject **arr,
npy_intp ndtypes, PyArray_Descr **dtypes);
@@ -49,4 +55,7 @@ npy_set_invalid_cast_error(
NPY_NO_EXPORT PyArray_Descr *
PyArray_AdaptFlexibleDType(PyArray_Descr *data_dtype, PyArray_Descr *flex_dtype);
+NPY_NO_EXPORT PyArray_Descr *
+PyArray_CastDescrToDType(PyArray_Descr *descr, PyArray_DTypeMeta *given_DType);
+
#endif
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 956dfd3bb..55c0a31f0 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -610,6 +610,7 @@ PyArray_AssignFromCache(PyArrayObject *self, coercion_cache_obj *cache) {
PyErr_SetString(PyExc_RuntimeError,
"Inconsistent object during array creation? "
"Content of sequences changed (cache not consumed).");
+ npy_free_coercion_cache(cache);
return -1;
}
return 0;
@@ -1467,6 +1468,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
PyErr_SetString(PyExc_TypeError,
"WRITEBACKIFCOPY used for non-array input.");
Py_DECREF(dtype);
+ npy_free_coercion_cache(cache);
return NULL;
}
@@ -1475,6 +1477,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
&PyArray_Type, dtype, ndim, dims, NULL, NULL,
flags&NPY_ARRAY_F_CONTIGUOUS, NULL);
if (ret == NULL) {
+ npy_free_coercion_cache(cache);
return NULL;
}
if (cache == NULL) {
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index f2225809a..4afc45fb6 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -1434,18 +1434,20 @@ raise_if_datetime64_metadata_cast_error(char *object_type,
return 0;
}
else {
- PyObject *errmsg;
- errmsg = PyUnicode_FromFormat("Cannot cast %s "
- "from metadata ", object_type);
- errmsg = append_metastr_to_string(src_meta, 0, errmsg);
- PyUString_ConcatAndDel(&errmsg,
- PyUnicode_FromString(" to "));
- errmsg = append_metastr_to_string(dst_meta, 0, errmsg);
- PyUString_ConcatAndDel(&errmsg,
- PyUnicode_FromFormat(" according to the rule %s",
- npy_casting_to_string(casting)));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+ PyObject *src = metastr_to_unicode(src_meta, 0);
+ if (src == NULL) {
+ return -1;
+ }
+ PyObject *dst = metastr_to_unicode(dst_meta, 0);
+ if (dst == NULL) {
+ Py_DECREF(src);
+ return -1;
+ }
+ PyErr_Format(PyExc_TypeError,
+ "Cannot cast %s from metadata %S to %S according to the rule %s",
+ object_type, src, dst, npy_casting_to_string(casting));
+ Py_DECREF(src);
+ Py_DECREF(dst);
return -1;
}
}
@@ -1466,18 +1468,20 @@ raise_if_timedelta64_metadata_cast_error(char *object_type,
return 0;
}
else {
- PyObject *errmsg;
- errmsg = PyUnicode_FromFormat("Cannot cast %s "
- "from metadata ", object_type);
- errmsg = append_metastr_to_string(src_meta, 0, errmsg);
- PyUString_ConcatAndDel(&errmsg,
- PyUnicode_FromString(" to "));
- errmsg = append_metastr_to_string(dst_meta, 0, errmsg);
- PyUString_ConcatAndDel(&errmsg,
- PyUnicode_FromFormat(" according to the rule %s",
- npy_casting_to_string(casting)));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+ PyObject *src = metastr_to_unicode(src_meta, 0);
+ if (src == NULL) {
+ return -1;
+ }
+ PyObject *dst = metastr_to_unicode(dst_meta, 0);
+ if (dst == NULL) {
+ Py_DECREF(src);
+ return -1;
+ }
+ PyErr_Format(PyExc_TypeError,
+ "Cannot cast %s from metadata %S to %S according to the rule %s",
+ object_type, src, dst, npy_casting_to_string(casting));
+ Py_DECREF(src);
+ Py_DECREF(dst);
return -1;
}
}
@@ -1600,32 +1604,38 @@ compute_datetime_metadata_greatest_common_divisor(
return 0;
incompatible_units: {
- PyObject *errmsg;
- errmsg = PyUnicode_FromString("Cannot get "
- "a common metadata divisor for "
- "NumPy datetime metadata ");
- errmsg = append_metastr_to_string(meta1, 0, errmsg);
- PyUString_ConcatAndDel(&errmsg,
- PyUnicode_FromString(" and "));
- errmsg = append_metastr_to_string(meta2, 0, errmsg);
- PyUString_ConcatAndDel(&errmsg,
- PyUnicode_FromString(" because they have "
- "incompatible nonlinear base time units"));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+ PyObject *umeta1 = metastr_to_unicode(meta1, 0);
+ if (umeta1 == NULL) {
+ return -1;
+ }
+ PyObject *umeta2 = metastr_to_unicode(meta2, 0);
+ if (umeta2 == NULL) {
+ Py_DECREF(umeta1);
+ return -1;
+ }
+ PyErr_Format(PyExc_TypeError,
+ "Cannot get a common metadata divisor for Numpy datatime "
+ "metadata %S and %S because they have incompatible nonlinear "
+ "base time units.", umeta1, umeta2);
+ Py_DECREF(umeta1);
+ Py_DECREF(umeta2);
return -1;
}
units_overflow: {
- PyObject *errmsg;
- errmsg = PyUnicode_FromString("Integer overflow "
- "getting a common metadata divisor for "
- "NumPy datetime metadata ");
- errmsg = append_metastr_to_string(meta1, 0, errmsg);
- PyUString_ConcatAndDel(&errmsg,
- PyUnicode_FromString(" and "));
- errmsg = append_metastr_to_string(meta2, 0, errmsg);
- PyErr_SetObject(PyExc_OverflowError, errmsg);
- Py_DECREF(errmsg);
+ PyObject *umeta1 = metastr_to_unicode(meta1, 0);
+ if (umeta1 == NULL) {
+ return -1;
+ }
+ PyObject *umeta2 = metastr_to_unicode(meta2, 0);
+ if (umeta2 == NULL) {
+ Py_DECREF(umeta1);
+ return -1;
+ }
+ PyErr_Format(PyExc_OverflowError,
+ "Integer overflow getting a common metadata divisor for "
+ "NumPy datetime metadata %S and %S.", umeta1, umeta2);
+ Py_DECREF(umeta1);
+ Py_DECREF(umeta2);
return -1;
}
}
@@ -1950,35 +1960,27 @@ convert_pyobject_to_datetime_metadata(PyObject *obj,
}
/*
- * 'ret' is a PyUString containing the datetime string, and this
- * function appends the metadata string to it.
+ * Return the datetime metadata as a Unicode object.
+ *
+ * Returns new reference, NULL on error.
*
* If 'skip_brackets' is true, skips the '[]'.
*
- * This function steals the reference 'ret'
*/
NPY_NO_EXPORT PyObject *
-append_metastr_to_string(PyArray_DatetimeMetaData *meta,
- int skip_brackets,
- PyObject *ret)
+metastr_to_unicode(PyArray_DatetimeMetaData *meta, int skip_brackets)
{
- PyObject *res;
int num;
char const *basestr;
- if (ret == NULL) {
- return NULL;
- }
-
if (meta->base == NPY_FR_GENERIC) {
/* Without brackets, give a string "generic" */
if (skip_brackets) {
- PyUString_ConcatAndDel(&ret, PyUnicode_FromString("generic"));
- return ret;
+ return PyUnicode_FromString("generic");
}
- /* But with brackets, append nothing */
+        /* But with brackets, return an empty string */
else {
- return ret;
+ return PyUnicode_FromString("");
}
}
@@ -1994,25 +1996,23 @@ append_metastr_to_string(PyArray_DatetimeMetaData *meta,
if (num == 1) {
if (skip_brackets) {
- res = PyUnicode_FromFormat("%s", basestr);
+ return PyUnicode_FromFormat("%s", basestr);
}
else {
- res = PyUnicode_FromFormat("[%s]", basestr);
+ return PyUnicode_FromFormat("[%s]", basestr);
}
}
else {
if (skip_brackets) {
- res = PyUnicode_FromFormat("%d%s", num, basestr);
+ return PyUnicode_FromFormat("%d%s", num, basestr);
}
else {
- res = PyUnicode_FromFormat("[%d%s]", num, basestr);
+ return PyUnicode_FromFormat("[%d%s]", num, basestr);
}
}
-
- PyUString_ConcatAndDel(&ret, res);
- return ret;
}
+
/*
* Adjusts a datetimestruct based on a seconds offset. Assumes
* the current values are valid.
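
For reference, these error paths are reachable from Python when datetime metadata
cannot be combined; a rough illustration (the message text is approximate):

    import numpy as np

    # Months and days have no common linear unit, so the "common metadata
    # divisor" path above raises.
    try:
        np.datetime64("2020-01", "M") + np.timedelta64(1, "D")
    except TypeError as exc:
        print(exc)
        # Cannot get a common metadata divisor for NumPy datetime metadata
        # [M] and [D] because they have incompatible nonlinear base time units.
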
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 6e378f626..24a3507bc 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -1892,18 +1892,26 @@ arraydescr_protocol_typestr_get(PyArray_Descr *self)
else {
ret = PyUnicode_FromFormat("%c%c%d", endian, basic_, size);
}
+ if (ret == NULL) {
+ return NULL;
+ }
+
if (PyDataType_ISDATETIME(self)) {
PyArray_DatetimeMetaData *meta;
-
meta = get_datetime_metadata_from_dtype(self);
if (meta == NULL) {
Py_DECREF(ret);
return NULL;
}
+ PyObject *umeta = metastr_to_unicode(meta, 0);
+ if (umeta == NULL) {
+ Py_DECREF(ret);
+ return NULL;
+ }
- ret = append_metastr_to_string(meta, 0, ret);
+ Py_SETREF(ret, PyUnicode_Concat(ret, umeta));
+ Py_DECREF(umeta);
}
-
return ret;
}
@@ -2890,14 +2898,13 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
}
if (PyDataType_ISDATETIME(self) && (metadata != NULL)) {
- PyObject *old_metadata, *errmsg;
+ PyObject *old_metadata;
PyArray_DatetimeMetaData temp_dt_data;
if ((! PyTuple_Check(metadata)) || (PyTuple_Size(metadata) != 2)) {
- errmsg = PyUnicode_FromString("Invalid datetime dtype (metadata, c_metadata): ");
- PyUString_ConcatAndDel(&errmsg, PyObject_Repr(metadata));
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
+ PyErr_Format(PyExc_ValueError,
+ "Invalid datetime dtype (metadata, c_metadata): %R",
+ metadata);
return NULL;
}
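
For reference, the metadata string produced by metastr_to_unicode() is what ends up
appended to the protocol typestr; the byte-order prefix depends on the platform, so
only the suffix is checked in this small illustration:

    import numpy as np

    assert np.dtype("datetime64[ns]").str.endswith("M8[ns]")
    assert np.dtype("timedelta64[s]").str.endswith("m8[s]")
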
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 42c66ee7f..af4e6c22e 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -1322,95 +1322,21 @@ get_unicode_to_datetime_transfer_function(int aligned,
return NPY_SUCCEED;
}
+
static int
-get_nbo_cast_transfer_function(int aligned,
- npy_intp src_stride, npy_intp dst_stride,
- PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
- int move_references,
- PyArray_StridedUnaryOp **out_stransfer,
- NpyAuxData **out_transferdata,
- int *out_needs_api,
- int *out_needs_wrap)
+get_legacy_dtype_cast_function(
+ int aligned, npy_intp src_stride, npy_intp dst_stride,
+ PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
+ int move_references,
+ PyArray_StridedUnaryOp **out_stransfer, NpyAuxData **out_transferdata,
+ int *out_needs_api, int *out_needs_wrap)
{
_strided_cast_data *data;
PyArray_VectorUnaryFunc *castfunc;
PyArray_Descr *tmp_dtype;
- npy_intp shape = 1, src_itemsize = src_dtype->elsize,
- dst_itemsize = dst_dtype->elsize;
-
- if (PyTypeNum_ISNUMBER(src_dtype->type_num) &&
- PyTypeNum_ISNUMBER(dst_dtype->type_num)) {
- *out_needs_wrap = !PyArray_ISNBO(src_dtype->byteorder) ||
- !PyArray_ISNBO(dst_dtype->byteorder);
- return get_nbo_cast_numeric_transfer_function(aligned,
- src_stride, dst_stride,
- src_dtype->type_num, dst_dtype->type_num,
- out_stransfer, out_transferdata);
- }
-
- if (src_dtype->type_num == NPY_DATETIME ||
- src_dtype->type_num == NPY_TIMEDELTA ||
- dst_dtype->type_num == NPY_DATETIME ||
- dst_dtype->type_num == NPY_TIMEDELTA) {
- /* A parameterized type, datetime->datetime sometimes needs casting */
- if ((src_dtype->type_num == NPY_DATETIME &&
- dst_dtype->type_num == NPY_DATETIME) ||
- (src_dtype->type_num == NPY_TIMEDELTA &&
- dst_dtype->type_num == NPY_TIMEDELTA)) {
- *out_needs_wrap = !PyArray_ISNBO(src_dtype->byteorder) ||
- !PyArray_ISNBO(dst_dtype->byteorder);
- return get_nbo_cast_datetime_transfer_function(aligned,
- src_stride, dst_stride,
- src_dtype, dst_dtype,
- out_stransfer, out_transferdata);
- }
-
- /*
- * Datetime <-> string conversions can be handled specially.
- * The functions may raise an error if the strings have no
- * space, or can't be parsed properly.
- */
- if (src_dtype->type_num == NPY_DATETIME) {
- switch (dst_dtype->type_num) {
- case NPY_STRING:
- *out_needs_api = 1;
- *out_needs_wrap = !PyArray_ISNBO(src_dtype->byteorder);
- return get_nbo_datetime_to_string_transfer_function(
- aligned,
- src_stride, dst_stride,
- src_dtype, dst_dtype,
- out_stransfer, out_transferdata);
-
- case NPY_UNICODE:
- return get_datetime_to_unicode_transfer_function(
- aligned,
- src_stride, dst_stride,
- src_dtype, dst_dtype,
- out_stransfer, out_transferdata,
- out_needs_api);
- }
- }
- else if (dst_dtype->type_num == NPY_DATETIME) {
- switch (src_dtype->type_num) {
- case NPY_STRING:
- *out_needs_api = 1;
- *out_needs_wrap = !PyArray_ISNBO(dst_dtype->byteorder);
- return get_nbo_string_to_datetime_transfer_function(
- aligned,
- src_stride, dst_stride,
- src_dtype, dst_dtype,
- out_stransfer, out_transferdata);
-
- case NPY_UNICODE:
- return get_unicode_to_datetime_transfer_function(
- aligned,
- src_stride, dst_stride,
- src_dtype, dst_dtype,
- out_stransfer, out_transferdata,
- out_needs_api);
- }
- }
- }
+ npy_intp shape = 1;
+ npy_intp src_itemsize = src_dtype->elsize;
+ npy_intp dst_itemsize = dst_dtype->elsize;
*out_needs_wrap = !aligned ||
!PyArray_ISNBO(src_dtype->byteorder) ||
@@ -1543,6 +1469,167 @@ get_nbo_cast_transfer_function(int aligned,
return NPY_SUCCEED;
}
+
+static int
+get_nbo_cast_transfer_function(int aligned,
+ npy_intp src_stride, npy_intp dst_stride,
+ PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
+ int move_references,
+ PyArray_StridedUnaryOp **out_stransfer,
+ NpyAuxData **out_transferdata,
+ int *out_needs_api,
+ int *out_needs_wrap)
+{
+ if (PyTypeNum_ISNUMBER(src_dtype->type_num) &&
+ PyTypeNum_ISNUMBER(dst_dtype->type_num)) {
+ *out_needs_wrap = !PyArray_ISNBO(src_dtype->byteorder) ||
+ !PyArray_ISNBO(dst_dtype->byteorder);
+ return get_nbo_cast_numeric_transfer_function(aligned,
+ src_stride, dst_stride,
+ src_dtype->type_num, dst_dtype->type_num,
+ out_stransfer, out_transferdata);
+ }
+
+ if (src_dtype->type_num == NPY_DATETIME ||
+ src_dtype->type_num == NPY_TIMEDELTA ||
+ dst_dtype->type_num == NPY_DATETIME ||
+ dst_dtype->type_num == NPY_TIMEDELTA) {
+ /* A parameterized type, datetime->datetime sometimes needs casting */
+ if ((src_dtype->type_num == NPY_DATETIME &&
+ dst_dtype->type_num == NPY_DATETIME) ||
+ (src_dtype->type_num == NPY_TIMEDELTA &&
+ dst_dtype->type_num == NPY_TIMEDELTA)) {
+ *out_needs_wrap = !PyArray_ISNBO(src_dtype->byteorder) ||
+ !PyArray_ISNBO(dst_dtype->byteorder);
+ return get_nbo_cast_datetime_transfer_function(aligned,
+ src_stride, dst_stride,
+ src_dtype, dst_dtype,
+ out_stransfer, out_transferdata);
+ }
+
+ /*
+ * Datetime <-> string conversions can be handled specially.
+ * The functions may raise an error if the strings have no
+ * space, or can't be parsed properly.
+ */
+ if (src_dtype->type_num == NPY_DATETIME) {
+ switch (dst_dtype->type_num) {
+ case NPY_STRING:
+ *out_needs_api = 1;
+ *out_needs_wrap = !PyArray_ISNBO(src_dtype->byteorder);
+ return get_nbo_datetime_to_string_transfer_function(
+ aligned,
+ src_stride, dst_stride,
+ src_dtype, dst_dtype,
+ out_stransfer, out_transferdata);
+
+ case NPY_UNICODE:
+ return get_datetime_to_unicode_transfer_function(
+ aligned,
+ src_stride, dst_stride,
+ src_dtype, dst_dtype,
+ out_stransfer, out_transferdata,
+ out_needs_api);
+ }
+ }
+ else if (dst_dtype->type_num == NPY_DATETIME) {
+ switch (src_dtype->type_num) {
+ case NPY_STRING:
+ *out_needs_api = 1;
+ *out_needs_wrap = !PyArray_ISNBO(dst_dtype->byteorder);
+ return get_nbo_string_to_datetime_transfer_function(
+ aligned,
+ src_stride, dst_stride,
+ src_dtype, dst_dtype,
+ out_stransfer, out_transferdata);
+
+ case NPY_UNICODE:
+ return get_unicode_to_datetime_transfer_function(
+ aligned,
+ src_stride, dst_stride,
+ src_dtype, dst_dtype,
+ out_stransfer, out_transferdata,
+ out_needs_api);
+ }
+ }
+ }
+
+ return get_legacy_dtype_cast_function(
+ aligned, src_stride, dst_stride, src_dtype, dst_dtype,
+ move_references, out_stransfer, out_transferdata,
+ out_needs_api, out_needs_wrap);
+}
+
+
+static int
+wrap_aligned_contig_transfer_function_with_copyswapn(
+ int aligned, npy_intp src_stride, npy_intp dst_stride,
+ PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
+ PyArray_StridedUnaryOp **out_stransfer, NpyAuxData **out_transferdata,
+ int *out_needs_api,
+ PyArray_StridedUnaryOp *caststransfer, NpyAuxData *castdata)
+{
+ NpyAuxData *todata = NULL, *fromdata = NULL;
+ PyArray_StridedUnaryOp *tobuffer, *frombuffer;
+ npy_intp src_itemsize = src_dtype->elsize;
+ npy_intp dst_itemsize = dst_dtype->elsize;
+
+ /* Get the copy/swap operation from src */
+ PyArray_GetDTypeCopySwapFn(
+ aligned, src_stride, src_itemsize, src_dtype, &tobuffer, &todata);
+
+ if (!PyDataType_REFCHK(dst_dtype)) {
+ /* Copying from buffer is a simple copy/swap operation */
+ PyArray_GetDTypeCopySwapFn(
+ aligned, dst_itemsize, dst_stride, dst_dtype,
+ &frombuffer, &fromdata);
+ }
+ else {
+ /*
+ * Since the buffer is initialized to NULL, need to move the
+ * references in order to DECREF the existing data.
+ */
+ /* Object types cannot be byte swapped */
+ assert(PyDataType_ISNOTSWAPPED(dst_dtype));
+ /* The loop already needs the python api if this is reached */
+ assert(*out_needs_api);
+
+ if (PyArray_GetDTypeTransferFunction(
+ aligned, dst_itemsize, dst_stride,
+ dst_dtype, dst_dtype, 1,
+ &frombuffer, &fromdata, out_needs_api) != NPY_SUCCEED) {
+ return NPY_FAIL;
+ }
+ }
+
+ if (frombuffer == NULL || tobuffer == NULL) {
+ NPY_AUXDATA_FREE(castdata);
+ NPY_AUXDATA_FREE(todata);
+ NPY_AUXDATA_FREE(fromdata);
+ return NPY_FAIL;
+ }
+
+ *out_stransfer = caststransfer;
+
+ /* Wrap it all up in a new transfer function + data */
+ if (wrap_aligned_contig_transfer_function(
+ src_itemsize, dst_itemsize,
+ tobuffer, todata,
+ frombuffer, fromdata,
+ caststransfer, castdata,
+ PyDataType_FLAGCHK(dst_dtype, NPY_NEEDS_INIT),
+ *out_needs_api,
+ out_stransfer, out_transferdata) != NPY_SUCCEED) {
+ NPY_AUXDATA_FREE(castdata);
+ NPY_AUXDATA_FREE(todata);
+ NPY_AUXDATA_FREE(fromdata);
+ return NPY_FAIL;
+ }
+
+ return NPY_SUCCEED;
+}
+
+
static int
get_cast_transfer_function(int aligned,
npy_intp src_stride, npy_intp dst_stride,
@@ -1553,10 +1640,8 @@ get_cast_transfer_function(int aligned,
int *out_needs_api)
{
PyArray_StridedUnaryOp *caststransfer;
- NpyAuxData *castdata, *todata = NULL, *fromdata = NULL;
+ NpyAuxData *castdata;
int needs_wrap = 0;
- npy_intp src_itemsize = src_dtype->elsize,
- dst_itemsize = dst_dtype->elsize;
if (get_nbo_cast_transfer_function(aligned,
src_stride, dst_stride,
@@ -1581,64 +1666,10 @@ get_cast_transfer_function(int aligned,
}
/* Otherwise, we have to copy and/or swap to aligned temporaries */
else {
- PyArray_StridedUnaryOp *tobuffer, *frombuffer;
-
- /* Get the copy/swap operation from src */
- PyArray_GetDTypeCopySwapFn(aligned,
- src_stride, src_itemsize,
- src_dtype,
- &tobuffer, &todata);
-
- if (!PyDataType_REFCHK(dst_dtype)) {
- /* Copying from buffer is a simple copy/swap operation */
- PyArray_GetDTypeCopySwapFn(aligned,
- dst_itemsize, dst_stride,
- dst_dtype,
- &frombuffer, &fromdata);
- }
- else {
- /*
- * Since the buffer is initialized to NULL, need to move the
- * references in order to DECREF the existing data.
- */
- /* Object types cannot be byte swapped */
- assert(PyDataType_ISNOTSWAPPED(dst_dtype));
- /* The loop already needs the python api if this is reached */
- assert(*out_needs_api);
-
- if (PyArray_GetDTypeTransferFunction(
- aligned, dst_itemsize, dst_stride,
- dst_dtype, dst_dtype, 1,
- &frombuffer, &fromdata, out_needs_api) != NPY_SUCCEED) {
- return NPY_FAIL;
- }
- }
-
- if (frombuffer == NULL || tobuffer == NULL) {
- NPY_AUXDATA_FREE(castdata);
- NPY_AUXDATA_FREE(todata);
- NPY_AUXDATA_FREE(fromdata);
- return NPY_FAIL;
- }
-
- *out_stransfer = caststransfer;
-
- /* Wrap it all up in a new transfer function + data */
- if (wrap_aligned_contig_transfer_function(
- src_itemsize, dst_itemsize,
- tobuffer, todata,
- frombuffer, fromdata,
- caststransfer, castdata,
- PyDataType_FLAGCHK(dst_dtype, NPY_NEEDS_INIT),
- *out_needs_api,
- out_stransfer, out_transferdata) != NPY_SUCCEED) {
- NPY_AUXDATA_FREE(castdata);
- NPY_AUXDATA_FREE(todata);
- NPY_AUXDATA_FREE(fromdata);
- return NPY_FAIL;
- }
-
- return NPY_SUCCEED;
+ return wrap_aligned_contig_transfer_function_with_copyswapn(
+ aligned, src_stride, dst_stride, src_dtype, dst_dtype,
+ out_stransfer, out_transferdata, out_needs_api,
+ caststransfer, castdata);
}
}
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index d07dc700d..dbe5ba476 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -15,6 +15,9 @@
#include "dtypemeta.h"
#include "_datetime.h"
#include "array_coercion.h"
+#include "scalartypes.h"
+#include "convert_datatype.h"
+#include "usertypes.h"
static void
@@ -194,7 +197,39 @@ discover_datetime_and_timedelta_from_pyobject(
static PyArray_Descr *
-flexible_default_descr(PyArray_DTypeMeta *cls)
+nonparametric_default_descr(PyArray_DTypeMeta *cls)
+{
+ Py_INCREF(cls->singleton);
+ return cls->singleton;
+}
+
+
+/* Ensure a copy of the singleton (just in case we do adapt it somewhere) */
+static PyArray_Descr *
+datetime_and_timedelta_default_descr(PyArray_DTypeMeta *cls)
+{
+ return PyArray_DescrNew(cls->singleton);
+}
+
+
+static PyArray_Descr *
+void_default_descr(PyArray_DTypeMeta *cls)
+{
+ PyArray_Descr *res = PyArray_DescrNew(cls->singleton);
+ if (res == NULL) {
+ return NULL;
+ }
+ /*
+ * The legacy behaviour for `np.array([], dtype="V")` is to use "V8".
+ * This is because `[]` uses `float64` as dtype, and then that is used
+ * for the size of the requested void.
+ */
+ res->elsize = 8;
+ return res;
+}
+
+static PyArray_Descr *
+string_and_unicode_default_descr(PyArray_DTypeMeta *cls)
{
PyArray_Descr *res = PyArray_DescrNewFromType(cls->type_num);
if (res == NULL) {
@@ -208,6 +243,34 @@ flexible_default_descr(PyArray_DTypeMeta *cls)
}
+static PyArray_Descr *
+string_unicode_common_instance(PyArray_Descr *descr1, PyArray_Descr *descr2)
+{
+ if (descr1->elsize >= descr2->elsize) {
+ return ensure_dtype_nbo(descr1);
+ }
+ else {
+ return ensure_dtype_nbo(descr2);
+ }
+}
+
+
+static PyArray_Descr *
+void_common_instance(PyArray_Descr *descr1, PyArray_Descr *descr2)
+{
+ /*
+ * We currently do not support promotion of void types unless they
+ * are equivalent.
+ */
+ if (!PyArray_CanCastTypeTo(descr1, descr2, NPY_EQUIV_CASTING)) {
+ PyErr_SetString(PyExc_TypeError,
+ "invalid type promotion with structured or void datatype(s).");
+ return NULL;
+ }
+ Py_INCREF(descr1);
+ return descr1;
+}
+
static int
python_builtins_are_known_scalar_types(
PyArray_DTypeMeta *NPY_UNUSED(cls), PyTypeObject *pytype)
@@ -281,6 +344,86 @@ string_known_scalar_types(
}
+/*
+ * The following set of functions define the common dtype operator for
+ * the builtin types.
+ */
+static PyArray_DTypeMeta *
+default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
+{
+ assert(cls->type_num < NPY_NTYPES);
+ if (!other->legacy || other->type_num > cls->type_num) {
+ /* Let the more generic (larger type number) DType handle this */
+ Py_INCREF(Py_NotImplemented);
+ return (PyArray_DTypeMeta *)Py_NotImplemented;
+ }
+
+ /*
+ * Note: The use of the promotion table should probably be revised at
+ * some point. It may be most useful to remove it entirely and then
+ * consider adding a fast path/cache `PyArray_CommonDType()` itself.
+ */
+ int common_num = _npy_type_promotion_table[cls->type_num][other->type_num];
+ if (common_num < 0) {
+ Py_INCREF(Py_NotImplemented);
+ return (PyArray_DTypeMeta *)Py_NotImplemented;
+ }
+ return PyArray_DTypeFromTypeNum(common_num);
+}
+
+
+static PyArray_DTypeMeta *
+string_unicode_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
+{
+ assert(cls->type_num < NPY_NTYPES);
+ if (!other->legacy || other->type_num > cls->type_num ||
+ other->type_num == NPY_OBJECT) {
+ /* Let the more generic (larger type number) DType handle this */
+ Py_INCREF(Py_NotImplemented);
+ return (PyArray_DTypeMeta *)Py_NotImplemented;
+ }
+ /*
+ * The builtin types are ordered by complexity (aside from object) here.
+ * Arguably, we should not consider numbers and strings "common", but
+ * we currently do.
+ */
+ Py_INCREF(cls);
+ return cls;
+}
+
+static PyArray_DTypeMeta *
+datetime_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
+{
+ if (cls->type_num == NPY_DATETIME && other->type_num == NPY_TIMEDELTA) {
+ /*
+ * TODO: We actually currently do allow promotion here. This is
+ * currently relied on within `np.add(datetime, timedelta)`,
+ * while for concatenation the cast step will fail.
+ */
+ Py_INCREF(cls);
+ return cls;
+ }
+ return default_builtin_common_dtype(cls, other);
+}
+
+
+
+static PyArray_DTypeMeta *
+object_common_dtype(
+ PyArray_DTypeMeta *cls, PyArray_DTypeMeta *NPY_UNUSED(other))
+{
+ /*
+ * The object DType is special in that it can represent everything,
+ * including all potential user DTypes.
+ * One reason to defer (or error) here might be if the other DType
+ * does not support scalars so that e.g. `arr1d[0]` returns a 0-D array
+ * and `arr.astype(object)` would fail. But object casts are special.
+ */
+ Py_INCREF(cls);
+ return cls;
+}
+
+
/**
* This function takes a PyArray_Descr and replaces its base class with
* a newly created dtype subclass (DTypeMeta instances).
@@ -398,36 +541,49 @@ dtypemeta_wrap_legacy_descriptor(PyArray_Descr *descr)
dtype_class->f = descr->f;
dtype_class->kind = descr->kind;
- /* Strings and voids have (strange) logic around scalars. */
+ /* Set default functions (correct for most dtypes, override below) */
+ dtype_class->default_descr = nonparametric_default_descr;
+ dtype_class->discover_descr_from_pyobject = (
+ nonparametric_discover_descr_from_pyobject);
dtype_class->is_known_scalar_type = python_builtins_are_known_scalar_types;
+ dtype_class->common_dtype = default_builtin_common_dtype;
+ dtype_class->common_instance = NULL;
- if (PyTypeNum_ISDATETIME(descr->type_num)) {
+ if (PyTypeNum_ISUSERDEF(descr->type_num)) {
+ dtype_class->common_dtype = legacy_userdtype_common_dtype_function;
+ }
+ else if (descr->type_num == NPY_OBJECT) {
+ dtype_class->common_dtype = object_common_dtype;
+ }
+ else if (PyTypeNum_ISDATETIME(descr->type_num)) {
/* Datetimes are flexible, but were not considered previously */
dtype_class->parametric = NPY_TRUE;
+ dtype_class->default_descr = datetime_and_timedelta_default_descr;
dtype_class->discover_descr_from_pyobject = (
discover_datetime_and_timedelta_from_pyobject);
+ dtype_class->common_dtype = datetime_common_dtype;
+ dtype_class->common_instance = datetime_type_promotion;
if (descr->type_num == NPY_DATETIME) {
dtype_class->is_known_scalar_type = datetime_known_scalar_types;
}
}
else if (PyTypeNum_ISFLEXIBLE(descr->type_num)) {
dtype_class->parametric = NPY_TRUE;
- dtype_class->default_descr = flexible_default_descr;
if (descr->type_num == NPY_VOID) {
+ dtype_class->default_descr = void_default_descr;
dtype_class->discover_descr_from_pyobject = (
void_discover_descr_from_pyobject);
+ dtype_class->common_instance = void_common_instance;
}
else {
+ dtype_class->default_descr = string_and_unicode_default_descr;
dtype_class->is_known_scalar_type = string_known_scalar_types;
dtype_class->discover_descr_from_pyobject = (
string_discover_descr_from_pyobject);
+ dtype_class->common_dtype = string_unicode_common_dtype;
+ dtype_class->common_instance = string_unicode_common_instance;
}
}
- else {
- /* nonparametric case */
- dtype_class->discover_descr_from_pyobject = (
- nonparametric_discover_descr_from_pyobject);
- }
if (_PyArray_MapPyTypeToDType(dtype_class, descr->typeobj,
PyTypeNum_ISUSERDEF(dtype_class->type_num)) < 0) {
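
A rough Python-level view of the common_instance/common_dtype slots wired up above
(behaviour as observed through np.promote_types, not the C API itself):

    import numpy as np

    # string_unicode_common_instance: the larger itemsize wins.
    assert np.promote_types("S3", "S5") == np.dtype("S5")
    assert np.promote_types("U7", "U3") == np.dtype("U7")

    # void_common_instance: non-equivalent structured dtypes do not promote.
    try:
        np.promote_types(np.dtype([("a", "i4")]), np.dtype([("b", "f8")]))
    except TypeError as exc:
        print(exc)  # invalid type promotion ...
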
diff --git a/numpy/core/src/multiarray/dtypemeta.h b/numpy/core/src/multiarray/dtypemeta.h
index e0909a7eb..83cf7c07e 100644
--- a/numpy/core/src/multiarray/dtypemeta.h
+++ b/numpy/core/src/multiarray/dtypemeta.h
@@ -2,6 +2,22 @@
#define _NPY_DTYPEMETA_H
#define NPY_DTYPE(descr) ((PyArray_DTypeMeta *)Py_TYPE(descr))
+/*
+ * This function will hopefully be phased out or replaced, but was convenient
+ * for incremental implementation of new DTypes based on DTypeMeta.
+ * (Error checking is not required for DescrFromType, assuming that the
+ * type is valid.)
+ */
+static NPY_INLINE PyArray_DTypeMeta *
+PyArray_DTypeFromTypeNum(int typenum)
+{
+ PyArray_Descr *descr = PyArray_DescrFromType(typenum);
+ PyArray_DTypeMeta *dtype = NPY_DTYPE(descr);
+ Py_INCREF(dtype);
+ Py_DECREF(descr);
+ return dtype;
+}
+
NPY_NO_EXPORT int
dtypemeta_wrap_legacy_descriptor(PyArray_Descr *dtypem);
diff --git a/numpy/core/src/multiarray/hashdescr.c b/numpy/core/src/multiarray/hashdescr.c
index c596a7098..e9a99cc8f 100644
--- a/numpy/core/src/multiarray/hashdescr.c
+++ b/numpy/core/src/multiarray/hashdescr.c
@@ -165,7 +165,7 @@ static int _array_descr_walk_fields(PyObject *names, PyObject* fields, PyObject*
}
foffset = PyTuple_GET_ITEM(value, 1);
- if (!PyInt_Check(foffset)) {
+ if (!PyLong_Check(foffset)) {
PyErr_SetString(PyExc_SystemError,
"(Hash) Second item in compound dtype tuple not an int ???");
return -1;
@@ -208,7 +208,7 @@ static int _array_descr_walk_subarray(PyArray_ArrayDescr* adescr, PyObject *l)
PyList_Append(l, item);
}
}
- else if (PyInt_Check(adescr->shape)) {
+ else if (PyLong_Check(adescr->shape)) {
PyList_Append(l, adescr->shape);
}
else {
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 31795b2d0..3ebd4c858 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -597,7 +597,7 @@ iter_subscript(PyArrayIterObject *self, PyObject *ind)
}
/* Check for Integer or Slice */
- if (PyLong_Check(ind) || PyInt_Check(ind) || PySlice_Check(ind)) {
+ if (PyLong_Check(ind) || PySlice_Check(ind)) {
start = parse_index_entry(ind, &step_size, &n_steps,
self->size, 0, 1);
if (start == -1) {
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 0998a6b49..cb5c3823d 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -1418,10 +1418,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
return 0;
}
else if (tup == NULL){
- PyObject *errmsg = PyUnicode_FromString("no field of name ");
- PyUString_Concat(&errmsg, ind);
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
+ PyErr_Format(PyExc_ValueError, "no field of name %S", ind);
return 0;
}
if (_unpack_field(tup, &fieldtype, &offset) < 0) {
@@ -2345,7 +2342,6 @@ mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices,
int consec_status = -1;
int axis, broadcast_axis;
npy_intp dimension;
- PyObject *errmsg, *tmp;
for (i = 0; i < mit->nd_fancy; i++) {
mit->dimensions[i] = 1;
@@ -2433,35 +2429,38 @@ mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices,
return 0;
- broadcast_error:
+broadcast_error: ; // Declarations cannot follow labels, add empty statement.
/*
* Attempt to set a meaningful exception. Could also find out
* if a boolean index was converted.
*/
- errmsg = PyUnicode_FromString("shape mismatch: indexing arrays could not "
- "be broadcast together with shapes ");
+ PyObject *errmsg = PyUnicode_FromString("");
if (errmsg == NULL) {
return -1;
}
-
for (i = 0; i < index_num; i++) {
if (!(indices[i].type & HAS_FANCY)) {
continue;
}
- tmp = convert_shape_to_string(
- PyArray_NDIM((PyArrayObject *)indices[i].object),
- PyArray_SHAPE((PyArrayObject *)indices[i].object),
- " ");
+
+ int ndim = PyArray_NDIM((PyArrayObject *)indices[i].object);
+ npy_intp *shape = PyArray_SHAPE((PyArrayObject *)indices[i].object);
+ PyObject *tmp = convert_shape_to_string(ndim, shape, " ");
if (tmp == NULL) {
+ Py_DECREF(errmsg);
return -1;
}
- PyUString_ConcatAndDel(&errmsg, tmp);
+
+ Py_SETREF(errmsg, PyUnicode_Concat(errmsg, tmp));
+ Py_DECREF(tmp);
if (errmsg == NULL) {
return -1;
}
}
- PyErr_SetObject(PyExc_IndexError, errmsg);
+ PyErr_Format(PyExc_IndexError,
+ "shape mismatch: indexing arrays could not "
+ "be broadcast together with shapes %S", errmsg);
Py_DECREF(errmsg);
return -1;
}
@@ -2653,7 +2652,6 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
npy_uint32 extra_op_flags, PyArrayObject *extra_op,
PyArray_Descr *extra_op_dtype)
{
- PyObject *errmsg, *tmp;
/* For shape reporting on error */
PyArrayObject *original_extra_op = extra_op;
@@ -3183,45 +3181,29 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
goto finish;
broadcast_error:
- errmsg = PyUnicode_FromString("shape mismatch: value array "
- "of shape ");
- if (errmsg == NULL) {
- goto finish;
- }
-
/* Report the shape of the original array if it exists */
if (original_extra_op == NULL) {
original_extra_op = extra_op;
}
- tmp = convert_shape_to_string(PyArray_NDIM(original_extra_op),
- PyArray_DIMS(original_extra_op), " ");
- if (tmp == NULL) {
- goto finish;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ int extra_ndim = PyArray_NDIM(original_extra_op);
+ npy_intp *extra_dims = PyArray_DIMS(original_extra_op);
+ PyObject *shape1 = convert_shape_to_string(extra_ndim, extra_dims, " ");
+ if (shape1 == NULL) {
goto finish;
}
- tmp = PyUnicode_FromString("could not be broadcast to indexing "
- "result of shape ");
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ PyObject *shape2 = convert_shape_to_string(mit->nd, mit->dimensions, "");
+    if (shape2 == NULL) {
+ Py_DECREF(shape1);
goto finish;
     }
- tmp = convert_shape_to_string(mit->nd, mit->dimensions, "");
- if (tmp == NULL) {
- goto finish;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
- goto finish;
- }
+ PyErr_Format(PyExc_ValueError,
+ "shape mismatch: value array of shape %S could not be broadcast "
+ "to indexing result of shape %S", shape1, shape2);
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
finish:
Py_XDECREF(extra_op);
@@ -3320,7 +3302,7 @@ PyArray_MapIterArrayCopyIfOverlap(PyArrayObject * a, PyObject * index,
Py_XDECREF(a_copy);
Py_XDECREF(subspace);
Py_XDECREF((PyObject *)mit);
- for (i=0; i < index_num; i++) {
+ for (i = 0; i < index_num; i++) {
Py_XDECREF(indices[i].object);
}
return NULL;
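
The reworked fancy-indexing broadcast error surfaces roughly as follows (message
text approximate):

    import numpy as np

    a = np.zeros((5, 5))
    try:
        a[np.array([0, 1]), np.array([0, 1, 2])]
    except IndexError as exc:
        print(exc)
        # shape mismatch: indexing arrays could not be broadcast together
        # with shapes (2,) (3,)
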
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index f1d5ab694..1aad70dc6 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -2296,6 +2296,7 @@ array_fromiter(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
array_function_result = array_implement_c_array_function_creation(
"fromiter", args, keywds);
if (array_function_result != Py_NotImplemented) {
+ Py_DECREF(descr);
return array_function_result;
}
@@ -2942,6 +2943,7 @@ array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) {
array_function_result = array_implement_c_array_function_creation(
"arange", args, kws);
if (array_function_result != Py_NotImplemented) {
+ Py_XDECREF(typecode);
return array_function_result;
}
@@ -4409,12 +4411,6 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
/* Timedelta is an integer with an associated unit */
SINGLE_INHERIT(Timedelta, SignedInteger);
- /*
- fprintf(stderr,
- "tp_free = %p, PyObject_Del = %p, int_tp_free = %p, base.tp_free = %p\n",
- PyIntArrType_Type.tp_free, PyObject_Del, PyInt_Type.tp_free,
- PySignedIntegerArrType_Type.tp_free);
- */
SINGLE_INHERIT(UByte, UnsignedInteger);
SINGLE_INHERIT(UShort, UnsignedInteger);
SINGLE_INHERIT(UInt, UnsignedInteger);
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 4bc6d2ca1..b379a28ac 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -1750,73 +1750,70 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf
return 1;
broadcast_error: {
- PyObject *errmsg, *tmp;
npy_intp remdims[NPY_MAXDIMS];
- char *tmpstr;
if (op_axes == NULL) {
- errmsg = PyUnicode_FromString("operands could not be broadcast "
- "together with shapes ");
- if (errmsg == NULL) {
+ PyObject *shape1 = PyUnicode_FromString("");
+ if (shape1 == NULL) {
return 0;
}
for (iop = 0; iop < nop; ++iop) {
if (op[iop] != NULL) {
- tmp = convert_shape_to_string(PyArray_NDIM(op[iop]),
- PyArray_DIMS(op[iop]),
- " ");
+ int ndims = PyArray_NDIM(op[iop]);
+ npy_intp *dims = PyArray_DIMS(op[iop]);
+ PyObject *tmp = convert_shape_to_string(ndims, dims, " ");
if (tmp == NULL) {
- Py_DECREF(errmsg);
+ Py_DECREF(shape1);
return 0;
}
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ Py_SETREF(shape1, PyUnicode_Concat(shape1, tmp));
+ Py_DECREF(tmp);
+ if (shape1 == NULL) {
return 0;
}
}
}
- if (itershape != NULL) {
- tmp = PyUnicode_FromString("and requested shape ");
- if (tmp == NULL) {
- Py_DECREF(errmsg);
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
- return 0;
- }
-
- tmp = convert_shape_to_string(ndim, itershape, "");
- if (tmp == NULL) {
- Py_DECREF(errmsg);
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ if (itershape == NULL) {
+ PyErr_Format(PyExc_ValueError,
+ "operands could not be broadcast together with "
+ "shapes %S", shape1);
+ Py_DECREF(shape1);
+ return 0;
+ }
+ else {
+ PyObject *shape2 = convert_shape_to_string(ndim, itershape, "");
+ if (shape2 == NULL) {
+ Py_DECREF(shape1);
return 0;
}
-
+ PyErr_Format(PyExc_ValueError,
+ "operands could not be broadcast together with "
+ "shapes %S and requested shape %S", shape1, shape2);
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
+ return 0;
}
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
}
else {
- errmsg = PyUnicode_FromString("operands could not be broadcast "
- "together with remapped shapes "
- "[original->remapped]: ");
+ PyObject *shape1 = PyUnicode_FromString("");
+ if (shape1 == NULL) {
+ return 0;
+ }
for (iop = 0; iop < nop; ++iop) {
if (op[iop] != NULL) {
int *axes = op_axes[iop];
+ int ndims = PyArray_NDIM(op[iop]);
+ npy_intp *dims = PyArray_DIMS(op[iop]);
+ char *tmpstr = (axes == NULL) ? " " : "->";
- tmpstr = (axes == NULL) ? " " : "->";
- tmp = convert_shape_to_string(PyArray_NDIM(op[iop]),
- PyArray_DIMS(op[iop]),
- tmpstr);
+ PyObject *tmp = convert_shape_to_string(ndims, dims, tmpstr);
if (tmp == NULL) {
+ Py_DECREF(shape1);
return 0;
}
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ Py_SETREF(shape1, PyUnicode_Concat(shape1, tmp));
+ Py_DECREF(tmp);
+ if (shape1 == NULL) {
return 0;
}
@@ -1831,80 +1828,83 @@ broadcast_error: {
remdims[idim] = -1;
}
}
- tmp = convert_shape_to_string(ndim, remdims, " ");
+ PyObject *tmp = convert_shape_to_string(ndim, remdims, " ");
if (tmp == NULL) {
+ Py_DECREF(shape1);
return 0;
}
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ Py_SETREF(shape1, PyUnicode_Concat(shape1, tmp));
+ Py_DECREF(tmp);
+ if (shape1 == NULL) {
return 0;
}
}
}
}
- if (itershape != NULL) {
- tmp = PyUnicode_FromString("and requested shape ");
- if (tmp == NULL) {
- Py_DECREF(errmsg);
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
- return 0;
- }
-
- tmp = convert_shape_to_string(ndim, itershape, "");
- if (tmp == NULL) {
- Py_DECREF(errmsg);
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ if (itershape == NULL) {
+ PyErr_Format(PyExc_ValueError,
+ "operands could not be broadcast together with "
+ "remapped shapes [original->remapped]: %S", shape1);
+ Py_DECREF(shape1);
+ return 0;
+ }
+ else {
+ PyObject *shape2 = convert_shape_to_string(ndim, itershape, "");
+ if (shape2 == NULL) {
+ Py_DECREF(shape1);
return 0;
}
-
+ PyErr_Format(PyExc_ValueError,
+ "operands could not be broadcast together with "
+ "remapped shapes [original->remapped]: %S and "
+ "requested shape %S", shape1, shape2);
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
+ return 0;
}
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
}
-
- return 0;
}
operand_different_than_broadcast: {
- npy_intp remdims[NPY_MAXDIMS];
- PyObject *errmsg, *tmp;
-
- /* Start of error message */
- if (op_flags[iop] & NPY_ITER_READONLY) {
- errmsg = PyUnicode_FromString("non-broadcastable operand "
- "with shape ");
- }
- else {
- errmsg = PyUnicode_FromString("non-broadcastable output "
- "operand with shape ");
- }
- if (errmsg == NULL) {
+ /* operand shape */
+ int ndims = PyArray_NDIM(op[iop]);
+ npy_intp *dims = PyArray_DIMS(op[iop]);
+ PyObject *shape1 = convert_shape_to_string(ndims, dims, "");
+ if (shape1 == NULL) {
return 0;
}
- /* Operand shape */
- tmp = convert_shape_to_string(PyArray_NDIM(op[iop]),
- PyArray_DIMS(op[iop]), "");
- if (tmp == NULL) {
+ /* Broadcast shape */
+ PyObject *shape2 = convert_shape_to_string(ndim, broadcast_shape, "");
+ if (shape2 == NULL) {
+ Py_DECREF(shape1);
return 0;
}
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+
+ if (op_axes == NULL || op_axes[iop] == NULL) {
+ /* operand shape not remapped */
+
+ if (op_flags[iop] & NPY_ITER_READONLY) {
+ PyErr_Format(PyExc_ValueError,
+ "non-broadcastable operand with shape %S doesn't "
+ "match the broadcast shape %S", shape1, shape2);
+ }
+ else {
+ PyErr_Format(PyExc_ValueError,
+ "non-broadcastable output operand with shape %S doesn't "
+ "match the broadcast shape %S", shape1, shape2);
+ }
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
return 0;
}
- /* Remapped operand shape */
- if (op_axes != NULL && op_axes[iop] != NULL) {
- int *axes = op_axes[iop];
+ else {
+ /* operand shape remapped */
+ npy_intp remdims[NPY_MAXDIMS];
+ int *axes = op_axes[iop];
for (idim = 0; idim < ndim; ++idim) {
- npy_intp i = axes[ndim-idim-1];
-
+ npy_intp i = axes[ndim - idim - 1];
if (i >= 0 && i < PyArray_NDIM(op[iop])) {
remdims[idim] = PyArray_DIM(op[iop], i);
}
@@ -1913,48 +1913,30 @@ operand_different_than_broadcast: {
}
}
- tmp = PyUnicode_FromString(" [remapped to ");
- if (tmp == NULL) {
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ PyObject *shape3 = convert_shape_to_string(ndim, remdims, "");
+ if (shape3 == NULL) {
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
return 0;
}
- tmp = convert_shape_to_string(ndim, remdims, "]");
- if (tmp == NULL) {
- return 0;
+ if (op_flags[iop] & NPY_ITER_READONLY) {
+ PyErr_Format(PyExc_ValueError,
+ "non-broadcastable operand with shape %S "
+ "[remapped to %S] doesn't match the broadcast shape %S",
+ shape1, shape3, shape2);
}
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
- return 0;
+ else {
+ PyErr_Format(PyExc_ValueError,
+ "non-broadcastable output operand with shape %S "
+ "[remapped to %S] doesn't match the broadcast shape %S",
+ shape1, shape3, shape2);
}
- }
-
- tmp = PyUnicode_FromString(" doesn't match the broadcast shape ");
- if (tmp == NULL) {
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
+ Py_DECREF(shape3);
return 0;
}
-
- /* Broadcast shape */
- tmp = convert_shape_to_string(ndim, broadcast_shape, "");
- if (tmp == NULL) {
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
- return 0;
- }
-
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
-
- return 0;
}
}
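
These are the messages raised when operands cannot be broadcast, e.g. through a
ufunc call (the remapped-shapes variant can be reached via np.nditer with op_axes):

    import numpy as np

    try:
        np.add(np.ones((2, 3)), np.ones(4))
    except ValueError as exc:
        print(exc)
        # operands could not be broadcast together with shapes (2,3) (4,)
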
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 87c3c9b0a..a629dfe97 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -397,14 +397,21 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
PyObject *temp;
const int optimize_fpexps = 1;
- if (PyInt_Check(o2)) {
- *out_exponent = (double)PyLong_AsLong(o2);
+ if (PyLong_Check(o2)) {
+ long tmp = PyLong_AsLong(o2);
+ if (error_converting(tmp)) {
+ PyErr_Clear();
+ return NPY_NOSCALAR;
+ }
+ *out_exponent = (double)tmp;
return NPY_INTPOS_SCALAR;
}
+
if (optimize_fpexps && PyFloat_Check(o2)) {
*out_exponent = PyFloat_AsDouble(o2);
return NPY_FLOAT_SCALAR;
}
+
if (PyArray_Check(o2)) {
if ((PyArray_NDIM((PyArrayObject *)o2) == 0) &&
((PyArray_ISINTEGER((PyArrayObject *)o2) ||
@@ -442,7 +449,7 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
else if (PyIndex_Check(o2)) {
PyObject* value = PyNumber_Index(o2);
Py_ssize_t val;
- if (value==NULL) {
+ if (value == NULL) {
if (PyErr_Occurred()) {
PyErr_Clear();
}
diff --git a/numpy/core/src/multiarray/refcount.c b/numpy/core/src/multiarray/refcount.c
index 0f84449af..41dd059b0 100644
--- a/numpy/core/src/multiarray/refcount.c
+++ b/numpy/core/src/multiarray/refcount.c
@@ -292,20 +292,22 @@ static void
_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
{
if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) {
- if ((obj == Py_None) || (PyInt_Check(obj) && PyLong_AsLong(obj)==0)) {
+ PyObject *arr;
+
+ if ((obj == Py_None) ||
+ (PyLong_Check(obj) && PyLong_AsLong(obj) == 0)) {
return;
}
- else {
- PyObject *arr;
- Py_INCREF(dtype);
- arr = PyArray_NewFromDescr(&PyArray_Type, dtype,
- 0, NULL, NULL, NULL,
- 0, NULL);
- if (arr!=NULL) {
- dtype->f->setitem(obj, optr, arr);
- }
- Py_XDECREF(arr);
+ /* Clear possible long conversion error */
+ PyErr_Clear();
+ Py_INCREF(dtype);
+ arr = PyArray_NewFromDescr(&PyArray_Type, dtype,
+ 0, NULL, NULL, NULL,
+ 0, NULL);
+ if (arr!=NULL) {
+ dtype->f->setitem(obj, optr, arr);
}
+ Py_XDECREF(arr);
}
if (dtype->type_num == NPY_OBJECT) {
Py_XINCREF(obj);
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index b918786f2..f610ad468 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -373,7 +373,8 @@ PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode)
NPY_NO_EXPORT PyObject *
PyArray_ScalarFromObject(PyObject *object)
{
- PyObject *ret=NULL;
+ PyObject *ret = NULL;
+
if (PyArray_IsZeroDim(object)) {
return PyArray_ToScalar(PyArray_DATA((PyArrayObject *)object),
(PyArrayObject *)object);
@@ -390,42 +391,49 @@ PyArray_ScalarFromObject(PyObject *object)
PyArrayScalar_RETURN_FALSE;
}
}
- else if (PyInt_Check(object)) {
- ret = PyArrayScalar_New(Long);
- if (ret == NULL) {
- return NULL;
+ else if (PyLong_Check(object)) {
+ /* Check if fits in long */
+ npy_long val_long = PyLong_AsLong(object);
+ if (!error_converting(val_long)) {
+ ret = PyArrayScalar_New(Long);
+ if (ret != NULL) {
+ PyArrayScalar_VAL(ret, Long) = val_long;
+ }
+ return ret;
}
- PyArrayScalar_VAL(ret, Long) = PyLong_AsLong(object);
+ PyErr_Clear();
+
+ /* Check if fits in long long */
+ npy_longlong val_longlong = PyLong_AsLongLong(object);
+ if (!error_converting(val_longlong)) {
+ ret = PyArrayScalar_New(LongLong);
+ if (ret != NULL) {
+ PyArrayScalar_VAL(ret, LongLong) = val_longlong;
+ }
+ return ret;
+ }
+ PyErr_Clear();
+
+ return NULL;
}
else if (PyFloat_Check(object)) {
ret = PyArrayScalar_New(Double);
- if (ret == NULL) {
- return NULL;
+ if (ret != NULL) {
+ PyArrayScalar_VAL(ret, Double) = PyFloat_AS_DOUBLE(object);
}
- PyArrayScalar_VAL(ret, Double) = PyFloat_AS_DOUBLE(object);
+ return ret;
}
else if (PyComplex_Check(object)) {
ret = PyArrayScalar_New(CDouble);
- if (ret == NULL) {
- return NULL;
+ if (ret != NULL) {
+ PyArrayScalar_VAL(ret, CDouble).real = PyComplex_RealAsDouble(object);
+ PyArrayScalar_VAL(ret, CDouble).imag = PyComplex_ImagAsDouble(object);
}
- PyArrayScalar_VAL(ret, CDouble).real = PyComplex_RealAsDouble(object);
- PyArrayScalar_VAL(ret, CDouble).imag = PyComplex_ImagAsDouble(object);
+ return ret;
}
- else if (PyLong_Check(object)) {
- npy_longlong val;
- val = PyLong_AsLongLong(object);
- if (error_converting(val)) {
- PyErr_Clear();
- return NULL;
- }
- ret = PyArrayScalar_New(LongLong);
- if (ret == NULL) {
- return NULL;
- }
- PyArrayScalar_VAL(ret, LongLong) = val;
+ else {
+ return NULL;
}
- return ret;
}
/*New reference */
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 5a3f4922a..1a50927a8 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -518,21 +518,15 @@ datetimetype_repr(PyObject *self)
*/
if ((scal->obmeta.num == 1 && scal->obmeta.base != NPY_FR_h) ||
scal->obmeta.base == NPY_FR_GENERIC) {
- ret = PyUnicode_FromString("numpy.datetime64('");
- PyUString_ConcatAndDel(&ret,
- PyUnicode_FromString(iso));
- PyUString_ConcatAndDel(&ret,
- PyUnicode_FromString("')"));
+ ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso);
}
else {
- ret = PyUnicode_FromString("numpy.datetime64('");
- PyUString_ConcatAndDel(&ret,
- PyUnicode_FromString(iso));
- PyUString_ConcatAndDel(&ret,
- PyUnicode_FromString("','"));
- ret = append_metastr_to_string(&scal->obmeta, 1, ret);
- PyUString_ConcatAndDel(&ret,
- PyUnicode_FromString("')"));
+ PyObject *meta = metastr_to_unicode(&scal->obmeta, 1);
+ if (meta == NULL) {
+ return NULL;
+ }
+ ret = PyUnicode_FromFormat("numpy.datetime64('%s','%S')", iso, meta);
+ Py_DECREF(meta);
}
return ret;
@@ -542,7 +536,7 @@ static PyObject *
timedeltatype_repr(PyObject *self)
{
PyTimedeltaScalarObject *scal;
- PyObject *ret;
+ PyObject *val, *ret;
if (!PyArray_IsScalar(self, Timedelta)) {
PyErr_SetString(PyExc_RuntimeError,
@@ -554,32 +548,34 @@ timedeltatype_repr(PyObject *self)
/* The value */
if (scal->obval == NPY_DATETIME_NAT) {
- ret = PyUnicode_FromString("numpy.timedelta64('NaT'");
+ val = PyUnicode_FromString("'NaT'");
}
else {
- /*
- * Can't use "%lld" if HAVE_LONG_LONG is not defined
- */
+ /* Can't use "%lld" if HAVE_LONG_LONG is not defined */
#if defined(HAVE_LONG_LONG)
- ret = PyUnicode_FromFormat("numpy.timedelta64(%lld",
- (long long)scal->obval);
+ val = PyUnicode_FromFormat("%lld", (long long)scal->obval);
#else
- ret = PyUnicode_FromFormat("numpy.timedelta64(%ld",
- (long)scal->obval);
+ val = PyUnicode_FromFormat("%ld", (long)scal->obval);
#endif
}
+ if (val == NULL) {
+ return NULL;
+ }
+
/* The metadata unit */
if (scal->obmeta.base == NPY_FR_GENERIC) {
- PyUString_ConcatAndDel(&ret,
- PyUnicode_FromString(")"));
+ ret = PyUnicode_FromFormat("numpy.timedelta64(%S)", val);
}
else {
- PyUString_ConcatAndDel(&ret,
- PyUnicode_FromString(",'"));
- ret = append_metastr_to_string(&scal->obmeta, 1, ret);
- PyUString_ConcatAndDel(&ret,
- PyUnicode_FromString("')"));
+ PyObject *meta = metastr_to_unicode(&scal->obmeta, 1);
+ if (meta == NULL) {
+ Py_DECREF(val);
+ return NULL;
+ }
+ ret = PyUnicode_FromFormat("numpy.timedelta64(%S,'%S')", val, meta);
+ Py_DECREF(meta);
}
+ Py_DECREF(val);
return ret;
}
@@ -664,14 +660,12 @@ timedeltatype_str(PyObject *self)
* Can't use "%lld" if HAVE_LONG_LONG is not defined
*/
#if defined(HAVE_LONG_LONG)
- ret = PyUnicode_FromFormat("%lld ",
- (long long)(scal->obval * scal->obmeta.num));
+ ret = PyUnicode_FromFormat("%lld %s",
+ (long long)(scal->obval * scal->obmeta.num), basestr);
#else
- ret = PyUnicode_FromFormat("%ld ",
- (long)(scal->obval * scal->obmeta.num));
+ ret = PyUnicode_FromFormat("%ld %s",
+ (long)(scal->obval * scal->obmeta.num), basestr);
#endif
- PyUString_ConcatAndDel(&ret,
- PyUnicode_FromString(basestr));
}
return ret;
@@ -890,7 +884,7 @@ static PyObject *
static PyObject *
c@name@type_@kind@(PyObject *self)
{
- PyObject *rstr, *istr, *ret;
+ PyObject *rstr, *istr;
npy_c@name@ val = PyArrayScalar_VAL(self, C@Name@);
TrimMode trim = TrimMode_DptZeros;
@@ -903,16 +897,13 @@ c@name@type_@kind@(PyObject *self)
if (istr == NULL) {
return NULL;
}
-
- PyUString_ConcatAndDel(&istr, PyUnicode_FromString("j"));
- return istr;
+ PyObject *ret = PyUnicode_FromFormat("%Sj", istr);
+ Py_DECREF(istr);
+ return ret;
}
if (npy_isfinite(val.real)) {
rstr = @name@type_@kind@_either(val.real, trim, trim, 0);
- if (rstr == NULL) {
- return NULL;
- }
}
else if (npy_isnan(val.real)) {
rstr = PyUnicode_FromString("nan");
@@ -923,12 +914,12 @@ c@name@type_@kind@(PyObject *self)
else {
rstr = PyUnicode_FromString("-inf");
}
+ if (rstr == NULL) {
+ return NULL;
+ }
if (npy_isfinite(val.imag)) {
istr = @name@type_@kind@_either(val.imag, trim, trim, 1);
- if (istr == NULL) {
- return NULL;
- }
}
else if (npy_isnan(val.imag)) {
istr = PyUnicode_FromString("+nan");
@@ -939,11 +930,14 @@ c@name@type_@kind@(PyObject *self)
else {
istr = PyUnicode_FromString("-inf");
}
+ if (istr == NULL) {
+ Py_DECREF(rstr);
+ return NULL;
+ }
- ret = PyUnicode_FromString("(");
- PyUString_ConcatAndDel(&ret, rstr);
- PyUString_ConcatAndDel(&ret, istr);
- PyUString_ConcatAndDel(&ret, PyUnicode_FromString("j)"));
+ PyObject *ret = PyUnicode_FromFormat("(%S%Sj)", rstr, istr);
+ Py_DECREF(rstr);
+ Py_DECREF(istr);
return ret;
}
@@ -2952,7 +2946,7 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
* For a VOID scalar first see if obj is an integer or long
* and create new memory of that size (filled with 0) for the scalar
*/
- if (PyLong_Check(obj) || PyInt_Check(obj) ||
+ if (PyLong_Check(obj) ||
PyArray_IsScalar(obj, Integer) ||
(PyArray_Check(obj) &&
PyArray_NDIM((PyArrayObject *)obj)==0 &&
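
The repr/str output now built with PyUnicode_FromFormat, for reference:

    import numpy as np

    assert repr(np.datetime64("2020-01-01")) == "numpy.datetime64('2020-01-01')"
    assert repr(np.timedelta64(5, "ms")) == "numpy.timedelta64(5,'ms')"
    assert str(np.timedelta64(5, "ms")) == "5 milliseconds"
    assert repr(np.complex128(1 + 2j)) == "(1+2j)"
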
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 1a38fe956..02c349759 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -458,14 +458,12 @@ _attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims,
static void
raise_reshape_size_mismatch(PyArray_Dims *newshape, PyArrayObject *arr)
{
- PyObject *msg = PyUnicode_FromFormat("cannot reshape array of size %zd "
- "into shape ", PyArray_SIZE(arr));
PyObject *tmp = convert_shape_to_string(newshape->len, newshape->ptr, "");
-
- PyUString_ConcatAndDel(&msg, tmp);
- if (msg != NULL) {
- PyErr_SetObject(PyExc_ValueError, msg);
- Py_DECREF(msg);
+ if (tmp != NULL) {
+ PyErr_Format(PyExc_ValueError,
+ "cannot reshape array of size %zd into shape %S",
+ PyArray_SIZE(arr), tmp);
+ Py_DECREF(tmp);
}
}
@@ -979,55 +977,6 @@ PyArray_Flatten(PyArrayObject *a, NPY_ORDER order)
return (PyObject *)ret;
}
-/* See shape.h for parameters documentation */
-NPY_NO_EXPORT PyObject *
-build_shape_string(npy_intp n, npy_intp const *vals)
-{
- npy_intp i;
- PyObject *ret, *tmp;
-
- /*
- * Negative dimension indicates "newaxis", which can
- * be discarded for printing if it's a leading dimension.
- * Find the first non-"newaxis" dimension.
- */
- i = 0;
- while (i < n && vals[i] < 0) {
- ++i;
- }
-
- if (i == n) {
- return PyUnicode_FromFormat("()");
- }
- else {
- ret = PyUnicode_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
- if (ret == NULL) {
- return NULL;
- }
- }
-
- for (; i < n; ++i) {
- if (vals[i] < 0) {
- tmp = PyUnicode_FromString(",newaxis");
- }
- else {
- tmp = PyUnicode_FromFormat(",%" NPY_INTP_FMT, vals[i]);
- }
- if (tmp == NULL) {
- Py_DECREF(ret);
- return NULL;
- }
-
- PyUString_ConcatAndDel(&ret, tmp);
- if (ret == NULL) {
- return NULL;
- }
- }
-
- tmp = PyUnicode_FromFormat(")");
- PyUString_ConcatAndDel(&ret, tmp);
- return ret;
-}
/*NUMPY_API
*
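
The reshape size-mismatch error above, as seen from Python:

    import numpy as np

    try:
        np.arange(5).reshape(2, 3)
    except ValueError as exc:
        print(exc)  # cannot reshape array of size 5 into shape (2,3)
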
diff --git a/numpy/core/src/multiarray/shape.h b/numpy/core/src/multiarray/shape.h
index d25292556..875b5430f 100644
--- a/numpy/core/src/multiarray/shape.h
+++ b/numpy/core/src/multiarray/shape.h
@@ -2,13 +2,6 @@
#define _NPY_ARRAY_SHAPE_H_
/*
- * Builds a string representation of the shape given in 'vals'.
- * A negative value in 'vals' gets interpreted as newaxis.
- */
-NPY_NO_EXPORT PyObject *
-build_shape_string(npy_intp n, npy_intp const *vals);
-
-/*
* Creates a sorted stride perm matching the KEEPORDER behavior
* of the NpyIter object. Because this operates based on multiple
* input strides, the 'stride' member of the npy_stride_sort_item
diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c
index d75fd3130..3727567e0 100644
--- a/numpy/core/src/multiarray/usertypes.c
+++ b/numpy/core/src/multiarray/usertypes.c
@@ -38,6 +38,7 @@ maintainer email: oliphant.travis@ieee.org
#include "usertypes.h"
#include "dtypemeta.h"
+#include "scalartypes.h"
NPY_NO_EXPORT PyArray_Descr **userdescrs=NULL;
@@ -350,3 +351,123 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
return _append_new(&descr->f->cancastscalarkindto[scalar], totype);
}
}
+
+
+/*
+ * Legacy user DTypes implemented the common DType operation
+ * (as used in type promotion/result_type, and e.g. the type for
+ * concatenation), by using "safe cast" logic.
+ *
+ * New DTypes do have this behaviour generally, but we use can-cast
+ * when legacy user dtypes are involved.
+ */
+NPY_NO_EXPORT PyArray_DTypeMeta *
+legacy_userdtype_common_dtype_function(
+ PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
+{
+ int skind1 = NPY_NOSCALAR, skind2 = NPY_NOSCALAR, skind;
+
+ if (!other->legacy) {
+ /* legacy DTypes can always defer to new style ones */
+ Py_INCREF(Py_NotImplemented);
+ return (PyArray_DTypeMeta *)Py_NotImplemented;
+ }
+ /* Defer so that only one of the types handles the cast */
+ if (cls->type_num < other->type_num) {
+ Py_INCREF(Py_NotImplemented);
+ return (PyArray_DTypeMeta *)Py_NotImplemented;
+ }
+
+ /* Check whether casting is possible from one type to the other */
+ if (PyArray_CanCastSafely(cls->type_num, other->type_num)) {
+ Py_INCREF(other);
+ return other;
+ }
+ if (PyArray_CanCastSafely(other->type_num, cls->type_num)) {
+ Py_INCREF(cls);
+ return cls;
+ }
+
+ /*
+ * The following code used to be part of PyArray_PromoteTypes().
+ * We can expect that this code is never used.
+ * In principle, it allows for promotion of two different user dtypes
+ * to a single NumPy dtype of the same "kind". In practice
+ * using the same `kind` as NumPy was never possible due to a
+ * simplification where `PyArray_EquivTypes(descr1, descr2)` will
+ * return True if both kind and element size match (e.g. bfloat16 and
+ * float16 would be equivalent).
+ * The option is also very obscure and not used in the examples.
+ */
+
+ /* Convert the 'kind' char into a scalar kind */
+ switch (cls->kind) {
+ case 'b':
+ skind1 = NPY_BOOL_SCALAR;
+ break;
+ case 'u':
+ skind1 = NPY_INTPOS_SCALAR;
+ break;
+ case 'i':
+ skind1 = NPY_INTNEG_SCALAR;
+ break;
+ case 'f':
+ skind1 = NPY_FLOAT_SCALAR;
+ break;
+ case 'c':
+ skind1 = NPY_COMPLEX_SCALAR;
+ break;
+ }
+ switch (other->kind) {
+ case 'b':
+ skind2 = NPY_BOOL_SCALAR;
+ break;
+ case 'u':
+ skind2 = NPY_INTPOS_SCALAR;
+ break;
+ case 'i':
+ skind2 = NPY_INTNEG_SCALAR;
+ break;
+ case 'f':
+ skind2 = NPY_FLOAT_SCALAR;
+ break;
+ case 'c':
+ skind2 = NPY_COMPLEX_SCALAR;
+ break;
+ }
+
+ /* If both are scalars, there may be a promotion possible */
+ if (skind1 != NPY_NOSCALAR && skind2 != NPY_NOSCALAR) {
+
+ /* Start with the larger scalar kind */
+ skind = (skind1 > skind2) ? skind1 : skind2;
+ int ret_type_num = _npy_smallest_type_of_kind_table[skind];
+
+ for (;;) {
+
+ /* If there is no larger type of this kind, try a larger kind */
+ if (ret_type_num < 0) {
+ ++skind;
+ /* Use -1 to signal no promoted type found */
+ if (skind < NPY_NSCALARKINDS) {
+ ret_type_num = _npy_smallest_type_of_kind_table[skind];
+ }
+ else {
+ break;
+ }
+ }
+
+ /* If we found a type to which we can promote both, done! */
+ if (PyArray_CanCastSafely(cls->type_num, ret_type_num) &&
+ PyArray_CanCastSafely(other->type_num, ret_type_num)) {
+ return PyArray_DTypeFromTypeNum(ret_type_num);
+ }
+
+ /* Try the next larger type of this kind */
+ ret_type_num = _npy_next_larger_type_table[ret_type_num];
+ }
+ }
+
+ Py_INCREF(Py_NotImplemented);
+ return (PyArray_DTypeMeta *)Py_NotImplemented;
+}
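
A minimal Python sketch of the can-cast rule implemented above (hypothetical helper
name; the scalar-kind table walk is omitted):

    import numpy as np

    def legacy_common_dtype(a, b):
        # Safe cast in either direction decides the common dtype.
        a, b = np.dtype(a), np.dtype(b)
        if np.can_cast(a, b, casting="safe"):
            return b
        if np.can_cast(b, a, casting="safe"):
            return a
        return NotImplemented  # defer; the C code then tries the scalar-kind tables

    assert legacy_common_dtype("i2", "f4") == np.dtype("f4")
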
diff --git a/numpy/core/src/multiarray/usertypes.h b/numpy/core/src/multiarray/usertypes.h
index b3e386c5c..1b323d458 100644
--- a/numpy/core/src/multiarray/usertypes.h
+++ b/numpy/core/src/multiarray/usertypes.h
@@ -17,4 +17,8 @@ NPY_NO_EXPORT int
PyArray_RegisterCastFunc(PyArray_Descr *descr, int totype,
PyArray_VectorUnaryFunc *castfunc);
+NPY_NO_EXPORT PyArray_DTypeMeta *
+legacy_userdtype_common_dtype_function(
+ PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other);
+
#endif
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index 660c296d6..750fbeb92 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -461,6 +461,15 @@ addUfuncs(PyObject *dictionary) {
PyDict_SetItemString(dictionary, "cross1d", f);
Py_DECREF(f);
+ f = PyUFunc_FromFuncAndDataAndSignature(NULL, NULL,
+ NULL, 0, 0, 0, PyUFunc_None, "_pickleable_module_global.ufunc",
+ "A dotted name for pickle testing, does nothing.", 0, NULL);
+ if (f == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(dictionary, "_pickleable_module_global_ufunc", f);
+ Py_DECREF(f);
+
return 0;
}
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index f693eb5c2..8660ee413 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -5977,6 +5977,7 @@ _typecharfromnum(int num) {
return ret;
}
+
static PyObject *
ufunc_get_doc(PyUFuncObject *ufunc)
{
@@ -5997,18 +5998,18 @@ ufunc_get_doc(PyUFuncObject *ufunc)
* introspection on name and nin + nout to automate the first part
* of it the doc string shouldn't need the calling convention
*/
- doc = PyObject_CallFunctionObjArgs(
- _sig_formatter, (PyObject *)ufunc, NULL);
+ doc = PyObject_CallFunctionObjArgs(_sig_formatter,
+ (PyObject *)ufunc, NULL);
if (doc == NULL) {
return NULL;
}
if (ufunc->doc != NULL) {
- PyUString_ConcatAndDel(&doc,
- PyUnicode_FromFormat("\n\n%s", ufunc->doc));
+ Py_SETREF(doc, PyUnicode_FromFormat("%S\n\n%s", doc, ufunc->doc));
}
return doc;
}
+
static PyObject *
ufunc_get_nin(PyUFuncObject *ufunc)
{
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index aa6f34d59..3abeb2c5a 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -236,21 +236,6 @@ PyUFunc_ValidateCasting(PyUFuncObject *ufunc,
return 0;
}
-/*
- * Returns a new reference to type if it is already NBO, otherwise
- * returns a copy converted to NBO.
- */
-static PyArray_Descr *
-ensure_dtype_nbo(PyArray_Descr *type)
-{
- if (PyArray_ISNBO(type->byteorder)) {
- Py_INCREF(type);
- return type;
- }
- else {
- return PyArray_DescrNewByteorder(type, NPY_NATIVE);
- }
-}
/*UFUNC_API
*
diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py
index a6c8cc8b2..79954b998 100644
--- a/numpy/core/tests/test_array_coercion.py
+++ b/numpy/core/tests/test_array_coercion.py
@@ -324,6 +324,22 @@ class TestScalarDiscovery:
ass[()] = scalar
assert_array_equal(ass, cast)
+ @pytest.mark.parametrize("dtype_char", np.typecodes["All"])
+ def test_default_dtype_instance(self, dtype_char):
+ if dtype_char in "SU":
+ dtype = np.dtype(dtype_char + "1")
+ elif dtype_char == "V":
+            # Legacy behaviour was to use V8, because float64 is the
+            # default dtype and it has 8 bytes.
+ dtype = np.dtype("V8")
+ else:
+ dtype = np.dtype(dtype_char)
+
+ discovered_dtype, _ = _discover_array_parameters([], type(dtype))
+
+ assert discovered_dtype == dtype
+ assert discovered_dtype.itemsize == dtype.itemsize
+
class TestTimeScalars:
@pytest.mark.parametrize("dtype", [np.int64, np.float32])
@@ -619,3 +635,18 @@ class TestArrayLikes:
assert arr[()] is ArrayLike
arr = np.array([ArrayLike])
assert arr[0] is ArrayLike
+
+ @pytest.mark.skipif(
+ np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
+ def test_too_large_array_error_paths(self):
+ """Test the error paths, including for memory leaks"""
+ arr = np.array(0, dtype="uint8")
+ # Guarantees that a contiguous copy won't work:
+ arr = np.broadcast_to(arr, 2**62)
+
+ for i in range(5):
+ # repeat, to ensure caching cannot have an effect:
+ with pytest.raises(MemoryError):
+ np.array(arr)
+ with pytest.raises(MemoryError):
+ np.array([arr])
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index ae5ee4c88..f5428f98c 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -941,7 +941,7 @@ class TestTypes:
return
res = np.promote_types(dtype, dtype)
- if res.char in "?bhilqpBHILQPefdgFDGOmM":
+ if res.char in "?bhilqpBHILQPefdgFDGOmM" or dtype.type is rational:
# Metadata is lost for simple promotions (they create a new dtype)
assert res.metadata is None
else:
@@ -976,41 +976,20 @@ class TestTypes:
# Promotion failed, this test only checks metadata
return
- # The rules for when metadata is preserved and which dtypes metadta
- # will be used are very confusing and depend on multiple paths.
- # This long if statement attempts to reproduce this:
- if dtype1.type is rational or dtype2.type is rational:
- # User dtype promotion preserves byte-order here:
- if np.can_cast(res, dtype1):
- assert res.metadata == dtype1.metadata
- else:
- assert res.metadata == dtype2.metadata
-
- elif res.char in "?bhilqpBHILQPefdgFDGOmM":
+ if res.char in "?bhilqpBHILQPefdgFDGOmM" or res.type is rational:
# All simple types lose metadata (due to using promotion table):
assert res.metadata is None
- elif res.kind in "SU" and dtype1 == dtype2:
- # Strings give precedence to the second dtype:
- assert res is dtype2
elif res == dtype1:
# If one result is the result, it is usually returned unchanged:
assert res is dtype1
elif res == dtype2:
- # If one result is the result, it is usually returned unchanged:
- assert res is dtype2
- elif dtype1.kind == "S" and dtype2.kind == "U":
- # Promotion creates a new unicode dtype from scratch
- assert res.metadata is None
- elif dtype1.kind == "U" and dtype2.kind == "S":
- # Promotion creates a new unicode dtype from scratch
- assert res.metadata is None
- elif res.kind in "SU" and dtype2.kind != res.kind:
- # We build on top of dtype1:
- assert res.metadata == dtype1.metadata
- elif res.kind in "SU" and res.kind == dtype1.kind:
- assert res.metadata == dtype1.metadata
- elif res.kind in "SU" and res.kind == dtype2.kind:
- assert res.metadata == dtype2.metadata
+ # dtype1 may have been cast to the same type/kind as dtype2.
+ # If the resulting dtype is identical we currently pick the cast
+ # version of dtype1, which lost the metadata:
+ if np.promote_types(dtype1, dtype2.kind) == dtype2:
+                assert res.metadata is None
+ else:
+                assert res.metadata == metadata2
else:
assert res.metadata is None
@@ -1025,6 +1004,24 @@ class TestTypes:
assert res_bs == res
assert res_bs.metadata == res.metadata
+ @pytest.mark.parametrize(["dtype1", "dtype2"],
+ [[np.dtype("V6"), np.dtype("V10")],
+ [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])],
+ [np.dtype("i8,i8"), np.dtype("i4,i4")],
+ ])
+ def test_invalid_void_promotion(self, dtype1, dtype2):
+ # Mainly test structured void promotion, which currently allows
+ # byte-swapping, but nothing else:
+ with pytest.raises(TypeError):
+ np.promote_types(dtype1, dtype2)
+
+ @pytest.mark.parametrize(["dtype1", "dtype2"],
+ [[np.dtype("V10"), np.dtype("V10")],
+ [np.dtype([("name1", "<i8")]), np.dtype([("name1", ">i8")])],
+ [np.dtype("i8,i8"), np.dtype("i8,>i8")],
+ ])
+ def test_valid_void_promotion(self, dtype1, dtype2):
+ assert np.promote_types(dtype1, dtype2) is dtype1
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
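
For orientation, the structured-void promotion rules exercised by the two new tests boil down to: identical layouts (up to byte order) promote, anything else raises TypeError. A small sketch against the behaviour asserted above (valid at the time of this change; later releases may relax the rules):

    import numpy as np

    print(np.promote_types("i8,i8", "i8,>i8"))   # the dtype1 layout is returned
    try:
        np.promote_types("i8,i8", "i4,i4")        # different field sizes: rejected
    except TypeError as exc:
        print("rejected:", exc)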
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 9eaa1a977..0e9760853 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -178,6 +178,10 @@ class TestUfuncGenericLoops:
assert_array_equal(res_num.astype("O"), res_obj)
+def _pickleable_module_global():
+ pass
+
+
class TestUfunc:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
@@ -195,6 +199,15 @@ class TestUfunc:
b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert_(pickle.loads(astring) is np.cos)
+ def test_pickle_name_is_qualname(self):
+ # This tests that a simplification of our ufunc pickle code will
+ # lead to allowing qualnames as names. Future ufuncs should
+        # possibly add a specific qualname, or a hook into pickling instead
+ # (dask+numba may benefit).
+ _pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc
+ obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc))
+ assert obj is umt._pickleable_module_global_ufunc
+
def test_reduceat_shifting_sum(self):
L = 6
x = np.arange(L)
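
For context, the new test leans on the fact that a ufunc pickles by its (dotted) name and unpickles to the very same object. A minimal sketch with a public ufunc, using only standard pickle behaviour:

    import pickle
    import numpy as np

    data = pickle.dumps(np.add)            # ufuncs are pickled via their name
    assert pickle.loads(data) is np.add    # the same singleton object comes back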
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 19f7482f2..c3bd6347c 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -415,6 +415,89 @@ def get_standard_file(fname):
return filenames
+def _parse_env_order(base_order, env):
+ """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order`
+
+    This method splits the environment variable and checks that each individual element is present in `base_order`.
+
+    The items in the environment variable may be negated via '^item' or '!itema,itemb'.
+    The variable must start with '^' or '!' to use negation; all listed items are then removed from the order.
+
+ Raises
+ ------
+ ValueError: for mixed negated and non-negated orders or multiple negated orders
+
+ Parameters
+ ----------
+ base_order : list of str
+ the base list of orders
+ env : str
+        the name of the environment variable to be parsed; if it is not set, `base_order` is returned
+
+ Returns
+ -------
+ allow_order : list of str
+ allowed orders in lower-case
+ unknown_order : list of str
+ for values not overlapping with `base_order`
+ """
+ order_str = os.environ.get(env, None)
+
+ # ensure all base-orders are lower-case (for easier comparison)
+ base_order = [order.lower() for order in base_order]
+ if order_str is None:
+ return base_order, []
+
+ neg = order_str.startswith('^') or order_str.startswith('!')
+ # Check format
+ order_str_l = list(order_str)
+ sum_neg = order_str_l.count('^') + order_str_l.count('!')
+ if neg:
+ if sum_neg > 1:
+ raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}")
+ # remove prefix
+ order_str = order_str[1:]
+ elif sum_neg > 0:
+ raise ValueError(f"Environment variable '{env}' may not mix negated an non-negated items: {order_str}")
+
+ # Split and lower case
+ orders = order_str.lower().split(',')
+
+    # to inform the caller about non-overlapping elements
+ unknown_order = []
+
+ # if negated, we have to remove from the order
+ if neg:
+ allow_order = base_order.copy()
+
+ for order in orders:
+ if not order:
+ continue
+
+ if order not in base_order:
+ unknown_order.append(order)
+ continue
+
+ if order in allow_order:
+ allow_order.remove(order)
+
+ else:
+ allow_order = []
+
+ for order in orders:
+ if not order:
+ continue
+
+ if order not in base_order:
+ unknown_order.append(order)
+ continue
+
+ if order not in allow_order:
+ allow_order.append(order)
+
+ return allow_order, unknown_order
+
+
def get_info(name, notfound_action=0):
"""
notfound_action:
@@ -1766,24 +1849,11 @@ class lapack_opt_info(system_info):
return getattr(self, '_calc_info_{}'.format(name))()
def calc_info(self):
- user_order = os.environ.get(self.order_env_var_name, None)
- if user_order is None:
- lapack_order = self.lapack_order
- else:
- # the user has requested the order of the
- # check they are all in the available list, a COMMA SEPARATED list
- user_order = user_order.lower().split(',')
- non_existing = []
- lapack_order = []
- for order in user_order:
- if order in self.lapack_order:
- lapack_order.append(order)
- elif len(order) > 0:
- non_existing.append(order)
- if len(non_existing) > 0:
- raise ValueError("lapack_opt_info user defined "
- "LAPACK order has unacceptable "
- "values: {}".format(non_existing))
+ lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name)
+ if len(unknown_order) > 0:
+ raise ValueError("lapack_opt_info user defined "
+ "LAPACK order has unacceptable "
+ "values: {}".format(unknown_order))
for lapack in lapack_order:
if self._calc_info(lapack):
@@ -1911,22 +1981,9 @@ class blas_opt_info(system_info):
return getattr(self, '_calc_info_{}'.format(name))()
def calc_info(self):
- user_order = os.environ.get(self.order_env_var_name, None)
- if user_order is None:
- blas_order = self.blas_order
- else:
- # the user has requested the order of the
- # check they are all in the available list
- user_order = user_order.lower().split(',')
- non_existing = []
- blas_order = []
- for order in user_order:
- if order in self.blas_order:
- blas_order.append(order)
- elif len(order) > 0:
- non_existing.append(order)
- if len(non_existing) > 0:
- raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(non_existing))
+ blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name)
+ if len(unknown_order) > 0:
+ raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order))
for blas in blas_order:
if self._calc_info(blas):
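
The intended interpretation of the NPY_BLAS_ORDER / NPY_LAPACK_ORDER strings is easiest to see with a short sketch using the private helper added above (the backend names are illustrative):

    import os
    from numpy.distutils.system_info import _parse_env_order

    base = ["mkl", "openblas", "atlas", "blas"]

    os.environ["NPY_BLAS_ORDER"] = "openblas,atlas"
    print(_parse_env_order(base, "NPY_BLAS_ORDER"))   # (['openblas', 'atlas'], [])

    os.environ["NPY_BLAS_ORDER"] = "^mkl"             # or "!mkl"
    print(_parse_env_order(base, "NPY_BLAS_ORDER"))   # (['openblas', 'atlas', 'blas'], [])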
diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py
index 0768ffdde..ec15126f7 100644
--- a/numpy/distutils/tests/test_system_info.py
+++ b/numpy/distutils/tests/test_system_info.py
@@ -284,4 +284,37 @@ class TestSystemInfoReading:
assert info.get_lib_dirs() == lib_dirs
finally:
os.chdir(previousDir)
-
+
+
+def test_distutils_parse_env_order(monkeypatch):
+ from numpy.distutils.system_info import _parse_env_order
+ env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER'
+
+ base_order = list('abcdef')
+
+ monkeypatch.setenv(env, 'b,i,e,f')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 3
+ assert order == list('bef')
+ assert len(unknown) == 1
+
+ # For when LAPACK/BLAS optimization is disabled
+ monkeypatch.setenv(env, '')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 0
+ assert len(unknown) == 0
+
+ for prefix in '^!':
+ monkeypatch.setenv(env, f'{prefix}b,i,e')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 4
+ assert order == list('acdf')
+ assert len(unknown) == 1
+
+ with pytest.raises(ValueError):
+ monkeypatch.setenv(env, 'b,^e,i')
+ _parse_env_order(base_order, env)
+
+ with pytest.raises(ValueError):
+ monkeypatch.setenv(env, '!b,^e,i')
+ _parse_env_order(base_order, env)
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index 94867b309..26b43e7e6 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -629,7 +629,9 @@ capi_fail:
"""
needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN']
cfuncs['string_from_pyobj'] = """\
-static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) {
+static int
+string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess)
+{
PyArrayObject *arr = NULL;
PyObject *tmp = NULL;
#ifdef DEBUGCFUNCS
@@ -684,127 +686,165 @@ capi_fail:
Py_XDECREF(tmp);
{
PyObject* err = PyErr_Occurred();
- if (err==NULL) err = #modulename#_error;
- PyErr_SetString(err,errmess);
+ if (err == NULL) {
+ err = #modulename#_error;
+ }
+ PyErr_SetString(err, errmess);
}
return 0;
}
"""
+
+
needs['char_from_pyobj'] = ['int_from_pyobj']
cfuncs['char_from_pyobj'] = """\
-static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) {
- int i=0;
- if (int_from_pyobj(&i,obj,errmess)) {
+static int
+char_from_pyobj(char* v, PyObject *obj, const char *errmess) {
+ int i = 0;
+ if (int_from_pyobj(&i, obj, errmess)) {
*v = (char)i;
return 1;
}
return 0;
}
"""
+
+
needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char']
cfuncs['signed_char_from_pyobj'] = """\
-static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) {
- int i=0;
- if (int_from_pyobj(&i,obj,errmess)) {
+static int
+signed_char_from_pyobj(signed_char* v, PyObject *obj, const char *errmess) {
+ int i = 0;
+ if (int_from_pyobj(&i, obj, errmess)) {
*v = (signed_char)i;
return 1;
}
return 0;
}
"""
+
+
needs['short_from_pyobj'] = ['int_from_pyobj']
cfuncs['short_from_pyobj'] = """\
-static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) {
- int i=0;
- if (int_from_pyobj(&i,obj,errmess)) {
+static int
+short_from_pyobj(short* v, PyObject *obj, const char *errmess) {
+ int i = 0;
+ if (int_from_pyobj(&i, obj, errmess)) {
*v = (short)i;
return 1;
}
return 0;
}
"""
+
+
cfuncs['int_from_pyobj'] = """\
-static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) {
+static int
+int_from_pyobj(int* v, PyObject *obj, const char *errmess)
+{
PyObject* tmp = NULL;
- if (PyInt_Check(obj)) {
- *v = (int)PyInt_AS_LONG(obj);
- return 1;
+
+ if (PyLong_Check(obj)) {
+ *v = Npy__PyLong_AsInt(obj);
+ return !(*v == -1 && PyErr_Occurred());
}
+
tmp = PyNumber_Long(obj);
if (tmp) {
- *v = PyInt_AS_LONG(tmp);
+ *v = Npy__PyLong_AsInt(tmp);
Py_DECREF(tmp);
- return 1;
+ return !(*v == -1 && PyErr_Occurred());
}
+
if (PyComplex_Check(obj))
tmp = PyObject_GetAttrString(obj,\"real\");
else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
/*pass*/;
else if (PySequence_Check(obj))
- tmp = PySequence_GetItem(obj,0);
+ tmp = PySequence_GetItem(obj, 0);
if (tmp) {
PyErr_Clear();
- if (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ if (int_from_pyobj(v, tmp, errmess)) {
+ Py_DECREF(tmp);
+ return 1;
+ }
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
- if (err==NULL) err = #modulename#_error;
- PyErr_SetString(err,errmess);
+ if (err == NULL) {
+ err = #modulename#_error;
+ }
+ PyErr_SetString(err, errmess);
}
return 0;
}
"""
+
+
cfuncs['long_from_pyobj'] = """\
-static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) {
+static int
+long_from_pyobj(long* v, PyObject *obj, const char *errmess) {
PyObject* tmp = NULL;
- if (PyInt_Check(obj)) {
- *v = PyInt_AS_LONG(obj);
- return 1;
+
+ if (PyLong_Check(obj)) {
+ *v = PyLong_AsLong(obj);
+ return !(*v == -1 && PyErr_Occurred());
}
+
tmp = PyNumber_Long(obj);
if (tmp) {
- *v = PyInt_AS_LONG(tmp);
+ *v = PyLong_AsLong(tmp);
Py_DECREF(tmp);
- return 1;
+ return !(*v == -1 && PyErr_Occurred());
}
+
if (PyComplex_Check(obj))
tmp = PyObject_GetAttrString(obj,\"real\");
else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
/*pass*/;
else if (PySequence_Check(obj))
tmp = PySequence_GetItem(obj,0);
+
if (tmp) {
PyErr_Clear();
- if (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ if (long_from_pyobj(v, tmp, errmess)) {
+ Py_DECREF(tmp);
+ return 1;
+ }
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
- if (err==NULL) err = #modulename#_error;
- PyErr_SetString(err,errmess);
+ if (err == NULL) {
+ err = #modulename#_error;
+ }
+ PyErr_SetString(err, errmess);
}
return 0;
}
"""
+
+
needs['long_long_from_pyobj'] = ['long_long']
cfuncs['long_long_from_pyobj'] = """\
-static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) {
+static int
+long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess)
+{
PyObject* tmp = NULL;
+
if (PyLong_Check(obj)) {
*v = PyLong_AsLongLong(obj);
- return (!PyErr_Occurred());
- }
- if (PyInt_Check(obj)) {
- *v = (long_long)PyInt_AS_LONG(obj);
- return 1;
+ return !(*v == -1 && PyErr_Occurred());
}
+
tmp = PyNumber_Long(obj);
if (tmp) {
*v = PyLong_AsLongLong(tmp);
Py_DECREF(tmp);
- return (!PyErr_Occurred());
+ return !(*v == -1 && PyErr_Occurred());
}
+
if (PyComplex_Check(obj))
tmp = PyObject_GetAttrString(obj,\"real\");
else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
@@ -813,58 +853,64 @@ static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess)
tmp = PySequence_GetItem(obj,0);
if (tmp) {
PyErr_Clear();
- if (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ if (long_long_from_pyobj(v, tmp, errmess)) {
+ Py_DECREF(tmp);
+ return 1;
+ }
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
- if (err==NULL) err = #modulename#_error;
+ if (err == NULL) {
+ err = #modulename#_error;
+ }
PyErr_SetString(err,errmess);
}
return 0;
}
"""
+
+
needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double']
cfuncs['long_double_from_pyobj'] = """\
-static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) {
+static int
+long_double_from_pyobj(long_double* v, PyObject *obj, const char *errmess)
+{
double d=0;
if (PyArray_CheckScalar(obj)){
if PyArray_IsScalar(obj, LongDouble) {
PyArray_ScalarAsCtype(obj, v);
return 1;
}
- else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) {
+ else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) {
(*v) = *((npy_longdouble *)PyArray_DATA(obj));
return 1;
}
}
- if (double_from_pyobj(&d,obj,errmess)) {
+ if (double_from_pyobj(&d, obj, errmess)) {
*v = (long_double)d;
return 1;
}
return 0;
}
"""
+
+
cfuncs['double_from_pyobj'] = """\
-static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) {
+static int
+double_from_pyobj(double* v, PyObject *obj, const char *errmess)
+{
PyObject* tmp = NULL;
if (PyFloat_Check(obj)) {
-#ifdef __sgi
*v = PyFloat_AsDouble(obj);
-#else
- *v = PyFloat_AS_DOUBLE(obj);
-#endif
- return 1;
+ return !(*v == -1.0 && PyErr_Occurred());
}
+
tmp = PyNumber_Float(obj);
if (tmp) {
-#ifdef __sgi
*v = PyFloat_AsDouble(tmp);
-#else
- *v = PyFloat_AS_DOUBLE(tmp);
-#endif
Py_DECREF(tmp);
- return 1;
+ return !(*v == -1.0 && PyErr_Occurred());
}
if (PyComplex_Check(obj))
tmp = PyObject_GetAttrString(obj,\"real\");
@@ -885,9 +931,13 @@ static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) {
return 0;
}
"""
+
+
needs['float_from_pyobj'] = ['double_from_pyobj']
cfuncs['float_from_pyobj'] = """\
-static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) {
+static int
+float_from_pyobj(float* v, PyObject *obj, const char *errmess)
+{
double d=0.0;
if (double_from_pyobj(&d,obj,errmess)) {
*v = (float)d;
@@ -896,11 +946,15 @@ static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) {
return 0;
}
"""
+
+
needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double',
'complex_double_from_pyobj']
cfuncs['complex_long_double_from_pyobj'] = """\
-static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) {
- complex_double cd={0.0,0.0};
+static int
+complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess)
+{
+ complex_double cd = {0.0,0.0};
if (PyArray_CheckScalar(obj)){
if PyArray_IsScalar(obj, CLongDouble) {
PyArray_ScalarAsCtype(obj, v);
@@ -920,13 +974,17 @@ static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,c
return 0;
}
"""
+
+
needs['complex_double_from_pyobj'] = ['complex_double']
cfuncs['complex_double_from_pyobj'] = """\
-static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) {
+static int
+complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) {
Py_complex c;
if (PyComplex_Check(obj)) {
- c=PyComplex_AsCComplex(obj);
- (*v).r=c.real, (*v).i=c.imag;
+ c = PyComplex_AsCComplex(obj);
+ (*v).r = c.real;
+ (*v).i = c.imag;
return 1;
}
if (PyArray_IsScalar(obj, ComplexFloating)) {
@@ -955,28 +1013,22 @@ static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char
else {
arr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE));
}
- if (arr==NULL) return 0;
+ if (arr == NULL) {
+ return 0;
+ }
(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real;
(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag;
return 1;
}
/* Python does not provide PyNumber_Complex function :-( */
- (*v).i=0.0;
+ (*v).i = 0.0;
if (PyFloat_Check(obj)) {
-#ifdef __sgi
(*v).r = PyFloat_AsDouble(obj);
-#else
- (*v).r = PyFloat_AS_DOUBLE(obj);
-#endif
- return 1;
- }
- if (PyInt_Check(obj)) {
- (*v).r = (double)PyInt_AS_LONG(obj);
- return 1;
+ return !((*v).r == -1.0 && PyErr_Occurred());
}
if (PyLong_Check(obj)) {
(*v).r = PyLong_AsDouble(obj);
- return (!PyErr_Occurred());
+ return !((*v).r == -1.0 && PyErr_Occurred());
}
if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) {
PyObject *tmp = PySequence_GetItem(obj,0);
@@ -997,10 +1049,14 @@ static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char
return 0;
}
"""
+
+
needs['complex_float_from_pyobj'] = [
'complex_float', 'complex_double_from_pyobj']
cfuncs['complex_float_from_pyobj'] = """\
-static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) {
+static int
+complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess)
+{
complex_double cd={0.0,0.0};
if (complex_double_from_pyobj(&cd,obj,errmess)) {
(*v).r = (float)cd.r;
@@ -1010,6 +1066,8 @@ static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *e
return 0;
}
"""
+
+
needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
@@ -1047,14 +1105,18 @@ needs['try_pyarr_from_complex_double'] = [
cfuncs[
'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
-needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX']
+needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX']
# create the list of arguments to be used when calling back to python
cfuncs['create_cb_arglist'] = """\
-static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) {
+static int
+create_cb_arglist(PyObject* fun, PyTupleObject* xa , const int maxnofargs,
+ const int nofoptargs, int *nofargs, PyTupleObject **args,
+ const char *errmess)
+{
PyObject *tmp = NULL;
PyObject *tmp_fun = NULL;
- int tot,opt,ext,siz,i,di=0;
+ Py_ssize_t tot, opt, ext, siz, i, di = 0;
CFUNCSMESS(\"create_cb_arglist\\n\");
tot=opt=ext=siz=0;
/* Get the total number of arguments */
@@ -1103,10 +1165,15 @@ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofarg
Py_INCREF(tmp_fun);
}
}
-if (tmp_fun==NULL) {
-fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name));
-goto capi_fail;
-}
+
+ if (tmp_fun == NULL) {
+ fprintf(stderr,
+ \"Call-back argument must be function|instance|instance.__call__|f2py-function \"
+ \"but got %s.\\n\",
+ ((fun == NULL) ? \"NULL\" : Py_TYPE(fun)->tp_name));
+ goto capi_fail;
+ }
+
if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) {
PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\");
@@ -1114,7 +1181,7 @@ goto capi_fail;
if (tmp_argcount == NULL) {
goto capi_fail;
}
- tot = PyInt_AsLong(tmp_argcount) - di;
+ tot = PyLong_AsSsize_t(tmp_argcount) - di;
Py_DECREF(tmp_argcount);
}
}
@@ -1130,13 +1197,23 @@ goto capi_fail;
/* Calculate the size of call-backs argument list */
siz = MIN(maxnofargs+ext,tot);
*nofargs = MAX(0,siz-ext);
+
#ifdef DEBUGCFUNCS
- fprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs);
+ fprintf(stderr,
+ \"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),\"
+ \"tot,opt,ext,siz,nofargs = %d(-%d), %zd, %zd, %zd, %zd, %d\\n\",
+ maxnofargs, nofoptargs, tot, opt, ext, siz, *nofargs);
#endif
- if (siz<tot-opt) {
- fprintf(stderr,\"create_cb_arglist: Failed to build argument list (siz) with enough arguments (tot-opt) required by user-supplied function (siz,tot,opt=%d,%d,%d).\\n\",siz,tot,opt);
+
+ if (siz < tot-opt) {
+ fprintf(stderr,
+ \"create_cb_arglist: Failed to build argument list \"
+ \"(siz) with enough arguments (tot-opt) required by \"
+ \"user-supplied function (siz,tot,opt=%zd, %zd, %zd).\\n\",
+ siz, tot, opt);
goto capi_fail;
}
+
/* Initialize argument list */
*args = (PyTupleObject *)PyTuple_New(siz);
for (i=0;i<*nofargs;i++) {
@@ -1152,9 +1229,10 @@ goto capi_fail;
CFUNCSMESS(\"create_cb_arglist-end\\n\");
Py_DECREF(tmp_fun);
return 1;
+
capi_fail:
- if ((PyErr_Occurred())==NULL)
- PyErr_SetString(#modulename#_error,errmess);
+ if (PyErr_Occurred() == NULL)
+ PyErr_SetString(#modulename#_error, errmess);
Py_XDECREF(tmp_fun);
return 0;
}
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index aa46c57d0..3275f90ad 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -213,6 +213,8 @@ format_def(char *buf, Py_ssize_t size, FortranDataDef def)
return -1;
}
memcpy(p, notalloc, sizeof(notalloc));
+ p += sizeof(notalloc);
+ size -= sizeof(notalloc);
}
return p - buf;
@@ -255,7 +257,7 @@ fortran_doc(FortranDataDef def)
}
else {
PyArray_Descr *d = PyArray_DescrFromType(def.type);
- n = PyOS_snprintf(p, size, "'%c'-", d->type);
+ n = PyOS_snprintf(p, size, "%s : '%c'-", def.name, d->type);
Py_DECREF(d);
if (n < 0 || n >= size) {
goto fail;
@@ -264,7 +266,7 @@ fortran_doc(FortranDataDef def)
size -= n;
if (def.data == NULL) {
- n = format_def(p, size, def) == -1;
+ n = format_def(p, size, def);
if (n < 0) {
goto fail;
}
@@ -288,6 +290,7 @@ fortran_doc(FortranDataDef def)
p += n;
size -= n;
}
+
}
if (size <= 1) {
goto fail;
diff --git a/numpy/f2py/tests/src/module_data/mod.mod b/numpy/f2py/tests/src/module_data/mod.mod
new file mode 100644
index 000000000..8670a97e9
--- /dev/null
+++ b/numpy/f2py/tests/src/module_data/mod.mod
Binary files differ
diff --git a/numpy/f2py/tests/src/module_data/module_data_docstring.f90 b/numpy/f2py/tests/src/module_data/module_data_docstring.f90
new file mode 100644
index 000000000..4505e0cbc
--- /dev/null
+++ b/numpy/f2py/tests/src/module_data/module_data_docstring.f90
@@ -0,0 +1,12 @@
+module mod
+ integer :: i
+ integer :: x(4)
+ real, dimension(2,3) :: a
+ real, allocatable, dimension(:,:) :: b
+contains
+ subroutine foo
+ integer :: k
+ k = 1
+ a(1,2) = a(1,2)+3
+ end subroutine foo
+end module mod
diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py
index e431f5ba6..7d725165b 100644
--- a/numpy/f2py/tests/test_block_docstring.py
+++ b/numpy/f2py/tests/test_block_docstring.py
@@ -19,5 +19,5 @@ class TestBlockDocString(util.F2PyTest):
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_block_docstring(self):
- expected = "'i'-array(2,3)\n"
+ expected = "bar : 'i'-array(2,3)\n"
assert_equal(self.module.block.__doc__, expected)
diff --git a/numpy/f2py/tests/test_module_doc.py b/numpy/f2py/tests/test_module_doc.py
new file mode 100644
index 000000000..4b9555cee
--- /dev/null
+++ b/numpy/f2py/tests/test_module_doc.py
@@ -0,0 +1,30 @@
+import os
+import sys
+import pytest
+import textwrap
+
+from . import util
+from numpy.testing import assert_equal, IS_PYPY
+
+
+def _path(*a):
+ return os.path.join(*((os.path.dirname(__file__),) + a))
+
+
+class TestModuleDocString(util.F2PyTest):
+ sources = [_path('src', 'module_data', 'module_data_docstring.f90')]
+
+ @pytest.mark.skipif(sys.platform=='win32',
+ reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.xfail(IS_PYPY,
+ reason="PyPy cannot modify tp_doc after PyType_Ready")
+ def test_module_docstring(self):
+ assert_equal(self.module.mod.__doc__,
+ textwrap.dedent('''\
+ i : 'i'-scalar
+ x : 'i'-array(4)
+ a : 'f'-array(2,3)
+ b : 'f'-array(-1,-1), not allocated\x00
+ foo()\n
+ Wrapper for ``foo``.\n\n''')
+ )
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 6a2ad004c..9464692e0 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -1,28 +1,17 @@
"""
Set operations for arrays based on sorting.
-:Contains:
- unique,
- isin,
- ediff1d,
- intersect1d,
- setxor1d,
- in1d,
- union1d,
- setdiff1d
-
-:Notes:
+Notes
+-----
For floating point arrays, inaccurate results may appear due to usual round-off
and floating point comparison issues.
Speed could be gained in some operations by an implementation of
-sort(), that can provide directly the permutation vectors, avoiding
-thus calls to argsort().
+`numpy.sort`, which could directly provide the permutation vectors, thus
+avoiding calls to `numpy.argsort`.
-To do: Optionally return indices analogously to unique for all functions.
-
-:Author: Robert Cimrman
+Original author: Robert Cimrman
"""
import functools
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 92f93d671..b6d860dfa 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -2206,8 +2206,8 @@ def lstsq(a, b, rcond="warn"):
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(1,), (K,), (0,)} ndarray
- Sums of residuals; squared Euclidean 2-norm for each column in
- ``b - a*x``.
+ Sums of squared residuals: Squared Euclidean 2-norm for each column in
+ ``b - a @ x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
@@ -2558,7 +2558,7 @@ def norm(x, ord=None, axis=None, keepdims=False):
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
- # None of the str-type keywords for ord ('fro', 'nuc')
+ # None of the str-type keywords for ord ('fro', 'nuc')
# are valid for vectors
elif isinstance(ord, str):
raise ValueError(f"Invalid norm order '{ord}' for vectors")
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index b5371f51a..4e320576b 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -443,9 +443,9 @@ def _check_fill_value(fill_value, ndtype):
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
- except ValueError:
+ except ValueError as e:
err_msg = "Unable to transform %s to dtype %s"
- raise ValueError(err_msg % (fill_value, ndtype))
+ raise ValueError(err_msg % (fill_value, ndtype)) from e
else:
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),
@@ -460,12 +460,12 @@ def _check_fill_value(fill_value, ndtype):
# Also in case of converting string arrays.
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
- except (OverflowError, ValueError):
+ except (OverflowError, ValueError) as e:
# Raise TypeError instead of OverflowError or ValueError.
# OverflowError is seldom used, and the real problem here is
# that the passed fill_value is not compatible with the ndtype.
err_msg = "Cannot convert fill_value %s to dtype %s"
- raise TypeError(err_msg % (fill_value, ndtype))
+ raise TypeError(err_msg % (fill_value, ndtype)) from e
return np.array(fill_value)
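
The ``raise ... from e`` form added here chains the original exception so the underlying cause stays visible in the traceback. A minimal illustration of the pattern:

    try:
        try:
            int("not a number")
        except ValueError as e:
            raise TypeError("incompatible fill_value") from e
    except TypeError as exc:
        print(type(exc.__cause__))   # <class 'ValueError'> -- the cause is preserved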
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index e40dcefe3..bff023d7f 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -4521,7 +4521,7 @@ def default_rng(seed=None):
unpredictable entropy will be pulled from the OS. If an ``int`` or
``array_like[ints]`` is passed, then it will be passed to
`SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in a`SeedSequence` instance
+ pass in a `SeedSequence` instance.
Additionally, when passed a `BitGenerator`, it will be wrapped by
`Generator`. If passed a `Generator`, it will be returned unaltered.
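
A short sketch of the seeding options the corrected sentence describes:

    import numpy as np

    ss = np.random.SeedSequence(12345)
    rng = np.random.default_rng(ss)             # a SeedSequence instance is accepted
    assert np.random.default_rng(rng) is rng    # a Generator is returned unaltered
    print(rng.integers(0, 10, size=3))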
diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py
new file mode 100644
index 000000000..5e14b708f
--- /dev/null
+++ b/numpy/typing/_callable.py
@@ -0,0 +1,136 @@
+"""
+A module with various ``typing.Protocol`` subclasses that implement
+the ``__call__`` magic method.
+
+See the `Mypy documentation`_ on protocols for more details.
+
+.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols
+
+"""
+
+import sys
+from typing import Union, TypeVar, overload, Any
+
+from numpy import (
+ _BoolLike,
+ _IntLike,
+ _FloatLike,
+ _ComplexLike,
+ _NumberLike,
+ generic,
+ bool_,
+ timedelta64,
+ number,
+ integer,
+ unsignedinteger,
+ signedinteger,
+ int32,
+ int64,
+ floating,
+ float32,
+ float64,
+ complexfloating,
+ complex128,
+)
+
+if sys.version_info >= (3, 8):
+ from typing import Protocol
+ HAVE_PROTOCOL = True
+else:
+ try:
+ from typing_extensions import Protocol
+ except ImportError:
+ HAVE_PROTOCOL = False
+ else:
+ HAVE_PROTOCOL = True
+
+if HAVE_PROTOCOL:
+ _NumberType = TypeVar("_NumberType", bound=number)
+ _NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
+ _GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
+
+ class _BoolOp(Protocol[_GenericType_co]):
+ @overload
+ def __call__(self, __other: _BoolLike) -> _GenericType_co: ...
+ @overload # platform dependent
+ def __call__(self, __other: int) -> Union[int32, int64]: ...
+ @overload
+ def __call__(self, __other: float) -> float64: ...
+ @overload
+ def __call__(self, __other: complex) -> complex128: ...
+ @overload
+ def __call__(self, __other: _NumberType) -> _NumberType: ...
+
+ class _BoolSub(Protocol):
+ # Note that `__other: bool_` is absent here
+ @overload # platform dependent
+ def __call__(self, __other: int) -> Union[int32, int64]: ...
+ @overload
+ def __call__(self, __other: float) -> float64: ...
+ @overload
+ def __call__(self, __other: complex) -> complex128: ...
+ @overload
+ def __call__(self, __other: _NumberType) -> _NumberType: ...
+
+ class _BoolTrueDiv(Protocol):
+ @overload
+ def __call__(self, __other: Union[float, _IntLike, _BoolLike]) -> float64: ...
+ @overload
+ def __call__(self, __other: complex) -> complex128: ...
+ @overload
+ def __call__(self, __other: _NumberType) -> _NumberType: ...
+
+ class _TD64Div(Protocol[_NumberType_co]):
+ @overload
+ def __call__(self, __other: timedelta64) -> _NumberType_co: ...
+ @overload
+ def __call__(self, __other: _FloatLike) -> timedelta64: ...
+
+ class _IntTrueDiv(Protocol):
+ @overload
+ def __call__(self, __other: Union[_IntLike, float]) -> floating: ...
+ @overload
+ def __call__(self, __other: complex) -> complexfloating[floating]: ...
+
+ class _UnsignedIntOp(Protocol):
+ # NOTE: `uint64 + signedinteger -> float64`
+ @overload
+ def __call__(self, __other: Union[bool, unsignedinteger]) -> unsignedinteger: ...
+ @overload
+ def __call__(self, __other: Union[int, signedinteger]) -> Union[signedinteger, float64]: ...
+ @overload
+ def __call__(self, __other: float) -> floating: ...
+ @overload
+ def __call__(self, __other: complex) -> complexfloating[floating]: ...
+
+ class _SignedIntOp(Protocol):
+ @overload
+ def __call__(self, __other: Union[int, signedinteger]) -> signedinteger: ...
+ @overload
+ def __call__(self, __other: float) -> floating: ...
+ @overload
+ def __call__(self, __other: complex) -> complexfloating[floating]: ...
+
+ class _FloatOp(Protocol):
+ @overload
+ def __call__(self, __other: _FloatLike) -> floating: ...
+ @overload
+ def __call__(self, __other: complex) -> complexfloating[floating]: ...
+
+ class _ComplexOp(Protocol):
+ def __call__(self, __other: _ComplexLike) -> complexfloating[floating]: ...
+
+ class _NumberOp(Protocol):
+ def __call__(self, __other: _NumberLike) -> number: ...
+
+else:
+ _BoolOp = Any
+ _BoolSub = Any
+ _BoolTrueDiv = Any
+ _TD64Div = Any
+ _IntTrueDiv = Any
+ _UnsignedIntOp = Any
+ _SignedIntOp = Any
+ _FloatOp = Any
+ _ComplexOp = Any
+ _NumberOp = Any
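
As a hedged illustration of how such callback protocols are meant to be used when annotating an operator (the class names below are hypothetical, not numpy's actual stubs; requires Python >= 3.8 for `typing.Protocol`):

    from typing import Protocol, Union, overload

    class _IntOp(Protocol):
        @overload
        def __call__(self, other: bool) -> int: ...
        @overload
        def __call__(self, other: float) -> float: ...
        def __call__(self, other: Union[bool, float]) -> Union[int, float]: ...

    class MyScalar:
        # In a stub file, mypy picks the right overload per call site:
        __add__: _IntOp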
diff --git a/numpy/typing/tests/data/fail/arithmetic.py b/numpy/typing/tests/data/fail/arithmetic.py
new file mode 100644
index 000000000..169e104f9
--- /dev/null
+++ b/numpy/typing/tests/data/fail/arithmetic.py
@@ -0,0 +1,19 @@
+import numpy as np
+
+b_ = np.bool_()
+dt = np.datetime64(0, "D")
+td = np.timedelta64(0, "D")
+
+b_ - b_ # E: No overload variant
+
+dt + dt # E: Unsupported operand types
+td - dt # E: Unsupported operand types
+td % 1 # E: Unsupported operand types
+td / dt # E: No overload
+
+# NOTE: The test below currently doesn't work due to the broad
+# (i.e. untyped) signature of `.__mod__()`.
+# TODO: Revisit this once annotations are added to the
+# `_ArrayOrScalarCommon` magic methods.
+
+# td % dt # E: Unsupported operand types
diff --git a/numpy/typing/tests/data/fail/linspace.py b/numpy/typing/tests/data/fail/array_constructors.py
index a9769c5d6..5218572a6 100644
--- a/numpy/typing/tests/data/fail/linspace.py
+++ b/numpy/typing/tests/data/fail/array_constructors.py
@@ -1,5 +1,18 @@
import numpy as np
+a: np.ndarray
+
+np.require(a, requirements=1) # E: No overload variant
+np.require(a, requirements="TEST") # E: incompatible type
+
+np.zeros("test") # E: incompatible type
+np.zeros() # E: Too few arguments
+
+np.ones("test") # E: incompatible type
+np.ones() # E: Too few arguments
+
+np.array(0, float, True) # E: Too many positional
+
np.linspace(None, 'bob') # E: No overload variant
np.linspace(0, 2, num=10.0) # E: No overload variant
np.linspace(0, 2, endpoint='True') # E: No overload variant
diff --git a/numpy/typing/tests/data/fail/modules.py b/numpy/typing/tests/data/fail/modules.py
index e7ffe8920..be031e6e1 100644
--- a/numpy/typing/tests/data/fail/modules.py
+++ b/numpy/typing/tests/data/fail/modules.py
@@ -1,3 +1,4 @@
import numpy as np
np.testing.bob # E: Module has no attribute
+np.bob # E: Module has no attribute \ No newline at end of file
diff --git a/numpy/typing/tests/data/fail/scalars.py b/numpy/typing/tests/data/fail/scalars.py
index 47c031163..13bb45483 100644
--- a/numpy/typing/tests/data/fail/scalars.py
+++ b/numpy/typing/tests/data/fail/scalars.py
@@ -28,22 +28,6 @@ np.complex64(1, 2) # E: Too many arguments
np.datetime64(0) # E: non-matching overload
-dt_64 = np.datetime64(0, "D")
-td_64 = np.timedelta64(1, "h")
-
-dt_64 + dt_64 # E: Unsupported operand types
-td_64 - dt_64 # E: Unsupported operand types
-td_64 % 1 # E: Unsupported operand types
-
-# NOTE: The 2 tests below currently don't work due to the broad
-# (i.e. untyped) signature of `generic.__truediv__()` and `.__mod__()`.
-# TODO: Revisit this once annotations are added to the
-# `_ArrayOrScalarCommon` magic methods.
-
-# td_64 / dt_64 # E: No overload
-# td_64 % dt_64 # E: Unsupported operand types
-
-
class A:
def __float__(self):
return 1.0
diff --git a/numpy/typing/tests/data/fail/simple.py b/numpy/typing/tests/data/fail/simple.py
deleted file mode 100644
index 57c08fb7d..000000000
--- a/numpy/typing/tests/data/fail/simple.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""Simple expression that should fail with mypy."""
-
-import numpy as np
-
-# Array creation routines checks
-np.zeros("test") # E: incompatible type
-np.zeros() # E: Too few arguments
-
-np.ones("test") # E: incompatible type
-np.ones() # E: Too few arguments
-
-np.array(0, float, True) # E: Too many positional
diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py
new file mode 100644
index 000000000..f26eab879
--- /dev/null
+++ b/numpy/typing/tests/data/pass/arithmetic.py
@@ -0,0 +1,257 @@
+import numpy as np
+
+c16 = np.complex128(1)
+f8 = np.float64(1)
+i8 = np.int64(1)
+u8 = np.uint64(1)
+
+c8 = np.complex64(1)
+f4 = np.float32(1)
+i4 = np.int32(1)
+u4 = np.uint32(1)
+
+dt = np.datetime64(1, "D")
+td = np.timedelta64(1, "D")
+
+b_ = np.bool_(1)
+
+b = bool(1)
+c = complex(1)
+f = float(1)
+i = int(1)
+
+AR = np.ones(1, dtype=np.float64)
+AR.setflags(write=False)
+
+# Time structures
+
+dt + td
+dt + i
+dt + i4
+dt + i8
+dt - dt
+dt - i
+dt - i4
+dt - i8
+
+td + td
+td + i
+td + i4
+td + i8
+td - td
+td - i
+td - i4
+td - i8
+td / f
+td / f4
+td / f8
+td / td
+td // td
+td % td
+
+
+# boolean
+
+b_ / b
+b_ / b_
+b_ / i
+b_ / i8
+b_ / i4
+b_ / u8
+b_ / u4
+b_ / f
+b_ / f8
+b_ / f4
+b_ / c
+b_ / c16
+b_ / c8
+
+b / b_
+b_ / b_
+i / b_
+i8 / b_
+i4 / b_
+u8 / b_
+u4 / b_
+f / b_
+f8 / b_
+f4 / b_
+c / b_
+c16 / b_
+c8 / b_
+
+# Complex
+
+c16 + c16
+c16 + f8
+c16 + i8
+c16 + c8
+c16 + f4
+c16 + i4
+c16 + b_
+c16 + b
+c16 + c
+c16 + f
+c16 + i
+c16 + AR
+
+c16 + c16
+f8 + c16
+i8 + c16
+c8 + c16
+f4 + c16
+i4 + c16
+b_ + c16
+b + c16
+c + c16
+f + c16
+i + c16
+AR + c16
+
+c8 + c16
+c8 + f8
+c8 + i8
+c8 + c8
+c8 + f4
+c8 + i4
+c8 + b_
+c8 + b
+c8 + c
+c8 + f
+c8 + i
+c8 + AR
+
+c16 + c8
+f8 + c8
+i8 + c8
+c8 + c8
+f4 + c8
+i4 + c8
+b_ + c8
+b + c8
+c + c8
+f + c8
+i + c8
+AR + c8
+
+# Float
+
+f8 + f8
+f8 + i8
+f8 + f4
+f8 + i4
+f8 + b_
+f8 + b
+f8 + c
+f8 + f
+f8 + i
+f8 + AR
+
+f8 + f8
+i8 + f8
+f4 + f8
+i4 + f8
+b_ + f8
+b + f8
+c + f8
+f + f8
+i + f8
+AR + f8
+
+f4 + f8
+f4 + i8
+f4 + f4
+f4 + i4
+f4 + b_
+f4 + b
+f4 + c
+f4 + f
+f4 + i
+f4 + AR
+
+f8 + f4
+i8 + f4
+f4 + f4
+i4 + f4
+b_ + f4
+b + f4
+c + f4
+f + f4
+i + f4
+AR + f4
+
+# Int
+
+i8 + i8
+i8 + u8
+i8 + i4
+i8 + u4
+i8 + b_
+i8 + b
+i8 + c
+i8 + f
+i8 + i
+i8 + AR
+
+u8 + u8
+u8 + i4
+u8 + u4
+u8 + b_
+u8 + b
+u8 + c
+u8 + f
+u8 + i
+u8 + AR
+
+i8 + i8
+u8 + i8
+i4 + i8
+u4 + i8
+b_ + i8
+b + i8
+c + i8
+f + i8
+i + i8
+AR + i8
+
+u8 + u8
+i4 + u8
+u4 + u8
+b_ + u8
+b + u8
+c + u8
+f + u8
+i + u8
+AR + u8
+
+i4 + i8
+i4 + i4
+i4 + i
+i4 + b_
+i4 + b
+i4 + AR
+
+u4 + i8
+u4 + i4
+u4 + u8
+u4 + u4
+u4 + i
+u4 + b_
+u4 + b
+u4 + AR
+
+i8 + i4
+i4 + i4
+i + i4
+b_ + i4
+b + i4
+AR + i4
+
+i8 + u4
+i4 + u4
+u8 + u4
+u4 + u4
+b_ + u4
+b + u4
+i + u4
+AR + u4
diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py
new file mode 100644
index 000000000..bf29e52b9
--- /dev/null
+++ b/numpy/typing/tests/data/pass/array_constructors.py
@@ -0,0 +1,66 @@
+from typing import List
+import numpy as np
+
+class Index:
+ def __index__(self) -> int:
+ return 0
+
+class SubClass(np.ndarray): ...
+
+A = np.array([1])
+B = A.view(SubClass).copy()
+C = [1]
+
+np.array(1, dtype=float)
+np.array(1, copy=False)
+np.array(1, order='F')
+np.array(1, order=None)
+np.array(1, subok=True)
+np.array(1, ndmin=3)
+np.array(1, str, copy=True, order='C', subok=False, ndmin=2)
+
+np.asarray(A)
+np.asarray(B)
+np.asarray(C)
+
+np.asanyarray(A)
+np.asanyarray(B)
+np.asanyarray(B, dtype=int)
+np.asanyarray(C)
+
+np.ascontiguousarray(A)
+np.ascontiguousarray(B)
+np.ascontiguousarray(C)
+
+np.asfortranarray(A)
+np.asfortranarray(B)
+np.asfortranarray(C)
+
+np.require(A)
+np.require(B)
+np.require(B, dtype=int)
+np.require(B, requirements=None)
+np.require(B, requirements="E")
+np.require(B, requirements=["ENSUREARRAY"])
+np.require(B, requirements={"F", "E"})
+np.require(B, requirements=["C", "OWNDATA"])
+np.require(B, requirements="W")
+np.require(B, requirements="A")
+np.require(C)
+
+np.linspace(0, 2)
+np.linspace(0.5, [0, 1, 2])
+np.linspace([0, 1, 2], 3)
+np.linspace(0j, 2)
+np.linspace(0, 2, num=10)
+np.linspace(0, 2, endpoint=True)
+np.linspace(0, 2, retstep=True)
+np.linspace(0j, 2j, retstep=True)
+np.linspace(0, 2, dtype=bool)
+np.linspace([0, 1], [2, 3], axis=Index())
+
+np.logspace(0, 2, base=2)
+np.logspace(0, 2, base=2)
+np.logspace(0, 2, base=[1j, 2j], num=2)
+
+np.geomspace(1, 2)
diff --git a/numpy/typing/tests/data/pass/linspace.py b/numpy/typing/tests/data/pass/linspace.py
deleted file mode 100644
index 8c6d0d56b..000000000
--- a/numpy/typing/tests/data/pass/linspace.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import numpy as np
-
-class Index:
- def __index__(self) -> int:
- return 0
-
-np.linspace(0, 2)
-np.linspace(0.5, [0, 1, 2])
-np.linspace([0, 1, 2], 3)
-np.linspace(0j, 2)
-np.linspace(0, 2, num=10)
-np.linspace(0, 2, endpoint=True)
-np.linspace(0, 2, retstep=True)
-np.linspace(0j, 2j, retstep=True)
-np.linspace(0, 2, dtype=bool)
-np.linspace([0, 1], [2, 3], axis=Index())
-
-np.logspace(0, 2, base=2)
-np.logspace(0, 2, base=2)
-np.logspace(0, 2, base=[1j, 2j], num=2)
-
-np.geomspace(1, 2)
diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py
index 321ce3c2b..8eaeb6afb 100644
--- a/numpy/typing/tests/data/pass/literal.py
+++ b/numpy/typing/tests/data/pass/literal.py
@@ -31,6 +31,8 @@ order_list: List[Tuple[frozenset, Callable]] = [
(KACF, partial(np.add, 1, 1)), # i.e. np.ufunc.__call__
(ACF, partial(np.reshape, AR, 1)),
(KACF, partial(np.ravel, AR)),
+ (KACF, partial(np.asarray, 1)),
+ (KACF, partial(np.asanyarray, 1)),
]
for order_set, func in order_list:
diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py
index c02e1ed36..49ddb8ed9 100644
--- a/numpy/typing/tests/data/pass/scalars.py
+++ b/numpy/typing/tests/data/pass/scalars.py
@@ -108,19 +108,6 @@ np.timedelta64(dt.timedelta(2))
np.timedelta64(None)
np.timedelta64(None, "D")
-dt_64 = np.datetime64(0, "D")
-td_64 = np.timedelta64(1, "h")
-
-dt_64 + td_64
-dt_64 - dt_64
-dt_64 - td_64
-
-td_64 + td_64
-td_64 - td_64
-td_64 / 1.0
-td_64 / td_64
-td_64 % td_64
-
np.void(1)
np.void(np.int64(1))
np.void(True)
diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py
index 527050557..4d397bd29 100644
--- a/numpy/typing/tests/data/pass/simple.py
+++ b/numpy/typing/tests/data/pass/simple.py
@@ -17,15 +17,6 @@ ndarray_func(np.array([1, 2]))
array == 1
array.dtype == float
-# Array creation routines checks
-np.array(1, dtype=float)
-np.array(1, copy=False)
-np.array(1, order='F')
-np.array(1, order=None)
-np.array(1, subok=True)
-np.array(1, ndmin=3)
-np.array(1, str, copy=True, order='C', subok=False, ndmin=2)
-
ndarray_func(np.zeros([1, 2]))
ndarray_func(np.ones([1, 2]))
ndarray_func(np.empty([1, 2]))
diff --git a/numpy/typing/tests/data/pass/ufuncs.py b/numpy/typing/tests/data/pass/ufuncs.py
index 82172952a..ad4d483d4 100644
--- a/numpy/typing/tests/data/pass/ufuncs.py
+++ b/numpy/typing/tests/data/pass/ufuncs.py
@@ -6,7 +6,10 @@ np.sin(1, out=np.empty(1))
np.matmul(np.ones((2, 2, 2)), np.ones((2, 2, 2)), axes=[(0, 1), (0, 1), (0, 1)])
np.sin(1, signature="D")
np.sin(1, extobj=[16, 1, lambda: None])
-np.sin(1) + np.sin(1)
+# NOTE: `np.generic` subclasses are not guaranteed to support addition;
+# re-enable this once we can infer the exact return type of `np.sin(...)`.
+#
+# np.sin(1) + np.sin(1)
np.sin.types[0]
np.sin.__name__
diff --git a/numpy/typing/tests/data/reveal/arithmetic.py b/numpy/typing/tests/data/reveal/arithmetic.py
new file mode 100644
index 000000000..b8c457aaf
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/arithmetic.py
@@ -0,0 +1,256 @@
+import numpy as np
+
+c16 = np.complex128()
+f8 = np.float64()
+i8 = np.int64()
+u8 = np.uint64()
+
+c8 = np.complex64()
+f4 = np.float32()
+i4 = np.int32()
+u4 = np.uint32()
+
+dt = np.datetime64(0, "D")
+td = np.timedelta64(0, "D")
+
+b_ = np.bool_()
+
+b = bool()
+c = complex()
+f = float()
+i = int()
+
+AR = np.array([0], dtype=np.float64)
+AR.setflags(write=False)
+
+# Time structures
+
+reveal_type(dt + td) # E: numpy.datetime64
+reveal_type(dt + i) # E: numpy.datetime64
+reveal_type(dt + i4) # E: numpy.datetime64
+reveal_type(dt + i8) # E: numpy.datetime64
+reveal_type(dt - dt) # E: numpy.timedelta64
+reveal_type(dt - i) # E: numpy.datetime64
+reveal_type(dt - i4) # E: numpy.datetime64
+reveal_type(dt - i8) # E: numpy.datetime64
+
+reveal_type(td + td) # E: numpy.timedelta64
+reveal_type(td + i) # E: numpy.timedelta64
+reveal_type(td + i4) # E: numpy.timedelta64
+reveal_type(td + i8) # E: numpy.timedelta64
+reveal_type(td - td) # E: numpy.timedelta64
+reveal_type(td - i) # E: numpy.timedelta64
+reveal_type(td - i4) # E: numpy.timedelta64
+reveal_type(td - i8) # E: numpy.timedelta64
+reveal_type(td / f) # E: numpy.timedelta64
+reveal_type(td / f4) # E: numpy.timedelta64
+reveal_type(td / f8) # E: numpy.timedelta64
+reveal_type(td / td) # E: float64
+reveal_type(td // td) # E: signedinteger
+reveal_type(td % td) # E: numpy.timedelta64
+
+# boolean
+
+reveal_type(b_ / b) # E: float64
+reveal_type(b_ / b_) # E: float64
+reveal_type(b_ / i) # E: float64
+reveal_type(b_ / i8) # E: float64
+reveal_type(b_ / i4) # E: float64
+reveal_type(b_ / u8) # E: float64
+reveal_type(b_ / u4) # E: float64
+reveal_type(b_ / f) # E: float64
+reveal_type(b_ / f8) # E: float64
+reveal_type(b_ / f4) # E: float32
+reveal_type(b_ / c) # E: complex128
+reveal_type(b_ / c16) # E: complex128
+reveal_type(b_ / c8) # E: complex64
+
+reveal_type(b / b_) # E: float64
+reveal_type(b_ / b_) # E: float64
+reveal_type(i / b_) # E: float64
+reveal_type(i8 / b_) # E: float64
+reveal_type(i4 / b_) # E: float64
+reveal_type(u8 / b_) # E: float64
+reveal_type(u4 / b_) # E: float64
+reveal_type(f / b_) # E: float64
+reveal_type(f8 / b_) # E: float64
+reveal_type(f4 / b_) # E: float32
+reveal_type(c / b_) # E: complex128
+reveal_type(c16 / b_) # E: complex128
+reveal_type(c8 / b_) # E: complex64
+
+# Complex
+
+reveal_type(c16 + c16) # E: complexfloating
+reveal_type(c16 + f8) # E: complexfloating
+reveal_type(c16 + i8) # E: complexfloating
+reveal_type(c16 + c8) # E: complexfloating
+reveal_type(c16 + f4) # E: complexfloating
+reveal_type(c16 + i4) # E: complexfloating
+reveal_type(c16 + b_) # E: complex128
+reveal_type(c16 + b) # E: complexfloating
+reveal_type(c16 + c) # E: complexfloating
+reveal_type(c16 + f) # E: complexfloating
+reveal_type(c16 + i) # E: complexfloating
+reveal_type(c16 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(c16 + c16) # E: complexfloating
+reveal_type(f8 + c16) # E: complexfloating
+reveal_type(i8 + c16) # E: complexfloating
+reveal_type(c8 + c16) # E: complexfloating
+reveal_type(f4 + c16) # E: complexfloating
+reveal_type(i4 + c16) # E: complexfloating
+reveal_type(b_ + c16) # E: complex128
+reveal_type(b + c16) # E: complexfloating
+reveal_type(c + c16) # E: complexfloating
+reveal_type(f + c16) # E: complexfloating
+reveal_type(i + c16) # E: complexfloating
+reveal_type(AR + c16) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(c8 + c16) # E: complexfloating
+reveal_type(c8 + f8) # E: complexfloating
+reveal_type(c8 + i8) # E: complexfloating
+reveal_type(c8 + c8) # E: complexfloating
+reveal_type(c8 + f4) # E: complexfloating
+reveal_type(c8 + i4) # E: complexfloating
+reveal_type(c8 + b_) # E: complex64
+reveal_type(c8 + b) # E: complexfloating
+reveal_type(c8 + c) # E: complexfloating
+reveal_type(c8 + f) # E: complexfloating
+reveal_type(c8 + i) # E: complexfloating
+reveal_type(c8 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(c16 + c8) # E: complexfloating
+reveal_type(f8 + c8) # E: complexfloating
+reveal_type(i8 + c8) # E: complexfloating
+reveal_type(c8 + c8) # E: complexfloating
+reveal_type(f4 + c8) # E: complexfloating
+reveal_type(i4 + c8) # E: complexfloating
+reveal_type(b_ + c8) # E: complex64
+reveal_type(b + c8) # E: complexfloating
+reveal_type(c + c8) # E: complexfloating
+reveal_type(f + c8) # E: complexfloating
+reveal_type(i + c8) # E: complexfloating
+reveal_type(AR + c8) # E: Union[numpy.ndarray, numpy.generic]
+
+# Float
+
+reveal_type(f8 + f8) # E: floating
+reveal_type(f8 + i8) # E: floating
+reveal_type(f8 + f4) # E: floating
+reveal_type(f8 + i4) # E: floating
+reveal_type(f8 + b_) # E: float64
+reveal_type(f8 + b) # E: floating
+reveal_type(f8 + c) # E: complexfloating
+reveal_type(f8 + f) # E: floating
+reveal_type(f8 + i) # E: floating
+reveal_type(f8 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(f8 + f8) # E: floating
+reveal_type(i8 + f8) # E: floating
+reveal_type(f4 + f8) # E: floating
+reveal_type(i4 + f8) # E: floating
+reveal_type(b_ + f8) # E: float64
+reveal_type(b + f8) # E: floating
+reveal_type(c + f8) # E: complexfloating
+reveal_type(f + f8) # E: floating
+reveal_type(i + f8) # E: floating
+reveal_type(AR + f8) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(f4 + f8) # E: floating
+reveal_type(f4 + i8) # E: floating
+reveal_type(f4 + f4) # E: floating
+reveal_type(f4 + i4) # E: floating
+reveal_type(f4 + b_) # E: float32
+reveal_type(f4 + b) # E: floating
+reveal_type(f4 + c) # E: complexfloating
+reveal_type(f4 + f) # E: floating
+reveal_type(f4 + i) # E: floating
+reveal_type(f4 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(f8 + f4) # E: floating
+reveal_type(i8 + f4) # E: floating
+reveal_type(f4 + f4) # E: floating
+reveal_type(i4 + f4) # E: floating
+reveal_type(b_ + f4) # E: float32
+reveal_type(b + f4) # E: floating
+reveal_type(c + f4) # E: complexfloating
+reveal_type(f + f4) # E: floating
+reveal_type(i + f4) # E: floating
+reveal_type(AR + f4) # E: Union[numpy.ndarray, numpy.generic]
+
+# Int
+
+reveal_type(i8 + i8) # E: signedinteger
+reveal_type(i8 + u8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(i8 + i4) # E: signedinteger
+reveal_type(i8 + u4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(i8 + b_) # E: int64
+reveal_type(i8 + b) # E: signedinteger
+reveal_type(i8 + c) # E: complexfloating
+reveal_type(i8 + f) # E: floating
+reveal_type(i8 + i) # E: signedinteger
+reveal_type(i8 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(u8 + u8) # E: unsignedinteger
+reveal_type(u8 + i4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u8 + u4) # E: unsignedinteger
+reveal_type(u8 + b_) # E: uint64
+reveal_type(u8 + b) # E: unsignedinteger
+reveal_type(u8 + c) # E: complexfloating
+reveal_type(u8 + f) # E: floating
+reveal_type(u8 + i) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u8 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(i8 + i8) # E: signedinteger
+reveal_type(u8 + i8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(i4 + i8) # E: signedinteger
+reveal_type(u4 + i8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(b_ + i8) # E: int64
+reveal_type(b + i8) # E: signedinteger
+reveal_type(c + i8) # E: complexfloating
+reveal_type(f + i8) # E: floating
+reveal_type(i + i8) # E: signedinteger
+reveal_type(AR + i8) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(u8 + u8) # E: unsignedinteger
+reveal_type(i4 + u8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u4 + u8) # E: unsignedinteger
+reveal_type(b_ + u8) # E: uint64
+reveal_type(b + u8) # E: unsignedinteger
+reveal_type(c + u8) # E: complexfloating
+reveal_type(f + u8) # E: floating
+reveal_type(i + u8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(AR + u8) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(i4 + i8) # E: signedinteger
+reveal_type(i4 + i4) # E: signedinteger
+reveal_type(i4 + i) # E: signedinteger
+reveal_type(i4 + b_) # E: int32
+reveal_type(i4 + b) # E: signedinteger
+reveal_type(i4 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(u4 + i8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u4 + i4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u4 + u8) # E: unsignedinteger
+reveal_type(u4 + u4) # E: unsignedinteger
+reveal_type(u4 + i) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u4 + b_) # E: uint32
+reveal_type(u4 + b) # E: unsignedinteger
+reveal_type(u4 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(i8 + i4) # E: signedinteger
+reveal_type(i4 + i4) # E: signedinteger
+reveal_type(i + i4) # E: signedinteger
+reveal_type(b_ + i4) # E: int32
+reveal_type(b + i4) # E: signedinteger
+reveal_type(AR + i4) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(i8 + u4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(i4 + u4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u8 + u4) # E: unsignedinteger
+reveal_type(u4 + u4) # E: unsignedinteger
+reveal_type(b_ + u4) # E: uint32
+reveal_type(b + u4) # E: unsignedinteger
+reveal_type(i + u4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(AR + u4) # E: Union[numpy.ndarray, numpy.generic]
diff --git a/numpy/typing/tests/data/reveal/array_constructors.py b/numpy/typing/tests/data/reveal/array_constructors.py
new file mode 100644
index 000000000..ba8a8eda1
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/array_constructors.py
@@ -0,0 +1,42 @@
+from typing import List
+import numpy as np
+
+class SubClass(np.ndarray): ...
+
+A: np.ndarray
+B: SubClass
+C: List[int]
+
+reveal_type(np.asarray(A)) # E: ndarray
+reveal_type(np.asarray(B)) # E: ndarray
+reveal_type(np.asarray(C)) # E: ndarray
+
+reveal_type(np.asanyarray(A)) # E: ndarray
+reveal_type(np.asanyarray(B)) # E: SubClass
+reveal_type(np.asanyarray(B, dtype=int)) # E: ndarray
+reveal_type(np.asanyarray(C)) # E: ndarray
+
+reveal_type(np.ascontiguousarray(A)) # E: ndarray
+reveal_type(np.ascontiguousarray(B)) # E: ndarray
+reveal_type(np.ascontiguousarray(C)) # E: ndarray
+
+reveal_type(np.asfortranarray(A)) # E: ndarray
+reveal_type(np.asfortranarray(B)) # E: ndarray
+reveal_type(np.asfortranarray(C)) # E: ndarray
+
+reveal_type(np.require(A)) # E: ndarray
+reveal_type(np.require(B)) # E: SubClass
+reveal_type(np.require(B, requirements=None)) # E: SubClass
+reveal_type(np.require(B, dtype=int)) # E: ndarray
+reveal_type(np.require(B, requirements="E")) # E: ndarray
+reveal_type(np.require(B, requirements=["ENSUREARRAY"])) # E: ndarray
+reveal_type(np.require(B, requirements={"F", "E"})) # E: ndarray
+reveal_type(np.require(B, requirements=["C", "OWNDATA"])) # E: SubClass
+reveal_type(np.require(B, requirements="W")) # E: SubClass
+reveal_type(np.require(B, requirements="A")) # E: SubClass
+reveal_type(np.require(C)) # E: ndarray
+
+reveal_type(np.linspace(0, 10)) # E: numpy.ndarray
+reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray, numpy.inexact]
+reveal_type(np.logspace(0, 10)) # E: numpy.ndarray
+reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray
diff --git a/numpy/typing/tests/data/reveal/linspace.py b/numpy/typing/tests/data/reveal/linspace.py
deleted file mode 100644
index cfbbdf390..000000000
--- a/numpy/typing/tests/data/reveal/linspace.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import numpy as np
-
-reveal_type(np.linspace(0, 10)) # E: numpy.ndarray
-reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray, numpy.inexact]
-reveal_type(np.logspace(0, 10)) # E: numpy.ndarray
-reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray
diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py
index 882fe9612..ec3713b0f 100644
--- a/numpy/typing/tests/data/reveal/scalars.py
+++ b/numpy/typing/tests/data/reveal/scalars.py
@@ -12,22 +12,5 @@ reveal_type(x.itemsize) # E: int
reveal_type(x.shape) # E: tuple[builtins.int]
reveal_type(x.strides) # E: tuple[builtins.int]

-# Time structures
-dt = np.datetime64(0, "D")
-td = np.timedelta64(0, "D")
-
-reveal_type(dt + td) # E: numpy.datetime64
-reveal_type(dt + 1) # E: numpy.datetime64
-reveal_type(dt - dt) # E: numpy.timedelta64
-reveal_type(dt - 1) # E: numpy.timedelta64
-
-reveal_type(td + td) # E: numpy.timedelta64
-reveal_type(td + 1) # E: numpy.timedelta64
-reveal_type(td - td) # E: numpy.timedelta64
-reveal_type(td - 1) # E: numpy.timedelta64
-reveal_type(td / 1.0) # E: numpy.timedelta64
-reveal_type(td / td) # E: float
-reveal_type(td % td) # E: numpy.timedelta64
-
reveal_type(np.complex64().real) # E: numpy.float32
reveal_type(np.complex128().imag) # E: numpy.float64
diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index beb53ddec..cba1dc1be 100644
--- a/numpy/typing/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -36,6 +36,7 @@ def get_test_cases(directory):
)


+@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_success(path):
@@ -50,6 +51,7 @@ def test_success(path):
assert re.match(r"Success: no issues found in \d+ source files?", stdout.strip())
+@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
def test_fail(path):
@@ -99,6 +101,7 @@ def test_fail(path):
pytest.fail(f"Error {repr(errors[lineno])} not found")
+@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
def test_reveal(path):
@@ -130,6 +133,7 @@ def test_reveal(path):
assert marker in error_line


+@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_code_runs(path):