summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.pyi125
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h2
-rw-r--r--numpy/core/overrides.py6
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src15
-rw-r--r--numpy/core/src/multiarray/array_method.c65
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c207
-rw-r--r--numpy/core/src/multiarray/datetime.c3
-rw-r--r--numpy/core/src/multiarray/dtypemeta.c13
-rw-r--r--numpy/core/tests/test_api.py13
-rw-r--r--numpy/core/tests/test_deprecations.py35
-rw-r--r--numpy/core/tests/test_half.py6
-rw-r--r--numpy/core/tests/test_multiarray.py7
-rw-r--r--numpy/core/tests/test_numeric.py67
-rw-r--r--numpy/core/tests/test_regression.py4
-rw-r--r--numpy/distutils/intelccompiler.py2
-rw-r--r--numpy/lib/index_tricks.py6
-rw-r--r--numpy/lib/polynomial.py9
-rw-r--r--numpy/lib/tests/test_regression.py3
-rw-r--r--numpy/polynomial/_polybase.py10
-rw-r--r--numpy/polynomial/chebyshev.py9
-rw-r--r--numpy/polynomial/hermite.py9
-rw-r--r--numpy/polynomial/hermite_e.py9
-rw-r--r--numpy/polynomial/laguerre.py9
-rw-r--r--numpy/polynomial/legendre.py9
-rw-r--r--numpy/polynomial/polynomial.py9
-rw-r--r--numpy/typing/__init__.py14
-rw-r--r--numpy/typing/_array_like.py6
-rw-r--r--numpy/typing/_callable.py46
-rw-r--r--numpy/typing/_char_codes.py100
-rw-r--r--numpy/typing/_dtype_like.py7
-rw-r--r--numpy/typing/_extended_precision.py24
-rw-r--r--numpy/typing/_generic_alias.py17
-rw-r--r--numpy/typing/_shape.py4
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_conversion.py53
-rw-r--r--numpy/typing/tests/data/reveal/scalars.py12
-rw-r--r--numpy/typing/tests/test_generic_alias.py10
36 files changed, 553 insertions, 392 deletions
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index ac37eb8ad..ec38d4943 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -1226,20 +1226,9 @@ class _ArrayOrScalarCommon:
def __deepcopy__(self: _ArraySelf, __memo: Optional[dict] = ...) -> _ArraySelf: ...
def __eq__(self, other): ...
def __ne__(self, other): ...
- def astype(
- self: _ArraySelf,
- dtype: DTypeLike,
- order: _OrderKACF = ...,
- casting: _Casting = ...,
- subok: bool = ...,
- copy: bool = ...,
- ) -> _ArraySelf: ...
def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
def dump(self, file: str) -> None: ...
def dumps(self) -> bytes: ...
- def getfield(
- self: _ArraySelf, dtype: DTypeLike, offset: int = ...
- ) -> _ArraySelf: ...
def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
# NOTE: `tostring()` is deprecated and therefore excluded
# def tostring(self, order=...): ...
@@ -1248,14 +1237,6 @@ class _ArrayOrScalarCommon:
) -> None: ...
# generics and 0d arrays return builtin scalars
def tolist(self) -> Any: ...
- @overload
- def view(self, type: Type[_NdArraySubClass]) -> _NdArraySubClass: ...
- @overload
- def view(self: _ArraySelf, dtype: DTypeLike = ...) -> _ArraySelf: ...
- @overload
- def view(
- self, dtype: DTypeLike, type: Type[_NdArraySubClass]
- ) -> _NdArraySubClass: ...
# TODO: Add proper signatures
def __getitem__(self, key) -> Any: ...
@@ -1637,6 +1618,12 @@ _T_co = TypeVar("_T_co", covariant=True)
_2Tuple = Tuple[_T, _T]
_Casting = L["no", "equiv", "safe", "same_kind", "unsafe"]
+_DTypeLike = Union[
+ dtype[_ScalarType],
+ Type[_ScalarType],
+ _SupportsDType[dtype[_ScalarType]],
+]
+
_ArrayUInt_co = NDArray[Union[bool_, unsignedinteger[Any]]]
_ArrayInt_co = NDArray[Union[bool_, integer[Any]]]
_ArrayFloat_co = NDArray[Union[bool_, integer[Any], floating[Any]]]
@@ -1874,6 +1861,53 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
self, *shape: SupportsIndex, order: _OrderACF = ...
) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def astype(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ order: _OrderKACF = ...,
+ casting: _Casting = ...,
+ subok: bool = ...,
+ copy: bool = ...,
+ ) -> NDArray[_ScalarType]: ...
+ @overload
+ def astype(
+ self,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ casting: _Casting = ...,
+ subok: bool = ...,
+ copy: bool = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def view(self: _ArraySelf) -> _ArraySelf: ...
+ @overload
+ def view(self, type: Type[_NdArraySubClass]) -> _NdArraySubClass: ...
+ @overload
+ def view(self, dtype: _DTypeLike[_ScalarType]) -> NDArray[_ScalarType]: ...
+ @overload
+ def view(self, dtype: DTypeLike) -> NDArray[Any]: ...
+ @overload
+ def view(
+ self,
+ dtype: DTypeLike,
+ type: Type[_NdArraySubClass],
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def getfield(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ offset: SupportsIndex = ...
+ ) -> NDArray[_ScalarType]: ...
+ @overload
+ def getfield(
+ self,
+ dtype: DTypeLike,
+ offset: SupportsIndex = ...
+ ) -> NDArray[Any]: ...
+
# Dispatch to the underlying `generic` via protocols
def __int__(
self: ndarray[Any, dtype[SupportsInt]], # type: ignore[type-var]
@@ -2846,6 +2880,59 @@ class generic(_ArrayOrScalarCommon):
def byteswap(self: _ScalarType, inplace: L[False] = ...) -> _ScalarType: ...
@property
def flat(self: _ScalarType) -> flatiter[ndarray[Any, dtype[_ScalarType]]]: ...
+
+ @overload
+ def astype(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ order: _OrderKACF = ...,
+ casting: _Casting = ...,
+ subok: bool = ...,
+ copy: bool = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def astype(
+ self,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ casting: _Casting = ...,
+ subok: bool = ...,
+ copy: bool = ...,
+ ) -> Any: ...
+
+ # NOTE: `view` will perform a 0D->scalar cast,
+ # thus the array `type` is irrelevant to the output type
+ @overload
+ def view(
+ self: _ScalarType,
+ type: Type[ndarray[Any, Any]] = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def view(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ type: Type[ndarray[Any, Any]] = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def view(
+ self,
+ dtype: DTypeLike,
+ type: Type[ndarray[Any, Any]] = ...,
+ ) -> Any: ...
+
+ @overload
+ def getfield(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ offset: SupportsIndex = ...
+ ) -> _ScalarType: ...
+ @overload
+ def getfield(
+ self,
+ dtype: DTypeLike,
+ offset: SupportsIndex = ...
+ ) -> Any: ...
+
def item(
self,
__args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index d1acfdf26..3ea66b049 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -1236,6 +1236,8 @@ struct PyArrayIterObject_tag {
_PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \
__npy_i++) { \
+ _PyAIT(it)->coordinates[__npy_i] = \
+ (__npy_ind / _PyAIT(it)->factors[__npy_i]); \
_PyAIT(it)->dataptr += \
(__npy_ind / _PyAIT(it)->factors[__npy_i]) \
* _PyAIT(it)->strides[__npy_i]; \
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index c2b5fb7fa..70085d896 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -18,11 +18,7 @@ array_function_like_doc = (
NumPy arrays. If an array-like passed in as ``like`` supports
the ``__array_function__`` protocol, the result will be defined
by it. In this case, it ensures the creation of an array object
- compatible with that passed in via this argument.
-
- .. note::
- The ``like`` keyword is an experimental feature pending on
- acceptance of :ref:`NEP 35 <NEP35>`."""
+ compatible with that passed in via this argument."""
)
def set_array_function_like_doc(public_api):
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index bfdeae079..79140bdb7 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -87,7 +87,7 @@ static int copy_@name@(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *ni
* For each point in itx, copy the current neighborhood into an array which
* is appended at the output list
*/
- for (i = 0; i < itx->size; ++i) {
+ for (i = itx->index; i < itx->size; ++i) {
PyArrayNeighborhoodIter_Reset(niterx);
for (j = 0; j < PyArray_NDIM(itx->ao); ++j) {
@@ -130,7 +130,7 @@ static int copy_object(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *ni
* For each point in itx, copy the current neighborhood into an array which
* is appended at the output list
*/
- for (i = 0; i < itx->size; ++i) {
+ for (i = itx->index; i < itx->size; ++i) {
PyArrayNeighborhoodIter_Reset(niterx);
for (j = 0; j < PyArray_NDIM(itx->ao); ++j) {
@@ -161,10 +161,11 @@ test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args)
PyArrayObject *ax, *afill;
PyArrayIterObject *itx;
int i, typenum, mode, st;
+ Py_ssize_t idxstart = 0;
npy_intp bounds[NPY_MAXDIMS*2];
PyArrayNeighborhoodIterObject *niterx;
- if (!PyArg_ParseTuple(args, "OOOi", &x, &b, &fill, &mode)) {
+ if (!PyArg_ParseTuple(args, "OOOi|n", &x, &b, &fill, &mode, &idxstart)) {
return NULL;
}
@@ -224,12 +225,20 @@ test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args)
}
}
+ if (idxstart >= itx->size) {
+ PyErr_SetString(PyExc_ValueError,
+ "start index not compatible with x input");
+ goto clean_itx;
+ }
+
niterx = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew(
(PyArrayIterObject*)itx, bounds, mode, afill);
if (niterx == NULL) {
goto clean_afill;
}
+ PyArray_ITER_GOTO1D((PyArrayIterObject*)itx, idxstart);
+
switch (typenum) {
case NPY_OBJECT:
st = copy_object(itx, niterx, bounds, &out);
diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c
index e13da12de..3ecc20d1d 100644
--- a/numpy/core/src/multiarray/array_method.c
+++ b/numpy/core/src/multiarray/array_method.c
@@ -210,10 +210,12 @@ validate_spec(PyArrayMethod_Spec *spec)
case NPY_UNSAFE_CASTING:
break;
default:
- PyErr_Format(PyExc_TypeError,
- "ArrayMethod has invalid casting `%d`. (method: %s)",
- spec->casting, spec->name);
- return -1;
+ if (spec->casting != -1) {
+ PyErr_Format(PyExc_TypeError,
+ "ArrayMethod has invalid casting `%d`. (method: %s)",
+ spec->casting, spec->name);
+ return -1;
+ }
}
for (int i = 0; i < nargs; i++) {
@@ -301,6 +303,13 @@ fill_arraymethod_from_slots(
/* Check whether the slots are valid: */
if (meth->resolve_descriptors == &default_resolve_descriptors) {
+ if (spec->casting == -1) {
+ PyErr_Format(PyExc_TypeError,
+ "Cannot set casting to -1 (invalid) when not providing "
+ "the default `resolve_descriptors` function. "
+ "(method: %s)", spec->name);
+ return -1;
+ }
for (int i = 0; i < meth->nin + meth->nout; i++) {
if (res->dtypes[i] == NULL) {
if (i < meth->nin) {
@@ -573,6 +582,8 @@ boundarraymethod__resolve_descripors(
/*
* The casting flags should be the most generic casting level (except the
* cast-is-view flag. If no input is parametric, it must match exactly.
+ *
+ * (Note that these checks are only debugging checks.)
*/
int parametric = 0;
for (int i = 0; i < nin + nout; i++) {
@@ -581,34 +592,34 @@ boundarraymethod__resolve_descripors(
break;
}
}
- if (!parametric) {
- /*
- * Non-parametric can only mismatch if it switches from no to equiv
- * (e.g. due to byteorder changes).
- */
- if (self->method->casting != (casting & ~_NPY_CAST_IS_VIEW) &&
- !(self->method->casting == NPY_NO_CASTING &&
- casting == NPY_EQUIV_CASTING)) {
- PyErr_Format(PyExc_RuntimeError,
- "resolve_descriptors cast level did not match stored one "
- "(expected %d, got %d) for method %s",
- self->method->casting, (casting & ~_NPY_CAST_IS_VIEW),
- self->method->name);
- Py_DECREF(result_tuple);
- return NULL;
- }
- }
- else {
+ if (self->method->casting != -1) {
NPY_CASTING cast = casting & ~_NPY_CAST_IS_VIEW;
- if (cast != PyArray_MinCastSafety(cast, self->method->casting)) {
+ if (self->method->casting !=
+ PyArray_MinCastSafety(cast, self->method->casting)) {
PyErr_Format(PyExc_RuntimeError,
- "resolve_descriptors cast level did not match stored one "
- "(expected %d, got %d) for method %s",
- self->method->casting, (casting & ~_NPY_CAST_IS_VIEW),
- self->method->name);
+ "resolve_descriptors cast level did not match stored one. "
+ "(set level is %d, got %d for method %s)",
+ self->method->casting, cast, self->method->name);
Py_DECREF(result_tuple);
return NULL;
}
+ if (!parametric) {
+ /*
+ * Non-parametric can only mismatch if it switches from equiv to no
+ * (e.g. due to byteorder changes).
+ */
+ if (cast != self->method->casting &&
+ self->method->casting != NPY_EQUIV_CASTING) {
+ PyErr_Format(PyExc_RuntimeError,
+ "resolve_descriptors cast level changed even though "
+ "the cast is non-parametric where the only possible "
+ "change should be from equivalent to no casting. "
+ "(set level is %d, got %d for method %s)",
+ self->method->casting, cast, self->method->name);
+ Py_DECREF(result_tuple);
+ return NULL;
+ }
+ }
}
return Py_BuildValue("iN", casting, result_tuple);
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 01ee56d16..1a962ef78 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -358,6 +358,45 @@ PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp)
}
+static NPY_CASTING
+_get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl,
+ PyArray_DTypeMeta *dtypes[2], PyArray_Descr *from, PyArray_Descr *to)
+{
+ PyArray_Descr *descrs[2] = {from, to};
+ PyArray_Descr *out_descrs[2];
+
+ NPY_CASTING casting = castingimpl->resolve_descriptors(
+ castingimpl, dtypes, descrs, out_descrs);
+ if (casting < 0) {
+ return -1;
+ }
+ /* The returned descriptors may not match, requiring a second check */
+ if (out_descrs[0] != descrs[0]) {
+ NPY_CASTING from_casting = PyArray_GetCastSafety(
+ descrs[0], out_descrs[0], NULL);
+ casting = PyArray_MinCastSafety(casting, from_casting);
+ if (casting < 0) {
+ goto finish;
+ }
+ }
+ if (descrs[1] != NULL && out_descrs[1] != descrs[1]) {
+ NPY_CASTING from_casting = PyArray_GetCastSafety(
+ descrs[1], out_descrs[1], NULL);
+ casting = PyArray_MinCastSafety(casting, from_casting);
+ if (casting < 0) {
+ goto finish;
+ }
+ }
+
+ finish:
+ Py_DECREF(out_descrs[0]);
+ Py_DECREF(out_descrs[1]);
+ /* NPY_NO_CASTING has to be used for (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW) */
+ assert(casting != (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW));
+ return casting;
+}
+
+
/**
* Given two dtype instances, find the correct casting safety.
*
@@ -375,7 +414,6 @@ NPY_NO_EXPORT NPY_CASTING
PyArray_GetCastSafety(
PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype)
{
- NPY_CASTING casting;
if (to != NULL) {
to_dtype = NPY_DTYPE(to);
}
@@ -389,41 +427,67 @@ PyArray_GetCastSafety(
}
PyArrayMethodObject *castingimpl = (PyArrayMethodObject *)meth;
-
PyArray_DTypeMeta *dtypes[2] = {NPY_DTYPE(from), to_dtype};
- PyArray_Descr *descrs[2] = {from, to};
- PyArray_Descr *out_descrs[2];
-
- casting = castingimpl->resolve_descriptors(
- castingimpl, dtypes, descrs, out_descrs);
+ NPY_CASTING casting = _get_cast_safety_from_castingimpl(castingimpl,
+ dtypes, from, to);
Py_DECREF(meth);
- if (casting < 0) {
+
+ return casting;
+}
+
+
+/**
+ * Check whether a cast is safe, see also `PyArray_GetCastSafety` for
+ * a similar function. Unlike GetCastSafety, this function checks the
+ * `castingimpl->casting` when available. This allows for two things:
+ *
+ * 1. It avoids calling `resolve_descriptors` in some cases.
+ * 2. Strings need to discover the length, but in some cases we know that the
+ * cast is valid (assuming the string length is discovered first).
+ *
+ * The latter means that a `can_cast` could return True, but the cast fail
+ * because the parametric type cannot guess the correct output descriptor.
+ * (I.e. if `object_arr.astype("S")` did _not_ inspect the objects, and the
+ * user would have to guess the string length.)
+ *
+ * @param casting the requested casting safety.
+ * @param from
+ * @param to The descriptor to cast to (may be NULL)
+ * @param to_dtype If `to` is NULL, must pass the to_dtype (otherwise this
+ * is ignored).
+ * @return 0 for an invalid cast, 1 for a valid and -1 for an error.
+ */
+static int
+PyArray_CheckCastSafety(NPY_CASTING casting,
+ PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype)
+{
+ if (to != NULL) {
+ to_dtype = NPY_DTYPE(to);
+ }
+ PyObject *meth = PyArray_GetCastingImpl(NPY_DTYPE(from), to_dtype);
+ if (meth == NULL) {
return -1;
}
- /* The returned descriptors may not match, requiring a second check */
- if (out_descrs[0] != descrs[0]) {
- NPY_CASTING from_casting = PyArray_GetCastSafety(
- descrs[0], out_descrs[0], NULL);
- casting = PyArray_MinCastSafety(casting, from_casting);
- if (casting < 0) {
- goto finish;
- }
+ if (meth == Py_None) {
+ Py_DECREF(Py_None);
+ return -1;
}
- if (descrs[1] != NULL && out_descrs[1] != descrs[1]) {
- NPY_CASTING from_casting = PyArray_GetCastSafety(
- descrs[1], out_descrs[1], NULL);
- casting = PyArray_MinCastSafety(casting, from_casting);
- if (casting < 0) {
- goto finish;
- }
+ PyArrayMethodObject *castingimpl = (PyArrayMethodObject *)meth;
+
+ if (PyArray_MinCastSafety(castingimpl->casting, casting) == casting) {
+ /* No need to check using `castingimpl.resolve_descriptors()` */
+ return 1;
}
- finish:
- Py_DECREF(out_descrs[0]);
- Py_DECREF(out_descrs[1]);
- /* NPY_NO_CASTING has to be used for (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW) */
- assert(casting != (NPY_EQUIV_CASTING|_NPY_CAST_IS_VIEW));
- return casting;
+ PyArray_DTypeMeta *dtypes[2] = {NPY_DTYPE(from), to_dtype};
+ NPY_CASTING safety = _get_cast_safety_from_castingimpl(castingimpl,
+ dtypes, from, to);
+ Py_DECREF(meth);
+ /* If casting is the smaller (or equal) safety we match */
+ if (safety < 0) {
+ return -1;
+ }
+ return PyArray_MinCastSafety(safety, casting) == casting;
}
@@ -565,6 +629,8 @@ NPY_NO_EXPORT npy_bool
PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to,
NPY_CASTING casting)
{
+ PyArray_DTypeMeta *to_dtype = NPY_DTYPE(to);
+
/*
* NOTE: This code supports U and S, this is identical to the code
* in `ctors.c` which does not allow these dtypes to be attached
@@ -576,21 +642,21 @@ PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to,
* TODO: We should grow support for `np.can_cast("d", "S")` being
* different from `np.can_cast("d", "S0")` here, at least for
* the python side API.
+ * The `to = NULL` branch, which considers "S0" to be "flexible"
+ * should probably be deprecated.
+ * (This logic is duplicated in `PyArray_CanCastArrayTo`)
*/
- NPY_CASTING safety;
if (PyDataType_ISUNSIZED(to) && to->subarray == NULL) {
- safety = PyArray_GetCastSafety(from, NULL, NPY_DTYPE(to));
- }
- else {
- safety = PyArray_GetCastSafety(from, to, NPY_DTYPE(to));
+ to = NULL; /* consider mainly S0 and U0 as S and U */
}
- if (safety < 0) {
+ int is_valid = PyArray_CheckCastSafety(casting, from, to, to_dtype);
+ /* Clear any errors and consider this unsafe (should likely be changed) */
+ if (is_valid < 0) {
PyErr_Clear();
return 0;
}
- /* If casting is the smaller (or equal) safety we match */
- return PyArray_MinCastSafety(safety, casting) == casting;
+ return is_valid;
}
@@ -610,28 +676,22 @@ can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data,
/*
* If the two dtypes are actually references to the same object
* or if casting type is forced unsafe then always OK.
+ *
+ * TODO: Assuming that unsafe casting always works is not actually correct
*/
if (scal_type == to || casting == NPY_UNSAFE_CASTING ) {
return 1;
}
- /* NOTE: This is roughly the same code as `PyArray_CanCastTypeTo`: */
- NPY_CASTING safety;
- if (PyDataType_ISUNSIZED(to) && to->subarray == NULL) {
- safety = PyArray_GetCastSafety(scal_type, NULL, NPY_DTYPE(to));
- }
- else {
- safety = PyArray_GetCastSafety(scal_type, to, NPY_DTYPE(to));
- }
- if (safety < 0) {
- PyErr_Clear();
- return 0;
- }
- safety = PyArray_MinCastSafety(safety, casting);
- if (safety == casting) {
+ int valid = PyArray_CheckCastSafety(casting, scal_type, to, NPY_DTYPE(to));
+ if (valid == 1) {
/* This is definitely a valid cast. */
return 1;
}
+ if (valid < 0) {
+ /* Probably must return 0, but just keep trying for now. */
+ PyErr_Clear();
+ }
/*
* If the scalar isn't a number, value-based casting cannot kick in and
@@ -692,14 +752,29 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to,
NPY_CASTING casting)
{
PyArray_Descr *from = PyArray_DESCR(arr);
+ PyArray_DTypeMeta *to_dtype = NPY_DTYPE(to);
- /* If it's a scalar, check the value */
- if (PyArray_NDIM(arr) == 0 && !PyArray_HASFIELDS(arr)) {
+ /* NOTE, TODO: The same logic as `PyArray_CanCastTypeTo`: */
+ if (PyDataType_ISUNSIZED(to) && to->subarray == NULL) {
+ to = NULL;
+ }
+
+ /*
+ * If it's a scalar, check the value. (This only currently matters for
+ * numeric types and for `to == NULL` it can't be numeric.)
+ */
+ if (PyArray_NDIM(arr) == 0 && !PyArray_HASFIELDS(arr) && to != NULL) {
return can_cast_scalar_to(from, PyArray_DATA(arr), to, casting);
}
- /* Otherwise, use the standard rules */
- return PyArray_CanCastTypeTo(from, to, casting);
+ /* Otherwise, use the standard rules (same as `PyArray_CanCastTypeTo`) */
+ int is_valid = PyArray_CheckCastSafety(casting, from, to, to_dtype);
+ /* Clear any errors and consider this unsafe (should likely be changed) */
+ if (is_valid < 0) {
+ PyErr_Clear();
+ return 0;
+ }
+ return is_valid;
}
@@ -2122,13 +2197,6 @@ PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth)
meth->method->name);
return -1;
}
- if ((meth->method->casting & ~_NPY_CAST_IS_VIEW) != NPY_NO_CASTING) {
- PyErr_Format(PyExc_TypeError,
- "A cast where input and output DType (class) are identical "
- "must signal `no-casting`. (method: %s)",
- meth->method->name);
- return -1;
- }
if (meth->dtypes[0]->within_dtype_castingimpl != NULL) {
PyErr_Format(PyExc_RuntimeError,
"A cast was already added for %S -> %S. (method: %s)",
@@ -2400,7 +2468,7 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to)
/* Find the correct casting level, and special case no-cast */
if (dtypes[0]->kind == dtypes[1]->kind && from_itemsize == to_itemsize) {
- spec.casting = NPY_NO_CASTING;
+ spec.casting = NPY_EQUIV_CASTING;
/* When there is no casting (equivalent C-types) use byteswap loops */
slots[0].slot = NPY_METH_resolve_descriptors;
@@ -2558,7 +2626,6 @@ cast_to_string_resolve_descriptors(
dtypes[1]->type_num == NPY_STRING);
return NPY_UNSAFE_CASTING;
}
- assert(self->casting == NPY_SAFE_CASTING);
if (loop_descrs[1]->elsize >= size) {
return NPY_SAFE_CASTING;
@@ -2600,9 +2667,9 @@ add_other_to_and_from_string_cast(
.dtypes = dtypes,
.slots = slots,
};
- /* Almost everything can be safely cast to string (except unicode) */
+ /* Almost everything can be same-kind cast to string (except unicode) */
if (other->type_num != NPY_UNICODE) {
- spec.casting = NPY_SAFE_CASTING;
+ spec.casting = NPY_SAME_KIND_CASTING; /* same-kind if too short */
}
else {
spec.casting = NPY_UNSAFE_CASTING;
@@ -2722,7 +2789,7 @@ PyArray_InitializeStringCasts(void)
{0, NULL}};
PyArrayMethod_Spec spec = {
.name = "string_to_string_cast",
- .casting = NPY_NO_CASTING,
+ .casting = NPY_UNSAFE_CASTING,
.nin = 1,
.nout = 1,
.flags = (NPY_METH_REQUIRES_PYAPI |
@@ -2935,7 +3002,7 @@ PyArray_GetGenericToVoidCastingImpl(void)
method->name = "any_to_void_cast";
method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI;
- method->casting = NPY_SAFE_CASTING;
+ method->casting = -1;
method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors;
method->get_strided_loop = &nonstructured_to_structured_get_loop;
@@ -3074,7 +3141,7 @@ PyArray_GetVoidToGenericCastingImpl(void)
method->name = "void_to_any_cast";
method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI;
- method->casting = NPY_UNSAFE_CASTING;
+ method->casting = -1;
method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors;
method->get_strided_loop = &structured_to_nonstructured_get_loop;
@@ -3306,7 +3373,7 @@ PyArray_InitializeVoidToVoidCast(void)
{0, NULL}};
PyArrayMethod_Spec spec = {
.name = "void_to_void_cast",
- .casting = NPY_NO_CASTING,
+ .casting = -1, /* may not cast at all */
.nin = 1,
.nout = 1,
.flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_SUPPORTS_UNALIGNED,
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index fdf4c0839..48dc15340 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -3952,7 +3952,6 @@ time_to_string_resolve_descriptors(
return -1;
}
- assert(self->casting == NPY_UNSAFE_CASTING);
return NPY_UNSAFE_CASTING;
}
@@ -4059,7 +4058,7 @@ PyArray_InitializeDatetimeCasts()
.name = "datetime_casts",
.nin = 1,
.nout = 1,
- .casting = NPY_NO_CASTING,
+ .casting = NPY_UNSAFE_CASTING,
.flags = NPY_METH_SUPPORTS_UNALIGNED,
.slots = slots,
.dtypes = dtypes,
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index 40ca9ee2a..4ee721964 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -415,19 +415,6 @@ string_unicode_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
Py_INCREF(Py_NotImplemented);
return (PyArray_DTypeMeta *)Py_NotImplemented;
}
- if (other->type_num != NPY_STRING && other->type_num != NPY_UNICODE) {
- /* Deprecated 2020-12-19, NumPy 1.21. */
- if (DEPRECATE_FUTUREWARNING(
- "Promotion of numbers and bools to strings is deprecated. "
- "In the future, code such as `np.concatenate((['string'], [0]))` "
- "will raise an error, while `np.asarray(['string', 0])` will "
- "return an array with `dtype=object`. To avoid the warning "
- "while retaining a string result use `dtype='U'` (or 'S'). "
- "To get an array of Python objects use `dtype=object`. "
- "(Warning added in NumPy 1.21)") < 0) {
- return NULL;
- }
- }
/*
* The builtin types are ordered by complexity (aside from object) here.
* Arguably, we should not consider numbers and strings "common", but
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 9e99e0bc3..291cdae89 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -281,6 +281,19 @@ def test_array_astype():
a = np.array(1000, dtype='i4')
assert_raises(TypeError, a.astype, 'U1', casting='safe')
+@pytest.mark.parametrize("dt", ["S", "U"])
+def test_array_astype_to_string_discovery_empty(dt):
+ # See also gh-19085
+ arr = np.array([""], dtype=object)
+ # Note, the itemsize is the `0 -> 1` logic, which should change.
+    # The important part of the test is rather that it does not error.
+ assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize
+
+ # check the same thing for `np.can_cast` (since it accepts arrays)
+ assert np.can_cast(arr, dt, casting="unsafe")
+ assert not np.can_cast(arr, dt, casting="same_kind")
+ # as well as for the object as a descriptor:
+ assert np.can_cast("O", dt, casting="unsafe")
@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
def test_array_astype_to_void(dt):
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index ffe0147b2..42e632e4a 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -1105,41 +1105,6 @@ class TestNoseDecoratorsDeprecated(_DeprecationTestCase):
self.assert_deprecated(_test_parametrize)
-class TestStringPromotion(_DeprecationTestCase):
- # Deprecated 2020-12-19, NumPy 1.21
- warning_cls = FutureWarning
- message = "Promotion of numbers and bools to strings is deprecated."
-
- @pytest.mark.parametrize("dtype", "?bhilqpBHILQPefdgFDG")
- @pytest.mark.parametrize("string_dt", ["S", "U"])
- def test_deprecated(self, dtype, string_dt):
- self.assert_deprecated(lambda: np.promote_types(dtype, string_dt))
-
- # concatenate has to be able to promote to find the result dtype:
- arr1 = np.ones(3, dtype=dtype)
- arr2 = np.ones(3, dtype=string_dt)
- self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=0))
- self.assert_deprecated(lambda: np.concatenate((arr1, arr2), axis=None))
-
- self.assert_deprecated(lambda: np.array([arr1[0], arr2[0]]))
-
- @pytest.mark.parametrize("dtype", "?bhilqpBHILQPefdgFDG")
- @pytest.mark.parametrize("string_dt", ["S", "U"])
- def test_not_deprecated(self, dtype, string_dt):
- # The ufunc type resolvers run into this, but giving a futurewarning
- # here is unnecessary (it ends up as an error anyway), so test that
- # no warning is given:
- arr1 = np.ones(3, dtype=dtype)
- arr2 = np.ones(3, dtype=string_dt)
-
- # Adding two arrays uses result_type normally, which would fail:
- with pytest.raises(TypeError):
- self.assert_not_deprecated(lambda: arr1 + arr2)
- # np.equal uses a different type resolver:
- with pytest.raises(TypeError):
- self.assert_not_deprecated(lambda: np.equal(arr1, arr2))
-
-
class TestSingleElementSignature(_DeprecationTestCase):
# Deprecated 2021-04-01, NumPy 1.21
message = r"The use of a length 1"
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index 449a01d21..1b6fd21e1 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -71,10 +71,8 @@ class TestHalf:
def test_half_conversion_to_string(self, string_dt):
# Currently uses S/U32 (which is sufficient for float32)
expected_dt = np.dtype(f"{string_dt}32")
- with pytest.warns(FutureWarning):
- assert np.promote_types(np.float16, string_dt) == expected_dt
- with pytest.warns(FutureWarning):
- assert np.promote_types(string_dt, np.float16) == expected_dt
+ assert np.promote_types(np.float16, string_dt) == expected_dt
+ assert np.promote_types(string_dt, np.float16) == expected_dt
arr = np.ones(3, dtype=np.float16).astype(string_dt)
assert arr.dtype == expected_dt
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index d567653f5..25dd76256 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -6947,6 +6947,13 @@ class TestNeighborhoodIter:
x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
assert_array_equal(l, r)
+ # Test with start in the middle
+ r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
+ np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2)
+ assert_array_equal(l, r)
+
def test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index aba90ece5..f5113150e 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -848,12 +848,10 @@ class TestTypes:
assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
- with pytest.warns(FutureWarning,
- match="Promotion of numbers and bools to strings"):
- assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
- assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
- assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
- assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
+ assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
+ assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
+ assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
+ assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
@@ -901,37 +899,32 @@ class TestTypes:
S = string_dtype
- with pytest.warns(FutureWarning,
- match="Promotion of numbers and bools to strings") as record:
- # Promote numeric with unsized string:
- assert_equal(promote_types('bool', S), np.dtype(S+'5'))
- assert_equal(promote_types('b', S), np.dtype(S+'4'))
- assert_equal(promote_types('u1', S), np.dtype(S+'3'))
- assert_equal(promote_types('u2', S), np.dtype(S+'5'))
- assert_equal(promote_types('u4', S), np.dtype(S+'10'))
- assert_equal(promote_types('u8', S), np.dtype(S+'20'))
- assert_equal(promote_types('i1', S), np.dtype(S+'4'))
- assert_equal(promote_types('i2', S), np.dtype(S+'6'))
- assert_equal(promote_types('i4', S), np.dtype(S+'11'))
- assert_equal(promote_types('i8', S), np.dtype(S+'21'))
- # Promote numeric with sized string:
- assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5'))
- assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30'))
- assert_equal(promote_types('b', S+'1'), np.dtype(S+'4'))
- assert_equal(promote_types('b', S+'30'), np.dtype(S+'30'))
- assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3'))
- assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30'))
- assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5'))
- assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30'))
- assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10'))
- assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30'))
- assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20'))
- assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30'))
- # Promote with object:
- assert_equal(promote_types('O', S+'30'), np.dtype('O'))
-
- assert len(record) == 22 # each string promotion gave one warning
-
+ # Promote numeric with unsized string:
+ assert_equal(promote_types('bool', S), np.dtype(S+'5'))
+ assert_equal(promote_types('b', S), np.dtype(S+'4'))
+ assert_equal(promote_types('u1', S), np.dtype(S+'3'))
+ assert_equal(promote_types('u2', S), np.dtype(S+'5'))
+ assert_equal(promote_types('u4', S), np.dtype(S+'10'))
+ assert_equal(promote_types('u8', S), np.dtype(S+'20'))
+ assert_equal(promote_types('i1', S), np.dtype(S+'4'))
+ assert_equal(promote_types('i2', S), np.dtype(S+'6'))
+ assert_equal(promote_types('i4', S), np.dtype(S+'11'))
+ assert_equal(promote_types('i8', S), np.dtype(S+'21'))
+ # Promote numeric with sized string:
+ assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5'))
+ assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('b', S+'1'), np.dtype(S+'4'))
+ assert_equal(promote_types('b', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3'))
+ assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5'))
+ assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10'))
+ assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20'))
+ assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30'))
+ # Promote with object:
+ assert_equal(promote_types('O', S+'30'), np.dtype('O'))
@pytest.mark.parametrize(["dtype1", "dtype2"],
[[np.dtype("V6"), np.dtype("V10")],
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index dbfb75c9a..312d0683d 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -782,9 +782,7 @@ class TestRegression:
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
- with pytest.warns(FutureWarning,
- match="Promotion of numbers and bools to strings"):
- np.hstack((t, s))
+ np.hstack((t, s))
def test_arr_transpose(self):
# Ticket #516
diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py
index 0388ad577..0fa1c11dd 100644
--- a/numpy/distutils/intelccompiler.py
+++ b/numpy/distutils/intelccompiler.py
@@ -58,7 +58,7 @@ class IntelEM64TCCompiler(UnixCCompiler):
v = self.get_version()
mpopt = 'openmp' if v and v < '15' else 'qopenmp'
- self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 '
+ self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 '
'-fomit-frame-pointer -{}').format(mpopt)
compiler = self.cc_exe
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 72d8e9de4..5140ffa61 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -631,7 +631,8 @@ class ndindex:
Examples
--------
- # dimensions as individual arguments
+ Dimensions as individual arguments
+
>>> for index in np.ndindex(3, 2, 1):
... print(index)
(0, 0, 0)
@@ -641,7 +642,8 @@ class ndindex:
(2, 0, 0)
(2, 1, 0)
- # same dimensions - but in a tuple (3, 2, 1)
+ Same dimensions - but in a tuple ``(3, 2, 1)``
+
>>> for index in np.ndindex((3, 2, 1)):
... print(index)
(0, 0, 0)
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 56fcce621..23021cafa 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -489,8 +489,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
- Weights to apply to the y-coordinates of the sample points. For
- gaussian uncertainties, use 1/sigma (not 1/sigma**2).
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
cov : bool or str, optional
If given and not `False`, return not just the estimate but also its
covariance matrix. By default, the covariance are scaled by
@@ -498,7 +501,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
to be unreliable except in a relative sense and everything is scaled
such that the reduced chi2 is unity. This scaling is omitted if
``cov='unscaled'``, as is relevant for the case that the weights are
- 1/sigma**2, with sigma known to be a reliable estimate of the
+ w = 1/sigma, with sigma known to be a reliable estimate of the
uncertainty.
Returns
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index 94fac7ef0..373226277 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -64,8 +64,7 @@ class TestRegression:
def test_mem_string_concat(self):
# Ticket #469
x = np.array([])
- with pytest.warns(FutureWarning):
- np.append(x, 'asdasd\tasdasd')
+ np.append(x, 'asdasd\tasdasd')
def test_poly_div(self):
# Ticket #553
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index b04b8e66b..5525b232b 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -936,11 +936,11 @@ class ABCPolyBase(abc.ABC):
diagnostic information from the singular value decomposition is
also returned.
w : array_like, shape (M,), optional
- Weights. If not None the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products
- ``w[i]*y[i]`` all have the same variance. The default value is
- None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have
+ the same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
.. versionadded:: 1.5.0
window : {[beg, end]}, optional
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index d24fc738f..210000ec4 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -1582,10 +1582,11 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
.. versionadded:: 1.5.0
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index eef5c25b2..c1b9f71c0 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -1310,10 +1310,11 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
Returns
-------
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index 05d1337b0..b7095c910 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -1301,10 +1301,11 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
Returns
-------
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index 69d557510..d3b6432dc 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -1307,10 +1307,11 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
Returns
-------
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index cd4da2a79..d4cf4accf 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -1321,10 +1321,11 @@ def legfit(x, y, deg, rcond=None, full=False, w=None):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
.. versionadded:: 1.5.0
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index 940eed5e3..d8a032068 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -1252,10 +1252,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None):
diagnostic information from the singular value decomposition (used
to solve the fit's matrix equation) is also returned.
w : array_like, shape (`M`,), optional
- Weights. If not None, the contribution of each point
- ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
- weights are chosen so that the errors of the products ``w[i]*y[i]``
- all have the same variance. The default value is None.
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
.. versionadded:: 1.5.0
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index 252123a19..19424169a 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -164,7 +164,7 @@ API
# NOTE: The API section will be appended with additional entries
# further down in this file
-from typing import TYPE_CHECKING, List
+from typing import TYPE_CHECKING, List, Any
if TYPE_CHECKING:
# typing_extensions is always available when type-checking
@@ -376,14 +376,14 @@ if TYPE_CHECKING:
_GUFunc_Nin2_Nout1,
)
else:
- _UFunc_Nin1_Nout1 = NotImplemented
- _UFunc_Nin2_Nout1 = NotImplemented
- _UFunc_Nin1_Nout2 = NotImplemented
- _UFunc_Nin2_Nout2 = NotImplemented
- _GUFunc_Nin2_Nout1 = NotImplemented
+ _UFunc_Nin1_Nout1 = Any
+ _UFunc_Nin2_Nout1 = Any
+ _UFunc_Nin1_Nout2 = Any
+ _UFunc_Nin2_Nout2 = Any
+ _GUFunc_Nin2_Nout1 = Any
# Clean up the namespace
-del TYPE_CHECKING, final, List
+del TYPE_CHECKING, final, List, Any
if __doc__ is not None:
from ._add_docstring import _docstrings
diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py
index 2b823ecc0..3bdbed8f8 100644
--- a/numpy/typing/_array_like.py
+++ b/numpy/typing/_array_like.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import sys
-from typing import Any, Sequence, TYPE_CHECKING, Union, TypeVar
+from typing import Any, Sequence, TYPE_CHECKING, Union, TypeVar, Generic
from numpy import (
ndarray,
@@ -34,7 +34,7 @@ _ScalarType = TypeVar("_ScalarType", bound=generic)
_DType = TypeVar("_DType", bound="dtype[Any]")
_DType_co = TypeVar("_DType_co", covariant=True, bound="dtype[Any]")
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
+if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
# The `_SupportsArray` protocol only cares about the default dtype
# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned
# array.
@@ -43,7 +43,7 @@ if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
class _SupportsArray(Protocol[_DType_co]):
def __array__(self) -> ndarray[Any, _DType_co]: ...
else:
- _SupportsArray = Any
+ class _SupportsArray(Generic[_DType_co]): ...
# TODO: Wait for support for recursive types
_NestedSequence = Union[
diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py
index 54f9b1425..8f911da3b 100644
--- a/numpy/typing/_callable.py
+++ b/numpy/typing/_callable.py
@@ -53,7 +53,7 @@ if sys.version_info >= (3, 8):
elif _HAS_TYPING_EXTENSIONS:
from typing_extensions import Protocol
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
+if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_2Tuple = Tuple[_T1, _T1]
@@ -332,25 +332,25 @@ if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
def __call__(self, __other: _T2) -> NDArray[bool_]: ...
else:
- _BoolOp = NotImplemented
- _BoolBitOp = NotImplemented
- _BoolSub = NotImplemented
- _BoolTrueDiv = NotImplemented
- _BoolMod = NotImplemented
- _BoolDivMod = NotImplemented
- _TD64Div = NotImplemented
- _IntTrueDiv = NotImplemented
- _UnsignedIntOp = NotImplemented
- _UnsignedIntBitOp = NotImplemented
- _UnsignedIntMod = NotImplemented
- _UnsignedIntDivMod = NotImplemented
- _SignedIntOp = NotImplemented
- _SignedIntBitOp = NotImplemented
- _SignedIntMod = NotImplemented
- _SignedIntDivMod = NotImplemented
- _FloatOp = NotImplemented
- _FloatMod = NotImplemented
- _FloatDivMod = NotImplemented
- _ComplexOp = NotImplemented
- _NumberOp = NotImplemented
- _ComparisonOp = NotImplemented
+ _BoolOp = Any
+ _BoolBitOp = Any
+ _BoolSub = Any
+ _BoolTrueDiv = Any
+ _BoolMod = Any
+ _BoolDivMod = Any
+ _TD64Div = Any
+ _IntTrueDiv = Any
+ _UnsignedIntOp = Any
+ _UnsignedIntBitOp = Any
+ _UnsignedIntMod = Any
+ _UnsignedIntDivMod = Any
+ _SignedIntOp = Any
+ _SignedIntBitOp = Any
+ _SignedIntMod = Any
+ _SignedIntDivMod = Any
+ _FloatOp = Any
+ _FloatMod = Any
+ _FloatDivMod = Any
+ _ComplexOp = Any
+ _NumberOp = Any
+ _ComparisonOp = Any
diff --git a/numpy/typing/_char_codes.py b/numpy/typing/_char_codes.py
index 6b33f995d..22ee168e9 100644
--- a/numpy/typing/_char_codes.py
+++ b/numpy/typing/_char_codes.py
@@ -8,7 +8,7 @@ if sys.version_info >= (3, 8):
elif _HAS_TYPING_EXTENSIONS:
from typing_extensions import Literal
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
+if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
_BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_", "bool8"]
_UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"]
@@ -120,52 +120,52 @@ if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
]
else:
- _BoolCodes = NotImplemented
-
- _UInt8Codes = NotImplemented
- _UInt16Codes = NotImplemented
- _UInt32Codes = NotImplemented
- _UInt64Codes = NotImplemented
-
- _Int8Codes = NotImplemented
- _Int16Codes = NotImplemented
- _Int32Codes = NotImplemented
- _Int64Codes = NotImplemented
-
- _Float16Codes = NotImplemented
- _Float32Codes = NotImplemented
- _Float64Codes = NotImplemented
-
- _Complex64Codes = NotImplemented
- _Complex128Codes = NotImplemented
-
- _ByteCodes = NotImplemented
- _ShortCodes = NotImplemented
- _IntCCodes = NotImplemented
- _IntPCodes = NotImplemented
- _IntCodes = NotImplemented
- _LongLongCodes = NotImplemented
-
- _UByteCodes = NotImplemented
- _UShortCodes = NotImplemented
- _UIntCCodes = NotImplemented
- _UIntPCodes = NotImplemented
- _UIntCodes = NotImplemented
- _ULongLongCodes = NotImplemented
-
- _HalfCodes = NotImplemented
- _SingleCodes = NotImplemented
- _DoubleCodes = NotImplemented
- _LongDoubleCodes = NotImplemented
-
- _CSingleCodes = NotImplemented
- _CDoubleCodes = NotImplemented
- _CLongDoubleCodes = NotImplemented
-
- _StrCodes = NotImplemented
- _BytesCodes = NotImplemented
- _VoidCodes = NotImplemented
- _ObjectCodes = NotImplemented
-
- _DT64Codes = NotImplemented
- _TD64Codes = NotImplemented
+ _BoolCodes = Any
+
+ _UInt8Codes = Any
+ _UInt16Codes = Any
+ _UInt32Codes = Any
+ _UInt64Codes = Any
+
+ _Int8Codes = Any
+ _Int16Codes = Any
+ _Int32Codes = Any
+ _Int64Codes = Any
+
+ _Float16Codes = Any
+ _Float32Codes = Any
+ _Float64Codes = Any
+
+ _Complex64Codes = Any
+ _Complex128Codes = Any
+
+ _ByteCodes = Any
+ _ShortCodes = Any
+ _IntCCodes = Any
+ _IntPCodes = Any
+ _IntCodes = Any
+ _LongLongCodes = Any
+
+ _UByteCodes = Any
+ _UShortCodes = Any
+ _UIntCCodes = Any
+ _UIntPCodes = Any
+ _UIntCodes = Any
+ _ULongLongCodes = Any
+
+ _HalfCodes = Any
+ _SingleCodes = Any
+ _DoubleCodes = Any
+ _LongDoubleCodes = Any
+
+ _CSingleCodes = Any
+ _CDoubleCodes = Any
+ _CLongDoubleCodes = Any
+
+ _StrCodes = Any
+ _BytesCodes = Any
+ _VoidCodes = Any
+ _ObjectCodes = Any
+
+ _DT64Codes = Any
+ _TD64Codes = Any
diff --git a/numpy/typing/_dtype_like.py b/numpy/typing/_dtype_like.py
index 405cc4a3c..b2ce3adb4 100644
--- a/numpy/typing/_dtype_like.py
+++ b/numpy/typing/_dtype_like.py
@@ -11,9 +11,6 @@ if sys.version_info >= (3, 8):
from typing import Protocol, TypedDict
elif _HAS_TYPING_EXTENSIONS:
from typing_extensions import Protocol, TypedDict
-
-if sys.version_info >= (3, 9):
- from types import GenericAlias
else:
from ._generic_alias import _GenericAlias as GenericAlias
@@ -62,7 +59,7 @@ from ._char_codes import (
_DTypeLikeNested = Any # TODO: wait for support for recursive types
_DType_co = TypeVar("_DType_co", covariant=True, bound=DType[Any])
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
+if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
# Mandatory keys
class _DTypeDictBase(TypedDict):
names: Sequence[str]
@@ -81,7 +78,7 @@ if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS:
def dtype(self) -> _DType_co: ...
else:
- _DTypeDict = NotImplemented
+ _DTypeDict = Any
class _SupportsDType: ...
_SupportsDType = GenericAlias(_SupportsDType, _DType_co)
diff --git a/numpy/typing/_extended_precision.py b/numpy/typing/_extended_precision.py
index 3f1ce2038..0900bc659 100644
--- a/numpy/typing/_extended_precision.py
+++ b/numpy/typing/_extended_precision.py
@@ -28,15 +28,15 @@ if TYPE_CHECKING:
complex256 = np.complexfloating[_128Bit, _128Bit]
complex512 = np.complexfloating[_256Bit, _256Bit]
else:
- uint128 = NotImplemented
- uint256 = NotImplemented
- int128 = NotImplemented
- int256 = NotImplemented
- float80 = NotImplemented
- float96 = NotImplemented
- float128 = NotImplemented
- float256 = NotImplemented
- complex160 = NotImplemented
- complex192 = NotImplemented
- complex256 = NotImplemented
- complex512 = NotImplemented
+ uint128 = Any
+ uint256 = Any
+ int128 = Any
+ int256 = Any
+ float80 = Any
+ float96 = Any
+ float128 = Any
+ float256 = Any
+ complex160 = Any
+ complex192 = Any
+ complex256 = Any
+ complex512 = Any
diff --git a/numpy/typing/_generic_alias.py b/numpy/typing/_generic_alias.py
index 0d30f54ca..8d65ef855 100644
--- a/numpy/typing/_generic_alias.py
+++ b/numpy/typing/_generic_alias.py
@@ -93,7 +93,7 @@ class _GenericAlias:
return super().__getattribute__("_origin")
@property
- def __args__(self) -> Tuple[Any, ...]:
+ def __args__(self) -> Tuple[object, ...]:
return super().__getattribute__("_args")
@property
@@ -101,16 +101,23 @@ class _GenericAlias:
"""Type variables in the ``GenericAlias``."""
return super().__getattribute__("_parameters")
- def __init__(self, origin: type, args: Any) -> None:
+ def __init__(
+ self,
+ origin: type,
+ args: object | Tuple[object, ...],
+ ) -> None:
self._origin = origin
self._args = args if isinstance(args, tuple) else (args,)
- self._parameters = tuple(_parse_parameters(args))
+ self._parameters = tuple(_parse_parameters(self.__args__))
@property
def __call__(self) -> type:
return self.__origin__
- def __reduce__(self: _T) -> Tuple[Type[_T], Tuple[type, Tuple[Any, ...]]]:
+ def __reduce__(self: _T) -> Tuple[
+ Type[_T],
+ Tuple[type, Tuple[object, ...]],
+ ]:
cls = type(self)
return cls, (self.__origin__, self.__args__)
@@ -148,7 +155,7 @@ class _GenericAlias:
origin = _to_str(self.__origin__)
return f"{origin}[{args}]"
- def __getitem__(self: _T, key: Any) -> _T:
+ def __getitem__(self: _T, key: object | Tuple[object, ...]) -> _T:
"""Return ``self[key]``."""
key_tup = key if isinstance(key, tuple) else (key,)
diff --git a/numpy/typing/_shape.py b/numpy/typing/_shape.py
index 0742be8a9..75698f3d3 100644
--- a/numpy/typing/_shape.py
+++ b/numpy/typing/_shape.py
@@ -1,5 +1,5 @@
import sys
-from typing import Sequence, Tuple, Union
+from typing import Sequence, Tuple, Union, Any
from . import _HAS_TYPING_EXTENSIONS
@@ -8,7 +8,7 @@ if sys.version_info >= (3, 8):
elif _HAS_TYPING_EXTENSIONS:
from typing_extensions import SupportsIndex
else:
- SupportsIndex = NotImplemented
+ SupportsIndex = Any
_Shape = Tuple[int, ...]
diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.py b/numpy/typing/tests/data/reveal/ndarray_conversion.py
index 4ee637b75..03f2faf43 100644
--- a/numpy/typing/tests/data/reveal/ndarray_conversion.py
+++ b/numpy/typing/tests/data/reveal/ndarray_conversion.py
@@ -1,12 +1,13 @@
import numpy as np
+import numpy.typing as npt
-nd = np.array([[1, 2], [3, 4]])
+nd: npt.NDArray[np.int_] = np.array([[1, 2], [3, 4]])
# item
-reveal_type(nd.item()) # E: Any
-reveal_type(nd.item(1)) # E: Any
-reveal_type(nd.item(0, 1)) # E: Any
-reveal_type(nd.item((0, 1))) # E: Any
+reveal_type(nd.item()) # E: int
+reveal_type(nd.item(1)) # E: int
+reveal_type(nd.item(0, 1)) # E: int
+reveal_type(nd.item((0, 1))) # E: int
# tolist
reveal_type(nd.tolist()) # E: Any
@@ -19,36 +20,32 @@ reveal_type(nd.tolist()) # E: Any
# dumps is pretty simple
# astype
-reveal_type(nd.astype("float")) # E: numpy.ndarray
-reveal_type(nd.astype(float)) # E: numpy.ndarray
-reveal_type(nd.astype(float, "K")) # E: numpy.ndarray
-reveal_type(nd.astype(float, "K", "unsafe")) # E: numpy.ndarray
-reveal_type(nd.astype(float, "K", "unsafe", True)) # E: numpy.ndarray
-reveal_type(nd.astype(float, "K", "unsafe", True, True)) # E: numpy.ndarray
+reveal_type(nd.astype("float")) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(nd.astype(float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(nd.astype(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K", "unsafe")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K", "unsafe", True)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K", "unsafe", True, True)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
# byteswap
-reveal_type(nd.byteswap()) # E: numpy.ndarray
-reveal_type(nd.byteswap(True)) # E: numpy.ndarray
+reveal_type(nd.byteswap()) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(nd.byteswap(True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
# copy
-reveal_type(nd.copy()) # E: numpy.ndarray
-reveal_type(nd.copy("C")) # E: numpy.ndarray
+reveal_type(nd.copy()) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(nd.copy("C")) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
-# view
-class SubArray(np.ndarray):
- pass
-
-
-reveal_type(nd.view()) # E: numpy.ndarray
-reveal_type(nd.view(np.int64)) # E: numpy.ndarray
-# replace `Any` with `numpy.matrix` when `matrix` will be added to stubs
-reveal_type(nd.view(np.int64, np.matrix)) # E: Any
-reveal_type(nd.view(np.int64, SubArray)) # E: SubArray
+reveal_type(nd.view()) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(nd.view(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.view(float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(nd.view(np.float64, np.matrix)) # E: numpy.matrix[Any, Any]
# getfield
-reveal_type(nd.getfield("float")) # E: numpy.ndarray
-reveal_type(nd.getfield(float)) # E: numpy.ndarray
-reveal_type(nd.getfield(float, 8)) # E: numpy.ndarray
+reveal_type(nd.getfield("float")) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(nd.getfield(float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(nd.getfield(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(nd.getfield(np.float64, 8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
# setflags does not return a value
# fill does not return a value
diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py
index d98388422..c081a5c67 100644
--- a/numpy/typing/tests/data/reveal/scalars.py
+++ b/numpy/typing/tests/data/reveal/scalars.py
@@ -114,3 +114,15 @@ reveal_type(f8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(c16.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
reveal_type(U.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(S.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(i8.astype(float)) # E: Any
+reveal_type(i8.astype(np.float64)) # E: {float64}
+
+reveal_type(i8.view()) # E: {int64}
+reveal_type(i8.view(np.float64)) # E: {float64}
+reveal_type(i8.view(float)) # E: Any
+reveal_type(i8.view(np.float64, np.ndarray)) # E: {float64}
+
+reveal_type(i8.getfield(float)) # E: Any
+reveal_type(i8.getfield(np.float64)) # E: {float64}
+reveal_type(i8.getfield(np.float64, 8)) # E: {float64}
diff --git a/numpy/typing/tests/test_generic_alias.py b/numpy/typing/tests/test_generic_alias.py
index 0b9917439..538d7eae5 100644
--- a/numpy/typing/tests/test_generic_alias.py
+++ b/numpy/typing/tests/test_generic_alias.py
@@ -21,8 +21,8 @@ if sys.version_info >= (3, 9):
NDArray_ref = types.GenericAlias(np.ndarray, (Any, DType_ref))
FuncType = Callable[[Union[_GenericAlias, types.GenericAlias]], Any]
else:
- DType_ref = NotImplemented
- NDArray_ref = NotImplemented
+ DType_ref = Any
+ NDArray_ref = Any
FuncType = Callable[[_GenericAlias], Any]
GETATTR_NAMES = sorted(set(dir(np.ndarray)) - _GenericAlias._ATTR_EXCEPTIONS)
@@ -41,6 +41,12 @@ class TestGenericAlias:
@pytest.mark.parametrize("name,func", [
("__init__", lambda n: n),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, Any)),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (Any,))),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (Any, Any))),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, T1)),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (T1,))),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (T1, T2))),
("__origin__", lambda n: n.__origin__),
("__args__", lambda n: n.__args__),
("__parameters__", lambda n: n.__parameters__),