path: root/numpy
author     Charles Harris <charlesr.harris@gmail.com>   2022-06-26 12:52:52 -0600
committer  GitHub <noreply@github.com>                  2022-06-26 12:52:52 -0600
commit     b65f0b7b8ba7e80b65773e06aae22a8369678868 (patch)
tree       596a6389b54c72b2ca5a9a66d974ef0ab5b2a4af /numpy
parent     11e465e30823f044887d3f5e2f648007ce0077c2 (diff)
parent     7dcfaafa0b78618a6ec2a5279090729f7ec583f0 (diff)
download   numpy-b65f0b7b8ba7e80b65773e06aae22a8369678868.tar.gz
Merge pull request #21626 from seberg/weak-scalars
API: Introduce optional (and partial) NEP 50 weak scalar logic
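Under NEP 50 "weak" promotion, Python int, float, and complex operands no longer steer the result dtype through their concrete value; the dtype of the NumPy operand wins. A minimal sketch of the opt-in behavior this merge introduces, using the private helpers it adds (see the new test file below):

    import numpy as np

    np._set_promotion_state("weak")  # opt in; "legacy" remains the default

    # The Python int 2 is a "weak" scalar: it adapts to the uint8 operand
    # instead of triggering value-based promotion.
    res = np.uint8(1) + 2
    assert res.dtype == np.uint8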
Diffstat (limited to 'numpy')
-rw-r--r--  numpy/__init__.py                                  4
-rw-r--r--  numpy/__init__.pyi                                 7
-rw-r--r--  numpy/conftest.py                                 17
-rw-r--r--  numpy/core/_methods.py                            16
-rw-r--r--  numpy/core/_ufunc_config.py                       22
-rw-r--r--  numpy/core/_ufunc_config.pyi                       2
-rw-r--r--  numpy/core/include/numpy/ndarraytypes.h           11
-rw-r--r--  numpy/core/multiarray.py                           6
-rw-r--r--  numpy/core/numeric.py                              9
-rw-r--r--  numpy/core/src/multiarray/abstractdtypes.c        71
-rw-r--r--  numpy/core/src/multiarray/abstractdtypes.h        53
-rw-r--r--  numpy/core/src/multiarray/arrayobject.h           16
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.c     383
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.h      27
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c      17
-rw-r--r--  numpy/core/src/umath/dispatching.c                82
-rw-r--r--  numpy/core/src/umath/dispatching.h                 1
-rw-r--r--  numpy/core/src/umath/ufunc_object.c               44
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c      24
-rw-r--r--  numpy/core/tests/test_dtype.py                    57
-rw-r--r--  numpy/core/tests/test_einsum.py                    1
-rw-r--r--  numpy/core/tests/test_half.py                     45
-rw-r--r--  numpy/core/tests/test_nep50_promotions.py         72
-rw-r--r--  numpy/core/tests/test_scalarmath.py                1
-rw-r--r--  numpy/core/tests/test_ufunc.py                     1
-rw-r--r--  numpy/lib/tests/test_function_base.py             13
-rw-r--r--  numpy/linalg/tests/test_linalg.py                  6
-rw-r--r--  numpy/testing/_private/utils.py                   11
28 files changed, 800 insertions, 219 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 83487dc97..66b8e3eca 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -337,7 +337,7 @@ else:
"""
try:
x = ones(2, dtype=float32)
- if not abs(x.dot(x) - 2.0) < 1e-5:
+ if not abs(x.dot(x) - float32(2.0)) < 1e-5:
raise AssertionError()
except AssertionError:
msg = ("The current Numpy installation ({!r}) fails to "
@@ -413,6 +413,8 @@ else:
# it is tidier organized.
core.multiarray._multiarray_umath._reload_guard()
+ core._set_promotion_state(os.environ.get("NPY_PROMOTION_STATE", "legacy"))
+
# Tell PyInstaller where to find hook-numpy.py
def _pyinstaller_hooks_dir():
from pathlib import Path
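The hunk above seeds the initial state from the `NPY_PROMOTION_STATE` environment variable at import time, defaulting to "legacy". A sketch of picking the state before NumPy is imported:

    import os
    os.environ["NPY_PROMOTION_STATE"] = "weak_and_warn"  # must be set before the numpy import

    import numpy as np
    assert np._get_promotion_state() == "weak_and_warn"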
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index d6faa9ca3..001fa4e88 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -9,6 +9,7 @@ import enum
from abc import abstractmethod
from types import TracebackType, MappingProxyType
from contextlib import ContextDecorator
+from contextlib import contextmanager
if sys.version_info >= (3, 9):
from types import GenericAlias
@@ -180,6 +181,7 @@ from collections.abc import (
from typing import (
Literal as L,
Any,
+ Generator,
Generic,
IO,
NoReturn,
@@ -3350,6 +3352,11 @@ class errstate(Generic[_CallType], ContextDecorator):
/,
) -> None: ...
+@contextmanager
+def _no_nep50_warning() -> Generator[None, None, None]: ...
+def _get_promotion_state() -> str: ...
+def _set_promotion_state(state: str, /) -> None: ...
+
class ndenumerate(Generic[_ScalarType]):
iter: flatiter[NDArray[_ScalarType]]
@overload
diff --git a/numpy/conftest.py b/numpy/conftest.py
index fd5fdd77d..8aa6587ee 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -117,3 +117,20 @@ def add_np(doctest_namespace):
@pytest.fixture(autouse=True)
def env_setup(monkeypatch):
monkeypatch.setenv('PYTHONHASHSEED', '0')
+
+
+@pytest.fixture(params=[True, False])
+def weak_promotion(request):
+ """
+ Fixture to ensure the "legacy" promotion state, or to change it to use
+ the new weak promotion (plus warning). `weak_promotion` should be used
+ as a parameter in the test function.
+ """
+ state = numpy._get_promotion_state()
+ if request.param:
+ numpy._set_promotion_state("weak_and_warn")
+ else:
+ numpy._set_promotion_state("legacy")
+
+ yield request.param
+ numpy._set_promotion_state(state)
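A test that requests this fixture runs once per promotion state and can branch its expectation on the fixture value. A sketch modeled on the `test_half.py` changes below (the test name is illustrative):

    import numpy as np

    @np._no_nep50_warning()
    def test_power_half_float32(weak_promotion):
        a16 = np.array([1], dtype=np.float16)
        b32 = np.float32(1)
        # Legacy promotion lets the array dtype win over the scalar; weak
        # promotion treats the float32 scalar like any other operand.
        expected = np.float32 if weak_promotion else np.float16
        assert np.power(a16, b32).dtype == expected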
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index eda00147d..040f02a9d 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -11,6 +11,7 @@ from numpy.core import umath as um
from numpy.core.multiarray import asanyarray
from numpy.core import numerictypes as nt
from numpy.core import _exceptions
+from numpy.core._ufunc_config import _no_nep50_warning
from numpy._globals import _NoValue
from numpy.compat import pickle, os_fspath
@@ -179,8 +180,9 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
if isinstance(ret, mu.ndarray):
- ret = um.true_divide(
- ret, rcount, out=ret, casting='unsafe', subok=False)
+ with _no_nep50_warning():
+ ret = um.true_divide(
+ ret, rcount, out=ret, casting='unsafe', subok=False)
if is_float16_result and out is None:
ret = arr.dtype.type(ret)
elif hasattr(ret, 'dtype'):
@@ -220,8 +222,9 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
# matching rcount to arrmean when where is specified as array
div = rcount.reshape(arrmean.shape)
if isinstance(arrmean, mu.ndarray):
- arrmean = um.true_divide(arrmean, div, out=arrmean, casting='unsafe',
- subok=False)
+ with _no_nep50_warning():
+ arrmean = um.true_divide(arrmean, div, out=arrmean,
+ casting='unsafe', subok=False)
elif hasattr(arrmean, "dtype"):
arrmean = arrmean.dtype.type(arrmean / rcount)
else:
@@ -251,8 +254,9 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
# divide by degrees of freedom
if isinstance(ret, mu.ndarray):
- ret = um.true_divide(
- ret, rcount, out=ret, casting='unsafe', subok=False)
+ with _no_nep50_warning():
+ ret = um.true_divide(
+ ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
diff --git a/numpy/core/_ufunc_config.py b/numpy/core/_ufunc_config.py
index a731f6bf7..7f9995fa3 100644
--- a/numpy/core/_ufunc_config.py
+++ b/numpy/core/_ufunc_config.py
@@ -5,6 +5,7 @@ This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj`
"""
import collections.abc
import contextlib
+import contextvars
from .overrides import set_module
from .umath import (
@@ -16,7 +17,7 @@ from . import umath
__all__ = [
"seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
- "errstate",
+ "errstate", '_no_nep50_warning'
]
_errdict = {"ignore": ERR_IGNORE,
@@ -444,3 +445,22 @@ def _setdef():
# set the default values
_setdef()
+
+
+NO_NEP50_WARNING = contextvars.ContextVar("_no_nep50_warning", default=False)
+
+@set_module('numpy')
+@contextlib.contextmanager
+def _no_nep50_warning():
+ """
+ Context manager to disable NEP 50 warnings. This context manager is
+ only relevant if the NEP 50 warnings are enabled globally (which is not
+ thread/context safe).
+
+ This warning context manager itself is fully safe, however.
+ """
+ token = NO_NEP50_WARNING.set(True)
+ try:
+ yield
+ finally:
+ NO_NEP50_WARNING.reset(token)
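The context variable makes the warning suppression itself thread- and context-safe even though the global mode switch is not. A usage sketch, reusing an example from the new tests:

    import numpy as np

    np._set_promotion_state("weak_and_warn")
    np.float32(1) + 3e100      # UserWarning: result dtype changed (legacy gave float64)
    with np._no_nep50_warning():
        np.float32(1) + 3e100  # NEP 50 warning suppressed (the unrelated
                               # float32 overflow warning still fires)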
diff --git a/numpy/core/_ufunc_config.pyi b/numpy/core/_ufunc_config.pyi
index b7c2ebefc..f56504507 100644
--- a/numpy/core/_ufunc_config.pyi
+++ b/numpy/core/_ufunc_config.pyi
@@ -34,4 +34,4 @@ def seterrcall(
) -> None | _ErrFunc | _SupportsWrite[str]: ...
def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]: ...
-# See `numpy/__init__.pyi` for the `errstate` class
+# See `numpy/__init__.pyi` for the `errstate` class and `_no_nep50_warning`
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index 97e0f4e2a..1db3b974f 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -872,17 +872,6 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
*/
#define NPY_ARRAY_ENSUREARRAY 0x0040
-#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
- /*
- * Dual use of the ENSUREARRAY flag, to indicate that this was converted
- * from a python float, int, or complex.
- * An array using this flag must be a temporary array that can never
- * leave the C internals of NumPy. Even if it does, ENSUREARRAY is
- * absolutely safe to abuse, since it already is a base class array :).
- */
- #define _NPY_ARRAY_WAS_PYSCALAR 0x0040
-#endif /* NPY_INTERNAL_BUILD */
-
/*
* Make sure that the strides are in units of the element size Needed
* for some operations with record-arrays.
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index 8c14583e6..65b7cb46d 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -17,6 +17,7 @@ from ._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, from_dlpack, _insert, _reconstruct,
_vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version,
_get_madvise_hugepage, _set_madvise_hugepage,
+ _get_promotion_state, _set_promotion_state,
)
__all__ = [
@@ -40,7 +41,8 @@ __all__ = [
'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function',
'set_legacy_print_mode', 'set_numeric_ops', 'set_string_function',
'set_typeDict', 'shares_memory', 'tracemalloc_domain', 'typeinfo',
- 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros']
+ 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros',
+ '_get_promotion_state', '_set_promotion_state']
# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
@@ -68,6 +70,8 @@ promote_types.__module__ = 'numpy'
set_numeric_ops.__module__ = 'numpy'
seterrobj.__module__ = 'numpy'
zeros.__module__ = 'numpy'
+_get_promotion_state.__module__ = 'numpy'
+_set_promotion_state.__module__ = 'numpy'
# We can't verify dispatcher signatures because NumPy's C functions don't
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index bb3cbf054..d6e1d4eec 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -17,7 +17,7 @@ from .multiarray import (
fromstring, inner, lexsort, matmul, may_share_memory,
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
- zeros, normalize_axis_index)
+ zeros, normalize_axis_index, _get_promotion_state, _set_promotion_state)
from . import overrides
from . import umath
@@ -27,7 +27,7 @@ from .umath import (multiply, invert, sin, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
from ._exceptions import TooHardError, AxisError
-from ._ufunc_config import errstate
+from ._ufunc_config import errstate, _no_nep50_warning
bitwise_not = invert
ufunc = type(sin)
@@ -54,7 +54,8 @@ __all__ = [
'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
- 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError']
+ 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError',
+ '_get_promotion_state', '_set_promotion_state']
@set_module('numpy')
@@ -2352,7 +2353,7 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
array([False, True])
"""
def within_tol(x, y, atol, rtol):
- with errstate(invalid='ignore'):
+ with errstate(invalid='ignore'), _no_nep50_warning():
return less_equal(abs(x-y), atol + rtol * abs(y))
x = asanyarray(a)
diff --git a/numpy/core/src/multiarray/abstractdtypes.c b/numpy/core/src/multiarray/abstractdtypes.c
index b0345c46b..3e89d045e 100644
--- a/numpy/core/src/multiarray/abstractdtypes.c
+++ b/numpy/core/src/multiarray/abstractdtypes.c
@@ -164,7 +164,39 @@ int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other)
}
else if (NPY_DT_is_legacy(other)) {
/* This is a back-compat fallback to usually do the right thing... */
- return PyArray_DTypeFromTypeNum(NPY_UINT8);
+ PyArray_DTypeMeta *uint8_dt = PyArray_DTypeFromTypeNum(NPY_UINT8);
+ PyArray_DTypeMeta *res = NPY_DT_CALL_common_dtype(other, uint8_dt);
+ Py_DECREF(uint8_dt);
+ if (res == NULL) {
+ PyErr_Clear();
+ }
+ else if (res == (PyArray_DTypeMeta *)Py_NotImplemented) {
+ Py_DECREF(res);
+ }
+ else {
+ return res;
+ }
+ /* Try again with `int8`, an error may have been set, though */
+ PyArray_DTypeMeta *int8_dt = PyArray_DTypeFromTypeNum(NPY_INT8);
+ res = NPY_DT_CALL_common_dtype(other, int8_dt);
+ Py_DECREF(int8_dt);
+ if (res == NULL) {
+ PyErr_Clear();
+ }
+ else if (res == (PyArray_DTypeMeta *)Py_NotImplemented) {
+ Py_DECREF(res);
+ }
+ else {
+ return res;
+ }
+ /* And finally, we will try the default integer, just for sports... */
+ PyArray_DTypeMeta *default_int = PyArray_DTypeFromTypeNum(NPY_LONG);
+ res = NPY_DT_CALL_common_dtype(other, default_int);
+ Py_DECREF(default_int);
+ if (res == NULL) {
+ PyErr_Clear();
+ }
+ return res;
}
Py_INCREF(Py_NotImplemented);
return (PyArray_DTypeMeta *)Py_NotImplemented;
@@ -191,7 +223,23 @@ float_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
}
else if (NPY_DT_is_legacy(other)) {
/* This is a back-compat fallback to usually do the right thing... */
- return PyArray_DTypeFromTypeNum(NPY_HALF);
+ PyArray_DTypeMeta *half_dt = PyArray_DTypeFromTypeNum(NPY_HALF);
+ PyArray_DTypeMeta *res = NPY_DT_CALL_common_dtype(other, half_dt);
+ Py_DECREF(half_dt);
+ if (res == NULL) {
+ PyErr_Clear();
+ }
+ else if (res == (PyArray_DTypeMeta *)Py_NotImplemented) {
+ Py_DECREF(res);
+ }
+ else {
+ return res;
+ }
+ /* Retry with double (the default float) */
+ PyArray_DTypeMeta *double_dt = PyArray_DTypeFromTypeNum(NPY_DOUBLE);
+ res = NPY_DT_CALL_common_dtype(other, double_dt);
+ Py_DECREF(double_dt);
+ return res;
}
Py_INCREF(Py_NotImplemented);
return (PyArray_DTypeMeta *)Py_NotImplemented;
@@ -229,7 +277,24 @@ complex_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
}
else if (NPY_DT_is_legacy(other)) {
/* This is a back-compat fallback to usually do the right thing... */
- return PyArray_DTypeFromTypeNum(NPY_CFLOAT);
+ PyArray_DTypeMeta *cfloat_dt = PyArray_DTypeFromTypeNum(NPY_CFLOAT);
+ PyArray_DTypeMeta *res = NPY_DT_CALL_common_dtype(other, cfloat_dt);
+ Py_DECREF(cfloat_dt);
+ if (res == NULL) {
+ PyErr_Clear();
+ }
+ else if (res == (PyArray_DTypeMeta *)Py_NotImplemented) {
+ Py_DECREF(res);
+ }
+ else {
+ return res;
+ }
+ /* Retry with cdouble (the default complex) */
+ PyArray_DTypeMeta *cdouble_dt = PyArray_DTypeFromTypeNum(NPY_CDOUBLE);
+ res = NPY_DT_CALL_common_dtype(other, cdouble_dt);
+ Py_DECREF(cdouble_dt);
+ return res;
+
}
else if (other == &PyArray_PyIntAbstractDType ||
other == &PyArray_PyFloatAbstractDType) {
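The user-visible effect of this uint8 -> int8 -> default-int fallback chain shows up with user-defined DTypes: a weak Python int asks the other DType for a common type with each candidate in turn. With the `rational` test dtype (exercised in `test_dtype.py` below), the chain eventually succeeds:

    import numpy as np
    from numpy.core._rational_tests import rational

    np._set_promotion_state("weak")
    # `rational` does not promote with uint8, but the fallback keeps trying
    # (int8, then the default integer), so a common DType is still found:
    assert np.result_type(1, rational) == rational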
diff --git a/numpy/core/src/multiarray/abstractdtypes.h b/numpy/core/src/multiarray/abstractdtypes.h
index 42c192cac..6901ec213 100644
--- a/numpy/core/src/multiarray/abstractdtypes.h
+++ b/numpy/core/src/multiarray/abstractdtypes.h
@@ -1,6 +1,7 @@
#ifndef NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_
#define NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_
+#include "arrayobject.h"
#include "dtypemeta.h"
@@ -16,4 +17,56 @@ NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyComplexAbstractDType;
NPY_NO_EXPORT int
initialize_and_map_pytypes_to_dtypes(void);
+
+/*
+ * When we get a Python int, float, or complex, we may have to use weak
+ * promotion logic.
+ * To implement this, we sometimes have to tag the converted (temporary)
+ * array when the original object was a Python scalar.
+ *
+ * @param obj The original Python object.
+ * @param arr The array into which the Python object was converted.
+ * @param[in,out] dtype A pointer to the array's DType; if not NULL, it
+ *        will be replaced with the abstract DType.
+ * @return 0 if `obj` was not a Python scalar, 1 if it was.
+ */
+static NPY_INLINE int
+npy_mark_tmp_array_if_pyscalar(
+ PyObject *obj, PyArrayObject *arr, PyArray_DTypeMeta **dtype)
+{
+ /*
+ * We check the array dtype for two reasons: First, booleans are
+ * integer subclasses. Second, an int, float, or complex could have
+ * a custom DType registered, and then we should use that.
+ * Further, `np.float64` is a double subclass, so we must reject it.
+ */
+ if (PyLong_Check(obj)
+ && (PyArray_ISINTEGER(arr) || PyArray_ISOBJECT(arr))) {
+ ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_INT;
+ if (dtype != NULL) {
+ Py_INCREF(&PyArray_PyIntAbstractDType);
+ Py_SETREF(*dtype, &PyArray_PyIntAbstractDType);
+ }
+ return 1;
+ }
+ else if (PyFloat_Check(obj) && !PyArray_IsScalar(obj, Double)
+ && PyArray_TYPE(arr) == NPY_DOUBLE) {
+ ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_FLOAT;
+ if (dtype != NULL) {
+ Py_INCREF(&PyArray_PyFloatAbstractDType);
+ Py_SETREF(*dtype, &PyArray_PyFloatAbstractDType);
+ }
+ return 1;
+ }
+ else if (PyComplex_Check(obj) && PyArray_TYPE(arr) == NPY_CDOUBLE) {
+ ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_COMPLEX;
+ if (dtype != NULL) {
+ Py_INCREF(&PyArray_PyComplexAbstractDType);
+ Py_SETREF(*dtype, &PyArray_PyComplexAbstractDType);
+ }
+ return 1;
+ }
+ return 0;
+}
+
#endif /* NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_ */
diff --git a/numpy/core/src/multiarray/arrayobject.h b/numpy/core/src/multiarray/arrayobject.h
index fb9b0bd81..f7d0734db 100644
--- a/numpy/core/src/multiarray/arrayobject.h
+++ b/numpy/core/src/multiarray/arrayobject.h
@@ -26,4 +26,20 @@ array_might_be_written(PyArrayObject *obj);
*/
static const int NPY_ARRAY_WARN_ON_WRITE = (1 << 31);
+
+/*
+ * These flags are used internally to indicate an array that was previously
+ * a Python scalar (int, float, complex). The dtype of such an array should
+ * be considered as any integer, floating, or complex rather than the explicit
+ * dtype attached to the array.
+ *
+ * These flags must only be used in local context when the array in question
+ * is not returned. Use three flags, to avoid having to double check the
+ * actual dtype when the flags are used.
+ */
+static const int NPY_ARRAY_WAS_PYTHON_INT = (1 << 30);
+static const int NPY_ARRAY_WAS_PYTHON_FLOAT = (1 << 29);
+static const int NPY_ARRAY_WAS_PYTHON_COMPLEX = (1 << 28);
+static const int NPY_ARRAY_WAS_PYTHON_LITERAL = (1 << 30 | 1 << 29 | 1 << 28);
+
#endif /* NUMPY_CORE_SRC_MULTIARRAY_ARRAYOBJECT_H_ */
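Because each flag is a distinct bit, `NPY_ARRAY_WAS_PYTHON_LITERAL` doubles as a combined mask. A small Python illustration of the masking arithmetic (the names simply mirror the C constants):

    WAS_PYTHON_INT     = 1 << 30
    WAS_PYTHON_FLOAT   = 1 << 29
    WAS_PYTHON_COMPLEX = 1 << 28
    WAS_PYTHON_LITERAL = WAS_PYTHON_INT | WAS_PYTHON_FLOAT | WAS_PYTHON_COMPLEX

    flags = WAS_PYTHON_FLOAT
    assert flags & WAS_PYTHON_LITERAL      # was *some* Python literal
    assert not (flags & WAS_PYTHON_INT)    # but specifically not an int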
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index bc8a3bf88..c578a1b44 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -31,6 +31,7 @@
#include "array_method.h"
#include "usertypes.h"
#include "dtype_transfer.h"
+#include "arrayobject.h"
/*
@@ -44,6 +45,11 @@
*/
NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[] = {0, 3, 5, 10, 10, 20, 20, 20, 20};
+/*
+ * Whether or not legacy value-based promotion/casting is used.
+ */
+NPY_NO_EXPORT int npy_promotion_state = NPY_USE_LEGACY_PROMOTION;
+NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX = NULL;
static PyObject *
PyArray_GetGenericToVoidCastingImpl(void);
@@ -58,6 +64,77 @@ static PyObject *
PyArray_GetObjectToGenericCastingImpl(void);
+/*
+ * Return 1 if promotion warnings should be given and 0 if they are currently
+ * suppressed in the local context.
+ */
+NPY_NO_EXPORT int
+npy_give_promotion_warnings(void)
+{
+ PyObject *val;
+
+ npy_cache_import(
+ "numpy.core._ufunc_config", "NO_NEP50_WARNING",
+ &NO_NEP50_WARNING_CTX);
+ if (NO_NEP50_WARNING_CTX == NULL) {
+ PyErr_WriteUnraisable(NULL);
+ return 1;
+ }
+
+ if (PyContextVar_Get(NO_NEP50_WARNING_CTX, Py_False, &val) < 0) {
+ /* Errors should not really happen, but if it does assume we warn. */
+ PyErr_WriteUnraisable(NULL);
+ return 1;
+ }
+ Py_DECREF(val);
+ /* we only give warnings when the no-warning context is false */
+ return val == Py_False;
+}
+
+
+NPY_NO_EXPORT PyObject *
+npy__get_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(arg)) {
+ if (npy_promotion_state == NPY_USE_WEAK_PROMOTION) {
+ return PyUnicode_FromString("weak");
+ }
+ else if (npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) {
+ return PyUnicode_FromString("weak_and_warn");
+ }
+ else if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) {
+ return PyUnicode_FromString("legacy");
+ }
+ PyErr_SetString(PyExc_SystemError, "invalid promotion state!");
+ return NULL;
+}
+
+
+NPY_NO_EXPORT PyObject *
+npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg)
+{
+ if (!PyUnicode_Check(arg)) {
+ PyErr_SetString(PyExc_TypeError,
+ "_set_promotion_state() argument or NPY_PROMOTION_STATE "
+ "must be a string.");
+ return NULL;
+ }
+ if (PyUnicode_CompareWithASCIIString(arg, "weak") == 0) {
+ npy_promotion_state = NPY_USE_WEAK_PROMOTION;
+ }
+ else if (PyUnicode_CompareWithASCIIString(arg, "weak_and_warn") == 0) {
+ npy_promotion_state = NPY_USE_WEAK_PROMOTION_AND_WARN;
+ }
+ else if (PyUnicode_CompareWithASCIIString(arg, "legacy") == 0) {
+ npy_promotion_state = NPY_USE_LEGACY_PROMOTION;
+ }
+ else {
+ PyErr_Format(PyExc_TypeError,
+ "_set_promotion_state() argument or NPY_PROMOTION_STATE must be "
+ "'weak', 'legacy', or 'weak_and_warn' but got '%.100S'", arg);
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
/**
* Fetch the casting implementation from one DType to another.
*
@@ -776,6 +853,55 @@ can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data,
return ret;
}
+
+NPY_NO_EXPORT npy_bool
+can_cast_pyscalar_scalar_to(
+ int flags, PyArray_Descr *to, NPY_CASTING casting)
+{
+ /*
+ * This function only works reliably for legacy (built-in NumPy) dtypes.
+ * If we end up here for a non-legacy DType, it is a bug.
+ */
+ assert(NPY_DT_is_legacy(NPY_DTYPE(to)));
+
+ /*
+ * Quickly check for the typical numeric cases, where the casting rules
+ * can be hardcoded fairly easily.
+ */
+ if (PyDataType_ISCOMPLEX(to)) {
+ return 1;
+ }
+ else if (PyDataType_ISFLOAT(to)) {
+ if (flags & NPY_ARRAY_WAS_PYTHON_COMPLEX) {
+ return casting == NPY_UNSAFE_CASTING;
+ }
+ return 1;
+ }
+ else if (PyDataType_ISINTEGER(to)) {
+ if (!(flags & NPY_ARRAY_WAS_PYTHON_INT)) {
+ return casting == NPY_UNSAFE_CASTING;
+ }
+ return 1;
+ }
+
+ /*
+ * For all other cases we use the default dtype.
+ */
+ PyArray_Descr *from;
+ if (flags & NPY_ARRAY_WAS_PYTHON_INT) {
+ from = PyArray_DescrFromType(NPY_LONG);
+ }
+ else if (flags & NPY_ARRAY_WAS_PYTHON_FLOAT) {
+ from = PyArray_DescrFromType(NPY_DOUBLE);
+ }
+ else {
+ from = PyArray_DescrFromType(NPY_CDOUBLE);
+ }
+ int res = PyArray_CanCastTypeTo(from, to, casting);
+ Py_DECREF(from);
+ return res;
+}
+
/*NUMPY_API
* Returns 1 if the array object may be cast to the given data type using
* the casting rule, 0 otherwise. This differs from PyArray_CanCastTo in
@@ -794,12 +920,25 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to,
to = NULL;
}
- /*
- * If it's a scalar, check the value. (This only currently matters for
- * numeric types and for `to == NULL` it can't be numeric.)
- */
- if (PyArray_NDIM(arr) == 0 && !PyArray_HASFIELDS(arr) && to != NULL) {
- return can_cast_scalar_to(from, PyArray_DATA(arr), to, casting);
+ if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) {
+ /*
+ * If it's a scalar, check the value. (This only currently matters for
+ * numeric types and for `to == NULL` it can't be numeric.)
+ */
+ if (PyArray_NDIM(arr) == 0 && !PyArray_HASFIELDS(arr) && to != NULL) {
+ return can_cast_scalar_to(from, PyArray_DATA(arr), to, casting);
+ }
+ }
+ else {
+ /*
+ * If it's a scalar, check the value. (This only currently matters for
+ * numeric types and for `to == NULL` it can't be numeric.)
+ */
+ if (PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL && to != NULL) {
+ return can_cast_pyscalar_scalar_to(
+ PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL, to,
+ casting);
+ }
}
/* Otherwise, use the standard rules (same as `PyArray_CanCastTypeTo`) */
@@ -1561,40 +1700,44 @@ should_use_min_scalar(npy_intp narrs, PyArrayObject **arr,
}
-/*
- * Utility function used only in PyArray_ResultType for value-based logic.
- * See that function for the meaning and contents of the parameters.
- */
-static PyArray_Descr *
-get_descr_from_cast_or_value(
- npy_intp i,
- PyArrayObject *arrs[],
- npy_intp ndtypes,
- PyArray_Descr *descriptor,
- PyArray_DTypeMeta *common_dtype)
-{
- PyArray_Descr *curr;
- if (NPY_LIKELY(i < ndtypes ||
- !(PyArray_FLAGS(arrs[i-ndtypes]) & _NPY_ARRAY_WAS_PYSCALAR))) {
- curr = PyArray_CastDescrToDType(descriptor, common_dtype);
- }
- else {
- /*
- * Unlike `PyArray_CastToDTypeAndPromoteDescriptors`, deal with
- * plain Python values "graciously". This recovers the original
- * value the long route, but it should almost never happen...
- */
- PyObject *tmp = PyArray_GETITEM(arrs[i-ndtypes],
- PyArray_BYTES(arrs[i-ndtypes]));
- if (tmp == NULL) {
- return NULL;
+NPY_NO_EXPORT int
+should_use_min_scalar_weak_literals(int narrs, PyArrayObject **arr) {
+ int all_scalars = 1;
+ int max_scalar_kind = -1;
+ int max_array_kind = -1;
+
+ for (int i = 0; i < narrs; i++) {
+ if (PyArray_FLAGS(arr[i]) & NPY_ARRAY_WAS_PYTHON_INT) {
+ /* A Python integer could be unsigned (`u`), so treat it as that kind: */
+ int new = dtype_kind_to_simplified_ordering('u');
+ if (new > max_scalar_kind) {
+ max_scalar_kind = new;
+ }
}
- curr = NPY_DT_CALL_discover_descr_from_pyobject(common_dtype, tmp);
- Py_DECREF(tmp);
+ /* For the new logic, only whether or not it is complex matters: */
+ else if (PyArray_FLAGS(arr[i]) & NPY_ARRAY_WAS_PYTHON_FLOAT) {
+ max_scalar_kind = dtype_kind_to_simplified_ordering('f');
+ }
+ else if (PyArray_FLAGS(arr[i]) & NPY_ARRAY_WAS_PYTHON_COMPLEX) {
+ max_scalar_kind = dtype_kind_to_simplified_ordering('f');
+ }
+ else {
+ all_scalars = 0;
+ int kind = dtype_kind_to_simplified_ordering(
+ PyArray_DESCR(arr[i])->kind);
+ if (kind > max_array_kind) {
+ max_array_kind = kind;
+ }
+ }
+ }
+ if (!all_scalars && max_array_kind >= max_scalar_kind) {
+ return 1;
}
- return curr;
+
+ return 0;
}
+
/*NUMPY_API
*
* Produces the result type of a bunch of inputs, using the same rules
@@ -1667,30 +1810,13 @@ PyArray_ResultType(
at_least_one_scalar = 1;
}
- if (!(PyArray_FLAGS(arrs[i]) & _NPY_ARRAY_WAS_PYSCALAR)) {
- /* This was not a scalar with an abstract DType */
- all_descriptors[i_all] = PyArray_DTYPE(arrs[i]);
- all_DTypes[i_all] = NPY_DTYPE(all_descriptors[i_all]);
- Py_INCREF(all_DTypes[i_all]);
- all_pyscalar = 0;
- continue;
- }
-
/*
- * The original was a Python scalar with an abstract DType.
- * In a future world, this type of code may need to work on the
- * DType level first and discover those from the original value.
- * But, right now we limit the logic to int, float, and complex
- * and do it here to allow for a transition without losing all of
- * our remaining sanity.
+ * If the original was a Python scalar/literal, we use only the
+ * corresponding abstract DType (and no descriptor) below.
+ * Otherwise, we propagate the descriptor as well.
*/
- if (PyArray_ISFLOAT(arrs[i])) {
- all_DTypes[i_all] = &PyArray_PyFloatAbstractDType;
- }
- else if (PyArray_ISCOMPLEX(arrs[i])) {
- all_DTypes[i_all] = &PyArray_PyComplexAbstractDType;
- }
- else {
+ all_descriptors[i_all] = NULL; /* no descriptor for py-scalars */
+ if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_INT) {
/* This could even be an object dtype here for large ints */
all_DTypes[i_all] = &PyArray_PyIntAbstractDType;
if (PyArray_TYPE(arrs[i]) != NPY_LONG) {
@@ -1698,12 +1824,18 @@ PyArray_ResultType(
all_pyscalar = 0;
}
}
+ else if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_FLOAT) {
+ all_DTypes[i_all] = &PyArray_PyFloatAbstractDType;
+ }
+ else if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_COMPLEX) {
+ all_DTypes[i_all] = &PyArray_PyComplexAbstractDType;
+ }
+ else {
+ all_descriptors[i_all] = PyArray_DTYPE(arrs[i]);
+ all_DTypes[i_all] = NPY_DTYPE(all_descriptors[i_all]);
+ all_pyscalar = 0;
+ }
Py_INCREF(all_DTypes[i_all]);
- /*
- * Leave the descriptor empty, if we need it, we will have to go
- * to more extreme lengths unfortunately.
- */
- all_descriptors[i_all] = NULL;
}
PyArray_DTypeMeta *common_dtype = PyArray_PromoteDTypeSequence(
@@ -1730,23 +1862,20 @@ PyArray_ResultType(
* NOTE: Code duplicates `PyArray_CastToDTypeAndPromoteDescriptors`, but
* supports special handling of the abstract values.
*/
- if (!NPY_DT_is_parametric(common_dtype)) {
- /* Note that this "fast" path loses all metadata */
- result = NPY_DT_CALL_default_descr(common_dtype);
- }
- else {
- result = get_descr_from_cast_or_value(
- 0, arrs, ndtypes, all_descriptors[0], common_dtype);
- if (result == NULL) {
- goto error;
- }
-
- for (npy_intp i = 1; i < ndtypes+narrs; i++) {
- PyArray_Descr *curr = get_descr_from_cast_or_value(
- i, arrs, ndtypes, all_descriptors[i], common_dtype);
+ if (NPY_DT_is_parametric(common_dtype)) {
+ for (npy_intp i = 0; i < ndtypes+narrs; i++) {
+ if (all_descriptors[i] == NULL) {
+ continue; /* originally a python scalar/literal */
+ }
+ PyArray_Descr *curr = PyArray_CastDescrToDType(
+ all_descriptors[i], common_dtype);
if (curr == NULL) {
goto error;
}
+ if (result == NULL) {
+ result = curr;
+ continue;
+ }
Py_SETREF(result, NPY_DT_SLOTS(common_dtype)->common_instance(result, curr));
Py_DECREF(curr);
if (result == NULL) {
@@ -1754,27 +1883,33 @@ PyArray_ResultType(
}
}
}
+ if (result == NULL) {
+ /*
+ * If the DType is not parametric, or all were weak scalars,
+ * a result may not yet be set.
+ */
+ result = NPY_DT_CALL_default_descr(common_dtype);
+ if (result == NULL) {
+ goto error;
+ }
+ }
/*
- * Unfortunately, when 0-D "scalar" arrays are involved and mixed, we
- * have to use the value-based logic. The intention is to move away from
- * the complex logic arising from it. We thus fall back to the legacy
- * version here.
- * It may be possible to micro-optimize this to skip some of the above
- * logic when this path is necessary.
+ * Unfortunately, when 0-D "scalar" arrays are involved and mixed, we *may*
+ * have to use the value-based logic.
+ * `PyArray_CheckLegacyResultType` may behave differently based on the
+ * current value of `npy_promotion_state`:
+ * 1. It does nothing (we use the "new" behavior)
+ * 2. It does nothing, but warns if the result would differ.
+ * 3. It replaces the result based on the legacy value-based logic.
*/
if (at_least_one_scalar && !all_pyscalar && result->type_num < NPY_NTYPES) {
- PyArray_Descr *legacy_result = PyArray_LegacyResultType(
- narrs, arrs, ndtypes, descrs);
- if (legacy_result == NULL) {
- /*
- * Going from error to success should not really happen, but is
- * probably OK if it does.
- */
- goto error;
+ if (PyArray_CheckLegacyResultType(
+ &result, narrs, arrs, ndtypes, descrs) < 0) {
+ Py_DECREF(common_dtype);
+ Py_DECREF(result);
+ return NULL;
}
- /* Return the old "legacy" result (could warn here if different) */
- Py_SETREF(result, legacy_result);
}
Py_DECREF(common_dtype);
@@ -1802,38 +1937,39 @@ PyArray_ResultType(
* of all the inputs. Data types passed directly are treated as array
* types.
*/
-NPY_NO_EXPORT PyArray_Descr *
-PyArray_LegacyResultType(
+NPY_NO_EXPORT int
+PyArray_CheckLegacyResultType(
+ PyArray_Descr **new_result,
npy_intp narrs, PyArrayObject **arr,
npy_intp ndtypes, PyArray_Descr **dtypes)
{
+ PyArray_Descr *ret = NULL;
+ if (npy_promotion_state == NPY_USE_WEAK_PROMOTION) {
+ return 0;
+ }
+ if (npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN
+ && !npy_give_promotion_warnings()) {
+ return 0;
+ }
+
npy_intp i;
- /* If there's just one type, pass it through */
+ /* If there's just one type, results must match */
if (narrs + ndtypes == 1) {
- PyArray_Descr *ret = NULL;
- if (narrs == 1) {
- ret = PyArray_DESCR(arr[0]);
- }
- else {
- ret = dtypes[0];
- }
- Py_INCREF(ret);
- return ret;
+ return 0;
}
int use_min_scalar = should_use_min_scalar(narrs, arr, ndtypes, dtypes);
/* Loop through all the types, promoting them */
if (!use_min_scalar) {
- PyArray_Descr *ret;
/* Build a single array of all the dtypes */
PyArray_Descr **all_dtypes = PyArray_malloc(
sizeof(*all_dtypes) * (narrs + ndtypes));
if (all_dtypes == NULL) {
PyErr_NoMemory();
- return NULL;
+ return -1;
}
for (i = 0; i < narrs; ++i) {
all_dtypes[i] = PyArray_DESCR(arr[i]);
@@ -1843,11 +1979,9 @@ PyArray_LegacyResultType(
}
ret = PyArray_PromoteTypeSequence(all_dtypes, narrs + ndtypes);
PyArray_free(all_dtypes);
- return ret;
}
else {
int ret_is_small_unsigned = 0;
- PyArray_Descr *ret = NULL;
for (i = 0; i < narrs; ++i) {
int tmp_is_small_unsigned;
@@ -1855,7 +1989,7 @@ PyArray_LegacyResultType(
arr[i], &tmp_is_small_unsigned);
if (tmp == NULL) {
Py_XDECREF(ret);
- return NULL;
+ return -1;
}
/* Combine it with the existing type */
if (ret == NULL) {
@@ -1869,7 +2003,7 @@ PyArray_LegacyResultType(
Py_DECREF(ret);
ret = tmpret;
if (ret == NULL) {
- return NULL;
+ return -1;
}
ret_is_small_unsigned = tmp_is_small_unsigned &&
@@ -1890,7 +2024,7 @@ PyArray_LegacyResultType(
Py_DECREF(ret);
ret = tmpret;
if (ret == NULL) {
- return NULL;
+ return -1;
}
}
}
@@ -1899,9 +2033,32 @@ PyArray_LegacyResultType(
PyErr_SetString(PyExc_TypeError,
"no arrays or types available to calculate result type");
}
+ }
+
+ if (ret == NULL) {
+ return -1;
+ }
- return ret;
+ int unchanged_result = PyArray_EquivTypes(*new_result, ret);
+ if (unchanged_result) {
+ Py_DECREF(ret);
+ return 0;
}
+ if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) {
+ Py_SETREF(*new_result, ret);
+ return 0;
+ }
+
+ assert(npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN);
+ if (PyErr_WarnFormat(PyExc_UserWarning, 1,
+ "result dtype changed due to the removal of value-based "
+ "promotion from NumPy. Changed from %S to %S.",
+ ret, *new_result) < 0) {
+ Py_DECREF(ret);
+ return -1;
+ }
+ Py_DECREF(ret);
+ return 0;
}
/**
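With `PyArray_CheckLegacyResultType` in place, `np.result_type` in the `weak_and_warn` state computes the weak result first and re-runs the legacy value-based logic only to compare and warn. A sketch following the `test_dtype.py` cases below:

    import numpy as np

    np._set_promotion_state("weak_and_warn")
    # Weak result is clongdouble; legacy value-based promotion gave
    # complex64, so a "result dtype changed" UserWarning is emitted.
    assert np.result_type(np.longdouble(2), np.complex64) == np.clongdouble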
diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h
index af6d790cf..b6bc7d8a7 100644
--- a/numpy/core/src/multiarray/convert_datatype.h
+++ b/numpy/core/src/multiarray/convert_datatype.h
@@ -9,6 +9,21 @@ extern "C" {
extern NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[];
+#define NPY_USE_LEGACY_PROMOTION 0
+#define NPY_USE_WEAK_PROMOTION 1
+#define NPY_USE_WEAK_PROMOTION_AND_WARN 2
+extern NPY_NO_EXPORT int npy_promotion_state;
+extern NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX;
+
+NPY_NO_EXPORT int
+npy_give_promotion_warnings(void);
+
+NPY_NO_EXPORT PyObject *
+npy__get_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(arg));
+
+NPY_NO_EXPORT PyObject *
+npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg);
+
NPY_NO_EXPORT PyObject *
PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to);
@@ -24,8 +39,9 @@ PyArray_ObjectType(PyObject *op, int minimum_type);
NPY_NO_EXPORT PyArrayObject **
PyArray_ConvertToCommonType(PyObject *op, int *retn);
-NPY_NO_EXPORT PyArray_Descr *
-PyArray_LegacyResultType(
+NPY_NO_EXPORT int
+PyArray_CheckLegacyResultType(
+ PyArray_Descr **new_result,
npy_intp narrs, PyArrayObject **arr,
npy_intp ndtypes, PyArray_Descr **dtypes);
@@ -40,10 +56,17 @@ NPY_NO_EXPORT npy_bool
can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data,
PyArray_Descr *to, NPY_CASTING casting);
+NPY_NO_EXPORT npy_bool
+can_cast_pyscalar_scalar_to(
+ int flags, PyArray_Descr *to, NPY_CASTING casting);
+
NPY_NO_EXPORT int
should_use_min_scalar(npy_intp narrs, PyArrayObject **arr,
npy_intp ndtypes, PyArray_Descr **dtypes);
+NPY_NO_EXPORT int
+should_use_min_scalar_weak_literals(int narrs, PyArrayObject **arr);
+
NPY_NO_EXPORT const char *
npy_casting_to_string(NPY_CASTING casting);
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 96d0c893d..16069d619 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -3537,10 +3537,11 @@ array_result_type(PyObject *NPY_UNUSED(dummy), PyObject *args)
if (arr[narr] == NULL) {
goto finish;
}
- if (PyLong_CheckExact(obj) || PyFloat_CheckExact(obj) ||
- PyComplex_CheckExact(obj)) {
- ((PyArrayObject_fields *)arr[narr])->flags |= _NPY_ARRAY_WAS_PYSCALAR;
- }
+ /*
+ * Mark the array if it was a Python scalar (we do not need the actual
+ * DType here yet; that is figured out inside ResultType).
+ */
+ npy_mark_tmp_array_if_pyscalar(obj, arr[narr], NULL);
++narr;
}
else {
@@ -4494,6 +4495,14 @@ static struct PyMethodDef array_module_methods[] = {
{"get_handler_version",
(PyCFunction) get_handler_version,
METH_VARARGS, NULL},
+ {"_get_promotion_state",
+ (PyCFunction)npy__get_promotion_state,
+ METH_NOARGS, "Get the current NEP 50 promotion state."},
+ {"_set_promotion_state",
+ (PyCFunction)npy__set_promotion_state,
+ METH_O, "Set the NEP 50 promotion state. This is not thread-safe.\n"
+ "The optional warnings can be safely silenced using the \n"
+ "`np._no_nep50_warning()` context manager."},
{"_add_newdoc_ufunc", (PyCFunction)add_newdoc_ufunc,
METH_VARARGS, NULL},
{"_get_sfloat_dtype",
diff --git a/numpy/core/src/umath/dispatching.c b/numpy/core/src/umath/dispatching.c
index 620335d88..5aecdd1fc 100644
--- a/numpy/core/src/umath/dispatching.c
+++ b/numpy/core/src/umath/dispatching.c
@@ -40,6 +40,7 @@
#define PY_SSIZE_T_CLEAN
#include <Python.h>
+#include "convert_datatype.h"
#include "numpy/ndarraytypes.h"
#include "common.h"
@@ -595,7 +596,8 @@ _make_new_typetup(
static int
legacy_promote_using_legacy_type_resolver(PyUFuncObject *ufunc,
PyArrayObject *const *ops, PyArray_DTypeMeta *signature[],
- PyArray_DTypeMeta *operation_DTypes[], int *out_cacheable)
+ PyArray_DTypeMeta *operation_DTypes[], int *out_cacheable,
+ npy_bool check_only)
{
int nargs = ufunc->nargs;
PyArray_Descr *out_descrs[NPY_MAXARGS] = {NULL};
@@ -623,6 +625,42 @@ legacy_promote_using_legacy_type_resolver(PyUFuncObject *ufunc,
}
Py_XDECREF(type_tuple);
+ if (NPY_UNLIKELY(check_only)) {
+ /*
+ * When warnings are enabled, we don't replace the DTypes, but only
+ * check whether the old result is the same as the new one.
+ * For noise reason, we do this only on the *output* dtypes which
+ * ignores floating point precision changes for comparisons such as
+ * `np.float32(3.1) < 3.1`.
+ */
+ for (int i = ufunc->nin; i < ufunc->nargs; i++) {
+ /*
+ * If an output was provided and the new dtype matches, we
+ * should (at best) lose a tiny bit of precision, e.g.:
+ * `np.true_divide(float32_arr0d, 1, out=float32_arr0d)`
+ * (which operated on float64 before, although it is probably rare)
+ */
+ if (ops[i] != NULL
+ && PyArray_EquivTypenums(
+ operation_DTypes[i]->type_num,
+ PyArray_DESCR(ops[i])->type_num)) {
+ continue;
+ }
+ /* Otherwise, warn if the dtype doesn't match */
+ if (!PyArray_EquivTypenums(
+ operation_DTypes[i]->type_num, out_descrs[i]->type_num)) {
+ if (PyErr_WarnFormat(PyExc_UserWarning, 1,
+ "result dtype changed due to the removal of value-based "
+ "promotion from NumPy. Changed from %S to %S.",
+ out_descrs[i], operation_DTypes[i]->singleton) < 0) {
+ return -1;
+ }
+ return 0;
+ }
+ }
+ return 0;
+ }
+
for (int i = 0; i < nargs; i++) {
Py_XSETREF(operation_DTypes[i], NPY_DTYPE(out_descrs[i]));
Py_INCREF(operation_DTypes[i]);
@@ -773,7 +811,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
PyArray_DTypeMeta *new_op_dtypes[NPY_MAXARGS] = {NULL};
int cacheable = 1; /* TODO: only the comparison deprecation needs this */
if (legacy_promote_using_legacy_type_resolver(ufunc,
- ops, signature, new_op_dtypes, &cacheable) < 0) {
+ ops, signature, new_op_dtypes, &cacheable, NPY_FALSE) < 0) {
return NULL;
}
info = promote_and_get_info_and_ufuncimpl(ufunc,
@@ -852,6 +890,11 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
* these including clearing the output.
* @param force_legacy_promotion If set, we have to use the old type resolution
* to implement value-based promotion/casting.
+ * @param promoting_pyscalars Indication that some of the initial inputs were
+ * int, float, or complex. In this case weak-scalar promotion is used
+ * which can lead to a lower result precision even when legacy promotion
+ * does not kick in: `np.int8(1) + 1` is the example.
+ * (Legacy promotion is skipped because `np.int8(1)` is also scalar)
* @param ensure_reduce_compatible Must be set for reductions, in which case
* the found implementation is checked for reduce-like compatibility.
* If it is *not* compatible and `signature[2] != NULL`, we assume its
@@ -867,6 +910,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
PyArray_DTypeMeta *op_dtypes[],
npy_bool force_legacy_promotion,
npy_bool allow_legacy_promotion,
+ npy_bool promoting_pyscalars,
npy_bool ensure_reduce_compatible)
{
int nin = ufunc->nin, nargs = ufunc->nargs;
@@ -896,7 +940,8 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
}
}
- if (force_legacy_promotion) {
+ if (force_legacy_promotion
+ && npy_promotion_state == NPY_USE_LEGACY_PROMOTION) {
/*
* We must use legacy promotion for value-based logic. Call the old
* resolver once up-front to get the "actual" loop dtypes.
@@ -904,13 +949,17 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
*/
int cacheable = 1; /* unused, as we modify the original `op_dtypes` */
if (legacy_promote_using_legacy_type_resolver(ufunc,
- ops, signature, op_dtypes, &cacheable) < 0) {
+ ops, signature, op_dtypes, &cacheable, NPY_FALSE) < 0) {
return NULL;
}
}
+ /* Pause warnings and always use "new" path */
+ int old_promotion_state = npy_promotion_state;
+ npy_promotion_state = NPY_USE_WEAK_PROMOTION;
PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc,
ops, signature, op_dtypes, allow_legacy_promotion);
+ npy_promotion_state = old_promotion_state;
if (info == NULL) {
if (!PyErr_Occurred()) {
@@ -920,6 +969,27 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
}
PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1);
+ PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0);
+
+ /* If necessary, check if the old result would have been different */
+ if (NPY_UNLIKELY(npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN)
+ && (force_legacy_promotion || promoting_pyscalars)
+ && npy_give_promotion_warnings()) {
+ PyArray_DTypeMeta *check_dtypes[NPY_MAXARGS];
+ for (int i = 0; i < nargs; i++) {
+ check_dtypes[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM(
+ all_dtypes, i);
+ }
+ /* Before calling into the legacy promotion, pretend that is the state: */
+ npy_promotion_state = NPY_USE_LEGACY_PROMOTION;
+ int res = legacy_promote_using_legacy_type_resolver(ufunc,
+ ops, signature, check_dtypes, NULL, NPY_TRUE);
+ /* Reset the promotion state: */
+ npy_promotion_state = NPY_USE_WEAK_PROMOTION_AND_WARN;
+ if (res < 0) {
+ return NULL;
+ }
+ }
/*
* In certain cases (only the logical ufuncs really), the loop we found may
@@ -931,14 +1001,14 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
* comment. That could be relaxed, in which case we may need to
* cache if a call was for a reduction.
*/
- PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0);
if (ensure_reduce_compatible && signature[0] == NULL &&
PyTuple_GET_ITEM(all_dtypes, 0) != PyTuple_GET_ITEM(all_dtypes, 2)) {
signature[0] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM(all_dtypes, 2);
Py_INCREF(signature[0]);
return promote_and_get_ufuncimpl(ufunc,
ops, signature, op_dtypes,
- force_legacy_promotion, allow_legacy_promotion, NPY_FALSE);
+ force_legacy_promotion, allow_legacy_promotion,
+ promoting_pyscalars, NPY_FALSE);
}
for (int i = 0; i < nargs; i++) {
diff --git a/numpy/core/src/umath/dispatching.h b/numpy/core/src/umath/dispatching.h
index f2ab0be2e..513b50d75 100644
--- a/numpy/core/src/umath/dispatching.h
+++ b/numpy/core/src/umath/dispatching.h
@@ -27,6 +27,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
PyArray_DTypeMeta *op_dtypes[],
npy_bool force_legacy_promotion,
npy_bool allow_legacy_promotion,
+ npy_bool promote_pyscalars,
npy_bool ensure_reduce_compatible);
NPY_NO_EXPORT PyObject *
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 2636396d3..b7e390abb 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -49,6 +49,8 @@
#include "override.h"
#include "npy_import.h"
#include "extobj.h"
+
+#include "arrayobject.h"
#include "common.h"
#include "dtypemeta.h"
#include "numpyos.h"
@@ -945,6 +947,7 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
ufunc_full_args full_args, PyArrayObject *out_op[],
PyArray_DTypeMeta *out_op_DTypes[],
npy_bool *force_legacy_promotion, npy_bool *allow_legacy_promotion,
+ npy_bool *promoting_pyscalars,
PyObject *order_obj, NPY_ORDER *out_order,
PyObject *casting_obj, NPY_CASTING *out_casting,
PyObject *subok_obj, npy_bool *out_subok,
@@ -961,6 +964,7 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
npy_bool any_scalar = NPY_FALSE;
*allow_legacy_promotion = NPY_TRUE;
*force_legacy_promotion = NPY_FALSE;
+ *promoting_pyscalars = NPY_FALSE;
for (int i = 0; i < nin; i++) {
obj = PyTuple_GET_ITEM(full_args.in, i);
@@ -980,6 +984,8 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
if (!NPY_DT_is_legacy(out_op_DTypes[i])) {
*allow_legacy_promotion = NPY_FALSE;
+ // TODO: A subclass of int, float, complex could reach here and
+ // it should not be flagged as "weak" if it does.
}
if (PyArray_NDIM(out_op[i]) == 0) {
any_scalar = NPY_TRUE;
@@ -988,17 +994,24 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
all_scalar = NPY_FALSE;
continue;
}
+
+ // TODO: Is this equivalent/better by removing the logic which enforces
+ // that we always use weak promotion in the core?
+ if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) {
+ continue; /* Skip use of special dtypes */
+ }
+
/*
- * TODO: we need to special case scalars here, if the input is a
- * Python int, float, or complex, we have to use the "weak"
- * DTypes: `PyArray_PyIntAbstractDType`, etc.
- * This is to allow e.g. `float32(1.) + 1` to return `float32`.
- * The correct array dtype can only be found after promotion for
- * such a "weak scalar". We could avoid conversion here, but
- * must convert it for use in the legacy promotion.
- * There is still a small chance that this logic can instead
- * happen inside the Python operators.
+ * Handle the "weak" Python scalars/literals. We use a special DType
+ * for these.
+ * Further, we mark the operation array with a special flag to indicate
+ * this. This is because the legacy dtype resolution makes use of
+ * `np.can_cast(operand, dtype)`. The flag is local to this use, but
+ * necessary to propagate the information to the legacy type resolution.
*/
+ if (npy_mark_tmp_array_if_pyscalar(obj, out_op[i], &out_op_DTypes[i])) {
+ *promoting_pyscalars = NPY_TRUE;
+ }
}
if (*allow_legacy_promotion && (!all_scalar && any_scalar)) {
*force_legacy_promotion = should_use_min_scalar(nin, out_op, 0, NULL);
@@ -2768,7 +2781,8 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
}
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
- ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE, NPY_TRUE);
+ ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE,
+ NPY_FALSE, NPY_TRUE);
if (evil_ndim_mutating_hack) {
((PyArrayObject_fields *)out)->nd = 0;
}
@@ -4875,10 +4889,13 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
int keepdims = -1; /* We need to know if it was passed */
npy_bool force_legacy_promotion;
npy_bool allow_legacy_promotion;
+ npy_bool promoting_pyscalars;
if (convert_ufunc_arguments(ufunc,
/* extract operand related information: */
full_args, operands,
- operand_DTypes, &force_legacy_promotion, &allow_legacy_promotion,
+ operand_DTypes,
+ &force_legacy_promotion, &allow_legacy_promotion,
+ &promoting_pyscalars,
/* extract general information: */
order_obj, &order,
casting_obj, &casting,
@@ -4899,7 +4916,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
operands, signature,
operand_DTypes, force_legacy_promotion, allow_legacy_promotion,
- NPY_FALSE);
+ promoting_pyscalars, NPY_FALSE);
if (ufuncimpl == NULL) {
goto fail;
}
@@ -6032,7 +6049,8 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
operands, signature, operand_DTypes,
- force_legacy_promotion, allow_legacy_promotion, NPY_FALSE);
+ force_legacy_promotion, allow_legacy_promotion,
+ NPY_FALSE, NPY_FALSE);
if (ufuncimpl == NULL) {
goto fail;
}
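This marking realizes the example from the replaced TODO comment: the Python scalar operand now adapts to the NumPy operand instead of dictating the precision. Sketch:

    import numpy as np

    np._set_promotion_state("weak")
    # The Python float is weak, so it does not upgrade the result to
    # float64; likewise `np.int8(1) + 1` stays int8 under the new rules.
    assert (np.float32(1.) + 1.).dtype == np.float32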
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 6edd00e65..94338e031 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -49,6 +49,7 @@
#endif
#include <stdbool.h>
+#include "arrayobject.h"
static PyObject *
npy_casting_to_py_object(NPY_CASTING casting)
@@ -1929,14 +1930,21 @@ linear_search_type_resolver(PyUFuncObject *self,
int types[NPY_MAXARGS];
const char *ufunc_name;
int no_castable_output = 0;
- int use_min_scalar;
/* For making a better error message on coercion error */
char err_dst_typecode = '-', err_src_typecode = '-';
ufunc_name = ufunc_get_name_cstr(self);
- use_min_scalar = should_use_min_scalar(nin, op, 0, NULL);
+ assert(npy_promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN);
+ /* Always "use" with new promotion in case of Python int/float/complex */
+ int use_min_scalar;
+ if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) {
+ use_min_scalar = should_use_min_scalar(nin, op, 0, NULL);
+ }
+ else {
+ use_min_scalar = should_use_min_scalar_weak_literals(nin, op);
+ }
/* If the ufunc has userloops, search for them. */
if (self->userloops) {
@@ -2126,11 +2134,19 @@ type_tuple_type_resolver(PyUFuncObject *self,
int nin = self->nin, nop = nin + self->nout;
int specified_types[NPY_MAXARGS];
const char *ufunc_name;
- int no_castable_output = 0, use_min_scalar;
+ int no_castable_output = 0;
ufunc_name = ufunc_get_name_cstr(self);
- use_min_scalar = should_use_min_scalar(nin, op, 0, NULL);
+ assert(npy_promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN);
+ /* Always "use" with new promotion in case of Python int/float/complex */
+ int use_min_scalar;
+ if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) {
+ use_min_scalar = should_use_min_scalar(nin, op, 0, NULL);
+ }
+ else {
+ use_min_scalar = should_use_min_scalar_weak_literals(nin, op);
+ }
/* Fill in specified_types from the tuple or string */
const char *bad_type_tup_msg = (
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index b37bded73..f95f95893 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -11,7 +11,7 @@ from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT,
- IS_PYSTON)
+ IS_PYSTON, _OLD_PROMOTION)
from numpy.compat import pickle
from itertools import permutations
import random
@@ -1288,25 +1288,34 @@ class TestPromotion:
"""Test cases related to more complex DType promotions. Further promotion
tests are defined in `test_numeric.py`
"""
- @pytest.mark.parametrize(["other", "expected"],
- [(2**16-1, np.complex64),
- (2**32-1, np.complex128),
- (np.float16(2), np.complex64),
- (np.float32(2), np.complex64),
- (np.longdouble(2), np.complex64),
+ @np._no_nep50_warning()
+ @pytest.mark.parametrize(["other", "expected", "expected_weak"],
+ [(2**16-1, np.complex64, None),
+ (2**32-1, np.complex128, np.complex64),
+ (np.float16(2), np.complex64, None),
+ (np.float32(2), np.complex64, None),
+ (np.longdouble(2), np.complex64, np.clongdouble),
# Base of the double value to sidestep any rounding issues:
- (np.longdouble(np.nextafter(1.7e308, 0.)), np.complex128),
+ (np.longdouble(np.nextafter(1.7e308, 0.)),
+ np.complex128, np.clongdouble),
# Additionally use "nextafter" so the cast can't round down:
- (np.longdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
+ (np.longdouble(np.nextafter(1.7e308, np.inf)),
+ np.clongdouble, None),
# repeat for complex scalars:
- (np.complex64(2), np.complex64),
- (np.clongdouble(2), np.complex64),
+ (np.complex64(2), np.complex64, None),
+ (np.clongdouble(2), np.complex64, np.clongdouble),
# Base of the double value to sidestep any rounding issues:
- (np.clongdouble(np.nextafter(1.7e308, 0.) * 1j), np.complex128),
+ (np.clongdouble(np.nextafter(1.7e308, 0.) * 1j),
+ np.complex128, np.clongdouble),
# Additionally use "nextafter" so the cast can't round down:
- (np.clongdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
+ (np.clongdouble(np.nextafter(1.7e308, np.inf)),
+ np.clongdouble, None),
])
- def test_complex_other_value_based(self, other, expected):
+ def test_complex_other_value_based(self,
+ weak_promotion, other, expected, expected_weak):
+ if weak_promotion and expected_weak is not None:
+ expected = expected_weak
+
# This would change if we modify the value based promotion
min_complex = np.dtype(np.complex64)
@@ -1339,7 +1348,7 @@ class TestPromotion:
def test_complex_pyscalar_promote_rational(self):
with pytest.raises(TypeError,
- match=r".* do not have a common DType"):
+ match=r".* no common DType exists for the given inputs"):
np.result_type(1j, rational)
with pytest.raises(TypeError,
@@ -1358,13 +1367,21 @@ class TestPromotion:
@pytest.mark.parametrize(["other", "expected"],
[(1, rational), (1., np.float64)])
- def test_float_int_pyscalar_promote_rational(self, other, expected):
+ @np._no_nep50_warning()
+ def test_float_int_pyscalar_promote_rational(
+ self, weak_promotion, other, expected):
# Note that rationals are a bit awkward as they promote with float64
# or default ints, but not float16 or uint8/int8 (which looks
- # inconsistent here)
- with pytest.raises(TypeError,
- match=r".* do not have a common DType"):
- np.result_type(other, rational)
+ # inconsistent here). The new promotion fixes this (partially?)
+ if not weak_promotion and type(other) == float:
+ # The float version checks float16 in the legacy path, which fails;
+ # the integer version seems to check int8 (also), so it can pass.
+ with pytest.raises(TypeError,
+ match=r".* do not have a common DType"):
+ np.result_type(other, rational)
+ else:
+ assert np.result_type(other, rational) == expected
assert np.result_type(other, rational(1, 2)) == expected
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 0ef1b714b..ea96f0fef 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -239,6 +239,7 @@ class TestEinsum:
assert_(b.base is a)
assert_equal(b, a.swapaxes(0, 1))
+ @np._no_nep50_warning()
def check_einsum_sums(self, dtype, do_opt=False):
# Check various sums. Does many sizes to exercise unrolled loops.
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index 6743dfb51..4e9a3c9d6 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -3,7 +3,7 @@ import pytest
import numpy as np
from numpy import uint16, float16, float32, float64
-from numpy.testing import assert_, assert_equal
+from numpy.testing import assert_, assert_equal, _OLD_PROMOTION
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
@@ -85,6 +85,7 @@ class TestHalf:
@pytest.mark.parametrize("offset", [None, "up", "down"])
@pytest.mark.parametrize("shift", [None, "up", "down"])
@pytest.mark.parametrize("float_t", [np.float32, np.float64])
+ @np._no_nep50_warning()
def test_half_conversion_rounding(self, float_t, shift, offset):
# Assumes that round to even is used during casting.
max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
@@ -450,31 +451,35 @@ class TestHalf:
assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
- def test_half_coercion(self):
+ @np._no_nep50_warning()
+ def test_half_coercion(self, weak_promotion):
"""Test that half gets coerced properly with the other types"""
a16 = np.array((1,), dtype=float16)
a32 = np.array((1,), dtype=float32)
b16 = float16(1)
b32 = float32(1)
- assert_equal(np.power(a16, 2).dtype, float16)
- assert_equal(np.power(a16, 2.0).dtype, float16)
- assert_equal(np.power(a16, b16).dtype, float16)
- assert_equal(np.power(a16, b32).dtype, float16)
- assert_equal(np.power(a16, a16).dtype, float16)
- assert_equal(np.power(a16, a32).dtype, float32)
-
- assert_equal(np.power(b16, 2).dtype, float64)
- assert_equal(np.power(b16, 2.0).dtype, float64)
- assert_equal(np.power(b16, b16).dtype, float16)
- assert_equal(np.power(b16, b32).dtype, float32)
- assert_equal(np.power(b16, a16).dtype, float16)
- assert_equal(np.power(b16, a32).dtype, float32)
-
- assert_equal(np.power(a32, a16).dtype, float32)
- assert_equal(np.power(a32, b16).dtype, float32)
- assert_equal(np.power(b32, a16).dtype, float16)
- assert_equal(np.power(b32, b16).dtype, float32)
+ assert np.power(a16, 2).dtype == float16
+ assert np.power(a16, 2.0).dtype == float16
+ assert np.power(a16, b16).dtype == float16
+ expected_dt = float32 if weak_promotion else float16
+ assert np.power(a16, b32).dtype == expected_dt
+ assert np.power(a16, a16).dtype == float16
+ assert np.power(a16, a32).dtype == float32
+
+ expected_dt = float16 if weak_promotion else float64
+ assert np.power(b16, 2).dtype == expected_dt
+ assert np.power(b16, 2.0).dtype == expected_dt
+ assert np.power(b16, b16).dtype == float16
+ assert np.power(b16, b32).dtype == float32
+ assert np.power(b16, a16).dtype == float16
+ assert np.power(b16, a32).dtype == float32
+
+ assert np.power(a32, a16).dtype == float32
+ assert np.power(a32, b16).dtype == float32
+ expected_dt = float32 if weak_promotion else float16
+ assert np.power(b32, a16).dtype == expected_dt
+ assert np.power(b32, b16).dtype == float32
@pytest.mark.skipif(platform.machine() == "armv5tel",
reason="See gh-413.")
diff --git a/numpy/core/tests/test_nep50_promotions.py b/numpy/core/tests/test_nep50_promotions.py
new file mode 100644
index 000000000..5c59a16ea
--- /dev/null
+++ b/numpy/core/tests/test_nep50_promotions.py
@@ -0,0 +1,72 @@
+"""
+This file adds basic tests for the NEP 50 style promotion compatibility
+mode. Most of these tests are likely to be simply deleted again once NEP 50
+is adopted in the main test suite. A few may be moved elsewhere.
+"""
+
+import numpy as np
+import pytest
+
+
+@pytest.fixture(scope="module", autouse=True)
+def _weak_promotion_enabled():
+ state = np._get_promotion_state()
+ np._set_promotion_state("weak_and_warn")
+ yield
+ np._set_promotion_state(state)
+
+
+def test_nep50_examples():
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ res = np.uint8(1) + 2
+ assert res.dtype == np.uint8
+
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ res = np.array([1], np.uint8) + np.int64(1)
+ assert res.dtype == np.int64
+
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ res = np.array([1], np.uint8) + np.array(1, dtype=np.int64)
+ assert res.dtype == np.int64
+
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ # Note: this should warn (and raise under the errstate), but currently does not:
+ with np.errstate(over="raise"):
+ res = np.uint8(100) + 200
+ assert res.dtype == np.uint8
+
+ with pytest.warns(Warning) as recwarn:
+ res = np.float32(1) + 3e100
+
+ # Check that both warnings were given by the single call above:
+ warning = str(recwarn.pop(UserWarning).message)
+ assert warning.startswith("result dtype changed")
+ warning = str(recwarn.pop(RuntimeWarning).message)
+ assert warning.startswith("overflow")
+ assert len(recwarn) == 0 # no further warnings
+ assert np.isinf(res)
+ assert res.dtype == np.float32
+
+ # The result changes here, but we don't warn for it (too noisy)
+ res = np.array([0.1], np.float32) == np.float64(0.1)
+ assert not res[0]
+
+ # Additional test, since the above silences the warning:
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ res = np.array([0.1], np.float32) + np.float64(0.1)
+ assert res.dtype == np.float64
+
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ res = np.array([1.], np.float32) + np.int64(3)
+ assert res.dtype == np.float64
+
+
+@pytest.mark.xfail
+def test_nep50_integer_conversion_errors():
+ # Implementation for error paths is mostly missing (as of writing)
+ with pytest.raises(ValueError): # (or TypeError?)
+ np.array([1], np.uint8) + 300
+
+ with pytest.raises(ValueError): # (or TypeError?)
+ np.uint8(1) + 300
+
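
Outside of pytest, the save/restore discipline of the module fixture above looks roughly like this (a sketch, assuming a build with this branch):

import warnings
import numpy as np

saved = np._get_promotion_state()
np._set_promotion_state("weak_and_warn")
try:
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        res = np.uint8(1) + 2  # the Python int stays "weak"
    assert res.dtype == np.uint8
    assert any("result dtype changed" in str(w.message) for w in caught)
finally:
    np._set_promotion_state(saved)
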
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 8b14284ff..7f00f6bf5 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -1003,6 +1003,7 @@ def test_longdouble_complex():
@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
@pytest.mark.parametrize("subtype", [float, int, complex, np.float16])
+@np._no_nep50_warning()
def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp):
def op_func(self, other):
return __op__
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 3466178a3..e4b1ceee3 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -2402,6 +2402,7 @@ def test_ufunc_types(ufunc):
@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
if isinstance(getattr(np, x), np.ufunc)])
+@np._no_nep50_warning()
def test_ufunc_noncontiguous(ufunc):
'''
Check that contiguous and non-contiguous calls to ufuncs
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index bdcbef91d..64318255b 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -2954,11 +2954,11 @@ class TestPercentile:
H_F_TYPE_CODES = [(int_type, np.float64)
for int_type in np.typecodes["AllInteger"]
- ] + [(np.float16, np.float64),
- (np.float32, np.float64),
+ ] + [(np.float16, np.float16),
+ (np.float32, np.float32),
(np.float64, np.float64),
(np.longdouble, np.longdouble),
- (np.complex64, np.complex128),
+ (np.complex64, np.complex64),
(np.complex128, np.complex128),
(np.clongdouble, np.clongdouble),
(np.dtype("O"), np.float64)]
@@ -2980,10 +2980,15 @@ class TestPercentile:
expected,
input_dtype,
expected_dtype):
+ expected_dtype = np.dtype(expected_dtype)
+ if np._get_promotion_state() == "legacy":
+ expected_dtype = np.promote_types(expected_dtype, np.float64)
+
arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype)
actual = np.percentile(arr, 40.0, method=method)
- np.testing.assert_almost_equal(actual, expected, 14)
+ np.testing.assert_almost_equal(
+ actual, expected_dtype.type(expected), 14)
if method in ["inverted_cdf", "closest_observation"]:
if input_dtype == "O":
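
The updated type table reflects that weak promotion preserves the input precision instead of upgrading float16/float32 (and complex64) to their 64-bit counterparts. Roughly, assuming the weak state is active in a build with this branch:

import numpy as np

arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=np.float32)

np._set_promotion_state("weak")
assert np.percentile(arr, 40.0, method="linear").dtype == np.float32

np._set_promotion_state("legacy")
assert np.percentile(arr, 40.0, method="linear").dtype == np.float64
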
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index ebbd92539..f871a5f8e 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -1803,9 +1803,9 @@ class TestCholesky:
c = np.linalg.cholesky(a)
b = np.matmul(c, c.transpose(t).conj())
- assert_allclose(b, a,
- err_msg=f'{shape} {dtype}\n{a}\n{c}',
- atol=500 * a.shape[0] * np.finfo(dtype).eps)
+ with np._no_nep50_warning():
+ atol = 500 * a.shape[0] * np.finfo(dtype).eps
+ assert_allclose(b, a, atol=atol, err_msg=f'{shape} {dtype}\n{a}\n{c}')
def test_0_size(self):
class ArraySubclass(np.ndarray):
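
The wrapper exists because the tolerance expression mixes a Python int with a NumPy scalar (`np.finfo(dtype).eps`), which is exactly the kind of arithmetic the transition warning targets. The context-manager form silences only that expression; a sketch:

import numpy as np

with np._no_nep50_warning():
    # mixed Python-int / NumPy-scalar arithmetic, computed without
    # tripping a "result dtype changed" warning
    atol = 500 * 4 * np.finfo(np.float32).eps
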
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index f60ca6922..4957ef6d7 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -36,6 +36,7 @@ __all__ = [
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON',
+ '_OLD_PROMOTION'
]
@@ -52,6 +53,8 @@ IS_PYSTON = hasattr(sys, "pyston_version_info")
HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON
HAS_LAPACK64 = numpy.linalg.lapack_lite._ilp64
+_OLD_PROMOTION = lambda: np._get_promotion_state() == 'legacy'
+
def import_nose():
""" Import nose only when needed.
@@ -473,6 +476,7 @@ def print_assert_equal(test_string, actual, desired):
raise AssertionError(msg.getvalue())
+@np._no_nep50_warning()
def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
"""
Raises an AssertionError if two items are not equal up to desired
@@ -485,7 +489,7 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
The test verifies that the elements of `actual` and `desired` satisfy:
- ``abs(desired-actual) < 1.5 * 10**(-decimal)``
+ ``abs(desired-actual) < float64(1.5 * 10**(-decimal))``
That is a looser test than originally documented, but agrees with what the
actual implementation in `assert_array_almost_equal` did up to rounding
@@ -595,10 +599,11 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
return
except (NotImplementedError, TypeError):
pass
- if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
+ if abs(desired - actual) >= np.float64(1.5 * 10.0**(-decimal)):
raise AssertionError(_build_err_msg())
+@np._no_nep50_warning()
def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
"""
Raises an AssertionError if two items are not equal up to significant
@@ -698,6 +703,7 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
raise AssertionError(msg)
+@np._no_nep50_warning()
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
precision=6, equal_nan=True, equal_inf=True,
*, strict=False):
@@ -975,6 +981,7 @@ def assert_array_equal(x, y, err_msg='', verbose=True, *, strict=False):
strict=strict)
+@np._no_nep50_warning()
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
"""
Raises an AssertionError if two objects are not equal up to desired