author     Matti Picus <matti.picus@gmail.com>    2020-08-17 17:07:02 +0300
committer  GitHub <noreply@github.com>            2020-08-17 17:07:02 +0300
commit     67c7066353534eeac8054f0deb75dc5923c6dc20 (patch)
tree       81aa702e77fb1981eb918a557ae286e22c12a02e /numpy
parent     adf50b93d76fa55d03ccbc4910602cda23b32c7d (diff)
parent     f89486c781634ad53e7107b35fc3899822734908 (diff)
Merge pull request #17029 from seberg/cast-error-return
MAINT: Add error return to all casting functionality and NpyIter
Diffstat (limited to 'numpy')
-rw-r--r--  numpy/core/src/common/lowlevel_strided_loops.h          |  37
-rw-r--r--  numpy/core/src/multiarray/array_assign_array.c          |  15
-rw-r--r--  numpy/core/src/multiarray/array_assign_scalar.c         |  15
-rw-r--r--  numpy/core/src/multiarray/array_coercion.c              |   6
-rw-r--r--  numpy/core/src/multiarray/ctors.c                       |  23
-rw-r--r--  numpy/core/src/multiarray/dtype_transfer.c              | 347
-rw-r--r--  numpy/core/src/multiarray/lowlevel_strided_loops.c.src  | 173
-rw-r--r--  numpy/core/src/multiarray/mapping.c                     |  43
-rw-r--r--  numpy/core/src/multiarray/nditer_api.c                  | 182
-rw-r--r--  numpy/core/src/multiarray/nditer_constr.c               |  43
-rw-r--r--  numpy/core/src/multiarray/nditer_impl.h                 |   7
-rw-r--r--  numpy/core/src/multiarray/nditer_pywrap.c               |   8
-rw-r--r--  numpy/core/src/multiarray/nditer_templ.c.src            |  20
-rw-r--r--  numpy/core/src/umath/reduction.c                        |   7
-rw-r--r--  numpy/core/src/umath/ufunc_object.c                     |  36
-rw-r--r--  numpy/core/tests/test_nditer.py                         |  65
-rw-r--r--  numpy/core/tests/test_ufunc.py                          |  60
17 files changed, 753 insertions(+), 334 deletions(-)
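
The core of this patch is a signature change: the low-level strided transfer functions (PyArray_StridedUnaryOp and PyArray_MaskedStridedUnaryOp) now return int, 0 on success and -1 with a Python error set on failure, instead of void, and callers check that result rather than polling PyErr_Occurred() afterwards. Below is a minimal sketch of the new calling convention; the function names and the trivial copy loop are hypothetical and only illustrate the contract, they are not part of the patch.

    #include <Python.h>
    #include <string.h>
    #include <numpy/ndarraytypes.h>

    /* Mirrors the updated (internal) typedef from lowlevel_strided_loops.h. */
    typedef int (PyArray_StridedUnaryOp)(
            char *dst, npy_intp dst_stride, char *src, npy_intp src_stride,
            npy_intp N, npy_intp src_itemsize, NpyAuxData *transferdata);

    /* Hypothetical transfer function following the new contract:
     * return 0 on success, -1 (with a Python error set) on failure. */
    static int
    example_strided_copy(char *dst, npy_intp dst_stride,
                         char *src, npy_intp src_stride,
                         npy_intp N, npy_intp src_itemsize,
                         NpyAuxData *NPY_UNUSED(transferdata))
    {
        while (N > 0) {
            memcpy(dst, src, src_itemsize);
            dst += dst_stride;
            src += src_stride;
            --N;
        }
        return 0;
    }

    /* Hypothetical caller: propagate errors through the return value. */
    static int
    example_caller(PyArray_StridedUnaryOp *stransfer, char *dst, char *src,
                   npy_intp N, npy_intp itemsize, NpyAuxData *transferdata)
    {
        if (stransfer(dst, itemsize, src, itemsize,
                      N, itemsize, transferdata) < 0) {
            return -1;  /* error already set by the transfer function */
        }
        return 0;
    }
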
diff --git a/numpy/core/src/common/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h
index f2f12a55b..12aa61822 100644
--- a/numpy/core/src/common/lowlevel_strided_loops.h
+++ b/numpy/core/src/common/lowlevel_strided_loops.h
@@ -30,10 +30,9 @@
* Use NPY_AUXDATA_CLONE and NPY_AUXDATA_FREE to deal with this data.
*
*/
-typedef void (PyArray_StridedUnaryOp)(char *dst, npy_intp dst_stride,
- char *src, npy_intp src_stride,
- npy_intp N, npy_intp src_itemsize,
- NpyAuxData *transferdata);
+typedef int (PyArray_StridedUnaryOp)(
+ char *dst, npy_intp dst_stride, char *src, npy_intp src_stride,
+ npy_intp N, npy_intp src_itemsize, NpyAuxData *transferdata);
/*
* This is for pointers to functions which behave exactly as
@@ -43,31 +42,10 @@ typedef void (PyArray_StridedUnaryOp)(char *dst, npy_intp dst_stride,
* In particular, the 'i'-th element is operated on if and only if
* mask[i*mask_stride] is true.
*/
-typedef void (PyArray_MaskedStridedUnaryOp)(char *dst, npy_intp dst_stride,
- char *src, npy_intp src_stride,
- npy_bool *mask, npy_intp mask_stride,
- npy_intp N, npy_intp src_itemsize,
- NpyAuxData *transferdata);
-
-/*
- * This function pointer is for binary operations that input two
- * arbitrarily strided one-dimensional array segments and output
- * an arbitrarily strided array segment of the same size.
- * It may be a fully general function, or a specialized function
- * when the strides or item size have particular known values.
- *
- * Examples of binary operations are the basic arithmetic operations,
- * logical operators AND, OR, and many others.
- *
- * The 'transferdata' parameter is slightly special, following a
- * generic auxiliary data pattern defined in ndarraytypes.h
- * Use NPY_AUXDATA_CLONE and NPY_AUXDATA_FREE to deal with this data.
- *
- */
-typedef void (PyArray_StridedBinaryOp)(char *dst, npy_intp dst_stride,
- char *src0, npy_intp src0_stride,
- char *src1, npy_intp src1_stride,
- npy_intp N, NpyAuxData *transferdata);
+typedef int (PyArray_MaskedStridedUnaryOp)(
+ char *dst, npy_intp dst_stride, char *src, npy_intp src_stride,
+ npy_bool *mask, npy_intp mask_stride,
+ npy_intp N, npy_intp src_itemsize, NpyAuxData *transferdata);
/*
* Gives back a function pointer to a specialized function for copying
@@ -271,6 +249,7 @@ PyArray_CastRawArrays(npy_intp count,
* The return value is the number of elements it couldn't copy. A return value
* of 0 means all elements were copied, a larger value means the end of
* the n-dimensional array was reached before 'count' elements were copied.
+ * A negative return value indicates an error occurred.
*
* ndim:
* The number of dimensions of the n-dimensional array.
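
As the comment amended above documents, the n-dimensional transfer helpers in this header now have three possible outcomes: zero (all requested elements copied), a positive count (the end of the array was reached early), and a negative value (an error occurred, with a Python exception set). A hypothetical caller-side helper, sketched only to restate that documented contract:

    /* Sketch: interpret the documented return convention of the
     * n-dimensional transfer helpers (helper name is hypothetical). */
    static int
    check_ndim_transfer_result(int not_copied)
    {
        if (not_copied < 0) {
            return -1;          /* error: a Python exception is already set */
        }
        else if (not_copied > 0) {
            /* the end of the n-dimensional array was reached before
             * 'count' elements; 'not_copied' elements remain */
        }
        else {
            /* all requested elements were copied */
        }
        return not_copied;
    }
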
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index b8dc7d516..361964a5c 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -132,17 +132,22 @@ raw_array_assign_array(int ndim, npy_intp const *shape,
NPY_RAW_ITER_START(idim, ndim, coord, shape_it) {
/* Process the innermost dimension */
- stransfer(dst_data, dst_strides_it[0], src_data, src_strides_it[0],
- shape_it[0], src_itemsize, transferdata);
+ if (stransfer(
+ dst_data, dst_strides_it[0], src_data, src_strides_it[0],
+ shape_it[0], src_itemsize, transferdata) < 0) {
+ goto fail;
+ }
} NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it,
dst_data, dst_strides_it,
src_data, src_strides_it);
NPY_END_THREADS;
-
NPY_AUXDATA_FREE(transferdata);
-
- return (needs_api && PyErr_Occurred()) ? -1 : 0;
+ return 0;
+fail:
+ NPY_END_THREADS;
+ NPY_AUXDATA_FREE(transferdata);
+ return -1;
}
/*
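
The converted assignment helpers above all share one error path: jump to a fail label, leave the threaded region, free the transfer data, and return -1. A standalone sketch of that idiom, with a hypothetical helper name and the same NumPy macros as the real code (PyArray_StridedUnaryOp is the internal typedef shown in the header hunk earlier):

    #include <Python.h>
    #include <numpy/ndarraytypes.h>

    /* Sketch of the goto-fail cleanup pattern (hypothetical helper). */
    static int
    example_assign(PyArray_StridedUnaryOp *stransfer,
                   char *dst, char *src, npy_intp n, npy_intp itemsize,
                   NpyAuxData *transferdata, int needs_api)
    {
        NPY_BEGIN_THREADS_DEF;
        if (!needs_api) {
            NPY_BEGIN_THREADS;
        }
        if (stransfer(dst, itemsize, src, itemsize,
                      n, itemsize, transferdata) < 0) {
            goto fail;
        }
        NPY_END_THREADS;
        NPY_AUXDATA_FREE(transferdata);
        return 0;
    fail:
        NPY_END_THREADS;
        NPY_AUXDATA_FREE(transferdata);
        return -1;
    }
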
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index 41eb75f1c..023772776 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -82,16 +82,21 @@ raw_array_assign_scalar(int ndim, npy_intp const *shape,
NPY_RAW_ITER_START(idim, ndim, coord, shape_it) {
/* Process the innermost dimension */
- stransfer(dst_data, dst_strides_it[0], src_data, 0,
- shape_it[0], src_itemsize, transferdata);
+ if (stransfer(
+ dst_data, dst_strides_it[0], src_data, 0,
+ shape_it[0], src_itemsize, transferdata) < 0) {
+ goto fail;
+ }
} NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord,
shape_it, dst_data, dst_strides_it);
NPY_END_THREADS;
-
NPY_AUXDATA_FREE(transferdata);
-
- return (needs_api && PyErr_Occurred()) ? -1 : 0;
+ return 0;
+fail:
+ NPY_END_THREADS;
+ NPY_AUXDATA_FREE(transferdata);
+ return -1;
}
/*
diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c
index 382645ff5..ffb5bd632 100644
--- a/numpy/core/src/multiarray/array_coercion.c
+++ b/numpy/core/src/multiarray/array_coercion.c
@@ -495,12 +495,10 @@ PyArray_Pack(PyArray_Descr *descr, char *item, PyObject *value)
res = -1;
goto finish;
}
- stransfer(item, 0, data, 0, 1, tmp_descr->elsize, transferdata);
- NPY_AUXDATA_FREE(transferdata);
-
- if (needs_api && PyErr_Occurred()) {
+ if (stransfer(item, 0, data, 0, 1, tmp_descr->elsize, transferdata) < 0) {
res = -1;
}
+ NPY_AUXDATA_FREE(transferdata);
finish:
if (PyDataType_REFCHK(tmp_descr)) {
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index dc451685a..15824e9e2 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -2391,16 +2391,21 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
src_count = *src_countptr;
dst_data = dst_dataptr[0];
src_data = src_dataptr[0];
+ int res = 0;
for(;;) {
/* Transfer the biggest amount that fits both */
count = (src_count < dst_count) ? src_count : dst_count;
- stransfer(dst_data, dst_stride,
- src_data, src_stride,
- count, src_itemsize, transferdata);
+ if (stransfer(
+ dst_data, dst_stride, src_data, src_stride,
+ count, src_itemsize, transferdata) < 0) {
+ res = -1;
+ break;
+ }
/* If we exhausted the dst block, refresh it */
if (dst_count == count) {
- if (!dst_iternext(dst_iter)) {
+ res = dst_iternext(dst_iter);
+ if (!res) {
break;
}
dst_count = *dst_countptr;
@@ -2413,7 +2418,8 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
/* If we exhausted the src block, refresh it */
if (src_count == count) {
- if (!src_iternext(src_iter)) {
+ res = src_iternext(src_iter);
+ if (!res) {
break;
}
src_count = *src_countptr;
@@ -2430,8 +2436,11 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
NPY_AUXDATA_FREE(transferdata);
NpyIter_Deallocate(dst_iter);
NpyIter_Deallocate(src_iter);
-
- return PyErr_Occurred() ? -1 : 0;
+ if (res > 0) {
+ /* The iteration stopped successfully, do not report an error */
+ return 0;
+ }
+ return res;
}
/*NUMPY_API
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 3a58b5849..c9868a2c8 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -106,7 +106,7 @@ get_bool_setdstone_transfer_function(npy_intp dst_stride,
/*************************** COPY REFERENCES *******************************/
/* Moves references from src to dst */
-static void
+static int
_strided_to_strided_move_references(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -131,10 +131,11 @@ _strided_to_strided_move_references(char *dst, npy_intp dst_stride,
dst += dst_stride;
--N;
}
+ return 0;
}
/* Copies references from src to dst */
-static void
+static int
_strided_to_strided_copy_references(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -158,6 +159,7 @@ _strided_to_strided_copy_references(char *dst, npy_intp dst_stride,
dst += dst_stride;
--N;
}
+ return 0;
}
@@ -188,7 +190,7 @@ static NpyAuxData *_strided_zero_pad_data_clone(NpyAuxData *data)
* Does a strided to strided zero-padded copy for the case where
* dst_itemsize > src_itemsize
*/
-static void
+static int
_strided_to_strided_zero_pad_copy(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -205,13 +207,14 @@ _strided_to_strided_zero_pad_copy(char *dst, npy_intp dst_stride,
dst += dst_stride;
--N;
}
+ return 0;
}
/*
* Does a strided to strided zero-padded copy for the case where
* dst_itemsize < src_itemsize
*/
-static void
+static int
_strided_to_strided_truncate_copy(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -226,13 +229,14 @@ _strided_to_strided_truncate_copy(char *dst, npy_intp dst_stride,
dst += dst_stride;
--N;
}
+ return 0;
}
/*
* Does a strided to strided zero-padded or truncated copy for the case where
* unicode swapping is needed.
*/
-static void
+static int
_strided_to_strided_unicode_copyswap(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -260,6 +264,7 @@ _strided_to_strided_unicode_copyswap(char *dst, npy_intp dst_stride,
dst += dst_stride;
--N;
}
+ return 0;
}
@@ -379,7 +384,7 @@ static NpyAuxData *_align_wrap_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_contig_align_wrap(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -395,47 +400,50 @@ _strided_to_strided_contig_align_wrap(char *dst, npy_intp dst_stride,
*todata = d->todata,
*fromdata = d->fromdata;
char *bufferin = d->bufferin, *bufferout = d->bufferout;
- npy_bool init_dest = d->init_dest, out_needs_api = d->out_needs_api;
+ npy_bool init_dest = d->init_dest;
for(;;) {
- /*
- * The caller does not know if a previous call resulted in a Python
- * exception. Much of the Python API is unsafe while an exception is in
- * flight, so just skip all the work. Someone higher in the call stack
- * will check for errors and propagate them.
- */
- if (out_needs_api && PyErr_Occurred()) {
- return;
- }
if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) {
- tobuffer(bufferin, inner_src_itemsize, src, src_stride,
- NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
- src_itemsize, todata);
+ if (tobuffer(
+ bufferin, inner_src_itemsize, src, src_stride,
+ NPY_LOWLEVEL_BUFFER_BLOCKSIZE, src_itemsize, todata) < 0) {
+ return -1;
+ }
if (init_dest) {
memset(bufferout, 0,
- dst_itemsize*NPY_LOWLEVEL_BUFFER_BLOCKSIZE);
+ dst_itemsize*NPY_LOWLEVEL_BUFFER_BLOCKSIZE);
+ }
+ if (wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
+ NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
+ inner_src_itemsize, wrappeddata) < 0) {
+ return -1;
+ }
+ if (frombuffer(dst, dst_stride, bufferout, dst_itemsize,
+ NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
+ dst_itemsize, fromdata) < 0) {
+ return -1;
}
- wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
- NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
- inner_src_itemsize, wrappeddata);
- frombuffer(dst, dst_stride, bufferout, dst_itemsize,
- NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
- dst_itemsize, fromdata);
N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE;
src += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride;
dst += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_stride;
}
else {
- tobuffer(bufferin, inner_src_itemsize, src, src_stride, N,
- src_itemsize, todata);
+ if (tobuffer(bufferin, inner_src_itemsize, src, src_stride,
+ N, src_itemsize, todata) < 0) {
+ return -1;
+ }
if (init_dest) {
memset(bufferout, 0, dst_itemsize*N);
}
- wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize, N,
- inner_src_itemsize, wrappeddata);
- frombuffer(dst, dst_stride, bufferout, dst_itemsize, N,
- dst_itemsize, fromdata);
- return;
+ if (wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
+ N, inner_src_itemsize, wrappeddata) < 0) {
+ return -1;
+ }
+ if (frombuffer(dst, dst_stride, bufferout, dst_itemsize,
+ N, dst_itemsize, fromdata) < 0) {
+ return -1;
+ }
+ return 0;
}
}
}
@@ -538,7 +546,7 @@ static NpyAuxData *_wrap_copy_swap_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_wrap_copy_swap(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -546,7 +554,9 @@ _strided_to_strided_wrap_copy_swap(char *dst, npy_intp dst_stride,
{
_wrap_copy_swap_data *d = (_wrap_copy_swap_data *)data;
+ /* We assume that d->copyswapn should not be able to error. */
d->copyswapn(dst, dst_stride, src, src_stride, N, d->swap, d->arr);
+ return 0;
}
/* This only gets used for custom data types and for Unicode when swapping */
@@ -603,6 +613,7 @@ typedef struct {
NpyAuxData base;
PyArray_VectorUnaryFunc *castfunc;
PyArrayObject *aip, *aop;
+ npy_bool needs_api;
} _strided_cast_data;
/* strided cast data free function */
@@ -630,7 +641,7 @@ static NpyAuxData *_strided_cast_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_aligned_strided_to_strided_cast(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -639,17 +650,29 @@ _aligned_strided_to_strided_cast(char *dst, npy_intp dst_stride,
_strided_cast_data *d = (_strided_cast_data *)data;
PyArray_VectorUnaryFunc *castfunc = d->castfunc;
PyArrayObject *aip = d->aip, *aop = d->aop;
+ npy_bool needs_api = d->needs_api;
while (N > 0) {
castfunc(src, dst, 1, aip, aop);
+ /*
+         * Error handling in ufuncs is not ideal at the time of writing
+         * (an error may already be in flight before this function is
+         * called). For most of NumPy's history these checks were missing
+         * entirely, so this is hopefully OK for the time being (until
+         * ufuncs are fixed).
+ */
+ if (needs_api && PyErr_Occurred()) {
+ return -1;
+ }
dst += dst_stride;
src += src_stride;
--N;
}
+ return 0;
}
/* This one requires src be of type NPY_OBJECT */
-static void
+static int
_aligned_strided_to_strided_cast_decref_src(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -658,31 +681,49 @@ _aligned_strided_to_strided_cast_decref_src(char *dst, npy_intp dst_stride,
_strided_cast_data *d = (_strided_cast_data *)data;
PyArray_VectorUnaryFunc *castfunc = d->castfunc;
PyArrayObject *aip = d->aip, *aop = d->aop;
+ npy_bool needs_api = d->needs_api;
PyObject *src_ref;
while (N > 0) {
castfunc(src, dst, 1, aip, aop);
-
- /* After casting, decrement the source ref */
+ /*
+ * See comment in `_aligned_strided_to_strided_cast`, an error could
+ * in principle be set before `castfunc` is called.
+ */
+ if (needs_api && PyErr_Occurred()) {
+ return -1;
+ }
+ /* After casting, decrement the source ref and set it to NULL */
NPY_COPY_PYOBJECT_PTR(&src_ref, src);
- NPY_DT_DBG_REFTRACE("dec src ref (cast object -> not object)", src_ref);
Py_XDECREF(src_ref);
+ memset(src, 0, sizeof(PyObject *));
+ NPY_DT_DBG_REFTRACE("dec src ref (cast object -> not object)", src_ref);
dst += dst_stride;
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_aligned_contig_to_contig_cast(char *dst, npy_intp NPY_UNUSED(dst_stride),
char *src, npy_intp NPY_UNUSED(src_stride),
npy_intp N, npy_intp NPY_UNUSED(itemsize),
NpyAuxData *data)
{
_strided_cast_data *d = (_strided_cast_data *)data;
+ npy_bool needs_api = d->needs_api;
d->castfunc(src, dst, N, d->aip, d->aop);
+ /*
+ * See comment in `_aligned_strided_to_strided_cast`, an error could
+ * in principle be set before `castfunc` is called.
+ */
+ if (needs_api && PyErr_Occurred()) {
+ return -1;
+ }
+ return 0;
}
static int
@@ -777,7 +818,7 @@ static NpyAuxData *_strided_datetime_cast_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_datetime_general_cast(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -792,12 +833,12 @@ _strided_to_strided_datetime_general_cast(char *dst, npy_intp dst_stride,
if (convert_datetime_to_datetimestruct(&d->src_meta,
dt, &dts) < 0) {
- dt = NPY_DATETIME_NAT;
+ return -1;
}
else {
if (convert_datetimestruct_to_datetime(&d->dst_meta,
&dts, &dt) < 0) {
- dt = NPY_DATETIME_NAT;
+ return -1;
}
}
@@ -807,9 +848,10 @@ _strided_to_strided_datetime_general_cast(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_strided_to_strided_datetime_cast(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -838,9 +880,10 @@ _strided_to_strided_datetime_cast(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_aligned_strided_to_strided_datetime_cast(char *dst,
npy_intp dst_stride,
char *src, npy_intp src_stride,
@@ -870,9 +913,10 @@ _aligned_strided_to_strided_datetime_cast(char *dst,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_strided_to_strided_datetime_to_string(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -888,28 +932,26 @@ _strided_to_strided_datetime_to_string(char *dst, npy_intp dst_stride,
if (convert_datetime_to_datetimestruct(&d->src_meta,
dt, &dts) < 0) {
- /* For an error, produce a 'NaT' string */
- dts.year = NPY_DATETIME_NAT;
+ return -1;
}
/* Initialize the destination to all zeros */
memset(dst, 0, dst_itemsize);
- /*
- * This may also raise an error, but the caller needs
- * to use PyErr_Occurred().
- */
- make_iso_8601_datetime(&dts, dst, dst_itemsize,
+ if (make_iso_8601_datetime(&dts, dst, dst_itemsize,
0, 0, d->src_meta.base, -1,
- NPY_UNSAFE_CASTING);
+ NPY_UNSAFE_CASTING) < 0) {
+ return -1;
+ }
dst += dst_stride;
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -934,7 +976,7 @@ _strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride,
if (parse_iso_8601_datetime(tmp_buffer, src_itemsize,
d->dst_meta.base, NPY_SAME_KIND_CASTING,
&dts, NULL, NULL) < 0) {
- dt = NPY_DATETIME_NAT;
+ return -1;
}
}
/* Otherwise parse the data in place */
@@ -942,7 +984,7 @@ _strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride,
if (parse_iso_8601_datetime(src, tmp - src,
d->dst_meta.base, NPY_SAME_KIND_CASTING,
&dts, NULL, NULL) < 0) {
- dt = NPY_DATETIME_NAT;
+ return -1;
}
}
@@ -950,7 +992,7 @@ _strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride,
if (dt != NPY_DATETIME_NAT &&
convert_datetimestruct_to_datetime(&d->dst_meta,
&dts, &dt) < 0) {
- dt = NPY_DATETIME_NAT;
+ return -1;
}
memcpy(dst, &dt, sizeof(dt));
@@ -959,6 +1001,7 @@ _strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
/*
@@ -1422,6 +1465,7 @@ get_nbo_cast_transfer_function(int aligned,
data->base.free = &_strided_cast_data_free;
data->base.clone = &_strided_cast_data_clone;
data->castfunc = castfunc;
+ data->needs_api = *out_needs_api;
/*
* TODO: This is a hack so the cast functions have an array.
* The cast functions shouldn't need that. Also, since we
@@ -1652,7 +1696,7 @@ static NpyAuxData *_one_to_n_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_one_to_n(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -1664,18 +1708,19 @@ _strided_to_strided_one_to_n(char *dst, npy_intp dst_stride,
npy_intp subN = d->N, dst_itemsize = d->dst_itemsize;
while (N > 0) {
- subtransfer(dst, dst_itemsize,
- src, 0,
- subN, src_itemsize,
- subdata);
+ if (subtransfer(
+ dst, dst_itemsize, src, 0, subN, src_itemsize, subdata) < 0) {
+ return -1;
+ }
src += src_stride;
dst += dst_stride;
--N;
}
+ return 0;
}
-static void
+static int
_strided_to_strided_one_to_n_with_finish(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -1688,21 +1733,21 @@ _strided_to_strided_one_to_n_with_finish(char *dst, npy_intp dst_stride,
npy_intp subN = d->N, dst_itemsize = d->dst_itemsize;
while (N > 0) {
- subtransfer(dst, dst_itemsize,
- src, 0,
- subN, src_itemsize,
- subdata);
-
+ if (subtransfer(
+ dst, dst_itemsize, src, 0, subN, src_itemsize, subdata) < 0) {
+ return -1;
+ }
- stransfer_finish_src(NULL, 0,
- src, 0,
- 1, src_itemsize,
- data_finish_src);
+ if (stransfer_finish_src(
+ NULL, 0, src, 0, 1, src_itemsize, data_finish_src) < 0) {
+ return -1;
+ }
src += src_stride;
dst += dst_stride;
--N;
}
+ return 0;
}
/*
@@ -1846,7 +1891,7 @@ static NpyAuxData *_n_to_n_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_n_to_n(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -1859,18 +1904,19 @@ _strided_to_strided_n_to_n(char *dst, npy_intp dst_stride,
dst_subitemsize = d->dst_itemsize;
while (N > 0) {
- subtransfer(dst, dst_subitemsize,
- src, src_subitemsize,
- subN, src_subitemsize,
- subdata);
-
+ if (subtransfer(
+ dst, dst_subitemsize, src, src_subitemsize,
+ subN, src_subitemsize, subdata) < 0) {
+ return -1;
+ }
src += src_stride;
dst += dst_stride;
--N;
}
+ return 0;
}
-static void
+static int
_contig_to_contig_n_to_n(char *dst, npy_intp NPY_UNUSED(dst_stride),
char *src, npy_intp NPY_UNUSED(src_stride),
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -1882,10 +1928,12 @@ _contig_to_contig_n_to_n(char *dst, npy_intp NPY_UNUSED(dst_stride),
npy_intp subN = d->N, src_subitemsize = d->src_itemsize,
dst_subitemsize = d->dst_itemsize;
- subtransfer(dst, dst_subitemsize,
- src, src_subitemsize,
- subN*N, src_subitemsize,
- subdata);
+ if (subtransfer(
+ dst, dst_subitemsize, src, src_subitemsize,
+ subN*N, src_subitemsize, subdata) < 0) {
+ return -1;
+ }
+ return 0;
}
/*
@@ -2049,7 +2097,7 @@ static NpyAuxData *_subarray_broadcast_data_clone( NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_subarray_broadcast(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -2072,10 +2120,11 @@ _strided_to_strided_subarray_broadcast(char *dst, npy_intp dst_stride,
count = offsetruns[run].count;
dst_ptr = dst + loop_index*dst_subitemsize;
if (offset != -1) {
- subtransfer(dst_ptr, dst_subitemsize,
- src + offset, src_subitemsize,
- count, src_subitemsize,
- subdata);
+ if (subtransfer(
+ dst_ptr, dst_subitemsize, src + offset, src_subitemsize,
+ count, src_subitemsize, subdata) < 0) {
+ return -1;
+ }
}
else {
memset(dst_ptr, 0, count*dst_subitemsize);
@@ -2087,10 +2136,11 @@ _strided_to_strided_subarray_broadcast(char *dst, npy_intp dst_stride,
dst += dst_stride;
--N;
}
+ return 0;
}
-static void
+static int
_strided_to_strided_subarray_broadcast_withrefs(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -2118,16 +2168,19 @@ _strided_to_strided_subarray_broadcast_withrefs(char *dst, npy_intp dst_stride,
count = offsetruns[run].count;
dst_ptr = dst + loop_index*dst_subitemsize;
if (offset != -1) {
- subtransfer(dst_ptr, dst_subitemsize,
- src + offset, src_subitemsize,
- count, src_subitemsize,
- subdata);
+ if (subtransfer(
+ dst_ptr, dst_subitemsize, src + offset, src_subitemsize,
+ count, src_subitemsize, subdata) < 0) {
+ return -1;
+ }
}
else {
if (stransfer_decdstref != NULL) {
- stransfer_decdstref(NULL, 0, dst_ptr, dst_subitemsize,
- count, dst_subitemsize,
- data_decdstref);
+ if (stransfer_decdstref(
+ NULL, 0, dst_ptr, dst_subitemsize,
+ count, dst_subitemsize, data_decdstref) < 0) {
+ return -1;
+ }
}
memset(dst_ptr, 0, count*dst_subitemsize);
}
@@ -2135,15 +2188,18 @@ _strided_to_strided_subarray_broadcast_withrefs(char *dst, npy_intp dst_stride,
}
if (stransfer_decsrcref != NULL) {
- stransfer_decsrcref(NULL, 0, src, src_subitemsize,
- src_subN, src_subitemsize,
- data_decsrcref);
+ if (stransfer_decsrcref(
+ NULL, 0, src, src_subitemsize,
+ src_subN, src_subitemsize, data_decsrcref) < 0) {
+ return -1;
+ }
}
src += src_stride;
dst += dst_stride;
--N;
}
+ return 0;
}
@@ -2500,7 +2556,7 @@ static NpyAuxData *_field_transfer_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_field_transfer(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -2515,11 +2571,13 @@ _strided_to_strided_field_transfer(char *dst, npy_intp dst_stride,
field = &d->fields;
if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) {
for (i = 0; i < field_count; ++i, ++field) {
- field->stransfer(dst + field->dst_offset, dst_stride,
- src + field->src_offset, src_stride,
- NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
- field->src_itemsize,
- field->data);
+ if (field->stransfer(
+ dst + field->dst_offset, dst_stride,
+ src + field->src_offset, src_stride,
+ NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
+ field->src_itemsize, field->data) < 0) {
+ return -1;
+ }
}
N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE;
src += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride;
@@ -2527,13 +2585,15 @@ _strided_to_strided_field_transfer(char *dst, npy_intp dst_stride,
}
else {
for (i = 0; i < field_count; ++i, ++field) {
- field->stransfer(dst + field->dst_offset, dst_stride,
- src + field->src_offset, src_stride,
- N,
- field->src_itemsize,
- field->data);
+ if (field->stransfer(
+ dst + field->dst_offset, dst_stride,
+ src + field->src_offset, src_stride,
+ N,
+ field->src_itemsize, field->data) < 0) {
+ return -1;
+ }
}
- return;
+ return 0;
}
}
}
@@ -2947,7 +3007,8 @@ static NpyAuxData *_masked_wrapper_transfer_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void _strided_masked_wrapper_decsrcref_transfer_function(
+static int
+_strided_masked_wrapper_decsrcref_transfer_function(
char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_bool *mask, npy_intp mask_stride,
@@ -2969,8 +3030,11 @@ static void _strided_masked_wrapper_decsrcref_transfer_function(
/* Skip masked values, still calling decsrcref for move_references */
mask = (npy_bool*)npy_memchr((char *)mask, 0, mask_stride, N,
&subloopsize, 1);
- decsrcref_stransfer(NULL, 0, src, src_stride,
- subloopsize, src_itemsize, decsrcref_transferdata);
+ if (decsrcref_stransfer(
+ NULL, 0, src, src_stride,
+ subloopsize, src_itemsize, decsrcref_transferdata) < 0) {
+ return -1;
+ }
dst += subloopsize * dst_stride;
src += subloopsize * src_stride;
N -= subloopsize;
@@ -2981,15 +3045,20 @@ static void _strided_masked_wrapper_decsrcref_transfer_function(
/* Process unmasked values */
mask = (npy_bool*)npy_memchr((char *)mask, 0, mask_stride, N,
&subloopsize, 0);
- unmasked_stransfer(dst, dst_stride, src, src_stride,
- subloopsize, src_itemsize, unmasked_transferdata);
+ if (unmasked_stransfer(
+ dst, dst_stride, src, src_stride,
+ subloopsize, src_itemsize, unmasked_transferdata) < 0) {
+ return -1;
+ }
dst += subloopsize * dst_stride;
src += subloopsize * src_stride;
N -= subloopsize;
}
+ return 0;
}
-static void _strided_masked_wrapper_transfer_function(
+static int
+_strided_masked_wrapper_transfer_function(
char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_bool *mask, npy_intp mask_stride,
@@ -3020,18 +3089,22 @@ static void _strided_masked_wrapper_transfer_function(
/* Process unmasked values */
mask = (npy_bool*)npy_memchr((char *)mask, 0, mask_stride, N,
&subloopsize, 0);
- unmasked_stransfer(dst, dst_stride, src, src_stride,
- subloopsize, src_itemsize, unmasked_transferdata);
+ if (unmasked_stransfer(
+ dst, dst_stride, src, src_stride,
+ subloopsize, src_itemsize, unmasked_transferdata) < 0) {
+ return -1;
+ }
dst += subloopsize * dst_stride;
src += subloopsize * src_stride;
N -= subloopsize;
}
+ return 0;
}
/************************* DEST BOOL SETONE *******************************/
-static void
+static int
_null_to_strided_set_bool_one(char *dst,
npy_intp dst_stride,
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3046,9 +3119,10 @@ _null_to_strided_set_bool_one(char *dst,
dst += dst_stride;
--N;
}
+ return 0;
}
-static void
+static int
_null_to_contig_set_bool_one(char *dst,
npy_intp NPY_UNUSED(dst_stride),
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3058,6 +3132,7 @@ _null_to_contig_set_bool_one(char *dst,
/* bool type is one byte, so can just use the char */
memset(dst, 1, N);
+ return 0;
}
/* Only for the bool type, sets the destination to 1 */
@@ -3101,7 +3176,7 @@ static NpyAuxData *_dst_memset_zero_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_null_to_strided_memset_zero(char *dst,
npy_intp dst_stride,
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3116,9 +3191,10 @@ _null_to_strided_memset_zero(char *dst,
dst += dst_stride;
--N;
}
+ return 0;
}
-static void
+static int
_null_to_contig_memset_zero(char *dst,
npy_intp dst_stride,
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3129,9 +3205,10 @@ _null_to_contig_memset_zero(char *dst,
npy_intp dst_itemsize = d->dst_itemsize;
memset(dst, 0, N*dst_itemsize);
+ return 0;
}
-static void
+static int
_null_to_strided_reference_setzero(char *dst,
npy_intp dst_stride,
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3143,17 +3220,15 @@ _null_to_strided_reference_setzero(char *dst,
while (N > 0) {
NPY_COPY_PYOBJECT_PTR(&dst_ref, dst);
- /* Release the reference in dst */
+ /* Release the reference in dst and set it to NULL */
NPY_DT_DBG_REFTRACE("dec dest ref (to set zero)", dst_ref);
Py_XDECREF(dst_ref);
-
- /* Set it to zero */
- dst_ref = NULL;
- NPY_COPY_PYOBJECT_PTR(dst, &dst_ref);
+ memset(dst, 0, sizeof(PyObject *));
dst += dst_stride;
--N;
}
+ return 0;
}
NPY_NO_EXPORT int
@@ -3250,7 +3325,7 @@ get_setdstzero_transfer_function(int aligned,
return NPY_SUCCEED;
}
-static void
+static int
_dec_src_ref_nop(char *NPY_UNUSED(dst),
npy_intp NPY_UNUSED(dst_stride),
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3259,9 +3334,10 @@ _dec_src_ref_nop(char *NPY_UNUSED(dst),
NpyAuxData *NPY_UNUSED(data))
{
/* NOP */
+ return 0;
}
-static void
+static int
_strided_to_null_dec_src_ref_reference(char *NPY_UNUSED(dst),
npy_intp NPY_UNUSED(dst_stride),
char *src, npy_intp src_stride,
@@ -3271,15 +3347,16 @@ _strided_to_null_dec_src_ref_reference(char *NPY_UNUSED(dst),
{
PyObject *src_ref = NULL;
while (N > 0) {
- NPY_COPY_PYOBJECT_PTR(&src_ref, src);
-
- /* Release the reference in src */
+ /* Release the reference in src and set it to NULL */
NPY_DT_DBG_REFTRACE("dec src ref (null dst)", src_ref);
+ NPY_COPY_PYOBJECT_PTR(&src_ref, src);
Py_XDECREF(src_ref);
+ memset(src, 0, sizeof(PyObject *));
src += src_stride;
--N;
}
+ return 0;
}
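
Several of the converted cast wrappers above cannot get an error code out of the legacy castfunc itself, so they fall back to checking PyErr_Occurred() when the cast needs the Python API. A hedged sketch of that fallback in isolation; the wrapper name is hypothetical, while PyArray_VectorUnaryFunc is the existing legacy cast signature from ndarraytypes.h:

    #include <Python.h>
    #include <numpy/ndarraytypes.h>

    /* Sketch: adapting a void-returning legacy cast to the new
     * int-returning contract by checking for a raised exception. */
    static int
    wrap_legacy_cast(PyArray_VectorUnaryFunc *castfunc,
                     char *src, char *dst, npy_intp n,
                     PyArrayObject *aip, PyArrayObject *aop,
                     npy_bool needs_api)
    {
        castfunc(src, dst, n, aip, aop);
        if (needs_api && PyErr_Occurred()) {
            return -1;   /* the cast raised; report it via the return value */
        }
        return 0;
    }
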
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index d234c366c..9dc802508 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -110,7 +110,7 @@
* if not it can decrease performance
* tested to improve performance on intel xeon 5x/7x, core2duo, amd phenom x4
*/
-static void
+static int
#if @is_aligned@ && @is_swap@ == 0 && @elsize@ <= NPY_SIZEOF_INTP
NPY_GCC_UNROLL_LOOPS
#endif
@@ -171,6 +171,7 @@ static void
--N;
}
+ return 0;
}
#endif
@@ -182,7 +183,7 @@ static void
* but it profits from vectorization enabled with -O3
*/
#if (@src_contig@ == 0) && @is_aligned@
-static NPY_GCC_OPT_3 void
+static NPY_GCC_OPT_3 int
@prefix@_@oper@_size@elsize@_srcstride0(char *dst,
npy_intp dst_stride,
char *src, npy_intp NPY_UNUSED(src_stride),
@@ -197,7 +198,7 @@ static NPY_GCC_OPT_3 void
npy_uint64 temp0, temp1;
#endif
if (N == 0) {
- return;
+ return 0;
}
#if @is_aligned@ && @elsize@ != 16
/* sanity check */
@@ -238,6 +239,7 @@ static NPY_GCC_OPT_3 void
--N;
}
#endif/* @elsize == 1 && @dst_contig@ -- else */
+ return 0;
}
#endif/* (@src_contig@ == 0) && @is_aligned@ */
@@ -247,7 +249,7 @@ static NPY_GCC_OPT_3 void
/**end repeat1**/
/**end repeat**/
-static void
+static int
_strided_to_strided(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -259,9 +261,10 @@ _strided_to_strided(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_swap_strided_to_strided(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -284,9 +287,10 @@ _swap_strided_to_strided(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_swap_pair_strided_to_strided(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -319,15 +323,17 @@ _swap_pair_strided_to_strided(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_contig_to_contig(char *dst, npy_intp NPY_UNUSED(dst_stride),
char *src, npy_intp NPY_UNUSED(src_stride),
npy_intp N, npy_intp src_itemsize,
NpyAuxData *NPY_UNUSED(data))
{
memmove(dst, src, src_itemsize*N);
+ return 0;
}
@@ -787,7 +793,7 @@ NPY_NO_EXPORT PyArray_StridedUnaryOp *
#endif
-static NPY_GCC_OPT_3 void
+static NPY_GCC_OPT_3 int
@prefix@_cast_@name1@_to_@name2@(
char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
@@ -873,6 +879,7 @@ static NPY_GCC_OPT_3 void
src += src_stride;
#endif
}
+ return 0;
}
#undef _CONVERT_FN
@@ -989,10 +996,14 @@ PyArray_TransferNDimToStrided(npy_intp ndim,
src_stride0 = src_strides[0];
N = shape0 - coord0;
if (N >= count) {
- stransfer(dst, dst_stride, src, src_stride0, count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride, src, src_stride0,
+ count, src_itemsize, data);
+ }
+ int res = stransfer(dst, dst_stride, src, src_stride0,
+ N, src_itemsize, data);
+ if (res < 0) {
+ return -1;
}
- stransfer(dst, dst_stride, src, src_stride0, N, src_itemsize, data);
count -= N;
/* If it's 1-dimensional, there's no more to copy */
@@ -1012,13 +1023,15 @@ PyArray_TransferNDimToStrided(npy_intp ndim,
N = shape0*M;
for (i = 0; i < M; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride, src, src_stride0,
- count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride, src, src_stride0,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride, src, src_stride0,
- shape0, src_itemsize, data);
+ res = stransfer(dst, dst_stride, src, src_stride0,
+ shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
src += src_stride1;
@@ -1073,13 +1086,15 @@ PyArray_TransferNDimToStrided(npy_intp ndim,
/* A loop for dimensions 0 and 1 */
for (i = 0; i < shape1; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride, src, src_stride0,
- count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride, src, src_stride0,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride, src, src_stride0,
- shape0, src_itemsize, data);
+ res = stransfer(dst, dst_stride, src, src_stride0,
+ shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
src += src_stride1;
@@ -1108,10 +1123,14 @@ PyArray_TransferStridedToNDim(npy_intp ndim,
dst_stride0 = dst_strides[0];
N = shape0 - coord0;
if (N >= count) {
- stransfer(dst, dst_stride0, src, src_stride, count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride0, src, src_stride,
+ count, src_itemsize, data);
+ }
+ int res = stransfer(dst, dst_stride0, src, src_stride,
+ N, src_itemsize, data);
+ if (res < 0) {
+ return -1;
}
- stransfer(dst, dst_stride0, src, src_stride, N, src_itemsize, data);
count -= N;
/* If it's 1-dimensional, there's no more to copy */
@@ -1131,13 +1150,15 @@ PyArray_TransferStridedToNDim(npy_intp ndim,
N = shape0*M;
for (i = 0; i < M; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride0, src, src_stride,
- count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride0, src, src_stride,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride0, src, src_stride,
- shape0, src_itemsize, data);
+ res = stransfer(dst, dst_stride0, src, src_stride,
+ shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
dst += dst_stride1;
@@ -1192,13 +1213,15 @@ PyArray_TransferStridedToNDim(npy_intp ndim,
/* A loop for dimensions 0 and 1 */
for (i = 0; i < shape1; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride0, src, src_stride,
- count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride0, src, src_stride,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride0, src, src_stride,
- shape0, src_itemsize, data);
+ res = stransfer(dst, dst_stride0, src, src_stride,
+ shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
dst += dst_stride1;
@@ -1228,16 +1251,18 @@ PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
dst_stride0 = dst_strides[0];
N = shape0 - coord0;
if (N >= count) {
- stransfer(dst, dst_stride0,
- src, src_stride,
- mask, mask_stride,
- count, src_itemsize, data);
- return 0;
- }
- stransfer(dst, dst_stride0,
- src, src_stride,
+ return stransfer(
+ dst, dst_stride0, src, src_stride,
mask, mask_stride,
- N, src_itemsize, data);
+ count, src_itemsize, data);
+ }
+ int res = stransfer(
+ dst, dst_stride0, src, src_stride,
+ mask, mask_stride,
+ N, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
count -= N;
/* If it's 1-dimensional, there's no more to copy */
@@ -1258,17 +1283,19 @@ PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
N = shape0*M;
for (i = 0; i < M; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride0,
- src, src_stride,
- mask, mask_stride,
- count, src_itemsize, data);
- return 0;
+ return stransfer(
+ dst, dst_stride0, src, src_stride,
+ mask, mask_stride,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride0,
- src, src_stride,
- mask, mask_stride,
- shape0, src_itemsize, data);
+ int res = stransfer(
+ dst, dst_stride0, src, src_stride,
+ mask, mask_stride,
+                        shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
dst += dst_stride1;
@@ -1324,17 +1351,19 @@ PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
/* A loop for dimensions 0 and 1 */
for (i = 0; i < shape1; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride0,
- src, src_stride,
- mask, mask_stride,
- count, src_itemsize, data);
- return 0;
+ return stransfer(
+ dst, dst_stride0, src, src_stride,
+ mask, mask_stride,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride0,
- src, src_stride,
- mask, mask_stride,
- shape0, src_itemsize, data);
+ res = stransfer(
+ dst, dst_stride0, src, src_stride,
+ mask, mask_stride,
+ shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
dst += dst_stride1;
@@ -1760,13 +1789,23 @@ mapiter_@name@(PyArrayMapIterObject *mit)
do {
#if @isget@
- stransfer(subspace_ptrs[1], subspace_strides[1],
- subspace_ptrs[0], subspace_strides[0],
- *counter, src_itemsize, transferdata);
+ if (NPY_UNLIKELY(stransfer(
+ subspace_ptrs[1], subspace_strides[1],
+ subspace_ptrs[0], subspace_strides[0],
+ *counter, src_itemsize, transferdata) < 0)) {
+ NPY_END_THREADS;
+ NPY_AUXDATA_FREE(transferdata);
+ return -1;
+ }
#else
- stransfer(subspace_ptrs[0], subspace_strides[0],
- subspace_ptrs[1], subspace_strides[1],
- *counter, src_itemsize, transferdata);
+ if (NPY_UNLIKELY(stransfer(
+ subspace_ptrs[0], subspace_strides[0],
+ subspace_ptrs[1], subspace_strides[1],
+ *counter, src_itemsize, transferdata) < 0)) {
+ NPY_END_THREADS;
+ NPY_AUXDATA_FREE(transferdata);
+ return -1;
+ }
#endif
} while (mit->subspace_next(mit->subspace_iter));
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 1f2bec8b1..db15ff1d5 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -1091,6 +1091,7 @@ array_boolean_subscript(PyArrayObject *self,
self_stride = innerstrides[0];
bmask_stride = innerstrides[1];
+ int res = 0;
do {
innersize = *NpyIter_GetInnerLoopSizePtr(iter);
self_data = dataptrs[0];
@@ -1105,8 +1106,11 @@ array_boolean_subscript(PyArrayObject *self,
/* Process unmasked values */
bmask_data = npy_memchr(bmask_data, 0, bmask_stride, innersize,
&subloopsize, 0);
- stransfer(ret_data, itemsize, self_data, self_stride,
- subloopsize, itemsize, transferdata);
+ res = stransfer(ret_data, itemsize, self_data, self_stride,
+ subloopsize, itemsize, transferdata);
+ if (res < 0) {
+ break;
+ }
innersize -= subloopsize;
self_data += subloopsize * self_stride;
ret_data += subloopsize * itemsize;
@@ -1115,8 +1119,15 @@ array_boolean_subscript(PyArrayObject *self,
NPY_END_THREADS;
- NpyIter_Deallocate(iter);
+ if (!NpyIter_Deallocate(iter)) {
+ res = -1;
+ }
NPY_AUXDATA_FREE(transferdata);
+ if (res < 0) {
+ /* Should be practically impossible, since there is no cast */
+ Py_DECREF(ret);
+ return NULL;
+ }
}
if (!PyArray_CheckExact(self)) {
@@ -1209,6 +1220,7 @@ array_assign_boolean_subscript(PyArrayObject *self,
v_data = PyArray_DATA(v);
/* Create an iterator for the data */
+ int res = 0;
if (size > 0) {
NpyIter *iter;
PyArrayObject *op[2] = {self, bmask};
@@ -1253,7 +1265,7 @@ array_assign_boolean_subscript(PyArrayObject *self,
/* Get a dtype transfer function */
NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
if (PyArray_GetDTypeTransferFunction(
- IsUintAligned(self) && IsAligned(self) &&
+ IsUintAligned(self) && IsAligned(self) &&
IsUintAligned(v) && IsAligned(v),
v_stride, fixed_strides[0],
PyArray_DESCR(v), PyArray_DESCR(self),
@@ -1282,8 +1294,11 @@ array_assign_boolean_subscript(PyArrayObject *self,
/* Process unmasked values */
bmask_data = npy_memchr(bmask_data, 0, bmask_stride, innersize,
&subloopsize, 0);
- stransfer(self_data, self_stride, v_data, v_stride,
- subloopsize, src_itemsize, transferdata);
+ res = stransfer(self_data, self_stride, v_data, v_stride,
+ subloopsize, src_itemsize, transferdata);
+ if (res < 0) {
+ break;
+ }
innersize -= subloopsize;
self_data += subloopsize * self_stride;
v_data += subloopsize * v_stride;
@@ -1295,22 +1310,12 @@ array_assign_boolean_subscript(PyArrayObject *self,
}
NPY_AUXDATA_FREE(transferdata);
- NpyIter_Deallocate(iter);
- }
-
- if (needs_api) {
- /*
- * FIXME?: most assignment operations stop after the first occurrence
- * of an error. Boolean does not currently, but should at least
- * report the error. (This is only relevant for things like str->int
- * casts which call into python)
- */
- if (PyErr_Occurred()) {
- return -1;
+ if (!NpyIter_Deallocate(iter)) {
+ res = -1;
}
}
- return 0;
+ return res;
}
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index a5b5e5c51..059f2c437 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -229,13 +229,22 @@ NpyIter_EnableExternalLoop(NpyIter *iter)
return NpyIter_Reset(iter, NULL);
}
+
+static char *_reset_cast_error = (
+ "Iterator reset failed due to a casting failure. "
+ "This error is set as a Python error.");
+
/*NUMPY_API
* Resets the iterator to its initial state
*
+ * The use of errmsg is discouraged, it cannot be guaranteed that the GIL
+ * will not be grabbed on casting errors even when this is passed.
+ *
* If errmsg is non-NULL, it should point to a variable which will
* receive the error message, and no Python exception will be set.
* This is so that the function can be called from code not holding
- * the GIL.
+ * the GIL. Note that cast errors may still lead to the GIL being
+ * grabbed temporarily.
*/
NPY_NO_EXPORT int
NpyIter_Reset(NpyIter *iter, char **errmsg)
@@ -250,6 +259,9 @@ NpyIter_Reset(NpyIter *iter, char **errmsg)
/* If buffer allocation was delayed, do it now */
if (itflags&NPY_ITFLAG_DELAYBUF) {
if (!npyiter_allocate_buffers(iter, errmsg)) {
+ if (errmsg != NULL) {
+ *errmsg = _reset_cast_error;
+ }
return NPY_FAIL;
}
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_DELAYBUF;
@@ -257,7 +269,7 @@ NpyIter_Reset(NpyIter *iter, char **errmsg)
else {
/*
* If the iterindex is already right, no need to
- * do anything
+ * do anything (and no cast error has previously occurred).
*/
bufferdata = NIT_BUFFERDATA(iter);
if (NIT_ITERINDEX(iter) == NIT_ITERSTART(iter) &&
@@ -265,9 +277,12 @@ NpyIter_Reset(NpyIter *iter, char **errmsg)
NBF_SIZE(bufferdata) > 0) {
return NPY_SUCCEED;
}
-
- /* Copy any data from the buffers back to the arrays */
- npyiter_copy_from_buffers(iter);
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ if (errmsg != NULL) {
+ *errmsg = _reset_cast_error;
+ }
+ return NPY_FAIL;
+ }
}
}
@@ -275,7 +290,12 @@ NpyIter_Reset(NpyIter *iter, char **errmsg)
if (itflags&NPY_ITFLAG_BUFFER) {
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, NULL);
+ if (npyiter_copy_to_buffers(iter, NULL) < 0) {
+ if (errmsg != NULL) {
+ *errmsg = _reset_cast_error;
+ }
+ return NPY_FAIL;
+ }
}
return NPY_SUCCEED;
@@ -288,7 +308,8 @@ NpyIter_Reset(NpyIter *iter, char **errmsg)
* If errmsg is non-NULL, it should point to a variable which will
* receive the error message, and no Python exception will be set.
* This is so that the function can be called from code not holding
- * the GIL.
+ * the GIL. Note that cast errors may still lead to the GIL being
+ * grabbed temporarily.
*/
NPY_NO_EXPORT int
NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char **errmsg)
@@ -309,8 +330,12 @@ NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char **errmsg)
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_DELAYBUF;
}
else {
- /* Copy any data from the buffers back to the arrays */
- npyiter_copy_from_buffers(iter);
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ if (errmsg != NULL) {
+ *errmsg = _reset_cast_error;
+ }
+ return NPY_FAIL;
+ }
}
}
@@ -323,7 +348,12 @@ NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char **errmsg)
if (itflags&NPY_ITFLAG_BUFFER) {
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, NULL);
+ if (npyiter_copy_to_buffers(iter, NULL) < 0) {
+ if (errmsg != NULL) {
+ *errmsg = _reset_cast_error;
+ }
+ return NPY_FAIL;
+ }
}
return NPY_SUCCEED;
@@ -335,7 +365,8 @@ NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char **errmsg)
* If errmsg is non-NULL, it should point to a variable which will
* receive the error message, and no Python exception will be set.
* This is so that the function can be called from code not holding
- * the GIL.
+ * the GIL. Note that cast errors may still lead to the GIL being
+ * grabbed temporarily.
*/
NPY_NO_EXPORT int
NpyIter_ResetToIterIndexRange(NpyIter *iter,
@@ -633,12 +664,16 @@ NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex)
/* Start the buffer at the provided iterindex */
else {
/* Write back to the arrays */
- npyiter_copy_from_buffers(iter);
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ return NPY_FAIL;
+ }
npyiter_goto_iterindex(iter, iterindex);
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, NULL);
+ if (npyiter_copy_to_buffers(iter, NULL) < 0) {
+ return NPY_FAIL;
+ }
}
}
else {
@@ -1376,6 +1411,7 @@ NpyIter_GetInnerLoopSizePtr(NpyIter *iter)
}
}
+
/*NUMPY_API
* For debugging
*/
@@ -1828,7 +1864,7 @@ npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex)
* their data needs to be written back to the arrays. The multi-index
* must be positioned for the beginning of the buffer.
*/
-NPY_NO_EXPORT void
+NPY_NO_EXPORT int
npyiter_copy_from_buffers(NpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
@@ -1861,7 +1897,7 @@ npyiter_copy_from_buffers(NpyIter *iter)
/* If we're past the end, nothing to copy */
if (NBF_SIZE(bufferdata) == 0) {
- return;
+ return 0;
}
NPY_IT_DBG_PRINT("Iterator: Copying buffers to outputs\n");
@@ -1968,7 +2004,7 @@ npyiter_copy_from_buffers(NpyIter *iter)
maskptr = (npy_bool *)ad_ptrs[maskop];
}
- PyArray_TransferMaskedStridedToNDim(ndim_transfer,
+ if (PyArray_TransferMaskedStridedToNDim(ndim_transfer,
ad_ptrs[iop], dst_strides, axisdata_incr,
buffer, src_stride,
maskptr, strides[maskop],
@@ -1976,18 +2012,22 @@ npyiter_copy_from_buffers(NpyIter *iter)
dst_shape, axisdata_incr,
op_transfersize, dtypes[iop]->elsize,
(PyArray_MaskedStridedUnaryOp *)stransfer,
- transferdata);
+ transferdata) < 0) {
+ return -1;
+ }
}
/* Regular operand */
else {
- PyArray_TransferStridedToNDim(ndim_transfer,
+ if (PyArray_TransferStridedToNDim(ndim_transfer,
ad_ptrs[iop], dst_strides, axisdata_incr,
buffer, src_stride,
dst_coords, axisdata_incr,
dst_shape, axisdata_incr,
op_transfersize, dtypes[iop]->elsize,
stransfer,
- transferdata);
+ transferdata) < 0) {
+ return -1;
+ }
}
}
/* If there's no copy back, we may have to decrement refs. In
@@ -2002,9 +2042,13 @@ npyiter_copy_from_buffers(NpyIter *iter)
NPY_IT_DBG_PRINT1("Iterator: Freeing refs and zeroing buffer "
"of operand %d\n", (int)iop);
/* Decrement refs */
- stransfer(NULL, 0, buffer, dtypes[iop]->elsize,
- transfersize, dtypes[iop]->elsize,
- transferdata);
+ if (stransfer(NULL, 0, buffer, dtypes[iop]->elsize,
+ transfersize, dtypes[iop]->elsize,
+ transferdata) < 0) {
+ /* Since this should only decrement, it should never error */
+ assert(0);
+ return -1;
+ }
/*
* Zero out the memory for safety. For instance,
* if during iteration some Python code copied an
@@ -2016,6 +2060,7 @@ npyiter_copy_from_buffers(NpyIter *iter)
}
NPY_IT_DBG_PRINT("Iterator: Finished copying buffers to outputs\n");
+ return 0;
}
/*
@@ -2023,7 +2068,7 @@ npyiter_copy_from_buffers(NpyIter *iter)
* for the start of a buffer. It decides which operands need a buffer,
* and copies the data into the buffers.
*/
-NPY_NO_EXPORT void
+NPY_NO_EXPORT int
npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
@@ -2142,7 +2187,7 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
NBF_BUFITEREND(bufferdata) = iterindex + reduce_innersize;
if (reduce_innersize == 0) {
NBF_REDUCE_OUTERSIZE(bufferdata) = 0;
- return;
+ return 0;
}
else {
NBF_REDUCE_OUTERSIZE(bufferdata) = transfersize/reduce_innersize;
@@ -2508,14 +2553,15 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
"buffer (%d items)\n",
(int)iop, (int)op_transfersize);
- PyArray_TransferNDimToStrided(ndim_transfer,
- ptrs[iop], dst_stride,
- ad_ptrs[iop], src_strides, axisdata_incr,
- src_coords, axisdata_incr,
- src_shape, axisdata_incr,
- op_transfersize, src_itemsize,
- stransfer,
- transferdata);
+ if (PyArray_TransferNDimToStrided(
+ ndim_transfer, ptrs[iop], dst_stride,
+ ad_ptrs[iop], src_strides, axisdata_incr,
+ src_coords, axisdata_incr,
+ src_shape, axisdata_incr,
+ op_transfersize, src_itemsize,
+ stransfer, transferdata) < 0) {
+ return -1;
+ }
}
}
else if (ptrs[iop] == buffers[iop]) {
@@ -2551,8 +2597,80 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
NPY_IT_DBG_PRINT1("Iterator: Finished copying inputs to buffers "
"(buffered size is %d)\n", (int)NBF_SIZE(bufferdata));
+ return 0;
}
+
+/**
+ * This function clears any references still held by the buffers and should
+ * only be used to discard buffers if an error occurred.
+ *
+ * @param iter Iterator
+ */
+NPY_NO_EXPORT void
+npyiter_clear_buffers(NpyIter *iter)
+{
+ int nop = iter->nop;
+ NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
+
+ if (NBF_SIZE(bufferdata) == 0) {
+ /* if the buffers are empty already, there is nothing to do */
+ return;
+ }
+
+ if (!(NIT_ITFLAGS(iter) & NPY_ITFLAG_NEEDSAPI)) {
+ /* Buffers do not require clearing, but should not be copied back */
+ NBF_SIZE(bufferdata) = 0;
+ return;
+ }
+
+ /*
+ * The iterator may be using a dtype with references, which always
+ * requires the API. In that case, further cleanup may be necessary.
+ *
+ * TODO: At this time, we assume that a dtype having references
+ * implies the need to hold the GIL at all times. In theory
+ * we could broaden this definition for a new
+ * `PyArray_Item_XDECREF` API and the assumption may become
+ * incorrect.
+ */
+ PyObject *type, *value, *traceback;
+ PyErr_Fetch(&type, &value, &traceback);
+
+ /* Cleanup any buffers with references */
+ char **buffers = NBF_BUFFERS(bufferdata);
+ PyArray_Descr **dtypes = NIT_DTYPES(iter);
+ for (int iop = 0; iop < nop; ++iop, ++buffers) {
+ /*
+         * We may want to find a better way to do this; on the other hand,
+         * this cleanup is rare and fairly special. A dtype using references
+         * (right now only ours) must always keep the buffer in a well-defined
+         * state (either NULL or owning the reference), and only we implement
+         * this cleanup.
+ */
+ if (!PyDataType_REFCHK(dtypes[iop])) {
+ continue;
+ }
+ if (*buffers == 0) {
+ continue;
+ }
+ int itemsize = dtypes[iop]->elsize;
+ for (npy_intp i = 0; i < NBF_SIZE(bufferdata); i++) {
+ /*
+ * See above comment, if this API is expanded the GIL assumption
+ * could become incorrect.
+ */
+ PyArray_Item_XDECREF(*buffers + (itemsize * i), dtypes[iop]);
+ }
+ /* Clear out the buffer just to be sure */
+ memset(*buffers, 0, NBF_SIZE(bufferdata) * itemsize);
+ }
+ /* Signal that the buffers are empty */
+ NBF_SIZE(bufferdata) = 0;
+ PyErr_Restore(type, value, traceback);
+}
+
+
/*
* This checks how much space can be buffered without encountering the
* same value twice, or for operands whose innermost stride is zero,
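
The new npyiter_clear_buffers above has to run reference-clearing cleanup while a casting error may already be in flight, which is why it brackets the work with PyErr_Fetch/PyErr_Restore. A minimal standalone sketch of that CPython idiom (hypothetical helper name, not part of the patch):

    #include <Python.h>

    /* Sketch: perform cleanup that may touch the Python API without
     * losing or clobbering an exception that is already set. */
    static void
    cleanup_with_error_in_flight(PyObject **objects, Py_ssize_t n)
    {
        PyObject *type, *value, *traceback;
        PyErr_Fetch(&type, &value, &traceback);   /* stash any pending error */

        for (Py_ssize_t i = 0; i < n; i++) {
            Py_CLEAR(objects[i]);                 /* may run arbitrary code */
        }

        PyErr_Restore(type, value, traceback);    /* re-raise the stashed error */
    }
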
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 7da17eafe..a0dda4090 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -476,7 +476,10 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
}
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, NULL);
+ if (npyiter_copy_to_buffers(iter, NULL) < 0) {
+ NpyIter_Deallocate(iter);
+ return NULL;
+ }
}
}
@@ -642,21 +645,27 @@ NpyIter_Copy(NpyIter *iter)
}
/*NUMPY_API
- * Deallocate an iterator
+ * Deallocate an iterator.
+ *
+ * To correctly work when an error is in progress, we have to check
+ * `PyErr_Occurred()`. This is necessary when buffers are not finalized
+ * or WritebackIfCopy is used. We could avoid that check by exposing a new
+ * function which is passed in whether or not a Python error is already set.
*/
NPY_NO_EXPORT int
NpyIter_Deallocate(NpyIter *iter)
{
+ int success = PyErr_Occurred() == NULL;
+
npy_uint32 itflags;
/*int ndim = NIT_NDIM(iter);*/
int iop, nop;
PyArray_Descr **dtype;
PyArrayObject **object;
npyiter_opitflags *op_itflags;
- npy_bool resolve = 1;
if (iter == NULL) {
- return NPY_SUCCEED;
+ return success;
}
itflags = NIT_ITFLAGS(iter);
@@ -667,13 +676,23 @@ NpyIter_Deallocate(NpyIter *iter)
/* Deallocate any buffers and buffering data */
if (itflags & NPY_ITFLAG_BUFFER) {
+ /* Ensure no data is held by the buffers before they are cleared */
+ if (success) {
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ success = NPY_FAIL;
+ }
+ }
+ else {
+ npyiter_clear_buffers(iter);
+ }
+
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
char **buffers;
NpyAuxData **transferdata;
/* buffers */
buffers = NBF_BUFFERS(bufferdata);
- for(iop = 0; iop < nop; ++iop, ++buffers) {
+ for (iop = 0; iop < nop; ++iop, ++buffers) {
PyArray_free(*buffers);
}
/* read bufferdata */
@@ -694,12 +713,12 @@ NpyIter_Deallocate(NpyIter *iter)
/*
* Deallocate all the dtypes and objects that were iterated and resolve
- * any writeback buffers created by the iterator
+ * any writeback buffers created by the iterator.
*/
- for(iop = 0; iop < nop; ++iop, ++dtype, ++object) {
+ for (iop = 0; iop < nop; ++iop, ++dtype, ++object) {
if (op_itflags[iop] & NPY_OP_ITFLAG_HAS_WRITEBACK) {
- if (resolve && PyArray_ResolveWritebackIfCopy(*object) < 0) {
- resolve = 0;
+ if (success && PyArray_ResolveWritebackIfCopy(*object) < 0) {
+ success = 0;
}
else {
PyArray_DiscardWritebackIfCopy(*object);
@@ -711,12 +730,10 @@ NpyIter_Deallocate(NpyIter *iter)
/* Deallocate the iterator memory */
PyObject_Free(iter);
- if (resolve == 0) {
- return NPY_FAIL;
- }
- return NPY_SUCCEED;
+ return success;
}
+
/* Checks 'flags' for (C|F)_ORDER_INDEX, MULTI_INDEX, and EXTERNAL_LOOP,
* setting the appropriate internal flags in 'itflags'.
*
diff --git a/numpy/core/src/multiarray/nditer_impl.h b/numpy/core/src/multiarray/nditer_impl.h
index 1477c8631..bb483bb1f 100644
--- a/numpy/core/src/multiarray/nditer_impl.h
+++ b/numpy/core/src/multiarray/nditer_impl.h
@@ -342,10 +342,11 @@ NPY_NO_EXPORT int
npyiter_allocate_buffers(NpyIter *iter, char **errmsg);
NPY_NO_EXPORT void
npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex);
-NPY_NO_EXPORT void
+NPY_NO_EXPORT int
npyiter_copy_from_buffers(NpyIter *iter);
-NPY_NO_EXPORT void
+NPY_NO_EXPORT int
npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs);
-
+NPY_NO_EXPORT void
+npyiter_clear_buffers(NpyIter *iter);
#endif
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 7f31a5096..1c68a4803 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -1268,6 +1268,10 @@ npyiter_iternext(NewNpyArrayIterObject *self)
Py_RETURN_TRUE;
}
else {
+ if (PyErr_Occurred()) {
+ /* casting error, buffer cleanup will occur at reset or dealloc */
+ return NULL;
+ }
self->finished = 1;
Py_RETURN_FALSE;
}
@@ -1483,6 +1487,10 @@ npyiter_next(NewNpyArrayIterObject *self)
*/
if (self->started) {
if (!self->iternext(self->iter)) {
+ /*
+ * A casting error may be set here (or, if no error is set, this is a
+ * normal StopIteration). Buffers may only be cleaned up later.
+ */
self->finished = 1;
return NULL;
}
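
With this change, a casting failure during buffered iteration propagates out of `next()` / `iternext()` rather than being silently swallowed when the iterator reports that it is finished. A small Python-level sketch of the caller-visible behaviour (illustrative only; the bad element is placed past the first buffer so the failure happens mid-iteration):

    import numpy as np

    arr = np.arange(int(np.BUFSIZE * 1.5)).astype(object)
    arr[-1] = None  # cannot be cast to an integer

    it = np.nditer(arr, op_dtypes=[np.dtype(np.intp)],
                   flags=["buffered", "external_loop", "refs_ok"],
                   casting="unsafe")
    next(it)       # the first buffer casts cleanly
    try:
        next(it)   # the second buffer hits `None`; the TypeError now propagates
    except TypeError:
        pass       # buffers are cleaned up when the iterator is reset or deleted
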
diff --git a/numpy/core/src/multiarray/nditer_templ.c.src b/numpy/core/src/multiarray/nditer_templ.c.src
index 0f0d59972..05ce6ae75 100644
--- a/numpy/core/src/multiarray/nditer_templ.c.src
+++ b/numpy/core/src/multiarray/nditer_templ.c.src
@@ -249,7 +249,10 @@ npyiter_buffered_reduce_iternext_iters@tag_nop@(NpyIter *iter)
memcpy(prev_dataptrs, NAD_PTRS(axisdata), NPY_SIZEOF_INTP*nop);
/* Write back to the arrays */
- npyiter_copy_from_buffers(iter);
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ npyiter_clear_buffers(iter);
+ return 0;
+ }
/* Check if we're past the end */
if (NIT_ITERINDEX(iter) >= NIT_ITEREND(iter)) {
@@ -262,7 +265,10 @@ npyiter_buffered_reduce_iternext_iters@tag_nop@(NpyIter *iter)
}
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, prev_dataptrs);
+ if (npyiter_copy_to_buffers(iter, prev_dataptrs) < 0) {
+ npyiter_clear_buffers(iter);
+ return 0;
+ }
return 1;
}
@@ -303,7 +309,10 @@ npyiter_buffered_iternext(NpyIter *iter)
}
/* Write back to the arrays */
- npyiter_copy_from_buffers(iter);
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ npyiter_clear_buffers(iter);
+ return 0;
+ }
/* Check if we're past the end */
if (NIT_ITERINDEX(iter) >= NIT_ITEREND(iter)) {
@@ -316,7 +325,10 @@ npyiter_buffered_iternext(NpyIter *iter)
}
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, NULL);
+ if (npyiter_copy_to_buffers(iter, NULL) < 0) {
+ npyiter_clear_buffers(iter);
+ return 0;
+ }
return 1;
}
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 4037a4757..d0fb2f6ed 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -254,7 +254,6 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
}
op_flags[2] = NPY_ITER_READONLY;
}
-
/* Set up result array axes mapping, operand and wheremask use default */
int result_axes[NPY_MAXDIMS];
int *op_axes[3] = {result_axes, NULL, NULL};
@@ -363,7 +362,6 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
if (loop(iter, dataptr, strideptr, countptr,
iternext, needs_api, skip_first_count, data) < 0) {
-
goto fail;
}
}
@@ -379,7 +377,10 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
}
Py_INCREF(result);
- NpyIter_Deallocate(iter);
+ if (!NpyIter_Deallocate(iter)) {
+ Py_DECREF(result);
+ return NULL;
+ }
return result;
fail:
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 8f841c6fa..b47ccd291 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -1536,7 +1536,14 @@ iterator_loop(PyUFuncObject *ufunc,
NPY_END_THREADS;
}
- return NpyIter_Deallocate(iter);
+ /*
+ * Currently `innerloop` may leave an error set, in this case
+ * NpyIter_Deallocate will always return an error as well.
+ */
+ if (NpyIter_Deallocate(iter) == NPY_FAIL) {
+ return -1;
+ }
+ return 0;
}
/*
@@ -3233,9 +3240,13 @@ PyUFunc_GenericFunction_int(PyUFuncObject *ufunc,
goto fail;
}
- /* Check whether any errors occurred during the loop */
+ /*
+ * Check whether any errors occurred during the loop. The loops should
+ * indicate this in retval, but since the inner-loop currently does not
+ * report errors, this does not happen in all branches (at this time).
+ */
if (PyErr_Occurred() ||
- _check_ufunc_fperr(errormask, extobj, ufunc_name) < 0) {
+ _check_ufunc_fperr(errormask, extobj, ufunc_name) < 0) {
retval = -1;
goto fail;
}
@@ -3997,8 +4008,17 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
finish:
Py_XDECREF(op_dtypes[0]);
- NpyIter_Deallocate(iter);
- NpyIter_Deallocate(iter_inner);
+ int res = 0;
+ if (!NpyIter_Deallocate(iter)) {
+ res = -1;
+ }
+ if (!NpyIter_Deallocate(iter_inner)) {
+ res = -1;
+ }
+ if (res < 0) {
+ Py_DECREF(out);
+ return NULL;
+ }
return (PyObject *)out;
@@ -4379,7 +4399,10 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
finish:
Py_XDECREF(op_dtypes[0]);
- NpyIter_Deallocate(iter);
+ if (!NpyIter_Deallocate(iter)) {
+ Py_DECREF(out);
+ return NULL;
+ }
return (PyObject *)out;
@@ -4388,7 +4411,6 @@ fail:
Py_XDECREF(op_dtypes[0]);
NpyIter_Deallocate(iter);
-
return NULL;
}
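
The ufunc-side changes mean that a failure while casting buffered results back to the output array is now reported to the caller instead of being lost when the iterator is deallocated. A hedged sketch of the caller-visible effect (the values are illustrative; the object addition itself succeeds, the write-back cast to `intp` does not):

    import numpy as np

    arr = np.array([1, "spam", 3], dtype=object)
    out = np.zeros(3, dtype=np.intp)
    try:
        np.add(arr, arr, out=out, casting="unsafe")
    except ValueError:
        # "spam" + "spam" is a str, and casting it to intp fails; with this
        # change the error propagates out of np.add instead of being dropped
        pass
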
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 7b3c3a40d..e10c7ad92 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2880,3 +2880,68 @@ def test_warn_noclose():
casting='equiv', op_dtypes=[np.dtype('f4')])
del it
assert len(sup.log) == 1
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
+ [("i", "O"), ("O", "i"), # most simple cases
+ ("i,O", "O,O"), # structured partially only copying O
+ ("O,i", "i,O"), # structured casting to and from O
+ ])
+@pytest.mark.parametrize("steps", [1, 2, 3])
+def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps):
+ value = 123 # relies on python cache (leak-check will still find it)
+ arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
+ count = sys.getrefcount(value)
+
+ it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
+ flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
+ for step in range(steps):
+ # The iteration finishes in 3 steps; the first two are partial
+ next(it)
+
+ # Note that resetting does not free references
+ del it
+ assert count == sys.getrefcount(value)
+
+ # Repeat the test with `iternext`
+ it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
+ flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
+ for step in range(steps):
+ it.iternext()
+
+ del it # should ensure cleanup
+ assert count == sys.getrefcount(value)
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
+ [("O", "i"), # most simple cases
+ ("O,i", "i,O"), # structured casting to and from O
+ ])
+def test_partial_iteration_error(in_dtype, buf_dtype):
+ value = 123 # relies on python cache (leak-check will still find it)
+ arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
+ if in_dtype == "O":
+ arr[int(np.BUFSIZE * 1.5)] = None
+ else:
+ arr[int(np.BUFSIZE * 1.5)]["f0"] = None
+
+ count = sys.getrefcount(value)
+
+ it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
+ flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
+ with pytest.raises(TypeError):
+ # pytest.raises seems to have issues with the error originating
+ # in the for loop, so manually unravel:
+ next(it)
+ next(it) # raises TypeError
+
+ # Repeat the test with `iternext` after resetting; the buffers should
+ # already be cleared of any references, so resetting is sufficient.
+ it.reset()
+ with pytest.raises(TypeError):
+ it.iternext()
+ it.iternext()
+
+ assert count == sys.getrefcount(value)
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 1305f4877..9eaa1a977 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1,5 +1,6 @@
import warnings
import itertools
+import sys
import pytest
@@ -11,7 +12,7 @@ import numpy.core._rational_tests as _rational_tests
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
- assert_allclose,
+ assert_allclose, HAS_REFCOUNT,
)
from numpy.compat import pickle
@@ -2074,3 +2075,60 @@ def test_ufunc_warn_with_nan(ufunc):
else:
raise ValueError('ufunc with more than 2 inputs')
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_ufunc_casterrors():
+ # Tests that casting errors are correctly reported and buffers are
+ # cleared.
+ # The following array can be added to itself as an object array, but
+ # the result cannot be cast to an integer output:
+ value = 123 # relies on python cache (leak-check will still find it)
+ arr = np.array([value] * int(np.BUFSIZE * 1.5) +
+ ["string"] +
+ [value] * int(1.5 * np.BUFSIZE), dtype=object)
+ out = np.ones(len(arr), dtype=np.intp)
+
+ count = sys.getrefcount(value)
+ with pytest.raises(ValueError):
+ # Output casting failure:
+ np.add(arr, arr, out=out, casting="unsafe")
+
+ assert count == sys.getrefcount(value)
+ # the output is unchanged after the error, which shows that the iteration
+ # was aborted (this is not necessarily defined behaviour)
+ assert out[-1] == 1
+
+ with pytest.raises(ValueError):
+ # Input casting failure:
+ np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe")
+
+ assert count == sys.getrefcount(value)
+ # the output is unchanged after the error, which shows that the iteration
+ # was aborted (this is not necessarily defined behaviour)
+ assert out[-1] == 1
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+@pytest.mark.parametrize("offset",
+ [0, np.BUFSIZE//2, int(1.5*np.BUFSIZE)])
+def test_reduce_casterrors(offset):
+ # Test reporting of casting errors in reductions. We test various
+ # offsets at which the casting error will occur, since these may occur
+ # at different places during the reduction procedure; for example,
+ # the first item may be special.
+ value = 123 # relies on python cache (leak-check will still find it)
+ arr = np.array([value] * offset +
+ ["string"] +
+ [value] * int(1.5 * np.BUFSIZE), dtype=object)
+ out = np.array(-1, dtype=np.intp)
+
+ count = sys.getrefcount(value)
+ with pytest.raises(ValueError):
+ # This is an unsafe cast, but we currently always allow that:
+ np.add.reduce(arr, dtype=np.intp, out=out)
+ assert count == sys.getrefcount(value)
+ # If an error occurred during casting, the operation runs at most up to
+ # the point of the error (giving a result of at most `value * offset`),
+ # and stays -1 if the error happened immediately.
+ # This does not define behaviour; the output is invalid and thus undefined.
+ assert out[()] < value * offset