Diffstat (limited to 'numpy/core')
46 files changed, 605 insertions, 320 deletions
diff --git a/numpy/core/_dtype.py b/numpy/core/_dtype.py index 092b848dc..df1ff180e 100644 --- a/numpy/core/_dtype.py +++ b/numpy/core/_dtype.py @@ -316,26 +316,39 @@ def _subarray_str(dtype): ) +def _name_includes_bit_suffix(dtype): + if dtype.type == np.object_: + # pointer size varies by system, best to omit it + return False + elif dtype.type == np.bool_: + # implied + return False + elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype): + # unspecified + return False + else: + return True + + def _name_get(dtype): - # provides dtype.name.__get__ + # provides dtype.name.__get__, documented as returning a "bit name" if dtype.isbuiltin == 2: # user dtypes don't promise to do anything special return dtype.type.__name__ - # Builtin classes are documented as returning a "bit name" - name = dtype.type.__name__ - - # handle bool_, str_, etc - if name[-1] == '_': - name = name[:-1] + if issubclass(dtype.type, np.void): + # historically, void subclasses preserve their name, eg `record64` + name = dtype.type.__name__ + else: + name = _kind_name(dtype) - # append bit counts to str, unicode, and void - if np.issubdtype(dtype, np.flexible) and not _isunsized(dtype): + # append bit counts + if _name_includes_bit_suffix(dtype): name += "{}".format(dtype.itemsize * 8) # append metadata to datetimes - elif dtype.type in (np.datetime64, np.timedelta64): + if dtype.type in (np.datetime64, np.timedelta64): name += _datetime_metadata_str(dtype) return name diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py index a1af7a78d..88a45561f 100644 --- a/numpy/core/_exceptions.py +++ b/numpy/core/_exceptions.py @@ -27,6 +27,7 @@ def _display_as_base(cls): assert issubclass(cls, Exception) cls.__name__ = cls.__base__.__name__ cls.__qualname__ = cls.__base__.__qualname__ + set_module(cls.__base__.__module__)(cls) return cls @@ -146,6 +147,54 @@ class _ArrayMemoryError(MemoryError): self.shape = shape self.dtype = dtype - def __str__(self): - return "Unable to allocate array with shape {} and data type {}".format(self.shape, self.dtype) + @property + def _total_size(self): + num_bytes = self.dtype.itemsize + for dim in self.shape: + num_bytes *= dim + return num_bytes + + @staticmethod + def _size_to_string(num_bytes): + """ Convert a number of bytes into a binary size string """ + import math + + # https://en.wikipedia.org/wiki/Binary_prefix + LOG2_STEP = 10 + STEP = 1024 + units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] + + unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP + unit_val = 1 << (unit_i * LOG2_STEP) + n_units = num_bytes / unit_val + del unit_val + + # ensure we pick a unit that is correct after rounding + if round(n_units) == STEP: + unit_i += 1 + n_units /= STEP + + # deal with sizes so large that we don't have units for them + if unit_i >= len(units): + new_unit_i = len(units) - 1 + n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP) + unit_i = new_unit_i + + unit_name = units[unit_i] + # format with a sensible number of digits + if unit_i == 0: + # no decimal point on bytes + return '{:.0f} {}'.format(n_units, unit_name) + elif round(n_units) < 1000: + # 3 significant figures, if none are dropped to the left of the . 
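For the _dtype.py change above, a rough sketch of the dtype names the new _name_get/_name_includes_bit_suffix logic is expected to produce (assuming a NumPy build that contains this patch):

    import numpy as np

    print(np.dtype(np.object_).name)  # 'object'         -- pointer size omitted
    print(np.dtype(np.bool_).name)    # 'bool'            -- bit size is implied
    print(np.dtype('S').name)         # 'bytes'           -- unsized flexible type, no suffix
    print(np.dtype('S5').name)        # 'bytes40'         -- 5 characters * 8 bits
    print(np.dtype('M8[ns]').name)    # 'datetime64[ns]'  -- datetime metadata appended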
+ return '{:#.3g} {}'.format(n_units, unit_name) + else: + # just give all the digits otherwise + return '{:#.0f} {}'.format(n_units, unit_name) + def __str__(self): + size_str = self._size_to_string(self._total_size) + return ( + "Unable to allocate {} for an array with shape {} and data type {}" + .format(size_str, self.shape, self.dtype) + ) diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index c70718cb6..b0ea603e1 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -459,7 +459,7 @@ def _getfield_is_safe(oldtype, newtype, offset): if newtype.hasobject or oldtype.hasobject: if offset == 0 and newtype == oldtype: return - if oldtype.names: + if oldtype.names is not None: for name in oldtype.names: if (oldtype.fields[name][1] == offset and oldtype.fields[name][0] == newtype): diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index ecd05d3ac..b1310a737 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -194,7 +194,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, See Also -------- - get_printoptions, set_string_function, array2string + get_printoptions, printoptions, set_string_function, array2string Notes ----- @@ -285,7 +285,7 @@ def get_printoptions(): See Also -------- - set_printoptions, set_string_function + set_printoptions, printoptions, set_string_function """ return _format_options.copy() @@ -685,7 +685,7 @@ def array2string(a, max_line_width=None, precision=None, if style is np._NoValue: style = repr - if a.shape == () and not a.dtype.names: + if a.shape == () and a.dtype.names is None: return style(a.item()) elif style is not np._NoValue: # Deprecation 11-9-2017 v1.14 @@ -984,20 +984,6 @@ class FloatingFormat(object): pad_left=self.pad_left, pad_right=self.pad_right) -# for back-compatibility, we keep the classes for each float type too -class FloatFormat(FloatingFormat): - def __init__(self, *args, **kwargs): - warnings.warn("FloatFormat has been replaced by FloatingFormat", - DeprecationWarning, stacklevel=2) - super(FloatFormat, self).__init__(*args, **kwargs) - - -class LongFloatFormat(FloatingFormat): - def __init__(self, *args, **kwargs): - warnings.warn("LongFloatFormat has been replaced by FloatingFormat", - DeprecationWarning, stacklevel=2) - super(LongFloatFormat, self).__init__(*args, **kwargs) - @set_module('numpy') def format_float_scientific(x, precision=None, unique=True, trim='k', @@ -1196,21 +1182,6 @@ class ComplexFloatingFormat(object): return r + i -# for back-compatibility, we keep the classes for each complex type too -class ComplexFormat(ComplexFloatingFormat): - def __init__(self, *args, **kwargs): - warnings.warn( - "ComplexFormat has been replaced by ComplexFloatingFormat", - DeprecationWarning, stacklevel=2) - super(ComplexFormat, self).__init__(*args, **kwargs) - -class LongComplexFormat(ComplexFloatingFormat): - def __init__(self, *args, **kwargs): - warnings.warn( - "LongComplexFormat has been replaced by ComplexFloatingFormat", - DeprecationWarning, stacklevel=2) - super(LongComplexFormat, self).__init__(*args, **kwargs) - class _TimelikeFormat(object): def __init__(self, data): @@ -1321,16 +1292,6 @@ class StructuredVoidFormat(object): return "({})".format(", ".join(str_fields)) -# for backwards compatibility -class StructureFormat(StructuredVoidFormat): - def __init__(self, *args, **kwargs): - # NumPy 1.14, 2018-02-14 - warnings.warn( - "StructureFormat has been replaced by StructuredVoidFormat", - DeprecationWarning, stacklevel=2) - 
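The reworked _ArrayMemoryError above now reports the requested allocation size with a binary prefix. A rough sketch of the message it should produce (the allocation only fails on machines that cannot satisfy it, and the exact wording assumes this patch):

    import numpy as np

    try:
        np.zeros((100000, 100000))    # ~74.5 GiB of float64
    except MemoryError as e:
        # e.g. "Unable to allocate 74.5 GiB for an array with shape
        # (100000, 100000) and data type float64"
        print(e)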
super(StructureFormat, self).__init__(*args, **kwargs) - - def _void_scalar_repr(x): """ Implements the repr for structured-void scalars. It is called from the diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 923c34425..7336e5e13 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -259,7 +259,8 @@ def find_functions(filename, tag='API'): elif state == STATE_ARGS: if line.startswith('{'): # finished - fargs_str = ' '.join(function_args).rstrip(' )') + # remove any white space and the closing bracket: + fargs_str = ' '.join(function_args).rstrip()[:-1].rstrip() fargs = split_arguments(fargs_str) f = Function(function_name, return_type, fargs, '\n'.join(doclist)) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index bde37fca3..422ebe2de 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -3125,10 +3125,35 @@ def around(a, decimals=0, out=None): ----- For values exactly halfway between rounded decimal values, NumPy rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, - -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due - to the inexact representation of decimal fractions in the IEEE - floating point standard [1]_ and errors introduced when scaling - by powers of ten. + -0.5 and 0.5 round to 0.0, etc. + + ``np.around`` uses a fast but sometimes inexact algorithm to round + floating-point datatypes. For positive `decimals` it is equivalent to + ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which is + inexact for large floating-point values or large values of `decimals` due + the inexact representation of decimal fractions in the IEEE floating point + standard [1]_ and errors introduced when scaling by powers of ten. For + instance, note the extra "1" in the following: + + >>> np.round(56294995342131.5, 3) + 56294995342131.51 + + If your goal is to print such values with a fixed number of decimals, it is + preferable to use numpy's float printing routines to limit the number of + printed decimals: + + >>> np.format_float_positional(56294995342131.5, precision=3) + '56294995342131.5' + + The float printing routines use an accurate but much more computationally + demanding algorithm to compute the number of digits after the decimal + point. + + Alternatively, Python's builtin `round` function uses a more accurate + but slower algorithm for 64-bit floating point values: + + >>> round(56294995342131.5, 3) + 56294995342131.5 References ---------- @@ -3419,7 +3444,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): instead of a single axis or all the axes as before. dtype : data-type, optional Type to use in computing the variance. For arrays of integer type - the default is `float32`; for arrays of float types it is the same as + the default is `float64`; for arrays of float types it is the same as the array type. out : ndarray, optional Alternate output array in which to place the result. 
It must have diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 1221aeece..ad98d562b 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -1095,7 +1095,8 @@ typedef struct PyArrayIterObject_tag PyArrayIterObject; * type of the function which translates a set of coordinates to a * pointer to the data */ -typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); +typedef char* (*npy_iter_get_dataptr_t)( + PyArrayIterObject* iter, const npy_intp*); struct PyArrayIterObject_tag { PyObject_HEAD @@ -1695,7 +1696,8 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) #define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) #define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) #define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) -#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0) +#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \ + !PyDataType_HASFIELDS(dtype)) #define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) #define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index bbcd58abb..c395b1348 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -26,6 +26,7 @@ if sys.version_info[0] < 3: from . import overrides from . import umath +from . import shape_base from .overrides import set_module from .umath import (multiply, invert, sin, PINF, NAN) from . import numerictypes @@ -48,14 +49,6 @@ array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') -def loads(*args, **kwargs): - # NumPy 1.15.0, 2017-12-10 - warnings.warn( - "np.core.numeric.loads is deprecated, use pickle.loads instead", - DeprecationWarning, stacklevel=2) - return pickle.loads(*args, **kwargs) - - __all__ = [ 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', @@ -66,7 +59,7 @@ __all__ = [ 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', - 'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', + 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask', 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', @@ -553,8 +546,10 @@ def argwhere(a): Returns ------- - index_array : ndarray + index_array : (N, a.ndim) ndarray Indices of elements that are non-zero. Indices are grouped by element. + This array will have shape ``(N, a.ndim)`` where ``N`` is the number of + non-zero items. See Also -------- @@ -562,7 +557,8 @@ def argwhere(a): Notes ----- - ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``. + ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, + but produces a result of the correct shape for a 0D array. The output of ``argwhere`` is not suitable for indexing arrays. For this purpose use ``nonzero(a)`` instead. 
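A quick sketch of the 0-d behaviour this note refers to (assuming a NumPy build that contains the argwhere change below):

    import numpy as np

    print(np.argwhere(np.array(1)).shape)  # (1, 0): one non-zero element, zero index coordinates
    print(np.argwhere(np.array(0)).shape)  # (0, 0): no non-zero elements
    # plain np.transpose(np.nonzero(...)) does not give this (N, 0) shape for 0-d input,
    # hence the ndim == 0 special case added below.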
@@ -580,6 +576,11 @@ def argwhere(a): [1, 2]]) """ + # nonzero does not behave well on 0d, so promote to 1d + if np.ndim(a) == 0: + a = shape_base.atleast_1d(a) + # then remove the added dimension + return argwhere(a)[:,:0] return transpose(nonzero(a)) @@ -2028,30 +2029,6 @@ def base_repr(number, base=2, padding=0): return ''.join(reversed(res or '0')) -def load(file): - """ - Wrapper around cPickle.load which accepts either a file-like object or - a filename. - - Note that the NumPy binary format is not based on pickle/cPickle anymore. - For details on the preferred way of loading and saving files, see `load` - and `save`. - - See Also - -------- - load, save - - """ - # NumPy 1.15.0, 2017-12-10 - warnings.warn( - "np.core.numeric.load is deprecated, use pickle.load instead", - DeprecationWarning, stacklevel=2) - if isinstance(file, type("")): - with open(file, "rb") as file_pointer: - return pickle.load(file_pointer) - return pickle.load(file) - - # These are all essentially abbreviations # These might wind up in a special abbreviations module diff --git a/numpy/core/records.py b/numpy/core/records.py index 0576005e7..a1439f9df 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -268,8 +268,8 @@ class record(nt.void): except AttributeError: #happens if field is Object type return obj - if dt.fields: - return obj.view((self.__class__, obj.dtype.fields)) + if dt.names is not None: + return obj.view((self.__class__, obj.dtype)) return obj else: raise AttributeError("'record' object has no " @@ -293,8 +293,8 @@ class record(nt.void): obj = nt.void.__getitem__(self, indx) # copy behavior of record.__getattribute__, - if isinstance(obj, nt.void) and obj.dtype.fields: - return obj.view((self.__class__, obj.dtype.fields)) + if isinstance(obj, nt.void) and obj.dtype.names is not None: + return obj.view((self.__class__, obj.dtype)) else: # return a single element return obj @@ -444,7 +444,7 @@ class recarray(ndarray): return self def __array_finalize__(self, obj): - if self.dtype.type is not record and self.dtype.fields: + if self.dtype.type is not record and self.dtype.names is not None: # if self.dtype is not np.record, invoke __setattr__ which will # convert it to a record if it is a void dtype. self.dtype = self.dtype @@ -472,7 +472,7 @@ class recarray(ndarray): # with void type convert it to the same dtype.type (eg to preserve # numpy.record type if present), since nested structured fields do not # inherit type. Don't do this for non-void structures though. 
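Several hunks in this patch (_internal.py, arrayprint.py, records.py) replace truthiness tests on ``dtype.fields``/``dtype.names`` with ``dtype.names is not None``. A small illustration of the difference, using stock NumPy behaviour:

    import numpy as np

    dt = np.dtype([])             # structured dtype with zero fields
    print(dt.names)               # ()   -- empty tuple, so "if dt.names:" is False
    print(dt.names is not None)   # True -- still correctly identified as structured
    print(np.dtype('f8').names)   # None -- plain, unstructured dtype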
- if obj.dtype.fields: + if obj.dtype.names is not None: if issubclass(obj.dtype.type, nt.void): return obj.view(dtype=(self.dtype.type, obj.dtype)) return obj @@ -487,7 +487,7 @@ class recarray(ndarray): # Automatically convert (void) structured types to records # (but not non-void structures, subarrays, or non-structured voids) - if attr == 'dtype' and issubclass(val.type, nt.void) and val.fields: + if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None: val = sb.dtype((record, val)) newattr = attr not in self.__dict__ @@ -521,7 +521,7 @@ class recarray(ndarray): # copy behavior of getattr, except that here # we might also be returning a single element if isinstance(obj, ndarray): - if obj.dtype.fields: + if obj.dtype.names is not None: obj = obj.view(type(self)) if issubclass(obj.dtype.type, nt.void): return obj.view(dtype=(self.dtype.type, obj.dtype)) @@ -577,7 +577,7 @@ class recarray(ndarray): if val is None: obj = self.getfield(*res) - if obj.dtype.fields: + if obj.dtype.names is not None: return obj return obj.view(ndarray) else: diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 338502791..5ac7752cc 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -464,7 +464,7 @@ def configuration(parent_package='',top_path=None): moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1)) # Py3K check - if sys.version_info[0] == 3: + if sys.version_info[0] >= 3: moredefs.append(('NPY_PY3K', 1)) # Generate the config.h file from moredefs diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 710f64827..d7e769e62 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -9,8 +9,9 @@ import warnings from . import numeric as _nx from . import overrides -from .numeric import array, asanyarray, newaxis +from ._asarray import array, asanyarray from .multiarray import normalize_axis_index +from . import fromnumeric as _from_nx array_function_dispatch = functools.partial( @@ -123,7 +124,7 @@ def atleast_2d(*arys): if ary.ndim == 0: result = ary.reshape(1, 1) elif ary.ndim == 1: - result = ary[newaxis, :] + result = ary[_nx.newaxis, :] else: result = ary res.append(result) @@ -193,9 +194,9 @@ def atleast_3d(*arys): if ary.ndim == 0: result = ary.reshape(1, 1, 1) elif ary.ndim == 1: - result = ary[newaxis, :, newaxis] + result = ary[_nx.newaxis, :, _nx.newaxis] elif ary.ndim == 2: - result = ary[:, :, newaxis] + result = ary[:, :, _nx.newaxis] else: result = ary res.append(result) @@ -435,9 +436,9 @@ def stack(arrays, axis=0, out=None): # Internal functions to eliminate the overhead of repeated dispatch in one of # the two possible paths inside np.block. # Use getattr to protect against __array_function__ being disabled. 
-_size = getattr(_nx.size, '__wrapped__', _nx.size) -_ndim = getattr(_nx.ndim, '__wrapped__', _nx.ndim) -_concatenate = getattr(_nx.concatenate, '__wrapped__', _nx.concatenate) +_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size) +_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim) +_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate) def _block_format_index(index): diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c index addc9f006..a7f34cbe5 100644 --- a/numpy/core/src/multiarray/alloc.c +++ b/numpy/core/src/multiarray/alloc.c @@ -25,10 +25,14 @@ #include <assert.h> -#ifdef HAVE_SYS_MMAN_H +#ifdef NPY_OS_LINUX #include <sys/mman.h> -#if defined MADV_HUGEPAGE && defined HAVE_MADVISE -#define HAVE_MADV_HUGEPAGE +#ifndef MADV_HUGEPAGE +/* + * Use code 14 (MADV_HUGEPAGE) if it isn't defined. This gives a chance of + * enabling huge pages even if built with linux kernel < 2.6.38 + */ +#define MADV_HUGEPAGE 14 #endif #endif @@ -74,11 +78,15 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, #ifdef _PyPyGC_AddMemoryPressure _PyPyPyGC_AddMemoryPressure(nelem * esz); #endif -#ifdef HAVE_MADV_HUGEPAGE +#ifdef NPY_OS_LINUX /* allow kernel allocating huge pages for large arrays */ if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u)))) { npy_uintp offset = 4096u - (npy_uintp)p % (4096u); npy_uintp length = nelem * esz - offset; + /** + * Intentionally not checking for errors that may be returned by + * older kernel versions; optimistically tries enabling huge pages. + */ madvise((void*)((npy_uintp)p + offset), length, MADV_HUGEPAGE); } #endif diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index eb939f47c..4e229e321 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -462,7 +462,7 @@ WARN_IN_DEALLOC(PyObject* warning, const char * msg) { PyErr_WriteUnraisable(Py_None); } } -}; +} /* array object functions */ @@ -607,7 +607,7 @@ PyArray_DebugPrint(PyArrayObject *obj) * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT void -PyArray_SetDatetimeParseFunction(PyObject *op) +PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op)) { } @@ -630,7 +630,7 @@ PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) /*NUMPY_API */ NPY_NO_EXPORT int -PyArray_CompareString(char *s1, char *s2, size_t len) +PyArray_CompareString(const char *s1, const char *s2, size_t len) { const unsigned char *c1 = (unsigned char *)s1; const unsigned char *c2 = (unsigned char *)s2; @@ -1200,15 +1200,28 @@ _void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op) } } if (res == NULL && !PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, "No fields found."); + /* these dtypes had no fields. Use a MultiIter to broadcast them + * to an output array, and fill with True (for EQ)*/ + PyArrayMultiIterObject *mit = (PyArrayMultiIterObject *) + PyArray_MultiIterNew(2, self, other); + if (mit == NULL) { + return NULL; + } + + res = PyArray_NewFromDescr(&PyArray_Type, + PyArray_DescrFromType(NPY_BOOL), + mit->nd, mit->dimensions, + NULL, NULL, 0, NULL); + Py_DECREF(mit); + if (res) { + PyArray_FILLWBYTE((PyArrayObject *)res, + cmp_op == Py_EQ ? 1 : 0); + } } return res; } else { - /* - * compare as a string. Assumes self and - * other have same descr->type - */ + /* compare as a string. 
Assumes self and other have same descr->type */ return _strings_richcompare(self, other, cmp_op, 0); } } diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c index 7db467308..aa4e40e66 100644 --- a/numpy/core/src/multiarray/convert.c +++ b/numpy/core/src/multiarray/convert.c @@ -543,35 +543,6 @@ PyArray_AssignZero(PyArrayObject *dst, return retcode; } -/* - * Fills an array with ones. - * - * dst: The destination array. - * wheremask: If non-NULL, a boolean mask specifying where to set the values. - * - * Returns 0 on success, -1 on failure. - */ -NPY_NO_EXPORT int -PyArray_AssignOne(PyArrayObject *dst, - PyArrayObject *wheremask) -{ - npy_bool value; - PyArray_Descr *bool_dtype; - int retcode; - - /* Create a raw bool scalar with the value True */ - bool_dtype = PyArray_DescrFromType(NPY_BOOL); - if (bool_dtype == NULL) { - return -1; - } - value = 1; - - retcode = PyArray_AssignRawScalar(dst, bool_dtype, (char *)&value, - wheremask, NPY_SAFE_CASTING); - - Py_DECREF(bool_dtype); - return retcode; -} /*NUMPY_API * Copy an array. diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 59bfa0d9f..188e68001 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -1457,28 +1457,6 @@ _dtype_from_buffer_3118(PyObject *memoryview) } -/* - * Call the python _is_from_ctypes - */ -NPY_NO_EXPORT int -_is_from_ctypes(PyObject *obj) { - PyObject *ret_obj; - static PyObject *py_func = NULL; - - npy_cache_import("numpy.core._internal", "_is_from_ctypes", &py_func); - - if (py_func == NULL) { - return -1; - } - ret_obj = PyObject_CallFunctionObjArgs(py_func, obj, NULL); - if (ret_obj == NULL) { - return -1; - } - - return PyObject_IsTrue(ret_obj); -} - - NPY_NO_EXPORT PyObject * _array_from_buffer_3118(PyObject *memoryview) { @@ -1880,13 +1858,6 @@ PyArray_GetArrayParamsFromObject(PyObject *op, *out_arr = NULL; return 0; } - if (is_object && (requested_dtype != NULL) && - (requested_dtype->type_num != NPY_OBJECT)) { - PyErr_SetString(PyExc_ValueError, - "cannot create an array from unequal-length (ragged) sequences"); - Py_DECREF(*out_dtype); - return -1; - } /* If object arrays are forced */ if (is_object) { Py_DECREF(*out_dtype); @@ -2808,9 +2779,9 @@ PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) Deprecated, use PyArray_NewFromDescr instead. */ NPY_NO_EXPORT PyObject * -PyArray_FromDimsAndDataAndDescr(int nd, int *d, +PyArray_FromDimsAndDataAndDescr(int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *descr, - char *data) + char *NPY_UNUSED(data)) { PyErr_SetString(PyExc_NotImplementedError, "PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr."); @@ -2822,7 +2793,7 @@ PyArray_FromDimsAndDataAndDescr(int nd, int *d, Deprecated, use PyArray_SimpleNew instead. 
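The _void_compare change above replaces the old "No fields found" ValueError with a broadcast boolean result: all True for ``==`` and all False for ``!=``. A minimal sketch of the Python-level effect, assuming a build with this patch:

    import numpy as np

    a = np.zeros(3, dtype=[])     # void arrays whose dtype has no fields
    b = np.zeros(3, dtype=[])
    print(a == b)                 # [ True  True  True] instead of raising ValueError
    print(a != b)                 # [False False False]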
*/ NPY_NO_EXPORT PyObject * -PyArray_FromDims(int nd, int *d, int type) +PyArray_FromDims(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type)) { PyErr_SetString(PyExc_NotImplementedError, "PyArray_FromDims: use PyArray_SimpleNew."); @@ -3902,7 +3873,13 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, s = (npy_intp)ts - offset; n = (npy_intp)count; itemsize = type->elsize; - if (n < 0 ) { + if (n < 0) { + if (itemsize == 0) { + PyErr_SetString(PyExc_ValueError, + "cannot determine count if itemsize is 0"); + Py_DECREF(type); + return NULL; + } if (s % itemsize != 0) { PyErr_SetString(PyExc_ValueError, "buffer size must be a multiple"\ @@ -4095,7 +4072,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) } for (i = 0; (i < count || count == -1) && (value = PyIter_Next(iter)); i++) { - if (i >= elcount) { + if (i >= elcount && elsize != 0) { npy_intp nbytes; /* Grow PyArray_DATA(ret): diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index 60e6bbae2..82e046ca1 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -386,7 +386,8 @@ convert_datetimestruct_to_datetime(PyArray_DatetimeMetaData *meta, * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT npy_datetime -PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d) +PyArray_DatetimeStructToDatetime( + NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d)) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_DatetimeStructToDatetime function has " @@ -400,7 +401,8 @@ PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d) * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT npy_datetime -PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT fr, npy_timedeltastruct *d) +PyArray_TimedeltaStructToTimedelta( + NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d)) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_TimedeltaStructToTimedelta function has " @@ -600,8 +602,9 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta, * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT void -PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr, - npy_datetimestruct *result) +PyArray_DatetimeToDatetimeStruct( + npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), + npy_datetimestruct *result) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_DatetimeToDatetimeStruct function has " @@ -621,8 +624,9 @@ PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr, * TO BE REMOVED - NOT USED INTERNALLY. 
*/ NPY_NO_EXPORT void -PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT fr, - npy_timedeltastruct *result) +PyArray_TimedeltaToTimedeltaStruct( + npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), + npy_timedeltastruct *result) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_TimedeltaToTimedeltaStruct function has " @@ -3128,7 +3132,7 @@ is_any_numpy_datetime_or_timedelta(PyObject *obj) */ NPY_NO_EXPORT int convert_pyobjects_to_datetimes(int count, - PyObject **objs, int *type_nums, + PyObject **objs, const int *type_nums, NPY_CASTING casting, npy_int64 *out_values, PyArray_DatetimeMetaData *inout_meta) diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c index c04a6c125..cdeb65d0e 100644 --- a/numpy/core/src/multiarray/datetime_busday.c +++ b/numpy/core/src/multiarray/datetime_busday.c @@ -48,7 +48,7 @@ get_day_of_week(npy_datetime date) */ static int is_holiday(npy_datetime date, - npy_datetime *holidays_begin, npy_datetime *holidays_end) + npy_datetime *holidays_begin, const npy_datetime *holidays_end) { npy_datetime *trial; @@ -88,7 +88,7 @@ is_holiday(npy_datetime date, */ static npy_datetime * find_earliest_holiday_on_or_after(npy_datetime date, - npy_datetime *holidays_begin, npy_datetime *holidays_end) + npy_datetime *holidays_begin, const npy_datetime *holidays_end) { npy_datetime *trial; @@ -127,7 +127,7 @@ find_earliest_holiday_on_or_after(npy_datetime date, */ static npy_datetime * find_earliest_holiday_after(npy_datetime date, - npy_datetime *holidays_begin, npy_datetime *holidays_end) + npy_datetime *holidays_begin, const npy_datetime *holidays_end) { npy_datetime *trial; @@ -159,7 +159,7 @@ static int apply_business_day_roll(npy_datetime date, npy_datetime *out, int *out_day_of_week, NPY_BUSDAY_ROLL roll, - npy_bool *weekmask, + const npy_bool *weekmask, npy_datetime *holidays_begin, npy_datetime *holidays_end) { int day_of_week; @@ -361,7 +361,7 @@ apply_business_day_offset(npy_datetime date, npy_int64 offset, static int apply_business_day_count(npy_datetime date_begin, npy_datetime date_end, npy_int64 *out, - npy_bool *weekmask, int busdays_in_weekmask, + const npy_bool *weekmask, int busdays_in_weekmask, npy_datetime *holidays_begin, npy_datetime *holidays_end) { npy_int64 count, whole_weeks; @@ -722,7 +722,7 @@ finish: */ NPY_NO_EXPORT PyArrayObject * is_business_day(PyArrayObject *dates, PyArrayObject *out, - npy_bool *weekmask, int busdays_in_weekmask, + const npy_bool *weekmask, int busdays_in_weekmask, npy_datetime *holidays_begin, npy_datetime *holidays_end) { PyArray_DatetimeMetaData temp_meta; diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index c7db092e6..734255a9d 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -149,7 +149,7 @@ array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args) arg == '|' || arg == '=') static int -_check_for_commastring(char *type, Py_ssize_t len) +_check_for_commastring(const char *type, Py_ssize_t len) { Py_ssize_t i; int sqbracket; @@ -3277,7 +3277,7 @@ arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op) } static int -descr_nonzero(PyObject *self) +descr_nonzero(PyObject *NPY_UNUSED(self)) { /* `bool(np.dtype(...)) == True` for all dtypes. Needed to override default * nonzero implementation, which checks if `len(object) > 0`. 
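In other words, every dtype is truthy, even one whose field count (and hence ``len``) is zero. For example:

    import numpy as np

    print(len(np.dtype([])))      # 0    -- no fields
    print(bool(np.dtype([])))     # True -- descr_nonzero overrides the len()-based default
    print(bool(np.dtype('i4')))   # True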
*/ diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index a90416a40..ef0dd4a01 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -3337,7 +3337,7 @@ get_decsrcref_transfer_function(int aligned, /* If there are subarrays, need to wrap it */ else if (PyDataType_HASSUBARRAY(src_dtype)) { PyArray_Dims src_shape = {NULL, -1}; - npy_intp src_size = 1; + npy_intp src_size; PyArray_StridedUnaryOp *stransfer; NpyAuxData *data; diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c index bed92403f..116e37ce5 100644 --- a/numpy/core/src/multiarray/getset.c +++ b/numpy/core/src/multiarray/getset.c @@ -190,7 +190,7 @@ array_strides_set(PyArrayObject *self, PyObject *obj) static PyObject * -array_priority_get(PyArrayObject *self) +array_priority_get(PyArrayObject *NPY_UNUSED(self)) { return PyFloat_FromDouble(NPY_PRIORITY); } diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 762563eb5..c49cf67e6 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -1456,8 +1456,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis) /* Now we can check the axis */ nd = PyArray_NDIM(mps[0]); - if ((nd == 0) || (PyArray_SIZE(mps[0]) == 1)) { - /* single element case */ + if ((nd == 0) || (PyArray_SIZE(mps[0]) <= 1)) { + /* empty/single element case */ ret = (PyArrayObject *)PyArray_NewFromDescr( &PyArray_Type, PyArray_DescrFromType(NPY_INTP), PyArray_NDIM(mps[0]), PyArray_DIMS(mps[0]), NULL, NULL, @@ -1466,7 +1466,9 @@ PyArray_LexSort(PyObject *sort_keys, int axis) if (ret == NULL) { goto fail; } - *((npy_intp *)(PyArray_DATA(ret))) = 0; + if (PyArray_SIZE(mps[0]) > 0) { + *((npy_intp *)(PyArray_DATA(ret))) = 0; + } goto finish; } if (check_and_adjust_axis(&axis, nd) < 0) { @@ -1516,19 +1518,28 @@ PyArray_LexSort(PyObject *sort_keys, int axis) char *valbuffer, *indbuffer; int *swaps; - if (N == 0 || maxelsize == 0 || sizeof(npy_intp) == 0) { - goto fail; + assert(N > 0); /* Guaranteed and assumed by indbuffer */ + npy_intp valbufsize = N * maxelsize; + if (NPY_UNLIKELY(valbufsize) == 0) { + valbufsize = 1; /* Ensure allocation is not empty */ } - valbuffer = PyDataMem_NEW(N * maxelsize); + + valbuffer = PyDataMem_NEW(valbufsize); if (valbuffer == NULL) { goto fail; } indbuffer = PyDataMem_NEW(N * sizeof(npy_intp)); if (indbuffer == NULL) { + PyDataMem_FREE(valbuffer); + goto fail; + } + swaps = malloc(NPY_LIKELY(n > 0) ? n * sizeof(int) : 1); + if (swaps == NULL) { + PyDataMem_FREE(valbuffer); PyDataMem_FREE(indbuffer); goto fail; } - swaps = malloc(n*sizeof(int)); + for (j = 0; j < n; j++) { swaps[j] = PyArray_ISBYTESWAPPED(mps[j]); } @@ -1557,8 +1568,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis) #else if (rcode < 0) { #endif - npy_free_cache(valbuffer, N * maxelsize); - npy_free_cache(indbuffer, N * sizeof(npy_intp)); + PyDataMem_FREE(valbuffer); + PyDataMem_FREE(indbuffer); free(swaps); goto fail; } @@ -2464,7 +2475,7 @@ finish: * array of values, which must be of length PyArray_NDIM(self). */ NPY_NO_EXPORT PyObject * -PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index) +PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index) { int idim, ndim = PyArray_NDIM(self); char *data = PyArray_DATA(self); @@ -2492,7 +2503,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index) * Returns 0 on success, -1 on failure. 
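The PyArray_LexSort changes above extend the single-element fast path to empty keys and fix the temporary-buffer cleanup on the error paths. A small sketch of the user-visible part, assuming a build with this patch:

    import numpy as np

    keys = (np.array([], dtype=int), np.array([], dtype=int))
    print(np.lexsort(keys))       # [] -- empty keys now sort cleanly instead of failing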
*/ NPY_NO_EXPORT int -PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index, +PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index, PyObject *obj) { int idim, ndim = PyArray_NDIM(self); diff --git a/numpy/core/src/multiarray/item_selection.h b/numpy/core/src/multiarray/item_selection.h index 90bb5100d..2276b4db7 100644 --- a/numpy/core/src/multiarray/item_selection.h +++ b/numpy/core/src/multiarray/item_selection.h @@ -15,7 +15,7 @@ count_boolean_trues(int ndim, char *data, npy_intp *ashape, npy_intp *astrides); * array of values, which must be of length PyArray_NDIM(self). */ NPY_NO_EXPORT PyObject * -PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index); +PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index); /* * Sets a single item in the array, based on a single multi-index @@ -24,7 +24,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index); * Returns 0 on success, -1 on failure. */ NPY_NO_EXPORT int -PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index, +PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index, PyObject *obj); #endif diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 0d7679fe7..e66bb36aa 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -98,7 +98,7 @@ parse_index_entry(PyObject *op, npy_intp *step_size, /* get the dataptr from its current coordinates for simple iterator */ static char* -get_ptr_simple(PyArrayIterObject* iter, npy_intp *coordinates) +get_ptr_simple(PyArrayIterObject* iter, const npy_intp *coordinates) { npy_intp i; char *ret; @@ -840,7 +840,6 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) if (check_and_adjust_index(&start, self->size, -1, NULL) < 0) { goto finish; } - retval = 0; PyArray_ITER_GOTO1D(self, start); retval = type->f->setitem(val, self->dataptr, self->ao); PyArray_ITER_RESET(self); @@ -1666,7 +1665,7 @@ static char* _set_constant(PyArrayNeighborhoodIterObject* iter, /* set the dataptr from its current coordinates */ static char* -get_ptr_constant(PyArrayIterObject* _iter, npy_intp *coordinates) +get_ptr_constant(PyArrayIterObject* _iter, const npy_intp *coordinates) { int i; npy_intp bd, _coordinates[NPY_MAXDIMS]; @@ -1721,7 +1720,7 @@ __npy_pos_remainder(npy_intp i, npy_intp n) /* set the dataptr from its current coordinates */ static char* -get_ptr_mirror(PyArrayIterObject* _iter, npy_intp *coordinates) +get_ptr_mirror(PyArrayIterObject* _iter, const npy_intp *coordinates) { int i; npy_intp bd, _coordinates[NPY_MAXDIMS], lb; @@ -1755,7 +1754,7 @@ __npy_euclidean_division(npy_intp i, npy_intp n) _coordinates[c] = lb + __npy_euclidean_division(bd, p->limits_sizes[c]); static char* -get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates) +get_ptr_circular(PyArrayIterObject* _iter, const npy_intp *coordinates) { int i; npy_intp bd, _coordinates[NPY_MAXDIMS], lb; @@ -1777,7 +1776,7 @@ get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates) * A Neighborhood Iterator object. 
*/ NPY_NO_EXPORT PyObject* -PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp *bounds, +PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp *bounds, int mode, PyArrayObject* fill) { int i; diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 9bb85e320..247864775 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -176,7 +176,7 @@ unpack_tuple(PyTupleObject *index, PyObject **result, npy_intp result_n) /* Unpack a single scalar index, taking a new reference to match unpack_tuple */ static NPY_INLINE npy_intp -unpack_scalar(PyObject *index, PyObject **result, npy_intp result_n) +unpack_scalar(PyObject *index, PyObject **result, npy_intp NPY_UNUSED(result_n)) { Py_INCREF(index); result[0] = index; diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 79c60aa2e..e5845f2f6 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -1051,7 +1051,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) NPY_NO_EXPORT PyObject * -array_ufunc(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_ufunc(PyArrayObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) { PyObject *ufunc, *method_name, *normal_args, *ufunc_method; PyObject *result = NULL; @@ -1100,7 +1100,7 @@ cleanup: } static PyObject * -array_function(PyArrayObject *self, PyObject *c_args, PyObject *c_kwds) +array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kwds) { PyObject *func, *types, *args, *kwargs, *result; static char *kwlist[] = {"func", "types", "args", "kwargs", NULL}; @@ -1179,7 +1179,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) return NULL; } - ret = PyArray_Resize(self, &newshape, refcheck, NPY_CORDER); + ret = PyArray_Resize(self, &newshape, refcheck, NPY_ANYORDER); npy_free_cache_dim_obj(newshape); if (ret == NULL) { return NULL; @@ -1732,7 +1732,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args)) } static PyObject * -array_reduce_ex_regular(PyArrayObject *self, int protocol) +array_reduce_ex_regular(PyArrayObject *self, int NPY_UNUSED(protocol)) { PyObject *subclass_array_reduce = NULL; PyObject *ret; @@ -1861,7 +1861,7 @@ array_reduce_ex(PyArrayObject *self, PyObject *args) PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || (PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) && ((PyObject*)self)->ob_type != &PyArray_Type) || - PyDataType_ISUNSIZED(descr)) { + descr->elsize == 0) { /* The PickleBuffer class from version 5 of the pickle protocol * can only be used for arrays backed by a contiguous data buffer. 
* For all other cases we fallback to the generic array_reduce diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index bef978c94..441567049 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -286,7 +286,8 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, * Convert to a 1D C-array */ NPY_NO_EXPORT int -PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) +PyArray_As1D(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), + int *NPY_UNUSED(d1), int NPY_UNUSED(typecode)) { /* 2008-07-14, 1.5 */ PyErr_SetString(PyExc_NotImplementedError, @@ -298,7 +299,8 @@ PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) * Convert to a 2D C-array */ NPY_NO_EXPORT int -PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode) +PyArray_As2D(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), + int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode)) { /* 2008-07-14, 1.5 */ PyErr_SetString(PyExc_NotImplementedError, @@ -1560,7 +1562,8 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) PyArrayObject *oparr = NULL, *ret = NULL; npy_bool subok = NPY_FALSE; npy_bool copy = NPY_TRUE; - int ndmin = 0, nd; + int nd; + npy_intp ndmin = 0; PyArray_Descr *type = NULL; PyArray_Descr *oldtype = NULL; NPY_ORDER order = NPY_KEEPORDER; @@ -1631,12 +1634,10 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) } } - /* copy=False with default dtype, order and ndim */ - if (STRIDING_OK(oparr, order)) { - ret = oparr; - Py_INCREF(ret); - goto finish; - } + /* copy=False with default dtype, order (any is OK) and ndim */ + ret = oparr; + Py_INCREF(ret); + goto finish; } } @@ -3781,7 +3782,7 @@ _vec_string_no_args(PyArrayObject* char_array, } static PyObject * -_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) { PyArrayObject* char_array = NULL; PyArray_Descr *type; diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c index 18ca127e1..db0bfcece 100644 --- a/numpy/core/src/multiarray/nditer_api.c +++ b/numpy/core/src/multiarray/nditer_api.c @@ -1628,15 +1628,12 @@ npyiter_coalesce_axes(NpyIter *iter) npy_intp istrides, nstrides = NAD_NSTRIDES(); NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - NpyIter_AxisData *ad_compress; + NpyIter_AxisData *ad_compress = axisdata; npy_intp new_ndim = 1; /* The HASMULTIINDEX or IDENTPERM flags do not apply after coalescing */ NIT_ITFLAGS(iter) &= ~(NPY_ITFLAG_IDENTPERM|NPY_ITFLAG_HASMULTIINDEX); - axisdata = NIT_AXISDATA(iter); - ad_compress = axisdata; - for (idim = 0; idim < ndim-1; ++idim) { int can_coalesce = 1; npy_intp shape0 = NAD_SHAPE(ad_compress); diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c index 3b3635afe..d40836dc2 100644 --- a/numpy/core/src/multiarray/nditer_constr.c +++ b/numpy/core/src/multiarray/nditer_constr.c @@ -24,7 +24,7 @@ static int npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags); static int npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes, - npy_intp *itershape); + const npy_intp *itershape); static int npyiter_calculate_ndim(int nop, PyArrayObject **op_in, int oa_ndim); @@ -55,7 +55,7 @@ npyiter_check_casting(int nop, PyArrayObject **op, 
static int npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags, char **op_dataptr, - npy_uint32 *op_flags, int **op_axes, + const npy_uint32 *op_flags, int **op_axes, npy_intp *itershape); static void npyiter_replace_axisdata(NpyIter *iter, int iop, @@ -74,23 +74,23 @@ static void npyiter_find_best_axis_ordering(NpyIter *iter); static PyArray_Descr * npyiter_get_common_dtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, + const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, PyArray_Descr **op_request_dtypes, int only_inputs); static PyArrayObject * npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype, npy_uint32 flags, npyiter_opitflags *op_itflags, int op_ndim, npy_intp *shape, - PyArray_Descr *op_dtype, int *op_axes); + PyArray_Descr *op_dtype, const int *op_axes); static int npyiter_allocate_arrays(NpyIter *iter, npy_uint32 flags, PyArray_Descr **op_dtype, PyTypeObject *subtype, - npy_uint32 *op_flags, npyiter_opitflags *op_itflags, + const npy_uint32 *op_flags, npyiter_opitflags *op_itflags, int **op_axes); static void npyiter_get_priority_subtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, + const npyiter_opitflags *op_itflags, double *subtype_priority, PyTypeObject **subtype); static int npyiter_allocate_transfer_functions(NpyIter *iter); @@ -787,7 +787,7 @@ npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags) static int npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes, - npy_intp *itershape) + const npy_intp *itershape) { char axes_dupcheck[NPY_MAXDIMS]; int iop, idim; @@ -1423,7 +1423,7 @@ check_mask_for_writemasked_reduction(NpyIter *iter, int iop) static int npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags, char **op_dataptr, - npy_uint32 *op_flags, int **op_axes, + const npy_uint32 *op_flags, int **op_axes, npy_intp *itershape) { npy_uint32 itflags = NIT_ITFLAGS(iter); @@ -2409,7 +2409,7 @@ npyiter_find_best_axis_ordering(NpyIter *iter) */ static PyArray_Descr * npyiter_get_common_dtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, + const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, PyArray_Descr **op_request_dtypes, int only_inputs) { @@ -2477,7 +2477,7 @@ static PyArrayObject * npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype, npy_uint32 flags, npyiter_opitflags *op_itflags, int op_ndim, npy_intp *shape, - PyArray_Descr *op_dtype, int *op_axes) + PyArray_Descr *op_dtype, const int *op_axes) { npy_uint32 itflags = NIT_ITFLAGS(iter); int idim, ndim = NIT_NDIM(iter); @@ -2706,7 +2706,7 @@ static int npyiter_allocate_arrays(NpyIter *iter, npy_uint32 flags, PyArray_Descr **op_dtype, PyTypeObject *subtype, - npy_uint32 *op_flags, npyiter_opitflags *op_itflags, + const npy_uint32 *op_flags, npyiter_opitflags *op_itflags, int **op_axes) { npy_uint32 itflags = NIT_ITFLAGS(iter); @@ -3109,7 +3109,7 @@ npyiter_allocate_arrays(NpyIter *iter, */ static void npyiter_get_priority_subtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, + const npyiter_opitflags *op_itflags, double *subtype_priority, PyTypeObject **subtype) { diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c index ffea08bb3..4b9d41aa4 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.c +++ b/numpy/core/src/multiarray/nditer_pywrap.c @@ -82,7 +82,8 @@ static int npyiter_cache_values(NewNpyArrayIterObject *self) } static PyObject * 
-npyiter_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) +npyiter_new(PyTypeObject *subtype, PyObject *NPY_UNUSED(args), + PyObject *NPY_UNUSED(kwds)) { NewNpyArrayIterObject *self; @@ -535,7 +536,7 @@ try_single_dtype: } static int -npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop, +npyiter_convert_op_axes(PyObject *op_axes_in, int nop, int **op_axes, int *oa_ndim) { PyObject *a; @@ -2365,7 +2366,7 @@ npyiter_close(NewNpyArrayIterObject *self) } static PyObject * -npyiter_exit(NewNpyArrayIterObject *self, PyObject *args) +npyiter_exit(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args)) { /* even if called via exception handling, writeback any data */ return npyiter_close(self); diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index 0ceb994ef..dabc866ff 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -391,7 +391,8 @@ array_matrix_multiply(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_inplace_matrix_multiply(PyArrayObject *m1, PyObject *m2) +array_inplace_matrix_multiply( + PyArrayObject *NPY_UNUSED(m1), PyObject *NPY_UNUSED(m2)) { PyErr_SetString(PyExc_TypeError, "In-place matrix multiplication is not (yet) supported. " diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 34839b866..9adca6773 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -4492,6 +4492,36 @@ initialize_numeric_types(void) PyArrayIter_Type.tp_iter = PyObject_SelfIter; PyArrayMapIter_Type.tp_iter = PyObject_SelfIter; + + /* + * Give types different names when they are the same size (gh-9799). + * `np.intX` always refers to the first int of that size in the sequence + * `['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']`. 
+ */ +#if (NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT) + PyByteArrType_Type.tp_name = "numpy.byte"; + PyUByteArrType_Type.tp_name = "numpy.ubyte"; +#endif +#if (NPY_SIZEOF_SHORT == NPY_SIZEOF_INT) + PyShortArrType_Type.tp_name = "numpy.short"; + PyUShortArrType_Type.tp_name = "numpy.ushort"; +#endif +#if (NPY_SIZEOF_INT == NPY_SIZEOF_LONG) + PyIntArrType_Type.tp_name = "numpy.intc"; + PyUIntArrType_Type.tp_name = "numpy.uintc"; +#endif +#if (NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG) + PyLongLongArrType_Type.tp_name = "numpy.longlong"; + PyULongLongArrType_Type.tp_name = "numpy.ulonglong"; +#endif + + /* + Do the same for longdouble + */ +#if (NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE) + PyLongDoubleArrType_Type.tp_name = "numpy.longdouble"; + PyCLongDoubleArrType_Type.tp_name = "numpy.clongdouble"; +#endif } typedef struct { diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c index 30820737e..4e31f003b 100644 --- a/numpy/core/src/multiarray/shape.c +++ b/numpy/core/src/multiarray/shape.c @@ -26,7 +26,7 @@ static int _fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr); static int -_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims, +_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order); static void @@ -40,11 +40,11 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype); */ NPY_NO_EXPORT PyObject * PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, - NPY_ORDER order) + NPY_ORDER NPY_UNUSED(order)) { npy_intp oldnbytes, newnbytes; npy_intp oldsize, newsize; - int new_nd=newshape->len, k, n, elsize; + int new_nd=newshape->len, k, elsize; int refcnt; npy_intp* new_dimensions=newshape->ptr; npy_intp new_strides[NPY_MAXDIMS]; @@ -136,8 +136,8 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, PyObject *zero = PyInt_FromLong(0); char *optr; optr = PyArray_BYTES(self) + oldnbytes; - n = newsize - oldsize; - for (k = 0; k < n; k++) { + npy_intp n_new = newsize - oldsize; + for (npy_intp i = 0; i < n_new; i++) { _putzero((char *)optr, zero, PyArray_DESCR(self)); optr += elsize; } @@ -361,7 +361,7 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype) * stride of the next-fastest index. */ static int -_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims, +_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order) { int oldnd; @@ -766,7 +766,7 @@ static int _npy_stride_sort_item_comparator(const void *a, const void *b) * [(2, 12), (0, 4), (1, -2)]. */ NPY_NO_EXPORT void -PyArray_CreateSortedStridePerm(int ndim, npy_intp *strides, +PyArray_CreateSortedStridePerm(int ndim, npy_intp const *strides, npy_stride_sort_item *out_strideperm) { int i; @@ -1048,7 +1048,7 @@ build_shape_string(npy_intp n, npy_intp *vals) * from a reduction result once its computation is complete. 
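The scalartypes.c.src block above gives same-sized integer types distinct Python-visible names (gh-9799). A rough sketch of the effect on a typical 64-bit Linux build containing this change, where C long and long long are both 64 bits:

    import numpy as np

    print(np.int64)     # <class 'numpy.int64'>    -- the LONG-based type keeps the bit name
    print(np.longlong)  # <class 'numpy.longlong'> -- no longer also displayed as numpy.int64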
*/ NPY_NO_EXPORT void -PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags) +PyArray_RemoveAxesInPlace(PyArrayObject *arr, const npy_bool *flags) { PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; npy_intp *shape = fa->dimensions, *strides = fa->strides; diff --git a/numpy/core/src/npysort/radixsort.c.src b/numpy/core/src/npysort/radixsort.c.src index c90b06974..72887d7e4 100644 --- a/numpy/core/src/npysort/radixsort.c.src +++ b/numpy/core/src/npysort/radixsort.c.src @@ -198,9 +198,9 @@ aradixsort_@suff@(void *start, npy_intp* tosort, npy_intp num, void *NPY_UNUSED( return 0; } - k1 = KEY_OF(arr[0]); + k1 = KEY_OF(arr[tosort[0]]); for (npy_intp i = 1; i < num; i++) { - k2 = KEY_OF(arr[i]); + k2 = KEY_OF(arr[tosort[i]]); if (k1 > k2) { all_sorted = 0; break; diff --git a/numpy/core/src/umath/_rational_tests.c.src b/numpy/core/src/umath/_rational_tests.c.src index 9e74845df..615e395c7 100644 --- a/numpy/core/src/umath/_rational_tests.c.src +++ b/numpy/core/src/umath/_rational_tests.c.src @@ -539,11 +539,11 @@ static PyObject* pyrational_str(PyObject* self) { rational x = ((PyRational*)self)->r; if (d(x)!=1) { - return PyString_FromFormat( + return PyUString_FromFormat( "%ld/%ld",(long)x.n,(long)d(x)); } else { - return PyString_FromFormat( + return PyUString_FromFormat( "%ld",(long)x.n); } } diff --git a/numpy/core/src/umath/matmul.c.src b/numpy/core/src/umath/matmul.c.src index 480c0c72f..b5204eca5 100644 --- a/numpy/core/src/umath/matmul.c.src +++ b/numpy/core/src/umath/matmul.c.src @@ -196,16 +196,14 @@ NPY_NO_EXPORT void * FLOAT, DOUBLE, HALF, * CFLOAT, CDOUBLE, CLONGDOUBLE, * UBYTE, USHORT, UINT, ULONG, ULONGLONG, - * BYTE, SHORT, INT, LONG, LONGLONG, - * BOOL# + * BYTE, SHORT, INT, LONG, LONGLONG# * #typ = npy_longdouble, * npy_float,npy_double,npy_half, * npy_cfloat, npy_cdouble, npy_clongdouble, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, - * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_bool# - * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*11# - * #IS_HALF = 0, 0, 0, 1, 0*14# + * npy_byte, npy_short, npy_int, npy_long, npy_longlong# + * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*10# + * #IS_HALF = 0, 0, 0, 1, 0*13# */ NPY_NO_EXPORT void @@ -266,7 +264,44 @@ NPY_NO_EXPORT void } /**end repeat**/ +NPY_NO_EXPORT void +BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, + void *_ip2, npy_intp is2_n, npy_intp is2_p, + void *_op, npy_intp os_m, npy_intp os_p, + npy_intp dm, npy_intp dn, npy_intp dp) + +{ + npy_intp m, n, p; + npy_intp ib2_p, ob_p; + char *ip1 = (char *)_ip1, *ip2 = (char *)_ip2, *op = (char *)_op; + ib2_p = is2_p * dp; + ob_p = os_p * dp; + + for (m = 0; m < dm; m++) { + for (p = 0; p < dp; p++) { + char *ip1tmp = ip1; + char *ip2tmp = ip2; + *(npy_bool *)op = NPY_FALSE; + for (n = 0; n < dn; n++) { + npy_bool val1 = (*(npy_bool *)ip1tmp); + npy_bool val2 = (*(npy_bool *)ip2tmp); + if (val1 != 0 && val2 != 0) { + *(npy_bool *)op = NPY_TRUE; + break; + } + ip2tmp += is2_n; + ip1tmp += is1_n; + } + op += os_p; + ip2 += is2_p; + } + op -= ob_p; + ip2 -= ib2_p; + ip1 += is1_m; + op += os_m; + } +} NPY_NO_EXPORT void OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c index 8ae2f65e0..4ce8d8ab7 100644 --- a/numpy/core/src/umath/reduction.c +++ b/numpy/core/src/umath/reduction.c @@ -36,7 +36,7 @@ * If 'dtype' isn't NULL, this function steals its reference. 
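The new BOOL_matmul_inner_noblas loop above computes boolean matmul as an OR of pairwise ANDs, short-circuiting as soon as a True is found instead of summing products. A small usage sketch:

    import numpy as np

    a = np.array([[True, False]])
    b = np.array([[False], [True]])
    print(a @ b)    # [[False]] -- no position where both operands are True
    print(a @ ~b)   # [[ True]]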
*/ static PyArrayObject * -allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags, +allocate_reduce_result(PyArrayObject *arr, const npy_bool *axis_flags, PyArray_Descr *dtype, int subok) { npy_intp strides[NPY_MAXDIMS], stride; @@ -84,7 +84,7 @@ allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags, * The return value is a view into 'out'. */ static PyArrayObject * -conform_reduce_result(int ndim, npy_bool *axis_flags, +conform_reduce_result(int ndim, const npy_bool *axis_flags, PyArrayObject *out, int keepdims, const char *funcname, int need_copy) { @@ -251,7 +251,7 @@ PyArray_CreateReduceResult(PyArrayObject *operand, PyArrayObject *out, * Count the number of dimensions selected in 'axis_flags' */ static int -count_axes(int ndim, npy_bool *axis_flags) +count_axes(int ndim, const npy_bool *axis_flags) { int idim; int naxes = 0; @@ -299,7 +299,7 @@ count_axes(int ndim, npy_bool *axis_flags) NPY_NO_EXPORT PyArrayObject * PyArray_InitializeReduceResult( PyArrayObject *result, PyArrayObject *operand, - npy_bool *axis_flags, + const npy_bool *axis_flags, npy_intp *out_skip_first_count, const char *funcname) { npy_intp *strides, *shape, shape_orig[NPY_MAXDIMS]; diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 7aec1ff49..88e5e1f1b 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -1017,7 +1017,7 @@ sse2_sqrt_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n) LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) { op[i] = @scalarf@(ip[i]); } - assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || + assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || npy_is_aligned(&op[i], VECTOR_SIZE_BYTES)); if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) { LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { @@ -1069,7 +1069,7 @@ sse2_@kind@_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n) LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) { op[i] = @scalar@_@type@(ip[i]); } - assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || + assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || npy_is_aligned(&op[i], VECTOR_SIZE_BYTES)); if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) { LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { @@ -1104,7 +1104,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n) /* Order of operations important for MSVC 2015 */ *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i]; } - assert(n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)); + assert((npy_uintp)n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)); if (i + 3 * stride <= n) { /* load the first elements */ @vtype@ c1 = @vpre@_load_@vsuf@((@type@*)&ip[i]); diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 5f9a0f7f4..c36680ed2 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -908,7 +908,7 @@ parse_ufunc_keywords(PyUFuncObject *ufunc, PyObject *kwds, PyObject **kwnames, . 
     typedef int converter(PyObject *, void *);
     while (PyDict_Next(kwds, &pos, &key, &value)) {
-        int i;
+        npy_intp i;
         converter *convert;
         void *output = NULL;
         npy_intp index = locate_key(kwnames, key);
@@ -2297,7 +2297,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes,
  * Returns 0 on success, and -1 on failure
  */
 static int
-_parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis,
+_parse_axis_arg(PyUFuncObject *ufunc, const int core_num_dims[], PyObject *axis,
                 PyArrayObject **op, int broadcast_ndim, int **remap_axis)
 {
     int nop = ufunc->nargs;
     int iop, axis_int;
@@ -2368,7 +2368,7 @@ _parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis,
  */
 static int
 _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
-                   int *op_core_num_dims, npy_uint32 *core_dim_flags,
+                   const int *op_core_num_dims, npy_uint32 *core_dim_flags,
                    npy_intp *core_dim_sizes, int **remap_axis) {
     int i;
     int nin = ufunc->nin;
@@ -4053,14 +4053,14 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
     int *op_axes[3] = {op_axes_arrays[0], op_axes_arrays[1], op_axes_arrays[2]};
     npy_uint32 op_flags[3];
-    int i, idim, ndim, otype_final;
+    int idim, ndim, otype_final;
     int need_outer_iterator = 0;
 
     NpyIter *iter = NULL;
 
     /* The reduceat indices - ind must be validated outside this call */
     npy_intp *reduceat_ind;
-    npy_intp ind_size, red_axis_size;
+    npy_intp i, ind_size, red_axis_size;
     /* The selected inner loop */
     PyUFuncGenericFunction innerloop = NULL;
     void *innerloopdata = NULL;
@@ -4146,7 +4146,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
 #endif
 
     /* Set up the op_axes for the outer loop */
-    for (i = 0, idim = 0; idim < ndim; ++idim) {
+    for (idim = 0; idim < ndim; ++idim) {
         /* Use the i-th iteration dimension to match up ind */
         if (idim == axis) {
             op_axes_arrays[0][idim] = axis;
@@ -4866,7 +4866,7 @@ ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
 NPY_NO_EXPORT int
 PyUFunc_ReplaceLoopBySignature(PyUFuncObject *func,
                                PyUFuncGenericFunction newfunc,
-                               int *signature,
+                               const int *signature,
                                PyUFuncGenericFunction *oldfunc)
 {
     int i, j;
@@ -4921,7 +4921,7 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi
                                      char *types, int ntypes,
                                      int nin, int nout, int identity,
                                      const char *name, const char *doc,
-                                     int unused, const char *signature,
+                                     const int unused, const char *signature,
                                      PyObject *identity_value)
 {
     PyUFuncObject *ufunc;
@@ -5223,7 +5223,7 @@ NPY_NO_EXPORT int
 PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
                             int usertype,
                             PyUFuncGenericFunction function,
-                            int *arg_types,
+                            const int *arg_types,
                             void *data)
 {
     PyArray_Descr *descr;
diff --git a/numpy/core/tests/test__exceptions.py b/numpy/core/tests/test__exceptions.py
new file mode 100644
index 000000000..494b51f34
--- /dev/null
+++ b/numpy/core/tests/test__exceptions.py
@@ -0,0 +1,42 @@
+"""
+Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
+"""
+import numpy as np
+
+_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
+
+class TestArrayMemoryError:
+    def test_str(self):
+        e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
+        str(e)  # not crashing is enough
+
+    # testing these properties is easier than testing the full string repr
+    def test__size_to_string(self):
+        """ Test e._size_to_string """
+        f = _ArrayMemoryError._size_to_string
+        Ki = 1024
+        assert f(0) == '0 bytes'
+        assert f(1) == '1 bytes'
+        assert f(1023) == '1023 bytes'
+        assert f(Ki) == '1.00 KiB'
+        assert f(Ki+1) == '1.00 KiB'
+        assert f(10*Ki) == '10.0 KiB'
+        assert f(int(999.4*Ki)) == '999. KiB'
+        assert f(int(1023.4*Ki)) == '1023. KiB'
+        assert f(int(1023.5*Ki)) == '1.00 MiB'
+        assert f(Ki*Ki) == '1.00 MiB'
+
+        # 1023.9999 MiB should round to 1 GiB
+        assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
+        assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
+        # larger than sys.maxsize, adding larger prefixes isn't going to help
+        # anyway.
+        assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'
+
+    def test__total_size(self):
+        """ Test e._total_size """
+        e = _ArrayMemoryError((1,), np.dtype(np.uint8))
+        assert e._total_size == 1
+
+        e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
+        assert e._total_size == 1024
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 75a794369..702e68e76 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -262,11 +262,6 @@ class TestArray2String(object):
         assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == '[abcabc defdef]')
 
-        # check for backcompat that using FloatFormat works and emits warning
-        with assert_warns(DeprecationWarning):
-            fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False)
-        assert_equal(np.array2string(x, formatter={'float_kind': fmt}),
-                     '[0. 1. 2.]')
 
     def test_structure_format(self):
         dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index f60eab696..d2fbbae5b 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -419,6 +419,31 @@ class TestRecord(object):
         assert_raises(ValueError, np.dtype, {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
 
+    def test_fieldless_views(self):
+        a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
+                               'itemsize':8})
+        assert_raises(ValueError, a.view, np.dtype([]))
+
+        d = np.dtype((np.dtype([]), 10))
+        assert_equal(d.shape, (10,))
+        assert_equal(d.itemsize, 0)
+        assert_equal(d.base, np.dtype([]))
+
+        arr = np.fromiter((() for i in range(10)), [])
+        assert_equal(arr.dtype, np.dtype([]))
+        assert_raises(ValueError, np.frombuffer, b'', dtype=[])
+        assert_equal(np.frombuffer(b'', dtype=[], count=2),
+                     np.empty(2, dtype=[]))
+
+        assert_raises(ValueError, np.dtype, ([], 'f8'))
+        assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
+
+        assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
+                     np.ones(2, dtype=bool))
+
+        assert_equal(np.zeros((1, 2), dtype=[]) == a,
+                     np.ones((1, 2), dtype=bool))
+
 class TestSubarray(object):
     def test_single_subarray(self):
@@ -938,13 +963,6 @@ class TestDtypeAttributes(object):
         new_dtype = np.dtype(dtype.descr)
         assert_equal(new_dtype.itemsize, 16)
 
-    @pytest.mark.parametrize('t', np.typeDict.values())
-    def test_name_builtin(self, t):
-        name = t.__name__
-        if name.endswith('_'):
-            name = name[:-1]
-        assert_equal(np.dtype(t).name, name)
-
     def test_name_dtype_subclass(self):
         # Ticket #4357
         class user_def_subcls(np.void):
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 0a61a74cf..58572f268 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -44,7 +44,7 @@ from numpy.testing import (
     assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
     assert_array_equal, assert_raises_regex, assert_array_almost_equal,
     assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
-    temppath, suppress_warnings, break_cycles, assert_raises_regex,
+    temppath, suppress_warnings, break_cycles,
     )
 from numpy.core.tests._locales import CommaDecimalPointLocale
@@ -497,9 +497,6 @@ class TestArrayConstruction(object):
         assert_(np.ascontiguousarray(d).flags.c_contiguous)
         assert_(np.asfortranarray(d).flags.f_contiguous)
 
-    def test_ragged(self):
-        assert_raises_regex(ValueError, 'ragged',
-                            np.array, [[1], [2, 3]], dtype=int)
 
 class TestAssignment(object):
     def test_assignment_broadcasting(self):
@@ -4590,18 +4587,26 @@ class TestTake(object):
         assert_equal(y, np.array([1, 2, 3]))
 
 class TestLexsort(object):
-    def test_basic(self):
-        a = [1, 2, 1, 3, 1, 5]
-        b = [0, 4, 5, 6, 2, 3]
+    @pytest.mark.parametrize('dtype',[
+        np.uint8, np.uint16, np.uint32, np.uint64,
+        np.int8, np.int16, np.int32, np.int64,
+        np.float16, np.float32, np.float64
+    ])
+    def test_basic(self, dtype):
+        a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype)
+        b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype)
         idx = np.lexsort((b, a))
         expected_idx = np.array([0, 4, 2, 1, 3, 5])
         assert_array_equal(idx, expected_idx)
+        assert_array_equal(a[idx], np.sort(a))
 
-        x = np.vstack((b, a))
-        idx = np.lexsort(x)
-        assert_array_equal(idx, expected_idx)
+    def test_mixed(self):
+        a = np.array([1, 2, 1, 3, 1, 5])
+        b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]')
 
-        assert_array_equal(x[1][idx], np.sort(x[1]))
+        idx = np.lexsort((b, a))
+        expected_idx = np.array([0, 4, 2, 1, 3, 5])
+        assert_array_equal(idx, expected_idx)
 
     def test_datetime(self):
         a = np.array([0,0,0], dtype='datetime64[D]')
@@ -6272,6 +6277,23 @@ class TestMatmul(MatmulCommon):
             with assert_raises(TypeError):
                 b = np.matmul(a, a)
 
+    def test_matmul_bool(self):
+        # gh-14439
+        a = np.array([[1, 0],[1, 1]], dtype=bool)
+        assert np.max(a.view(np.uint8)) == 1
+        b = np.matmul(a, a)
+        # matmul with boolean output should always be 0, 1
+        assert np.max(b.view(np.uint8)) == 1
+
+        rg = np.random.default_rng(np.random.PCG64(43))
+        d = rg.integers(2, size=4*5, dtype=np.int8)
+        d = d.reshape(4, 5) > 0
+        out1 = np.matmul(d, d.reshape(5, 4))
+        out2 = np.dot(d, d.reshape(5, 4))
+        assert_equal(out1, out2)
+
+        c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool))
+        assert not np.any(c)
 
 if sys.version_info[:2] >= (3, 5):
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index c479a0f6d..1358b45e9 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -2583,6 +2583,30 @@ class TestConvolve(object):
 
 class TestArgwhere(object):
+
+    @pytest.mark.parametrize('nd', [0, 1, 2])
+    def test_nd(self, nd):
+        # get an nd array with multiple elements in every dimension
+        x = np.empty((2,)*nd, bool)
+
+        # none
+        x[...] = False
+        assert_equal(np.argwhere(x).shape, (0, nd))
+
+        # only one
+        x[...] = False
+        x.flat[0] = True
+        assert_equal(np.argwhere(x).shape, (1, nd))
+
+        # all but one
+        x[...] = True
+        x.flat[0] = False
+        assert_equal(np.argwhere(x).shape, (x.size - 1, nd))
+
+        # all
+        x[...] = True
+        assert_equal(np.argwhere(x).shape, (x.size, nd))
+
     def test_2D(self):
         x = np.arange(6).reshape((2, 3))
         assert_array_equal(np.argwhere(x > 1),
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index d0ff5578a..387740e35 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -498,3 +498,32 @@ class TestDocStrings(object):
             assert_('int64' in np.int_.__doc__)
         elif np.int64 is np.longlong:
             assert_('int64' in np.longlong.__doc__)
+
+
+class TestScalarTypeNames:
+    # gh-9799
+
+    numeric_types = [
+        np.byte, np.short, np.intc, np.int_, np.longlong,
+        np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong,
+        np.half, np.single, np.double, np.longdouble,
+        np.csingle, np.cdouble, np.clongdouble,
+    ]
+
+    def test_names_are_unique(self):
+        # none of the above may be aliases for each other
+        assert len(set(self.numeric_types)) == len(self.numeric_types)
+
+        # names must be unique
+        names = [t.__name__ for t in self.numeric_types]
+        assert len(set(names)) == len(names)
+
+    @pytest.mark.parametrize('t', numeric_types)
+    def test_names_reflect_attributes(self, t):
+        """ Test that names correspond to where the type is under ``np.`` """
+        assert getattr(np, t.__name__) is t
+
+    @pytest.mark.parametrize('t', numeric_types)
+    def test_names_are_understood_by_dtype(self, t):
+        """ Test the dtype constructor maps names back to the type """
+        assert np.dtype(t.__name__).type is t
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 14413224e..c1b794145 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -444,6 +444,48 @@ class TestRecord(object):
             ]
         arr = np.rec.fromarrays(arrays) # ValueError?
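The TestRecord additions that follow exercise how recarray views interact with plain structured dtypes: viewing as np.recarray upgrades the scalar type to np.record, and that property should survive assigning a plain dtype back to the .dtype attribute. A compact sketch of the baseline behaviour, using an assumed two-field dtype rather than the dtypes from the tests:

    import numpy as np

    dt = np.dtype([('a', np.uint8), ('b', np.uint8)])
    data = np.zeros(3, dt).view(np.recarray)

    # the recarray view re-describes the structured dtype with np.record,
    # while the plain dtype keeps np.void as its scalar type
    assert data.dtype.type is np.record
    assert dt.type is not np.record

    # record arrays expose fields as attributes as well as by key
    assert data.a.shape == (3,)
    assert (data['b'] == data.b).all()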
+    @pytest.mark.parametrize('nfields', [0, 1, 2])
+    def test_assign_dtype_attribute(self, nfields):
+        dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
+        data = np.zeros(3, dt).view(np.recarray)
+
+        # the original and resulting dtypes differ on whether they are records
+        assert data.dtype.type == np.record
+        assert dt.type != np.record
+
+        # ensure that the dtype remains a record even when assigned
+        data.dtype = dt
+        assert data.dtype.type == np.record
+
+    @pytest.mark.parametrize('nfields', [0, 1, 2])
+    def test_nested_fields_are_records(self, nfields):
+        """ Test that nested structured types are treated as records too """
+        dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
+        dt_outer = np.dtype([('inner', dt)])
+
+        data = np.zeros(3, dt_outer).view(np.recarray)
+        assert isinstance(data, np.recarray)
+        assert isinstance(data['inner'], np.recarray)
+
+        data0 = data[0]
+        assert isinstance(data0, np.record)
+        assert isinstance(data0['inner'], np.record)
+
+    def test_nested_dtype_padding(self):
+        """ test that trailing padding is preserved """
+        # construct a dtype with padding at the end
+        dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)])
+        dt_padded_end = dt[['a', 'b']]
+        assert dt_padded_end.itemsize == dt.itemsize
+
+        dt_outer = np.dtype([('inner', dt_padded_end)])
+
+        data = np.zeros(3, dt_outer).view(np.recarray)
+        assert_equal(data['inner'].dtype, dt_padded_end)
+
+        data0 = data[0]
+        assert_equal(data0['inner'].dtype, dt_padded_end)
+
 
 def test_find_duplicate():
     l1 = [1, 2, 3, 4, 5, 6]
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index ca5b82e6f..9dc231deb 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -436,6 +436,32 @@ class TestRegression(object):
 
         assert_raises(KeyError, np.lexsort, BuggySequence())
 
+    def test_lexsort_zerolen_custom_strides(self):
+        # Ticket #14228
+        xs = np.array([], dtype='i8')
+        assert xs.strides == (8,)
+        assert np.lexsort((xs,)).shape[0] == 0  # Works
+
+        xs.strides = (16,)
+        assert np.lexsort((xs,)).shape[0] == 0  # Was: MemoryError
+
+    def test_lexsort_zerolen_custom_strides_2d(self):
+        xs = np.array([], dtype='i8')
+
+        xs.shape = (0, 2)
+        xs.strides = (16, 16)
+        assert np.lexsort((xs,), axis=0).shape[0] == 0
+
+        xs.shape = (2, 0)
+        xs.strides = (16, 16)
+        assert np.lexsort((xs,), axis=0).shape[0] == 2
+
+    def test_lexsort_zerolen_element(self):
+        dt = np.dtype([])  # a void dtype with no fields
+        xs = np.empty(4, dt)
+
+        assert np.lexsort((xs,)).shape[0] == xs.shape[0]
+
     def test_pickle_py2_bytes_encoding(self):
         # Check that arrays and scalars pickled on Py2 are
         # unpickleable on Py3 using encoding='bytes'
@@ -468,7 +494,7 @@ class TestRegression(object):
             result = pickle.loads(data, encoding='bytes')
             assert_equal(result, original)
 
-            if isinstance(result, np.ndarray) and result.dtype.names:
+            if isinstance(result, np.ndarray) and result.dtype.names is not None:
                 for name in result.dtype.names:
                     assert_(isinstance(name, str))
@@ -2475,3 +2501,13 @@ class TestRegression(object):
         t = T()
         #gh-13659, would raise in broadcasting [x=t for x in result]
         np.array([t])
+
+    @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+    @pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8),
+                        reason='overflows on windows, fixed in bpo-16865')
+    def test_to_ctypes(self):
+        #gh-14214
+        arr = np.zeros((2 ** 31 + 1,), 'b')
+        assert arr.size * arr.itemsize > 2 ** 31
+        c_arr = np.ctypeslib.as_ctypes(arr)
+        assert_equal(c_arr._length_, arr.size)
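The test_to_ctypes regression test above needs an array larger than 2**31 bytes to trigger the original failure; the round trip it checks can be seen at a small size without the 2 GiB allocation (the array size below is illustrative only):

    import numpy as np

    arr = np.zeros(16, dtype='b')
    c_arr = np.ctypeslib.as_ctypes(arr)

    # the ctypes array type reports the same length as the ndarray
    assert c_arr._length_ == arr.size

    # both objects share memory, so a write through ctypes shows up in numpy
    c_arr[0] = 7
    assert arr[0] == 7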