Diffstat (limited to 'numpy')
91 files changed, 2168 insertions, 938 deletions
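For context on the numpy/__init__.py hunk further down, which makes `Tester` and `numpy.testing` lazy imports via PEP 562's module-level `__getattr__`: here is a minimal standalone sketch of that pattern. The `mypkg` and `heavy` names are hypothetical placeholders, not part of this commit::

    # mypkg/__init__.py -- sketch of PEP 562 lazy attribute access (Python >= 3.7).
    # `heavy` stands in for an expensive-to-import submodule such as numpy.testing.
    __all__ = ['fast']

    def fast():
        return 42

    def __getattr__(attr):
        # Only called when normal attribute lookup fails, so `import mypkg`
        # stays cheap and the costly import happens on first access of
        # `mypkg.heavy`.
        if attr == 'heavy':
            import mypkg.heavy as heavy
            return heavy
        raise AttributeError("module {!r} has no attribute {!r}"
                             .format(__name__, attr))

    def __dir__():
        # Advertise the lazy attribute so dir(mypkg) and tab completion work.
        return __all__ + ['heavy']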
diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd new file mode 100644 index 000000000..23bd22e36 --- /dev/null +++ b/numpy/__init__.pxd @@ -0,0 +1,978 @@ +# NumPy static imports for Cython +# +# If any of the PyArray_* functions are called, import_array must be +# called first. +# +# This also defines backwards-compatibility buffer acquisition +# code for use in Python 2.x (or Python <= 2.5 when NumPy starts +# implementing PEP-3118 directly). +# +# Because of laziness, the format string of the buffer is statically +# allocated. Increase the size if this is not enough, or submit a +# patch to do this properly. +# +# Author: Dag Sverre Seljebotn +# + +DEF _buffer_format_string_len = 255 + +cimport cpython.buffer as pybuf +from cpython.ref cimport Py_INCREF +from cpython.mem cimport PyObject_Malloc, PyObject_Free +from cpython.object cimport PyObject, PyTypeObject +from cpython.buffer cimport PyObject_GetBuffer +from cpython.type cimport type +cimport libc.stdio as stdio + +cdef extern from "Python.h": + ctypedef int Py_intptr_t + +cdef extern from "numpy/arrayobject.h": + ctypedef Py_intptr_t npy_intp + ctypedef size_t npy_uintp + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_INT128 + NPY_INT256 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_UINT128 + NPY_UINT256 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_FLOAT256 + NPY_COMPLEX32 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + NPY_COMPLEX512 + + NPY_INTP + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + # DEPRECATED since NumPy 1.7 ! Do not use in new code! + NPY_C_CONTIGUOUS + NPY_F_CONTIGUOUS + NPY_CONTIGUOUS + NPY_FORTRAN + NPY_OWNDATA + NPY_FORCECAST + NPY_ENSURECOPY + NPY_ENSUREARRAY + NPY_ELEMENTSTRIDES + NPY_ALIGNED + NPY_NOTSWAPPED + NPY_WRITEABLE + NPY_UPDATEIFCOPY + NPY_ARR_HAS_DESCR + + NPY_BEHAVED + NPY_BEHAVED_NS + NPY_CARRAY + NPY_CARRAY_RO + NPY_FARRAY + NPY_FARRAY_RO + NPY_DEFAULT + + NPY_IN_ARRAY + NPY_OUT_ARRAY + NPY_INOUT_ARRAY + NPY_IN_FARRAY + NPY_OUT_FARRAY + NPY_INOUT_FARRAY + + NPY_UPDATE_ALL + + enum: + # Added in NumPy 1.7 to replace the deprecated enums above. 
+ NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_UPDATEIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS + + npy_intp NPY_MAX_ELSIZE + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef char flags + cdef int type_num + cdef int itemsize "elsize" + cdef int alignment + cdef dict fields + cdef tuple names + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. + cdef PyArray_ArrayDescr* subarray + + ctypedef extern class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + cdef int numiter + cdef npy_intp size, index + cdef int nd + cdef npy_intp *dimensions + cdef void **iters + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + cdef: + # Only taking a few of the most commonly used and stable fields. + # One should use PyArray_* macros instead to access the C fields. + char *data + int ndim "nd" + npy_intp *shape "dimensions" + npy_intp *strides + dtype descr # deprecated since NumPy 1.7 ! + PyObject* base + + # Note: This syntax (function definition in pxd files) is an + # experimental exception made for __getbuffer__ and __releasebuffer__ + # -- the details of this may change. + def __getbuffer__(ndarray self, Py_buffer* info, int flags): + PyObject_GetBuffer(<object>self, info, flags); + + def __releasebuffer__(ndarray self, Py_buffer* info): + # We should call a possible tp_bufferrelease(self, info) but no + # interface to that is exposed by cython or python. 
And currently + # the function is NULL in numpy, we rely on refcounting to release + # info when self is collected + pass + + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + ctypedef signed long long npy_int96 + ctypedef signed long long npy_int128 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + ctypedef unsigned long long npy_uint96 + ctypedef unsigned long long npy_uint128 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + double real + double imag + + ctypedef struct npy_cdouble: + double real + double imag + + ctypedef struct npy_clongdouble: + long double real + long double imag + + ctypedef struct npy_complex64: + float real + float imag + + ctypedef struct npy_complex128: + double real + double imag + + ctypedef struct npy_complex160: + long double real + long double imag + + ctypedef struct npy_complex192: + long double real + long double imag + + ctypedef struct npy_complex256: + long double real + long double imag + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + int _import_array() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) + bint PyArray_ISCONTIGUOUS(ndarray m) + bint PyArray_ISWRITEABLE(ndarray m) + bint PyArray_ISALIGNED(ndarray m) + + int PyArray_NDIM(ndarray) + bint PyArray_ISONESEGMENT(ndarray) + bint PyArray_ISFORTRAN(ndarray) + int PyArray_FORTRANIF(ndarray) + + void* PyArray_DATA(ndarray) + char* PyArray_BYTES(ndarray) + npy_intp* PyArray_DIMS(ndarray) + npy_intp* PyArray_STRIDES(ndarray) + npy_intp PyArray_DIM(ndarray, size_t) + npy_intp PyArray_STRIDE(ndarray, size_t) + + PyObject *PyArray_BASE(ndarray) # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) # returns borrowed reference to dtype! 
+ int PyArray_FLAGS(ndarray) + npy_intp PyArray_ITEMSIZE(ndarray) + int PyArray_TYPE(ndarray arr) + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) + + bint PyTypeNum_ISBOOL(int) + bint PyTypeNum_ISUNSIGNED(int) + bint PyTypeNum_ISSIGNED(int) + bint PyTypeNum_ISINTEGER(int) + bint PyTypeNum_ISFLOAT(int) + bint PyTypeNum_ISNUMBER(int) + bint PyTypeNum_ISSTRING(int) + bint PyTypeNum_ISCOMPLEX(int) + bint PyTypeNum_ISPYTHON(int) + bint PyTypeNum_ISFLEXIBLE(int) + bint PyTypeNum_ISUSERDEF(int) + bint PyTypeNum_ISEXTENDED(int) + bint PyTypeNum_ISOBJECT(int) + + bint PyDataType_ISBOOL(dtype) + bint PyDataType_ISUNSIGNED(dtype) + bint PyDataType_ISSIGNED(dtype) + bint PyDataType_ISINTEGER(dtype) + bint PyDataType_ISFLOAT(dtype) + bint PyDataType_ISNUMBER(dtype) + bint PyDataType_ISSTRING(dtype) + bint PyDataType_ISCOMPLEX(dtype) + bint PyDataType_ISPYTHON(dtype) + bint PyDataType_ISFLEXIBLE(dtype) + bint PyDataType_ISUSERDEF(dtype) + bint PyDataType_ISEXTENDED(dtype) + bint PyDataType_ISOBJECT(dtype) + bint PyDataType_HASFIELDS(dtype) + bint PyDataType_HASSUBARRAY(dtype) + + bint PyArray_ISBOOL(ndarray) + bint PyArray_ISUNSIGNED(ndarray) + bint PyArray_ISSIGNED(ndarray) + bint PyArray_ISINTEGER(ndarray) + bint PyArray_ISFLOAT(ndarray) + bint PyArray_ISNUMBER(ndarray) + bint PyArray_ISSTRING(ndarray) + bint PyArray_ISCOMPLEX(ndarray) + bint PyArray_ISPYTHON(ndarray) + bint PyArray_ISFLEXIBLE(ndarray) + bint PyArray_ISUSERDEF(ndarray) + bint PyArray_ISEXTENDED(ndarray) + bint PyArray_ISOBJECT(ndarray) + bint PyArray_HASFIELDS(ndarray) + + bint PyArray_ISVARIABLE(ndarray) + + bint PyArray_SAFEALIGNEDCOPY(ndarray) + bint PyArray_ISNBO(char) # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) + bint PyArray_ISBYTESWAPPED(ndarray) + + bint PyArray_FLAGSWAP(ndarray, int) + + bint PyArray_ISCARRAY(ndarray) + bint PyArray_ISCARRAY_RO(ndarray) + bint PyArray_ISFARRAY(ndarray) + bint PyArray_ISFARRAY_RO(ndarray) + bint PyArray_ISBEHAVED(ndarray) + bint PyArray_ISBEHAVED_RO(ndarray) + + + bint PyDataType_ISNOTSWAPPED(dtype) + bint PyDataType_ISBYTESWAPPED(dtype) + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) + npy_intp PyArray_SIZE(ndarray) + npy_intp PyArray_NBYTES(ndarray) + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(object, int val) + npy_intp PyArray_REFCOUNT(object) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, 
ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) + + void PyArray_XDECREF_ERR(ndarray) + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. 
+ int PyArray_SetNumericOps (object) + object PyArray_GetNumericOps () + int PyArray_INCREF (ndarray) + int PyArray_XDECREF (ndarray) + void PyArray_SetStringFunction (object, int) + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CastTo (ndarray, ndarray) + int PyArray_CastAnyTo (ndarray, ndarray) + int PyArray_CanCastSafely (int, int) + npy_bool PyArray_CanCastTo (dtype, dtype) + int PyArray_ObjectType (object, int) + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + object PyArray_ScalarFromObject (object) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + object PyArray_FromDims (int, int *, int) + #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_MoveInto (ndarray, ndarray) + int PyArray_CopyInto (ndarray, ndarray) + int PyArray_CopyAnyInto (ndarray, ndarray) + int PyArray_CopyObject (ndarray, object) + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) + int PyArray_Dump (object, object, int) + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) 
+ + int PyArray_PyIntAsInt (object) + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) + void PyArray_FillObjectArray (ndarray, object) + int PyArray_FillWithScalar (ndarray, object) + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + object PyArray_NewFlagsObject (object) + npy_bool PyArray_CanCastScalar (type, type) + #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t) + int PyArray_RemoveSmallest (broadcast) + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) + void PyArray_Item_XDECREF (char *, dtype) + object PyArray_FieldNames (object) + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + #int PyArray_As1D (object*, char **, int *, int) + #int PyArray_As2D (object*, char ***, int *, int *, int) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + 
object PyArray_CopyAndTranspose (object)
+    object PyArray_Correlate (object, object, int)
+    int PyArray_TypestrConvert (int, int)
+    #int PyArray_DescrConverter (object, dtype*)
+    #int PyArray_DescrConverter2 (object, dtype*)
+    int PyArray_IntpConverter (object, PyArray_Dims *)
+    #int PyArray_BufferConverter (object, chunk)
+    int PyArray_AxisConverter (object, int *)
+    int PyArray_BoolConverter (object, npy_bool *)
+    int PyArray_ByteorderConverter (object, char *)
+    int PyArray_OrderConverter (object, NPY_ORDER *)
+    unsigned char PyArray_EquivTypes (dtype, dtype)
+    #object PyArray_Zeros (int, npy_intp *, dtype, int)
+    #object PyArray_Empty (int, npy_intp *, dtype, int)
+    object PyArray_Where (object, object, object)
+    object PyArray_Arange (double, double, double, int)
+    #object PyArray_ArangeObj (object, object, object, dtype)
+    int PyArray_SortkindConverter (object, NPY_SORTKIND *)
+    object PyArray_LexSort (object, int)
+    object PyArray_Round (ndarray, int, ndarray)
+    unsigned char PyArray_EquivTypenums (int, int)
+    int PyArray_RegisterDataType (dtype)
+    int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *)
+    int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND)
+    #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
+    object PyArray_IntTupleFromIntp (int, npy_intp *)
+    int PyArray_TypeNumFromName (char *)
+    int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *)
+    #int PyArray_OutputConverter (object, ndarray*)
+    object PyArray_BroadcastToShape (object, npy_intp *, int)
+    void _PyArray_SigintHandler (int)
+    void* _PyArray_GetSigintBuf ()
+    #int PyArray_DescrAlignConverter (object, dtype*)
+    #int PyArray_DescrAlignConverter2 (object, dtype*)
+    int PyArray_SearchsideConverter (object, void *)
+    object PyArray_CheckAxis (ndarray, int *, int)
+    npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
+    int PyArray_CompareString (char *, char *, size_t)
+    int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead.
+
+
+# Typedefs that match the runtime dtype objects in
+# the numpy module.
+
+# The ones that are commented out need an IFDEF function
+# in Cython to enable them only on the right systems.
+ +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t +#ctypedef npy_int96 int96_t +#ctypedef npy_int128 int128_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t +#ctypedef npy_uint96 uint96_t +#ctypedef npy_uint128 uint128_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +# The int types are mapped a bit surprising -- +# numpy.int corresponds to 'l' and numpy.long to 'q' +ctypedef npy_long int_t +ctypedef npy_longlong long_t +ctypedef npy_longlong longlong_t + +ctypedef npy_ulong uint_t +ctypedef npy_ulonglong ulong_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef npy_cfloat cfloat_t +ctypedef npy_cdouble cdouble_t +ctypedef npy_clongdouble clongdouble_t + +ctypedef npy_cdouble complex_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, <void*>a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, <void*>a, <void*>b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return <tuple>d.subarray.shape + else: + return () + +cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: + # Recursive utility function used in __getbuffer__ to get format + # string. The new location in the format string is returned. + + cdef dtype child + cdef int endian_detector = 1 + cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) + cdef tuple fields + + for childname in descr.names: + fields = descr.fields[childname] + child, new_offset = fields + + if (end - f) - <int>(new_offset - offset[0]) < 15: + raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + + if ((child.byteorder == c'>' and little_endian) or + (child.byteorder == c'<' and not little_endian)): + raise ValueError(u"Non-native byte order not supported") + # One could encode it in the format string and have Cython + # complain instead, BUT: < and > in format strings also imply + # standardized sizes for datatypes, and we rely on native in + # order to avoid reencoding data types based on their size. + # + # A proper PEP 3118 exporter for other clients than Cython + # must deal properly with this! 
+ + # Output padding bytes + while offset[0] < new_offset: + f[0] = 120 # "x"; pad byte + f += 1 + offset[0] += 1 + + offset[0] += child.itemsize + + if not PyDataType_HASFIELDS(child): + t = child.type_num + if end - f < 5: + raise RuntimeError(u"Format string allocated too short.") + + # Until ticket #99 is fixed, use integers to avoid warnings + if t == NPY_BYTE: f[0] = 98 #"b" + elif t == NPY_UBYTE: f[0] = 66 #"B" + elif t == NPY_SHORT: f[0] = 104 #"h" + elif t == NPY_USHORT: f[0] = 72 #"H" + elif t == NPY_INT: f[0] = 105 #"i" + elif t == NPY_UINT: f[0] = 73 #"I" + elif t == NPY_LONG: f[0] = 108 #"l" + elif t == NPY_ULONG: f[0] = 76 #"L" + elif t == NPY_LONGLONG: f[0] = 113 #"q" + elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + elif t == NPY_FLOAT: f[0] = 102 #"f" + elif t == NPY_DOUBLE: f[0] = 100 #"d" + elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + elif t == NPY_OBJECT: f[0] = 79 #"O" + else: + raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + f += 1 + else: + # Cython ignores struct boundary information ("T{...}"), + # so don't output it + f = _util_dtypestring(child, f, end, offset) + return f + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef extern class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + UFUNC_ERR_IGNORE + UFUNC_ERR_WARN + UFUNC_ERR_RAISE + UFUNC_ERR_CALL + UFUNC_ERR_PRINT + UFUNC_ERR_LOG + UFUNC_MASK_DIVIDEBYZERO + UFUNC_MASK_OVERFLOW + UFUNC_MASK_UNDERFLOW + UFUNC_MASK_INVALID + UFUNC_SHIFT_DIVIDEBYZERO + UFUNC_SHIFT_OVERFLOW + UFUNC_SHIFT_UNDERFLOW + UFUNC_SHIFT_INVALID + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + UFUNC_ERR_DEFAULT + UFUNC_ERR_DEFAULT2 + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) + int PyUFunc_GenericFunction \ + (ufunc, PyObject *, PyObject *, PyArrayObject **) + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void 
PyUFunc_FF_F \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_GG_G \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_OO_O \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_O_O_method \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_OO_O_method \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_On_Om \
+         (char **, npy_intp *, npy_intp *, void *)
+    int PyUFunc_GetPyValues \
+        (char *, int *, int *, PyObject **)
+    int PyUFunc_checkfperr \
+        (int, PyObject *, int *)
+    void PyUFunc_clearfperr()
+    int PyUFunc_getfperr()
+    int PyUFunc_handlefperr \
+        (int, PyObject *, int, int *)
+    int PyUFunc_ReplaceLoopBySignature \
+        (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+    object PyUFunc_FromFuncAndDataAndSignature \
+        (PyUFuncGenericFunction *, void **, char *, int, int, int,
+         int, char *, char *, int, char *)
+
+    int _import_umath() except -1
+
+cdef inline void set_array_base(ndarray arr, object base):
+    Py_INCREF(base) # important to do this before stealing the reference below!
+    PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+    base = PyArray_BASE(arr)
+    if base is NULL:
+        return None
+    return <object>base
+
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+    try:
+        _import_array()
+    except Exception:
+        raise ImportError("numpy.core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy.core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy.core.umath failed to import")
diff --git a/numpy/__init__.py b/numpy/__init__.py
index ba88c733f..07d67945c 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -166,6 +166,8 @@ else:
     # now that numpy modules are imported, can initialize limits
     core.getlimits._register_known_types()
 
+    __all__.extend(['bool', 'int', 'float', 'complex', 'object', 'unicode',
+                    'str'])
     __all__.extend(['__version__', 'show_config'])
     __all__.extend(core.__all__)
     __all__.extend(_mat.__all__)
@@ -182,9 +184,34 @@ else:
     oldnumeric = 'removed'
     numarray = 'removed'
 
-    # We don't actually use this ourselves anymore, but I'm not 100% sure that
-    # no-one else in the world is using it (though I hope not)
-    from .testing import Tester
+    if sys.version_info[:2] >= (3, 7):
+        # Importing Tester requires importing all of unittest, which is not
+        # a cheap import. Since it is mainly used in test suites, we lazily
+        # import it here to save ~10 ms of import time for most users.
+        #
+        # The previous way Tester was imported also had a side effect of adding
+        # the full `numpy.testing` namespace.
+        #
+        # Module-level getattr is only supported in 3.7 onwards:
+        # https://www.python.org/dev/peps/pep-0562/
+        def __getattr__(attr):
+            if attr == 'testing':
+                import numpy.testing as testing
+                return testing
+            elif attr == 'Tester':
+                from .testing import Tester
+                return Tester
+            else:
+                raise AttributeError("module {!r} has no attribute "
+                                     "{!r}".format(__name__, attr))
+
+        def __dir__():
+            return __all__ + ['Tester', 'testing']
+
+    else:
+        # We don't actually use this ourselves anymore, but I'm not 100% sure
+        # that no-one else in the world is using it (though I hope not)
+        from .testing import Tester
 
 # Pytest testing
 from numpy._pytesttester import PytestTester
diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index
8d1a3811c..b25224c20 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -48,10 +48,9 @@ class PytestTester(object): """ Pytest test runner. - This class is made available in ``numpy.testing``, and a test function - is typically added to a package's __init__.py like so:: + A test function is typically added to a package's __init__.py like so:: - from numpy.testing import PytestTester + from numpy._pytesttester import PytestTester test = PytestTester(__name__).test del PytestTester @@ -68,6 +67,12 @@ class PytestTester(object): module_name : module name The name of the module to test. + Notes + ----- + Unlike the previous ``nose``-based implementation, this class is not + publicly exposed as it performs some ``numpy``-specific warning + suppression. + """ def __init__(self, module_name): self.module_name = module_name diff --git a/numpy/core/_dtype.py b/numpy/core/_dtype.py index 092b848dc..df1ff180e 100644 --- a/numpy/core/_dtype.py +++ b/numpy/core/_dtype.py @@ -316,26 +316,39 @@ def _subarray_str(dtype): ) +def _name_includes_bit_suffix(dtype): + if dtype.type == np.object_: + # pointer size varies by system, best to omit it + return False + elif dtype.type == np.bool_: + # implied + return False + elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype): + # unspecified + return False + else: + return True + + def _name_get(dtype): - # provides dtype.name.__get__ + # provides dtype.name.__get__, documented as returning a "bit name" if dtype.isbuiltin == 2: # user dtypes don't promise to do anything special return dtype.type.__name__ - # Builtin classes are documented as returning a "bit name" - name = dtype.type.__name__ - - # handle bool_, str_, etc - if name[-1] == '_': - name = name[:-1] + if issubclass(dtype.type, np.void): + # historically, void subclasses preserve their name, eg `record64` + name = dtype.type.__name__ + else: + name = _kind_name(dtype) - # append bit counts to str, unicode, and void - if np.issubdtype(dtype, np.flexible) and not _isunsized(dtype): + # append bit counts + if _name_includes_bit_suffix(dtype): name += "{}".format(dtype.itemsize * 8) # append metadata to datetimes - elif dtype.type in (np.datetime64, np.timedelta64): + if dtype.type in (np.datetime64, np.timedelta64): name += _datetime_metadata_str(dtype) return name diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py index a1af7a78d..88a45561f 100644 --- a/numpy/core/_exceptions.py +++ b/numpy/core/_exceptions.py @@ -27,6 +27,7 @@ def _display_as_base(cls): assert issubclass(cls, Exception) cls.__name__ = cls.__base__.__name__ cls.__qualname__ = cls.__base__.__qualname__ + set_module(cls.__base__.__module__)(cls) return cls @@ -146,6 +147,54 @@ class _ArrayMemoryError(MemoryError): self.shape = shape self.dtype = dtype - def __str__(self): - return "Unable to allocate array with shape {} and data type {}".format(self.shape, self.dtype) + @property + def _total_size(self): + num_bytes = self.dtype.itemsize + for dim in self.shape: + num_bytes *= dim + return num_bytes + + @staticmethod + def _size_to_string(num_bytes): + """ Convert a number of bytes into a binary size string """ + import math + + # https://en.wikipedia.org/wiki/Binary_prefix + LOG2_STEP = 10 + STEP = 1024 + units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] + + unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP + unit_val = 1 << (unit_i * LOG2_STEP) + n_units = num_bytes / unit_val + del unit_val + + # ensure we pick a unit that is correct after rounding + if round(n_units) == STEP: + 
unit_i += 1 + n_units /= STEP + + # deal with sizes so large that we don't have units for them + if unit_i >= len(units): + new_unit_i = len(units) - 1 + n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP) + unit_i = new_unit_i + + unit_name = units[unit_i] + # format with a sensible number of digits + if unit_i == 0: + # no decimal point on bytes + return '{:.0f} {}'.format(n_units, unit_name) + elif round(n_units) < 1000: + # 3 significant figures, if none are dropped to the left of the . + return '{:#.3g} {}'.format(n_units, unit_name) + else: + # just give all the digits otherwise + return '{:#.0f} {}'.format(n_units, unit_name) + def __str__(self): + size_str = self._size_to_string(self._total_size) + return ( + "Unable to allocate {} for an array with shape {} and data type {}" + .format(size_str, self.shape, self.dtype) + ) diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index c70718cb6..b0ea603e1 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -459,7 +459,7 @@ def _getfield_is_safe(oldtype, newtype, offset): if newtype.hasobject or oldtype.hasobject: if offset == 0 and newtype == oldtype: return - if oldtype.names: + if oldtype.names is not None: for name in oldtype.names: if (oldtype.fields[name][1] == offset and oldtype.fields[name][0] == newtype): diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index ecd05d3ac..b1310a737 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -194,7 +194,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, See Also -------- - get_printoptions, set_string_function, array2string + get_printoptions, printoptions, set_string_function, array2string Notes ----- @@ -285,7 +285,7 @@ def get_printoptions(): See Also -------- - set_printoptions, set_string_function + set_printoptions, printoptions, set_string_function """ return _format_options.copy() @@ -685,7 +685,7 @@ def array2string(a, max_line_width=None, precision=None, if style is np._NoValue: style = repr - if a.shape == () and not a.dtype.names: + if a.shape == () and a.dtype.names is None: return style(a.item()) elif style is not np._NoValue: # Deprecation 11-9-2017 v1.14 @@ -984,20 +984,6 @@ class FloatingFormat(object): pad_left=self.pad_left, pad_right=self.pad_right) -# for back-compatibility, we keep the classes for each float type too -class FloatFormat(FloatingFormat): - def __init__(self, *args, **kwargs): - warnings.warn("FloatFormat has been replaced by FloatingFormat", - DeprecationWarning, stacklevel=2) - super(FloatFormat, self).__init__(*args, **kwargs) - - -class LongFloatFormat(FloatingFormat): - def __init__(self, *args, **kwargs): - warnings.warn("LongFloatFormat has been replaced by FloatingFormat", - DeprecationWarning, stacklevel=2) - super(LongFloatFormat, self).__init__(*args, **kwargs) - @set_module('numpy') def format_float_scientific(x, precision=None, unique=True, trim='k', @@ -1196,21 +1182,6 @@ class ComplexFloatingFormat(object): return r + i -# for back-compatibility, we keep the classes for each complex type too -class ComplexFormat(ComplexFloatingFormat): - def __init__(self, *args, **kwargs): - warnings.warn( - "ComplexFormat has been replaced by ComplexFloatingFormat", - DeprecationWarning, stacklevel=2) - super(ComplexFormat, self).__init__(*args, **kwargs) - -class LongComplexFormat(ComplexFloatingFormat): - def __init__(self, *args, **kwargs): - warnings.warn( - "LongComplexFormat has been replaced by ComplexFloatingFormat", - DeprecationWarning, stacklevel=2) - 
super(LongComplexFormat, self).__init__(*args, **kwargs) - class _TimelikeFormat(object): def __init__(self, data): @@ -1321,16 +1292,6 @@ class StructuredVoidFormat(object): return "({})".format(", ".join(str_fields)) -# for backwards compatibility -class StructureFormat(StructuredVoidFormat): - def __init__(self, *args, **kwargs): - # NumPy 1.14, 2018-02-14 - warnings.warn( - "StructureFormat has been replaced by StructuredVoidFormat", - DeprecationWarning, stacklevel=2) - super(StructureFormat, self).__init__(*args, **kwargs) - - def _void_scalar_repr(x): """ Implements the repr for structured-void scalars. It is called from the diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 923c34425..7336e5e13 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -259,7 +259,8 @@ def find_functions(filename, tag='API'): elif state == STATE_ARGS: if line.startswith('{'): # finished - fargs_str = ' '.join(function_args).rstrip(' )') + # remove any white space and the closing bracket: + fargs_str = ' '.join(function_args).rstrip()[:-1].rstrip() fargs = split_arguments(fargs_str) f = Function(function_name, return_type, fargs, '\n'.join(doclist)) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index bde37fca3..422ebe2de 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -3125,10 +3125,35 @@ def around(a, decimals=0, out=None): ----- For values exactly halfway between rounded decimal values, NumPy rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, - -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due - to the inexact representation of decimal fractions in the IEEE - floating point standard [1]_ and errors introduced when scaling - by powers of ten. + -0.5 and 0.5 round to 0.0, etc. + + ``np.around`` uses a fast but sometimes inexact algorithm to round + floating-point datatypes. For positive `decimals` it is equivalent to + ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which is + inexact for large floating-point values or large values of `decimals` due + the inexact representation of decimal fractions in the IEEE floating point + standard [1]_ and errors introduced when scaling by powers of ten. For + instance, note the extra "1" in the following: + + >>> np.round(56294995342131.5, 3) + 56294995342131.51 + + If your goal is to print such values with a fixed number of decimals, it is + preferable to use numpy's float printing routines to limit the number of + printed decimals: + + >>> np.format_float_positional(56294995342131.5, precision=3) + '56294995342131.5' + + The float printing routines use an accurate but much more computationally + demanding algorithm to compute the number of digits after the decimal + point. + + Alternatively, Python's builtin `round` function uses a more accurate + but slower algorithm for 64-bit floating point values: + + >>> round(56294995342131.5, 3) + 56294995342131.5 References ---------- @@ -3419,7 +3444,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): instead of a single axis or all the axes as before. dtype : data-type, optional Type to use in computing the variance. For arrays of integer type - the default is `float32`; for arrays of float types it is the same as + the default is `float64`; for arrays of float types it is the same as the array type. out : ndarray, optional Alternate output array in which to place the result. 
It must have diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 1221aeece..ad98d562b 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -1095,7 +1095,8 @@ typedef struct PyArrayIterObject_tag PyArrayIterObject; * type of the function which translates a set of coordinates to a * pointer to the data */ -typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); +typedef char* (*npy_iter_get_dataptr_t)( + PyArrayIterObject* iter, const npy_intp*); struct PyArrayIterObject_tag { PyObject_HEAD @@ -1695,7 +1696,8 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) #define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) #define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) #define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) -#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0) +#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \ + !PyDataType_HASFIELDS(dtype)) #define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) #define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index bbcd58abb..c395b1348 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -26,6 +26,7 @@ if sys.version_info[0] < 3: from . import overrides from . import umath +from . import shape_base from .overrides import set_module from .umath import (multiply, invert, sin, PINF, NAN) from . import numerictypes @@ -48,14 +49,6 @@ array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') -def loads(*args, **kwargs): - # NumPy 1.15.0, 2017-12-10 - warnings.warn( - "np.core.numeric.loads is deprecated, use pickle.loads instead", - DeprecationWarning, stacklevel=2) - return pickle.loads(*args, **kwargs) - - __all__ = [ 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', @@ -66,7 +59,7 @@ __all__ = [ 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', - 'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', + 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask', 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', @@ -553,8 +546,10 @@ def argwhere(a): Returns ------- - index_array : ndarray + index_array : (N, a.ndim) ndarray Indices of elements that are non-zero. Indices are grouped by element. + This array will have shape ``(N, a.ndim)`` where ``N`` is the number of + non-zero items. See Also -------- @@ -562,7 +557,8 @@ def argwhere(a): Notes ----- - ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``. + ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, + but produces a result of the correct shape for a 0D array. The output of ``argwhere`` is not suitable for indexing arrays. For this purpose use ``nonzero(a)`` instead. 
@@ -580,6 +576,11 @@ def argwhere(a): [1, 2]]) """ + # nonzero does not behave well on 0d, so promote to 1d + if np.ndim(a) == 0: + a = shape_base.atleast_1d(a) + # then remove the added dimension + return argwhere(a)[:,:0] return transpose(nonzero(a)) @@ -2028,30 +2029,6 @@ def base_repr(number, base=2, padding=0): return ''.join(reversed(res or '0')) -def load(file): - """ - Wrapper around cPickle.load which accepts either a file-like object or - a filename. - - Note that the NumPy binary format is not based on pickle/cPickle anymore. - For details on the preferred way of loading and saving files, see `load` - and `save`. - - See Also - -------- - load, save - - """ - # NumPy 1.15.0, 2017-12-10 - warnings.warn( - "np.core.numeric.load is deprecated, use pickle.load instead", - DeprecationWarning, stacklevel=2) - if isinstance(file, type("")): - with open(file, "rb") as file_pointer: - return pickle.load(file_pointer) - return pickle.load(file) - - # These are all essentially abbreviations # These might wind up in a special abbreviations module diff --git a/numpy/core/records.py b/numpy/core/records.py index 0576005e7..a1439f9df 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -268,8 +268,8 @@ class record(nt.void): except AttributeError: #happens if field is Object type return obj - if dt.fields: - return obj.view((self.__class__, obj.dtype.fields)) + if dt.names is not None: + return obj.view((self.__class__, obj.dtype)) return obj else: raise AttributeError("'record' object has no " @@ -293,8 +293,8 @@ class record(nt.void): obj = nt.void.__getitem__(self, indx) # copy behavior of record.__getattribute__, - if isinstance(obj, nt.void) and obj.dtype.fields: - return obj.view((self.__class__, obj.dtype.fields)) + if isinstance(obj, nt.void) and obj.dtype.names is not None: + return obj.view((self.__class__, obj.dtype)) else: # return a single element return obj @@ -444,7 +444,7 @@ class recarray(ndarray): return self def __array_finalize__(self, obj): - if self.dtype.type is not record and self.dtype.fields: + if self.dtype.type is not record and self.dtype.names is not None: # if self.dtype is not np.record, invoke __setattr__ which will # convert it to a record if it is a void dtype. self.dtype = self.dtype @@ -472,7 +472,7 @@ class recarray(ndarray): # with void type convert it to the same dtype.type (eg to preserve # numpy.record type if present), since nested structured fields do not # inherit type. Don't do this for non-void structures though. 
- if obj.dtype.fields: + if obj.dtype.names is not None: if issubclass(obj.dtype.type, nt.void): return obj.view(dtype=(self.dtype.type, obj.dtype)) return obj @@ -487,7 +487,7 @@ class recarray(ndarray): # Automatically convert (void) structured types to records # (but not non-void structures, subarrays, or non-structured voids) - if attr == 'dtype' and issubclass(val.type, nt.void) and val.fields: + if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None: val = sb.dtype((record, val)) newattr = attr not in self.__dict__ @@ -521,7 +521,7 @@ class recarray(ndarray): # copy behavior of getattr, except that here # we might also be returning a single element if isinstance(obj, ndarray): - if obj.dtype.fields: + if obj.dtype.names is not None: obj = obj.view(type(self)) if issubclass(obj.dtype.type, nt.void): return obj.view(dtype=(self.dtype.type, obj.dtype)) @@ -577,7 +577,7 @@ class recarray(ndarray): if val is None: obj = self.getfield(*res) - if obj.dtype.fields: + if obj.dtype.names is not None: return obj return obj.view(ndarray) else: diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 338502791..5ac7752cc 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -464,7 +464,7 @@ def configuration(parent_package='',top_path=None): moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1)) # Py3K check - if sys.version_info[0] == 3: + if sys.version_info[0] >= 3: moredefs.append(('NPY_PY3K', 1)) # Generate the config.h file from moredefs diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 710f64827..d7e769e62 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -9,8 +9,9 @@ import warnings from . import numeric as _nx from . import overrides -from .numeric import array, asanyarray, newaxis +from ._asarray import array, asanyarray from .multiarray import normalize_axis_index +from . import fromnumeric as _from_nx array_function_dispatch = functools.partial( @@ -123,7 +124,7 @@ def atleast_2d(*arys): if ary.ndim == 0: result = ary.reshape(1, 1) elif ary.ndim == 1: - result = ary[newaxis, :] + result = ary[_nx.newaxis, :] else: result = ary res.append(result) @@ -193,9 +194,9 @@ def atleast_3d(*arys): if ary.ndim == 0: result = ary.reshape(1, 1, 1) elif ary.ndim == 1: - result = ary[newaxis, :, newaxis] + result = ary[_nx.newaxis, :, _nx.newaxis] elif ary.ndim == 2: - result = ary[:, :, newaxis] + result = ary[:, :, _nx.newaxis] else: result = ary res.append(result) @@ -435,9 +436,9 @@ def stack(arrays, axis=0, out=None): # Internal functions to eliminate the overhead of repeated dispatch in one of # the two possible paths inside np.block. # Use getattr to protect against __array_function__ being disabled. 
-_size = getattr(_nx.size, '__wrapped__', _nx.size) -_ndim = getattr(_nx.ndim, '__wrapped__', _nx.ndim) -_concatenate = getattr(_nx.concatenate, '__wrapped__', _nx.concatenate) +_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size) +_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim) +_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate) def _block_format_index(index): diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c index addc9f006..a7f34cbe5 100644 --- a/numpy/core/src/multiarray/alloc.c +++ b/numpy/core/src/multiarray/alloc.c @@ -25,10 +25,14 @@ #include <assert.h> -#ifdef HAVE_SYS_MMAN_H +#ifdef NPY_OS_LINUX #include <sys/mman.h> -#if defined MADV_HUGEPAGE && defined HAVE_MADVISE -#define HAVE_MADV_HUGEPAGE +#ifndef MADV_HUGEPAGE +/* + * Use code 14 (MADV_HUGEPAGE) if it isn't defined. This gives a chance of + * enabling huge pages even if built with linux kernel < 2.6.38 + */ +#define MADV_HUGEPAGE 14 #endif #endif @@ -74,11 +78,15 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, #ifdef _PyPyGC_AddMemoryPressure _PyPyPyGC_AddMemoryPressure(nelem * esz); #endif -#ifdef HAVE_MADV_HUGEPAGE +#ifdef NPY_OS_LINUX /* allow kernel allocating huge pages for large arrays */ if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u)))) { npy_uintp offset = 4096u - (npy_uintp)p % (4096u); npy_uintp length = nelem * esz - offset; + /** + * Intentionally not checking for errors that may be returned by + * older kernel versions; optimistically tries enabling huge pages. + */ madvise((void*)((npy_uintp)p + offset), length, MADV_HUGEPAGE); } #endif diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index eb939f47c..4e229e321 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -462,7 +462,7 @@ WARN_IN_DEALLOC(PyObject* warning, const char * msg) { PyErr_WriteUnraisable(Py_None); } } -}; +} /* array object functions */ @@ -607,7 +607,7 @@ PyArray_DebugPrint(PyArrayObject *obj) * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT void -PyArray_SetDatetimeParseFunction(PyObject *op) +PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op)) { } @@ -630,7 +630,7 @@ PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) /*NUMPY_API */ NPY_NO_EXPORT int -PyArray_CompareString(char *s1, char *s2, size_t len) +PyArray_CompareString(const char *s1, const char *s2, size_t len) { const unsigned char *c1 = (unsigned char *)s1; const unsigned char *c2 = (unsigned char *)s2; @@ -1200,15 +1200,28 @@ _void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op) } } if (res == NULL && !PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, "No fields found."); + /* these dtypes had no fields. Use a MultiIter to broadcast them + * to an output array, and fill with True (for EQ)*/ + PyArrayMultiIterObject *mit = (PyArrayMultiIterObject *) + PyArray_MultiIterNew(2, self, other); + if (mit == NULL) { + return NULL; + } + + res = PyArray_NewFromDescr(&PyArray_Type, + PyArray_DescrFromType(NPY_BOOL), + mit->nd, mit->dimensions, + NULL, NULL, 0, NULL); + Py_DECREF(mit); + if (res) { + PyArray_FILLWBYTE((PyArrayObject *)res, + cmp_op == Py_EQ ? 1 : 0); + } } return res; } else { - /* - * compare as a string. Assumes self and - * other have same descr->type - */ + /* compare as a string. 
Assumes self and other have same descr->type */ return _strings_richcompare(self, other, cmp_op, 0); } } diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c index 7db467308..aa4e40e66 100644 --- a/numpy/core/src/multiarray/convert.c +++ b/numpy/core/src/multiarray/convert.c @@ -543,35 +543,6 @@ PyArray_AssignZero(PyArrayObject *dst, return retcode; } -/* - * Fills an array with ones. - * - * dst: The destination array. - * wheremask: If non-NULL, a boolean mask specifying where to set the values. - * - * Returns 0 on success, -1 on failure. - */ -NPY_NO_EXPORT int -PyArray_AssignOne(PyArrayObject *dst, - PyArrayObject *wheremask) -{ - npy_bool value; - PyArray_Descr *bool_dtype; - int retcode; - - /* Create a raw bool scalar with the value True */ - bool_dtype = PyArray_DescrFromType(NPY_BOOL); - if (bool_dtype == NULL) { - return -1; - } - value = 1; - - retcode = PyArray_AssignRawScalar(dst, bool_dtype, (char *)&value, - wheremask, NPY_SAFE_CASTING); - - Py_DECREF(bool_dtype); - return retcode; -} /*NUMPY_API * Copy an array. diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 59bfa0d9f..188e68001 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -1457,28 +1457,6 @@ _dtype_from_buffer_3118(PyObject *memoryview) } -/* - * Call the python _is_from_ctypes - */ -NPY_NO_EXPORT int -_is_from_ctypes(PyObject *obj) { - PyObject *ret_obj; - static PyObject *py_func = NULL; - - npy_cache_import("numpy.core._internal", "_is_from_ctypes", &py_func); - - if (py_func == NULL) { - return -1; - } - ret_obj = PyObject_CallFunctionObjArgs(py_func, obj, NULL); - if (ret_obj == NULL) { - return -1; - } - - return PyObject_IsTrue(ret_obj); -} - - NPY_NO_EXPORT PyObject * _array_from_buffer_3118(PyObject *memoryview) { @@ -1880,13 +1858,6 @@ PyArray_GetArrayParamsFromObject(PyObject *op, *out_arr = NULL; return 0; } - if (is_object && (requested_dtype != NULL) && - (requested_dtype->type_num != NPY_OBJECT)) { - PyErr_SetString(PyExc_ValueError, - "cannot create an array from unequal-length (ragged) sequences"); - Py_DECREF(*out_dtype); - return -1; - } /* If object arrays are forced */ if (is_object) { Py_DECREF(*out_dtype); @@ -2808,9 +2779,9 @@ PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) Deprecated, use PyArray_NewFromDescr instead. */ NPY_NO_EXPORT PyObject * -PyArray_FromDimsAndDataAndDescr(int nd, int *d, +PyArray_FromDimsAndDataAndDescr(int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *descr, - char *data) + char *NPY_UNUSED(data)) { PyErr_SetString(PyExc_NotImplementedError, "PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr."); @@ -2822,7 +2793,7 @@ PyArray_FromDimsAndDataAndDescr(int nd, int *d, Deprecated, use PyArray_SimpleNew instead. 
*/ NPY_NO_EXPORT PyObject * -PyArray_FromDims(int nd, int *d, int type) +PyArray_FromDims(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type)) { PyErr_SetString(PyExc_NotImplementedError, "PyArray_FromDims: use PyArray_SimpleNew."); @@ -3902,7 +3873,13 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, s = (npy_intp)ts - offset; n = (npy_intp)count; itemsize = type->elsize; - if (n < 0 ) { + if (n < 0) { + if (itemsize == 0) { + PyErr_SetString(PyExc_ValueError, + "cannot determine count if itemsize is 0"); + Py_DECREF(type); + return NULL; + } if (s % itemsize != 0) { PyErr_SetString(PyExc_ValueError, "buffer size must be a multiple"\ @@ -4095,7 +4072,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) } for (i = 0; (i < count || count == -1) && (value = PyIter_Next(iter)); i++) { - if (i >= elcount) { + if (i >= elcount && elsize != 0) { npy_intp nbytes; /* Grow PyArray_DATA(ret): diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index 60e6bbae2..82e046ca1 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -386,7 +386,8 @@ convert_datetimestruct_to_datetime(PyArray_DatetimeMetaData *meta, * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT npy_datetime -PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d) +PyArray_DatetimeStructToDatetime( + NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d)) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_DatetimeStructToDatetime function has " @@ -400,7 +401,8 @@ PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d) * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT npy_datetime -PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT fr, npy_timedeltastruct *d) +PyArray_TimedeltaStructToTimedelta( + NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d)) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_TimedeltaStructToTimedelta function has " @@ -600,8 +602,9 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta, * TO BE REMOVED - NOT USED INTERNALLY. */ NPY_NO_EXPORT void -PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr, - npy_datetimestruct *result) +PyArray_DatetimeToDatetimeStruct( + npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), + npy_datetimestruct *result) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_DatetimeToDatetimeStruct function has " @@ -621,8 +624,9 @@ PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr, * TO BE REMOVED - NOT USED INTERNALLY. 
*/ NPY_NO_EXPORT void -PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT fr, - npy_timedeltastruct *result) +PyArray_TimedeltaToTimedeltaStruct( + npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), + npy_timedeltastruct *result) { PyErr_SetString(PyExc_RuntimeError, "The NumPy PyArray_TimedeltaToTimedeltaStruct function has " @@ -3128,7 +3132,7 @@ is_any_numpy_datetime_or_timedelta(PyObject *obj) */ NPY_NO_EXPORT int convert_pyobjects_to_datetimes(int count, - PyObject **objs, int *type_nums, + PyObject **objs, const int *type_nums, NPY_CASTING casting, npy_int64 *out_values, PyArray_DatetimeMetaData *inout_meta) diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c index c04a6c125..cdeb65d0e 100644 --- a/numpy/core/src/multiarray/datetime_busday.c +++ b/numpy/core/src/multiarray/datetime_busday.c @@ -48,7 +48,7 @@ get_day_of_week(npy_datetime date) */ static int is_holiday(npy_datetime date, - npy_datetime *holidays_begin, npy_datetime *holidays_end) + npy_datetime *holidays_begin, const npy_datetime *holidays_end) { npy_datetime *trial; @@ -88,7 +88,7 @@ is_holiday(npy_datetime date, */ static npy_datetime * find_earliest_holiday_on_or_after(npy_datetime date, - npy_datetime *holidays_begin, npy_datetime *holidays_end) + npy_datetime *holidays_begin, const npy_datetime *holidays_end) { npy_datetime *trial; @@ -127,7 +127,7 @@ find_earliest_holiday_on_or_after(npy_datetime date, */ static npy_datetime * find_earliest_holiday_after(npy_datetime date, - npy_datetime *holidays_begin, npy_datetime *holidays_end) + npy_datetime *holidays_begin, const npy_datetime *holidays_end) { npy_datetime *trial; @@ -159,7 +159,7 @@ static int apply_business_day_roll(npy_datetime date, npy_datetime *out, int *out_day_of_week, NPY_BUSDAY_ROLL roll, - npy_bool *weekmask, + const npy_bool *weekmask, npy_datetime *holidays_begin, npy_datetime *holidays_end) { int day_of_week; @@ -361,7 +361,7 @@ apply_business_day_offset(npy_datetime date, npy_int64 offset, static int apply_business_day_count(npy_datetime date_begin, npy_datetime date_end, npy_int64 *out, - npy_bool *weekmask, int busdays_in_weekmask, + const npy_bool *weekmask, int busdays_in_weekmask, npy_datetime *holidays_begin, npy_datetime *holidays_end) { npy_int64 count, whole_weeks; @@ -722,7 +722,7 @@ finish: */ NPY_NO_EXPORT PyArrayObject * is_business_day(PyArrayObject *dates, PyArrayObject *out, - npy_bool *weekmask, int busdays_in_weekmask, + const npy_bool *weekmask, int busdays_in_weekmask, npy_datetime *holidays_begin, npy_datetime *holidays_end) { PyArray_DatetimeMetaData temp_meta; diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index c7db092e6..734255a9d 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -149,7 +149,7 @@ array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args) arg == '|' || arg == '=') static int -_check_for_commastring(char *type, Py_ssize_t len) +_check_for_commastring(const char *type, Py_ssize_t len) { Py_ssize_t i; int sqbracket; @@ -3277,7 +3277,7 @@ arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op) } static int -descr_nonzero(PyObject *self) +descr_nonzero(PyObject *NPY_UNUSED(self)) { /* `bool(np.dtype(...)) == True` for all dtypes. Needed to override default * nonzero implementation, which checks if `len(object) > 0`. 
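 * Concrete example of why the override matters: len(np.dtype(np.float64))
 * is 0 (no fields), so the default length-based nonzero check would make
 * bool(np.dtype(np.float64)) evaluate to False; with this override every
 * dtype object is truthy.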
*/ diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index a90416a40..ef0dd4a01 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -3337,7 +3337,7 @@ get_decsrcref_transfer_function(int aligned, /* If there are subarrays, need to wrap it */ else if (PyDataType_HASSUBARRAY(src_dtype)) { PyArray_Dims src_shape = {NULL, -1}; - npy_intp src_size = 1; + npy_intp src_size; PyArray_StridedUnaryOp *stransfer; NpyAuxData *data; diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c index bed92403f..116e37ce5 100644 --- a/numpy/core/src/multiarray/getset.c +++ b/numpy/core/src/multiarray/getset.c @@ -190,7 +190,7 @@ array_strides_set(PyArrayObject *self, PyObject *obj) static PyObject * -array_priority_get(PyArrayObject *self) +array_priority_get(PyArrayObject *NPY_UNUSED(self)) { return PyFloat_FromDouble(NPY_PRIORITY); } diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 762563eb5..c49cf67e6 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -1456,8 +1456,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis) /* Now we can check the axis */ nd = PyArray_NDIM(mps[0]); - if ((nd == 0) || (PyArray_SIZE(mps[0]) == 1)) { - /* single element case */ + if ((nd == 0) || (PyArray_SIZE(mps[0]) <= 1)) { + /* empty/single element case */ ret = (PyArrayObject *)PyArray_NewFromDescr( &PyArray_Type, PyArray_DescrFromType(NPY_INTP), PyArray_NDIM(mps[0]), PyArray_DIMS(mps[0]), NULL, NULL, @@ -1466,7 +1466,9 @@ PyArray_LexSort(PyObject *sort_keys, int axis) if (ret == NULL) { goto fail; } - *((npy_intp *)(PyArray_DATA(ret))) = 0; + if (PyArray_SIZE(mps[0]) > 0) { + *((npy_intp *)(PyArray_DATA(ret))) = 0; + } goto finish; } if (check_and_adjust_axis(&axis, nd) < 0) { @@ -1516,19 +1518,28 @@ PyArray_LexSort(PyObject *sort_keys, int axis) char *valbuffer, *indbuffer; int *swaps; - if (N == 0 || maxelsize == 0 || sizeof(npy_intp) == 0) { - goto fail; + assert(N > 0); /* Guaranteed and assumed by indbuffer */ + npy_intp valbufsize = N * maxelsize; + if (NPY_UNLIKELY(valbufsize == 0)) { + valbufsize = 1; /* Ensure allocation is not empty */ } - valbuffer = PyDataMem_NEW(N * maxelsize); + + valbuffer = PyDataMem_NEW(valbufsize); if (valbuffer == NULL) { goto fail; } indbuffer = PyDataMem_NEW(N * sizeof(npy_intp)); if (indbuffer == NULL) { + PyDataMem_FREE(valbuffer); + goto fail; + } + swaps = malloc(NPY_LIKELY(n > 0) ? n * sizeof(int) : 1); + if (swaps == NULL) { + PyDataMem_FREE(valbuffer); PyDataMem_FREE(indbuffer); goto fail; } - swaps = malloc(n*sizeof(int)); + for (j = 0; j < n; j++) { swaps[j] = PyArray_ISBYTESWAPPED(mps[j]); } @@ -1557,8 +1568,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis) #else if (rcode < 0) { #endif - npy_free_cache(valbuffer, N * maxelsize); - npy_free_cache(indbuffer, N * sizeof(npy_intp)); + PyDataMem_FREE(valbuffer); + PyDataMem_FREE(indbuffer); free(swaps); goto fail; } @@ -2464,7 +2475,7 @@ finish: * array of values, which must be of length PyArray_NDIM(self). */ NPY_NO_EXPORT PyObject * -PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index) +PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index) { int idim, ndim = PyArray_NDIM(self); char *data = PyArray_DATA(self); @@ -2492,7 +2503,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index) * Returns 0 on success, -1 on failure.
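 * Illustrative call-site sketch (editorial assumption of a 2-D array and an
 * owned `value` reference):
 *     npy_intp index[2] = {i, j};
 *     if (PyArray_MultiIndexSetItem(self, index, value) < 0) {
 *         goto fail;
 *     }
 * i.e. the C-level analogue of `arr[i, j] = value`.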
*/ NPY_NO_EXPORT int -PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index, +PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index, PyObject *obj) { int idim, ndim = PyArray_NDIM(self); diff --git a/numpy/core/src/multiarray/item_selection.h b/numpy/core/src/multiarray/item_selection.h index 90bb5100d..2276b4db7 100644 --- a/numpy/core/src/multiarray/item_selection.h +++ b/numpy/core/src/multiarray/item_selection.h @@ -15,7 +15,7 @@ count_boolean_trues(int ndim, char *data, npy_intp *ashape, npy_intp *astrides); * array of values, which must be of length PyArray_NDIM(self). */ NPY_NO_EXPORT PyObject * -PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index); +PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index); /* * Sets a single item in the array, based on a single multi-index @@ -24,7 +24,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index); * Returns 0 on success, -1 on failure. */ NPY_NO_EXPORT int -PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index, +PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index, PyObject *obj); #endif diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 0d7679fe7..e66bb36aa 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -98,7 +98,7 @@ parse_index_entry(PyObject *op, npy_intp *step_size, /* get the dataptr from its current coordinates for simple iterator */ static char* -get_ptr_simple(PyArrayIterObject* iter, npy_intp *coordinates) +get_ptr_simple(PyArrayIterObject* iter, const npy_intp *coordinates) { npy_intp i; char *ret; @@ -840,7 +840,6 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) if (check_and_adjust_index(&start, self->size, -1, NULL) < 0) { goto finish; } - retval = 0; PyArray_ITER_GOTO1D(self, start); retval = type->f->setitem(val, self->dataptr, self->ao); PyArray_ITER_RESET(self); @@ -1666,7 +1665,7 @@ static char* _set_constant(PyArrayNeighborhoodIterObject* iter, /* set the dataptr from its current coordinates */ static char* -get_ptr_constant(PyArrayIterObject* _iter, npy_intp *coordinates) +get_ptr_constant(PyArrayIterObject* _iter, const npy_intp *coordinates) { int i; npy_intp bd, _coordinates[NPY_MAXDIMS]; @@ -1721,7 +1720,7 @@ __npy_pos_remainder(npy_intp i, npy_intp n) /* set the dataptr from its current coordinates */ static char* -get_ptr_mirror(PyArrayIterObject* _iter, npy_intp *coordinates) +get_ptr_mirror(PyArrayIterObject* _iter, const npy_intp *coordinates) { int i; npy_intp bd, _coordinates[NPY_MAXDIMS], lb; @@ -1755,7 +1754,7 @@ __npy_euclidean_division(npy_intp i, npy_intp n) _coordinates[c] = lb + __npy_euclidean_division(bd, p->limits_sizes[c]); static char* -get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates) +get_ptr_circular(PyArrayIterObject* _iter, const npy_intp *coordinates) { int i; npy_intp bd, _coordinates[NPY_MAXDIMS], lb; @@ -1777,7 +1776,7 @@ get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates) * A Neighborhood Iterator object. 
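 * Illustrative sketch: `bounds` holds a (low, high) pair per axis of the
 * underlying array, relative to the current point. For a 1-D iterator,
 * npy_intp bounds[] = {-1, 1} makes each neighborhood visit x[i-1], x[i]
 * and x[i+1], with out-of-bounds points supplied according to `mode`
 * (e.g. taken from `fill` in constant-padding mode).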
*/ NPY_NO_EXPORT PyObject* -PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp *bounds, +PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp *bounds, int mode, PyArrayObject* fill) { int i; diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 9bb85e320..247864775 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -176,7 +176,7 @@ unpack_tuple(PyTupleObject *index, PyObject **result, npy_intp result_n) /* Unpack a single scalar index, taking a new reference to match unpack_tuple */ static NPY_INLINE npy_intp -unpack_scalar(PyObject *index, PyObject **result, npy_intp result_n) +unpack_scalar(PyObject *index, PyObject **result, npy_intp NPY_UNUSED(result_n)) { Py_INCREF(index); result[0] = index; diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 79c60aa2e..e5845f2f6 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -1051,7 +1051,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) NPY_NO_EXPORT PyObject * -array_ufunc(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_ufunc(PyArrayObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) { PyObject *ufunc, *method_name, *normal_args, *ufunc_method; PyObject *result = NULL; @@ -1100,7 +1100,7 @@ cleanup: } static PyObject * -array_function(PyArrayObject *self, PyObject *c_args, PyObject *c_kwds) +array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kwds) { PyObject *func, *types, *args, *kwargs, *result; static char *kwlist[] = {"func", "types", "args", "kwargs", NULL}; @@ -1179,7 +1179,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) return NULL; } - ret = PyArray_Resize(self, &newshape, refcheck, NPY_CORDER); + ret = PyArray_Resize(self, &newshape, refcheck, NPY_ANYORDER); npy_free_cache_dim_obj(newshape); if (ret == NULL) { return NULL; @@ -1732,7 +1732,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args)) } static PyObject * -array_reduce_ex_regular(PyArrayObject *self, int protocol) +array_reduce_ex_regular(PyArrayObject *self, int NPY_UNUSED(protocol)) { PyObject *subclass_array_reduce = NULL; PyObject *ret; @@ -1861,7 +1861,7 @@ array_reduce_ex(PyArrayObject *self, PyObject *args) PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || (PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) && ((PyObject*)self)->ob_type != &PyArray_Type) || - PyDataType_ISUNSIZED(descr)) { + descr->elsize == 0) { /* The PickleBuffer class from version 5 of the pickle protocol * can only be used for arrays backed by a contiguous data buffer. 
* For all other cases we fallback to the generic array_reduce diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index bef978c94..441567049 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -286,7 +286,8 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, * Convert to a 1D C-array */ NPY_NO_EXPORT int -PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) +PyArray_As1D(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), + int *NPY_UNUSED(d1), int NPY_UNUSED(typecode)) { /* 2008-07-14, 1.5 */ PyErr_SetString(PyExc_NotImplementedError, @@ -298,7 +299,8 @@ PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) * Convert to a 2D C-array */ NPY_NO_EXPORT int -PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode) +PyArray_As2D(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), + int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode)) { /* 2008-07-14, 1.5 */ PyErr_SetString(PyExc_NotImplementedError, @@ -1560,7 +1562,8 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) PyArrayObject *oparr = NULL, *ret = NULL; npy_bool subok = NPY_FALSE; npy_bool copy = NPY_TRUE; - int ndmin = 0, nd; + int nd; + npy_intp ndmin = 0; PyArray_Descr *type = NULL; PyArray_Descr *oldtype = NULL; NPY_ORDER order = NPY_KEEPORDER; @@ -1631,12 +1634,10 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) } } - /* copy=False with default dtype, order and ndim */ - if (STRIDING_OK(oparr, order)) { - ret = oparr; - Py_INCREF(ret); - goto finish; - } + /* copy=False with default dtype, order (any is OK) and ndim */ + ret = oparr; + Py_INCREF(ret); + goto finish; } } @@ -3781,7 +3782,7 @@ _vec_string_no_args(PyArrayObject* char_array, } static PyObject * -_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) { PyArrayObject* char_array = NULL; PyArray_Descr *type; diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c index 18ca127e1..db0bfcece 100644 --- a/numpy/core/src/multiarray/nditer_api.c +++ b/numpy/core/src/multiarray/nditer_api.c @@ -1628,15 +1628,12 @@ npyiter_coalesce_axes(NpyIter *iter) npy_intp istrides, nstrides = NAD_NSTRIDES(); NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - NpyIter_AxisData *ad_compress; + NpyIter_AxisData *ad_compress = axisdata; npy_intp new_ndim = 1; /* The HASMULTIINDEX or IDENTPERM flags do not apply after coalescing */ NIT_ITFLAGS(iter) &= ~(NPY_ITFLAG_IDENTPERM|NPY_ITFLAG_HASMULTIINDEX); - axisdata = NIT_AXISDATA(iter); - ad_compress = axisdata; - for (idim = 0; idim < ndim-1; ++idim) { int can_coalesce = 1; npy_intp shape0 = NAD_SHAPE(ad_compress); diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c index 3b3635afe..d40836dc2 100644 --- a/numpy/core/src/multiarray/nditer_constr.c +++ b/numpy/core/src/multiarray/nditer_constr.c @@ -24,7 +24,7 @@ static int npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags); static int npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes, - npy_intp *itershape); + const npy_intp *itershape); static int npyiter_calculate_ndim(int nop, PyArrayObject **op_in, int oa_ndim); @@ -55,7 +55,7 @@ npyiter_check_casting(int nop, PyArrayObject **op, 
static int npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags, char **op_dataptr, - npy_uint32 *op_flags, int **op_axes, + const npy_uint32 *op_flags, int **op_axes, npy_intp *itershape); static void npyiter_replace_axisdata(NpyIter *iter, int iop, @@ -74,23 +74,23 @@ static void npyiter_find_best_axis_ordering(NpyIter *iter); static PyArray_Descr * npyiter_get_common_dtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, + const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, PyArray_Descr **op_request_dtypes, int only_inputs); static PyArrayObject * npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype, npy_uint32 flags, npyiter_opitflags *op_itflags, int op_ndim, npy_intp *shape, - PyArray_Descr *op_dtype, int *op_axes); + PyArray_Descr *op_dtype, const int *op_axes); static int npyiter_allocate_arrays(NpyIter *iter, npy_uint32 flags, PyArray_Descr **op_dtype, PyTypeObject *subtype, - npy_uint32 *op_flags, npyiter_opitflags *op_itflags, + const npy_uint32 *op_flags, npyiter_opitflags *op_itflags, int **op_axes); static void npyiter_get_priority_subtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, + const npyiter_opitflags *op_itflags, double *subtype_priority, PyTypeObject **subtype); static int npyiter_allocate_transfer_functions(NpyIter *iter); @@ -787,7 +787,7 @@ npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags) static int npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes, - npy_intp *itershape) + const npy_intp *itershape) { char axes_dupcheck[NPY_MAXDIMS]; int iop, idim; @@ -1423,7 +1423,7 @@ check_mask_for_writemasked_reduction(NpyIter *iter, int iop) static int npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags, char **op_dataptr, - npy_uint32 *op_flags, int **op_axes, + const npy_uint32 *op_flags, int **op_axes, npy_intp *itershape) { npy_uint32 itflags = NIT_ITFLAGS(iter); @@ -2409,7 +2409,7 @@ npyiter_find_best_axis_ordering(NpyIter *iter) */ static PyArray_Descr * npyiter_get_common_dtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, + const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype, PyArray_Descr **op_request_dtypes, int only_inputs) { @@ -2477,7 +2477,7 @@ static PyArrayObject * npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype, npy_uint32 flags, npyiter_opitflags *op_itflags, int op_ndim, npy_intp *shape, - PyArray_Descr *op_dtype, int *op_axes) + PyArray_Descr *op_dtype, const int *op_axes) { npy_uint32 itflags = NIT_ITFLAGS(iter); int idim, ndim = NIT_NDIM(iter); @@ -2706,7 +2706,7 @@ static int npyiter_allocate_arrays(NpyIter *iter, npy_uint32 flags, PyArray_Descr **op_dtype, PyTypeObject *subtype, - npy_uint32 *op_flags, npyiter_opitflags *op_itflags, + const npy_uint32 *op_flags, npyiter_opitflags *op_itflags, int **op_axes) { npy_uint32 itflags = NIT_ITFLAGS(iter); @@ -3109,7 +3109,7 @@ npyiter_allocate_arrays(NpyIter *iter, */ static void npyiter_get_priority_subtype(int nop, PyArrayObject **op, - npyiter_opitflags *op_itflags, + const npyiter_opitflags *op_itflags, double *subtype_priority, PyTypeObject **subtype) { diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c index ffea08bb3..4b9d41aa4 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.c +++ b/numpy/core/src/multiarray/nditer_pywrap.c @@ -82,7 +82,8 @@ static int npyiter_cache_values(NewNpyArrayIterObject *self) } static PyObject * 
-npyiter_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) +npyiter_new(PyTypeObject *subtype, PyObject *NPY_UNUSED(args), + PyObject *NPY_UNUSED(kwds)) { NewNpyArrayIterObject *self; @@ -535,7 +536,7 @@ try_single_dtype: } static int -npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop, +npyiter_convert_op_axes(PyObject *op_axes_in, int nop, int **op_axes, int *oa_ndim) { PyObject *a; @@ -2365,7 +2366,7 @@ npyiter_close(NewNpyArrayIterObject *self) } static PyObject * -npyiter_exit(NewNpyArrayIterObject *self, PyObject *args) +npyiter_exit(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args)) { /* even if called via exception handling, writeback any data */ return npyiter_close(self); diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c index 0ceb994ef..dabc866ff 100644 --- a/numpy/core/src/multiarray/number.c +++ b/numpy/core/src/multiarray/number.c @@ -391,7 +391,8 @@ array_matrix_multiply(PyArrayObject *m1, PyObject *m2) } static PyObject * -array_inplace_matrix_multiply(PyArrayObject *m1, PyObject *m2) +array_inplace_matrix_multiply( + PyArrayObject *NPY_UNUSED(m1), PyObject *NPY_UNUSED(m2)) { PyErr_SetString(PyExc_TypeError, "In-place matrix multiplication is not (yet) supported. " diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 34839b866..9adca6773 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -4492,6 +4492,36 @@ initialize_numeric_types(void) PyArrayIter_Type.tp_iter = PyObject_SelfIter; PyArrayMapIter_Type.tp_iter = PyObject_SelfIter; + + /* + * Give types different names when they are the same size (gh-9799). + * `np.intX` always refers to the first int of that size in the sequence + * `['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']`. 
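+     * Illustrative consequence (assumed example; the exact names depend on
+     * the platform's type sizes): where int and long are both 32 bits,
+     * LONG comes first in the sequence above, so np.int_ keeps the sized
+     * name "numpy.int32" while the INT-based scalar is renamed to
+     * "numpy.intc", keeping the two tp_names distinguishable.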
+ */ +#if (NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT) + PyByteArrType_Type.tp_name = "numpy.byte"; + PyUByteArrType_Type.tp_name = "numpy.ubyte"; +#endif +#if (NPY_SIZEOF_SHORT == NPY_SIZEOF_INT) + PyShortArrType_Type.tp_name = "numpy.short"; + PyUShortArrType_Type.tp_name = "numpy.ushort"; +#endif +#if (NPY_SIZEOF_INT == NPY_SIZEOF_LONG) + PyIntArrType_Type.tp_name = "numpy.intc"; + PyUIntArrType_Type.tp_name = "numpy.uintc"; +#endif +#if (NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG) + PyLongLongArrType_Type.tp_name = "numpy.longlong"; + PyULongLongArrType_Type.tp_name = "numpy.ulonglong"; +#endif + + /* + Do the same for longdouble + */ +#if (NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE) + PyLongDoubleArrType_Type.tp_name = "numpy.longdouble"; + PyCLongDoubleArrType_Type.tp_name = "numpy.clongdouble"; +#endif } typedef struct { diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c index 30820737e..4e31f003b 100644 --- a/numpy/core/src/multiarray/shape.c +++ b/numpy/core/src/multiarray/shape.c @@ -26,7 +26,7 @@ static int _fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr); static int -_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims, +_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order); static void @@ -40,11 +40,11 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype); */ NPY_NO_EXPORT PyObject * PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, - NPY_ORDER order) + NPY_ORDER NPY_UNUSED(order)) { npy_intp oldnbytes, newnbytes; npy_intp oldsize, newsize; - int new_nd=newshape->len, k, n, elsize; + int new_nd=newshape->len, k, elsize; int refcnt; npy_intp* new_dimensions=newshape->ptr; npy_intp new_strides[NPY_MAXDIMS]; @@ -136,8 +136,8 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, PyObject *zero = PyInt_FromLong(0); char *optr; optr = PyArray_BYTES(self) + oldnbytes; - n = newsize - oldsize; - for (k = 0; k < n; k++) { + npy_intp n_new = newsize - oldsize; + for (npy_intp i = 0; i < n_new; i++) { _putzero((char *)optr, zero, PyArray_DESCR(self)); optr += elsize; } @@ -361,7 +361,7 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype) * stride of the next-fastest index. */ static int -_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims, +_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order) { int oldnd; @@ -766,7 +766,7 @@ static int _npy_stride_sort_item_comparator(const void *a, const void *b) * [(2, 12), (0, 4), (1, -2)]. */ NPY_NO_EXPORT void -PyArray_CreateSortedStridePerm(int ndim, npy_intp *strides, +PyArray_CreateSortedStridePerm(int ndim, npy_intp const *strides, npy_stride_sort_item *out_strideperm) { int i; @@ -1048,7 +1048,7 @@ build_shape_string(npy_intp n, npy_intp *vals) * from a reduction result once its computation is complete. 
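 * Illustrative sketch: for a (3, 1, 5) reduction result with
 * flags = {0, 1, 0}, the flagged length-1 axis is stripped in place,
 * leaving shape (3, 5) with the matching strides compacted.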
*/ NPY_NO_EXPORT void -PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags) +PyArray_RemoveAxesInPlace(PyArrayObject *arr, const npy_bool *flags) { PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; npy_intp *shape = fa->dimensions, *strides = fa->strides; diff --git a/numpy/core/src/npysort/radixsort.c.src b/numpy/core/src/npysort/radixsort.c.src index c90b06974..72887d7e4 100644 --- a/numpy/core/src/npysort/radixsort.c.src +++ b/numpy/core/src/npysort/radixsort.c.src @@ -198,9 +198,9 @@ aradixsort_@suff@(void *start, npy_intp* tosort, npy_intp num, void *NPY_UNUSED( return 0; } - k1 = KEY_OF(arr[0]); + k1 = KEY_OF(arr[tosort[0]]); for (npy_intp i = 1; i < num; i++) { - k2 = KEY_OF(arr[i]); + k2 = KEY_OF(arr[tosort[i]]); if (k1 > k2) { all_sorted = 0; break; diff --git a/numpy/core/src/umath/_rational_tests.c.src b/numpy/core/src/umath/_rational_tests.c.src index 9e74845df..615e395c7 100644 --- a/numpy/core/src/umath/_rational_tests.c.src +++ b/numpy/core/src/umath/_rational_tests.c.src @@ -539,11 +539,11 @@ static PyObject* pyrational_str(PyObject* self) { rational x = ((PyRational*)self)->r; if (d(x)!=1) { - return PyString_FromFormat( + return PyUString_FromFormat( "%ld/%ld",(long)x.n,(long)d(x)); } else { - return PyString_FromFormat( + return PyUString_FromFormat( "%ld",(long)x.n); } } diff --git a/numpy/core/src/umath/matmul.c.src b/numpy/core/src/umath/matmul.c.src index 480c0c72f..b5204eca5 100644 --- a/numpy/core/src/umath/matmul.c.src +++ b/numpy/core/src/umath/matmul.c.src @@ -196,16 +196,14 @@ NPY_NO_EXPORT void * FLOAT, DOUBLE, HALF, * CFLOAT, CDOUBLE, CLONGDOUBLE, * UBYTE, USHORT, UINT, ULONG, ULONGLONG, - * BYTE, SHORT, INT, LONG, LONGLONG, - * BOOL# + * BYTE, SHORT, INT, LONG, LONGLONG# * #typ = npy_longdouble, * npy_float,npy_double,npy_half, * npy_cfloat, npy_cdouble, npy_clongdouble, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, - * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_bool# - * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*11# - * #IS_HALF = 0, 0, 0, 1, 0*14# + * npy_byte, npy_short, npy_int, npy_long, npy_longlong# + * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*10# + * #IS_HALF = 0, 0, 0, 1, 0*13# */ NPY_NO_EXPORT void @@ -266,7 +264,44 @@ NPY_NO_EXPORT void } /**end repeat**/ +NPY_NO_EXPORT void +BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, + void *_ip2, npy_intp is2_n, npy_intp is2_p, + void *_op, npy_intp os_m, npy_intp os_p, + npy_intp dm, npy_intp dn, npy_intp dp) + +{ + npy_intp m, n, p; + npy_intp ib2_p, ob_p; + char *ip1 = (char *)_ip1, *ip2 = (char *)_ip2, *op = (char *)_op; + ib2_p = is2_p * dp; + ob_p = os_p * dp; + + for (m = 0; m < dm; m++) { + for (p = 0; p < dp; p++) { + char *ip1tmp = ip1; + char *ip2tmp = ip2; + *(npy_bool *)op = NPY_FALSE; + for (n = 0; n < dn; n++) { + npy_bool val1 = (*(npy_bool *)ip1tmp); + npy_bool val2 = (*(npy_bool *)ip2tmp); + if (val1 != 0 && val2 != 0) { + *(npy_bool *)op = NPY_TRUE; + break; + } + ip2tmp += is2_n; + ip1tmp += is1_n; + } + op += os_p; + ip2 += is2_p; + } + op -= ob_p; + ip2 -= ib2_p; + ip1 += is1_m; + op += os_m; + } +} NPY_NO_EXPORT void OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c index 8ae2f65e0..4ce8d8ab7 100644 --- a/numpy/core/src/umath/reduction.c +++ b/numpy/core/src/umath/reduction.c @@ -36,7 +36,7 @@ * If 'dtype' isn't NULL, this function steals its reference. 
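 * Illustrative consequence of the stolen reference: a caller that only
 * holds a borrowed dtype must Py_INCREF(dtype) before the call and must
 * not Py_DECREF it afterwards, even when the allocation fails.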
*/ static PyArrayObject * -allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags, +allocate_reduce_result(PyArrayObject *arr, const npy_bool *axis_flags, PyArray_Descr *dtype, int subok) { npy_intp strides[NPY_MAXDIMS], stride; @@ -84,7 +84,7 @@ allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags, * The return value is a view into 'out'. */ static PyArrayObject * -conform_reduce_result(int ndim, npy_bool *axis_flags, +conform_reduce_result(int ndim, const npy_bool *axis_flags, PyArrayObject *out, int keepdims, const char *funcname, int need_copy) { @@ -251,7 +251,7 @@ PyArray_CreateReduceResult(PyArrayObject *operand, PyArrayObject *out, * Count the number of dimensions selected in 'axis_flags' */ static int -count_axes(int ndim, npy_bool *axis_flags) +count_axes(int ndim, const npy_bool *axis_flags) { int idim; int naxes = 0; @@ -299,7 +299,7 @@ count_axes(int ndim, npy_bool *axis_flags) NPY_NO_EXPORT PyArrayObject * PyArray_InitializeReduceResult( PyArrayObject *result, PyArrayObject *operand, - npy_bool *axis_flags, + const npy_bool *axis_flags, npy_intp *out_skip_first_count, const char *funcname) { npy_intp *strides, *shape, shape_orig[NPY_MAXDIMS]; diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 7aec1ff49..88e5e1f1b 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -1017,7 +1017,7 @@ sse2_sqrt_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n) LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) { op[i] = @scalarf@(ip[i]); } - assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || + assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || npy_is_aligned(&op[i], VECTOR_SIZE_BYTES)); if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) { LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { @@ -1069,7 +1069,7 @@ sse2_@kind@_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n) LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) { op[i] = @scalar@_@type@(ip[i]); } - assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || + assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) || npy_is_aligned(&op[i], VECTOR_SIZE_BYTES)); if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) { LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { @@ -1104,7 +1104,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n) /* Order of operations important for MSVC 2015 */ *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i]; } - assert(n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)); + assert((npy_uintp)n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)); if (i + 3 * stride <= n) { /* load the first elements */ @vtype@ c1 = @vpre@_load_@vsuf@((@type@*)&ip[i]); diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 5f9a0f7f4..c36680ed2 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -908,7 +908,7 @@ parse_ufunc_keywords(PyUFuncObject *ufunc, PyObject *kwds, PyObject **kwnames, . 
typedef int converter(PyObject *, void *); while (PyDict_Next(kwds, &pos, &key, &value)) { - int i; + npy_intp i; converter *convert; void *output = NULL; npy_intp index = locate_key(kwnames, key); @@ -2297,7 +2297,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, * Returns 0 on success, and -1 on failure */ static int -_parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis, +_parse_axis_arg(PyUFuncObject *ufunc, const int core_num_dims[], PyObject *axis, PyArrayObject **op, int broadcast_ndim, int **remap_axis) { int nop = ufunc->nargs; int iop, axis_int; @@ -2368,7 +2368,7 @@ _parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis, */ static int _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op, - int *op_core_num_dims, npy_uint32 *core_dim_flags, + const int *op_core_num_dims, npy_uint32 *core_dim_flags, npy_intp *core_dim_sizes, int **remap_axis) { int i; int nin = ufunc->nin; @@ -4053,14 +4053,14 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, int *op_axes[3] = {op_axes_arrays[0], op_axes_arrays[1], op_axes_arrays[2]}; npy_uint32 op_flags[3]; - int i, idim, ndim, otype_final; + int idim, ndim, otype_final; int need_outer_iterator = 0; NpyIter *iter = NULL; /* The reduceat indices - ind must be validated outside this call */ npy_intp *reduceat_ind; - npy_intp ind_size, red_axis_size; + npy_intp i, ind_size, red_axis_size; /* The selected inner loop */ PyUFuncGenericFunction innerloop = NULL; void *innerloopdata = NULL; @@ -4146,7 +4146,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, #endif /* Set up the op_axes for the outer loop */ - for (i = 0, idim = 0; idim < ndim; ++idim) { + for (idim = 0; idim < ndim; ++idim) { /* Use the i-th iteration dimension to match up ind */ if (idim == axis) { op_axes_arrays[0][idim] = axis; @@ -4866,7 +4866,7 @@ ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args) NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature(PyUFuncObject *func, PyUFuncGenericFunction newfunc, - int *signature, + const int *signature, PyUFuncGenericFunction *oldfunc) { int i, j; @@ -4921,7 +4921,7 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi char *types, int ntypes, int nin, int nout, int identity, const char *name, const char *doc, - int unused, const char *signature, + const int unused, const char *signature, PyObject *identity_value) { PyUFuncObject *ufunc; @@ -5223,7 +5223,7 @@ NPY_NO_EXPORT int PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int usertype, PyUFuncGenericFunction function, - int *arg_types, + const int *arg_types, void *data) { PyArray_Descr *descr; diff --git a/numpy/core/tests/test__exceptions.py b/numpy/core/tests/test__exceptions.py new file mode 100644 index 000000000..494b51f34 --- /dev/null +++ b/numpy/core/tests/test__exceptions.py @@ -0,0 +1,42 @@ +""" +Tests of the ._exceptions module. Primarily for exercising the __str__ methods. 
+""" +import numpy as np + +_ArrayMemoryError = np.core._exceptions._ArrayMemoryError + +class TestArrayMemoryError: + def test_str(self): + e = _ArrayMemoryError((1023,), np.dtype(np.uint8)) + str(e) # not crashing is enough + + # testing these properties is easier than testing the full string repr + def test__size_to_string(self): + """ Test e._size_to_string """ + f = _ArrayMemoryError._size_to_string + Ki = 1024 + assert f(0) == '0 bytes' + assert f(1) == '1 bytes' + assert f(1023) == '1023 bytes' + assert f(Ki) == '1.00 KiB' + assert f(Ki+1) == '1.00 KiB' + assert f(10*Ki) == '10.0 KiB' + assert f(int(999.4*Ki)) == '999. KiB' + assert f(int(1023.4*Ki)) == '1023. KiB' + assert f(int(1023.5*Ki)) == '1.00 MiB' + assert f(Ki*Ki) == '1.00 MiB' + + # 1023.9999 Mib should round to 1 GiB + assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB' + assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB' + # larger than sys.maxsize, adding larger prefices isn't going to help + # anyway. + assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB' + + def test__total_size(self): + """ Test e._total_size """ + e = _ArrayMemoryError((1,), np.dtype(np.uint8)) + assert e._total_size == 1 + + e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16))) + assert e._total_size == 1024 diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index 75a794369..702e68e76 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -262,11 +262,6 @@ class TestArray2String(object): assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == '[abcabc defdef]') - # check for backcompat that using FloatFormat works and emits warning - with assert_warns(DeprecationWarning): - fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False) - assert_equal(np.array2string(x, formatter={'float_kind': fmt}), - '[0. 1. 
2.]') def test_structure_format(self): dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index f60eab696..d2fbbae5b 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -419,6 +419,31 @@ class TestRecord(object): assert_raises(ValueError, np.dtype, {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)}) + def test_fieldless_views(self): + a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[], + 'itemsize':8}) + assert_raises(ValueError, a.view, np.dtype([])) + + d = np.dtype((np.dtype([]), 10)) + assert_equal(d.shape, (10,)) + assert_equal(d.itemsize, 0) + assert_equal(d.base, np.dtype([])) + + arr = np.fromiter((() for i in range(10)), []) + assert_equal(arr.dtype, np.dtype([])) + assert_raises(ValueError, np.frombuffer, b'', dtype=[]) + assert_equal(np.frombuffer(b'', dtype=[], count=2), + np.empty(2, dtype=[])) + + assert_raises(ValueError, np.dtype, ([], 'f8')) + assert_raises(ValueError, np.zeros(1, dtype='i4').view, []) + + assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]), + np.ones(2, dtype=bool)) + + assert_equal(np.zeros((1, 2), dtype=[]) == a, + np.ones((1, 2), dtype=bool)) + class TestSubarray(object): def test_single_subarray(self): @@ -938,13 +963,6 @@ class TestDtypeAttributes(object): new_dtype = np.dtype(dtype.descr) assert_equal(new_dtype.itemsize, 16) - @pytest.mark.parametrize('t', np.typeDict.values()) - def test_name_builtin(self, t): - name = t.__name__ - if name.endswith('_'): - name = name[:-1] - assert_equal(np.dtype(t).name, name) - def test_name_dtype_subclass(self): # Ticket #4357 class user_def_subcls(np.void): diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 0a61a74cf..58572f268 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -44,7 +44,7 @@ from numpy.testing import ( assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal, assert_array_equal, assert_raises_regex, assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring, - temppath, suppress_warnings, break_cycles, assert_raises_regex, + temppath, suppress_warnings, break_cycles, ) from numpy.core.tests._locales import CommaDecimalPointLocale @@ -497,9 +497,6 @@ class TestArrayConstruction(object): assert_(np.ascontiguousarray(d).flags.c_contiguous) assert_(np.asfortranarray(d).flags.f_contiguous) - def test_ragged(self): - assert_raises_regex(ValueError, 'ragged', - np.array, [[1], [2, 3]], dtype=int) class TestAssignment(object): def test_assignment_broadcasting(self): @@ -4590,18 +4587,26 @@ class TestTake(object): assert_equal(y, np.array([1, 2, 3])) class TestLexsort(object): - def test_basic(self): - a = [1, 2, 1, 3, 1, 5] - b = [0, 4, 5, 6, 2, 3] + @pytest.mark.parametrize('dtype',[ + np.uint8, np.uint16, np.uint32, np.uint64, + np.int8, np.int16, np.int32, np.int64, + np.float16, np.float32, np.float64 + ]) + def test_basic(self, dtype): + a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype) + b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype) idx = np.lexsort((b, a)) expected_idx = np.array([0, 4, 2, 1, 3, 5]) assert_array_equal(idx, expected_idx) + assert_array_equal(a[idx], np.sort(a)) - x = np.vstack((b, a)) - idx = np.lexsort(x) - assert_array_equal(idx, expected_idx) + def test_mixed(self): + a = np.array([1, 2, 1, 3, 1, 5]) + b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]') - assert_array_equal(x[1][idx], 
np.sort(x[1])) + idx = np.lexsort((b, a)) + expected_idx = np.array([0, 4, 2, 1, 3, 5]) + assert_array_equal(idx, expected_idx) def test_datetime(self): a = np.array([0,0,0], dtype='datetime64[D]') @@ -6272,6 +6277,23 @@ class TestMatmul(MatmulCommon): with assert_raises(TypeError): b = np.matmul(a, a) + def test_matmul_bool(self): + # gh-14439 + a = np.array([[1, 0],[1, 1]], dtype=bool) + assert np.max(a.view(np.uint8)) == 1 + b = np.matmul(a, a) + # matmul with boolean output should always be 0, 1 + assert np.max(b.view(np.uint8)) == 1 + + rg = np.random.default_rng(np.random.PCG64(43)) + d = rg.integers(2, size=4*5, dtype=np.int8) + d = d.reshape(4, 5) > 0 + out1 = np.matmul(d, d.reshape(5, 4)) + out2 = np.dot(d, d.reshape(5, 4)) + assert_equal(out1, out2) + + c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool)) + assert not np.any(c) if sys.version_info[:2] >= (3, 5): diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index c479a0f6d..1358b45e9 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -2583,6 +2583,30 @@ class TestConvolve(object): class TestArgwhere(object): + + @pytest.mark.parametrize('nd', [0, 1, 2]) + def test_nd(self, nd): + # get an nd array with multiple elements in every dimension + x = np.empty((2,)*nd, bool) + + # none + x[...] = False + assert_equal(np.argwhere(x).shape, (0, nd)) + + # only one + x[...] = False + x.flat[0] = True + assert_equal(np.argwhere(x).shape, (1, nd)) + + # all but one + x[...] = True + x.flat[0] = False + assert_equal(np.argwhere(x).shape, (x.size - 1, nd)) + + # all + x[...] = True + assert_equal(np.argwhere(x).shape, (x.size, nd)) + def test_2D(self): x = np.arange(6).reshape((2, 3)) assert_array_equal(np.argwhere(x > 1), diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py index d0ff5578a..387740e35 100644 --- a/numpy/core/tests/test_numerictypes.py +++ b/numpy/core/tests/test_numerictypes.py @@ -498,3 +498,32 @@ class TestDocStrings(object): assert_('int64' in np.int_.__doc__) elif np.int64 is np.longlong: assert_('int64' in np.longlong.__doc__) + + +class TestScalarTypeNames: + # gh-9799 + + numeric_types = [ + np.byte, np.short, np.intc, np.int_, np.longlong, + np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong, + np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble, + ] + + def test_names_are_unique(self): + # none of the above may be aliases for each other + assert len(set(self.numeric_types)) == len(self.numeric_types) + + # names must be unique + names = [t.__name__ for t in self.numeric_types] + assert len(set(names)) == len(names) + + @pytest.mark.parametrize('t', numeric_types) + def test_names_reflect_attributes(self, t): + """ Test that names correspond to where the type is under ``np.`` """ + assert getattr(np, t.__name__) is t + + @pytest.mark.parametrize('t', numeric_types) + def test_names_are_understood_by_dtype(self, t): + """ Test that the dtype constructor maps names back to the type """ + assert np.dtype(t.__name__).type is t diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py index 14413224e..c1b794145 100644 --- a/numpy/core/tests/test_records.py +++ b/numpy/core/tests/test_records.py @@ -444,6 +444,48 @@ class TestRecord(object): ] arr = np.rec.fromarrays(arrays) # ValueError?
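+    # Illustrative background for the tests added below (editorial sketch):
+    # viewing a structured array as np.recarray promotes its dtype to the
+    # np.record subclass, which is what the nfields-parametrized tests
+    # assert, e.g.:
+    #     data = np.zeros(3, np.dtype([('a', np.uint8)])).view(np.recarray)
+    #     assert data.dtype.type is np.record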
+ @pytest.mark.parametrize('nfields', [0, 1, 2]) + def test_assign_dtype_attribute(self, nfields): + dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields]) + data = np.zeros(3, dt).view(np.recarray) + + # the original and resulting dtypes differ on whether they are records + assert data.dtype.type == np.record + assert dt.type != np.record + + # ensure that the dtype remains a record even when assigned + data.dtype = dt + assert data.dtype.type == np.record + + @pytest.mark.parametrize('nfields', [0, 1, 2]) + def test_nested_fields_are_records(self, nfields): + """ Test that nested structured types are treated as records too """ + dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields]) + dt_outer = np.dtype([('inner', dt)]) + + data = np.zeros(3, dt_outer).view(np.recarray) + assert isinstance(data, np.recarray) + assert isinstance(data['inner'], np.recarray) + + data0 = data[0] + assert isinstance(data0, np.record) + assert isinstance(data0['inner'], np.record) + + def test_nested_dtype_padding(self): + """ test that trailing padding is preserved """ + # construct a dtype with padding at the end + dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)]) + dt_padded_end = dt[['a', 'b']] + assert dt_padded_end.itemsize == dt.itemsize + + dt_outer = np.dtype([('inner', dt_padded_end)]) + + data = np.zeros(3, dt_outer).view(np.recarray) + assert_equal(data['inner'].dtype, dt_padded_end) + + data0 = data[0] + assert_equal(data0['inner'].dtype, dt_padded_end) + def test_find_duplicate(): l1 = [1, 2, 3, 4, 5, 6] diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index ca5b82e6f..9dc231deb 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -436,6 +436,32 @@ class TestRegression(object): assert_raises(KeyError, np.lexsort, BuggySequence()) + def test_lexsort_zerolen_custom_strides(self): + # Ticket #14228 + xs = np.array([], dtype='i8') + assert xs.strides == (8,) + assert np.lexsort((xs,)).shape[0] == 0 # Works + + xs.strides = (16,) + assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError + + def test_lexsort_zerolen_custom_strides_2d(self): + xs = np.array([], dtype='i8') + + xs.shape = (0, 2) + xs.strides = (16, 16) + assert np.lexsort((xs,), axis=0).shape[0] == 0 + + xs.shape = (2, 0) + xs.strides = (16, 16) + assert np.lexsort((xs,), axis=0).shape[0] == 2 + + def test_lexsort_zerolen_element(self): + dt = np.dtype([]) # a void dtype with no fields + xs = np.empty(4, dt) + + assert np.lexsort((xs,)).shape[0] == xs.shape[0] + def test_pickle_py2_bytes_encoding(self): # Check that arrays and scalars pickled on Py2 are # unpickleable on Py3 using encoding='bytes' @@ -468,7 +494,7 @@ class TestRegression(object): result = pickle.loads(data, encoding='bytes') assert_equal(result, original) - if isinstance(result, np.ndarray) and result.dtype.names: + if isinstance(result, np.ndarray) and result.dtype.names is not None: for name in result.dtype.names: assert_(isinstance(name, str)) @@ -2475,3 +2501,13 @@ class TestRegression(object): t = T() #gh-13659, would raise in broadcasting [x=t for x in result] np.array([t]) + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') + @pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8), + reason='overflows on windows, fixed in bpo-16865') + def test_to_ctypes(self): + #gh-14214 + arr = np.zeros((2 ** 31 + 1,), 'b') + assert arr.size * arr.itemsize > 2 ** 31 + c_arr = 
np.ctypeslib.as_ctypes(arr) + assert_equal(c_arr._length_, arr.size) diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index 1f842d003..58f3ef9d3 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -92,11 +92,11 @@ else: # Adapted from Albert Strasheim def load_library(libname, loader_path): """ - It is possible to load a library using + It is possible to load a library using >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP But there are cross-platform considerations, such as library file extensions, - plus the fact Windows will just load the first library it finds with that name. + plus the fact Windows will just load the first library it finds with that name. NumPy supplies the load_library function as a convenience. Parameters @@ -110,12 +110,12 @@ else: Returns ------- ctypes.cdll[libpath] : library object - A ctypes library object + A ctypes library object Raises ------ OSError - If there is no library with the expected extension, or the + If there is no library with the expected extension, or the library is defective and cannot be loaded. """ if ctypes.__version__ < '1.0.1': @@ -321,7 +321,7 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None): # produce a name for the new type if dtype is None: name = 'any' - elif dtype.names: + elif dtype.names is not None: name = str(id(dtype)) else: name = dtype.str @@ -535,7 +535,10 @@ if ctypes is not None: if readonly: raise TypeError("readonly arrays unsupported") - dtype = _dtype((ai["typestr"], ai["shape"])) - result = as_ctypes_type(dtype).from_address(addr) + # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows + # dtype.itemsize (gh-14214) + ctype_scalar = as_ctypes_type(ai["typestr"]) + result_type = _ctype_ndarray(ctype_scalar, ai["shape"]) + result = result_type.from_address(addr) result.__keep = obj return result diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py index 3d7101582..b3e18b204 100644 --- a/numpy/distutils/command/build.py +++ b/numpy/distutils/command/build.py @@ -38,7 +38,7 @@ class build(old_build): raise ValueError("--parallel/-j argument must be an integer") build_scripts = self.build_scripts old_build.finalize_options(self) - plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) + plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) if build_scripts is None: self.build_scripts = os.path.join(self.build_base, 'scripts' + plat_specifier) diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py index 41bb01da5..e183b2090 100644 --- a/numpy/distutils/command/build_src.py +++ b/numpy/distutils/command/build_src.py @@ -90,7 +90,7 @@ class build_src(build_ext.build_ext): self.data_files = self.distribution.data_files or [] if self.build_src is None: - plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) + plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) # py_modules_dict is used in build_py.find_package_modules diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py index 73a5e98e1..bb362d483 100644 --- a/numpy/distutils/fcompiler/environment.py +++ b/numpy/distutils/fcompiler/environment.py @@ -59,17 +59,13 @@ class EnvironmentConfig(object): if envvar_contents is not None: envvar_contents = convert(envvar_contents) if var and append: - if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1': + if 
os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': var.extend(envvar_contents) else: + # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 + # to keep old (overwrite flags rather than append to + # them) behavior var = envvar_contents - if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys(): - msg = "{} is used as is, not appended ".format(envvar) + \ - "to flags already defined " + \ - "by numpy.distutils! Use NPY_DISTUTILS_APPEND_FLAGS=1 " + \ - "to obtain appending behavior instead (this " + \ - "behavior will become default in a future release)." - warnings.warn(msg, UserWarning, stacklevel=3) else: var = envvar_contents if confvar is not None and self._conf: diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index 89171eede..1e10e92fd 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -859,7 +859,7 @@ class Configuration(object): print(message) def warn(self, message): - sys.stderr.write('Warning: %s' % (message,)) + sys.stderr.write('Warning: %s\n' % (message,)) def set_options(self, **options): """ diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py index ba19a97ea..6d245fbd4 100644 --- a/numpy/distutils/tests/test_fcompiler.py +++ b/numpy/distutils/tests/test_fcompiler.py @@ -45,37 +45,3 @@ def test_fcompiler_flags(monkeypatch): else: assert_(new_flags == prev_flags + [new_flag]) - -def test_fcompiler_flags_append_warning(monkeypatch): - # Test to check that the warning for append behavior changing in future - # is triggered. Need to use a real compiler instance so that we have - # non-empty flags to start with (otherwise the "if var and append" check - # will always be false). - try: - with suppress_warnings() as sup: - sup.record() - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - fc.customize() - except numpy.distutils.fcompiler.CompilerNotFound: - pytest.skip("gfortran not found, so can't execute this test") - - # Ensure NPY_DISTUTILS_APPEND_FLAGS not defined - monkeypatch.delenv('NPY_DISTUTILS_APPEND_FLAGS', raising=False) - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - with suppress_warnings() as sup: - sup.record() - prev_flags = getattr(fc.flag_vars, opt) - - monkeypatch.setenv(envvar, new_flag) - with suppress_warnings() as sup: - sup.record() - new_flags = getattr(fc.flag_vars, opt) - if prev_flags: - # Check that warning was issued - assert len(sup.log) == 1 - - monkeypatch.delenv(envvar) - assert_(new_flags == [new_flag]) - diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py index f7bd2515b..cb548a0d0 100644 --- a/numpy/doc/broadcasting.py +++ b/numpy/doc/broadcasting.py @@ -61,8 +61,7 @@ dimensions are compatible when If these conditions are not met, a ``ValueError: operands could not be broadcast together`` exception is thrown, indicating that the arrays have incompatible shapes. The size of -the resulting array is the maximum size along each dimension of the input -arrays. +the resulting array is the size that is not 1 along each axis of the inputs. Arrays do not need to have the same *number* of dimensions. For example, if you have a ``256x256x3`` array of RGB values, and you want to scale diff --git a/numpy/doc/dispatch.py b/numpy/doc/dispatch.py index 8db607131..c9029941b 100644 --- a/numpy/doc/dispatch.py +++ b/numpy/doc/dispatch.py @@ -223,7 +223,7 @@ calls ``numpy.sum(self)``, and the same for ``mean``. ... return arr._i * arr._N ... >>> @implements(np.mean) -... def sum(arr): +... 
def mean(arr): ... "Implementation of np.mean for DiagonalArray objects" ... return arr._i / arr._N ... diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py index 4b983893a..d0685328e 100644 --- a/numpy/doc/subclassing.py +++ b/numpy/doc/subclassing.py @@ -118,7 +118,8 @@ For example, consider the following Python code: def __new__(cls, *args): print('Cls in __new__:', cls) print('Args in __new__:', args) - return object.__new__(cls, *args) + # The `object` type __new__ method takes a single argument. + return object.__new__(cls) def __init__(self, *args): print('type(self) in __init__:', type(self)) diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py index 64b35bc19..fe95d8b17 100644 --- a/numpy/fft/__init__.py +++ b/numpy/fft/__init__.py @@ -1,9 +1,191 @@ -from __future__ import division, absolute_import, print_function +""" +Discrete Fourier Transform (:mod:`numpy.fft`) +============================================= + +.. currentmodule:: numpy.fft + +Standard FFTs +------------- + +.. autosummary:: + :toctree: generated/ + + fft Discrete Fourier transform. + ifft Inverse discrete Fourier transform. + fft2 Discrete Fourier transform in two dimensions. + ifft2 Inverse discrete Fourier transform in two dimensions. + fftn Discrete Fourier transform in N-dimensions. + ifftn Inverse discrete Fourier transform in N dimensions. + +Real FFTs +--------- + +.. autosummary:: + :toctree: generated/ + + rfft Real discrete Fourier transform. + irfft Inverse real discrete Fourier transform. + rfft2 Real discrete Fourier transform in two dimensions. + irfft2 Inverse real discrete Fourier transform in two dimensions. + rfftn Real discrete Fourier transform in N dimensions. + irfftn Inverse real discrete Fourier transform in N dimensions. + +Hermitian FFTs +-------------- + +.. autosummary:: + :toctree: generated/ + + hfft Hermitian discrete Fourier transform. + ihfft Inverse Hermitian discrete Fourier transform. + +Helper routines +--------------- + +.. autosummary:: + :toctree: generated/ + + fftfreq Discrete Fourier Transform sample frequencies. + rfftfreq DFT sample frequencies (for usage with rfft, irfft). + fftshift Shift zero-frequency component to center of spectrum. + ifftshift Inverse of fftshift. + + +Background information +---------------------- + +Fourier analysis is fundamentally a method for expressing a function as a +sum of periodic components, and for recovering the function from those +components. When both the function and its Fourier transform are +replaced with discretized counterparts, it is called the discrete Fourier +transform (DFT). The DFT has become a mainstay of numerical computing in +part because of a very fast algorithm for computing it, called the Fast +Fourier Transform (FFT), which was known to Gauss (1805) and was brought +to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ +provide an accessible introduction to Fourier analysis and its +applications. + +Because the discrete Fourier transform separates its input into +components that contribute at discrete frequencies, it has a great number +of applications in digital signal processing, e.g., for filtering, and in +this context the discretized input to the transform is customarily +referred to as a *signal*, which exists in the *time domain*. The output +is called a *spectrum* or *transform* and exists in the *frequency +domain*. + +Implementation details +---------------------- + +There are many ways to define the DFT, varying in the sign of the +exponent, normalization, etc. 
In this implementation, the DFT is defined +as + +.. math:: + A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} + \\qquad k = 0,\\ldots,n-1. + +The DFT is in general defined for complex inputs and outputs, and a +single-frequency component at linear frequency :math:`f` is +represented by a complex exponential +:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` +is the sampling interval. -# To get sub-modules -from .info import __doc__ +The values in the result follow so-called "standard" order: If ``A = +fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of +the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` +contains the positive-frequency terms, and ``A[n/2+1:]`` contains the +negative-frequency terms, in order of decreasingly negative frequency. +For an even number of input points, ``A[n/2]`` represents both positive and +negative Nyquist frequency, and is also purely real for real input. For +an odd number of input points, ``A[(n-1)/2]`` contains the largest positive +frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. +The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies +of corresponding elements in the output. The routine +``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the +zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes +that shift. + +When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` +is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. +The phase spectrum is obtained by ``np.angle(A)``. + +The inverse DFT is defined as + +.. math:: + a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} + \\qquad m = 0,\\ldots,n-1. + +It differs from the forward transform by the sign of the exponential +argument and the default normalization by :math:`1/n`. + +Normalization +------------- +The default normalization has the direct transforms unscaled and the inverse +transforms scaled by :math:`1/n`. It is possible to obtain unitary +transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is +`None`) so that both direct and inverse transforms will be scaled by +:math:`1/\\sqrt{n}`. + +Real and Hermitian transforms +----------------------------- + +When the input is purely real, its transform is Hermitian, i.e., the +component at frequency :math:`f_k` is the complex conjugate of the +component at frequency :math:`-f_k`, which means that for real +inputs there is no information in the negative frequency components that +is not already available from the positive frequency components. +The family of `rfft` functions is +designed to operate on real inputs, and exploits this symmetry by +computing only the positive frequency components, up to and including the +Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex +output points. The inverses of this family assume the same symmetry of +their input, and for an output of ``n`` points use ``n/2+1`` input points. + +Correspondingly, when the spectrum is purely real, the signal is +Hermitian. The `hfft` family of functions exploits this symmetry by +using ``n/2+1`` complex points in the input (time) domain for ``n`` real +points in the frequency domain. + +In higher dimensions, FFTs are used, e.g., for image analysis and +filtering.
The computational efficiency of the FFT means that it can +also be a faster way to compute large convolutions, using the property +that a convolution in the time domain is equivalent to a point-by-point +multiplication in the frequency domain. + +Higher dimensions +----------------- + +In two dimensions, the DFT is defined as + +.. math:: + A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} + a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} + \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, + +which extends in the obvious way to higher dimensions, and the inverses +in higher dimensions also extend in the same way. + +References +---------- + +.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + +.. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P., + 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. + 12-13. Cambridge Univ. Press, Cambridge, UK. + +Examples +-------- + +For examples, see the various functions. + +""" + +from __future__ import division, absolute_import, print_function -from .pocketfft import * +from ._pocketfft import * from .helper import * from numpy._pytesttester import PytestTester diff --git a/numpy/fft/pocketfft.c b/numpy/fft/_pocketfft.c index 9d1218e6b..d75b9983c 100644 --- a/numpy/fft/pocketfft.c +++ b/numpy/fft/_pocketfft.c @@ -2362,7 +2362,7 @@ static struct PyMethodDef methods[] = { #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, - "pocketfft_internal", + "_pocketfft_internal", NULL, -1, methods, @@ -2376,11 +2376,11 @@ static struct PyModuleDef moduledef = { /* Initialization function for the module */ #if PY_MAJOR_VERSION >= 3 #define RETVAL(x) x -PyMODINIT_FUNC PyInit_pocketfft_internal(void) +PyMODINIT_FUNC PyInit__pocketfft_internal(void) #else #define RETVAL(x) PyMODINIT_FUNC -initpocketfft_internal(void) +init_pocketfft_internal(void) #endif { PyObject *m; @@ -2389,7 +2389,7 @@ initpocketfft_internal(void) #else static const char module_documentation[] = ""; - m = Py_InitModule4("_pocketfft_internal", methods, + m = Py_InitModule4("_pocketfft_internal", methods, module_documentation, (PyObject*)NULL,PYTHON_API_VERSION); #endif diff --git a/numpy/fft/pocketfft.py b/numpy/fft/_pocketfft.py index 77ea6e3ba..50720cda4 100644 --- a/numpy/fft/pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -35,7 +35,7 @@ __all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', import functools from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt -from . import pocketfft_internal as pfi +from . import _pocketfft_internal as pfi from numpy.core.multiarray import normalize_axis_index from numpy.core import overrides @@ -44,7 +44,11 @@ array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy.fft') -def _raw_fft(a, n, axis, is_real, is_forward, fct): +# `inv_norm` is a float by which the result of the transform needs to be +# divided. This replaces the original, more intuitive `fct` parameter to avoid +# divisions by zero (or, alternatively, additional checks) in the case of +# zero-length axes during its computation. +def _raw_fft(a, n, axis, is_real, is_forward, inv_norm): axis = normalize_axis_index(axis, a.ndim) if n is None: n = a.shape[axis] @@ -53,6 +57,8 @@ def _raw_fft(a, n, axis, is_real, is_forward, fct): raise ValueError("Invalid number of FFT data points (%d) specified."
% n) + fct = 1/inv_norm + if a.shape[axis] != n: s = list(a.shape) if s[axis] > n: @@ -176,10 +182,10 @@ def fft(a, n=None, axis=-1, norm=None): a = asarray(a) if n is None: n = a.shape[axis] - fct = 1 + inv_norm = 1 if norm is not None and _unitary(norm): - fct = 1 / sqrt(n) - output = _raw_fft(a, n, axis, False, True, fct) + inv_norm = sqrt(n) + output = _raw_fft(a, n, axis, False, True, inv_norm) return output @@ -272,10 +278,10 @@ def ifft(a, n=None, axis=-1, norm=None): if n is None: n = a.shape[axis] if norm is not None and _unitary(norm): - fct = 1/sqrt(max(n, 1)) + inv_norm = sqrt(max(n, 1)) else: - fct = 1/max(n, 1) - output = _raw_fft(a, n, axis, False, False, fct) + inv_norm = n + output = _raw_fft(a, n, axis, False, False, inv_norm) return output @@ -360,12 +366,12 @@ def rfft(a, n=None, axis=-1, norm=None): """ a = asarray(a) - fct = 1 + inv_norm = 1 if norm is not None and _unitary(norm): if n is None: n = a.shape[axis] - fct = 1/sqrt(n) - output = _raw_fft(a, n, axis, True, True, fct) + inv_norm = sqrt(n) + output = _raw_fft(a, n, axis, True, True, inv_norm) return output @@ -462,10 +468,10 @@ def irfft(a, n=None, axis=-1, norm=None): a = asarray(a) if n is None: n = (a.shape[axis] - 1) * 2 - fct = 1/n + inv_norm = n if norm is not None and _unitary(norm): - fct = 1/sqrt(n) - output = _raw_fft(a, n, axis, True, False, fct) + inv_norm = sqrt(n) + output = _raw_fft(a, n, axis, True, False, inv_norm) return output diff --git a/numpy/fft/info.py b/numpy/fft/info.py deleted file mode 100644 index cb6526b44..000000000 --- a/numpy/fft/info.py +++ /dev/null @@ -1,187 +0,0 @@ -""" -Discrete Fourier Transform (:mod:`numpy.fft`) -============================================= - -.. currentmodule:: numpy.fft - -Standard FFTs -------------- - -.. autosummary:: - :toctree: generated/ - - fft Discrete Fourier transform. - ifft Inverse discrete Fourier transform. - fft2 Discrete Fourier transform in two dimensions. - ifft2 Inverse discrete Fourier transform in two dimensions. - fftn Discrete Fourier transform in N-dimensions. - ifftn Inverse discrete Fourier transform in N dimensions. - -Real FFTs ---------- - -.. autosummary:: - :toctree: generated/ - - rfft Real discrete Fourier transform. - irfft Inverse real discrete Fourier transform. - rfft2 Real discrete Fourier transform in two dimensions. - irfft2 Inverse real discrete Fourier transform in two dimensions. - rfftn Real discrete Fourier transform in N dimensions. - irfftn Inverse real discrete Fourier transform in N dimensions. - -Hermitian FFTs --------------- - -.. autosummary:: - :toctree: generated/ - - hfft Hermitian discrete Fourier transform. - ihfft Inverse Hermitian discrete Fourier transform. - -Helper routines ---------------- - -.. autosummary:: - :toctree: generated/ - - fftfreq Discrete Fourier Transform sample frequencies. - rfftfreq DFT sample frequencies (for usage with rfft, irfft). - fftshift Shift zero-frequency component to center of spectrum. - ifftshift Inverse of fftshift. - - -Background information ----------------------- - -Fourier analysis is fundamentally a method for expressing a function as a -sum of periodic components, and for recovering the function from those -components. When both the function and its Fourier transform are -replaced with discretized counterparts, it is called the discrete Fourier -transform (DFT). 
The DFT has become a mainstay of numerical computing in -part because of a very fast algorithm for computing it, called the Fast -Fourier Transform (FFT), which was known to Gauss (1805) and was brought -to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ -provide an accessible introduction to Fourier analysis and its -applications. - -Because the discrete Fourier transform separates its input into -components that contribute at discrete frequencies, it has a great number -of applications in digital signal processing, e.g., for filtering, and in -this context the discretized input to the transform is customarily -referred to as a *signal*, which exists in the *time domain*. The output -is called a *spectrum* or *transform* and exists in the *frequency -domain*. - -Implementation details ----------------------- - -There are many ways to define the DFT, varying in the sign of the -exponent, normalization, etc. In this implementation, the DFT is defined -as - -.. math:: - A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} - \\qquad k = 0,\\ldots,n-1. - -The DFT is in general defined for complex inputs and outputs, and a -single-frequency component at linear frequency :math:`f` is -represented by a complex exponential -:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` -is the sampling interval. - -The values in the result follow so-called "standard" order: If ``A = -fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of -the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` -contains the positive-frequency terms, and ``A[n/2+1:]`` contains the -negative-frequency terms, in order of decreasingly negative frequency. -For an even number of input points, ``A[n/2]`` represents both positive and -negative Nyquist frequency, and is also purely real for real input. For -an odd number of input points, ``A[(n-1)/2]`` contains the largest positive -frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. -The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies -of corresponding elements in the output. The routine -``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the -zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes -that shift. - -When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` -is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. -The phase spectrum is obtained by ``np.angle(A)``. - -The inverse DFT is defined as - -.. math:: - a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} - \\qquad m = 0,\\ldots,n-1. - -It differs from the forward transform by the sign of the exponential -argument and the default normalization by :math:`1/n`. - -Normalization -------------- -The default normalization has the direct transforms unscaled and the inverse -transforms are scaled by :math:`1/n`. It is possible to obtain unitary -transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is -`None`) so that both direct and inverse transforms will be scaled by -:math:`1/\\sqrt{n}`. 
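The normalization conventions restated here (they appear both in the new ``numpy/fft/__init__.py`` docstring and in the removed ``info.py``) are easy to verify numerically; a standalone sketch using only the public ``numpy.fft`` API, not code from this patch::

    import numpy as np

    a = np.array([1.0, 2.0, 1.0, -1.0, 1.5, 1.0, 0.5, 2.0])
    A = np.fft.fft(a)                      # forward transform, unscaled by default
    assert np.isclose(A[0], a.sum())       # zero-frequency term is the sum of the signal
    assert np.allclose(np.fft.ifft(A), a)  # inverse transform applies the 1/n factor

    # norm="ortho" scales both directions by 1/sqrt(n), making the pair unitary
    assert np.allclose(np.fft.fft(a, norm="ortho"), A / np.sqrt(a.size))
    assert np.allclose(np.fft.ifft(np.fft.fft(a, norm="ortho"), norm="ortho"), a)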
- -Real and Hermitian transforms ------------------------------ - -When the input is purely real, its transform is Hermitian, i.e., the -component at frequency :math:`f_k` is the complex conjugate of the -component at frequency :math:`-f_k`, which means that for real -inputs there is no information in the negative frequency components that -is not already available from the positive frequency components. -The family of `rfft` functions is -designed to operate on real inputs, and exploits this symmetry by -computing only the positive frequency components, up to and including the -Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex -output points. The inverses of this family assumes the same symmetry of -its input, and for an output of ``n`` points uses ``n/2+1`` input points. - -Correspondingly, when the spectrum is purely real, the signal is -Hermitian. The `hfft` family of functions exploits this symmetry by -using ``n/2+1`` complex points in the input (time) domain for ``n`` real -points in the frequency domain. - -In higher dimensions, FFTs are used, e.g., for image analysis and -filtering. The computational efficiency of the FFT means that it can -also be a faster way to compute large convolutions, using the property -that a convolution in the time domain is equivalent to a point-by-point -multiplication in the frequency domain. - -Higher dimensions ------------------ - -In two dimensions, the DFT is defined as - -.. math:: - A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} - a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} - \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, - -which extends in the obvious way to higher dimensions, and the inverses -in higher dimensions also extend in the same way. - -References ----------- - -.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the - machine calculation of complex Fourier series," *Math. Comput.* - 19: 297-301. - -.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P., - 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. - 12-13. Cambridge Univ. Press, Cambridge, UK. - -Examples --------- - -For examples, see the various functions. 
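The Hermitian symmetry this section describes can be made concrete with a short check; again a standalone sketch against the public API, not part of the commit::

    import numpy as np

    a = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])  # real input, n = 6
    A = np.fft.fft(a)

    # Hermitian symmetry of the full transform: A[k] == conj(A[n-k])
    assert np.allclose(A[1:], np.conj(A[1:][::-1]))

    # rfft keeps only the non-redundant half: n//2 + 1 = 4 complex points
    R = np.fft.rfft(a)
    assert R.shape == (a.size // 2 + 1,)
    assert np.allclose(R, A[:a.size // 2 + 1])

    # irfft assumes the same symmetry and reconstructs all n real points
    assert np.allclose(np.fft.irfft(R, n=a.size), a)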
- -""" -from __future__ import division, absolute_import, print_function - -depends = ['core'] diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py index 6c3548b65..8c3a31557 100644 --- a/numpy/fft/setup.py +++ b/numpy/fft/setup.py @@ -8,8 +8,8 @@ def configuration(parent_package='',top_path=None): config.add_data_dir('tests') # Configure pocketfft_internal - config.add_extension('pocketfft_internal', - sources=['pocketfft.c'] + config.add_extension('_pocketfft_internal', + sources=['_pocketfft.c'] ) return config diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 0ebd39b8c..c392929fd 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -121,7 +121,7 @@ def has_nested_fields(ndtype): """ for name in ndtype.names or (): - if ndtype[name].names: + if ndtype[name].names is not None: return True return False @@ -931,28 +931,27 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt) ndtype = np.dtype(dict(formats=ndtype, names=names)) else: - nbtypes = len(ndtype) # Explicit names if names is not None: validate = NameValidator(**validationargs) if isinstance(names, basestring): names = names.split(",") # Simple dtype: repeat to match the nb of names - if nbtypes == 0: + if ndtype.names is None: formats = tuple([ndtype.type] * len(names)) names = validate(names, defaultfmt=defaultfmt) ndtype = np.dtype(list(zip(names, formats))) # Structured dtype: just validate the names as needed else: - ndtype.names = validate(names, nbfields=nbtypes, + ndtype.names = validate(names, nbfields=len(ndtype.names), defaultfmt=defaultfmt) # No implicit names - elif (nbtypes > 0): + elif ndtype.names is not None: validate = NameValidator(**validationargs) # Default initial names : should we change the format ? - if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and + if ((ndtype.names == tuple("f%i" % i for i in range(len(ndtype.names)))) and (defaultfmt != "f%i")): - ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt) + ndtype.names = validate([''] * len(ndtype.names), defaultfmt=defaultfmt) # Explicit initial names : just validate else: ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt) diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py index 62330e692..33e64708d 100644 --- a/numpy/lib/arraypad.py +++ b/numpy/lib/arraypad.py @@ -17,66 +17,6 @@ __all__ = ['pad'] # Private utility functions. -def _linear_ramp(ndim, axis, start, stop, size, reverse=False): - """ - Create a linear ramp of `size` in `axis` with `ndim`. - - This algorithm behaves like a vectorized version of `numpy.linspace`. - The resulting linear ramp is broadcastable to any array that matches the - ramp in `shape[axis]` and `ndim`. - - Parameters - ---------- - ndim : int - Number of dimensions of the resulting array. All dimensions except - the one specified by `axis` will have the size 1. - axis : int - The dimension that contains the linear ramp of `size`. - start : int or ndarray - The starting value(s) of the linear ramp. If given as an array, its - size must match `size`. - stop : int or ndarray - The stop value(s) (not included!) of the linear ramp. If given as an - array, its size must match `size`. - size : int - The number of elements in the linear ramp. If this argument is 0 the - dimensions of `ramp` will all be of length 1 except for the one given - by `axis` which will be 0. - reverse : bool - If False, increment in a positive fashion, otherwise decrement. 
- - Returns - ------- - ramp : ndarray - Output array of dtype np.float64 that in- or decrements along the given - `axis`. - - Examples - -------- - >>> _linear_ramp(ndim=2, axis=0, start=np.arange(3), stop=10, size=2) - array([[0. , 1. , 2. ], - [5. , 5.5, 6. ]]) - >>> _linear_ramp(ndim=3, axis=0, start=2, stop=0, size=0) - array([], shape=(0, 1, 1), dtype=float64) - """ - # Create initial ramp - ramp = np.arange(size, dtype=np.float64) - if reverse: - ramp = ramp[::-1] - - # Make sure, that ramp is broadcastable - init_shape = (1,) * axis + (size,) + (1,) * (ndim - axis - 1) - ramp = ramp.reshape(init_shape) - - if size != 0: - # And scale to given start and stop values - gain = (stop - start) / float(size) - ramp = ramp * gain - ramp += start - - return ramp - - def _round_if_needed(arr, dtype): """ Rounds arr inplace if destination dtype is integer. @@ -269,17 +209,25 @@ def _get_linear_ramps(padded, axis, width_pair, end_value_pair): """ edge_pair = _get_edges(padded, axis, width_pair) - left_ramp = _linear_ramp( - padded.ndim, axis, start=end_value_pair[0], stop=edge_pair[0], - size=width_pair[0], reverse=False + left_ramp = np.linspace( + start=end_value_pair[0], + stop=edge_pair[0].squeeze(axis), # Dimensions is replaced by linspace + num=width_pair[0], + endpoint=False, + dtype=padded.dtype, + axis=axis, ) - _round_if_needed(left_ramp, padded.dtype) - right_ramp = _linear_ramp( - padded.ndim, axis, start=end_value_pair[1], stop=edge_pair[1], - size=width_pair[1], reverse=True + right_ramp = np.linspace( + start=end_value_pair[1], + stop=edge_pair[1].squeeze(axis), # Dimension is replaced by linspace + num=width_pair[1], + endpoint=False, + dtype=padded.dtype, + axis=axis, ) - _round_if_needed(right_ramp, padded.dtype) + # Reverse linear space in appropriate dimension + right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] return left_ramp, right_ramp diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py index f3f4bc17e..2309f7e42 100644 --- a/numpy/lib/arraysetops.py +++ b/numpy/lib/arraysetops.py @@ -213,6 +213,7 @@ def unique(ar, return_index=False, return_inverse=False, ----- When an axis is specified the subarrays indexed by the axis are sorted. This is done by making the specified axis the first dimension of the array + (move the axis to the first dimension to keep the order of the other axes) and then flattening the subarrays in C order. 
The flattened subarrays are then viewed as a structured type with each element given a label, with the effect that we end up with a 1-D array of structured types that can be @@ -264,7 +265,7 @@ def unique(ar, return_index=False, return_inverse=False, # axis was specified and not None try: - ar = np.swapaxes(ar, axis, 0) + ar = np.moveaxis(ar, axis, 0) except np.AxisError: # this removes the "axis1" or "axis2" prefix from the error message raise np.AxisError(axis, ar.ndim) @@ -285,7 +286,7 @@ def unique(ar, return_index=False, return_inverse=False, def reshape_uniq(uniq): uniq = uniq.view(orig_dtype) uniq = uniq.reshape(-1, *orig_shape[1:]) - uniq = np.swapaxes(uniq, 0, axis) + uniq = np.moveaxis(uniq, 0, axis) return uniq output = _unique1d(consolidated, return_index, diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 9d380e67d..21532838b 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -316,14 +316,17 @@ def average(a, axis=None, weights=None, returned=False): The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. If `weights=None`, then all data in `a` are assumed to have a - weight equal to one. + weight equal to one. The 1-D calculation is:: + + avg = sum(a * weights) / sum(weights) + + The only constraint on `weights` is that `sum(weights)` must not be 0. returned : bool, optional Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) is returned, otherwise only the average is returned. If `weights=None`, `sum_of_weights` is equivalent to the number of elements over which the average is taken. - Returns ------- retval, [sum_of_weights] : array_type or double diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index 9a03d0b39..6cffab6ac 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -1443,7 +1443,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): the variance of the flattened array. dtype : data-type, optional Type to use in computing the variance. For arrays of integer type - the default is `float32`; for arrays of float types it is the same as + the default is `float64`; for arrays of float types it is the same as the array type. out : ndarray, optional Alternate output array in which to place the result. It must have diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index c45622edd..e57a6dd47 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -2180,7 +2180,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, outputmask = np.array(masks, dtype=mdtype) else: # Overwrite the initial dtype names if needed - if names and dtype.names: + if names and dtype.names is not None: dtype.names = names # Case 1. 
We have a structured type if len(dtype_flat) > 1: @@ -2230,7 +2230,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, # output = np.array(data, dtype) if usemask: - if dtype.names: + if dtype.names is not None: mdtype = [(_, bool) for _ in dtype.names] else: mdtype = bool diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index 6e257bb3f..40060b41a 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -72,7 +72,7 @@ def recursive_fill_fields(input, output): current = input[field] except ValueError: continue - if current.dtype.names: + if current.dtype.names is not None: recursive_fill_fields(current, output[field]) else: output[field][:len(current)] = current @@ -139,11 +139,11 @@ def get_names(adtype): names = adtype.names for name in names: current = adtype[name] - if current.names: + if current.names is not None: listnames.append((name, tuple(get_names(current)))) else: listnames.append(name) - return tuple(listnames) or None + return tuple(listnames) def get_names_flat(adtype): @@ -176,9 +176,9 @@ def get_names_flat(adtype): for name in names: listnames.append(name) current = adtype[name] - if current.names: + if current.names is not None: listnames.extend(get_names_flat(current)) - return tuple(listnames) or None + return tuple(listnames) def flatten_descr(ndtype): @@ -215,8 +215,8 @@ def _zip_dtype(seqarrays, flatten=False): else: for a in seqarrays: current = a.dtype - if current.names and len(current.names) <= 1: - # special case - dtypes of 0 or 1 field are flattened + if current.names is not None and len(current.names) == 1: + # special case - dtypes of 1 field are flattened newdtype.extend(_get_fieldspec(current)) else: newdtype.append(('', current)) @@ -268,7 +268,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,): names = adtype.names for name in names: current = adtype[name] - if current.names: + if current.names is not None: if lastname: parents[name] = [lastname, ] else: @@ -281,7 +281,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,): elif lastname: lastparent = [lastname, ] parents[name] = lastparent or [] - return parents or None + return parents def _izip_fields_flat(iterable): @@ -435,7 +435,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, if isinstance(seqarrays, (ndarray, np.void)): seqdtype = seqarrays.dtype # Make sure we have named fields - if not seqdtype.names: + if seqdtype.names is None: seqdtype = np.dtype([('', seqdtype)]) if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype: # Minimal processing needed: just make sure everythng's a-ok @@ -653,7 +653,7 @@ def rename_fields(base, namemapper): for name in ndtype.names: newname = namemapper.get(name, name) current = ndtype[name] - if current.names: + if current.names is not None: newdtype.append( (newname, _recursive_rename_fields(current, namemapper)) ) @@ -874,16 +874,35 @@ def _get_fields_and_offsets(dt, offset=0): scalar fields in the dtype "dt", including nested fields, in left to right order. 
""" + + # counts up elements in subarrays, including nested subarrays, and returns + # base dtype and count + def count_elem(dt): + count = 1 + while dt.shape != (): + for size in dt.shape: + count *= size + dt = dt.base + return dt, count + fields = [] for name in dt.names: field = dt.fields[name] - if field[0].names is None: - count = 1 - for size in field[0].shape: - count *= size - fields.append((field[0], count, field[1] + offset)) + f_dt, f_offset = field[0], field[1] + f_dt, n = count_elem(f_dt) + + if f_dt.names is None: + fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset)) else: - fields.extend(_get_fields_and_offsets(field[0], field[1] + offset)) + subfields = _get_fields_and_offsets(f_dt, f_offset + offset) + size = f_dt.itemsize + + for i in range(n): + if i == 0: + # optimization: avoid list comprehension if no subarray + fields.extend(subfields) + else: + fields.extend([(d, c, o + i*size) for d, c, o in subfields]) return fields @@ -948,6 +967,12 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): fields = _get_fields_and_offsets(arr.dtype) n_fields = len(fields) + if n_fields == 0 and dtype is None: + raise ValueError("arr has no fields. Unable to guess dtype") + elif n_fields == 0: + # too many bugs elsewhere for this to work now + raise NotImplementedError("arr with no fields is not supported") + dts, counts, offsets = zip(*fields) names = ['f{}'.format(n) for n in range(n_fields)] @@ -1039,6 +1064,9 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False, if arr.shape == (): raise ValueError('arr must have at least one dimension') n_elem = arr.shape[-1] + if n_elem == 0: + # too many bugs elsewhere for this to work now + raise NotImplementedError("last axis with size 0 is not supported") if dtype is None: if names is None: @@ -1051,7 +1079,11 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False, raise ValueError("don't supply both dtype and names") # sanity check of the input dtype fields = _get_fields_and_offsets(dtype) - dts, counts, offsets = zip(*fields) + if len(fields) == 0: + dts, counts, offsets = [], [], [] + else: + dts, counts, offsets = zip(*fields) + if n_elem != sum(counts): raise ValueError('The length of the last dimension of arr must ' 'be equal to the number of fields in dtype') diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index b6dd3b31c..65593dd29 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -2,7 +2,6 @@ """ from __future__ import division, absolute_import, print_function -from itertools import chain import pytest @@ -11,6 +10,12 @@ from numpy.testing import assert_array_equal, assert_allclose, assert_equal from numpy.lib.arraypad import _as_pairs +_numeric_dtypes = ( + np.sctypes["uint"] + + np.sctypes["int"] + + np.sctypes["float"] + + np.sctypes["complex"] +) _all_modes = { 'constant': {'constant_values': 0}, 'edge': {}, @@ -738,6 +743,24 @@ class TestLinearRamp(object): assert_equal(a[0, :], 0.) assert_equal(a[-1, :], 0.) + @pytest.mark.parametrize("dtype", _numeric_dtypes) + def test_negative_difference(self, dtype): + """ + Check correct behavior of unsigned dtypes if there is a negative + difference between the edge to pad and `end_values`. Check both cases + to be independent of implementation. Test behavior for all other dtypes + in case dtype casting interferes with complex dtypes. See gh-14191. 
+ """ + x = np.array([3], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=0) + expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype) + assert_equal(result, expected) + + x = np.array([0], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=3) + expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype) + assert_equal(result, expected) + class TestReflect(object): def test_check_simple(self): @@ -1330,13 +1353,7 @@ def test_memory_layout_persistence(mode): assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"] -@pytest.mark.parametrize("dtype", chain( - # Skip "other" dtypes as they are not supported by all modes - np.sctypes["int"], - np.sctypes["uint"], - np.sctypes["float"], - np.sctypes["complex"] -)) +@pytest.mark.parametrize("dtype", _numeric_dtypes) @pytest.mark.parametrize("mode", _all_modes.keys()) def test_dtype_persistence(dtype, mode): arr = np.zeros((3, 2, 1), dtype=dtype) diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index dd8a38248..fd21a7f76 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -600,8 +600,11 @@ class TestUnique(object): assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) msg = 'Unique with 3d array and axis=2 failed' - data3d = np.dstack([data] * 3) - result = data3d[..., :1] + data3d = np.array([[[1, 1], + [1, 0]], + [[0, 1], + [0, 0]]]).astype(dtype) + result = np.take(data3d, [1, 0], axis=2) assert_array_equal(unique(data3d, axis=2), result, msg) uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 407bb56bf..6ee17c830 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1565,6 +1565,13 @@ M 33 21.99 test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, converters=converters) + # nested but empty fields also aren't supported + ndtype = [('idx', int), ('code', object), ('nest', [])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + def test_userconverters_with_explicit_dtype(self): # Test user_converters w/ explicit (standard) dtype data = TextIO('skip,skip,2001-01-01,1.0,skip') diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index 0126ccaf8..0c839d486 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -115,6 +115,14 @@ class TestRecFunctions(object): test = get_names(ndtype) assert_equal(test, ('a', ('b', ('ba', 'bb')))) + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ()))) + + ndtype = np.dtype([]) + test = get_names(ndtype) + assert_equal(test, ()) + def test_get_names_flat(self): # Test get_names_flat ndtype = np.dtype([('A', '|S3'), ('B', float)]) @@ -125,6 +133,14 @@ class TestRecFunctions(object): test = get_names_flat(ndtype) assert_equal(test, ('a', 'b', 'ba', 'bb')) + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b')) + + ndtype = np.dtype([]) + test = get_names_flat(ndtype) + assert_equal(test, ()) + def test_get_fieldstructure(self): # Test get_fieldstructure @@ -147,6 +163,11 @@ class TestRecFunctions(object): 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} assert_equal(test, control) + # 0 fields + ndtype = np.dtype([]) + test = get_fieldstructure(ndtype) + assert_equal(test, {}) + def 
test_find_duplicates(self): # Test find_duplicates a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), @@ -248,7 +269,8 @@ class TestRecFunctions(object): # including uniform fields with subarrays unpacked d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]), (8, [9, 10], [[11, 12], [13, 14]])], - dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))]) + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2)))]) dd = structured_to_unstructured(d) ddd = unstructured_to_structured(dd, d.dtype) assert_(dd.base is d) @@ -262,6 +284,40 @@ class TestRecFunctions(object): assert_equal(res, np.zeros((10, 6), dtype=int)) + # test nested combinations of subarrays and structured arrays, gh-13333 + def subarray(dt, shape): + return np.dtype((dt, shape)) + + def structured(*dts): + return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)]) + + def inspect(dt, dtype=None): + arr = np.zeros((), dt) + ret = structured_to_unstructured(arr, dtype=dtype) + backarr = unstructured_to_structured(ret, dt) + return ret.shape, ret.dtype, backarr.dtype + + dt = structured(subarray(structured(np.int32, np.int32), 3)) + assert_equal(inspect(dt), ((6,), np.int32, dt)) + + dt = structured(subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((4,), np.int32, dt)) + + dt = structured(np.int32) + assert_equal(inspect(dt), ((1,), np.int32, dt)) + + dt = structured(np.int32, subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((5,), np.int32, dt)) + + dt = structured() + assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt)) + + # these currently don't work, but we may make it work in the future + assert_raises(NotImplementedError, structured_to_unstructured, + np.zeros(3, dt), dtype=np.int32) + assert_raises(NotImplementedError, unstructured_to_structured, + np.zeros((3,0), dtype=np.int32)) + def test_field_assignment_by_name(self): a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) newdt = [('b', 'f4'), ('c', 'u1')] diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index c7dbcc5f9..8bcbd8e86 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -1003,93 +1003,6 @@ def _getmembers(item): if hasattr(item, x)] return members -#----------------------------------------------------------------------------- - -# The following SafeEval class and company are adapted from Michael Spencer's -# ASPN Python Cookbook recipe: https://code.activestate.com/recipes/364469/ -# -# Accordingly it is mostly Copyright 2006 by Michael Spencer. -# The recipe, like most of the other ASPN Python Cookbook recipes was made -# available under the Python license. -# https://en.wikipedia.org/wiki/Python_License - -# It has been modified to: -# * handle unary -/+ -# * support True/False/None -# * raise SyntaxError instead of a custom exception. - -class SafeEval(object): - """ - Object to evaluate constant string expressions. - - This includes strings with lists, dicts and tuples using the abstract - syntax tree created by ``compiler.parse``. - - .. 
deprecated:: 1.10.0 - - See Also - -------- - safe_eval - - """ - def __init__(self): - # 2014-10-15, 1.10 - warnings.warn("SafeEval is deprecated in 1.10 and will be removed.", - DeprecationWarning, stacklevel=2) - - def visit(self, node): - cls = node.__class__ - meth = getattr(self, 'visit' + cls.__name__, self.default) - return meth(node) - - def default(self, node): - raise SyntaxError("Unsupported source construct: %s" - % node.__class__) - - def visitExpression(self, node): - return self.visit(node.body) - - def visitNum(self, node): - return node.n - - def visitStr(self, node): - return node.s - - def visitBytes(self, node): - return node.s - - def visitDict(self, node,**kw): - return dict([(self.visit(k), self.visit(v)) - for k, v in zip(node.keys, node.values)]) - - def visitTuple(self, node): - return tuple([self.visit(i) for i in node.elts]) - - def visitList(self, node): - return [self.visit(i) for i in node.elts] - - def visitUnaryOp(self, node): - import ast - if isinstance(node.op, ast.UAdd): - return +self.visit(node.operand) - elif isinstance(node.op, ast.USub): - return -self.visit(node.operand) - else: - raise SyntaxError("Unknown unary op: %r" % node.op) - - def visitName(self, node): - if node.id == 'False': - return False - elif node.id == 'True': - return True - elif node.id == 'None': - return None - else: - raise SyntaxError("Unknown name: %s" % node.id) - - def visitNameConstant(self, node): - return node.value - def safe_eval(source): """ diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 95b799f6d..bb3788c9a 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -59,14 +59,14 @@ __all__ = [ 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh', 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', - 'diff', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', + 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask', 'flatten_structured_array', 'floor', 'floor_divide', 'fmod', 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', - 'less', 'less_equal', 'load', 'loads', 'log', 'log10', 'log2', + 'less', 'less_equal', 'log', 'log10', 'log2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', 'masked_array', 'masked_equal', 'masked_greater', @@ -7886,93 +7886,6 @@ def _pickle_warn(method): stacklevel=3) -def dump(a, F): - """ - Pickle a masked array to a file. - - This is a wrapper around ``cPickle.dump``. - - Parameters - ---------- - a : MaskedArray - The array to be pickled. - F : str or file-like object - The file to pickle `a` to. If a string, the full path to the file. - - """ - _pickle_warn('dump') - if not hasattr(F, 'readline'): - with open(F, 'w') as F: - pickle.dump(a, F) - else: - pickle.dump(a, F) - - -def dumps(a): - """ - Return a string corresponding to the pickling of a masked array. - - This is a wrapper around ``cPickle.dumps``. - - Parameters - ---------- - a : MaskedArray - The array for which the string representation of the pickle is - returned. 
- - """ - _pickle_warn('dumps') - return pickle.dumps(a) - - -def load(F): - """ - Wrapper around ``cPickle.load`` which accepts either a file-like object - or a filename. - - Parameters - ---------- - F : str or file - The file or file name to load. - - See Also - -------- - dump : Pickle an array - - Notes - ----- - This is different from `numpy.load`, which does not use cPickle but loads - the NumPy binary .npy format. - - """ - _pickle_warn('load') - if not hasattr(F, 'readline'): - with open(F, 'r') as F: - return pickle.load(F) - else: - return pickle.load(F) - - -def loads(strg): - """ - Load a pickle from the current string. - - The result of ``cPickle.loads(strg)`` is returned. - - Parameters - ---------- - strg : str - The string to load. - - See Also - -------- - dumps : Return a string corresponding to the pickling of a masked array. - - """ - _pickle_warn('loads') - return pickle.loads(strg) - - def fromfile(file, dtype=float, count=-1, sep=''): raise NotImplementedError( "fromfile() not yet implemented for a MaskedArray.") diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 639b3dd1f..de1aa3af8 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -549,8 +549,11 @@ def average(a, axis=None, weights=None, returned=False): The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. If ``weights=None``, then all data in `a` are assumed to have a - weight equal to one. If `weights` is complex, the imaginary parts - are ignored. + weight equal to one. The 1-D calculation is:: + + avg = sum(a * weights) / sum(weights) + + The only constraint on `weights` is that `sum(weights)` must not be 0. returned : bool, optional Flag indicating whether a tuple ``(result, sum of weights)`` should be returned as output (True), or just the result (False). 
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index 931a7e8b9..826fb0f64 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -208,7 +208,7 @@ class MaskedRecords(MaskedArray, object): _localdict = ndarray.__getattribute__(self, '__dict__') _data = ndarray.view(self, _localdict['_baseclass']) obj = _data.getfield(*res) - if obj.dtype.fields: + if obj.dtype.names is not None: raise NotImplementedError("MaskedRecords is currently limited to" "simple records.") # Get some special attributes diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index a9059f522..35b24d1ab 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -426,10 +426,7 @@ def _vander2d(vander_f, x, y, deg): x, y, deg : See the ``<type>vander2d`` functions for more detail """ - degx, degy = [ - _deprecate_as_int(d, "degrees") - for d in deg - ] + degx, degy = deg x, y = np.array((x, y), copy=False) + 0.0 vx = vander_f(x, degx) @@ -449,10 +446,7 @@ def _vander3d(vander_f, x, y, z, deg): x, y, z, deg : See the ``<type>vander3d`` functions for more detail """ - degx, degy, degz = [ - _deprecate_as_int(d, "degrees") - for d in deg - ] + degx, degy, degz = deg x, y, z = np.array((x, y, z), copy=False) + 0.0 vx = vander_f(x, degx) diff --git a/numpy/random/bit_generator.pxd b/numpy/random/bit_generator.pxd index 79fe69275..984033f17 100644 --- a/numpy/random/bit_generator.pxd +++ b/numpy/random/bit_generator.pxd @@ -1,5 +1,5 @@ -from .common cimport bitgen_t +from .common cimport bitgen_t, uint32_t cimport numpy as np cdef class BitGenerator(): @@ -14,9 +14,9 @@ cdef class BitGenerator(): cdef class SeedSequence(): cdef readonly object entropy cdef readonly tuple spawn_key - cdef readonly int pool_size + cdef readonly uint32_t pool_size cdef readonly object pool - cdef readonly int n_children_spawned + cdef readonly uint32_t n_children_spawned cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer, np.ndarray[np.npy_uint32, ndim=1] entropy_array) diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index 6694e5e4d..eb608af6c 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -116,7 +116,7 @@ def _coerce_to_uint32_array(x): Examples -------- >>> import numpy as np - >>> from np.random.bit_generator import _coerce_to_uint32_array + >>> from numpy.random.bit_generator import _coerce_to_uint32_array >>> _coerce_to_uint32_array(12345) array([12345], dtype=uint32) >>> _coerce_to_uint32_array('12345') @@ -458,6 +458,8 @@ cdef class SeedSequence(): ------- seqs : list of `SeedSequence` s """ + cdef uint32_t i + seqs = [] for i in range(self.n_children_spawned, self.n_children_spawned + n_children): diff --git a/numpy/random/generator.pyx b/numpy/random/generator.pyx index c7432d8c1..26fd95129 100644 --- a/numpy/random/generator.pyx +++ b/numpy/random/generator.pyx @@ -3919,9 +3919,8 @@ cdef class Generator: permutation(x) Randomly permute a sequence, or return a permuted range. - If `x` is a multi-dimensional array, it is only shuffled along its - first index. + first index. Parameters ---------- @@ -3950,13 +3949,20 @@ cdef class Generator: [0, 1, 2], [3, 4, 5]]) + >>> rng.permutation("abc") + Traceback (most recent call last): + ... 
+ numpy.AxisError: x must be an integer or at least 1-dimensional """ + if isinstance(x, (int, np.integer)): arr = np.arange(x) self.shuffle(arr) return arr arr = np.asarray(x) + if arr.ndim < 1: + raise np.AxisError("x must be an integer or at least 1-dimensional") # shuffle has fast-path for 1-d if arr.ndim == 1: diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 46b6b3388..468703e38 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -83,8 +83,8 @@ cdef class RandomState: See Also -------- Generator - mt19937.MT19937 - Bit_Generators + MT19937 + :ref:`bit_generator` """ cdef public object _bit_generator @@ -3517,7 +3517,7 @@ cdef class RandomState: # Convert to int64, if necessary, to use int64 infrastructure ongood = ongood.astype(np.int64) onbad = onbad.astype(np.int64) - onbad = onbad.astype(np.int64) + onsample = onsample.astype(np.int64) out = discrete_broadcast_iii(&legacy_random_hypergeometric,&self._bitgen, size, self.lock, ongood, 'ngood', CONS_NON_NEGATIVE, onbad, 'nbad', CONS_NON_NEGATIVE, @@ -4134,6 +4134,7 @@ cdef class RandomState: out : ndarray Permuted sequence or array range. + Examples -------- >>> np.random.permutation(10) @@ -4149,12 +4150,15 @@ cdef class RandomState: [3, 4, 5]]) """ + if isinstance(x, (int, np.integer)): arr = np.arange(x) self.shuffle(arr) return arr arr = np.asarray(x) + if arr.ndim < 1: + raise IndexError("x must be an integer or at least 1-dimensional") # shuffle has fast-path for 1-d if arr.ndim == 1: diff --git a/numpy/random/setup.py b/numpy/random/setup.py index a1bf3b83c..a820d326e 100644 --- a/numpy/random/setup.py +++ b/numpy/random/setup.py @@ -49,8 +49,8 @@ def configuration(parent_package='', top_path=None): elif not is_msvc: # Some bit generators require c99 EXTRA_COMPILE_ARGS += ['-std=c99'] - INTEL_LIKE = any([val in k.lower() for k in platform.uname() - for val in ('x86', 'i686', 'i386', 'amd64')]) + INTEL_LIKE = any(arch in platform.machine() + for arch in ('x86', 'i686', 'i386', 'amd64')) if INTEL_LIKE: # Assumes GCC or GCC-like compiler EXTRA_COMPILE_ARGS += ['-msse2'] diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index a962fe84e..853d86fba 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -757,6 +757,19 @@ class TestRandomDist(object): arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T actual = random.permutation(arr_2d) assert_array_equal(actual, np.atleast_2d(desired).T) + + bad_x_str = "abcd" + assert_raises(np.AxisError, random.permutation, bad_x_str) + + bad_x_float = 1.2 + assert_raises(np.AxisError, random.permutation, bad_x_float) + + random = Generator(MT19937(self.seed)) + integer_val = 10 + desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6] + + actual = random.permutation(integer_val) + assert_array_equal(actual, desired) def test_beta(self): random = Generator(MT19937(self.seed)) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index 3b5a279a3..a0edc5c23 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -686,6 +686,21 @@ class TestRandomDist(object): actual = random.permutation(arr_2d) assert_array_equal(actual, np.atleast_2d(desired).T) + random.seed(self.seed) + bad_x_str = "abcd" + assert_raises(IndexError, random.permutation, bad_x_str) + + random.seed(self.seed) + bad_x_float = 1.2 + assert_raises(IndexError, random.permutation, bad_x_float) + + integer_val = 10 + 
desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2] + + random.seed(self.seed) + actual = random.permutation(integer_val) + assert_array_equal(actual, desired) + def test_beta(self): random.seed(self.seed) actual = random.beta(.1, .9, size=(3, 2)) diff --git a/numpy/testing/_private/parameterized.py b/numpy/testing/_private/parameterized.py index a5fa4fb5e..489d8e09a 100644 --- a/numpy/testing/_private/parameterized.py +++ b/numpy/testing/_private/parameterized.py @@ -45,11 +45,18 @@ except ImportError: from unittest import TestCase -PY3 = sys.version_info[0] == 3 PY2 = sys.version_info[0] == 2 -if PY3: +if PY2: + from types import InstanceType + lzip = zip + text_type = unicode + bytes_type = str + string_types = basestring, + def make_method(func, instance, type): + return MethodType(func, instance, type) +else: # Python 3 doesn't have an InstanceType, so just use a dummy type. class InstanceType(): pass @@ -61,14 +68,6 @@ if PY3: if instance is None: return func return MethodType(func, instance) -else: - from types import InstanceType - lzip = zip - text_type = unicode - bytes_type = str - string_types = basestring, - def make_method(func, instance, type): - return MethodType(func, instance, type) _param = namedtuple("param", "args kwargs") diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 7aa5ef033..8a31fcf15 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -21,7 +21,6 @@ import pprint from numpy.core import( intp, float32, empty, arange, array_repr, ndarray, isnat, array) -from numpy.lib.utils import deprecate if sys.version_info[0] >= 3: from io import StringIO @@ -33,7 +32,7 @@ __all__ = [ 'assert_array_equal', 'assert_array_less', 'assert_string_equal', 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', - 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure', + 'raises', 'rundocs', 'runstring', 'verbose', 'measure', 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', @@ -154,22 +153,6 @@ def gisinf(x): return st -@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. " - "Use numpy.random.rand instead.") -def rand(*args): - """Returns an array of random numbers with the given shape. - - This only uses the standard library, so it is useful for testing purposes. - """ - import random - from numpy.core import zeros, float64 - results = zeros(args, float64) - f = results.flat - for i in range(len(f)): - f[i] = random.random() - return results - - if os.name == 'nt': # Code "stolen" from enthought/debug/memusage.py def GetPerformanceAttributes(object, counter, instance=None, @@ -703,7 +686,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', precision=6, equal_nan=True, equal_inf=True): __tracebackhide__ = True # Hide traceback for py.test - from numpy.core import array, array2string, isnan, inf, bool_, errstate + from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_ x = array(x, copy=False, subok=True) y = array(y, copy=False, subok=True) @@ -805,17 +788,18 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, # np.ma.masked, which is falsy). 
if cond != True: n_mismatch = reduced.size - reduced.sum(dtype=intp) - percent_mismatch = 100 * n_mismatch / ox.size + n_elements = flagged.size if flagged.ndim != 0 else reduced.size + percent_mismatch = 100 * n_mismatch / n_elements remarks = [ 'Mismatched elements: {} / {} ({:.3g}%)'.format( - n_mismatch, ox.size, percent_mismatch)] + n_mismatch, n_elements, percent_mismatch)] with errstate(invalid='ignore', divide='ignore'): # ignore errors for non-numeric types with contextlib.suppress(TypeError): error = abs(x - y) - max_abs_error = error.max() - if error.dtype == 'object': + max_abs_error = max(error) + if getattr(error, 'dtype', object_) == object_: remarks.append('Max absolute difference: ' + str(max_abs_error)) else: @@ -824,8 +808,13 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, # note: this definition of relative error matches that one # used by assert_allclose (found in np.isclose) - max_rel_error = (error / abs(y)).max() - if error.dtype == 'object': + # Filter values where the divisor would be zero + nonzero = bool_(y != 0) + if all(~nonzero): + max_rel_error = array(inf) + else: + max_rel_error = max(error[nonzero] / abs(y[nonzero])) + if getattr(error, 'dtype', object_) == object_: remarks.append('Max relative difference: ' + str(max_rel_error)) else: diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 4f1b46d4f..44f93a693 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -564,6 +564,26 @@ class TestAlmostEqual(_GenericTest): assert_equal(msgs[4], 'Max absolute difference: 2') assert_equal(msgs[5], 'Max relative difference: inf') + def test_error_message_2(self): + """Check the message is formatted correctly when either x or y is a scalar.""" + x = 2 + y = np.ones(20) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 1.') + + y = 2 + x = np.ones(20) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 0.5') + def test_subclass_that_cannot_be_bool(self): # While we cannot guarantee testing functions will always work for # subclasses, the tests should ideally rely only on subclasses having @@ -881,6 +901,15 @@ class TestAssertAllclose(object): assert_array_less(a, b) assert_allclose(a, b) + def test_report_max_relative_error(self): + a = np.array([0, 1]) + b = np.array([0, 2]) + + with pytest.raises(AssertionError) as exc_info: + assert_allclose(a, b) + msg = str(exc_info.value) + assert_('Max relative difference: 0.5' in msg) + class TestArrayAlmostEqualNulp(object): diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 98f19e348..1e7d65b89 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -19,7 +19,7 @@ __all__ = [ 'assert_array_equal', 'assert_array_less', 'assert_string_equal', 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', - 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure', + 'raises', 'rundocs', 'runstring', 'verbose', 'measure', 'assert_', 'assert_array_almost_equal_nulp', 
'assert_raises_regex', 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 807c98652..df2fc4802 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -1,6 +1,7 @@ from __future__ import division, absolute_import, print_function import sys +import subprocess import numpy as np import pytest @@ -69,6 +70,28 @@ def test_numpy_namespace(): assert bad_results == whitelist +@pytest.mark.parametrize('name', ['testing', 'Tester']) +def test_import_lazy_import(name): + """Make sure we can actually access the modules we lazy load. + + While not exported as part of the public API, they were accessible. With the + use of __getattr__ and __dir__ this isn't always true: a badly implemented + lazy import can even end in infinite recursion. + + Running the attribute access in a subprocess is the only way found that + reliably forces such a failure to appear. + + We also test that the lazily imported modules are still present in dir(numpy). + + """ + exe = (sys.executable, '-c', "import numpy; numpy." + name) + result = subprocess.check_output(exe) + assert not result + + # Make sure they are still in the __dir__ + assert name in dir(np) + + def test_numpy_linalg(): bad_results = check_dir(np.linalg) assert bad_results == {}
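For reference, the divisor guard added to ``assert_array_compare`` further up reduces to the following standalone computation; an illustrative sketch, not code from the patch::

    import numpy as np

    x = np.array([0.0, 1.0])
    y = np.array([0.0, 2.0])
    error = np.abs(x - y)

    # Divide only where y is nonzero; report inf when no element qualifies
    nonzero = y != 0
    if not nonzero.any():
        max_rel_error = np.inf
    else:
        max_rel_error = (error[nonzero] / np.abs(y[nonzero])).max()

    print(max_rel_error)  # 0.5, the value asserted in test_report_max_relative_error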