Diffstat (limited to 'numpy/core/src')
23 files changed, 47 insertions, 54 deletions
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 6db6060c9..86a6b0137 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -77,7 +77,7 @@ PyArray_Size(PyObject *op)
  * Returns 0 on success, -1 on failure.
  */
 NPY_NO_EXPORT int
-PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
+PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
 {
     if (base == NULL) {
         PyErr_SetString(PyExc_ValueError,
@@ -92,7 +92,7 @@ PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
     if (PyArray_FailUnlessWriteable(base, "UPDATEIFCOPY base") < 0) {
         goto fail;
     }
-
+
     /*
      * Any writes to 'arr' will magicaly turn into writes to 'base', so we
      * should warn if necessary.
@@ -145,8 +145,8 @@ PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj)

     /*
      * Don't allow infinite chains of views, always set the base
-     * to the first owner of the data.
-     * That is, either the first object which isn't an array,
+     * to the first owner of the data.
+     * That is, either the first object which isn't an array,
      * or the first object which owns its own data.
      */

@@ -157,19 +157,19 @@ PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj)
         /* Propagate WARN_ON_WRITE through views. */
         if (PyArray_FLAGS(obj_arr) & NPY_ARRAY_WARN_ON_WRITE) {
             PyArray_ENABLEFLAGS(arr, NPY_ARRAY_WARN_ON_WRITE);
-        }
+        }

         /* If this array owns its own data, stop collapsing */
         if (PyArray_CHKFLAGS(obj_arr, NPY_ARRAY_OWNDATA)) {
             break;
-        }
+        }

         tmp = PyArray_BASE(obj_arr);
         /* If there's no base, stop collapsing */
         if (tmp == NULL) {
             break;
         }
-        /* Stop the collapse new base when the would not be of the same
+        /* Stop the collapse new base when the would not be of the same
          * type (i.e. different subclass).
          */
         if (Py_TYPE(tmp) != Py_TYPE(arr)) {
@@ -1478,7 +1478,7 @@ PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp offset,

     offset_bounds_from_strides(elsize, nd, dims, newstrides,
                                         &lower_offset, &upper_offset);
-
+
     if ((upper_offset > end) || (lower_offset < begin)) {
         return NPY_FALSE;
     }
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 4c374cc75..5a7a58b33 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -108,7 +108,7 @@ _array_find_python_scalar_type(PyObject *op)
         }
     }
     else if (PyLong_Check(op)) {
-        /* check to see if integer can fit into a longlong or ulonglong
+        /* check to see if integer can fit into a longlong or ulonglong
            and return that --- otherwise return object */
         if ((PyLong_AsLongLong(op) == -1) && PyErr_Occurred()) {
             PyErr_Clear();
@@ -117,14 +117,14 @@ _array_find_python_scalar_type(PyObject *op)
             return PyArray_DescrFromType(NPY_LONGLONG);
         }

-        if ((PyLong_AsUnsignedLongLong(op) == (unsigned long long) -1)
+        if ((PyLong_AsUnsignedLongLong(op) == (unsigned long long) -1)
             && PyErr_Occurred()){
             PyErr_Clear();
-        }
+        }
         else {
             return PyArray_DescrFromType(NPY_ULONGLONG);
-        }
-
+        }
+
         return PyArray_DescrFromType(NPY_OBJECT);
     }
     return NULL;
@@ -381,7 +381,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
     if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_FORMAT|PyBUF_STRIDES) == 0 ||
         PyObject_GetBuffer(obj, &buffer_view, PyBUF_FORMAT) == 0) {
-
+
         PyErr_Clear();
         dtype = _descriptor_from_pep3118_format(buffer_view.format);
         PyBuffer_Release(&buffer_view);
@@ -391,7 +391,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
     }
     else if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_STRIDES) == 0 ||
              PyObject_GetBuffer(obj, &buffer_view, PyBUF_SIMPLE) == 0) {
-
+
         PyErr_Clear();
         dtype = PyArray_DescrNewFromType(NPY_VOID);
         dtype->elsize = buffer_view.itemsize;
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index 82b66123c..9df644210 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -39,7 +39,7 @@ _array_find_python_scalar_type(PyObject *op);
 NPY_NO_EXPORT PyArray_Descr *
 _array_typedescr_fromstr(char *str);

-/*
+/*
  * Returns -1 and sets an exception if *index is an invalid index for
  * an array of size max_item, otherwise adjusts it in place to be
  * 0 <= *index < max_item, and returns 0.
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 8ce7bb74d..9450766ab 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -802,7 +802,7 @@ PyArray_PyIntAsIntp(PyObject *o)
         PyErr_Clear();
     }
-
+
     /*
      * For backward compatibility check the number C-Api number protcol
      * This should be removed up the finish label after deprecation.
      */
@@ -1127,7 +1127,7 @@ PyArray_TypestrConvert(int itemsize, int gentype)
         }
         break;
     }
-
+
     /*
      * Raise deprecate warning if new type hasn't been
      * set yet and size char is invalid.
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 19ba5d327..e327db024 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -738,9 +738,9 @@ can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data,
     /* An aligned memory buffer large enough to hold any type */
     npy_longlong value[4];
-    /*
-     * If the two dtypes are actually references to the same object
-     * or if casting type is forced unsafe then always OK.
+    /*
+     * If the two dtypes are actually references to the same object
+     * or if casting type is forced unsafe then always OK.
      */
     if (scal_type == to || casting == NPY_UNSAFE_CASTING ) {
         return 1;
     }
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 9e84d9a32..07de463f7 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -1805,8 +1805,8 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)

     oldtype = PyArray_DESCR(arr);
     if (newtype == NULL) {
-        /*
-         * Check if object is of array with Null newtype.
+        /*
+         * Check if object is of array with Null newtype.
          * If so return it directly instead of checking for casting.
          */
         if (flags == 0) {
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 3923709ed..aeff950c6 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -4245,4 +4245,3 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape,
     *out_ndim = ndim;
     return 0;
 }
-
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index 6e06103a8..7f56ddb03 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -100,7 +100,7 @@ PyArray_UpdateFlags(PyArrayObject *ret, int flagmask)
  *
  * strides[0] == itemsize
  * strides[i] == shape[i - 1] * strides[i - 1]
- *
+ *
  * According to these rules, a 0- or 1-dimensional array is either both
  * C- and F-contiguous, or neither; and an array with 2+ dimensions
  * can be C- or F- contiguous, or neither, but not both. Though there
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 4fbdd5e54..b7fed6de7 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -975,8 +975,8 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val)
                         "Cannot delete iterator elements");
         return -1;
     }
-
-    if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0)
+
+    if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0)
         return -1;

     if (ind == Py_Ellipsis) {
@@ -1158,9 +1158,9 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
      *  2) underlying array is not contiguous
      *     -- make new 1-d contiguous array with updateifcopy flag set
      *        to copy back to the old array
-     *
+     *
      * If underlying array is readonly, then we make the output array readonly
-     * and updateifcopy does not apply.
+     * and updateifcopy does not apply.
      */
     size = PyArray_SIZE(it->ao);
     Py_INCREF(PyArray_DESCR(it->ao));
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 22a4d8c97..3cdde9afe 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -987,7 +987,7 @@ array_subscript_fancy(PyArrayObject *self, PyObject *op, int fancy)
     int oned;
     PyObject *other;
     PyArrayMapIterObject *mit;
-
+
     oned = ((PyArray_NDIM(self) == 1) &&
             !(PyTuple_Check(op) && PyTuple_GET_SIZE(op) > 1));

@@ -1030,7 +1030,7 @@ array_subscript_fromobject(PyArrayObject *self, PyObject *op)
 {
     int fancy;
     npy_intp vals[NPY_MAXDIMS];
-
+
     /* Integer index */
     if (PyArray_IsIntegerScalar(op)
             || (PyIndex_Check(op) && !PySequence_Check(op))) {
@@ -1212,7 +1212,7 @@ array_subscript(PyArrayObject *self, PyObject *op)
                         "0-dimensional arrays can't be indexed");
         return NULL;
     }
-
+
     else {
         fancy = fancy_indexing_check(op);
         if (fancy != SOBJ_NOTFANCY) {
@@ -1226,8 +1226,8 @@ array_subscript(PyArrayObject *self, PyObject *op)
     if (ret == NULL) {
         return NULL;
     }
-
-    if (PyArray_Check(ret) && PyArray_NDIM((PyArrayObject *)ret) == 0
+
+    if (PyArray_Check(ret) && PyArray_NDIM((PyArrayObject *)ret) == 0
             && !_check_ellipses(op)) {
         return PyArray_Return((PyArrayObject *)ret);
     }
@@ -2002,7 +2002,7 @@ PyArray_MapIterNew(PyObject *indexobj, int oned, int fancy)
      */

     /* convert all inputs to iterators */
-    if (PyArray_Check(indexobj) && PyArray_ISBOOL(indexobj)
+    if (PyArray_Check(indexobj) && PyArray_ISBOOL(indexobj)
             && !PyArray_IsZeroDim(indexobj)) {
         mit->numiter = _nonzero_indices(indexobj, mit->iters);
         if (mit->numiter < 0) {
@@ -2124,10 +2124,10 @@ PyArray_MapIterArray(PyArrayObject * a, PyObject * index)
     int fancy = fancy_indexing_check(index);

     /*
-     * MapIterNew supports a special mode that allows more efficient 1-d iteration,
-     * but clients that want to make use of this need to use a different API just
+     * MapIterNew supports a special mode that allows more efficient 1-d iteration,
+     * but clients that want to make use of this need to use a different API just
      * for the one-d cases. For the public interface this is confusing, so we
-     * unconditionally disable the 1-d optimized mode, and use the generic
+     * unconditionally disable the 1-d optimized mode, and use the generic
      * implementation in all cases.
      */
     int oned = 0;
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index e4858eef7..45e322973 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -1992,7 +1992,7 @@ array_dot(PyArrayObject *self, PyObject *args, PyObject *kwds)
     static PyObject *numpycore = NULL;
     char * kwords[] = {"b", "out", NULL };

-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwords, &b, &out)) {
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwords, &b, &out)) {
         return NULL;
     }
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index dd1f3c7d7..9fe406c72 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -488,7 +488,7 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
     for (iarrays = 0; iarrays < narrays; ++iarrays) {
         shape += sizes[iarrays] = PyArray_SIZE(arrays[iarrays]);
         /* Check for overflow */
-        if (shape < 0) {
+        if (shape < 0) {
             PyErr_SetString(PyExc_ValueError,
                             "total number of elements "
                             "too large to concatenate");
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index f9b35deed..f64b7e9f7 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -3051,7 +3051,7 @@ void_arrtype_hash(PyObject *obj)
     /* Cannot hash mutable void scalars */
     if (p->flags & NPY_ARRAY_WRITEABLE) {
         PyErr_SetString(PyExc_TypeError, "unhashable type: 'writeable void-scalar'");
-        return -1;
+        return -1;
     }
     len = voidtype_length(p);
     for (n=0; n < len; n++) {
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 67ee4b04b..41c670df4 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -361,7 +361,7 @@ _attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims,
     int oi, oj, ok, ni, nj, nk;

     oldnd = 0;
-    /*
+    /*
     * Remove axes with dimension 1 from the old array. They have no effect
      * but would need special cases since their strides do not matter.
      */
diff --git a/numpy/core/src/npymath/halffloat.c b/numpy/core/src/npymath/halffloat.c
index 883830795..34ac64287 100644
--- a/numpy/core/src/npymath/halffloat.c
+++ b/numpy/core/src/npymath/halffloat.c
@@ -527,4 +527,3 @@ npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h)
         return d_sgn + (((npy_uint64)(h&0x7fffu) + 0xfc000u) << 42);
     }
 }
-
diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src
index 90bbf5fb6..2aba8be8f 100644
--- a/numpy/core/src/npymath/ieee754.c.src
+++ b/numpy/core/src/npymath/ieee754.c.src
@@ -21,7 +21,7 @@ double npy_copysign(double x, double y)
 /* The below code is provided for compilers which do not yet provide C11
    compatibility (gcc 4.5 and older)
  */
-#ifndef LDBL_TRUE_MIN
+#ifndef LDBL_TRUE_MIN
 #define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
 #endif
@@ -678,4 +678,3 @@ void npy_set_floatstatus_invalid(void)
 }

 #endif
-
diff --git a/numpy/core/src/npysort/selection.c.src b/numpy/core/src/npysort/selection.c.src
index a41aa87f1..b11753367 100644
--- a/numpy/core/src/npysort/selection.c.src
+++ b/numpy/core/src/npysort/selection.c.src
@@ -1,7 +1,7 @@
 /* -*- c -*- */

 /*
- *
+ *
  * The code is loosely based on the quickselect from
  * Nicolas Devillard - 1998 public domain
  * http://ndevilla.free.fr/median/median/
diff --git a/numpy/core/src/private/npy_partition.h b/numpy/core/src/private/npy_partition.h
index 9e1c8494a..f2b684d3a 100644
--- a/numpy/core/src/private/npy_partition.h
+++ b/numpy/core/src/private/npy_partition.h
@@ -556,4 +556,3 @@ get_argpartition_func(int type, NPY_SELECTKIND which)
 }

 #endif
-
diff --git a/numpy/core/src/scalarmathmodule.c.src b/numpy/core/src/scalarmathmodule.c.src
index 307c7ddb0..d789a3dd4 100644
--- a/numpy/core/src/scalarmathmodule.c.src
+++ b/numpy/core/src/scalarmathmodule.c.src
@@ -500,7 +500,7 @@ half_ctype_remainder(npy_half a, npy_half b, npy_half *out) {
 static npy_@name@ (*_basic_@name@_pow)(@type@ a, @type@ b);
 static void
-@name@_ctype_power(@type@ a, @type@ b, @type@ *out)
+@name@_ctype_power(@type@ a, @type@ b, @type@ *out)
 {
     *out = _basic_@name@_pow(a, b);
 }
diff --git a/numpy/core/src/umath/loops.h b/numpy/core/src/umath/loops.h
index 7f49f1956..62890f370 100644
--- a/numpy/core/src/umath/loops.h
+++ b/numpy/core/src/umath/loops.h
@@ -2726,4 +2726,3 @@ OBJECT_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
  */

 #endif
-
diff --git a/numpy/core/src/umath/operand_flag_tests.c.src b/numpy/core/src/umath/operand_flag_tests.c.src
index 7eaf1fbfc..4fb428bfc 100644
--- a/numpy/core/src/umath/operand_flag_tests.c.src
+++ b/numpy/core/src/umath/operand_flag_tests.c.src
@@ -79,7 +79,7 @@ PyMODINIT_FUNC initoperand_flag_tests(void)
                                     PyUFunc_None, "inplace_add",
                                     "inplace_add_docstring", 0);

-    /*
+    /*
     * Set flags to turn off buffering for first input operand,
      * so that result can be written back to input operand.
      */
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 3f2b94a4a..bb4c0f44e 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -250,7 +250,7 @@ check_nonreorderable_axes(int ndim, npy_bool *axis_flags, const char *funcname)
  * PyArray_AssignOne(result, NULL, NULL), because this function raises an
  * exception when there are no elements to reduce (which appropriate iff the
  * reduction operation has no identity).
- *
+ *
  * This means it copies the subarray indexed at zero along each reduction axis
  * into 'result', then returns a view into 'operand' excluding those copied
  * elements.
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 6a16692b0..36dbf6569 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -1613,7 +1613,7 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
     for (i = 0; i < nop; ++i) {
         int type_num;
-
+
         /* no more ufunc arguments to check */
         if (op[i] == NULL) {
             break;
         }
@@ -2129,5 +2129,3 @@ type_tuple_type_resolver(PyUFuncObject *self,

     return -1;
 }
-
-
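The arrayobject.c hunks above only clean up whitespace in the comments of PyArray_SetBaseObject(), which describe how chains of views are collapsed so that an array's base always ends up being the first owner of the data. As a rough standalone illustration of that behaviour (not part of this commit; names, build setup, and the omission of error checks are all assumptions of the sketch), the following C program builds a view of a view through the NumPy C API and checks that the stored base skips the intermediate view:

/*
 * Hypothetical sketch, not from the commit above: demonstrates the
 * view-chain collapse documented in PyArray_SetBaseObject().
 * Return-value checks are omitted for brevity.
 */
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>
#include <stdio.h>

int main(void)
{
    Py_Initialize();
    if (_import_array() < 0) {      /* non-macro form of import_array() */
        PyErr_Print();
        return 1;
    }

    npy_intp dims[1] = {10};

    /* 'a' allocates and owns its buffer (NPY_ARRAY_OWNDATA is set). */
    PyArrayObject *a = (PyArrayObject *)PyArray_SimpleNew(1, dims, NPY_DOUBLE);

    /* 'b' merely wraps a's buffer, so it does not own its data. */
    PyArrayObject *b = (PyArrayObject *)PyArray_SimpleNewFromData(
            1, dims, NPY_DOUBLE, PyArray_DATA(a));
    Py_INCREF(a);                   /* PyArray_SetBaseObject steals a reference */
    PyArray_SetBaseObject(b, (PyObject *)a);

    /* 'c' wraps the same buffer and is handed the view 'b' as its base... */
    PyArrayObject *c = (PyArrayObject *)PyArray_SimpleNewFromData(
            1, dims, NPY_DOUBLE, PyArray_DATA(b));
    Py_INCREF(b);
    PyArray_SetBaseObject(c, (PyObject *)b);

    /* ...but the chain is collapsed to the first owner of the data,
     * so c's base ends up being 'a', not the intermediate view 'b'. */
    printf("c's base is a: %s\n",
           PyArray_BASE(c) == (PyObject *)a ? "yes" : "no");

    Py_DECREF(c);
    Py_DECREF(b);
    Py_DECREF(a);
    Py_Finalize();
    return 0;
}

Built as an embedded-Python program against the NumPy headers, this should print "c's base is a: yes", which is the behaviour the edited comment block promises ("always set the base to the first owner of the data").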