summaryrefslogtreecommitdiff
path: root/numpy/core/include/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy/core/include/numpy')
-rw-r--r--numpy/core/include/numpy/_dtype_api.h408
-rw-r--r--numpy/core/include/numpy/_neighborhood_iterator_imp.h10
-rw-r--r--numpy/core/include/numpy/_numpyconfig.h.in32
-rw-r--r--numpy/core/include/numpy/arrayscalars.h4
-rw-r--r--numpy/core/include/numpy/experimental_dtype_api.h163
-rw-r--r--numpy/core/include/numpy/ndarrayobject.h4
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h77
-rw-r--r--numpy/core/include/numpy/npy_3kcompat.h86
-rw-r--r--numpy/core/include/numpy/npy_common.h61
-rw-r--r--numpy/core/include/numpy/npy_cpu.h2
-rw-r--r--numpy/core/include/numpy/npy_math.h70
-rw-r--r--numpy/core/include/numpy/numpyconfig.h93
-rw-r--r--numpy/core/include/numpy/random/distributions.h2
-rw-r--r--numpy/core/include/numpy/ufuncobject.h6
14 files changed, 654 insertions, 364 deletions
diff --git a/numpy/core/include/numpy/_dtype_api.h b/numpy/core/include/numpy/_dtype_api.h
new file mode 100644
index 000000000..c397699c7
--- /dev/null
+++ b/numpy/core/include/numpy/_dtype_api.h
@@ -0,0 +1,408 @@
+/*
+ * DType related API shared by the (experimental) public API And internal API.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_
+#define NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_
+
+#define __EXPERIMENTAL_DTYPE_API_VERSION 10
+
+struct PyArrayMethodObject_tag;
+
+/*
+ * Largely opaque struct for DType classes (i.e. metaclass instances).
+ * The internal definition is currently in `ndarraytypes.h` (export is a bit
+ * more complex because `PyArray_Descr` is a DTypeMeta internally but not
+ * externally).
+ */
+#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
+
+ typedef struct PyArray_DTypeMeta_tag {
+ PyHeapTypeObject super;
+
+ /*
+ * Most DTypes will have a singleton default instance, for the
+ * parametric legacy DTypes (bytes, string, void, datetime) this
+ * may be a pointer to the *prototype* instance?
+ */
+ PyArray_Descr *singleton;
+ /* Copy of the legacy DTypes type number, usually invalid. */
+ int type_num;
+
+ /* The type object of the scalar instances (may be NULL?) */
+ PyTypeObject *scalar_type;
+ /*
+ * DType flags to signal legacy, parametric, or
+ * abstract. But plenty of space for additional information/flags.
+ */
+ npy_uint64 flags;
+
+ /*
+ * Use indirection in order to allow a fixed size for this struct.
+ * A stable ABI size makes creating a static DType less painful
+ * while also ensuring flexibility for all opaque API (with one
+ * indirection due to the pointer lookup).
+ */
+ void *dt_slots;
+ /* Allow growing (at the moment also beyond this) */
+ void *reserved[3];
+ } PyArray_DTypeMeta;
+
+#endif /* not internal build */
+
+/*
+ * ******************************************************
+ * ArrayMethod API (Casting and UFuncs)
+ * ******************************************************
+ */
+/*
+ * NOTE: Expected changes:
+ * * probably split runtime and general flags into two
+ * * should possibly not use an enum for typedef for more stable ABI?
+ */
+typedef enum {
+ /* Flag for whether the GIL is required */
+ NPY_METH_REQUIRES_PYAPI = 1 << 0,
+ /*
+ * Some functions cannot set floating point error flags, this flag
+ * gives us the option (not requirement) to skip floating point error
+ * setup/check. No function should set error flags and ignore them
+ * since it would interfere with chaining operations (e.g. casting).
+ */
+ NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 1,
+ /* Whether the method supports unaligned access (not runtime) */
+ NPY_METH_SUPPORTS_UNALIGNED = 1 << 2,
+ /*
+ * Used for reductions to allow reordering the operation. At this point
+ * assume that if set, it also applies to normal operations though!
+ */
+ NPY_METH_IS_REORDERABLE = 1 << 3,
+ /*
+ * Private flag for now for *logic* functions. The logical functions
+ * `logical_or` and `logical_and` can always cast the inputs to booleans
+ * "safely" (because that is how the cast to bool is defined).
+ * @seberg: I am not sure this is the best way to handle this, so it's
+ * private for now (also it is very limited anyway).
+ * There is one "exception". NA aware dtypes cannot cast to bool
+ * (hopefully), so the `??->?` loop should error even with this flag.
+ * But a second NA fallback loop will be necessary.
+ */
+ _NPY_METH_FORCE_CAST_INPUTS = 1 << 17,
+
+ /* All flags which can change at runtime */
+ NPY_METH_RUNTIME_FLAGS = (
+ NPY_METH_REQUIRES_PYAPI |
+ NPY_METH_NO_FLOATINGPOINT_ERRORS),
+} NPY_ARRAYMETHOD_FLAGS;
+
+
+typedef struct PyArrayMethod_Context_tag {
+ /* The caller, which is typically the original ufunc. May be NULL */
+ PyObject *caller;
+ /* The method "self". Publicly currently an opaque object. */
+ struct PyArrayMethodObject_tag *method;
+
+ /* Operand descriptors, filled in by resolve_descriptors */
+ PyArray_Descr **descriptors;
+ /* Structure may grow (this is harmless for DType authors) */
+} PyArrayMethod_Context;
+
+
+/*
+ * The main object for creating a new ArrayMethod. We use the typical `slots`
+ * mechanism used by the Python limited API (see below for the slot defs).
+ */
+typedef struct {
+ const char *name;
+ int nin, nout;
+ NPY_CASTING casting;
+ NPY_ARRAYMETHOD_FLAGS flags;
+ PyArray_DTypeMeta **dtypes;
+ PyType_Slot *slots;
+} PyArrayMethod_Spec;
+
+
+/*
+ * ArrayMethod slots
+ * -----------------
+ *
+ * SLOTS IDs For the ArrayMethod creation, once fully public, IDs are fixed
+ * but can be deprecated and arbitrarily extended.
+ */
+#define NPY_METH_resolve_descriptors 1
+/* We may want to adapt the `get_loop` signature a bit: */
+#define _NPY_METH_get_loop 2
+#define NPY_METH_get_reduction_initial 3
+/* specific loops for constructions/default get_loop: */
+#define NPY_METH_strided_loop 4
+#define NPY_METH_contiguous_loop 5
+#define NPY_METH_unaligned_strided_loop 6
+#define NPY_METH_unaligned_contiguous_loop 7
+#define NPY_METH_contiguous_indexed_loop 8
+
+/*
+ * The resolve descriptors function, must be able to handle NULL values for
+ * all output (but not input) `given_descrs` and fill `loop_descrs`.
+ * Return -1 on error or 0 if the operation is not possible without an error
+ * set. (This may still be in flux.)
+ * Otherwise must return the "casting safety", for normal functions, this is
+ * almost always "safe" (or even "equivalent"?).
+ *
+ * `resolve_descriptors` is optional if all output DTypes are non-parametric.
+ */
+typedef NPY_CASTING (resolve_descriptors_function)(
+ /* "method" is currently opaque (necessary e.g. to wrap Python) */
+ struct PyArrayMethodObject_tag *method,
+ /* DTypes the method was created for */
+ PyArray_DTypeMeta **dtypes,
+ /* Input descriptors (instances). Outputs may be NULL. */
+ PyArray_Descr **given_descrs,
+ /* Exact loop descriptors to use, must not hold references on error */
+ PyArray_Descr **loop_descrs,
+ npy_intp *view_offset);
+
+
+typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context,
+ char *const *data, const npy_intp *dimensions, const npy_intp *strides,
+ NpyAuxData *transferdata);
+
+
+typedef int (get_loop_function)(
+ PyArrayMethod_Context *context,
+ int aligned, int move_references,
+ const npy_intp *strides,
+ PyArrayMethod_StridedLoop **out_loop,
+ NpyAuxData **out_transferdata,
+ NPY_ARRAYMETHOD_FLAGS *flags);
+
+/**
+ * Query an ArrayMethod for the initial value for use in reduction.
+ *
+ * @param context The arraymethod context, mainly to access the descriptors.
+ * @param reduction_is_empty Whether the reduction is empty. When it is, the
+ * value returned may differ. In this case it is a "default" value that
+ * may differ from the "identity" value normally used. For example:
+ * - `0.0` is the default for `sum([])`. But `-0.0` is the correct
+ * identity otherwise as it preserves the sign for `sum([-0.0])`.
+ * - We use no identity for object, but return the default of `0` and `1`
+ * for the empty `sum([], dtype=object)` and `prod([], dtype=object)`.
+ * This allows `np.sum(np.array(["a", "b"], dtype=object))` to work.
+ * - `-inf` or `INT_MIN` for `max` is an identity, but at least `INT_MIN`
+ * not a good *default* when there are no items.
+ * @param initial Pointer to initial data to be filled (if possible)
+ *
+ * @returns -1, 0, or 1 indicating error, no initial value, and initial being
+ * successfully filled. Errors must not be given where 0 is correct, NumPy
+ * may call this even when not strictly necessary.
+ */
+typedef int (get_reduction_initial_function)(
+ PyArrayMethod_Context *context, npy_bool reduction_is_empty,
+ char *initial);
+
+/*
+ * The following functions are only used by the wrapping array method defined
+ * in umath/wrapping_array_method.c
+ */
+
+/*
+ * The function to convert the given descriptors (passed in to
+ * `resolve_descriptors`) and translate them for the wrapped loop.
+ * The new descriptors MUST be viewable with the old ones, `NULL` must be
+ * supported (for outputs) and should normally be forwarded.
+ *
+ * The function must clean up on error.
+ *
+ * NOTE: We currently assume that this translation gives "viewable" results.
+ * I.e. there is no additional casting related to the wrapping process.
+ * In principle that could be supported, but not sure it is useful.
+ * This currently also means that e.g. alignment must apply identically
+ * to the new dtypes.
+ *
+ * TODO: Due to the fact that `resolve_descriptors` is also used for `can_cast`
+ * there is no way to "pass out" the result of this function. This means
+ * it will be called twice for every ufunc call.
+ * (I am considering including `auxdata` as an "optional" parameter to
+ * `resolve_descriptors`, so that it can be filled there if not NULL.)
+ */
+typedef int translate_given_descrs_func(int nin, int nout,
+ PyArray_DTypeMeta *wrapped_dtypes[],
+ PyArray_Descr *given_descrs[], PyArray_Descr *new_descrs[]);
+
+/**
+ * The function to convert the actual loop descriptors (as returned by the
+ * original `resolve_descriptors` function) to the ones the output array
+ * should use.
+ * This function must return "viewable" types, it must not mutate them in any
+ * form that would break the inner-loop logic. Does not need to support NULL.
+ *
+ * The function must clean up on error.
+ *
+ * @param nin, nout Number of input and output arguments
+ * @param new_dtypes The DTypes of the output (usually probably not needed)
+ * @param given_descrs Original given_descrs to the resolver, necessary to
+ * fetch any information related to the new dtypes from the original.
+ * @param original_descrs The `loop_descrs` returned by the wrapped loop.
+ * @param loop_descrs The output descriptors, compatible to `original_descrs`.
+ *
+ * @returns 0 on success, -1 on failure.
+ */
+typedef int translate_loop_descrs_func(int nin, int nout,
+ PyArray_DTypeMeta *new_dtypes[], PyArray_Descr *given_descrs[],
+ PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]);
+
+
+/*
+ * A traverse loop working on a single array. This is similar to the general
+ * strided-loop function. This is designed for loops that need to visit every
+ * element of a single array.
+ *
+ * Currently this is used for array clearing, via the NPY_DT_get_clear_loop
+ * API hook, and zero-filling, via the NPY_DT_get_fill_zero_loop API hook.
+ * These are most useful for handling arrays storing embedded references to
+ * python objects or heap-allocated data.
+ *
+ * The `void *traverse_context` is passed in because we may need to pass in
+ * interpreter state or similar in the future, but we don't want to pass in
+ * a full context (with pointers to dtypes, method, caller which all make
+ * no sense for a traverse function).
+ *
+ * We assume for now that this context can be just passed through in
+ * the future (for structured dtypes).
+ *
+ */
+typedef int (traverse_loop_function)(
+ void *traverse_context, PyArray_Descr *descr, char *data,
+ npy_intp size, npy_intp stride, NpyAuxData *auxdata);
+
+
+/*
+ * Simplified get_loop function specific to dtype traversal
+ *
+ * It should set the flags needed for the traversal loop and set out_loop to the
+ * loop function, which must be a valid traverse_loop_function
+ * pointer. Currently this is used for zero-filling and clearing arrays storing
+ * embedded references.
+ *
+ */
+typedef int (get_traverse_loop_function)(
+ void *traverse_context, PyArray_Descr *descr,
+ int aligned, npy_intp fixed_stride,
+ traverse_loop_function **out_loop, NpyAuxData **out_auxdata,
+ NPY_ARRAYMETHOD_FLAGS *flags);
+
+
+/*
+ * ****************************
+ * DTYPE API
+ * ****************************
+ */
+
+#define NPY_DT_ABSTRACT 1 << 1
+#define NPY_DT_PARAMETRIC 1 << 2
+#define NPY_DT_NUMERIC 1 << 3
+
+/*
+ * These correspond to slots in the NPY_DType_Slots struct and must
+ * be in the same order as the members of that struct. If new slots
+ * get added or old slots get removed NPY_NUM_DTYPE_SLOTS must also
+ * be updated
+ */
+
+#define NPY_DT_discover_descr_from_pyobject 1
+// this slot is considered private because its API hasn't been decided
+#define _NPY_DT_is_known_scalar_type 2
+#define NPY_DT_default_descr 3
+#define NPY_DT_common_dtype 4
+#define NPY_DT_common_instance 5
+#define NPY_DT_ensure_canonical 6
+#define NPY_DT_setitem 7
+#define NPY_DT_getitem 8
+#define NPY_DT_get_clear_loop 9
+#define NPY_DT_get_fill_zero_loop 10
+
+// These PyArray_ArrFunc slots will be deprecated and replaced eventually
+// getitem and setitem can be defined as a performance optimization;
+// by default the user dtypes call `legacy_getitem_using_DType` and
+// `legacy_setitem_using_DType`, respectively. This functionality is
+// only supported for basic NumPy DTypes.
+
+
+// used to separate dtype slots from arrfuncs slots
+// intended only for internal use but defined here for clarity
+#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10)
+
+// Cast is disabled
+// #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET
+
+#define NPY_DT_PyArray_ArrFuncs_getitem 1 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_setitem 2 + _NPY_DT_ARRFUNCS_OFFSET
+
+#define NPY_DT_PyArray_ArrFuncs_copyswapn 3 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_copyswap 4 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_compare 5 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_argmax 6 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_dotfunc 7 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_scanfunc 8 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_fromstr 9 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_nonzero 10 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_fill 11 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_fillwithscalar 12 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_sort 13 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_argsort 14 + _NPY_DT_ARRFUNCS_OFFSET
+
+// Casting related slots are disabled. See
+// https://github.com/numpy/numpy/pull/23173#discussion_r1101098163
+// #define NPY_DT_PyArray_ArrFuncs_castdict 15 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_scalarkind 16 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_cancastscalarkindto 17 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_cancastto 18 + _NPY_DT_ARRFUNCS_OFFSET
+
+// These are deprecated in NumPy 1.19, so are disabled here.
+// #define NPY_DT_PyArray_ArrFuncs_fastclip 19 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_fastputmask 20 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_fasttake 21 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_argmin 22 + _NPY_DT_ARRFUNCS_OFFSET
+
+// TODO: These slots probably still need some thought, and/or a way to "grow"?
+typedef struct {
+ PyTypeObject *typeobj; /* type of python scalar or NULL */
+ int flags; /* flags, including parametric and abstract */
+ /* NULL terminated cast definitions. Use NULL for the newly created DType */
+ PyArrayMethod_Spec **casts;
+ PyType_Slot *slots;
+ /* Baseclass or NULL (will always subclass `np.dtype`) */
+ PyTypeObject *baseclass;
+} PyArrayDTypeMeta_Spec;
+
+
+typedef PyArray_Descr *(discover_descr_from_pyobject_function)(
+ PyArray_DTypeMeta *cls, PyObject *obj);
+
+/*
+ * Before making this public, we should decide whether it should pass
+ * the type, or allow looking at the object. A possible use-case:
+ * `np.array(np.array([0]), dtype=np.ndarray)`
+ * Could consider arrays that are not `dtype=ndarray` "scalars".
+ */
+typedef int (is_known_scalar_type_function)(
+ PyArray_DTypeMeta *cls, PyTypeObject *obj);
+
+typedef PyArray_Descr *(default_descr_function)(PyArray_DTypeMeta *cls);
+typedef PyArray_DTypeMeta *(common_dtype_function)(
+ PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2);
+typedef PyArray_Descr *(common_instance_function)(
+ PyArray_Descr *dtype1, PyArray_Descr *dtype2);
+typedef PyArray_Descr *(ensure_canonical_function)(PyArray_Descr *dtype);
+
+/*
+ * TODO: These two functions are currently only used for experimental DType
+ * API support. Their relation should be "reversed": NumPy should
+ * always use them internally.
+ * There are open points about "casting safety" though, e.g. setting
+ * elements is currently always unsafe.
+ */
+typedef int(setitemfunction)(PyArray_Descr *, PyObject *, char *);
+typedef PyObject *(getitemfunction)(PyArray_Descr *, char *);
+
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */
diff --git a/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/numpy/core/include/numpy/_neighborhood_iterator_imp.h
index 07e2363d0..b365cb508 100644
--- a/numpy/core/include/numpy/_neighborhood_iterator_imp.h
+++ b/numpy/core/include/numpy/_neighborhood_iterator_imp.h
@@ -4,7 +4,7 @@
/*
* Private API (here for inline)
*/
-static NPY_INLINE int
+static inline int
_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
/*
@@ -34,7 +34,7 @@ _PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
iter->coordinates[c] = iter->bounds[c][0]; \
}
-static NPY_INLINE int
+static inline int
_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
{
npy_intp i, wb;
@@ -49,7 +49,7 @@ _PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
/*
* Version optimized for 2d arrays, manual loop unrolling
*/
-static NPY_INLINE int
+static inline int
_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
{
npy_intp wb;
@@ -64,7 +64,7 @@ _PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
/*
* Advance to the next neighbour
*/
-static NPY_INLINE int
+static inline int
PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
{
_PyArrayNeighborhoodIter_IncrCoord (iter);
@@ -76,7 +76,7 @@ PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
/*
* Reset functions
*/
-static NPY_INLINE int
+static inline int
PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
{
npy_intp i;
diff --git a/numpy/core/include/numpy/_numpyconfig.h.in b/numpy/core/include/numpy/_numpyconfig.h.in
new file mode 100644
index 000000000..a21820026
--- /dev/null
+++ b/numpy/core/include/numpy/_numpyconfig.h.in
@@ -0,0 +1,32 @@
+#mesondefine NPY_HAVE_ENDIAN_H
+
+#mesondefine NPY_SIZEOF_SHORT
+#mesondefine NPY_SIZEOF_INT
+#mesondefine NPY_SIZEOF_LONG
+#mesondefine NPY_SIZEOF_FLOAT
+#mesondefine NPY_SIZEOF_COMPLEX_FLOAT
+#mesondefine NPY_SIZEOF_DOUBLE
+#mesondefine NPY_SIZEOF_COMPLEX_DOUBLE
+#mesondefine NPY_SIZEOF_LONGDOUBLE
+#mesondefine NPY_SIZEOF_COMPLEX_LONGDOUBLE
+#mesondefine NPY_SIZEOF_PY_INTPTR_T
+#mesondefine NPY_SIZEOF_OFF_T
+#mesondefine NPY_SIZEOF_PY_LONG_LONG
+#mesondefine NPY_SIZEOF_LONGLONG
+
+#mesondefine NPY_USE_C99_COMPLEX
+#mesondefine NPY_HAVE_COMPLEX_DOUBLE
+#mesondefine NPY_HAVE_COMPLEX_FLOAT
+#mesondefine NPY_HAVE_COMPLEX_LONG_DOUBLE
+#mesondefine NPY_USE_C99_FORMATS
+
+#mesondefine NPY_NO_SIGNAL
+#mesondefine NPY_NO_SMP
+
+#mesondefine NPY_VISIBILITY_HIDDEN
+#mesondefine NPY_ABI_VERSION
+#mesondefine NPY_API_VERSION
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS 1
+#endif
diff --git a/numpy/core/include/numpy/arrayscalars.h b/numpy/core/include/numpy/arrayscalars.h
index a20a68016..258bf95b6 100644
--- a/numpy/core/include/numpy/arrayscalars.h
+++ b/numpy/core/include/numpy/arrayscalars.h
@@ -139,7 +139,9 @@ typedef struct {
/* note that the PyObject_HEAD macro lives right here */
PyUnicodeObject base;
Py_UCS4 *obval;
+ #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION
char *buffer_fmt;
+ #endif
} PyUnicodeScalarObject;
@@ -149,7 +151,9 @@ typedef struct {
PyArray_Descr *descr;
int flags;
PyObject *base;
+ #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION
void *_buffer_info; /* private buffer info, tagged to allow warning */
+ #endif
} PyVoidScalarObject;
/* Macros
diff --git a/numpy/core/include/numpy/experimental_dtype_api.h b/numpy/core/include/numpy/experimental_dtype_api.h
index 1fbd41981..120d33324 100644
--- a/numpy/core/include/numpy/experimental_dtype_api.h
+++ b/numpy/core/include/numpy/experimental_dtype_api.h
@@ -3,6 +3,9 @@
* NEPs 41 to 43. For background, please check these NEPs. Otherwise,
* this header also serves as documentation for the time being.
*
+ * The header includes `_dtype_api.h` which holds most definition while this
+ * header mainly wraps functions for public consumption.
+ *
* Please do not hesitate to contact @seberg with questions. This is
* developed together with https://github.com/seberg/experimental_user_dtypes
* and those interested in experimenting are encouraged to contribute there.
@@ -114,7 +117,13 @@
#include <Python.h>
#include "ndarraytypes.h"
+#include "_dtype_api.h"
+/*
+ * The contents of PyArrayMethodObject are currently opaque (is there a way
+ * good way to make them be `PyObject *`?)
+ */
+typedef struct PyArrayMethodObject_tag PyArrayMethodObject;
/*
* There must be a better way?! -- Oh well, this is experimental
@@ -154,66 +163,6 @@
#endif
-/*
- * DTypeMeta struct, the content may be made fully opaque (except the size).
- * We may also move everything into a single `void *dt_slots`.
- */
-typedef struct {
- PyHeapTypeObject super;
- PyArray_Descr *singleton;
- int type_num;
- PyTypeObject *scalar_type;
- npy_uint64 flags;
- void *dt_slots;
- void *reserved[3];
-} PyArray_DTypeMeta;
-
-
-/*
- * ******************************************************
- * ArrayMethod API (Casting and UFuncs)
- * ******************************************************
- */
-/*
- * NOTE: Expected changes:
- * * invert logic of floating point error flag
- * * probably split runtime and general flags into two
- * * should possibly not use an enum for typedef for more stable ABI?
- */
-typedef enum {
- /* Flag for whether the GIL is required */
- NPY_METH_REQUIRES_PYAPI = 1 << 1,
- /*
- * Some functions cannot set floating point error flags, this flag
- * gives us the option (not requirement) to skip floating point error
- * setup/check. No function should set error flags and ignore them
- * since it would interfere with chaining operations (e.g. casting).
- */
- NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 2,
- /* Whether the method supports unaligned access (not runtime) */
- NPY_METH_SUPPORTS_UNALIGNED = 1 << 3,
-
- /* All flags which can change at runtime */
- NPY_METH_RUNTIME_FLAGS = (
- NPY_METH_REQUIRES_PYAPI |
- NPY_METH_NO_FLOATINGPOINT_ERRORS),
-} NPY_ARRAYMETHOD_FLAGS;
-
-
-/*
- * The main object for creating a new ArrayMethod. We use the typical `slots`
- * mechanism used by the Python limited API (see below for the slot defs).
- */
-typedef struct {
- const char *name;
- int nin, nout;
- NPY_CASTING casting;
- NPY_ARRAYMETHOD_FLAGS flags;
- PyArray_DTypeMeta **dtypes;
- PyType_Slot *slots;
-} PyArrayMethod_Spec;
-
-
typedef int _ufunc_addloop_fromspec_func(
PyObject *ufunc, PyArrayMethod_Spec *spec);
/*
@@ -267,92 +216,6 @@ typedef int _ufunc_addpromoter_func(
#define PyUFunc_AddPromoter \
(*(_ufunc_addpromoter_func *)(__experimental_dtype_api_table[1]))
-
-/*
- * The resolve descriptors function, must be able to handle NULL values for
- * all output (but not input) `given_descrs` and fill `loop_descrs`.
- * Return -1 on error or 0 if the operation is not possible without an error
- * set. (This may still be in flux.)
- * Otherwise must return the "casting safety", for normal functions, this is
- * almost always "safe" (or even "equivalent"?).
- *
- * `resolve_descriptors` is optional if all output DTypes are non-parametric.
- */
-#define NPY_METH_resolve_descriptors 1
-typedef NPY_CASTING (resolve_descriptors_function)(
- /* "method" is currently opaque (necessary e.g. to wrap Python) */
- PyObject *method,
- /* DTypes the method was created for */
- PyObject **dtypes,
- /* Input descriptors (instances). Outputs may be NULL. */
- PyArray_Descr **given_descrs,
- /* Exact loop descriptors to use, must not hold references on error */
- PyArray_Descr **loop_descrs,
- npy_intp *view_offset);
-
-/* NOT public yet: Signature needs adapting as external API. */
-#define _NPY_METH_get_loop 2
-
-/*
- * Current public API to define fast inner-loops. You must provide a
- * strided loop. If this is a cast between two "versions" of the same dtype
- * you must also provide an unaligned strided loop.
- * Other loops are useful to optimize the very common contiguous case.
- *
- * NOTE: As of now, NumPy will NOT use unaligned loops in ufuncs!
- */
-#define NPY_METH_strided_loop 3
-#define NPY_METH_contiguous_loop 4
-#define NPY_METH_unaligned_strided_loop 5
-#define NPY_METH_unaligned_contiguous_loop 6
-
-
-typedef struct {
- PyObject *caller; /* E.g. the original ufunc, may be NULL */
- PyObject *method; /* The method "self". Currently an opaque object */
-
- /* Operand descriptors, filled in by resolve_descriptors */
- PyArray_Descr **descriptors;
- /* Structure may grow (this is harmless for DType authors) */
-} PyArrayMethod_Context;
-
-typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context,
- char *const *data, const npy_intp *dimensions, const npy_intp *strides,
- NpyAuxData *transferdata);
-
-
-
-/*
- * ****************************
- * DTYPE API
- * ****************************
- */
-
-#define NPY_DT_ABSTRACT 1 << 1
-#define NPY_DT_PARAMETRIC 1 << 2
-
-#define NPY_DT_discover_descr_from_pyobject 1
-#define _NPY_DT_is_known_scalar_type 2
-#define NPY_DT_default_descr 3
-#define NPY_DT_common_dtype 4
-#define NPY_DT_common_instance 5
-#define NPY_DT_ensure_canonical 6
-#define NPY_DT_setitem 7
-#define NPY_DT_getitem 8
-
-
-// TODO: These slots probably still need some thought, and/or a way to "grow"?
-typedef struct{
- PyTypeObject *typeobj; /* type of python scalar or NULL */
- int flags; /* flags, including parametric and abstract */
- /* NULL terminated cast definitions. Use NULL for the newly created DType */
- PyArrayMethod_Spec **casts;
- PyType_Slot *slots;
- /* Baseclass or NULL (will always subclass `np.dtype`) */
- PyTypeObject *baseclass;
-} PyArrayDTypeMeta_Spec;
-
-
#define PyArrayDTypeMeta_Type \
(*(PyTypeObject *)__experimental_dtype_api_table[2])
typedef int __dtypemeta_fromspec(
@@ -392,7 +255,7 @@ typedef PyArray_Descr *__get_default_descr(
#define _PyArray_GetDefaultDescr \
((__get_default_descr *)(__experimental_dtype_api_table[6]))
-static NPY_INLINE PyArray_Descr *
+static inline PyArray_Descr *
PyArray_GetDefaultDescr(PyArray_DTypeMeta *DType)
{
if (DType->singleton != NULL) {
@@ -458,16 +321,14 @@ PyArray_GetDefaultDescr(PyArray_DTypeMeta *DType)
*/
#if !defined(NO_IMPORT) && !defined(NO_IMPORT_ARRAY)
-#define __EXPERIMENTAL_DTYPE_VERSION 5
-
static int
import_experimental_dtype_api(int version)
{
- if (version != __EXPERIMENTAL_DTYPE_VERSION) {
+ if (version != __EXPERIMENTAL_DTYPE_API_VERSION) {
PyErr_Format(PyExc_RuntimeError,
"DType API version %d did not match header version %d. Please "
"update the import statement and check for API changes.",
- version, __EXPERIMENTAL_DTYPE_VERSION);
+ version, __EXPERIMENTAL_DTYPE_API_VERSION);
return -1;
}
if (__experimental_dtype_api_table != __uninitialized_table) {
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index aaaefd7de..36cfdd6f6 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -152,7 +152,7 @@ extern "C" {
(k)*PyArray_STRIDES(obj)[2] + \
(l)*PyArray_STRIDES(obj)[3]))
-static NPY_INLINE void
+static inline void
PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
{
PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
@@ -214,7 +214,7 @@ PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
dict).
*/
-static NPY_INLINE int
+static inline int
NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
{
PyObject *title;
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index e894b08f6..742ba5261 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -44,23 +44,6 @@
#define NPY_FAIL 0
#define NPY_SUCCEED 1
-/*
- * Binary compatibility version number. This number is increased
- * whenever the C-API is changed such that binary compatibility is
- * broken, i.e. whenever a recompile of extension modules is needed.
- */
-#define NPY_VERSION NPY_ABI_VERSION
-
-/*
- * Minor API version. This number is increased whenever a change is
- * made to the C-API -- whether it breaks binary compatibility or not.
- * Some changes, such as adding a function pointer to the end of the
- * function table, can be made without breaking binary compatibility.
- * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION)
- * would be increased. Whenever binary compatibility is broken, both
- * NPY_VERSION and NPY_FEATURE_VERSION should be increased.
- */
-#define NPY_FEATURE_VERSION NPY_API_VERSION
enum NPY_TYPES { NPY_BOOL=0,
NPY_BYTE, NPY_UBYTE,
@@ -729,11 +712,15 @@ typedef struct tagPyArrayObject_fields {
int flags;
/* For weak references */
PyObject *weakreflist;
+#if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION
void *_buffer_info; /* private buffer info, tagged to allow warning */
+#endif
/*
* For malloc/calloc/realloc/free per object
*/
+#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
PyObject *mem_handler;
+#endif
} PyArrayObject_fields;
/*
@@ -1463,12 +1450,12 @@ typedef struct {
*/
/* General: those work for any mode */
-static NPY_INLINE int
+static inline int
PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter);
-static NPY_INLINE int
+static inline int
PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter);
#if 0
-static NPY_INLINE int
+static inline int
PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter);
#endif
@@ -1514,85 +1501,85 @@ PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter);
* ABI compatibility.
*/
-static NPY_INLINE int
+static inline int
PyArray_NDIM(const PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->nd;
}
-static NPY_INLINE void *
+static inline void *
PyArray_DATA(PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->data;
}
-static NPY_INLINE char *
+static inline char *
PyArray_BYTES(PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->data;
}
-static NPY_INLINE npy_intp *
+static inline npy_intp *
PyArray_DIMS(PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->dimensions;
}
-static NPY_INLINE npy_intp *
+static inline npy_intp *
PyArray_STRIDES(PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->strides;
}
-static NPY_INLINE npy_intp
+static inline npy_intp
PyArray_DIM(const PyArrayObject *arr, int idim)
{
return ((PyArrayObject_fields *)arr)->dimensions[idim];
}
-static NPY_INLINE npy_intp
+static inline npy_intp
PyArray_STRIDE(const PyArrayObject *arr, int istride)
{
return ((PyArrayObject_fields *)arr)->strides[istride];
}
-static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject *
+static inline NPY_RETURNS_BORROWED_REF PyObject *
PyArray_BASE(PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->base;
}
-static NPY_INLINE NPY_RETURNS_BORROWED_REF PyArray_Descr *
+static inline NPY_RETURNS_BORROWED_REF PyArray_Descr *
PyArray_DESCR(PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->descr;
}
-static NPY_INLINE int
+static inline int
PyArray_FLAGS(const PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->flags;
}
-static NPY_INLINE npy_intp
+static inline npy_intp
PyArray_ITEMSIZE(const PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->descr->elsize;
}
-static NPY_INLINE int
+static inline int
PyArray_TYPE(const PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->descr->type_num;
}
-static NPY_INLINE int
+static inline int
PyArray_CHKFLAGS(const PyArrayObject *arr, int flags)
{
return (PyArray_FLAGS(arr) & flags) == flags;
}
-static NPY_INLINE PyObject *
+static inline PyObject *
PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr)
{
return ((PyArrayObject_fields *)arr)->descr->f->getitem(
@@ -1604,7 +1591,7 @@ PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr)
* and of a type understood by the arrays dtype.
* Use `PyArray_Pack` if the value may be of a different dtype.
*/
-static NPY_INLINE int
+static inline int
PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v)
{
return ((PyArrayObject_fields *)arr)->descr->f->setitem(v, itemptr, arr);
@@ -1639,13 +1626,13 @@ PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v)
(PyArrayObject *)(obj))
#endif
-static NPY_INLINE PyArray_Descr *
+static inline PyArray_Descr *
PyArray_DTYPE(PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->descr;
}
-static NPY_INLINE npy_intp *
+static inline npy_intp *
PyArray_SHAPE(PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->dimensions;
@@ -1655,7 +1642,7 @@ PyArray_SHAPE(PyArrayObject *arr)
* Enables the specified array flags. Does no checking,
* assumes you know what you're doing.
*/
-static NPY_INLINE void
+static inline void
PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags)
{
((PyArrayObject_fields *)arr)->flags |= flags;
@@ -1665,17 +1652,19 @@ PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags)
* Clears the specified array flags. Does no checking,
* assumes you know what you're doing.
*/
-static NPY_INLINE void
+static inline void
PyArray_CLEARFLAGS(PyArrayObject *arr, int flags)
{
((PyArrayObject_fields *)arr)->flags &= ~flags;
}
-static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject *
-PyArray_HANDLER(PyArrayObject *arr)
-{
- return ((PyArrayObject_fields *)arr)->mem_handler;
-}
+#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+ static inline NPY_RETURNS_BORROWED_REF PyObject *
+ PyArray_HANDLER(PyArrayObject *arr)
+ {
+ return ((PyArrayObject_fields *)arr)->mem_handler;
+ }
+#endif
#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL)
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 11cc47765..62fde943a 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -36,7 +36,7 @@ extern "C" {
* included here because it is missing from the PyPy API. It completes the PyLong_As*
* group of functions and can be useful in replacing PyInt_Check.
*/
-static NPY_INLINE int
+static inline int
Npy__PyLong_AsInt(PyObject *obj)
{
int overflow;
@@ -56,7 +56,7 @@ Npy__PyLong_AsInt(PyObject *obj)
#if defined(NPY_PY3K)
/* Return True only if the long fits in a C long */
-static NPY_INLINE int PyInt_Check(PyObject *op) {
+static inline int PyInt_Check(PyObject *op) {
int overflow = 0;
if (!PyLong_Check(op)) {
return 0;
@@ -99,32 +99,6 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
-/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */
-#if PY_VERSION_HEX < 0x03050200
- #define Py_SETREF(op, op2) \
- do { \
- PyObject *_py_tmp = (PyObject *)(op); \
- (op) = (op2); \
- Py_DECREF(_py_tmp); \
- } while (0)
-#endif
-
-/* introduced in https://github.com/python/cpython/commit/a24107b04c1277e3c1105f98aff5bfa3a98b33a0 */
-#if PY_VERSION_HEX < 0x030800A3
- static NPY_INLINE PyObject *
- _PyDict_GetItemStringWithError(PyObject *v, const char *key)
- {
- PyObject *kv, *rv;
- kv = PyUnicode_FromString(key);
- if (kv == NULL) {
- return NULL;
- }
- rv = PyDict_GetItemWithError(v, kv);
- Py_DECREF(kv);
- return rv;
- }
-#endif
-
/*
* PyString -> PyBytes
*/
@@ -193,15 +167,35 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
#endif /* NPY_PY3K */
+/*
+ * Macros to protect CRT calls against instant termination when passed an
+ * invalid parameter (https://bugs.python.org/issue23524).
+ */
+#if defined _MSC_VER && _MSC_VER >= 1900
+
+#include <stdlib.h>
+
+extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler;
+#define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \
+ _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler);
+#define NPY_END_SUPPRESS_IPH _set_thread_local_invalid_parameter_handler(_Py_old_handler); }
+
+#else
+
+#define NPY_BEGIN_SUPPRESS_IPH
+#define NPY_END_SUPPRESS_IPH
+
+#endif /* _MSC_VER >= 1900 */
+
-static NPY_INLINE void
+static inline void
PyUnicode_ConcatAndDel(PyObject **left, PyObject *right)
{
Py_SETREF(*left, PyUnicode_Concat(*left, right));
Py_DECREF(right);
}
-static NPY_INLINE void
+static inline void
PyUnicode_Concat2(PyObject **left, PyObject *right)
{
Py_SETREF(*left, PyUnicode_Concat(*left, right));
@@ -214,7 +208,7 @@ PyUnicode_Concat2(PyObject **left, PyObject *right)
/*
* Get a FILE* handle to the file represented by the Python object
*/
-static NPY_INLINE FILE*
+static inline FILE*
npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
{
int fd, fd2, unbuf;
@@ -268,13 +262,17 @@ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
/* Convert to FILE* handle */
#ifdef _WIN32
+ NPY_BEGIN_SUPPRESS_IPH
handle = _fdopen(fd2, mode);
+ NPY_END_SUPPRESS_IPH
#else
handle = fdopen(fd2, mode);
#endif
if (handle == NULL) {
PyErr_SetString(PyExc_IOError,
- "Getting a FILE* from a Python file object failed");
+ "Getting a FILE* from a Python file object via "
+ "_fdopen failed. If you built NumPy, you probably "
+ "linked with the wrong debug/release runtime");
return NULL;
}
@@ -330,7 +328,7 @@ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
/*
* Close the dup-ed file handle, and seek the Python one to the current position
*/
-static NPY_INLINE int
+static inline int
npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos)
{
int fd, unbuf;
@@ -397,7 +395,7 @@ npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos)
return 0;
}
-static NPY_INLINE int
+static inline int
npy_PyFile_Check(PyObject *file)
{
int fd;
@@ -415,7 +413,7 @@ npy_PyFile_Check(PyObject *file)
return 1;
}
-static NPY_INLINE PyObject*
+static inline PyObject*
npy_PyFile_OpenFile(PyObject *filename, const char *mode)
{
PyObject *open;
@@ -426,7 +424,7 @@ npy_PyFile_OpenFile(PyObject *filename, const char *mode)
return PyObject_CallFunction(open, "Os", filename, mode);
}
-static NPY_INLINE int
+static inline int
npy_PyFile_CloseFile(PyObject *file)
{
PyObject *ret;
@@ -442,7 +440,7 @@ npy_PyFile_CloseFile(PyObject *file)
/* This is a copy of _PyErr_ChainExceptions
*/
-static NPY_INLINE void
+static inline void
npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
{
if (exc == NULL)
@@ -474,7 +472,7 @@ npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
* - a minimal implementation for python 2
* - __cause__ used instead of __context__
*/
-static NPY_INLINE void
+static inline void
npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
{
if (exc == NULL)
@@ -505,7 +503,7 @@ npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
* PyObject_Cmp
*/
#if defined(NPY_PY3K)
-static NPY_INLINE int
+static inline int
PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
{
int v;
@@ -545,7 +543,7 @@ PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
* The main job here is to get rid of the improved error handling
* of PyCapsules. It's a shame...
*/
-static NPY_INLINE PyObject *
+static inline PyObject *
NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
{
PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
@@ -555,7 +553,7 @@ NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
return ret;
}
-static NPY_INLINE PyObject *
+static inline PyObject *
NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *))
{
PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor);
@@ -567,7 +565,7 @@ NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *)
return ret;
}
-static NPY_INLINE void *
+static inline void *
NpyCapsule_AsVoidPtr(PyObject *obj)
{
void *ret = PyCapsule_GetPointer(obj, NULL);
@@ -577,13 +575,13 @@ NpyCapsule_AsVoidPtr(PyObject *obj)
return ret;
}
-static NPY_INLINE void *
+static inline void *
NpyCapsule_GetDesc(PyObject *obj)
{
return PyCapsule_GetContext(obj);
}
-static NPY_INLINE int
+static inline int
NpyCapsule_Check(PyObject *ptr)
{
return PyCapsule_CheckExact(ptr);
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index 2bcc45e4f..728cedbf1 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -40,39 +40,6 @@
#define NPY_GCC_OPT_3
#endif
-/* compile target attributes */
-#if defined HAVE_ATTRIBUTE_TARGET_AVX && defined HAVE_LINK_AVX
-#define NPY_GCC_TARGET_AVX __attribute__((target("avx")))
-#else
-#define NPY_GCC_TARGET_AVX
-#endif
-
-#if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS
-#define HAVE_ATTRIBUTE_TARGET_FMA
-#define NPY_GCC_TARGET_FMA __attribute__((target("avx2,fma")))
-#endif
-
-#if defined HAVE_ATTRIBUTE_TARGET_AVX2 && defined HAVE_LINK_AVX2
-#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2")))
-#else
-#define NPY_GCC_TARGET_AVX2
-#endif
-
-#if defined HAVE_ATTRIBUTE_TARGET_AVX512F && defined HAVE_LINK_AVX512F
-#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f")))
-#elif defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS
-#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f")))
-#else
-#define NPY_GCC_TARGET_AVX512F
-#endif
-
-#if defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX && defined HAVE_LINK_AVX512_SKX
-#define NPY_GCC_TARGET_AVX512_SKX __attribute__((target("avx512f,avx512dq,avx512vl,avx512bw,avx512cd")))
-#elif defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS
-#define NPY_GCC_TARGET_AVX512_SKX __attribute__((target("avx512f,avx512dq,avx512vl,avx512bw,avx512cd")))
-#else
-#define NPY_GCC_TARGET_AVX512_SKX
-#endif
/*
* mark an argument (starting from 1) that must not be NULL and is not checked
* DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check
@@ -83,21 +50,6 @@
#define NPY_GCC_NONNULL(n)
#endif
-#if defined HAVE_XMMINTRIN_H && defined HAVE__MM_LOAD_PS
-#define NPY_HAVE_SSE_INTRINSICS
-#endif
-
-#if defined HAVE_EMMINTRIN_H && defined HAVE__MM_LOAD_PD
-#define NPY_HAVE_SSE2_INTRINSICS
-#endif
-
-#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX2
-#define NPY_HAVE_AVX2_INTRINSICS
-#endif
-
-#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX512F
-#define NPY_HAVE_AVX512F_INTRINSICS
-#endif
/*
* give a hint to the compiler which branch is more likely or unlikely
* to occur, e.g. rare error cases:
@@ -120,7 +72,7 @@
/* unlike _mm_prefetch also works on non-x86 */
#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc))
#else
-#ifdef HAVE__MM_PREFETCH
+#ifdef NPY_HAVE_SSE
/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */
#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \
(loc == 1 ? _MM_HINT_T2 : \
@@ -131,6 +83,7 @@
#endif
#endif
+/* `NPY_INLINE` kept for backwards compatibility; use `inline` instead */
#if defined(_MSC_VER) && !defined(__clang__)
#define NPY_INLINE __inline
/* clang included here to handle clang-cl on Windows */
@@ -147,11 +100,19 @@
#ifdef _MSC_VER
#define NPY_FINLINE static __forceinline
#elif defined(__GNUC__)
- #define NPY_FINLINE static NPY_INLINE __attribute__((always_inline))
+ #define NPY_FINLINE static inline __attribute__((always_inline))
#else
#define NPY_FINLINE static
#endif
+#if defined(_MSC_VER)
+ #define NPY_NOINLINE static __declspec(noinline)
+#elif defined(__GNUC__) || defined(__clang__)
+ #define NPY_NOINLINE static __attribute__((noinline))
+#else
+ #define NPY_NOINLINE static
+#endif
+
#ifdef HAVE___THREAD
#define NPY_TLS __thread
#else
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 78d229e7d..a19f8e6bb 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -77,7 +77,7 @@
#elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
#if defined(__ARM_32BIT_STATE)
#define NPY_CPU_ARMEL_AARCH32
- #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64)
+ #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__)
#define NPY_CPU_ARMEL_AARCH64
#else
#define NPY_CPU_ARMEL
diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h
index 27e1ddad0..2fcd41eb0 100644
--- a/numpy/core/include/numpy/npy_math.h
+++ b/numpy/core/include/numpy/npy_math.h
@@ -8,7 +8,7 @@
/* By adding static inline specifiers to npy_math function definitions when
appropriate, compiler is given the opportunity to optimize */
#if NPY_INLINE_MATH
-#define NPY_INPLACE NPY_INLINE static
+#define NPY_INPLACE static inline
#else
#define NPY_INPLACE
#endif
@@ -24,25 +24,25 @@ extern "C" {
*
* XXX: I should test whether INFINITY and NAN are available on the platform
*/
-NPY_INLINE static float __npy_inff(void)
+static inline float __npy_inff(void)
{
const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL};
return __bint.__f;
}
-NPY_INLINE static float __npy_nanf(void)
+static inline float __npy_nanf(void)
{
const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL};
return __bint.__f;
}
-NPY_INLINE static float __npy_pzerof(void)
+static inline float __npy_pzerof(void)
{
const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL};
return __bint.__f;
}
-NPY_INLINE static float __npy_nzerof(void)
+static inline float __npy_nzerof(void)
{
const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL};
return __bint.__f;
@@ -198,12 +198,12 @@ NPY_INPLACE double npy_atan2(double x, double y);
#define npy_sqrt sqrt
#define npy_pow pow
#define npy_modf modf
+#define npy_nextafter nextafter
-double npy_nextafter(double x, double y);
double npy_spacing(double x);
/*
- * IEEE 754 fpu handling. Those are guaranteed to be macros
+ * IEEE 754 fpu handling
*/
/* use builtins to avoid function calls in tight loops
@@ -211,11 +211,7 @@ double npy_spacing(double x);
#ifdef HAVE___BUILTIN_ISNAN
#define npy_isnan(x) __builtin_isnan(x)
#else
- #ifndef NPY_HAVE_DECL_ISNAN
- #define npy_isnan(x) ((x) != (x))
- #else
- #define npy_isnan(x) isnan(x)
- #endif
+ #define npy_isnan(x) isnan(x)
#endif
@@ -223,39 +219,17 @@ double npy_spacing(double x);
#ifdef HAVE___BUILTIN_ISFINITE
#define npy_isfinite(x) __builtin_isfinite(x)
#else
- #ifndef NPY_HAVE_DECL_ISFINITE
- #ifdef _MSC_VER
- #define npy_isfinite(x) _finite((x))
- #else
- #define npy_isfinite(x) !npy_isnan((x) + (-x))
- #endif
- #else
- #define npy_isfinite(x) isfinite((x))
- #endif
+ #define npy_isfinite(x) isfinite((x))
#endif
/* only available if npy_config.h is available (= numpys own build) */
#ifdef HAVE___BUILTIN_ISINF
#define npy_isinf(x) __builtin_isinf(x)
#else
- #ifndef NPY_HAVE_DECL_ISINF
- #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x))
- #else
- #define npy_isinf(x) isinf((x))
- #endif
+ #define npy_isinf(x) isinf((x))
#endif
-#ifndef NPY_HAVE_DECL_SIGNBIT
- int _npy_signbit_f(float x);
- int _npy_signbit_d(double x);
- int _npy_signbit_ld(long double x);
- #define npy_signbit(x) \
- (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \
- : sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \
- : _npy_signbit_f (x))
-#else
- #define npy_signbit(x) signbit((x))
-#endif
+#define npy_signbit(x) signbit((x))
/*
* float C99 math funcs that need fixups or are blocklist-able
@@ -298,8 +272,8 @@ NPY_INPLACE float npy_modff(float x, float* y);
#define npy_frexpf frexpf
#define npy_ldexpf ldexpf
#define npy_copysignf copysignf
+#define npy_nextafterf nextafterf
-float npy_nextafterf(float x, float y);
float npy_spacingf(float x);
/*
@@ -342,8 +316,8 @@ NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);
#define npy_frexpl frexpl
#define npy_ldexpl ldexpl
#define npy_copysignl copysignl
+#define npy_nextafterl nextafterl
-npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y);
npy_longdouble npy_spacingl(npy_longdouble x);
/*
@@ -399,17 +373,17 @@ NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0);
\
return z1.z;
-static NPY_INLINE npy_cdouble npy_cpack(double x, double y)
+static inline npy_cdouble npy_cpack(double x, double y)
{
__NPY_CPACK_IMP(x, y, double, npy_cdouble);
}
-static NPY_INLINE npy_cfloat npy_cpackf(float x, float y)
+static inline npy_cfloat npy_cpackf(float x, float y)
{
__NPY_CPACK_IMP(x, y, float, npy_cfloat);
}
-static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
+static inline npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
{
__NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble);
}
@@ -431,32 +405,32 @@ static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
\
return __z_repr.a[index];
-static NPY_INLINE double npy_creal(npy_cdouble z)
+static inline double npy_creal(npy_cdouble z)
{
__NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble);
}
-static NPY_INLINE double npy_cimag(npy_cdouble z)
+static inline double npy_cimag(npy_cdouble z)
{
__NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble);
}
-static NPY_INLINE float npy_crealf(npy_cfloat z)
+static inline float npy_crealf(npy_cfloat z)
{
__NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat);
}
-static NPY_INLINE float npy_cimagf(npy_cfloat z)
+static inline float npy_cimagf(npy_cfloat z)
{
__NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat);
}
-static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z)
+static inline npy_longdouble npy_creall(npy_clongdouble z)
{
__NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble);
}
-static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z)
+static inline npy_longdouble npy_cimagl(npy_clongdouble z)
{
__NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble);
}
diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h
index 164fcbdbe..1c25aa5fc 100644
--- a/numpy/core/include/numpy/numpyconfig.h
+++ b/numpy/core/include/numpy/numpyconfig.h
@@ -6,7 +6,13 @@
/*
* On Mac OS X, because there is only one configuration stage for all the archs
* in universal builds, any macro which depends on the arch needs to be
- * hardcoded
+ * hardcoded.
+ *
+ * Note that distutils/pip will attempt a universal2 build when Python itself
+ * is built as universal2, hence this hardcoding is needed even if we do not
+ * support universal2 wheels anymore (see gh-22796).
+ * This code block can be removed after we have dropped the setup.py based
+ * build completely.
*/
#ifdef __APPLE__
#undef NPY_SIZEOF_LONG
@@ -48,30 +54,85 @@
#endif
#endif
+
/**
- * To help with the NPY_NO_DEPRECATED_API macro, we include API version
- * numbers for specific versions of NumPy. To exclude all API that was
- * deprecated as of 1.7, add the following before #including any NumPy
- * headers:
+ * To help with both NPY_TARGET_VERSION and the NPY_NO_DEPRECATED_API macro,
+ * we include API version numbers for specific versions of NumPy.
+ * To exclude all API that was deprecated as of 1.7, add the following before
+ * #including any NumPy headers:
* #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ * The same is true for NPY_TARGET_VERSION, although NumPy will default to
+ * a backwards compatible build anyway.
*/
#define NPY_1_7_API_VERSION 0x00000007
#define NPY_1_8_API_VERSION 0x00000008
-#define NPY_1_9_API_VERSION 0x00000008
-#define NPY_1_10_API_VERSION 0x00000008
-#define NPY_1_11_API_VERSION 0x00000008
-#define NPY_1_12_API_VERSION 0x00000008
-#define NPY_1_13_API_VERSION 0x00000008
-#define NPY_1_14_API_VERSION 0x00000008
-#define NPY_1_15_API_VERSION 0x00000008
-#define NPY_1_16_API_VERSION 0x00000008
-#define NPY_1_17_API_VERSION 0x00000008
-#define NPY_1_18_API_VERSION 0x00000008
-#define NPY_1_19_API_VERSION 0x00000008
+#define NPY_1_9_API_VERSION 0x00000009
+#define NPY_1_10_API_VERSION 0x0000000a
+#define NPY_1_11_API_VERSION 0x0000000a
+#define NPY_1_12_API_VERSION 0x0000000a
+#define NPY_1_13_API_VERSION 0x0000000b
+#define NPY_1_14_API_VERSION 0x0000000c
+#define NPY_1_15_API_VERSION 0x0000000c
+#define NPY_1_16_API_VERSION 0x0000000d
+#define NPY_1_17_API_VERSION 0x0000000d
+#define NPY_1_18_API_VERSION 0x0000000d
+#define NPY_1_19_API_VERSION 0x0000000d
#define NPY_1_20_API_VERSION 0x0000000e
#define NPY_1_21_API_VERSION 0x0000000e
#define NPY_1_22_API_VERSION 0x0000000f
#define NPY_1_23_API_VERSION 0x00000010
#define NPY_1_24_API_VERSION 0x00000010
+#define NPY_1_25_API_VERSION 0x00000011
+
+
+/*
+ * Binary compatibility version number. This number is increased
+ * whenever the C-API is changed such that binary compatibility is
+ * broken, i.e. whenever a recompile of extension modules is needed.
+ */
+#define NPY_VERSION NPY_ABI_VERSION
+
+/*
+ * Minor API version we are compiling to be compatible with. The version
+ * Number is always increased when the API changes via: `NPY_API_VERSION`
+ * (and should maybe just track the NumPy version).
+ *
+ * If we have an internal build, we always target the current version of
+ * course.
+ *
+ * For downstream users, we default to an older version to provide them with
+ * maximum compatibility by default. Downstream can choose to extend that
+ * default, or narrow it down if they wish to use newer API. If you adjust
+ * this, consider the Python version support (example for 1.25.x):
+ *
+ * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12)
+ * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9
+ * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8
+ * NumPy 1.15.x supports Python: ... 3.6 3.7
+ *
+ * Users of the stable ABI may wish to target the last Python that is not
+ * end of life. This would be 3.8 at NumPy 1.25 release time.
+ * 1.17 as default was the choice of oldest-supported-numpy at the time and
+ * has in practice no limit (compared to 1.19). Even earlier becomes legacy.
+ */
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+ /* NumPy internal build, always use current version. */
+ #define NPY_FEATURE_VERSION NPY_API_VERSION
+#elif defined(NPY_TARGET_VERSION) && NPY_TARGET_VERSION
+ /* user provided a target version, use it */
+ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION
+#else
+ /* Use the default (increase when dropping Python 3.9 support) */
+ #define NPY_FEATURE_VERSION NPY_1_19_API_VERSION
+#endif
+
+/* Sanity check the (requested) feature version */
+#if NPY_FEATURE_VERSION > NPY_API_VERSION
+ #error "NPY_TARGET_VERSION higher than NumPy headers!"
+#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION
+ /* No support for irrelevant old targets, no need for error, but warn. */
+ #warning "Requested NumPy target lower than supported NumPy 1.15."
+#endif
+
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */
diff --git a/numpy/core/include/numpy/random/distributions.h b/numpy/core/include/numpy/random/distributions.h
index 78bd06ff5..e7fa4bd00 100644
--- a/numpy/core/include/numpy/random/distributions.h
+++ b/numpy/core/include/numpy/random/distributions.h
@@ -198,7 +198,7 @@ RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
double p,
binomial_t *binomial);
double random_loggam(double x);
-static NPY_INLINE double next_double(bitgen_t *bitgen_state) {
+static inline double next_double(bitgen_t *bitgen_state) {
return bitgen_state->next_double(bitgen_state->state);
}
diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h
index bb0633100..9e00f2e56 100644
--- a/numpy/core/include/numpy/ufuncobject.h
+++ b/numpy/core/include/numpy/ufuncobject.h
@@ -197,7 +197,7 @@ typedef struct _tagPyUFuncObject {
npy_uint32 iter_flags;
/* New in NPY_API_VERSION 0x0000000D and above */
-
+ #if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION
/*
* for each core_num_dim_ix distinct dimension names,
* the possible "frozen" size (-1 if not frozen).
@@ -211,13 +211,15 @@ typedef struct _tagPyUFuncObject {
/* Identity for reduction, when identity == PyUFunc_IdentityValue */
PyObject *identity_value;
+ #endif /* NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION */
/* New in NPY_API_VERSION 0x0000000F and above */
-
+ #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
/* New private fields related to dispatching */
void *_dispatch_cache;
/* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */
PyObject *_loops;
+ #endif
} PyUFuncObject;
#include "arrayobject.h"