author      scoder <stefan_ml@behnel.de>        2020-05-22 22:59:25 +0200
committer   GitHub <noreply@github.com>         2020-05-22 22:59:25 +0200
commit      82e49ca04873e336872504e6c3f0182042bb8bc1 (patch)
tree        086f3290a0521ee94e92857f53a57276f23a270b
parent      69bebba52df8ff0c3d39420af7e4d0a174638bc1 (diff)
parent      917dbeef5422b3ef49874d60bbb70c294c5c45e2 (diff)
download    cython-82e49ca04873e336872504e6c3f0182042bb8bc1.tar.gz
Merge branch 'master' into gh3090_always_allow_keywords
-rw-r--r--  CHANGES.rst                                        70
-rw-r--r--  Cython/Compiler/Code.py                             2
-rw-r--r--  Cython/Compiler/MemoryView.py                       6
-rw-r--r--  Cython/Compiler/ModuleNode.py                      10
-rw-r--r--  Cython/Compiler/Optimize.py                         2
-rw-r--r--  Cython/Compiler/Options.py                          2
-rw-r--r--  Cython/Compiler/ParseTreeTransforms.py             13
-rw-r--r--  Cython/Compiler/Parsing.py                          2
-rw-r--r--  Cython/Compiler/Pipeline.py                         3
-rw-r--r--  Cython/Includes/cpython/datetime.pxd               28
-rw-r--r--  Cython/Includes/numpy/__init__.pxd                  3
-rw-r--r--  Cython/Shadow.py                                    8
-rw-r--r--  Cython/Utility/CommonStructures.c                   4
-rw-r--r--  Cython/Utility/Coroutine.c                          8
-rw-r--r--  Cython/Utility/CythonFunction.c                     8
-rw-r--r--  Cython/Utility/ExtensionTypes.c                    63
-rw-r--r--  Cython/Utility/MemoryView.pyx                      39
-rw-r--r--  Cython/Utility/MemoryView_C.c                     101
-rw-r--r--  Cython/Utility/ModuleSetupCode.c                   24
-rw-r--r--  Cython/Utility/Overflow.c                         184
-rw-r--r--  docs/src/tutorial/pure.rst                          7
-rwxr-xr-x  runtests.py                                         3
-rw-r--r--  tests/buffers/buffmt.pyx                            6
-rw-r--r--  tests/compile/fused_redeclare_T3111.pyx            12
-rw-r--r--  tests/compile/tree_assertions.pyx                  20
-rw-r--r--  tests/memoryview/memoryview_no_binding_T3613.pyx   21
-rw-r--r--  tests/run/async_def.pyx                            29
-rw-r--r--  tests/run/common_utility_types.srctree              2
-rw-r--r--  tests/run/datetime_pxd.pyx                        125
-rw-r--r--  tests/run/fused_def.pyx                             2
-rw-r--r--  tests/run/numpy_test.pyx                           14
-rw-r--r--  tests/run/overflow_check.pxi                       16
-rw-r--r--  tests/run/parallel.pyx                              6
-rw-r--r--  tests/run/test_coroutines_pep492.pyx                6
34 files changed, 603 insertions, 246 deletions
diff --git a/CHANGES.rst b/CHANGES.rst
index 1286f47c8..32cff054d 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -2,7 +2,17 @@
Cython Changelog
================
-3.0.0 alpha 5 (2020-0?-??)
+3.0.0 alpha 6 (2020-0?-??)
+==========================
+
+Bugs fixed
+----------
+
+* Single argument functions failed to accept keyword arguments.
+ (Github issue #3090)
+
+
+3.0.0 alpha 5 (2020-05-19)
==========================
Features added
@@ -21,6 +31,10 @@ Features added
Bugs fixed
----------
+* Several issues with arithmetic overflow handling were resolved, including
+ undefined behaviour in C.
+ Patch by Sam Sneddon. (Github issue #3588)
+
* The improved GIL handling in ``nogil`` functions introduced in 3.0a3
could fail to acquire the GIL in some cases on function exit.
(Github issue #3590 etc.)
@@ -29,24 +43,11 @@ Bugs fixed
that appeared in 3.0a1.
(Github issue #3578)
-* C-tuples could use invalid C struct casting.
- Patch by MegaIng. (Github issue #3038)
-
-* Optimised ``%d`` string formatting into f-strings failed on float values.
- (Github issue #3092)
-
-* Optimised aligned string formatting (``%05s``, ``%-5s``) failed.
- (Github issue #3476)
-
-* Single argument funcions failed to accept keyword arguments.
- (Github issue #3090)
-
* The outdated getbuffer/releasebuffer implementations in the NumPy
declarations were removed so that buffers declared as ``ndarray``
now use the normal implementation in NumPy.
-* The non-public NumPy array specific ``INCREF``/``XDECREF`` functions
- were removed from the NumPy declarations.
+* Includes all bug-fixes from the 0.29.18 release.
3.0.0 alpha 4 (2020-05-05)
@@ -409,7 +410,20 @@ Other changes
* Support for Python 2.6 was removed.
-0.29.18 (2020-0?-??)
+0.29.19 (2020-05-20)
+====================
+
+Bugs fixed
+----------
+
+* A typo in Windows specific code in 0.29.18 was fixed that broke "libc.math".
+ (Github issue #3622)
+
+* A platform specific test failure in 0.29.18 was fixed.
+ Patch by smutch. (Github issue #3620)
+
+
+0.29.18 (2020-05-18)
====================
Bugs fixed
@@ -423,9 +437,24 @@ Bugs fixed
* Error handling in ``cython.array`` creation was improved to avoid calling
C-API functions with an error held.
+* A memory corruption was fixed when garbage collection was triggered during calls
+ to ``PyType_Ready()`` of extension type subclasses.
+ (Github issue #3603)
+
+* Memory view slicing generated unused error handling code which could negatively
+ impact the C compiler optimisations for parallel OpenMP code etc. Also, it is
+ now helped by static branch hints.
+ (Github issue #2987)
+
+* Cython's built-in OpenMP functions were not translated inside of call arguments.
+ Original patch by Celelibi and David Woods. (Github issue #3594)
+
* Complex buffer item types of structs of arrays could fail to validate.
Patch by Leo and smutch. (Github issue #1407)
+* Decorators were not allowed on nested `async def` functions.
+ (Github issue #1462)
+
* C-tuples could use invalid C struct casting.
Patch by MegaIng. (Github issue #3038)
@@ -439,6 +468,15 @@ Bugs fixed
additional command line arguments leaked into the regular command.
Patch by Kamekameha. (Github issue #2209)
+* When using the ``CYTHON_NO_PYINIT_EXPORT`` option in C++, the module init function
+ was not declared as ``extern "C"``.
+ (Github issue #3414)
+
+* Three missing timedelta access macros were added in ``cpython.datetime``.
+
+* The signature of the NumPy C-API function ``PyArray_SearchSorted()`` was fixed.
+ Patch by Brock Mendel. (Github issue #3606)
+
0.29.17 (2020-04-26)
====================
diff --git a/Cython/Compiler/Code.py b/Cython/Compiler/Code.py
index 08ce46081..b0d133da4 100644
--- a/Cython/Compiler/Code.py
+++ b/Cython/Compiler/Code.py
@@ -296,7 +296,7 @@ class UtilityCodeBase(object):
_, ext = os.path.splitext(path)
if ext in ('.pyx', '.py', '.pxd', '.pxi'):
comment = '#'
- strip_comments = partial(re.compile(r'^\s*#.*').sub, '')
+ strip_comments = partial(re.compile(r'^\s*#(?!\s*cython\s*:).*').sub, '')
rstrip = StringEncoding._unicode.rstrip
else:
comment = '/'
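A quick aside on the regex change above: the new pattern still strips ordinary '#' comments from utility sources, but leaves '# cython:' directive comments in place. A minimal Python sketch (example inputs invented here):

    import re
    from functools import partial

    # Strip '#' comments unless they start a 'cython:' directive line.
    strip_comments = partial(re.compile(r'^\s*#(?!\s*cython\s*:).*').sub, '')

    assert strip_comments('# cython: binding=False') == '# cython: binding=False'  # kept
    assert strip_comments('    # an ordinary comment') == ''                       # stripped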
diff --git a/Cython/Compiler/MemoryView.py b/Cython/Compiler/MemoryView.py
index e4b839f01..1c7d63342 100644
--- a/Cython/Compiler/MemoryView.py
+++ b/Cython/Compiler/MemoryView.py
@@ -292,7 +292,6 @@ class MemoryViewSliceBufferEntry(Buffer.BufferEntry):
dim += 1
access, packing = self.type.axes[dim]
- error_goto = code.error_goto(index.pos)
if isinstance(index, ExprNodes.SliceNode):
# slice, unspecified dimension, or part of ellipsis
@@ -309,6 +308,7 @@ class MemoryViewSliceBufferEntry(Buffer.BufferEntry):
util_name = "SimpleSlice"
else:
util_name = "ToughSlice"
+ d['error_goto'] = code.error_goto(index.pos)
new_ndim += 1
else:
@@ -326,8 +326,10 @@ class MemoryViewSliceBufferEntry(Buffer.BufferEntry):
d = dict(
locals(),
wraparound=int(directives['wraparound']),
- boundscheck=int(directives['boundscheck'])
+ boundscheck=int(directives['boundscheck']),
)
+ if d['boundscheck']:
+ d['error_goto'] = code.error_goto(index.pos)
util_name = "SliceIndex"
_, impl = TempitaUtilityCode.load_as_string(util_name, "MemoryView_C.c", context=d)
diff --git a/Cython/Compiler/ModuleNode.py b/Cython/Compiler/ModuleNode.py
index ea003bf31..bb4e3ea57 100644
--- a/Cython/Compiler/ModuleNode.py
+++ b/Cython/Compiler/ModuleNode.py
@@ -87,7 +87,6 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
child_attrs = ["body"]
directives = None
-
def merge_in(self, tree, scope, merge_scope=False):
# Merges in the contents of another tree, and possibly scope. With the
# current implementation below, this must be done right prior
@@ -123,6 +122,13 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.scope.merge_in(scope)
+ def with_compiler_directives(self):
+ # When merging a utility code module into the user code we need to preserve
+ # the original compiler directives. This returns the body of the module node,
+ # wrapped in its set of directives.
+ body = Nodes.CompilerDirectivesNode(self.pos, directives=self.directives, body=self.body)
+ return body
+
def analyse_declarations(self, env):
if has_np_pythran(env):
Pythran.include_pythran_generic(env)
@@ -705,6 +711,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
from .. import __version__
code.putln('#define CYTHON_ABI "%s"' % __version__.replace('.', '_'))
+ code.putln('#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI')
+ code.putln('#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "."')
code.putln('#define CYTHON_HEX_VERSION %s' % build_hex_version(__version__))
code.putln("#define CYTHON_FUTURE_DIVISION %d" % (
Future.division in env.context.future_directives))
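For orientation, a rough Python rendering of the two new C defines above; the ABI string shown is only an example derived from the version string, as in the surrounding code:

    # Hypothetical illustration only, mirroring the '#define' lines above.
    version = "3.0a5"
    cython_abi = version.replace('.', '_')            # "3_0a5"
    abi_module_name = "_cython_" + cython_abi         # "_cython_3_0a5"
    type_module_prefix = abi_module_name + "."        # prefix for internal type names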
diff --git a/Cython/Compiler/Optimize.py b/Cython/Compiler/Optimize.py
index 1379308d0..6f64fc7f1 100644
--- a/Cython/Compiler/Optimize.py
+++ b/Cython/Compiler/Optimize.py
@@ -1723,7 +1723,7 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform):
arg = pos_args[0]
if isinstance(arg, ExprNodes.ComprehensionNode) and arg.type is Builtin.list_type:
- list_node = pos_args[0]
+ list_node = arg
loop_node = list_node.loop
elif isinstance(arg, ExprNodes.GeneratorExpressionNode):
diff --git a/Cython/Compiler/Options.py b/Cython/Compiler/Options.py
index 5d01292f8..2839a05c7 100644
--- a/Cython/Compiler/Options.py
+++ b/Cython/Compiler/Options.py
@@ -169,7 +169,7 @@ def get_directive_defaults():
# Declare compiler directives
_directive_defaults = {
- 'binding': True,
+ 'binding': True, # was False before 3.0
'boundscheck' : True,
'nonecheck' : False,
'initializedcheck' : True,
diff --git a/Cython/Compiler/ParseTreeTransforms.py b/Cython/Compiler/ParseTreeTransforms.py
index 15feda18f..7a068ec04 100644
--- a/Cython/Compiler/ParseTreeTransforms.py
+++ b/Cython/Compiler/ParseTreeTransforms.py
@@ -993,6 +993,10 @@ class InterpretCompilerDirectives(CythonTransform):
old_directives = self.directives
new_directives = dict(old_directives)
+ # test_assert_path_exists and test_fail_if_path_exists should not be inherited
+ # otherwise they can produce very misleading test failures
+ new_directives.pop('test_assert_path_exists', None)
+ new_directives.pop('test_fail_if_path_exists', None)
new_directives.update(directives)
if new_directives == old_directives:
@@ -1190,6 +1194,7 @@ class ParallelRangeTransform(CythonTransform, SkipDeclarations):
def visit_CallNode(self, node):
self.visit(node.function)
if not self.parallel_directive:
+ self.visitchildren(node, exclude=('function',))
return node
# We are a parallel directive, replace this node with the
@@ -1661,7 +1666,7 @@ cdef class NAME:
count = 0
INIT_ASSIGNMENTS
if IS_UNION and count > 1:
- raise ValueError("At most one union member should be specified.")
+ raise ValueError, "At most one union member should be specified."
def __str__(self):
return STR_FORMAT % MEMBER_TUPLE
def __repr__(self):
@@ -1774,9 +1779,9 @@ if VALUE is not None:
pickle_func = TreeFragment(u"""
def __reduce_cython__(self):
- raise TypeError("%(msg)s")
+ raise TypeError, "%(msg)s"
def __setstate_cython__(self, __pyx_state):
- raise TypeError("%(msg)s")
+ raise TypeError, "%(msg)s"
""" % {'msg': msg},
level='c_class', pipeline=[NormalizeTree(None)]).substitute({})
pickle_func.analyse_declarations(node.scope)
@@ -1800,7 +1805,7 @@ if VALUE is not None:
cdef object __pyx_result
if __pyx_checksum != %(checksum)s:
from pickle import PickleError as __pyx_PickleError
- raise __pyx_PickleError("Incompatible checksums (%%s vs %(checksum)s = (%(members)s))" %% __pyx_checksum)
+ raise __pyx_PickleError, "Incompatible checksums (%%s vs %(checksum)s = (%(members)s))" %% __pyx_checksum
__pyx_result = %(class_name)s.__new__(__pyx_type)
if __pyx_state is not None:
%(unpickle_func_name)s__set_state(<%(class_name)s> __pyx_result, __pyx_state)
diff --git a/Cython/Compiler/Parsing.py b/Cython/Compiler/Parsing.py
index 5a9fce8a2..0f3748e37 100644
--- a/Cython/Compiler/Parsing.py
+++ b/Cython/Compiler/Parsing.py
@@ -2239,7 +2239,7 @@ def p_statement(s, ctx, first_statement = 0):
s.error('decorator not allowed here')
s.level = ctx.level
decorators = p_decorators(s)
- if not ctx.allow_struct_enum_decorator and s.sy not in ('def', 'cdef', 'cpdef', 'class'):
+ if not ctx.allow_struct_enum_decorator and s.sy not in ('def', 'cdef', 'cpdef', 'class', 'async'):
if s.sy == 'IDENT' and s.systring == 'async':
pass # handled below
else:
diff --git a/Cython/Compiler/Pipeline.py b/Cython/Compiler/Pipeline.py
index 5194c3e49..5bab4dbdb 100644
--- a/Cython/Compiler/Pipeline.py
+++ b/Cython/Compiler/Pipeline.py
@@ -128,7 +128,8 @@ def inject_utility_code_stage_factory(context):
module_node.scope.utility_code_list.append(dep)
tree = utilcode.get_tree(cython_scope=context.cython_scope)
if tree:
- module_node.merge_in(tree.body, tree.scope, merge_scope=True)
+ module_node.merge_in(tree.with_compiler_directives(),
+ tree.scope, merge_scope=True)
return module_node
return inject_utility_code_stage
diff --git a/Cython/Includes/cpython/datetime.pxd b/Cython/Includes/cpython/datetime.pxd
index 3d561f046..26782be55 100644
--- a/Cython/Includes/cpython/datetime.pxd
+++ b/Cython/Includes/cpython/datetime.pxd
@@ -5,6 +5,17 @@ cdef extern from "Python.h":
pass
cdef extern from "datetime.h":
+ """
+ #if PY_MAJOR_VERSION < 3 && !defined(PyDateTime_DELTA_GET_DAYS)
+ #define PyDateTime_DELTA_GET_DAYS(o) (((PyDateTime_Delta*)o)->days)
+ #endif
+ #if PY_MAJOR_VERSION < 3 && !defined(PyDateTime_DELTA_GET_SECONDS)
+ #define PyDateTime_DELTA_GET_SECONDS(o) (((PyDateTime_Delta*)o)->seconds)
+ #endif
+ #if PY_MAJOR_VERSION < 3 && !defined(PyDateTime_DELTA_GET_MICROSECONDS)
+ #define PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta*)o)->microseconds)
+ #endif
+ """
ctypedef extern class datetime.date[object PyDateTime_Date]:
pass
@@ -90,9 +101,9 @@ cdef extern from "datetime.h":
int PyDateTime_TIME_GET_MICROSECOND(object o)
# Getters for timedelta (C macros).
- #int PyDateTime_DELTA_GET_DAYS(object o)
- #int PyDateTime_DELTA_GET_SECONDS(object o)
- #int PyDateTime_DELTA_GET_MICROSECONDS(object o)
+ int PyDateTime_DELTA_GET_DAYS(object o)
+ int PyDateTime_DELTA_GET_SECONDS(object o)
+ int PyDateTime_DELTA_GET_MICROSECONDS(object o)
# PyDateTime CAPI object.
PyDateTime_CAPI *PyDateTimeAPI
@@ -210,3 +221,14 @@ cdef inline int timedelta_seconds(object o):
# Get microseconds of timedelta
cdef inline int timedelta_microseconds(object o):
return (<PyDateTime_Delta*>o).microseconds
+
+cdef inline double total_seconds(timedelta obj):
+ # Mirrors the "timedelta.total_seconds()" method.
+ # Note that this implementation is not guaranteed to give *exactly* the same
+ # result as the original method, due to potential differences in floating point rounding.
+ cdef:
+ double days, seconds, micros
+ days = <double>PyDateTime_DELTA_GET_DAYS(obj)
+ seconds = <double>PyDateTime_DELTA_GET_SECONDS(obj)
+ micros = <double>PyDateTime_DELTA_GET_MICROSECONDS(obj)
+ return days * 24 * 3600 + seconds + micros / 1_000_000
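As a sanity check of the arithmetic in the new helper, a small pure-Python comparison against timedelta.total_seconds() (not part of the patch; tiny floating-point differences are possible, as the comment above notes):

    from datetime import timedelta

    delta = timedelta(days=13, hours=7, seconds=31, microseconds=993322)
    approx = delta.days * 24 * 3600 + delta.seconds + delta.microseconds / 1_000_000
    assert abs(approx - delta.total_seconds()) < 1e-6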
diff --git a/Cython/Includes/numpy/__init__.pxd b/Cython/Includes/numpy/__init__.pxd
index b40137b42..667190599 100644
--- a/Cython/Includes/numpy/__init__.pxd
+++ b/Cython/Includes/numpy/__init__.pxd
@@ -302,7 +302,6 @@ cdef extern from "numpy/arrayobject.h":
"""
return PyArray_BYTES(self)
-
ctypedef unsigned char npy_bool
ctypedef signed char npy_byte
@@ -655,7 +654,7 @@ cdef extern from "numpy/arrayobject.h":
object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
int PyArray_Sort (ndarray, int, NPY_SORTKIND)
object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
- object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE)
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject*)
object PyArray_ArgMax (ndarray, int, ndarray)
object PyArray_ArgMin (ndarray, int, ndarray)
object PyArray_Reshape (ndarray, object)
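The added PyObject* argument is the optional sorter permutation; the same parameter at the Python level looks roughly like this (NumPy sketch, not part of the patch):

    import numpy as np

    a = np.array([30, 10, 20])
    order = np.argsort(a)                                    # permutation that sorts 'a'
    idx = np.searchsorted(a, 25, side='left', sorter=order)  # -> 2 (between 20 and 30)
    assert idx == 2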
diff --git a/Cython/Shadow.py b/Cython/Shadow.py
index 57fbb60f5..a7f3a9be1 100644
--- a/Cython/Shadow.py
+++ b/Cython/Shadow.py
@@ -1,7 +1,7 @@
# cython.* namespace for pure mode.
from __future__ import absolute_import
-__version__ = "3.0a4"
+__version__ = "3.0a5"
try:
from __builtin__ import basestring
@@ -165,13 +165,13 @@ def cmod(a, b):
def cast(t, *args, **kwargs):
kwargs.pop('typecheck', None)
assert not kwargs
-
+
if isinstance(t, typedef):
return t(*args)
elif isinstance(t, type): #Doesn't work with old-style classes of Python 2.x
if len(args) != 1 or not (args[0] is None or isinstance(args[0], t)):
return t(*args)
-
+
return args[0]
def sizeof(arg):
@@ -187,7 +187,7 @@ def address(arg):
def _is_value_type(t):
if isinstance(t, typedef):
return _is_value_type(t._basetype)
-
+
return isinstance(t, type) and issubclass(t, (StructType, UnionType, ArrayType))
def declare(t=None, value=_Unspecified, **kwds):
diff --git a/Cython/Utility/CommonStructures.c b/Cython/Utility/CommonStructures.c
index e2bf227fd..4b572defb 100644
--- a/Cython/Utility/CommonStructures.c
+++ b/Cython/Utility/CommonStructures.c
@@ -8,7 +8,7 @@ static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyType_Spec *spec, PyObject *
/////////////// FetchCommonType ///////////////
static PyObject *__Pyx_FetchSharedCythonABIModule(void) {
- PyObject *abi_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
+ PyObject *abi_module = PyImport_AddModule((char*) __PYX_ABI_MODULE_NAME);
if (!abi_module) return NULL;
Py_INCREF(abi_module);
return abi_module;
@@ -126,7 +126,7 @@ static void* __Pyx_FetchCommonPointer(void* pointer, const char* name) {
PyObject* capsule = NULL;
void* value = NULL;
- abi_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
+ abi_module = PyImport_AddModule((char*) __PYX_ABI_MODULE_NAME);
if (!abi_module) return NULL;
Py_INCREF(abi_module);
diff --git a/Cython/Utility/Coroutine.c b/Cython/Utility/Coroutine.c
index 8151fec34..a69a40299 100644
--- a/Cython/Utility/Coroutine.c
+++ b/Cython/Utility/Coroutine.c
@@ -1468,7 +1468,7 @@ static PyMethodDef __pyx_CoroutineAwait_methods[] = {
static PyTypeObject __pyx_CoroutineAwaitType_type = {
PyVarObject_HEAD_INIT(0, 0)
- "coroutine_wrapper", /*tp_name*/
+ __PYX_TYPE_MODULE_PREFIX "coroutine_wrapper", /*tp_name*/
sizeof(__pyx_CoroutineAwaitObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __Pyx_CoroutineAwait_dealloc,/*tp_dealloc*/
@@ -1616,7 +1616,7 @@ static __Pyx_PyAsyncMethodsStruct __pyx_Coroutine_as_async = {
static PyTypeObject __pyx_CoroutineType_type = {
PyVarObject_HEAD_INIT(0, 0)
- "coroutine", /*tp_name*/
+ __PYX_TYPE_MODULE_PREFIX "coroutine", /*tp_name*/
sizeof(__pyx_CoroutineObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/
@@ -1732,7 +1732,7 @@ static int __pyx_IterableCoroutine_init(void);/*proto*/
static PyTypeObject __pyx_IterableCoroutineType_type = {
PyVarObject_HEAD_INIT(0, 0)
- "iterable_coroutine", /*tp_name*/
+ __PYX_TYPE_MODULE_PREFIX "iterable_coroutine", /*tp_name*/
sizeof(__pyx_CoroutineObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/
@@ -1851,7 +1851,7 @@ static PyGetSetDef __pyx_Generator_getsets[] = {
static PyTypeObject __pyx_GeneratorType_type = {
PyVarObject_HEAD_INIT(0, 0)
- "generator", /*tp_name*/
+ __PYX_TYPE_MODULE_PREFIX "generator", /*tp_name*/
sizeof(__pyx_CoroutineObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/
diff --git a/Cython/Utility/CythonFunction.c b/Cython/Utility/CythonFunction.c
index bd096bf93..c9098d953 100644
--- a/Cython/Utility/CythonFunction.c
+++ b/Cython/Utility/CythonFunction.c
@@ -811,7 +811,7 @@ static PyType_Slot __pyx_CyFunctionType_slots[] = {
};
static PyType_Spec __pyx_CyFunctionType_spec = {
- "cython_function_or_method",
+ __PYX_TYPE_MODULE_PREFIX "cython_function_or_method",
sizeof(__pyx_CyFunctionObject),
0,
// TODO: Support _Py_TPFLAGS_HAVE_VECTORCALL and _Py_TPFLAGS_HAVE_VECTORCALL
@@ -821,7 +821,7 @@ static PyType_Spec __pyx_CyFunctionType_spec = {
#else
static PyTypeObject __pyx_CyFunctionType_type = {
PyVarObject_HEAD_INIT(0, 0)
- "cython_function_or_method", /*tp_name*/
+ __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", /*tp_name*/
sizeof(__pyx_CyFunctionObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __Pyx_CyFunction_dealloc, /*tp_dealloc*/
@@ -1335,7 +1335,7 @@ static PyType_Slot __pyx_FusedFunctionType_slots[] = {
};
static PyType_Spec __pyx_FusedFunctionType_spec = {
- "fused_cython_function",
+ __PYX_TYPE_MODULE_PREFIX "fused_cython_function",
sizeof(__pyx_FusedFunctionObject),
0,
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /*tp_flags*/
@@ -1352,7 +1352,7 @@ static PyMappingMethods __pyx_FusedFunction_mapping_methods = {
static PyTypeObject __pyx_FusedFunctionType_type = {
PyVarObject_HEAD_INIT(0, 0)
- "fused_cython_function", /*tp_name*/
+ __PYX_TYPE_MODULE_PREFIX "fused_cython_function", /*tp_name*/
sizeof(__pyx_FusedFunctionObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __pyx_FusedFunction_dealloc, /*tp_dealloc*/
diff --git a/Cython/Utility/ExtensionTypes.c b/Cython/Utility/ExtensionTypes.c
index a75863d02..6f748a9cb 100644
--- a/Cython/Utility/ExtensionTypes.c
+++ b/Cython/Utility/ExtensionTypes.c
@@ -55,20 +55,65 @@ static int __Pyx_PyType_Ready(PyTypeObject *t) {
}
#if PY_VERSION_HEX >= 0x03050000
- // As of https://bugs.python.org/issue22079
- // PyType_Ready enforces that all bases of a non-heap type are
- // non-heap. We know that this is the case for the solid base but
- // other bases are heap allocated and are kept alive through the
- // tp_bases reference.
- // Other than this check, the Py_TPFLAGS_HEAPTYPE flag is unused
- // in PyType_Ready().
- t->tp_flags |= Py_TPFLAGS_HEAPTYPE;
+ {
+ // Make sure GC does not pick up our non-heap type as heap type with this hack!
+ // For details, see https://github.com/cython/cython/issues/3603
+ PyObject *ret, *py_status;
+ int gc_was_enabled;
+ PyObject *gc = PyImport_Import(PYUNICODE("gc"));
+ if (unlikely(!gc)) return -1;
+ py_status = PyObject_CallMethodObjArgs(gc, PYUNICODE("isenabled"), NULL);
+ if (unlikely(!py_status)) {
+ Py_DECREF(gc);
+ return -1;
+ }
+ gc_was_enabled = __Pyx_PyObject_IsTrue(py_status);
+ Py_DECREF(py_status);
+ if (gc_was_enabled > 0) {
+ ret = PyObject_CallMethodObjArgs(gc, PYUNICODE("disable"), NULL);
+ if (unlikely(!ret)) {
+ Py_DECREF(gc);
+ return -1;
+ }
+ Py_DECREF(ret);
+ } else if (unlikely(gc_was_enabled == -1)) {
+ Py_DECREF(gc);
+ return -1;
+ }
+
+ // As of https://bugs.python.org/issue22079
+ // PyType_Ready enforces that all bases of a non-heap type are
+ // non-heap. We know that this is the case for the solid base but
+ // other bases are heap allocated and are kept alive through the
+ // tp_bases reference.
+ // Other than this check, the Py_TPFLAGS_HEAPTYPE flag is unused
+ // in PyType_Ready().
+ t->tp_flags |= Py_TPFLAGS_HEAPTYPE;
#endif
r = PyType_Ready(t);
#if PY_VERSION_HEX >= 0x03050000
- t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE;
+ t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE;
+
+ if (gc_was_enabled) {
+ PyObject *t, *v, *tb;
+ PyErr_Fetch(&t, &v, &tb);
+ ret = PyObject_CallMethodObjArgs(gc, PYUNICODE("enable"), NULL);
+ if (likely(ret || r == -1)) {
+ Py_XDECREF(ret);
+ // do not overwrite exceptions raised by PyType_Ready() above
+ PyErr_Restore(t, v, tb);
+ } else {
+ // PyType_Ready() succeeded, but gc.enable() failed.
+ Py_XDECREF(t);
+ Py_XDECREF(v);
+ Py_XDECREF(tb);
+ r = -1;
+ }
+ }
+ Py_DECREF(gc);
+ }
#endif
return r;
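In Python terms, the guard added around PyType_Ready() above amounts to the following pattern (control-flow sketch only; the actual calls happen in the generated C code):

    import gc

    was_enabled = gc.isenabled()
    if was_enabled:
        gc.disable()          # keep the GC from running while the type is readied
    try:
        pass                  # PyType_Ready(t) happens here in the generated C code
    finally:
        if was_enabled:
            gc.enable()       # restore the previous GC state, even on error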
diff --git a/Cython/Utility/MemoryView.pyx b/Cython/Utility/MemoryView.pyx
index f0e4142dc..6166c14a1 100644
--- a/Cython/Utility/MemoryView.pyx
+++ b/Cython/Utility/MemoryView.pyx
@@ -1,5 +1,8 @@
#################### View.MemoryView ####################
+# cython: language_level=3str
+# cython: binding=False
+
# This utility provides cython.array and cython.view.memoryview
from __future__ import absolute_import
@@ -130,10 +133,10 @@ cdef class array:
self.itemsize = itemsize
if not self.ndim:
- raise ValueError("Empty shape tuple for cython.array")
+ raise ValueError, "Empty shape tuple for cython.array"
if itemsize <= 0:
- raise ValueError("itemsize <= 0 for cython.array")
+ raise ValueError, "itemsize <= 0 for cython.array"
if not isinstance(format, bytes):
format = format.encode('ASCII')
@@ -145,12 +148,12 @@ cdef class array:
self._strides = self._shape + self.ndim
if not self._shape:
- raise MemoryError("unable to allocate shape and strides.")
+ raise MemoryError, "unable to allocate shape and strides."
# cdef Py_ssize_t dim, stride
for idx, dim in enumerate(shape):
if dim <= 0:
- raise ValueError(f"Invalid shape in axis {idx}: {dim}.")
+ raise ValueError, f"Invalid shape in axis {idx}: {dim}."
self._shape[idx] = dim
cdef char order
@@ -161,7 +164,7 @@ cdef class array:
order = b'F'
self.mode = u'fortran'
else:
- raise ValueError(f"Invalid mode, expected 'c' or 'fortran', got {mode}")
+ raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}"
self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order)
@@ -179,7 +182,7 @@ cdef class array:
elif self.mode == u"fortran":
bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
if not (flags & bufmode):
- raise ValueError("Can only create a buffer that is contiguous in memory.")
+ raise ValueError, "Can only create a buffer that is contiguous in memory."
info.buf = self.data
info.len = self.len
info.ndim = self.ndim
@@ -239,7 +242,7 @@ cdef int _allocate_buffer(array self) except -1:
self.free_data = True
self.data = <char *>malloc(self.len)
if not self.data:
- raise MemoryError("unable to allocate array data.")
+ raise MemoryError, "unable to allocate array data."
if self.dtype_is_object:
p = <PyObject **> self.data
@@ -423,7 +426,7 @@ cdef class memoryview:
def __setitem__(memoryview self, object index, object value):
if self.view.readonly:
- raise TypeError("Cannot assign to read-only memoryview")
+ raise TypeError, "Cannot assign to read-only memoryview"
have_slices, index = _unellipsify(index, self.view.ndim)
@@ -500,7 +503,7 @@ cdef class memoryview:
try:
result = struct.unpack(self.view.format, bytesitem)
except struct.error:
- raise ValueError("Unable to convert item to object")
+ raise ValueError, "Unable to convert item to object"
else:
if len(self.view.format) == 1:
return result[0]
@@ -525,7 +528,7 @@ cdef class memoryview:
@cname('getbuffer')
def __getbuffer__(self, Py_buffer *info, int flags):
if flags & PyBUF_WRITABLE and self.view.readonly:
- raise ValueError("Cannot create writable memory view from read-only memoryview")
+ raise ValueError, "Cannot create writable memory view from read-only memoryview"
if flags & PyBUF_ND:
info.shape = self.view.shape
@@ -578,7 +581,7 @@ cdef class memoryview:
def strides(self):
if self.view.strides == NULL:
# Note: we always ask for strides, so if this is not set it's a bug
- raise ValueError("Buffer view does not expose strides")
+ raise ValueError, "Buffer view does not expose strides"
return tuple([stride for stride in self.view.strides[:self.view.ndim]])
@@ -696,7 +699,7 @@ cdef tuple _unellipsify(object index, int ndim):
if isinstance(item, slice):
have_slices = True
elif not PyIndex_Check(item):
- raise TypeError(f"Cannot index with type '{type(item)}'")
+ raise TypeError, f"Cannot index with type '{type(item)}'"
result[idx] = item
idx += 1
@@ -706,7 +709,7 @@ cdef tuple _unellipsify(object index, int ndim):
cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
for suboffset in suboffsets[:ndim]:
if suboffset >= 0:
- raise ValueError("Indirect dimensions not supported")
+ raise ValueError, "Indirect dimensions not supported"
#
### Slicing a memoryview
@@ -932,10 +935,10 @@ cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
if index < 0:
index += view.shape[dim]
if index < 0:
- raise IndexError(f"Out of bounds on buffer access (axis {dim})")
+ raise IndexError, f"Out of bounds on buffer access (axis {dim})"
if index >= shape:
- raise IndexError(f"Out of bounds on buffer access (axis {dim})")
+ raise IndexError, f"Out of bounds on buffer access (axis {dim})"
resultp = bufp + index * stride
if suboffset >= 0:
@@ -1252,15 +1255,15 @@ cdef void *copy_data_to_temp({{memviewslice_name}} *src,
@cname('__pyx_memoryview_err_extents')
cdef int _err_extents(int i, Py_ssize_t extent1,
Py_ssize_t extent2) except -1 with gil:
- raise ValueError(f"got differing extents in dimension {i} (got {extent1} and {extent2})")
+ raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})"
@cname('__pyx_memoryview_err_dim')
cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil:
- raise (<object>error)(msg % dim)
+ raise <object>error, msg % dim
@cname('__pyx_memoryview_err')
cdef int _err(PyObject *error, str msg) except -1 with gil:
- raise (<object>error)(msg)
+ raise <object>error, msg
@cname('__pyx_memoryview_err_no_memory')
cdef int _err_no_memory() except -1 with gil:
diff --git a/Cython/Utility/MemoryView_C.c b/Cython/Utility/MemoryView_C.c
index 64552e5e1..063fbf360 100644
--- a/Cython/Utility/MemoryView_C.c
+++ b/Cython/Utility/MemoryView_C.c
@@ -181,13 +181,13 @@ __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
- if (buf->strides[dim] != sizeof(void *)) {
+ if (unlikely(buf->strides[dim] != sizeof(void *))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
- } else if (buf->strides[dim] != buf->itemsize) {
+ } else if (unlikely(buf->strides[dim] != buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
@@ -199,7 +199,7 @@ __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
- if (stride < buf->itemsize) {
+ if (unlikely(stride < buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
@@ -207,17 +207,17 @@ __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
}
}
} else {
- if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) {
+ if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
- } else if (spec & (__Pyx_MEMVIEW_PTR)) {
+ } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
- } else if (buf->suboffsets) {
+ } else if (unlikely(buf->suboffsets)) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
@@ -235,7 +235,7 @@ __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec
// Todo: without PyBUF_INDIRECT we may not have suboffset information, i.e., the
// ptr may not be set to NULL but may be uninitialized?
if (spec & __Pyx_MEMVIEW_DIRECT) {
- if (buf->suboffsets && buf->suboffsets[dim] >= 0) {
+ if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
@@ -244,7 +244,7 @@ __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec
}
if (spec & __Pyx_MEMVIEW_PTR) {
- if (!buf->suboffsets || (buf->suboffsets[dim] < 0)) {
+ if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
@@ -265,9 +265,7 @@ __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
- if (stride * buf->itemsize != buf->strides[i] &&
- buf->shape[i] > 1)
- {
+ if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
@@ -277,8 +275,7 @@ __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
for (i = ndim - 1; i >- 1; i--) {
- if (stride * buf->itemsize != buf->strides[i] &&
- buf->shape[i] > 1) {
+ if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
@@ -325,7 +322,7 @@ static int __Pyx_ValidateAndInit_memviewslice(
}
buf = &memview->view;
- if (buf->ndim != ndim) {
+ if (unlikely(buf->ndim != ndim)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
@@ -334,10 +331,10 @@ static int __Pyx_ValidateAndInit_memviewslice(
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
- if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
+ if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
}
- if ((unsigned) buf->itemsize != dtype->size) {
+ if (unlikely((unsigned) buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
@@ -352,14 +349,14 @@ static int __Pyx_ValidateAndInit_memviewslice(
/* Check axes */
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
- if (!__pyx_check_strides(buf, i, ndim, spec))
+ if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
goto fail;
- if (!__pyx_check_suboffsets(buf, i, ndim, spec))
+ if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
goto fail;
}
/* Check contiguity */
- if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
+ if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
goto fail;
/* Initialize */
@@ -394,7 +391,7 @@ __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
- if (memviewslice->memview || memviewslice->data) {
+ if (unlikely(memviewslice->memview || memviewslice->data)) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
@@ -486,47 +483,49 @@ __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW({{memviewslice_name}} *memslice, int have_gil, int lineno)
{
- int first_time;
+ __pyx_atomic_int_type old_acquisition_count;
struct {{memview_struct_name}} *memview = memslice->memview;
- if (!memview || (PyObject *) memview == Py_None)
- return; /* allow uninitialized memoryview assignment */
-
- if (__pyx_get_slice_count(memview) < 0)
- __pyx_fatalerror("Acquisition count is %d (line %d)",
- __pyx_get_slice_count(memview), lineno);
-
- first_time = __pyx_add_acquisition_count(memview) == 0;
+ if (unlikely(!memview || (PyObject *) memview == Py_None)) {
+ // Allow uninitialized memoryview assignment and do not ref-count None.
+ return;
+ }
- if (first_time) {
- if (have_gil) {
- Py_INCREF((PyObject *) memview);
+ old_acquisition_count = __pyx_add_acquisition_count(memview);
+ if (unlikely(old_acquisition_count <= 0)) {
+ if (likely(old_acquisition_count == 0)) {
+ // First acquisition => keep the memoryview object alive.
+ if (have_gil) {
+ Py_INCREF((PyObject *) memview);
+ } else {
+ PyGILState_STATE _gilstate = PyGILState_Ensure();
+ Py_INCREF((PyObject *) memview);
+ PyGILState_Release(_gilstate);
+ }
} else {
- PyGILState_STATE _gilstate = PyGILState_Ensure();
- Py_INCREF((PyObject *) memview);
- PyGILState_Release(_gilstate);
+ __pyx_fatalerror("Acquisition count is %d (line %d)",
+ __pyx_get_slice_count(memview), lineno);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW({{memviewslice_name}} *memslice,
int have_gil, int lineno) {
- int last_time;
+ __pyx_atomic_int_type old_acquisition_count;
struct {{memview_struct_name}} *memview = memslice->memview;
- if (!memview ) {
- return;
- } else if ((PyObject *) memview == Py_None) {
+ if (unlikely(!memview || (PyObject *) memview == Py_None)) {
+ // Do not ref-count None.
memslice->memview = NULL;
return;
}
- if (__pyx_get_slice_count(memview) <= 0)
- __pyx_fatalerror("Acquisition count is %d (line %d)",
- __pyx_get_slice_count(memview), lineno);
-
- last_time = __pyx_sub_acquisition_count(memview) == 1;
+ old_acquisition_count = __pyx_sub_acquisition_count(memview);
memslice->data = NULL;
- if (last_time) {
+ if (likely(old_acquisition_count > 1)) {
+ // Still other slices out there => we do not own the reference.
+ memslice->memview = NULL;
+ } else if (likely(old_acquisition_count == 1)) {
+ // Last slice => discard owned Python reference to memoryview object.
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
@@ -535,7 +534,8 @@ static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW({{memviewslice_name}} *memslice,
PyGILState_Release(_gilstate);
}
} else {
- memslice->memview = NULL;
+ __pyx_fatalerror("Acquisition count is %d (line %d)",
+ __pyx_get_slice_count(memview), lineno);
}
}
@@ -570,7 +570,7 @@ __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
- if (from_mvs->suboffsets[i] >= 0) {
+ if (unlikely(from_mvs->suboffsets[i] >= 0)) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
@@ -773,7 +773,7 @@ static CYTHON_INLINE PyObject *{{get_function}}(const char *itemp) {
{{if from_py_function}}
static CYTHON_INLINE int {{set_function}}(const char *itemp, PyObject *obj) {
{{dtype}} value = {{from_py_function}}(obj);
- if ({{error_condition}})
+ if (unlikely({{error_condition}}))
return 0;
*({{dtype}} *) itemp = value;
return 1;
@@ -860,7 +860,7 @@ if (unlikely(__pyx_memoryview_slice_memviewslice(
{{endif}}
{{if boundscheck}}
- if (!__Pyx_is_valid_index(__pyx_tmp_idx, __pyx_tmp_shape)) {
+ if (unlikely(!__Pyx_is_valid_index(__pyx_tmp_idx, __pyx_tmp_shape))) {
{{if not have_gil}}
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
@@ -878,9 +878,6 @@ if (unlikely(__pyx_memoryview_slice_memviewslice(
{{error_goto}}
}
- {{else}}
- // make sure label is not un-used
- if ((0)) {{error_goto}}
{{endif}}
{{if all_dimensions_direct}}
diff --git a/Cython/Utility/ModuleSetupCode.c b/Cython/Utility/ModuleSetupCode.c
index 9f523fa10..4f7824862 100644
--- a/Cython/Utility/ModuleSetupCode.c
+++ b/Cython/Utility/ModuleSetupCode.c
@@ -903,24 +903,24 @@ static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict,
/////////////// PyModInitFuncType.proto ///////////////
-#if PY_MAJOR_VERSION < 3
+#ifndef CYTHON_NO_PYINIT_EXPORT
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
-#ifdef CYTHON_NO_PYINIT_EXPORT
-// define this to void manually because PyMODINIT_FUNC adds __declspec(dllexport) to it's definition.
-#define __Pyx_PyMODINIT_FUNC void
+#elif PY_MAJOR_VERSION < 3
+// Py2: define this to void manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
-#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
+#define __Pyx_PyMODINIT_FUNC void
#endif
#else
-
-#ifdef CYTHON_NO_PYINIT_EXPORT
-// define this to PyObject * manually because PyMODINIT_FUNC adds __declspec(dllexport) to it's definition.
-#define __Pyx_PyMODINIT_FUNC PyObject *
+// Py3+: define this to PyObject * manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
-#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
+#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
-
#endif
@@ -1648,7 +1648,7 @@ static void __Pyx_FastGilFuncInit(void);
#if CYTHON_FAST_GIL
-#define __Pyx_FastGIL_ABI_module "_cython_" CYTHON_ABI
+#define __Pyx_FastGIL_ABI_module __PYX_ABI_MODULE_NAME
#define __Pyx_FastGIL_PyCapsuleName "FastGilFuncs"
#define __Pyx_FastGIL_PyCapsule \
__Pyx_FastGIL_ABI_module "." __Pyx_FastGIL_PyCapsuleName
diff --git a/Cython/Utility/Overflow.c b/Cython/Utility/Overflow.c
index e277d20a9..9f3588074 100644
--- a/Cython/Utility/Overflow.c
+++ b/Cython/Utility/Overflow.c
@@ -46,6 +46,24 @@ static int __Pyx_check_twos_complement(void) {
#define __Pyx_div_no_overflow(a, b, overflow) ((a) / (b))
#define __Pyx_div_const_no_overflow(a, b, overflow) ((a) / (b))
+#if defined(__has_builtin)
+# if __has_builtin(__builtin_add_overflow) && !defined(__ibmxl__)
+# define __PYX_HAVE_BUILTIN_OVERFLOW
+# endif
+#elif defined(__GNUC__) && (__GNUC__ >= 5) && (!defined(__INTEL_COMPILER) || (__INTEL_COMPILER >= 1800))
+# define __PYX_HAVE_BUILTIN_OVERFLOW
+#endif
+
+#if defined(__GNUC__)
+# define __Pyx_is_constant(x) (__builtin_constant_p(x))
+#elif (defined(__has_builtin))
+# if __has_builtin(__builtin_constant_p)
+# define __Pyx_is_constant(x) (__builtin_constant_p(x))
+# endif
+#else
+# define __Pyx_is_constant(x) (0)
+#endif
+
/////////////// Common.init ///////////////
//@substitute: naming
@@ -64,11 +82,37 @@ static CYTHON_INLINE {{UINT}} __Pyx_div_{{NAME}}_checking_overflow({{UINT}} a, {
// Use these when b is known at compile time.
#define __Pyx_add_const_{{NAME}}_checking_overflow __Pyx_add_{{NAME}}_checking_overflow
#define __Pyx_sub_const_{{NAME}}_checking_overflow __Pyx_sub_{{NAME}}_checking_overflow
+#if defined(__PYX_HAVE_BUILTIN_OVERFLOW)
+#define __Pyx_mul_const_{{NAME}}_checking_overflow __Pyx_mul_{{NAME}}_checking_overflow
+#else
static CYTHON_INLINE {{UINT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} constant, int *overflow);
+#endif
#define __Pyx_div_const_{{NAME}}_checking_overflow __Pyx_div_{{NAME}}_checking_overflow
/////////////// BaseCaseUnsigned ///////////////
+#if defined(__PYX_HAVE_BUILTIN_OVERFLOW)
+
+static CYTHON_INLINE {{UINT}} __Pyx_add_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
+ {{UINT}} result;
+ *overflow |= __builtin_add_overflow(a, b, &result);
+ return result;
+}
+
+static CYTHON_INLINE {{UINT}} __Pyx_sub_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
+ {{UINT}} result;
+ *overflow |= __builtin_sub_overflow(a, b, &result);
+ return result;
+}
+
+static CYTHON_INLINE {{UINT}} __Pyx_mul_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
+ {{UINT}} result;
+ *overflow |= __builtin_mul_overflow(a, b, &result);
+ return result;
+}
+
+#else
+
static CYTHON_INLINE {{UINT}} __Pyx_add_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
{{UINT}} r = a + b;
*overflow |= r < a;
@@ -82,7 +126,12 @@ static CYTHON_INLINE {{UINT}} __Pyx_sub_{{NAME}}_checking_overflow({{UINT}} a, {
}
static CYTHON_INLINE {{UINT}} __Pyx_mul_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
- if ((sizeof({{UINT}}) < sizeof(unsigned long))) {
+ // if we have a constant, use the constant version
+ if (__Pyx_is_constant(b)) {
+ return __Pyx_mul_const_{{NAME}}_checking_overflow(a, b, overflow);
+ } else if (__Pyx_is_constant(a)) {
+ return __Pyx_mul_const_{{NAME}}_checking_overflow(b, a, overflow);
+ } else if ((sizeof({{UINT}}) < sizeof(unsigned long))) {
unsigned long big_r = ((unsigned long) a) * ((unsigned long) b);
{{UINT}} r = ({{UINT}}) big_r;
*overflow |= big_r != r;
@@ -95,21 +144,26 @@ static CYTHON_INLINE {{UINT}} __Pyx_mul_{{NAME}}_checking_overflow({{UINT}} a, {
return r;
#endif
} else {
- {{UINT}} prod = a * b;
- double dprod = ((double) a) * ((double) b);
- // Overflow results in an error of at least 2^sizeof(UINT),
- // whereas rounding represents an error on the order of 2^(sizeof(UINT)-53).
- *overflow |= fabs(dprod - prod) > (__PYX_MAX({{UINT}}) / 2);
- return prod;
+ return __Pyx_mul_const_{{NAME}}_checking_overflow(a, b, overflow);
}
}
static CYTHON_INLINE {{UINT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
- if (b > 1) {
- *overflow |= a > __PYX_MAX({{UINT}}) / b;
+ // note that deliberately the overflow check is written such that it divides by b; this
+ // function is used when b is a constant thus the compiler should be able to eliminate the
+ // (very slow on most CPUs!) division operation
+ if (__Pyx_is_constant(a) && !__Pyx_is_constant(b)) {
+ // if a is a compile-time constant and b isn't, swap them
+ {{UINT}} temp = b;
+ b = a;
+ a = temp;
}
- return a * b;
+ {{UINT}} prod = a * b;
+ if (b != 0)
+ *overflow |= a > (__PYX_MAX({{UINT}}) / b);
+ return prod;
}
+#endif // __PYX_HAVE_BUILTIN_OVERFLOW
static CYTHON_INLINE {{UINT}} __Pyx_div_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
@@ -130,13 +184,39 @@ static CYTHON_INLINE {{INT}} __Pyx_div_{{NAME}}_checking_overflow({{INT}} a, {{I
// Use when b is known at compile time.
-static CYTHON_INLINE {{INT}} __Pyx_add_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow);
-static CYTHON_INLINE {{INT}} __Pyx_sub_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow);
+#define __Pyx_add_const_{{NAME}}_checking_overflow __Pyx_add_{{NAME}}_checking_overflow
+#define __Pyx_sub_const_{{NAME}}_checking_overflow __Pyx_sub_{{NAME}}_checking_overflow
+#if defined(__PYX_HAVE_BUILTIN_OVERFLOW)
+#define __Pyx_mul_const_{{NAME}}_checking_overflow __Pyx_mul_{{NAME}}_checking_overflow
+#else
static CYTHON_INLINE {{INT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} constant, int *overflow);
+#endif
#define __Pyx_div_const_{{NAME}}_checking_overflow __Pyx_div_{{NAME}}_checking_overflow
/////////////// BaseCaseSigned ///////////////
+#if defined(__PYX_HAVE_BUILTIN_OVERFLOW)
+
+static CYTHON_INLINE {{INT}} __Pyx_add_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
+ {{INT}} result;
+ *overflow |= __builtin_add_overflow(a, b, &result);
+ return result;
+}
+
+static CYTHON_INLINE {{INT}} __Pyx_sub_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
+ {{INT}} result;
+ *overflow |= __builtin_sub_overflow(a, b, &result);
+ return result;
+}
+
+static CYTHON_INLINE {{INT}} __Pyx_mul_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
+ {{INT}} result;
+ *overflow |= __builtin_mul_overflow(a, b, &result);
+ return result;
+}
+
+#else
+
static CYTHON_INLINE {{INT}} __Pyx_add_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
if ((sizeof({{INT}}) < sizeof(long))) {
long big_r = ((long) a) + ((long) b);
@@ -151,40 +231,33 @@ static CYTHON_INLINE {{INT}} __Pyx_add_{{NAME}}_checking_overflow({{INT}} a, {{I
return r;
#endif
} else {
- // Signed overflow undefined, but unsigned overflow is well defined.
- {{INT}} r = ({{INT}}) ((unsigned {{INT}}) a + (unsigned {{INT}}) b);
+ // Signed overflow undefined, but unsigned overflow is well defined. Casting is
+ // implementation-defined, but we assume two's complement (see __Pyx_check_twos_complement
+ // above), and arithmetic in two's-complement is the same as unsigned arithmetic.
+ unsigned {{INT}} r = (unsigned {{INT}}) a + (unsigned {{INT}}) b;
// Overflow happened if the operands have the same sign, but the result
// has opposite sign.
- // sign(a) == sign(b) != sign(r)
- {{INT}} sign_a = __PYX_SIGN_BIT({{INT}}) & a;
- {{INT}} sign_b = __PYX_SIGN_BIT({{INT}}) & b;
- {{INT}} sign_r = __PYX_SIGN_BIT({{INT}}) & r;
- *overflow |= (sign_a == sign_b) & (sign_a != sign_r);
- return r;
- }
-}
-
-static CYTHON_INLINE {{INT}} __Pyx_add_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
- if (b > 0) {
- *overflow |= a > __PYX_MAX({{INT}}) - b;
- } else if (b < 0) {
- *overflow |= a < __PYX_MIN({{INT}}) - b;
+ *overflow |= (((unsigned {{INT}})a ^ r) & ((unsigned {{INT}})b ^ r)) >> (8 * sizeof({{INT}}) - 1);
+ return ({{INT}}) r;
}
- return a + b;
}
static CYTHON_INLINE {{INT}} __Pyx_sub_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
- *overflow |= b == __PYX_MIN({{INT}});
- return __Pyx_add_{{NAME}}_checking_overflow(a, -b, overflow);
-}
-
-static CYTHON_INLINE {{INT}} __Pyx_sub_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
- *overflow |= b == __PYX_MIN({{INT}});
- return __Pyx_add_const_{{NAME}}_checking_overflow(a, -b, overflow);
+ // Compilers don't handle widening as well in the subtraction case, so don't bother
+ unsigned {{INT}} r = (unsigned {{INT}}) a - (unsigned {{INT}}) b;
+    // Overflow happened if the operands have differing signs, and the result
+ // has opposite sign to a.
+ *overflow |= (((unsigned {{INT}})a ^ (unsigned {{INT}})b) & ((unsigned {{INT}})a ^ r)) >> (8 * sizeof({{INT}}) - 1);
+ return ({{INT}}) r;
}
static CYTHON_INLINE {{INT}} __Pyx_mul_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
- if ((sizeof({{INT}}) < sizeof(long))) {
+ // if we have a constant, use the constant version
+ if (__Pyx_is_constant(b)) {
+ return __Pyx_mul_const_{{NAME}}_checking_overflow(a, b, overflow);
+ } else if (__Pyx_is_constant(a)) {
+ return __Pyx_mul_const_{{NAME}}_checking_overflow(b, a, overflow);
+ } else if ((sizeof({{INT}}) < sizeof(long))) {
long big_r = ((long) a) * ((long) b);
{{INT}} r = ({{INT}}) big_r;
*overflow |= big_r != r;
@@ -197,16 +270,20 @@ static CYTHON_INLINE {{INT}} __Pyx_mul_{{NAME}}_checking_overflow({{INT}} a, {{I
return ({{INT}}) r;
#endif
} else {
- {{INT}} prod = a * b;
- double dprod = ((double) a) * ((double) b);
- // Overflow results in an error of at least 2^sizeof(INT),
- // whereas rounding represents an error on the order of 2^(sizeof(INT)-53).
- *overflow |= fabs(dprod - prod) > (__PYX_MAX({{INT}}) / 2);
- return prod;
+ return __Pyx_mul_const_{{NAME}}_checking_overflow(a, b, overflow);
}
}
static CYTHON_INLINE {{INT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
+ // note that deliberately all these comparisons are written such that they divide by b; this
+ // function is used when b is a constant thus the compiler should be able to eliminate the
+ // (very slow on most CPUs!) division operations
+ if (__Pyx_is_constant(a) && !__Pyx_is_constant(b)) {
+ // if a is a compile-time constant and b isn't, swap them
+ {{INT}} temp = b;
+ b = a;
+ a = temp;
+ }
if (b > 1) {
*overflow |= a > __PYX_MAX({{INT}}) / b;
*overflow |= a < __PYX_MIN({{INT}}) / b;
@@ -216,16 +293,17 @@ static CYTHON_INLINE {{INT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{INT}}
*overflow |= a > __PYX_MIN({{INT}}) / b;
*overflow |= a < __PYX_MAX({{INT}}) / b;
}
- return a * b;
+ return ({{INT}}) (((unsigned {{INT}})a) * ((unsigned {{INT}}) b));
}
+#endif // defined(__PYX_HAVE_BUILTIN_OVERFLOW)
static CYTHON_INLINE {{INT}} __Pyx_div_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
if (b == 0) {
*overflow |= 1;
return 0;
}
- *overflow |= (a == __PYX_MIN({{INT}})) & (b == -1);
- return a / b;
+ *overflow |= a == __PYX_MIN({{INT}}) && b == -1;
+ return ({{INT}}) ((unsigned {{INT}}) a / (unsigned {{INT}}) b);
}
@@ -293,12 +371,18 @@ static CYTHON_INLINE {{TYPE}} __Pyx_{{BINOP}}_{{NAME}}_checking_overflow({{TYPE}
/////////////// LeftShift.proto ///////////////
static CYTHON_INLINE {{TYPE}} __Pyx_lshift_{{NAME}}_checking_overflow({{TYPE}} a, {{TYPE}} b, int *overflow) {
- *overflow |=
+ int overflow_check =
#if {{SIGNED}}
- (b < 0) |
+ (a < 0) || (b < 0) ||
#endif
- (b > ({{TYPE}}) (8 * sizeof({{TYPE}}))) | (a > (__PYX_MAX({{TYPE}}) >> b));
- return a << b;
+ // the following must be a _logical_ OR as the RHS is undefined if the LHS is true
+ (b >= ({{TYPE}}) (8 * sizeof({{TYPE}}))) || (a > (__PYX_MAX({{TYPE}}) >> b));
+ if (overflow_check) {
+ *overflow |= 1;
+ return 0;
+ } else {
+ return a << b;
+ }
}
#define __Pyx_lshift_const_{{NAME}}_checking_overflow __Pyx_lshift_{{NAME}}_checking_overflow
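The fallback signed addition above detects overflow with a sign-bit XOR test on the unsigned sum; here is a small Python model of that check for a hypothetical 32-bit integer (masking emulates the fixed width; helper name invented here):

    MASK, SIGN = 0xFFFFFFFF, 0x80000000

    def add_i32_checking_overflow(a, b):
        ua, ub = a & MASK, b & MASK
        r = (ua + ub) & MASK
        # overflow iff both operands share a sign and the result's sign differs
        overflow = bool(((ua ^ r) & (ub ^ r)) & SIGN)
        return (r - 0x100000000 if r & SIGN else r), overflow

    assert add_i32_checking_overflow(2**31 - 1, 1) == (-2**31, True)
    assert add_i32_checking_overflow(-1, 1) == (0, False)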
diff --git a/docs/src/tutorial/pure.rst b/docs/src/tutorial/pure.rst
index 775e7719c..2ae589680 100644
--- a/docs/src/tutorial/pure.rst
+++ b/docs/src/tutorial/pure.rst
@@ -165,6 +165,13 @@ Static typing
.. literalinclude:: ../../examples/tutorial/pure/exceptval.py
+ Note that the default exception handling behaviour when returning C numeric types
+ is to check for ``-1``, and if that was returned, check Python's error indicator
+ for an exception. This means, if no ``@exceptval`` decorator is provided, and the
+ return type is a numeric type, then the default with type annotations is
+ ``@exceptval(-1, check=True)``, in order to make sure that exceptions are correctly
+ and efficiently reported to the caller.
+
Since version 0.27, Cython also supports the variable annotations defined
in `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_. This allows to
declare types of variables in a Python 3.6 compatible way as follows:
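To make the exception-value note added above concrete, a pure-mode sketch of what the documented default is equivalent to (function and values invented here; in uncompiled Python the decorator has no effect):

    import cython

    @cython.exceptval(-1, check=True)   # what a numeric return type defaults to
    def half(x: cython.int) -> cython.int:
        if x < 0:
            raise ValueError("x must be non-negative")
        return x // 2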
diff --git a/runtests.py b/runtests.py
index c42da628c..c64cf9c79 100755
--- a/runtests.py
+++ b/runtests.py
@@ -2515,8 +2515,9 @@ def runtests(options, cmd_args, coverage=None):
options.pythran_dir, add_embedded_test=True, stats=stats)
test_suite.addTest(filetests.build_suite())
if options.examples and languages:
+ examples_workdir = os.path.join(WORKDIR, 'examples')
for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
- filetests = TestBuilder(subdirectory, WORKDIR, selectors, exclude_selectors,
+ filetests = TestBuilder(subdirectory, examples_workdir, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir,
diff --git a/tests/buffers/buffmt.pyx b/tests/buffers/buffmt.pyx
index 5d015e4bf..0aa0cb325 100644
--- a/tests/buffers/buffmt.pyx
+++ b/tests/buffers/buffmt.pyx
@@ -1,4 +1,5 @@
from __future__ import unicode_literals
+import struct
# Tests buffer format string parsing.
@@ -442,7 +443,10 @@ def packed_struct_with_arrays(fmt):
@testcase
def unpacked_struct_with_arrays(fmt):
"""
- >>> unpacked_struct_with_arrays("T{i:a:(8)f:b:f:c:Q:d:(5)i:e:i:f:i:g:xxxx(4)d:h:i:i:}")
+ >>> if struct.calcsize('P') == 8: # 64 bit
+ ... unpacked_struct_with_arrays("T{i:a:(8)f:b:f:c:Q:d:(5)i:e:i:f:i:g:xxxx(4)d:h:i:i:}")
+ ... elif struct.calcsize('P') == 4: # 32 bit
+ ... unpacked_struct_with_arrays("T{i:a:(8)f:b:f:c:Q:d:(5)i:e:i:f:i:g:(4)d:h:i:i:}")
"""
cdef object[UnpackedStructWithArrays] buf = MockBuffer(
diff --git a/tests/compile/fused_redeclare_T3111.pyx b/tests/compile/fused_redeclare_T3111.pyx
index 9450bbdff..cdb338af6 100644
--- a/tests/compile/fused_redeclare_T3111.pyx
+++ b/tests/compile/fused_redeclare_T3111.pyx
@@ -26,10 +26,10 @@ _WARNINGS = """
22:10: 'cpdef_method' redeclared
33:10: 'cpdef_cname_method' redeclared
# from MemoryView.pyx
-981:29: Ambiguous exception value, same as default return value: 0
-981:29: Ambiguous exception value, same as default return value: 0
-1008:46: Ambiguous exception value, same as default return value: 0
-1008:46: Ambiguous exception value, same as default return value: 0
-1098:29: Ambiguous exception value, same as default return value: 0
-1098:29: Ambiguous exception value, same as default return value: 0
+984:29: Ambiguous exception value, same as default return value: 0
+984:29: Ambiguous exception value, same as default return value: 0
+1011:46: Ambiguous exception value, same as default return value: 0
+1011:46: Ambiguous exception value, same as default return value: 0
+1101:29: Ambiguous exception value, same as default return value: 0
+1101:29: Ambiguous exception value, same as default return value: 0
"""
diff --git a/tests/compile/tree_assertions.pyx b/tests/compile/tree_assertions.pyx
new file mode 100644
index 000000000..e311bfd25
--- /dev/null
+++ b/tests/compile/tree_assertions.pyx
@@ -0,0 +1,20 @@
+# mode: compile
+
+# This is a sort of meta test - to test the functionality of "test_assert_path_exists"
+
+cimport cython
+
+@cython.test_assert_path_exists("//ReturnStatNode")
+def not_in_inner_compiler_directives():
+ # used to fail because ReturnStatNode wasn't in *this* CompilerDirectivesNode
+ with cython.boundscheck(False):
+ pass
+ return 1 # should pass
+
+@cython.test_assert_path_exists("//ReturnStatNode")
+def in_inner_compiler_directives():
+ # used to fail because ReturnStatNode wasn't in *this* CompilerDirectivesNode
+ with cython.boundscheck(False):
+ return 1
+
+# it's hard to come up with a corresponding test for fail_if_path_exists..
diff --git a/tests/memoryview/memoryview_no_binding_T3613.pyx b/tests/memoryview/memoryview_no_binding_T3613.pyx
new file mode 100644
index 000000000..1c736359e
--- /dev/null
+++ b/tests/memoryview/memoryview_no_binding_T3613.pyx
@@ -0,0 +1,21 @@
+# mode: compile
+# tag: memoryview
+
+# cython: binding=False
+
+# See GH 3613 - when memoryviews were compiled with binding off they ended up in an
+# inconsistent state where different directives were applied at different stages
+# of the pipeline
+
+import cython
+
+def f(double[:] a):
+ pass
+
+@cython.binding(False)
+def g(double[:] a):
+ pass
+
+@cython.binding(True)
+def h(double[:] a):
+ pass
diff --git a/tests/run/async_def.pyx b/tests/run/async_def.pyx
index ac0b8a516..64b5af486 100644
--- a/tests/run/async_def.pyx
+++ b/tests/run/async_def.pyx
@@ -1,4 +1,4 @@
-# cython: language_level=3, binding=True
+# cython: language_level=3str, binding=True
# mode: run
# tag: pep492, await, gh3337
@@ -33,3 +33,30 @@ async def test_async_temp_gh3337(x, y):
([], 0)
"""
return min(x - y, 0)
+
+
+async def outer_with_nested(called):
+ """
+ >>> called = []
+ >>> _, inner = run_async(outer_with_nested(called))
+ >>> called # after outer_with_nested()
+ ['outer', 'make inner', 'deco', 'return inner']
+ >>> _ = run_async(inner())
+ >>> called # after inner()
+ ['outer', 'make inner', 'deco', 'return inner', 'inner']
+ """
+ called.append('outer')
+
+ def deco(f):
+ called.append('deco')
+ return f
+
+ called.append('make inner')
+
+ @deco
+ async def inner():
+ called.append('inner')
+ return 1
+
+ called.append('return inner')
+ return inner
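
The doctests above rely on run_async() returning a pair whose second element is the coroutine's return value, which is how "_, inner = run_async(outer_with_nested(called))" recovers the inner coroutine function. A minimal sketch of such a driver, assuming no event loop is involved (the helper actually defined in this test module may differ in its details):

def run_async(coro):
    # Drive the coroutine by hand: keep sending None until it finishes,
    # collecting anything it yields along the way.
    buffer = []
    result = None
    while True:
        try:
            buffer.append(coro.send(None))
        except StopIteration as ex:
            result = ex.value  # the coroutine's return value
            break
    return buffer, result
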
diff --git a/tests/run/common_utility_types.srctree b/tests/run/common_utility_types.srctree
index 1f3d2aea3..35daad505 100644
--- a/tests/run/common_utility_types.srctree
+++ b/tests/run/common_utility_types.srctree
@@ -31,7 +31,7 @@ print("importing...")
import a, b
print(type(a.funcA))
-assert type(a.funcA).__name__ == 'cython_function_or_method'
+assert type(a.funcA).__name__.endswith('cython_function_or_method')
assert type(a.funcA) is type(b.funcB)
assert a.funcA.func_globals is a.__dict__
diff --git a/tests/run/datetime_pxd.pyx b/tests/run/datetime_pxd.pyx
index 64c4980db..09e579ba7 100644
--- a/tests/run/datetime_pxd.pyx
+++ b/tests/run/datetime_pxd.pyx
@@ -4,7 +4,7 @@
#from datetime import time, date, datetime, timedelta, tzinfo
-from cpython.datetime cimport import_datetime
+from cpython.datetime cimport import_datetime, timedelta
from cpython.datetime cimport time_new, date_new, datetime_new, timedelta_new
from cpython.datetime cimport time_tzinfo, datetime_tzinfo
from cpython.datetime cimport time_hour, time_minute, time_second, time_microsecond
@@ -12,6 +12,15 @@ from cpython.datetime cimport date_day, date_month, date_year
from cpython.datetime cimport datetime_day, datetime_month, datetime_year
from cpython.datetime cimport datetime_hour, datetime_minute, datetime_second, \
datetime_microsecond
+from cpython.datetime cimport datetime, total_seconds
+
+# These were added in Py3; make sure that their backport works.
+from cpython.datetime cimport (
+ timedelta as timedelta_ext_type,
+ PyDateTime_DELTA_GET_DAYS,
+ PyDateTime_DELTA_GET_SECONDS,
+ PyDateTime_DELTA_GET_MICROSECONDS,
+)
import datetime as py_datetime
@@ -37,7 +46,23 @@ class FixedOffset(py_datetime.tzinfo):
def dst(self, dt):
return ZERO
-
+
+
+def do_timedelta_macros(timedelta_ext_type delta):
+ """
+ >>> delta = py_datetime.timedelta(days=13, hours=7, seconds=31, microseconds=993322)
+ >>> (delta.days, delta.seconds, delta.microseconds)
+ (13, 25231, 993322)
+ >>> do_timedelta_macros(delta)
+ (13, 25231, 993322)
+ """
+ return (
+ PyDateTime_DELTA_GET_DAYS(delta),
+ PyDateTime_DELTA_GET_SECONDS(delta),
+ PyDateTime_DELTA_GET_MICROSECONDS(delta),
+ )
+
+
def do_date(int year, int month, int day):
"""
>>> do_date(2012, 12, 31)
@@ -46,7 +71,7 @@ def do_date(int year, int month, int day):
v = date_new(year, month, day)
return type(v) is py_datetime.date, v.year == year, v.month == month, v.day == day
-def do_datetime(int year, int month, int day,
+def do_datetime(int year, int month, int day,
int hour, int minute, int second, int microsecond):
"""
>>> do_datetime(2012, 12, 31, 12, 23, 0, 0)
@@ -69,7 +94,7 @@ def do_time(int hour, int minute, int second, int microsecond):
def do_time_tzinfo(int hour, int minute, int second, int microsecond, object tz):
"""
- >>> tz = FixedOffset(60*3, 'Moscow')
+ >>> tz = FixedOffset(60*3, 'Moscow')
>>> do_time_tzinfo(12, 23, 0, 0, tz)
(True, True, True, True, True, True)
"""
@@ -79,10 +104,10 @@ def do_time_tzinfo(int hour, int minute, int second, int microsecond, object tz)
v.microsecond == microsecond, v.tzinfo is tz
-def do_datetime_tzinfo(int year, int month, int day,
+def do_datetime_tzinfo(int year, int month, int day,
int hour, int minute, int second, int microsecond, object tz):
"""
- >>> tz = FixedOffset(60*3, 'Moscow')
+ >>> tz = FixedOffset(60*3, 'Moscow')
>>> do_datetime_tzinfo(2012, 12, 31, 12, 23, 0, 0, tz)
(True, True, True, True, True, True, True, True, True)
"""
@@ -90,35 +115,35 @@ def do_datetime_tzinfo(int year, int month, int day,
return type(v) is py_datetime.datetime, v.year == year, v.month == month, v.day == day, \
v.hour == hour, v.minute == minute, v.second == second, \
v.microsecond == microsecond, v.tzinfo is tz
-
+
def do_time_tzinfo2(int hour, int minute, int second, int microsecond, object tz):
"""
- >>> tz = FixedOffset(60*3, 'Moscow')
+ >>> tz = FixedOffset(60*3, 'Moscow')
>>> do_time_tzinfo2(12, 23, 0, 0, tz)
(True, True, True, True, True, True, True, True)
"""
v = time_new(hour, minute, second, microsecond, None)
v1 = time_new(
- time_hour(v),
- time_minute(v),
- time_second(v),
- time_microsecond(v),
+ time_hour(v),
+ time_minute(v),
+ time_second(v),
+ time_microsecond(v),
tz)
r1 = (v1.tzinfo == tz)
r2 = (tz == time_tzinfo(v1))
v2 = time_new(
- time_hour(v1),
- time_minute(v1),
- time_second(v1),
- time_microsecond(v1),
+ time_hour(v1),
+ time_minute(v1),
+ time_second(v1),
+ time_microsecond(v1),
None)
r3 = (v2.tzinfo == None)
r4 = (None == time_tzinfo(v2))
v3 = time_new(
- time_hour(v2),
- time_minute(v2),
- time_second(v2),
- time_microsecond(v2),
+ time_hour(v2),
+ time_minute(v2),
+ time_second(v2),
+ time_microsecond(v2),
tz)
r5 = (v3.tzinfo == tz)
r6 = (tz == time_tzinfo(v3))
@@ -130,44 +155,60 @@ def do_time_tzinfo2(int hour, int minute, int second, int microsecond, object tz
def do_datetime_tzinfo2(int year, int month, int day,
int hour, int minute, int second, int microsecond, object tz):
"""
- >>> tz = FixedOffset(60*3, 'Moscow')
+ >>> tz = FixedOffset(60*3, 'Moscow')
>>> do_datetime_tzinfo2(2012, 12, 31, 12, 23, 0, 0, tz)
(True, True, True, True, True, True, True, True)
"""
v = datetime_new(year, month, day, hour, minute, second, microsecond, None)
v1 = datetime_new(
- datetime_year(v),
- datetime_month(v),
- datetime_day(v),
- datetime_hour(v),
- datetime_minute(v),
- datetime_second(v),
- datetime_microsecond(v),
+ datetime_year(v),
+ datetime_month(v),
+ datetime_day(v),
+ datetime_hour(v),
+ datetime_minute(v),
+ datetime_second(v),
+ datetime_microsecond(v),
tz)
r1 = (v1.tzinfo == tz)
r2 = (tz == datetime_tzinfo(v1))
v2 = datetime_new(
- datetime_year(v1),
- datetime_month(v1),
- datetime_day(v1),
- datetime_hour(v1),
- datetime_minute(v1),
- datetime_second(v1),
- datetime_microsecond(v1),
+ datetime_year(v1),
+ datetime_month(v1),
+ datetime_day(v1),
+ datetime_hour(v1),
+ datetime_minute(v1),
+ datetime_second(v1),
+ datetime_microsecond(v1),
None)
r3 = (v2.tzinfo == None)
r4 = (None == datetime_tzinfo(v2))
v3 = datetime_new(
- datetime_year(v2),
- datetime_month(v2),
- datetime_day(v2),
- datetime_hour(v2),
- datetime_minute(v2),
- datetime_second(v2),
- datetime_microsecond(v2),
+ datetime_year(v2),
+ datetime_month(v2),
+ datetime_day(v2),
+ datetime_hour(v2),
+ datetime_minute(v2),
+ datetime_second(v2),
+ datetime_microsecond(v2),
tz)
r5 = (v3.tzinfo == tz)
r6 = (tz == datetime_tzinfo(v3))
r7 = (v2 == v)
r8 = (v3 == v1)
return r1, r2, r3, r4, r5, r6, r7, r8
+
+
+def test_timedelta_total_seconds():
+ """
+ >>> cytotal, pytotal = test_timedelta_total_seconds()
+ >>> assert cytotal == pytotal, (cytotal, pytotal)
+ >>> cytotal == pytotal
+ True
+ """
+ cdef:
+ datetime now = py_datetime.datetime.now()
+ timedelta td = now - py_datetime.datetime(1970, 1, 1)
+
+ pytd = now - py_datetime.datetime(1970, 1, 1)
+
+ return total_seconds(td), pytd.total_seconds()
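
The expected tuple in do_timedelta_macros() follows from timedelta's normalisation (7 hours and 31 seconds are stored as 25231 seconds), and total_seconds() is effectively the total number of microseconds divided by 10**6. A small pure-Python check of both facts:

import datetime

delta = datetime.timedelta(days=13, hours=7, seconds=31, microseconds=993322)
# timedelta stores only days/seconds/microseconds; the hours are folded into seconds.
assert (delta.days, delta.seconds, delta.microseconds) == (13, 7 * 3600 + 31, 993322)

# total_seconds() amounts to the microsecond total divided by 10**6.
micros = (delta.days * 86400 + delta.seconds) * 10**6 + delta.microseconds
assert micros / 10**6 == delta.total_seconds()
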
diff --git a/tests/run/fused_def.pyx b/tests/run/fused_def.pyx
index efd578298..0f5ec1bbd 100644
--- a/tests/run/fused_def.pyx
+++ b/tests/run/fused_def.pyx
@@ -143,7 +143,7 @@ def run_cyfunction_check():
fused_cython_function
1
"""
- print(type(opt_func).__name__)
+ print(type(opt_func).__name__.rsplit('.', 1)[-1])
print(__Pyx_CyFunction_Check(opt_func)) # should be True
def test_opt_func():
diff --git a/tests/run/numpy_test.pyx b/tests/run/numpy_test.pyx
index 4dc89ad5c..e81547c32 100644
--- a/tests/run/numpy_test.pyx
+++ b/tests/run/numpy_test.pyx
@@ -944,3 +944,17 @@ def test_broadcast_comparison(np.ndarray[double, ndim=1] a):
cdef object obj = a
return a == 0, obj == 0, a == 1, obj == 1
+
+@testcase
+def test_c_api_searchsorted(np.ndarray arr, other):
+ """
+ >>> arr = np.random.randn(10)
+ >>> other = np.random.randn(5)
+ >>> result, expected = test_c_api_searchsorted(arr, other)
+ >>> (result == expected).all()
+ True
+ """
+ result = np.PyArray_SearchSorted(arr, other, np.NPY_SEARCHRIGHT, NULL)
+
+ expected = arr.searchsorted(other, side="right")
+ return result, expected
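
PyArray_SearchSorted() is the C-API counterpart of ndarray.searchsorted(), and NPY_SEARCHRIGHT corresponds to side="right": insertion indices land to the right of any equal elements. A NumPy-only illustration of that semantics on a sorted array:

import numpy as np

haystack = np.array([0.0, 1.0, 2.0, 2.0, 3.0])   # already sorted
needles = np.array([2.0, 2.5])

print(np.searchsorted(haystack, needles, side="right"))  # [4 4] - after equal entries
print(np.searchsorted(haystack, needles, side="left"))   # [2 4] - before equal entries
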
diff --git a/tests/run/overflow_check.pxi b/tests/run/overflow_check.pxi
index 9bfd068a2..0b9844c89 100644
--- a/tests/run/overflow_check.pxi
+++ b/tests/run/overflow_check.pxi
@@ -1,14 +1,15 @@
cimport cython
cdef object two = 2
-cdef int size_in_bits = sizeof(INT) * 8
+cdef int size_in_bits_ = sizeof(INT) * 8
cdef bint is_signed_ = not ((<INT>-1) > 0)
-cdef INT max_value_ = <INT>(two ** (size_in_bits - is_signed_) - 1)
+cdef INT max_value_ = <INT>(two ** (size_in_bits_ - is_signed_) - 1)
cdef INT min_value_ = ~max_value_
cdef INT half_ = max_value_ // <INT>2
# Python visible.
+size_in_bits = size_in_bits_
is_signed = is_signed_
max_value = max_value_
min_value = min_value_
@@ -230,6 +231,17 @@ def test_lshift(INT a, int b):
"""
>>> test_lshift(1, 10)
1024
+ >>> test_lshift(1, size_in_bits - 2) == 1 << (size_in_bits - 2)
+ True
+ >>> test_lshift(0, size_in_bits - 1)
+ 0
+ >>> test_lshift(1, size_in_bits - 1) == 1 << (size_in_bits - 1) if not is_signed else True
+ True
+ >>> if is_signed: expect_overflow(test_lshift, 1, size_in_bits - 1)
+ >>> expect_overflow(test_lshift, 0, size_in_bits)
+ >>> expect_overflow(test_lshift, 1, size_in_bits)
+ >>> expect_overflow(test_lshift, 0, size_in_bits + 1)
+ >>> expect_overflow(test_lshift, 1, size_in_bits + 1)
>>> expect_overflow(test_lshift, 1, 100)
>>> expect_overflow(test_lshift, max_value, 1)
>>> test_lshift(max_value, 0) == max_value
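
The new doctest lines probe the edges of the left-shift check: shifting by the full type width or more is rejected regardless of the value (it is undefined behaviour in C), and for signed types shifting a 1 into the sign bit also counts as overflow. A rough pure-Python model of that rule for a hypothetical fixed-width integer (a sketch only, not the code Cython generates):

def checked_lshift(a, b, bits=32, signed=True):
    # Value range of the modelled fixed-width integer type.
    max_value = (1 << (bits - (1 if signed else 0))) - 1
    min_value = ~max_value if signed else 0
    if b < 0 or b >= bits:
        # Shifting by the full width (or more) is rejected even for a == 0.
        raise OverflowError("shift count out of range")
    result = a << b
    if result > max_value or result < min_value:
        raise OverflowError("value too large to shift")
    return result
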
diff --git a/tests/run/parallel.pyx b/tests/run/parallel.pyx
index c3526556f..c3739b10b 100644
--- a/tests/run/parallel.pyx
+++ b/tests/run/parallel.pyx
@@ -8,6 +8,9 @@ from libc.stdlib cimport malloc, free
openmp.omp_set_nested(1)
+cdef int forward(int x) nogil:
+ return x
+
def test_parallel():
"""
>>> test_parallel()
@@ -20,6 +23,9 @@ def test_parallel():
with nogil, cython.parallel.parallel():
buf[threadid()] = threadid()
+ # Recognise threadid() also when it's used in a function argument.
+ # See https://github.com/cython/cython/issues/3594
+ buf[forward(cython.parallel.threadid())] = forward(threadid())
for i in range(maxthreads):
assert buf[i] == i
diff --git a/tests/run/test_coroutines_pep492.pyx b/tests/run/test_coroutines_pep492.pyx
index b274160e8..a12307174 100644
--- a/tests/run/test_coroutines_pep492.pyx
+++ b/tests/run/test_coroutines_pep492.pyx
@@ -109,7 +109,7 @@ class AsyncYield(object):
def run_async(coro):
#assert coro.__class__ is types.GeneratorType
- assert coro.__class__.__name__ in ('coroutine', '_GeneratorWrapper'), coro.__class__.__name__
+ assert coro.__class__.__name__.rsplit('.', 1)[-1] in ('coroutine', '_GeneratorWrapper'), coro.__class__.__name__
buffer = []
result = None
@@ -123,7 +123,7 @@ def run_async(coro):
def run_async__await__(coro):
- assert coro.__class__.__name__ in ('coroutine', '_GeneratorWrapper'), coro.__class__.__name__
+ assert coro.__class__.__name__.rsplit('.', 1)[-1] in ('coroutine', '_GeneratorWrapper'), coro.__class__.__name__
aw = coro.__await__()
buffer = []
result = None
@@ -895,7 +895,7 @@ class CoroutineTest(unittest.TestCase):
raise StopIteration
with silence_coro_gc():
- self.assertRegex(repr(foo()), '^<coroutine object.* at 0x.*>$')
+ self.assertRegex(repr(foo()), '^<[^\s]*coroutine object.* at 0x.*>$')
def test_func_4(self):
async def foo():