summary refs log tree commit diff
path: root/Cython/Compiler/ExprNodes.py
diff options
context:
space:
mode:
Diffstat (limited to 'Cython/Compiler/ExprNodes.py')
-rw-r--r-- Cython/Compiler/ExprNodes.py | 598
1 files changed, 409 insertions, 189 deletions
diff --git a/Cython/Compiler/ExprNodes.py b/Cython/Compiler/ExprNodes.py
index 312b37329..881851535 100644
--- a/Cython/Compiler/ExprNodes.py
+++ b/Cython/Compiler/ExprNodes.py
@@ -328,14 +328,13 @@ class ExprNode(Node):
# is_sequence_constructor
# boolean Is a list or tuple constructor expression
# is_starred boolean Is a starred expression (e.g. '*a')
- # saved_subexpr_nodes
- # [ExprNode or [ExprNode or None] or None]
- # Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc.
# result_is_used boolean indicates that the result will be dropped and the
- # is_numpy_attribute boolean Is a Numpy module attribute
# result_code/temp_result can safely be set to None
+ # is_numpy_attribute boolean Is a Numpy module attribute
# annotation ExprNode or None PEP526 annotation for names or expressions
+ # generator_arg_tag None or Node A tag to mark ExprNodes that potentially need to
+ # be changed to a generator argument
result_ctype = None
type = None
@@ -345,6 +344,7 @@ class ExprNode(Node):
use_managed_ref = True # can be set by optimisation transforms
result_is_used = True
is_numpy_attribute = False
+ generator_arg_tag = None
# The Analyse Expressions phase for expressions is split
# into two sub-phases:
@@ -473,7 +473,6 @@ class ExprNode(Node):
is_memview_broadcast = False
is_memview_copy_assignment = False
- saved_subexpr_nodes = None
is_temp = False
has_temp_moved = False # if True then attempting to do anything but free the temp is invalid
is_target = False
@@ -548,7 +547,7 @@ class ExprNode(Node):
if is_pythran_supported_node_or_none(self):
return to_pythran(self)
- assert(type_ is not None)
+ assert type_ is not None
return to_pythran(self, type_)
def is_c_result_required(self):
@@ -1103,6 +1102,8 @@ class ExprNode(Node):
type = self.type
if type.is_enum or type.is_error:
return self
+ elif type is PyrexTypes.c_bint_type:
+ return self
elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
return CoerceToBooleanNode(self, env)
elif type.is_cpp_class and type.scope and type.scope.lookup("operator bool"):
@@ -1530,14 +1531,23 @@ class FloatNode(ConstNode):
def _analyse_name_as_type(name, pos, env):
- type = PyrexTypes.parse_basic_type(name)
- if type is not None:
- return type
+ ctype = PyrexTypes.parse_basic_type(name)
+ if ctype is not None and env.in_c_type_context:
+ return ctype
global_entry = env.global_scope().lookup(name)
- if global_entry and global_entry.is_type and global_entry.type:
- return global_entry.type
+ if global_entry and global_entry.is_type:
+ type = global_entry.type
+ if (not env.in_c_type_context and
+ name == 'int' and type is Builtin.int_type):
+ # While we still support Python2 this needs to be downgraded
+ # to a generic Python object to include both int and long
+ type = py_object_type
+ if type and (type.is_pyobject or env.in_c_type_context):
+ return type
+ ctype = ctype or type
+ # This is fairly heavy, so it's worth trying some easier things above.
from .TreeFragment import TreeFragment
with local_errors(ignore=True):
pos = (pos[0], pos[1], pos[2]-7)
@@ -1550,8 +1560,11 @@ def _analyse_name_as_type(name, pos, env):
if isinstance(sizeof_node, SizeofTypeNode):
sizeof_node = sizeof_node.analyse_types(env)
if isinstance(sizeof_node, SizeofTypeNode):
- return sizeof_node.arg_type
- return None
+ type = sizeof_node.arg_type
+ if type and (type.is_pyobject or env.in_c_type_context):
+ return type
+ ctype = ctype or type
+ return ctype
class BytesNode(ConstNode):
@@ -2025,6 +2038,8 @@ class NameNode(AtomicExprNode):
# annotations never create global cdef names
if env.is_module_scope:
return
+
+ modifiers = ()
if (
# name: "description" => not a type, but still a declared variable or attribute
annotation.expr.is_string_literal
@@ -2036,10 +2051,11 @@ class NameNode(AtomicExprNode):
# For Python class scopes every attribute is a Python object
atype = py_object_type
else:
- _, atype = annotation.analyse_type_annotation(env)
+ modifiers, atype = annotation.analyse_type_annotation(env)
+
if atype is None:
atype = unspecified_type if as_target and env.directives['infer_types'] != False else py_object_type
- if atype.is_fused and env.fused_to_specific:
+ elif atype.is_fused and env.fused_to_specific:
try:
atype = atype.specialize(env.fused_to_specific)
except CannotSpecialize:
@@ -2047,25 +2063,26 @@ class NameNode(AtomicExprNode):
"'%s' cannot be specialized since its type is not a fused argument to this function" %
self.name)
atype = error_type
+
visibility = 'private'
- if 'dataclasses.dataclass' in env.directives:
+ if env.is_c_dataclass_scope:
# handle "frozen" directive - full inspection of the dataclass directives happens
# in Dataclass.py
- frozen_directive = None
- dataclass_directive = env.directives['dataclasses.dataclass']
- if dataclass_directive:
- dataclass_directive_kwds = dataclass_directive[1]
- frozen_directive = dataclass_directive_kwds.get('frozen', None)
- is_frozen = frozen_directive and frozen_directive.is_literal and frozen_directive.value
+ is_frozen = env.is_c_dataclass_scope == "frozen"
if atype.is_pyobject or atype.can_coerce_to_pyobject(env):
visibility = 'readonly' if is_frozen else 'public'
# If the object can't be coerced that's fine - we just don't create a property
+
if as_target and env.is_c_class_scope and not (atype.is_pyobject or atype.is_error):
# TODO: this will need revising slightly if annotated cdef attributes are implemented
atype = py_object_type
warning(annotation.pos, "Annotation ignored since class-level attributes must be Python objects. "
"Were you trying to set up an instance attribute?", 2)
- entry = self.entry = env.declare_var(name, atype, self.pos, is_cdef=not as_target, visibility=visibility)
+
+ entry = self.entry = env.declare_var(
+ name, atype, self.pos, is_cdef=not as_target, visibility=visibility,
+ pytyping_modifiers=modifiers)
+
# Even if the entry already exists, make sure we're supplying an annotation if we can.
if annotation and not entry.annotation:
entry.annotation = annotation
@@ -2085,23 +2102,42 @@ class NameNode(AtomicExprNode):
return None
def analyse_as_type(self, env):
+ type = None
if self.cython_attribute:
type = PyrexTypes.parse_basic_type(self.cython_attribute)
- else:
+ elif env.in_c_type_context:
type = PyrexTypes.parse_basic_type(self.name)
if type:
return type
+
entry = self.entry
if not entry:
entry = env.lookup(self.name)
- if entry and entry.is_type:
- return entry.type
- elif entry and entry.known_standard_library_import:
+ if entry and not entry.is_type and entry.known_standard_library_import:
entry = Builtin.get_known_standard_library_entry(entry.known_standard_library_import)
- if entry and entry.is_type:
- return entry.type
- else:
- return None
+ if entry and entry.is_type:
+ # Infer equivalent C types instead of Python types when possible.
+ type = entry.type
+ if not env.in_c_type_context and type is Builtin.long_type:
+ # Try to give a helpful warning when users write plain C type names.
+ warning(self.pos, "Found Python 2.x type 'long' in a Python annotation. Did you mean to use 'cython.long'?")
+ type = py_object_type
+ elif type.is_pyobject and type.equivalent_type:
+ type = type.equivalent_type
+ elif type is Builtin.int_type:
+ # while we still support Python 2 this must be an object
+ # so that it can be either int or long
+ type = py_object_type
+ return type
+ if self.name == 'object':
+ # This is normally parsed as "simple C type", but not if we don't parse C types.
+ return py_object_type
+
+ # Try to give a helpful warning when users write plain C type names.
+ if not env.in_c_type_context and PyrexTypes.parse_basic_type(self.name):
+ warning(self.pos, "Found C type '%s' in a Python annotation. Did you mean to use a Python type?" % self.name)
+
+ return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type.
@@ -2131,10 +2167,13 @@ class NameNode(AtomicExprNode):
self.entry.known_standard_library_import = "" # already exists somewhere and so is now ambiguous
if not self.entry and self.annotation is not None:
# name : type = ...
- is_dataclass = 'dataclasses.dataclass' in env.directives
+ is_dataclass = env.is_c_dataclass_scope
# In a dataclass, an assignment should not prevent a name from becoming an instance attribute.
# Hence, "as_target = not is_dataclass".
self.declare_from_annotation(env, as_target=not is_dataclass)
+ elif (self.entry and self.entry.is_inherited and
+ self.annotation and env.is_c_dataclass_scope):
+ error(self.pos, "Cannot redeclare inherited fields in Cython dataclasses")
if not self.entry:
if env.directives['warn.undeclared']:
warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
@@ -2761,7 +2800,98 @@ class ImportNode(ExprNode):
return self.module_name.value
-class IteratorNode(ExprNode):
+class ScopedExprNode(ExprNode):
+ # Abstract base class for ExprNodes that have their own local
+ # scope, such as generator expressions.
+ #
+ # expr_scope Scope the inner scope of the expression
+
+ subexprs = []
+ expr_scope = None
+
+ # does this node really have a local scope, e.g. does it leak loop
+ # variables or not? non-leaking Py3 behaviour is default, except
+ # for list comprehensions where the behaviour differs in Py2 and
+ # Py3 (set in Parsing.py based on parser context)
+ has_local_scope = True
+
+ def init_scope(self, outer_scope, expr_scope=None):
+ if expr_scope is not None:
+ self.expr_scope = expr_scope
+ elif self.has_local_scope:
+ self.expr_scope = Symtab.ComprehensionScope(outer_scope)
+ elif not self.expr_scope: # don't unset if it's already been set
+ self.expr_scope = None
+
+ def analyse_declarations(self, env):
+ self.init_scope(env)
+
+ def analyse_scoped_declarations(self, env):
+ # this is called with the expr_scope as env
+ pass
+
+ def analyse_types(self, env):
+ # no recursion here, the children will be analysed separately below
+ return self
+
+ def analyse_scoped_expressions(self, env):
+ # this is called with the expr_scope as env
+ return self
+
+ def generate_evaluation_code(self, code):
+ # set up local variables and free their references on exit
+ generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
+ if not self.has_local_scope or not self.expr_scope.var_entries:
+ # no local variables => delegate, done
+ generate_inner_evaluation_code(code)
+ return
+
+ code.putln('{ /* enter inner scope */')
+ py_entries = []
+ for _, entry in sorted(item for item in self.expr_scope.entries.items() if item[0]):
+ if not entry.in_closure:
+ if entry.type.is_pyobject and entry.used:
+ py_entries.append(entry)
+ if not py_entries:
+ # no local Python references => no cleanup required
+ generate_inner_evaluation_code(code)
+ code.putln('} /* exit inner scope */')
+ return
+
+ # must free all local Python references at each exit point
+ old_loop_labels = code.new_loop_labels()
+ old_error_label = code.new_error_label()
+
+ generate_inner_evaluation_code(code)
+
+ # normal (non-error) exit
+ self._generate_vars_cleanup(code, py_entries)
+
+ # error/loop body exit points
+ exit_scope = code.new_label('exit_scope')
+ code.put_goto(exit_scope)
+ for label, old_label in ([(code.error_label, old_error_label)] +
+ list(zip(code.get_loop_labels(), old_loop_labels))):
+ if code.label_used(label):
+ code.put_label(label)
+ self._generate_vars_cleanup(code, py_entries)
+ code.put_goto(old_label)
+ code.put_label(exit_scope)
+ code.putln('} /* exit inner scope */')
+
+ code.set_loop_labels(old_loop_labels)
+ code.error_label = old_error_label
+
+ def _generate_vars_cleanup(self, code, py_entries):
+ for entry in py_entries:
+ if entry.is_cglobal:
+ code.put_var_gotref(entry)
+ code.put_var_decref_set(entry, "Py_None")
+ else:
+ code.put_var_xdecref_clear(entry)
+
+
+class IteratorNode(ScopedExprNode):
# Used as part of for statement implementation.
#
# Implements result = iter(sequence)
@@ -2773,10 +2903,13 @@ class IteratorNode(ExprNode):
counter_cname = None
reversed = False # currently only used for list/tuple types (see Optimize.py)
is_async = False
+ has_local_scope = False
subexprs = ['sequence']
def analyse_types(self, env):
+ if self.expr_scope:
+ env = self.expr_scope # actually evaluate sequence in this scope instead
self.sequence = self.sequence.analyse_types(env)
if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
not self.sequence.type.is_string:
@@ -2784,6 +2917,9 @@ class IteratorNode(ExprNode):
self.type = self.sequence.type
elif self.sequence.type.is_cpp_class:
return CppIteratorNode(self.pos, sequence=self.sequence).analyse_types(env)
+ elif self.is_reversed_cpp_iteration():
+ sequence = self.sequence.arg_tuple.args[0].arg
+ return CppIteratorNode(self.pos, sequence=sequence, reversed=True).analyse_types(env)
else:
self.sequence = self.sequence.coerce_to_pyobject(env)
if self.sequence.type in (list_type, tuple_type):
@@ -2798,8 +2934,27 @@ class IteratorNode(ExprNode):
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
+ def is_reversed_cpp_iteration(self):
+ """
+ Returns True if the 'reversed' function is applied to a C++ iterable.
+
+ This supports C++ classes with reverse_iterator implemented.
+ """
+ if not (isinstance(self.sequence, SimpleCallNode) and
+ self.sequence.arg_tuple and len(self.sequence.arg_tuple.args) == 1):
+ return False
+ func = self.sequence.function
+ if func.is_name and func.name == "reversed":
+ if not func.entry.is_builtin:
+ return False
+ arg = self.sequence.arg_tuple.args[0]
+ if isinstance(arg, CoercionNode) and arg.arg.is_name:
+ arg = arg.arg.entry
+ return arg.type.is_cpp_class
+ return False
+
def type_dependencies(self, env):
- return self.sequence.type_dependencies(env)
+ return self.sequence.type_dependencies(self.expr_scope or env)
def infer_type(self, env):
sequence_type = self.sequence.infer_type(env)
@@ -2961,25 +3116,30 @@ class CppIteratorNode(ExprNode):
cpp_attribute_op = "."
extra_dereference = ""
is_temp = True
+ reversed = False
subexprs = ['sequence']
+ def get_iterator_func_names(self):
+ return ("begin", "end") if not self.reversed else ("rbegin", "rend")
+
def analyse_types(self, env):
sequence_type = self.sequence.type
if sequence_type.is_ptr:
sequence_type = sequence_type.base_type
- begin = sequence_type.scope.lookup("begin")
- end = sequence_type.scope.lookup("end")
+ begin_name, end_name = self.get_iterator_func_names()
+ begin = sequence_type.scope.lookup(begin_name)
+ end = sequence_type.scope.lookup(end_name)
if (begin is None
or not begin.type.is_cfunction
or begin.type.args):
- error(self.pos, "missing begin() on %s" % self.sequence.type)
+ error(self.pos, "missing %s() on %s" % (begin_name, self.sequence.type))
self.type = error_type
return self
if (end is None
or not end.type.is_cfunction
or end.type.args):
- error(self.pos, "missing end() on %s" % self.sequence.type)
+ error(self.pos, "missing %s() on %s" % (end_name, self.sequence.type))
self.type = error_type
return self
iter_type = begin.type.return_type
@@ -2990,37 +3150,40 @@ class CppIteratorNode(ExprNode):
self.pos,
"!=",
[iter_type, end.type.return_type]) is None:
- error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
+ error(self.pos, "missing operator!= on result of %s() on %s" % (begin_name, self.sequence.type))
self.type = error_type
return self
if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
- error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
+ error(self.pos, "missing operator++ on result of %s() on %s" % (begin_name, self.sequence.type))
self.type = error_type
return self
if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
- error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
+ error(self.pos, "missing operator* on result of %s() on %s" % (begin_name, self.sequence.type))
self.type = error_type
return self
self.type = iter_type
elif iter_type.is_ptr:
if not (iter_type == end.type.return_type):
- error(self.pos, "incompatible types for begin() and end()")
+ error(self.pos, "incompatible types for %s() and %s()" % (begin_name, end_name))
self.type = iter_type
else:
- error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
+ error(self.pos, "result type of %s() on %s must be a C++ class or pointer" % (begin_name, self.sequence.type))
self.type = error_type
return self
def generate_result_code(self, code):
sequence_type = self.sequence.type
+ begin_name, _ = self.get_iterator_func_names()
# essentially 3 options:
- if self.sequence.is_name or self.sequence.is_attribute:
- # 1) is a name and can be accessed directly;
+ if self.sequence.is_simple():
+ # 1) Sequence can be accessed directly, like a name;
# assigning to it may break the container, but that's the responsibility
# of the user
- code.putln("%s = %s%sbegin();" % (self.result(),
- self.sequence.result(),
- self.cpp_attribute_op))
+ code.putln("%s = %s%s%s();" % (
+ self.result(),
+ self.sequence.result(),
+ self.cpp_attribute_op,
+ begin_name))
else:
# (while it'd be nice to limit the scope of the loop temp, it's essentially
# impossible to do while supporting generators)
@@ -3038,23 +3201,50 @@ class CppIteratorNode(ExprNode):
code.putln("%s = %s%s;" % (self.cpp_sequence_cname,
"&" if temp_type.is_ptr else "",
self.sequence.move_result_rhs()))
- code.putln("%s = %s%sbegin();" % (self.result(), self.cpp_sequence_cname,
- self.cpp_attribute_op))
+ code.putln("%s = %s%s%s();" % (
+ self.result(),
+ self.cpp_sequence_cname,
+ self.cpp_attribute_op,
+ begin_name))
def generate_iter_next_result_code(self, result_name, code):
# end call isn't cached to support containers that allow adding while iterating
# (much as this is usually a bad idea)
- code.putln("if (!(%s%s != %s%send())) break;" % (
+ _, end_name = self.get_iterator_func_names()
+ code.putln("if (!(%s%s != %s%s%s())) break;" % (
self.extra_dereference,
self.result(),
self.cpp_sequence_cname or self.sequence.result(),
- self.cpp_attribute_op))
+ self.cpp_attribute_op,
+ end_name))
code.putln("%s = *%s%s;" % (
result_name,
self.extra_dereference,
self.result()))
code.putln("++%s%s;" % (self.extra_dereference, self.result()))
+ def generate_subexpr_disposal_code(self, code):
+ if not self.cpp_sequence_cname:
+ # the sequence is accessed directly so any temporary result in its
+ # subexpressions must remain available until the iterator is not needed
+ return
+ ExprNode.generate_subexpr_disposal_code(self, code)
+
+ def free_subexpr_temps(self, code):
+ if not self.cpp_sequence_cname:
+ # the sequence is accessed directly so any temporary result in its
+ # subexpressions must remain available until the iterator is not needed
+ return
+ ExprNode.free_subexpr_temps(self, code)
+
+ def generate_disposal_code(self, code):
+ if not self.cpp_sequence_cname:
+ # postponed from CppIteratorNode.generate_subexpr_disposal_code
+ # and CppIteratorNode.free_subexpr_temps
+ ExprNode.generate_subexpr_disposal_code(self, code)
+ ExprNode.free_subexpr_temps(self, code)
+ ExprNode.generate_disposal_code(self, code)
+
def free_temps(self, code):
if self.cpp_sequence_cname:
code.funcstate.release_temp(self.cpp_sequence_cname)
@@ -3062,6 +3252,32 @@ class CppIteratorNode(ExprNode):
ExprNode.free_temps(self, code)
+def remove_const(item_type):
+ """
+ Removes the constness of a given type and its underlying templates
+ if any.
+
+ This is to solve the compilation error when the temporary variable used to
+ store the result of an iterator cannot be changed due to its constness.
+ For example, the value_type of std::map, which will also be the type of
+ the temporary variable, is std::pair<const Key, T>. This means the first
+ component of the variable cannot be reused to store the result of each
+ iteration, which leads to a compilation error.
+ """
+ if item_type.is_const:
+ item_type = item_type.cv_base_type
+ if item_type.is_typedef:
+ item_type = remove_const(item_type.typedef_base_type)
+ if item_type.is_cpp_class and item_type.templates:
+ templates = [remove_const(t) if t.is_const else t for t in item_type.templates]
+ template_type = item_type.template_type
+ item_type = PyrexTypes.CppClassType(
+ template_type.name, template_type.scope,
+ template_type.cname, template_type.base_classes,
+ templates, template_type)
+ return item_type
+
+
class NextNode(AtomicExprNode):
# Used as part of for statement implementation.
# Implements result = next(iterator)
@@ -3104,6 +3320,7 @@ class NextNode(AtomicExprNode):
def analyse_types(self, env):
self.type = self.infer_type(env, self.iterator.type)
+ self.type = remove_const(self.type)
self.is_temp = 1
return self
@@ -3111,7 +3328,7 @@ class NextNode(AtomicExprNode):
self.iterator.generate_iter_next_result_code(self.result(), code)
-class AsyncIteratorNode(ExprNode):
+class AsyncIteratorNode(ScopedExprNode):
# Used as part of 'async for' statement implementation.
#
# Implements result = sequence.__aiter__()
@@ -3123,11 +3340,14 @@ class AsyncIteratorNode(ExprNode):
is_async = True
type = py_object_type
is_temp = 1
+ has_local_scope = False
def infer_type(self, env):
return py_object_type
def analyse_types(self, env):
+ if self.expr_scope:
+ env = self.expr_scope
self.sequence = self.sequence.analyse_types(env)
if not self.sequence.type.is_pyobject:
error(self.pos, "async for loops not allowed on C/C++ types")
@@ -3702,6 +3922,18 @@ class IndexNode(_IndexingBaseNode):
error(self.pos, "Array size must be a compile time constant")
return None
+ def analyse_pytyping_modifiers(self, env):
+ # Check for declaration modifiers, e.g. "typing.Optional[...]" or "dataclasses.InitVar[...]"
+ # TODO: somehow bring this together with TemplatedTypeNode.analyse_pytyping_modifiers()
+ modifiers = []
+ modifier_node = self
+ while modifier_node.is_subscript:
+ modifier_type = modifier_node.base.analyse_as_type(env)
+ if modifier_type.python_type_constructor_name and modifier_type.modifier_name:
+ modifiers.append(modifier_type.modifier_name)
+ modifier_node = modifier_node.index
+ return modifiers
+
def type_dependencies(self, env):
return self.base.type_dependencies(env) + self.index.type_dependencies(env)
@@ -3932,12 +4164,16 @@ class IndexNode(_IndexingBaseNode):
if base_type in (list_type, tuple_type) and self.index.type.is_int:
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
- if item_type is None:
- item_type = py_object_type
- self.type = item_type
if base_type in (list_type, tuple_type, dict_type):
# do the None check explicitly (not in a helper) to allow optimising it away
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
+ if item_type is None or not item_type.is_pyobject:
+ # Even if we inferred a C type as result, we will read a Python object, so trigger coercion if needed.
+ # We could potentially use "item_type.equivalent_type" here, but that may trigger assumptions
+ # about the actual runtime item types, rather than just their ability to coerce to the C "item_type".
+ self.type = py_object_type
+ else:
+ self.type = item_type
self.wrap_in_nonecheck_node(env, getting)
return self
@@ -4233,6 +4469,7 @@ class IndexNode(_IndexingBaseNode):
return
utility_code = None
+ error_value = None
if self.type.is_pyobject:
error_value = 'NULL'
if self.index.type.is_int:
@@ -4268,8 +4505,8 @@ class IndexNode(_IndexingBaseNode):
error_value = '-1'
utility_code = UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c")
elif not (self.base.type.is_cpp_class and self.exception_check):
- assert False, "unexpected type %s and base type %s for indexing" % (
- self.type, self.base.type)
+ assert False, "unexpected type %s and base type %s for indexing (%s)" % (
+ self.type, self.base.type, self.pos)
if utility_code is not None:
code.globalstate.use_utility_code(utility_code)
@@ -4582,17 +4819,17 @@ class BufferIndexNode(_IndexingBaseNode):
buffer_entry, ptrexpr = self.buffer_lookup_code(code)
if self.buffer_type.dtype.is_pyobject:
- # Must manage refcounts. Decref what is already there
- # and incref what we put in.
+ # Must manage refcounts. XDecref what is already there
+ # and incref what we put in (NumPy allows there to be NULL)
ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
manage_ref=False)
rhs_code = rhs.result()
code.putln("%s = %s;" % (ptr, ptrexpr))
- code.put_gotref("*%s" % ptr, self.buffer_type.dtype)
- code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % (
+ code.put_xgotref("*%s" % ptr, self.buffer_type.dtype)
+ code.putln("__Pyx_INCREF(%s); __Pyx_XDECREF(*%s);" % (
rhs_code, ptr))
code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
- code.put_giveref("*%s" % ptr, self.buffer_type.dtype)
+ code.put_xgiveref("*%s" % ptr, self.buffer_type.dtype)
code.funcstate.release_temp(ptr)
else:
# Simple case
@@ -4613,8 +4850,11 @@ class BufferIndexNode(_IndexingBaseNode):
# is_temp is True, so must pull out value and incref it.
# NOTE: object temporary results for nodes are declared
# as PyObject *, so we need a cast
- code.putln("%s = (PyObject *) *%s;" % (self.result(), self.buffer_ptr_code))
- code.putln("__Pyx_INCREF((PyObject*)%s);" % self.result())
+ res = self.result()
+ code.putln("%s = (PyObject *) *%s;" % (res, self.buffer_ptr_code))
+ # NumPy does (occasionally) allow NULL to denote None.
+ code.putln("if (unlikely(%s == NULL)) %s = Py_None;" % (res, res))
+ code.putln("__Pyx_INCREF((PyObject*)%s);" % res)
def free_subexpr_temps(self, code):
for temp in self.index_temps:
@@ -6991,6 +7231,35 @@ class AttributeNode(ExprNode):
self.entry = entry.as_variable
self.analyse_as_python_attribute(env)
return self
+ elif entry and entry.is_cfunction and self.obj.type is not Builtin.type_type:
+ # "bound" cdef function.
+ # This implementation is likely a little inefficient and could be improved.
+ # Essentially it does:
+ # __import__("functools").partial(coerce_to_object(self), self.obj)
+ from .UtilNodes import EvalWithTempExprNode, ResultRefNode
+ # take self.obj out to a temp because it's used twice
+ obj_node = ResultRefNode(self.obj, type=self.obj.type)
+ obj_node.result_ctype = self.obj.result_ctype
+ self.obj = obj_node
+ unbound_node = ExprNode.coerce_to(self, dst_type, env)
+ utility_code=UtilityCode.load_cached(
+ "PyMethodNew2Arg", "ObjectHandling.c"
+ )
+ func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("func", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("self", PyrexTypes.py_object_type, None)
+ ],
+ )
+ binding_call = PythonCapiCallNode(
+ self.pos,
+ function_name="__Pyx_PyMethod_New2Arg",
+ func_type=func_type,
+ args=[unbound_node, obj_node],
+ utility_code=utility_code,
+ )
+ complete_call = EvalWithTempExprNode(obj_node, binding_call)
+ return complete_call.analyse_types(env)
return ExprNode.coerce_to(self, dst_type, env)
def calculate_constant_result(self):
@@ -8104,7 +8373,7 @@ class SequenceNode(ExprNode):
code.put_decref(target_list, py_object_type)
code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
code.putln('#else')
- code.putln('(void)%s;' % sublist_temp) # avoid warning about unused variable
+ code.putln('CYTHON_UNUSED_VAR(%s);' % sublist_temp)
code.funcstate.release_temp(sublist_temp)
code.putln('#endif')
@@ -8417,97 +8686,6 @@ class ListNode(SequenceNode):
raise InternalError("List type never specified")
-class ScopedExprNode(ExprNode):
- # Abstract base class for ExprNodes that have their own local
- # scope, such as generator expressions.
- #
- # expr_scope Scope the inner scope of the expression
-
- subexprs = []
- expr_scope = None
-
- # does this node really have a local scope, e.g. does it leak loop
- # variables or not? non-leaking Py3 behaviour is default, except
- # for list comprehensions where the behaviour differs in Py2 and
- # Py3 (set in Parsing.py based on parser context)
- has_local_scope = True
-
- def init_scope(self, outer_scope, expr_scope=None):
- if expr_scope is not None:
- self.expr_scope = expr_scope
- elif self.has_local_scope:
- self.expr_scope = Symtab.ComprehensionScope(outer_scope)
- else:
- self.expr_scope = None
-
- def analyse_declarations(self, env):
- self.init_scope(env)
-
- def analyse_scoped_declarations(self, env):
- # this is called with the expr_scope as env
- pass
-
- def analyse_types(self, env):
- # no recursion here, the children will be analysed separately below
- return self
-
- def analyse_scoped_expressions(self, env):
- # this is called with the expr_scope as env
- return self
-
- def generate_evaluation_code(self, code):
- # set up local variables and free their references on exit
- generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
- if not self.has_local_scope or not self.expr_scope.var_entries:
- # no local variables => delegate, done
- generate_inner_evaluation_code(code)
- return
-
- code.putln('{ /* enter inner scope */')
- py_entries = []
- for _, entry in sorted(item for item in self.expr_scope.entries.items() if item[0]):
- if not entry.in_closure:
- if entry.type.is_pyobject and entry.used:
- py_entries.append(entry)
- if not py_entries:
- # no local Python references => no cleanup required
- generate_inner_evaluation_code(code)
- code.putln('} /* exit inner scope */')
- return
-
- # must free all local Python references at each exit point
- old_loop_labels = code.new_loop_labels()
- old_error_label = code.new_error_label()
-
- generate_inner_evaluation_code(code)
-
- # normal (non-error) exit
- self._generate_vars_cleanup(code, py_entries)
-
- # error/loop body exit points
- exit_scope = code.new_label('exit_scope')
- code.put_goto(exit_scope)
- for label, old_label in ([(code.error_label, old_error_label)] +
- list(zip(code.get_loop_labels(), old_loop_labels))):
- if code.label_used(label):
- code.put_label(label)
- self._generate_vars_cleanup(code, py_entries)
- code.put_goto(old_label)
- code.put_label(exit_scope)
- code.putln('} /* exit inner scope */')
-
- code.set_loop_labels(old_loop_labels)
- code.error_label = old_error_label
-
- def _generate_vars_cleanup(self, code, py_entries):
- for entry in py_entries:
- if entry.is_cglobal:
- code.put_var_gotref(entry)
- code.put_var_decref_set(entry, "Py_None")
- else:
- code.put_var_xdecref_clear(entry)
-
-
class ComprehensionNode(ScopedExprNode):
# A list/set/dict comprehension
@@ -8522,6 +8700,12 @@ class ComprehensionNode(ScopedExprNode):
def analyse_declarations(self, env):
self.append.target = self # this is used in the PyList_Append of the inner loop
self.init_scope(env)
+ # setup loop scope
+ if isinstance(self.loop, Nodes._ForInStatNode):
+ assert isinstance(self.loop.iterator, ScopedExprNode), self.loop.iterator
+ self.loop.iterator.init_scope(None, env)
+ else:
+ assert isinstance(self.loop, Nodes.ForFromStatNode), self.loop
def analyse_scoped_declarations(self, env):
self.loop.analyse_declarations(env)
@@ -8699,7 +8883,7 @@ class MergedSequenceNode(ExprNode):
if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor:
# construct a list directly from the first argument that we can then extend
if args[0].type is not list_type:
- args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True)
+ args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True, mult_factor=args[0].mult_factor)
ExprNode.__init__(self, pos, args=args, type=type)
def calculate_constant_result(self):
@@ -9790,6 +9974,12 @@ class CodeObjectNode(ExprNode):
flags.append('CO_VARARGS')
if self.def_node.starstar_arg:
flags.append('CO_VARKEYWORDS')
+ if self.def_node.is_asyncgen:
+ flags.append('CO_ASYNC_GENERATOR')
+ elif self.def_node.is_coroutine:
+ flags.append('CO_COROUTINE')
+ elif self.def_node.is_generator:
+ flags.append('CO_GENERATOR')
code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
self.result_code,
@@ -9940,10 +10130,18 @@ class GeneratorExpressionNode(LambdaNode):
#
# loop ForStatNode the for-loop, containing a YieldExprNode
# def_node DefNode the underlying generator 'def' node
+ # call_parameters [ExprNode] (Internal) parameters passed to the DefNode call
name = StringEncoding.EncodedString('genexpr')
binding = False
+ child_attrs = LambdaNode.child_attrs + ["call_parameters"]
+ subexprs = LambdaNode.subexprs + ["call_parameters"]
+
+ def __init__(self, pos, *args, **kwds):
+ super(GeneratorExpressionNode, self).__init__(pos, *args, **kwds)
+ self.call_parameters = []
+
def analyse_declarations(self, env):
if hasattr(self, "genexpr_name"):
# this if-statement makes it safe to run twice
@@ -9956,13 +10154,22 @@ class GeneratorExpressionNode(LambdaNode):
self.def_node.is_cyfunction = False
# Force genexpr signature
self.def_node.entry.signature = TypeSlots.pyfunction_noargs
+ # setup loop scope
+ if isinstance(self.loop, Nodes._ForInStatNode):
+ assert isinstance(self.loop.iterator, ScopedExprNode)
+ self.loop.iterator.init_scope(None, env)
+ else:
+ assert isinstance(self.loop, Nodes.ForFromStatNode)
def generate_result_code(self, code):
+ args_to_call = ([self.closure_result_code()] +
+ [ cp.result() for cp in self.call_parameters ])
+ args_to_call = ", ".join(args_to_call)
code.putln(
'%s = %s(%s); %s' % (
self.result(),
self.def_node.entry.pyfunc_cname,
- self.closure_result_code(),
+ args_to_call,
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
@@ -10066,6 +10273,8 @@ class YieldExprNode(ExprNode):
if type.is_pyobject:
code.putln('%s = 0;' % save_cname)
code.put_xgotref(cname, type)
+ elif type.is_memoryviewslice:
+ code.putln('%s.memview = NULL; %s.data = NULL;' % (save_cname, save_cname))
self.generate_sent_value_handling_code(code, Naming.sent_value_cname)
if self.result_is_used:
self.allocate_temp_result(code)
@@ -10289,6 +10498,7 @@ class UnopNode(ExprNode):
subexprs = ['operand']
infix = True
+ is_inc_dec_op = False
def calculate_constant_result(self):
func = compile_time_unary_operators[self.operator]
@@ -10400,7 +10610,10 @@ class UnopNode(ExprNode):
self.type = PyrexTypes.error_type
def analyse_cpp_operation(self, env, overload_check=True):
- entry = env.lookup_operator(self.operator, [self.operand])
+ operand_types = [self.operand.type]
+ if self.is_inc_dec_op and not self.is_prefix:
+ operand_types.append(PyrexTypes.c_int_type)
+ entry = env.lookup_operator_for_types(self.pos, self.operator, operand_types)
if overload_check and not entry:
self.type_error()
return
@@ -10414,7 +10627,12 @@ class UnopNode(ExprNode):
else:
self.exception_check = ''
self.exception_value = ''
- cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
+ if self.is_inc_dec_op and not self.is_prefix:
+ cpp_type = self.operand.type.find_cpp_operation_type(
+ self.operator, operand_type=PyrexTypes.c_int_type
+ )
+ else:
+ cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
if overload_check and cpp_type is None:
error(self.pos, "'%s' operator not defined for %s" % (
self.operator, type))
@@ -10556,6 +10774,17 @@ class DereferenceNode(CUnopNode):
class DecrementIncrementNode(CUnopNode):
# unary ++/-- operator
+ is_inc_dec_op = True
+
+ def type_error(self):
+ if not self.operand.type.is_error:
+ if self.is_prefix:
+ error(self.pos, "No match for 'operator%s' (operand type is '%s')" %
+ (self.operator, self.operand.type))
+ else:
+ error(self.pos, "No 'operator%s(int)' declared for postfix '%s' (operand type is '%s')" %
+ (self.operator, self.operator, self.operand.type))
+ self.type = PyrexTypes.error_type
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
@@ -14020,10 +14249,8 @@ class AnnotationNode(ExprNode):
def analyse_type_annotation(self, env, assigned_value=None):
if self.untyped:
# Already applied as a fused type, not re-evaluating it here.
- return None, None
+ return [], None
annotation = self.expr
- base_type = None
- is_ambiguous = False
explicit_pytype = explicit_ctype = False
if annotation.is_dict_literal:
warning(annotation.pos,
@@ -14040,36 +14267,29 @@ class AnnotationNode(ExprNode):
annotation = value
if explicit_pytype and explicit_ctype:
warning(annotation.pos, "Duplicate type declarations found in signature annotation", level=1)
- arg_type = annotation.analyse_as_type(env)
- if annotation.is_name and not annotation.cython_attribute and annotation.name in ('int', 'long', 'float'):
- # Map builtin numeric Python types to C types in safe cases.
- if assigned_value is not None and arg_type is not None and not arg_type.is_pyobject:
- assigned_type = assigned_value.infer_type(env)
- if assigned_type and assigned_type.is_pyobject:
- # C type seems unsafe, e.g. due to 'None' default value => ignore annotation type
- is_ambiguous = True
- arg_type = None
- # ignore 'int' and require 'cython.int' to avoid unsafe integer declarations
- if arg_type in (PyrexTypes.c_long_type, PyrexTypes.c_int_type, PyrexTypes.c_float_type):
- arg_type = PyrexTypes.c_double_type if annotation.name == 'float' else py_object_type
- elif arg_type is not None and annotation.is_string_literal:
+
+ with env.new_c_type_context(in_c_type_context=explicit_ctype):
+ arg_type = annotation.analyse_as_type(env)
+
+ if arg_type is None:
+ warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
+ return [], arg_type
+
+ if annotation.is_string_literal:
warning(annotation.pos,
"Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.",
level=1)
- elif arg_type is not None and arg_type.is_complex:
+ if explicit_pytype and not explicit_ctype and not (arg_type.is_pyobject or arg_type.equivalent_type):
+ warning(annotation.pos,
+ "Python type declaration in signature annotation does not refer to a Python type")
+ if arg_type.is_complex:
# creating utility code needs to be special-cased for complex types
arg_type.create_declaration_utility_code(env)
- if arg_type is not None:
- if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
- warning(annotation.pos,
- "Python type declaration in signature annotation does not refer to a Python type")
- base_type = Nodes.CAnalysedBaseTypeNode(
- annotation.pos, type=arg_type, is_arg=True)
- elif is_ambiguous:
- warning(annotation.pos, "Ambiguous types in annotation, ignoring")
- else:
- warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
- return base_type, arg_type
+
+ # Check for declaration modifiers, e.g. "typing.Optional[...]" or "dataclasses.InitVar[...]"
+ modifiers = annotation.analyse_pytyping_modifiers(env) if annotation.is_subscript else []
+
+ return modifiers, arg_type
class AssignmentExpressionNode(ExprNode):