author    Stefan Behnel <stefan_ml@behnel.de>  2020-04-17 10:39:03 +0200
committer Stefan Behnel <stefan_ml@behnel.de>  2020-04-17 10:39:03 +0200
commit    2d46e8c66b0b40ad05ab7ceb9c0a3c7ea6e2343f (patch)
tree      b66deb658d769f8c3447e8846f8b1ad33eb75bc3
parent    d6d8b3456253fe1a3d08a628058b51db3eb955a5 (diff)
download  cython-2d46e8c66b0b40ad05ab7ceb9c0a3c7ea6e2343f.tar.gz
Cleanup more generator temps (GH-3522)
* Fixed indexing temps for non-Python objects.
* Moved the cleanup into free_temps() since the temps survive until the result_code has been used.
 Cython/Compiler/ExprNodes.py | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)
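For context, a minimal Cython sketch (illustrative only, not taken from the commit or its tests) of the kind of code this change affects: indexing a typed memoryview of a non-Python dtype inside a generator. The index temporaries allocated for values[i] feed into the node's result code, so they may only be released after that result has been generated; the function and variable names below are assumptions made for this example.

# hypothetical example.pyx -- names chosen for illustration only
def doubled(double[:] values):
    # Indexing a typed memoryview compiles through BufferIndexNode; the
    # (s)size_t index temps it allocates must stay alive until the yielded
    # value's result code has been emitted.
    cdef Py_ssize_t i
    for i in range(values.shape[0]):
        yield values[i] * 2.0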
diff --git a/Cython/Compiler/ExprNodes.py b/Cython/Compiler/ExprNodes.py
index 81fde57ef..7c878c183 100644
--- a/Cython/Compiler/ExprNodes.py
+++ b/Cython/Compiler/ExprNodes.py
@@ -4200,6 +4200,9 @@ class BufferIndexNode(_IndexingBaseNode):
     # Whether we're assigning to a buffer (in that case it needs to be writable)
     writable_needed = False
 
+    # Any indexing temp variables that we need to clean up.
+    index_temps = ()
+
     def analyse_target_types(self, env):
         self.analyse_types(env, getting=False)
 
@@ -4284,7 +4287,7 @@ class BufferIndexNode(_IndexingBaseNode):
                     warning(self.pos, "Use boundscheck(False) for faster access", level=1)
 
         # Assign indices to temps of at least (s)size_t to allow further index calculations.
-        index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices]
+        self.index_temps = index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices]
 
         # Generate buffer access code using these temps
         from . import Buffer
@@ -4294,7 +4297,7 @@
         else:
             negative_indices = Buffer.buffer_defaults['negative_indices']
 
-        buffer_lookup_result = Buffer.put_buffer_lookup_code(
+        return buffer_entry, Buffer.put_buffer_lookup_code(
             entry=buffer_entry,
             index_signeds=[ivar.type.signed for ivar in self.indices],
             index_cnames=index_temps,
@@ -4303,9 +4306,6 @@ class BufferIndexNode(_IndexingBaseNode):
             negative_indices=negative_indices,
             in_nogil_context=self.in_nogil_context)
 
-        # must return index_temps since that cannot be released until buffer_lookup_result has been used
-        return buffer_entry, buffer_lookup_result, index_temps
-
     def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
         self.generate_subexpr_evaluation_code(code)
         self.generate_buffer_setitem_code(rhs, code)
@@ -4336,7 +4336,7 @@
             return
 
         # Used from generate_assignment_code and InPlaceAssignmentNode
-        buffer_entry, ptrexpr, buffer_temps = self.buffer_lookup_code(code)
+        buffer_entry, ptrexpr = self.buffer_lookup_code(code)
 
         if self.buffer_type.dtype.is_pyobject:
             # Must manage refcounts. Decref what is already there
@@ -4354,8 +4354,6 @@
         else:
             # Simple case
             code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
-        for temp in buffer_temps:
-            code.funcstate.release_temp(temp)
 
     def generate_result_code(self, code):
         if is_pythran_expr(self.base.type):
@@ -4367,15 +4365,18 @@
                 self.base.pythran_result(),
                 pythran_indexing_code(self.indices)))
             return
-        buffer_entry, self.buffer_ptr_code, buffer_temps = self.buffer_lookup_code(code)
+        buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
         if self.type.is_pyobject:
             # is_temp is True, so must pull out value and incref it.
             # NOTE: object temporary results for nodes are declared
             # as PyObject *, so we need a cast
             code.putln("%s = (PyObject *) *%s;" % (self.result(), self.buffer_ptr_code))
             code.putln("__Pyx_INCREF((PyObject*)%s);" % self.result())
-        for temp in buffer_temps:
-            code.funcstate.release_temp(temp)
+
+    def free_temps(self, code):
+        for temp in self.index_temps:
+            code.funcstate.release_temp(temp)
+        super(BufferIndexNode, self).free_temps(code)
 
 
 class MemoryViewIndexNode(BufferIndexNode):