author     Dag Sverre Seljebotn <dagss@student.matnat.uio.no>    2009-12-14 15:54:46 +0100
committer  Dag Sverre Seljebotn <dagss@student.matnat.uio.no>    2009-12-14 15:54:46 +0100
commit     7dbc32ed5bbcc3f0617f6f2027fdc70713f8a641 (patch)
tree       21e79e5895fb242fbd174280a9b42751a4ff2ee9
parent     5109df243090ec49b3f6cdc5254feaacda464afd (diff)
parent     e43b0ae081492d91bec6e150706d74b20e88abe2 (diff)
download   cython-7dbc32ed5bbcc3f0617f6f2027fdc70713f8a641.tar.gz

Merge in current devel (only memoryview testcases fail)
-rw-r--r--  .hgignore                                   2
-rw-r--r--  .hgtags                                    22
-rw-r--r--  Cython/Compiler/Buffer.py                 363
-rw-r--r--  Cython/Compiler/Code.py                     9
-rw-r--r--  Cython/Compiler/CythonScope.py            348
-rw-r--r--  Cython/Compiler/ExprNodes.py              141
-rw-r--r--  Cython/Compiler/Main.py                   170
-rw-r--r--  Cython/Compiler/MemoryView.py             984
-rw-r--r--  Cython/Compiler/ModuleNode.py              40
-rw-r--r--  Cython/Compiler/Naming.py                   6
-rw-r--r--  Cython/Compiler/Nodes.py                   88
-rw-r--r--  Cython/Compiler/Optimize.py                 6
-rw-r--r--  Cython/Compiler/Options.py                  3
-rw-r--r--  Cython/Compiler/ParseTreeTransforms.py     10
-rw-r--r--  Cython/Compiler/Parsing.py                100
-rw-r--r--  Cython/Compiler/Pipeline.py               218
-rw-r--r--  Cython/Compiler/PyrexTypes.py             204
-rw-r--r--  Cython/Compiler/Symtab.py                  49
-rw-r--r--  Cython/Compiler/Tests/TestBuffer.py         5
-rw-r--r--  Cython/Compiler/Tests/TestMemView.py       71
-rw-r--r--  Cython/Compiler/TreeFragment.py            16
-rw-r--r--  Cython/Compiler/UtilityCode.py             65
-rw-r--r--  tests/compile/memview_declaration.pyx      17
-rw-r--r--  tests/errors/e_bufaccess.pyx                4
-rw-r--r--  tests/errors/memview_declarations.pyx      47
-rw-r--r--  tests/run/cymemoryview.pyx                 15
-rw-r--r--  tests/run/cythonarray.pyx                  58
-rw-r--r--  tests/run/cythonscope.pyx                  34
-rw-r--r--  tests/run/memoryview.pyx                   77
-rw-r--r--  tests/run/numpy_memoryviewattrs.pyx       286
30 files changed, 3007 insertions, 451 deletions
diff --git a/.hgignore b/.hgignore
index a4e97a7fe..2962dc51f 100644
--- a/.hgignore
+++ b/.hgignore
@@ -13,3 +13,5 @@ build/
*.dep
tags
+Tools/fwrap/fparser
+Tools/fwrap/Cython
diff --git a/.hgtags b/.hgtags
deleted file mode 100644
index 0128d4a27..000000000
--- a/.hgtags
+++ /dev/null
@@ -1,22 +0,0 @@
-966abe58538dfbdaccd53bd970d4998c78ea522e 0.9.6.14
-67ee5a34bfc662e4e3cf989c2c8bf78a412ae8f4 0.9.8rc1
-16a746d969e2654112fc0dc081690b891c496977 Version-0.9.8
-a09347d7b470290076b983aef98707921445a038 0.9.8.1
-82084a7b654e2a133ab64ceb47e03d6e7a204990 0.9.9.2.beta
-a89b05b78236a27a654f3004bdffc7b8a56311a7 0.10
-ef9d2c680684d0df7d81f529cda29e9e1741f575 cython-0.10.1
-92baafe0edf3cea00deb7ce1e31e337bb485af1a 0.10.2
-cdf889c30e7a7053de20bae3a578dad09ebcbdf5 0.10.3
-59c67af0674bd93c5fd8958e08c76a9dab9aae37 sage-cythonizes
-a4abf0156540db4d3ebaa95712b65811c43c5acb 0.11-beta
-838a6b7cae62e01dc0ce663cccab1f93f649fdbd 0.11.rc
-4497f635d5fdbd38ebb841be4869fbfa2bbfdbb6 0.11.1.alpha
-7bc36a0f81723117a19f92ffde1676a0884fef65 0.11.1.beta
-6454db601984145f38e28d34176fca8a3a22329c 0.11.1
-af6f1bed8cd40a2edefb57d3eacbc9274a8788b4 0.11.2.rc1
-15ad532e2127840ae09dfbe46ccc80ac8c562f99 0.11.2
-eb00d00a73c13b6aa8b440fe07cd7acb52a060e8 0.11.3.rc0
-7c695fe49fd6912f52d995fe512d66baacf90ee6 0.11.3
-4208042ceeae634f5c0999b8ab75f69faf46b6db 0.12.alpha0
-e77827f09af67560aa82a18feab778f71ca0a9d3 0.12.rc0
-fae19937e4945c59a5d9d62c63f1c3b09046c3a3 0.12
diff --git a/Cython/Compiler/Buffer.py b/Cython/Compiler/Buffer.py
index e807d4281..a76bfef7e 100644
--- a/Cython/Compiler/Buffer.py
+++ b/Cython/Compiler/Buffer.py
@@ -4,7 +4,9 @@ from Nodes import *
from ExprNodes import *
from StringEncoding import EncodedString
from Errors import CompileError
+from UtilityCode import CythonUtilityCode
from Code import UtilityCode
+import Cython.Compiler.Options
import Interpreter
import PyrexTypes
@@ -29,12 +31,14 @@ class IntroduceBufferAuxiliaryVars(CythonTransform):
#
buffers_exists = False
+ using_memoryview = False
def __call__(self, node):
assert isinstance(node, ModuleNode)
self.max_ndim = 0
result = super(IntroduceBufferAuxiliaryVars, self).__call__(node)
- if self.buffers_exists:
+ if self.buffers_exists or self.using_memoryview:
+ use_bufstruct_declare_code(node.scope)
use_py2_buffer_functions(node.scope)
use_empty_bufstruct_code(node.scope, self.max_ndim)
return result
@@ -53,6 +57,18 @@ class IntroduceBufferAuxiliaryVars(CythonTransform):
if len(bufvars) > 0:
self.buffers_exists = True
+ memviewslicevars = [entry for name, entry
+ in scope.entries.iteritems()
+ if entry.type.is_memoryviewslice]
+ if len(memviewslicevars) > 0:
+ self.buffers_exists = True
+
+
+ for (name, entry) in scope.entries.iteritems():
+ if name == 'memoryview' and isinstance(entry.utility_code_definition, CythonUtilityCode):
+ self.using_memoryview = True
+ break
+
if isinstance(node, ModuleNode) and len(bufvars) > 0:
# for now...note that pos is wrong
@@ -63,36 +79,27 @@ class IntroduceBufferAuxiliaryVars(CythonTransform):
name = entry.name
buftype = entry.type
+ if buftype.ndim > Options.buffer_max_dims:
+ raise CompileError(node.pos,
+ "Buffer ndims exceeds Options.buffer_max_dims = %d" % Options.buffer_max_dims)
if buftype.ndim > self.max_ndim:
self.max_ndim = buftype.ndim
# Declare auxiliary vars
- cname = scope.mangle(Naming.bufstruct_prefix, name)
- bufinfo = scope.declare_var(name="$%s" % cname, cname=cname,
- type=PyrexTypes.c_py_buffer_type, pos=node.pos)
- if entry.is_arg:
- bufinfo.used = True # otherwise, NameNode will mark whether it is used
-
- def var(prefix, idx, initval):
- cname = scope.mangle(prefix, "%d_%s" % (idx, name))
- result = scope.declare_var("$%s" % cname, PyrexTypes.c_py_ssize_t_type,
- node.pos, cname=cname, is_cdef=True)
-
- result.init = initval
+ def decvar(type, prefix):
+ cname = scope.mangle(prefix, name)
+ aux_var = scope.declare_var(name="$%s" % cname, cname=cname,
+ type=type, pos=node.pos)
if entry.is_arg:
- result.used = True
- return result
-
+ aux_var.used = True # otherwise, NameNode will mark whether it is used
+
+ return aux_var
- stridevars = [var(Naming.bufstride_prefix, i, "0") for i in range(entry.type.ndim)]
- shapevars = [var(Naming.bufshape_prefix, i, "0") for i in range(entry.type.ndim)]
- mode = entry.type.mode
- if mode == 'full':
- suboffsetvars = [var(Naming.bufsuboffset_prefix, i, "-1") for i in range(entry.type.ndim)]
- else:
- suboffsetvars = None
+ auxvars = ((PyrexTypes.c_pyx_buffer_nd_type, Naming.pybuffernd_prefix),
+ (PyrexTypes.c_pyx_buffer_type, Naming.pybufferstruct_prefix))
+ pybuffernd, rcbuffer = [decvar(type, prefix) for (type, prefix) in auxvars]
- entry.buffer_aux = Symtab.BufferAux(bufinfo, stridevars, shapevars, suboffsetvars)
+ entry.buffer_aux = Symtab.BufferAux(pybuffernd, rcbuffer)
scope.buffer_entries = bufvars
self.scope = scope
@@ -191,6 +198,15 @@ def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, nee
# Code generation
#
+def get_buf_suboffsetvars(entry):
+ return [("%s.diminfo[%d].suboffsets" % \
+ (entry.buffer_aux.buflocal_nd_var.cname, i)) for i in range(entry.type.ndim)]
+def get_buf_stridevars(entry):
+ return [("%s.diminfo[%d].strides" % \
+ (entry.buffer_aux.buflocal_nd_var.cname, i)) for i in range(entry.type.ndim)]
+def get_buf_shapevars(entry):
+ return [("%s.diminfo[%d].shape" % \
+ (entry.buffer_aux.buflocal_nd_var.cname, i)) for i in range(entry.type.ndim)]
def get_flags(buffer_aux, buffer_type):
flags = 'PyBUF_FORMAT'
@@ -210,26 +226,39 @@ def get_flags(buffer_aux, buffer_type):
def used_buffer_aux_vars(entry):
buffer_aux = entry.buffer_aux
- buffer_aux.buffer_info_var.used = True
- for s in buffer_aux.shapevars: s.used = True
- for s in buffer_aux.stridevars: s.used = True
- if buffer_aux.suboffsetvars:
- for s in buffer_aux.suboffsetvars: s.used = True
+ buffer_aux.buflocal_nd_var.used = True
+ buffer_aux.rcbuf_var.used = True
-def put_unpack_buffer_aux_into_scope(buffer_aux, mode, code):
+def put_unpack_buffer_aux_into_scope(buf_entry, code):
# Generate code to copy the needed struct info into local
# variables.
- bufstruct = buffer_aux.buffer_info_var.cname
+ buffer_aux, mode = buf_entry.buffer_aux, buf_entry.type.mode
+ pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
- varspec = [("strides", buffer_aux.stridevars),
- ("shape", buffer_aux.shapevars)]
+ fldnames = ['strides', 'shape']
if mode == 'full':
- varspec.append(("suboffsets", buffer_aux.suboffsetvars))
+ fldnames.append('suboffsets')
- for field, vars in varspec:
- code.putln(" ".join(["%s = %s.%s[%d];" %
- (s.cname, bufstruct, field, idx)
- for idx, s in enumerate(vars)]))
+ ln = []
+ for i in range(buf_entry.type.ndim):
+ for fldname in fldnames:
+ ln.append("%s.diminfo[%d].%s = %s.rcbuffer->pybuffer.%s[%d];" % \
+ (pybuffernd_struct, i, fldname,
+ pybuffernd_struct, fldname, i))
+ code.putln(' '.join(ln))
+
+def put_init_vars(entry, code):
+ bufaux = entry.buffer_aux
+ pybuffernd_struct = bufaux.buflocal_nd_var.cname
+ pybuffer_struct = bufaux.rcbuf_var.cname
+ # init pybuffer_struct
+ code.putln("%s.pybuffer.buf = NULL;" % pybuffer_struct)
+ code.putln("%s.refcount = 0;" % pybuffer_struct)
+ # init the buffer object
+ # code.put_init_var_to_py_none(entry)
+ # init the pybuffernd_struct
+ code.putln("%s.data = NULL;" % pybuffernd_struct)
+ code.putln("%s.rcbuffer = &%s;" % (pybuffernd_struct, pybuffer_struct))
def put_acquire_arg_buffer(entry, code, pos):
code.globalstate.use_utility_code(acquire_utility_code)
@@ -243,25 +272,25 @@ def put_acquire_arg_buffer(entry, code, pos):
code.putln("}")
# An exception raised in arg parsing cannot be caught, so no
# need to care about the buffer then.
- put_unpack_buffer_aux_into_scope(buffer_aux, entry.type.mode, code)
+ put_unpack_buffer_aux_into_scope(entry, code)
def put_release_buffer_code(code, entry):
code.globalstate.use_utility_code(acquire_utility_code)
- code.putln("__Pyx_SafeReleaseBuffer(&%s);" % entry.buffer_aux.buffer_info_var.cname)
+ code.putln("__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);" % entry.buffer_aux.buflocal_nd_var.cname)
def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
ndim = buffer_type.ndim
cast = int(buffer_type.cast)
flags = get_flags(buffer_aux, buffer_type)
- bufstruct = buffer_aux.buffer_info_var.cname
+ pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)
- return ("__Pyx_GetBufferAndValidate(&%(bufstruct)s, "
+ return ("__Pyx_GetBufferAndValidate(&%(pybuffernd_struct)s.rcbuffer->pybuffer, "
"(PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, "
- "%(cast)d, __pyx_stack)" % locals())
+ "%(cast)d, __pyx_stack)" % locals())
-def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type,
+def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry,
is_initialized, pos, code):
"""
Generate code for reassigning a buffer variable. This only deals with getting
@@ -277,8 +306,9 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type,
(which may or may not succeed).
"""
+ buffer_aux, buffer_type = buf_entry.buffer_aux, buf_entry.type
code.globalstate.use_utility_code(acquire_utility_code)
- bufstruct = buffer_aux.buffer_info_var.cname
+ pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
flags = get_flags(buffer_aux, buffer_type)
code.putln("{") # Set up necessary stack for getbuffer
@@ -288,7 +318,7 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type,
if is_initialized:
# Release any existing buffer
- code.putln('__Pyx_SafeReleaseBuffer(&%s);' % bufstruct)
+ code.putln('__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);' % pybuffernd_struct)
# Acquire
retcode_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = %s;" % (retcode_cname, getbuffer % rhs_cname))
@@ -311,7 +341,7 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type,
code.putln('}')
code.putln('}')
# Unpack indices
- put_unpack_buffer_aux_into_scope(buffer_aux, buffer_type.mode, code)
+ put_unpack_buffer_aux_into_scope(buf_entry, code)
code.putln(code.error_goto_if_neg(retcode_cname, pos))
code.funcstate.release_temp(retcode_cname)
else:
@@ -319,14 +349,14 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type,
# In this case, auxiliary vars should be set up right in initialization to a zero-buffer,
# so it suffices to set the buf field to NULL.
code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % rhs_cname)))
- code.putln('%s = %s; __Pyx_INCREF(Py_None); %s.buf = NULL;' %
+ code.putln('%s = %s; __Pyx_INCREF(Py_None); %s.rcbuffer->pybuffer.buf = NULL;' %
(lhs_cname,
PyrexTypes.typecast(buffer_type, PyrexTypes.py_object_type, "Py_None"),
- bufstruct))
+ pybuffernd_struct))
code.putln(code.error_goto(pos))
code.put('} else {')
# Unpack indices
- put_unpack_buffer_aux_into_scope(buffer_aux, buffer_type.mode, code)
+ put_unpack_buffer_aux_into_scope(buf_entry, code)
code.putln('}')
code.putln("}") # Release stack
@@ -345,7 +375,8 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos,
"""
bufaux = entry.buffer_aux
- bufstruct = bufaux.buffer_info_var.cname
+ pybuffernd_struct = bufaux.buflocal_nd_var.cname
+ # bufstruct = bufaux.buffer_info_var.cname
negative_indices = directives['wraparound'] and entry.type.negative_indices
if directives['boundscheck']:
@@ -356,12 +387,12 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos,
tmp_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = -1;" % tmp_cname)
for dim, (signed, cname, shape) in enumerate(zip(index_signeds, index_cnames,
- bufaux.shapevars)):
+ get_buf_shapevars(entry))):
if signed != 0:
# not unsigned, deal with negative index
code.putln("if (%s < 0) {" % cname)
if negative_indices:
- code.putln("%s += %s;" % (cname, shape.cname))
+ code.putln("%s += %s;" % (cname, shape))
code.putln("if (%s) %s = %d;" % (
code.unlikely("%s < 0" % cname), tmp_cname, dim))
else:
@@ -369,7 +400,7 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos,
code.put("} else ")
# check bounds in positive direction
code.putln("if (%s) %s = %d;" % (
- code.unlikely("%s >= %s" % (cname, shape.cname)),
+ code.unlikely("%s >= %s" % (cname, shape)),
tmp_cname, dim))
code.globalstate.use_utility_code(raise_indexerror_code)
code.putln("if (%s) {" % code.unlikely("%s != -1" % tmp_cname))
@@ -380,9 +411,9 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos,
elif negative_indices:
# Only fix negative indices.
for signed, cname, shape in zip(index_signeds, index_cnames,
- bufaux.shapevars):
+ get_buf_shapevars(entry)):
if signed != 0:
- code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape.cname))
+ code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape))
# Create buffer lookup and return it
# This is done via utility macros/inline functions, which vary
@@ -391,10 +422,10 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos,
nd = entry.type.ndim
mode = entry.type.mode
if mode == 'full':
- for i, s, o in zip(index_cnames, bufaux.stridevars, bufaux.suboffsetvars):
+ for i, s, o in zip(index_cnames, get_buf_stridevars(entry), get_buf_suboffsetvars(entry)):
params.append(i)
- params.append(s.cname)
- params.append(o.cname)
+ params.append(s)
+ params.append(o)
funcname = "__Pyx_BufPtrFull%dd" % nd
funcgen = buf_lookup_full_code
else:
@@ -409,9 +440,9 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos,
funcgen = buf_lookup_fortran_code
else:
assert False
- for i, s in zip(index_cnames, bufaux.stridevars):
+ for i, s in zip(index_cnames, get_buf_stridevars(entry)):
params.append(i)
- params.append(s.cname)
+ params.append(s)
# Make sure the utility code is available
if funcname not in code.globalstate.utility_codes:
@@ -421,13 +452,16 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos,
funcgen(protocode, defcode, name=funcname, nd=nd)
ptr_type = entry.type.buffer_ptr_type
- ptrcode = "%s(%s, %s.buf, %s)" % (funcname,
+ ptrcode = "%s(%s, %s.rcbuffer->pybuffer.buf, %s)" % (funcname,
ptr_type.declaration_code(""),
- bufstruct,
+ pybuffernd_struct,
", ".join(params))
return ptrcode
+def use_bufstruct_declare_code(env):
+ env.use_utility_code(buffer_struct_declare_code)
+
def use_empty_bufstruct_code(env, max_ndim):
code = dedent("""
Py_ssize_t __Pyx_zeros[] = {%s};
@@ -492,82 +526,109 @@ def buf_lookup_fortran_code(proto, defin, name, nd):
def use_py2_buffer_functions(env):
+ env.use_utility_code(GetAndReleaseBufferUtilityCode())
+
+class GetAndReleaseBufferUtilityCode(object):
# Emulation of PyObject_GetBuffer and PyBuffer_Release for Python 2.
# For >= 2.6 we do double mode -- use the new buffer interface on objects
# which has the right tp_flags set, but emulation otherwise.
- # Search all types for __getbuffer__ overloads
- types = []
- visited_scopes = set()
- def find_buffer_types(scope):
- if scope in visited_scopes:
- return
- visited_scopes.add(scope)
- for m in scope.cimported_modules:
- find_buffer_types(m)
- for e in scope.type_entries:
- t = e.type
- if t.is_extension_type:
- release = get = None
- for x in t.scope.pyfunc_entries:
- if x.name == u"__getbuffer__": get = x.func_cname
- elif x.name == u"__releasebuffer__": release = x.func_cname
- if get:
- types.append((t.typeptr_cname, get, release))
-
- find_buffer_types(env)
+ requires = None
- code = dedent("""
- #if PY_MAJOR_VERSION < 3
- static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
- #if PY_VERSION_HEX >= 0x02060000
- if (Py_TYPE(obj)->tp_flags & Py_TPFLAGS_HAVE_NEWBUFFER)
- return PyObject_GetBuffer(obj, view, flags);
- #endif
- """)
- if len(types) > 0:
- clause = "if"
- for t, get, release in types:
- code += " %s (PyObject_TypeCheck(obj, %s)) return %s(obj, view, flags);\n" % (clause, t, get)
- clause = "else if"
- code += " else {\n"
- code += dedent("""\
- PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
- return -1;
- """, 2)
- if len(types) > 0: code += " }"
- code += dedent("""
- }
+ def __init__(self):
+ pass
- static void __Pyx_ReleaseBuffer(Py_buffer *view) {
- PyObject* obj = view->obj;
- if (obj) {
- """)
- if len(types) > 0:
- clause = "if"
- for t, get, release in types:
- if release:
- code += "%s (PyObject_TypeCheck(obj, %s)) %s(obj, view);" % (clause, t, release)
+ def __eq__(self, other):
+ return isinstance(other, GetAndReleaseBufferUtilityCode)
+
+ def __hash__(self):
+ return 24342342
+
+ def get_tree(self): pass
+
+ def put_code(self, output):
+ code = output['utility_code_def']
+ proto = output['utility_code_proto']
+ env = output.module_node.scope
+ cython_scope = env.context.cython_scope
+
+ proto.put(dedent("""\
+ #if PY_MAJOR_VERSION < 3
+ static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
+ static void __Pyx_ReleaseBuffer(Py_buffer *view);
+ #else
+ #define __Pyx_GetBuffer PyObject_GetBuffer
+ #define __Pyx_ReleaseBuffer PyBuffer_Release
+ #endif
+ """))
+
+ # Search all types for __getbuffer__ overloads
+ types = []
+ visited_scopes = set()
+ def find_buffer_types(scope):
+ if scope in visited_scopes:
+ return
+ visited_scopes.add(scope)
+ for m in scope.cimported_modules:
+ find_buffer_types(m)
+ for e in scope.type_entries:
+ t = e.type
+ if t.is_extension_type:
+ if scope is cython_scope and not e.used:
+ continue
+ release = get = None
+ for x in t.scope.pyfunc_entries:
+ if x.name == u"__getbuffer__": get = x.func_cname
+ elif x.name == u"__releasebuffer__": release = x.func_cname
+ if get:
+ types.append((t.typeptr_cname, get, release))
+
+ find_buffer_types(env)
+
+ code.put(dedent("""
+ #if PY_MAJOR_VERSION < 3
+ static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
+ #if PY_VERSION_HEX >= 0x02060000
+ if (Py_TYPE(obj)->tp_flags & Py_TPFLAGS_HAVE_NEWBUFFER)
+ return PyObject_GetBuffer(obj, view, flags);
+ #endif
+ """))
+
+ if len(types) > 0:
+ clause = "if"
+ for t, get, release in types:
+ code.putln(" %s (PyObject_TypeCheck(obj, %s)) return %s(obj, view, flags);" % (clause, t, get))
clause = "else if"
- code += dedent("""
- Py_DECREF(obj);
- view->obj = NULL;
- }
- }
+ code.putln(" else {")
+ code.put(dedent("""\
+ PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
+ return -1;
+ """, 2))
+ if len(types) > 0:
+ code.putln(" }")
+ code.put(dedent("""\
+ }
+
+ static void __Pyx_ReleaseBuffer(Py_buffer *view) {
+ PyObject* obj = view->obj;
+ if (obj) {
+ """))
+
+ if len(types) > 0:
+ clause = "if"
+ for t, get, release in types:
+ if release:
+ code.putln("%s (PyObject_TypeCheck(obj, %s)) %s(obj, view);" % (clause, t, release))
+ clause = "else if"
+ code.put(dedent("""
+ Py_DECREF(obj);
+ view->obj = NULL;
+ }
+ }
+
+ #endif
+ """))
- #endif
- """)
-
- env.use_utility_code(UtilityCode(
- proto = dedent("""\
- #if PY_MAJOR_VERSION < 3
- static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
- static void __Pyx_ReleaseBuffer(Py_buffer *view);
- #else
- #define __Pyx_GetBuffer PyObject_GetBuffer
- #define __Pyx_ReleaseBuffer PyBuffer_Release
- #endif
- """), impl = code))
def mangle_dtype_name(dtype):
@@ -656,6 +717,27 @@ def get_type_information_cname(code, dtype, maxdepth=None):
), safe=True)
return name
+buffer_struct_declare_code = UtilityCode(proto="""
+
+/* structs for buffer access */
+
+typedef struct {
+ Py_ssize_t shape, strides, suboffsets;
+} __Pyx_Buf_DimInfo;
+
+typedef struct {
+ size_t refcount;
+ Py_buffer pybuffer;
+} __Pyx_Buffer;
+
+typedef struct {
+ __Pyx_Buffer *rcbuffer;
+ char *data;
+ __Pyx_Buf_DimInfo diminfo[%d];
+} __Pyx_LocalBuf_ND;
+
+""" % Options.buffer_max_dims)
+
# Utility function to set the right exception
# The caller should immediately goto_error
@@ -722,15 +804,6 @@ typedef struct {
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
-
-static INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
-static int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
-""", impl="""
-static INLINE int __Pyx_IsLittleEndian(void) {
- unsigned int n = 1;
- return *(unsigned char*)(&n) != 0;
-}
-
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
@@ -741,6 +814,16 @@ typedef struct {
char packmode;
} __Pyx_BufFmt_Context;
+
+static INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
+static int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
+""", impl="""
+static INLINE int __Pyx_IsLittleEndian(void) {
+ unsigned int n = 1;
+ return *(unsigned char*)(&n) != 0;
+}
+
+
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
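
[Editorial sketch, not part of the patch.] The new Buffer.py helpers above (get_buf_stridevars, get_buf_shapevars, get_buf_suboffsetvars) no longer hand out per-dimension temporaries; they build C member expressions against the single __Pyx_LocalBuf_ND auxiliary struct declared in buffer_struct_declare_code. A minimal standalone Python sketch of that pattern; the struct cname "__pyx_pybuffernd_arr" and ndim == 2 are hypothetical example values:

    def buf_dim_exprs(struct_cname, field, ndim):
        # Mirrors the get_buf_*vars helpers: one C expression per dimension,
        # all reading the diminfo array of the __Pyx_LocalBuf_ND struct.
        return ["%s.diminfo[%d].%s" % (struct_cname, i, field) for i in range(ndim)]

    # Hypothetical buffer entry named 'arr' with two dimensions:
    print(buf_dim_exprs("__pyx_pybuffernd_arr", "strides", 2))
    # -> ['__pyx_pybuffernd_arr.diminfo[0].strides', '__pyx_pybuffernd_arr.diminfo[1].strides']
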
diff --git a/Cython/Compiler/Code.py b/Cython/Compiler/Code.py
index b514047e6..356a4a312 100644
--- a/Cython/Compiler/Code.py
+++ b/Cython/Compiler/Code.py
@@ -23,7 +23,7 @@ class UtilityCode(object):
# See GlobalState.put_utility_code.
#
# hashes/equals by instance
-
+
def __init__(self, proto=None, impl=None, init=None, cleanup=None, requires=None,
proto_block='utility_code_proto'):
# proto_block: Which code block to dump prototype in. See GlobalState.
@@ -36,6 +36,9 @@ class UtilityCode(object):
self.specialize_list = []
self.proto_block = proto_block
+ def get_tree(self):
+ pass
+
def specialize(self, pyrex_type=None, **data):
# Dicts aren't hashable...
if pyrex_type is not None:
@@ -431,7 +434,7 @@ class GlobalState(object):
]
- def __init__(self, writer, emit_linenums=False):
+ def __init__(self, writer, module_node, emit_linenums=False):
self.filename_table = {}
self.filename_list = []
self.input_file_contents = {}
@@ -440,6 +443,8 @@ class GlobalState(object):
self.in_utility_code_generation = False
self.emit_linenums = emit_linenums
self.parts = {}
+ self.module_node = module_node # because some utility code generation needs it
+ # (generating backwards-compatible Get/ReleaseBuffer)
self.const_cname_counter = 1
self.string_const_index = {}
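
[Editorial sketch, not part of the patch.] GlobalState now receives the module_node so that self-emitting utility-code objects, such as GetAndReleaseBufferUtilityCode above, can inspect the module scope from put_code. Those objects only need a small duck-typed surface. A rough sketch of that surface, using plain lists as a stand-in for the real code-writer parts (the names here are illustrative, not the compiler's actual writers):

    class SketchUtilityCode(object):
        # Rough sketch of the interface the diff relies on: __eq__/__hash__ so the
        # global state can de-duplicate requests, get_tree() as a no-op hook, and
        # put_code() writing into named output parts.
        def __eq__(self, other):
            return isinstance(other, SketchUtilityCode)
        def __hash__(self):
            return hash(SketchUtilityCode)
        def get_tree(self):
            pass
        def put_code(self, output):
            output['utility_code_proto'].append("/* proto goes here */")
            output['utility_code_def'].append("/* implementation goes here */")

    output = {'utility_code_proto': [], 'utility_code_def': []}
    SketchUtilityCode().put_code(output)
    print(output)
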
diff --git a/Cython/Compiler/CythonScope.py b/Cython/Compiler/CythonScope.py
index 54dc38666..69af08a56 100644
--- a/Cython/Compiler/CythonScope.py
+++ b/Cython/Compiler/CythonScope.py
@@ -1,20 +1,16 @@
from Symtab import ModuleScope
from PyrexTypes import *
-
-shape_func_type = CFuncType(
- c_ptr_type(c_py_ssize_t_type),
- [CFuncTypeArg("buffer", py_object_type, None)])
+from UtilityCode import CythonUtilityCode
+from Errors import error
+from Scanning import StringSourceDescriptor
+import Options
+import Buffer
class CythonScope(ModuleScope):
- def __init__(self, context):
- ModuleScope.__init__(self, u'cython', None, context)
+ def __init__(self):
+ ModuleScope.__init__(self, u'cython', None, None, no_outer_scope=True)
self.pxd_file_loaded = True
-
- self.shape_entry = self.declare_cfunction('shape',
- shape_func_type,
- pos=None,
- defining = 1,
- cname='<error>')
+ self.populate_cython_scope()
def lookup_type(self, name):
# This function should go away when types are all first-level objects.
@@ -22,28 +18,308 @@ class CythonScope(ModuleScope):
if type:
return type
+ def find_module(self, module_name, pos):
+ error("cython.%s is not available" % module_name, pos)
+
+ def find_submodule(self, module_name):
+ entry = self.entries.get(module_name, None)
+ if entry and entry.as_module:
+ return entry.as_module
+ else:
+ # TODO: fix find_submodule control flow so that we're not
+ # expected to create a submodule here (to protect CythonScope's
+ # possible immutability). Hack ourselves out of the situation
+ # for now.
+ raise error((StringSourceDescriptor(u"cython", u""), 0, 0),
+ "cython.%s is not available" % module_name)
+
+ def populate_cython_scope(self):
+ # These are used to optimize isinstance in FinalOptimizePhase
+ type_object = self.declare_typedef(
+ 'PyTypeObject',
+ base_type = c_void_type,
+ pos = None,
+ cname = 'PyTypeObject')
+ type_object.is_void = True
+
+ self.declare_cfunction(
+ 'PyObject_TypeCheck',
+ CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None),
+ CFuncTypeArg("t", c_ptr_type(type_object), None)]),
+ pos = None,
+ defining = 1,
+ cname = 'PyObject_TypeCheck')
+
+ # A special function just to make it easy to test the scope and
+ # utility code functionality in isolation. It is available to
+ # "end-users" but nobody will know it is there anyway...
+ entry = self.declare_cfunction(
+ '_testscope',
+ CFuncType(py_object_type, [CFuncTypeArg("value", c_int_type, None)]),
+ pos=None,
+ defining=1,
+ cname='__pyx_cython__testscope'
+ )
+ entry.utility_code_definition = cython_testscope_utility_code
+
+ #
+ # The view sub-scope
+ #
+ self.viewscope = viewscope = ModuleScope(u'view', self, None, no_outer_scope=True)
+ self.declare_module('view', viewscope, None)
+ viewscope.pxd_file_loaded = True
+ entry = viewscope.declare_cfunction(
+ '_testscope',
+ CFuncType(py_object_type, [CFuncTypeArg("value", c_int_type, None)]),
+ pos=None,
+ defining=1,
+ cname='__pyx_cython_view__testscope'
+ )
+ entry.utility_code_definition = cythonview_testscope_utility_code
+
+
+ for x in ('strided', 'contig', 'follow', 'direct', 'ptr', 'full'):
+ entry = viewscope.declare_var(x, py_object_type, None,
+ cname='__pyx_viewaxis_%s' % x,
+ is_cdef=True)
+ entry.utility_code_definition = view_utility_code
+
+ #
+ # cython.view.memoryview declaration
+ #
+ self.memviewentry = entry = viewscope.declare_c_class(memview_name, None,
+ implementing=1,
+ objstruct_cname = memview_objstruct_cname,
+ typeobj_cname = memview_typeobj_cname,
+ typeptr_cname= memview_typeptr_cname)
+
+ entry.utility_code_definition = view_utility_code
+
+ #
+ # cython.array declaration
+ #
+ name = u'array'
+ entry = self.declare_c_class(name, None,
+ implementing=1,
+ objstruct_cname='__pyx_obj_array',
+ typeobj_cname='__pyx_tobj_array',
+ typeptr_cname=Naming.typeptr_prefix+name)
+
+ # NOTE: the typeptr_cname is constrained to be '__pyx_ptype_<name>'
+ # (name is 'array' in this case). otherwise the code generation for
+ # the struct pointers will not work!
+
+ entry.utility_code_definition = cython_array_utility_code
+
+ arr_scope = entry.type.scope
+
+ arr_scope.declare_var(u'data', c_char_ptr_type, None, is_cdef = 1)
+ arr_scope.declare_var(u'len', c_size_t_type, None, is_cdef = 1)
+ arr_scope.declare_var(u'format', c_char_ptr_type, None, is_cdef = 1)
+ arr_scope.declare_var(u'ndim', c_int_type, None, is_cdef = 1)
+ arr_scope.declare_var(u'shape', c_py_ssize_t_ptr_type, None, is_cdef = 1)
+ arr_scope.declare_var(u'strides', c_py_ssize_t_ptr_type, None, is_cdef = 1)
+ arr_scope.declare_var(u'itemsize', c_py_ssize_t_type, None, is_cdef = 1)
+
+ # declare the __getbuffer__ & __releasebuffer__ functions
+
+ for name in ('__getbuffer__', '__releasebuffer__'):
+ entry = arr_scope.declare_pyfunction(name, None)
+ # FIXME XXX: hack right here!!!
+ entry.func_cname = '__pyx_pf_9__pyxutil_5array_' + name
+ entry.utility_code_definition = cython_array_utility_code
+
+ #
+ # Declare the array modes
+ #
+ entry = self.declare_var(u'PyBUF_C_CONTIGUOUS', c_int_type, None,
+ cname='PyBUF_C_CONTIGUOUS',is_cdef = 1)
+ entry = self.declare_var(u'PyBUF_F_CONTIGUOUS', c_int_type, None,
+ is_cdef = 1)
+ entry = self.declare_var(u'PyBUF_ANY_CONTIGUOUS', c_int_type, None,
+ is_cdef = 1)
+
def create_cython_scope(context):
- create_utility_scope(context)
- return CythonScope(context)
-
-
-def create_utility_scope(context):
- global utility_scope
- utility_scope = ModuleScope(u'utility', None, context)
-
- # These are used to optimize isinstance in FinalOptimizePhase
- type_object = utility_scope.declare_typedef('PyTypeObject',
- base_type = c_void_type,
- pos = None,
- cname = 'PyTypeObject')
- type_object.is_void = True
-
- utility_scope.declare_cfunction(
- 'PyObject_TypeCheck',
- CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None),
- CFuncTypeArg("t", c_ptr_type(type_object), None)]),
- pos = None,
- defining = 1,
- cname = 'PyObject_TypeCheck')
-
- return utility_scope
+ # One could in fact probably make it a singleton,
+ # but not sure yet whether any code mutates it (which would kill reusing
+ # it across different contexts)
+ return CythonScope()
+
+cython_testscope_utility_code = CythonUtilityCode(u"""
+cdef object _testscope(int value):
+ return "hello from cython scope, value=%d" % value
+""", name="cython utility code", prefix="__pyx_cython_")
+
+cythonview_testscope_utility_code = CythonUtilityCode(u"""
+cdef object _testscope(int value):
+ return "hello from cython.view scope, value=%d" % value
+""", name="cython utility code", prefix="__pyx_cython_view_")
+
+memview_name = u'memoryview'
+memview_typeptr_cname = Naming.typeptr_prefix+memview_name
+memview_typeobj_cname = '__pyx_tobj_'+memview_name
+memview_objstruct_cname = '__pyx_obj_'+memview_name
+view_utility_code = CythonUtilityCode(
+u"""
+cdef class Enum(object):
+ cdef object name
+ def __init__(self, name):
+ self.name = name
+ def __repr__(self):
+ return self.name
+
+cdef strided = Enum("<strided axis packing mode>")
+cdef contig = Enum("<contig axis packing mode>")
+cdef follow = Enum("<follow axis packing mode>")
+cdef direct = Enum("<direct axis access mode>")
+cdef ptr = Enum("<ptr axis access mode>")
+cdef full = Enum("<full axis access mode>")
+
+cdef extern from *:
+ int __Pyx_GetBuffer(object, Py_buffer *, int)
+ void __Pyx_ReleaseBuffer(Py_buffer *)
+
+cdef class memoryview(object):
+
+ cdef object obj
+ cdef Py_buffer view
+ cdef int gotbuf_flag
+
+ def __cinit__(memoryview self, object obj, int flags):
+ self.obj = obj
+ __Pyx_GetBuffer(self.obj, &self.view, flags)
+
+ def __dealloc__(memoryview self):
+ self.obj = None
+ __Pyx_ReleaseBuffer(&self.view)
+
+cdef memoryview memoryview_cwrapper(object o, int flags):
+ return memoryview(o, flags)
+""", name="view_code",
+ prefix="__pyx_viewaxis_",
+ requires=(Buffer.GetAndReleaseBufferUtilityCode(),))
+
+cyarray_prefix = u'__pyx_cythonarray_'
+cython_array_utility_code = CythonUtilityCode(u'''
+cdef extern from "stdlib.h":
+ void *malloc(size_t)
+ void free(void *)
+
+cdef extern from "Python.h":
+
+ cdef enum:
+ PyBUF_C_CONTIGUOUS,
+ PyBUF_F_CONTIGUOUS,
+ PyBUF_ANY_CONTIGUOUS
+
+cdef class array:
+
+ cdef:
+ char *data
+ Py_ssize_t len
+ char *format
+ int ndim
+ Py_ssize_t *shape
+ Py_ssize_t *strides
+ Py_ssize_t itemsize
+ str mode
+
+ def __cinit__(array self, tuple shape, Py_ssize_t itemsize, char *format, mode="c"):
+
+ self.ndim = len(shape)
+ self.itemsize = itemsize
+
+ if not self.ndim:
+ raise ValueError("Empty shape tuple for cython.array")
+
+ if self.itemsize <= 0:
+ raise ValueError("itemsize <= 0 for cython.array")
+
+ self.format = format
+
+ self.shape = <Py_ssize_t *>malloc(sizeof(Py_ssize_t)*self.ndim)
+ self.strides = <Py_ssize_t *>malloc(sizeof(Py_ssize_t)*self.ndim)
+
+ if not self.shape or not self.strides:
+ raise MemoryError("unable to allocate shape or strides.")
+
+ cdef int idx
+ cdef Py_ssize_t int_dim, stride
+ idx = 0
+ for dim in shape:
+ int_dim = <Py_ssize_t>dim
+ if int_dim <= 0:
+ raise ValueError("Invalid shape.")
+ self.shape[idx] = int_dim
+ idx += 1
+ assert idx == self.ndim
+
+ if mode == "fortran":
+ idx = 0; stride = 1
+ for dim in shape:
+ self.strides[idx] = stride*itemsize
+ int_dim = <Py_ssize_t>dim
+ stride = stride * int_dim
+ idx += 1
+ assert idx == self.ndim
+ self.len = stride * itemsize
+ elif mode == "c":
+ idx = self.ndim-1; stride = 1
+ for dim in reversed(shape):
+ self.strides[idx] = stride*itemsize
+ int_dim = <Py_ssize_t>dim
+ stride = stride * int_dim
+ idx -= 1
+ assert idx == -1
+ self.len = stride * itemsize
+ else:
+ raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
+
+ self.mode = mode
+
+ self.data = <char *>malloc(self.len)
+ if not self.data:
+ raise MemoryError("unable to allocate array data.")
+
+ def __getbuffer__(self, Py_buffer *info, int flags):
+
+ cdef int bufmode = -1
+ if self.mode == "c":
+ bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
+ elif self.mode == "fortran":
+ bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
+ if not (flags & bufmode):
+ raise ValueError("Can only create a buffer that is contiguous in memory.")
+ info.buf = self.data
+ info.len = self.len
+ info.ndim = self.ndim
+ info.shape = self.shape
+ info.strides = self.strides
+ info.suboffsets = NULL
+ info.itemsize = self.itemsize
+ info.format = self.format
+ # we do not need to call releasebuffer
+ info.obj = None
+
+ def __releasebuffer__(array self, Py_buffer* info):
+ # array.__releasebuffer__ should not be called,
+ # because the Py_buffer's 'obj' field is set to None.
+ raise NotImplementedError()
+
+ def __dealloc__(array self):
+ if self.data:
+ free(self.data)
+ self.data = NULL
+ if self.strides:
+ free(self.strides)
+ self.strides = NULL
+ if self.shape:
+ free(self.shape)
+ self.shape = NULL
+ self.format = NULL
+ self.itemsize = 0
+
+cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *mode):
+ return array(shape, itemsize, format, mode)
+
+''', prefix=cyarray_prefix)
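
[Editorial sketch, not part of the patch.] The array.__cinit__ in the utility code above derives strides from the shape in either C ("c") or Fortran ("fortran") order. A standalone Python sketch of that stride computation, using a hypothetical shape (2, 3) with itemsize 8 purely as an example:

    def compute_strides(shape, itemsize, mode="c"):
        # Same scheme as the cython.array constructor above: walk the dimensions
        # from the contiguous end; each stride is the running product of the
        # dimensions already visited, scaled by the item size.
        strides = [0] * len(shape)
        stride = 1
        if mode == "c":
            order = range(len(shape) - 1, -1, -1)
        else:  # "fortran"
            order = range(len(shape))
        for idx in order:
            strides[idx] = stride * itemsize
            stride *= shape[idx]
        return strides, stride * itemsize  # per-axis strides, total size in bytes

    print(compute_strides((2, 3), 8, "c"))        # ([24, 8], 48)
    print(compute_strides((2, 3), 8, "fortran"))  # ([8, 16], 48)
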
diff --git a/Cython/Compiler/ExprNodes.py b/Cython/Compiler/ExprNodes.py
index d2789833e..0df1855b1 100644
--- a/Cython/Compiler/ExprNodes.py
+++ b/Cython/Compiler/ExprNodes.py
@@ -12,7 +12,7 @@ import Naming
import Nodes
from Nodes import Node
import PyrexTypes
-from PyrexTypes import py_object_type, c_long_type, typecast, error_type, unspecified_type
+from PyrexTypes import py_object_type, c_long_type, typecast, error_type, unspecified_type, cython_memoryview_ptr_type
from Builtin import list_type, tuple_type, set_type, dict_type, \
unicode_type, str_type, bytes_type, type_type
import Builtin
@@ -374,7 +374,7 @@ class ExprNode(Node):
# By default, any expression based on Python objects is
# prevented in nogil environments. Subtypes must override
# this if they can work without the GIL.
- if self.type.is_pyobject:
+ if self.type and self.type.is_pyobject:
self.gil_error()
def gil_assignment_check(self, env):
@@ -548,7 +548,14 @@ class ExprNode(Node):
if self.check_for_coercion_error(dst_type):
return self
- if dst_type.is_pyobject:
+ if dst_type.is_memoryviewslice:
+ import MemoryView
+ if not src.type.is_memoryviewslice:
+ src = CoerceToMemViewSliceNode(src, dst_type, env)
+ elif not MemoryView.src_conforms_to_dst(src.type, dst_type):
+ error(self.pos, "Memoryview '%s' not conformable to memoryview '%s'." %
+ (src.type, dst_type))
+ elif dst_type.is_pyobject:
if not src.type.is_pyobject:
src = CoerceToPyTypeNode(src, env)
if not src.type.subtype_of(dst_type):
@@ -1354,7 +1361,13 @@ class NameNode(AtomicExprNode):
rhs.generate_disposal_code(code)
rhs.free_temps(code)
else:
- if self.type.is_buffer:
+ if self.type.is_memoryviewslice:
+ import MemoryView
+ MemoryView.gen_acquire_memoryviewslice(rhs, self.type,
+ self.entry.is_cglobal, self.result(), self.pos, code)
+ # self.generate_acquire_memoryviewslice(rhs, code)
+
+ elif self.type.is_buffer:
# Generate code for doing the buffer release/acquisition.
# This might raise an exception in which case the assignment (done
# below) will not happen.
@@ -1385,11 +1398,12 @@ class NameNode(AtomicExprNode):
if self.use_managed_ref:
if entry.is_cglobal:
code.put_giveref(rhs.py_result())
- code.putln('%s = %s;' % (self.result(), rhs.result_as(self.ctype())))
- if debug_disposal_code:
- print("NameNode.generate_assignment_code:")
- print("...generating post-assignment code for %s" % rhs)
- rhs.generate_post_assignment_code(code)
+ if not self.type.is_memoryviewslice:
+ code.putln('%s = %s;' % (self.result(), rhs.result_as(self.ctype())))
+ if debug_disposal_code:
+ print("NameNode.generate_assignment_code:")
+ print("...generating post-assignment code for %s" % rhs)
+ rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
def generate_acquire_buffer(self, rhs, code):
@@ -1403,10 +1417,8 @@ class NameNode(AtomicExprNode):
rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
- buffer_aux = self.entry.buffer_aux
- bufstruct = buffer_aux.buffer_info_var.cname
import Buffer
- Buffer.put_assign_to_buffer(self.result(), rhstmp, buffer_aux, self.entry.type,
+ Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
is_initialized=not self.lhs_of_first_assignment,
pos=self.pos, code=code)
@@ -1796,6 +1808,7 @@ class IndexNode(ExprNode):
# For buffers, self.index is packed out on the initial analysis, and
# when cloning self.indices is copied.
self.is_buffer_access = False
+ self.is_memoryviewslice_access = False
self.base.analyse_types(env)
if self.base.type.is_error:
@@ -1810,6 +1823,7 @@ class IndexNode(ExprNode):
skip_child_analysis = False
buffer_access = False
+ memoryviewslice_access = False
if self.base.type.is_buffer:
assert hasattr(self.base, "entry") # Must be a NameNode-like node
if self.indices:
@@ -1826,6 +1840,12 @@ class IndexNode(ExprNode):
x.analyse_types(env)
if not x.type.is_int:
buffer_access = False
+
+ if self.base.type.is_memoryviewslice:
+ assert hasattr(self.base, "entry")
+ if self.indices or not isinstance(self.index, EllipsisNode):
+ error(self.pos, "Memoryviews currently support ellipsis indexing only.")
+ else: memoryviewslice_access = True
# On cloning, indices is cloned. Otherwise, unpack index into indices
assert not (buffer_access and isinstance(self.index, CloneNode))
@@ -1844,6 +1864,13 @@ class IndexNode(ExprNode):
error(self.pos, "Writing to readonly buffer")
else:
self.base.entry.buffer_aux.writable_needed = True
+
+ elif memoryviewslice_access:
+ self.type = self.base.type
+ self.is_memoryviewslice_access = True
+ if getting:
+ error(self.pos, "memoryviews currently support setting only.")
+
else:
if isinstance(self.index, TupleNode):
self.index.analyse_types(env, skip_children=skip_child_analysis)
@@ -2004,6 +2031,14 @@ class IndexNode(ExprNode):
self.extra_index_params(),
code.error_goto(self.pos)))
+ def generate_memoryviewslice_setitem_code(self, rhs, code, op=""):
+ assert isinstance(self.index, EllipsisNode)
+ import MemoryView
+ util_code = MemoryView.CopyContentsFuncUtilCode(rhs.type, self.type)
+ func_name = util_code.copy_contents_name
+ code.putln(code.error_goto_if_neg("%s(&%s, &%s)" % (func_name, rhs.result(), self.base.result()), self.pos))
+ code.globalstate.use_utility_code(util_code)
+
def generate_buffer_setitem_code(self, rhs, code, op=""):
# Used from generate_assignment_code and InPlaceAssignmentNode
if code.globalstate.directives['nonecheck']:
@@ -2030,6 +2065,8 @@ class IndexNode(ExprNode):
self.generate_subexpr_evaluation_code(code)
if self.is_buffer_access:
self.generate_buffer_setitem_code(rhs, code)
+ elif self.is_memoryviewslice_access:
+ self.generate_memoryviewslice_setitem_code(rhs, code)
elif self.type.is_pyobject:
self.generate_setitem_code(rhs.py_result(), code)
else:
@@ -2935,6 +2972,7 @@ class AttributeNode(ExprNode):
entry.is_cglobal or entry.is_cfunction
or entry.is_type or entry.is_const):
self.mutate_into_name_node(env, entry, target)
+ entry.used = 1
return 1
return 0
@@ -3133,7 +3171,7 @@ class AttributeNode(ExprNode):
else:
# result_code contains what is needed, but we may need to insert
# a check and raise an exception
- if (self.obj.type.is_extension_type
+ if (self.obj.type.needs_nonecheck()
and self.needs_none_check
and code.globalstate.directives['nonecheck']):
self.put_nonecheck(code)
@@ -3155,7 +3193,7 @@ class AttributeNode(ExprNode):
self.obj.result_as(self.obj.type),
rhs.result_as(self.ctype())))
else:
- if (self.obj.type.is_extension_type
+ if (self.obj.type.needs_nonecheck()
and self.needs_none_check
and code.globalstate.directives['nonecheck']):
self.put_nonecheck(code)
@@ -3166,11 +3204,18 @@ class AttributeNode(ExprNode):
code.put_giveref(rhs.py_result())
code.put_gotref(select_code)
code.put_decref(select_code, self.ctype())
- code.putln(
- "%s = %s;" % (
- select_code,
- rhs.result_as(self.ctype())))
- #rhs.result()))
+ elif self.type.is_memoryviewslice:
+ import MemoryView
+ MemoryView.put_assign_to_memviewslice(select_code, rhs.result(), self.type,
+ pos=self.pos, code=code)
+ if rhs.is_temp:
+ code.put_xdecref_clear("%s.memview" % rhs.result(), cython_memoryview_ptr_type)
+ if not self.type.is_memoryviewslice:
+ code.putln(
+ "%s = %s;" % (
+ select_code,
+ rhs.result_as(self.ctype())))
+ #rhs.result()))
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
self.obj.generate_disposal_code(code)
@@ -3197,7 +3242,13 @@ class AttributeNode(ExprNode):
def put_nonecheck(self, code):
code.globalstate.use_utility_code(raise_noneattr_error_utility_code)
- code.putln("if (%s) {" % code.unlikely("%s == Py_None") % self.obj.result_as(PyrexTypes.py_object_type))
+ if self.obj.type.is_extension_type:
+ test = "%s == Py_None" % self.obj.result_as(PyrexTypes.py_object_type)
+ elif self.obj.type.is_memoryviewslice:
+ test = "!%s.memview" % self.obj.result()
+ else:
+ assert False
+ code.putln("if (%s) {" % code.unlikely(test))
code.putln("__Pyx_RaiseNoneAttributeError(\"%s\");" % self.attribute)
code.putln(code.error_goto(self.pos))
code.putln("}")
@@ -5635,6 +5686,56 @@ class CoercionNode(ExprNode):
file, line, col = self.pos
code.annotate((file, line, col-1), AnnotationItem(style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
+class CoerceToMemViewSliceNode(CoercionNode):
+
+ def __init__(self, arg, dst_type, env):
+ assert dst_type.is_memoryviewslice
+ assert not arg.type.is_memoryviewslice
+ CoercionNode.__init__(self, arg)
+ self.type = dst_type
+ self.env = env
+ self.is_temp = 1
+
+ def generate_result_code(self, code):
+ import MemoryView, Buffer
+ memviewobj = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
+ buf_flag = MemoryView.get_buf_flag(self.type.axes)
+ code.putln("%s = (PyObject *)"
+ "__pyx_viewaxis_memoryview_cwrapper(%s, %s);" %\
+ (memviewobj, self.arg.py_result(), buf_flag))
+ code.putln(code.error_goto_if_PyErr(self.pos))
+ ndim = len(self.type.axes)
+ spec_int_arr = code.funcstate.allocate_temp(
+ PyrexTypes.c_array_type(PyrexTypes.c_int_type, ndim),
+ manage_ref=False)
+ specs_code = MemoryView.specs_to_code(self.type.axes)
+ for idx, cspec in enumerate(specs_code):
+ code.putln("%s[%d] = %s;" % (spec_int_arr, idx, cspec))
+
+ code.globalstate.use_utility_code(Buffer.acquire_utility_code)
+ code.globalstate.use_utility_code(MemoryView.memviewslice_init_code)
+ dtype_typeinfo = Buffer.get_type_information_cname(code, self.type.dtype)
+
+ MemoryView.put_init_entry(self.result(), code)
+ code.putln("{")
+ code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" %
+ self.type.dtype.struct_nesting_depth())
+ result = self.result()
+ if self.type.is_c_contig:
+ c_or_f_flag = "__Pyx_IS_C_CONTIG"
+ elif self.type.is_f_contig:
+ c_or_f_flag = "__Pyx_IS_F_CONTIG"
+ else:
+ c_or_f_flag = "0"
+ code.putln(code.error_goto_if("-1 == __Pyx_ValidateAndInit_memviewslice("
+ "(struct __pyx_obj_memoryview *) %(memviewobj)s,"
+ " %(spec_int_arr)s, %(c_or_f_flag)s, %(ndim)d,"
+ " &%(dtype_typeinfo)s, __pyx_stack, &%(result)s)" % locals(), self.pos))
+ code.putln("}")
+ code.put_gotref(
+ code.as_pyobject("%s.memview" % self.result(), cython_memoryview_ptr_type))
+ code.funcstate.release_temp(memviewobj)
+ code.funcstate.release_temp(spec_int_arr)
class CastNode(CoercionNode):
# Wrap a node in a C type cast.
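
[Editorial sketch, not part of the patch.] The new coerce_to branch above only allows assignment between two memoryview slice types when MemoryView.src_conforms_to_dst (added later in this diff) accepts every axis. A standalone sketch of that per-axis rule; the axis tuples in the example calls are made up for illustration:

    def axis_conforms(src_spec, dst_spec):
        # Per-axis test mirrored from src_conforms_to_dst: an access spec always
        # conforms to 'full', and a packing spec always conforms to 'strided';
        # otherwise the specs must match exactly.
        (src_access, src_packing), (dst_access, dst_packing) = src_spec, dst_spec
        if src_access != dst_access and dst_access != 'full':
            return False
        if src_packing != dst_packing and dst_packing != 'strided':
            return False
        return True

    print(axis_conforms(('direct', 'contig'), ('full', 'strided')))   # True
    print(axis_conforms(('direct', 'contig'), ('direct', 'follow')))  # False
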
diff --git a/Cython/Compiler/Main.py b/Cython/Compiler/Main.py
index e869e9816..c9e4833dd 100644
--- a/Cython/Compiler/Main.py
+++ b/Cython/Compiler/Main.py
@@ -23,24 +23,12 @@ from Errors import PyrexError, CompileError, InternalError, error
from Symtab import BuiltinScope, ModuleScope
from Cython import Utils
from Cython.Utils import open_new_file, replace_suffix
-import CythonScope
import DebugFlags
module_name_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$")
verbose = 0
-def dumptree(t):
- # For quick debugging in pipelines
- print t.dump()
- return t
-
-def abort_on_errors(node):
- # Stop the pipeline if there are any errors.
- if Errors.num_errors != 0:
- raise InternalError, "abort"
- return node
-
class CompilationData(object):
# Bundles the information that is passed from transform to transform.
# (For now, this is only)
@@ -65,12 +53,12 @@ class Context(object):
# modules {string : ModuleScope}
# include_directories [string]
# future_directives [object]
-
+
+ cython_scope = None
def __init__(self, include_directories, compiler_directives):
- #self.modules = {"__builtin__" : BuiltinScope()}
import Builtin, CythonScope
self.modules = {"__builtin__" : Builtin.builtin_scope}
- self.modules["cython"] = CythonScope.create_cython_scope(self)
+ self.modules["cython"] = self.cython_scope = CythonScope.create_cython_scope(self)
self.include_directories = include_directories
self.future_directives = set()
self.compiler_directives = compiler_directives
@@ -81,155 +69,18 @@ class Context(object):
os.path.join(os.path.dirname(__file__), '..', 'Includes'))
self.include_directories = include_directories + [standard_include_path]
- def create_pipeline(self, pxd, py=False):
- from Visitor import PrintTree
- from ParseTreeTransforms import WithTransform, NormalizeTree, PostParse, PxdPostParse
- from ParseTreeTransforms import AnalyseDeclarationsTransform, AnalyseExpressionsTransform
- from ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform
- from ParseTreeTransforms import InterpretCompilerDirectives, TransformBuiltinMethods
- from TypeInference import MarkAssignments
- from ParseTreeTransforms import AlignFunctionDefinitions, GilCheck
- from AnalysedTreeTransforms import AutoTestDictTransform
- from AutoDocTransforms import EmbedSignature
- from Optimize import FlattenInListTransform, SwitchTransform, IterationTransform
- from Optimize import EarlyReplaceBuiltinCalls, OptimizeBuiltinCalls
- from Optimize import ConstantFolding, FinalOptimizePhase
- from Optimize import DropRefcountingTransform
- from Buffer import IntroduceBufferAuxiliaryVars
- from ModuleNode import check_c_declarations, check_c_declarations_pxd
-
- # Temporary hack that can be used to ensure that all result_code's
- # are generated at code generation time.
- import Visitor
- class ClearResultCodes(Visitor.CythonTransform):
- def visit_ExprNode(self, node):
- self.visitchildren(node)
- node.result_code = "<cleared>"
- return node
-
- if pxd:
- _check_c_declarations = check_c_declarations_pxd
- _specific_post_parse = PxdPostParse(self)
- else:
- _check_c_declarations = check_c_declarations
- _specific_post_parse = None
-
- if py and not pxd:
- _align_function_definitions = AlignFunctionDefinitions(self)
- else:
- _align_function_definitions = None
-
- return [
- NormalizeTree(self),
- PostParse(self),
- _specific_post_parse,
- InterpretCompilerDirectives(self, self.compiler_directives),
- _align_function_definitions,
- ConstantFolding(),
- FlattenInListTransform(),
- WithTransform(self),
- DecoratorTransform(self),
- AnalyseDeclarationsTransform(self),
- AutoTestDictTransform(self),
- EmbedSignature(self),
- EarlyReplaceBuiltinCalls(self),
- MarkAssignments(self),
- TransformBuiltinMethods(self),
- IntroduceBufferAuxiliaryVars(self),
- _check_c_declarations,
- AnalyseExpressionsTransform(self),
- OptimizeBuiltinCalls(self),
- IterationTransform(),
- SwitchTransform(),
- DropRefcountingTransform(),
- FinalOptimizePhase(self),
- GilCheck(),
-# ClearResultCodes(self),
-# SpecialFunctions(self),
- # CreateClosureClasses(context),
- ]
-
- def create_pyx_pipeline(self, options, result, py=False):
- def generate_pyx_code(module_node):
- module_node.process_implementation(options, result)
- result.compilation_source = module_node.compilation_source
- return result
-
- def inject_pxd_code(module_node):
- from textwrap import dedent
- stats = module_node.body.stats
- for name, (statlistnode, scope) in self.pxds.iteritems():
- # Copy over function nodes to the module
- # (this seems strange -- I believe the right concept is to split
- # ModuleNode into a ModuleNode and a CodeGenerator, and tell that
- # CodeGenerator to generate code both from the pyx and pxd ModuleNodes.
- stats.append(statlistnode)
- # Until utility code is moved to code generation phase everywhere,
- # we need to copy it over to the main scope
- module_node.scope.utility_code_list.extend(scope.utility_code_list)
- return module_node
-
- test_support = []
- if options.evaluate_tree_assertions:
- from Cython.TestUtils import TreeAssertVisitor
- test_support.append(TreeAssertVisitor())
-
- return ([
- create_parse(self),
- ] + self.create_pipeline(pxd=False, py=py) + test_support + [
- inject_pxd_code,
- abort_on_errors,
- generate_pyx_code,
- ])
-
- def create_pxd_pipeline(self, scope, module_name):
- def parse_pxd(source_desc):
- tree = self.parse(source_desc, scope, pxd=True,
- full_module_name=module_name)
- tree.scope = scope
- tree.is_pxd = True
- return tree
-
- from CodeGeneration import ExtractPxdCode
-
- # The pxd pipeline ends up with a CCodeWriter containing the
- # code of the pxd, as well as a pxd scope.
- return [parse_pxd] + self.create_pipeline(pxd=True) + [
- ExtractPxdCode(self),
- ]
-
- def create_py_pipeline(self, options, result):
- return self.create_pyx_pipeline(options, result, py=True)
+ # pipeline creation functions can now be found in Pipeline.py
def process_pxd(self, source_desc, scope, module_name):
- pipeline = self.create_pxd_pipeline(scope, module_name)
- result = self.run_pipeline(pipeline, source_desc)
+ import Pipeline
+ pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name)
+ result = Pipeline.run_pipeline(pipeline, source_desc)
return result
def nonfatal_error(self, exc):
return Errors.report_error(exc)
- def run_pipeline(self, pipeline, source):
- error = None
- data = source
- try:
- for phase in pipeline:
- if phase is not None:
- if DebugFlags.debug_verbose_pipeline:
- print "Entering pipeline phase %r" % phase
- data = phase(data)
- except CompileError, err:
- # err is set
- Errors.report_error(err)
- error = err
- except InternalError, err:
- # Only raise if there was not an earlier error
- if Errors.num_errors == 0:
- raise
- error = err
- return (error, data)
-
def find_module(self, module_name,
relative_to = None, pos = None, need_pxd = 1):
# Finds and returns the module scope corresponding to
@@ -548,6 +399,7 @@ def create_default_resultobj(compilation_source, options):
return result
def run_pipeline(source, options, full_module_name = None):
+ import Pipeline
# Set up context
context = Context(options.include_path, options.compiler_directives)
@@ -562,12 +414,12 @@ def run_pipeline(source, options, full_module_name = None):
# Get pipeline
if source_desc.filename.endswith(".py"):
- pipeline = context.create_py_pipeline(options, result)
+ pipeline = Pipeline.create_py_pipeline(context, options, result)
else:
- pipeline = context.create_pyx_pipeline(options, result)
+ pipeline = Pipeline.create_pyx_pipeline(context, options, result)
context.setup_errors(options)
- err, enddata = context.run_pipeline(pipeline, source)
+ err, enddata = Pipeline.run_pipeline(pipeline, source)
context.teardown_errors(err, options, result)
return result
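
[Editorial sketch, not part of the patch.] The Context.run_pipeline method removed above now lives in the new Pipeline module (whose contents are not shown in this excerpt); the control flow is simply to feed the output of each phase into the next and to capture the first CompileError instead of letting it propagate. A rough standalone sketch of that loop with dummy phases, not the actual Pipeline.run_pipeline:

    class CompileError(Exception):
        pass

    def run_phases(phases, data):
        # Sketch of the removed loop: each non-None phase transforms the data,
        # and a CompileError stops the pipeline but is returned, not raised.
        error = None
        try:
            for phase in phases:
                if phase is not None:
                    data = phase(data)
        except CompileError as err:
            error = err
        return error, data

    phases = [str.strip, None, lambda s: s.upper()]
    print(run_phases(phases, "  hello  "))  # (None, 'HELLO')
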
diff --git a/Cython/Compiler/MemoryView.py b/Cython/Compiler/MemoryView.py
new file mode 100644
index 000000000..09b28e597
--- /dev/null
+++ b/Cython/Compiler/MemoryView.py
@@ -0,0 +1,984 @@
+from Errors import CompileError
+from ExprNodes import IntNode, NoneNode, IntBinopNode, NameNode, AttributeNode
+from Visitor import CythonTransform
+import Options
+import CythonScope
+from Code import UtilityCode
+from UtilityCode import CythonUtilityCode
+from PyrexTypes import py_object_type, cython_memoryview_ptr_type
+import Buffer
+
+
+START_ERR = "there must be nothing or the value 0 (zero) in the start slot."
+STOP_ERR = "Axis specification only allowed in the 'stop' slot."
+STEP_ERR = "Only the value 1 (one) or valid axis specification allowed in the step slot."
+ONE_ERR = "The value 1 (one) may appear in the first or last axis specification only."
+BOTH_CF_ERR = "Cannot specify an array that is both C and Fortran contiguous."
+NOT_AMP_ERR = "Invalid operator, only an ampersand '&' is allowed."
+INVALID_ERR = "Invalid axis specification."
+EXPR_ERR = "no expressions allowed in axis spec, only names (e.g. cython.view.contig)."
+CF_ERR = "Invalid axis specification for a C/Fortran contiguous array."
+
+memview_c_contiguous = "PyBUF_C_CONTIGUOUS"
+memview_f_contiguous = "PyBUF_F_CONTIGUOUS"
+memview_any_contiguous = "PyBUF_ANY_CONTIGUOUS"
+memview_full_access = "PyBUF_FULL"
+memview_strided_access = "PyBUF_STRIDED"
+
+MEMVIEW_DIRECT = 1
+MEMVIEW_PTR = 2
+MEMVIEW_FULL = 4
+MEMVIEW_CONTIG = 8
+MEMVIEW_STRIDED= 16
+MEMVIEW_FOLLOW = 32
+
+_spec_to_const = {
+ 'contig' : MEMVIEW_CONTIG,
+ 'strided': MEMVIEW_STRIDED,
+ 'follow' : MEMVIEW_FOLLOW,
+ 'direct' : MEMVIEW_DIRECT,
+ 'ptr' : MEMVIEW_PTR,
+ 'full' : MEMVIEW_FULL
+ }
+
+def specs_to_code(specs):
+ arr = []
+ for access, packing in specs:
+ arr.append("(%s | %s)" % (_spec_to_const[access], _spec_to_const[packing]))
+ return arr
+
+def put_init_entry(mv_cname, code):
+ code.putln("%s.data = NULL;" % mv_cname)
+ code.putln("%s.memview = NULL;" % mv_cname)
+
+def mangle_dtype_name(dtype):
+ # a dumb wrapper for now; move Buffer.mangle_dtype_name in here later?
+ import Buffer
+ return Buffer.mangle_dtype_name(dtype)
+
+def axes_to_str(axes):
+ return "".join([access[0].upper()+packing[0] for (access, packing) in axes])
+
+def gen_acquire_memoryviewslice(rhs, lhs_type, lhs_is_cglobal, lhs_result, lhs_pos, code):
+ # import MemoryView
+ assert rhs.type.is_memoryviewslice
+
+ pretty_rhs = isinstance(rhs, NameNode) or rhs.result_in_temp()
+ if pretty_rhs:
+ rhstmp = rhs.result()
+ else:
+ rhstmp = code.funcstate.allocate_temp(lhs_type, manage_ref=False)
+ code.putln("%s = %s;" % (rhstmp, rhs.result_as(lhs_type)))
+ code.putln(code.error_goto_if_null("%s.memview" % rhstmp, lhs_pos))
+
+ if not rhs.result_in_temp():
+ code.put_incref("%s.memview" % rhstmp, cython_memoryview_ptr_type)
+
+ if lhs_is_cglobal:
+ code.put_gotref("%s.memview" % lhs_result)
+
+ #XXX: this is here because self.lhs_of_first_assignment is not set correctly,
+ # once that is working this should take that flag into account.
+ # See NameNode.generate_assignment_code
+ code.put_xdecref("%s.memview" % lhs_result, cython_memoryview_ptr_type)
+
+ if lhs_is_cglobal:
+ code.put_giveref("%s.memview" % rhstmp)
+
+ put_assign_to_memviewslice(lhs_result, rhstmp, lhs_type,
+ lhs_pos, code=code)
+
+ if rhs.result_in_temp() or not pretty_rhs:
+ code.putln("%s.memview = 0;" % rhstmp)
+
+ if not pretty_rhs:
+ code.funcstate.release_temp(rhstmp)
+
+def put_assign_to_memviewslice(lhs_cname, rhs_cname, memviewslicetype, pos, code):
+
+ code.putln("%s.memview = %s.memview;" % (lhs_cname, rhs_cname))
+ code.putln("%s.data = %s.data;" % (lhs_cname, rhs_cname))
+ ndim = len(memviewslicetype.axes)
+ for i in range(ndim):
+ code.putln("%s.shape[%d] = %s.shape[%d];" % (lhs_cname, i, rhs_cname, i))
+ code.putln("%s.strides[%d] = %s.strides[%d];" % (lhs_cname, i, rhs_cname, i))
+ code.putln("%s.suboffsets[%d] = %s.suboffsets[%d];" % (lhs_cname, i, rhs_cname, i))
+
+def get_buf_flag(specs):
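+    # Pick the PyBUF_* flag used to acquire the underlying buffer,
+    # e.g. a C-contiguous spec list -> PyBUF_C_CONTIGUOUS.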
+ is_c_contig, is_f_contig = is_cf_contig(specs)
+
+ if is_c_contig:
+ return memview_c_contiguous
+ elif is_f_contig:
+ return memview_f_contiguous
+
+ access, packing = zip(*specs)
+
+ assert 'follow' not in packing
+
+ if 'full' in access or 'ptr' in access:
+ return memview_full_access
+ else:
+ return memview_strided_access
+
+def use_memview_util_code(env):
+ import CythonScope
+ env.use_utility_code(CythonScope.view_utility_code)
+ env.use_utility_code(memviewslice_declare_code)
+
+def use_memview_cwrap(env):
+ import CythonScope
+ env.use_utility_code(CythonScope.view_utility_code)
+
+def use_cython_array(env):
+ import CythonScope
+ env.use_utility_code(CythonScope.cython_array_utility_code)
+
+def src_conforms_to_dst(src, dst):
+ '''
+ returns True if src conforms to dst, False otherwise.
+
+ If conformable, the types are the same, the ndims are equal, and each axis spec is conformable.
+
+ Any packing/access spec is conformable to itself.
+
+ 'direct' and 'ptr' are conformable to 'full'.
+ 'contig' and 'follow' are conformable to 'strided'.
+ Any other combo is not conformable.
+ '''
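+    # For example, a ('direct', 'contig') axis conforms to a ('full', 'strided')
+    # axis, but not the other way around.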
+
+ if src.dtype != dst.dtype:
+ return False
+ if len(src.axes) != len(dst.axes):
+ return False
+
+ for src_spec, dst_spec in zip(src.axes, dst.axes):
+ src_access, src_packing = src_spec
+ dst_access, dst_packing = dst_spec
+ if src_access != dst_access and dst_access != 'full':
+ return False
+ if src_packing != dst_packing and dst_packing != 'strided':
+ return False
+
+ return True
+
+
+def get_copy_func_name(to_memview):
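+    # Name of the generated copy helper, e.g.
+    # "__Pyx_BufferNew_C_From_DfDc_<mangled dtype>" for a 2-d C-contiguous
+    # target slice (the dtype suffix comes from Buffer.mangle_dtype_name).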
+ base = "__Pyx_BufferNew_%s_From_%s_%s"
+ if to_memview.is_c_contig:
+ return base % ('C', axes_to_str(to_memview.axes), mangle_dtype_name(to_memview.dtype))
+ else:
+ return base % ('F', axes_to_str(to_memview.axes), mangle_dtype_name(to_memview.dtype))
+
+def get_copy_contents_name(from_mvs, to_mvs):
+ dtype = from_mvs.dtype
+ assert dtype == to_mvs.dtype
+ return ('__Pyx_BufferCopyContents_%s_%s_%s' %
+ (axes_to_str(from_mvs.axes),
+ axes_to_str(to_mvs.axes),
+ mangle_dtype_name(dtype)))
+
+class IsContigFuncUtilCode(object):
+
+ requires = None
+
+ def __init__(self, c_or_f):
+ self.c_or_f = c_or_f
+
+ self.is_contig_func_name = get_is_contig_func_name(self.c_or_f)
+
+ def __eq__(self, other):
+ if not isinstance(other, IsContigFuncUtilCode):
+ return False
+ return self.is_contig_func_name == other.is_contig_func_name
+
+ def __hash__(self):
+ return hash(self.is_contig_func_name)
+
+ def get_tree(self): pass
+
+ def put_code(self, output):
+ code = output['utility_code_def']
+ proto = output['utility_code_proto']
+
+ func_decl, func_impl = get_is_contiguous_func(self.c_or_f)
+
+ proto.put(func_decl)
+ code.put(func_impl)
+
+def get_is_contig_func_name(c_or_f):
+ return "__Pyx_Buffer_is_%s_contiguous" % c_or_f
+
+def get_is_contiguous_func(c_or_f):
+
+ func_name = get_is_contig_func_name(c_or_f)
+ decl = "static int %s(const __Pyx_memviewslice); /* proto */\n" % func_name
+
+ impl = """
+static int %s(const __Pyx_memviewslice mvs) {
+ /* returns 1 if mvs is the right contiguity, 0 otherwise */
+
+ int i, ndim = mvs.memview->view.ndim;
+ Py_ssize_t itemsize = mvs.memview->view.itemsize;
+ long size = 0;
+""" % func_name
+
+ if c_or_f == 'fortran':
+ for_loop = "for(i=0; i<ndim; i++)"
+ elif c_or_f == 'c':
+ for_loop = "for(i=ndim-1; i>-1; i--)"
+ else:
+ assert False
+
+ impl += """
+ size = 1;
+ %(for_loop)s {
+
+#ifdef DEBUG
+ printf("mvs.suboffsets[i] %%d\\n", mvs.suboffsets[i]);
+ printf("mvs.strides[i] %%d\\n", mvs.strides[i]);
+ printf("mvs.shape[i] %%d\\n", mvs.shape[i]);
+ printf("size %%d\\n", size);
+ printf("ndim %%d\\n", ndim);
+#endif
+#undef DEBUG
+
+ if(mvs.suboffsets[i] >= 0) {
+ return 0;
+ }
+ if(size * itemsize != mvs.strides[i]) {
+ return 0;
+ }
+ size *= mvs.shape[i];
+ }
+ return 1;
+
+}""" % {'for_loop' : for_loop}
+
+ return decl, impl
+
+copy_to_template = '''
+static int %(copy_to_name)s(const __Pyx_memviewslice from_mvs, __Pyx_memviewslice to_mvs) {
+
+ /* ensure from_mvs & to_mvs have the same shape & dtype */
+
+}
+'''
+
+class CopyContentsFuncUtilCode(object):
+
+ requires = None
+
+ def __init__(self, from_memview, to_memview):
+ self.from_memview = from_memview
+ self.to_memview = to_memview
+ self.copy_contents_name = get_copy_contents_name(from_memview, to_memview)
+
+ def __eq__(self, other):
+ if not isinstance(other, CopyContentsFuncUtilCode):
+ return False
+ return other.copy_contents_name == self.copy_contents_name
+
+ def __hash__(self):
+ return hash(self.copy_contents_name)
+
+ def get_tree(self): pass
+
+ def put_code(self, output):
+ code = output['utility_code_def']
+ proto = output['utility_code_proto']
+
+ func_decl, func_impl = \
+ get_copy_contents_func(self.from_memview, self.to_memview, self.copy_contents_name)
+
+ proto.put(func_decl)
+ code.put(func_impl)
+
+class CopyFuncUtilCode(object):
+
+ requires = None
+
+ def __init__(self, from_memview, to_memview):
+ if from_memview.dtype != to_memview.dtype:
+ raise ValueError("dtypes must be the same!")
+ if len(from_memview.axes) != len(to_memview.axes):
+ raise ValueError("number of dimensions must be same")
+ if not (to_memview.is_c_contig or to_memview.is_f_contig):
+ raise ValueError("to_memview must be c or f contiguous.")
+ for (access, packing) in from_memview.axes:
+ if access != 'direct':
+ raise NotImplementedError("cannot handle 'full' or 'ptr' access at this time.")
+
+ self.from_memview = from_memview
+ self.to_memview = to_memview
+ self.copy_func_name = get_copy_func_name(to_memview)
+
+ self.requires = [CopyContentsFuncUtilCode(from_memview, to_memview)]
+
+ def __eq__(self, other):
+ if not isinstance(other, CopyFuncUtilCode):
+ return False
+ return other.copy_func_name == self.copy_func_name
+
+ def __hash__(self):
+ return hash(self.copy_func_name)
+
+ def get_tree(self): pass
+
+ def put_code(self, output):
+ code = output['utility_code_def']
+ proto = output['utility_code_proto']
+
+ proto.put(Buffer.dedent("""\
+ static __Pyx_memviewslice %s(const __Pyx_memviewslice from_mvs); /* proto */
+ """ % self.copy_func_name))
+
+ copy_contents_name = get_copy_contents_name(self.from_memview, self.to_memview)
+
+ if self.to_memview.is_c_contig:
+ mode = 'c'
+ contig_flag = 'PyBUF_C_CONTIGUOUS'
+ elif self.to_memview.is_f_contig:
+ mode = 'fortran'
+ contig_flag = "PyBUF_F_CONTIGUOUS"
+
+ code.put(copy_template %
+ dict(
+ copy_name=self.copy_func_name,
+ mode=mode,
+ sizeof_dtype="sizeof(%s)" % self.from_memview.dtype.declaration_code(''),
+ contig_flag=contig_flag,
+ copy_contents_name=copy_contents_name))
+
+
+def get_copy_contents_func(from_mvs, to_mvs, cfunc_name):
+ assert from_mvs.dtype == to_mvs.dtype
+ assert len(from_mvs.axes) == len(to_mvs.axes)
+
+ ndim = len(from_mvs.axes)
+
+ # XXX: we only support direct access for now.
+ for (access, packing) in from_mvs.axes:
+ if access != 'direct':
+ raise NotImplementedError("currently only direct access is supported.")
+
+ code_decl = ("static int %(cfunc_name)s(const __Pyx_memviewslice *from_mvs,"
+ "__Pyx_memviewslice *to_mvs); /* proto */" % {'cfunc_name' : cfunc_name})
+
+ code_impl = '''
+
+static int %(cfunc_name)s(const __Pyx_memviewslice *from_mvs, __Pyx_memviewslice *to_mvs) {
+
+ char *to_buf = (char *)to_mvs->data;
+ char *from_buf = (char *)from_mvs->data;
+ struct __pyx_obj_memoryview *temp_memview = 0;
+ char *temp_data = 0;
+
+ int ndim_idx = 0;
+
+ for(ndim_idx=0; ndim_idx<%(ndim)d; ndim_idx++) {
+ if(from_mvs->shape[ndim_idx] != to_mvs->shape[ndim_idx]) {
+ PyErr_Format(PyExc_ValueError,
+ "memoryview shapes not the same in dimension %%d", ndim_idx);
+ return -1;
+ }
+ }
+
+''' % {'cfunc_name' : cfunc_name, 'ndim' : ndim}
+
+
+ INDENT = " "
+ dtype_decl = from_mvs.dtype.declaration_code("")
+ last_idx = ndim-1
+
+ if to_mvs.is_c_contig or to_mvs.is_f_contig:
+ if to_mvs.is_c_contig:
+ start, stop, step = 0, ndim, 1
+ elif to_mvs.is_f_contig:
+ start, stop, step = ndim-1, -1, -1
+
+
+ for i, idx in enumerate(range(start, stop, step)):
+ # the crazy indexing is to account for the fortran indexing.
+ # 'i' always goes up from zero to ndim-1.
+ # 'idx' is the same as 'i' for c_contig, and goes from ndim-1 to 0 for f_contig.
+ # this makes the loop code below identical in both cases.
+ code_impl += INDENT+"Py_ssize_t i%d = 0, idx%d = 0;\n" % (i,i)
+ code_impl += INDENT+"Py_ssize_t stride%(i)d = from_mvs->strides[%(idx)d];\n" % {'i':i, 'idx':idx}
+ code_impl += INDENT+"Py_ssize_t shape%(i)d = from_mvs->shape[%(idx)d];\n" % {'i':i, 'idx':idx}
+
+ code_impl += "\n"
+
+ # put down the nested for-loop.
+ for k in range(ndim):
+
+ code_impl += INDENT*(k+1) + "for(i%(k)d=0; i%(k)d<shape%(k)d; i%(k)d++) {\n" % {'k' : k}
+ if k >= 1:
+ code_impl += INDENT*(k+2) + "idx%(k)d = i%(k)d * stride%(k)d + idx%(km1)d;\n" % {'k' : k, 'km1' : k-1}
+ else:
+ code_impl += INDENT*(k+2) + "idx%(k)d = i%(k)d * stride%(k)d;\n" % {'k' : k}
+
+ # the inner part of the loop.
+ code_impl += INDENT*(ndim+1)+"memcpy(to_buf, from_buf+idx%(last_idx)d, sizeof(%(dtype_decl)s));\n" % locals()
+ code_impl += INDENT*(ndim+1)+"to_buf += sizeof(%(dtype_decl)s);\n" % locals()
+
+
+ else:
+
+ code_impl += INDENT+"/* 'f' prefix is for the 'from' memview, 't' prefix is for the 'to' memview */\n"
+ for i in range(ndim):
+ code_impl += INDENT+"char *fi%d = 0, *ti%d = 0, *end%d = 0;\n" % (i,i,i)
+ code_impl += INDENT+"Py_ssize_t fstride%(i)d = from_mvs->strides[%(i)d];\n" % {'i':i}
+ code_impl += INDENT+"Py_ssize_t fshape%(i)d = from_mvs->shape[%(i)d];\n" % {'i':i}
+ code_impl += INDENT+"Py_ssize_t tstride%(i)d = to_mvs->strides[%(i)d];\n" % {'i':i}
+ # code_impl += INDENT+"Py_ssize_t tshape%(i)d = to_mvs->shape[%(i)d];\n" % {'i':i}
+
+ code_impl += INDENT+"end0 = fshape0 * fstride0 + from_mvs->data;\n"
+ code_impl += INDENT+"for(fi0=from_buf, ti0=to_buf; fi0 < end0; fi0 += fstride0, ti0 += tstride0) {\n"
+ for i in range(1, ndim):
+ code_impl += INDENT*(i+1)+"end%(i)d = fshape%(i)d * fstride%(i)d + fi%(im1)d;\n" % {'i' : i, 'im1' : i-1}
+ code_impl += INDENT*(i+1)+"for(fi%(i)d=fi%(im1)d, ti%(i)d=ti%(im1)d; fi%(i)d < end%(i)d; fi%(i)d += fstride%(i)d, ti%(i)d += tstride%(i)d) {\n" % {'i':i, 'im1':i-1}
+
+ code_impl += INDENT*(ndim+1)+"*(%(dtype_decl)s*)(ti%(last_idx)d) = *(%(dtype_decl)s*)(fi%(last_idx)d);\n" % locals()
+
+ # for-loop closing braces
+ for k in range(ndim-1, -1, -1):
+ code_impl += INDENT*(k+1)+"}\n"
+
+ # init to_mvs->data and to_mvs shape/strides/suboffsets arrays.
+ code_impl += INDENT+"temp_memview = to_mvs->memview;\n"
+ code_impl += INDENT+"temp_data = to_mvs->data;\n"
+ code_impl += INDENT+"to_mvs->memview = 0; to_mvs->data = 0;\n"
+ code_impl += INDENT+"if(unlikely(-1 == __Pyx_init_memviewslice(temp_memview, %d, to_mvs))) {\n" % (ndim,)
+ code_impl += INDENT*2+"return -1;\n"
+ code_impl += INDENT+"}\n"
+
+ code_impl += INDENT + "return 0;\n"
+
+ code_impl += '}\n'
+
+ return code_decl, code_impl
+
+def get_axes_specs(env, axes):
+ '''
+ get_axes_specs(env, axes) -> list of (access, packing) specs for each axis.
+
+ access is one of 'full', 'ptr' or 'direct'
+ packing is one of 'contig', 'strided' or 'follow'
+ '''
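+    # For example, "double[:, ::1]" (C contiguous) yields
+    # [('direct', 'follow'), ('direct', 'contig')].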
+
+ cythonscope = env.global_scope().context.cython_scope
+ viewscope = cythonscope.viewscope
+
+ access_specs = tuple([viewscope.lookup(name)
+ for name in ('full', 'direct', 'ptr')])
+ packing_specs = tuple([viewscope.lookup(name)
+ for name in ('contig', 'strided', 'follow')])
+
+ is_f_contig, is_c_contig = False, False
+ default_access, default_packing = 'direct', 'strided'
+ cf_access, cf_packing = default_access, 'follow'
+
+ # set the is_{c,f}_contig flag.
+ for idx, axis in ((0,axes[0]), (-1,axes[-1])):
+ if isinstance(axis.step, IntNode):
+ if axis.step.compile_time_value(env) != 1:
+ raise CompileError(axis.step.pos, STEP_ERR)
+ if len(axes) > 1 and (is_c_contig or is_f_contig):
+ raise CompileError(axis.step.pos, BOTH_CF_ERR)
+ if not idx:
+ is_f_contig = True
+ else:
+ is_c_contig = True
+ if len(axes) == 1:
+ break
+
+ assert not (is_c_contig and is_f_contig)
+
+ axes_specs = []
+ # analyse all axes.
+ for idx, axis in enumerate(axes):
+
+ # start slot can be either a literal '0' or None.
+ if isinstance(axis.start, IntNode):
+ if axis.start.compile_time_value(env):
+ raise CompileError(axis.start.pos, START_ERR)
+ elif not isinstance(axis.start, NoneNode):
+ raise CompileError(axis.start.pos, START_ERR)
+
+ # stop slot must be None.
+ if not isinstance(axis.stop, NoneNode):
+ raise CompileError(axis.stop.pos, STOP_ERR)
+
+ # step slot can be None, the value 1,
+ # a single axis spec, or an IntBinopNode.
+ if isinstance(axis.step, NoneNode):
+ if is_c_contig or is_f_contig:
+ axes_specs.append((cf_access, cf_packing))
+ else:
+ axes_specs.append((default_access, default_packing))
+
+ elif isinstance(axis.step, IntNode):
+ if idx not in (0, len(axes)-1):
+ raise CompileError(axis.step.pos, ONE_ERR)
+ # the packing for the ::1 axis is contiguous,
+ # all others are cf_packing.
+ axes_specs.append((cf_access, 'contig'))
+
+ elif isinstance(axis.step, IntBinopNode):
+ if is_c_contig or is_f_contig:
+ raise CompileError(axis.step.pos, CF_ERR)
+ if axis.step.operator != u'&':
+ raise CompileError(axis.step.pos, NOT_AMP_ERR)
+ operand1, operand2 = axis.step.operand1, axis.step.operand2
+ spec1, spec2 = [_get_resolved_spec(env, op)
+ for op in (operand1, operand2)]
+ if spec1 in access_specs and spec2 in packing_specs:
+ axes_specs.append((spec1.name, spec2.name))
+ elif spec2 in access_specs and spec1 in packing_specs:
+ axes_specs.append((spec2.name, spec1.name))
+ else:
+ raise CompileError(axis.step.pos, INVALID_ERR)
+
+ elif isinstance(axis.step, (NameNode, AttributeNode)):
+ if is_c_contig or is_f_contig:
+ raise CompileError(axis.step.pos, CF_ERR)
+ resolved_spec = _get_resolved_spec(env, axis.step)
+ if resolved_spec in access_specs:
+ axes_specs.append((resolved_spec.name, default_packing))
+ elif resolved_spec in packing_specs:
+ axes_specs.append((default_access, resolved_spec.name))
+ else:
+ raise CompileError(axis.step.pos, INVALID_ERR)
+
+ else:
+ raise CompileError(axis.step.pos, INVALID_ERR)
+
+
+ validate_axes_specs(axes[0].start.pos, axes_specs)
+
+ return axes_specs
+
+def is_cf_contig(specs):
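+    # Returns (is_c_contig, is_f_contig), e.g.
+    # [('direct', 'follow'), ('direct', 'contig')] -> (True, False).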
+ is_c_contig = is_f_contig = False
+
+ if (len(specs) == 1 and specs == [('direct', 'contig')]):
+ is_c_contig = True
+
+ elif (specs[-1] == ('direct','contig') and
+ all(axis == ('direct','follow') for axis in specs[:-1])):
+ # c_contiguous: 'follow', 'follow', ..., 'follow', 'contig'
+ is_c_contig = True
+
+ elif (len(specs) > 1 and
+ specs[0] == ('direct','contig') and
+ all(axis == ('direct','follow') for axis in specs[1:])):
+ # f_contiguous: 'contig', 'follow', 'follow', ..., 'follow'
+ is_f_contig = True
+
+ return is_c_contig, is_f_contig
+
+def validate_axes_specs(pos, specs):
+
+ packing_specs = ('contig', 'strided', 'follow')
+ access_specs = ('direct', 'ptr', 'full')
+
+ is_c_contig, is_f_contig = is_cf_contig(specs)
+
+ has_contig = has_follow = has_strided = False
+
+ for access, packing in specs:
+
+ if not (access in access_specs and
+ packing in packing_specs):
+ raise CompileError(pos, "Invalid axes specification.")
+
+ if packing == 'strided':
+ has_strided = True
+ elif packing == 'contig':
+ if has_contig:
+ raise CompileError(pos, "Only one contiguous axis may be specified.")
+ has_contig = True
+ elif packing == 'follow':
+ if has_strided:
+ raise CompileError(pos, "A memoryview cannot have both follow and strided axis specifiers.")
+ if not (is_c_contig or is_f_contig):
+ raise CompileError(pos, "Invalid use of the follow specifier.")
+
+
+def _get_resolved_spec(env, spec):
+ # spec must be a NameNode or an AttributeNode
+ if isinstance(spec, NameNode):
+ return _resolve_NameNode(env, spec)
+ elif isinstance(spec, AttributeNode):
+ return _resolve_AttributeNode(env, spec)
+ else:
+ raise CompileError(spec.pos, INVALID_ERR)
+
+def _resolve_NameNode(env, node):
+ try:
+ resolved_name = env.lookup(node.name).name
+ except AttributeError:
+ raise CompileError(node.pos, INVALID_ERR)
+ viewscope = env.global_scope().context.cython_scope.viewscope
+ return viewscope.lookup(resolved_name)
+
+def _resolve_AttributeNode(env, node):
+ path = []
+ while isinstance(node, AttributeNode):
+ path.insert(0, node.attribute)
+ node = node.obj
+ if isinstance(node, NameNode):
+ path.insert(0, node.name)
+ else:
+ raise CompileError(node.pos, EXPR_ERR)
+ modnames = path[:-1]
+ # must be at least 1 module name, o/w not an AttributeNode.
+ assert modnames
+ scope = env.lookup(modnames[0]).as_module
+ for modname in modnames[1:]:
+ scope = scope.lookup(modname).as_module
+ return scope.lookup(path[-1])
+
+class MemoryViewSliceTransform(CythonTransform):
+
+ memviews_exist = False
+
+ def __call__(self, node):
+ return super(MemoryViewSliceTransform, self).__call__(node)
+
+ def inspect_scope(self, node, scope):
+
+ memviewvars = [entry for name, entry
+ in scope.entries.iteritems()
+ if entry.type.is_memoryviewslice]
+ if memviewvars:
+ self.memviews_exist = True
+
+ def visit_FuncDefNode(self, node):
+ # check for the existence of memview entries here.
+ self.inspect_scope(node, node.local_scope)
+ self.visitchildren(node)
+ return node
+
+ def visit_ModuleNode(self, node):
+ # check for memviews here.
+ self.inspect_scope(node, node.scope)
+ self.visitchildren(node)
+ return node
+
+ def visit_ClassDefNode(self, node):
+ # check for memviews in the class scope
+ if hasattr(node, 'scope'):
+ scope = node.scope
+ else:
+ scope = node.entry.type.scope
+ self.inspect_scope(node, scope)
+ self.visitchildren(node)
+ return node
+
+ def visit_SingleAssignmentNode(self, node):
+ return node
+
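+# C template for the generated copy()/copy_fortran() helpers: allocate a new
+# contiguous cython.array of the same shape, wrap it in a memoryview, and copy
+# the contents across with the matching copy-contents helper.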
+copy_template = '''
+static __Pyx_memviewslice %(copy_name)s(const __Pyx_memviewslice from_mvs) {
+
+ int i;
+ __Pyx_memviewslice new_mvs = {0, 0};
+ struct __pyx_obj_memoryview *from_memview = from_mvs.memview;
+ Py_buffer *buf = &from_memview->view;
+ PyObject *shape_tuple = 0;
+ PyObject *temp_int = 0;
+ struct __pyx_obj_array *array_obj = 0;
+ struct __pyx_obj_memoryview *memview_obj = 0;
+ char mode[] = "%(mode)s";
+
+ __Pyx_RefNannySetupContext("%(copy_name)s");
+
+ shape_tuple = PyTuple_New((Py_ssize_t)(buf->ndim));
+ if(unlikely(!shape_tuple)) {
+ goto fail;
+ }
+ __Pyx_GOTREF(shape_tuple);
+
+
+ for(i=0; i<buf->ndim; i++) {
+ temp_int = PyInt_FromLong(buf->shape[i]);
+ if(unlikely(!temp_int)) {
+ goto fail;
+        } else {
+            /* PyTuple_SET_ITEM steals the reference to temp_int */
+            PyTuple_SET_ITEM(shape_tuple, i, temp_int);
+            temp_int = 0;
+        }
+ }
+
+ array_obj = __pyx_cythonarray_array_cwrapper(shape_tuple, %(sizeof_dtype)s, buf->format, mode);
+ if (unlikely(!array_obj)) {
+ goto fail;
+ }
+ __Pyx_GOTREF(array_obj);
+
+ memview_obj = __pyx_viewaxis_memoryview_cwrapper((PyObject *)array_obj, %(contig_flag)s);
+ if (unlikely(!memview_obj)) {
+ goto fail;
+ }
+
+ /* initialize new_mvs */
+ if (unlikely(-1 == __Pyx_init_memviewslice(memview_obj, buf->ndim, &new_mvs))) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Could not initialize new memoryviewslice object.");
+ goto fail;
+ }
+
+ if (unlikely(-1 == %(copy_contents_name)s(&from_mvs, &new_mvs))) {
+ /* PyErr_SetString(PyExc_RuntimeError,
+ "Could not copy contents of memoryview slice."); */
+ goto fail;
+ }
+
+ goto no_fail;
+
+fail:
+ __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = 0;
+ new_mvs.data = 0;
+no_fail:
+ __Pyx_XDECREF(shape_tuple); shape_tuple = 0;
+    __Pyx_XDECREF(temp_int); temp_int = 0;
+ __Pyx_XDECREF(array_obj); array_obj = 0;
+ __Pyx_RefNannyFinishContext();
+ return new_mvs;
+
+}
+'''
+
+spec_constants_code = UtilityCode(proto="""
+#define __Pyx_MEMVIEW_DIRECT 1
+#define __Pyx_MEMVIEW_PTR 2
+#define __Pyx_MEMVIEW_FULL 4
+#define __Pyx_MEMVIEW_CONTIG 8
+#define __Pyx_MEMVIEW_STRIDED 16
+#define __Pyx_MEMVIEW_FOLLOW 32
+"""
+)
+
+memviewslice_cname = u'__Pyx_memviewslice'
+memviewslice_declare_code = UtilityCode(proto="""
+
+/* memoryview slice struct */
+
+typedef struct {
+ struct %(memview_struct_name)s *memview;
+ char *data;
+ Py_ssize_t shape[%(max_dims)d];
+ Py_ssize_t strides[%(max_dims)d];
+ Py_ssize_t suboffsets[%(max_dims)d];
+} %(memviewslice_name)s;
+
+""" % dict(memview_struct_name=CythonScope.memview_objstruct_cname,
+ max_dims=Options.buffer_max_dims,
+ memviewslice_name=memviewslice_cname)
+)
+
+memviewslice_init_code = UtilityCode(proto="""\
+
+#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
+
+#define __Pyx_MEMVIEW_DIRECT 1
+#define __Pyx_MEMVIEW_PTR 2
+#define __Pyx_MEMVIEW_FULL 4
+#define __Pyx_MEMVIEW_CONTIG 8
+#define __Pyx_MEMVIEW_STRIDED 16
+#define __Pyx_MEMVIEW_FOLLOW 32
+
+#define __Pyx_IS_C_CONTIG 1
+#define __Pyx_IS_F_CONTIG 2
+
+static int __Pyx_ValidateAndInit_memviewslice(struct __pyx_obj_memoryview *memview,
+ int *axes_specs, int c_or_f_flag, int ndim, __Pyx_TypeInfo *dtype,
+ __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice);
+
+static int __Pyx_init_memviewslice(
+ struct __pyx_obj_memoryview *memview,
+ int ndim,
+ __Pyx_memviewslice *memviewslice);
+""" % {'BUF_MAX_NDIMS' :Options.buffer_max_dims},
+impl = """\
+static int __Pyx_ValidateAndInit_memviewslice(
+ struct __pyx_obj_memoryview *memview,
+ int *axes_specs,
+ int c_or_f_flag,
+ int ndim,
+ __Pyx_TypeInfo *dtype,
+ __Pyx_BufFmt_StackElem stack[],
+ __Pyx_memviewslice *memviewslice) {
+
+ __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice");
+    Py_buffer *buf = &memview->view;
+    __Pyx_BufFmt_Context ctx;
+    int stride, i, spec = 0, retval = -1;
+
+ if (!buf) goto fail;
+
+ if(memviewslice->data || memviewslice->memview) {
+ PyErr_SetString(PyExc_ValueError,
+ "memoryviewslice struct must be initialized to NULL.");
+ goto fail;
+ }
+
+ if (buf->ndim != ndim) {
+ PyErr_Format(PyExc_ValueError,
+ "Buffer has wrong number of dimensions (expected %d, got %d)",
+ ndim, buf->ndim);
+ goto fail;
+ }
+
+    __Pyx_BufFmt_Init(&ctx, stack, dtype);
+ if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
+
+ if ((unsigned)buf->itemsize != dtype->size) {
+ PyErr_Format(PyExc_ValueError,
+ "Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)",
+ buf->itemsize, (buf->itemsize > 1) ? "s" : "",
+ dtype->name,
+ dtype->size, (dtype->size > 1) ? "s" : "");
+ goto fail;
+ }
+
+ if (!buf->strides) {
+ PyErr_SetString(PyExc_ValueError,
+ "buffer does not supply strides necessary for memoryview.");
+ goto fail;
+ }
+
+ for(i=0; i<ndim; i++) {
+ spec = axes_specs[i];
+
+ if (spec & __Pyx_MEMVIEW_CONTIG) {
+ if (buf->strides[i] != buf->itemsize) {
+ PyErr_SetString(PyExc_ValueError,
+ "Buffer and memoryview are not contiguous in the same dimension.");
+ goto fail;
+ }
+ }
+
+ if (spec & (__Pyx_MEMVIEW_STRIDED | __Pyx_MEMVIEW_FOLLOW)) {
+ if (buf->strides[i] < buf->itemsize) {
+ PyErr_SetString(PyExc_ValueError,
+ "Buffer and memoryview are not contiguous in the same dimension.");
+ goto fail;
+ }
+ }
+
+ if (spec & __Pyx_MEMVIEW_DIRECT) {
+ if (buf->suboffsets && buf->suboffsets[i] >= 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "Buffer not compatible with direct access.");
+ goto fail;
+ }
+ }
+
+ if (spec & (__Pyx_MEMVIEW_PTR | __Pyx_MEMVIEW_FULL)) {
+ if (!buf->suboffsets) {
+ PyErr_SetString(PyExc_ValueError,
+ "Buffer not able to be indirectly accessed.");
+ goto fail;
+ }
+ }
+
+ if (spec & __Pyx_MEMVIEW_PTR) {
+ if (buf->suboffsets[i] < 0) {
+ PyErr_Format(PyExc_ValueError,
+ "Buffer not indirectly accessed in %d dimension, although memoryview is.", i);
+ goto fail;
+ }
+ }
+ }
+
+ if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
+ stride = 1;
+ for(i=0; i<ndim; i++) {
+ if(stride * buf->itemsize != buf->strides[i]) {
+ PyErr_SetString(PyExc_ValueError,
+ "Buffer not fortran contiguous.");
+ goto fail;
+ }
+ stride = stride * buf->shape[i];
+ }
+    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
+        stride = 1;
+        for(i=ndim-1; i>-1; i--) {
+ if(stride * buf->itemsize != buf->strides[i]) {
+ PyErr_SetString(PyExc_ValueError,
+ "Buffer not C contiguous.");
+ goto fail;
+ }
+ stride = stride * buf->shape[i];
+ }
+ }
+
+ if(unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice) == -1)) {
+ goto fail;
+ }
+
+ retval = 0;
+ goto no_fail;
+fail:
+ __Pyx_XDECREF(memviewslice->memview);
+ memviewslice->memview = 0;
+ memviewslice->data = 0;
+ retval = -1;
+
+no_fail:
+ __Pyx_RefNannyFinishContext();
+ return retval;
+}
+
+static int __Pyx_init_memviewslice(
+ struct __pyx_obj_memoryview *memview,
+ int ndim,
+ __Pyx_memviewslice *memviewslice) {
+
+ __Pyx_RefNannySetupContext("init_memviewslice");
+ int i, retval=-1;
+ Py_buffer *buf = &memview->view;
+
+ if(!buf) {
+ PyErr_SetString(PyExc_ValueError,
+ "buf is NULL.");
+ goto fail;
+ } else if(memviewslice->memview || memviewslice->data) {
+ PyErr_SetString(PyExc_ValueError,
+ "memviewslice is already initialized!");
+ goto fail;
+ }
+
+ for(i=0; i<ndim; i++) {
+ memviewslice->strides[i] = buf->strides[i];
+ memviewslice->shape[i] = buf->shape[i];
+ if(buf->suboffsets) {
+ memviewslice->suboffsets[i] = buf->suboffsets[i];
+ } else {
+ memviewslice->suboffsets[i] = -1;
+ }
+ }
+
+ __Pyx_INCREF((PyObject *)memview);
+ __Pyx_GIVEREF((PyObject *)memview);
+ memviewslice->memview = memview;
+ memviewslice->data = (char *)buf->buf;
+ retval = 0;
+ goto no_fail;
+
+fail:
+ __Pyx_XDECREF(memviewslice->memview);
+ memviewslice->memview = 0;
+ memviewslice->data = 0;
+ retval = -1;
+no_fail:
+ __Pyx_RefNannyFinishContext();
+ return retval;
+}
+""")
+
+memviewslice_init_code.requires = [memviewslice_declare_code]
+
diff --git a/Cython/Compiler/ModuleNode.py b/Cython/Compiler/ModuleNode.py
index 5a45ad718..a305822f5 100644
--- a/Cython/Compiler/ModuleNode.py
+++ b/Cython/Compiler/ModuleNode.py
@@ -50,7 +50,26 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
child_attrs = ["body"]
directives = None
-
+
+ def merge_in(self, tree, scope, merge_scope=False):
+ # Merges in the contents of another tree, and possibly scope. With the
+ # current implementation below, this must be done right prior
+ # to code generation.
+ #
+ # Note: This way of doing it seems strange -- I believe the
+ # right concept is to split ModuleNode into a ModuleNode and a
+ # CodeGenerator, and tell that CodeGenerator to generate code
+ # from multiple sources.
+ assert isinstance(self.body, Nodes.StatListNode)
+ if isinstance(tree, Nodes.StatListNode):
+ self.body.stats.extend(tree.stats)
+ else:
+ self.body.stats.append(tree)
+ selfscope = self.scope
+ selfscope.utility_code_list.extend(scope.utility_code_list)
+ if merge_scope:
+ selfscope.merge_in(scope)
+
def analyse_declarations(self, env):
if Options.embed_pos_in_docstring:
env.doc = EncodedString(u'File: %s (starting at line %s)' % Nodes.relative_position(self.pos))
@@ -105,7 +124,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
if h_types or h_vars or h_funcs or h_extension_types:
result.h_file = replace_suffix(result.c_file, ".h")
h_code = Code.CCodeWriter()
- Code.GlobalState(h_code)
+ Code.GlobalState(h_code, self)
if options.generate_pxi:
result.i_file = replace_suffix(result.c_file, ".pxi")
i_code = Code.PyrexCodeWriter(result.i_file)
@@ -167,7 +186,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
if api_funcs or has_api_extension_types:
result.api_file = replace_suffix(result.c_file, "_api.h")
h_code = Code.CCodeWriter()
- Code.GlobalState(h_code)
+ Code.GlobalState(h_code, self)
name = self.api_name(env)
guard = Naming.api_guard_prefix + name
h_code.put_h_guard(guard)
@@ -251,7 +270,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
else:
emit_linenums = options.emit_linenums
rootwriter = Code.CCodeWriter(emit_linenums=emit_linenums)
- globalstate = Code.GlobalState(rootwriter, emit_linenums)
+ globalstate = Code.GlobalState(rootwriter, self, emit_linenums)
globalstate.initialize_main_c_code()
h_code = globalstate['h_code']
@@ -925,10 +944,13 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
type = scope.parent_type
base_type = type.base_type
py_attrs = []
+ memviewslice_attrs = []
for entry in scope.var_entries:
if entry.type.is_pyobject:
py_attrs.append(entry)
- need_self_cast = type.vtabslot_cname or py_attrs
+ elif entry.type.is_memoryviewslice:
+ memviewslice_attrs.append(entry)
+ need_self_cast = type.vtabslot_cname or py_attrs or memviewslice_attrs
code.putln("")
code.putln(
"static PyObject *%s(PyTypeObject *t, PyObject *a, PyObject *k) {"
@@ -970,6 +992,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("p->%s = 0;" % entry.cname)
else:
code.put_init_var_to_py_none(entry, "p->%s", nanny=False)
+ for entry in memviewslice_attrs:
+ code.putln("p->%s.data = NULL;" % entry.cname)
+ code.put_init_to_py_none("p->%s.memview" % entry.cname,
+ PyrexTypes.cython_memoryview_ptr_type, nanny=False)
entry = scope.lookup_here("__new__")
if entry and entry.is_special:
if entry.trivial_signature:
@@ -1890,8 +1916,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
# variables to None.
for entry in env.var_entries:
if entry.visibility != 'extern':
- if entry.type.is_pyobject and entry.used:
- code.put_init_var_to_py_none(entry, nanny=False)
+ if entry.used:
+ entry.type.global_init_code(entry, code)
def generate_c_function_export_code(self, env, code):
# Generate code to create PyCFunction wrappers for exported C functions.
diff --git a/Cython/Compiler/Naming.py b/Cython/Compiler/Naming.py
index 059779d6c..76e705c8f 100644
--- a/Cython/Compiler/Naming.py
+++ b/Cython/Compiler/Naming.py
@@ -35,10 +35,8 @@ prop_set_prefix = pyrex_prefix + "setprop_"
type_prefix = pyrex_prefix + "t_"
typeobj_prefix = pyrex_prefix + "type_"
var_prefix = pyrex_prefix + "v_"
-bufstruct_prefix = pyrex_prefix + "bstruct_"
-bufstride_prefix = pyrex_prefix + "bstride_"
-bufshape_prefix = pyrex_prefix + "bshape_"
-bufsuboffset_prefix = pyrex_prefix + "boffset_"
+pybuffernd_prefix = pyrex_prefix + "pybuffernd_"
+pybufferstruct_prefix = pyrex_prefix + "pybuffer_"
vtable_prefix = pyrex_prefix + "vtable_"
vtabptr_prefix = pyrex_prefix + "vtabptr_"
vtabstruct_prefix = pyrex_prefix + "vtabstruct_"
diff --git a/Cython/Compiler/Nodes.py b/Cython/Compiler/Nodes.py
index e6b004876..0d4871f00 100644
--- a/Cython/Compiler/Nodes.py
+++ b/Cython/Compiler/Nodes.py
@@ -12,11 +12,11 @@ except NameError:
import Code
import Builtin
-from Errors import error, warning, InternalError
+from Errors import error, warning, InternalError, CompileError
import Naming
import PyrexTypes
import TypeSlots
-from PyrexTypes import py_object_type, error_type, CFuncType
+from PyrexTypes import py_object_type, error_type, CTypedefType, CFuncType, cython_memoryview_ptr_type
from Symtab import ModuleScope, LocalScope, GeneratorLocalScope, \
StructOrUnionScope, PyClassScope, CClassScope
from Cython.Utils import open_new_file, replace_suffix
@@ -725,6 +725,31 @@ class CSimpleBaseTypeNode(CBaseTypeNode):
else:
return PyrexTypes.error_type
+class MemoryViewSliceTypeNode(CBaseTypeNode):
+
+ child_attrs = ['base_type_node', 'axes']
+
+ def analyse(self, env, could_be_name = False):
+
+ base_type = self.base_type_node.analyse(env)
+ if base_type.is_error: return base_type
+
+ import MemoryView
+
+ try:
+ axes_specs = MemoryView.get_axes_specs(env, self.axes)
+ except CompileError, e:
+ error(e.position, e.message_only)
+ self.type = PyrexTypes.ErrorType()
+ return self.type
+
+ self.type = PyrexTypes.MemoryViewSliceType(base_type, axes_specs)
+        MemoryView.use_memview_util_code(env)
+        MemoryView.use_cython_array(env)
+ env.use_utility_code(MemoryView.memviewslice_declare_code)
+ return self.type
+
class CBufferAccessTypeNode(CBaseTypeNode):
# After parsing:
# positional_args [ExprNode] List of positional arguments
@@ -816,7 +841,7 @@ class CVarDefNode(StatNode):
for declarator in self.declarators:
name_declarator, type = declarator.analyse(base_type, env)
if not type.is_complete():
- if not (self.visibility == 'extern' and type.is_array):
+                if not ((self.visibility == 'extern' and type.is_array) or type.is_memoryviewslice):
error(declarator.pos,
"Variable type '%s' is incomplete" % type)
if self.visibility == 'extern' and type.is_pyobject:
@@ -1041,7 +1066,7 @@ class FuncDefNode(StatNode, BlockNode):
return lenv
def generate_function_definitions(self, env, code):
- import Buffer
+ import Buffer, MemoryView
lenv = self.local_scope
@@ -1084,6 +1109,9 @@ class FuncDefNode(StatNode, BlockNode):
(self.return_type.declaration_code(
Naming.retval_cname),
init))
+ if self.return_type.is_memoryviewslice:
+ import MemoryView
+ MemoryView.put_init_entry(Naming.retval_cname, code)
tempvardecl_code = code.insertion_point()
self.generate_keyword_list(code)
# ----- Extern library function declarations
@@ -1107,20 +1135,26 @@ class FuncDefNode(StatNode, BlockNode):
for entry in lenv.arg_entries:
if entry.type.is_pyobject and lenv.control_flow.get_state((entry.name, 'source')) != 'arg':
code.put_var_incref(entry)
+ if entry.type.is_memoryviewslice:
+ code.put_incref("%s.memview" % entry.cname, cython_memoryview_ptr_type)
# ----- Initialise local variables
for entry in lenv.var_entries:
if entry.type.is_pyobject and entry.init_to_none and entry.used:
code.put_init_var_to_py_none(entry)
# ----- Initialise local buffer auxiliary variables
for entry in lenv.var_entries + lenv.arg_entries:
- if entry.type.is_buffer and entry.buffer_aux.buffer_info_var.used:
- code.putln("%s.buf = NULL;" % entry.buffer_aux.buffer_info_var.cname)
+ if entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used:
+ Buffer.put_init_vars(entry, code)
+ # ----- Initialise local memoryviewslices
+ for entry in lenv.var_entries:
+ if entry.type.is_memoryviewslice:
+ MemoryView.put_init_entry(entry.cname, code)
# ----- Check and convert arguments
self.generate_argument_type_tests(code)
# ----- Acquire buffer arguments
for entry in lenv.arg_entries:
if entry.type.is_buffer:
- Buffer.put_acquire_arg_buffer(entry, code, self.pos)
+ Buffer.put_acquire_arg_buffer(entry, code, self.pos)
# ----- Function body
self.body.generate_execution_code(code)
# ----- Default return value
@@ -1201,11 +1235,16 @@ class FuncDefNode(StatNode, BlockNode):
for entry in lenv.var_entries:
if lenv.control_flow.get_state((entry.name, 'initalized')) is not True:
entry.xdecref_cleanup = 1
+ for entry in lenv.var_entries:
+ if entry.type.is_memoryviewslice and entry.used:
+ code.put_xdecref("%s.memview" % entry.cname, cython_memoryview_ptr_type)
code.put_var_decrefs(lenv.var_entries, used_only = 1)
# Decref any increfed args
for entry in lenv.arg_entries:
if entry.type.is_pyobject and lenv.control_flow.get_state((entry.name, 'source')) != 'arg':
code.put_var_decref(entry)
+ if entry.type.is_memoryviewslice:
+ code.put_decref("%s.memview" % entry.cname, cython_memoryview_ptr_type)
# code.putln("/* TODO: decref scope object */")
# ----- Return
@@ -1217,6 +1256,8 @@ class FuncDefNode(StatNode, BlockNode):
err_val = default_retval
if self.return_type.is_pyobject:
code.put_xgiveref(self.return_type.as_pyobject(Naming.retval_cname))
+ elif self.return_type.is_memoryviewslice:
+ code.put_xgiveref(code.as_pyobject("%s.memview" % Naming.retval_cname,cython_memoryview_ptr_type))
if self.entry.is_special and self.entry.name == "__hash__":
# Returning -1 for __hash__ is supposed to signal an error
@@ -1250,7 +1291,7 @@ class FuncDefNode(StatNode, BlockNode):
def declare_argument(self, env, arg):
if arg.type.is_void:
error(arg.pos, "Invalid use of 'void'")
- elif not arg.type.is_complete() and not arg.type.is_array:
+ elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
error(arg.pos,
"Argument type '%s' is incomplete" % arg.type)
return env.declare_arg(arg.name, arg.type, arg.pos)
@@ -1838,13 +1879,16 @@ class DefNode(FuncDefNode):
self.entry = entry
prefix = env.scope_prefix
entry.func_cname = \
- Naming.pyfunc_prefix + prefix + name
+ env.mangle(Naming.pyfunc_prefix, name)
+ # Naming.pyfunc_prefix + prefix + name
entry.pymethdef_cname = \
- Naming.pymethdef_prefix + prefix + name
+ env.mangle(Naming.pymethdef_prefix, name)
+ # Naming.pymethdef_prefix + prefix + name
if Options.docstrings:
entry.doc = embed_position(self.pos, self.doc)
entry.doc_cname = \
- Naming.funcdoc_prefix + prefix + name
+ env.mangle(Naming.funcdoc_prefix, name)
+ # Naming.funcdoc_prefix + prefix + name
else:
entry.doc = None
@@ -3492,14 +3536,23 @@ class ReturnStatNode(StatNode):
if self.return_type.is_pyobject:
code.put_xdecref(Naming.retval_cname,
self.return_type)
+ elif self.return_type.is_memoryviewslice:
+ code.put_xdecref("%s.memview" % Naming.retval_cname,
+ self.return_type)
+
if self.value:
self.value.generate_evaluation_code(code)
- self.value.make_owned_reference(code)
- code.putln(
- "%s = %s;" % (
- Naming.retval_cname,
- self.value.result_as(self.return_type)))
- self.value.generate_post_assignment_code(code)
+ if self.return_type.is_memoryviewslice:
+ import MemoryView
+ MemoryView.gen_acquire_memoryviewslice(self.value, self.return_type,
+ False, Naming.retval_cname, None, code)
+ else:
+ self.value.make_owned_reference(code)
+ code.putln(
+ "%s = %s;" % (
+ Naming.retval_cname,
+ self.value.result_as(self.return_type)))
+ self.value.generate_post_assignment_code(code)
self.value.free_temps(code)
else:
if self.return_type.is_pyobject:
@@ -4694,6 +4747,7 @@ class FromCImportStatNode(StatNode):
if entry:
if kind and not self.declaration_matches(entry, kind):
entry.redeclared(pos)
+ entry.used = 1
else:
if kind == 'struct' or kind == 'union':
entry = module_scope.declare_struct_or_union(name,
diff --git a/Cython/Compiler/Optimize.py b/Cython/Compiler/Optimize.py
index 989ca2d80..40b48dc5c 100644
--- a/Cython/Compiler/Optimize.py
+++ b/Cython/Compiler/Optimize.py
@@ -1860,9 +1860,9 @@ class FinalOptimizePhase(Visitor.CythonTransform):
if node.function.name == 'isinstance':
type_arg = node.args[1]
if type_arg.type.is_builtin_type and type_arg.type.name == 'type':
- from CythonScope import utility_scope
- node.function.entry = utility_scope.lookup('PyObject_TypeCheck')
+ cython_scope = self.context.cython_scope
+ node.function.entry = cython_scope.lookup('PyObject_TypeCheck')
node.function.type = node.function.entry.type
- PyTypeObjectPtr = PyrexTypes.CPtrType(utility_scope.lookup('PyTypeObject').type)
+ PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type)
node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
return node
diff --git a/Cython/Compiler/Options.py b/Cython/Compiler/Options.py
index 9fccdf74c..785ce4ad4 100644
--- a/Cython/Compiler/Options.py
+++ b/Cython/Compiler/Options.py
@@ -47,6 +47,9 @@ c_line_in_traceback = 1
# executes the body of this module.
embed = False
+# max # of dims for buffers -- set to same value as max # of dims for numpy
+# arrays.
+buffer_max_dims = 32
# Declare compiler directives
directive_defaults = {
diff --git a/Cython/Compiler/ParseTreeTransforms.py b/Cython/Compiler/ParseTreeTransforms.py
index 49762e4a8..02bd58882 100644
--- a/Cython/Compiler/ParseTreeTransforms.py
+++ b/Cython/Compiler/ParseTreeTransforms.py
@@ -978,12 +978,13 @@ class TransformBuiltinMethods(EnvTransform):
node = BoolNode(node.pos, value=True)
elif attribute == u'NULL':
node = NullNode(node.pos)
- elif not PyrexTypes.parse_basic_type(attribute):
+ elif PyrexTypes.parse_basic_type(attribute):
+ pass
+ elif not self.context.cython_scope.lookup(attribute):
error(node.pos, u"'%s' not a valid cython attribute or is being used incorrectly" % attribute)
return node
def visit_SimpleCallNode(self, node):
-
# locals builtin
if isinstance(node.function, ExprNodes.NameNode):
if node.function.name == 'locals':
@@ -1045,7 +1046,10 @@ class TransformBuiltinMethods(EnvTransform):
node = binop_node(node.function.pos, '/', node.args[0], node.args[1])
node.cdivision = True
else:
- error(node.function.pos, u"'%s' not a valid cython language construct" % function)
+ entry = self.context.cython_scope.lookup(function)
+ if not entry:
+ error(node.function.pos,
+ u"'%s' not a valid cython language construct" % function)
self.visitchildren(node)
return node
diff --git a/Cython/Compiler/Parsing.py b/Cython/Compiler/Parsing.py
index d459b4b24..8bcb43fb2 100644
--- a/Cython/Compiler/Parsing.py
+++ b/Cython/Compiler/Parsing.py
@@ -250,7 +250,7 @@ def p_typecast(s):
# s.sy == "<"
pos = s.position()
s.next()
- base_type = p_c_base_type(s)
+ base_type = p_c_base_type(s, empty = 1)
if base_type.name is None:
s.error("Unknown type")
declarator = p_c_declarator(s, empty = 1)
@@ -279,7 +279,7 @@ def p_sizeof(s):
operand = p_simple_expr(s)
node = ExprNodes.SizeofVarNode(pos, operand = operand)
else:
- base_type = p_c_base_type(s)
+ base_type = p_c_base_type(s, empty = 1)
declarator = p_c_declarator(s, empty = 1)
node = ExprNodes.SizeofTypeNode(pos,
base_type = base_type, declarator = declarator)
@@ -1715,7 +1715,7 @@ def p_positional_and_keyword_args(s, end_sy_set, type_positions=(), type_keyword
s.next()
# Is keyword arg
if ident in type_keywords:
- arg = p_c_base_type(s)
+ arg = p_c_base_type(s, empty = 1)
parsed_type = True
else:
arg = p_simple_expr(s)
@@ -1728,7 +1728,7 @@ def p_positional_and_keyword_args(s, end_sy_set, type_positions=(), type_keyword
if not was_keyword:
if pos_idx in type_positions:
- arg = p_c_base_type(s)
+ arg = p_c_base_type(s, empty = 1)
parsed_type = True
else:
arg = p_simple_expr(s)
@@ -1748,13 +1748,13 @@ def p_positional_and_keyword_args(s, end_sy_set, type_positions=(), type_keyword
s.next()
return positional_args, keyword_args
-def p_c_base_type(s, self_flag = 0, nonempty = 0):
+def p_c_base_type(s, self_flag = 0, empty = 0, nonempty = 0):
# If self_flag is true, this is the base type for the
# self argument of a C method of an extension type.
if s.sy == '(':
return p_c_complex_base_type(s)
else:
- return p_c_simple_base_type(s, self_flag, nonempty = nonempty)
+ return p_c_simple_base_type(s, self_flag, nonempty = nonempty, empty = empty)
def p_calling_convention(s):
if s.sy == 'IDENT' and s.systring in calling_convention_words:
@@ -1770,13 +1770,13 @@ def p_c_complex_base_type(s):
# s.sy == '('
pos = s.position()
s.next()
- base_type = p_c_base_type(s)
+ base_type = p_c_base_type(s, empty = 1)
declarator = p_c_declarator(s, empty = 1)
s.expect(')')
return Nodes.CComplexBaseTypeNode(pos,
base_type = base_type, declarator = declarator)
-def p_c_simple_base_type(s, self_flag, nonempty):
+def p_c_simple_base_type(s, self_flag, nonempty, empty):
#print "p_c_simple_base_type: self_flag =", self_flag, nonempty
is_basic = 0
signed = 1
@@ -1835,17 +1835,86 @@ def p_c_simple_base_type(s, self_flag, nonempty):
is_self_arg = self_flag)
- # Treat trailing [] on type as buffer access if it appears in a context
- # where declarator names are required (so that it cannot mean int[] or
- # sizeof(int[SIZE]))...
+ # Trailing [] on type could mean one of 3 things, depending on the values
+ # of empty and nonempty.
#
- # (This means that buffers cannot occur where there can be empty declarators,
- # which is an ok restriction to make.)
- if nonempty and s.sy == '[':
- return p_buffer_access(s, type_node)
+ # empty == True and nonempty == False:
+ # e.g., sizeof(int[SIZE]). The only bracketed type that can appear here
+ # is an anonymous C array.
+ # empty == False and nonempty == True:
+    #   e.g., declaration of a buffer/memoryview slice array. No anonymous
+    #   bracketed C arrays allowed, so only buffer declarations/memoryview
+ # declarations here.
+ # empty == False and nonempty == False:
+ # We disallow buffer declarations in this case. Only anonymous C arrays
+ # and memoryview slice arrays are possible here. Memoryview arrays are
+ # distinguished by an explicit colon in the first axis declaration.
+ # empty == True and nonempty == True:
+ # obviously illegal.
+
+ if s.sy == '[':
+ return p_bracketed_base_type(s, type_node, nonempty, empty)
else:
return type_node
+
+def p_bracketed_base_type(s, base_type_node, nonempty, empty):
+ # s.sy == '['
+ if empty and not nonempty:
+ # sizeof-like thing. Only anonymous C arrays allowed (int[SIZE]).
+ return base_type_node
+ elif not empty and nonempty:
+ # declaration of either memoryview slice or buffer.
+ if is_memoryviewslice_access(s):
+ return p_memoryviewslice_access(s, base_type_node)
+ else:
+ return p_buffer_access(s, base_type_node)
+ elif not empty and not nonempty:
+ # only anonymous C arrays and memoryview slice arrays here. We
+ # disallow buffer declarations for now, due to ambiguity with anonymous
+ # C arrays.
+ if is_memoryviewslice_access(s):
+ return p_memoryviewslice_access(s, base_type_node)
+ else:
+ return base_type_node
+
+def is_memoryviewslice_access(s):
+ # s.sy == '['
+ # a memoryview slice declaration is distinguishable from a buffer access
+ # declaration by the first entry in the bracketed list. The buffer will
+ # not have an unnested colon in the first entry; the memoryview slice will.
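+    # e.g. 'int[:, ::1]' starts its first axis with ':' (memoryview slice),
+    # while 'int[10]' or 'object[int, ndim=2]' does not (C array / buffer).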
+ saved = [(s.sy, s.systring)]
+ s.next()
+ retval = False
+ if s.systring == ':':
+ retval = True
+ elif s.sy == 'INT':
+ saved.append((s.sy, s.systring))
+ s.next()
+ if s.sy == ':':
+ retval = True
+
+ for sv in reversed(saved):
+ s.put_back(*sv)
+
+ return retval
+
+def p_memoryviewslice_access(s, base_type_node):
+ # s.sy == '['
+ pos = s.position()
+ s.next()
+ subscripts = p_subscript_list(s)
+ # make sure each entry in subscripts is a slice
+ for subscript in subscripts:
+ if len(subscript) < 2:
+ s.error("An axis specification in memoryview declaration does not have a ':'.")
+ s.expect(']')
+ indexes = make_slice_nodes(pos, subscripts)
+ result = Nodes.MemoryViewSliceTypeNode(pos,
+ base_type_node = base_type_node,
+ axes = indexes)
+ return result
+
def p_buffer_access(s, base_type_node):
# s.sy == '['
pos = s.position()
@@ -2652,4 +2721,3 @@ def print_parse_tree(f, node, level, key = None):
f.write("%s]\n" % ind)
return
f.write("%s%s\n" % (ind, node))
-
diff --git a/Cython/Compiler/Pipeline.py b/Cython/Compiler/Pipeline.py
new file mode 100644
index 000000000..b6f8dfe7b
--- /dev/null
+++ b/Cython/Compiler/Pipeline.py
@@ -0,0 +1,218 @@
+from Errors import PyrexError, CompileError, InternalError, error
+import Errors
+import DebugFlags
+from Visitor import CythonTransform
+
+#
+# Really small pipeline stages.
+#
+def dumptree(t):
+ # For quick debugging in pipelines
+ print t.dump()
+ return t
+
+def abort_on_errors(node):
+ # Stop the pipeline if there are any errors.
+ if Errors.num_errors != 0:
+ raise InternalError, "abort"
+ return node
+
+def parse_stage_factory(context):
+ def parse(compsrc):
+ source_desc = compsrc.source_desc
+ full_module_name = compsrc.full_module_name
+ initial_pos = (source_desc, 1, 0)
+ scope = context.find_module(full_module_name, pos = initial_pos, need_pxd = 0)
+ tree = context.parse(source_desc, scope, pxd = 0, full_module_name = full_module_name)
+ tree.compilation_source = compsrc
+ tree.scope = scope
+ tree.is_pxd = False
+ return tree
+ return parse
+
+def parse_pxd_stage_factory(context, scope, module_name):
+ def parse(source_desc):
+ tree = context.parse(source_desc, scope, pxd=True,
+ full_module_name=module_name)
+ tree.scope = scope
+ tree.is_pxd = True
+ return tree
+ return parse
+
+def generate_pyx_code_stage_factory(options, result):
+ def generate_pyx_code_stage(module_node):
+ module_node.process_implementation(options, result)
+ result.compilation_source = module_node.compilation_source
+ return result
+ return generate_pyx_code_stage
+
+def inject_pxd_code_stage_factory(context):
+ def inject_pxd_code_stage(module_node):
+ for name, (statlistnode, scope) in context.pxds.iteritems():
+ module_node.merge_in(statlistnode, scope)
+ return module_node
+ return inject_pxd_code_stage
+
+def inject_utility_code_stage_factory(context):
+ def inject_utility_code_stage(module_node):
+ added = []
+ # Note: the list might be extended inside the loop (if some utility code
+ # pulls in other utility code, explicitly or implicitly)
+ for utilcode in module_node.scope.utility_code_list:
+ if utilcode in added: continue
+ added.append(utilcode)
+ if utilcode.requires:
+ for dep in utilcode.requires:
+                if dep not in added and dep not in module_node.scope.utility_code_list:
+ module_node.scope.utility_code_list.append(dep)
+ tree = utilcode.get_tree()
+ if tree:
+ module_node.merge_in(tree.body, tree.scope, merge_scope=True)
+ return module_node
+ return inject_utility_code_stage
+
+class UseUtilityCodeDefinitions(CythonTransform):
+ # Temporary hack to use any utility code in nodes' "utility_code_definitions".
+ # This should be moved to the code generation phase of the relevant nodes once
+ # it is safe to generate CythonUtilityCode at code generation time.
+ def __call__(self, node):
+ self.scope = node.scope
+ return super(UseUtilityCodeDefinitions, self).__call__(node)
+
+ def visit_AttributeNode(self, node):
+ if node.entry and node.entry.utility_code_definition:
+ self.scope.use_utility_code(node.entry.utility_code_definition)
+ return node
+
+ def visit_NameNode(self, node):
+ for e in (node.entry, node.type_entry):
+ if e and e.utility_code_definition:
+ self.scope.use_utility_code(e.utility_code_definition)
+ return node
+
+#
+# Pipeline factories
+#
+
+def create_pipeline(context, mode):
+ assert mode in ('pyx', 'py', 'pxd', 'utility_code')
+ # utility_code should behave like pyx in most instances,
+ # but not for e.g. AutoTestDictTransform
+
+ from Visitor import PrintTree
+ from ParseTreeTransforms import WithTransform, NormalizeTree, PostParse, PxdPostParse
+ from ParseTreeTransforms import AnalyseDeclarationsTransform, AnalyseExpressionsTransform
+ from ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform
+ from ParseTreeTransforms import InterpretCompilerDirectives, TransformBuiltinMethods
+ from ParseTreeTransforms import AlignFunctionDefinitions, GilCheck
+ from TypeInference import MarkAssignments
+ from AnalysedTreeTransforms import AutoTestDictTransform
+ from AutoDocTransforms import EmbedSignature
+ from Optimize import EarlyReplaceBuiltinCalls, OptimizeBuiltinCalls
+ from Optimize import FlattenInListTransform, SwitchTransform, IterationTransform
+ from Optimize import ConstantFolding, FinalOptimizePhase
+ from Optimize import DropRefcountingTransform
+ from Buffer import IntroduceBufferAuxiliaryVars
+ from ModuleNode import check_c_declarations, check_c_declarations_pxd
+
+ # NOTE: This is the "common" parts of the pipeline, which is also
+ # used e.g. for dealing with utility code written in Cython, or
+ # code in pxd files. So it will be run multiple times in a
+ # compilation stage.
+
+ # TODO: Make the code prettier
+ pipeline = [
+ NormalizeTree(context),
+ PostParse(context)]
+ if mode == 'pxd':
+ pipeline.append(PxdPostParse(context))
+ pipeline += [
+ InterpretCompilerDirectives(context, context.compiler_directives)]
+ if mode == 'py':
+ pipeline.append(AlignFunctionDefinitions(context))
+ pipeline += [
+ ConstantFolding(),
+ FlattenInListTransform(),
+ WithTransform(context),
+ DecoratorTransform(context),
+ AnalyseDeclarationsTransform(context)]
+ if mode in ('py', 'pyx'):
+ pipeline.append(AutoTestDictTransform(context))
+ pipeline += [
+ EmbedSignature(context),
+ EarlyReplaceBuiltinCalls(context),
+ MarkAssignments(context),
+ TransformBuiltinMethods(context),
+ IntroduceBufferAuxiliaryVars(context)]
+ if mode == 'pxd':
+ pipeline.append(check_c_declarations_pxd)
+ else:
+ pipeline.append(check_c_declarations)
+ pipeline += [
+ AnalyseExpressionsTransform(context),
+ OptimizeBuiltinCalls(context),
+ IterationTransform(),
+ SwitchTransform(),
+ DropRefcountingTransform(),
+ FinalOptimizePhase(context),
+ GilCheck(),
+ UseUtilityCodeDefinitions(context)]
+ return pipeline
+
+def create_pyx_pipeline(context, options, result, py=False):
+ if py:
+ mode = 'py'
+ else:
+ mode = 'pyx'
+ test_support = []
+ if options.evaluate_tree_assertions:
+ from Cython.TestUtils import TreeAssertVisitor
+ test_support.append(TreeAssertVisitor())
+ return ([
+ parse_stage_factory(context),
+ ] + create_pipeline(context, mode) + test_support + [
+ inject_pxd_code_stage_factory(context),
+ inject_utility_code_stage_factory(context),
+ abort_on_errors,
+ generate_pyx_code_stage_factory(options, result),
+ ])
+
+def create_pxd_pipeline(context, scope, module_name):
+ from CodeGeneration import ExtractPxdCode
+
+ # The pxd pipeline ends up with a CCodeWriter containing the
+ # code of the pxd, as well as a pxd scope.
+ return [
+ parse_pxd_stage_factory(context, scope, module_name)
+ ] + create_pipeline(context, 'pxd') + [
+ ExtractPxdCode(context)
+ ]
+
+def create_py_pipeline(context, options, result):
+ return create_pyx_pipeline(context, options, result, py=True)
+
+#
+# Running a pipeline
+#
+def run_pipeline(pipeline, source):
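+    # Each phase is a callable; the source is threaded through them in order,
+    # with each phase's return value becoming the next phase's input.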
+ error = None
+ data = source
+ try:
+ for phase in pipeline:
+ if phase is not None:
+ if DebugFlags.debug_verbose_pipeline:
+ print "Entering pipeline phase %r" % phase
+ data = phase(data)
+ except CompileError, err:
+ # err is set
+ Errors.report_error(err)
+ error = err
+ except InternalError, err:
+ # Only raise if there was not an earlier error
+ if Errors.num_errors == 0:
+ raise
+ error = err
+ return (error, data)
+
diff --git a/Cython/Compiler/PyrexTypes.py b/Cython/Compiler/PyrexTypes.py
index 6d7d608aa..f92856242 100644
--- a/Cython/Compiler/PyrexTypes.py
+++ b/Cython/Compiler/PyrexTypes.py
@@ -26,6 +26,7 @@ class BaseType(object):
else:
return base_code
+
class PyrexType(BaseType):
#
# Base class for all Pyrex types.
@@ -101,6 +102,7 @@ class PyrexType(BaseType):
is_returncode = 0
is_error = 0
is_buffer = 0
+ is_memoryviewslice = 0
has_attributes = 0
default_value = ""
pymemberdef_typecode = None
@@ -153,6 +155,13 @@ class PyrexType(BaseType):
# type information of the struct.
return 1
+ def global_init_code(self, entry, code):
+ # abstract
+ pass
+
+ def needs_nonecheck(self):
+ return 0
+
def create_typedef_type(cname, base_type, is_external=0):
if base_type.is_complex:
@@ -294,6 +303,171 @@ class CTypedefType(BaseType):
def __getattr__(self, name):
return getattr(self.typedef_base_type, name)
+class MemoryViewSliceType(PyrexType):
+
+ is_memoryviewslice = 1
+
+ has_attributes = 1
+ scope = None
+
+ def __init__(self, base_dtype, axes):
+ '''
+ MemoryViewSliceType(base, axes)
+
+ Base is the C base type; axes is a list of (access, packing) strings,
+ where access is one of 'full', 'direct' or 'ptr' and packing is one of
+ 'contig', 'strided' or 'follow'. There is one (access, packing) tuple
+ for each dimension.
+
+ the access specifiers determine whether the array data contains
+ pointers that need to be dereferenced along that axis when
+ retrieving/setting:
+
+ 'direct' -- No pointers stored in this dimension.
+ 'ptr' -- Pointer stored in this dimension.
+ 'full' -- Check along this dimension, don't assume either.
+
+        the packing specifiers specify how the array elements are laid out
+ in memory.
+
+ 'contig' -- The data are contiguous in memory along this dimension.
+ At most one dimension may be specified as 'contig'.
+            'strided' -- The data aren't contiguous along this dimension.
+ 'follow' -- Used for C/Fortran contiguous arrays, a 'follow' dimension
+ has its stride automatically computed from extents of the other
+ dimensions to ensure C or Fortran memory layout.
+
+ C-contiguous memory has 'direct' as the access spec, 'contig' as the
+ *last* axis' packing spec and 'follow' for all other packing specs.
+
+ Fortran-contiguous memory has 'direct' as the access spec, 'contig' as
+ the *first* axis' packing spec and 'follow' for all other packing
+ specs.
+ '''
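+        # Example: a 2-d C-contiguous slice has
+        # axes == [('direct', 'follow'), ('direct', 'contig')].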
+
+ self.dtype = base_dtype
+ self.axes = axes
+
+ import MemoryView
+ self.is_c_contig, self.is_f_contig = MemoryView.is_cf_contig(self.axes)
+ assert not (self.is_c_contig and self.is_f_contig)
+
+ def same_as_resolved_type(self, other_type):
+ return ((other_type.is_memoryviewslice and
+ self.dtype.same_as(other_type.dtype) and
+ self.axes == other_type.axes) or
+ other_type is error_type)
+
+ def needs_nonecheck(self):
+ return True
+
+ def is_complete(self):
+ # incomplete since the underlying struct doesn't have a cython.memoryview object.
+ return 0
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ # XXX: we put these guards in for now...
+ assert not pyrex
+ assert not dll_linkage
+ import MemoryView
+ return self.base_declaration_code(
+ MemoryView.memviewslice_cname,
+ entity_code)
+
+ def attributes_known(self):
+ if self.scope is None:
+
+ import Symtab, MemoryView, Options
+ from MemoryView import axes_to_str
+
+ self.scope = scope = Symtab.CClassScope(
+ 'mvs_class_'+self.specialization_suffix(),
+ None,
+ visibility='extern')
+
+ scope.parent_type = self
+
+ scope.declare_var('_data', c_char_ptr_type, None, cname='data', is_cdef=1)
+
+ scope.declare_var('shape',
+ c_array_type(c_py_ssize_t_type,
+ Options.buffer_max_dims),
+ None,
+ cname='shape',
+ is_cdef=1)
+
+ scope.declare_var('strides',
+ c_array_type(c_py_ssize_t_type,
+ Options.buffer_max_dims),
+ None,
+ cname='strides',
+ is_cdef=1)
+
+ scope.declare_var('suboffsets',
+ c_array_type(c_py_ssize_t_type,
+ Options.buffer_max_dims),
+ None,
+ cname='suboffsets',
+ is_cdef=1)
+
+ mangle_dtype = MemoryView.mangle_dtype_name(self.dtype)
+ ndim = len(self.axes)
+
+ to_axes_c = [('direct', 'contig')]
+ to_axes_f = [('direct', 'contig')]
+ if ndim-1:
+ to_axes_c = [('direct', 'follow')]*(ndim-1) + to_axes_c
+ to_axes_f = to_axes_f + [('direct', 'follow')]*(ndim-1)
+
+ to_memview_c = MemoryViewSliceType(self.dtype, to_axes_c)
+ to_memview_f = MemoryViewSliceType(self.dtype, to_axes_f)
+
+ cython_name_c, cython_name_f = "copy", "copy_fortran"
+ copy_name_c, copy_name_f = (
+ MemoryView.get_copy_func_name(to_memview_c),
+ MemoryView.get_copy_func_name(to_memview_f))
+
+
+ for (to_memview, cython_name, copy_name) in ((to_memview_c, cython_name_c, copy_name_c),
+ (to_memview_f, cython_name_f, copy_name_f)):
+
+ entry = scope.declare_cfunction(cython_name,
+ CFuncType(self,
+ [CFuncTypeArg("memviewslice", self, None)]),
+ pos = None,
+ defining = 1,
+ cname = copy_name)
+
+ entry.utility_code_definition = \
+ MemoryView.CopyFuncUtilCode(self, to_memview)
+
+ # is_c_contig and is_f_contig functions
+ for (c_or_f, cython_name) in (('c', 'is_c_contig'), ('fortran', 'is_f_contig')):
+
+ is_contig_name = \
+ MemoryView.get_is_contig_func_name(c_or_f)
+
+ entry = scope.declare_cfunction(cython_name,
+ CFuncType(c_int_type,
+ [CFuncTypeArg("memviewslice", self, None)]),
+ pos = None,
+ defining = 1,
+ cname = is_contig_name)
+
+ entry.utility_code_definition = \
+ MemoryView.IsContigFuncUtilCode(c_or_f)
+
+ return True
+
+ def specialization_suffix(self):
+ import MemoryView
+ return MemoryView.axes_to_str(self.axes) + '_' + MemoryView.mangle_dtype_name(self.dtype)
+
+ def global_init_code(self, entry, code):
+ code.putln("%s.data = NULL;" % entry.cname)
+ code.put_init_to_py_none("%s.memview" % entry.cname, cython_memoryview_ptr_type, nanny=False)
+
class BufferType(BaseType):
#
# Delegates most attribute
@@ -374,6 +548,9 @@ class PyObjectType(PyrexType):
else:
return cname
+ def global_init_code(self, entry, code):
+ code.put_init_var_to_py_none(entry, nanny=False)
+
class BuiltinObjectType(PyObjectType):
is_builtin_type = 1
@@ -471,9 +648,12 @@ class PyExtensionType(PyObjectType):
is_extension_type = 1
has_attributes = 1
-
+
objtypedef_cname = None
+ def needs_nonecheck(self):
+ return True
+
def __init__(self, name, typedef_flag, base_type, is_external=0):
self.name = name
self.scope = None
@@ -1598,6 +1778,9 @@ class CFuncTypeArg(object):
return self.type.declaration_code(self.cname, for_display)
class StructUtilityCode(object):
+
+ requires = None
+
def __init__(self, type, forward_decl):
self.type = type
self.header = "static PyObject* %s(%s)" % (type.to_py_function, type.declaration_code('s'))
@@ -1607,6 +1790,9 @@ class StructUtilityCode(object):
return isinstance(other, StructUtilityCode) and self.header == other.header
def __hash__(self):
return hash(self.header)
+
+ def get_tree(self):
+ pass
def put_code(self, output):
code = output['utility_code_def']
@@ -1932,6 +2118,22 @@ c_anon_enum_type = CAnonEnumType(-1, 1)
c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer")
c_py_buffer_ptr_type = CPtrType(c_py_buffer_type)
+# buffer-related structs
+c_buf_diminfo_type = CStructOrUnionType("__Pyx_Buf_DimInfo", "struct",
+ None, 1, "__Pyx_Buf_DimInfo")
+c_pyx_buffer_type = CStructOrUnionType("__Pyx_Buffer", "struct", None, 1, "__Pyx_Buffer")
+c_pyx_buffer_ptr_type = CPtrType(c_pyx_buffer_type)
+c_pyx_buffer_nd_type = CStructOrUnionType("__Pyx_LocalBuf_ND", "struct",
+ None, 1, "__Pyx_LocalBuf_ND")
+
+cython_memoryview_type = CStructOrUnionType("__pyx_obj_memoryview", "struct",
+ None, 0, "__pyx_obj_memoryview")
+
+cython_memoryview_ptr_type = CPtrType(cython_memoryview_type)
+
+memoryviewslice_type = CStructOrUnionType("__Pyx_memviewslice", "struct",
+ None, 1, "__Pyx_memviewslice")
+
error_type = ErrorType()
unspecified_type = UnspecifiedType()
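
The (access, packing) convention documented in MemoryViewSliceType.__init__ is easiest to see with concrete axes lists. The sketch below is illustrative only: it mirrors the convention from the docstring, but it is not the compiler's MemoryView.is_cf_contig helper, and it ignores the one-dimensional case that the real helper has to disambiguate.

    # Illustrative sketch of the (access, packing) axes convention; not the
    # compiler's MemoryView.is_cf_contig implementation.
    def is_cf_contig(axes):
        # axes: one (access, packing) pair per dimension
        packing = [p for _, p in axes]
        is_c = packing[-1] == 'contig' and all(p == 'follow' for p in packing[:-1])
        is_f = packing[0] == 'contig' and all(p == 'follow' for p in packing[1:])
        return is_c, is_f

    # cdef int[:, ::1] x   -> C-contiguous: 'contig' on the last axis
    print(is_cf_contig([('direct', 'follow'), ('direct', 'contig')]))    # (True, False)
    # cdef int[::1, :] x   -> Fortran-contiguous: 'contig' on the first axis
    print(is_cf_contig([('direct', 'contig'), ('direct', 'follow')]))    # (False, True)
    # cdef int[:, :] x     -> plain strided view
    print(is_cf_contig([('direct', 'strided'), ('direct', 'strided')]))  # (False, False)
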
diff --git a/Cython/Compiler/Symtab.py b/Cython/Compiler/Symtab.py
index c8fb01c7f..a2a5dcce8 100644
--- a/Cython/Compiler/Symtab.py
+++ b/Cython/Compiler/Symtab.py
@@ -44,12 +44,9 @@ def c_safe_identifier(cname):
class BufferAux(object):
writable_needed = False
- def __init__(self, buffer_info_var, stridevars, shapevars,
- suboffsetvars):
- self.buffer_info_var = buffer_info_var
- self.stridevars = stridevars
- self.shapevars = shapevars
- self.suboffsetvars = suboffsetvars
+ def __init__(self, buflocal_nd_var, rcbuf_var):
+ self.buflocal_nd_var = buflocal_nd_var
+ self.rcbuf_var = rcbuf_var
def __repr__(self):
return "<BufferAux %r>" % self.__dict__
@@ -118,6 +115,11 @@ class Entry(object):
# inline_func_in_pxd boolean Hacky special case for inline function in pxd file.
# Ideally this should not be necessary.
# assignments [ExprNode] List of expressions that get assigned to this entry.
+ # utility_code_definition For some Cython builtins, the utility code
+ # which contains the definition of the entry.
+ # Currently only supported for CythonScope entries.
+
+ # TODO: utility_code and utility_code_definition serve the same purpose...
inline_func_in_pxd = False
borrowed = 0
@@ -165,6 +167,7 @@ class Entry(object):
is_overridable = 0
buffer_aux = None
prev_entry = None
+ utility_code_definition = None
def __init__(self, name, cname, type, pos = None, init = None):
self.name = name
@@ -194,14 +197,12 @@ class Scope(object):
# cfunc_entries [Entry] C function entries
# c_class_entries [Entry] All extension type entries
# cname_to_entry {string : Entry} Temp cname to entry mapping
- # int_to_entry {int : Entry} Temp cname to entry mapping
# return_type PyrexType or None Return type of function owning scope
# is_py_class_scope boolean Is a Python class scope
# is_c_class_scope boolean Is an extension type scope
# scope_prefix string Disambiguator for C names
# in_cinclude boolean Suppress C declaration code
# qualified_name string "modname" or "modname.classname"
- # pystring_entries [Entry] String const entries newly used as
# Python strings in this scope
# control_flow ControlFlow Used for keeping track of environment state
# nogil boolean In a nogil section
@@ -245,11 +246,23 @@ class Scope(object):
self.identifier_to_entry = {}
self.num_to_entry = {}
self.obj_to_entry = {}
- self.pystring_entries = []
self.buffer_entries = []
self.control_flow = ControlFlow.LinearControlFlow()
self.return_type = None
-
+
+ def merge_in(self, other):
+ # Use with care...
+ self.entries.update(other.entries)
+ for x in ('const_entries',
+ 'type_entries',
+ 'sue_entries',
+ 'arg_entries',
+ 'var_entries',
+ 'pyfunc_entries',
+ 'cfunc_entries',
+ 'c_class_entries'):
+ getattr(self, x).extend(getattr(other, x))
+
def start_branching(self, pos):
self.control_flow = self.control_flow.start_branch(pos)
@@ -688,9 +701,13 @@ class ModuleScope(Scope):
is_module_scope = 1
has_import_star = 0
- def __init__(self, name, parent_module, context):
+ def __init__(self, name, parent_module, context, no_outer_scope=False):
self.parent_module = parent_module
- outer_scope = context.find_submodule("__builtin__")
+ if not no_outer_scope:
+ outer_scope = context.find_submodule("__builtin__")
+ self.type_names = dict(outer_scope.type_names)
+ else:
+ outer_scope = None
Scope.__init__(self, name, outer_scope, parent_module)
if name != "__init__":
self.module_name = name
@@ -708,7 +725,6 @@ class ModuleScope(Scope):
self.module_entries = {}
self.python_include_files = ["Python.h", "structmember.h"]
self.include_files = []
- self.type_names = dict(outer_scope.type_names)
self.pxd_file_loaded = 0
self.cimported_modules = []
self.types_imported = {}
@@ -853,7 +869,7 @@ class ModuleScope(Scope):
def declare_c_class(self, name, pos, defining = 0, implementing = 0,
module_name = None, base_type = None, objstruct_cname = None,
- typeobj_cname = None, visibility = 'private', typedef_flag = 0, api = 0,
+ typeobj_cname = None, typeptr_cname = None, visibility = 'private', typedef_flag = 0, api = 0,
buffer_defaults = None):
# If this is a non-extern typedef class, expose the typedef, but use
# the non-typedef struct internally to avoid needing forward
@@ -896,7 +912,10 @@ class ModuleScope(Scope):
type.module_name = module_name
else:
type.module_name = self.qualified_name
- type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
+ if typeptr_cname:
+ type.typeptr_cname = typeptr_cname
+ else:
+ type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
entry = self.declare_type(name, type, pos, visibility = visibility,
defining = 0)
entry.is_cclass = True
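
The new Scope.merge_in follows a simple pattern: absorb the donor scope's name-to-entry mapping and append its per-category entry lists. A toy stand-in is sketched below, using an invented ToyScope rather than real compiler scopes, and covering only two of the category lists the real method extends.

    # ToyScope is hypothetical; it only mimics the attributes merge_in touches.
    class ToyScope(object):
        def __init__(self):
            self.entries = {}          # name -> entry
            self.cfunc_entries = []    # one of the per-category entry lists
            self.type_entries = []

        def merge_in(self, other):
            # on name clashes the donor wins, as dict.update does
            self.entries.update(other.entries)
            for attr in ('cfunc_entries', 'type_entries'):
                getattr(self, attr).extend(getattr(other, attr))

    a, b = ToyScope(), ToyScope()
    a.entries['f'] = 'entry for f'
    b.entries['g'] = 'entry for g'
    b.cfunc_entries.append('entry for g')
    a.merge_in(b)
    print(sorted(a.entries))    # ['f', 'g']
    print(a.cfunc_entries)      # ['entry for g']
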
diff --git a/Cython/Compiler/Tests/TestBuffer.py b/Cython/Compiler/Tests/TestBuffer.py
index b819c91bb..28f0fd270 100644
--- a/Cython/Compiler/Tests/TestBuffer.py
+++ b/Cython/Compiler/Tests/TestBuffer.py
@@ -106,3 +106,8 @@ class TestBufferOptions(CythonTest):
self.assert_(stats[1].base_type.ndim == 3)
# add exotic and impossible combinations as they come along...
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main()
+
diff --git a/Cython/Compiler/Tests/TestMemView.py b/Cython/Compiler/Tests/TestMemView.py
new file mode 100644
index 000000000..7b61b30c4
--- /dev/null
+++ b/Cython/Compiler/Tests/TestMemView.py
@@ -0,0 +1,71 @@
+from Cython.TestUtils import CythonTest
+import Cython.Compiler.Errors as Errors
+from Cython.Compiler.Nodes import *
+from Cython.Compiler.ParseTreeTransforms import *
+from Cython.Compiler.Buffer import *
+
+
+class TestMemviewParsing(CythonTest):
+
+ def parse(self, s):
+ return self.should_not_fail(lambda: self.fragment(s)).root
+
+ def not_parseable(self, expected_error, s):
+ e = self.should_fail(lambda: self.fragment(s), Errors.CompileError)
+ self.assertEqual(expected_error, e.message_only)
+
+ def test_default_1dim(self):
+ self.parse(u"cdef int[:] x")
+ self.parse(u"cdef short int[:] x")
+
+ def test_default_ndim(self):
+ self.parse(u"cdef int[:,:,:,:,:] x")
+ self.parse(u"cdef unsigned long int[:,:,:,:,:] x")
+ self.parse(u"cdef unsigned int[:,:,:,:,:] x")
+
+ def test_zero_offset(self):
+ self.parse(u"cdef long double[0:] x")
+ self.parse(u"cdef int[0:] x")
+
+ def test_zero_offset_ndim(self):
+ self.parse(u"cdef int[0:,0:,0:,0:] x")
+
+ def test_def_arg(self):
+ self.parse(u"def foo(int[:,:] x): pass")
+
+ def test_cdef_arg(self):
+ self.parse(u"cdef foo(int[:,:] x): pass")
+
+ def test_general_slice(self):
+ self.parse(u'cdef float[::ptr, ::direct & contig, 0::full & strided] x')
+
+ def test_non_slice_memview(self):
+ self.not_parseable(u"An axis specification in memoryview declaration does not have a ':'.",
+ u"cdef double[:foo, bar] x")
+ self.not_parseable(u"An axis specification in memoryview declaration does not have a ':'.",
+ u"cdef double[0:foo, bar] x")
+
+ def test_basic(self):
+ t = self.parse(u"cdef int[:] x")
+ memv_node = t.stats[0].base_type
+ self.assert_(isinstance(memv_node, MemoryViewSliceTypeNode))
+
+ # we also test other similar declarations (buffers, anonymous C arrays)
+ # since the parsing has to distinguish between them.
+
+ def test_no_buf_arg(self):
+ self.not_parseable(u"Expected ']'",
+ u"cdef extern foo(object[int, ndim=2])")
+
+ def test_parse_sizeof(self):
+ self.parse(u"sizeof(int[NN])")
+ self.parse(u"sizeof(int[])")
+ self.parse(u"sizeof(int[][NN])")
+ self.not_parseable(u"Expected an identifier or literal",
+ u"sizeof(int[:NN])")
+ self.not_parseable(u"Expected ']'",
+ u"sizeof(foo[dtype=bar]")
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main()
diff --git a/Cython/Compiler/TreeFragment.py b/Cython/Compiler/TreeFragment.py
index 66feaf09e..b203961fe 100644
--- a/Cython/Compiler/TreeFragment.py
+++ b/Cython/Compiler/TreeFragment.py
@@ -20,7 +20,8 @@ Support for parsing strings into code trees.
"""
class StringParseContext(Main.Context):
- def __init__(self, include_directories, name):
+ def __init__(self, name, include_directories=None):
+ if include_directories is None: include_directories = []
Main.Context.__init__(self, include_directories, {})
self.module_name = name
@@ -29,7 +30,8 @@ class StringParseContext(Main.Context):
raise AssertionError("Not yet supporting any cimports/includes from string code snippets")
return ModuleScope(module_name, parent_module = None, context = self)
-def parse_from_strings(name, code, pxds={}, level=None, initial_pos=None):
+def parse_from_strings(name, code, pxds={}, level=None, initial_pos=None,
+ context=None):
"""
Utility method to parse a (unicode) string of code. This is mostly
used for internal Cython compiler purposes (creating code snippets
@@ -37,8 +39,14 @@ def parse_from_strings(name, code, pxds={}, level=None, initial_pos=None):
code - a unicode string containing Cython (module-level) code
name - a descriptive name for the code source (to use in error messages etc.)
- """
+ RETURNS
+
+ The tree, i.e. a ModuleNode. The ModuleNode's scope attribute is
+ set to the scope used when parsing.
+ """
+ if context is None:
+ context = StringParseContext(name)
# Since source files carry an encoding, it makes sense in this context
# to use a unicode string so that code fragments don't have to bother
# with encoding. This means that test code passed in should not have an
@@ -51,7 +59,6 @@ def parse_from_strings(name, code, pxds={}, level=None, initial_pos=None):
initial_pos = (name, 1, 0)
code_source = StringSourceDescriptor(name, code)
- context = StringParseContext([], name)
scope = context.find_module(module_name, pos = initial_pos, need_pxd = 0)
buf = StringIO(code)
@@ -62,6 +69,7 @@ def parse_from_strings(name, code, pxds={}, level=None, initial_pos=None):
tree = Parsing.p_module(scanner, 0, module_name)
else:
tree = Parsing.p_code(scanner, level=level)
+ tree.scope = scope
return tree
class TreeCopier(VisitorTransform):
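
With the changes above, parse_from_strings() accepts an optional context and the returned ModuleNode carries its parsing scope. A hedged usage sketch follows; the snippet string and names are made up, and running it assumes the patched package is importable.

    # Assumes the patched Cython package is on sys.path; the snippet is made up.
    from Cython.Compiler.TreeFragment import parse_from_strings, StringParseContext

    code = u"def f():\n    return 42\n"

    # Without a context, a StringParseContext is created internally.
    tree = parse_from_strings("snippet", code)
    print(tree.scope)          # the ModuleScope that was used while parsing

    # With an explicit context, e.g. to control how find_module() builds scopes.
    ctx = StringParseContext("snippet")
    tree2 = parse_from_strings("snippet", code, context=ctx)
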
diff --git a/Cython/Compiler/UtilityCode.py b/Cython/Compiler/UtilityCode.py
new file mode 100644
index 000000000..e64f17d39
--- /dev/null
+++ b/Cython/Compiler/UtilityCode.py
@@ -0,0 +1,65 @@
+from TreeFragment import parse_from_strings, StringParseContext
+from Scanning import StringSourceDescriptor
+import Symtab
+import Naming
+
+class NonManglingModuleScope(Symtab.ModuleScope):
+
+ def __init__(self, prefix, *args, **kw):
+ self.prefix = prefix
+ Symtab.ModuleScope.__init__(self, *args, **kw)
+
+ def mangle(self, prefix, name=None):
+ if name:
+ if prefix in (Naming.typeobj_prefix, Naming.func_prefix, Naming.var_prefix, Naming.pyfunc_prefix):
+ # Functions, classes, etc. get the manually defined prefix instead
+ # (the one passed to CythonUtilityCode), so they stay easily callable.
+ prefix = self.prefix
+ result = "%s%s" % (prefix, name)
+ else:
+ result = Symtab.ModuleScope.mangle(self, prefix)
+ return result
+
+class CythonUtilityCodeContext(StringParseContext):
+ scope = None
+
+ def find_module(self, module_name, relative_to = None, pos = None, need_pxd = 1):
+ if module_name != self.module_name:
+ raise AssertionError("Not yet supporting any cimports/includes from string code snippets")
+ if self.scope is None:
+ self.scope = NonManglingModuleScope(self.prefix,
+ module_name, parent_module = None, context = self)
+ return self.scope
+
+class CythonUtilityCode:
+ """
+ Utility code written in the Cython language itself.
+ """
+
+ def __init__(self, pyx, name="__pyxutil", prefix="", requires=None):
+ # 1) We need to delay the parsing/processing so that all modules can be
+ # imported without import loops.
+ # 2) The same utility code object can be used for multiple source files,
+ # but the generated node trees may be altered during the compilation of
+ # a single file.
+ # Hence, delay any processing until later.
+ self.pyx = pyx
+ self.name = name
+ self.prefix = prefix
+ self.requires = requires
+
+ def get_tree(self):
+ import Pipeline
+ context = CythonUtilityCodeContext(self.name)
+ context.prefix = self.prefix
+ tree = parse_from_strings(self.name, self.pyx, context=context)
+ pipeline = Pipeline.create_pipeline(context, 'utility_code')
+ (err, tree) = Pipeline.run_pipeline(pipeline, tree)
+ assert not err
+ return tree
+
+ def put_code(self, output):
+ pass
+
+
+
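
CythonUtilityCode defers all parsing until get_tree(), which builds a CythonUtilityCodeContext and runs the 'utility_code' pipeline. A hedged sketch of how it is meant to be driven; the utility source, name and prefix below are invented for illustration and are not part of the patch.

    # The .pyx source, name and prefix here are placeholders.
    from Cython.Compiler.UtilityCode import CythonUtilityCode

    util = CythonUtilityCode(
        pyx=u"cdef int _triple(int x):\n    return 3 * x\n",
        name="triple_util",
        prefix="__pyx_triple_",   # picked up by NonManglingModuleScope.mangle()
    )

    # Nothing is parsed yet; get_tree() parses the source and pushes it through
    # Pipeline.create_pipeline(context, 'utility_code').
    tree = util.get_tree()        # a parsed and transformed ModuleNode
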
diff --git a/tests/compile/memview_declaration.pyx b/tests/compile/memview_declaration.pyx
new file mode 100644
index 000000000..68b99b89b
--- /dev/null
+++ b/tests/compile/memview_declaration.pyx
@@ -0,0 +1,17 @@
+cimport cython
+from cython.view cimport contig as foo, full as bar, follow
+from cython cimport view
+
+cdef char[:] one_dim
+cdef char[:,:,:] three_dim
+cdef unsigned int[::1, :] view1
+cdef unsigned int[:, ::1] view2
+cdef long long[::1, :, :, :] fort_contig
+cdef unsigned long[:, :, :, ::1] c_contig
+cdef unsigned short int[::1] c_and_fort
+cdef long long[0x0::0x1, 00:, -0 :,0 :] fort_contig0
+cdef unsigned long[0:, 0:, 0:, 0::0x0001] c_contig0
+
+cdef float[::foo & bar, ::cython.view.direct & cython.view.follow] view4
+cdef int[::view.full & foo] view3
+cdef int[::view.ptr & view.strided] view1000
diff --git a/tests/errors/e_bufaccess.pyx b/tests/errors/e_bufaccess.pyx
index d541103a4..9ae68c911 100644
--- a/tests/errors/e_bufaccess.pyx
+++ b/tests/errors/e_bufaccess.pyx
@@ -12,8 +12,8 @@ def f():
cdef object[int, 2, well] buf6
_ERRORS = u"""
-1:11: Buffer types only allowed as function local variables
-3:15: Buffer types only allowed as function local variables
+1:12: Buffer types only allowed as function local variables
+3:16: Buffer types only allowed as function local variables
6:27: "fakeoption" is not a buffer option
"""
#TODO:
diff --git a/tests/errors/memview_declarations.pyx b/tests/errors/memview_declarations.pyx
new file mode 100644
index 000000000..168da0816
--- /dev/null
+++ b/tests/errors/memview_declarations.pyx
@@ -0,0 +1,47 @@
+cimport cython
+from cython.view cimport contig as foo, full as bar, follow
+from cython cimport view
+
+biz = cython.view.contig
+foz = cython.view.full
+
+adict = {'view': cython.view}
+alist = [adict]
+
+cdef signed short[::1, ::1] both
+cdef signed short[::1, :, :, ::1] both2
+cdef signed char[::2] err0
+cdef signed char[::-100] err1
+cdef signed char[::-1] err2
+cdef long long[01::1, 0x01:, '0' :, False:] fort_contig0
+cdef signed char[1::] bad_start
+cdef unsigned long[:,:1] bad_stop
+cdef unsigned long[:,::1,:] neither_c_or_f
+cdef signed char[::1, ::view.follow & view.direct] bad_f_contig
+cdef signed char[::1, ::view.follow] bad_f_contig2
+cdef signed char[::view.contig | view.direct] not_ampersand
+cdef signed char[::view.ptr & view.direct] no_access_spec
+cdef signed char[::1-1+1] expr_spec
+cdef signed char[::blargh] bad_name
+cdef double[::alist[0]['view'].full] expr_attribute
+cdef double[::view.ptr & view.follow] no_single_follow
+
+_ERRORS = u'''
+11:25: Cannot specify an array that is both C and Fortran contiguous.
+12:31: Cannot specify an array that is both C and Fortran contiguous.
+13:19: Only the value 1 (one) or valid axis specification allowed in the step slot.
+14:20: Only the value 1 (one) or valid axis specification allowed in the step slot.
+15:20: Only the value 1 (one) or valid axis specification allowed in the step slot.
+16:17: there must be nothing or the value 0 (zero) in the start slot.
+17:18: there must be nothing or the value 0 (zero) in the start slot.
+18:22: Axis specification only allowed in the 'stop' slot.
+19:23: The value 1 (one) may appear in the first or last axis specification only.
+20:36: Invalid axis specification for a C/Fortran contiguous array.
+21:28: Invalid axis specification for a C/Fortran contiguous array.
+22:31: Invalid operator, only an ampersand '&' is allowed.
+23:28: Invalid axis specification.
+24:22: Invalid axis specification.
+25:25: Invalid axis specification.
+26:22: no expressions allowed in axis spec, only names (e.g. cython.view.contig).
+27:12: Invalid use of the follow specifier.
+'''
diff --git a/tests/run/cymemoryview.pyx b/tests/run/cymemoryview.pyx
new file mode 100644
index 000000000..67f36d0f6
--- /dev/null
+++ b/tests/run/cymemoryview.pyx
@@ -0,0 +1,15 @@
+u'''
+>>> f()
+>>> g()
+'''
+
+# from cython.view cimport memoryview
+from cython cimport array, PyBUF_C_CONTIGUOUS
+
+def f():
+ pass
+ # cdef array arr = array(shape=(10,10), itemsize=sizeof(int), format='i')
+ # cdef memoryview mv = memoryview(arr, PyBUF_C_CONTIGUOUS)
+def g():
+ # cdef int[::1] mview = array((10,), itemsize=sizeof(int), format='i')
+ cdef int[::1] mview = array((10,), itemsize=sizeof(int), format='i')
diff --git a/tests/run/cythonarray.pyx b/tests/run/cythonarray.pyx
new file mode 100644
index 000000000..9cea262bd
--- /dev/null
+++ b/tests/run/cythonarray.pyx
@@ -0,0 +1,58 @@
+__test__ = {}
+
+def testcase(func):
+ __test__[func.__name__] = func.__doc__
+ return func
+
+# from cython cimport array
+# cimport cython.array as array
+cimport cython as cy
+# array = cython.array
+
+@testcase
+def contiguity():
+ u'''
+ >>> contiguity()
+ 12 4
+ 2 3
+ 2
+ 4 8
+ 2 3
+ 2
+ '''
+ cdef cy.array cvarray = cy.array(shape=(2,3), itemsize=sizeof(int), format="i", mode='c')
+ assert cvarray.len == 2*3*sizeof(int)
+ assert cvarray.itemsize == sizeof(int)
+ print cvarray.strides[0], cvarray.strides[1]
+ print cvarray.shape[0], cvarray.shape[1]
+ print cvarray.ndim
+ cdef cy.array farray = cy.array(shape=(2,3), itemsize=sizeof(int), format="i", mode='fortran')
+ assert farray.len == 2*3*sizeof(int)
+ assert farray.itemsize == sizeof(int)
+ print farray.strides[0], farray.strides[1]
+ print farray.shape[0], farray.shape[1]
+ print farray.ndim
+
+@testcase
+def acquire():
+ u'''
+ >>> acquire()
+ '''
+ cdef object[int, ndim=1, mode="c"] buf1d = \
+ cy.array(shape=(10,), itemsize=sizeof(int), format='i', mode='c')
+ cdef object[int, ndim=2, mode="c"] buf2d = \
+ cy.array(shape=(10,10), itemsize=sizeof(int), format='i')
+ cdef object[unsigned long, ndim=3, mode='fortran'] buf3d = \
+ cy.array(shape=(1,2,3), itemsize=sizeof(unsigned long), format='L', mode='fortran')
+ cdef object[long double, ndim=3, mode='fortran'] bufld = \
+ cy.array(shape=(1,2,3), itemsize=sizeof(long double), format='g', mode='fortran')
+
+@testcase
+def full_or_strided():
+ u'''
+ >>> full_or_strided()
+ '''
+ cdef object[float, ndim=2, mode='full'] fullbuf = \
+ cy.array(shape=(10,10), itemsize=sizeof(float), format='f', mode='c')
+ cdef object[long long int, ndim=3, mode='strided'] stridedbuf = \
+ cy.array(shape=(1,2,3), itemsize=sizeof(long long int), format='q', mode='fortran')
diff --git a/tests/run/cythonscope.pyx b/tests/run/cythonscope.pyx
new file mode 100644
index 000000000..4824ea33b
--- /dev/null
+++ b/tests/run/cythonscope.pyx
@@ -0,0 +1,34 @@
+"""
+>>> f()
+hello from cython scope, value=4
+hello from cython.view scope, value=4
+hello from cython scope, value=3
+hello from cython.view scope, value=3
+>>> viewobjs()
+<strided axis packing mode>
+<contig axis packing mode>
+<follow axis packing mode>
+<direct axis access mode>
+<ptr axis access mode>
+<full axis access mode>
+"""
+
+cimport cython
+
+from cython cimport _testscope as tester
+from cython.view cimport _testscope as viewtester
+
+
+def f():
+ print cython._testscope(4)
+ print cython.view._testscope(4)
+ print tester(3)
+ print viewtester(3)
+
+def viewobjs():
+ print cython.view.strided
+ print cython.view.contig
+ print cython.view.follow
+ print cython.view.direct
+ print cython.view.ptr
+ print cython.view.full
diff --git a/tests/run/memoryview.pyx b/tests/run/memoryview.pyx
new file mode 100644
index 000000000..c2f809028
--- /dev/null
+++ b/tests/run/memoryview.pyx
@@ -0,0 +1,77 @@
+u'''
+>>> f()
+>>> g()
+>>> call()
+>>> assignmvs()
+'''
+
+from cython.view cimport memoryview
+from cython cimport array, PyBUF_C_CONTIGUOUS
+
+def init_obj():
+ return 3
+
+cdef passmvs(float[:,::1] mvs, object foo):
+ mvs = array((10,10), itemsize=sizeof(float), format='f')
+ foo = init_obj()
+
+cdef object returnobj():
+ cdef obj = object()
+ return obj
+
+cdef float[::1] returnmvs_inner():
+ return array((10,), itemsize=sizeof(float), format='f')
+
+cdef float[::1] returnmvs():
+ cdef float[::1] mvs = returnmvs_inner()
+ return mvs
+
+def f():
+ cdef array arr = array(shape=(10,10), itemsize=sizeof(int), format='i')
+ cdef memoryview mv = memoryview(arr, PyBUF_C_CONTIGUOUS)
+
+def g():
+ cdef object obj = init_obj()
+ cdef int[::1] mview = array((10,), itemsize=sizeof(int), format='i')
+ obj = init_obj()
+ mview = array((10,), itemsize=sizeof(int), format='i')
+
+cdef class Foo:
+ cdef int[::1] mview
+
+ def __init__(self):
+ self.mview = array((10,), itemsize=sizeof(int), format='i')
+ self.mview = array((10,), itemsize=sizeof(int), format='i')
+
+class pyfoo:
+
+ def __init__(self):
+ self.mview = array((10,), itemsize=sizeof(long), format='l')
+
+cdef cdg():
+ cdef double[::1] dmv = array((10,), itemsize=sizeof(double), format='d')
+ dmv = array((10,), itemsize=sizeof(double), format='d')
+
+cdef float[:,::1] global_mv = array((10,10), itemsize=sizeof(float), format='f')
+global_mv = array((10,10), itemsize=sizeof(float), format='f')
+cdef object global_obj
+
+def assignmvs():
+ cdef int[::1] mv1, mv2
+ cdef int[:] mv3
+ mv1 = array((10,), itemsize=sizeof(int), format='i')
+ mv2 = mv1
+ mv1 = mv2
+ mv3 = mv2
+
+def call():
+ global global_mv
+ passmvs(global_mv, global_obj)
+ global_mv = array((3,3), itemsize=sizeof(float), format='f')
+ cdef float[::1] getmvs = returnmvs()
+ returnmvs()
+ cdef object obj = returnobj()
+ cdg()
+ f = Foo()
+ pf = pyfoo()
+
diff --git a/tests/run/numpy_memoryviewattrs.pyx b/tests/run/numpy_memoryviewattrs.pyx
new file mode 100644
index 000000000..1163d3585
--- /dev/null
+++ b/tests/run/numpy_memoryviewattrs.pyx
@@ -0,0 +1,286 @@
+
+cimport cython
+from cython cimport array
+
+import numpy as np
+cimport numpy as np
+
+
+__test__ = {}
+
+def testcase(func):
+ __test__[func.__name__] = func.__doc__
+ return func
+
+
+@testcase
+def test_shape_stride_suboffset():
+ '''
+ >>> test_shape_stride_suboffset()
+ 5 7 11
+ 616 88 8
+ -1 -1 -1
+ 5 7 11
+ 8 40 280
+ -1 -1 -1
+ 5 7 11
+ 616 88 8
+ -1 -1 -1
+ '''
+ cdef unsigned long[:,:,:] larr = array((5,7,11), sizeof(unsigned long), 'L')
+ print larr.shape[0], larr.shape[1], larr.shape[2]
+ print larr.strides[0], larr.strides[1], larr.strides[2]
+ print larr.suboffsets[0], larr.suboffsets[1], larr.suboffsets[2]
+ larr = array((5,7,11), sizeof(unsigned long), 'L', mode='fortran')
+ print larr.shape[0], larr.shape[1], larr.shape[2]
+ print larr.strides[0], larr.strides[1], larr.strides[2]
+ print larr.suboffsets[0], larr.suboffsets[1], larr.suboffsets[2]
+ cdef unsigned long[:,:,:] c_contig = larr.copy()
+ print c_contig.shape[0], c_contig.shape[1], c_contig.shape[2]
+ print c_contig.strides[0], c_contig.strides[1], c_contig.strides[2]
+ print c_contig.suboffsets[0], c_contig.suboffsets[1], c_contig.suboffsets[2]
+
+@testcase
+def test_copy_to():
+ u'''
+ >>> test_copy_to()
+ 0 1 2 3 4 5 6 7
+ 0 1 2 3 4 5 6 7
+ 0 1 2 3 4 5 6 7
+ '''
+ cdef int[:,:,:] from_mvs, to_mvs
+ from_mvs = np.arange(8, dtype=np.int32).reshape(2,2,2)
+ cdef int *from_dta = <int*>from_mvs._data
+ for i in range(2*2*2):
+ print from_dta[i],
+ print
+ # for i in range(2*2*2):
+ # from_dta[i] = i
+
+ to_mvs = array((2,2,2), sizeof(int), 'i')
+ to_mvs[...] = from_mvs
+ cdef int *to_data = <int*>to_mvs._data
+ for i in range(2*2*2):
+ print from_dta[i],
+ print
+ for i in range(2*2*2):
+ print to_data[i],
+ print
+
+@testcase
+@cython.nonecheck(True)
+def test_nonecheck1():
+ u'''
+ >>> test_nonecheck1()
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'NoneType' object has no attribute 'is_c_contig'
+ '''
+ cdef int[:,:,:] uninitialized
+ print uninitialized.is_c_contig()
+
+@testcase
+@cython.nonecheck(True)
+def test_nonecheck2():
+ u'''
+ >>> test_nonecheck2()
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'NoneType' object has no attribute 'is_f_contig'
+ '''
+ cdef int[:,:,:] uninitialized
+ print uninitialized.is_f_contig()
+
+@testcase
+@cython.nonecheck(True)
+def test_nonecheck3():
+ u'''
+ >>> test_nonecheck3()
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'NoneType' object has no attribute 'copy'
+ '''
+ cdef int[:,:,:] uninitialized
+ uninitialized.copy()
+
+@testcase
+@cython.nonecheck(True)
+def test_nonecheck4():
+ u'''
+ >>> test_nonecheck4()
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'NoneType' object has no attribute 'copy_fortran'
+ '''
+ cdef int[:,:,:] uninitialized
+ uninitialized.copy_fortran()
+
+@testcase
+@cython.nonecheck(True)
+def test_nonecheck5():
+ u'''
+ >>> test_nonecheck5()
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'NoneType' object has no attribute '_data'
+ '''
+ cdef int[:,:,:] uninitialized
+ uninitialized._data
+
+@testcase
+def test_copy_mismatch():
+ u'''
+ >>> test_copy_mismatch()
+ Traceback (most recent call last):
+ ...
+ ValueError: memoryview shapes not the same in dimension 0
+ '''
+ cdef int[:,:,::1] mv1 = array((2,2,3), sizeof(int), 'i')
+ cdef int[:,:,::1] mv2 = array((1,2,3), sizeof(int), 'i')
+
+ mv1[...] = mv2
+
+@testcase
+def test_is_contiguous():
+ u'''
+ >>> test_is_contiguous()
+ 1 1
+ 0 1
+ 1 0
+ 1 0
+ <BLANKLINE>
+ 0 1
+ 1 0
+'''
+ cdef int[::1, :, :] fort_contig = array((1,1,1), sizeof(int), 'i', mode='fortran')
+ print fort_contig.is_c_contig() , fort_contig.is_f_contig()
+ fort_contig = array((200,100,100), sizeof(int), 'i', mode='fortran')
+ print fort_contig.is_c_contig(), fort_contig.is_f_contig()
+ fort_contig = fort_contig.copy()
+ print fort_contig.is_c_contig(), fort_contig.is_f_contig()
+ cdef int[:,:,:] strided = fort_contig
+ print strided.is_c_contig(), strided.is_f_contig()
+ print
+ fort_contig = fort_contig.copy_fortran()
+ print fort_contig.is_c_contig(), fort_contig.is_f_contig()
+ print strided.is_c_contig(), strided.is_f_contig()
+
+
+@testcase
+def call():
+ u'''
+ >>> call()
+ 1000 2000 3000
+ 1000
+ 2000 3000
+ 3000
+ 1 1 1000
+ '''
+ cdef int[::1] mv1, mv2, mv3
+ cdef array arr = array((3,), sizeof(int), 'i')
+ mv1 = arr
+ cdef int *data
+ data = <int*>arr.data
+ data[0] = 1000
+ data[1] = 2000
+ data[2] = 3000
+
+ print (<int*>mv1._data)[0] , (<int*>mv1._data)[1] , (<int*>mv1._data)[2]
+
+ mv2 = mv1.copy()
+
+ print (<int*>mv2._data)[0]
+
+
+ print (<int*>mv2._data)[1] , (<int*>mv2._data)[2]
+
+ mv3 = mv2
+
+ cdef int *mv3_data = <int*>mv3._data
+
+ print (<int*>mv1._data)[2]
+
+ mv3_data[0] = 1
+
+ print (<int*>mv3._data)[0] , (<int*>mv2._data)[0] , (<int*>mv1._data)[0]
+
+@testcase
+def two_dee():
+ u'''
+ >>> two_dee()
+ 1 2 3 4
+ -4 -4
+ 1 2 3 -4
+ 1 2 3 -4
+ '''
+ cdef long[:,::1] mv1, mv2, mv3
+ cdef array arr = array((2,2), sizeof(long), 'l')
+
+ cdef long *arr_data
+ arr_data = <long*>arr.data
+
+ mv1 = arr
+
+ arr_data[0] = 1
+ arr_data[1] = 2
+ arr_data[2] = 3
+ arr_data[3] = 4
+
+ print (<long*>mv1._data)[0] , (<long*>mv1._data)[1] , (<long*>mv1._data)[2] , (<long*>mv1._data)[3]
+
+ mv2 = mv1
+
+ arr_data = <long*>mv2._data
+
+ arr_data[3] = -4
+
+ print (<long*>mv2._data)[3] , (<long*>mv1._data)[3]
+
+ mv3 = mv2.copy()
+
+ print (<long*>mv2._data)[0] , (<long*>mv2._data)[1] , (<long*>mv2._data)[2] , (<long*>mv2._data)[3]
+
+ print (<long*>mv3._data)[0] , (<long*>mv3._data)[1] , (<long*>mv3._data)[2] , (<long*>mv3._data)[3]
+
+@testcase
+def fort_two_dee():
+ u'''
+ >>> fort_two_dee()
+ 1 2 3 4
+ -4 -4
+ 1 2 3 -4
+ 1 3 2 -4
+ 1 2 3 -4
+ '''
+ cdef array arr = array((2,2), sizeof(long), 'l', mode='fortran')
+ cdef long[::1,:] mv1, mv2, mv3
+
+ cdef long *arr_data
+ arr_data = <long*>arr.data
+
+ mv1 = arr
+
+ arr_data[0] = 1
+ arr_data[1] = 2
+ arr_data[2] = 3
+ arr_data[3] = 4
+
+ print (<long*>mv1._data)[0], (<long*>mv1._data)[1], (<long*>mv1._data)[2], (<long*>mv1._data)[3]
+
+ mv2 = mv1
+
+ arr_data = <long*>mv2._data
+
+ arr_data[3] = -4
+
+ print (<long*>mv2._data)[3], (<long*>mv1._data)[3]
+
+ mv3 = mv2.copy()
+
+ print (<long*>mv2._data)[0], (<long*>mv2._data)[1], (<long*>mv2._data)[2], (<long*>mv2._data)[3]
+
+ print (<long*>mv3._data)[0], (<long*>mv3._data)[1], (<long*>mv3._data)[2], (<long*>mv3._data)[3]
+
+ mv3 = mv3.copy_fortran()
+
+ print (<long*>mv3._data)[0], (<long*>mv3._data)[1], (<long*>mv3._data)[2], (<long*>mv3._data)[3]