path: root/lib/sqlalchemy/engine/base.py
author    Gaëtan de Menten <gdementen@gmail.com>  2010-02-13 22:53:39 +0000
committer Gaëtan de Menten <gdementen@gmail.com>  2010-02-13 22:53:39 +0000
commit  165609a190665f5453417c9c935a834714c7f5a5 (patch)
tree    90d3d0da3f233cf6fc211f367eea0dba661b098e /lib/sqlalchemy/engine/base.py
parent  f2974ef3993e02646a2dfade5feb74afb78f370f (diff)
- Added an optional C extension to speed up the sql layer by
  reimplementing the highest impact functions. The actual speedups will
  depend heavily on your DBAPI and the mix of datatypes used in your
  tables, and can vary from a 50% improvement to more than 200%. It also
  provides a modest (~20%) indirect improvement to ORM speed for large
  queries. Note that it is *not* built/installed by default. See README
  for installation instructions.
- The most common result processor conversion functions were moved to the
  new "processors" module. Dialect authors are encouraged to use those
  functions whenever they correspond to their needs instead of
  implementing custom ones.
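A result processor in this scheme is simply a unary conversion callable, or None when the raw DBAPI value can be used as-is. The exact contents of the new "processors" module are not shown in this diff, so the following is only an illustrative sketch of the idea, with a hypothetical function name:

    # Illustrative sketch only: a result processor takes the raw value
    # returned by the DBAPI and returns the converted Python value.
    import datetime

    def str_to_date(value):  # hypothetical processor
        if value is None:
            return None
        return datetime.date(*[int(part) for part in value.split('-')])

    assert str_to_date('2010-02-13') == datetime.date(2010, 2, 13)
    assert str_to_date(None) is None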
Diffstat (limited to 'lib/sqlalchemy/engine/base.py')
-rw-r--r--  lib/sqlalchemy/engine/base.py | 382
1 file changed, 225 insertions(+), 157 deletions(-)
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py
index 844183628..4dc9665c0 100644
--- a/lib/sqlalchemy/engine/base.py
+++ b/lib/sqlalchemy/engine/base.py
@@ -20,6 +20,7 @@ __all__ = [
'connection_memoize']
import inspect, StringIO, sys, operator
+from itertools import izip
from sqlalchemy import exc, schema, util, types, log
from sqlalchemy.sql import expression
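izip is the lazy Python 2 variant of zip: it pairs items without building an intermediate list, which matters when every fetched row is iterated. A quick sketch (Python 2; the built-in zip is already lazy in Python 3):

    from itertools import izip

    processors = [None, int, None]
    row = ('a', '42', 'c')
    converted = [v if p is None else p(v) for p, v in izip(processors, row)]
    assert converted == ['a', 42, 'c']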
@@ -1536,16 +1537,20 @@ class Engine(Connectable):
def _proxy_connection_cls(cls, proxy):
class ProxyConnection(cls):
def execute(self, object, *multiparams, **params):
- return proxy.execute(self, super(ProxyConnection, self).execute, object, *multiparams, **params)
+ return proxy.execute(self, super(ProxyConnection, self).execute,
+ object, *multiparams, **params)
def _execute_clauseelement(self, elem, multiparams=None, params=None):
- return proxy.execute(self, super(ProxyConnection, self).execute, elem, *(multiparams or []), **(params or {}))
+ return proxy.execute(self, super(ProxyConnection, self).execute,
+ elem, *(multiparams or []), **(params or {}))
def _cursor_execute(self, cursor, statement, parameters, context=None):
- return proxy.cursor_execute(super(ProxyConnection, self)._cursor_execute, cursor, statement, parameters, context, False)
+ return proxy.cursor_execute(super(ProxyConnection, self)._cursor_execute,
+ cursor, statement, parameters, context, False)
def _cursor_executemany(self, cursor, statement, parameters, context=None):
- return proxy.cursor_execute(super(ProxyConnection, self)._cursor_executemany, cursor, statement, parameters, context, True)
+ return proxy.cursor_execute(super(ProxyConnection, self)._cursor_executemany,
+ cursor, statement, parameters, context, True)
def _begin_impl(self):
return proxy.begin(self, super(ProxyConnection, self)._begin_impl)
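The technique above is a dynamically created subclass: each overridden method hands its bound super() implementation to the proxy, which may run code around (or instead of) the real call. A minimal, self-contained sketch of the same pattern, with hypothetical names:

    # Minimal sketch of the dynamic proxy-subclass pattern (hypothetical
    # names; not the actual SQLAlchemy API surface).
    def make_proxy_cls(cls, proxy):
        class Proxied(cls):
            def execute(self, statement):
                # forward the real implementation as a plain callable
                return proxy.execute(self, super(Proxied, self).execute,
                                     statement)
        return Proxied

    class Conn(object):
        def execute(self, statement):
            return 'executed: %s' % statement

    class EchoProxy(object):
        def execute(self, conn, execute, statement):
            print('about to run: %s' % statement)
            return execute(statement)

    conn = make_proxy_cls(Conn, EchoProxy())()
    assert conn.execute('SELECT 1') == 'executed: SELECT 1'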
@@ -1560,27 +1565,125 @@ def _proxy_connection_cls(cls, proxy):
return proxy.savepoint(self, super(ProxyConnection, self)._savepoint_impl, name=name)
def _rollback_to_savepoint_impl(self, name, context):
- return proxy.rollback_savepoint(self, super(ProxyConnection, self)._rollback_to_savepoint_impl, name, context)
+ return proxy.rollback_savepoint(self,
+ super(ProxyConnection, self)._rollback_to_savepoint_impl,
+ name, context)
def _release_savepoint_impl(self, name, context):
- return proxy.release_savepoint(self, super(ProxyConnection, self)._release_savepoint_impl, name, context)
+ return proxy.release_savepoint(self,
+ super(ProxyConnection, self)._release_savepoint_impl,
+ name, context)
def _begin_twophase_impl(self, xid):
- return proxy.begin_twophase(self, super(ProxyConnection, self)._begin_twophase_impl, xid)
+ return proxy.begin_twophase(self,
+ super(ProxyConnection, self)._begin_twophase_impl, xid)
def _prepare_twophase_impl(self, xid):
- return proxy.prepare_twophase(self, super(ProxyConnection, self)._prepare_twophase_impl, xid)
+ return proxy.prepare_twophase(self,
+ super(ProxyConnection, self)._prepare_twophase_impl, xid)
def _rollback_twophase_impl(self, xid, is_prepared):
- return proxy.rollback_twophase(self, super(ProxyConnection, self)._rollback_twophase_impl, xid, is_prepared)
+ return proxy.rollback_twophase(self,
+ super(ProxyConnection, self)._rollback_twophase_impl,
+ xid, is_prepared)
def _commit_twophase_impl(self, xid, is_prepared):
- return proxy.commit_twophase(self, super(ProxyConnection, self)._commit_twophase_impl, xid, is_prepared)
+ return proxy.commit_twophase(self,
+ super(ProxyConnection, self)._commit_twophase_impl,
+ xid, is_prepared)
return ProxyConnection
+# This reconstructor is necessary so that pickles with the C extension or
+# without use the same Binary format.
+# We need a different reconstructor on the C extension so that we can
+# add extra checks that fields have correctly been initialized by
+# __setstate__.
+try:
+ from sqlalchemy.cresultproxy import rowproxy_reconstructor
+
+ # this is a hack so that the reconstructor function is pickled with the
+ # same name as without the C extension.
+ # BUG: It fails for me if I run the "python" interpreter and
+ # then say "import sqlalchemy":
+ # TypeError: 'builtin_function_or_method' object has only read-only attributes (assign to .__module__)
+ # However, if I run the tests with nosetests, it succeeds!
+ # I've verified with pdb etc. that this is the case.
+ #rowproxy_reconstructor.__module__ = 'sqlalchemy.engine.base'
+
+except ImportError:
+ def rowproxy_reconstructor(cls, state):
+ obj = cls.__new__(cls)
+ obj.__setstate__(state)
+ return obj
+
+try:
+ from sqlalchemy.cresultproxy import BaseRowProxy
+except ImportError:
+ class BaseRowProxy(object):
+ __slots__ = ('_parent', '_row', '_processors', '_keymap')
+
+ def __init__(self, parent, row, processors, keymap):
+ """RowProxy objects are constructed by ResultProxy objects."""
+
+ self._parent = parent
+ self._row = row
+ self._processors = processors
+ self._keymap = keymap
+
+ def __reduce__(self):
+ return (rowproxy_reconstructor,
+ (self.__class__, self.__getstate__()))
+
+ def values(self):
+ """Return the values represented by this RowProxy as a list."""
+ return list(self)
+
+ def __iter__(self):
+ for processor, value in izip(self._processors, self._row):
+ if processor is None:
+ yield value
+ else:
+ yield processor(value)
+
+ def __len__(self):
+ return len(self._row)
-class RowProxy(object):
+ def __getitem__(self, key):
+ try:
+ processor, index = self._keymap[key]
+ except KeyError:
+ processor, index = self._parent._key_fallback(key)
+ except TypeError:
+ if isinstance(key, slice):
+ l = []
+ for processor, value in izip(self._processors[key],
+ self._row[key]):
+ if processor is None:
+ l.append(value)
+ else:
+ l.append(processor(value))
+ return tuple(l)
+ else:
+ raise
+ if index is None:
+ raise exc.InvalidRequestError(
+ "Ambiguous column name '%s' in result set! "
+ "try 'use_labels' option on select statement." % key)
+ if processor is not None:
+ return processor(self._row[index])
+ else:
+ return self._row[index]
+
+ def __getattr__(self, name):
+ try:
+ # TODO: no test coverage here
+ return self[name]
+ except KeyError, e:
+ raise AttributeError(e.args[0])
+
+
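The pure-Python BaseRowProxy above is essentially a thin tuple wrapper that applies per-column processors on access. Its core iteration logic, reduced to a standalone sketch (plain zip stands in for izip so it runs anywhere):

    # Sketch of BaseRowProxy-style lazy conversion during iteration.
    raw_row = ('1', 'alice', None)
    processors = [int, None, None]      # None means no conversion

    def iter_row(row, processors):
        for proc, value in zip(processors, row):
            yield value if proc is None else proc(value)

    assert list(iter_row(raw_row, processors)) == [1, 'alice', None]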
+class RowProxy(BaseRowProxy):
"""Proxy values from a single cursor row.
Mostly follows "ordered dictionary" behavior, mapping result
@@ -1589,38 +1692,22 @@ class RowProxy(object):
mapped to the original Columns that produced this result set (for
results that correspond to constructed SQL expressions).
"""
+ __slots__ = ()
- __slots__ = ['__parent', '__row', '__colfuncs']
-
- def __init__(self, parent, row):
-
- self.__parent = parent
- self.__row = row
- self.__colfuncs = parent._colfuncs
- if self.__parent._echo:
- self.__parent.logger.debug("Row %r", row)
-
def __contains__(self, key):
- return self.__parent._has_key(self.__row, key)
+ return self._parent._has_key(self._row, key)
- def __len__(self):
- return len(self.__row)
-
def __getstate__(self):
return {
- '__row':[self.__colfuncs[i][0](self.__row) for i in xrange(len(self.__row))],
- '__parent':self.__parent
+ '_parent': self._parent,
+ '_row': tuple(self)
}
-
- def __setstate__(self, d):
- self.__row = d['__row']
- self.__parent = d['__parent']
- self.__colfuncs = self.__parent._colfuncs
-
- def __iter__(self):
- row = self.__row
- for func in self.__parent._colfunc_list:
- yield func(row)
+
+ def __setstate__(self, state):
+ self._parent = parent = state['_parent']
+ self._row = state['_row']
+ self._processors = parent._processors
+ self._keymap = parent._keymap
__hash__ = None
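Note that pickling converts eagerly: __getstate__ stores tuple(self), i.e. the already-processed values, so an unpickled row needs no processors at all (the metadata's __setstate__ later in this diff installs a list of None processors for exactly that reason). The __reduce__ plus module-level reconstructor pair is the standard way to pickle a __slots__ class; a minimal standalone sketch:

    # Sketch of the __reduce__ + reconstructor pattern used above, which
    # keeps the pickle format identical with or without the C extension.
    import pickle

    def reconstructor(cls, state):
        obj = cls.__new__(cls)
        obj.__setstate__(state)
        return obj

    class Row(object):
        __slots__ = ('_values',)
        def __init__(self, values):
            self._values = values
        def __reduce__(self):
            return (reconstructor, (self.__class__, self.__getstate__()))
        def __getstate__(self):
            return {'_values': self._values}
        def __setstate__(self, state):
            self._values = state['_values']

    row = pickle.loads(pickle.dumps(Row((1, 2))))
    assert row._values == (1, 2)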
@@ -1636,33 +1723,7 @@ class RowProxy(object):
def has_key(self, key):
"""Return True if this RowProxy contains the given key."""
- return self.__parent._has_key(self.__row, key)
-
- def __getitem__(self, key):
- # the fallback and slices are only useful for __getitem__ anyway
- try:
- return self.__colfuncs[key][0](self.__row)
- except KeyError:
- k = self.__parent._key_fallback(key)
- if k is None:
- raise exc.NoSuchColumnError(
- "Could not locate column in row for column '%s'" % key)
- else:
- # save on KeyError + _key_fallback() lookup next time around
- self.__colfuncs[key] = k
- return k[0](self.__row)
- except TypeError:
- if isinstance(key, slice):
- return tuple(func(self.__row) for func in self.__parent._colfunc_list[key])
- else:
- raise
-
- def __getattr__(self, name):
- try:
- # TODO: no test coverage here
- return self[name]
- except KeyError, e:
- raise AttributeError(e.args[0])
+ return self._parent._has_key(self._row, key)
def items(self):
"""Return a list of tuples, each tuple containing a key/value pair."""
@@ -1672,24 +1733,25 @@ class RowProxy(object):
def keys(self):
"""Return the list of keys as strings represented by this RowProxy."""
- return self.__parent.keys
+ return self._parent.keys
def iterkeys(self):
- return iter(self.__parent.keys)
-
- def values(self):
- """Return the values represented by this RowProxy as a list."""
-
- return list(self)
+ return iter(self._parent.keys)
def itervalues(self):
return iter(self)
+
class ResultMetaData(object):
"""Handle cursor.description, applying additional info from an execution context."""
def __init__(self, parent, metadata):
- self._colfuncs = colfuncs = {}
+ self._processors = processors = []
+
+ # We do not strictly need to store the processor in the key mapping,
+ # though it is faster in the Python version (probably because of the
+ # saved attribute lookup self._processors)
+ self._keymap = keymap = {}
self.keys = []
self._echo = parent._echo
context = parent.context
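Once this constructor finishes, _keymap maps several kinds of keys to the same (processor, index) record: integer positions, lowercased column names, and (further down) the Column/expression objects themselves, while _processors is the positional list used when iterating a whole row. A sketch of the resulting shape for a hypothetical two-column result (id INTEGER, name VARCHAR):

    # Hypothetical _processors / _keymap contents for (id, name):
    processors = [int, None]        # one entry per column, by position
    keymap = {
        0: (int, 0),                # integer index as key
        1: (None, 1),
        'id': (int, 0),             # lowercased column name as key
        'name': (None, 1),
        # real usage also keys on the Column objects themselves
    }
    proc, idx = keymap['id']
    assert proc(('7', 'x')[idx]) == 7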
@@ -1720,29 +1782,25 @@ class ResultMetaData(object):
processor = type_.dialect_impl(dialect).\
result_processor(dialect, coltype)
- if processor:
- def make_colfunc(processor, index):
- def getcol(row):
- return processor(row[index])
- return getcol
- rec = (make_colfunc(processor, i), i, "colfunc")
- else:
- rec = (operator.itemgetter(i), i, "itemgetter")
+ processors.append(processor)
+ rec = (processor, i)
- # indexes as keys
- colfuncs[i] = rec
+ # indexes as keys. This is only needed for the Python version of
+ # RowProxy (the C version uses a faster path for integer indexes).
+ keymap[i] = rec
# Column names as keys
- if colfuncs.setdefault(name.lower(), rec) is not rec:
- #XXX: why not raise directly? because several columns colliding
- #by name is not a problem as long as the user don't use them (ie
- #use the more precise ColumnElement
- colfuncs[name.lower()] = (self._ambiguous_processor(name), i, "ambiguous")
-
+ if keymap.setdefault(name.lower(), rec) is not rec:
+ # We do not raise an exception directly because several
+ # columns colliding by name is not a problem as long as the
+ # user does not try to access them (ie use an index directly,
+ # or the more precise ColumnElement)
+ keymap[name.lower()] = (processor, None)
+
# store the "origname" if we truncated (sqlite only)
if origname and \
- colfuncs.setdefault(origname.lower(), rec) is not rec:
- colfuncs[origname.lower()] = (self._ambiguous_processor(origname), i, "ambiguous")
+ keymap.setdefault(origname.lower(), rec) is not rec:
+ keymap[origname.lower()] = (processor, None)
if dialect.requires_name_normalize:
colname = dialect.normalize_name(colname)
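Ambiguity is now represented by data rather than by a raising closure: a second column with the same name overwrites the record with (processor, None), and BaseRowProxy.__getitem__ earlier in this diff raises InvalidRequestError when it finds that index is None. A sketch of that behavior with minimal stand-ins (ValueError substitutes for the real exception):

    # Sketch: a duplicated name gets index None; by-name access fails,
    # by-position access still works.
    keymap = {0: (None, 0), 1: (None, 1), 'id': (None, None)}

    def getitem(keymap, row, key):
        proc, index = keymap[key]
        if index is None:
            raise ValueError("Ambiguous column name %r" % (key,))
        return row[index]

    assert getitem(keymap, (1, 2), 0) == 1
    try:
        getitem(keymap, (1, 2), 'id')
    except ValueError:
        pass  # expected: the name maps to two columns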
@@ -1750,76 +1808,67 @@ class ResultMetaData(object):
self.keys.append(colname)
if obj:
for o in obj:
- colfuncs[o] = rec
+ keymap[o] = rec
if self._echo:
self.logger = context.engine.logger
self.logger.debug(
"Col %r", tuple(x[0] for x in metadata))
- @util.memoized_property
- def _colfunc_list(self):
- funcs = self._colfuncs
- return [funcs[i][0] for i in xrange(len(self.keys))]
-
def _key_fallback(self, key):
- funcs = self._colfuncs
-
+ map = self._keymap
+ result = None
if isinstance(key, basestring):
- key = key.lower()
- if key in funcs:
- return funcs[key]
-
+ result = map.get(key.lower())
# fallback for targeting a ColumnElement to a textual expression
# this is a rare use case which only occurs when matching text()
- # constructs to ColumnElements
- if isinstance(key, expression.ColumnElement):
- if key._label and key._label.lower() in funcs:
- return funcs[key._label.lower()]
- elif hasattr(key, 'name') and key.name.lower() in funcs:
- return funcs[key.name.lower()]
-
- return None
+ # constructs to ColumnElements, and after a pickle/unpickle roundtrip
+ elif isinstance(key, expression.ColumnElement):
+ if key._label and key._label.lower() in map:
+ result = map[key._label.lower()]
+ elif hasattr(key, 'name') and key.name.lower() in map:
+ result = map[key.name.lower()]
+ if result is None:
+ raise exc.NoSuchColumnError(
+ "Could not locate column in row for column '%s'" % key)
+ else:
+ map[key] = result
+ return result
def _has_key(self, row, key):
- if key in self._colfuncs:
+ if key in self._keymap:
return True
else:
- key = self._key_fallback(key)
- return key is not None
+ try:
+ self._key_fallback(key)
+ return True
+ except exc.NoSuchColumnError:
+ return False
- @classmethod
- def _ambiguous_processor(cls, colname):
- def process(value):
- raise exc.InvalidRequestError(
- "Ambiguous column name '%s' in result set! "
- "try 'use_labels' option on select statement." % colname)
- return process
-
def __len__(self):
return len(self.keys)
def __getstate__(self):
return {
- '_pickled_colfuncs':dict(
- (key, (i, type_))
- for key, (fn, i, type_) in self._colfuncs.iteritems()
+ '_pickled_keymap': dict(
+ (key, index)
+ for key, (processor, index) in self._keymap.iteritems()
if isinstance(key, (basestring, int))
),
- 'keys':self.keys
+ 'keys': self.keys
}
def __setstate__(self, state):
- pickled_colfuncs = state['_pickled_colfuncs']
- self._colfuncs = d = {}
- for key, (index, type_) in pickled_colfuncs.iteritems():
- if type_ == 'ambiguous':
- d[key] = (self._ambiguous_processor(key), index, type_)
- else:
- d[key] = (operator.itemgetter(index), index, "itemgetter")
+ # the row has been processed at pickling time so we don't need any
+ # processor anymore
+ self._processors = [None for _ in xrange(len(state['keys']))]
+ self._keymap = keymap = {}
+ for key, index in state['_pickled_keymap'].iteritems():
+ keymap[key] = (None, index)
self.keys = state['keys']
self._echo = False
-
+
+
class ResultProxy(object):
"""Wraps a DB-API cursor object to provide easier access to row columns.
@@ -2031,13 +2080,27 @@ class ResultProxy(object):
def _fetchall_impl(self):
return self.cursor.fetchall()
+ def process_rows(self, rows):
+ process_row = self._process_row
+ metadata = self._metadata
+ keymap = metadata._keymap
+ processors = metadata._processors
+ if self._echo:
+ log = self.context.engine.logger.debug
+ l = []
+ for row in rows:
+ log("Row %r", row)
+ l.append(process_row(metadata, row, processors, keymap))
+ return l
+ else:
+ return [process_row(metadata, row, processors, keymap)
+ for row in rows]
+
def fetchall(self):
"""Fetch all rows, just like DB-API ``cursor.fetchall()``."""
try:
- process_row = self._process_row
- metadata = self._metadata
- l = [process_row(metadata, row) for row in self._fetchall_impl()]
+ l = self.process_rows(self._fetchall_impl())
self.close()
return l
except Exception, e:
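process_rows hoists every attribute lookup out of the per-row loop (the bound _process_row, the metadata object, its _keymap and _processors, the logger's debug method) and splits the echo branch so the common path pays nothing for logging; this is a standard CPython micro-optimization for hot loops. The technique in isolation:

    # Sketch: hoisting attribute lookups out of a hot loop.
    class Converter(object):
        def transform(self, row):
            return tuple(row)

    conv, rows = Converter(), [[1, 2], [3, 4]]
    transform = conv.transform                   # looked up once
    fast = [transform(r) for r in rows]
    slow = [conv.transform(r) for r in rows]     # looked up per row
    assert fast == slow == [(1, 2), (3, 4)]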
@@ -2053,9 +2116,7 @@ class ResultProxy(object):
"""
try:
- process_row = self._process_row
- metadata = self._metadata
- l = [process_row(metadata, row) for row in self._fetchmany_impl(size)]
+ l = self.process_rows(self._fetchmany_impl(size))
if len(l) == 0:
self.close()
return l
@@ -2074,7 +2135,7 @@ class ResultProxy(object):
try:
row = self._fetchone_impl()
if row is not None:
- return self._process_row(self._metadata, row)
+ return self.process_rows([row])[0]
else:
self.close()
return None
@@ -2096,13 +2157,12 @@ class ResultProxy(object):
try:
if row is not None:
- return self._process_row(self._metadata, row)
+ return self.process_rows([row])[0]
else:
return None
finally:
self.close()
-
def scalar(self):
"""Fetch the first column of the first row, and close the result set.
@@ -2210,9 +2270,18 @@ class FullyBufferedResultProxy(ResultProxy):
return ret
class BufferedColumnRow(RowProxy):
- def __init__(self, parent, row):
- row = [parent._orig_colfuncs[i][0](row) for i in xrange(len(row))]
- super(BufferedColumnRow, self).__init__(parent, row)
+ def __init__(self, parent, row, processors, keymap):
+ # preprocess row
+ row = list(row)
+ # this is a tad faster than using enumerate
+ index = 0
+ for processor in parent._orig_processors:
+ if processor is not None:
+ row[index] = processor(row[index])
+ index += 1
+ row = tuple(row)
+ super(BufferedColumnRow, self).__init__(parent, row,
+ processors, keymap)
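BufferedColumnRow runs the original processors once, eagerly, at construction time, so that "live" values such as cx_Oracle LOBs are read before they expire; afterwards the row behaves like any other RowProxy with no per-access conversion. Stripped of the class machinery, the preprocessing loop is just:

    # Sketch of the eager per-column preprocessing done above (the manual
    # index counter mirrors the small speed tweak noted in the comment).
    orig_processors = [str, None, int]
    row = list((1, 'x', '3'))
    index = 0
    for proc in orig_processors:
        if proc is not None:
            row[index] = proc(row[index])
        index += 1
    assert tuple(row) == ('1', 'x', 3)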
class BufferedColumnResultProxy(ResultProxy):
"""A ResultProxy with column buffering behavior.
@@ -2221,7 +2290,7 @@ class BufferedColumnResultProxy(ResultProxy):
fetchone() is called. If fetchmany() or fetchall() are called,
the full grid of results is fetched. This is to operate with
databases where result rows contain "live" results that fall out
- of scope unless explicitly fetched. Currently this includes
+ of scope unless explicitly fetched. Currently this includes
cx_Oracle LOB objects.
"""
@@ -2230,17 +2299,16 @@ class BufferedColumnResultProxy(ResultProxy):
def _init_metadata(self):
super(BufferedColumnResultProxy, self)._init_metadata()
- self._metadata._orig_colfuncs = self._metadata._colfuncs
- self._metadata._colfuncs = colfuncs = {}
- # replace the parent's _colfuncs dict, replacing
- # column processors with straight itemgetters.
- # the original _colfuncs dict is used when each row
- # is constructed.
- for k, (colfunc, index, type_) in self._metadata._orig_colfuncs.iteritems():
- if type_ == "colfunc":
- colfuncs[k] = (operator.itemgetter(index), index, "itemgetter")
- else:
- colfuncs[k] = (colfunc, index, type_)
+ metadata = self._metadata
+ # orig_processors will be used to preprocess each row when they are
+ # constructed.
+ metadata._orig_processors = metadata._processors
+ # replace all the type processors with None.
+ metadata._processors = [None for _ in xrange(len(metadata.keys))]
+ keymap = {}
+ for k, (func, index) in metadata._keymap.iteritems():
+ keymap[k] = (None, index)
+ self._metadata._keymap = keymap
def fetchall(self):
# can't call cursor.fetchall(), since rows must be