summaryrefslogtreecommitdiff
path: root/lib/sqlalchemy
diff options
context:
space:
mode:
Diffstat (limited to 'lib/sqlalchemy')
-rw-r--r--lib/sqlalchemy/cextension/resultproxy.c84
-rw-r--r--lib/sqlalchemy/dialects/mssql/base.py4
-rw-r--r--lib/sqlalchemy/dialects/oracle/cx_oracle.py31
-rw-r--r--lib/sqlalchemy/engine/__init__.py32
-rw-r--r--lib/sqlalchemy/engine/cursor.py1758
-rw-r--r--lib/sqlalchemy/engine/default.py36
-rw-r--r--lib/sqlalchemy/engine/result.py2298
-rw-r--r--lib/sqlalchemy/engine/row.py25
-rw-r--r--lib/sqlalchemy/exc.py23
-rw-r--r--lib/sqlalchemy/future/__init__.py1
-rw-r--r--lib/sqlalchemy/future/result.py305
-rw-r--r--lib/sqlalchemy/orm/exc.py10
-rw-r--r--lib/sqlalchemy/orm/loading.py4
-rw-r--r--lib/sqlalchemy/orm/mapper.py3
-rw-r--r--lib/sqlalchemy/orm/query.py2
-rw-r--r--lib/sqlalchemy/sql/compiler.py8
-rw-r--r--lib/sqlalchemy/testing/fixtures.py23
-rw-r--r--lib/sqlalchemy/testing/suite/test_insert.py24
18 files changed, 2752 insertions, 1919 deletions
diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c
index b105038bc..d5a6ea0c8 100644
--- a/lib/sqlalchemy/cextension/resultproxy.c
+++ b/lib/sqlalchemy/cextension/resultproxy.c
@@ -47,6 +47,10 @@ typedef struct {
PyObject *keymap;
} BaseRow;
+
+static PyObject *sqlalchemy_engine_row = NULL;
+static PyObject *sqlalchemy_engine_result = NULL;
+
/****************
* BaseRow *
****************/
@@ -103,7 +107,7 @@ BaseRow_init(BaseRow *self, PyObject *args, PyObject *kwds)
return -1;
num_values = PySequence_Length(values_fastseq);
- num_processors = PyList_Size(processors);
+ num_processors = PySequence_Size(processors);
if (num_values != num_processors) {
PyErr_Format(PyExc_RuntimeError,
"number of values in row (%d) differ from number of column "
@@ -172,12 +176,14 @@ BaseRow_reduce(PyObject *self)
if (state == NULL)
return NULL;
- module = PyImport_ImportModule("sqlalchemy.engine.result");
- if (module == NULL)
- return NULL;
+ if (sqlalchemy_engine_row == NULL) {
+ module = PyImport_ImportModule("sqlalchemy.engine.row");
+ if (module == NULL)
+ return NULL;
+ sqlalchemy_engine_row = module;
+ }
- reconstructor = PyObject_GetAttrString(module, "rowproxy_reconstructor");
- Py_DECREF(module);
+ reconstructor = PyObject_GetAttrString(sqlalchemy_engine_row, "rowproxy_reconstructor");
if (reconstructor == NULL) {
Py_DECREF(state);
return NULL;
@@ -193,6 +199,33 @@ BaseRow_reduce(PyObject *self)
return Py_BuildValue("(N(NN))", reconstructor, cls, state);
}
+static PyObject *
+BaseRow_filter_on_values(BaseRow *self, PyObject *filters)
+{
+ PyObject *module, *row_class, *new_obj;
+
+ if (sqlalchemy_engine_row == NULL) {
+ module = PyImport_ImportModule("sqlalchemy.engine.row");
+ if (module == NULL)
+ return NULL;
+ sqlalchemy_engine_row = module;
+ }
+
+ // TODO: do we want to get self.__class__ instead here? I'm not sure
+ // how to use METH_VARARGS and then also get the BaseRow struct
+ // at the same time
+ row_class = PyObject_GetAttrString(sqlalchemy_engine_row, "Row");
+
+ new_obj = PyObject_CallFunction(row_class, "OOOO", self->parent, filters, self->keymap, self->row);
+ Py_DECREF(row_class);
+ if (new_obj == NULL) {
+ return NULL;
+ }
+
+ return new_obj;
+
+}
+
static void
BaseRow_dealloc(BaseRow *self)
{
@@ -449,12 +482,14 @@ BaseRow_setparent(BaseRow *self, PyObject *value, void *closure)
return -1;
}
- module = PyImport_ImportModule("sqlalchemy.engine.result");
- if (module == NULL)
- return -1;
+ if (sqlalchemy_engine_result == NULL) {
+ module = PyImport_ImportModule("sqlalchemy.engine.result");
+ if (module == NULL)
+ return -1;
+ sqlalchemy_engine_result = module;
+ }
- cls = PyObject_GetAttrString(module, "ResultMetaData");
- Py_DECREF(module);
+ cls = PyObject_GetAttrString(sqlalchemy_engine_result, "ResultMetaData");
if (cls == NULL)
return -1;
@@ -557,6 +592,9 @@ static PyMethodDef BaseRow_methods[] = {
"implement mapping-like getitem as well as sequence getitem"},
{"_get_by_key_impl_mapping", (PyCFunction)BaseRow_subscript_mapping, METH_O,
"implement mapping-like getitem as well as sequence getitem"},
+ {"_filter_on_values", (PyCFunction)BaseRow_filter_on_values, METH_O,
+ "return a new Row with per-value filters applied to columns"},
+
{NULL} /* Sentinel */
};
@@ -681,14 +719,18 @@ tuplegetter_traverse(tuplegetterobject *tg, visitproc visit, void *arg)
static PyObject *
tuplegetter_call(tuplegetterobject *tg, PyObject *args, PyObject *kw)
{
- PyObject *row, *result;
+ PyObject *row_or_tuple, *result;
Py_ssize_t i, nitems=tg->nitems;
+ int has_row_method;
assert(PyTuple_CheckExact(args));
- // this is normally a BaseRow subclass but we are not doing
- // strict checking at the moment
- row = PyTuple_GET_ITEM(args, 0);
+ // this is a tuple, however if its a BaseRow subclass we want to
+ // call specific methods to bypass the pure python LegacyRow.__getitem__
+ // method for now
+ row_or_tuple = PyTuple_GET_ITEM(args, 0);
+
+ has_row_method = PyObject_HasAttrString(row_or_tuple, "_get_by_key_impl_mapping");
assert(PyTuple_Check(tg->item));
assert(PyTuple_GET_SIZE(tg->item) == nitems);
@@ -701,11 +743,13 @@ tuplegetter_call(tuplegetterobject *tg, PyObject *args, PyObject *kw)
PyObject *item, *val;
item = PyTuple_GET_ITEM(tg->item, i);
- val = PyObject_CallMethod(row, "_get_by_key_impl_mapping", "O", item);
+ if (has_row_method) {
+ val = PyObject_CallMethod(row_or_tuple, "_get_by_key_impl_mapping", "O", item);
+ }
+ else {
+ val = PyObject_GetItem(row_or_tuple, item);
+ }
- // generic itemgetter version; if BaseRow __getitem__ is implemented
- // in C directly then we can use that
- //val = PyObject_GetItem(row, item);
if (val == NULL) {
Py_DECREF(result);
return NULL;
@@ -756,7 +800,7 @@ and returns them as a tuple.\n");
static PyTypeObject tuplegetter_type = {
PyVarObject_HEAD_INIT(NULL, 0)
- "sqlalchemy.engine.util..tuplegetter", /* tp_name */
+ "sqlalchemy.engine.util.tuplegetter", /* tp_name */
sizeof(tuplegetterobject), /* tp_basicsize */
0, /* tp_itemsize */
/* methods */
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index dda445743..5618a67f9 100644
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ b/lib/sqlalchemy/dialects/mssql/base.py
@@ -704,9 +704,9 @@ from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
+from ...engine import cursor as _cursor
from ...engine import default
from ...engine import reflection
-from ...engine import result as _result
from ...sql import compiler
from ...sql import elements
from ...sql import expression
@@ -1525,7 +1525,7 @@ class MSExecutionContext(default.DefaultExecutionContext):
elif (
self.isinsert or self.isupdate or self.isdelete
) and self.compiled.returning:
- fbcr = _result.FullyBufferedCursorFetchStrategy
+ fbcr = _cursor.FullyBufferedCursorFetchStrategy
self._result_strategy = fbcr.create_from_buffer(
self.cursor, self.cursor.description, self.cursor.fetchall()
)
diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
index b555c4555..9e10679c6 100644
--- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py
+++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
@@ -299,7 +299,6 @@ SQLAlchemy type (or a subclass of such).
from __future__ import absolute_import
-import collections
import decimal
import random
import re
@@ -312,7 +311,7 @@ from ... import exc
from ... import processors
from ... import types as sqltypes
from ... import util
-from ...engine import result as _result
+from ...engine import cursor as _cursor
from ...util import compat
@@ -680,8 +679,13 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
for i in range(len(self.out_parameters))
]
- return ReturningResultStrategy(
- result, result.cursor, returning_params
+ return _cursor.FullyBufferedCursorFetchStrategy(
+ result.cursor,
+ [
+ (getattr(col, "name", col.anon_label), None)
+ for col in result.context.compiled.returning
+ ],
+ initial_buffer=[tuple(returning_params)],
)
else:
return super(
@@ -689,25 +693,6 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
).get_result_cursor_strategy(result)
-class ReturningResultStrategy(_result.FullyBufferedCursorFetchStrategy):
- __slots__ = ("_returning_params",)
-
- def __init__(self, result, dbapi_cursor, returning_params):
- self._returning_params = returning_params
-
- returning = result.context.compiled.returning
- cursor_description = [
- (getattr(col, "name", col.anon_label), None) for col in returning
- ]
-
- super(ReturningResultStrategy, self).__init__(
- dbapi_cursor, cursor_description
- )
-
- def _buffer_rows(self):
- return collections.deque([tuple(self._returning_params)])
-
-
class OracleDialect_cx_oracle(OracleDialect):
execution_ctx_cls = OracleExecutionContext_cx_oracle
statement_compiler = OracleCompiler_cx_oracle
diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py
index db5f9dee7..8419cf920 100644
--- a/lib/sqlalchemy/engine/__init__.py
+++ b/lib/sqlalchemy/engine/__init__.py
@@ -25,6 +25,13 @@ from .base import Transaction # noqa
from .base import TwoPhaseTransaction # noqa
from .create import create_engine
from .create import engine_from_config
+from .cursor import BaseCursorResult # noqa
+from .cursor import BufferedColumnResultProxy # noqa
+from .cursor import BufferedColumnRow # noqa
+from .cursor import BufferedRowResultProxy # noqa
+from .cursor import CursorResult # noqa
+from .cursor import FullyBufferedResultProxy # noqa
+from .cursor import LegacyCursorResult # noqa
from .interfaces import Compiled # noqa
from .interfaces import Connectable # noqa
from .interfaces import CreateEnginePlugin # noqa
@@ -33,29 +40,14 @@ from .interfaces import ExceptionContext # noqa
from .interfaces import ExecutionContext # noqa
from .interfaces import TypeCompiler # noqa
from .mock import create_mock_engine
-from .result import BaseResult # noqa
-from .result import BaseRow # noqa
-from .result import BufferedColumnResultProxy # noqa
-from .result import BufferedColumnRow # noqa
-from .result import BufferedRowResultProxy # noqa
-from .result import FullyBufferedResultProxy # noqa
-from .result import LegacyRow # noqa
+from .result import Result # noqa
from .result import result_tuple # noqa
-from .result import ResultProxy # noqa
-from .result import Row # noqa
-from .result import RowMapping # noqa
+from .row import BaseRow # noqa
+from .row import LegacyRow # noqa
+from .row import Row # noqa
+from .row import RowMapping # noqa
from .util import connection_memoize # noqa
from ..sql import ddl # noqa
__all__ = ("create_engine", "engine_from_config", "create_mock_engine")
-
-
-def __go(lcls):
- from .. import future
- from . import result
-
- result._future_Result = future.Result
-
-
-__go(locals())
diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py
new file mode 100644
index 000000000..55462f0bf
--- /dev/null
+++ b/lib/sqlalchemy/engine/cursor.py
@@ -0,0 +1,1758 @@
+# engine/cursor.py
+# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""Define cursor-specific result set constructs including
+:class:`.BaseCursorResult`, :class:`.CursorResult`."""
+
+
+import collections
+
+from .result import Result
+from .result import ResultMetaData
+from .result import SimpleResultMetaData
+from .result import tuplegetter
+from .row import _baserow_usecext
+from .row import LegacyRow
+from .. import exc
+from .. import util
+from ..sql import expression
+from ..sql import sqltypes
+from ..sql import util as sql_util
+from ..sql.base import _generative
+from ..sql.base import HasMemoized
+from ..sql.compiler import RM_NAME
+from ..sql.compiler import RM_OBJECTS
+from ..sql.compiler import RM_RENDERED_NAME
+from ..sql.compiler import RM_TYPE
+
+_UNPICKLED = util.symbol("unpickled")
+
+
+# metadata entry tuple indexes.
+# using raw tuple is faster than namedtuple.
+MD_INDEX = 0 # integer index in cursor.description
+MD_OBJECTS = 1 # other string keys and ColumnElement obj that can match
+MD_LOOKUP_KEY = 2 # string key we usually expect for key-based lookup
+MD_RENDERED_NAME = 3 # name that is usually in cursor.description
+MD_PROCESSOR = 4 # callable to process a result value into a row
+MD_UNTRANSLATED = 5 # raw name from cursor.description
+
+
+class CursorResultMetaData(ResultMetaData):
+ """Result metadata for DBAPI cursors."""
+
+ __slots__ = (
+ "_keymap",
+ "case_sensitive",
+ "_processors",
+ "_keys",
+ "_tuplefilter",
+ "_translated_indexes",
+ # don't need _unique_filters support here for now. Can be added
+ # if a need arises.
+ )
+
+ returns_rows = True
+
+ def _for_freeze(self):
+ return SimpleResultMetaData(
+ self._keys,
+ extra=[self._keymap[key][MD_OBJECTS] for key in self._keys],
+ )
+
+ def _reduce(self, keys):
+ recs = list(self._metadata_for_keys(keys))
+
+ indexes = [rec[MD_INDEX] for rec in recs]
+ new_keys = [rec[MD_LOOKUP_KEY] for rec in recs]
+
+ if self._translated_indexes:
+ indexes = [self._translated_indexes[idx] for idx in indexes]
+
+ tup = tuplegetter(*indexes)
+
+ new_metadata = self.__class__.__new__(self.__class__)
+ new_metadata.case_sensitive = self.case_sensitive
+ new_metadata._processors = self._processors
+ new_metadata._keys = new_keys
+ new_metadata._tuplefilter = tup
+ new_metadata._translated_indexes = indexes
+
+ new_recs = [
+ (index,) + rec[1:]
+ for index, rec in enumerate(self._metadata_for_keys(keys))
+ ]
+ new_metadata._keymap = {rec[MD_LOOKUP_KEY]: rec for rec in new_recs}
+ if not _baserow_usecext:
+ # TODO: can consider assembling ints + negative ints here
+ new_metadata._keymap.update(
+ {
+ index: (index, new_keys[index], ())
+ for index in range(len(new_keys))
+ }
+ )
+
+ new_metadata._keymap.update(
+ {e: new_rec for new_rec in new_recs for e in new_rec[MD_OBJECTS]}
+ )
+
+ return new_metadata
+
+ def _adapt_to_context(self, context):
+ """When using a cached result metadata against a new context,
+ we need to rewrite the _keymap so that it has the specific
+ Column objects in the new context inside of it. this accommodates
+ for select() constructs that contain anonymized columns and
+ are cached.
+
+ """
+ if not context.compiled._result_columns:
+ return self
+
+ compiled_statement = context.compiled.statement
+ invoked_statement = context.invoked_statement
+
+ # same statement was invoked as the one we cached against,
+ # return self
+ if compiled_statement is invoked_statement:
+ return self
+
+ # make a copy and add the columns from the invoked statement
+ # to the result map.
+ md = self.__class__.__new__(self.__class__)
+
+ md._keymap = self._keymap.copy()
+
+ # match up new columns positionally to the result columns
+ for existing, new in zip(
+ context.compiled._result_columns,
+ invoked_statement._exported_columns_iterator(),
+ ):
+ md._keymap[new] = md._keymap[existing[RM_NAME]]
+
+ md.case_sensitive = self.case_sensitive
+ md._processors = self._processors
+ assert not self._tuplefilter
+ md._tuplefilter = None
+ md._translated_indexes = None
+ md._keys = self._keys
+ return md
+
+ def __init__(self, parent, cursor_description):
+ context = parent.context
+ dialect = context.dialect
+ self._tuplefilter = None
+ self._translated_indexes = None
+ self.case_sensitive = dialect.case_sensitive
+
+ if context.result_column_struct:
+ (
+ result_columns,
+ cols_are_ordered,
+ textual_ordered,
+ loose_column_name_matching,
+ ) = context.result_column_struct
+ num_ctx_cols = len(result_columns)
+ else:
+ result_columns = (
+ cols_are_ordered
+ ) = (
+ num_ctx_cols
+ ) = loose_column_name_matching = textual_ordered = False
+
+ # merge cursor.description with the column info
+ # present in the compiled structure, if any
+ raw = self._merge_cursor_description(
+ context,
+ cursor_description,
+ result_columns,
+ num_ctx_cols,
+ cols_are_ordered,
+ textual_ordered,
+ loose_column_name_matching,
+ )
+
+ self._keymap = {}
+ if not _baserow_usecext:
+ # keymap indexes by integer index: this is only used
+ # in the pure Python BaseRow.__getitem__
+ # implementation to avoid an expensive
+ # isinstance(key, util.int_types) in the most common
+ # case path
+
+ len_raw = len(raw)
+
+ self._keymap.update(
+ [
+ (metadata_entry[MD_INDEX], metadata_entry)
+ for metadata_entry in raw
+ ]
+ + [
+ (metadata_entry[MD_INDEX] - len_raw, metadata_entry)
+ for metadata_entry in raw
+ ]
+ )
+
+ # processors in key order for certain per-row
+ # views like __iter__ and slices
+ self._processors = [
+ metadata_entry[MD_PROCESSOR] for metadata_entry in raw
+ ]
+
+ # keymap by primary string...
+ by_key = dict(
+ [
+ (metadata_entry[MD_LOOKUP_KEY], metadata_entry)
+ for metadata_entry in raw
+ ]
+ )
+
+ # for compiled SQL constructs, copy additional lookup keys into
+ # the key lookup map, such as Column objects, labels,
+ # column keys and other names
+ if num_ctx_cols:
+
+ # if by-primary-string dictionary smaller (or bigger?!) than
+ # number of columns, assume we have dupes, rewrite
+ # dupe records with "None" for index which results in
+ # ambiguous column exception when accessed.
+ if len(by_key) != num_ctx_cols:
+ # new in 1.4: get the complete set of all possible keys,
+ # strings, objects, whatever, that are dupes across two
+ # different records, first.
+ index_by_key = {}
+ dupes = set()
+ for metadata_entry in raw:
+ for key in (metadata_entry[MD_RENDERED_NAME],) + (
+ metadata_entry[MD_OBJECTS] or ()
+ ):
+ if not self.case_sensitive and isinstance(
+ key, util.string_types
+ ):
+ key = key.lower()
+ idx = metadata_entry[MD_INDEX]
+ # if this key has been associated with more than one
+ # positional index, it's a dupe
+ if index_by_key.setdefault(key, idx) != idx:
+ dupes.add(key)
+
+ # then put everything we have into the keymap excluding only
+ # those keys that are dupes.
+ self._keymap.update(
+ [
+ (obj_elem, metadata_entry)
+ for metadata_entry in raw
+ if metadata_entry[MD_OBJECTS]
+ for obj_elem in metadata_entry[MD_OBJECTS]
+ if obj_elem not in dupes
+ ]
+ )
+
+ # then for the dupe keys, put the "ambiguous column"
+ # record into by_key.
+ by_key.update({key: (None, (), key) for key in dupes})
+
+ else:
+ # no dupes - copy secondary elements from compiled
+ # columns into self._keymap
+ self._keymap.update(
+ [
+ (obj_elem, metadata_entry)
+ for metadata_entry in raw
+ if metadata_entry[MD_OBJECTS]
+ for obj_elem in metadata_entry[MD_OBJECTS]
+ ]
+ )
+
+ # update keymap with primary string names taking
+ # precedence
+ self._keymap.update(by_key)
+
+ # update keymap with "translated" names (sqlite-only thing)
+ if not num_ctx_cols and context._translate_colname:
+ self._keymap.update(
+ [
+ (
+ metadata_entry[MD_UNTRANSLATED],
+ self._keymap[metadata_entry[MD_LOOKUP_KEY]],
+ )
+ for metadata_entry in raw
+ if metadata_entry[MD_UNTRANSLATED]
+ ]
+ )
+
+ def _merge_cursor_description(
+ self,
+ context,
+ cursor_description,
+ result_columns,
+ num_ctx_cols,
+ cols_are_ordered,
+ textual_ordered,
+ loose_column_name_matching,
+ ):
+ """Merge a cursor.description with compiled result column information.
+
+ There are at least four separate strategies used here, selected
+ depending on the type of SQL construct used to start with.
+
+ The most common case is that of the compiled SQL expression construct,
+ which generated the column names present in the raw SQL string and
+ which has the identical number of columns as were reported by
+ cursor.description. In this case, we assume a 1-1 positional mapping
+ between the entries in cursor.description and the compiled object.
+ This is also the most performant case as we disregard extracting /
+ decoding the column names present in cursor.description since we
+ already have the desired name we generated in the compiled SQL
+ construct.
+
+ The next common case is that of the completely raw string SQL,
+ such as passed to connection.execute(). In this case we have no
+ compiled construct to work with, so we extract and decode the
+ names from cursor.description and index those as the primary
+ result row target keys.
+
+ The remaining fairly common case is that of the textual SQL
+ that includes at least partial column information; this is when
+ we use a :class:`_expression.TextualSelect` construct.
+ This construct may have
+ unordered or ordered column information. In the ordered case, we
+ merge the cursor.description and the compiled construct's information
+ positionally, and warn if there are additional description names
+ present, however we still decode the names in cursor.description
+ as we don't have a guarantee that the names in the columns match
+ on these. In the unordered case, we match names in cursor.description
+ to that of the compiled construct based on name matching.
+ In both of these cases, the cursor.description names and the column
+ expression objects and names are indexed as result row target keys.
+
+ The final case is much less common, where we have a compiled
+ non-textual SQL expression construct, but the number of columns
+ in cursor.description doesn't match what's in the compiled
+ construct. We make the guess here that there might be textual
+ column expressions in the compiled construct that themselves include
+ a comma in them causing them to split. We do the same name-matching
+ as with textual non-ordered columns.
+
+ The name-matched system of merging is the same as that used by
+ SQLAlchemy for all cases up through the 0.9 series. Positional
+ matching for compiled SQL expressions was introduced in 1.0 as a
+ major performance feature, and positional matching for textual
+ :class:`_expression.TextualSelect` objects in 1.1.
+ As name matching is no longer
+ a common case, it was acceptable to factor it into smaller generator-
+ oriented methods that are easier to understand, but incur slightly
+ more performance overhead.
+
+ """
+
+ case_sensitive = context.dialect.case_sensitive
+
+ if (
+ num_ctx_cols
+ and cols_are_ordered
+ and not textual_ordered
+ and num_ctx_cols == len(cursor_description)
+ ):
+ self._keys = [elem[0] for elem in result_columns]
+ # pure positional 1-1 case; doesn't need to read
+ # the names from cursor.description
+ return [
+ (
+ idx,
+ rmap_entry[RM_OBJECTS],
+ rmap_entry[RM_NAME].lower()
+ if not case_sensitive
+ else rmap_entry[RM_NAME],
+ rmap_entry[RM_RENDERED_NAME],
+ context.get_result_processor(
+ rmap_entry[RM_TYPE],
+ rmap_entry[RM_RENDERED_NAME],
+ cursor_description[idx][1],
+ ),
+ None,
+ )
+ for idx, rmap_entry in enumerate(result_columns)
+ ]
+ else:
+ # name-based or text-positional cases, where we need
+ # to read cursor.description names
+ if textual_ordered:
+ # textual positional case
+ raw_iterator = self._merge_textual_cols_by_position(
+ context, cursor_description, result_columns
+ )
+ elif num_ctx_cols:
+ # compiled SQL with a mismatch of description cols
+ # vs. compiled cols, or textual w/ unordered columns
+ raw_iterator = self._merge_cols_by_name(
+ context,
+ cursor_description,
+ result_columns,
+ loose_column_name_matching,
+ )
+ else:
+ # no compiled SQL, just a raw string
+ raw_iterator = self._merge_cols_by_none(
+ context, cursor_description
+ )
+
+ return [
+ (
+ idx,
+ obj,
+ cursor_colname,
+ cursor_colname,
+ context.get_result_processor(
+ mapped_type, cursor_colname, coltype
+ ),
+ untranslated,
+ )
+ for (
+ idx,
+ cursor_colname,
+ mapped_type,
+ coltype,
+ obj,
+ untranslated,
+ ) in raw_iterator
+ ]
+
+ def _colnames_from_description(self, context, cursor_description):
+ """Extract column names and data types from a cursor.description.
+
+ Applies unicode decoding, column translation, "normalization",
+ and case sensitivity rules to the names based on the dialect.
+
+ """
+
+ dialect = context.dialect
+ case_sensitive = dialect.case_sensitive
+ translate_colname = context._translate_colname
+ description_decoder = (
+ dialect._description_decoder
+ if dialect.description_encoding
+ else None
+ )
+ normalize_name = (
+ dialect.normalize_name if dialect.requires_name_normalize else None
+ )
+ untranslated = None
+
+ self._keys = []
+
+ for idx, rec in enumerate(cursor_description):
+ colname = rec[0]
+ coltype = rec[1]
+
+ if description_decoder:
+ colname = description_decoder(colname)
+
+ if translate_colname:
+ colname, untranslated = translate_colname(colname)
+
+ if normalize_name:
+ colname = normalize_name(colname)
+
+ self._keys.append(colname)
+ if not case_sensitive:
+ colname = colname.lower()
+
+ yield idx, colname, untranslated, coltype
+
+ def _merge_textual_cols_by_position(
+ self, context, cursor_description, result_columns
+ ):
+ num_ctx_cols = len(result_columns) if result_columns else None
+
+ if num_ctx_cols > len(cursor_description):
+ util.warn(
+ "Number of columns in textual SQL (%d) is "
+ "smaller than number of columns requested (%d)"
+ % (num_ctx_cols, len(cursor_description))
+ )
+ seen = set()
+ for (
+ idx,
+ colname,
+ untranslated,
+ coltype,
+ ) in self._colnames_from_description(context, cursor_description):
+ if idx < num_ctx_cols:
+ ctx_rec = result_columns[idx]
+ obj = ctx_rec[RM_OBJECTS]
+ mapped_type = ctx_rec[RM_TYPE]
+ if obj[0] in seen:
+ raise exc.InvalidRequestError(
+ "Duplicate column expression requested "
+ "in textual SQL: %r" % obj[0]
+ )
+ seen.add(obj[0])
+ else:
+ mapped_type = sqltypes.NULLTYPE
+ obj = None
+ yield idx, colname, mapped_type, coltype, obj, untranslated
+
+ def _merge_cols_by_name(
+ self,
+ context,
+ cursor_description,
+ result_columns,
+ loose_column_name_matching,
+ ):
+ dialect = context.dialect
+ case_sensitive = dialect.case_sensitive
+ match_map = self._create_description_match_map(
+ result_columns, case_sensitive, loose_column_name_matching
+ )
+
+ for (
+ idx,
+ colname,
+ untranslated,
+ coltype,
+ ) in self._colnames_from_description(context, cursor_description):
+ try:
+ ctx_rec = match_map[colname]
+ except KeyError:
+ mapped_type = sqltypes.NULLTYPE
+ obj = None
+ else:
+ obj = ctx_rec[1]
+ mapped_type = ctx_rec[2]
+ yield idx, colname, mapped_type, coltype, obj, untranslated
+
+ @classmethod
+ def _create_description_match_map(
+ cls,
+ result_columns,
+ case_sensitive=True,
+ loose_column_name_matching=False,
+ ):
+ """when matching cursor.description to a set of names that are present
+ in a Compiled object, as is the case with TextualSelect, get all the
+ names we expect might match those in cursor.description.
+ """
+
+ d = {}
+ for elem in result_columns:
+ key = elem[RM_RENDERED_NAME]
+
+ if not case_sensitive:
+ key = key.lower()
+ if key in d:
+ # conflicting keyname - just add the column-linked objects
+ # to the existing record. if there is a duplicate column
+ # name in the cursor description, this will allow all of those
+ # objects to raise an ambiguous column error
+ e_name, e_obj, e_type = d[key]
+ d[key] = e_name, e_obj + elem[RM_OBJECTS], e_type
+ else:
+ d[key] = (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE])
+
+ if loose_column_name_matching:
+ # when using a textual statement with an unordered set
+ # of columns that line up, we are expecting the user
+ # to be using label names in the SQL that match to the column
+ # expressions. Enable more liberal matching for this case;
+ # duplicate keys that are ambiguous will be fixed later.
+ for r_key in elem[RM_OBJECTS]:
+ d.setdefault(
+ r_key, (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE])
+ )
+
+ return d
+
+ def _merge_cols_by_none(self, context, cursor_description):
+ for (
+ idx,
+ colname,
+ untranslated,
+ coltype,
+ ) in self._colnames_from_description(context, cursor_description):
+ yield idx, colname, sqltypes.NULLTYPE, coltype, None, untranslated
+
+ def _key_fallback(self, key, err, raiseerr=True):
+ if raiseerr:
+ util.raise_(
+ exc.NoSuchColumnError(
+ "Could not locate column in row for column '%s'"
+ % util.string_or_unprintable(key)
+ ),
+ replace_context=err,
+ )
+ else:
+ return None
+
+ def _raise_for_ambiguous_column_name(self, rec):
+ raise exc.InvalidRequestError(
+ "Ambiguous column name '%s' in "
+ "result set column descriptions" % rec[MD_LOOKUP_KEY]
+ )
+
+ def _index_for_key(self, key, raiseerr=True):
+ # TODO: can consider pre-loading ints and negative ints
+ # into _keymap - also no coverage here
+ if isinstance(key, int):
+ key = self._keys[key]
+
+ try:
+ rec = self._keymap[key]
+ except KeyError as ke:
+ rec = self._key_fallback(key, ke, raiseerr)
+ if rec is None:
+ return None
+
+ index = rec[0]
+
+ if index is None:
+ self._raise_for_ambiguous_column_name(rec)
+ return index
+
+ def _indexes_for_keys(self, keys):
+ for rec in self._metadata_for_keys(keys):
+ yield rec[0]
+
+ def _metadata_for_keys(self, keys):
+ for key in keys:
+ # TODO: can consider pre-loading ints and negative ints
+ # into _keymap
+ if isinstance(key, int):
+ key = self._keys[key]
+
+ try:
+ rec = self._keymap[key]
+ except KeyError as ke:
+ rec = self._key_fallback(key, ke)
+
+ index = rec[0]
+
+ if index is None:
+ self._raise_for_ambiguous_column_name(rec)
+
+ yield rec
+
+ def __getstate__(self):
+ return {
+ "_keymap": {
+ key: (rec[MD_INDEX], _UNPICKLED, key)
+ for key, rec in self._keymap.items()
+ if isinstance(key, util.string_types + util.int_types)
+ },
+ "_keys": self._keys,
+ "case_sensitive": self.case_sensitive,
+ "_translated_indexes": self._translated_indexes,
+ "_tuplefilter": self._tuplefilter,
+ }
+
+ def __setstate__(self, state):
+ self._processors = [None for _ in range(len(state["_keys"]))]
+ self._keymap = state["_keymap"]
+
+ self._keys = state["_keys"]
+ self.case_sensitive = state["case_sensitive"]
+
+ if state["_translated_indexes"]:
+ self._translated_indexes = state["_translated_indexes"]
+ self._tuplefilter = tuplegetter(*self._translated_indexes)
+ else:
+ self._translated_indexes = self._tuplefilter = None
+
+
+class LegacyCursorResultMetaData(CursorResultMetaData):
+ def _contains(self, value, row):
+ key = value
+ if key in self._keymap:
+ util.warn_deprecated_20(
+ "Using the 'in' operator to test for string or column "
+ "keys, or integer indexes, in a :class:`.Row` object is "
+ "deprecated and will "
+ "be removed in a future release. "
+ "Use the `Row._fields` or `Row._mapping` attribute, i.e. "
+ "'key in row._fields'",
+ )
+ return True
+ else:
+ return self._key_fallback(key, None, False) is not None
+
+ def _key_fallback(self, key, err, raiseerr=True):
+ map_ = self._keymap
+ result = None
+
+ if isinstance(key, util.string_types):
+ result = map_.get(key if self.case_sensitive else key.lower())
+ elif isinstance(key, expression.ColumnElement):
+ if (
+ key._label
+ and (key._label if self.case_sensitive else key._label.lower())
+ in map_
+ ):
+ result = map_[
+ key._label if self.case_sensitive else key._label.lower()
+ ]
+ elif (
+ hasattr(key, "name")
+ and (key.name if self.case_sensitive else key.name.lower())
+ in map_
+ ):
+ # match is only on name.
+ result = map_[
+ key.name if self.case_sensitive else key.name.lower()
+ ]
+
+ # search extra hard to make sure this
+ # isn't a column/label name overlap.
+ # this check isn't currently available if the row
+ # was unpickled.
+ if result is not None and result[MD_OBJECTS] not in (
+ None,
+ _UNPICKLED,
+ ):
+ for obj in result[MD_OBJECTS]:
+ if key._compare_name_for_result(obj):
+ break
+ else:
+ result = None
+ if result is not None:
+ if result[MD_OBJECTS] is _UNPICKLED:
+ util.warn_deprecated(
+ "Retrieving row values using Column objects from a "
+ "row that was unpickled is deprecated; adequate "
+ "state cannot be pickled for this to be efficient. "
+ "This usage will raise KeyError in a future release.",
+ version="1.4",
+ )
+ else:
+ util.warn_deprecated(
+ "Retrieving row values using Column objects with only "
+ "matching names as keys is deprecated, and will raise "
+ "KeyError in a future release; only Column "
+ "objects that are explicitly part of the statement "
+ "object should be used.",
+ version="1.4",
+ )
+ if result is None:
+ if raiseerr:
+ util.raise_(
+ exc.NoSuchColumnError(
+ "Could not locate column in row for column '%s'"
+ % util.string_or_unprintable(key)
+ ),
+ replace_context=err,
+ )
+ else:
+ return None
+ else:
+ map_[key] = result
+ return result
+
+ def _warn_for_nonint(self, key):
+ util.warn_deprecated_20(
+ "Using non-integer/slice indices on Row is deprecated and will "
+ "be removed in version 2.0; please use row._mapping[<key>], or "
+ "the mappings() accessor on the Result object.",
+ stacklevel=4,
+ )
+
+ def _has_key(self, key):
+ if key in self._keymap:
+ return True
+ else:
+ return self._key_fallback(key, None, False) is not None
+
+
+class ResultFetchStrategy(object):
+ """Define a fetching strategy for a result object.
+
+
+ .. versionadded:: 1.4
+
+ """
+
+ __slots__ = ()
+
+ def soft_close(self, result):
+ raise NotImplementedError()
+
+ def hard_close(self, result):
+ raise NotImplementedError()
+
+ def yield_per(self, result, num):
+ return
+
+ def fetchone(self, result):
+ raise NotImplementedError()
+
+ def fetchmany(self, result, size=None):
+ raise NotImplementedError()
+
+ def fetchall(self, result):
+ raise NotImplementedError()
+
+ def handle_exception(self, result, err):
+ raise err
+
+
+class NoCursorFetchStrategy(ResultFetchStrategy):
+ """Cursor strategy for a result that has no open cursor.
+
+ There are two varieties of this strategy, one for DQL and one for
+ DML (and also DDL), each of which represent a result that had a cursor
+ but no longer has one.
+
+ """
+
+ __slots__ = ("closed",)
+
+ def __init__(self, closed):
+ self.closed = closed
+ self.cursor_description = None
+
+ def soft_close(self, result):
+ pass
+
+ def hard_close(self, result):
+ self.closed = True
+
+ def fetchone(self, result):
+ return self._non_result(result, None)
+
+ def fetchmany(self, result, size=None):
+ return self._non_result(result, [])
+
+ def fetchall(self, result):
+ return self._non_result(result, [])
+
+ def _non_result(self, result, default, err=None):
+ raise NotImplementedError()
+
+
+class NoCursorDQLFetchStrategy(NoCursorFetchStrategy):
+ """Cursor strategy for a DQL result that has no open cursor.
+
+ This is a result set that can return rows, i.e. for a SELECT, or for an
+ INSERT, UPDATE, DELETE that includes RETURNING. However it is in the state
+ where the cursor is closed and no rows remain available. The owning result
+ object may or may not be "hard closed", which determines if the fetch
+ methods send empty results or raise for closed result.
+
+ """
+
+ def _non_result(self, result, default, err=None):
+ if self.closed:
+ util.raise_(
+ exc.ResourceClosedError("This result object is closed."),
+ replace_context=err,
+ )
+ else:
+ return default
+
+
+class NoCursorDMLFetchStrategy(NoCursorFetchStrategy):
+ """Cursor strategy for a DML result that has no open cursor.
+
+ This is a result set that does not return rows, i.e. for an INSERT,
+ UPDATE, DELETE that does not include RETURNING.
+
+ """
+
+ def _non_result(self, result, default, err=None):
+ # we only expect to have a _NoResultMetaData() here right now.
+ assert not result._metadata.returns_rows
+ result._metadata._we_dont_return_rows(err)
+
+
+class CursorFetchStrategy(ResultFetchStrategy):
+ """Call fetch methods from a DBAPI cursor.
+
+ Alternate versions of this class may instead buffer the rows from
+ cursors or not use cursors at all.
+
+ """
+
+ __slots__ = ("dbapi_cursor", "cursor_description")
+
+ def __init__(self, dbapi_cursor, cursor_description):
+ self.dbapi_cursor = dbapi_cursor
+ self.cursor_description = cursor_description
+
+ @classmethod
+ def create(cls, result):
+ dbapi_cursor = result.cursor
+ description = dbapi_cursor.description
+
+ if description is None:
+ return NoCursorDMLFetchStrategy(False)
+ else:
+ return cls(dbapi_cursor, description)
+
+ def soft_close(self, result):
+ result.cursor_strategy = NoCursorDQLFetchStrategy(False)
+
+ def hard_close(self, result):
+ result.cursor_strategy = NoCursorDQLFetchStrategy(True)
+
+ def handle_exception(self, result, err):
+ result.connection._handle_dbapi_exception(
+ err, None, None, self.dbapi_cursor, result.context
+ )
+
+ def yield_per(self, result, num):
+ result.cursor_strategy = BufferedRowCursorFetchStrategy(
+ self.dbapi_cursor,
+ self.cursor_description,
+ num,
+ collections.deque(),
+ growth_factor=0,
+ )
+
+ def fetchone(self, result):
+ try:
+ row = self.dbapi_cursor.fetchone()
+ if row is None:
+ result._soft_close()
+ return row
+ except BaseException as e:
+ self.handle_exception(result, e)
+
+ def fetchmany(self, result, size=None):
+ try:
+ if size is None:
+ l = self.dbapi_cursor.fetchmany()
+ else:
+ l = self.dbapi_cursor.fetchmany(size)
+
+ if not l:
+ result._soft_close()
+ return l
+ except BaseException as e:
+ self.handle_exception(result, e)
+
+ def fetchall(self, result):
+ try:
+ rows = self.dbapi_cursor.fetchall()
+ result._soft_close()
+ return rows
+ except BaseException as e:
+ self.handle_exception(result, e)
+
+
+class BufferedRowCursorFetchStrategy(CursorFetchStrategy):
+ """A cursor fetch strategy with row buffering behavior.
+
+ This strategy buffers the contents of a selection of rows
+ before ``fetchone()`` is called. This is to allow the results of
+ ``cursor.description`` to be available immediately, when
+ interfacing with a DB-API that requires rows to be consumed before
+ this information is available (currently psycopg2, when used with
+ server-side cursors).
+
+ The pre-fetching behavior fetches only one row initially, and then
+ grows its buffer size by a fixed amount with each successive need
+ for additional rows up the ``max_row_buffer`` size, which defaults
+ to 1000::
+
+ with psycopg2_engine.connect() as conn:
+
+ result = conn.execution_options(
+ stream_results=True, max_row_buffer=50
+ ).execute(text("select * from table"))
+
+ .. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.
+
+ .. seealso::
+
+ :ref:`psycopg2_execution_options`
+ """
+
+ __slots__ = ("_max_row_buffer", "_rowbuffer", "_bufsize", "_growth_factor")
+
+ def __init__(
+ self,
+ dbapi_cursor,
+ description,
+ max_row_buffer,
+ initial_buffer,
+ growth_factor=5,
+ ):
+ super(BufferedRowCursorFetchStrategy, self).__init__(
+ dbapi_cursor, description
+ )
+
+ self._max_row_buffer = max_row_buffer
+ self._growth_factor = growth_factor
+ self._rowbuffer = initial_buffer
+
+ if growth_factor:
+ self._bufsize = min(self._max_row_buffer, self._growth_factor)
+ else:
+ self._bufsize = self._max_row_buffer
+
+ @classmethod
+ def create(cls, result):
+ """Buffered row strategy has to buffer the first rows *before*
+ cursor.description is fetched so that it works with named cursors
+ correctly
+
+ """
+
+ dbapi_cursor = result.cursor
+
+ # TODO: is create() called within a handle_error block externally?
+ # can this be guaranteed / tested / etc
+ initial_buffer = collections.deque(dbapi_cursor.fetchmany(1))
+
+ description = dbapi_cursor.description
+
+ if description is None:
+ return NoCursorDMLFetchStrategy(False)
+ else:
+ max_row_buffer = result.context.execution_options.get(
+ "max_row_buffer", 1000
+ )
+ return cls(
+ dbapi_cursor, description, max_row_buffer, initial_buffer
+ )
+
+ def _buffer_rows(self, result):
+ size = self._bufsize
+ try:
+ if size < 1:
+ new_rows = self.dbapi_cursor.fetchall()
+ else:
+ new_rows = self.dbapi_cursor.fetchmany(size)
+ except BaseException as e:
+ self.handle_exception(result, e)
+
+ if not new_rows:
+ return
+ self._rowbuffer = collections.deque(new_rows)
+ if self._growth_factor and size < self._max_row_buffer:
+ self._bufsize = min(
+ self._max_row_buffer, size * self._growth_factor
+ )
+
+ def yield_per(self, result, num):
+ self._growth_factor = 0
+ self._max_row_buffer = self._bufsize = num
+
+ def soft_close(self, result):
+ self._rowbuffer.clear()
+ super(BufferedRowCursorFetchStrategy, self).soft_close(result)
+
+ def hard_close(self, result):
+ self._rowbuffer.clear()
+ super(BufferedRowCursorFetchStrategy, self).hard_close(result)
+
+ def fetchone(self, result):
+ if not self._rowbuffer:
+ self._buffer_rows(result)
+ if not self._rowbuffer:
+ try:
+ result._soft_close()
+ except BaseException as e:
+ self.handle_exception(result, e)
+ return None
+ return self._rowbuffer.popleft()
+
+ def fetchmany(self, result, size=None):
+ if size is None:
+ return self.fetchall(result)
+
+ buf = list(self._rowbuffer)
+ lb = len(buf)
+ if size > lb:
+ try:
+ buf.extend(self.dbapi_cursor.fetchmany(size - lb))
+ except BaseException as e:
+ self.handle_exception(result, e)
+
+ result = buf[0:size]
+ self._rowbuffer = collections.deque(buf[size:])
+ return result
+
+ def fetchall(self, result):
+ try:
+ ret = list(self._rowbuffer) + list(self.dbapi_cursor.fetchall())
+ self._rowbuffer.clear()
+ result._soft_close()
+ return ret
+ except BaseException as e:
+ self.handle_exception(result, e)
+
+
+class FullyBufferedCursorFetchStrategy(CursorFetchStrategy):
+ """A cursor strategy that buffers rows fully upon creation.
+
+ Used for operations where a result is to be delivered
+ after the database conversation can not be continued,
+ such as MSSQL INSERT...OUTPUT after an autocommit.
+
+ """
+
+ __slots__ = ("_rowbuffer",)
+
+ def __init__(self, dbapi_cursor, description, initial_buffer=None):
+ super(FullyBufferedCursorFetchStrategy, self).__init__(
+ dbapi_cursor, description
+ )
+ if initial_buffer is not None:
+ self._rowbuffer = collections.deque(initial_buffer)
+ else:
+ self._rowbuffer = collections.deque(self.dbapi_cursor.fetchall())
+
+ @classmethod
+ def create_from_buffer(cls, dbapi_cursor, description, buffer):
+ return cls(dbapi_cursor, description, buffer)
+
+ def yield_per(self, result, num):
+ pass
+
+ def soft_close(self, result):
+ self._rowbuffer.clear()
+ super(FullyBufferedCursorFetchStrategy, self).soft_close(result)
+
+ def hard_close(self, result):
+ self._rowbuffer.clear()
+ super(FullyBufferedCursorFetchStrategy, self).hard_close(result)
+
+ def fetchone(self, result):
+ if self._rowbuffer:
+ return self._rowbuffer.popleft()
+ else:
+ result._soft_close()
+ return None
+
+ def fetchmany(self, result, size=None):
+ if size is None:
+ return self.fetchall(result)
+
+ buf = list(self._rowbuffer)
+ rows = buf[0:size]
+ self._rowbuffer = collections.deque(buf[size:])
+ if not rows:
+ result._soft_close()
+ return rows
+
+ def fetchall(self, result):
+ ret = self._rowbuffer
+ self._rowbuffer = collections.deque()
+ result._soft_close()
+ return ret
+
+
+class _NoResultMetaData(ResultMetaData):
+ __slots__ = ()
+
+ returns_rows = False
+
+ def _we_dont_return_rows(self, err=None):
+ util.raise_(
+ exc.ResourceClosedError(
+ "This result object does not return rows. "
+ "It has been closed automatically."
+ ),
+ replace_context=err,
+ )
+
+ def _index_for_key(self, keys, raiseerr):
+ self._we_dont_return_rows()
+
+ def _metadata_for_keys(self, key):
+ self._we_dont_return_rows()
+
+ def _reduce(self, keys):
+ self._we_dont_return_rows()
+
+ @property
+ def _keymap(self):
+ self._we_dont_return_rows()
+
+ @property
+ def keys(self):
+ self._we_dont_return_rows()
+
+
+_no_result_metadata = _NoResultMetaData()
+
+
+class BaseCursorResult(object):
+ """Base class for database result objects.
+
+ """
+
+ out_parameters = None
+ _metadata = None
+ _soft_closed = False
+ closed = False
+
+ @classmethod
+ def _create_for_context(cls, context):
+ if context._is_future_result:
+ obj = object.__new__(CursorResult)
+ else:
+ obj = object.__new__(LegacyCursorResult)
+ obj.__init__(context)
+ return obj
+
+ def __init__(self, context):
+ self.context = context
+ self.dialect = context.dialect
+ self.cursor = context.cursor
+ self.connection = context.root_connection
+ self._echo = (
+ self.connection._echo and context.engine._should_log_debug()
+ )
+ self._init_metadata()
+
+ def _init_metadata(self):
+ self.cursor_strategy = strat = self.context.get_result_cursor_strategy(
+ self
+ )
+
+ if strat.cursor_description is not None:
+ if self.context.compiled:
+ if self.context.compiled._cached_metadata:
+ cached_md = self.context.compiled._cached_metadata
+ self._metadata = cached_md._adapt_to_context(self.context)
+
+ else:
+ self._metadata = (
+ self.context.compiled._cached_metadata
+ ) = self._cursor_metadata(self, strat.cursor_description)
+ else:
+ self._metadata = self._cursor_metadata(
+ self, strat.cursor_description
+ )
+ if self._echo:
+ self.context.engine.logger.debug(
+ "Col %r", tuple(x[0] for x in strat.cursor_description)
+ )
+ else:
+ self._metadata = _no_result_metadata
+ # leave cursor open so that execution context can continue
+ # setting up things like rowcount
+
+ def _soft_close(self, hard=False):
+ """Soft close this :class:`_engine.CursorResult`.
+
+ This releases all DBAPI cursor resources, but leaves the
+ CursorResult "open" from a semantic perspective, meaning the
+ fetchXXX() methods will continue to return empty results.
+
+ This method is called automatically when:
+
+ * all result rows are exhausted using the fetchXXX() methods.
+ * cursor.description is None.
+
+ This method is **not public**, but is documented in order to clarify
+ the "autoclose" process used.
+
+ .. versionadded:: 1.0.0
+
+ .. seealso::
+
+ :meth:`_engine.CursorResult.close`
+
+
+ """
+
+ if (not hard and self._soft_closed) or (hard and self.closed):
+ return
+
+ if hard:
+ self.closed = True
+ self.cursor_strategy.hard_close(self)
+ else:
+ self.cursor_strategy.soft_close(self)
+
+ if not self._soft_closed:
+ cursor = self.cursor
+ self.cursor = None
+ self.connection._safe_close_cursor(cursor)
+ self._soft_closed = True
+
+ @util.memoized_property
+ def inserted_primary_key(self):
+ """Return the primary key for the row just inserted.
+
+ The return value is a list of scalar values
+ corresponding to the list of primary key columns
+ in the target table.
+
+ This only applies to single row :func:`_expression.insert`
+ constructs which did not explicitly specify
+ :meth:`_expression.Insert.returning`.
+
+ Note that primary key columns which specify a
+ server_default clause,
+ or otherwise do not qualify as "autoincrement"
+ columns (see the notes at :class:`_schema.Column`), and were
+ generated using the database-side default, will
+ appear in this list as ``None`` unless the backend
+ supports "returning" and the insert statement executed
+ with the "implicit returning" enabled.
+
+ Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
+ statement is not a compiled expression construct
+ or is not an insert() construct.
+
+ """
+
+ if not self.context.compiled:
+ raise exc.InvalidRequestError(
+ "Statement is not a compiled " "expression construct."
+ )
+ elif not self.context.isinsert:
+ raise exc.InvalidRequestError(
+ "Statement is not an insert() " "expression construct."
+ )
+ elif self.context._is_explicit_returning:
+ raise exc.InvalidRequestError(
+ "Can't call inserted_primary_key "
+ "when returning() "
+ "is used."
+ )
+
+ return self.context.inserted_primary_key
+
+ def last_updated_params(self):
+ """Return the collection of updated parameters from this
+ execution.
+
+ Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
+ statement is not a compiled expression construct
+ or is not an update() construct.
+
+ """
+ if not self.context.compiled:
+ raise exc.InvalidRequestError(
+ "Statement is not a compiled " "expression construct."
+ )
+ elif not self.context.isupdate:
+ raise exc.InvalidRequestError(
+ "Statement is not an update() " "expression construct."
+ )
+ elif self.context.executemany:
+ return self.context.compiled_parameters
+ else:
+ return self.context.compiled_parameters[0]
+
+ def last_inserted_params(self):
+ """Return the collection of inserted parameters from this
+ execution.
+
+ Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
+ statement is not a compiled expression construct
+ or is not an insert() construct.
+
+ """
+ if not self.context.compiled:
+ raise exc.InvalidRequestError(
+ "Statement is not a compiled " "expression construct."
+ )
+ elif not self.context.isinsert:
+ raise exc.InvalidRequestError(
+ "Statement is not an insert() " "expression construct."
+ )
+ elif self.context.executemany:
+ return self.context.compiled_parameters
+ else:
+ return self.context.compiled_parameters[0]
+
+ @property
+ def returned_defaults(self):
+ """Return the values of default columns that were fetched using
+ the :meth:`.ValuesBase.return_defaults` feature.
+
+ The value is an instance of :class:`.Row`, or ``None``
+ if :meth:`.ValuesBase.return_defaults` was not used or if the
+ backend does not support RETURNING.
+
+ .. versionadded:: 0.9.0
+
+ .. seealso::
+
+ :meth:`.ValuesBase.return_defaults`
+
+ """
+ return self.context.returned_defaults
+
+ def lastrow_has_defaults(self):
+ """Return ``lastrow_has_defaults()`` from the underlying
+ :class:`.ExecutionContext`.
+
+ See :class:`.ExecutionContext` for details.
+
+ """
+
+ return self.context.lastrow_has_defaults()
+
+ def postfetch_cols(self):
+ """Return ``postfetch_cols()`` from the underlying
+ :class:`.ExecutionContext`.
+
+ See :class:`.ExecutionContext` for details.
+
+ Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
+ statement is not a compiled expression construct
+ or is not an insert() or update() construct.
+
+ """
+
+ if not self.context.compiled:
+ raise exc.InvalidRequestError(
+ "Statement is not a compiled " "expression construct."
+ )
+ elif not self.context.isinsert and not self.context.isupdate:
+ raise exc.InvalidRequestError(
+ "Statement is not an insert() or update() "
+ "expression construct."
+ )
+ return self.context.postfetch_cols
+
+ def prefetch_cols(self):
+ """Return ``prefetch_cols()`` from the underlying
+ :class:`.ExecutionContext`.
+
+ See :class:`.ExecutionContext` for details.
+
+ Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
+ statement is not a compiled expression construct
+ or is not an insert() or update() construct.
+
+ """
+
+ if not self.context.compiled:
+ raise exc.InvalidRequestError(
+ "Statement is not a compiled " "expression construct."
+ )
+ elif not self.context.isinsert and not self.context.isupdate:
+ raise exc.InvalidRequestError(
+ "Statement is not an insert() or update() "
+ "expression construct."
+ )
+ return self.context.prefetch_cols
+
+ def supports_sane_rowcount(self):
+ """Return ``supports_sane_rowcount`` from the dialect.
+
+ See :attr:`_engine.CursorResult.rowcount` for background.
+
+ """
+
+ return self.dialect.supports_sane_rowcount
+
+ def supports_sane_multi_rowcount(self):
+ """Return ``supports_sane_multi_rowcount`` from the dialect.
+
+ See :attr:`_engine.CursorResult.rowcount` for background.
+
+ """
+
+ return self.dialect.supports_sane_multi_rowcount
+
+ @util.memoized_property
+ def rowcount(self):
+ """Return the 'rowcount' for this result.
+
+ The 'rowcount' reports the number of rows *matched*
+ by the WHERE criterion of an UPDATE or DELETE statement.
+
+ .. note::
+
+ Notes regarding :attr:`_engine.CursorResult.rowcount`:
+
+
+ * This attribute returns the number of rows *matched*,
+ which is not necessarily the same as the number of rows
+ that were actually *modified* - an UPDATE statement, for example,
+ may have no net change on a given row if the SET values
+ given are the same as those present in the row already.
+ Such a row would be matched but not modified.
+ On backends that feature both styles, such as MySQL,
+ rowcount is configured by default to return the match
+ count in all cases.
+
+ * :attr:`_engine.CursorResult.rowcount`
+ is *only* useful in conjunction
+ with an UPDATE or DELETE statement. Contrary to what the Python
+ DBAPI says, it does *not* return the
+ number of rows available from the results of a SELECT statement
+ as DBAPIs cannot support this functionality when rows are
+ unbuffered.
+
+ * :attr:`_engine.CursorResult.rowcount`
+ may not be fully implemented by
+ all dialects. In particular, most DBAPIs do not support an
+ aggregate rowcount result from an executemany call.
+ The :meth:`_engine.CursorResult.supports_sane_rowcount` and
+ :meth:`_engine.CursorResult.supports_sane_multi_rowcount` methods
+ will report from the dialect if each usage is known to be
+ supported.
+
+ * Statements that use RETURNING may not return a correct
+ rowcount.
+
+ """
+ try:
+ return self.context.rowcount
+ except BaseException as e:
+ self.cursor_strategy.handle_exception(self, e)
+
+ @property
+ def lastrowid(self):
+ """return the 'lastrowid' accessor on the DBAPI cursor.
+
+ This is a DBAPI specific method and is only functional
+ for those backends which support it, for statements
+ where it is appropriate. Its behavior is not
+ consistent across backends.
+
+ Usage of this method is normally unnecessary when
+ using insert() expression constructs; the
+ :attr:`~CursorResult.inserted_primary_key` attribute provides a
+ tuple of primary key values for a newly inserted row,
+ regardless of database backend.
+
+ """
+ try:
+ return self.context.get_lastrowid()
+ except BaseException as e:
+ self.cursor_strategy.handle_exception(self, e)
+
+ @property
+ def returns_rows(self):
+ """True if this :class:`_engine.CursorResult` returns zero or more rows.
+
+ I.e. if it is legal to call the methods
+ :meth:`_engine.CursorResult.fetchone`,
+ :meth:`_engine.CursorResult.fetchmany`
+ :meth:`_engine.CursorResult.fetchall`.
+
+ Overall, the value of :attr:`_engine.CursorResult.returns_rows` should
+ always be synonymous with whether or not the DBAPI cursor had a
+ ``.description`` attribute, indicating the presence of result columns,
+ noting that a cursor that returns zero rows still has a
+ ``.description`` if a row-returning statement was emitted.
+
+ This attribute should be True for all results that are against
+ SELECT statements, as well as for DML statements INSERT/UPDATE/DELETE
+ that use RETURNING. For INSERT/UPDATE/DELETE statements that were
+ not using RETURNING, the value will usually be False, however
+ there are some dialect-specific exceptions to this, such as when
+ using the MSSQL / pyodbc dialect a SELECT is emitted inline in
+ order to retrieve an inserted primary key value.
+
+
+ """
+ return self._metadata.returns_rows
+
+ @property
+ def is_insert(self):
+ """True if this :class:`_engine.CursorResult` is the result
+ of executing an expression language compiled
+ :func:`_expression.insert` construct.
+
+ When True, this implies that the
+ :attr:`inserted_primary_key` attribute is accessible,
+ assuming the statement did not include
+ a user defined "returning" construct.
+
+ """
+ return self.context.isinsert
+
+
+class CursorResult(BaseCursorResult, Result):
+ """A Result that is representing state from a DBAPI cursor.
+
+ .. versionchanged:: 1.4 The :class:`.CursorResult` and
+ :class:`.LegacyCursorResult`
+ classes replace the previous :class:`.ResultProxy` interface.
+ These classes are based on the :class:`.Result` calling API
+ which provides an updated usage model and calling facade for
+ SQLAlchemy Core and SQLAlchemy ORM.
+
+ Returns database rows via the :class:`.Row` class, which provides
+ additional API features and behaviors on top of the raw data returned by
+ the DBAPI. Through the use of filters such as the :meth:`.Result.scalars`
+ method, other kinds of objects may also be returned.
+
+ Within the scope of the 1.x series of SQLAlchemy, Core SQL results in
+ version 1.4 return an instance of :class:`._engine.LegacyCursorResult`
+ which takes the place of the ``CursorResult`` class used for the 1.3 series
+ and previously. This object returns rows as :class:`.LegacyRow` objects,
+ which maintains Python mapping (i.e. dictionary) like behaviors upon the
+ object itself. Going forward, the :attr:`.Row._mapping` attribute should
+ be used for dictionary behaviors.
+
+ .. seealso::
+
+ :ref:`coretutorial_selecting` - introductory material for accessing
+ :class:`_engine.CursorResult` and :class:`.Row` objects.
+
+ """
+
+ _cursor_metadata = CursorResultMetaData
+ _cursor_strategy_cls = CursorFetchStrategy
+
+ @HasMemoized.memoized_attribute
+ def _row_logging_fn(self):
+ if self._echo:
+ log = self.context.engine.logger.debug
+
+ def log_row(row):
+ log("Row %r", sql_util._repr_row(row))
+ return row
+
+ return log_row
+ else:
+ return None
+
+ def _fetchiter_impl(self):
+ fetchone = self.cursor_strategy.fetchone
+
+ while True:
+ row = fetchone(self)
+ if row is None:
+ break
+ yield row
+
+ def _fetchone_impl(self):
+ return self.cursor_strategy.fetchone(self)
+
+ def _fetchall_impl(self):
+ return self.cursor_strategy.fetchall(self)
+
+ def _fetchmany_impl(self, size=None):
+ return self.cursor_strategy.fetchmany(self, size)
+
+ def _soft_close(self, **kw):
+ BaseCursorResult._soft_close(self, **kw)
+
+ def _raw_row_iterator(self):
+ return self._fetchiter_impl()
+
+ def close(self):
+ """Close this :class:`_engine.CursorResult`.
+
+ This closes out the underlying DBAPI cursor corresponding to the
+ statement execution, if one is still present. Note that the DBAPI
+ cursor is automatically released when the :class:`_engine.CursorResult`
+ exhausts all available rows. :meth:`_engine.CursorResult.close` is
+ generally an optional method except in the case when discarding a
+ :class:`_engine.CursorResult` that still has additional rows pending
+ for fetch.
+
+ After this method is called, it is no longer valid to call upon
+ the fetch methods, which will raise a :class:`.ResourceClosedError`
+ on subsequent use.
+
+ .. seealso::
+
+ :ref:`connections_toplevel`
+
+ """
+ self._soft_close(hard=True)
+
+ @_generative
+ def yield_per(self, num):
+ self._yield_per = num
+ self.cursor_strategy.yield_per(self, num)
+
+
+class LegacyCursorResult(CursorResult):
+ """Legacy version of :class:`.CursorResult`.
+
+ This class includes connection "connection autoclose" behavior for use with
+ "connectionless" execution, as well as delivers rows using the
+ :class:`.LegacyRow` row implementation.
+
+ .. versionadded:: 1.4
+
+ """
+
+ _autoclose_connection = False
+ _process_row = LegacyRow
+ _cursor_metadata = LegacyCursorResultMetaData
+ _cursor_strategy_cls = CursorFetchStrategy
+
+ def close(self):
+ """Close this :class:`_engine.LegacyCursorResult`.
+
+ This method has the same behavior as that of
+ :meth:`._engine.CursorResult`, but it also may close
+ the underlying :class:`.Connection` for the case of "connectionless"
+ execution.
+
+ .. deprecated:: 2.0 "connectionless" execution is deprecated and will
+ be removed in version 2.0. Version 2.0 will feature the
+ :class:`_future.Result`
+ object that will no longer affect the status
+ of the originating connection in any case.
+
+ After this method is called, it is no longer valid to call upon
+ the fetch methods, which will raise a :class:`.ResourceClosedError`
+ on subsequent use.
+
+ .. seealso::
+
+ :ref:`connections_toplevel`
+
+ :ref:`dbengine_implicit`
+ """
+ self._soft_close(hard=True)
+
+ def _soft_close(self, hard=False):
+ soft_closed = self._soft_closed
+ super(LegacyCursorResult, self)._soft_close(hard=hard)
+ if (
+ not soft_closed
+ and self._soft_closed
+ and self._autoclose_connection
+ ):
+ self.connection.close()
+
+
+ResultProxy = LegacyCursorResult
+
+
+class BufferedRowResultProxy(ResultProxy):
+ """A ResultProxy with row buffering behavior.
+
+ .. deprecated:: 1.4 this class is now supplied using a strategy object.
+ See :class:`.BufferedRowCursorFetchStrategy`.
+
+ """
+
+ _cursor_strategy_cls = BufferedRowCursorFetchStrategy
+
+
+class FullyBufferedResultProxy(ResultProxy):
+ """A result proxy that buffers rows fully upon creation.
+
+ .. deprecated:: 1.4 this class is now supplied using a strategy object.
+ See :class:`.FullyBufferedCursorFetchStrategy`.
+
+ """
+
+ _cursor_strategy_cls = FullyBufferedCursorFetchStrategy
+
+
+class BufferedColumnRow(LegacyRow):
+ """Row is now BufferedColumn in all cases"""
+
+
+class BufferedColumnResultProxy(ResultProxy):
+ """A ResultProxy with column buffering behavior.
+
+ .. versionchanged:: 1.4 This is now the default behavior of the Row
+ and this class does not change behavior in any way.
+
+ """
+
+ _process_row = BufferedColumnRow
diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py
index 865a1160b..20f731116 100644
--- a/lib/sqlalchemy/engine/default.py
+++ b/lib/sqlalchemy/engine/default.py
@@ -18,8 +18,8 @@ import random
import re
import weakref
+from . import cursor as _cursor
from . import interfaces
-from . import result as _result
from .. import event
from .. import exc
from .. import pool
@@ -1217,9 +1217,9 @@ class DefaultExecutionContext(interfaces.ExecutionContext):
def get_result_cursor_strategy(self, result):
if self._is_server_side:
- strat_cls = _result.BufferedRowCursorFetchStrategy
+ strat_cls = _cursor.BufferedRowCursorFetchStrategy
else:
- strat_cls = _result.DefaultCursorFetchStrategy
+ strat_cls = _cursor.CursorFetchStrategy
return strat_cls.create(result)
@@ -1237,7 +1237,7 @@ class DefaultExecutionContext(interfaces.ExecutionContext):
if self.is_crud or self.is_text:
result = self._setup_crud_result_proxy()
else:
- result = _result.ResultProxy._create_for_context(self)
+ result = _cursor.CursorResult._create_for_context(self)
if (
self.compiled
@@ -1289,25 +1289,39 @@ class DefaultExecutionContext(interfaces.ExecutionContext):
elif not self._is_implicit_returning:
self._setup_ins_pk_from_empty()
- result = _result.ResultProxy._create_for_context(self)
+ result = _cursor.CursorResult._create_for_context(self)
if self.isinsert:
if self._is_implicit_returning:
- row = result._onerow()
+ row = result.fetchone()
self.returned_defaults = row
self._setup_ins_pk_from_implicit_returning(row)
+
+ # test that it has a cursor metadata that is accurate.
+ # the first row will have been fetched and current assumptions
+ # are that the result has only one row, until executemany()
+ # support is added here.
+ assert result.returns_rows
result._soft_close()
- result._metadata = None
elif not self._is_explicit_returning:
result._soft_close()
- result._metadata = None
+
+ # we assume here the result does not return any rows.
+ # *usually*, this will be true. However, some dialects
+ # such as that of MSSQL/pyodbc need to SELECT a post fetch
+ # function so this is not necessarily true.
+ # assert not result.returns_rows
+
elif self.isupdate and self._is_implicit_returning:
- row = result._onerow()
+ row = result.fetchone()
self.returned_defaults = row
result._soft_close()
- result._metadata = None
- elif result._metadata is None:
+ # test that it has a cursor metadata that is accurate.
+ # the rows have all been fetched however.
+ assert result.returns_rows
+
+ elif not result.returns_rows:
# no results, get rowcount
# (which requires open cursor on some drivers
# such as kintersbasdb, mxodbc)
diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py
index bc3cdbb9a..a3a9cc489 100644
--- a/lib/sqlalchemy/engine/result.py
+++ b/lib/sqlalchemy/engine/result.py
@@ -5,1753 +5,997 @@
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""Define result set constructs including :class:`.Result`"""
+"""Define generic result set constructs."""
-import collections
import functools
+import itertools
import operator
from .row import _baserow_usecext
-from .row import BaseRow # noqa
-from .row import LegacyRow # noqa
-from .row import Row # noqa
-from .row import RowMapping # noqa
-from .row import RowProxy # noqa
-from .row import rowproxy_reconstructor # noqa
+from .row import Row
from .. import exc
from .. import util
-from ..sql import expression
-from ..sql import sqltypes
-from ..sql import util as sql_util
-from ..sql.compiler import RM_NAME
-from ..sql.compiler import RM_OBJECTS
-from ..sql.compiler import RM_RENDERED_NAME
-from ..sql.compiler import RM_TYPE
+from ..sql.base import _generative
+from ..sql.base import HasMemoized
+from ..sql.base import InPlaceGenerative
+from ..util import collections_abc
if _baserow_usecext:
- from sqlalchemy.cresultproxy import tuplegetter as _tuplegetter
+ from sqlalchemy.cresultproxy import tuplegetter
-_UNPICKLED = util.symbol("unpickled")
+ _row_as_tuple = tuplegetter
+else:
-# cyclical import for sqlalchemy.future
-_future_Result = None
+ def tuplegetter(*indexes):
+ it = operator.itemgetter(*indexes)
-# metadata entry tuple indexes.
-# using raw tuple is faster than namedtuple.
-MD_INDEX = 0 # integer index in cursor.description
-MD_OBJECTS = 1 # other string keys and ColumnElement obj that can match
-MD_LOOKUP_KEY = 2 # string key we usually expect for key-based lookup
-MD_RENDERED_NAME = 3 # name that is usually in cursor.description
-MD_PROCESSOR = 4 # callable to process a result value into a row
-MD_UNTRANSLATED = 5 # raw name from cursor.description
+ if len(indexes) > 1:
+ return it
+ else:
+ return lambda row: (it(row),)
+
+ def _row_as_tuple(*indexes):
+ getters = [
+ operator.methodcaller("_get_by_key_impl_mapping", index)
+ for index in indexes
+ ]
+ return lambda rec: tuple(getter(rec) for getter in getters)
class ResultMetaData(object):
+ """Base for metadata about result rows."""
+
__slots__ = ()
- def _has_key(self, key):
- return key in self._keymap
+ _tuplefilter = None
+ _translated_indexes = None
+ _unique_filters = None
- def _key_fallback(self, key, err):
+ @property
+ def keys(self):
+ return RMKeyView(self)
+
+ def _for_freeze(self):
+ raise NotImplementedError()
+
+ def _key_fallback(self, key, err, raiseerr=True):
+ assert raiseerr
if isinstance(key, int):
util.raise_(IndexError(key), replace_context=err)
else:
util.raise_(KeyError(key), replace_context=err)
+ def _warn_for_nonint(self, key):
+ raise TypeError(
+ "TypeError: tuple indices must be integers or slices, not %s"
+ % type(key).__name__
+ )
-class SimpleResultMetaData(ResultMetaData):
- __slots__ = "keys", "_keymap", "_processors"
-
- def __init__(self, keys, extra=None):
- self.keys = list(keys)
-
- len_keys = len(keys)
-
- self._keymap = {
- name: (index, name) for index, name in enumerate(self.keys)
- }
- if not _baserow_usecext:
- self._keymap.update(
- {
- index: (index, None, self.keys[index])
- for index in range(len_keys)
- }
- )
- # TODO: negative indexes? test coverage?
- if extra:
- for key, ex in zip(keys, extra):
- rec = self._keymap[key]
- self._keymap.update({e: rec for e in ex})
- self._processors = [None] * len(keys)
-
- def __getstate__(self):
- return {"keys": self.keys}
+ def _index_for_key(self, keys, raiseerr):
+ raise NotImplementedError()
- def __setstate__(self, state):
- self.__init__(state["keys"])
+ def _metadata_for_keys(self, key):
+ raise NotImplementedError()
- def _has_key(self, key):
- return key in self._keymap
+ def _reduce(self, keys):
+ raise NotImplementedError()
- def _contains(self, value, row):
- return value in row._data
+ def _getter(self, key, raiseerr=True):
+ index = self._index_for_key(key, raiseerr)
-def result_tuple(fields, extra=None):
- parent = SimpleResultMetaData(fields, extra)
- return functools.partial(Row, parent, parent._processors, parent._keymap)
+ if index is not None:
+ return operator.methodcaller("_get_by_key_impl_mapping", index)
+ else:
+ return None
+ def _row_as_tuple_getter(self, keys):
+ indexes = list(self._indexes_for_keys(keys))
+ return _row_as_tuple(*indexes)
-class CursorResultMetaData(ResultMetaData):
- """Handle cursor.description, applying additional info from an execution
- context."""
- __slots__ = (
- "_keymap",
- "case_sensitive",
- "matched_on_name",
- "_processors",
- "keys",
- )
+class RMKeyView(collections_abc.KeysView):
+ __slots__ = ("_parent", "_keys")
- def _adapt_to_context(self, context):
- """When using a cached result metadata against a new context,
- we need to rewrite the _keymap so that it has the specific
- Column objects in the new context inside of it. this accommodates
- for select() constructs that contain anonymized columns and
- are cached.
+ def __init__(self, parent):
+ self._parent = parent
+ self._keys = [k for k in parent._keys if k is not None]
- """
- if not context.compiled._result_columns:
- return self
-
- compiled_statement = context.compiled.statement
- invoked_statement = context.invoked_statement
-
- # same statement was invoked as the one we cached against,
- # return self
- if compiled_statement is invoked_statement:
- return self
-
- # make a copy and add the columns from the invoked statement
- # to the result map.
- md = self.__class__.__new__(self.__class__)
-
- md._keymap = self._keymap.copy()
-
- # match up new columns positionally to the result columns
- for existing, new in zip(
- context.compiled._result_columns,
- invoked_statement._exported_columns_iterator(),
- ):
- md._keymap[new] = md._keymap[existing[RM_NAME]]
-
- md.case_sensitive = self.case_sensitive
- md.matched_on_name = self.matched_on_name
- md._processors = self._processors
- md.keys = self.keys
- return md
-
- def __init__(self, parent, cursor_description):
- context = parent.context
- dialect = context.dialect
- self.case_sensitive = dialect.case_sensitive
- self.matched_on_name = False
-
- if context.result_column_struct:
- (
- result_columns,
- cols_are_ordered,
- textual_ordered,
- loose_column_name_matching,
- ) = context.result_column_struct
- num_ctx_cols = len(result_columns)
- else:
- result_columns = (
- cols_are_ordered
- ) = (
- num_ctx_cols
- ) = loose_column_name_matching = textual_ordered = False
-
- # merge cursor.description with the column info
- # present in the compiled structure, if any
- raw = self._merge_cursor_description(
- context,
- cursor_description,
- result_columns,
- num_ctx_cols,
- cols_are_ordered,
- textual_ordered,
- loose_column_name_matching,
- )
+ def __len__(self):
+ return len(self._keys)
- self._keymap = {}
- if not _baserow_usecext:
- # keymap indexes by integer index: this is only used
- # in the pure Python BaseRow.__getitem__
- # implementation to avoid an expensive
- # isinstance(key, util.int_types) in the most common
- # case path
+ def __repr__(self):
+ return "{0.__class__.__name__}({0._keys!r})".format(self)
- len_raw = len(raw)
+ def __iter__(self):
+ return iter(self._keys)
- self._keymap.update(
- [
- (metadata_entry[MD_INDEX], metadata_entry)
- for metadata_entry in raw
- ]
- + [
- (metadata_entry[MD_INDEX] - len_raw, metadata_entry)
- for metadata_entry in raw
- ]
- )
+ def __contains__(self, item):
+ if not _baserow_usecext and isinstance(item, int):
+ return False
- # processors in key order for certain per-row
- # views like __iter__ and slices
- self._processors = [
- metadata_entry[MD_PROCESSOR] for metadata_entry in raw
- ]
+ # note this also includes special key fallback behaviors
+ # which also don't seem to be tested in test_resultset right now
+ return self._parent._has_key(item)
- # keymap by primary string...
- by_key = dict(
- [
- (metadata_entry[MD_LOOKUP_KEY], metadata_entry)
- for metadata_entry in raw
- ]
- )
+ def __eq__(self, other):
+ return list(other) == list(self)
- # for compiled SQL constructs, copy additional lookup keys into
- # the key lookup map, such as Column objects, labels,
- # column keys and other names
- if num_ctx_cols:
-
- # if by-primary-string dictionary smaller (or bigger?!) than
- # number of columns, assume we have dupes, rewrite
- # dupe records with "None" for index which results in
- # ambiguous column exception when accessed.
- if len(by_key) != num_ctx_cols:
- # new in 1.4: get the complete set of all possible keys,
- # strings, objects, whatever, that are dupes across two
- # different records, first.
- index_by_key = {}
- dupes = set()
- for metadata_entry in raw:
- for key in (metadata_entry[MD_RENDERED_NAME],) + (
- metadata_entry[MD_OBJECTS] or ()
- ):
- if not self.case_sensitive and isinstance(
- key, util.string_types
- ):
- key = key.lower()
- idx = metadata_entry[MD_INDEX]
- # if this key has been associated with more than one
- # positional index, it's a dupe
- if index_by_key.setdefault(key, idx) != idx:
- dupes.add(key)
-
- # then put everything we have into the keymap excluding only
- # those keys that are dupes.
- self._keymap.update(
- [
- (obj_elem, metadata_entry)
- for metadata_entry in raw
- if metadata_entry[MD_OBJECTS]
- for obj_elem in metadata_entry[MD_OBJECTS]
- if obj_elem not in dupes
- ]
- )
+ def __ne__(self, other):
+ return list(other) != list(self)
- # then for the dupe keys, put the "ambiguous column"
- # record into by_key.
- by_key.update({key: (None, (), key) for key in dupes})
- else:
- # no dupes - copy secondary elements from compiled
- # columns into self._keymap
- self._keymap.update(
- [
- (obj_elem, metadata_entry)
- for metadata_entry in raw
- if metadata_entry[MD_OBJECTS]
- for obj_elem in metadata_entry[MD_OBJECTS]
- ]
- )
+class SimpleResultMetaData(ResultMetaData):
+ """result metadata for in-memory collections."""
- # update keymap with primary string names taking
- # precedence
- self._keymap.update(by_key)
-
- # update keymap with "translated" names (sqlite-only thing)
- if not num_ctx_cols and context._translate_colname:
- self._keymap.update(
- [
- (
- metadata_entry[MD_UNTRANSLATED],
- self._keymap[metadata_entry[MD_LOOKUP_KEY]],
- )
- for metadata_entry in raw
- if metadata_entry[MD_UNTRANSLATED]
- ]
- )
+ __slots__ = (
+ "_keys",
+ "_keymap",
+ "_processors",
+ "_tuplefilter",
+ "_translated_indexes",
+ "_unique_filters",
+ )
- def _merge_cursor_description(
+ def __init__(
self,
- context,
- cursor_description,
- result_columns,
- num_ctx_cols,
- cols_are_ordered,
- textual_ordered,
- loose_column_name_matching,
+ keys,
+ extra=None,
+ _processors=None,
+ _tuplefilter=None,
+ _translated_indexes=None,
+ _unique_filters=None,
):
- """Merge a cursor.description with compiled result column information.
-
- There are at least four separate strategies used here, selected
- depending on the type of SQL construct used to start with.
-
- The most common case is that of the compiled SQL expression construct,
- which generated the column names present in the raw SQL string and
- which has the identical number of columns as were reported by
- cursor.description. In this case, we assume a 1-1 positional mapping
- between the entries in cursor.description and the compiled object.
- This is also the most performant case as we disregard extracting /
- decoding the column names present in cursor.description since we
- already have the desired name we generated in the compiled SQL
- construct.
-
- The next common case is that of the completely raw string SQL,
- such as passed to connection.execute(). In this case we have no
- compiled construct to work with, so we extract and decode the
- names from cursor.description and index those as the primary
- result row target keys.
-
- The remaining fairly common case is that of the textual SQL
- that includes at least partial column information; this is when
- we use a :class:`_expression.TextualSelect` construct.
- This construct may have
- unordered or ordered column information. In the ordered case, we
- merge the cursor.description and the compiled construct's information
- positionally, and warn if there are additional description names
- present, however we still decode the names in cursor.description
- as we don't have a guarantee that the names in the columns match
- on these. In the unordered case, we match names in cursor.description
- to that of the compiled construct based on name matching.
- In both of these cases, the cursor.description names and the column
- expression objects and names are indexed as result row target keys.
-
- The final case is much less common, where we have a compiled
- non-textual SQL expression construct, but the number of columns
- in cursor.description doesn't match what's in the compiled
- construct. We make the guess here that there might be textual
- column expressions in the compiled construct that themselves include
- a comma in them causing them to split. We do the same name-matching
- as with textual non-ordered columns.
-
- The name-matched system of merging is the same as that used by
- SQLAlchemy for all cases up through te 0.9 series. Positional
- matching for compiled SQL expressions was introduced in 1.0 as a
- major performance feature, and positional matching for textual
- :class:`_expression.TextualSelect` objects in 1.1.
- As name matching is no longer
- a common case, it was acceptable to factor it into smaller generator-
- oriented methods that are easier to understand, but incur slightly
- more performance overhead.
-
- """
+ self._keys = list(keys)
+ self._tuplefilter = _tuplefilter
+ self._translated_indexes = _translated_indexes
+ self._unique_filters = _unique_filters
+ len_keys = len(self._keys)
- case_sensitive = context.dialect.case_sensitive
-
- if (
- num_ctx_cols
- and cols_are_ordered
- and not textual_ordered
- and num_ctx_cols == len(cursor_description)
- ):
- self.keys = [elem[0] for elem in result_columns]
- # pure positional 1-1 case; doesn't need to read
- # the names from cursor.description
- return [
+ if extra:
+ recs_names = [
(
- idx,
- rmap_entry[RM_OBJECTS],
- rmap_entry[RM_NAME].lower()
- if not case_sensitive
- else rmap_entry[RM_NAME],
- rmap_entry[RM_RENDERED_NAME],
- context.get_result_processor(
- rmap_entry[RM_TYPE],
- rmap_entry[RM_RENDERED_NAME],
- cursor_description[idx][1],
- ),
- None,
+ (index, name, index - len_keys) + extras,
+ (index, name, extras),
)
- for idx, rmap_entry in enumerate(result_columns)
+ for index, (name, extras) in enumerate(zip(self._keys, extra))
]
else:
- # name-based or text-positional cases, where we need
- # to read cursor.description names
- if textual_ordered:
- # textual positional case
- raw_iterator = self._merge_textual_cols_by_position(
- context, cursor_description, result_columns
- )
- elif num_ctx_cols:
- # compiled SQL with a mismatch of description cols
- # vs. compiled cols, or textual w/ unordered columns
- raw_iterator = self._merge_cols_by_name(
- context,
- cursor_description,
- result_columns,
- loose_column_name_matching,
- )
- else:
- # no compiled SQL, just a raw string
- raw_iterator = self._merge_cols_by_none(
- context, cursor_description
- )
-
- return [
- (
- idx,
- obj,
- cursor_colname,
- cursor_colname,
- context.get_result_processor(
- mapped_type, cursor_colname, coltype
- ),
- untranslated,
- )
- for (
- idx,
- cursor_colname,
- mapped_type,
- coltype,
- obj,
- untranslated,
- ) in raw_iterator
+ recs_names = [
+ ((index, name, index - len_keys), (index, name, ()))
+ for index, name in enumerate(self._keys)
]
- def _colnames_from_description(self, context, cursor_description):
- """Extract column names and data types from a cursor.description.
-
- Applies unicode decoding, column translation, "normalization",
- and case sensitivity rules to the names based on the dialect.
-
- """
-
- dialect = context.dialect
- case_sensitive = dialect.case_sensitive
- translate_colname = context._translate_colname
- description_decoder = (
- dialect._description_decoder
- if dialect.description_encoding
- else None
- )
- normalize_name = (
- dialect.normalize_name if dialect.requires_name_normalize else None
- )
- untranslated = None
-
- self.keys = []
-
- for idx, rec in enumerate(cursor_description):
- colname = rec[0]
- coltype = rec[1]
-
- if description_decoder:
- colname = description_decoder(colname)
+ self._keymap = {key: rec for keys, rec in recs_names for key in keys}
- if translate_colname:
- colname, untranslated = translate_colname(colname)
-
- if normalize_name:
- colname = normalize_name(colname)
-
- self.keys.append(colname)
- if not case_sensitive:
- colname = colname.lower()
-
- yield idx, colname, untranslated, coltype
-
- def _merge_textual_cols_by_position(
- self, context, cursor_description, result_columns
- ):
- num_ctx_cols = len(result_columns) if result_columns else None
-
- if num_ctx_cols > len(cursor_description):
- util.warn(
- "Number of columns in textual SQL (%d) is "
- "smaller than number of columns requested (%d)"
- % (num_ctx_cols, len(cursor_description))
- )
- seen = set()
- for (
- idx,
- colname,
- untranslated,
- coltype,
- ) in self._colnames_from_description(context, cursor_description):
- if idx < num_ctx_cols:
- ctx_rec = result_columns[idx]
- obj = ctx_rec[RM_OBJECTS]
- mapped_type = ctx_rec[RM_TYPE]
- if obj[0] in seen:
- raise exc.InvalidRequestError(
- "Duplicate column expression requested "
- "in textual SQL: %r" % obj[0]
- )
- seen.add(obj[0])
- else:
- mapped_type = sqltypes.NULLTYPE
- obj = None
- yield idx, colname, mapped_type, coltype, obj, untranslated
-
- def _merge_cols_by_name(
- self,
- context,
- cursor_description,
- result_columns,
- loose_column_name_matching,
- ):
- dialect = context.dialect
- case_sensitive = dialect.case_sensitive
- match_map = self._create_description_match_map(
- result_columns, case_sensitive, loose_column_name_matching
+ if _processors is None:
+ self._processors = [None] * len_keys
+ else:
+ self._processors = _processors
+
+ def _for_freeze(self):
+ unique_filters = self._unique_filters
+ if unique_filters and self._tuplefilter:
+ unique_filters = self._tuplefilter(unique_filters)
+
+ # TODO: are we freezing the result with or without uniqueness
+ # applied?
+ return SimpleResultMetaData(
+ self._keys,
+ extra=[self._keymap[key][2] for key in self._keys],
+ _unique_filters=unique_filters,
)
- self.matched_on_name = True
- for (
- idx,
- colname,
- untranslated,
- coltype,
- ) in self._colnames_from_description(context, cursor_description):
- try:
- ctx_rec = match_map[colname]
- except KeyError:
- mapped_type = sqltypes.NULLTYPE
- obj = None
- else:
- obj = ctx_rec[1]
- mapped_type = ctx_rec[2]
- yield idx, colname, mapped_type, coltype, obj, untranslated
-
- @classmethod
- def _create_description_match_map(
- cls,
- result_columns,
- case_sensitive=True,
- loose_column_name_matching=False,
- ):
- """when matching cursor.description to a set of names that are present
- in a Compiled object, as is the case with TextualSelect, get all the
- names we expect might match those in cursor.description.
- """
-
- d = {}
- for elem in result_columns:
- key = elem[RM_RENDERED_NAME]
-
- if not case_sensitive:
- key = key.lower()
- if key in d:
- # conflicting keyname - just add the column-linked objects
- # to the existing record. if there is a duplicate column
- # name in the cursor description, this will allow all of those
- # objects to raise an ambiguous column error
- e_name, e_obj, e_type = d[key]
- d[key] = e_name, e_obj + elem[RM_OBJECTS], e_type
- else:
- d[key] = (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE])
-
- if loose_column_name_matching:
- # when using a textual statement with an unordered set
- # of columns that line up, we are expecting the user
- # to be using label names in the SQL that match to the column
- # expressions. Enable more liberal matching for this case;
- # duplicate keys that are ambiguous will be fixed later.
- for r_key in elem[RM_OBJECTS]:
- d.setdefault(
- r_key, (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE])
- )
-
- return d
-
- def _merge_cols_by_none(self, context, cursor_description):
- for (
- idx,
- colname,
- untranslated,
- coltype,
- ) in self._colnames_from_description(context, cursor_description):
- yield idx, colname, sqltypes.NULLTYPE, coltype, None, untranslated
+ def __getstate__(self):
+ return {
+ "_keys": self._keys,
+ "_translated_indexes": self._translated_indexes,
+ }
- def _key_fallback(self, key, err, raiseerr=True):
- if raiseerr:
- util.raise_(
- exc.NoSuchColumnError(
- "Could not locate column in row for column '%s'"
- % util.string_or_unprintable(key)
- ),
- replace_context=err,
- )
+ def __setstate__(self, state):
+ if state["_translated_indexes"]:
+ _translated_indexes = state["_translated_indexes"]
+ _tuplefilter = tuplegetter(*_translated_indexes)
else:
- return None
-
- def _raise_for_ambiguous_column_name(self, rec):
- raise exc.InvalidRequestError(
- "Ambiguous column name '%s' in "
- "result set column descriptions" % rec[MD_LOOKUP_KEY]
+ _translated_indexes = _tuplefilter = None
+ self.__init__(
+ state["_keys"],
+ _translated_indexes=_translated_indexes,
+ _tuplefilter=_tuplefilter,
)
- def _warn_for_nonint(self, key):
- raise TypeError(
- "TypeError: tuple indices must be integers or slices, not %s"
- % type(key).__name__
- )
+ def _contains(self, value, row):
+ return value in row._data
- def _getter(self, key, raiseerr=True):
+ def _index_for_key(self, key, raiseerr=True):
try:
rec = self._keymap[key]
except KeyError as ke:
rec = self._key_fallback(key, ke, raiseerr)
- if rec is None:
- return None
-
- index, obj = rec[0:2]
-
- if index is None:
- self._raise_for_ambiguous_column_name(rec)
-
- return operator.methodcaller("_get_by_key_impl_mapping", index)
-
- def _tuple_getter(self, keys, raiseerr=True):
- """Given a list of keys, return a callable that will deliver a tuple.
-
- This is strictly used by the ORM and the keys are Column objects.
- However, this might be some nice-ish feature if we could find a very
- clean way of presenting it.
- note that in the new world of "row._mapping", this is a mapping-getter.
- maybe the name should indicate that somehow.
+ return rec[0]
+ def _indexes_for_keys(self, keys):
+ for rec in self._metadata_for_keys(keys):
+ yield rec[0]
- """
- indexes = []
+ def _metadata_for_keys(self, keys):
for key in keys:
- if isinstance(key, int):
- indexes.append(key)
- continue
try:
rec = self._keymap[key]
except KeyError as ke:
- rec = self._key_fallback(key, ke, raiseerr)
- if rec is None:
- return None
-
- index, obj = rec[0:2]
-
- if index is None:
- self._raise_for_ambiguous_column_name(obj)
- indexes.append(index)
-
- if _baserow_usecext:
- return _tuplegetter(*indexes)
- else:
- return self._pure_py_tuplegetter(*indexes)
-
- def _pure_py_tuplegetter(self, *indexes):
- getters = [
- operator.methodcaller("_get_by_key_impl_mapping", index)
- for index in indexes
- ]
- return lambda rec: tuple(getter(rec) for getter in getters)
-
- def __getstate__(self):
- return {
- "_keymap": {
- key: (rec[MD_INDEX], _UNPICKLED, key)
- for key, rec in self._keymap.items()
- if isinstance(key, util.string_types + util.int_types)
- },
- "keys": self.keys,
- "case_sensitive": self.case_sensitive,
- "matched_on_name": self.matched_on_name,
- }
+ rec = self._key_fallback(key, ke, True)
- def __setstate__(self, state):
- self._processors = [None for _ in range(len(state["keys"]))]
- self._keymap = state["_keymap"]
+ yield rec
- self.keys = state["keys"]
- self.case_sensitive = state["case_sensitive"]
- self.matched_on_name = state["matched_on_name"]
+ def _reduce(self, keys):
+ try:
+ metadata_for_keys = [self._keymap[key] for key in keys]
+ except KeyError as ke:
+ self._key_fallback(ke.args[0], ke, True)
+ indexes, new_keys, extra = zip(*metadata_for_keys)
-class LegacyCursorResultMetaData(CursorResultMetaData):
- def _contains(self, value, row):
- key = value
- if key in self._keymap:
- util.warn_deprecated_20(
- "Using the 'in' operator to test for string or column "
- "keys, or integer indexes, in a :class:`.Row` object is "
- "deprecated and will "
- "be removed in a future release. "
- "Use the `Row._fields` or `Row._mapping` attribute, i.e. "
- "'key in row._fields'",
- )
- return True
- else:
- return self._key_fallback(key, None, False) is not None
+ if self._translated_indexes:
+ indexes = [self._translated_indexes[idx] for idx in indexes]
- def _key_fallback(self, key, err, raiseerr=True):
- map_ = self._keymap
- result = None
-
- if isinstance(key, util.string_types):
- result = map_.get(key if self.case_sensitive else key.lower())
- elif isinstance(key, expression.ColumnElement):
- if (
- key._label
- and (key._label if self.case_sensitive else key._label.lower())
- in map_
- ):
- result = map_[
- key._label if self.case_sensitive else key._label.lower()
- ]
- elif (
- hasattr(key, "name")
- and (key.name if self.case_sensitive else key.name.lower())
- in map_
- ):
- # match is only on name.
- result = map_[
- key.name if self.case_sensitive else key.name.lower()
- ]
+ tup = tuplegetter(*indexes)
- # search extra hard to make sure this
- # isn't a column/label name overlap.
- # this check isn't currently available if the row
- # was unpickled.
- if result is not None and result[MD_OBJECTS] not in (
- None,
- _UNPICKLED,
- ):
- for obj in result[MD_OBJECTS]:
- if key._compare_name_for_result(obj):
- break
- else:
- result = None
- if result is not None:
- if result[MD_OBJECTS] is _UNPICKLED:
- util.warn_deprecated(
- "Retreiving row values using Column objects from a "
- "row that was unpickled is deprecated; adequate "
- "state cannot be pickled for this to be efficient. "
- "This usage will raise KeyError in a future release.",
- version="1.4",
- )
- else:
- util.warn_deprecated(
- "Retreiving row values using Column objects with only "
- "matching names as keys is deprecated, and will raise "
- "KeyError in a future release; only Column "
- "objects that are explicitly part of the statement "
- "object should be used.",
- version="1.4",
- )
- if result is None:
- if raiseerr:
- util.raise_(
- exc.NoSuchColumnError(
- "Could not locate column in row for column '%s'"
- % util.string_or_unprintable(key)
- ),
- replace_context=err,
- )
- else:
- return None
- else:
- map_[key] = result
- return result
-
- def _warn_for_nonint(self, key):
- util.warn_deprecated_20(
- "Using non-integer/slice indices on Row is deprecated and will "
- "be removed in version 2.0; please use row._mapping[<key>], or "
- "the mappings() accessor on the sqlalchemy.future result object.",
- stacklevel=4,
+ new_metadata = SimpleResultMetaData(
+ new_keys,
+ extra=extra,
+ _tuplefilter=tup,
+ _translated_indexes=indexes,
+ _processors=self._processors,
+ _unique_filters=self._unique_filters,
)
- def _has_key(self, key):
- if key in self._keymap:
- return True
- else:
- return self._key_fallback(key, None, False) is not None
-
-
-class CursorFetchStrategy(object):
- """Define a cursor strategy for a result object.
-
- Subclasses define different ways of fetching rows, typically but
- not necessarily using a DBAPI cursor object.
+ return new_metadata
- .. versionadded:: 1.4
- """
-
- __slots__ = ("dbapi_cursor", "cursor_description")
-
- def __init__(self, dbapi_cursor, cursor_description):
- self.dbapi_cursor = dbapi_cursor
- self.cursor_description = cursor_description
-
- @classmethod
- def create(cls, result):
- raise NotImplementedError()
-
- def soft_close(self, result):
- raise NotImplementedError()
-
- def hard_close(self, result):
- raise NotImplementedError()
-
- def fetchone(self):
- raise NotImplementedError()
-
- def fetchmany(self, size=None):
- raise NotImplementedError()
-
- def fetchall(self):
- raise NotImplementedError()
-
-
-class NoCursorDQLFetchStrategy(CursorFetchStrategy):
- """Cursor strategy for a DQL result that has no open cursor.
-
- This is a result set that can return rows, i.e. for a SELECT, or for an
- INSERT, UPDATE, DELETE that includes RETURNING. However it is in the state
- where the cursor is closed and no rows remain available. The owning result
- object may or may not be "hard closed", which determines if the fetch
- methods send empty results or raise for closed result.
-
- """
-
- __slots__ = ("closed",)
-
- def __init__(self, closed):
- self.closed = closed
- self.cursor_description = None
-
- def soft_close(self, result):
- pass
-
- def hard_close(self, result):
- self.closed = True
-
- def fetchone(self):
- return self._non_result(None)
-
- def fetchmany(self, size=None):
- return self._non_result([])
+def result_tuple(fields, extra=None):
+ parent = SimpleResultMetaData(fields, extra)
+ return functools.partial(Row, parent, parent._processors, parent._keymap)
- def fetchall(self):
- return self._non_result([])
- def _non_result(self, default, err=None):
- if self.closed:
- util.raise_(
- exc.ResourceClosedError("This result object is closed."),
- replace_context=err,
- )
- else:
- return default
+# a symbol that indicates to internal Result methods that
+# "no row is returned". We can't use None for those cases where a scalar
+# filter is applied to rows.
+_NO_ROW = util.symbol("NO_ROW")
-class NoCursorDMLFetchStrategy(CursorFetchStrategy):
- """Cursor strategy for a DML result that has no open cursor.
+class Result(InPlaceGenerative):
+ """Represent a set of database results.
- This is a result set that does not return rows, i.e. for an INSERT,
- UPDATE, DELETE that does not include RETURNING.
+ .. versionadded:: 1.4 The :class:`.Result` object provides a completely
+ updated usage model and calling facade for SQLAlchemy Core and
+ SQLAlchemy ORM. In Core, it forms the basis of the
+ :class:`.CursorResult` object which replaces the previous
+ :class:`.ResultProxy` interface.
"""
- __slots__ = ("closed",)
+ _process_row = Row
- def __init__(self, closed):
- self.closed = closed
- self.cursor_description = None
+ _row_logging_fn = None
- def soft_close(self, result):
- pass
+ _column_slice_filter = None
+ _post_creational_filter = None
+ _unique_filter_state = None
+ _no_scalar_onerow = False
+ _yield_per = None
- def hard_close(self, result):
- self.closed = True
+ def __init__(self, cursor_metadata):
+ self._metadata = cursor_metadata
- def fetchone(self):
- return self._non_result(None)
+ def _soft_close(self, hard=False):
+ raise NotImplementedError()
- def fetchmany(self, size=None):
- return self._non_result([])
+ def keys(self):
+ """Return an iterable view which yields the string keys that would
+ be represented by each :class:`.Row`.
- def fetchall(self):
- return self._non_result([])
+ The view also can be tested for key containment using the Python
+ ``in`` operator, which will test both for the string keys represented
+ in the view, as well as for alternate keys such as column objects.
- def _non_result(self, default, err=None):
- util.raise_(
- exc.ResourceClosedError(
- "This result object does not return rows. "
- "It has been closed automatically."
- ),
- replace_context=err,
- )
+ .. versionchanged:: 1.4 a key view object is returned rather than a
+ plain list.
-class DefaultCursorFetchStrategy(CursorFetchStrategy):
- """Call fetch methods from a DBAPI cursor.
+ """
+ return self._metadata.keys
- Alternate versions of this class may instead buffer the rows from
- cursors or not use cursors at all.
+ @_generative
+ def yield_per(self, num):
+ """Configure the row-fetching strategy to fetch num rows at a time.
- """
+ This impacts the underlying behavior of the result when iterating over
+ the result object, or otherwise making use of methods such as
+ :meth:`_engine.Result.fetchone` that return one row at a time. Data
+ from the underlying cursor or other data source will be buffered up to
+ this many rows in memory, and the buffered collection will then be
+ yielded out one row at a time or as many rows are requested. Each time
+ the buffer clears, it will be refreshed to this many rows or as many
+ rows remain if fewer remain.
- @classmethod
- def create(cls, result):
- dbapi_cursor = result.cursor
- description = dbapi_cursor.description
+ The :meth:`_engine.Result.yield_per` method is generally used in
+ conjunction with the
+ :paramref:`_engine.Connection.execution_options.stream_results`
+ execution option, which will allow the database dialect in use to make
+ use of a server side cursor, if the DBAPI supports it.
- if description is None:
- return NoCursorDMLFetchStrategy(False)
- else:
- return cls(dbapi_cursor, description)
+ Most DBAPIs do not use server side cursors by default, which means all
+ rows will be fetched upfront from the database regardless of the
+ :meth:`_engine.Result.yield_per` setting. However,
+ :meth:`_engine.Result.yield_per` may still be useful in that it batches
+ the SQLAlchemy-side processing of the raw data from the database, and
+ additionally when used for ORM scenarios will batch the conversion of
+ database rows into ORM entity rows.
- def soft_close(self, result):
- result.cursor_strategy = NoCursorDQLFetchStrategy(False)
- def hard_close(self, result):
- result.cursor_strategy = NoCursorDQLFetchStrategy(True)
+ .. versionadded:: 1.4
- def fetchone(self):
- return self.dbapi_cursor.fetchone()
+ :param num: number of rows to fetch each time the buffer is refilled.
+ If set to a value below 1, fetches all rows for the next buffer.
- def fetchmany(self, size=None):
- if size is None:
- return self.dbapi_cursor.fetchmany()
- else:
- return self.dbapi_cursor.fetchmany(size)
+ """
+ self._yield_per = num
+
+ @_generative
+ def unique(self, strategy=None):
+ """Apply unique filtering to the objects returned by this
+ :class:`_engine.Result`.
+
+ When this filter is applied with no arguments, the rows or objects
+ returned will be filtered such that each row is returned uniquely. The
+ algorithm used to determine this uniqueness is by default the Python
+ hashing identity of the whole tuple. In some cases a specialized
+ per-entity hashing scheme may be used, such as when using the ORM, a
+ scheme is applied which works against the primary key identity of
+ returned objects.
+
+ The unique filter is applied **after all other filters**, which means
+ if the columns returned have been refined using a method such as the
+ :meth:`_engine.Result.columns` or :meth:`_engine.Result.scalars`
+ method, the uniquing is applied to **only the column or columns
+ returned**. This occurs regardless of the order in which these
+ methods have been called upon the :class:`_engine.Result` object.
+
+ The unique filter also changes the calculus used for methods like
+ :meth:`_engine.Result.fetchmany` and :meth:`_engine.Result.partitions`.
+ When using :meth:`_engine.Result.unique`, these methods will continue
+ to yield the number of rows or objects requested, after uniquing
+ has been applied. However, this necessarily impacts the buffering
+ behavior of the underlying cursor or datasource, such that multiple
+ underlying calls to ``cursor.fetchmany()`` may be necessary in order
+ to accumulate enough objects in order to provide a unique collection
+ of the requested size.
+
+ :param strategy: a callable that will be applied to rows or objects
+ being iterated, which should return an object that represents the
+ unique value of the row. A Python ``set()`` is used to store
+ these identities. If not passed, a default uniqueness strategy
+ is used which may have been assembled by the source of this
+ :class:`_engine.Result` object.
- def fetchall(self):
- return self.dbapi_cursor.fetchall()
+ """
+ self._unique_filter_state = (set(), strategy)
+ @HasMemoized.memoized_attribute
+ def _unique_strategy(self):
+ uniques, strategy = self._unique_filter_state
-class BufferedRowCursorFetchStrategy(DefaultCursorFetchStrategy):
- """A cursor fetch strategy with row buffering behavior.
+ if not strategy and self._metadata._unique_filters:
+ filters = self._metadata._unique_filters
+ if self._metadata._tuplefilter:
+ filters = self._metadata._tuplefilter(filters)
- This strategy buffers the contents of a selection of rows
- before ``fetchone()`` is called. This is to allow the results of
- ``cursor.description`` to be available immediately, when
- interfacing with a DB-API that requires rows to be consumed before
- this information is available (currently psycopg2, when used with
- server-side cursors).
+ strategy = operator.methodcaller("_filter_on_values", filters)
+ return uniques, strategy
- The pre-fetching behavior fetches only one row initially, and then
- grows its buffer size by a fixed amount with each successive need
- for additional rows up the ``max_row_buffer`` size, which defaults
- to 1000::
+ def columns(self, *col_expressions):
+ r"""Establish the columns that should be returned in each row.
- with psycopg2_engine.connect() as conn:
+ This method may be used to limit the columns returned as well
+ as to reorder them. The given list of expressions are normally
+ a series of integers or string key names. They may also be
+ appropriate :class:`.ColumnElement` objects which correspond to
+ a given statement construct.
- result = conn.execution_options(
- stream_results=True, max_row_buffer=50
- ).execute(text("select * from table"))
+ E.g.::
- .. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.
+ statement = select(table.c.x, table.c.y, table.c.z)
+ result = connection.execute(statement)
- .. seealso::
+ for z, y in result.columns('z', 'y'):
+ # ...
- :ref:`psycopg2_execution_options`
- """
- __slots__ = ("_max_row_buffer", "_rowbuffer", "_bufsize")
+ Example of using the column objects from the statement itself::
- def __init__(
- self, max_row_buffer, dbapi_cursor, description, initial_buffer
- ):
- super(BufferedRowCursorFetchStrategy, self).__init__(
- dbapi_cursor, description
- )
+ for z, y in result.columns(
+ statement.selected_columns.c.z,
+ statement.selected_columns.c.y
+ ):
+ # ...
- self._max_row_buffer = max_row_buffer
- self._growth_factor = 5
- self._rowbuffer = initial_buffer
+ .. versionadded:: 1.4
- self._bufsize = min(self._max_row_buffer, self._growth_factor)
+ :param \*col_expressions: indicates columns to be returned. Elements
+ may be integer row indexes, string column names, or appropriate
+ :class:`.ColumnElement` objects corresponding to a select construct.
- @classmethod
- def create(cls, result):
- """Buffered row strategy has to buffer the first rows *before*
- cursor.description is fetched so that it works with named cursors
- correctly
+ :return: this :class:`_engine.Result` object with the modifications
+ given.
"""
+ return self._column_slices(col_expressions)
- dbapi_cursor = result.cursor
+ def partitions(self, size=None):
+ """Iterate through sub-lists of rows of the size given.
- initial_buffer = collections.deque(dbapi_cursor.fetchmany(1))
+ Each list will be of the size given, excluding the last list to
+ be yielded, which may have a small number of rows. No empty
+ lists will be yielded.
- description = dbapi_cursor.description
+ The result object is automatically closed when the iterator
+ is fully consumed.
- if description is None:
- return NoCursorDMLFetchStrategy(False)
- else:
- max_row_buffer = result.context.execution_options.get(
- "max_row_buffer", 1000
- )
- return cls(
- max_row_buffer, dbapi_cursor, description, initial_buffer
- )
+ Note that the backend driver will usually buffer the entire result
+ ahead of time unless the
+ :paramref:`.Connection.execution_options.stream_results` execution
+ option is used indicating that the driver should not pre-buffer
+ results, if possible. Not all drivers support this option and
+ the option is silently ignored for those who do not.
- def __buffer_rows(self):
- size = self._bufsize
- self._rowbuffer = collections.deque(self.dbapi_cursor.fetchmany(size))
- if size < self._max_row_buffer:
- self._bufsize = min(
- self._max_row_buffer, size * self._growth_factor
- )
+ .. versionadded:: 1.4
- def soft_close(self, result):
- self._rowbuffer.clear()
- super(BufferedRowCursorFetchStrategy, self).soft_close(result)
+ :param size: indicate the maximum number of rows to be present
+ in each list yielded. If None, makes use of the value set by
+ :meth:`_engine.Result.yield_per`, if present, otherwise uses the
+ :meth:`_engine.Result.fetchmany` default which may be backend
+ specific.
- def hard_close(self, result):
- self._rowbuffer.clear()
- super(BufferedRowCursorFetchStrategy, self).hard_close(result)
+ :return: iterator of lists
- def fetchone(self):
- if not self._rowbuffer:
- self.__buffer_rows()
- if not self._rowbuffer:
- return None
- return self._rowbuffer.popleft()
+ """
+ getter = self._manyrow_getter
- def fetchmany(self, size=None):
- if size is None:
- return self.fetchall()
- result = []
- for x in range(0, size):
- row = self.fetchone()
- if row is None:
+ while True:
+ partition = getter(self, size)
+ if partition:
+ yield partition
+ else:
break
- result.append(row)
- return result
- def fetchall(self):
- self._rowbuffer.extend(self.dbapi_cursor.fetchall())
- ret = self._rowbuffer
- self._rowbuffer = collections.deque()
- return ret
+ def scalars(self, index=0):
+ """Apply a scalars filter to returned rows.
+ When this filter is applied, fetching results will return Python scalar
+ objects from exactly one column of each row, rather than :class:`.Row`
+ objects or mappings.
-class FullyBufferedCursorFetchStrategy(DefaultCursorFetchStrategy):
- """A cursor strategy that buffers rows fully upon creation.
+ This filter cancels out other filters that may be established such
+ as that of :meth:`_engine.Result.mappings`.
- Used for operations where a result is to be delivered
- after the database conversation can not be continued,
- such as MSSQL INSERT...OUTPUT after an autocommit.
+ .. versionadded:: 1.4
- """
+ :param index: integer or row key indicating the column to be fetched
+ from each row, defaults to ``0`` indicating the first column.
- __slots__ = ("_rowbuffer",)
+ :return: this :class:`_engine.Result` object with modifications.
- def __init__(self, dbapi_cursor, description, initial_buffer=None):
- super(FullyBufferedCursorFetchStrategy, self).__init__(
- dbapi_cursor, description
- )
- if initial_buffer is not None:
- self._rowbuffer = collections.deque(initial_buffer)
- else:
- self._rowbuffer = self._buffer_rows()
-
- @classmethod
- def create_from_buffer(cls, dbapi_cursor, description, buffer):
- return cls(dbapi_cursor, description, buffer)
-
- def _buffer_rows(self):
- return collections.deque(self.dbapi_cursor.fetchall())
-
- def soft_close(self, result):
- self._rowbuffer.clear()
- super(FullyBufferedCursorFetchStrategy, self).soft_close(result)
-
- def hard_close(self, result):
- self._rowbuffer.clear()
- super(FullyBufferedCursorFetchStrategy, self).hard_close(result)
-
- def fetchone(self):
- if self._rowbuffer:
- return self._rowbuffer.popleft()
- else:
- return None
-
- def fetchmany(self, size=None):
- if size is None:
- return self.fetchall()
- result = []
- for x in range(0, size):
- row = self.fetchone()
- if row is None:
- break
- result.append(row)
+ """
+ result = self._column_slices([index])
+ result._post_creational_filter = operator.itemgetter(0)
+ result._no_scalar_onerow = True
return result
- def fetchall(self):
- ret = self._rowbuffer
- self._rowbuffer = collections.deque()
- return ret
-
-
-class BaseResult(object):
- """Base class for database result objects.
-
-
- :class:`.BaseResult` is the base class for the 1.x style
- :class:`_engine.ResultProxy` class as well as the 2.x style
- :class:`_future.Result` class.
-
- """
-
- out_parameters = None
- _metadata = None
- _soft_closed = False
- closed = False
-
- @classmethod
- def _create_for_context(cls, context):
- if context._is_future_result:
- obj = object.__new__(_future_Result)
- else:
- obj = object.__new__(ResultProxy)
- obj.__init__(context)
- return obj
-
- def __init__(self, context):
- self.context = context
- self.dialect = context.dialect
- self.cursor = context.cursor
- self.connection = context.root_connection
- self._echo = (
- self.connection._echo and context.engine._should_log_debug()
- )
- self._init_metadata()
-
- def _init_metadata(self):
- self.cursor_strategy = strat = self.context.get_result_cursor_strategy(
- self
- )
-
- if strat.cursor_description is not None:
- if self.context.compiled:
- if self.context.compiled._cached_metadata:
- cached_md = self.context.compiled._cached_metadata
- self._metadata = cached_md._adapt_to_context(self.context)
-
- else:
- self._metadata = (
- self.context.compiled._cached_metadata
- ) = self._cursor_metadata(self, strat.cursor_description)
- else:
- self._metadata = self._cursor_metadata(
- self, strat.cursor_description
- )
- if self._echo:
- self.context.engine.logger.debug(
- "Col %r", tuple(x[0] for x in strat.cursor_description)
- )
- # leave cursor open so that execution context can continue
- # setting up things like rowcount
-
- def keys(self):
- """Return the list of string keys that would represented by each
- :class:`.Row`."""
-
- if self._metadata:
- return self._metadata.keys
- else:
- return []
+ @_generative
+ def _column_slices(self, indexes):
+ self._metadata = self._metadata._reduce(indexes)
def _getter(self, key, raiseerr=True):
- try:
- getter = self._metadata._getter
- except AttributeError as err:
- return self.cursor_strategy._non_result(None, err)
- else:
- return getter(key, raiseerr)
-
- def _tuple_getter(self, key, raiseerr=True):
- try:
- getter = self._metadata._tuple_getter
- except AttributeError as err:
- return self.cursor_strategy._non_result(None, err)
- else:
- return getter(key, raiseerr)
-
- def _has_key(self, key):
- try:
- has_key = self._metadata._has_key
- except AttributeError as err:
- return self.cursor_strategy._non_result(None, err)
- else:
- return has_key(key)
-
- def _soft_close(self, hard=False):
- """Soft close this :class:`_engine.ResultProxy`.
+ """return a callable that will retrieve the given key from a
+ :class:`.Row`.
- This releases all DBAPI cursor resources, but leaves the
- ResultProxy "open" from a semantic perspective, meaning the
- fetchXXX() methods will continue to return empty results.
-
- This method is called automatically when:
+ """
+ return self._metadata._getter(key, raiseerr)
- * all result rows are exhausted using the fetchXXX() methods.
- * cursor.description is None.
+ def _tuple_getter(self, keys):
+ """return a callable that will retrieve the given keys from a
+ :class:`.Row`.
- This method is **not public**, but is documented in order to clarify
- the "autoclose" process used.
+ """
+ return self._metadata._row_as_tuple_getter(keys)
- .. versionadded:: 1.0.0
+ @_generative
+ def mappings(self):
+ """Apply a mappings filter to returned rows.
- .. seealso::
+ When this filter is applied, fetching rows will return
+ :class:`.RowMapping` objects instead of :class:`.Row` objects.
- :meth:`_engine.ResultProxy.close`
+ This filter cancels out other filters that may be established such
+ as that of :meth:`_engine.Result.scalars`.
+ .. versionadded:: 1.4
+ :return: this :class:`_engine.Result` object with modifications.
"""
+ self._post_creational_filter = operator.attrgetter("_mapping")
+ self._no_scalar_onerow = False
- if (not hard and self._soft_closed) or (hard and self.closed):
- return
+ def _row_getter(self):
+ process_row = self._process_row
+ metadata = self._metadata
- if hard:
- self.closed = True
- self.cursor_strategy.hard_close(self)
- else:
- self.cursor_strategy.soft_close(self)
-
- if not self._soft_closed:
- cursor = self.cursor
- self.cursor = None
- self.connection._safe_close_cursor(cursor)
- self._soft_closed = True
-
- @util.memoized_property
- def inserted_primary_key(self):
- """Return the primary key for the row just inserted.
-
- The return value is a list of scalar values
- corresponding to the list of primary key columns
- in the target table.
-
- This only applies to single row :func:`_expression.insert`
- constructs which did not explicitly specify
- :meth:`_expression.Insert.returning`.
-
- Note that primary key columns which specify a
- server_default clause,
- or otherwise do not qualify as "autoincrement"
- columns (see the notes at :class:`_schema.Column`), and were
- generated using the database-side default, will
- appear in this list as ``None`` unless the backend
- supports "returning" and the insert statement executed
- with the "implicit returning" enabled.
-
- Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
- statement is not a compiled expression construct
- or is not an insert() construct.
+ keymap = metadata._keymap
+ processors = metadata._processors
+ tf = metadata._tuplefilter
- """
+ if tf:
+ processors = tf(processors)
- if not self.context.compiled:
- raise exc.InvalidRequestError(
- "Statement is not a compiled " "expression construct."
- )
- elif not self.context.isinsert:
- raise exc.InvalidRequestError(
- "Statement is not an insert() " "expression construct."
+ _make_row_orig = functools.partial(
+ process_row, metadata, processors, keymap
)
- elif self.context._is_explicit_returning:
- raise exc.InvalidRequestError(
- "Can't call inserted_primary_key "
- "when returning() "
- "is used."
- )
-
- return self.context.inserted_primary_key
- def last_updated_params(self):
- """Return the collection of updated parameters from this
- execution.
+ def make_row(row):
+ return _make_row_orig(tf(row))
- Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
- statement is not a compiled expression construct
- or is not an update() construct.
-
- """
- if not self.context.compiled:
- raise exc.InvalidRequestError(
- "Statement is not a compiled " "expression construct."
- )
- elif not self.context.isupdate:
- raise exc.InvalidRequestError(
- "Statement is not an update() " "expression construct."
- )
- elif self.context.executemany:
- return self.context.compiled_parameters
else:
- return self.context.compiled_parameters[0]
-
- def last_inserted_params(self):
- """Return the collection of inserted parameters from this
- execution.
+ make_row = functools.partial(
+ process_row, metadata, processors, keymap
+ )
- Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
- statement is not a compiled expression construct
- or is not an insert() construct.
+ fns = ()
- """
- if not self.context.compiled:
- raise exc.InvalidRequestError(
- "Statement is not a compiled " "expression construct."
- )
- elif not self.context.isinsert:
- raise exc.InvalidRequestError(
- "Statement is not an insert() " "expression construct."
- )
- elif self.context.executemany:
- return self.context.compiled_parameters
+ if self._row_logging_fn:
+ fns = (self._row_logging_fn,)
else:
- return self.context.compiled_parameters[0]
+ fns = ()
- @property
- def returned_defaults(self):
- """Return the values of default columns that were fetched using
- the :meth:`.ValuesBase.return_defaults` feature.
+ if self._column_slice_filter:
+ fns += (self._column_slice_filter,)
- The value is an instance of :class:`.Row`, or ``None``
- if :meth:`.ValuesBase.return_defaults` was not used or if the
- backend does not support RETURNING.
+ if fns:
+ _make_row = make_row
- .. versionadded:: 0.9.0
+ def make_row(row):
+ row = _make_row(row)
+ for fn in fns:
+ row = fn(row)
+ return row
- .. seealso::
+ return make_row
- :meth:`.ValuesBase.return_defaults`
+ def _raw_row_iterator(self):
+ """Return a safe iterator that yields raw row data.
+
+ This is used by the :meth:`._engine.Result.merge` method
+ to merge multiple compatible results together.
"""
- return self.context.returned_defaults
+ raise NotImplementedError()
- def lastrow_has_defaults(self):
- """Return ``lastrow_has_defaults()`` from the underlying
- :class:`.ExecutionContext`.
+ def freeze(self):
+ """Return a callable object that will produce copies of this
+ :class:`.Result` when invoked.
- See :class:`.ExecutionContext` for details.
+ This is used for result set caching. The method must be called
+ on the result when it has been unconsumed, and calling the method
+ will consume the result fully.
"""
+ return FrozenResult(self)
- return self.context.lastrow_has_defaults()
+ def merge(self, *others):
+ """Merge this :class:`.Result` with other compatible result
+ objects.
- def postfetch_cols(self):
- """Return ``postfetch_cols()`` from the underlying
- :class:`.ExecutionContext`.
+ The object returned is an instance of :class:`.MergedResult`,
+ which will be composed of iterators from the given result
+ objects.
- See :class:`.ExecutionContext` for details.
-
- Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
- statement is not a compiled expression construct
- or is not an insert() or update() construct.
+ The new result will use the metadata from this result object.
+ The subsequent result objects must be against an identical
+ set of result / cursor metadata, otherwise the behavior is
+ undefined.
"""
+ return MergedResult(self._metadata, (self,) + others)
- if not self.context.compiled:
- raise exc.InvalidRequestError(
- "Statement is not a compiled " "expression construct."
- )
- elif not self.context.isinsert and not self.context.isupdate:
- raise exc.InvalidRequestError(
- "Statement is not an insert() or update() "
- "expression construct."
- )
- return self.context.postfetch_cols
+ @HasMemoized.memoized_attribute
+ def _iterator_getter(self):
- def prefetch_cols(self):
- """Return ``prefetch_cols()`` from the underlying
- :class:`.ExecutionContext`.
+ make_row = self._row_getter()
- See :class:`.ExecutionContext` for details.
+ post_creational_filter = self._post_creational_filter
- Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
- statement is not a compiled expression construct
- or is not an insert() or update() construct.
+ if self._unique_filter_state:
+ uniques, strategy = self._unique_strategy
- """
+ def iterrows(self):
+ for row in self._fetchiter_impl():
+ obj = make_row(row)
+ hashed = strategy(obj) if strategy else obj
+ if hashed in uniques:
+ continue
+ uniques.add(hashed)
+ if post_creational_filter:
+ obj = post_creational_filter(obj)
+ yield obj
- if not self.context.compiled:
- raise exc.InvalidRequestError(
- "Statement is not a compiled " "expression construct."
- )
- elif not self.context.isinsert and not self.context.isupdate:
- raise exc.InvalidRequestError(
- "Statement is not an insert() or update() "
- "expression construct."
- )
- return self.context.prefetch_cols
+ else:
- def supports_sane_rowcount(self):
- """Return ``supports_sane_rowcount`` from the dialect.
+ def iterrows(self):
+ for row in self._fetchiter_impl():
+ row = make_row(row)
+ if post_creational_filter:
+ row = post_creational_filter(row)
+ yield row
- See :attr:`_engine.ResultProxy.rowcount` for background.
+ return iterrows
- """
+ @HasMemoized.memoized_attribute
+ def _allrow_getter(self):
- return self.dialect.supports_sane_rowcount
+ make_row = self._row_getter()
- def supports_sane_multi_rowcount(self):
- """Return ``supports_sane_multi_rowcount`` from the dialect.
+ post_creational_filter = self._post_creational_filter
- See :attr:`_engine.ResultProxy.rowcount` for background.
+ if self._unique_filter_state:
+ uniques, strategy = self._unique_strategy
- """
+ def allrows(self):
+ rows = self._fetchall_impl()
+ rows = [
+ made_row
+ for made_row, sig_row in [
+ (
+ made_row,
+ strategy(made_row) if strategy else made_row,
+ )
+ for made_row in [make_row(row) for row in rows]
+ ]
+ if sig_row not in uniques and not uniques.add(sig_row)
+ ]
- return self.dialect.supports_sane_multi_rowcount
+ if post_creational_filter:
+ rows = [post_creational_filter(row) for row in rows]
+ return rows
- @util.memoized_property
- def rowcount(self):
- """Return the 'rowcount' for this result.
+ else:
- The 'rowcount' reports the number of rows *matched*
- by the WHERE criterion of an UPDATE or DELETE statement.
+ def allrows(self):
+ rows = self._fetchall_impl()
+ if post_creational_filter:
+ rows = [
+ post_creational_filter(make_row(row)) for row in rows
+ ]
+ else:
+ rows = [make_row(row) for row in rows]
+ return rows
+
+ return allrows
+
+ @HasMemoized.memoized_attribute
+ def _onerow_getter(self):
+ make_row = self._row_getter()
+
+ # TODO: this is a lot for results that are only one row.
+ # all of this could be in _only_one_row except for fetchone()
+ # and maybe __next__
+
+ post_creational_filter = self._post_creational_filter
+
+ if self._unique_filter_state:
+ uniques, strategy = self._unique_strategy
+
+ def onerow(self):
+ _onerow = self._fetchone_impl
+ while True:
+ row = _onerow()
+ if row is None:
+ return _NO_ROW
+ else:
+ obj = make_row(row)
+ hashed = strategy(obj) if strategy else obj
+ if hashed in uniques:
+ continue
+ else:
+ uniques.add(hashed)
+ if post_creational_filter:
+ obj = post_creational_filter(obj)
+ return obj
- .. note::
+ else:
- Notes regarding :attr:`_engine.ResultProxy.rowcount`:
+ def onerow(self):
+ row = self._fetchone_impl()
+ if row is None:
+ return _NO_ROW
+ else:
+ row = make_row(row)
+ if post_creational_filter:
+ row = post_creational_filter(row)
+ return row
+
+ return onerow
+
+ @HasMemoized.memoized_attribute
+ def _manyrow_getter(self):
+ make_row = self._row_getter()
+
+ post_creational_filter = self._post_creational_filter
+
+ if self._unique_filter_state:
+ uniques, strategy = self._unique_strategy
+
+ def filterrows(make_row, rows, strategy, uniques):
+ return [
+ made_row
+ for made_row, sig_row in [
+ (
+ made_row,
+ strategy(made_row) if strategy else made_row,
+ )
+ for made_row in [make_row(row) for row in rows]
+ ]
+ if sig_row not in uniques and not uniques.add(sig_row)
+ ]
+ def manyrows(self, num):
+ collect = []
+
+ _manyrows = self._fetchmany_impl
+
+ if num is None:
+ # if None is passed, we don't know the default
+ # manyrows number, DBAPI has this as cursor.arraysize
+ # different DBAPIs / fetch strategies may be different.
+ # do a fetch to find what the number is. if there are
+ # only fewer rows left, then it doesn't matter.
+ if self._yield_per:
+ num_required = num = self._yield_per
+ else:
+ rows = _manyrows(num)
+ num = len(rows)
+ collect.extend(
+ filterrows(make_row, rows, strategy, uniques)
+ )
+ num_required = num - len(collect)
+ else:
+ num_required = num
- * This attribute returns the number of rows *matched*,
- which is not necessarily the same as the number of rows
- that were actually *modified* - an UPDATE statement, for example,
- may have no net change on a given row if the SET values
- given are the same as those present in the row already.
- Such a row would be matched but not modified.
- On backends that feature both styles, such as MySQL,
- rowcount is configured by default to return the match
- count in all cases.
+ while num_required:
+ rows = _manyrows(num_required)
+ if not rows:
+ break
- * :attr:`_engine.ResultProxy.rowcount`
- is *only* useful in conjunction
- with an UPDATE or DELETE statement. Contrary to what the Python
- DBAPI says, it does *not* return the
- number of rows available from the results of a SELECT statement
- as DBAPIs cannot support this functionality when rows are
- unbuffered.
+ collect.extend(
+ filterrows(make_row, rows, strategy, uniques)
+ )
+ num_required = num - len(collect)
- * :attr:`_engine.ResultProxy.rowcount`
- may not be fully implemented by
- all dialects. In particular, most DBAPIs do not support an
- aggregate rowcount result from an executemany call.
- The :meth:`_engine.ResultProxy.supports_sane_rowcount` and
- :meth:`_engine.ResultProxy.supports_sane_multi_rowcount` methods
- will report from the dialect if each usage is known to be
- supported.
+ if post_creational_filter:
+ collect = [post_creational_filter(row) for row in collect]
+ return collect
- * Statements that use RETURNING may not return a correct
- rowcount.
+ else:
- """
- try:
- return self.context.rowcount
- except BaseException as e:
- self.connection._handle_dbapi_exception(
- e, None, None, self.cursor, self.context
- )
+ def manyrows(self, num):
+ if num is None:
+ num = self._yield_per
- @property
- def lastrowid(self):
- """return the 'lastrowid' accessor on the DBAPI cursor.
+ rows = self._fetchmany_impl(num)
+ rows = [make_row(row) for row in rows]
+ if post_creational_filter:
+ rows = [post_creational_filter(row) for row in rows]
+ return rows
- This is a DBAPI specific method and is only functional
- for those backends which support it, for statements
- where it is appropriate. It's behavior is not
- consistent across backends.
+ return manyrows
- Usage of this method is normally unnecessary when
- using insert() expression constructs; the
- :attr:`~ResultProxy.inserted_primary_key` attribute provides a
- tuple of primary key values for a newly inserted row,
- regardless of database backend.
+ def _fetchiter_impl(self):
+ raise NotImplementedError()
- """
- try:
- return self.context.get_lastrowid()
- except BaseException as e:
- self.connection._handle_dbapi_exception(
- e, None, None, self.cursor, self.context
- )
+ def _fetchone_impl(self):
+ raise NotImplementedError()
- @property
- def returns_rows(self):
- """True if this :class:`_engine.ResultProxy` returns rows.
+ def _fetchall_impl(self):
+ raise NotImplementedError()
- I.e. if it is legal to call the methods
- :meth:`_engine.ResultProxy.fetchone`,
- :meth:`_engine.ResultProxy.fetchmany`
- :meth:`_engine.ResultProxy.fetchall`.
+ def _fetchmany_impl(self, size=None):
+ raise NotImplementedError()
- """
- return self._metadata is not None
+ def __iter__(self):
+ return self._iterator_getter(self)
- @property
- def is_insert(self):
- """True if this :class:`_engine.ResultProxy` is the result
- of a executing an expression language compiled
- :func:`_expression.insert` construct.
+ def __next__(self):
+ row = self._onerow_getter(self)
+ if row is _NO_ROW:
+ raise StopIteration()
+ else:
+ return row
- When True, this implies that the
- :attr:`inserted_primary_key` attribute is accessible,
- assuming the statement did not include
- a user defined "returning" construct.
+ next = __next__
- """
- return self.context.isinsert
+ def fetchall(self):
+ """A synonym for the :meth:`_engine.Result.all` method."""
+ return self._allrow_getter(self)
-class ResultProxy(BaseResult):
- """A facade around a DBAPI cursor object.
+ def fetchone(self):
+ """Fetch one row.
- Returns database rows via the :class:`.Row` class, which provides
- additional API features and behaviors on top of the raw data returned
- by the DBAPI.
+ When all rows are exhausted, returns None.
- Within the scope of the 1.x series of SQLAlchemy, the
- :class:`_engine.ResultProxy`
- will in fact return instances of the :class:`.LegacyRow` class, which
- maintains Python mapping (i.e. dictionary) like behaviors upon the object
- itself. Going forward, the :attr:`.Row._mapping` attribute should be used
- for dictionary behaviors.
+ .. note:: This method is not compatible with the
+ :meth:`_engine.Result.scalars`
+ filter, as there is no way to distinguish between a data value of
+ None and the ending value. Prefer to use iterative / collection
+ methods which support scalar None values.
- .. seealso::
+ this method is provided for backwards compatibility with
+ SQLAlchemy 1.x.x.
- :ref:`coretutorial_selecting` - introductory material for accessing
- :class:`_engine.ResultProxy` and :class:`.Row` objects.
+ To fetch the first row of a result only, use the
+ :meth:`_engine.Result.first` method. To iterate through all
+ rows, iterate the :class:`_engine.Result` object directly.
- """
+ :return: a :class:`.Row` object if no filters are applied, or None
+ if no rows remain.
+ When filters are applied, such as :meth:`_engine.Result.mappings`
+ or :meth:`._engine.Result.scalar`, different kinds of objects
+ may be returned.
- _autoclose_connection = False
- _process_row = LegacyRow
- _cursor_metadata = LegacyCursorResultMetaData
- _cursor_strategy_cls = DefaultCursorFetchStrategy
+ """
+ if self._no_scalar_onerow:
+ raise exc.InvalidRequestError(
+ "Can't use fetchone() when returning scalar values; there's "
+ "no way to distinguish between end of results and None"
+ )
+ row = self._onerow_getter(self)
+ if row is _NO_ROW:
+ return None
+ else:
+ return row
- def __iter__(self):
- """Implement iteration protocol."""
+ def fetchmany(self, size=None):
+ """Fetch many rows.
- while True:
- row = self.fetchone()
- if row is None:
- return
- else:
- yield row
+ When all rows are exhausted, returns an empty list.
- def close(self):
- """Close this :class:`_engine.ResultProxy`.
-
- This closes out the underlying DBAPI cursor corresponding
- to the statement execution, if one is still present. Note that the
- DBAPI cursor is automatically released when the
- :class:`_engine.ResultProxy`
- exhausts all available rows. :meth:`_engine.ResultProxy.close`
- is generally
- an optional method except in the case when discarding a
- :class:`_engine.ResultProxy`
- that still has additional rows pending for fetch.
-
- In the case of a result that is the product of
- :ref:`connectionless execution <dbengine_implicit>`,
- the underlying :class:`_engine.Connection` object is also closed,
- which
- :term:`releases` DBAPI connection resources.
-
- .. deprecated:: 2.0 "connectionless" execution is deprecated and will
- be removed in version 2.0. Version 2.0 will feature the
- :class:`_future.Result`
- object that will no longer affect the status
- of the originating connection in any case.
-
- After this method is called, it is no longer valid to call upon
- the fetch methods, which will raise a :class:`.ResourceClosedError`
- on subsequent use.
+        This method is provided for backwards compatibility with
+ SQLAlchemy 1.x.x.
- .. seealso::
+        To fetch rows in groups, use the :meth:`_result.Result.partitions`
+ method.
- :ref:`connections_toplevel`
+ :return: a list of :class:`.Row` objects if no filters are applied.
+ When filters are applied, such as :meth:`_engine.Result.mappings`
+         or :meth:`_engine.Result.scalar`, different kinds of objects
+ may be returned.
"""
- self._soft_close(hard=True)
+ return self._manyrow_getter(self, size)
- def _soft_close(self, hard=False):
- soft_closed = self._soft_closed
- super(ResultProxy, self)._soft_close(hard=hard)
- if (
- not soft_closed
- and self._soft_closed
- and self._autoclose_connection
- ):
- self.connection.close()
+ def all(self):
+ """Return all rows in a list.
- def __next__(self):
- """Implement the Python next() protocol.
+ Closes the result set after invocation. Subsequent invocations
+ will return an empty list.
- This method, mirrored as both ``.next()`` and ``.__next__()``, is part
- of Python's API for producing iterator-like behavior.
+ .. versionadded:: 1.4
- .. versionadded:: 1.2
+ :return: a list of :class:`.Row` objects if no filters are applied.
+ When filters are applied, such as :meth:`_engine.Result.mappings`
+         or :meth:`_engine.Result.scalar`, different kinds of objects
+ may be returned.
"""
- row = self.fetchone()
- if row is None:
- raise StopIteration()
+ return self._allrow_getter(self)
+
+ def _only_one_row(self, raise_for_second_row, raise_for_none):
+ row = self._onerow_getter(self)
+ if row is _NO_ROW:
+ if raise_for_none:
+ self._soft_close(hard=True)
+ raise exc.NoResultFound(
+ "No row was found when one was required"
+ )
+ else:
+ return None
else:
- return row
+ if raise_for_second_row:
+ next_row = self._onerow_getter(self)
+ else:
+ next_row = _NO_ROW
+ self._soft_close(hard=True)
+ if next_row is not _NO_ROW:
+ raise exc.MultipleResultsFound(
+ "Multiple rows were found when exactly one was required"
+ if raise_for_none
+ else "Multiple rows were found when one or none "
+ "was required"
+ )
+ else:
+ return row
- next = __next__
+ def first(self):
+ """Fetch the first row or None if no row is present.
- def process_rows(self, rows):
- process_row = self._process_row
- metadata = self._metadata
- keymap = metadata._keymap
- processors = metadata._processors
+ Closes the result set and discards remaining rows.
- if self._echo:
- log = self.context.engine.logger.debug
- l = []
- for row in rows:
- log("Row %r", sql_util._repr_row(row))
- l.append(process_row(metadata, processors, keymap, row))
- return l
- else:
- return [
- process_row(metadata, processors, keymap, row) for row in rows
- ]
+ .. comment: A warning is emitted if additional rows remain.
- def fetchall(self):
- """Fetch all rows, just like DB-API ``cursor.fetchall()``.
+ :return: a :class:`.Row` object if no filters are applied, or None
+ if no rows remain.
+ When filters are applied, such as :meth:`_engine.Result.mappings`
+         or :meth:`_engine.Result.scalar`, different kinds of objects
+ may be returned.
- After all rows have been exhausted, the underlying DBAPI
- cursor resource is released, and the object may be safely
- discarded.
+ """
+ return self._only_one_row(False, False)
- Subsequent calls to :meth:`_engine.ResultProxy.fetchall` will return
- an empty list. After the :meth:`_engine.ResultProxy.close` method is
- called, the method will raise :class:`.ResourceClosedError`.
+ def one_or_none(self):
+ """Return at most one result or raise an exception.
- :return: a list of :class:`.Row` objects
+ Returns ``None`` if the result has no rows.
+ Raises :class:`.MultipleResultsFound`
+ if multiple rows are returned.
- """
+ .. versionadded:: 1.4
- try:
- l = self.process_rows(self.cursor_strategy.fetchall())
- self._soft_close()
- return l
- except BaseException as e:
- self.connection._handle_dbapi_exception(
- e, None, None, self.cursor, self.context
- )
+ :return: The first :class:`.Row` or None if no row is available.
+ When filters are applied, such as :meth:`_engine.Result.mappings`
+         or :meth:`_engine.Result.scalar`, different kinds of objects
+ may be returned.
- def fetchmany(self, size=None):
- """Fetch many rows, just like DB-API
- ``cursor.fetchmany(size=cursor.arraysize)``.
+ :raises: :class:`.MultipleResultsFound`
- After all rows have been exhausted, the underlying DBAPI
- cursor resource is released, and the object may be safely
- discarded.
+ .. seealso::
- Calls to :meth:`_engine.ResultProxy.fetchmany`
- after all rows have been
- exhausted will return
- an empty list. After the :meth:`_engine.ResultProxy.close` method is
- called, the method will raise :class:`.ResourceClosedError`.
+ :meth:`_result.Result.first`
- :return: a list of :class:`.Row` objects
+ :meth:`_result.Result.one`
"""
+ return self._only_one_row(True, False)
- try:
- l = self.process_rows(self.cursor_strategy.fetchmany(size))
- if len(l) == 0:
- self._soft_close()
- return l
- except BaseException as e:
- self.connection._handle_dbapi_exception(
- e, None, None, self.cursor, self.context
- )
+ def one(self):
+ """Return exactly one result or raise an exception.
- def _onerow(self):
- return self.fetchone()
+ Raises :class:`.NoResultFound` if the result returns no
+ rows, or :class:`.MultipleResultsFound` if multiple rows
+ would be returned.
- def fetchone(self):
- """Fetch one row, just like DB-API ``cursor.fetchone()``.
+ .. versionadded:: 1.4
- After all rows have been exhausted, the underlying DBAPI
- cursor resource is released, and the object may be safely
- discarded.
+ :return: The first :class:`.Row`.
+ When filters are applied, such as :meth:`_engine.Result.mappings`
+         or :meth:`_engine.Result.scalar`, different kinds of objects
+ may be returned.
- Calls to :meth:`_engine.ResultProxy.fetchone` after all rows have
- been exhausted will return ``None``.
- After the :meth:`_engine.ResultProxy.close` method is
- called, the method will raise :class:`.ResourceClosedError`.
+ :raises: :class:`.MultipleResultsFound`, :class:`.NoResultFound`
- :return: a :class:`.Row` object, or None if no rows remain
-
- """
- try:
- row = self.cursor_strategy.fetchone()
- if row is not None:
- return self.process_rows([row])[0]
- else:
- self._soft_close()
- return None
- except BaseException as e:
- self.connection._handle_dbapi_exception(
- e, None, None, self.cursor, self.context
- )
-
- def first(self):
- """Fetch the first row and then close the result set unconditionally.
+ .. seealso::
- After calling this method, the object is fully closed,
- e.g. the :meth:`_engine.ResultProxy.close`
- method will have been called.
+ :meth:`_result.Result.first`
- :return: a :class:`.Row` object, or None if no rows remain
+ :meth:`_result.Result.one_or_none`
"""
- try:
- row = self.cursor_strategy.fetchone()
- except BaseException as e:
- self.connection._handle_dbapi_exception(
- e, None, None, self.cursor, self.context
- )
-
- try:
- if row is not None:
- return self.process_rows([row])[0]
- else:
- return None
- finally:
- self.close()
+ return self._only_one_row(True, True)
def scalar(self):
"""Fetch the first column of the first row, and close the result set.
After calling this method, the object is fully closed,
- e.g. the :meth:`_engine.ResultProxy.close`
+ e.g. the :meth:`_engine.CursorResult.close`
method will have been called.
:return: a Python scalar value , or None if no rows remain
@@ -1764,38 +1008,94 @@ class ResultProxy(BaseResult):
return None
-class BufferedRowResultProxy(ResultProxy):
- """A ResultProxy with row buffering behavior.
+class FrozenResult(object):
+ def __init__(self, result):
+ self.metadata = result._metadata._for_freeze()
+ self._post_creational_filter = result._post_creational_filter
+ result._post_creational_filter = None
- .. deprecated:: 1.4 this class is now supplied using a strategy object.
- See :class:`.BufferedRowCursorFetchStrategy`.
+ self.data = result.fetchall()
- """
+ def with_data(self, data):
+ fr = FrozenResult.__new__(FrozenResult)
+ fr.metadata = self.metadata
+ fr._post_creational_filter = self._post_creational_filter
+ fr.data = data
+ return fr
- _cursor_strategy_cls = BufferedRowCursorFetchStrategy
+ def __call__(self):
+ result = IteratorResult(self.metadata, iter(self.data))
+ result._post_creational_filter = self._post_creational_filter
+ return result
-class FullyBufferedResultProxy(ResultProxy):
- """A result proxy that buffers rows fully upon creation.
+class IteratorResult(Result):
+ def __init__(self, cursor_metadata, iterator):
+ self._metadata = cursor_metadata
+ self.iterator = iterator
- .. deprecated:: 1.4 this class is now supplied using a strategy object.
- See :class:`.FullyBufferedCursorFetchStrategy`.
+ def _soft_close(self, **kw):
+ self.iterator = iter([])
- """
+ def _raw_row_iterator(self):
+ return self.iterator
- _cursor_strategy_cls = FullyBufferedCursorFetchStrategy
+ def _fetchiter_impl(self):
+ return self.iterator
+ def _fetchone_impl(self):
+ try:
+ return next(self.iterator)
+ except StopIteration:
+ self._soft_close()
+ return None
-class BufferedColumnRow(LegacyRow):
- """Row is now BufferedColumn in all cases"""
+ def _fetchall_impl(self):
+ try:
+ return list(self.iterator)
+ finally:
+ self._soft_close()
+ def _fetchmany_impl(self, size=None):
+ return list(itertools.islice(self.iterator, 0, size))
-class BufferedColumnResultProxy(ResultProxy):
- """A ResultProxy with column buffering behavior.
- .. versionchanged:: 1.4 This is now the default behavior of the Row
- and this class does not change behavior in any way.
+class ChunkedIteratorResult(IteratorResult):
+ def __init__(self, cursor_metadata, chunks):
+ self._metadata = cursor_metadata
+ self.chunks = chunks
- """
+ self.iterator = itertools.chain.from_iterable(self.chunks(None))
+
+ @_generative
+ def yield_per(self, num):
+ self._yield_per = num
+ self.iterator = itertools.chain.from_iterable(self.chunks(num))
- _process_row = BufferedColumnRow
+
+class MergedResult(IteratorResult):
+ closed = False
+
+ def __init__(self, cursor_metadata, results):
+ self._results = results
+ super(MergedResult, self).__init__(
+ cursor_metadata,
+ itertools.chain.from_iterable(
+ r._raw_row_iterator() for r in results
+ ),
+ )
+
+ self._unique_filter_state = results[0]._unique_filter_state
+ self._post_creational_filter = results[0]._post_creational_filter
+ self._no_scalar_onerow = results[0]._no_scalar_onerow
+ self._yield_per = results[0]._yield_per
+
+ def close(self):
+ self._soft_close(hard=True)
+
+ def _soft_close(self, hard=False):
+ for r in self._results:
+ r._soft_close(hard=hard)
+
+ if hard:
+ self.closed = True
diff --git a/lib/sqlalchemy/engine/row.py b/lib/sqlalchemy/engine/row.py
index 68e32057d..6cd020110 100644
--- a/lib/sqlalchemy/engine/row.py
+++ b/lib/sqlalchemy/engine/row.py
@@ -51,7 +51,7 @@ except ImportError:
__slots__ = ("_parent", "_data", "_keymap")
def __init__(self, parent, processors, keymap, data):
- """Row objects are constructed by ResultProxy objects."""
+ """Row objects are constructed by CursorResult objects."""
self._parent = parent
@@ -69,6 +69,9 @@ except ImportError:
(self.__class__, self.__getstate__()),
)
+ def _filter_on_values(self, filters):
+ return Row(self._parent, filters, self._keymap, self._data)
+
def _values_impl(self):
return list(self)
@@ -81,6 +84,9 @@ except ImportError:
def __hash__(self):
return hash(self._data)
+ def __getitem__(self, key):
+ return self._data[key]
+
def _subscript_impl(self, key, ismapping):
try:
rec = self._keymap[key]
@@ -127,7 +133,7 @@ class Row(BaseRow, collections_abc.Sequence):
The :class:`.Row` object represents a row of a database result. It is
typically associated in the 1.x series of SQLAlchemy with the
- :class:`_engine.ResultProxy` object, however is also used by the ORM for
+ :class:`_engine.CursorResult` object, however is also used by the ORM for
tuple-like results as of SQLAlchemy 1.4.
The :class:`.Row` object seeks to act as much like a Python named
@@ -150,7 +156,7 @@ class Row(BaseRow, collections_abc.Sequence):
and now acts mostly like a named tuple. Mapping-like functionality is
moved to the :attr:`.Row._mapping` attribute, but will remain available
in SQLAlchemy 1.x series via the :class:`.LegacyRow` class that is used
- by :class:`_engine.ResultProxy`.
+ by :class:`_engine.LegacyCursorResult`.
See :ref:`change_4710_core` for background
on this change.
@@ -182,9 +188,6 @@ class Row(BaseRow, collections_abc.Sequence):
def __contains__(self, key):
return key in self._data
- def __getitem__(self, key):
- return self._data[key]
-
def __getstate__(self):
return {"_parent": self._parent, "_data": self._data}
@@ -243,7 +246,7 @@ class Row(BaseRow, collections_abc.Sequence):
:attr:`.Row._mapping`
"""
- return [k for k in self._parent.keys if k is not None]
+ return self._parent.keys
@property
def _fields(self):
@@ -477,6 +480,9 @@ class RowMapping(collections_abc.Mapping):
def __contains__(self, key):
return self.row._parent._has_key(key)
+ def __repr__(self):
+ return repr(dict(self))
+
def items(self):
"""Return a view of key/value tuples for the elements in the
underlying :class:`.Row`.
@@ -489,9 +495,8 @@ class RowMapping(collections_abc.Mapping):
by the underlying :class:`.Row`.
"""
- return ROMappingView(
- self, [k for k in self.row._parent.keys if k is not None]
- )
+
+ return self.row._parent.keys
def values(self):
"""Return a view of values for the values represented in the
diff --git a/lib/sqlalchemy/exc.py b/lib/sqlalchemy/exc.py
index aef8598a9..94cc25eab 100644
--- a/lib/sqlalchemy/exc.py
+++ b/lib/sqlalchemy/exc.py
@@ -234,6 +234,29 @@ class NoSuchColumnError(KeyError, InvalidRequestError):
"""A nonexistent column is requested from a ``Row``."""
+class NoResultFound(InvalidRequestError):
+ """A database result was required but none was found.
+
+
+ .. versionchanged:: 1.4 This exception is now part of the
+ ``sqlalchemy.exc`` module in Core, moved from the ORM. The symbol
+ remains importable from ``sqlalchemy.orm.exc``.
+
+
+ """
+
+
+class MultipleResultsFound(InvalidRequestError):
+ """A single database result was required but more than one were found.
+
+ .. versionchanged:: 1.4 This exception is now part of the
+ ``sqlalchemy.exc`` module in Core, moved from the ORM. The symbol
+ remains importable from ``sqlalchemy.orm.exc``.
+
+
+ """
+
+
class NoReferenceError(InvalidRequestError):
"""Raised by ``ForeignKey`` to indicate a reference cannot be resolved."""
diff --git a/lib/sqlalchemy/future/__init__.py b/lib/sqlalchemy/future/__init__.py
index 02cbd697b..635afa78c 100644
--- a/lib/sqlalchemy/future/__init__.py
+++ b/lib/sqlalchemy/future/__init__.py
@@ -11,7 +11,6 @@
from .engine import Connection # noqa
from .engine import create_engine # noqa
from .engine import Engine # noqa
-from .result import Result # noqa
from ..sql.selectable import Select
from ..util.langhelpers import public_factory
diff --git a/lib/sqlalchemy/future/result.py b/lib/sqlalchemy/future/result.py
deleted file mode 100644
index 21a42e1f6..000000000
--- a/lib/sqlalchemy/future/result.py
+++ /dev/null
@@ -1,305 +0,0 @@
-import operator
-
-from .. import util
-from ..engine.result import BaseResult
-from ..engine.result import CursorResultMetaData
-from ..engine.result import DefaultCursorFetchStrategy
-from ..engine.result import Row
-from ..sql import util as sql_util
-from ..sql.base import _generative
-from ..sql.base import InPlaceGenerative
-
-
-class Result(InPlaceGenerative, BaseResult):
- """Interim "future" result proxy so that dialects can build on
- upcoming 2.0 patterns.
-
-
- """
-
- _process_row = Row
- _cursor_metadata = CursorResultMetaData
- _cursor_strategy_cls = DefaultCursorFetchStrategy
-
- _column_slice_filter = None
- _post_creational_filter = None
-
- def close(self):
- """Close this :class:`_future.Result`.
-
- This closes out the underlying DBAPI cursor corresponding
- to the statement execution, if one is still present. Note that the
- DBAPI cursor is automatically released when the
- :class:`_future.Result`
- exhausts all available rows. :meth:`_future.Result.close`
- is generally
- an optional method except in the case when discarding a
- :class:`_future.Result`
- that still has additional rows pending for fetch.
-
- After this method is called, it is no longer valid to call upon
- the fetch methods, which will raise a :class:`.ResourceClosedError`
- on subsequent use.
-
- .. seealso::
-
- :ref:`connections_toplevel`
-
- """
- self._soft_close(hard=True)
-
- def columns(self, *col_expressions):
- r"""Establish the columns that should be returned in each row.
-
- This method may be used to limit the columns returned as well
- as to reorder them. The given list of expressions are normally
- a series of integers or string key names. They may also be
- appropriate :class:`.ColumnElement` objects which correspond to
- a given statement construct.
-
- E.g.::
-
- statement = select(table.c.x, table.c.y, table.c.z)
- result = connection.execute(statement)
-
- for z, y in result.columns('z', 'y'):
- # ...
-
-
- Example of using the column objects from the statement itself::
-
- for z, y in result.columns(
- statement.selected_columns.c.z,
- statement.selected_columns.c.y
- ):
- # ...
-
- :param \*col_expressions: indicates columns to be returned. Elements
- may be integer row indexes, string column names, or appropriate
- :class:`.ColumnElement` objects corresponding to a select construct.
-
- :return: this :class:`_future.Result` object with the modifications
- given.
-
- """
- return self._column_slices(col_expressions)
-
- def partitions(self, size=100):
- """Iterate through sub-lists of rows of the size given.
-
- Each list will be of the size given, excluding the last list to
- be yielded, which may have a small number of rows. No empty
- lists will be yielded.
-
- The result object is automatically closed when the iterator
- is fully consumed.
-
- Note that the backend driver will usually buffer the entire result
- ahead of time unless the
- :paramref:`.Connection.execution_options.stream_results` execution
- option is used indicating that the driver should not pre-buffer
- results, if possible. Not all drivers support this option and
- the option is silently ignored for those who do. For a positive
- assertion that the driver supports streaming results that will
- fail if not supported, use the
- :paramref:`.Connection.execution_options.stream_per`
- execution option.
-
- :param size: indicate the maximum number of rows to be present
- in each list yielded.
- :return: iterator of lists
-
- """
- getter = self._row_getter()
- while True:
- partition = [
- getter(r) for r in self._safe_fetchmany_impl(size=size)
- ]
- if partition:
- yield partition
- else:
- break
-
- def scalars(self):
- result = self._column_slices(0)
- result._post_creational_filter = operator.itemgetter(0)
- return result
-
- @_generative
- def _column_slices(self, indexes):
- self._column_slice_filter = self._metadata._tuple_getter(indexes)
-
- @_generative
- def mappings(self):
- self._post_creational_filter = operator.attrgetter("_mapping")
-
- def _row_getter(self):
- process_row = self._process_row
- metadata = self._metadata
- keymap = metadata._keymap
- processors = metadata._processors
-
- fns = ()
-
- if self._echo:
- log = self.context.engine.logger.debug
-
- def log_row(row):
- log("Row %r", sql_util._repr_row(row))
- return row
-
- fns += (log_row,)
-
- if self._column_slice_filter:
- fns += (self._column_slice_filter,)
-
- if self._post_creational_filter:
- fns += (self._post_creational_filter,)
-
- def make_row(row):
- row = process_row(metadata, processors, keymap, row)
- for fn in fns:
- row = fn(row)
- return row
-
- return make_row
-
- def _safe_fetchone_impl(self):
- try:
- return self.cursor_strategy.fetchone()
- except BaseException as e:
- self.connection._handle_dbapi_exception(
- e, None, None, self.cursor, self.context
- )
-
- def _safe_fetchall_impl(self):
- try:
- result = self.cursor_strategy.fetchall()
- self._soft_close()
- return result
- except BaseException as e:
- self.connection._handle_dbapi_exception(
- e, None, None, self.cursor, self.context
- )
-
- def _safe_fetchmany_impl(self, size=None):
- try:
- l = self.cursor_strategy.fetchmany(size)
- if len(l) == 0:
- self._soft_close()
- return l
- except BaseException as e:
- self.connection._handle_dbapi_exception(
- e, None, None, self.cursor, self.context
- )
-
- def __iter__(self):
- getter = self._row_getter()
- return (getter(r) for r in self._safe_fetchall_impl())
-
- def _onerow(self):
- getter = self._row_getter()
- row = self._safe_fetchone_impl()
- if row is None:
- return None
- else:
- return getter(row)
-
- @util.deprecated(
- "2.0",
- "The :meth:`_future.Result.fetchall` "
- "method is provided for backwards "
- "compatibility and will be removed in a future release.",
- )
- def fetchall(self):
- """A synonym for the :meth:`_future.Result.all` method."""
-
- return self.all()
-
- @util.deprecated(
- "2.0",
- "The :meth:`_future.Result.fetchone` "
- "method is provided for backwards "
- "compatibility and will be removed in a future release.",
- )
- def fetchone(self):
- """Fetch one row.
-
- this method is provided for backwards compatibility with
- SQLAlchemy 1.x.x.
-
- To fetch the first row of a result only, use the
- :meth:`.future.Result.first` method. To iterate through all
- rows, iterate the :class:`_future.Result` object directly.
-
- """
- return self._onerow()
-
- @util.deprecated(
- "2.0",
- "The :meth:`_future.Result.fetchmany` "
- "method is provided for backwards "
- "compatibility and will be removed in a future release.",
- )
- def fetchmany(self, size=None):
- """Fetch many rows.
-
- this method is provided for backwards compatibility with
- SQLAlchemy 1.x.x.
-
- To fetch rows in groups, use the :meth:`.future.Result.partitions`
- method, or the :meth:`.future.Result.chunks` method in combination
- with the :paramref:`.Connection.execution_options.stream_per`
- option which sets up the buffer size before fetching the result.
-
- """
- getter = self._row_getter()
- return [getter(r) for r in self._safe_fetchmany_impl(size=size)]
-
- def all(self):
- """Return all rows in a list.
-
- Closes the result set after invocation.
-
- :return: a list of :class:`.Row` objects.
-
- """
- getter = self._row_getter()
- return [getter(r) for r in self._safe_fetchall_impl()]
-
- def first(self):
- """Fetch the first row or None if no row is present.
-
- Closes the result set and discards remaining rows. A warning
- is emitted if additional rows remain.
-
- :return: a :class:`.Row` object, or None if no rows remain
-
- """
- getter = self._row_getter()
- row = self._safe_fetchone_impl()
- if row is None:
- return None
- else:
- row = getter(row)
- second_row = self._safe_fetchone_impl()
- if second_row is not None:
- self._soft_close()
- util.warn("Additional rows remain")
- return row
-
- def scalar(self):
- """Fetch the first column of the first row, and close the result set.
-
- After calling this method, the object is fully closed,
- e.g. the :meth:`_engine.ResultProxy.close`
- method will have been called.
-
- :return: a Python scalar value , or None if no rows remain
-
- """
- row = self.first()
- if row is not None:
- return row[0]
- else:
- return None
diff --git a/lib/sqlalchemy/orm/exc.py b/lib/sqlalchemy/orm/exc.py
index b04b844b3..7b0f84866 100644
--- a/lib/sqlalchemy/orm/exc.py
+++ b/lib/sqlalchemy/orm/exc.py
@@ -8,6 +8,8 @@
"""SQLAlchemy ORM exceptions."""
from .. import exc as sa_exc
from .. import util
+from ..exc import MultipleResultsFound # noqa
+from ..exc import NoResultFound # noqa
NO_STATE = (AttributeError, KeyError)
@@ -146,14 +148,6 @@ class UnmappedColumnError(sa_exc.InvalidRequestError):
"""Mapping operation was requested on an unknown column."""
-class NoResultFound(sa_exc.InvalidRequestError):
- """A database result was required but none was found."""
-
-
-class MultipleResultsFound(sa_exc.InvalidRequestError):
- """A single database result was required but more than one were found."""
-
-
class LoaderStrategyException(sa_exc.InvalidRequestError):
"""A loader strategy for an attribute does not exist."""
diff --git a/lib/sqlalchemy/orm/loading.py b/lib/sqlalchemy/orm/loading.py
index 5ef2f10ec..d781df980 100644
--- a/lib/sqlalchemy/orm/loading.py
+++ b/lib/sqlalchemy/orm/loading.py
@@ -141,7 +141,7 @@ def merge_result(query, iterator, load=True):
result = []
keys = [ent._label_name for ent in query._entities]
keyed_tuple = result_tuple(
- keys, [ent.entities for ent in query._entities]
+ keys, [tuple(ent.entities) for ent in query._entities]
)
for row in iterator:
newrow = list(row)
@@ -528,7 +528,7 @@ def _instance_processor(
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
- tuple_getter = result._tuple_getter(pk_cols, True)
+ tuple_getter = result._tuple_getter(pk_cols)
if mapper.allow_partial_pks:
is_not_primary_key = _none_set.issuperset
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
index 31f005769..c05705b67 100644
--- a/lib/sqlalchemy/orm/mapper.py
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -2647,8 +2647,9 @@ class Mapper(sql_base.HasCacheKey, InspectionAttr):
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
+ rk = result.keys()
for col in pk_cols:
- if not result._has_key(col):
+ if col not in rk:
return False
else:
return True
diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py
index ab49a4dcc..6ec520a3e 100644
--- a/lib/sqlalchemy/orm/query.py
+++ b/lib/sqlalchemy/orm/query.py
@@ -4315,7 +4315,7 @@ class _MapperEntity(_QueryEntity):
polymorphic_discriminator=self._polymorphic_discriminator,
)
- return _instance, self._label_name, self.entities
+ return _instance, self._label_name, tuple(self.entities)
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py
index 23eff773c..d32e3fd7a 100644
--- a/lib/sqlalchemy/sql/compiler.py
+++ b/lib/sqlalchemy/sql/compiler.py
@@ -702,7 +702,7 @@ class SQLCompiler(Compiled):
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
- # TypeEngine. ResultProxy uses this for type processing and
+ # TypeEngine. CursorResult uses this for type processing and
# column targeting
self._result_columns = []
@@ -1056,11 +1056,11 @@ class SQLCompiler(Compiled):
return expanded_state
- @util.preload_module("sqlalchemy.engine.result")
+ @util.preload_module("sqlalchemy.engine.cursor")
def _create_result_map(self):
"""utility method used for unit tests only."""
- result = util.preloaded.engine_result
- return result.CursorResultMetaData._create_description_match_map(
+ cursor = util.preloaded.engine_cursor
+ return cursor.CursorResultMetaData._create_description_match_map(
self._result_columns
)
diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py
index a4399830e..1fa7daed6 100644
--- a/lib/sqlalchemy/testing/fixtures.py
+++ b/lib/sqlalchemy/testing/fixtures.py
@@ -59,7 +59,8 @@ class TestBase(object):
@config.fixture()
def connection(self):
- conn = config.db.connect()
+ eng = getattr(self, "bind", config.db)
+ conn = eng.connect()
trans = conn.begin()
try:
yield conn
@@ -87,25 +88,27 @@ class TestBase(object):
class FutureEngineMixin(object):
@classmethod
def setup_class(cls):
- super_ = super(FutureEngineMixin, cls)
- if hasattr(super_, "setup_class"):
- super_.setup_class()
from ..future.engine import Engine
from sqlalchemy import testing
- config._current.push_engine(Engine._future_facade(config.db), testing)
+ facade = Engine._future_facade(config.db)
+ config._current.push_engine(facade, testing)
+
+ super_ = super(FutureEngineMixin, cls)
+ if hasattr(super_, "setup_class"):
+ super_.setup_class()
@classmethod
def teardown_class(cls):
- from sqlalchemy import testing
-
- config._current.pop(testing)
-
super_ = super(FutureEngineMixin, cls)
if hasattr(super_, "teardown_class"):
super_.teardown_class()
+ from sqlalchemy import testing
+
+ config._current.pop(testing)
+
class TablesTest(TestBase):
@@ -195,7 +198,7 @@ class TablesTest(TestBase):
# no need to run deletes if tables are recreated on setup
if self.run_define_tables != "each" and self.run_deletes == "each":
- with self.bind.connect() as conn:
+ with self.bind.begin() as conn:
for table in reversed(self.metadata.sorted_tables):
try:
conn.execute(table.delete())
diff --git a/lib/sqlalchemy/testing/suite/test_insert.py b/lib/sqlalchemy/testing/suite/test_insert.py
index f449b2fe6..92e38ab20 100644
--- a/lib/sqlalchemy/testing/suite/test_insert.py
+++ b/lib/sqlalchemy/testing/suite/test_insert.py
@@ -114,7 +114,13 @@ class InsertBehaviorTest(fixtures.TablesTest):
assert r._soft_closed
assert not r.closed
assert r.is_insert
- assert not r.returns_rows
+
+ # new as of I8091919d45421e3f53029b8660427f844fee0228; for the moment
+ # an insert where the PK was taken from a row that the dialect
+ # selected, as is the case for mssql/pyodbc, will still report
+ # returns_rows as true because there's a cursor description. in that
+ # case, the row had to have been consumed at least.
+ assert not r.returns_rows or r.fetchone() is None
@requirements.returning
def test_autoclose_on_insert_implicit_returning(self, connection):
@@ -124,7 +130,21 @@ class InsertBehaviorTest(fixtures.TablesTest):
assert r._soft_closed
assert not r.closed
assert r.is_insert
- assert not r.returns_rows
+
+ # note we are experimenting with having this be True
+ # as of I8091919d45421e3f53029b8660427f844fee0228 .
+ # implicit returning has fetched the row, but it still is a
+ # "returns rows"
+ assert r.returns_rows
+
+ # and we should be able to fetchone() on it, we just get no row
+ eq_(r.fetchone(), None)
+
+ # and the keys, etc.
+ eq_(r.keys(), ["id"])
+
+ # but the dialect took in the row already. not really sure
+ # what the best behavior is.
@requirements.empty_inserts
def test_empty_insert(self, connection):