diff options
author | Mike Bayer <mike_mp@zzzcomputing.com> | 2022-02-17 13:43:04 -0500 |
---|---|---|
committer | Mike Bayer <mike_mp@zzzcomputing.com> | 2022-03-01 09:09:02 -0500 |
commit | a4bb502cf95ea3523e4d383c4377e50f402d7d52 (patch) | |
tree | 124400f741b6b91f0e9e582b510268607394dfaa /lib/sqlalchemy/engine/_py_row.py | |
parent | 60fca2ac8cf44bdaf68552ab5c69854a6776c73c (diff) | |
download | sqlalchemy-a4bb502cf95ea3523e4d383c4377e50f402d7d52.tar.gz |
pep-484 for engine
All modules in sqlalchemy.engine are strictly
typed with the exception of cursor, default, and
reflection. cursor and default pass with non-strict
typing, reflection is waiting on the multi-reflection
refactor.
Behavioral changes:
* create_connect_args() methods return a tuple of list,
dict, rather than a list of list, dict
* removed allow_chars parameter from
pyodbc connector ._get_server_version_info()
method
* the parameter list passed to do_executemany is now
a list in all cases. previously, this was being run
through dialect.execute_sequence_format, which
defaults to tuple and was only intended for individual
tuple params.
* broke up dialect.dbapi into dialect.import_dbapi
class method and dialect.dbapi module object. added
a deprecation path for legacy dialects. it's not
really feasible to type a single attr as a classmethod
vs. module type. The "type_compiler" attribute also
has this problem with greater ability to work around,
left that one for now.
* lots of constants changing to be Enum, so that we can
type them. for fixed tuple-position constants in
cursor.py / compiler.py (which are used to avoid the
speed overhead of namedtuple), using Literal[value]
which seems to work well
* some tightening up in Row regarding __getitem__, which
we can do since we are on full 2.0 style result use
* altered the set_connection_execution_options and
set_engine_execution_options event flows so that the
dictionary of options may be mutated within the event
hook, where it will then take effect as the actual
options used. Previously, changing the dict would
be silently ignored, which seems counter-intuitive
and not very useful.
* A lot of DefaultDialect/DefaultExecutionContext
methods and attributes, including underscored ones, move
to interfaces. This is not fully ideal as it means
the Dialect/ExecutionContext interfaces aren't publicly
subclassable directly, but their current purpose
is more of documentation for dialect authors who should
(and certainly are) still be subclassing the DefaultXYZ
versions in all cases
Overall, Result was the most extremely difficult class
hierarchy to type here as this hierarchy passes through
largely amorphous "row" datatypes throughout, which
can in fact be all kinds of different things, like
raw DBAPI rows, or Row objects, or "scalar"/Any, but
at the same time these types have meaning so I tried still
maintaining some level of semantic markings for these,
it highlights how complex Result is now, as it's trying
to be extremely efficient and inlined while also being
very open-ended and extensible.
Change-Id: I98b75c0c09eab5355fc7a33ba41dd9874274f12a
Diffstat (limited to 'lib/sqlalchemy/engine/_py_row.py')
-rw-r--r-- | lib/sqlalchemy/engine/_py_row.py | 104 |
1 file changed, 58 insertions, 46 deletions
diff --git a/lib/sqlalchemy/engine/_py_row.py b/lib/sqlalchemy/engine/_py_row.py index a6d5b79d5..7cbac552f 100644 --- a/lib/sqlalchemy/engine/_py_row.py +++ b/lib/sqlalchemy/engine/_py_row.py @@ -1,26 +1,59 @@ from __future__ import annotations +import enum import operator +import typing +from typing import Any +from typing import Callable +from typing import Dict +from typing import Iterator +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import Union + +if typing.TYPE_CHECKING: + from .result import _KeyMapType + from .result import _KeyType + from .result import _ProcessorsType + from .result import _RawRowType + from .result import _TupleGetterType + from .result import ResultMetaData MD_INDEX = 0 # integer index in cursor.description -KEY_INTEGER_ONLY = 0 -"""__getitem__ only allows integer values and slices, raises TypeError - otherwise""" -KEY_OBJECTS_ONLY = 1 -"""__getitem__ only allows string/object values, raises TypeError otherwise""" +class _KeyStyle(enum.Enum): + KEY_INTEGER_ONLY = 0 + """__getitem__ only allows integer values and slices, raises TypeError + otherwise""" -sqlalchemy_engine_row = None + KEY_OBJECTS_ONLY = 1 + """__getitem__ only allows string/object values, raises TypeError + otherwise""" + + +KEY_INTEGER_ONLY, KEY_OBJECTS_ONLY = list(_KeyStyle) class BaseRow: - Row = None __slots__ = ("_parent", "_data", "_keymap", "_key_style") - def __init__(self, parent, processors, keymap, key_style, data): + _parent: ResultMetaData + _data: _RawRowType + _keymap: _KeyMapType + _key_style: _KeyStyle + + def __init__( + self, + parent: ResultMetaData, + processors: Optional[_ProcessorsType], + keymap: _KeyMapType, + key_style: _KeyStyle, + data: _RawRowType, + ): """Row objects are constructed by CursorResult objects.""" - object.__setattr__(self, "_parent", parent) if processors: @@ -41,68 +74,45 @@ class BaseRow: object.__setattr__(self, "_key_style", key_style) - def 
__reduce__(self): + def __reduce__(self) -> Tuple[Callable[..., BaseRow], Tuple[Any, ...]]: return ( rowproxy_reconstructor, (self.__class__, self.__getstate__()), ) - def __getstate__(self): + def __getstate__(self) -> Dict[str, Any]: return { "_parent": self._parent, "_data": self._data, "_key_style": self._key_style, } - def __setstate__(self, state): + def __setstate__(self, state: Dict[str, Any]) -> None: parent = state["_parent"] object.__setattr__(self, "_parent", parent) object.__setattr__(self, "_data", state["_data"]) object.__setattr__(self, "_keymap", parent._keymap) object.__setattr__(self, "_key_style", state["_key_style"]) - def _filter_on_values(self, filters): - global sqlalchemy_engine_row - if sqlalchemy_engine_row is None: - from sqlalchemy.engine.row import Row as sqlalchemy_engine_row - - return sqlalchemy_engine_row( - self._parent, - filters, - self._keymap, - self._key_style, - self._data, - ) - - def _values_impl(self): + def _values_impl(self) -> List[Any]: return list(self) - def __iter__(self): + def __iter__(self) -> Iterator[Any]: return iter(self._data) - def __len__(self): + def __len__(self) -> int: return len(self._data) - def __hash__(self): + def __hash__(self) -> int: return hash(self._data) - def _get_by_int_impl(self, key): + def _get_by_int_impl(self, key: Union[int, slice]) -> Any: return self._data[key] - def _get_by_key_impl(self, key): - # keep two isinstance since it's noticeably faster in the int case - if isinstance(key, int) or isinstance(key, slice): - return self._data[key] - - self._parent._raise_for_nonint(key) - - # The original 1.4 plan was that Row would not allow row["str"] - # access, however as the C extensions were inadvertently allowing - # this coupled with the fact that orm Session sets future=True, - # this allows a softer upgrade path. 
see #6218 - __getitem__ = _get_by_key_impl + if not typing.TYPE_CHECKING: + __getitem__ = _get_by_int_impl - def _get_by_key_impl_mapping(self, key): + def _get_by_key_impl_mapping(self, key: _KeyType) -> Any: try: rec = self._keymap[key] except KeyError as ke: @@ -116,7 +126,7 @@ class BaseRow: return self._data[mdindex] - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: try: return self._get_by_key_impl_mapping(name) except KeyError as e: @@ -125,13 +135,15 @@ class BaseRow: # This reconstructor is necessary so that pickles with the Cy extension or # without use the same Binary format. -def rowproxy_reconstructor(cls, state): +def rowproxy_reconstructor( + cls: Type[BaseRow], state: Dict[str, Any] +) -> BaseRow: obj = cls.__new__(cls) obj.__setstate__(state) return obj -def tuplegetter(*indexes): +def tuplegetter(*indexes: int) -> _TupleGetterType: it = operator.itemgetter(*indexes) if len(indexes) > 1: |