author    Brian Jarrett <celttechie@gmail.com>  2014-07-20 12:44:40 -0400
committer Mike Bayer <mike_mp@zzzcomputing.com> 2014-07-20 12:44:40 -0400
commit    cca03097f47f22783d42d1853faac6cf84607c5a (patch)
tree      4fe1a63d03a2d88d1cf37e1167759dfaf84f4ce7 /lib/sqlalchemy/dialects/postgresql
parent    827329a0cca5351094a1a86b6b2be2b9182f0ae2 (diff)
download  sqlalchemy-cca03097f47f22783d42d1853faac6cf84607c5a.tar.gz
- apply pep8 formatting to sqlalchemy/sql, sqlalchemy/util, sqlalchemy/dialects,
sqlalchemy/orm, sqlalchemy/event, sqlalchemy/testing
Diffstat (limited to 'lib/sqlalchemy/dialects/postgresql')
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/base.py            543
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/constraints.py      15
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/hstore.py           21
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/json.py             66
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/pg8000.py            2
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/psycopg2.py        127
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/pypostgresql.py      3
-rw-r--r--  lib/sqlalchemy/dialects/postgresql/ranges.py            7
8 files changed, 434 insertions(+), 350 deletions(-)
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index 8081f75dd..6f23a497b 100644
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ b/lib/sqlalchemy/dialects/postgresql/base.py
@@ -50,22 +50,25 @@ Transaction Isolation Level
All Postgresql dialects support setting of transaction isolation level
both via a dialect-specific parameter ``isolation_level``
accepted by :func:`.create_engine`,
-as well as the ``isolation_level`` argument as passed to :meth:`.Connection.execution_options`.
-When using a non-psycopg2 dialect, this feature works by issuing the
-command ``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL
-<level>`` for each new connection.
+as well as the ``isolation_level`` argument as passed to
+:meth:`.Connection.execution_options`. When using a non-psycopg2 dialect,
+this feature works by issuing the command
+``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level>`` for
+each new connection.
To set isolation level using :func:`.create_engine`::
engine = create_engine(
- "postgresql+pg8000://scott:tiger@localhost/test",
- isolation_level="READ UNCOMMITTED"
- )
+ "postgresql+pg8000://scott:tiger@localhost/test",
+ isolation_level="READ UNCOMMITTED"
+ )
To set using per-connection execution options::
connection = engine.connect()
- connection = connection.execution_options(isolation_level="READ COMMITTED")
+ connection = connection.execution_options(
+ isolation_level="READ COMMITTED"
+ )
Valid values for ``isolation_level`` include:
@@ -93,12 +96,13 @@ The Postgresql dialect can reflect tables from any schema. The
:paramref:`.Table.schema` argument, or alternatively the
:paramref:`.MetaData.reflect.schema` argument determines which schema will
be searched for the table or tables. The reflected :class:`.Table` objects
-will in all cases retain this ``.schema`` attribute as was specified. However,
-with regards to tables which these :class:`.Table` objects refer to via
-foreign key constraint, a decision must be made as to how the ``.schema``
+will in all cases retain this ``.schema`` attribute as was specified.
+However, with regards to tables which these :class:`.Table` objects refer to
+via foreign key constraint, a decision must be made as to how the ``.schema``
is represented in those remote tables, in the case where that remote
schema name is also a member of the current
-`Postgresql search path <http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
+`Postgresql search path
+<http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
By default, the Postgresql dialect mimics the behavior encouraged by
Postgresql's own ``pg_get_constraintdef()`` builtin procedure. This function
@@ -115,7 +119,8 @@ illustrates this behavior::
CREATE TABLE
test=> SET search_path TO public, test_schema;
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
- test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
+ test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f'
test-> ;
@@ -124,10 +129,11 @@ illustrates this behavior::
FOREIGN KEY (referred_id) REFERENCES referred(id)
(1 row)
-Above, we created a table ``referred`` as a member of the remote schema ``test_schema``, however
-when we added ``test_schema`` to the PG ``search_path`` and then asked ``pg_get_constraintdef()``
-for the ``FOREIGN KEY`` syntax, ``test_schema`` was not included in the
-output of the function.
+Above, we created a table ``referred`` as a member of the remote schema
+``test_schema``; however, when we added ``test_schema`` to the
+PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the
+``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of
+the function.
On the other hand, if we set the search path back to the typical default
of ``public``::
@@ -139,7 +145,8 @@ The same query against ``pg_get_constraintdef()`` now returns the fully
schema-qualified name for us::
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
- test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
+ test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f';
pg_get_constraintdef
@@ -157,7 +164,8 @@ reflection process as follows::
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
- ... referring = Table('referring', meta, autoload=True, autoload_with=conn)
+ ... referring = Table('referring', meta,
+ ... autoload=True, autoload_with=conn)
...
<sqlalchemy.engine.result.ResultProxy object at 0x101612ed0>
@@ -167,16 +175,18 @@ The above process would deliver to the :attr:`.MetaData.tables` collection
>>> meta.tables['referred'].schema is None
True
-To alter the behavior of reflection such that the referred schema is maintained
-regardless of the ``search_path`` setting, use the ``postgresql_ignore_search_path``
-option, which can be specified as a dialect-specific argument to both
-:class:`.Table` as well as :meth:`.MetaData.reflect`::
+To alter the behavior of reflection such that the referred schema is
+maintained regardless of the ``search_path`` setting, use the
+``postgresql_ignore_search_path`` option, which can be specified as a
+dialect-specific argument to both :class:`.Table` as well as
+:meth:`.MetaData.reflect`::
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
- ... referring = Table('referring', meta, autoload=True, autoload_with=conn,
- ... postgresql_ignore_search_path=True)
+ ... referring = Table('referring', meta, autoload=True,
+ ... autoload_with=conn,
+ ... postgresql_ignore_search_path=True)
...
<sqlalchemy.engine.result.ResultProxy object at 0x1016126d0>
@@ -187,29 +197,33 @@ We will now have ``test_schema.referred`` stored as schema-qualified::
.. sidebar:: Best Practices for Postgresql Schema reflection
- The description of Postgresql schema reflection behavior is complex, and is
- the product of many years of dealing with widely varied use cases and user preferences.
- But in fact, there's no need to understand any of it if you just stick to the simplest
- use pattern: leave the ``search_path`` set to its default of ``public`` only, never refer
- to the name ``public`` as an explicit schema name otherwise, and
- refer to all other schema names explicitly when building
- up a :class:`.Table` object. The options described here are only for those users
- who can't, or prefer not to, stay within these guidelines.
-
-Note that **in all cases**, the "default" schema is always reflected as ``None``.
-The "default" schema on Postgresql is that which is returned by the
-Postgresql ``current_schema()`` function. On a typical Postgresql installation,
-this is the name ``public``. So a table that refers to another which is
-in the ``public`` (i.e. default) schema will always have the ``.schema`` attribute
-set to ``None``.
+ The description of Postgresql schema reflection behavior is complex, and
+ is the product of many years of dealing with widely varied use cases and
+ user preferences. But in fact, there's no need to understand any of it if
+ you just stick to the simplest use pattern: leave the ``search_path`` set
+ to its default of ``public`` only, never refer to the name ``public`` as
+ an explicit schema name otherwise, and refer to all other schema names
+ explicitly when building up a :class:`.Table` object. The options
+ described here are only for those users who can't, or prefer not to, stay
+ within these guidelines.
+
+Note that **in all cases**, the "default" schema is always reflected as
+``None``. The "default" schema on Postgresql is that which is returned by the
+Postgresql ``current_schema()`` function. On a typical Postgresql
+installation, this is the name ``public``. So a table that refers to another
+which is in the ``public`` (i.e. default) schema will always have the
+``.schema`` attribute set to ``None``.
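As a minimal sketch of this default (assuming the same ``engine`` and
``referred`` table as in the examples above, with ``search_path`` left at
``public``)::

    >>> meta = MetaData()
    >>> referred = Table('referred', meta, autoload=True,
    ...                  autoload_with=engine)
    >>> referred.schema is None
    True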
.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path``
- dialect-level option accepted by :class:`.Table` and :meth:`.MetaData.reflect`.
+ dialect-level option accepted by :class:`.Table` and
+ :meth:`.MetaData.reflect`.
.. seealso::
- `The Schema Search Path <http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_ - on the Postgresql website.
+ `The Schema Search Path
+ <http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_
+ - on the Postgresql website.
INSERT/UPDATE...RETURNING
-------------------------
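As a hedged sketch of the feature this section covers, RETURNING is
available through the ``returning()`` generative method (an existing
``table`` and ``conn`` are assumed here)::

    result = conn.execute(
        table.insert().returning(table.c.id),  # assumed table with 'id'
        {"data": "some data"}
    )
    new_id = result.fetchone()[0]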
@@ -273,19 +287,19 @@ produces a statement equivalent to::
SELECT CAST('some text' AS TSVECTOR) AS anon_1
Full Text Searches in Postgresql are influenced by a combination of: the
-PostgresSQL setting of ``default_text_search_config``, the ``regconfig`` used to
-build the GIN/GiST indexes, and the ``regconfig`` optionally passed in during a
-query.
+PostgreSQL setting of ``default_text_search_config``, the ``regconfig`` used
+to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in
+during a query.
When performing a Full Text Search against a column that has a GIN or
-GiST index that is already pre-computed (which is common on full text searches)
-one may need to explicitly pass in a particular PostgresSQL ``regconfig`` value
-to ensure the query-planner utilizes the index and does not re-compute the
-column on demand.
+GiST index that is already pre-computed (which is common on full text
+searches) one may need to explicitly pass in a particular PostgreSQL
+``regconfig`` value to ensure the query-planner utilizes the index and does
+not re-compute the column on demand.
-In order to provide for this explicit query planning, or to use different search
-strategies, the ``match`` method accepts a ``postgresql_regconfig`` keyword
-argument.
+In order to provide for this explicit query planning, or to use different
+search strategies, the ``match`` method accepts a ``postgresql_regconfig``
+keyword argument.
select([mytable.c.id]).where(
mytable.c.title.match('somestring', postgresql_regconfig='english')
@@ -296,8 +310,8 @@ Emits the equivalent of::
SELECT mytable.id FROM mytable
WHERE mytable.title @@ to_tsquery('english', 'somestring')
-One can also specifically pass in a `'regconfig'` value to the ``to_tsvector()``
-command as the initial argument.
+One can also specifically pass in a `'regconfig'` value to the
+``to_tsvector()`` command as the initial argument.
select([mytable.c.id]).where(
func.to_tsvector('english', mytable.c.title )\
@@ -310,9 +324,9 @@ produces a statement equivalent to::
WHERE to_tsvector('english', mytable.title) @@
to_tsquery('english', 'somestring')
-It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from PostgresSQL
-to ensure that you are generating queries with SQLAlchemy that take full
-advantage of any indexes you may have created for full text search.
+It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from
+PostgreSQL to ensure that you are generating queries with SQLAlchemy that
+take full advantage of any indexes you may have created for full text search.
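As a hedged sketch of such a check driven from SQLAlchemy (``engine`` and
``mytable`` assumed as in the examples above)::

    with engine.connect() as conn:
        # inspect the plan for the full text query shown above
        plan = conn.execute(
            "EXPLAIN ANALYZE SELECT mytable.id FROM mytable "
            "WHERE to_tsvector('english', mytable.title) "
            "@@ to_tsquery('english', 'somestring')"
        )
        for row in plan:
            print(row[0])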
FROM ONLY ...
------------------------
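As a hedged sketch of this feature (``mytable`` assumed), ONLY is rendered
through the dialect's hint mechanism::

    result = select([mytable]).with_hint(mytable, 'ONLY', 'postgresql')

which emits ``SELECT ... FROM ONLY mytable``.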
@@ -402,26 +416,26 @@ except ImportError:
_python_UUID = None
from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \
- CHAR, TEXT, FLOAT, NUMERIC, \
- DATE, BOOLEAN, REAL
+ CHAR, TEXT, FLOAT, NUMERIC, \
+ DATE, BOOLEAN, REAL
RESERVED_WORDS = set(
["all", "analyse", "analyze", "and", "any", "array", "as", "asc",
- "asymmetric", "both", "case", "cast", "check", "collate", "column",
- "constraint", "create", "current_catalog", "current_date",
- "current_role", "current_time", "current_timestamp", "current_user",
- "default", "deferrable", "desc", "distinct", "do", "else", "end",
- "except", "false", "fetch", "for", "foreign", "from", "grant", "group",
- "having", "in", "initially", "intersect", "into", "leading", "limit",
- "localtime", "localtimestamp", "new", "not", "null", "of", "off", "offset",
- "old", "on", "only", "or", "order", "placing", "primary", "references",
- "returning", "select", "session_user", "some", "symmetric", "table",
- "then", "to", "trailing", "true", "union", "unique", "user", "using",
- "variadic", "when", "where", "window", "with", "authorization",
- "between", "binary", "cross", "current_schema", "freeze", "full",
- "ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
- "notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
- ])
+ "asymmetric", "both", "case", "cast", "check", "collate", "column",
+ "constraint", "create", "current_catalog", "current_date",
+ "current_role", "current_time", "current_timestamp", "current_user",
+ "default", "deferrable", "desc", "distinct", "do", "else", "end",
+ "except", "false", "fetch", "for", "foreign", "from", "grant", "group",
+ "having", "in", "initially", "intersect", "into", "leading", "limit",
+ "localtime", "localtimestamp", "new", "not", "null", "of", "off",
+ "offset", "old", "on", "only", "or", "order", "placing", "primary",
+ "references", "returning", "select", "session_user", "some", "symmetric",
+ "table", "then", "to", "trailing", "true", "union", "unique", "user",
+ "using", "variadic", "when", "where", "window", "with", "authorization",
+ "between", "binary", "cross", "current_schema", "freeze", "full",
+ "ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
+ "notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
+ ])
_DECIMAL_TYPES = (1231, 1700)
_FLOAT_TYPES = (700, 701, 1021, 1022)
@@ -452,6 +466,7 @@ PGMacAddr = MACADDR
class OID(sqltypes.TypeEngine):
+
"""Provide the Postgresql OID type.
.. versionadded:: 0.9.5
@@ -461,18 +476,21 @@ class OID(sqltypes.TypeEngine):
class TIMESTAMP(sqltypes.TIMESTAMP):
+
def __init__(self, timezone=False, precision=None):
super(TIMESTAMP, self).__init__(timezone=timezone)
self.precision = precision
class TIME(sqltypes.TIME):
+
def __init__(self, timezone=False, precision=None):
super(TIME, self).__init__(timezone=timezone)
self.precision = precision
class INTERVAL(sqltypes.TypeEngine):
+
"""Postgresql INTERVAL type.
The INTERVAL type may not be supported on all DBAPIs.
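A hedged usage sketch (column definition only; the rendering of
``precision`` is dialect-specific)::

    from sqlalchemy import Column
    from sqlalchemy.dialects.postgresql import INTERVAL

    Column('duration', INTERVAL(precision=6))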
@@ -511,6 +529,7 @@ PGBit = BIT
class UUID(sqltypes.TypeEngine):
+
"""Postgresql UUID type.
Represents the UUID column type, interpreting
@@ -534,7 +553,8 @@ class UUID(sqltypes.TypeEngine):
"""
if as_uuid and _python_UUID is None:
raise NotImplementedError(
- "This version of Python does not support the native UUID type."
+ "This version of Python does not support "
+ "the native UUID type."
)
self.as_uuid = as_uuid
@@ -560,7 +580,9 @@ class UUID(sqltypes.TypeEngine):
PGUuid = UUID
+
class TSVECTOR(sqltypes.TypeEngine):
+
"""The :class:`.postgresql.TSVECTOR` type implements the Postgresql
text search type TSVECTOR.
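A hedged usage sketch, mirroring the CAST example earlier in this
module's docs::

    from sqlalchemy import select, cast
    from sqlalchemy.dialects.postgresql import TSVECTOR

    stmt = select([cast('some text', TSVECTOR)])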
@@ -577,21 +599,21 @@ class TSVECTOR(sqltypes.TypeEngine):
__visit_name__ = 'TSVECTOR'
-
class _Slice(expression.ColumnElement):
__visit_name__ = 'slice'
type = sqltypes.NULLTYPE
def __init__(self, slice_, source_comparator):
self.start = source_comparator._check_literal(
- source_comparator.expr,
- operators.getitem, slice_.start)
+ source_comparator.expr,
+ operators.getitem, slice_.start)
self.stop = source_comparator._check_literal(
- source_comparator.expr,
- operators.getitem, slice_.stop)
+ source_comparator.expr,
+ operators.getitem, slice_.stop)
class Any(expression.ColumnElement):
+
"""Represent the clause ``left operator ANY (right)``. ``right`` must be
an array expression.
@@ -612,6 +634,7 @@ class Any(expression.ColumnElement):
class All(expression.ColumnElement):
+
"""Represent the clause ``left operator ALL (right)``. ``right`` must be
an array expression.
@@ -632,6 +655,7 @@ class All(expression.ColumnElement):
class array(expression.Tuple):
+
"""A Postgresql ARRAY literal.
This is used to produce ARRAY literals in SQL expressions, e.g.::
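(a hedged sketch; compiling against the Postgresql dialect here is an
assumption for illustration)

    from sqlalchemy.dialects import postgresql
    from sqlalchemy.dialects.postgresql import array
    from sqlalchemy import select

    stmt = select([array([1, 2]) + array([3, 4, 5])])
    print(stmt.compile(dialect=postgresql.dialect()))

producing roughly ``SELECT ARRAY[1, 2] || ARRAY[3, 4, 5]``.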
@@ -673,7 +697,7 @@ class array(expression.Tuple):
def _bind_param(self, operator, obj):
return array(*[
expression.BindParameter(None, o, _compared_to_operator=operator,
- _compared_to_type=self.type, unique=True)
+ _compared_to_type=self.type, unique=True)
for o in obj
])
@@ -682,6 +706,7 @@ class array(expression.Tuple):
class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
+
"""Postgresql ARRAY type.
Represents values as Python lists.
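A hedged column-definition sketch (a ``metadata`` object is assumed)::

    from sqlalchemy import Table, Column, Integer
    from sqlalchemy.dialects.postgresql import ARRAY

    mytable = Table("mytable", metadata,
                    Column("data", ARRAY(Integer, dimensions=2)))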
@@ -757,6 +782,7 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
__visit_name__ = 'ARRAY'
class Comparator(sqltypes.Concatenable.Comparator):
+
"""Define comparison operations for :class:`.ARRAY`."""
def __getitem__(self, index):
@@ -775,7 +801,7 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
return_type = self.type.item_type
return self._binary_operate(self.expr, operators.getitem, index,
- result_type=return_type)
+ result_type=return_type)
def any(self, other, operator=operators.eq):
"""Return ``other operator ANY (array)`` clause.
@@ -902,7 +928,7 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
"""
if isinstance(item_type, ARRAY):
raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
- "handles multi-dimensional arrays of basetype")
+ "handles multi-dimensional arrays of basetype")
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
@@ -921,59 +947,60 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
if dim is None:
arr = list(arr)
if dim == 1 or dim is None and (
- # this has to be (list, tuple), or at least
- # not hasattr('__iter__'), since Py3K strings
- # etc. have __iter__
- not arr or not isinstance(arr[0], (list, tuple))):
+ # this has to be (list, tuple), or at least
+ # not hasattr('__iter__'), since Py3K strings
+ # etc. have __iter__
+ not arr or not isinstance(arr[0], (list, tuple))):
if itemproc:
return collection(itemproc(x) for x in arr)
else:
return collection(arr)
else:
return collection(
- self._proc_array(
- x, itemproc,
- dim - 1 if dim is not None else None,
- collection)
- for x in arr
- )
+ self._proc_array(
+ x, itemproc,
+ dim - 1 if dim is not None else None,
+ collection)
+ for x in arr
+ )
def bind_processor(self, dialect):
item_proc = self.item_type.\
- dialect_impl(dialect).\
- bind_processor(dialect)
+ dialect_impl(dialect).\
+ bind_processor(dialect)
def process(value):
if value is None:
return value
else:
return self._proc_array(
- value,
- item_proc,
- self.dimensions,
- list)
+ value,
+ item_proc,
+ self.dimensions,
+ list)
return process
def result_processor(self, dialect, coltype):
item_proc = self.item_type.\
- dialect_impl(dialect).\
- result_processor(dialect, coltype)
+ dialect_impl(dialect).\
+ result_processor(dialect, coltype)
def process(value):
if value is None:
return value
else:
return self._proc_array(
- value,
- item_proc,
- self.dimensions,
- tuple if self.as_tuple else list)
+ value,
+ item_proc,
+ self.dimensions,
+ tuple if self.as_tuple else list)
return process
PGArray = ARRAY
class ENUM(sqltypes.Enum):
+
"""Postgresql ENUM type.
This is a subclass of :class:`.types.Enum` which includes
@@ -1047,7 +1074,8 @@ class ENUM(sqltypes.Enum):
return
if not checkfirst or \
- not bind.dialect.has_type(bind, self.name, schema=self.schema):
+ not bind.dialect.has_type(
+ bind, self.name, schema=self.schema):
bind.execute(CreateEnumType(self))
def drop(self, bind=None, checkfirst=True):
@@ -1069,7 +1097,7 @@ class ENUM(sqltypes.Enum):
return
if not checkfirst or \
- bind.dialect.has_type(bind, self.name, schema=self.schema):
+ bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(DropEnumType(self))
def _check_for_name_in_memos(self, checkfirst, kw):
@@ -1144,7 +1172,7 @@ ischema_names = {
'interval': INTERVAL,
'interval year to month': INTERVAL,
'interval day to second': INTERVAL,
- 'tsvector' : TSVECTOR
+ 'tsvector': TSVECTOR
}
@@ -1155,9 +1183,9 @@ class PGCompiler(compiler.SQLCompiler):
def visit_slice(self, element, **kw):
return "%s:%s" % (
- self.process(element.start, **kw),
- self.process(element.stop, **kw),
- )
+ self.process(element.start, **kw),
+ self.process(element.stop, **kw),
+ )
def visit_any(self, element, **kw):
return "%s%sANY (%s)" % (
@@ -1181,7 +1209,7 @@ class PGCompiler(compiler.SQLCompiler):
def visit_match_op_binary(self, binary, operator, **kw):
if "postgresql_regconfig" in binary.modifiers:
- regconfig = self.render_literal_value(\
+ regconfig = self.render_literal_value(
binary.modifiers['postgresql_regconfig'],
sqltypes.STRINGTYPE)
if regconfig:
@@ -1199,8 +1227,8 @@ class PGCompiler(compiler.SQLCompiler):
escape = binary.modifiers.get("escape", None)
return '%s ILIKE %s' % \
- (self.process(binary.left, **kw),
- self.process(binary.right, **kw)) \
+ (self.process(binary.left, **kw),
+ self.process(binary.right, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
@@ -1210,8 +1238,8 @@ class PGCompiler(compiler.SQLCompiler):
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT ILIKE %s' % \
- (self.process(binary.left, **kw),
- self.process(binary.right, **kw)) \
+ (self.process(binary.left, **kw),
+ self.process(binary.right, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
@@ -1265,12 +1293,12 @@ class PGCompiler(compiler.SQLCompiler):
if select._for_update_arg.of:
tables = util.OrderedSet(
- c.table if isinstance(c, expression.ColumnClause)
- else c for c in select._for_update_arg.of)
+ c.table if isinstance(c, expression.ColumnClause)
+ else c for c in select._for_update_arg.of)
tmp += " OF " + ", ".join(
- self.process(table, ashint=True)
- for table in tables
- )
+ self.process(table, ashint=True)
+ for table in tables
+ )
if select._for_update_arg.nowait:
tmp += " NOWAIT"
@@ -1280,13 +1308,12 @@ class PGCompiler(compiler.SQLCompiler):
def returning_clause(self, stmt, returning_cols):
columns = [
- self._label_select_column(None, c, True, False, {})
- for c in expression._select_iterables(returning_cols)
- ]
+ self._label_select_column(None, c, True, False, {})
+ for c in expression._select_iterables(returning_cols)
+ ]
return 'RETURNING ' + ', '.join(columns)
-
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0], **kw)
start = self.process(func.clauses.clauses[1], **kw)
@@ -1296,7 +1323,9 @@ class PGCompiler(compiler.SQLCompiler):
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
+
class PGDDLCompiler(compiler.DDLCompiler):
+
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
@@ -1335,7 +1364,7 @@ class PGDDLCompiler(compiler.DDLCompiler):
self.preparer.format_type(type_),
", ".join(
self.sql_compiler.process(sql.literal(e), literal_binds=True)
- for e in type_.enums)
+ for e in type_.enums)
)
def visit_drop_enum_type(self, drop):
@@ -1353,10 +1382,10 @@ class PGDDLCompiler(compiler.DDLCompiler):
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s " % (
- self._prepared_index_name(index,
- include_schema=False),
- preparer.format_table(index.table)
- )
+ self._prepared_index_name(index,
+ include_schema=False),
+ preparer.format_table(index.table)
+ )
using = index.dialect_options['postgresql']['using']
if using:
@@ -1367,20 +1396,20 @@ class PGDDLCompiler(compiler.DDLCompiler):
% (
', '.join([
self.sql_compiler.process(
- expr.self_group()
- if not isinstance(expr, expression.ColumnClause)
- else expr,
- include_table=False, literal_binds=True) +
+ expr.self_group()
+ if not isinstance(expr, expression.ColumnClause)
+ else expr,
+ include_table=False, literal_binds=True) +
(c.key in ops and (' ' + ops[c.key]) or '')
for expr, c in zip(index.expressions, index.columns)])
- )
+ )
whereclause = index.dialect_options["postgresql"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
- whereclause, include_table=False,
- literal_binds=True)
+ whereclause, include_table=False,
+ literal_binds=True)
text += " WHERE " + where_compiled
return text
@@ -1392,17 +1421,19 @@ class PGDDLCompiler(compiler.DDLCompiler):
elements = []
for c in constraint.columns:
op = constraint.operators[c.name]
- elements.append(self.preparer.quote(c.name) + ' WITH '+op)
- text += "EXCLUDE USING %s (%s)" % (constraint.using, ', '.join(elements))
+ elements.append(self.preparer.quote(c.name) + ' WITH ' + op)
+ text += "EXCLUDE USING %s (%s)" % (constraint.using,
+ ', '.join(elements))
if constraint.where is not None:
text += ' WHERE (%s)' % self.sql_compiler.process(
- constraint.where,
- literal_binds=True)
+ constraint.where,
+ literal_binds=True)
text += self.define_constraint_deferrability(constraint)
return text
class PGTypeCompiler(compiler.GenericTypeCompiler):
+
def visit_TSVECTOR(self, type):
return "TSVECTOR"
@@ -1509,8 +1540,8 @@ class PGTypeCompiler(compiler.GenericTypeCompiler):
def visit_ARRAY(self, type_):
return self.process(type_.item_type) + ('[]' * (type_.dimensions
- if type_.dimensions
- is not None else 1))
+ if type_.dimensions
+ is not None else 1))
class PGIdentifierPreparer(compiler.IdentifierPreparer):
@@ -1520,7 +1551,7 @@ class PGIdentifierPreparer(compiler.IdentifierPreparer):
def _unquote_identifier(self, value):
if value[0] == self.initial_quote:
value = value[1:-1].\
- replace(self.escape_to_quote, self.escape_quote)
+ replace(self.escape_to_quote, self.escape_quote)
return value
def format_type(self, type_, use_schema=True):
@@ -1554,21 +1585,25 @@ class DropEnumType(schema._CreateDropBase):
class PGExecutionContext(default.DefaultExecutionContext):
+
def fire_sequence(self, seq, type_):
- return self._execute_scalar(("select nextval('%s')" % \
- self.dialect.identifier_preparer.format_sequence(seq)), type_)
+ return self._execute_scalar((
+ "select nextval('%s')" %
+ self.dialect.identifier_preparer.format_sequence(seq)), type_)
def get_insert_default(self, column):
- if column.primary_key and column is column.table._autoincrement_column:
+ if column.primary_key and \
+ column is column.table._autoincrement_column:
if column.server_default and column.server_default.has_argument:
# pre-execute passive defaults on primary key columns
return self._execute_scalar("select %s" %
- column.server_default.arg, column.type)
+ column.server_default.arg,
+ column.type)
elif (column.default is None or
- (column.default.is_sequence and
- column.default.optional)):
+ (column.default.is_sequence and
+ column.default.optional)):
# execute the sequence associated with a SERIAL primary
# key column. for non-primary-key SERIAL, the ID just
@@ -1587,10 +1622,10 @@ class PGExecutionContext(default.DefaultExecutionContext):
sch = column.table.schema
if sch is not None:
exc = "select nextval('\"%s\".\"%s\"')" % \
- (sch, seq_name)
+ (sch, seq_name)
else:
exc = "select nextval('\"%s\"')" % \
- (seq_name, )
+ (seq_name, )
return self._execute_scalar(exc, column.type)
@@ -1643,7 +1678,7 @@ class PGDialect(default.DefaultDialect):
_backslash_escapes = True
def __init__(self, isolation_level=None, json_serializer=None,
- json_deserializer=None, **kwargs):
+ json_deserializer=None, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_deserializer = json_deserializer
@@ -1652,7 +1687,7 @@ class PGDialect(default.DefaultDialect):
def initialize(self, connection):
super(PGDialect, self).initialize(connection)
self.implicit_returning = self.server_version_info > (8, 2) and \
- self.__dict__.get('implicit_returning', True)
+ self.__dict__.get('implicit_returning', True)
self.supports_native_enum = self.server_version_info >= (8, 3)
if not self.supports_native_enum:
self.colspecs = self.colspecs.copy()
@@ -1665,9 +1700,9 @@ class PGDialect(default.DefaultDialect):
self.supports_smallserial = self.server_version_info >= (9, 2)
self._backslash_escapes = self.server_version_info < (8, 2) or \
- connection.scalar(
- "show standard_conforming_strings"
- ) == 'off'
+ connection.scalar(
+ "show standard_conforming_strings"
+ ) == 'off'
def on_connect(self):
if self.isolation_level is not None:
@@ -1677,8 +1712,8 @@ class PGDialect(default.DefaultDialect):
else:
return None
- _isolation_lookup = set(['SERIALIZABLE',
- 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])
+ _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
+ 'READ COMMITTED', 'REPEATABLE READ'])
def set_isolation_level(self, connection, level):
level = level.replace('_', ' ')
@@ -1687,7 +1722,7 @@ class PGDialect(default.DefaultDialect):
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
- )
+ )
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
@@ -1709,10 +1744,10 @@ class PGDialect(default.DefaultDialect):
connection.execute("PREPARE TRANSACTION '%s'" % xid)
def do_rollback_twophase(self, connection, xid,
- is_prepared=True, recover=False):
+ is_prepared=True, recover=False):
if is_prepared:
if recover:
- #FIXME: ugly hack to get out of transaction
+ # FIXME: ugly hack to get out of transaction
# context when committing recoverable transactions
# Must find out a way how to make the dbapi not
# open a transaction.
@@ -1724,7 +1759,7 @@ class PGDialect(default.DefaultDialect):
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid,
- is_prepared=True, recover=False):
+ is_prepared=True, recover=False):
if is_prepared:
if recover:
connection.execute("ROLLBACK")
@@ -1736,14 +1771,15 @@ class PGDialect(default.DefaultDialect):
def do_recover_twophase(self, connection):
resultset = connection.execute(
- sql.text("SELECT gid FROM pg_prepared_xacts"))
+ sql.text("SELECT gid FROM pg_prepared_xacts"))
return [row[0] for row in resultset]
def _get_default_schema_name(self, connection):
return connection.scalar("select current_schema()")
def has_schema(self, connection, schema):
- query = "select nspname from pg_namespace where lower(nspname)=:schema"
+ query = ("select nspname from pg_namespace "
+ "where lower(nspname)=:schema")
cursor = connection.execute(
sql.text(
query,
@@ -1761,25 +1797,27 @@ class PGDialect(default.DefaultDialect):
if schema is None:
cursor = connection.execute(
sql.text(
- "select relname from pg_class c join pg_namespace n on "
- "n.oid=c.relnamespace where n.nspname=current_schema() and "
- "relname=:name",
- bindparams=[
+ "select relname from pg_class c join pg_namespace n on "
+ "n.oid=c.relnamespace where n.nspname=current_schema() "
+ "and relname=:name",
+ bindparams=[
sql.bindparam('name', util.text_type(table_name),
- type_=sqltypes.Unicode)]
+ type_=sqltypes.Unicode)]
)
)
else:
cursor = connection.execute(
sql.text(
- "select relname from pg_class c join pg_namespace n on "
- "n.oid=c.relnamespace where n.nspname=:schema and "
- "relname=:name",
+ "select relname from pg_class c join pg_namespace n on "
+ "n.oid=c.relnamespace where n.nspname=:schema and "
+ "relname=:name",
bindparams=[
sql.bindparam('name',
- util.text_type(table_name), type_=sqltypes.Unicode),
+ util.text_type(table_name),
+ type_=sqltypes.Unicode),
sql.bindparam('schema',
- util.text_type(schema), type_=sqltypes.Unicode)]
+ util.text_type(schema),
+ type_=sqltypes.Unicode)]
)
)
return bool(cursor.first())
@@ -1794,23 +1832,24 @@ class PGDialect(default.DefaultDialect):
"and relname=:name",
bindparams=[
sql.bindparam('name', util.text_type(sequence_name),
- type_=sqltypes.Unicode)
+ type_=sqltypes.Unicode)
]
)
)
else:
cursor = connection.execute(
sql.text(
- "SELECT relname FROM pg_class c join pg_namespace n on "
- "n.oid=c.relnamespace where relkind='S' and "
- "n.nspname=:schema and relname=:name",
- bindparams=[
- sql.bindparam('name', util.text_type(sequence_name),
- type_=sqltypes.Unicode),
- sql.bindparam('schema',
- util.text_type(schema), type_=sqltypes.Unicode)
- ]
- )
+ "SELECT relname FROM pg_class c join pg_namespace n on "
+ "n.oid=c.relnamespace where relkind='S' and "
+ "n.nspname=:schema and relname=:name",
+ bindparams=[
+ sql.bindparam('name', util.text_type(sequence_name),
+ type_=sqltypes.Unicode),
+ sql.bindparam('schema',
+ util.text_type(schema),
+ type_=sqltypes.Unicode)
+ ]
+ )
)
return bool(cursor.first())
@@ -1836,14 +1875,14 @@ class PGDialect(default.DefaultDialect):
"""
query = sql.text(query)
query = query.bindparams(
- sql.bindparam('typname',
- util.text_type(type_name), type_=sqltypes.Unicode),
- )
+ sql.bindparam('typname',
+ util.text_type(type_name), type_=sqltypes.Unicode),
+ )
if schema is not None:
query = query.bindparams(
- sql.bindparam('nspname',
- util.text_type(schema), type_=sqltypes.Unicode),
- )
+ sql.bindparam('nspname',
+ util.text_type(schema), type_=sqltypes.Unicode),
+ )
cursor = connection.execute(query)
return bool(cursor.scalar())
@@ -1855,7 +1894,7 @@ class PGDialect(default.DefaultDialect):
v)
if not m:
raise AssertionError(
- "Could not determine version from string '%s'" % v)
+ "Could not determine version from string '%s'" % v)
return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
@reflection.cache
@@ -1905,11 +1944,11 @@ class PGDialect(default.DefaultDialect):
# what about system tables?
if util.py2k:
- schema_names = [row[0].decode(self.encoding) for row in rp \
- if not row[0].startswith('pg_')]
+ schema_names = [row[0].decode(self.encoding) for row in rp
+ if not row[0].startswith('pg_')]
else:
- schema_names = [row[0] for row in rp \
- if not row[0].startswith('pg_')]
+ schema_names = [row[0] for row in rp
+ if not row[0].startswith('pg_')]
return schema_names
@reflection.cache
@@ -1921,12 +1960,12 @@ class PGDialect(default.DefaultDialect):
result = connection.execute(
sql.text("SELECT relname FROM pg_class c "
- "WHERE relkind = 'r' "
- "AND '%s' = (select nspname from pg_namespace n "
- "where n.oid = c.relnamespace) " %
- current_schema,
- typemap={'relname': sqltypes.Unicode}
- )
+ "WHERE relkind = 'r' "
+ "AND '%s' = (select nspname from pg_namespace n "
+ "where n.oid = c.relnamespace) " %
+ current_schema,
+ typemap={'relname': sqltypes.Unicode}
+ )
)
return [row[0] for row in result]
@@ -1946,7 +1985,7 @@ class PGDialect(default.DefaultDialect):
if util.py2k:
view_names = [row[0].decode(self.encoding)
- for row in connection.execute(s)]
+ for row in connection.execute(s)]
else:
view_names = [row[0] for row in connection.execute(s)]
return view_names
@@ -1991,9 +2030,12 @@ class PGDialect(default.DefaultDialect):
ORDER BY a.attnum
"""
s = sql.text(SQL_COLS,
- bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)],
- typemap={'attname': sqltypes.Unicode, 'default': sqltypes.Unicode}
- )
+ bindparams=[
+ sql.bindparam('table_oid', type_=sqltypes.Integer)],
+ typemap={
+ 'attname': sqltypes.Unicode,
+ 'default': sqltypes.Unicode}
+ )
c = connection.execute(s, table_oid=table_oid)
rows = c.fetchall()
domains = self._load_domains(connection)
@@ -2009,7 +2051,7 @@ class PGDialect(default.DefaultDialect):
def _get_column_info(self, name, format_type, default,
notnull, domains, enums, schema):
- ## strip (*) from character varying(5), timestamp(5)
+ # strip (*) from character varying(5), timestamp(5)
# with time zone, geometry(POLYGON), etc.
attype = re.sub(r'\(.*\)', '', format_type)
@@ -2057,7 +2099,7 @@ class PGDialect(default.DefaultDialect):
else:
args = ()
elif attype in ('interval', 'interval year to month',
- 'interval day to second'):
+ 'interval day to second'):
if charlen:
kwargs['precision'] = int(charlen)
args = ()
@@ -2112,8 +2154,8 @@ class PGDialect(default.DefaultDialect):
# later be enhanced to obey quoting rules /
# "quote schema"
default = match.group(1) + \
- ('"%s"' % sch) + '.' + \
- match.group(2) + match.group(3)
+ ('"%s"' % sch) + '.' + \
+ match.group(2) + match.group(3)
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
@@ -2169,7 +2211,7 @@ class PGDialect(default.DefaultDialect):
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None,
- postgresql_ignore_search_path=False, **kw):
+ postgresql_ignore_search_path=False, **kw):
preparer = self.identifier_preparer
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
@@ -2192,29 +2234,32 @@ class PGDialect(default.DefaultDialect):
FK_REGEX = re.compile(
r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)'
r'[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?'
- r'[\s]?(ON UPDATE (CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
- r'[\s]?(ON DELETE (CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
+ r'[\s]?(ON UPDATE '
+ r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
+ r'[\s]?(ON DELETE '
+ r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
r'[\s]?(DEFERRABLE|NOT DEFERRABLE)?'
r'[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?'
)
t = sql.text(FK_SQL, typemap={
- 'conname': sqltypes.Unicode,
- 'condef': sqltypes.Unicode})
+ 'conname': sqltypes.Unicode,
+ 'condef': sqltypes.Unicode})
c = connection.execute(t, table=table_oid)
fkeys = []
for conname, condef, conschema in c.fetchall():
m = re.search(FK_REGEX, condef).groups()
constrained_columns, referred_schema, \
- referred_table, referred_columns, \
- _, match, _, onupdate, _, ondelete, \
- deferrable, _, initially = m
+ referred_table, referred_columns, \
+ _, match, _, onupdate, _, ondelete, \
+ deferrable, _, initially = m
if deferrable is not None:
deferrable = True if deferrable == 'DEFERRABLE' else False
constrained_columns = [preparer._unquote_identifier(x)
- for x in re.split(r'\s*,\s*', constrained_columns)]
+ for x in re.split(
+ r'\s*,\s*', constrained_columns)]
if postgresql_ignore_search_path:
# when ignoring search path, we use the actual schema
@@ -2228,7 +2273,7 @@ class PGDialect(default.DefaultDialect):
# pg_get_constraintdef(). If the schema is in the search
# path, pg_get_constraintdef() will give us None.
referred_schema = \
- preparer._unquote_identifier(referred_schema)
+ preparer._unquote_identifier(referred_schema)
elif schema is not None and schema == conschema:
# If the actual schema matches the schema of the table
# we're reflecting, then we will use that.
@@ -2236,7 +2281,8 @@ class PGDialect(default.DefaultDialect):
referred_table = preparer._unquote_identifier(referred_table)
referred_columns = [preparer._unquote_identifier(x)
- for x in re.split(r'\s*,\s', referred_columns)]
+ for x in
+ re.split(r'\s*,\s', referred_columns)]
fkey_d = {
'name': conname,
'constrained_columns': constrained_columns,
@@ -2263,9 +2309,9 @@ class PGDialect(default.DefaultDialect):
# for now.
# regards, tom lane"
return "(%s)" % " OR ".join(
- "%s[%d] = %s" % (compare_to, ind, col)
- for ind in range(0, 10)
- )
+ "%s[%d] = %s" % (compare_to, ind, col)
+ for ind in range(0, 10)
+ )
else:
return "%s = ANY(%s)" % (col, compare_to)
@@ -2297,12 +2343,12 @@ class PGDialect(default.DefaultDialect):
t.relname,
i.relname
""" % (
- # version 8.3 here was based on observing the
- # cast does not work in PG 8.2.4, does work in 8.3.0.
- # nothing in PG changelogs regarding this.
- "::varchar" if self.server_version_info >= (8, 3) else "",
- self._pg_index_any("a.attnum", "ix.indkey")
- )
+ # version 8.3 here was based on observing the
+ # cast does not work in PG 8.2.4, does work in 8.3.0.
+ # nothing in PG changelogs regarding this.
+ "::varchar" if self.server_version_info >= (8, 3) else "",
+ self._pg_index_any("a.attnum", "ix.indkey")
+ )
t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
@@ -2316,16 +2362,16 @@ class PGDialect(default.DefaultDialect):
if expr:
if idx_name != sv_idx_name:
util.warn(
- "Skipped unsupported reflection of "
- "expression-based index %s"
- % idx_name)
+ "Skipped unsupported reflection of "
+ "expression-based index %s"
+ % idx_name)
sv_idx_name = idx_name
continue
if prd and not idx_name == sv_idx_name:
util.warn(
- "Predicate of partial index %s ignored during reflection"
- % idx_name)
+ "Predicate of partial index %s ignored during reflection"
+ % idx_name)
sv_idx_name = idx_name
index = indexes[idx_name]
@@ -2356,7 +2402,8 @@ class PGDialect(default.DefaultDialect):
FROM
pg_catalog.pg_constraint cons
join pg_attribute a
- on cons.conrelid = a.attrelid AND a.attnum = ANY(cons.conkey)
+ on cons.conrelid = a.attrelid AND
+ a.attnum = ANY(cons.conkey)
WHERE
cons.conrelid = :table_oid AND
cons.contype = 'u'
@@ -2381,7 +2428,7 @@ class PGDialect(default.DefaultDialect):
if not self.supports_native_enum:
return {}
- ## Load data types for enums:
+ # Load data types for enums:
SQL_ENUMS = """
SELECT t.typname as "name",
-- no enum defaults in 8.4 at least
@@ -2397,8 +2444,8 @@ class PGDialect(default.DefaultDialect):
"""
s = sql.text(SQL_ENUMS, typemap={
- 'attname': sqltypes.Unicode,
- 'label': sqltypes.Unicode})
+ 'attname': sqltypes.Unicode,
+ 'label': sqltypes.Unicode})
c = connection.execute(s)
enums = {}
@@ -2416,13 +2463,13 @@ class PGDialect(default.DefaultDialect):
enums[name]['labels'].append(enum['label'])
else:
enums[name] = {
- 'labels': [enum['label']],
- }
+ 'labels': [enum['label']],
+ }
return enums
def _load_domains(self, connection):
- ## Load data types for domains:
+ # Load data types for domains:
SQL_DOMAINS = """
SELECT t.typname as "name",
pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
@@ -2440,7 +2487,7 @@ class PGDialect(default.DefaultDialect):
domains = {}
for domain in c.fetchall():
- ## strip (30) from character varying(30)
+ # strip (30) from character varying(30)
attype = re.search('([^\(]+)', domain['attype']).group(1)
if domain['visible']:
# 'visible' just means whether or not the domain is in a
@@ -2452,9 +2499,9 @@ class PGDialect(default.DefaultDialect):
name = "%s.%s" % (domain['schema'], domain['name'])
domains[name] = {
- 'attype': attype,
- 'nullable': domain['nullable'],
- 'default': domain['default']
- }
+ 'attype': attype,
+ 'nullable': domain['nullable'],
+ 'default': domain['default']
+ }
return domains
diff --git a/lib/sqlalchemy/dialects/postgresql/constraints.py b/lib/sqlalchemy/dialects/postgresql/constraints.py
index 2eed2fb36..e8ebc75dd 100644
--- a/lib/sqlalchemy/dialects/postgresql/constraints.py
+++ b/lib/sqlalchemy/dialects/postgresql/constraints.py
@@ -6,13 +6,15 @@
from sqlalchemy.schema import ColumnCollectionConstraint
from sqlalchemy.sql import expression
+
class ExcludeConstraint(ColumnCollectionConstraint):
"""A table-level EXCLUDE constraint.
Defines an EXCLUDE constraint as described in the `postgres
documentation`__.
- __ http://www.postgresql.org/docs/9.0/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
+ __ http://www.postgresql.org/docs/9.0/\
+static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
"""
__visit_name__ = 'exclude_constraint'
@@ -52,7 +54,7 @@ class ExcludeConstraint(ColumnCollectionConstraint):
name=kw.get('name'),
deferrable=kw.get('deferrable'),
initially=kw.get('initially')
- )
+ )
self.operators = {}
for col_or_string, op in elements:
name = getattr(col_or_string, 'name', col_or_string)
@@ -60,15 +62,14 @@ class ExcludeConstraint(ColumnCollectionConstraint):
self.using = kw.get('using', 'gist')
where = kw.get('where')
if where:
- self.where = expression._literal_as_text(where)
+ self.where = expression._literal_as_text(where)
def copy(self, **kw):
elements = [(col, self.operators[col])
for col in self.columns.keys()]
c = self.__class__(*elements,
- name=self.name,
- deferrable=self.deferrable,
- initially=self.initially)
+ name=self.name,
+ deferrable=self.deferrable,
+ initially=self.initially)
c.dispatch._update(self.dispatch)
return c
-
diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py
index f1fb3d308..9601edc41 100644
--- a/lib/sqlalchemy/dialects/postgresql/hstore.py
+++ b/lib/sqlalchemy/dialects/postgresql/hstore.py
@@ -69,11 +69,13 @@ def _parse_hstore(hstore_str):
pair_match = HSTORE_PAIR_RE.match(hstore_str)
while pair_match is not None:
- key = pair_match.group('key').replace(r'\"', '"').replace("\\\\", "\\")
+ key = pair_match.group('key').replace(r'\"', '"').replace(
+ "\\\\", "\\")
if pair_match.group('value_null'):
value = None
else:
- value = pair_match.group('value').replace(r'\"', '"').replace("\\\\", "\\")
+ value = pair_match.group('value').replace(
+ r'\"', '"').replace("\\\\", "\\")
result[key] = value
pos += pair_match.end()
@@ -140,15 +142,16 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
data_table.c.data + {"k1": "v1"}
- For a full list of special methods see :class:`.HSTORE.comparator_factory`.
+ For a full list of special methods see
+ :class:`.HSTORE.comparator_factory`.
For usage with the SQLAlchemy ORM, it may be desirable to combine
the usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary
now part of the :mod:`sqlalchemy.ext.mutable`
extension. This extension will allow "in-place" changes to the
dictionary, e.g. addition of new keys or replacement/removal of existing
- keys to/from the current dictionary, to produce events which will be detected
- by the unit of work::
+ keys to/from the current dictionary, to produce events which will be
+ detected by the unit of work::
from sqlalchemy.ext.mutable import MutableDict
@@ -167,9 +170,9 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
session.commit()
When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
- will not be alerted to any changes to the contents of an existing dictionary,
- unless that dictionary value is re-assigned to the HSTORE-attribute itself,
- thus generating a change event.
+ will not be alerted to any changes to the contents of an existing
+ dictionary, unless that dictionary value is re-assigned to the
+ HSTORE-attribute itself, thus generating a change event.
.. versionadded:: 0.8
@@ -272,6 +275,7 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
def bind_processor(self, dialect):
if util.py2k:
encoding = dialect.encoding
+
def process(value):
if isinstance(value, dict):
return _serialize_hstore(value).encode(encoding)
@@ -288,6 +292,7 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
def result_processor(self, dialect, coltype):
if util.py2k:
encoding = dialect.encoding
+
def process(value):
if value is not None:
return _parse_hstore(value.decode(encoding))
diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py
index 902d0a80d..25ac342af 100644
--- a/lib/sqlalchemy/dialects/postgresql/json.py
+++ b/lib/sqlalchemy/dialects/postgresql/json.py
@@ -27,24 +27,28 @@ class JSONElement(elements.BinaryExpression):
expr = mytable.c.json_data['some_key']
The expression typically compiles to a JSON access such as ``col -> key``.
- Modifiers are then available for typing behavior, including :meth:`.JSONElement.cast`
- and :attr:`.JSONElement.astext`.
+ Modifiers are then available for typing behavior, including
+ :meth:`.JSONElement.cast` and :attr:`.JSONElement.astext`.
"""
- def __init__(self, left, right, astext=False, opstring=None, result_type=None):
+
+ def __init__(self, left, right, astext=False,
+ opstring=None, result_type=None):
self._astext = astext
if opstring is None:
if hasattr(right, '__iter__') and \
- not isinstance(right, util.string_types):
+ not isinstance(right, util.string_types):
opstring = "#>"
- right = "{%s}" % (", ".join(util.text_type(elem) for elem in right))
+ right = "{%s}" % (
+ ", ".join(util.text_type(elem) for elem in right))
else:
opstring = "->"
self._json_opstring = opstring
operator = custom_op(opstring, precedence=5)
right = left._check_literal(left, operator, right)
- super(JSONElement, self).__init__(left, right, operator, type_=result_type)
+ super(JSONElement, self).__init__(
+ left, right, operator, type_=result_type)
@property
def astext(self):
@@ -64,12 +68,12 @@ class JSONElement(elements.BinaryExpression):
return self
else:
return JSONElement(
- self.left,
- self.right,
- astext=True,
- opstring=self._json_opstring + ">",
- result_type=sqltypes.String(convert_unicode=True)
- )
+ self.left,
+ self.right,
+ astext=True,
+ opstring=self._json_opstring + ">",
+ result_type=sqltypes.String(convert_unicode=True)
+ )
def cast(self, type_):
"""Convert this :class:`.JSONElement` to apply both the 'astext' operator
@@ -126,15 +130,16 @@ class JSON(sqltypes.TypeEngine):
* Path index operations returning text (required for text comparison)::
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == 'some value'
+ data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\
+ 'some value'
- Index operations return an instance of :class:`.JSONElement`, which represents
- an expression such as ``column -> index``. This element then defines
- methods such as :attr:`.JSONElement.astext` and :meth:`.JSONElement.cast`
- for setting up type behavior.
+ Index operations return an instance of :class:`.JSONElement`, which
+ represents an expression such as ``column -> index``. This element then
+ defines methods such as :attr:`.JSONElement.astext` and
+ :meth:`.JSONElement.cast` for setting up type behavior.
- The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not detect
- in-place mutations to the structure. In order to detect these, the
+ The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not
+ detect in-place mutations to the structure. In order to detect these, the
:mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
allow "in-place" changes to the datastructure to produce events which
will be detected by the unit of work. See the example at :class:`.HSTORE`
@@ -178,6 +183,7 @@ class JSON(sqltypes.TypeEngine):
json_serializer = dialect._json_serializer or json.dumps
if util.py2k:
encoding = dialect.encoding
+
def process(value):
return json_serializer(value).encode(encoding)
else:
@@ -189,6 +195,7 @@ class JSON(sqltypes.TypeEngine):
json_deserializer = dialect._json_deserializer or json.loads
if util.py2k:
encoding = dialect.encoding
+
def process(value):
return json_deserializer(value.decode(encoding))
else:
@@ -200,7 +207,6 @@ class JSON(sqltypes.TypeEngine):
ischema_names['json'] = JSON
-
class JSONB(JSON):
"""Represent the Postgresql JSONB type.
@@ -237,15 +243,16 @@ class JSONB(JSON):
* Path index operations returning text (required for text comparison)::
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == 'some value'
+ data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\
+ 'some value'
- Index operations return an instance of :class:`.JSONElement`, which represents
- an expression such as ``column -> index``. This element then defines
- methods such as :attr:`.JSONElement.astext` and :meth:`.JSONElement.cast`
- for setting up type behavior.
+ Index operations return an instance of :class:`.JSONElement`, which
+ represents an expression such as ``column -> index``. This element then
+ defines methods such as :attr:`.JSONElement.astext` and
+ :meth:`.JSONElement.cast` for setting up type behavior.
- The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not detect
- in-place mutations to the structure. In order to detect these, the
+ The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not
+ detect in-place mutations to the structure. In order to detect these, the
:mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
allow "in-place" changes to the datastructure to produce events which
will be detected by the unit of work. See the example at :class:`.HSTORE`
@@ -280,7 +287,8 @@ class JSONB(JSON):
return JSONElement(self.expr, other)
def _adapt_expression(self, op, other_comparator):
- # How does one do equality?? jsonb also has "=" eg. '[1,2,3]'::jsonb = '[1,2,3]'::jsonb
+ # How does one do equality?? jsonb also has "=" eg.
+ # '[1,2,3]'::jsonb = '[1,2,3]'::jsonb
if isinstance(op, custom_op):
if op.opstring in ['?', '?&', '?|', '@>', '<@']:
return op, sqltypes.Boolean
@@ -317,4 +325,4 @@ class JSONB(JSON):
"""
return self.expr.op('<@')(other)
-ischema_names['jsonb'] = JSONB \ No newline at end of file
+ischema_names['jsonb'] = JSONB
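As a hedged usage sketch of the type registered above (a ``metadata``
object is assumed)::

    from sqlalchemy import Table, Column, Integer
    from sqlalchemy.dialects.postgresql import JSONB

    data_table = Table('data_table', metadata,
                       Column('id', Integer, primary_key=True),
                       Column('data', JSONB))

    # index operations return JSONElement; astext coerces to text
    expr = data_table.c.data['some_key'].astext == 'some value'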
diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py
index dc5ed6e73..512f3e1b0 100644
--- a/lib/sqlalchemy/dialects/postgresql/pg8000.py
+++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py
@@ -165,6 +165,6 @@ class PGDialect_pg8000(PGDialect):
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s or AUTOCOMMIT" %
(level, self.name, ", ".join(self._isolation_lookup))
- )
+ )
dialect = PGDialect_pg8000
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
index 0ab4abb09..e6450c97f 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -9,7 +9,8 @@
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
- :connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
+ :connectstring: postgresql+psycopg2://user:password@host:port/dbname\
+[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2/
psycopg2 Connect Arguments
@@ -21,9 +22,9 @@ psycopg2-specific keyword arguments which are accepted by
* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
- ``connection.cursor('some name')``, which has the effect that result rows are
- not immediately pre-fetched and buffered after statement execution, but are
- instead left on the server and only retrieved as needed. SQLAlchemy's
+ ``connection.cursor('some name')``, which has the effect that result rows
+ are not immediately pre-fetched and buffered after statement execution, but
+ are instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
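A hedged sketch of enabling this engine-wide::

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        server_side_cursors=True)

The same behavior is also available per-statement via the
``stream_results`` execution option.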
@@ -54,7 +55,8 @@ using ``host`` as an additional keyword argument::
See also:
-`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
+`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static\
+/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Per-Statement/Connection Execution Options
-------------------------------------------
@@ -90,11 +92,13 @@ Typically, this can be changed to ``utf-8``, as a more useful default::
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's ``set_client_encoding()``
-method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
+method (see:
+http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
on all new connections based on the value passed to
:func:`.create_engine` using the ``client_encoding`` parameter::
- engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8')
+ engine = create_engine("postgresql://user:pass@host/dbname",
+ client_encoding='utf8')
This overrides the encoding specified in the Postgresql client configuration.
@@ -128,11 +132,12 @@ Psycopg2 Transaction Isolation Level
As discussed in :ref:`postgresql_isolation_level`,
all Postgresql dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to :func:`.create_engine`,
-as well as the ``isolation_level`` argument used by :meth:`.Connection.execution_options`.
-When using the psycopg2 dialect, these options make use of
-psycopg2's ``set_isolation_level()`` connection method, rather than
-emitting a Postgresql directive; this is because psycopg2's API-level
-setting is always emitted at the start of each transaction in any case.
+as well as the ``isolation_level`` argument used by
+:meth:`.Connection.execution_options`. When using the psycopg2 dialect, these
+options make use of psycopg2's ``set_isolation_level()`` connection method,
+rather than emitting a Postgresql directive; this is because psycopg2's
+API-level setting is always emitted at the start of each transaction in any
+case.
The psycopg2 dialect supports these constants for isolation level:
@@ -166,35 +171,38 @@ The psycopg2 dialect will log Postgresql NOTICE messages via the
HSTORE type
------------
-The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of the
-HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
+The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of
+the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
by default when it is detected that the target database has the HSTORE
type set up for use. In other words, when the dialect makes the first
connection, a sequence like the following is performed:
-1. Request the available HSTORE oids using ``psycopg2.extras.HstoreAdapter.get_oids()``.
- If this function returns a list of HSTORE identifiers, we then determine that
- the ``HSTORE`` extension is present.
+1. Request the available HSTORE oids using
+ ``psycopg2.extras.HstoreAdapter.get_oids()``.
+ If this function returns a list of HSTORE identifiers, we then determine
+ that the ``HSTORE`` extension is present.
2. If the ``use_native_hstore`` flag is at its default of ``True``, and
we've detected that ``HSTORE`` oids are available, the
``psycopg2.extensions.register_hstore()`` extension is invoked for all
connections.
-The ``register_hstore()`` extension has the effect of **all Python dictionaries
-being accepted as parameters regardless of the type of target column in SQL**.
-The dictionaries are converted by this extension into a textual HSTORE expression.
-If this behavior is not desired, disable the
-use of the hstore extension by setting ``use_native_hstore`` to ``False`` as follows::
+The ``register_hstore()`` extension has the effect of **all Python
+dictionaries being accepted as parameters regardless of the type of target
+column in SQL**. The dictionaries are converted by this extension into a
+textual HSTORE expression. If this behavior is not desired, disable the
+use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
+follows::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False)
-The ``HSTORE`` type is **still supported** when the ``psycopg2.extensions.register_hstore()``
-extension is not used. It merely means that the coercion between Python dictionaries and the HSTORE
+The ``HSTORE`` type is **still supported** when the
+``psycopg2.extensions.register_hstore()`` extension is not used. It merely
+means that the coercion between Python dictionaries and the HSTORE
string format, on both the parameter side and the result side, will take
-place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2`` which
-may be more performant.
+place within SQLAlchemy's own marshalling logic, and not that of
+``psycopg2``, which may be more performant.
"""
from __future__ import absolute_import
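A rough round-trip sketch of the dictionary coercion described above; it
behaves the same whether the native extension or SQLAlchemy's own
marshalling is in use (assumes the HSTORE extension is installed in the
target database, and reuses an ``engine`` as in the earlier examples)::

    from sqlalchemy import Table, Column, Integer, MetaData
    from sqlalchemy.dialects.postgresql import HSTORE

    metadata = MetaData()
    kv = Table('kv', metadata,
               Column('id', Integer, primary_key=True),
               Column('data', HSTORE))
    metadata.create_all(engine)

    with engine.begin() as conn:
        # a plain dict is marshalled to an HSTORE value on the way in...
        conn.execute(kv.insert(), {'id': 1, 'data': {'k1': 'v1'}})
        # ...and comes back out as a dict of strings
        row = conn.execute(kv.select()).first()
        assert row.data == {'k1': 'v1'}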
@@ -209,9 +217,9 @@ from ...engine import result as _result
from ...sql import expression
from ... import types as sqltypes
from .base import PGDialect, PGCompiler, \
- PGIdentifierPreparer, PGExecutionContext, \
- ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
- _INT_TYPES
+ PGIdentifierPreparer, PGExecutionContext, \
+ ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
+ _INT_TYPES
from .hstore import HSTORE
from .json import JSON
@@ -227,14 +235,14 @@ class _PGNumeric(sqltypes.Numeric):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
- decimal.Decimal,
- self._effective_decimal_return_scale)
+ decimal.Decimal,
+ self._effective_decimal_return_scale)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
+ "Unknown PG numeric type: %d" % coltype)
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
@@ -243,7 +251,7 @@ class _PGNumeric(sqltypes.Numeric):
return processors.to_float
else:
raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
+ "Unknown PG numeric type: %d" % coltype)
class _PGEnum(ENUM):
@@ -255,6 +263,7 @@ class _PGEnum(ENUM):
self.convert_unicode = "force_nocheck"
return super(_PGEnum, self).result_processor(dialect, coltype)
+
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if dialect._has_native_hstore:
@@ -293,13 +302,16 @@ class PGExecutionContext_psycopg2(PGExecutionContext):
if self.dialect.server_side_cursors:
is_server_side = \
self.execution_options.get('stream_results', True) and (
- (self.compiled and isinstance(self.compiled.statement, expression.Selectable) \
- or \
- (
+ (self.compiled and isinstance(self.compiled.statement,
+ expression.Selectable)
+ or
+ (
(not self.compiled or
- isinstance(self.compiled.statement, expression.TextClause))
- and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement))
- )
+ isinstance(self.compiled.statement,
+ expression.TextClause))
+ and self.statement and SERVER_SIDE_CURSOR_RE.match(
+ self.statement))
+ )
)
else:
is_server_side = \
@@ -309,7 +321,8 @@ class PGExecutionContext_psycopg2(PGExecutionContext):
if is_server_side:
# use server-side cursors:
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
- ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
+ ident = "c_%s_%s" % (hex(id(self))[2:],
+ hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
else:
return self._dbapi_connection.cursor()
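As the conditional above shows, the decision also consults a
``stream_results`` execution option, which defaults to True once
``server_side_cursors`` is enabled on the engine; a sketch of opting a
single connection back out (``some_select`` is a placeholder)::

    # force a plain, fully pre-buffered cursor for this connection
    conn = engine.connect().execution_options(stream_results=False)
    result = conn.execute(some_select)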
@@ -336,7 +349,7 @@ class PGExecutionContext_psycopg2(PGExecutionContext):
class PGCompiler_psycopg2(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return self.process(binary.left, **kw) + " %% " + \
- self.process(binary.right, **kw)
+ self.process(binary.right, **kw)
def post_process_text(self, text):
return text.replace('%', '%%')
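A quick illustration of why the doubling above is needed: psycopg2's
``pyformat`` paramstyle reserves ``%`` for bind parameters, so a literal
percent must reach the DBAPI as ``%%``. A sketch::

    from sqlalchemy import column
    from sqlalchemy.dialects import postgresql

    expr = column('x') % 10
    print(expr.compile(dialect=postgresql.psycopg2.dialect()))
    # x %% %(x_1)s  -- the DBAPI collapses %% back to a single %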
@@ -354,7 +367,8 @@ class PGDialect_psycopg2(PGDialect):
supports_unicode_statements = False
default_paramstyle = 'pyformat'
- supports_sane_multi_rowcount = False # set to true based on psycopg2 version
+ # set to true based on psycopg2 version
+ supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
@@ -375,9 +389,9 @@ class PGDialect_psycopg2(PGDialect):
)
def __init__(self, server_side_cursors=False, use_native_unicode=True,
- client_encoding=None,
- use_native_hstore=True,
- **kwargs):
+ client_encoding=None,
+ use_native_hstore=True,
+ **kwargs):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
self.use_native_unicode = use_native_unicode
@@ -386,18 +400,18 @@ class PGDialect_psycopg2(PGDialect):
self.client_encoding = client_encoding
if self.dbapi and hasattr(self.dbapi, '__version__'):
m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
- self.dbapi.__version__)
+ self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
- int(x)
- for x in m.group(1, 2, 3)
- if x is not None)
+ int(x)
+ for x in m.group(1, 2, 3)
+ if x is not None)
def initialize(self, connection):
super(PGDialect_psycopg2, self).initialize(connection)
self._has_native_hstore = self.use_native_hstore and \
- self._hstore_oids(connection.connection) \
- is not None
+ self._hstore_oids(connection.connection) \
+ is not None
self._has_native_json = self.psycopg2_version >= (2, 5)
# http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
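A quick sketch of what the version parsing above yields; psycopg2 version
strings typically carry a trailing flags section, which the regex ignores
(sample strings illustrative)::

    import re

    m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', '2.5.3 (dt dec pq3 ext)')
    version = tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
    # version == (2, 5, 3); a two-part string such as '2.4' gives (2, 4)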
@@ -427,7 +441,7 @@ class PGDialect_psycopg2(PGDialect):
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
- )
+ )
connection.set_isolation_level(level)
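For reference, the raw psycopg2 call being wrapped above; not normally
needed, since ``isolation_level`` and the execution options cover it, but
shown here as a sketch against a raw DBAPI connection (DSN illustrative)::

    import psycopg2
    from psycopg2 import extensions

    raw = psycopg2.connect("dbname=test user=scott")
    # the API-level setting takes effect at the start of each
    # new transaction, as noted earlier
    raw.set_isolation_level(extensions.ISOLATION_LEVEL_SERIALIZABLE)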
@@ -458,16 +472,17 @@ class PGDialect_psycopg2(PGDialect):
oid, array_oid = hstore_oids
if util.py2k:
extras.register_hstore(conn, oid=oid,
- array_oid=array_oid,
- unicode=True)
+ array_oid=array_oid,
+ unicode=True)
else:
extras.register_hstore(conn, oid=oid,
- array_oid=array_oid)
+ array_oid=array_oid)
fns.append(on_connect)
if self.dbapi and self._json_deserializer:
def on_connect(conn):
- extras.register_default_json(conn, loads=self._json_deserializer)
+ extras.register_default_json(
+ conn, loads=self._json_deserializer)
fns.append(on_connect)
if fns:
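The hook above fires when a ``json_deserializer`` is supplied as a dialect
argument; a hedged sketch of passing a custom loader through
:func:`.create_engine` (the ``parse_float`` choice is illustrative)::

    import json
    from sqlalchemy import create_engine

    def my_loads(value):
        # e.g. keep floats as strings instead of binary floats
        return json.loads(value, parse_float=str)

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        json_deserializer=my_loads
    )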
diff --git a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
index fc785d450..3ebd0135f 100644
--- a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
+++ b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
@@ -9,7 +9,8 @@
.. dialect:: postgresql+pypostgresql
:name: py-postgresql
:dbapi: pypostgresql
- :connectstring: postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]
+ :connectstring: postgresql+pypostgresql://user:password@host:port/dbname\
+[?key=value&key=value...]
:url: http://python.projects.pgfoundry.org/
diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py
index 31434743c..28f80d000 100644
--- a/lib/sqlalchemy/dialects/postgresql/ranges.py
+++ b/lib/sqlalchemy/dialects/postgresql/ranges.py
@@ -9,6 +9,7 @@ from ... import types as sqltypes
__all__ = ('INT4RANGE', 'INT8RANGE', 'NUMRANGE')
+
class RangeOperators(object):
"""
This mixin provides functionality for the Range Operators
@@ -94,6 +95,7 @@ class RangeOperators(object):
"""
return self.expr.op('+')(other)
+
class INT4RANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql INT4RANGE type.
@@ -105,6 +107,7 @@ class INT4RANGE(RangeOperators, sqltypes.TypeEngine):
ischema_names['int4range'] = INT4RANGE
+
class INT8RANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql INT8RANGE type.
@@ -116,6 +119,7 @@ class INT8RANGE(RangeOperators, sqltypes.TypeEngine):
ischema_names['int8range'] = INT8RANGE
+
class NUMRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql NUMRANGE type.
@@ -127,6 +131,7 @@ class NUMRANGE(RangeOperators, sqltypes.TypeEngine):
ischema_names['numrange'] = NUMRANGE
+
class DATERANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql DATERANGE type.
@@ -138,6 +143,7 @@ class DATERANGE(RangeOperators, sqltypes.TypeEngine):
ischema_names['daterange'] = DATERANGE
+
class TSRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql TSRANGE type.
@@ -149,6 +155,7 @@ class TSRANGE(RangeOperators, sqltypes.TypeEngine):
ischema_names['tsrange'] = TSRANGE
+
class TSTZRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql TSTZRANGE type.