From cca03097f47f22783d42d1853faac6cf84607c5a Mon Sep 17 00:00:00 2001 From: Brian Jarrett Date: Sun, 20 Jul 2014 12:44:40 -0400 Subject: - apply pep8 formatting to sqlalchemy/sql, sqlalchemy/util, sqlalchemy/dialects, sqlalchemy/orm, sqlalchemy/event, sqlalchemy/testing --- lib/sqlalchemy/dialects/postgresql/base.py | 543 ++++++++++++++++------------- 1 file changed, 295 insertions(+), 248 deletions(-) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 8081f75dd..6f23a497b 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -50,22 +50,25 @@ Transaction Isolation Level All Postgresql dialects support setting of transaction isolation level both via a dialect-specific parameter ``isolation_level`` accepted by :func:`.create_engine`, -as well as the ``isolation_level`` argument as passed to :meth:`.Connection.execution_options`. -When using a non-psycopg2 dialect, this feature works by issuing the -command ``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL -<level>`` for each new connection. +as well as the ``isolation_level`` argument as passed to +:meth:`.Connection.execution_options`. When using a non-psycopg2 dialect, +this feature works by issuing the command +``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level>`` for +each new connection. To set isolation level using :func:`.create_engine`:: engine = create_engine( - "postgresql+pg8000://scott:tiger@localhost/test", - isolation_level="READ UNCOMMITTED" - ) + "postgresql+pg8000://scott:tiger@localhost/test", + isolation_level="READ UNCOMMITTED" + ) To set using per-connection execution options:: connection = engine.connect() - connection = connection.execution_options(isolation_level="READ COMMITTED") + connection = connection.execution_options( + isolation_level="READ COMMITTED" + ) Valid values for ``isolation_level`` include: @@ -93,12 +96,13 @@ The Postgresql dialect can reflect tables from any schema. The :paramref:`.Table.schema` argument, or alternatively the :paramref:`.MetaData.reflect.schema` argument determines which schema will be searched for the table or tables. The reflected :class:`.Table` objects -will in all cases retain this ``.schema`` attribute as was specified. However, -with regards to tables which these :class:`.Table` objects refer to via -foreign key constraint, a decision must be made as to how the ``.schema`` +will in all cases retain this ``.schema`` attribute as was specified. +However, with regards to tables which these :class:`.Table` objects refer to +via foreign key constraint, a decision must be made as to how the ``.schema`` is represented in those remote tables, in the case where that remote schema name is also a member of the current -`Postgresql search path `_. +`Postgresql search path +`_. By default, the Postgresql dialect mimics the behavior encouraged by Postgresql's own ``pg_get_constraintdef()`` builtin procedure.
This function @@ -115,7 +119,8 @@ illustrates this behavior:: CREATE TABLE test=> SET search_path TO public, test_schema; test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM - test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n + test-> ON n.oid = c.relnamespace test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid test-> WHERE c.relname='referring' AND r.contype = 'f' test-> ; @@ -124,10 +129,11 @@ illustrates this behavior:: FOREIGN KEY (referred_id) REFERENCES referred(id) (1 row) -Above, we created a table ``referred`` as a member of the remote schema ``test_schema``, however -when we added ``test_schema`` to the PG ``search_path`` and then asked ``pg_get_constraintdef()`` -for the ``FOREIGN KEY`` syntax, ``test_schema`` was not included in the -output of the function. +Above, we created a table ``referred`` as a member of the remote schema +``test_schema``; however, when we added ``test_schema`` to the +PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the +``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of +the function. On the other hand, if we set the search path back to the typical default of ``public``:: @@ -139,7 +145,8 @@ The same query against ``pg_get_constraintdef()`` now returns the fully schema-qualified name for us:: test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM - test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n + test-> ON n.oid = c.relnamespace test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid test-> WHERE c.relname='referring' AND r.contype = 'f'; pg_get_constraintdef @@ -157,7 +164,8 @@ reflection process as follows:: >>> with engine.connect() as conn: ... conn.execute("SET search_path TO test_schema, public") ... meta = MetaData() - ... referring = Table('referring', meta, autoload=True, autoload_with=conn) + ... referring = Table('referring', meta, + ... autoload=True, autoload_with=conn) ... @@ -167,16 +175,18 @@ The above process would deliver to the :attr:`.MetaData.tables` collection >>> meta.tables['referred'].schema is None True -To alter the behavior of reflection such that the referred schema is maintained -regardless of the ``search_path`` setting, use the ``postgresql_ignore_search_path`` -option, which can be specified as a dialect-specific argument to both -:class:`.Table` as well as :meth:`.MetaData.reflect`:: +To alter the behavior of reflection such that the referred schema is +maintained regardless of the ``search_path`` setting, use the +``postgresql_ignore_search_path`` option, which can be specified as a +dialect-specific argument to both :class:`.Table` and +:meth:`.MetaData.reflect`:: >>> with engine.connect() as conn: ... conn.execute("SET search_path TO test_schema, public") ... meta = MetaData() - ... referring = Table('referring', meta, autoload=True, autoload_with=conn, - ... postgresql_ignore_search_path=True) + ... referring = Table('referring', meta, autoload=True, + ... autoload_with=conn, + ... postgresql_ignore_search_path=True) ... @@ -187,29 +197,33 @@ We will now have ``test_schema.referred`` stored as schema-qualified:: .. sidebar:: Best Practices for Postgresql Schema reflection - The description of Postgresql schema reflection behavior is complex, and is - the product of many years of dealing with widely varied use cases and user preferences.
- But in fact, there's no need to understand any of it if you just stick to the simplest - use pattern: leave the ``search_path`` set to its default of ``public`` only, never refer - to the name ``public`` as an explicit schema name otherwise, and - refer to all other schema names explicitly when building - up a :class:`.Table` object. The options described here are only for those users - who can't, or prefer not to, stay within these guidelines. - -Note that **in all cases**, the "default" schema is always reflected as ``None``. -The "default" schema on Postgresql is that which is returned by the -Postgresql ``current_schema()`` function. On a typical Postgresql installation, -this is the name ``public``. So a table that refers to another which is -in the ``public`` (i.e. default) schema will always have the ``.schema`` attribute -set to ``None``. + The description of Postgresql schema reflection behavior is complex, and + is the product of many years of dealing with widely varied use cases and + user preferences. But in fact, there's no need to understand any of it if + you just stick to the simplest use pattern: leave the ``search_path`` set + to its default of ``public`` only, never refer to the name ``public`` as + an explicit schema name otherwise, and refer to all other schema names + explicitly when building up a :class:`.Table` object. The options + described here are only for those users who can't, or prefer not to, stay + within these guidelines. + +Note that **in all cases**, the "default" schema is always reflected as +``None``. The "default" schema on Postgresql is that which is returned by the +Postgresql ``current_schema()`` function. On a typical Postgresql +installation, this is the name ``public``. So a table that refers to another +which is in the ``public`` (i.e. default) schema will always have the +``.schema`` attribute set to ``None``. .. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path`` - dialect-level option accepted by :class:`.Table` and :meth:`.MetaData.reflect`. + dialect-level option accepted by :class:`.Table` and + :meth:`.MetaData.reflect`. .. seealso:: - `The Schema Search Path `_ - on the Postgresql website. + `The Schema Search Path + `_ + - on the Postgresql website. INSERT/UPDATE...RETURNING ------------------------- @@ -273,19 +287,19 @@ produces a statement equivalent to:: SELECT CAST('some text' AS TSVECTOR) AS anon_1 Full Text Searches in Postgresql are influenced by a combination of: the -PostgresSQL setting of ``default_text_search_config``, the ``regconfig`` used to -build the GIN/GiST indexes, and the ``regconfig`` optionally passed in during a -query. +PostgreSQL setting of ``default_text_search_config``, the ``regconfig`` used +to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in +during a query. When performing a Full Text Search against a column that has a GIN or -GiST index that is already pre-computed (which is common on full text searches) -one may need to explicitly pass in a particular PostgresSQL ``regconfig`` value -to ensure the query-planner utilizes the index and does not re-compute the -column on demand. +GiST index that is already pre-computed (which is common on full text +searches) one may need to explicitly pass in a particular PostgreSQL +``regconfig`` value to ensure the query planner utilizes the index and does +not re-compute the column on demand.
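For the pre-computed index case described above, the GIN index is typically declared against the same ``to_tsvector()`` expression that queries will later emit. A minimal sketch (the index name, and the reuse of the ``mytable`` table from the surrounding examples, are illustrative assumptions)::

    from sqlalchemy import Index, func

    # index the exact expression the queries below will render, so the
    # planner can match it rather than recomputing the tsvector per row
    Index(
        'ix_mytable_title_tsv',
        func.to_tsvector('english', mytable.c.title),
        postgresql_using='gin'
    )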
-In order to provide for this explicit query planning, or to use different search -strategies, the ``match`` method accepts a ``postgresql_regconfig`` keyword -argument. +In order to provide for this explicit query planning, or to use different +search strategies, the ``match`` method accepts a ``postgresql_regconfig`` +keyword argument:: select([mytable.c.id]).where( mytable.c.title.match('somestring', postgresql_regconfig='english') ) @@ -296,8 +310,8 @@ Emits the equivalent of:: SELECT mytable.id FROM mytable WHERE mytable.title @@ to_tsquery('english', 'somestring') -One can also specifically pass in a `'regconfig'` value to the ``to_tsvector()`` -command as the initial argument. +One can also specifically pass in a ``'regconfig'`` value to the +``to_tsvector()`` command as the initial argument:: select([mytable.c.id]).where( func.to_tsvector('english', mytable.c.title )\ @@ -310,9 +324,9 @@ produces a statement equivalent to:: SELECT mytable.id FROM mytable WHERE to_tsvector('english', mytable.title) @@ to_tsquery('english', 'somestring') -It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from PostgresSQL -to ensure that you are generating queries with SQLAlchemy that take full -advantage of any indexes you may have created for full text search. +It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from +PostgreSQL to ensure that you are generating queries with SQLAlchemy that +take full advantage of any indexes you may have created for full text search. FROM ONLY ... ------------------------ @@ -402,26 +416,26 @@ except ImportError: _python_UUID = None from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \ - CHAR, TEXT, FLOAT, NUMERIC, \ - DATE, BOOLEAN, REAL + CHAR, TEXT, FLOAT, NUMERIC, \ + DATE, BOOLEAN, REAL RESERVED_WORDS = set( ["all", "analyse", "analyze", "and", "any", "array", "as", "asc", - "asymmetric", "both", "case", "cast", "check", "collate", "column", - "constraint", "create", "current_catalog", "current_date", - "current_role", "current_time", "current_timestamp", "current_user", - "default", "deferrable", "desc", "distinct", "do", "else", "end", - "except", "false", "fetch", "for", "foreign", "from", "grant", "group", - "having", "in", "initially", "intersect", "into", "leading", "limit", - "localtime", "localtimestamp", "new", "not", "null", "of", "off", "offset", - "old", "on", "only", "or", "order", "placing", "primary", "references", - "returning", "select", "session_user", "some", "symmetric", "table", - "then", "to", "trailing", "true", "union", "unique", "user", "using", - "variadic", "when", "where", "window", "with", "authorization", - "between", "binary", "cross", "current_schema", "freeze", "full", - "ilike", "inner", "is", "isnull", "join", "left", "like", "natural", - "notnull", "outer", "over", "overlaps", "right", "similar", "verbose" - ]) + "asymmetric", "both", "case", "cast", "check", "collate", "column", + "constraint", "create", "current_catalog", "current_date", + "current_role", "current_time", "current_timestamp", "current_user", + "default", "deferrable", "desc", "distinct", "do", "else", "end", + "except", "false", "fetch", "for", "foreign", "from", "grant", "group", + "having", "in", "initially", "intersect", "into", "leading", "limit", + "localtime", "localtimestamp", "new", "not", "null", "of", "off", + "offset", "old", "on", "only", "or", "order", "placing", "primary", + "references", "returning", "select", "session_user", "some", "symmetric", + "table", "then", "to", "trailing", "true", "union", "unique", "user", + "using", "variadic",
"when", "where", "window", "with", "authorization", + "between", "binary", "cross", "current_schema", "freeze", "full", + "ilike", "inner", "is", "isnull", "join", "left", "like", "natural", + "notnull", "outer", "over", "overlaps", "right", "similar", "verbose" + ]) _DECIMAL_TYPES = (1231, 1700) _FLOAT_TYPES = (700, 701, 1021, 1022) @@ -452,6 +466,7 @@ PGMacAddr = MACADDR class OID(sqltypes.TypeEngine): + """Provide the Postgresql OID type. .. versionadded:: 0.9.5 @@ -461,18 +476,21 @@ class OID(sqltypes.TypeEngine): class TIMESTAMP(sqltypes.TIMESTAMP): + def __init__(self, timezone=False, precision=None): super(TIMESTAMP, self).__init__(timezone=timezone) self.precision = precision class TIME(sqltypes.TIME): + def __init__(self, timezone=False, precision=None): super(TIME, self).__init__(timezone=timezone) self.precision = precision class INTERVAL(sqltypes.TypeEngine): + """Postgresql INTERVAL type. The INTERVAL type may not be supported on all DBAPIs. @@ -511,6 +529,7 @@ PGBit = BIT class UUID(sqltypes.TypeEngine): + """Postgresql UUID type. Represents the UUID column type, interpreting @@ -534,7 +553,8 @@ class UUID(sqltypes.TypeEngine): """ if as_uuid and _python_UUID is None: raise NotImplementedError( - "This version of Python does not support the native UUID type." + "This version of Python does not support " + "the native UUID type." ) self.as_uuid = as_uuid @@ -560,7 +580,9 @@ class UUID(sqltypes.TypeEngine): PGUuid = UUID + class TSVECTOR(sqltypes.TypeEngine): + """The :class:`.postgresql.TSVECTOR` type implements the Postgresql text search type TSVECTOR. @@ -577,21 +599,21 @@ class TSVECTOR(sqltypes.TypeEngine): __visit_name__ = 'TSVECTOR' - class _Slice(expression.ColumnElement): __visit_name__ = 'slice' type = sqltypes.NULLTYPE def __init__(self, slice_, source_comparator): self.start = source_comparator._check_literal( - source_comparator.expr, - operators.getitem, slice_.start) + source_comparator.expr, + operators.getitem, slice_.start) self.stop = source_comparator._check_literal( - source_comparator.expr, - operators.getitem, slice_.stop) + source_comparator.expr, + operators.getitem, slice_.stop) class Any(expression.ColumnElement): + """Represent the clause ``left operator ANY (right)``. ``right`` must be an array expression. @@ -612,6 +634,7 @@ class Any(expression.ColumnElement): class All(expression.ColumnElement): + """Represent the clause ``left operator ALL (right)``. ``right`` must be an array expression. @@ -632,6 +655,7 @@ class All(expression.ColumnElement): class array(expression.Tuple): + """A Postgresql ARRAY literal. This is used to produce ARRAY literals in SQL expressions, e.g.:: @@ -673,7 +697,7 @@ class array(expression.Tuple): def _bind_param(self, operator, obj): return array(*[ expression.BindParameter(None, o, _compared_to_operator=operator, - _compared_to_type=self.type, unique=True) + _compared_to_type=self.type, unique=True) for o in obj ]) @@ -682,6 +706,7 @@ class array(expression.Tuple): class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine): + """Postgresql ARRAY type. Represents values as Python lists. 
@@ -757,6 +782,7 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine): __visit_name__ = 'ARRAY' class Comparator(sqltypes.Concatenable.Comparator): + """Define comparison operations for :class:`.ARRAY`.""" def __getitem__(self, index): @@ -775,7 +801,7 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine): return_type = self.type.item_type return self._binary_operate(self.expr, operators.getitem, index, - result_type=return_type) + result_type=return_type) def any(self, other, operator=operators.eq): """Return ``other operator ANY (array)`` clause. @@ -902,7 +928,7 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine): """ if isinstance(item_type, ARRAY): raise ValueError("Do not nest ARRAY types; ARRAY(basetype) " - "handles multi-dimensional arrays of basetype") + "handles multi-dimensional arrays of basetype") if isinstance(item_type, type): item_type = item_type() self.item_type = item_type @@ -921,59 +947,60 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine): if dim is None: arr = list(arr) if dim == 1 or dim is None and ( - # this has to be (list, tuple), or at least - # not hasattr('__iter__'), since Py3K strings - # etc. have __iter__ - not arr or not isinstance(arr[0], (list, tuple))): + # this has to be (list, tuple), or at least + # not hasattr('__iter__'), since Py3K strings + # etc. have __iter__ + not arr or not isinstance(arr[0], (list, tuple))): if itemproc: return collection(itemproc(x) for x in arr) else: return collection(arr) else: return collection( - self._proc_array( - x, itemproc, - dim - 1 if dim is not None else None, - collection) - for x in arr - ) + self._proc_array( + x, itemproc, + dim - 1 if dim is not None else None, + collection) + for x in arr + ) def bind_processor(self, dialect): item_proc = self.item_type.\ - dialect_impl(dialect).\ - bind_processor(dialect) + dialect_impl(dialect).\ + bind_processor(dialect) def process(value): if value is None: return value else: return self._proc_array( - value, - item_proc, - self.dimensions, - list) + value, + item_proc, + self.dimensions, + list) return process def result_processor(self, dialect, coltype): item_proc = self.item_type.\ - dialect_impl(dialect).\ - result_processor(dialect, coltype) + dialect_impl(dialect).\ + result_processor(dialect, coltype) def process(value): if value is None: return value else: return self._proc_array( - value, - item_proc, - self.dimensions, - tuple if self.as_tuple else list) + value, + item_proc, + self.dimensions, + tuple if self.as_tuple else list) return process PGArray = ARRAY class ENUM(sqltypes.Enum): + """Postgresql ENUM type. 
This is a subclass of :class:`.types.Enum` which includes @@ -1047,7 +1074,8 @@ class ENUM(sqltypes.Enum): return if not checkfirst or \ - not bind.dialect.has_type(bind, self.name, schema=self.schema): + not bind.dialect.has_type( + bind, self.name, schema=self.schema): bind.execute(CreateEnumType(self)) def drop(self, bind=None, checkfirst=True): @@ -1069,7 +1097,7 @@ class ENUM(sqltypes.Enum): return if not checkfirst or \ - bind.dialect.has_type(bind, self.name, schema=self.schema): + bind.dialect.has_type(bind, self.name, schema=self.schema): bind.execute(DropEnumType(self)) def _check_for_name_in_memos(self, checkfirst, kw): @@ -1144,7 +1172,7 @@ ischema_names = { 'interval': INTERVAL, 'interval year to month': INTERVAL, 'interval day to second': INTERVAL, - 'tsvector' : TSVECTOR + 'tsvector': TSVECTOR } @@ -1155,9 +1183,9 @@ class PGCompiler(compiler.SQLCompiler): def visit_slice(self, element, **kw): return "%s:%s" % ( - self.process(element.start, **kw), - self.process(element.stop, **kw), - ) + self.process(element.start, **kw), + self.process(element.stop, **kw), + ) def visit_any(self, element, **kw): return "%s%sANY (%s)" % ( @@ -1181,7 +1209,7 @@ class PGCompiler(compiler.SQLCompiler): def visit_match_op_binary(self, binary, operator, **kw): if "postgresql_regconfig" in binary.modifiers: - regconfig = self.render_literal_value(\ + regconfig = self.render_literal_value( binary.modifiers['postgresql_regconfig'], sqltypes.STRINGTYPE) if regconfig: @@ -1199,8 +1227,8 @@ class PGCompiler(compiler.SQLCompiler): escape = binary.modifiers.get("escape", None) return '%s ILIKE %s' % \ - (self.process(binary.left, **kw), - self.process(binary.right, **kw)) \ + (self.process(binary.left, **kw), + self.process(binary.right, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) @@ -1210,8 +1238,8 @@ class PGCompiler(compiler.SQLCompiler): def visit_notilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return '%s NOT ILIKE %s' % \ - (self.process(binary.left, **kw), - self.process(binary.right, **kw)) \ + (self.process(binary.left, **kw), + self.process(binary.right, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) @@ -1265,12 +1293,12 @@ class PGCompiler(compiler.SQLCompiler): if select._for_update_arg.of: tables = util.OrderedSet( - c.table if isinstance(c, expression.ColumnClause) - else c for c in select._for_update_arg.of) + c.table if isinstance(c, expression.ColumnClause) + else c for c in select._for_update_arg.of) tmp += " OF " + ", ".join( - self.process(table, ashint=True) - for table in tables - ) + self.process(table, ashint=True) + for table in tables + ) if select._for_update_arg.nowait: tmp += " NOWAIT" @@ -1280,13 +1308,12 @@ class PGCompiler(compiler.SQLCompiler): def returning_clause(self, stmt, returning_cols): columns = [ - self._label_select_column(None, c, True, False, {}) - for c in expression._select_iterables(returning_cols) - ] + self._label_select_column(None, c, True, False, {}) + for c in expression._select_iterables(returning_cols) + ] return 'RETURNING ' + ', '.join(columns) - def visit_substring_func(self, func, **kw): s = self.process(func.clauses.clauses[0], **kw) start = self.process(func.clauses.clauses[1], **kw) @@ -1296,7 +1323,9 @@ class PGCompiler(compiler.SQLCompiler): else: return "SUBSTRING(%s FROM %s)" % (s, start) + class PGDDLCompiler(compiler.DDLCompiler): + def get_column_specification(self, column, **kwargs): colspec = 
self.preparer.format_column(column) @@ -1335,7 +1364,7 @@ class PGDDLCompiler(compiler.DDLCompiler): self.preparer.format_type(type_), ", ".join( self.sql_compiler.process(sql.literal(e), literal_binds=True) - for e in type_.enums) + for e in type_.enums) ) def visit_drop_enum_type(self, drop): @@ -1353,10 +1382,10 @@ class PGDDLCompiler(compiler.DDLCompiler): if index.unique: text += "UNIQUE " text += "INDEX %s ON %s " % ( - self._prepared_index_name(index, - include_schema=False), - preparer.format_table(index.table) - ) + self._prepared_index_name(index, + include_schema=False), + preparer.format_table(index.table) + ) using = index.dialect_options['postgresql']['using'] if using: @@ -1367,20 +1396,20 @@ class PGDDLCompiler(compiler.DDLCompiler): % ( ', '.join([ self.sql_compiler.process( - expr.self_group() - if not isinstance(expr, expression.ColumnClause) - else expr, - include_table=False, literal_binds=True) + + expr.self_group() + if not isinstance(expr, expression.ColumnClause) + else expr, + include_table=False, literal_binds=True) + (c.key in ops and (' ' + ops[c.key]) or '') for expr, c in zip(index.expressions, index.columns)]) - ) + ) whereclause = index.dialect_options["postgresql"]["where"] if whereclause is not None: where_compiled = self.sql_compiler.process( - whereclause, include_table=False, - literal_binds=True) + whereclause, include_table=False, + literal_binds=True) text += " WHERE " + where_compiled return text @@ -1392,17 +1421,19 @@ class PGDDLCompiler(compiler.DDLCompiler): elements = [] for c in constraint.columns: op = constraint.operators[c.name] - elements.append(self.preparer.quote(c.name) + ' WITH '+op) - text += "EXCLUDE USING %s (%s)" % (constraint.using, ', '.join(elements)) + elements.append(self.preparer.quote(c.name) + ' WITH ' + op) + text += "EXCLUDE USING %s (%s)" % (constraint.using, + ', '.join(elements)) if constraint.where is not None: text += ' WHERE (%s)' % self.sql_compiler.process( - constraint.where, - literal_binds=True) + constraint.where, + literal_binds=True) text += self.define_constraint_deferrability(constraint) return text class PGTypeCompiler(compiler.GenericTypeCompiler): + def visit_TSVECTOR(self, type): return "TSVECTOR" @@ -1509,8 +1540,8 @@ class PGTypeCompiler(compiler.GenericTypeCompiler): def visit_ARRAY(self, type_): return self.process(type_.item_type) + ('[]' * (type_.dimensions - if type_.dimensions - is not None else 1)) + if type_.dimensions + is not None else 1)) class PGIdentifierPreparer(compiler.IdentifierPreparer): @@ -1520,7 +1551,7 @@ class PGIdentifierPreparer(compiler.IdentifierPreparer): def _unquote_identifier(self, value): if value[0] == self.initial_quote: value = value[1:-1].\ - replace(self.escape_to_quote, self.escape_quote) + replace(self.escape_to_quote, self.escape_quote) return value def format_type(self, type_, use_schema=True): @@ -1554,21 +1585,25 @@ class DropEnumType(schema._CreateDropBase): class PGExecutionContext(default.DefaultExecutionContext): + def fire_sequence(self, seq, type_): - return self._execute_scalar(("select nextval('%s')" % \ - self.dialect.identifier_preparer.format_sequence(seq)), type_) + return self._execute_scalar(( + "select nextval('%s')" % + self.dialect.identifier_preparer.format_sequence(seq)), type_) def get_insert_default(self, column): - if column.primary_key and column is column.table._autoincrement_column: + if column.primary_key and \ + column is column.table._autoincrement_column: if column.server_default and column.server_default.has_argument: # 
pre-execute passive defaults on primary key columns return self._execute_scalar("select %s" % - column.server_default.arg, column.type) + column.server_default.arg, + column.type) elif (column.default is None or - (column.default.is_sequence and - column.default.optional)): + (column.default.is_sequence and + column.default.optional)): # execute the sequence associated with a SERIAL primary # key column. for non-primary-key SERIAL, the ID just @@ -1587,10 +1622,10 @@ class PGExecutionContext(default.DefaultExecutionContext): sch = column.table.schema if sch is not None: exc = "select nextval('\"%s\".\"%s\"')" % \ - (sch, seq_name) + (sch, seq_name) else: exc = "select nextval('\"%s\"')" % \ - (seq_name, ) + (seq_name, ) return self._execute_scalar(exc, column.type) @@ -1643,7 +1678,7 @@ class PGDialect(default.DefaultDialect): _backslash_escapes = True def __init__(self, isolation_level=None, json_serializer=None, - json_deserializer=None, **kwargs): + json_deserializer=None, **kwargs): default.DefaultDialect.__init__(self, **kwargs) self.isolation_level = isolation_level self._json_deserializer = json_deserializer @@ -1652,7 +1687,7 @@ class PGDialect(default.DefaultDialect): def initialize(self, connection): super(PGDialect, self).initialize(connection) self.implicit_returning = self.server_version_info > (8, 2) and \ - self.__dict__.get('implicit_returning', True) + self.__dict__.get('implicit_returning', True) self.supports_native_enum = self.server_version_info >= (8, 3) if not self.supports_native_enum: self.colspecs = self.colspecs.copy() @@ -1665,9 +1700,9 @@ class PGDialect(default.DefaultDialect): self.supports_smallserial = self.server_version_info >= (9, 2) self._backslash_escapes = self.server_version_info < (8, 2) or \ - connection.scalar( - "show standard_conforming_strings" - ) == 'off' + connection.scalar( + "show standard_conforming_strings" + ) == 'off' def on_connect(self): if self.isolation_level is not None: @@ -1677,8 +1712,8 @@ class PGDialect(default.DefaultDialect): else: return None - _isolation_lookup = set(['SERIALIZABLE', - 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ']) + _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED', + 'READ COMMITTED', 'REPEATABLE READ']) def set_isolation_level(self, connection, level): level = level.replace('_', ' ') @@ -1687,7 +1722,7 @@ class PGDialect(default.DefaultDialect): "Invalid value '%s' for isolation_level. " "Valid isolation levels for %s are %s" % (level, self.name, ", ".join(self._isolation_lookup)) - ) + ) cursor = connection.cursor() cursor.execute( "SET SESSION CHARACTERISTICS AS TRANSACTION " @@ -1709,10 +1744,10 @@ class PGDialect(default.DefaultDialect): connection.execute("PREPARE TRANSACTION '%s'" % xid) def do_rollback_twophase(self, connection, xid, - is_prepared=True, recover=False): + is_prepared=True, recover=False): if is_prepared: if recover: - #FIXME: ugly hack to get out of transaction + # FIXME: ugly hack to get out of transaction # context when committing recoverable transactions # Must find out a way how to make the dbapi not # open a transaction. 
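# A minimal round-trip sketch of the isolation level machinery above; the
# database URL is an assumption for illustration only.
from sqlalchemy import create_engine

engine = create_engine("postgresql://scott:tiger@localhost/test")

with engine.connect() as conn:
    # routed through PGDialect.set_isolation_level(), which validates the
    # value against _isolation_lookup and emits SET SESSION CHARACTERISTICS
    # AS TRANSACTION ISOLATION LEVEL REPEATABLE READ on the DBAPI connection
    conn = conn.execution_options(isolation_level="REPEATABLE READ")
    print(conn.execute("show transaction isolation level").scalar())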
@@ -1724,7 +1759,7 @@ class PGDialect(default.DefaultDialect): self.do_rollback(connection.connection) def do_commit_twophase(self, connection, xid, - is_prepared=True, recover=False): + is_prepared=True, recover=False): if is_prepared: if recover: connection.execute("ROLLBACK") @@ -1736,14 +1771,15 @@ class PGDialect(default.DefaultDialect): def do_recover_twophase(self, connection): resultset = connection.execute( - sql.text("SELECT gid FROM pg_prepared_xacts")) + sql.text("SELECT gid FROM pg_prepared_xacts")) return [row[0] for row in resultset] def _get_default_schema_name(self, connection): return connection.scalar("select current_schema()") def has_schema(self, connection, schema): - query = "select nspname from pg_namespace where lower(nspname)=:schema" + query = ("select nspname from pg_namespace " + "where lower(nspname)=:schema") cursor = connection.execute( sql.text( query, @@ -1761,25 +1797,27 @@ class PGDialect(default.DefaultDialect): if schema is None: cursor = connection.execute( sql.text( - "select relname from pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where n.nspname=current_schema() and " - "relname=:name", - bindparams=[ + "select relname from pg_class c join pg_namespace n on " + "n.oid=c.relnamespace where n.nspname=current_schema() " + "and relname=:name", + bindparams=[ sql.bindparam('name', util.text_type(table_name), - type_=sqltypes.Unicode)] + type_=sqltypes.Unicode)] ) ) else: cursor = connection.execute( sql.text( - "select relname from pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where n.nspname=:schema and " - "relname=:name", + "select relname from pg_class c join pg_namespace n on " + "n.oid=c.relnamespace where n.nspname=:schema and " + "relname=:name", bindparams=[ sql.bindparam('name', - util.text_type(table_name), type_=sqltypes.Unicode), + util.text_type(table_name), + type_=sqltypes.Unicode), sql.bindparam('schema', - util.text_type(schema), type_=sqltypes.Unicode)] + util.text_type(schema), + type_=sqltypes.Unicode)] ) ) return bool(cursor.first()) @@ -1794,23 +1832,24 @@ class PGDialect(default.DefaultDialect): "and relname=:name", bindparams=[ sql.bindparam('name', util.text_type(sequence_name), - type_=sqltypes.Unicode) + type_=sqltypes.Unicode) ] ) ) else: cursor = connection.execute( sql.text( - "SELECT relname FROM pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where relkind='S' and " - "n.nspname=:schema and relname=:name", - bindparams=[ - sql.bindparam('name', util.text_type(sequence_name), - type_=sqltypes.Unicode), - sql.bindparam('schema', - util.text_type(schema), type_=sqltypes.Unicode) - ] - ) + "SELECT relname FROM pg_class c join pg_namespace n on " + "n.oid=c.relnamespace where relkind='S' and " + "n.nspname=:schema and relname=:name", + bindparams=[ + sql.bindparam('name', util.text_type(sequence_name), + type_=sqltypes.Unicode), + sql.bindparam('schema', + util.text_type(schema), + type_=sqltypes.Unicode) + ] + ) ) return bool(cursor.first()) @@ -1836,14 +1875,14 @@ class PGDialect(default.DefaultDialect): """ query = sql.text(query) query = query.bindparams( - sql.bindparam('typname', - util.text_type(type_name), type_=sqltypes.Unicode), - ) + sql.bindparam('typname', + util.text_type(type_name), type_=sqltypes.Unicode), + ) if schema is not None: query = query.bindparams( - sql.bindparam('nspname', - util.text_type(schema), type_=sqltypes.Unicode), - ) + sql.bindparam('nspname', + util.text_type(schema), type_=sqltypes.Unicode), + ) cursor = connection.execute(query) return 
bool(cursor.scalar()) @@ -1855,7 +1894,7 @@ class PGDialect(default.DefaultDialect): v) if not m: raise AssertionError( - "Could not determine version from string '%s'" % v) + "Could not determine version from string '%s'" % v) return tuple([int(x) for x in m.group(1, 2, 3) if x is not None]) @reflection.cache @@ -1905,11 +1944,11 @@ class PGDialect(default.DefaultDialect): # what about system tables? if util.py2k: - schema_names = [row[0].decode(self.encoding) for row in rp \ - if not row[0].startswith('pg_')] + schema_names = [row[0].decode(self.encoding) for row in rp + if not row[0].startswith('pg_')] else: - schema_names = [row[0] for row in rp \ - if not row[0].startswith('pg_')] + schema_names = [row[0] for row in rp + if not row[0].startswith('pg_')] return schema_names @reflection.cache @@ -1921,12 +1960,12 @@ class PGDialect(default.DefaultDialect): result = connection.execute( sql.text("SELECT relname FROM pg_class c " - "WHERE relkind = 'r' " - "AND '%s' = (select nspname from pg_namespace n " - "where n.oid = c.relnamespace) " % - current_schema, - typemap={'relname': sqltypes.Unicode} - ) + "WHERE relkind = 'r' " + "AND '%s' = (select nspname from pg_namespace n " + "where n.oid = c.relnamespace) " % + current_schema, + typemap={'relname': sqltypes.Unicode} + ) ) return [row[0] for row in result] @@ -1946,7 +1985,7 @@ class PGDialect(default.DefaultDialect): if util.py2k: view_names = [row[0].decode(self.encoding) - for row in connection.execute(s)] + for row in connection.execute(s)] else: view_names = [row[0] for row in connection.execute(s)] return view_names @@ -1991,9 +2030,12 @@ class PGDialect(default.DefaultDialect): ORDER BY a.attnum """ s = sql.text(SQL_COLS, - bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)], - typemap={'attname': sqltypes.Unicode, 'default': sqltypes.Unicode} - ) + bindparams=[ + sql.bindparam('table_oid', type_=sqltypes.Integer)], + typemap={ + 'attname': sqltypes.Unicode, + 'default': sqltypes.Unicode} + ) c = connection.execute(s, table_oid=table_oid) rows = c.fetchall() domains = self._load_domains(connection) @@ -2009,7 +2051,7 @@ class PGDialect(default.DefaultDialect): def _get_column_info(self, name, format_type, default, notnull, domains, enums, schema): - ## strip (*) from character varying(5), timestamp(5) + # strip (*) from character varying(5), timestamp(5) # with time zone, geometry(POLYGON), etc. attype = re.sub(r'\(.*\)', '', format_type) @@ -2057,7 +2099,7 @@ class PGDialect(default.DefaultDialect): else: args = () elif attype in ('interval', 'interval year to month', - 'interval day to second'): + 'interval day to second'): if charlen: kwargs['precision'] = int(charlen) args = () @@ -2112,8 +2154,8 @@ class PGDialect(default.DefaultDialect): # later be enhanced to obey quoting rules / # "quote schema" default = match.group(1) + \ - ('"%s"' % sch) + '.' + \ - match.group(2) + match.group(3) + ('"%s"' % sch) + '.' 
+ \ + match.group(2) + match.group(3) column_info = dict(name=name, type=coltype, nullable=nullable, default=default, autoincrement=autoincrement) @@ -2169,7 +2211,7 @@ class PGDialect(default.DefaultDialect): @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, - postgresql_ignore_search_path=False, **kw): + postgresql_ignore_search_path=False, **kw): preparer = self.identifier_preparer table_oid = self.get_table_oid(connection, table_name, schema, info_cache=kw.get('info_cache')) @@ -2192,29 +2234,32 @@ class PGDialect(default.DefaultDialect): FK_REGEX = re.compile( r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)' r'[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?' - r'[\s]?(ON UPDATE (CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' - r'[\s]?(ON DELETE (CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' + r'[\s]?(ON UPDATE ' + r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' + r'[\s]?(ON DELETE ' + r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' r'[\s]?(DEFERRABLE|NOT DEFERRABLE)?' r'[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?' ) t = sql.text(FK_SQL, typemap={ - 'conname': sqltypes.Unicode, - 'condef': sqltypes.Unicode}) + 'conname': sqltypes.Unicode, + 'condef': sqltypes.Unicode}) c = connection.execute(t, table=table_oid) fkeys = [] for conname, condef, conschema in c.fetchall(): m = re.search(FK_REGEX, condef).groups() constrained_columns, referred_schema, \ - referred_table, referred_columns, \ - _, match, _, onupdate, _, ondelete, \ - deferrable, _, initially = m + referred_table, referred_columns, \ + _, match, _, onupdate, _, ondelete, \ + deferrable, _, initially = m if deferrable is not None: deferrable = True if deferrable == 'DEFERRABLE' else False constrained_columns = [preparer._unquote_identifier(x) - for x in re.split(r'\s*,\s*', constrained_columns)] + for x in re.split( + r'\s*,\s*', constrained_columns)] if postgresql_ignore_search_path: # when ignoring search path, we use the actual schema @@ -2228,7 +2273,7 @@ class PGDialect(default.DefaultDialect): # pg_get_constraintdef(). If the schema is in the search # path, pg_get_constraintdef() will give us None. referred_schema = \ - preparer._unquote_identifier(referred_schema) + preparer._unquote_identifier(referred_schema) elif schema is not None and schema == conschema: # If the actual schema matches the schema of the table # we're reflecting, then we will use that. @@ -2236,7 +2281,8 @@ class PGDialect(default.DefaultDialect): referred_table = preparer._unquote_identifier(referred_table) referred_columns = [preparer._unquote_identifier(x) - for x in re.split(r'\s*,\s', referred_columns)] + for x in + re.split(r'\s*,\s', referred_columns)] fkey_d = { 'name': conname, 'constrained_columns': constrained_columns, @@ -2263,9 +2309,9 @@ class PGDialect(default.DefaultDialect): # for now. # regards, tom lane" return "(%s)" % " OR ".join( - "%s[%d] = %s" % (compare_to, ind, col) - for ind in range(0, 10) - ) + "%s[%d] = %s" % (compare_to, ind, col) + for ind in range(0, 10) + ) else: return "%s = ANY(%s)" % (col, compare_to) @@ -2297,12 +2343,12 @@ class PGDialect(default.DefaultDialect): t.relname, i.relname """ % ( - # version 8.3 here was based on observing the - # cast does not work in PG 8.2.4, does work in 8.3.0. - # nothing in PG changelogs regarding this. - "::varchar" if self.server_version_info >= (8, 3) else "", - self._pg_index_any("a.attnum", "ix.indkey") - ) + # version 8.3 here was based on observing the + # cast does not work in PG 8.2.4, does work in 8.3.0. 
+ # nothing in PG changelogs regarding this. + "::varchar" if self.server_version_info >= (8, 3) else "", + self._pg_index_any("a.attnum", "ix.indkey") + ) t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode}) c = connection.execute(t, table_oid=table_oid) @@ -2316,16 +2362,16 @@ class PGDialect(default.DefaultDialect): if expr: if idx_name != sv_idx_name: util.warn( - "Skipped unsupported reflection of " - "expression-based index %s" - % idx_name) + "Skipped unsupported reflection of " + "expression-based index %s" + % idx_name) sv_idx_name = idx_name continue if prd and not idx_name == sv_idx_name: util.warn( - "Predicate of partial index %s ignored during reflection" - % idx_name) + "Predicate of partial index %s ignored during reflection" + % idx_name) sv_idx_name = idx_name index = indexes[idx_name] @@ -2356,7 +2402,8 @@ class PGDialect(default.DefaultDialect): FROM pg_catalog.pg_constraint cons join pg_attribute a - on cons.conrelid = a.attrelid AND a.attnum = ANY(cons.conkey) + on cons.conrelid = a.attrelid AND + a.attnum = ANY(cons.conkey) WHERE cons.conrelid = :table_oid AND cons.contype = 'u' @@ -2381,7 +2428,7 @@ class PGDialect(default.DefaultDialect): if not self.supports_native_enum: return {} - ## Load data types for enums: + # Load data types for enums: SQL_ENUMS = """ SELECT t.typname as "name", -- no enum defaults in 8.4 at least @@ -2397,8 +2444,8 @@ class PGDialect(default.DefaultDialect): """ s = sql.text(SQL_ENUMS, typemap={ - 'attname': sqltypes.Unicode, - 'label': sqltypes.Unicode}) + 'attname': sqltypes.Unicode, + 'label': sqltypes.Unicode}) c = connection.execute(s) enums = {} @@ -2416,13 +2463,13 @@ class PGDialect(default.DefaultDialect): enums[name]['labels'].append(enum['label']) else: enums[name] = { - 'labels': [enum['label']], - } + 'labels': [enum['label']], + } return enums def _load_domains(self, connection): - ## Load data types for domains: + # Load data types for domains: SQL_DOMAINS = """ SELECT t.typname as "name", pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype", @@ -2440,7 +2487,7 @@ class PGDialect(default.DefaultDialect): domains = {} for domain in c.fetchall(): - ## strip (30) from character varying(30) + # strip (30) from character varying(30) attype = re.search('([^\(]+)', domain['attype']).group(1) if domain['visible']: # 'visible' just means whether or not the domain is in a @@ -2452,9 +2499,9 @@ class PGDialect(default.DefaultDialect): name = "%s.%s" % (domain['schema'], domain['name']) domains[name] = { - 'attype': attype, - 'nullable': domain['nullable'], - 'default': domain['default'] - } + 'attype': attype, + 'nullable': domain['nullable'], + 'default': domain['default'] + } return domains -- cgit v1.2.1
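As a short closing illustration, the reflection queries reworked above are reachable through the standard :func:`.inspect` interface; a sketch in which the database URL is an assumption and ``referring`` is the table used in the documentation examples earlier in this patch::

    from sqlalchemy import create_engine, inspect

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    insp = inspect(engine)

    # these route to PGDialect.get_foreign_keys(), get_indexes() and
    # get_unique_constraints() respectively
    print(insp.get_foreign_keys('referring'))
    print(insp.get_indexes('referring'))
    print(insp.get_unique_constraints('referring'))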