summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/sqlalchemy/connectors/mxodbc.py2
-rw-r--r--lib/sqlalchemy/connectors/pyodbc.py10
-rw-r--r--lib/sqlalchemy/connectors/zxJDBC.py6
-rw-r--r--lib/sqlalchemy/dialects/__init__.py4
-rw-r--r--lib/sqlalchemy/dialects/access/base.py12
-rw-r--r--lib/sqlalchemy/dialects/firebird/__init__.py2
-rw-r--r--lib/sqlalchemy/dialects/informix/base.py12
-rw-r--r--lib/sqlalchemy/dialects/maxdb/base.py14
-rw-r--r--lib/sqlalchemy/dialects/mssql/__init__.py4
-rw-r--r--lib/sqlalchemy/dialects/mssql/adodbapi.py4
-rw-r--r--lib/sqlalchemy/dialects/mssql/pymssql.py4
-rw-r--r--lib/sqlalchemy/dialects/mssql/pyodbc.py22
-rw-r--r--lib/sqlalchemy/dialects/mssql/zxjdbc.py2
-rw-r--r--lib/sqlalchemy/dialects/mysql/base.py102
-rw-r--r--lib/sqlalchemy/dialects/mysql/mysqldb.py14
-rw-r--r--lib/sqlalchemy/dialects/mysql/oursql.py10
-rw-r--r--lib/sqlalchemy/dialects/mysql/pyodbc.py2
-rw-r--r--lib/sqlalchemy/dialects/oracle/cx_oracle.py108
-rw-r--r--lib/sqlalchemy/dialects/postgresql/__init__.py2
-rw-r--r--lib/sqlalchemy/dialects/sqlite/base.py116
-rw-r--r--lib/sqlalchemy/dialects/sqlite/pysqlite.py38
-rw-r--r--lib/sqlalchemy/dialects/sybase/pyodbc.py4
-rw-r--r--lib/sqlalchemy/dialects/sybase/pysybase.py4
-rw-r--r--lib/sqlalchemy/engine/__init__.py92
-rw-r--r--lib/sqlalchemy/engine/ddl.py22
-rw-r--r--lib/sqlalchemy/engine/strategies.py6
-rw-r--r--lib/sqlalchemy/engine/threadlocal.py2
-rw-r--r--lib/sqlalchemy/event.py36
-rw-r--r--lib/sqlalchemy/log.py6
-rw-r--r--lib/sqlalchemy/orm/dynamic.py26
-rw-r--r--lib/sqlalchemy/orm/evaluator.py12
-rw-r--r--lib/sqlalchemy/orm/exc.py22
-rw-r--r--lib/sqlalchemy/orm/identity.py2
-rw-r--r--lib/sqlalchemy/orm/scoping.py8
-rw-r--r--lib/sqlalchemy/orm/sync.py12
-rw-r--r--lib/sqlalchemy/pool.py96
-rw-r--r--lib/sqlalchemy/processors.py8
-rw-r--r--lib/sqlalchemy/sql/functions.py4
-rw-r--r--lib/sqlalchemy/sql/visitors.py34
-rw-r--r--lib/sqlalchemy/util/queue.py2
-rw-r--r--lib/sqlalchemy/util/topological.py10
41 files changed, 449 insertions, 449 deletions
diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py
index f3ce924d1..4456f351f 100644
--- a/lib/sqlalchemy/connectors/mxodbc.py
+++ b/lib/sqlalchemy/connectors/mxodbc.py
@@ -117,7 +117,7 @@ class MxODBCConnector(Connector):
return False
def _get_server_version_info(self, connection):
- # eGenix suggests using conn.dbms_version instead
+ # eGenix suggests using conn.dbms_version instead
# of what we're doing here
dbapi_con = connection.connection
version = []
diff --git a/lib/sqlalchemy/connectors/pyodbc.py b/lib/sqlalchemy/connectors/pyodbc.py
index f190329f7..7ef0922cf 100644
--- a/lib/sqlalchemy/connectors/pyodbc.py
+++ b/lib/sqlalchemy/connectors/pyodbc.py
@@ -70,7 +70,7 @@ class PyODBCConnector(Connector):
if 'port' in keys and not 'port' in query:
port = ',%d' % int(keys.pop('port'))
- connectors = ["DRIVER={%s}" %
+ connectors = ["DRIVER={%s}" %
keys.pop('driver', self.pyodbc_driver_name),
'Server=%s%s' % (keys.pop('host', ''), port),
'Database=%s' % keys.pop('database', '') ]
@@ -83,9 +83,9 @@ class PyODBCConnector(Connector):
connectors.append("Trusted_Connection=Yes")
# if set to 'Yes', the ODBC layer will try to automagically
- # convert textual data from your database encoding to your
- # client encoding. This should obviously be set to 'No' if
- # you query a cp1253 encoded database from a latin1 client...
+ # convert textual data from your database encoding to your
+ # client encoding. This should obviously be set to 'No' if
+ # you query a cp1253 encoded database from a latin1 client...
if 'odbc_autotranslate' in keys:
connectors.append("AutoTranslate=%s" %
keys.pop("odbc_autotranslate"))
@@ -126,7 +126,7 @@ class PyODBCConnector(Connector):
if self._user_supports_unicode_binds is not None:
self.supports_unicode_binds = self._user_supports_unicode_binds
else:
- self.supports_unicode_binds = (not self.freetds or
+ self.supports_unicode_binds = (not self.freetds or
self.freetds_driver_version >= '0.91'
) and not self.easysoft
# end Py2K
diff --git a/lib/sqlalchemy/connectors/zxJDBC.py b/lib/sqlalchemy/connectors/zxJDBC.py
index 46ab7efa5..1db7a619d 100644
--- a/lib/sqlalchemy/connectors/zxJDBC.py
+++ b/lib/sqlalchemy/connectors/zxJDBC.py
@@ -33,7 +33,7 @@ class ZxJDBCConnector(Connector):
def _create_jdbc_url(self, url):
"""Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`"""
return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host,
- url.port is not None
+ url.port is not None
and ':%s' % url.port or '',
url.database)
@@ -41,8 +41,8 @@ class ZxJDBCConnector(Connector):
opts = self._driver_kwargs()
opts.update(url.query)
return [
- [self._create_jdbc_url(url),
- url.username, url.password,
+ [self._create_jdbc_url(url),
+ url.username, url.password,
self.jdbc_driver_name],
opts]
diff --git a/lib/sqlalchemy/dialects/__init__.py b/lib/sqlalchemy/dialects/__init__.py
index 4b34da82e..69212cd6e 100644
--- a/lib/sqlalchemy/dialects/__init__.py
+++ b/lib/sqlalchemy/dialects/__init__.py
@@ -22,10 +22,10 @@ from .. import util
def _auto_fn(name):
"""default dialect importer.
-
+
plugs into the :class:`.PluginLoader`
as a first-hit system.
-
+
"""
if "." in name:
dialect, driver = name.split(".")
diff --git a/lib/sqlalchemy/dialects/access/base.py b/lib/sqlalchemy/dialects/access/base.py
index 29f10c560..f107c9c8c 100644
--- a/lib/sqlalchemy/dialects/access/base.py
+++ b/lib/sqlalchemy/dialects/access/base.py
@@ -11,7 +11,7 @@ Support for the Microsoft Access database.
.. note::
- The Access dialect is **non-functional as of SQLAlchemy 0.6**,
+ The Access dialect is **non-functional as of SQLAlchemy 0.6**,
pending development efforts to bring it up-to-date.
@@ -125,7 +125,7 @@ class AccessExecutionContext(default.DefaultExecutionContext):
# self._last_inserted_ids[0] is None:
self.cursor.execute("SELECT @@identity AS lastrowid")
row = self.cursor.fetchone()
- self._last_inserted_ids = [int(row[0])]
+ self._last_inserted_ids = [int(row[0])]
#+ self._last_inserted_ids[1:]
# print "LAST ROW ID", self._last_inserted_ids
@@ -260,7 +260,7 @@ class AccessDialect(default.DefaultDialect):
colargs = \
{
- 'nullable': not(col.Required or
+ 'nullable': not(col.Required or
col.Attributes & const.dbAutoIncrField),
}
default = col.DefaultValue
@@ -287,7 +287,7 @@ class AccessDialect(default.DefaultDialect):
if isinstance(thecol.type, AcInteger) and \
not (thecol.default and
isinstance(
- thecol.default.arg,
+ thecol.default.arg,
schema.Sequence
)):
thecol.autoincrement = False
@@ -322,7 +322,7 @@ class AccessDialect(default.DefaultDialect):
# This is necessary, so we get the latest updates
dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
- names = [t.Name for t in dtbs.TableDefs
+ names = [t.Name for t in dtbs.TableDefs
if t.Name[:4] != "MSys" and t.Name[:4] != "~TMP"]
dtbs.Close()
return names
@@ -373,7 +373,7 @@ class AccessCompiler(compiler.SQLCompiler):
'length': 'len',
}
def visit_function(self, func):
- """Access function names differ from the ANSI SQL names;
+ """Access function names differ from the ANSI SQL names;
rewrite common ones"""
func.name = self.function_rewrites.get(func.name, func.name)
return super(AccessCompiler, self).visit_function(func)
diff --git a/lib/sqlalchemy/dialects/firebird/__init__.py b/lib/sqlalchemy/dialects/firebird/__init__.py
index 665e32267..f79588d24 100644
--- a/lib/sqlalchemy/dialects/firebird/__init__.py
+++ b/lib/sqlalchemy/dialects/firebird/__init__.py
@@ -14,7 +14,7 @@ from sqlalchemy.dialects.firebird.base import \
dialect
__all__ = (
- 'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME',
+ 'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME',
'TEXT', 'NUMERIC', 'FLOAT', 'TIMESTAMP', 'VARCHAR', 'CHAR', 'BLOB',
'dialect'
)
diff --git a/lib/sqlalchemy/dialects/informix/base.py b/lib/sqlalchemy/dialects/informix/base.py
index ff096acc6..d1c5933f4 100644
--- a/lib/sqlalchemy/dialects/informix/base.py
+++ b/lib/sqlalchemy/dialects/informix/base.py
@@ -10,7 +10,7 @@
.. note::
The Informix dialect functions on current SQLAlchemy versions
- but is not regularly tested, and may have many issues and
+ but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
@@ -467,7 +467,7 @@ class InformixDialect(default.DefaultDialect):
c = connection.execute(
"""select t1.constrname as cons_name,
t4.colname as local_column, t7.tabname as remote_table,
- t6.colname as remote_column, t7.owner as remote_owner
+ t6.colname as remote_column, t7.owner as remote_owner
from sysconstraints as t1 , systables as t2 ,
sysindexes as t3 , syscolumns as t4 ,
sysreferences as t5 , syscolumns as t6 , systables as t7 ,
@@ -476,7 +476,7 @@ class InformixDialect(default.DefaultDialect):
and t3.tabid = t2.tabid and t3.idxname = t1.idxname
and t4.tabid = t2.tabid and t4.colno in (t3.part1, t3.part2, t3.part3,
t3.part4, t3.part5, t3.part6, t3.part7, t3.part8, t3.part9, t3.part10,
- t3.part11, t3.part11, t3.part12, t3.part13, t3.part4, t3.part15, t3.part16)
+ t3.part11, t3.part11, t3.part12, t3.part13, t3.part4, t3.part15, t3.part16)
and t5.constrid = t1.constrid and t8.constrid = t5.primary
and t6.tabid = t5.ptabid and t6.colno in (t9.part1, t9.part2, t9.part3,
t9.part4, t9.part5, t9.part6, t9.part7, t9.part8, t9.part9, t9.part10,
@@ -523,7 +523,7 @@ class InformixDialect(default.DefaultDialect):
# Select the column positions from sysindexes for sysconstraints
data = connection.execute(
- """select t2.*
+ """select t2.*
from systables as t1, sysindexes as t2, sysconstraints as t3
where t1.tabid=t2.tabid and t1.tabname=? and t1.owner=?
and t2.idxname=t3.idxname and t3.constrtype='P'""",
@@ -545,7 +545,7 @@ class InformixDialect(default.DefaultDialect):
c = connection.execute(
"""select t1.colname
from syscolumns as t1, systables as t2
- where t2.tabname=? and t1.tabid = t2.tabid and
+ where t2.tabname=? and t1.tabid = t2.tabid and
t1.colno in (%s)""" % place_holder,
table_name, *colpositions
).fetchall()
@@ -570,7 +570,7 @@ class InformixDialect(default.DefaultDialect):
c = connection.execute(
"""select t1.colname
from syscolumns as t1, systables as t2
- where t2.tabname=? and t1.tabid = t2.tabid and
+ where t2.tabname=? and t1.tabid = t2.tabid and
t1.colno in (%s)""" % place_holder,
table_name, *colnames
).fetchall()
diff --git a/lib/sqlalchemy/dialects/maxdb/base.py b/lib/sqlalchemy/dialects/maxdb/base.py
index 76adf97ff..f52fc4fa6 100644
--- a/lib/sqlalchemy/dialects/maxdb/base.py
+++ b/lib/sqlalchemy/dialects/maxdb/base.py
@@ -8,7 +8,7 @@
.. note::
- The MaxDB dialect is **non-functional as of SQLAlchemy 0.6**,
+ The MaxDB dialect is **non-functional as of SQLAlchemy 0.6**,
pending development efforts to bring it up-to-date.
Overview
@@ -255,7 +255,7 @@ class MaxTimestamp(sqltypes.DateTime):
value[20:])])
else:
raise exc.InvalidRequestError(
- "datetimeformat '%s' is not supported." %
+ "datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process
@@ -283,18 +283,18 @@ class MaxDate(sqltypes.Date):
if value is None:
return None
else:
- return datetime.date(int(value[0:4]), int(value[4:6]),
+ return datetime.date(int(value[0:4]), int(value[4:6]),
int(value[6:8]))
elif dialect.datetimeformat == 'iso':
def process(value):
if value is None:
return None
else:
- return datetime.date(int(value[0:4]), int(value[5:7]),
+ return datetime.date(int(value[0:4]), int(value[5:7]),
int(value[8:10]))
else:
raise exc.InvalidRequestError(
- "datetimeformat '%s' is not supported." %
+ "datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process
@@ -322,7 +322,7 @@ class MaxTime(sqltypes.Time):
if value is None:
return None
else:
- return datetime.time(int(value[0:4]), int(value[4:6]),
+ return datetime.time(int(value[0:4]), int(value[4:6]),
int(value[6:8]))
elif dialect.datetimeformat == 'iso':
def process(value):
@@ -333,7 +333,7 @@ class MaxTime(sqltypes.Time):
int(value[8:10]))
else:
raise exc.InvalidRequestError(
- "datetimeformat '%s' is not supported." %
+ "datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process
diff --git a/lib/sqlalchemy/dialects/mssql/__init__.py b/lib/sqlalchemy/dialects/mssql/__init__.py
index 8a2101c51..e262d208b 100644
--- a/lib/sqlalchemy/dialects/mssql/__init__.py
+++ b/lib/sqlalchemy/dialects/mssql/__init__.py
@@ -18,9 +18,9 @@ from sqlalchemy.dialects.mssql.base import \
__all__ = (
- 'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
+ 'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME',
- 'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
+ 'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP',
'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect'
) \ No newline at end of file
diff --git a/lib/sqlalchemy/dialects/mssql/adodbapi.py b/lib/sqlalchemy/dialects/mssql/adodbapi.py
index 21e632880..5b2328269 100644
--- a/lib/sqlalchemy/dialects/mssql/adodbapi.py
+++ b/lib/sqlalchemy/dialects/mssql/adodbapi.py
@@ -16,7 +16,7 @@ import sys
class MSDateTime_adodbapi(MSDateTime):
def result_processor(self, dialect, coltype):
def process(value):
- # adodbapi will return datetimes with empty time
+ # adodbapi will return datetimes with empty time
# values as datetime.date() objects.
# Promote them back to full datetime.datetime()
if type(value) is datetime.date:
@@ -49,7 +49,7 @@ class MSDialect_adodbapi(MSDialect):
connectors = ["Provider=SQLOLEDB"]
if 'port' in keys:
- connectors.append ("Data Source=%s, %s" %
+ connectors.append ("Data Source=%s, %s" %
(keys.get("host"), keys.get("port")))
else:
connectors.append ("Data Source=%s" % keys.get("host"))
diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py
index 9cc42c093..f9f2e7a48 100644
--- a/lib/sqlalchemy/dialects/mssql/pymssql.py
+++ b/lib/sqlalchemy/dialects/mssql/pymssql.py
@@ -21,8 +21,8 @@ Sample connect string::
mssql+pymssql://<username>:<password>@<freetds_name>
Adding "?charset=utf8" or similar will cause pymssql to return
-strings as Python unicode objects. This can potentially improve
-performance in some scenarios as decoding of strings is
+strings as Python unicode objects. This can potentially improve
+performance in some scenarios as decoding of strings is
handled natively.
Limitations
diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py
index 17dcbfecd..b3b1641e0 100644
--- a/lib/sqlalchemy/dialects/mssql/pyodbc.py
+++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py
@@ -35,14 +35,14 @@ Examples of pyodbc connection string URLs:
dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english
-* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection
+* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection
that would appear like::
DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass
* ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection
string which includes the port
- information using the comma syntax. This will create the following
+ information using the comma syntax. This will create the following
connection string::
DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass
@@ -83,9 +83,9 @@ the python shell. For example::
Unicode Binds
^^^^^^^^^^^^^
-The current state of PyODBC on a unix backend with FreeTDS and/or
+The current state of PyODBC on a unix backend with FreeTDS and/or
EasySoft is poor regarding unicode; different OS platforms and versions of UnixODBC
-versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically
+versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically
alter how strings are received. The PyODBC dialect attempts to use all the information
it knows to determine whether or not a Python unicode literal can be
passed directly to the PyODBC driver or not; while SQLAlchemy can encode
@@ -93,13 +93,13 @@ these to bytestrings first, some users have reported that PyODBC mis-handles
bytestrings for certain encodings and requires a Python unicode object,
while the author has observed widespread cases where a Python unicode
is completely misinterpreted by PyODBC, particularly when dealing with
-the information schema tables used in table reflection, and the value
+the information schema tables used in table reflection, and the value
must first be encoded to a bytestring.
It is for this reason that whether or not unicode literals for bound
-parameters be sent to PyODBC can be controlled using the
-``supports_unicode_binds`` parameter to ``create_engine()``. When
-left at its default of ``None``, the PyODBC dialect will use its
+parameters be sent to PyODBC can be controlled using the
+``supports_unicode_binds`` parameter to ``create_engine()``. When
+left at its default of ``None``, the PyODBC dialect will use its
best guess as to whether or not the driver deals with unicode literals
well. When ``False``, unicode literals will be encoded first, and when
``True`` unicode literals will be passed straight through. This is an interim
@@ -199,7 +199,7 @@ class MSExecutionContext_pyodbc(MSExecutionContext):
super(MSExecutionContext_pyodbc, self).pre_exec()
- # don't embed the scope_identity select into an
+ # don't embed the scope_identity select into an
# "INSERT .. DEFAULT VALUES"
if self._select_lastrowid and \
self.dialect.use_scope_identity and \
@@ -211,11 +211,11 @@ class MSExecutionContext_pyodbc(MSExecutionContext):
def post_exec(self):
if self._embedded_scope_identity:
# Fetch the last inserted id from the manipulated statement
- # We may have to skip over a number of result sets with
+ # We may have to skip over a number of result sets with
# no data (due to triggers, etc.)
while True:
try:
- # fetchall() ensures the cursor is consumed
+ # fetchall() ensures the cursor is consumed
# without closing it (FreeTDS particularly)
row = self.cursor.fetchall()[0]
break
diff --git a/lib/sqlalchemy/dialects/mssql/zxjdbc.py b/lib/sqlalchemy/dialects/mssql/zxjdbc.py
index bfa358c0c..4bbd82c07 100644
--- a/lib/sqlalchemy/dialects/mssql/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/mssql/zxjdbc.py
@@ -68,7 +68,7 @@ class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
def _get_server_version_info(self, connection):
return tuple(
- int(x)
+ int(x)
for x in connection.connection.dbversion.split('.')
)
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py
index 5a020f416..ff1cf625c 100644
--- a/lib/sqlalchemy/dialects/mysql/base.py
+++ b/lib/sqlalchemy/dialects/mysql/base.py
@@ -44,7 +44,7 @@ Connection Timeouts
-------------------
MySQL features an automatic connection close behavior, for connections that have
-been idle for eight hours or more. To circumvent having this issue, use the
+been idle for eight hours or more. To circumvent having this issue, use the
``pool_recycle`` option which controls the maximum age of any connection::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
@@ -87,15 +87,15 @@ to be used.
Transaction Isolation Level
---------------------------
-:func:`.create_engine` accepts an ``isolation_level``
-parameter which results in the command ``SET SESSION
-TRANSACTION ISOLATION LEVEL <level>`` being invoked for
+:func:`.create_engine` accepts an ``isolation_level``
+parameter which results in the command ``SET SESSION
+TRANSACTION ISOLATION LEVEL <level>`` being invoked for
every new connection. Valid values for this parameter are
-``READ COMMITTED``, ``READ UNCOMMITTED``,
+``READ COMMITTED``, ``READ UNCOMMITTED``,
``REPEATABLE READ``, and ``SERIALIZABLE``::
engine = create_engine(
- "mysql://scott:tiger@localhost/test",
+ "mysql://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
@@ -193,7 +193,7 @@ usual definition of "number of rows matched by an UPDATE or DELETE" statement.
This is in contradiction to the default setting on most MySQL DBAPI drivers,
which is "number of rows actually modified/deleted". For this reason, the
SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag,
-or whatever is equivalent for the DBAPI in use, on connect, unless the flag value
+or whatever is equivalent for the DBAPI in use, on connect, unless the flag value
is overridden using DBAPI-specific options
(such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the
OurSQL driver).
@@ -260,7 +260,7 @@ Index Types
~~~~~~~~~~~~~
Some MySQL storage engines permit you to specify an index type when creating
-an index or primary key constraint. SQLAlchemy provides this feature via the
+an index or primary key constraint. SQLAlchemy provides this feature via the
``mysql_using`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_using='hash')
@@ -270,7 +270,7 @@ As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
PrimaryKeyConstraint("data", mysql_using='hash')
The value passed to the keyword argument will be simply passed through to the
-underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
+underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
type for your MySQL storage engine.
More information can be found at:
@@ -1307,13 +1307,13 @@ class MySQLCompiler(compiler.SQLCompiler):
def get_select_precolumns(self, select):
"""Add special MySQL keywords in place of DISTINCT.
-
- .. note::
-
+
+ .. note::
+
this usage is deprecated. :meth:`.Select.prefix_with`
should be used for special keywords at the start
of a SELECT.
-
+
"""
if isinstance(select._distinct, basestring):
return select._distinct.upper() + " "
@@ -1361,16 +1361,16 @@ class MySQLCompiler(compiler.SQLCompiler):
if limit is None:
# hardwire the upper limit. Currently
# needed by OurSQL with Python 3
- # (https://bugs.launchpad.net/oursql/+bug/686232),
+ # (https://bugs.launchpad.net/oursql/+bug/686232),
# but also is consistent with the usage of the upper
# bound as part of MySQL's "syntax" for OFFSET with
# no LIMIT
return ' \n LIMIT %s, %s' % (
- self.process(sql.literal(offset)),
+ self.process(sql.literal(offset)),
"18446744073709551615")
else:
return ' \n LIMIT %s, %s' % (
- self.process(sql.literal(offset)),
+ self.process(sql.literal(offset)),
self.process(sql.literal(limit)))
else:
# No offset provided, so just use the limit
@@ -1384,10 +1384,10 @@ class MySQLCompiler(compiler.SQLCompiler):
return None
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
- return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
+ return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
for t in [from_table] + list(extra_froms))
- def update_from_clause(self, update_stmt, from_table,
+ def update_from_clause(self, update_stmt, from_table,
extra_froms, from_hints, **kw):
return None
@@ -1416,7 +1416,7 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
constraint_string += "KEY %s (%s)" % (
self.preparer.quote(
"idx_autoinc_%s" % auto_inc_column.name, None
- ),
+ ),
self.preparer.format_column(auto_inc_column)
)
@@ -1453,7 +1453,7 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
opts = dict(
(
- k[len(self.dialect.name)+1:].upper(),
+ k[len(self.dialect.name)+1:].upper(),
v
)
for k, v in table.kwargs.items()
@@ -1469,7 +1469,7 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
- 'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
+ 'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
'DEFAULT_CHARSET',
'DEFAULT_COLLATE'):
opt = opt.replace('_', ' ')
@@ -1489,7 +1489,7 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
table = preparer.format_table(index.table)
columns = [preparer.quote(c.name, c.quote) for c in index.columns]
name = preparer.quote(
- self._index_identifier(index.name),
+ self._index_identifier(index.name),
index.quote)
text = "CREATE "
@@ -1598,24 +1598,24 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
if type_.precision is None:
return self._extend_numeric(type_, "NUMERIC")
elif type_.scale is None:
- return self._extend_numeric(type_,
- "NUMERIC(%(precision)s)" %
+ return self._extend_numeric(type_,
+ "NUMERIC(%(precision)s)" %
{'precision': type_.precision})
else:
- return self._extend_numeric(type_,
- "NUMERIC(%(precision)s, %(scale)s)" %
+ return self._extend_numeric(type_,
+ "NUMERIC(%(precision)s, %(scale)s)" %
{'precision': type_.precision, 'scale' : type_.scale})
def visit_DECIMAL(self, type_):
if type_.precision is None:
return self._extend_numeric(type_, "DECIMAL")
elif type_.scale is None:
- return self._extend_numeric(type_,
- "DECIMAL(%(precision)s)" %
+ return self._extend_numeric(type_,
+ "DECIMAL(%(precision)s)" %
{'precision': type_.precision})
else:
- return self._extend_numeric(type_,
- "DECIMAL(%(precision)s, %(scale)s)" %
+ return self._extend_numeric(type_,
+ "DECIMAL(%(precision)s, %(scale)s)" %
{'precision': type_.precision, 'scale' : type_.scale})
def visit_DOUBLE(self, type_):
@@ -1638,7 +1638,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
if self._mysql_type(type_) and \
type_.scale is not None and \
type_.precision is not None:
- return self._extend_numeric(type_,
+ return self._extend_numeric(type_,
"FLOAT(%s, %s)" % (type_.precision, type_.scale))
elif type_.precision is not None:
return self._extend_numeric(type_, "FLOAT(%s)" % (type_.precision,))
@@ -1647,24 +1647,24 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def visit_INTEGER(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "INTEGER(%(display_width)s)" %
+ return self._extend_numeric(type_,
+ "INTEGER(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "INTEGER")
def visit_BIGINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "BIGINT(%(display_width)s)" %
+ return self._extend_numeric(type_,
+ "BIGINT(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "BIGINT")
def visit_MEDIUMINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "MEDIUMINT(%(display_width)s)" %
+ return self._extend_numeric(type_,
+ "MEDIUMINT(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "MEDIUMINT")
@@ -1677,8 +1677,8 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def visit_SMALLINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
- return self._extend_numeric(type_,
- "SMALLINT(%(display_width)s)" %
+ return self._extend_numeric(type_,
+ "SMALLINT(%(display_width)s)" %
{'display_width': type_.display_width}
)
else:
@@ -1728,7 +1728,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
else:
raise exc.CompileError(
- "VARCHAR requires a length on dialect %s" %
+ "VARCHAR requires a length on dialect %s" %
self.dialect.name)
def visit_CHAR(self, type_):
@@ -1744,7 +1744,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
return self._extend_string(type_, {'national':True}, "VARCHAR(%(length)s)" % {'length': type_.length})
else:
raise exc.CompileError(
- "NVARCHAR requires a length on dialect %s" %
+ "NVARCHAR requires a length on dialect %s" %
self.dialect.name)
def visit_NCHAR(self, type_):
@@ -1805,8 +1805,8 @@ class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
quote = '"'
super(MySQLIdentifierPreparer, self).__init__(
- dialect,
- initial_quote=quote,
+ dialect,
+ initial_quote=quote,
escape_quote=quote)
def _quote_free_identifiers(self, *ids):
@@ -1839,7 +1839,7 @@ class MySQLDialect(default.DefaultDialect):
preparer = MySQLIdentifierPreparer
# default SQL compilation settings -
- # these are modified upon initialize(),
+ # these are modified upon initialize(),
# i.e. first connect
_backslash_escapes = True
_server_ansiquotes = False
@@ -1856,7 +1856,7 @@ class MySQLDialect(default.DefaultDialect):
else:
return None
- _isolation_lookup = set(['SERIALIZABLE',
+ _isolation_lookup = set(['SERIALIZABLE',
'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])
def set_isolation_level(self, connection, level):
@@ -1864,7 +1864,7 @@ class MySQLDialect(default.DefaultDialect):
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
- "Valid isolation levels for %s are %s" %
+ "Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
@@ -1936,7 +1936,7 @@ class MySQLDialect(default.DefaultDialect):
return self._extract_error_code(e) in \
(2006, 2013, 2014, 2045, 2055)
elif isinstance(e, self.dbapi.InterfaceError):
- # if underlying connection is closed,
+ # if underlying connection is closed,
# this is the error you get
return "(0, '')" in str(e)
else:
@@ -2148,9 +2148,9 @@ class MySQLDialect(default.DefaultDialect):
def _parsed_state_or_create(self, connection, table_name, schema=None, **kw):
return self._setup_parser(
- connection,
- table_name,
- schema,
+ connection,
+ table_name,
+ schema,
info_cache=kw.get('info_cache', None)
)
@@ -2158,7 +2158,7 @@ class MySQLDialect(default.DefaultDialect):
def _tabledef_parser(self):
"""return the MySQLTableDefinitionParser, generate if needed.
- The deferred creation ensures that the dialect has
+ The deferred creation ensures that the dialect has
retrieved server version information first.
"""
diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py
index 656e105a7..240f30251 100644
--- a/lib/sqlalchemy/dialects/mysql/mysqldb.py
+++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py
@@ -26,20 +26,20 @@ MySQLdb will accommodate Python ``unicode`` objects if the
``use_unicode=1`` parameter, or the ``charset`` parameter,
is passed as a connection argument.
-Without this setting, many MySQL server installations default to
+Without this setting, many MySQL server installations default to
a ``latin1`` encoding for client connections, which has the effect
-of all data being converted into ``latin1``, even if you have ``utf8``
+of all data being converted into ``latin1``, even if you have ``utf8``
or another character set configured on your tables
and columns. With versions 4.1 and higher, you can change the connection
character set either through server configuration or by including the
``charset`` parameter. The ``charset``
-parameter as received by MySQL-Python also has the side-effect of
+parameter as received by MySQL-Python also has the side-effect of
enabling ``use_unicode=1``::
# set client encoding to utf8; all strings come back as unicode
create_engine('mysql+mysqldb:///mydb?charset=utf8')
-Manually configuring ``use_unicode=0`` will cause MySQL-python to
+Manually configuring ``use_unicode=0`` will cause MySQL-python to
return encoded strings::
# set client encoding to utf8; all strings come back as utf8 str
@@ -57,9 +57,9 @@ It is strongly advised to use the latest version of MySQL-Python.
from sqlalchemy.dialects.mysql.base import (MySQLDialect, MySQLExecutionContext,
MySQLCompiler, MySQLIdentifierPreparer)
from sqlalchemy.connectors.mysqldb import (
- MySQLDBExecutionContext,
- MySQLDBCompiler,
- MySQLDBIdentifierPreparer,
+ MySQLDBExecutionContext,
+ MySQLDBCompiler,
+ MySQLDBIdentifierPreparer,
MySQLDBConnector
)
diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py
index 2a3c6b09c..8f7bebe9c 100644
--- a/lib/sqlalchemy/dialects/mysql/oursql.py
+++ b/lib/sqlalchemy/dialects/mysql/oursql.py
@@ -108,9 +108,9 @@ class MySQLDialect_oursql(MySQLDialect):
arg = "'%s'" % arg
connection.execution_options(_oursql_plain_query=True).execute(query % arg)
- # Because mysql is bad, these methods have to be
+ # Because mysql is bad, these methods have to be
# reimplemented to use _PlainQuery. Basically, some queries
- # refuse to return any data if they're run through
+ # refuse to return any data if they're run through
# the parameterized query API, or refuse to be parameterized
# in the first place.
def do_begin_twophase(self, connection, xid):
@@ -135,7 +135,7 @@ class MySQLDialect_oursql(MySQLDialect):
# Q: why didn't we need all these "plain_query" overrides earlier ?
# am i on a newer/older version of OurSQL ?
def has_table(self, connection, table_name, schema=None):
- return MySQLDialect.has_table(self,
+ return MySQLDialect.has_table(self,
connection.connect().\
execution_options(_oursql_plain_query=True),
table_name, schema)
@@ -183,7 +183,7 @@ class MySQLDialect_oursql(MySQLDialect):
def initialize(self, connection):
return MySQLDialect.initialize(
- self,
+ self,
connection.execution_options(_oursql_plain_query=True)
)
@@ -222,7 +222,7 @@ class MySQLDialect_oursql(MySQLDialect):
opts.setdefault('found_rows', True)
ssl = {}
- for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
+ for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
'ssl_capath', 'ssl_cipher']:
if key in opts:
ssl[key[4:]] = opts[key]
diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py
index 20a16988a..6271286f9 100644
--- a/lib/sqlalchemy/dialects/mysql/pyodbc.py
+++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py
@@ -20,7 +20,7 @@ Connect string::
Limitations
-----------
-The mysql-pyodbc dialect is subject to unresolved character encoding issues
+The mysql-pyodbc dialect is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
of OurSQL, MySQLdb, or MySQL-connector/Python.
diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
index 6e2bc2760..6f0569c30 100644
--- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py
+++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
@@ -9,19 +9,19 @@
Driver
------
-The Oracle dialect uses the cx_oracle driver, available at
-http://cx-oracle.sourceforge.net/ . The dialect has several behaviors
+The Oracle dialect uses the cx_oracle driver, available at
+http://cx-oracle.sourceforge.net/ . The dialect has several behaviors
which are specifically tailored towards compatibility with this module.
Version 5.0 or greater is **strongly** recommended, as SQLAlchemy makes
-extensive use of the cx_oracle output converters for numeric and
+extensive use of the cx_oracle output converters for numeric and
string conversions.
Connecting
----------
-Connecting with create_engine() uses the standard URL approach of
-``oracle://user:pass@host:port/dbname[?key=value&key=value...]``. If dbname is present, the
-host, port, and dbname tokens are converted to a TNS name using the cx_oracle
+Connecting with create_engine() uses the standard URL approach of
+``oracle://user:pass@host:port/dbname[?key=value&key=value...]``. If dbname is present, the
+host, port, and dbname tokens are converted to a TNS name using the cx_oracle
:func:`makedsn()` function. Otherwise, the host token is taken directly as a TNS name.
Additional arguments which may be specified either as query string arguments on the
@@ -53,7 +53,7 @@ handler so that all string based result values are returned as unicode as well.
Generally, the ``NLS_LANG`` environment variable determines the nature
of the encoding to be used.
-Note that this behavior is disabled when Oracle 8 is detected, as it has been
+Note that this behavior is disabled when Oracle 8 is detected, as it has been
observed that issues remain when passing Python unicodes to cx_oracle with Oracle 8.
LOB Objects
@@ -71,7 +71,7 @@ To disable this processing, pass ``auto_convert_lobs=False`` to :func:`create_en
Two Phase Transaction Support
-----------------------------
-Two Phase transactions are implemented using XA transactions. Success has been reported
+Two Phase transactions are implemented using XA transactions. Success has been reported
with this feature but it should be regarded as experimental.
Precision Numerics
@@ -95,14 +95,14 @@ If precision numerics aren't required, the decimal handling
can be disabled by passing the flag ``coerce_to_decimal=False``
to :func:`.create_engine`::
- engine = create_engine("oracle+cx_oracle://dsn",
+ engine = create_engine("oracle+cx_oracle://dsn",
coerce_to_decimal=False)
.. versionadded:: 0.7.6
Add the ``coerce_to_decimal`` flag.
-Another alternative to performance is to use the
-`cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library;
+Another alternative for performance is to use the 
+`cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library;
see :class:`.Numeric` for additional notes.
The handler attempts to use the "precision" and "scale"
@@ -160,7 +160,7 @@ class _OracleNumeric(sqltypes.Numeric):
def result_processor(self, dialect, coltype):
# we apply a cx_oracle type handler to all connections
# that converts floating point strings to Decimal().
- # However, in some subquery situations, Oracle doesn't
+ # However, in some subquery situations, Oracle doesn't
# give us enough information to determine int or Decimal.
# It could even be int/Decimal differently on each row,
# regardless of the scale given for the originating type.
@@ -190,7 +190,7 @@ class _OracleNumeric(sqltypes.Numeric):
else:
return None
else:
- # cx_oracle 4 behavior, will assume
+ # cx_oracle 4 behavior, will assume
# floats
return super(_OracleNumeric, self).\
result_processor(dialect, coltype)
@@ -237,7 +237,7 @@ class _NativeUnicodeMixin(object):
# end Py2K
# we apply a connection output handler that returns
- # unicode in all cases, so the "native_unicode" flag
+ # unicode in all cases, so the "native_unicode" flag
# will be set for the default String.result_processor.
class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR):
@@ -317,15 +317,15 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
getattr(self.compiled, '_quoted_bind_names', None)
if quoted_bind_names:
if not self.dialect.supports_unicode_statements:
- # if DBAPI doesn't accept unicode statements,
+ # if DBAPI doesn't accept unicode statements,
# keys in self.parameters would have been encoded
# here. so convert names in quoted_bind_names
# to encoded as well.
quoted_bind_names = \
dict(
- (fromname.encode(self.dialect.encoding),
- toname.encode(self.dialect.encoding))
- for fromname, toname in
+ (fromname.encode(self.dialect.encoding),
+ toname.encode(self.dialect.encoding))
+ for fromname, toname in
quoted_bind_names.items()
)
for param in self.parameters:
@@ -334,10 +334,10 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
del param[fromname]
if self.dialect.auto_setinputsizes:
- # cx_oracle really has issues when you setinputsizes
+ # cx_oracle really has issues when you setinputsizes
# on String, including that outparams/RETURNING
# breaks for varchars
- self.set_input_sizes(quoted_bind_names,
+ self.set_input_sizes(quoted_bind_names,
exclude_types=self.dialect._cx_oracle_string_types
)
@@ -370,7 +370,7 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
def get_result_proxy(self):
if hasattr(self, 'out_parameters') and self.compiled.returning:
returning_params = dict(
- (k, v.getvalue())
+ (k, v.getvalue())
for k, v in self.out_parameters.items()
)
return ReturningResultProxy(self, returning_params)
@@ -396,7 +396,7 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
impl_type = type.dialect_impl(self.dialect)
dbapi_type = impl_type.get_dbapi_type(self.dialect.dbapi)
result_processor = impl_type.\
- result_processor(self.dialect,
+ result_processor(self.dialect,
dbapi_type)
if result_processor is not None:
out_parameters[name] = \
@@ -405,7 +405,7 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
out_parameters[name] = self.out_parameters[name].getvalue()
else:
result.out_parameters = dict(
- (k, v.getvalue())
+ (k, v.getvalue())
for k, v in self.out_parameters.items()
)
@@ -414,13 +414,13 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_oracle):
"""Support WITH_UNICODE in Python 2.xx.
- WITH_UNICODE allows cx_Oracle's Python 3 unicode handling
- behavior under Python 2.x. This mode in some cases disallows
- and in other cases silently passes corrupted data when
- non-Python-unicode strings (a.k.a. plain old Python strings)
- are passed as arguments to connect(), the statement sent to execute(),
+ WITH_UNICODE allows cx_Oracle's Python 3 unicode handling
+ behavior under Python 2.x. This mode in some cases disallows
+ and in other cases silently passes corrupted data when
+ non-Python-unicode strings (a.k.a. plain old Python strings)
+ are passed as arguments to connect(), the statement sent to execute(),
or any of the bind parameter keys or values sent to execute().
- This optional context therefore ensures that all statements are
+ This optional context therefore ensures that all statements are
passed as Python unicode objects.
"""
@@ -451,7 +451,7 @@ class ReturningResultProxy(base.FullyBufferedResultProxy):
return ret
def _buffer_rows(self):
- return collections.deque([tuple(self._returning_params["ret_%d" % i]
+ return collections.deque([tuple(self._returning_params["ret_%d" % i]
for i, c in enumerate(self._returning_params))])
class OracleDialect_cx_oracle(OracleDialect):
@@ -483,11 +483,11 @@ class OracleDialect_cx_oracle(OracleDialect):
execute_sequence_format = list
- def __init__(self,
- auto_setinputsizes=True,
- auto_convert_lobs=True,
- threaded=True,
- allow_twophase=True,
+ def __init__(self,
+ auto_setinputsizes=True,
+ auto_convert_lobs=True,
+ threaded=True,
+ allow_twophase=True,
coerce_to_decimal=True,
arraysize=50, **kwargs):
OracleDialect.__init__(self, **kwargs)
@@ -510,11 +510,11 @@ class OracleDialect_cx_oracle(OracleDialect):
self._cx_oracle_string_types = types("STRING", "UNICODE", "NCLOB", "CLOB")
self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
- self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")
+ self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")
self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0)
self.supports_native_decimal = (
- self.cx_oracle_ver >= (5, 0) and
+ self.cx_oracle_ver >= (5, 0) and
coerce_to_decimal
)
@@ -572,12 +572,12 @@ class OracleDialect_cx_oracle(OracleDialect):
self._detect_decimal_char(connection)
def _detect_decimal_char(self, connection):
- """detect if the decimal separator character is not '.', as
+ """detect if the decimal separator character is not '.', as
is the case with european locale settings for NLS_LANG.
cx_oracle itself uses similar logic when it formats Python
- Decimal objects to strings on the bind side (as of 5.0.3),
- as Oracle sends/receives string numerics only in the
+ Decimal objects to strings on the bind side (as of 5.0.3),
+ as Oracle sends/receives string numerics only in the
current locale.
"""
@@ -588,14 +588,14 @@ class OracleDialect_cx_oracle(OracleDialect):
cx_Oracle = self.dbapi
conn = connection.connection
- # override the output_type_handler that's
- # on the cx_oracle connection with a plain
+ # override the output_type_handler that's
+ # on the cx_oracle connection with a plain
# one on the cursor
- def output_type_handler(cursor, name, defaultType,
+ def output_type_handler(cursor, name, defaultType,
size, precision, scale):
return cursor.var(
- cx_Oracle.STRING,
+ cx_Oracle.STRING,
255, arraysize=cursor.arraysize)
cursor = conn.cursor()
@@ -625,7 +625,7 @@ class OracleDialect_cx_oracle(OracleDialect):
return
cx_Oracle = self.dbapi
- def output_type_handler(cursor, name, defaultType,
+ def output_type_handler(cursor, name, defaultType,
size, precision, scale):
# convert all NUMBER with precision + positive scale to Decimal
# this almost allows "native decimal" mode.
@@ -633,22 +633,22 @@ class OracleDialect_cx_oracle(OracleDialect):
defaultType == cx_Oracle.NUMBER and \
precision and scale > 0:
return cursor.var(
- cx_Oracle.STRING,
- 255,
- outconverter=self._to_decimal,
+ cx_Oracle.STRING,
+ 255,
+ outconverter=self._to_decimal,
arraysize=cursor.arraysize)
# if NUMBER with zero precision and 0 or neg scale, this appears
- # to indicate "ambiguous". Use a slower converter that will
- # make a decision based on each value received - the type
+ # to indicate "ambiguous". Use a slower converter that will
+ # make a decision based on each value received - the type
# may change from row to row (!). This kills
# off "native decimal" mode, handlers still needed.
elif self.supports_native_decimal and \
defaultType == cx_Oracle.NUMBER \
and not precision and scale <= 0:
return cursor.var(
- cx_Oracle.STRING,
- 255,
- outconverter=self._detect_decimal,
+ cx_Oracle.STRING,
+ 255,
+ outconverter=self._detect_decimal,
arraysize=cursor.arraysize)
# allow all strings to come back natively as Unicode
elif defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
@@ -712,7 +712,7 @@ class OracleDialect_cx_oracle(OracleDialect):
def _get_server_version_info(self, connection):
return tuple(
- int(x)
+ int(x)
for x in connection.connection.version.split('.')
)
diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py
index bc0c31275..339634020 100644
--- a/lib/sqlalchemy/dialects/postgresql/__init__.py
+++ b/lib/sqlalchemy/dialects/postgresql/__init__.py
@@ -14,7 +14,7 @@ from sqlalchemy.dialects.postgresql.base import \
DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect
__all__ = (
-'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC', 'FLOAT', 'REAL', 'INET',
+'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC', 'FLOAT', 'REAL', 'INET',
'CIDR', 'UUID', 'BIT', 'MACADDR', 'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME',
'DATE', 'BYTEA', 'BOOLEAN', 'INTERVAL', 'ARRAY', 'ENUM', 'dialect'
)
diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py
index b5cb3b782..717d6b49a 100644
--- a/lib/sqlalchemy/dialects/sqlite/base.py
+++ b/lib/sqlalchemy/dialects/sqlite/base.py
@@ -12,7 +12,7 @@ section regarding that driver.
Date and Time Types
-------------------
-SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide
+SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide
out of the box functionality for translating values between Python `datetime` objects
and a SQLite-supported format. SQLAlchemy's own :class:`~sqlalchemy.types.DateTime`
and related types provide date formatting and parsing functionality when SQlite is used.
@@ -36,19 +36,19 @@ Two things to note:
This is regardless of the AUTOINCREMENT keyword being present or not.
To specifically render the AUTOINCREMENT keyword on the primary key
-column when rendering DDL, add the flag ``sqlite_autoincrement=True``
+column when rendering DDL, add the flag ``sqlite_autoincrement=True``
to the Table construct::
Table('sometable', metadata,
- Column('id', Integer, primary_key=True),
+ Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Transaction Isolation Level
---------------------------
-:func:`.create_engine` accepts an ``isolation_level`` parameter which results in
-the command ``PRAGMA read_uncommitted <level>`` being invoked for every new
-connection. Valid values for this parameter are ``SERIALIZABLE`` and
+:func:`.create_engine` accepts an ``isolation_level`` parameter which results in
+the command ``PRAGMA read_uncommitted <level>`` being invoked for every new
+connection. Valid values for this parameter are ``SERIALIZABLE`` and
``READ UNCOMMITTED`` corresponding to a value of 0 and 1, respectively.
See the section :ref:`pysqlite_serializable` for an important workaround
when using serializable isolation with Pysqlite.
@@ -57,31 +57,31 @@ Database Locking Behavior / Concurrency
---------------------------------------
Note that SQLite is not designed for a high level of concurrency. The database
-itself, being a file, is locked completely during write operations and within
+itself, being a file, is locked completely during write operations and within
transactions, meaning exactly one connection has exclusive access to the database
during this period - all other connections will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is always
in a transaction; there is no BEGIN method, only commit and rollback. This implies
-that a SQLite DBAPI driver would technically allow only serialized access to a
+that a SQLite DBAPI driver would technically allow only serialized access to a
particular database file at all times. The pysqlite driver attempts to ameliorate this by
deferring the actual BEGIN statement until the first DML (INSERT, UPDATE, or
DELETE) is received within a transaction. While this breaks serializable isolation,
it at least delays the exclusive locking inherent in SQLite's design.
-SQLAlchemy's default mode of usage with the ORM is known
-as "autocommit=False", which means the moment the :class:`.Session` begins to be
+SQLAlchemy's default mode of usage with the ORM is known
+as "autocommit=False", which means the moment the :class:`.Session` begins to be
used, a transaction is begun. As the :class:`.Session` is used, the autoflush
-feature, also on by default, will flush out pending changes to the database
+feature, also on by default, will flush out pending changes to the database
before each query. The effect of this is that a :class:`.Session` used in its
default mode will often emit DML early on, long before the transaction is actually
-committed. This again will have the effect of serializing access to the SQLite
+committed. This again will have the effect of serializing access to the SQLite
database. If highly concurrent reads are desired against the SQLite database,
it is advised that the autoflush feature be disabled, and potentially even
that autocommit be re-enabled, which has the effect of each SQL statement and
flush committing changes immediately.
-For more information on SQLite's lack of concurrency by design, please
+For more information on SQLite's lack of concurrency by design, please
see `Situations Where Another RDBMS May Work Better - High Concurrency <http://www.sqlite.org/whentouse.html>`_
near the bottom of the page.
@@ -112,30 +112,30 @@ class _DateTimeMixin(object):
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
"""Represent a Python datetime object in SQLite using a string.
-
+
The default string storage format is::
-
+
"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(second)02d.%(microsecond)06d"
-
+
e.g.::
-
+
2011-03-15 12:05:57.10558
-
- The storage format can be customized to some degree using the
+
+ The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
-
+
import re
from sqlalchemy.dialects.sqlite import DATETIME
-
+
dt = DATETIME(
storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(min)02d:%(second)02d",
regexp=re.compile("(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)")
)
-
- :param storage_format: format string which will be applied to the
+
+ :param storage_format: format string which will be applied to the
dict with keys year, month, day, hour, minute, second, and microsecond.
-
- :param regexp: regular expression which will be applied to
+
+ :param regexp: regular expression which will be applied to
incoming result rows. If the regexp contains named groups, the
resulting match dict is applied to the Python datetime() constructor
as keyword arguments. Otherwise, if positional groups are used, the
@@ -204,16 +204,16 @@ class DATE(_DateTimeMixin, sqltypes.Date):
"""Represent a Python date object in SQLite using a string.
The default string storage format is::
-
+
"%(year)04d-%(month)02d-%(day)02d"
-
+
e.g.::
-
+
2011-03-15
-
- The storage format can be customized to some degree using the
+
+ The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
-
+
import re
from sqlalchemy.dialects.sqlite import DATE
@@ -221,11 +221,11 @@ class DATE(_DateTimeMixin, sqltypes.Date):
storage_format="%(month)02d/%(day)02d/%(year)04d",
regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
)
-
- :param storage_format: format string which will be applied to the
+
+ :param storage_format: format string which will be applied to the
dict with keys year, month, and day.
-
- :param regexp: regular expression which will be applied to
+
+ :param regexp: regular expression which will be applied to
incoming result rows. If the regexp contains named groups, the
resulting match dict is applied to the Python date() constructor
as keyword arguments. Otherwise, if positional groups are used, the
@@ -261,18 +261,18 @@ class DATE(_DateTimeMixin, sqltypes.Date):
class TIME(_DateTimeMixin, sqltypes.Time):
"""Represent a Python time object in SQLite using a string.
-
+
The default string storage format is::
-
+
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
-
+
e.g.::
-
+
12:05:57.10558
-
- The storage format can be customized to some degree using the
+
+ The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
-
+
import re
from sqlalchemy.dialects.sqlite import TIME
@@ -280,11 +280,11 @@ class TIME(_DateTimeMixin, sqltypes.Time):
storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d",
regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
)
-
- :param storage_format: format string which will be applied to the
+
+ :param storage_format: format string which will be applied to the
dict with keys hour, minute, second, and microsecond.
-
- :param regexp: regular expression which will be applied to
+
+ :param regexp: regular expression which will be applied to
incoming result rows. If the regexp contains named groups, the
resulting match dict is applied to the Python time() constructor
as keyword arguments. Otherwise, if positional groups are used, the
@@ -447,7 +447,7 @@ class SQLiteDDLCompiler(compiler.DDLCompiler):
issubclass(c.type._type_affinity, sqltypes.Integer) and \
not c.foreign_keys:
return None
-
+
return super(SQLiteDDLCompiler, self).\
visit_primary_key_constraint(constraint)
@@ -522,7 +522,7 @@ class SQLiteExecutionContext(default.DefaultExecutionContext):
def _translate_colname(self, colname):
# adjust for dotted column names. SQLite
- # in the case of UNION may store col names as
+ # in the case of UNION may store col names as
# "tablename.colname"
# in cursor.description
if not self._preserve_raw_colnames and "." in colname:
@@ -559,7 +559,7 @@ class SQLiteDialect(default.DefaultDialect):
# this flag used by pysqlite dialect, and perhaps others in the
# future, to indicate the driver is handling date/timestamp
- # conversions (and perhaps datetime/time as well on some
+ # conversions (and perhaps datetime/time as well on some
# hypothetical driver ?)
self.native_datetime = native_datetime
@@ -579,9 +579,9 @@ class SQLiteDialect(default.DefaultDialect):
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
- "Valid isolation levels for %s are %s" %
+ "Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
- )
+ )
cursor = connection.cursor()
cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level)
cursor.close()
@@ -592,11 +592,11 @@ class SQLiteDialect(default.DefaultDialect):
res = cursor.fetchone()
if res:
value = res[0]
- else:
+ else:
# http://www.sqlite.org/changes.html#version_3_3_3
- # "Optional READ UNCOMMITTED isolation (instead of the
- # default isolation level of SERIALIZABLE) and
- # table level locking when database connections
+ # "Optional READ UNCOMMITTED isolation (instead of the
+ # default isolation level of SERIALIZABLE) and
+ # table level locking when database connections
# share a common cache.""
# pre-SQLite 3.3.0 default to 0
value = 0
@@ -712,7 +712,7 @@ class SQLiteDialect(default.DefaultDialect):
pragma = "PRAGMA "
qtable = quote(table_name)
c = _pragma_cursor(
- connection.execute("%stable_info(%s)" %
+ connection.execute("%stable_info(%s)" %
(pragma, qtable)))
found_table = False
columns = []
@@ -721,7 +721,7 @@ class SQLiteDialect(default.DefaultDialect):
if row is None:
break
(name, type_, nullable, default, has_default, primary_key) = \
- (row[1], row[2].upper(), not row[3],
+ (row[1], row[2].upper(), not row[3],
row[4], row[4] is not None, row[5])
name = re.sub(r'^\"|\"$', '', name)
match = re.match(r'(\w+)(\(.*?\))?', type_)
@@ -838,7 +838,7 @@ class SQLiteDialect(default.DefaultDialect):
def _pragma_cursor(cursor):
- """work around SQLite issue whereby cursor.description
+ """work around SQLite issue whereby cursor.description
is blank when PRAGMA returns no rows."""
if cursor.closed:
diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py
index 71f91aa36..c18fd302f 100644
--- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py
+++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py
@@ -12,15 +12,15 @@ module included with the Python distribution.
Driver
------
-When using Python 2.5 and above, the built in ``sqlite3`` driver is
+When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed. Otherwise,
the ``pysqlite2`` driver needs to be present. This is the same driver as
``sqlite3``, just with a different name.
The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded. This allows an explicitly installed pysqlite driver to take
-precedence over the built in one. As with all dialects, a specific
-DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
+precedence over the built in one. As with all dialects, a specific
+DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
this explicitly::
from sqlite3 import dbapi2 as sqlite
@@ -64,25 +64,25 @@ The sqlite ``:memory:`` identifier is the default if no filepath is present. Sp
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
-The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
+The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect of any column
or expression explicitly cast as "date" or "timestamp" will be converted
-to a Python date or datetime object. The date and datetime types provided
-with the pysqlite dialect are not currently compatible with these options,
-since they render the ISO date/datetime including microseconds, which
+to a Python date or datetime object. The date and datetime types provided
+with the pysqlite dialect are not currently compatible with these options,
+since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
-this time automatically render the "cast" syntax required for the
+this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
-datetime/date types natively. Unfortunately, pysqlite
+datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
-leaving SQLAlchemy with no way to detect these types on the fly
+leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
-nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
+nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
- engine = create_engine('sqlite://',
+ engine = create_engine('sqlite://',
connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
@@ -99,7 +99,7 @@ Threading/Pooling Behavior
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This is controlled by the ``check_same_thread``
Pysqlite flag. This default is intended to work with older versions
-of SQLite that did not support multithreaded operation under
+of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
@@ -109,9 +109,9 @@ SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default will use
:class:`.SingletonThreadPool`. This pool maintains a single connection per
thread, so that all access to the engine within the current thread use the
- same ``:memory:`` database - other threads would access a different
+ same ``:memory:`` database - other threads would access a different
``:memory:`` database.
-* When a file-based database is specified, the dialect will use :class:`.NullPool`
+* When a file-based database is specified, the dialect will use :class:`.NullPool`
as the source of connections. This pool closes and discards connections
which are returned to the pool immediately. SQLite file-based connections
have extremely low overhead, so pooling is not necessary. The scheme also
@@ -141,7 +141,7 @@ can be passed to Pysqlite as ``False``::
connect_args={'check_same_thread':False},
poolclass=StaticPool)
-Note that using a ``:memory:`` database in multiple threads requires a recent
+Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
@@ -175,8 +175,8 @@ Unicode
The pysqlite driver only returns Python ``unicode`` objects in result sets, never
plain strings, and accommodates ``unicode`` objects within bound parameter
-values in all cases. Regardless of the SQLAlchemy string type in use,
-string-based result values will by Python ``unicode`` in Python 2.
+values in all cases. Regardless of the SQLAlchemy string type in use,
+string-based result values will be Python ``unicode`` in Python 2. 
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
@@ -191,7 +191,7 @@ The pysqlite DBAPI driver has a long-standing bug in which transactional
state is not begun until the first DML statement, that is INSERT, UPDATE
or DELETE, is emitted. A SELECT statement will not cause transactional
state to begin. While this mode of usage is fine for typical situations
-and has the advantage that the SQLite database file is not prematurely
+and has the advantage that the SQLite database file is not prematurely
locked, it breaks serializable transaction isolation, which requires
that the database file be locked upon any SQL being emitted.
diff --git a/lib/sqlalchemy/dialects/sybase/pyodbc.py b/lib/sqlalchemy/dialects/sybase/pyodbc.py
index 35d8d1542..70bdd71a2 100644
--- a/lib/sqlalchemy/dialects/sybase/pyodbc.py
+++ b/lib/sqlalchemy/dialects/sybase/pyodbc.py
@@ -17,7 +17,7 @@ Connect strings are of the form::
Unicode Support
---------------
-The pyodbc driver currently supports usage of these Sybase types with
+The pyodbc driver currently supports usage of these Sybase types with
Unicode or multibyte strings::
CHAR
@@ -43,7 +43,7 @@ from sqlalchemy.util.compat import decimal
class _SybNumeric_pyodbc(sqltypes.Numeric):
"""Turns Decimals with adjusted() < -6 into floats.
- It's not yet known how to get decimals with many
+ It's not yet known how to get decimals with many
significant digits or very large adjusted() into Sybase
via pyodbc.
diff --git a/lib/sqlalchemy/dialects/sybase/pysybase.py b/lib/sqlalchemy/dialects/sybase/pysybase.py
index e3bfae06c..bf8c2096b 100644
--- a/lib/sqlalchemy/dialects/sybase/pysybase.py
+++ b/lib/sqlalchemy/dialects/sybase/pysybase.py
@@ -38,7 +38,7 @@ class SybaseExecutionContext_pysybase(SybaseExecutionContext):
def set_ddl_autocommit(self, dbapi_connection, value):
if value:
# call commit() on the Sybase connection directly,
- # to avoid any side effects of calling a Connection
+ # to avoid any side effects of calling a Connection
# transactional method inside of pre_exec()
dbapi_connection.commit()
@@ -83,7 +83,7 @@ class SybaseDialect_pysybase(SybaseDialect):
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version_number")
- # i.e. 15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0),
+ # i.e. 15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0),
# (12, 5, 0, 0)
return (vers / 1000, vers % 1000 / 100, vers % 100 / 10, vers % 10)
diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py
index 0f8c09850..500dd0dba 100644
--- a/lib/sqlalchemy/engine/__init__.py
+++ b/lib/sqlalchemy/engine/__init__.py
@@ -101,8 +101,8 @@ default_strategy = 'plain'
def create_engine(*args, **kwargs):
"""Create a new :class:`.Engine` instance.
- The standard calling form is to send the URL as the
- first positional argument, usually a string
+ The standard calling form is to send the URL as the
+ first positional argument, usually a string
that indicates database dialect and connection arguments.
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`.Engine`
@@ -111,14 +111,14 @@ def create_engine(*args, **kwargs):
The string form of the URL is
``dialect+driver://user:password@host/dbname[?key=value..]``, where
- ``dialect`` is a database name such as ``mysql``, ``oracle``,
- ``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
- ``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
+ ``dialect`` is a database name such as ``mysql``, ``oracle``,
+ ``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
+ ``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
- ``**kwargs`` takes a wide variety of options which are routed
- towards their appropriate components. Arguments may be
- specific to the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
+ ``**kwargs`` takes a wide variety of options which are routed
+ towards their appropriate components. Arguments may be
+ specific to the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
:class:`.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`.create_engine()` usage.
@@ -136,11 +136,11 @@ def create_engine(*args, **kwargs):
:ref:`engines_toplevel`
:ref:`connections_toplevel`
-
+
:param assert_unicode: Deprecated. This flag
sets an engine-wide default value for
- the ``assert_unicode`` flag on the
- :class:`.String` type - see that
+ the ``assert_unicode`` flag on the
+ :class:`.String` type - see that
type for further details.
:param case_sensitive=True: if False, result column names
@@ -159,16 +159,16 @@ def create_engine(*args, **kwargs):
:param convert_unicode=False: if set to True, sets
the default behavior of ``convert_unicode`` on the
:class:`.String` type to ``True``, regardless
- of a setting of ``False`` on an individual
+ of a setting of ``False`` on an individual
:class:`.String` type, thus causing all :class:`.String`
-based columns
to accommodate Python ``unicode`` objects. This flag
- is useful as an engine-wide setting when using a
+ is useful as an engine-wide setting when using a
DBAPI that does not natively support Python
``unicode`` objects and raises an error when
one is received (such as pyodbc with FreeTDS).
-
- See :class:`.String` for further details on
+
+ See :class:`.String` for further details on
what this flag indicates.
:param creator: a callable which returns a DBAPI connection.
@@ -192,43 +192,43 @@ def create_engine(*args, **kwargs):
:ref:`dbengine_logging` for information on how to configure logging
directly.
- :param encoding: Defaults to ``utf-8``. This is the string
- encoding used by SQLAlchemy for string encode/decode
- operations which occur within SQLAlchemy, **outside of
- the DBAPI.** Most modern DBAPIs feature some degree of
+ :param encoding: Defaults to ``utf-8``. This is the string
+ encoding used by SQLAlchemy for string encode/decode
+ operations which occur within SQLAlchemy, **outside of
+ the DBAPI.** Most modern DBAPIs feature some degree of
direct support for Python ``unicode`` objects,
what you see in Python 2 as a string of the form
- ``u'some string'``. For those scenarios where the
+ ``u'some string'``. For those scenarios where the
DBAPI is detected as not supporting a Python ``unicode``
- object, this encoding is used to determine the
+ object, this encoding is used to determine the
source/destination encoding. It is **not used**
for those cases where the DBAPI handles unicode
directly.
-
+
To properly configure a system to accommodate Python
- ``unicode`` objects, the DBAPI should be
+ ``unicode`` objects, the DBAPI should be
configured to handle unicode to the greatest
degree as is appropriate - see
the notes on unicode pertaining to the specific
- target database in use at :ref:`dialect_toplevel`.
-
- Areas where string encoding may need to be accommodated
- outside of the DBAPI include zero or more of:
-
- * the values passed to bound parameters, corresponding to
+ target database in use at :ref:`dialect_toplevel`.
+
+ Areas where string encoding may need to be accommodated
+ outside of the DBAPI include zero or more of:
+
+ * the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
- * the values returned in result set columns corresponding
- to the :class:`.Unicode` type or the :class:`.String`
+ * the values returned in result set columns corresponding
+ to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
- * the string SQL statement passed to the DBAPI's
- ``cursor.execute()`` method;
- * the string names of the keys in the bound parameter
- dictionary passed to the DBAPI's ``cursor.execute()``
+ * the string SQL statement passed to the DBAPI's
+ ``cursor.execute()`` method;
+ * the string names of the keys in the bound parameter
+ dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
- * the string column names retrieved from the DBAPI's
+ * the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
-
+
When using Python 3, the DBAPI is required to support
*all* of the above values as Python ``unicode`` objects,
which in Python 3 are just known as ``str``. In Python 2,
@@ -244,9 +244,9 @@ def create_engine(*args, **kwargs):
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
- INSERT statement is emitted with no existing returning()
- clause. This applies to those backends which support RETURNING
- or a compatible construct, including Postgresql, Firebird, Oracle,
+ INSERT statement is emitted with no existing returning()
+ clause. This applies to those backends which support RETURNING
+ or a compatible construct, including Postgresql, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
@@ -256,13 +256,13 @@ def create_engine(*args, **kwargs):
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length`` is used instead.
- :param listeners: A list of one or more
- :class:`~sqlalchemy.interfaces.PoolListener` objects which will
+ :param listeners: A list of one or more
+ :class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
- "sqlalchemy.engine" logger. Defaults to a hexstring of the
+ "sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_overflow=10: the number of connections to allow in
@@ -294,8 +294,8 @@ def create_engine(*args, **kwargs):
of pool to be used.
:param pool_logging_name: String identifier which will be used within
- the "name" field of logging records generated within the
- "sqlalchemy.pool" logger. Defaults to a hexstring of the object's
+ the "name" field of logging records generated within the
+ "sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_size=5: the number of connections to keep open
@@ -315,7 +315,7 @@ def create_engine(*args, **kwargs):
server configuration as well).
:param pool_reset_on_return='rollback': set the "reset on return"
- behavior of the pool, which is whether ``rollback()``,
+ behavior of the pool, which is whether ``rollback()``,
``commit()``, or nothing is called upon connections
being returned to the pool. See the docstring for
``reset_on_return`` at :class:`.Pool`.
diff --git a/lib/sqlalchemy/engine/ddl.py b/lib/sqlalchemy/engine/ddl.py
index d6fdaee2e..c3b32505e 100644
--- a/lib/sqlalchemy/engine/ddl.py
+++ b/lib/sqlalchemy/engine/ddl.py
@@ -28,7 +28,7 @@ class SchemaGenerator(DDLBase):
if table.schema:
self.dialect.validate_identifier(table.schema)
return not self.checkfirst or \
- not self.dialect.has_table(self.connection,
+ not self.dialect.has_table(self.connection,
table.name, schema=table.schema)
def _can_create_sequence(self, sequence):
@@ -39,8 +39,8 @@ class SchemaGenerator(DDLBase):
(
not self.checkfirst or
not self.dialect.has_sequence(
- self.connection,
- sequence.name,
+ self.connection,
+ sequence.name,
schema=sequence.schema)
)
)
@@ -50,9 +50,9 @@ class SchemaGenerator(DDLBase):
tables = self.tables
else:
tables = metadata.tables.values()
- collection = [t for t in sql_util.sort_tables(tables)
+ collection = [t for t in sql_util.sort_tables(tables)
if self._can_create_table(t)]
- seq_coll = [s for s in metadata._sequences.values()
+ seq_coll = [s for s in metadata._sequences.values()
if s.column is None and self._can_create_sequence(s)]
metadata.dispatch.before_create(metadata, self.connection,
@@ -95,7 +95,7 @@ class SchemaGenerator(DDLBase):
def visit_sequence(self, sequence, create_ok=False):
if not create_ok and not self._can_create_sequence(sequence):
- return
+ return
self.connection.execute(schema.CreateSequence(sequence))
def visit_index(self, index):
@@ -116,9 +116,9 @@ class SchemaDropper(DDLBase):
tables = self.tables
else:
tables = metadata.tables.values()
- collection = [t for t in reversed(sql_util.sort_tables(tables))
+ collection = [t for t in reversed(sql_util.sort_tables(tables))
if self._can_drop_table(t)]
- seq_coll = [s for s in metadata._sequences.values()
+ seq_coll = [s for s in metadata._sequences.values()
if s.column is None and self._can_drop_sequence(s)]
metadata.dispatch.before_drop(metadata, self.connection,
@@ -141,7 +141,7 @@ class SchemaDropper(DDLBase):
self.dialect.validate_identifier(table.name)
if table.schema:
self.dialect.validate_identifier(table.schema)
- return not self.checkfirst or self.dialect.has_table(self.connection,
+ return not self.checkfirst or self.dialect.has_table(self.connection,
table.name, schema=table.schema)
def _can_drop_sequence(self, sequence):
@@ -150,8 +150,8 @@ class SchemaDropper(DDLBase):
not sequence.optional) and
(not self.checkfirst or
self.dialect.has_sequence(
- self.connection,
- sequence.name,
+ self.connection,
+ sequence.name,
schema=sequence.schema))
)
diff --git a/lib/sqlalchemy/engine/strategies.py b/lib/sqlalchemy/engine/strategies.py
index a781cb451..1e321603e 100644
--- a/lib/sqlalchemy/engine/strategies.py
+++ b/lib/sqlalchemy/engine/strategies.py
@@ -80,7 +80,7 @@ class DefaultEngineStrategy(EngineStrategy):
return dialect.connect(*cargs, **cparams)
except Exception, e:
# Py3K
- #raise exc.DBAPIError.instance(None, None,
+ #raise exc.DBAPIError.instance(None, None,
# e, dialect.dbapi.Error,
# connection_invalidated=
# dialect.is_disconnect(e, None, None)
@@ -245,8 +245,8 @@ class MockEngineStrategy(EngineStrategy):
from sqlalchemy.engine import ddl
ddl.SchemaDropper(self.dialect, self, **kwargs).traverse_single(entity)
- def _run_visitor(self, visitorcallable, element,
- connection=None,
+ def _run_visitor(self, visitorcallable, element,
+ connection=None,
**kwargs):
kwargs['checkfirst'] = False
visitorcallable(self.dialect, self,
diff --git a/lib/sqlalchemy/engine/threadlocal.py b/lib/sqlalchemy/engine/threadlocal.py
index f0d6803dc..7def7dd9b 100644
--- a/lib/sqlalchemy/engine/threadlocal.py
+++ b/lib/sqlalchemy/engine/threadlocal.py
@@ -7,7 +7,7 @@
"""Provides a thread-local transactional wrapper around the root Engine class.
The ``threadlocal`` module is invoked when using the ``strategy="threadlocal"`` flag
-with :func:`~sqlalchemy.engine.create_engine`. This module is semi-private and is
+with :func:`~sqlalchemy.engine.create_engine`. This module is semi-private and is
invoked automatically when the threadlocal engine strategy is used.
"""
diff --git a/lib/sqlalchemy/event.py b/lib/sqlalchemy/event.py
index 03018f575..8e9064cfc 100644
--- a/lib/sqlalchemy/event.py
+++ b/lib/sqlalchemy/event.py
@@ -25,8 +25,8 @@ def listen(target, identifier, fn, *args, **kw):
list(const.columns)[0].name
)
event.listen(
- UniqueConstraint,
- "after_parent_attach",
+ UniqueConstraint,
+ "after_parent_attach",
unique_constraint_name)
"""
@@ -90,12 +90,12 @@ class _UnpickleDispatch(object):
raise AttributeError("No class with a 'dispatch' member present.")
class _Dispatch(object):
- """Mirror the event listening definitions of an Events class with
+ """Mirror the event listening definitions of an Events class with
listener collections.
- Classes which define a "dispatch" member will return a
- non-instantiated :class:`._Dispatch` subclass when the member
- is accessed at the class level. When the "dispatch" member is
+ Classes which define a "dispatch" member will return a
+ non-instantiated :class:`._Dispatch` subclass when the member
+ is accessed at the class level. When the "dispatch" member is
accessed at the instance level of its owner, an instance
of the :class:`._Dispatch` class is returned.
@@ -103,7 +103,7 @@ class _Dispatch(object):
class defined, by the :func:`._create_dispatcher_class` function.
The original :class:`.Events` classes remain untouched.
This decouples the construction of :class:`.Events` subclasses from
- the implementation used by the event internals, and allows
+ the implementation used by the event internals, and allows
inspecting tools like Sphinx to work in an unsurprising
way against the public API.
@@ -127,7 +127,7 @@ def _event_descriptors(target):
return [getattr(target, k) for k in dir(target) if _is_event_name(k)]
class _EventMeta(type):
- """Intercept new Event subclasses and create
+ """Intercept new Event subclasses and create
associated _Dispatch classes."""
def __init__(cls, classname, bases, dict_):
@@ -135,14 +135,14 @@ class _EventMeta(type):
return type.__init__(cls, classname, bases, dict_)
def _create_dispatcher_class(cls, classname, bases, dict_):
- """Create a :class:`._Dispatch` class corresponding to an
+ """Create a :class:`._Dispatch` class corresponding to an
:class:`.Events` class."""
# there's all kinds of ways to do this,
# i.e. make a Dispatch class that shares the '_listen' method
# of the Event class, this is the straight monkeypatch.
dispatch_base = getattr(cls, 'dispatch', _Dispatch)
- cls.dispatch = dispatch_cls = type("%sDispatch" % classname,
+ cls.dispatch = dispatch_cls = type("%sDispatch" % classname,
(dispatch_base, ), {})
dispatch_cls._listen = cls._listen
dispatch_cls._clear = cls._clear
@@ -236,8 +236,8 @@ class _DispatchDescriptor(object):
for cls in target.__mro__[1:]:
if cls in self._clslevel:
clslevel.extend([
- fn for fn
- in self._clslevel[cls]
+ fn for fn
+ in self._clslevel[cls]
if fn not in clslevel
])
@@ -278,7 +278,7 @@ class _DispatchDescriptor(object):
class _EmptyListener(object):
"""Serves as a class-level interface to the events
- served by a _DispatchDescriptor, when there are no
+ served by a _DispatchDescriptor, when there are no
instance-level events present.
Is replaced by _ListenerCollection when instance-level
@@ -298,7 +298,7 @@ class _EmptyListener(object):
"""Return an event collection which can be modified.
For _EmptyListener at the instance level of
- a dispatcher, this generates a new
+ a dispatcher, this generates a new
_ListenerCollection, applies it to the instance,
and returns it.
@@ -379,7 +379,7 @@ class _ListenerCollection(object):
# I'm not entirely thrilled about the overhead here,
# but this allows class-level listeners to be added
# at any point.
- #
+ #
# In the absense of instance-level listeners,
# we stay with the _EmptyListener object when called
# at the instance level.
@@ -403,8 +403,8 @@ class _ListenerCollection(object):
existing_listeners = self.listeners
existing_listener_set = set(existing_listeners)
self.propagate.update(other.propagate)
- existing_listeners.extend([l for l
- in other.listeners
+ existing_listeners.extend([l for l
+ in other.listeners
if l not in existing_listener_set
and not only_propagate or l in self.propagate
])
@@ -431,7 +431,7 @@ class _ListenerCollection(object):
self.propagate.clear()
class dispatcher(object):
- """Descriptor used by target classes to
+ """Descriptor used by target classes to
deliver the _Dispatch class at the class level
and produce new _Dispatch instances for target
instances.
diff --git a/lib/sqlalchemy/log.py b/lib/sqlalchemy/log.py
index d82cae634..5370c6431 100644
--- a/lib/sqlalchemy/log.py
+++ b/lib/sqlalchemy/log.py
@@ -12,7 +12,7 @@ module. The regular dotted module namespace is used, starting at
'sqlalchemy'. For class-level logging, the class name is appended.
The "echo" keyword parameter, available on SQLA :class:`.Engine`
-and :class:`.Pool` objects, corresponds to a logger specific to that
+and :class:`.Pool` objects, corresponds to a logger specific to that
instance only.
"""
@@ -60,7 +60,7 @@ class InstanceLogger(object):
"""A logger adapter (wrapper) for :class:`.Identified` subclasses.
This allows multiple instances (e.g. Engine or Pool instances)
- to share a logger, but have its verbosity controlled on a
+ to share a logger, but have its verbosity controlled on a
per-instance basis.
The basic functionality is to return a logging level
@@ -185,7 +185,7 @@ def instance_logger(instance, echoflag=None):
logger = logging.getLogger(name)
else:
# if a specified echo flag, return an EchoLogger,
- # which checks the flag, overrides normal log
+ # which checks the flag, overrides normal log
# levels by calling logger._log()
logger = InstanceLogger(echoflag, name)
diff --git a/lib/sqlalchemy/orm/dynamic.py b/lib/sqlalchemy/orm/dynamic.py
index c2f4aff02..d2cb0ab05 100644
--- a/lib/sqlalchemy/orm/dynamic.py
+++ b/lib/sqlalchemy/orm/dynamic.py
@@ -14,7 +14,7 @@ basic add/delete mutation.
from .. import log, util
from ..sql import operators
from . import (
- attributes, object_session, util as orm_util, strategies,
+ attributes, object_session, util as orm_util, strategies,
object_mapper, exc as orm_exc, collections
)
from .query import Query
@@ -60,7 +60,7 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
else:
return self.query_class(self, state)
- def get_collection(self, state, dict_, user_data=None,
+ def get_collection(self, state, dict_, user_data=None,
passive=attributes.PASSIVE_NO_INITIALIZE):
if not passive & attributes.SQL_OK:
return self._get_collection_history(state,
@@ -94,7 +94,7 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
if self.key not in state.committed_state:
state.committed_state[self.key] = CollectionHistory(self, state)
- state._modified_event(dict_,
+ state._modified_event(dict_,
self,
attributes.NEVER_SET)
@@ -104,7 +104,7 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
return state.committed_state[self.key]
def set(self, state, dict_, value, initiator,
- passive=attributes.PASSIVE_OFF,
+ passive=attributes.PASSIVE_OFF,
check_old=None, pop=False):
if initiator and initiator.parent_token is self.parent_token:
return
@@ -141,8 +141,8 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
def get_all_pending(self, state, dict_):
c = self._get_collection_history(state, attributes.PASSIVE_NO_INITIALIZE)
return [
- (attributes.instance_state(x), x)
- for x in
+ (attributes.instance_state(x), x)
+ for x in
c.added_items + c.unchanged_items + c.deleted_items
]
@@ -159,12 +159,12 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
else:
return c
- def append(self, state, dict_, value, initiator,
+ def append(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF):
if initiator is not self:
self.fire_append_event(state, dict_, value, initiator)
- def remove(self, state, dict_, value, initiator,
+ def remove(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF):
if initiator is not self:
self.fire_remove_event(state, dict_, value, initiator)
@@ -203,9 +203,9 @@ class AppenderMixin(object):
mapper = object_mapper(instance)
prop = mapper._props[self.attr.key]
self._criterion = prop.compare(
- operators.eq,
- instance,
- value_is_parent=True,
+ operators.eq,
+ instance,
+ value_is_parent=True,
alias_secondary=False)
if self.attr.order_by:
@@ -279,12 +279,12 @@ class AppenderMixin(object):
def append(self, item):
self.attr.append(
- attributes.instance_state(self.instance),
+ attributes.instance_state(self.instance),
attributes.instance_dict(self.instance), item, None)
def remove(self, item):
self.attr.remove(
- attributes.instance_state(self.instance),
+ attributes.instance_state(self.instance),
attributes.instance_dict(self.instance), item, None)
diff --git a/lib/sqlalchemy/orm/evaluator.py b/lib/sqlalchemy/orm/evaluator.py
index 5de514da8..0bc635db6 100644
--- a/lib/sqlalchemy/orm/evaluator.py
+++ b/lib/sqlalchemy/orm/evaluator.py
@@ -11,10 +11,10 @@ class UnevaluatableError(Exception):
pass
_straight_ops = set(getattr(operators, op)
- for op in ('add', 'mul', 'sub',
+ for op in ('add', 'mul', 'sub',
# Py2K
'div',
- # end Py2K
+ # end Py2K
'mod', 'truediv',
'lt', 'le', 'ne', 'gt', 'ge', 'eq'))
@@ -71,13 +71,13 @@ class EvaluatorCompiler(object):
return True
else:
raise UnevaluatableError(
- "Cannot evaluate clauselist with operator %s" %
+ "Cannot evaluate clauselist with operator %s" %
clause.operator)
return evaluate
def visit_binary(self, clause):
- eval_left,eval_right = map(self.process,
+ eval_left,eval_right = map(self.process,
[clause.left, clause.right])
operator = clause.operator
if operator is operators.is_:
@@ -95,7 +95,7 @@ class EvaluatorCompiler(object):
return operator(eval_left(obj), eval_right(obj))
else:
raise UnevaluatableError(
- "Cannot evaluate %s with operator %s" %
+ "Cannot evaluate %s with operator %s" %
(type(clause).__name__, clause.operator))
return evaluate
@@ -109,7 +109,7 @@ class EvaluatorCompiler(object):
return not value
return evaluate
raise UnevaluatableError(
- "Cannot evaluate %s with operator %s" %
+ "Cannot evaluate %s with operator %s" %
(type(clause).__name__, clause.operator))
def visit_bindparam(self, clause):
diff --git a/lib/sqlalchemy/orm/exc.py b/lib/sqlalchemy/orm/exc.py
index d42dd42a7..783434504 100644
--- a/lib/sqlalchemy/orm/exc.py
+++ b/lib/sqlalchemy/orm/exc.py
@@ -18,13 +18,13 @@ class StaleDataError(sa_exc.SQLAlchemyError):
Conditions which cause this to happen include:
* A flush may have attempted to update or delete rows
- and an unexpected number of rows were matched during
- the UPDATE or DELETE statement. Note that when
+ and an unexpected number of rows were matched during
+ the UPDATE or DELETE statement. Note that when
version_id_col is used, rows in UPDATE or DELETE statements
are also matched against the current known version
identifier.
- * A mapped object with version_id_col was refreshed,
+ * A mapped object with version_id_col was refreshed,
and the version number coming back from the database does
not match that of the object itself.
@@ -52,7 +52,7 @@ class ObjectDereferencedError(sa_exc.SQLAlchemyError):
"""An operation cannot complete due to an object being garbage collected."""
class DetachedInstanceError(sa_exc.SQLAlchemyError):
- """An attempt to access unloaded attributes on a
+ """An attempt to access unloaded attributes on a
mapped instance that is detached."""
class UnmappedInstanceError(UnmappedError):
@@ -91,21 +91,21 @@ class UnmappedClassError(UnmappedError):
class ObjectDeletedError(sa_exc.InvalidRequestError):
"""A refresh operation failed to retrieve the database
row corresponding to an object's known primary key identity.
-
- A refresh operation proceeds when an expired attribute is
+
+ A refresh operation proceeds when an expired attribute is
accessed on an object, or when :meth:`.Query.get` is
used to retrieve an object which is, upon retrieval, detected
as expired. A SELECT is emitted for the target row
based on primary key; if no row is returned, this
exception is raised.
-
- The true meaning of this exception is simply that
+
+ The true meaning of this exception is simply that
no row exists for the primary key identifier associated
- with a persistent object. The row may have been
+ with a persistent object. The row may have been
deleted, or in some cases the primary key updated
to a new value, outside of the ORM's management of the target
- object.
-
+ object.
+
"""
def __init__(self, state, msg=None):
if not msg:
diff --git a/lib/sqlalchemy/orm/identity.py b/lib/sqlalchemy/orm/identity.py
index 4ba54b2f0..6fd184350 100644
--- a/lib/sqlalchemy/orm/identity.py
+++ b/lib/sqlalchemy/orm/identity.py
@@ -117,7 +117,7 @@ class WeakInstanceDict(IdentityMap):
o = existing_state._is_really_none()
if o is not None:
raise AssertionError("A conflicting state is already "
- "present in the identity map for key %r"
+ "present in the identity map for key %r"
% (key, ))
else:
return
diff --git a/lib/sqlalchemy/orm/scoping.py b/lib/sqlalchemy/orm/scoping.py
index 940ae1db9..649ab7b87 100644
--- a/lib/sqlalchemy/orm/scoping.py
+++ b/lib/sqlalchemy/orm/scoping.py
@@ -15,10 +15,10 @@ __all__ = ['ScopedSession']
class ScopedSession(object):
"""Provides thread-local management of Sessions.
-
+
Typical invocation is via the :func:`.scoped_session`
function::
-
+
Session = scoped_session(sessionmaker())
The internal registry is accessible,
@@ -70,7 +70,7 @@ class ScopedSession(object):
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
- """return a class property which produces a `Query` object
+ """return a class property which produces a `Query` object
against the class when called.
e.g.::
@@ -121,7 +121,7 @@ def makeprop(name):
def get(self):
return getattr(self.registry(), name)
return property(get, set)
-for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
+for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
'is_active', 'autoflush', 'no_autoflush'):
setattr(ScopedSession, prop, makeprop(prop))
diff --git a/lib/sqlalchemy/orm/sync.py b/lib/sqlalchemy/orm/sync.py
index 6f1e6c166..2b2f6d092 100644
--- a/lib/sqlalchemy/orm/sync.py
+++ b/lib/sqlalchemy/orm/sync.py
@@ -4,14 +4,14 @@
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""private module containing functions used for copying data
+"""private module containing functions used for copying data
between instances based on join conditions.
"""
from . import exc, util as orm_util, attributes
-def populate(source, source_mapper, dest, dest_mapper,
+def populate(source, source_mapper, dest, dest_mapper,
synchronize_pairs, uowcommit, flag_cascaded_pks):
source_dict = source.dict
dest_dict = dest.dict
@@ -20,7 +20,7 @@ def populate(source, source_mapper, dest, dest_mapper,
try:
# inline of source_mapper._get_state_attr_by_column
prop = source_mapper._columntoproperty[l]
- value = source.manager[prop.key].impl.get(source, source_dict,
+ value = source.manager[prop.key].impl.get(source, source_dict,
attributes.PASSIVE_OFF)
except exc.UnmappedColumnError:
_raise_col_to_prop(False, source_mapper, l, dest_mapper, r)
@@ -47,7 +47,7 @@ def clear(dest, dest_mapper, synchronize_pairs):
if r.primary_key:
raise AssertionError(
"Dependency rule tried to blank-out primary key "
- "column '%s' on instance '%s'" %
+ "column '%s' on instance '%s'" %
(r, orm_util.state_str(dest))
)
try:
@@ -75,7 +75,7 @@ def populate_dict(source, source_mapper, dict_, synchronize_pairs):
dict_[r.key] = value
def source_modified(uowcommit, source, source_mapper, synchronize_pairs):
- """return true if the source object has changes from an old to a
+ """return true if the source object has changes from an old to a
new value on the given synchronize pairs
"""
@@ -84,7 +84,7 @@ def source_modified(uowcommit, source, source_mapper, synchronize_pairs):
prop = source_mapper._columntoproperty[l]
except exc.UnmappedColumnError:
_raise_col_to_prop(False, source_mapper, l, None, r)
- history = uowcommit.get_attribute_history(source, prop.key,
+ history = uowcommit.get_attribute_history(source, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
return bool(history.deleted)
else:
diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py
index 4f06bd5d9..0b3d7d0eb 100644
--- a/lib/sqlalchemy/pool.py
+++ b/lib/sqlalchemy/pool.py
@@ -25,7 +25,7 @@ from .util import threading, memoized_property, \
proxies = {}
def manage(module, **params):
- """Return a proxy for a DB-API module that automatically
+ """Return a proxy for a DB-API module that automatically
pools connections.
Given a DB-API 2.0 module and pool management parameters, returns
@@ -64,11 +64,11 @@ reset_none = util.symbol('reset_none')
class Pool(log.Identified):
"""Abstract base class for connection pools."""
- def __init__(self,
- creator, recycle=-1, echo=None,
+ def __init__(self,
+ creator, recycle=-1, echo=None,
use_threadlocal=False,
logging_name=None,
- reset_on_return=True,
+ reset_on_return=True,
listeners=None,
events=None,
_dispatch=None):
@@ -85,8 +85,8 @@ class Pool(log.Identified):
replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
- the "name" field of logging records generated within the
- "sqlalchemy.pool" logger. Defaults to a hexstring of the object's
+ the "name" field of logging records generated within the
+ "sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: If True, connections being pulled and retrieved
@@ -119,7 +119,7 @@ class Pool(log.Identified):
:class:`~sqlalchemy.interfaces.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
- pool. This has been superseded by
+ pool. This has been superseded by
:func:`~sqlalchemy.event.listen`.
"""
@@ -141,7 +141,7 @@ class Pool(log.Identified):
self._reset_on_return = reset_commit
else:
raise exc.ArgumentError(
- "Invalid value for 'reset_on_return': %r"
+ "Invalid value for 'reset_on_return': %r"
% reset_on_return)
self.echo = echo
@@ -190,8 +190,8 @@ class Pool(log.Identified):
"""Return a new :class:`.Pool`, of the same class as this one
and configured with identical creation arguments.
- This method is used in conjunection with :meth:`dispose`
- to close out an entire :class:`.Pool` and create a new one in
+ This method is used in conjunction with :meth:`dispose`
+ to close out an entire :class:`.Pool` and create a new one in
its place.
"""
@@ -204,7 +204,7 @@ class Pool(log.Identified):
This method leaves the possibility of checked-out connections
remaining open, as it only affects connections that are
idle in the pool.
-
+
See also the :meth:`Pool.recreate` method.
"""
@@ -213,11 +213,11 @@ class Pool(log.Identified):
def _replace(self):
"""Dispose + recreate this pool.
-
- Subclasses may employ special logic to
+
+ Subclasses may employ special logic to
move threads waiting on this pool to the
new one.
-
+
"""
self.dispose()
return self.recreate()
@@ -225,8 +225,8 @@ class Pool(log.Identified):
def connect(self):
"""Return a DBAPI connection from the pool.
- The connection is instrumented such that when its
- ``close()`` method is called, the connection will be returned to
+ The connection is instrumented such that when its
+ ``close()`` method is called, the connection will be returned to
the pool.
"""
@@ -372,11 +372,11 @@ def _finalize_fairy(connection, connection_record, pool, ref, echo):
if connection_record is not None:
connection_record.fairy = None
if echo:
- pool.logger.debug("Connection %r being returned to pool",
+ pool.logger.debug("Connection %r being returned to pool",
connection)
if connection_record.finalize_callback:
connection_record.finalize_callback(connection)
- del connection_record.finalize_callback
+ del connection_record.finalize_callback
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, connection_record)
pool._return_conn(connection_record)
@@ -399,13 +399,13 @@ class _ConnectionFairy(object):
rec = self._connection_record = pool._do_get()
conn = self.connection = self._connection_record.get_connection()
rec.fairy = weakref.ref(
- self,
+ self,
lambda ref:_finalize_fairy and _finalize_fairy(conn, rec, pool, ref, _echo)
)
_refs.add(rec)
except:
# helps with endless __getattr__ loops later on
- self.connection = None
+ self.connection = None
self._connection_record = None
raise
if self._echo:
@@ -467,7 +467,7 @@ class _ConnectionFairy(object):
attempts = 2
while attempts > 0:
try:
- self._pool.dispatch.checkout(self.connection,
+ self._pool.dispatch.checkout(self.connection,
self._connection_record,
self)
return self
@@ -510,7 +510,7 @@ class _ConnectionFairy(object):
self._close()
def _close(self):
- _finalize_fairy(self.connection, self._connection_record,
+ _finalize_fairy(self.connection, self._connection_record,
self._pool, None, self._echo)
self.connection = None
self._connection_record = None
@@ -523,7 +523,7 @@ class SingletonThreadPool(Pool):
Options are the same as those of :class:`.Pool`, as well as:
- :param pool_size: The number of threads in which to maintain connections
+ :param pool_size: The number of threads in which to maintain connections
at once. Defaults to five.
:class:`.SingletonThreadPool` is used by the SQLite dialect
@@ -541,12 +541,12 @@ class SingletonThreadPool(Pool):
def recreate(self):
self.logger.info("Pool recreating")
- return self.__class__(self._creator,
- pool_size=self.size,
- recycle=self._recycle,
- echo=self.echo,
+ return self.__class__(self._creator,
+ pool_size=self.size,
+ recycle=self._recycle,
+ echo=self.echo,
logging_name=self._orig_logging_name,
- use_threadlocal=self._use_threadlocal,
+ use_threadlocal=self._use_threadlocal,
_dispatch=self.dispatch)
def dispose(self):
@@ -599,7 +599,7 @@ class DummyLock(object):
class QueuePool(Pool):
"""A :class:`.Pool` that imposes a limit on the number of open connections.
- :class:`.QueuePool` is the default pooling implementation used for
+ :class:`.QueuePool` is the default pooling implementation used for
all :class:`.Engine` objects, unless the SQLite dialect is in use.
"""
@@ -658,18 +658,18 @@ class QueuePool(Pool):
:meth:`unique_connection` method is provided to bypass the
threadlocal behavior installed into :meth:`connect`.
- :param reset_on_return: Determine steps to take on
- connections as they are returned to the pool.
+ :param reset_on_return: Determine steps to take on
+ connections as they are returned to the pool.
reset_on_return can have any of these values:
* 'rollback' - call rollback() on the connection,
to release locks and transaction resources.
This is the default value. The vast majority
of use cases should leave this value set.
- * True - same as 'rollback', this is here for
+ * True - same as 'rollback', this is here for
backwards compatibility.
* 'commit' - call commit() on the connection,
- to release locks and transaction resources.
+ to release locks and transaction resources.
A commit here may be desirable for databases that
cache query plans if a commit is emitted,
such as Microsoft SQL Server. However, this
@@ -681,7 +681,7 @@ class QueuePool(Pool):
that has no transaction support at all,
namely MySQL MyISAM. By not doing anything,
performance can be improved. This
- setting should **never be selected** for a
+ setting should **never be selected** for a
database that supports transactions,
as it will lead to deadlocks and stale
state.
@@ -732,7 +732,7 @@ class QueuePool(Pool):
else:
raise exc.TimeoutError(
"QueuePool limit of size %d overflow %d reached, "
- "connection timed out, timeout %d" %
+ "connection timed out, timeout %d" %
(self.size(), self.overflow(), self._timeout))
self._overflow_lock.acquire()
@@ -749,10 +749,10 @@ class QueuePool(Pool):
def recreate(self):
self.logger.info("Pool recreating")
- return self.__class__(self._creator, pool_size=self._pool.maxsize,
+ return self.__class__(self._creator, pool_size=self._pool.maxsize,
max_overflow=self._max_overflow,
- timeout=self._timeout,
- recycle=self._recycle, echo=self.echo,
+ timeout=self._timeout,
+ recycle=self._recycle, echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
_dispatch=self.dispatch)
@@ -777,9 +777,9 @@ class QueuePool(Pool):
def status(self):
return "Pool size: %d Connections in pool: %d "\
"Current Overflow: %d Current Checked out "\
- "connections: %d" % (self.size(),
- self.checkedin(),
- self.overflow(),
+ "connections: %d" % (self.size(),
+ self.checkedin(),
+ self.overflow(),
self.checkedout())
def size(self):
@@ -822,11 +822,11 @@ class NullPool(Pool):
def recreate(self):
self.logger.info("Pool recreating")
- return self.__class__(self._creator,
- recycle=self._recycle,
- echo=self.echo,
+ return self.__class__(self._creator,
+ recycle=self._recycle,
+ echo=self.echo,
logging_name=self._orig_logging_name,
- use_threadlocal=self._use_threadlocal,
+ use_threadlocal=self._use_threadlocal,
_dispatch=self.dispatch)
def dispose(self):
@@ -915,7 +915,7 @@ class AssertionPool(Pool):
def recreate(self):
self.logger.info("Pool recreating")
- return self.__class__(self._creator, echo=self.echo,
+ return self.__class__(self._creator, echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch)
@@ -982,7 +982,7 @@ class _DBProxy(object):
try:
if key not in self.pools:
kw.pop('sa_pool_key', None)
- pool = self.poolclass(lambda:
+ pool = self.poolclass(lambda:
self.module.connect(*args, **kw), **self.kw)
self.pools[key] = pool
return pool
@@ -1021,6 +1021,6 @@ class _DBProxy(object):
return kw['sa_pool_key']
return tuple(
- list(args) +
+ list(args) +
[(k, kw[k]) for k in sorted(kw)]
)
diff --git a/lib/sqlalchemy/processors.py b/lib/sqlalchemy/processors.py
index a3adbe293..240263feb 100644
--- a/lib/sqlalchemy/processors.py
+++ b/lib/sqlalchemy/processors.py
@@ -5,7 +5,7 @@
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""defines generic type conversion functions, as used in bind and result
+"""defines generic type conversion functions, as used in bind and result
processors.
They all share one common characteristic: None is passed through unchanged.
@@ -116,9 +116,9 @@ try:
def to_decimal_processor_factory(target_class, scale=10):
# Note that the scale argument is not taken into account for integer
- # values in the C implementation while it is in the Python one.
- # For example, the Python implementation might return
- # Decimal('5.00000') whereas the C implementation will
+ # values in the C implementation while it is in the Python one.
+ # For example, the Python implementation might return
+ # Decimal('5.00000') whereas the C implementation will
# return Decimal('5'). These are equivalent of course.
return DecimalResultProcessor(target_class, "%%.%df" % scale).process
diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py
index 22f127fcc..5a480f0c3 100644
--- a/lib/sqlalchemy/sql/functions.py
+++ b/lib/sqlalchemy/sql/functions.py
@@ -33,11 +33,11 @@ class GenericFunction(Function):
class next_value(Function):
"""Represent the 'next value', given a :class:`.Sequence`
as it's single argument.
-
+
Compiles into the appropriate function on each backend,
or will raise NotImplementedError if used on a backend
that does not provide support for sequences.
-
+
"""
type = sqltypes.Integer()
name = "next_value"
diff --git a/lib/sqlalchemy/sql/visitors.py b/lib/sqlalchemy/sql/visitors.py
index a79168e75..6f2c82992 100644
--- a/lib/sqlalchemy/sql/visitors.py
+++ b/lib/sqlalchemy/sql/visitors.py
@@ -8,15 +8,15 @@
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
-they apply functionality. The most common use of this pattern
-is statement compilation, where individual expression classes match
-up to rendering methods that produce a string result. Beyond this,
-the visitor system is also used to inspect expressions for various
-information and patterns, as well as for usage in
+they apply functionality. The most common use of this pattern
+is statement compilation, where individual expression classes match
+up to rendering methods that produce a string result. Beyond this,
+the visitor system is also used to inspect expressions for various
+information and patterns, as well as for usage in
some kinds of expression transformation. Other kinds of transformation
use a non-visitor traversal system.
-For many examples of how the visit system is used, see the
+For many examples of how the visit system is used, see the
sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules.
For an introduction to clause adaption, see
http://techspot.zzzeek.org/2008/01/23/expression-transformations/
@@ -28,18 +28,18 @@ import re
from .. import util
import operator
-__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
- 'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
+__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
+ 'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
'iterate_depthfirst', 'traverse_using', 'traverse',
'cloned_traverse', 'replacement_traverse']
class VisitableType(type):
"""Metaclass which assigns a `_compiler_dispatch` method to classes
having a `__visit_name__` attribute.
-
+
The _compiler_dispatch attribute becomes an instance method which
looks approximately like the following::
-
+
def _compiler_dispatch (self, visitor, **kw):
'''Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.'''
@@ -92,7 +92,7 @@ class Visitable(object):
__metaclass__ = VisitableType
class ClauseVisitor(object):
- """Base class for visitor objects which can traverse using
+ """Base class for visitor objects which can traverse using
the traverse() function.
"""
@@ -144,7 +144,7 @@ class ClauseVisitor(object):
return self
class CloningVisitor(ClauseVisitor):
- """Base class for visitor objects which can traverse using
+ """Base class for visitor objects which can traverse using
the cloned_traverse() function.
"""
@@ -160,7 +160,7 @@ class CloningVisitor(ClauseVisitor):
return cloned_traverse(obj, self.__traverse_options__, self._visitor_dict)
class ReplacingCloningVisitor(CloningVisitor):
- """Base class for visitor objects which can traverse using
+ """Base class for visitor objects which can traverse using
the replacement_traverse() function.
"""
@@ -168,8 +168,8 @@ class ReplacingCloningVisitor(CloningVisitor):
def replace(self, elem):
"""receive pre-copied elements during a cloning traversal.
- If the method returns a new element, the element is used
- instead of creating a simple copy of the element. Traversal
+ If the method returns a new element, the element is used
+ instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
@@ -232,7 +232,7 @@ def traverse_depthfirst(obj, opts, visitors):
return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
- """clone the given expression structure, allowing
+ """clone the given expression structure, allowing
modifications by visitors."""
cloned = util.column_dict()
@@ -256,7 +256,7 @@ def cloned_traverse(obj, opts, visitors):
def replacement_traverse(obj, opts, replace):
- """clone the given expression structure, allowing element
+ """clone the given expression structure, allowing element
replacement by a given replacement function."""
cloned = util.column_dict()
diff --git a/lib/sqlalchemy/util/queue.py b/lib/sqlalchemy/util/queue.py
index 571456577..650733288 100644
--- a/lib/sqlalchemy/util/queue.py
+++ b/lib/sqlalchemy/util/queue.py
@@ -186,7 +186,7 @@ class Queue:
def abort(self, context):
"""Issue an 'abort', will force any thread waiting on get()
to stop waiting and raise SAAbort.
-
+
"""
self._sqla_abort_context = context
if not self.not_full.acquire(False):
diff --git a/lib/sqlalchemy/util/topological.py b/lib/sqlalchemy/util/topological.py
index 2ba86b23d..58227af20 100644
--- a/lib/sqlalchemy/util/topological.py
+++ b/lib/sqlalchemy/util/topological.py
@@ -29,7 +29,7 @@ def sort_as_subsets(tuples, allitems):
if not output:
raise CircularDependencyError(
"Circular dependency detected.",
- find_cycles(tuples, allitems),
+ find_cycles(tuples, allitems),
_gen_edges(edges)
)
@@ -56,7 +56,7 @@ def find_cycles(tuples, allitems):
output = set()
- # we'd like to find all nodes that are
+ # we'd like to find all nodes that are
# involved in cycles, so we do the full
# pass through the whole thing for each
# node in the original list.
@@ -86,7 +86,7 @@ def find_cycles(tuples, allitems):
def _gen_edges(edges):
return set([
- (right, left)
- for left in edges
- for right in edges[left]
+ (right, left)
+ for left in edges
+ for right in edges[left]
])