Diffstat (limited to 'lib/sqlalchemy/pool.py')
-rw-r--r-- | lib/sqlalchemy/pool.py | 69
1 files changed, 45 insertions, 24 deletions
diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py
index b0d6e0b01..9d37b1838 100644
--- a/lib/sqlalchemy/pool.py
+++ b/lib/sqlalchemy/pool.py
@@ -1,5 +1,6 @@
 # pool.py - Connection pooling for SQLAlchemy
-# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
+# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer
+# mike_mp@zzzcomputing.com
 #
 # This module is part of SQLAlchemy and is released under
 # the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -232,10 +233,12 @@ class _ConnectionRecord(object):
     def invalidate(self, e=None):
         if e is not None:
-            self.__pool.logger.info("Invalidate connection %r (reason: %s:%s)",
-                                    self.connection, e.__class__.__name__, e)
+            self.__pool.logger.info(
+                "Invalidate connection %r (reason: %s:%s)",
+                self.connection, e.__class__.__name__, e)
         else:
-            self.__pool.logger.info("Invalidate connection %r", self.connection)
+            self.__pool.logger.info(
+                "Invalidate connection %r", self.connection)
         self.__close()
         self.connection = None
@@ -248,8 +251,9 @@ class _ConnectionRecord(object):
                 l.connect(self.connection, self)
         elif self.__pool._recycle > -1 and \
                 time.time() - self.starttime > self.__pool._recycle:
-            self.__pool.logger.info("Connection %r exceeded timeout; recycling",
-                                    self.connection)
+            self.__pool.logger.info(
+                "Connection %r exceeded timeout; recycling",
+                self.connection)
             self.__close()
             self.connection = self.__connect()
             self.info.clear()
@@ -265,8 +269,9 @@ class _ConnectionRecord(object):
         except (SystemExit, KeyboardInterrupt):
             raise
         except Exception, e:
-            self.__pool.logger.debug("Connection %r threw an error on close: %s",
-                                     self.connection, e)
+            self.__pool.logger.debug(
+                "Connection %r threw an error on close: %s",
+                self.connection, e)

     def __connect(self):
         try:
@@ -282,7 +287,9 @@ class _ConnectionRecord(object):
 def _finalize_fairy(connection, connection_record, pool, ref=None):
     _refs.discard(connection_record)
-    if ref is not None and (connection_record.fairy is not ref or isinstance(pool, AssertionPool)):
+    if ref is not None and \
+        (connection_record.fairy is not ref or
+         isinstance(pool, AssertionPool)):
         return

     if connection is not None:
@@ -309,7 +316,8 @@ def _finalize_fairy(connection, connection_record, pool, ref=None):
 _refs = set()

 class _ConnectionFairy(object):
-    """Proxies a DB-API connection and provides return-on-dereference support."""
+    """Proxies a DB-API connection and provides return-on-dereference
+    support."""

     __slots__ = '_pool', '__counter', 'connection', \
                 '_connection_record', '__weakref__', '_detached_info'
@@ -320,10 +328,14 @@ class _ConnectionFairy(object):
         try:
             rec = self._connection_record = pool.get()
             conn = self.connection = self._connection_record.get_connection()
-            rec.fairy = weakref.ref(self, lambda ref:_finalize_fairy(conn, rec, pool, ref))
+            rec.fairy = weakref.ref(
+                self,
+                lambda ref:_finalize_fairy(conn, rec, pool, ref)
+            )
             _refs.add(rec)
         except:
-            self.connection = None # helps with endless __getattr__ loops later on
+            # helps with endless __getattr__ loops later on
+            self.connection = None
             self._connection_record = None
             raise
         self._pool.logger.debug("Connection %r checked out from pool" %
@@ -531,7 +543,8 @@ class SingletonThreadPool(Pool):
                 self._all_conns.pop()

     def status(self):
-        return "SingletonThreadPool id:%d size: %d" % (id(self), len(self._all_conns))
+        return "SingletonThreadPool id:%d size: %d" % \
+            (id(self), len(self._all_conns))

     def do_return_conn(self, conn):
         pass
@@ -624,15 +637,18 @@ class QueuePool(Pool):
         self._overflow = 0 - pool_size
         self._max_overflow = max_overflow
         self._timeout = timeout
-        self._overflow_lock = self._max_overflow > -1 and threading.Lock() or None
+        self._overflow_lock = self._max_overflow > -1 and \
+                                threading.Lock() or None

     def recreate(self):
         self.logger.info("Pool recreating")
         return QueuePool(self._creator, pool_size=self._pool.maxsize,
-                          max_overflow=self._max_overflow, timeout=self._timeout,
+                          max_overflow=self._max_overflow,
+                          timeout=self._timeout,
                           recycle=self._recycle, echo=self.echo,
                           logging_name=self._orig_logging_name,
-                          use_threadlocal=self._use_threadlocal, listeners=self.listeners)
+                          use_threadlocal=self._use_threadlocal,
+                          listeners=self.listeners)

     def do_return_conn(self, conn):
         try:
@@ -649,22 +665,25 @@ class QueuePool(Pool):

     def do_get(self):
         try:
-            wait = self._max_overflow > -1 and self._overflow >= self._max_overflow
+            wait = self._max_overflow > -1 and \
+                        self._overflow >= self._max_overflow
             return self._pool.get(wait, self._timeout)
         except sqla_queue.Empty:
-            if self._max_overflow > -1 and self._overflow >= self._max_overflow:
+            if self._max_overflow > -1 and \
+                        self._overflow >= self._max_overflow:
                 if not wait:
                     return self.do_get()
                 else:
                     raise exc.TimeoutError(
-                        "QueuePool limit of size %d overflow %d reached, "
-                        "connection timed out, timeout %d" %
-                        (self.size(), self.overflow(), self._timeout))
+                            "QueuePool limit of size %d overflow %d reached, "
+                            "connection timed out, timeout %d" %
+                            (self.size(), self.overflow(), self._timeout))

             if self._overflow_lock is not None:
                 self._overflow_lock.acquire()

-            if self._max_overflow > -1 and self._overflow >= self._max_overflow:
+            if self._max_overflow > -1 and \
+                        self._overflow >= self._max_overflow:
                 if self._overflow_lock is not None:
                     self._overflow_lock.release()
                 return self.do_get()
@@ -795,7 +814,8 @@ class StaticPool(Pool):
         return self.connection

 class AssertionPool(Pool):
-    """A Pool that allows at most one checked out connection at any given time.
+    """A Pool that allows at most one checked out connection at any given
+    time.

     This will raise an exception if more than one connection is checked out
     at a time.  Useful for debugging code that is using more connections
@@ -887,7 +907,8 @@ class _DBProxy(object):
         self._create_pool_mutex.acquire()
         try:
             if key not in self.pools:
-                pool = self.poolclass(lambda: self.module.connect(*args, **kw), **self.kw)
+                pool = self.poolclass(lambda:
+                                self.module.connect(*args, **kw), **self.kw)
                 self.pools[key] = pool
                 return pool
             else:
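Note: the _ConnectionFairy hunk above registers a weakref callback (_finalize_fairy) on the checked-out record, which is what gives the fairy its "return-on-dereference" behavior. The following is a minimal, self-contained sketch of that weakref pattern only; the _Pool, _Record, and _Proxy names here are illustrative stand-ins, not SQLAlchemy's API.

import weakref

class _Record(object):
    def __init__(self):
        self.fairy = None          # weakref to the proxy currently holding this record

class _Proxy(object):
    def __init__(self, record):
        self._record = record

class _Pool(object):
    def __init__(self):
        self.checked_in = []

    def _finalize(self, record, ref):
        # Analogous to _finalize_fairy: only return the record if the dying
        # weakref is still the one registered on it.
        if record.fairy is ref:
            self.checked_in.append(record)

    def checkout(self):
        record = _Record()
        proxy = _Proxy(record)
        # The callback fires when the proxy is garbage collected, so the
        # record goes back to the pool even without an explicit close().
        record.fairy = weakref.ref(
            proxy,
            lambda ref, rec=record: self._finalize(rec, ref))
        return proxy

pool = _Pool()
conn = pool.checkout()
del conn                           # dereference: the weakref callback runs
assert len(pool.checked_in) == 1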