author     Lance Bragstad <lbragstad@gmail.com>    2014-09-26 15:31:24 +0000
committer  David Stanek <dstanek@dstanek.com>      2014-09-26 16:05:32 +0000
commit     bdc0f68210a29e7ad02734d11fb88e5c31930cd8 (patch)
tree       18ac3dfe4e2445d3666b080cf6af030364bb2985
parent     24f101737c9a007cbcb9dd7fde00f3a422f313a1 (diff)
download   keystone-bdc0f68210a29e7ad02734d11fb88e5c31930cd8.tar.gz
Address some late comments for memcache clients
This change addresses some late comments from the following review:
https://review.openstack.org/#/c/119452/31

Change-Id: I031620f9085ff914aa9b99c21387f953b6ada171
Related-Bug: #1360446
-rw-r--r--  doc/source/configuration.rst              21
-rw-r--r--  etc/keystone.conf.sample                  12
-rw-r--r--  keystone/common/cache/_memcache_pool.py   47
-rw-r--r--  keystone/common/config.py                 10
-rw-r--r--  keystone/config.py                          1
5 files changed, 55 insertions, 36 deletions
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 6459d22f3..65029ed1e 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -316,28 +316,27 @@ configuration option.
The drivers Keystone provides are:
+* ``keystone.token.persistence.backends.memcache_pool.Token`` - The pooled memcached
+ token persistence engine. This backend supports the concept of pooled memcache
+ client object (allowing for the re-use of the client objects). This backend has
+ a number of extra tunable options in the ``[memcache]`` section of the config.
+
* ``keystone.token.persistence.backends.sql.Token`` - The SQL-based (default)
- token persistence engine. This backend stores all token data in the same SQL
- store that is used for Identity/Assignment/etc.
+ token persistence engine.
* ``keystone.token.persistence.backends.memcache.Token`` - The memcached based
token persistence backend. This backend relies on ``dogpile.cache`` and stores
- the token data in a set of memcached servers. The servers urls are specified
+ the token data in a set of memcached servers. The servers' URLs are specified
in the ``[memcache]\servers`` configuration option in the Keystone config.
-* ``keystone.token.persistence.backends.memcache_pool.Token`` - The pooled memcached
- token persistence engine. This backend supports the concept of pooled memcache
- client object (allowing for the re-use of the client objects). This backend has
- a number of extra tunable options in the ``[memcache]`` section of the config.
-
.. WARNING::
It is recommended you use the ``keystone.token.persistence.backend.memcache_pool.Token``
backend instead of ``keystone.token.persistence.backend.memcache.Token`` as the token
persistence driver if you are deploying Keystone under eventlet instead of
- Apache + mod_wsgi. This recommendation are due to known issues with the use of
- ``thread.local`` under eventlet that can allow the leaking of memcache client objects
- and consumption of extra sockets.
+ Apache + mod_wsgi. This recommendation is due to known issues with the
+ use of ``thread.local`` under eventlet that can allow the leaking of
+ memcache client objects and consumption of extra sockets.
Token Provider
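
For readers who want to try the pooled backend described above, a minimal
keystone.conf fragment might look like the following. This is illustrative
only and not part of this change; the extra pool tunables mentioned above
live in the same [memcache] section and their exact names can be found in
keystone.conf.sample.

    [token]
    driver = keystone.token.persistence.backends.memcache_pool.Token

    [memcache]
    servers = localhost:11211
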
diff --git a/etc/keystone.conf.sample b/etc/keystone.conf.sample
index c058a030c..3daea8937 100644
--- a/etc/keystone.conf.sample
+++ b/etc/keystone.conf.sample
@@ -536,27 +536,27 @@
# Memcache servers in the format of "host:port".
# (dogpile.cache.memcache and keystone.cache.memcache_pool
-# backends only) (list value)
+# backends only). (list value)
#memcache_servers=localhost:11211
# Number of seconds memcached server is considered dead before
# it is tried again. (dogpile.cache.memcache and
-# keystone.cache.memcache_pool backends only) (integer value)
+# keystone.cache.memcache_pool backends only). (integer value)
#memcache_dead_retry=300
# Timeout in seconds for every call to a server.
# (dogpile.cache.memcache and keystone.cache.memcache_pool
-# backends only) (integer value)
+# backends only). (integer value)
#memcache_socket_timeout=3
# Max total number of open connections to every memcached
-# server. (keystone.cache.memcache_pool backend only) (integer
-# value)
+# server. (keystone.cache.memcache_pool backend only).
+# (integer value)
#memcache_pool_maxsize=10
# Number of seconds a connection to memcached is held unused
# in the pool before it is closed.
-# (keystone.cache.memcache_pool backend only) (integer value)
+# (keystone.cache.memcache_pool backend only). (integer value)
#memcache_pool_unused_timeout=60
# Number of seconds that an operation will wait to get a
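
As a rough sketch of what the dead-retry and socket-timeout knobs above
control (this is not Keystone's actual wiring, just a hedged example using
the python-memcached library that the memcache backends build on), these
options correspond to constructor arguments of the same name on the
python-memcached client:

    import memcache

    # Hypothetical direct use of python-memcached. A server that fails is
    # skipped for `dead_retry` seconds before being retried, and every call
    # to a server gives up after `socket_timeout` seconds.
    client = memcache.Client(['localhost:11211'],
                             dead_retry=300,    # cf. memcache_dead_retry=300
                             socket_timeout=3)  # cf. memcache_socket_timeout=3
    client.set('example-key', 'example-value', time=60)
    print(client.get('example-key'))
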
diff --git a/keystone/common/cache/_memcache_pool.py b/keystone/common/cache/_memcache_pool.py
index 70b86b684..5b6422a33 100644
--- a/keystone/common/cache/_memcache_pool.py
+++ b/keystone/common/cache/_memcache_pool.py
@@ -35,11 +35,6 @@ from keystone.openstack.common import log
LOG = log.getLogger(__name__)
-# NOTE(morganfainberg): This is used as the maximum number of seconds a get
-# of a new connection will wait for before raising an exception indicating
-# a serious / most likely non-recoverable delay has occurred.
-CONNECTION_GET_TIMEOUT = 120
-
# This 'class' is taken from http://stackoverflow.com/a/22520633/238308
# Don't inherit client from threading.local so that we can reuse clients in
# different threads
@@ -78,9 +73,25 @@ class ConnectionPool(queue.Queue):
self._acquired = 0
def _create_connection(self):
+ """Returns a connection instance.
+
+ This is called when the pool needs another instance created.
+
+ :returns: a new connection instance
+
+ """
raise NotImplementedError
def _destroy_connection(self, conn):
+ """Destroy and cleanup a connection instance.
+
+ This is called when the pool wishes to get rid of an existing
+ connection. This is the opportunity for a subclass to free up
+ resources and clean up after itself.
+
+ :param conn: the connection object to destroy
+
+ """
raise NotImplementedError
def _debug_logger(self, msg, *args, **kwargs):
@@ -110,6 +121,9 @@ class ConnectionPool(queue.Queue):
def _qsize(self):
return self.maxsize - self._acquired
+ # NOTE(dstanek): stdlib and eventlet Queue implementations
+ # have different names for the qsize method. This ensures
+ # that we override both of them.
if not hasattr(queue.Queue, '_qsize'):
qsize = _qsize
@@ -121,18 +135,24 @@ class ConnectionPool(queue.Queue):
self._acquired += 1
return conn
+ def _drop_expired_connections(self, conn):
+ """Drop all expired connections from the right end of the queue.
+
+ :param conn: connection object
+ """
+ now = time.time()
+ while self.queue and self.queue[0].ttl < now:
+ conn = self.queue.popleft().connection
+ self._debug_logger('Reaping connection %s', id(conn))
+ self._destroy_connection(conn)
+
def _put(self, conn):
self.queue.append(_PoolItem(
ttl=time.time() + self._unused_timeout,
connection=conn,
))
self._acquired -= 1
- # Drop all expired connections from the right end of the queue
- now = time.time()
- while self.queue and self.queue[0].ttl < now:
- conn = self.queue.popleft().connection
- self._debug_logger('Reaping connection %s', id(conn))
- self._destroy_connection(conn)
+ self._drop_expired_connections(conn)
class MemcacheClientPool(ConnectionPool):
@@ -173,9 +193,8 @@ class MemcacheClientPool(ConnectionPool):
# If this client found that one of the hosts is dead, mark it as
# such in our internal list
now = time.time()
- for i, deaduntil, host in zip(itertools.count(),
- self._hosts_deaduntil,
- conn.servers):
+ for i, host in zip(itertools.count(), conn.servers):
+ deaduntil = self._hosts_deaduntil[i]
# Do nothing if we already know this host is dead
if deaduntil <= now:
if host.deaduntil > now:
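
To make the refactored reaping logic easier to follow, here is a small
self-contained sketch of the same idea, simplified and independent of
Keystone's actual ConnectionPool class: connections returned to the pool are
stamped with a time-to-live, and anything that has sat unused past the
timeout is destroyed the next time a connection is put back (compare
_drop_expired_connections above).

    import collections
    import time

    _PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection'])

    class TTLPool(object):
        """Toy pool that reaps connections left unused past a timeout."""

        def __init__(self, unused_timeout):
            self._unused_timeout = unused_timeout
            self._queue = collections.deque()  # oldest items at the left end

        def put(self, conn):
            # Stamp the connection with the time at which it goes stale.
            self._queue.append(
                _PoolItem(ttl=time.time() + self._unused_timeout,
                          connection=conn))
            self._drop_expired_connections()

        def _drop_expired_connections(self):
            # Drop every expired connection from the oldest (left) end.
            now = time.time()
            while self._queue and self._queue[0].ttl < now:
                item = self._queue.popleft()
                print('Reaping connection %r' % (item.connection,))
                # a real pool would call self._destroy_connection(...) here

    pool = TTLPool(unused_timeout=0.1)
    pool.put('conn-a')   # released back to the pool
    time.sleep(0.2)      # 'conn-a' now exceeds the unused timeout
    pool.put('conn-b')   # putting 'conn-b' back reaps 'conn-a'
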
diff --git a/keystone/common/config.py b/keystone/common/config.py
index d7f9dd811..ca8f4c825 100644
--- a/keystone/common/config.py
+++ b/keystone/common/config.py
@@ -336,27 +336,27 @@ FILE_OPTIONS = {
cfg.ListOpt('memcache_servers', default=['localhost:11211'],
help='Memcache servers in the format of "host:port".'
' (dogpile.cache.memcache and keystone.cache.memcache_pool'
- ' backends only)'),
+ ' backends only).'),
cfg.IntOpt('memcache_dead_retry',
default=5 * 60,
help='Number of seconds memcached server is considered dead'
' before it is tried again. (dogpile.cache.memcache and'
- ' keystone.cache.memcache_pool backends only)'),
+ ' keystone.cache.memcache_pool backends only).'),
cfg.IntOpt('memcache_socket_timeout',
default=3,
help='Timeout in seconds for every call to a server.'
' (dogpile.cache.memcache and keystone.cache.memcache_pool'
- ' backends only)'),
+ ' backends only).'),
cfg.IntOpt('memcache_pool_maxsize',
default=10,
help='Max total number of open connections to every'
' memcached server. (keystone.cache.memcache_pool backend'
- ' only)'),
+ ' only).'),
cfg.IntOpt('memcache_pool_unused_timeout',
default=60,
help='Number of seconds a connection to memcached is held'
' unused in the pool before it is closed.'
- ' (keystone.cache.memcache_pool backend only)'),
+ ' (keystone.cache.memcache_pool backend only).'),
cfg.IntOpt('memcache_pool_connection_get_timeout',
default=10,
help='Number of seconds that an operation will wait to get '
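
For background on what these cfg.IntOpt declarations provide, here is a
standalone, hedged sketch of the oslo.config API rather than Keystone's
actual registration path; the group name below is purely illustrative.

    from oslo.config import cfg  # import path used by Keystone in this era

    # Hypothetical registration of one of the options above; 'example_cache'
    # is an illustrative group name, not the group Keystone actually uses.
    opts = [
        cfg.IntOpt('memcache_pool_unused_timeout', default=60,
                   help='Number of seconds a connection to memcached is held'
                        ' unused in the pool before it is closed.'),
    ]
    conf = cfg.ConfigOpts()
    conf.register_opts(opts, group='example_cache')
    conf([])  # parse an empty command line so values (here, defaults) resolve
    print(conf.example_cache.memcache_pool_unused_timeout)  # -> 60
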
diff --git a/keystone/config.py b/keystone/config.py
index 8236afd45..c5c332a42 100644
--- a/keystone/config.py
+++ b/keystone/config.py
@@ -40,6 +40,7 @@ def set_default_for_default_log_levels():
extra_log_level_defaults = [
'dogpile=INFO',
'routes=INFO',
+ 'keystone.common._memcache_pool=INFO',
]
def find_default_log_levels_opt():
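
Finally, a hedged illustration using only the standard library logging module
(Keystone's real setup goes through its oslo-based logging helpers) of what a
default of 'keystone.common._memcache_pool=INFO' means in practice: debug-level
records from a logger with that name are filtered out while INFO and above
still appear, unless an operator overrides the level.

    import logging

    logging.basicConfig(level=logging.DEBUG)

    # Rough equivalent of the new default: keep this one logger at INFO even
    # though the root logger allows DEBUG.
    logging.getLogger('keystone.common._memcache_pool').setLevel(logging.INFO)

    pool_log = logging.getLogger('keystone.common._memcache_pool')
    pool_log.debug('noisy per-connection detail')  # suppressed
    pool_log.info('pool-level event')              # shown
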