path: root/oslo_db/api.py
author      Jenkins <jenkins@review.openstack.org>  2015-01-27 15:33:15 +0000
committer   Gerrit Code Review <review@openstack.org>  2015-01-27 15:33:15 +0000
commit      32359046d9d135b95d3fa573f4996e8d65594cb0 (patch)
tree        565d04af8fb1429a00d74a469648e135c9396af9 /oslo_db/api.py
parent      776907b9fa66efa12f02a6474989e5ce12075eaf (diff)
parent      1b7c295d522e26aa8bf09242dcc1355c3fb1ab3e (diff)
download    oslo-db-32359046d9d135b95d3fa573f4996e8d65594cb0.tar.gz
Merge "Retry query if db deadlock error is received"
Diffstat (limited to 'oslo_db/api.py')
-rw-r--r--  oslo_db/api.py  46
1 file changed, 33 insertions(+), 13 deletions(-)
diff --git a/oslo_db/api.py b/oslo_db/api.py
index 0031e6c..dc71d96 100644
--- a/oslo_db/api.py
+++ b/oslo_db/api.py
@@ -55,18 +55,26 @@ def safe_for_db_retry(f):
:param f: database api method.
:type f: function.
"""
- f.__dict__['enable_retry'] = True
+ f.enable_retry_on_disconnect = True
+ return f
+
+
+def retry_on_deadlock(f):
+ """Retry a DB API call if Deadlock was received.
+
+ wrap_db_retry will be applied to all db.api functions marked with this
+ decorator.
+ """
+ f.enable_retry_on_deadlock = True
return f
class wrap_db_retry(object):
- """Decorator class. Retry db.api methods, if DBConnectionError() raised.
+ """Retry db.api methods, if db_error raised
- Retry decorated db.api methods. If we enabled `use_db_reconnect`
- in config, this decorator will be applied to all db.api functions,
- marked with @safe_for_db_retry decorator.
- Decorator catches DBConnectionError() and retries function in a
- loop until it succeeds, or until maximum retries count will be reached.
+ Retry decorated db.api methods. This decorator catches db_error and retries
+ the function in a loop until it succeeds, or until the maximum retry
+ count is reached.
Keyword arguments:
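The hunk above pairs the new @retry_on_deadlock marker with the existing @safe_for_db_retry one; both simply tag the function so DBAPI can wrap it later. A minimal sketch of how a backend db.api function might opt in to both (module and function names are hypothetical):

    from oslo_db import api as oslo_db_api

    # Hypothetical backend module, e.g. myservice/db/sqlalchemy/api.py.
    @oslo_db_api.safe_for_db_retry   # retried on DBConnectionError when use_db_reconnect is on
    @oslo_db_api.retry_on_deadlock   # retried on DBDeadlock whenever DBAPI wraps the method
    def instance_update(context, instance_id, values):
        """Hypothetical DB API method; body elided."""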
@@ -84,9 +92,14 @@ class wrap_db_retry(object):
"""
def __init__(self, retry_interval, max_retries, inc_retry_interval,
- max_retry_interval):
+ max_retry_interval, retry_on_disconnect, retry_on_deadlock):
super(wrap_db_retry, self).__init__()
+ self.db_error = ()
+ if retry_on_disconnect:
+ self.db_error += (exception.DBConnectionError, )
+ if retry_on_deadlock:
+ self.db_error += (exception.DBDeadlock, )
self.retry_interval = retry_interval
self.max_retries = max_retries
self.inc_retry_interval = inc_retry_interval
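With the constructor above, the retryable exception classes are collected into a single tuple per wrapper. A sketch of applying wrap_db_retry by hand with explicit settings (parameter values are illustrative, not oslo.db defaults):

    from oslo_db import api as oslo_db_api

    @oslo_db_api.wrap_db_retry(retry_interval=1, max_retries=5,
                               inc_retry_interval=True, max_retry_interval=10,
                               retry_on_disconnect=False, retry_on_deadlock=True)
    def create_reservation(context, values):
        """Hypothetical DB API call retried only on DBDeadlock."""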
@@ -97,17 +110,18 @@ class wrap_db_retry(object):
def wrapper(*args, **kwargs):
next_interval = self.retry_interval
remaining = self.max_retries
+ db_error = self.db_error
while True:
try:
return f(*args, **kwargs)
- except exception.DBConnectionError as e:
+ except db_error as e:
if remaining == 0:
LOG.exception(_LE('DB exceeded retry limit.'))
- raise exception.DBError(e)
+ raise e
if remaining != -1:
remaining -= 1
- LOG.exception(_LE('DB connection error.'))
+ LOG.exception(_LE('DB error.'))
# NOTE(vsergeyev): We are using patched time module, so
# this effectively yields the execution
# context to another green thread.
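The loop above now retries on whichever exceptions were selected in __init__, sleeping between attempts; the inc_retry_interval and max_retry_interval settings stored by the constructor govern how that wait grows. A standalone sketch of the same pattern, independent of oslo.db (names and defaults are illustrative):

    import time

    def call_with_retry(func, retryable, max_retries=20,
                        interval=1, inc_interval=True, max_interval=10):
        """Retry func() on the given exception classes with capped backoff."""
        remaining = max_retries
        while True:
            try:
                return func()
            except retryable:
                if remaining == 0:
                    raise  # retry budget exhausted; propagate the last error
                remaining -= 1
                time.sleep(interval)
                if inc_interval:
                    interval = min(interval * 2, max_interval)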
@@ -193,12 +207,18 @@ class DBAPI(object):
# NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
# DB API methods, decorated with @safe_for_db_retry
# on disconnect.
- if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
+ retry_on_disconnect = self.use_db_reconnect and getattr(
+ attr, 'enable_retry_on_disconnect', False)
+ retry_on_deadlock = getattr(attr, 'enable_retry_on_deadlock', False)
+
+ if retry_on_disconnect or retry_on_deadlock:
attr = wrap_db_retry(
retry_interval=self.retry_interval,
max_retries=self.max_retries,
inc_retry_interval=self.inc_retry_interval,
- max_retry_interval=self.max_retry_interval)(attr)
+ max_retry_interval=self.max_retry_interval,
+ retry_on_disconnect=retry_on_disconnect,
+ retry_on_deadlock=retry_on_deadlock)(attr)
return attr
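Since __getattr__ now derives both flags per method, a consumer only has to enable the relevant configuration: deadlock retries apply to any marked method, while disconnect retries additionally require use_db_reconnect. A sketch of wiring this up through oslo.config (the backend module path and connection string are placeholders; registration of the standard [database] options via oslo_db.options is assumed):

    from oslo_config import cfg
    from oslo_db import api as oslo_db_api
    from oslo_db import options as db_options

    CONF = cfg.CONF
    db_options.set_defaults(CONF, connection='sqlite://')  # registers the [database] options
    CONF.set_override('use_db_reconnect', True, group='database')

    # 'myservice.db.sqlalchemy.api' is a placeholder backend module path.
    IMPL = oslo_db_api.DBAPI.from_config(
        CONF, backend_mapping={'sqlalchemy': 'myservice.db.sqlalchemy.api'})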