summary | refs | log | tree | commit | diff
path: root/heat/db/sqlalchemy/api.py
diff options
context:
space:
mode:
author: Zuul <zuul@review.opendev.org> 2019-12-19 03:42:51 +0000
committer: Gerrit Code Review <review@openstack.org> 2019-12-19 03:42:51 +0000
commit: 68f233a5f0a5e6266770b149e7f5209ccd50dc40 (patch)
tree: 3d0674ec64cdc94340b077928c4614350e6473bc /heat/db/sqlalchemy/api.py
parent: 496e8df76bf2b80432c9c2b2af766587bfa546f0 (diff)
parent: fc0e5c948c702222ef22bc2b587140c31bb14c3e (diff)
download: heat-68f233a5f0a5e6266770b149e7f5209ccd50dc40.tar.gz
Merge "DB: Never retry a subtransaction"
Diffstat (limited to 'heat/db/sqlalchemy/api.py')
-rw-r--r--  heat/db/sqlalchemy/api.py | 43
1 file changed, 29 insertions(+), 14 deletions(-)
diff --git a/heat/db/sqlalchemy/api.py b/heat/db/sqlalchemy/api.py
index b886ef035..a33b39d12 100644
--- a/heat/db/sqlalchemy/api.py
+++ b/heat/db/sqlalchemy/api.py
@@ -13,6 +13,7 @@
"""Implementation of SQLAlchemy backend."""
import datetime
+import functools
import itertools
import random
@@ -24,6 +25,7 @@ from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import utils
from oslo_log import log as logging
from oslo_utils import encodeutils
+from oslo_utils import excutils
from oslo_utils import timeutils
import osprofiler.sqlalchemy
import six
@@ -88,6 +90,26 @@ def get_session():
return get_facade().get_session()
+def retry_on_deadlock(func):
+ @functools.wraps(func)
+ def try_func(context, *args, **kwargs):
+ if (context.session.transaction is None or
+ not context.session.autocommit):
+ wrapped = oslo_db_api.wrap_db_retry(max_retries=3,
+ retry_on_deadlock=True,
+ retry_interval=0.5,
+ inc_retry_interval=True)(func)
+ return wrapped(context, *args, **kwargs)
+ else:
+ try:
+ return func(context, *args, **kwargs)
+ except db_exception.DBDeadlock:
+ with excutils.save_and_reraise_exception():
+ LOG.debug('Not retrying on DBDeadlock '
+ 'because transaction not closed')
+ return try_func
+
+
def update_and_save(context, obj, values):
with context.session.begin(subtransactions=True):
for k, v in six.iteritems(values):
@@ -243,8 +265,7 @@ def resource_get_all(context):
return results
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_deadlock
def resource_purge_deleted(context, stack_id):
filters = {'stack_id': stack_id, 'action': 'DELETE', 'status': 'COMPLETE'}
query = context.session.query(models.Resource)
@@ -265,8 +286,7 @@ def _add_atomic_key_to_values(values, atomic_key):
values['atomic_key'] = atomic_key + 1
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_deadlock
def resource_update(context, resource_id, values, atomic_key,
expected_engine_id=None):
return _try_resource_update(context, resource_id, values, atomic_key,
@@ -463,8 +483,7 @@ def resource_create(context, values):
return resource_ref
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_deadlock
def resource_create_replacement(context,
existing_res_id, existing_res_values,
new_res_values,
@@ -788,8 +807,7 @@ def stack_create(context, values):
return stack_ref
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_deadlock
def stack_update(context, stack_id, values, exp_trvsl=None):
session = context.session
with session.begin(subtransactions=True):
@@ -1145,8 +1163,7 @@ def _delete_event_rows(context, stack_id, limit):
return retval
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_deadlock
def event_create(context, values):
if 'stack_id' in values and cfg.CONF.max_events_per_stack:
# only count events and purge on average
@@ -1558,8 +1575,7 @@ def sync_point_delete_all_by_stack_and_traversal(context, stack_id,
return rows_deleted
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_deadlock
def sync_point_create(context, values):
values['entity_id'] = str(values['entity_id'])
sync_point_ref = models.SyncPoint()
@@ -1575,8 +1591,7 @@ def sync_point_get(context, entity_id, traversal_id, is_update):
)
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_deadlock
def sync_point_update_input_data(context, entity_id,
traversal_id, is_update, atomic_key,
input_data):