| field | value | date |
|---|---|---|
| author | Yikun Jiang <yikunkero@gmail.com> | 2018-03-29 18:29:17 +0800 |
| committer | Yikun Jiang <yikunkero@gmail.com> | 2018-04-13 09:26:12 +0800 |
| commit | 4c205341797da9e00e8fa176ca6dde3cbd400ca1 (patch) | |
| tree | fb497f2e3a622b25da2bec5ecc239424a774eb37 /oslo_db/tests/test_api.py | |
| parent | 85cf42e841bb59ddf294ec39286ce97e57a9faae (diff) | |
| download | oslo-db-4c205341797da9e00e8fa176ca6dde3cbd400ca1.tar.gz | |
Improve exponential backoff for wrap_db_retry
The @oslo_db.api.wrap_db_retry decorator is used to retry db.api
methods. If a db_error is raised, the decorator calls the wrapped
method again after a delay (precisely, 2**retry_times seconds), as
the sketch below illustrates.
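
A minimal sketch of how a db.api method might be wrapped; the function name and body are hypothetical, and only `wrap_db_retry` with its `max_retries`/`retry_on_deadlock` parameters comes from `oslo_db.api`:

```python
from oslo_db import api as oslo_db_api

# Hypothetical db.api method, for illustration only.
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def update_instance_row(context, instance_uuid, values):
    # Runs the DB transaction; on a retriable db_error the decorator
    # sleeps 2**retry_times seconds and then calls this method again.
    pass
```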
When the db_error is a deadlock error, this fixed schedule is no
longer suitable. Some deadlocks occur precisely because several
methods (transactions) are invoked concurrently; if every caller
retries after the same fixed 2**retry_times seconds, the methods
are re-invoked concurrently and can deadlock all over again.

To minimize the chance of reproducing the deadlock, and to reduce
the average sleep time, add random jitter to the delay period by
default whenever a deadlock error is detected (see the sketch
below).
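
A minimal sketch of the two delay schedules, assuming uniform jitter over [0, 2**retry_times) as the tests below suggest; the helper names are illustrative:

```python
import random

def fixed_delay(retry_times):
    # Old behaviour: every concurrent caller sleeps exactly the same
    # time, so the colliding transactions wake up together and can
    # deadlock again.
    return 2 ** retry_times

def jittered_delay(retry_times):
    # New behaviour on deadlock: a random point in [0, 2**retry_times),
    # which desynchronizes the callers and halves the average sleep.
    return random.uniform(0, 2 ** retry_times)
```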
Change-Id: I206745708570f1f292529ff58eee9b83fc09a9f2
Closes-bug: #1737869
Diffstat (limited to 'oslo_db/tests/test_api.py')
-rw-r--r-- | oslo_db/tests/test_api.py | 56 |
1 file changed, 56 insertions, 0 deletions
```diff
diff --git a/oslo_db/tests/test_api.py b/oslo_db/tests/test_api.py
index 6863790..7aafba6 100644
--- a/oslo_db/tests/test_api.py
+++ b/oslo_db/tests/test_api.py
@@ -253,3 +253,59 @@ class DBRetryRequestCase(DBAPITestCase):
 
         self.assertRaises(AttributeError, some_method)
         self.assertFalse(mock_log.called)
+
+    @mock.patch('oslo_db.api.time.sleep', return_value=None)
+    def test_retry_wrapper_deadlock(self, mock_sleep):
+
+        # Tests that jitter is False if the retry wrapper hits a
+        # non-deadlock error.
+        @api.wrap_db_retry(max_retries=1, retry_on_deadlock=True)
+        def some_method_no_deadlock():
+            raise exception.RetryRequest(ValueError())
+        with mock.patch(
+                'oslo_db.api.wrap_db_retry._get_inc_interval') as mock_get:
+            mock_get.return_value = 2, 2
+            self.assertRaises(ValueError, some_method_no_deadlock)
+            mock_get.assert_called_once_with(1, False)
+
+        # Tests that jitter is True if the retry wrapper hits a
+        # deadlock error.
+        @api.wrap_db_retry(max_retries=1, retry_on_deadlock=True)
+        def some_method_deadlock():
+            raise exception.DBDeadlock('test')
+        with mock.patch(
+                'oslo_db.api.wrap_db_retry._get_inc_interval') as mock_get:
+            mock_get.return_value = 0.1, 2
+            self.assertRaises(exception.DBDeadlock, some_method_deadlock)
+            mock_get.assert_called_once_with(1, True)
+
+        # Tests that jitter is True if jitter is enabled by the user.
+        @api.wrap_db_retry(max_retries=1, retry_on_deadlock=True, jitter=True)
+        def some_method_no_deadlock_exp():
+            raise exception.RetryRequest(ValueError())
+        with mock.patch(
+                'oslo_db.api.wrap_db_retry._get_inc_interval') as mock_get:
+            mock_get.return_value = 0.1, 2
+            self.assertRaises(ValueError, some_method_no_deadlock_exp)
+            mock_get.assert_called_once_with(1, True)
+
+    def test_wrap_db_retry_get_interval(self):
+        x = api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
+                              max_retry_interval=11)
+        self.assertEqual(11, x.max_retry_interval)
+        for i in (1, 2, 4):
+            # With jitter: sleep_time is drawn from [0, 2 ** retry_times)
+            sleep_time, n = x._get_inc_interval(i, True)
+            self.assertEqual(2 * i, n)
+            self.assertTrue(2 * i > sleep_time)
+            # Without jitter: sleep_time = 2 ** retry_times
+            sleep_time, n = x._get_inc_interval(i, False)
+            self.assertEqual(2 * i, n)
+            self.assertEqual(2 * i, sleep_time)
+        for i in (8, 16, 32):
+            sleep_time, n = x._get_inc_interval(i, False)
+            self.assertEqual(x.max_retry_interval, sleep_time)
+            self.assertEqual(2 * i, n)
+            sleep_time, n = x._get_inc_interval(i, True)
+            self.assertTrue(x.max_retry_interval >= sleep_time)
+            self.assertEqual(2 * i, n)
```
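
For reference, the tests above mock out `wrap_db_retry._get_inc_interval`. Below is a minimal sketch of a helper consistent with those assertions; it is inferred from the test expectations, not copied from the accompanying change to `oslo_db/api.py`:

```python
import random

def _get_inc_interval(n, jitter, max_retry_interval=11):
    # "n" carries the running 2 ** retry_times value; it doubles on
    # every retry and is handed back to the caller for the next call.
    n = n * 2
    # With jitter the sleep is a random point in [0, n); without it,
    # the full exponential delay is used.
    sleep_time = random.uniform(0, n) if jitter else n
    # Either way, the actual sleep never exceeds max_retry_interval
    # (11 here only because the test above configures it that way).
    return min(sleep_time, max_retry_interval), n
```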