author     Krunal Bauskar <krunalbauskar@gmail.com>   2020-06-04 18:37:18 +0800
committer  Marko Mäkelä <marko.makela@mariadb.com>    2020-06-17 09:12:56 +0300
commit     a0d598a4d220fc3845cdee9c8b46ca10b4c1093e (patch)
tree       f353c366629ff355ff93e11d21580c9166ac6a23
parent     592a10d0799807b19ee68ff6c391ee52d564b822 (diff)
download   mariadb-git-a0d598a4d220fc3845cdee9c8b46ca10b4c1093e.tar.gz
MDEV-22794: Avoid potential rollback segment contention with
increased scalability through even distribution

Rollback segments are allocated to transactions in a round-robin
fashion, controlled by incrementing a static-scope counter named
rseg_slot. That logic is protected neither by a mutex nor by making
the counter atomic, so multiple transactions requesting an allocation
at the same time can all be handed the same rollback segment. This is
not a correctness problem, because a rollback segment can host
multiple transactions, but from a contention (performance) perspective
it is better to keep the allocations evenly distributed.

The fix makes the counter atomic, ensuring that the original design
semantics (even distribution through round-robin allocation) are
retained.
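To make the race and the fix concrete, here is a minimal standalone
sketch of the allocation step. It is not MariaDB code: std::atomic
stands in for the Atomic_counter wrapper, and srv_undo_logs_demo is a
hypothetical constant in place of the srv_undo_logs setting. With a
plain static ulong, two concurrent callers of rseg_slot++ could read
the same value; the atomic fetch-and-add hands each caller a distinct
one.

	// Standalone sketch; std::atomic stands in for Atomic_counter,
	// srv_undo_logs_demo for the srv_undo_logs setting.
	#include <atomic>

	static const unsigned long srv_undo_logs_demo = 128;

	unsigned long pick_rseg_slot()
	{
		// fetch_add() is a single atomic read-modify-write, so every
		// concurrent caller observes a distinct counter value and the
		// modulo walks the rollback segments in round-robin order.
		static std::atomic<unsigned> rseg_slot{0};
		return rseg_slot.fetch_add(1, std::memory_order_relaxed)
			% srv_undo_logs_demo;
	}

memory_order_relaxed suffices here because only uniqueness of the
counter value matters; no other memory is published through it.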
-rw-r--r--  storage/innobase/trx/trx0trx.cc | 18
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index c61eba8f421..1741ae37cb4 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -842,14 +842,9 @@ static trx_rseg_t* trx_assign_rseg_low()
 	/* Choose a rollback segment evenly distributed between 0 and
 	innodb_undo_logs-1 in a round-robin fashion, skipping those
-	undo tablespaces that are scheduled for truncation.
-
-	Because rseg_slot is not protected by atomics or any mutex, race
-	conditions are possible, meaning that multiple transactions
-	that start modifications concurrently will write their undo
-	log to the same rollback segment. */
-	static ulong	rseg_slot;
-	ulint	slot = rseg_slot++ % srv_undo_logs;
+	undo tablespaces that are scheduled for truncation. */
+	static Atomic_counter<unsigned>	rseg_slot;
+	ulong	slot = ulong{rseg_slot++} % srv_undo_logs;
 	trx_rseg_t*	rseg;
 #ifdef UNIV_DEBUG
@@ -941,11 +936,8 @@ trx_t::assign_temp_rseg()
 	compile_time_assert(ut_is_2pow(TRX_SYS_N_RSEGS));
 	/* Choose a temporary rollback segment between 0 and 127
-	in a round-robin fashion. Because rseg_slot is not protected by
-	atomics or any mutex, race conditions are possible, meaning that
-	multiple transactions that start modifications concurrently
-	will write their undo log to the same rollback segment. */
-	static ulong	rseg_slot;
+	in a round-robin fashion. */
+	static Atomic_counter<unsigned>	rseg_slot;
 	trx_rseg_t*	rseg = trx_sys.temp_rsegs[
 		rseg_slot++ & (TRX_SYS_N_RSEGS - 1)];
 	ut_ad(!rseg->is_persistent());
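A note on the second hunk: trx_t::assign_temp_rseg() wraps the counter
with a bitmask, rseg_slot++ & (TRX_SYS_N_RSEGS - 1), rather than a
modulo; the preceding compile_time_assert(ut_is_2pow(TRX_SYS_N_RSEGS))
guarantees the mask is valid. A self-contained sketch of the idiom
follows, with hypothetical local names (std::atomic standing in for
Atomic_counter, N_RSEGS for TRX_SYS_N_RSEGS):

	// Standalone illustration: for a power-of-two N,
	// (x & (N - 1)) == (x % N), so wrap-around needs no division.
	#include <atomic>
	#include <cassert>

	static const unsigned N_RSEGS = 128;	// TRX_SYS_N_RSEGS is 128
	static_assert((N_RSEGS & (N_RSEGS - 1)) == 0,
		      "round-robin masking requires a power of two");

	unsigned pick_temp_slot()
	{
		static std::atomic<unsigned> slot{0};
		unsigned raw = slot.fetch_add(1, std::memory_order_relaxed);
		// The mask and the modulo select the same slot.
		assert((raw & (N_RSEGS - 1)) == raw % N_RSEGS);
		return raw & (N_RSEGS - 1);
	}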