author    Marko Mäkelä <marko.makela@mariadb.com>  2020-11-24 11:33:39 +0200
committer Marko Mäkelä <marko.makela@mariadb.com>  2020-11-24 14:01:00 +0200
commit    1b12e251cdc692e4ce2dd130ea45e7b17a9ea5e1 (patch)
tree      96e86dd108771212c4b168109316df92662136d6
parent    dcdc8c3506f4da6d09e98937c299f9bf3f88a814 (diff)
download  mariadb-git-1b12e251cdc692e4ce2dd130ea45e7b17a9ea5e1.tar.gz
MDEV-24271 rw_lock::read_lock_yield() may cause writer starvation
The greedy fetch_add(1) approach of read_trylock() may cause starvation of a waiting write lock request. Let us use a compare-and-swap for the read lock acquisition in order to guarantee the progress of writers.
-rw-r--r--  storage/innobase/buf/buf0buf.cc    | 16
-rw-r--r--  storage/innobase/include/rw_lock.h | 26
2 files changed, 20 insertions(+), 22 deletions(-)
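To see why the greedy style can starve a writer: every reader increments the lock word before checking for a conflict, so under a steady stream of readers the reader count only ever dips to zero transiently, and a writer polling for UNLOCKED may never win. Below is a minimal standalone sketch, not code from this commit, contrasting the two acquisition styles. The bit layout is an assumption for illustration (two high bits for writer state, low bits counting readers), loosely mirroring rw_lock.h.

// Standalone sketch, not InnoDB code. Bit values are assumed for
// illustration: two high bits for writer state, low bits = reader count.
#include <atomic>
#include <cstdint>

static constexpr uint32_t UNLOCKED= 0;
static constexpr uint32_t WRITER= 1U << 31;          // assumed value
static constexpr uint32_t WRITER_WAITING= 1U << 30;  // assumed value
static constexpr uint32_t WRITER_PENDING= WRITER | WRITER_WAITING;

static std::atomic<uint32_t> word{UNLOCKED};

/* The old, greedy style: unconditionally increment, then undo on
conflict. The fetch_sub() rollback (the read_lock_yield() step) only
restores the count transiently under reader pressure. */
static bool greedy_read_trylock()
{
  if (!(word.fetch_add(1, std::memory_order_acquire) & WRITER_PENDING))
    return true;
  word.fetch_sub(1, std::memory_order_relaxed); // undo the increment
  return false;
}

/* The new, CAS style: the increment is only installed if no writer is
pending, so once WRITER_WAITING is set the reader count can only drain,
guaranteeing the writer's progress. */
static bool cas_read_trylock()
{
  uint32_t l= UNLOCKED;
  while (!word.compare_exchange_strong(l, l + 1, std::memory_order_acquire,
                                       std::memory_order_relaxed))
    if (l & WRITER_PENDING)
      return false; // back off without ever modifying the lock word
  return true;
}

int main()
{
  word.store(WRITER_WAITING);     // simulate a writer announcing itself
  bool g= greedy_read_trylock();  // fails, but bumped and rolled back
  bool c= cas_read_trylock();     // fails without touching the word
  return g || c;                  // both false: exit code 0
}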
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 9e24698d07c..2ddd9f278b1 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -281,25 +281,17 @@ the read requests for the whole area.
 #ifndef UNIV_INNOCHECKSUM
 void page_hash_latch::read_lock_wait()
 {
-  auto l= read_lock_yield();
   /* First, try busy spinning for a while. */
   for (auto spin= srv_n_spin_wait_rounds; spin--; )
   {
-    if (l & WRITER_PENDING)
-      ut_delay(srv_spin_wait_delay);
+    ut_delay(srv_spin_wait_delay);
     if (read_trylock())
       return;
-    l= read_lock_yield();
   }
   /* Fall back to yielding to other threads. */
-  for (;;)
-  {
-    if (l & WRITER_PENDING)
-      os_thread_yield();
-    if (read_trylock())
-      return;
-    l= read_lock_yield();
-  }
+  do
+    os_thread_yield();
+  while (!read_trylock());
 }
 
 void page_hash_latch::write_lock_wait()
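With read_lock_yield() gone, read_trylock() fails only while a writer is pending, so the wait loop can simply delay or yield before every retry instead of inspecting the lock word. What remains is a conventional spin-then-yield backoff. A generic sketch of the pattern follows; spin_rounds and cpu_relax() are hypothetical stand-ins for srv_n_spin_wait_rounds and ut_delay(srv_spin_wait_delay).

// Generic spin-then-yield backoff mirroring the loop above; the names
// spin_rounds and cpu_relax() are hypothetical stand-ins, not InnoDB API.
#include <thread>

static constexpr unsigned spin_rounds= 30;
static inline void cpu_relax() {} // a PAUSE-style instruction in practice

template<typename TryLock>
void lock_wait(TryLock try_lock)
{
  /* First, busy-spin for a bounded number of rounds: cheap if the
  conflicting writer releases the lock within a short time. */
  for (unsigned spin= spin_rounds; spin--; )
  {
    cpu_relax();
    if (try_lock())
      return;
  }
  /* Fall back to yielding the time slice, so the waiting thread does
  not burn CPU while the writer makes progress. */
  do
    std::this_thread::yield();
  while (!try_lock());
}

Usage would be along the lines of lock_wait([&]{ return latch.read_trylock(); }).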
diff --git a/storage/innobase/include/rw_lock.h b/storage/innobase/include/rw_lock.h
index 613adfef3f5..9fcafacc426 100644
--- a/storage/innobase/include/rw_lock.h
+++ b/storage/innobase/include/rw_lock.h
@@ -36,17 +36,24 @@ protected:
   /** Flag to indicate that write_lock() or write_lock_wait() is pending */
   static constexpr uint32_t WRITER_PENDING= WRITER | WRITER_WAITING;
 
-  /** Yield a read lock request due to a conflict with a write lock.
-  @return the lock value */
-  uint32_t read_lock_yield()
-  {
-    uint32_t l= lock.fetch_sub(1, std::memory_order_relaxed);
-    DBUG_ASSERT(l & ~WRITER_PENDING);
-    return l;
-  }
   /** Start waiting for an exclusive lock. */
   void write_lock_wait_start()
   { lock.fetch_or(WRITER_WAITING, std::memory_order_relaxed); }
+  /** Try to acquire a shared lock.
+  @param l the value of the lock word
+  @return whether the lock was acquired */
+  bool read_trylock(uint32_t &l)
+  {
+    l= UNLOCKED;
+    while (!lock.compare_exchange_strong(l, l + 1, std::memory_order_acquire,
+                                         std::memory_order_relaxed))
+    {
+      DBUG_ASSERT(!(WRITER & l) || !(~WRITER_PENDING & l));
+      if (l & WRITER_PENDING)
+        return false;
+    }
+    return true;
+  }
   /** Wait for an exclusive lock.
   @return whether the exclusive lock was acquired */
   bool write_lock_poll()
@@ -80,8 +87,7 @@ public:
   }
   /** Try to acquire a shared lock.
   @return whether the lock was acquired */
-  bool read_trylock()
-  { return !(lock.fetch_add(1, std::memory_order_acquire) & WRITER_PENDING); }
+  bool read_trylock() { uint32_t l; return read_trylock(l); }
   /** Try to acquire an exclusive lock.
   @return whether the lock was acquired */
   bool write_trylock()
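The new read_trylock() relies on a property of std::atomic: on failure, compare_exchange_strong() stores the observed value into its expected argument. The loop therefore retries with the live reader count when only other readers raced in, and gives up only when a writer bit is set; the DBUG_ASSERT encodes the invariant that WRITER is only ever set while the reader count (all bits outside WRITER_PENDING) is zero. A standalone demonstration of that compare-exchange behaviour, not InnoDB code:

// Standalone demonstration of compare_exchange_strong() failure
// semantics: "expected" is refreshed with the observed value, which is
// what lets read_trylock(uint32_t&) retry against concurrent readers.
#include <atomic>
#include <cassert>
#include <cstdint>

int main()
{
  std::atomic<uint32_t> word{2};  // pretend two readers already hold it
  uint32_t l= 0;                  // optimistic first guess: UNLOCKED
  // The first attempt fails because the word is 2, not 0 ...
  assert(!word.compare_exchange_strong(l, l + 1));
  // ... but l has been refreshed to the observed value ...
  assert(l == 2);
  // ... so the retry installs 2 + 1, joining as a third reader.
  assert(word.compare_exchange_strong(l, l + 1));
  assert(word.load() == 3);
  return 0;
}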