author     Marko Mäkelä <marko.makela@mariadb.com>    2021-09-06 12:32:24 +0300
committer  Marko Mäkelä <marko.makela@mariadb.com>    2021-09-06 12:32:24 +0300
commit     277ba134ad1c994bda50de574a87a06a071fbecb
tree       d31a06a7a8c3c4fe116664ccf7c2f6c0273e67e0  /storage/innobase/sync/srw_lock.cc
parent     0f0b7e47bc794d0af0b7f758a6fe1518b8388e3a
MDEV-26467: Avoid futile spin loops
Typically, index_lock and fil_space_t::latch will be held for a longer time than the spin loop in latch acquisition would be waiting for.

Let us avoid spin loops for those, as well as for dict_sys.latch, which could be held in exclusive mode for a longer time (while loading metadata into the buffer pool and the dictionary cache).

Performance testing on a dual Intel Xeon E5-2630 v4 (2 NUMA nodes) suggests that the buffer pool page latch (block_lock) benefits from a spin loop in both read-only and read-write workloads where the working set is slightly larger than the buffer pool. Presumably, most contention would occur on leaf page latches, while contention on upper-level pages in the buffer pool should intuitively last longer.

We introduce srw_spin_lock and srw_spin_mutex to allow users of srw_lock or srw_mutex to opt in to the spin loop.

On Microsoft Windows, a spin loop variant was not and will not be available; srw_mutex and srw_lock will simply wrap SRWLOCK. That is, on Microsoft Windows, the parameters innodb_sync_spin_loops and innodb_spin_wait_delay will only affect block_lock.
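For orientation, here is a minimal, self-contained C++ sketch of the opt-in pattern this patch applies — not the actual MariaDB implementation: a template<bool spinloop> mutex that spins for srv_n_spin_wait_rounds rounds before blocking when spinloop is true, and blocks immediately when it is false, mirroring the srw_mutex_impl<true>/<false> wait_and_lock specializations in the diff below. The names srv_n_spin_wait_rounds, srw_pause and HOLDER follow the patch; toy_srw_mutex is a hypothetical name, and the futex/WaitOnAddress wait is replaced by a portable std::this_thread::yield() stand-in.

    #include <atomic>
    #include <cstdint>
    #include <thread>

    // Stand-ins for the patch's tunables; in InnoDB these are set from
    // innodb_sync_spin_loops and innodb_spin_wait_delay.
    static const unsigned srv_n_spin_wait_rounds = 30;
    static inline void srw_pause() { /* PAUSE instructions in the real code */ }

    template<bool spinloop>
    struct toy_srw_mutex
    {
      std::atomic<uint32_t> lock{0};
      // Most significant bit marks ownership; the low bits count waiters.
      static constexpr uint32_t HOLDER = 1U << 31;

      // Portable stand-in for the futex WAIT / WaitOnAddress call.
      void wait(uint32_t) { std::this_thread::yield(); }

      void wr_lock()
      {
        // Register as a waiter, then try to set HOLDER.
        uint32_t lk = 1 + lock.fetch_add(1, std::memory_order_relaxed);

        if (spinloop) // compile-time opt-in: srw_spin_mutex=true, srw_mutex=false
          for (auto spin = srv_n_spin_wait_rounds; spin; spin--)
          {
            srw_pause();
            lk = lock.load(std::memory_order_relaxed);
            if (!(lk & HOLDER) &&
                !(lock.fetch_or(HOLDER, std::memory_order_relaxed) & HOLDER))
            {
              std::atomic_thread_fence(std::memory_order_acquire);
              return; // acquired during the spin phase
            }
          }

        for (;; wait(lk)) // slow path: block until HOLDER is cleared
        {
          if (lk & HOLDER)
          {
            lk = lock.load(std::memory_order_relaxed);
            if (lk & HOLDER)
              continue; // still held: wait() and re-check
          }
          lk = lock.fetch_or(HOLDER, std::memory_order_relaxed);
          if (!(lk & HOLDER))
          {
            std::atomic_thread_fence(std::memory_order_acquire);
            return; // we set HOLDER: lock acquired
          }
        }
      }

      void wr_unlock()
      {
        // Drop HOLDER and our waiter count; the real code wakes one
        // waiter when the remaining count is nonzero.
        lock.fetch_sub(HOLDER + 1, std::memory_order_release);
      }
    };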
Diffstat (limited to 'storage/innobase/sync/srw_lock.cc')
-rw-r--r--  storage/innobase/sync/srw_lock.cc | 166
1 file changed, 119 insertions(+), 47 deletions(-)
diff --git a/storage/innobase/sync/srw_lock.cc b/storage/innobase/sync/srw_lock.cc
index b267969c3df..4cd4b56bdd0 100644
--- a/storage/innobase/sync/srw_lock.cc
+++ b/storage/innobase/sync/srw_lock.cc
@@ -36,7 +36,8 @@ static inline void srw_pause(unsigned delay)
}
#ifdef SUX_LOCK_GENERIC
-void ssux_lock_low::init()
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::init()
{
DBUG_ASSERT(!is_locked_or_waiting());
pthread_mutex_init(&mutex, nullptr);
@@ -44,7 +45,8 @@ void ssux_lock_low::init()
pthread_cond_init(&cond_exclusive, nullptr);
}
-void ssux_lock_low::destroy()
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::destroy()
{
DBUG_ASSERT(!is_locked_or_waiting());
pthread_mutex_destroy(&mutex);
@@ -52,7 +54,8 @@ void ssux_lock_low::destroy()
pthread_cond_destroy(&cond_exclusive);
}
-inline void ssux_lock_low::writer_wait(uint32_t l)
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::writer_wait(uint32_t l)
{
pthread_mutex_lock(&mutex);
while (value() == l)
@@ -60,7 +63,8 @@ inline void ssux_lock_low::writer_wait(uint32_t l)
pthread_mutex_unlock(&mutex);
}
-inline void ssux_lock_low::readers_wait(uint32_t l)
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::readers_wait(uint32_t l)
{
pthread_mutex_lock(&mutex);
while (value() == l)
@@ -68,7 +72,8 @@ inline void ssux_lock_low::readers_wait(uint32_t l)
pthread_mutex_unlock(&mutex);
}
-inline void ssux_lock_low::wake()
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::wake()
{
pthread_mutex_lock(&mutex);
uint32_t l= value();
@@ -85,7 +90,8 @@ inline void ssux_lock_low::wake()
/** Wait for a read lock.
@param lock word value from a failed read_trylock() */
-void ssux_lock_low::read_lock(uint32_t l)
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::read_lock(uint32_t l)
{
do
{
@@ -105,16 +111,18 @@ void ssux_lock_low::read_lock(uint32_t l)
pthread_mutex_unlock(&mutex);
continue;
}
-
- const unsigned delay= srw_pause_delay();
-
- for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
+ else if (spinloop)
{
- srw_pause(delay);
- if (read_trylock<true>(l))
- return;
- else if (l == WRITER_WAITING)
- goto wake_writer;
+ const unsigned delay= srw_pause_delay();
+
+ for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
+ {
+ srw_pause(delay);
+ if (read_trylock<true>(l))
+ return;
+ else if (l == WRITER_WAITING)
+ goto wake_writer;
+ }
}
readers_wait(l);
@@ -124,7 +132,8 @@ void ssux_lock_low::read_lock(uint32_t l)
/** Wait for an update lock.
@param lock word value from a failed update_trylock() */
-void ssux_lock_low::update_lock(uint32_t l)
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::update_lock(uint32_t l)
{
do
{
@@ -144,7 +153,7 @@ void ssux_lock_low::update_lock(uint32_t l)
pthread_mutex_unlock(&mutex);
continue;
}
- else
+ else if (spinloop)
{
const unsigned delay= srw_pause_delay();
@@ -165,23 +174,12 @@ void ssux_lock_low::update_lock(uint32_t l)
/** Wait for a write lock after a failed write_trylock() or upgrade_trylock()
@param holding_u whether we already hold u_lock() */
-void ssux_lock_low::write_lock(bool holding_u)
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::write_lock(bool holding_u)
{
- const unsigned delay= srw_pause_delay();
-
for (;;)
{
uint32_t l= write_lock_wait_start();
- /* We are the first writer to be granted the lock. Spin for a while. */
- for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
- {
- l= holding_u ? WRITER_WAITING | UPDATER : WRITER_WAITING;
- if (write_lock_wait_try(l))
- return;
- if (!(l & WRITER_WAITING))
- l= write_lock_wait_start();
- srw_pause(delay);
- }
const uint32_t e= holding_u ? WRITER_WAITING | UPDATER : WRITER_WAITING;
l= e;
@@ -213,21 +211,34 @@ void ssux_lock_low::write_lock(bool holding_u)
}
}
-void ssux_lock_low::rd_unlock() { if (read_unlock()) wake(); }
-void ssux_lock_low::u_unlock() { update_unlock(); wake(); }
-void ssux_lock_low::wr_unlock() { write_unlock(); wake(); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::rd_unlock() { if (read_unlock()) wake(); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::u_unlock() { update_unlock(); wake(); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wr_unlock() { write_unlock(); wake(); }
+
+template void ssux_lock_impl<false>::init();
+template void ssux_lock_impl<false>::destroy();
+template void ssux_lock_impl<false>::rd_unlock();
+template void ssux_lock_impl<false>::u_unlock();
+template void ssux_lock_impl<false>::wr_unlock();
#else /* SUX_LOCK_GENERIC */
static_assert(4 == sizeof(rw_lock), "ABI");
# ifdef _WIN32
# include <synchapi.h>
-inline void srw_mutex::wait(uint32_t lk)
+template<bool spinloop>
+inline void srw_mutex_impl<spinloop>::wait(uint32_t lk)
{ WaitOnAddress(&lock, &lk, 4, INFINITE); }
-void srw_mutex::wake() { WakeByAddressSingle(&lock); }
+template<bool spinloop>
+void srw_mutex_impl<spinloop>::wake() { WakeByAddressSingle(&lock); }
-inline void ssux_lock_low::wait(uint32_t lk)
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::wait(uint32_t lk)
{ WaitOnAddress(&readers, &lk, 4, INFINITE); }
-void ssux_lock_low::wake() { WakeByAddressSingle(&readers); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wake() { WakeByAddressSingle(&readers); }
# else
# ifdef __linux__
@@ -244,16 +255,27 @@ void ssux_lock_low::wake() { WakeByAddressSingle(&readers); }
# error "no futex support"
# endif
-inline void srw_mutex::wait(uint32_t lk) { SRW_FUTEX(&lock, WAIT, lk); }
-void srw_mutex::wake() { SRW_FUTEX(&lock, WAKE, 1); }
+template<bool spinloop>
+inline void srw_mutex_impl<spinloop>::wait(uint32_t lk)
+{ SRW_FUTEX(&lock, WAIT, lk); }
+template<bool spinloop>
+void srw_mutex_impl<spinloop>::wake() { SRW_FUTEX(&lock, WAKE, 1); }
-inline void ssux_lock_low::wait(uint32_t lk) { SRW_FUTEX(&readers, WAIT, lk); }
-void ssux_lock_low::wake() { SRW_FUTEX(&readers, WAKE, 1); }
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::wait(uint32_t lk)
+{ SRW_FUTEX(&readers, WAIT, lk); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wake() { SRW_FUTEX(&readers, WAKE, 1); }
# endif
+template void srw_mutex_impl<false>::wake();
+template void ssux_lock_impl<false>::wake();
+template void srw_mutex_impl<true>::wake();
+template void ssux_lock_impl<true>::wake();
-void srw_mutex::wait_and_lock()
+template<>
+void srw_mutex_impl<true>::wait_and_lock()
{
uint32_t lk= 1 + lock.fetch_add(1, std::memory_order_relaxed);
@@ -295,7 +317,31 @@ acquired:
}
}
-void ssux_lock_low::wr_wait(uint32_t lk)
+template<>
+void srw_mutex_impl<false>::wait_and_lock()
+{
+ uint32_t lk= 1 + lock.fetch_add(1, std::memory_order_relaxed);
+ for (;; wait(lk))
+ {
+ if (lk & HOLDER)
+ {
+ lk= lock.load(std::memory_order_relaxed);
+ if (lk & HOLDER)
+ continue;
+ }
+ lk= lock.fetch_or(HOLDER, std::memory_order_relaxed);
+ if (!(lk & HOLDER))
+ {
+ DBUG_ASSERT(lk);
+ std::atomic_thread_fence(std::memory_order_acquire);
+ return;
+ }
+ DBUG_ASSERT(lk > HOLDER);
+ }
+}
+
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wr_wait(uint32_t lk)
{
DBUG_ASSERT(writer.is_locked());
DBUG_ASSERT(lk);
@@ -310,7 +356,11 @@ void ssux_lock_low::wr_wait(uint32_t lk)
while (lk != WRITER);
}
-void ssux_lock_low::rd_wait()
+template void ssux_lock_impl<true>::wr_wait(uint32_t);
+template void ssux_lock_impl<false>::wr_wait(uint32_t);
+
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::rd_wait()
{
for (;;)
{
@@ -329,10 +379,22 @@ void ssux_lock_low::rd_wait()
}
writer.wr_unlock();
}
+
+template void ssux_lock_impl<true>::rd_wait();
+template void ssux_lock_impl<false>::rd_wait();
#endif /* SUX_LOCK_GENERIC */
#ifdef UNIV_PFS_RWLOCK
-void srw_lock::psi_rd_lock(const char *file, unsigned line)
+# if defined _WIN32 || defined SUX_LOCK_GENERIC
+# define void_srw_lock void srw_lock_impl
+# else
+# define void_srw_lock template<bool spinloop> void srw_lock_impl<spinloop>
+template void srw_lock_impl<false>::psi_rd_lock(const char*, unsigned);
+template void srw_lock_impl<false>::psi_wr_lock(const char*, unsigned);
+template void srw_lock_impl<true>::psi_rd_lock(const char*, unsigned);
+template void srw_lock_impl<true>::psi_wr_lock(const char*, unsigned);
+# endif
+void_srw_lock::psi_rd_lock(const char *file, unsigned line)
{
PSI_rwlock_locker_state state;
const bool nowait= lock.rd_lock_try();
@@ -348,7 +410,7 @@ void srw_lock::psi_rd_lock(const char *file, unsigned line)
lock.rd_lock();
}
-void srw_lock::psi_wr_lock(const char *file, unsigned line)
+void_srw_lock::psi_wr_lock(const char *file, unsigned line)
{
PSI_rwlock_locker_state state;
const bool nowait= lock.wr_lock_try();
@@ -428,7 +490,7 @@ void ssux_lock::psi_u_wr_upgrade(const char *file, unsigned line)
DBUG_ASSERT(lock.writer.is_locked());
uint32_t lk= 1;
const bool nowait=
- lock.readers.compare_exchange_strong(lk, ssux_lock_low::WRITER,
+ lock.readers.compare_exchange_strong(lk, ssux_lock_impl<false>::WRITER,
std::memory_order_acquire,
std::memory_order_relaxed);
if (PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
@@ -444,4 +506,14 @@ void ssux_lock::psi_u_wr_upgrade(const char *file, unsigned line)
else if (!nowait)
lock.u_wr_upgrade();
}
+#else /* UNIV_PFS_RWLOCK */
+template void ssux_lock_impl<false>::rd_lock();
+# ifdef SUX_LOCK_GENERIC
+template void ssux_lock_impl<false>::write_lock(bool);
+template void ssux_lock_impl<false>::update_lock(uint32_t);
+# else
+template void ssux_lock_impl<false>::rd_unlock();
+template void ssux_lock_impl<false>::u_unlock();
+template void ssux_lock_impl<false>::wr_unlock();
+# endif
#endif /* UNIV_PFS_RWLOCK */
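
As a usage note, the per-latch choice the commit message describes can be exercised with the toy_srw_mutex sketch shown after the commit message above (compile this harness together with that sketch). The latch names here are illustrative, not part of the patch: the spinning variant stands in for block_lock, the non-spinning one for index_lock, fil_space_t::latch and dict_sys.latch.

    #include <thread>
    #include <vector>

    int main()
    {
      toy_srw_mutex<true>  leaf_latch;   // analogue of srw_spin_mutex (block_lock)
      toy_srw_mutex<false> index_latch;  // analogue of srw_mutex (index_lock etc.)
      long counter = 0;

      std::vector<std::thread> workers;
      for (int i = 0; i < 4; i++)
        workers.emplace_back([&] {
          for (int j = 0; j < 100000; j++)
          {
            leaf_latch.wr_lock();   // short critical section: spinning can pay off
            ++counter;
            leaf_latch.wr_unlock();
          }
          index_latch.wr_lock();    // long-held latch: block immediately instead
          index_latch.wr_unlock();
        });
      for (auto &t : workers)
        t.join();
      return counter == 400000 ? 0 : 1;
    }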