summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMarko Mäkelä <marko.makela@mariadb.com>2021-09-28 08:43:56 +0300
committerMarko Mäkelä <marko.makela@mariadb.com>2021-09-28 09:04:58 +0300
commita5a5833764b4459945902fd14facd0fa03208a29 (patch)
tree82dce016e6a2bb6b6d0be80a40f7f0f698745a96
parent2c4e103d45279f4fcdca78ccdc9a77f235bfad68 (diff)
downloadmariadb-git-a5a5833764b4459945902fd14facd0fa03208a29.tar.gz
fixup! 5ed6c49e84adba6a0fdace8103596b7a90c45691 non-x86 friendliness
Let us prefer fetch_or() to compare_exchange_weak() on ISAs that can support it efficiently. Even on IA-32 and AMD64 it would be possible to emit LOCK BTS, but unfortunately contemporary compilers do not support that.
-rw-r--r--storage/innobase/sync/srw_lock.cc27
1 file changed, 27 insertions, 0 deletions
diff --git a/storage/innobase/sync/srw_lock.cc b/storage/innobase/sync/srw_lock.cc
index 926787c0052..c443bd61905 100644
--- a/storage/innobase/sync/srw_lock.cc
+++ b/storage/innobase/sync/srw_lock.cc
@@ -294,10 +294,16 @@ void srw_mutex_impl<true>::wait_and_lock()
DBUG_ASSERT(~HOLDER & lk);
if (lk & HOLDER)
lk= lock.load(std::memory_order_relaxed);
+#if defined _WIN32 || defined __i386__ || defined __x86_64__
else if (lock.compare_exchange_weak(lk, lk | HOLDER,
std::memory_order_acquire,
std::memory_order_relaxed))
return;
+#else
+ else if (!((lk= lock.fetch_or(HOLDER, std::memory_order_relaxed)) &
+ HOLDER))
+ goto acquired;
+#endif
else
srw_pause(delay);
if (!--spin)
@@ -312,10 +318,21 @@ void srw_mutex_impl<true>::wait_and_lock()
wait(lk);
lk= lock.load(std::memory_order_relaxed);
}
+#if defined _WIN32 || defined __i386__ || defined __x86_64__
else if (lock.compare_exchange_weak(lk, lk | HOLDER,
std::memory_order_acquire,
std::memory_order_relaxed))
return;
+#else
+ else if (!((lk= lock.fetch_or(HOLDER, std::memory_order_relaxed)) &
+ HOLDER))
+ {
+acquired:
+ DBUG_ASSERT(lk);
+ std::atomic_thread_fence(std::memory_order_acquire);
+ return;
+ }
+#endif
}
}
@@ -330,10 +347,20 @@ void srw_mutex_impl<false>::wait_and_lock()
wait(lk);
lk= lock.load(std::memory_order_relaxed);
}
+#if defined _WIN32 || defined __i386__ || defined __x86_64__
else if (lock.compare_exchange_weak(lk, lk | HOLDER,
std::memory_order_acquire,
std::memory_order_relaxed))
return;
+#else
+ else if (!((lk= lock.fetch_or(HOLDER, std::memory_order_relaxed)) &
+ HOLDER))
+ {
+ DBUG_ASSERT(lk);
+ std::atomic_thread_fence(std::memory_order_acquire);
+ return;
+ }
+#endif
}
}