author      Sergey Vojtovich <svoj@mariadb.org>    2018-10-14 13:44:49 +0400
committer   Sergey Vojtovich <svoj@mariadb.org>    2018-12-27 22:46:38 +0400
commit      9581c4a8f5d6c87a6d34c6d3b826c32bf7f15143 (patch)
tree        ab8442121a7896d0d757bc95ec96b28ab9fb4995 /storage/innobase/include/sync0rw.ic
parent      4404ee29010efe02292373411991e4433a9e57bd (diff)
download    mariadb-git-9581c4a8f5d6c87a6d34c6d3b826c32bf7f15143.tar.gz
MDEV-17441 - InnoDB transition to C++11 atomics
Almost trivial rw_lock_t::lock_word transition. Since C++11 doesn't seem to allow mixed (atomic and non-atomic) access to atomic variables, we have to perform atomic initialisation.

Also made previously broken code in gis0sea.cc even more broken. It is unclear how it was supposed to work and what exactly it was supposed to do.
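For illustration, a minimal sketch of the pattern this commit moves to, assuming a simplified lock structure (the struct name, member layout and X_LOCK_DECR value below are illustrative, not the actual definitions from sync0rw.h): the lock word becomes std::atomic<int32_t>, so initialisation also has to go through the atomic API, and my_atomic_cas32_strong_explicit() maps onto compare_exchange_strong() with the same success/failure memory orders.

#include <atomic>
#include <cstdint>

/* Illustrative value only; the real constant is defined in sync0rw.h. */
static const int32_t X_LOCK_DECR = 0x20000000;

struct rw_lock_sketch {
	/* Formerly a plain int32_t accessed via my_atomic_*(); once it is
	std::atomic, every access, including initialisation, must be atomic. */
	std::atomic<int32_t>	lock_word;
};

/* Atomic initialisation: a relaxed store replaces plain assignment. */
inline void rw_lock_sketch_create(rw_lock_sketch* lock)
{
	lock->lock_word.store(X_LOCK_DECR, std::memory_order_relaxed);
}

/* my_atomic_cas32_strong_explicit(&w, &expected, desired, acq, rlx) maps
onto w.compare_exchange_strong(expected, desired, acq, rlx); on failure the
current value is written back into `expected`, exactly as before. */
inline bool rw_lock_sketch_x_lock_nowait(rw_lock_sketch* lock)
{
	int32_t expected = X_LOCK_DECR;
	return lock->lock_word.compare_exchange_strong(
		expected, 0,
		std::memory_order_acquire,
		std::memory_order_relaxed);
}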
Diffstat (limited to 'storage/innobase/include/sync0rw.ic')
-rw-r--r--    storage/innobase/include/sync0rw.ic    75
1 file changed, 35 insertions, 40 deletions
diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic
index f0c33ecbeda..fc24812ab5b 100644
--- a/storage/innobase/include/sync0rw.ic
+++ b/storage/innobase/include/sync0rw.ic
@@ -77,8 +77,7 @@ rw_lock_get_writer(
/*===============*/
const rw_lock_t* lock) /*!< in: rw-lock */
{
- int32_t lock_word = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(lock_word <= X_LOCK_DECR);
if (lock_word > X_LOCK_HALF_DECR) {
@@ -110,8 +109,7 @@ rw_lock_get_reader_count(
/*=====================*/
const rw_lock_t* lock) /*!< in: rw-lock */
{
- int32_t lock_word = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(lock_word <= X_LOCK_DECR);
if (lock_word > X_LOCK_HALF_DECR) {
@@ -147,8 +145,7 @@ rw_lock_get_x_lock_count(
/*=====================*/
const rw_lock_t* lock) /*!< in: rw-lock */
{
- int32_t lock_copy = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(lock_copy <= X_LOCK_DECR);
if (lock_copy == 0 || lock_copy == -X_LOCK_HALF_DECR) {
@@ -181,8 +178,7 @@ rw_lock_get_sx_lock_count(
const rw_lock_t* lock) /*!< in: rw-lock */
{
#ifdef UNIV_DEBUG
- int32_t lock_copy = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(lock_copy <= X_LOCK_DECR);
@@ -213,14 +209,15 @@ rw_lock_lock_word_decr(
int32_t amount, /*!< in: amount to decrement */
int32_t threshold) /*!< in: threshold of judgement */
{
- int32_t lock_copy = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
+
while (lock_copy > threshold) {
- if (my_atomic_cas32_strong_explicit(&lock->lock_word,
- &lock_copy,
- lock_copy - amount,
- MY_MEMORY_ORDER_ACQUIRE,
- MY_MEMORY_ORDER_RELAXED)) {
+ if (lock->lock_word.compare_exchange_strong(
+ lock_copy,
+ lock_copy - amount,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+
return(true);
}
}
@@ -304,9 +301,9 @@ rw_lock_x_lock_func_nowait(
{
int32_t oldval = X_LOCK_DECR;
- if (my_atomic_cas32_strong_explicit(&lock->lock_word, &oldval, 0,
- MY_MEMORY_ORDER_ACQUIRE,
- MY_MEMORY_ORDER_RELAXED)) {
+ if (lock->lock_word.compare_exchange_strong(oldval, 0,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
lock->writer_thread = os_thread_get_curr_id();
} else if (os_thread_eq(lock->writer_thread, os_thread_get_curr_id())) {
@@ -316,12 +313,12 @@ rw_lock_x_lock_func_nowait(
observe consistent values. */
if (oldval == 0 || oldval == -X_LOCK_HALF_DECR) {
/* There are 1 x-locks */
- my_atomic_add32_explicit(&lock->lock_word, -X_LOCK_DECR,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_sub(X_LOCK_DECR,
+ std::memory_order_relaxed);
} else if (oldval <= -X_LOCK_DECR) {
/* There are 2 or more x-locks */
- my_atomic_add32_explicit(&lock->lock_word, -1,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_sub(1,
+ std::memory_order_relaxed);
/* Watch for too many recursive locks */
ut_ad(oldval < 1);
} else {
@@ -356,8 +353,7 @@ rw_lock_s_unlock_func(
rw_lock_t* lock) /*!< in/out: rw-lock */
{
#ifdef UNIV_DEBUG
- int32_t dbg_lock_word = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto dbg_lock_word = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(dbg_lock_word > -X_LOCK_DECR);
ut_ad(dbg_lock_word != 0);
ut_ad(dbg_lock_word < X_LOCK_DECR);
@@ -366,8 +362,8 @@ rw_lock_s_unlock_func(
ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_S));
/* Increment lock_word to indicate 1 less reader */
- int32_t lock_word = my_atomic_add32_explicit(&lock->lock_word, 1,
- MY_MEMORY_ORDER_RELEASE) + 1;
+ auto lock_word = lock->lock_word.fetch_add(
+ 1, std::memory_order_release) + 1;
if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
/* wait_ex waiter exists. It may not be asleep, but we signal
@@ -393,8 +389,7 @@ rw_lock_x_unlock_func(
#endif /* UNIV_DEBUG */
rw_lock_t* lock) /*!< in/out: rw-lock */
{
- int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(lock_word == 0 || lock_word == -X_LOCK_HALF_DECR
|| lock_word <= -X_LOCK_DECR);
@@ -411,8 +406,8 @@ rw_lock_x_unlock_func(
ACQ_REL due to...
RELEASE: we release rw-lock
ACQUIRE: we want waiters to be loaded after lock_word is stored */
- my_atomic_add32_explicit(&lock->lock_word, X_LOCK_DECR,
- MY_MEMORY_ORDER_ACQ_REL);
+ lock->lock_word.fetch_add(X_LOCK_DECR,
+ std::memory_order_acq_rel);
/* This no longer has an X-lock but it may still have
an SX-lock. So it is now free for S-locks by other threads.
@@ -429,13 +424,13 @@ rw_lock_x_unlock_func(
} else if (lock_word == -X_LOCK_DECR
|| lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
/* There are 2 x-locks */
- my_atomic_add32_explicit(&lock->lock_word, X_LOCK_DECR,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_add(X_LOCK_DECR,
+ std::memory_order_relaxed);
} else {
/* There are more than 2 x-locks. */
ut_ad(lock_word < -X_LOCK_DECR);
- my_atomic_add32_explicit(&lock->lock_word, 1,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_add(1,
+ std::memory_order_relaxed);
}
ut_ad(rw_lock_validate(lock));
@@ -461,8 +456,8 @@ rw_lock_sx_unlock_func(
ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_SX));
if (lock->sx_recursive == 0) {
- int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word =
+ lock->lock_word.load(std::memory_order_relaxed);
/* Last caller in a possible recursive chain. */
if (lock_word > 0) {
lock->writer_thread = 0;
@@ -472,8 +467,8 @@ rw_lock_sx_unlock_func(
ACQ_REL due to...
RELEASE: we release rw-lock
ACQUIRE: we want waiters to be loaded after lock_word is stored */
- my_atomic_add32_explicit(&lock->lock_word, X_LOCK_HALF_DECR,
- MY_MEMORY_ORDER_ACQ_REL);
+ lock->lock_word.fetch_add(X_LOCK_HALF_DECR,
+ std::memory_order_acq_rel);
/* Lock is now free. May have to signal read/write
waiters. We do not need to signal wait_ex waiters,
@@ -490,8 +485,8 @@ rw_lock_sx_unlock_func(
/* still has x-lock */
ut_ad(lock_word == -X_LOCK_HALF_DECR ||
lock_word <= -(X_LOCK_DECR + X_LOCK_HALF_DECR));
- my_atomic_add32_explicit(&lock->lock_word, X_LOCK_HALF_DECR,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_add(X_LOCK_HALF_DECR,
+ std::memory_order_relaxed);
}
}