-rw-r--r--  storage/innobase/include/os0sync.h     |  75
-rw-r--r--  storage/innobase/include/os0sync.ic    |  32
-rw-r--r--  storage/innobase/include/sync0sync.h   |   2
-rw-r--r--  storage/innobase/include/sync0sync.ic  |  18
-rw-r--r--  storage/innobase/os/os0sync.cc         |  19
-rw-r--r--  storage/innobase/sync/sync0sync.cc     |   2
-rw-r--r--  storage/xtradb/include/os0sync.h       |  75
-rw-r--r--  storage/xtradb/include/os0sync.ic      |  31
-rw-r--r--  storage/xtradb/include/sync0sync.h     |   2
-rw-r--r--  storage/xtradb/include/sync0sync.ic    |  18
-rw-r--r--  storage/xtradb/os/os0sync.cc           |  19
-rw-r--r--  storage/xtradb/sync/sync0sync.cc       |   2
12 files changed, 219 insertions(+), 76 deletions(-)
diff --git a/storage/innobase/include/os0sync.h b/storage/innobase/include/os0sync.h
index 8bf57677ecf..b16a99b51c0 100644
--- a/storage/innobase/include/os0sync.h
+++ b/storage/innobase/include/os0sync.h
@@ -322,12 +322,29 @@ pfs_os_fast_mutex_unlock(
#endif /* UNIV_PFS_MUTEX */
/**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return 0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+ os_fast_mutex_t* fast_mutex); /*!< in: mutex to acquire */
+/**********************************************************//**
Releases ownership of a fast mutex. */
UNIV_INTERN
void
os_fast_mutex_unlock_func(
/*======================*/
fast_mutex_t* fast_mutex); /*!< in: mutex to release */
+/**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
+/*=================*/
+ os_fast_mutex_t* fast_mutex); /*!< in: mutex to release */
/*********************************************************//**
Initializes an operating system fast mutex semaphore. */
UNIV_INTERN
@@ -432,14 +449,31 @@ amount to decrement. */
/**********************************************************//**
Returns the old value of *ptr, atomically sets *ptr to new_val */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
- __sync_lock_test_and_set(ptr, (byte) new_val)
-
# define os_atomic_test_and_set_ulint(ptr, new_val) \
__sync_lock_test_and_set(ptr, new_val)
-# define os_atomic_lock_release_byte(ptr) \
- __sync_lock_release(ptr)
+#ifdef __powerpc__
+/*
+ os_atomic_test_and_set_byte_release() should imply a release barrier before
+ setting, and a full barrier after. But __sync_lock_test_and_set() is only
+ documented as an acquire barrier. So on PowerPC we need to add the full
+ barrier explicitly. */
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+ do { __sync_lock_release(ptr); \
+ __sync_synchronize(); } while (0)
+#else
+/*
+ On x86, __sync_lock_test_and_set() happens to be a full barrier, due to
+ the LOCK prefix.
+*/
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+ __sync_lock_test_and_set(ptr, (byte) new_val)
+#endif
+/*
+ os_atomic_test_and_set_byte_acquire() is a full memory barrier on x86. But
+ in general, just an acquire barrier should be sufficient. */
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+ __sync_lock_test_and_set(ptr, (byte) new_val)
#elif defined(HAVE_IB_SOLARIS_ATOMICS)
@@ -517,14 +551,14 @@ amount to decrement. */
/**********************************************************//**
Returns the old value of *ptr, atomically sets *ptr to new_val */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
- atomic_swap_uchar(ptr, new_val)
-
# define os_atomic_test_and_set_ulint(ptr, new_val) \
atomic_swap_ulong(ptr, new_val)
-# define os_atomic_lock_release_byte(ptr) \
- (void) atomic_swap_uchar(ptr, 0)
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+ atomic_swap_uchar(ptr, new_val)
+
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+ atomic_swap_uchar(ptr, new_val)
#elif defined(HAVE_WINDOWS_ATOMICS)
@@ -644,7 +678,9 @@ Returns the old value of *ptr, atomically sets *ptr to new_val.
InterlockedExchange() operates on LONG, and the LONG will be
clobbered */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+ ((byte) InterlockedExchange(ptr, new_val))
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
((byte) InterlockedExchange(ptr, new_val))
# define os_atomic_test_and_set_ulong(ptr, new_val) \
@@ -713,11 +749,7 @@ architecture. Disable memory barrier for Intel architecture for now. */
# define HAVE_MEMORY_BARRIER
# define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE)
# define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE)
-#ifdef __powerpc__
-# define os_isync __asm __volatile ("isync":::"memory")
-#else
-#define os_isync do { } while(0)
-#endif
+# define os_mb __atomic_thread_fence(__ATOMIC_SEQ_CST)
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"GCC builtin __atomic_thread_fence() is used for memory barrier"
@@ -726,7 +758,7 @@ architecture. Disable memory barrier for Intel architecture for now. */
# define HAVE_MEMORY_BARRIER
# define os_rmb __sync_synchronize()
# define os_wmb __sync_synchronize()
-# define os_isync __sync_synchronize()
+# define os_mb __sync_synchronize()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"GCC builtin __sync_synchronize() is used for memory barrier"
@@ -735,7 +767,7 @@ architecture. Disable memory barrier for Intel architecture for now. */
# include <mbarrier.h>
# define os_rmb __machine_r_barrier()
# define os_wmb __machine_w_barrier()
-# define os_isync os_rmb; os_wmb
+# define os_mb __machine_rw_barrier()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"Solaris memory ordering functions are used for memory barrier"
@@ -744,17 +776,14 @@ architecture. Disable memory barrier for Intel architecture for now. */
# include <intrin.h>
# define os_rmb _mm_lfence()
# define os_wmb _mm_sfence()
-# define os_isync os_rmb; os_wmb
+# define os_mb _mm_mfence()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"_mm_lfence() and _mm_sfence() are used for memory barrier"
-# define os_atomic_lock_release_byte(ptr) \
- (void) InterlockedExchange(ptr, 0)
-
#else
# define os_rmb do { } while(0)
# define os_wmb do { } while(0)
-# define os_isync do { } while(0)
+# define os_mb do { } while(0)
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"Memory barrier is not used"
#endif
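
The hunks above split the old os_atomic_test_and_set_byte()/os_atomic_lock_release_byte() pair into explicit acquire and release variants and turn os_isync into a true full barrier (os_mb). A minimal standalone sketch of the resulting byte test-and-set semantics, using only the GCC __sync builtins the patch itself relies on; tas_byte_acquire(), tas_byte_release() and the demo in main() are illustrative names, not InnoDB code:

#include <stdio.h>

typedef unsigned char byte;

/* Acquire: __sync_lock_test_and_set() is documented as an acquire
barrier; on x86 it happens to be a full barrier (LOCK XCHG). */
static inline byte
tas_byte_acquire(volatile byte* ptr, byte new_val)
{
	return(__sync_lock_test_and_set(ptr, new_val));
}

/* Release: on weakly ordered CPUs such as PowerPC the patch adds an
explicit full fence after the release; elsewhere the atomic swap is
already conservative enough. */
static inline void
tas_byte_release(volatile byte* ptr)
{
#ifdef __powerpc__
	__sync_lock_release(ptr);	/* store 0 with release semantics */
	__sync_synchronize();		/* full memory barrier */
#else
	(void) __sync_lock_test_and_set(ptr, 0);
#endif
}

int
main(void)
{
	volatile byte	lock_word = 0;

	if (tas_byte_acquire(&lock_word, 1) == 0) {
		/* critical section */
		tas_byte_release(&lock_word);
	}
	printf("lock_word = %u\n", (unsigned) lock_word);
	return(0);
}
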
diff --git a/storage/innobase/include/os0sync.ic b/storage/innobase/include/os0sync.ic
index 9a7e520ece6..4ebf84dba98 100644
--- a/storage/innobase/include/os0sync.ic
+++ b/storage/innobase/include/os0sync.ic
@@ -232,3 +232,35 @@ win_cmp_and_xchg_dword(
#endif /* HAVE_WINDOWS_ATOMICS */
+
+/**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return 0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+ os_fast_mutex_t* fast_mutex) /*!< in: mutex to acquire */
+{
+#ifdef __WIN__
+ if (TryEnterCriticalSection(&fast_mutex->mutex)) {
+
+ return(0);
+ } else {
+
+ return(1);
+ }
+#else
+ /* NOTE that the MySQL my_pthread.h redefines pthread_mutex_trylock
+ so that it returns 0 on success. In the operating system
+ libraries, HP-UX-10.20 follows the old Posix 1003.4a Draft 4 and
+ returns 1 on success (but MySQL remaps that to 0), while Linux,
+ FreeBSD, Solaris, AIX, Tru64 Unix, HP-UX-11.0 return 0 on success. */
+
+#ifdef __powerpc__
+ os_mb;
+#endif
+ return((ulint) pthread_mutex_trylock(&fast_mutex->mutex));
+#endif
+}
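
The fallback path above can be reproduced with plain pthreads: a full fence before pthread_mutex_trylock() and one after pthread_mutex_unlock() on PowerPC, where the pthread calls by themselves are not guaranteed to be full barriers (on x86 the patch assumes they are, via locked instructions). A hedged standalone sketch; FULL_BARRIER, trylock_full_barrier() and unlock_full_barrier() are illustrative names, not the InnoDB wrappers:

#include <pthread.h>
#include <stdio.h>

#ifdef __powerpc__
# define FULL_BARRIER() __sync_synchronize()
#else
# define FULL_BARRIER() do { } while (0)	/* implied by locked ops on x86 */
#endif

static pthread_mutex_t	mtx = PTHREAD_MUTEX_INITIALIZER;

/* Full barrier, then trylock; returns 0 on success like the wrapper. */
static int
trylock_full_barrier(pthread_mutex_t* m)
{
	FULL_BARRIER();
	return(pthread_mutex_trylock(m));
}

/* Unlock, then full barrier, mirroring os_fast_mutex_unlock_full_barrier(). */
static void
unlock_full_barrier(pthread_mutex_t* m)
{
	pthread_mutex_unlock(m);
	FULL_BARRIER();
}

int
main(void)
{
	if (trylock_full_barrier(&mtx) == 0) {
		puts("acquired");
		unlock_full_barrier(&mtx);
	}
	return(0);
}
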
diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h
index 82fb353a41b..7b00e16476b 100644
--- a/storage/innobase/include/sync0sync.h
+++ b/storage/innobase/include/sync0sync.h
@@ -49,8 +49,6 @@ extern "C" my_bool timed_mutexes;
#ifdef HAVE_WINDOWS_ATOMICS
typedef LONG lock_word_t; /*!< On Windows, InterlockedExchange operates
on LONG variable */
-#elif defined(HAVE_ATOMIC_BUILTINS) && !defined(HAVE_ATOMIC_BUILTINS_BYTE)
-typedef ulint lock_word_t;
#else
typedef byte lock_word_t;
#endif
diff --git a/storage/innobase/include/sync0sync.ic b/storage/innobase/include/sync0sync.ic
index 687f7ee5142..97ec63c0dd2 100644
--- a/storage/innobase/include/sync0sync.ic
+++ b/storage/innobase/include/sync0sync.ic
@@ -80,15 +80,11 @@ ib_mutex_test_and_set(
ib_mutex_t* mutex) /*!< in: mutex */
{
#if defined(HAVE_ATOMIC_BUILTINS)
-# if defined(HAVE_ATOMIC_BUILTINS_BYTE)
- return(os_atomic_test_and_set_byte(&mutex->lock_word, 1));
-# else
- return(os_atomic_test_and_set_ulint(&mutex->lock_word, 1));
-# endif
+ return(os_atomic_test_and_set_byte_acquire(&mutex->lock_word, 1));
#else
ibool ret;
- ret = os_fast_mutex_trylock(&(mutex->os_fast_mutex));
+ ret = os_fast_mutex_trylock_full_barrier(&(mutex->os_fast_mutex));
if (ret == 0) {
/* We check that os_fast_mutex_trylock does not leak
@@ -96,7 +92,6 @@ ib_mutex_test_and_set(
ut_a(mutex->lock_word == 0);
mutex->lock_word = 1;
- os_wmb;
}
return((byte) ret);
@@ -113,11 +108,14 @@ mutex_reset_lock_word(
ib_mutex_t* mutex) /*!< in: mutex */
{
#if defined(HAVE_ATOMIC_BUILTINS)
- os_atomic_lock_release_byte(&mutex->lock_word);
+ /* In theory __sync_lock_release should be used to release the lock.
+ Unfortunately, it does not work properly alone. The workaround is
+ to use the more conservative __sync_lock_test_and_set() instead. */
+ os_atomic_test_and_set_byte_release(&mutex->lock_word, 0);
#else
mutex->lock_word = 0;
- os_fast_mutex_unlock(&(mutex->os_fast_mutex));
+ os_fast_mutex_unlock_full_barrier(&(mutex->os_fast_mutex));
#endif
}
@@ -149,7 +147,6 @@ mutex_get_waiters(
ptr = &(mutex->waiters);
- os_rmb;
return(*ptr); /* Here we assume that the read of a single
word from memory is atomic */
}
@@ -184,7 +181,6 @@ mutex_exit_func(
to wake up possible hanging threads if
they are missed in mutex_signal_object. */
- os_isync;
if (mutex_get_waiters(mutex) != 0) {
mutex_signal_object(mutex);
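
The correctness argument behind this hunk is the exit path: mutex_exit_func() must make its store to lock_word globally visible before it loads waiters, or a thread that sets waiters = 1 and then re-checks lock_word can be missed by both sides (the classic store-load race). Only a full barrier orders a store before a later load; acquire/release fences do not, which is presumably why os_isync can be dropped once the release itself carries a full barrier. A standalone sketch of the exit path under that reading; mutex_exit_sketch() is illustrative, not the InnoDB function:

#include <stdio.h>

typedef unsigned char byte;

static volatile byte	lock_word = 1;	/* mutex currently held */
static volatile byte	waiters = 0;	/* set by blocked threads */

static void
mutex_exit_sketch(void)
{
	/* Release lock_word with a full barrier, as
	os_atomic_test_and_set_byte_release() now expands. */
#ifdef __powerpc__
	__sync_lock_release(&lock_word);
	__sync_synchronize();
#else
	(void) __sync_lock_test_and_set(&lock_word, 0);
#endif
	/* The store above is ordered before this load, so no
	separate os_isync is needed. */
	if (waiters != 0) {
		puts("wake up waiting threads");
	}
}

int
main(void)
{
	waiters = 1;
	mutex_exit_sketch();
	return(0);
}
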
diff --git a/storage/innobase/os/os0sync.cc b/storage/innobase/os/os0sync.cc
index 779152a3a56..451ba5285e3 100644
--- a/storage/innobase/os/os0sync.cc
+++ b/storage/innobase/os/os0sync.cc
@@ -890,6 +890,25 @@ os_fast_mutex_unlock_func(
}
/**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
+/*=================*/
+ os_fast_mutex_t* fast_mutex) /*!< in: mutex to release */
+{
+#ifdef __WIN__
+ LeaveCriticalSection(&fast_mutex->mutex);
+#else
+ pthread_mutex_unlock(&fast_mutex->mutex);
+#ifdef __powerpc__
+ os_mb;
+#endif
+#endif
+}
+
+/**********************************************************//**
Frees a mutex object. */
UNIV_INTERN
void
diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc
index 6494191bfe9..2729bc39228 100644
--- a/storage/innobase/sync/sync0sync.cc
+++ b/storage/innobase/sync/sync0sync.cc
@@ -457,8 +457,6 @@ mutex_set_waiters(
ptr = &(mutex->waiters);
- os_wmb;
-
*ptr = n; /* Here we assume that the write of a single
word in memory is atomic */
}
diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h
index 066fd34d668..f968de7c6dd 100644
--- a/storage/xtradb/include/os0sync.h
+++ b/storage/xtradb/include/os0sync.h
@@ -322,12 +322,29 @@ pfs_os_fast_mutex_unlock(
#endif /* UNIV_PFS_MUTEX */
/**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return 0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+ os_fast_mutex_t* fast_mutex); /*!< in: mutex to acquire */
+/**********************************************************//**
Releases ownership of a fast mutex. */
UNIV_INTERN
void
os_fast_mutex_unlock_func(
/*======================*/
fast_mutex_t* fast_mutex); /*!< in: mutex to release */
+/**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
+/*=================*/
+ os_fast_mutex_t* fast_mutex); /*!< in: mutex to release */
/*********************************************************//**
Initializes an operating system fast mutex semaphore. */
UNIV_INTERN
@@ -432,14 +449,31 @@ amount to decrement. */
/**********************************************************//**
Returns the old value of *ptr, atomically sets *ptr to new_val */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
- __sync_lock_test_and_set(ptr, (byte) new_val)
-
# define os_atomic_test_and_set_ulint(ptr, new_val) \
__sync_lock_test_and_set(ptr, new_val)
-# define os_atomic_lock_release_byte(ptr) \
- __sync_lock_release(ptr)
+#ifdef __powerpc__
+/*
+ os_atomic_test_and_set_byte_release() should imply a release barrier before
+ setting, and a full barrier after. But __sync_lock_test_and_set() is only
+ documented as an acquire barrier. So on PowerPC we need to add the full
+ barrier explicitly. */
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+ do { __sync_lock_release(ptr); \
+ __sync_synchronize(); } while (0)
+#else
+/*
+ On x86, __sync_lock_test_and_set() happens to be a full barrier, due to
+ the LOCK prefix.
+*/
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+ __sync_lock_test_and_set(ptr, (byte) new_val)
+#endif
+/*
+ os_atomic_test_and_set_byte_acquire() is a full memory barrier on x86. But
+ in general, just an acquire barrier should be sufficient. */
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+ __sync_lock_test_and_set(ptr, (byte) new_val)
#elif defined(HAVE_IB_SOLARIS_ATOMICS)
@@ -517,14 +551,14 @@ amount to decrement. */
/**********************************************************//**
Returns the old value of *ptr, atomically sets *ptr to new_val */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
- atomic_swap_uchar(ptr, new_val)
-
# define os_atomic_test_and_set_ulint(ptr, new_val) \
atomic_swap_ulong(ptr, new_val)
-# define os_atomic_lock_release_byte(ptr) \
- (void) atomic_swap_uchar(ptr, 0)
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+ atomic_swap_uchar(ptr, new_val)
+
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+ atomic_swap_uchar(ptr, new_val)
#elif defined(HAVE_WINDOWS_ATOMICS)
@@ -644,7 +678,9 @@ Returns the old value of *ptr, atomically sets *ptr to new_val.
InterlockedExchange() operates on LONG, and the LONG will be
clobbered */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+ ((byte) InterlockedExchange(ptr, new_val))
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
((byte) InterlockedExchange(ptr, new_val))
# define os_atomic_test_and_set_ulong(ptr, new_val) \
@@ -713,11 +749,7 @@ architecture. Disable memory barrier for Intel architecture for now. */
# define HAVE_MEMORY_BARRIER
# define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE)
# define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE)
-#ifdef __powerpc__
-# define os_isync __asm __volatile ("isync":::"memory")
-#else
-#define os_isync do { } while(0)
-#endif
+# define os_mb __atomic_thread_fence(__ATOMIC_SEQ_CST)
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"GCC builtin __atomic_thread_fence() is used for memory barrier"
@@ -726,7 +758,7 @@ architecture. Disable memory barrier for Intel architecture for now. */
# define HAVE_MEMORY_BARRIER
# define os_rmb __sync_synchronize()
# define os_wmb __sync_synchronize()
-# define os_isync __sync_synchronize()
+# define os_mb __sync_synchronize()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"GCC builtin __sync_synchronize() is used for memory barrier"
@@ -735,7 +767,7 @@ architecture. Disable memory barrier for Intel architecture for now. */
# include <mbarrier.h>
# define os_rmb __machine_r_barrier()
# define os_wmb __machine_w_barrier()
-# define os_isync os_rmb; os_wmb
+# define os_mb __machine_rw_barrier()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"Solaris memory ordering functions are used for memory barrier"
@@ -744,17 +776,14 @@ architecture. Disable memory barrier for Intel architecture for now. */
# include <intrin.h>
# define os_rmb _mm_lfence()
# define os_wmb _mm_sfence()
-# define os_isync os_rmb; os_wmb
+# define os_mb _mm_mfence()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"_mm_lfence() and _mm_sfence() are used for memory barrier"
-# define os_atomic_lock_release_byte(ptr) \
- (void) InterlockedExchange(ptr, 0)
-
#else
# define os_rmb do { } while(0)
# define os_wmb do { } while(0)
-# define os_isync do { } while(0)
+# define os_mb do { } while(0)
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"Memory barrier is not used"
#endif
diff --git a/storage/xtradb/include/os0sync.ic b/storage/xtradb/include/os0sync.ic
index 9a7e520ece6..5f4b0d24089 100644
--- a/storage/xtradb/include/os0sync.ic
+++ b/storage/xtradb/include/os0sync.ic
@@ -232,3 +232,34 @@ win_cmp_and_xchg_dword(
#endif /* HAVE_WINDOWS_ATOMICS */
+/**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return 0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+ os_fast_mutex_t* fast_mutex) /*!< in: mutex to acquire */
+{
+#ifdef __WIN__
+ if (TryEnterCriticalSection(&fast_mutex->mutex)) {
+
+ return(0);
+ } else {
+
+ return(1);
+ }
+#else
+ /* NOTE that the MySQL my_pthread.h redefines pthread_mutex_trylock
+ so that it returns 0 on success. In the operating system
+ libraries, HP-UX-10.20 follows the old Posix 1003.4a Draft 4 and
+ returns 1 on success (but MySQL remaps that to 0), while Linux,
+ FreeBSD, Solaris, AIX, Tru64 Unix, HP-UX-11.0 return 0 on success. */
+
+#ifdef __powerpc__
+ os_mb;
+#endif
+ return((ulint) pthread_mutex_trylock(&fast_mutex->mutex));
+#endif
+}
diff --git a/storage/xtradb/include/sync0sync.h b/storage/xtradb/include/sync0sync.h
index 8c5dde4c142..788f765f919 100644
--- a/storage/xtradb/include/sync0sync.h
+++ b/storage/xtradb/include/sync0sync.h
@@ -50,8 +50,6 @@ extern "C" my_bool timed_mutexes;
#ifdef _WIN32
typedef LONG lock_word_t; /*!< On Windows, InterlockedExchange operates
on LONG variable */
-#elif defined(HAVE_ATOMIC_BUILTINS) && !defined(HAVE_ATOMIC_BUILTINS_BYTE)
-typedef ulint lock_word_t;
#else
typedef byte lock_word_t;
#endif
diff --git a/storage/xtradb/include/sync0sync.ic b/storage/xtradb/include/sync0sync.ic
index 6bd80ee7dea..a641ef8bb9e 100644
--- a/storage/xtradb/include/sync0sync.ic
+++ b/storage/xtradb/include/sync0sync.ic
@@ -83,15 +83,11 @@ ib_mutex_test_and_set(
ib_mutex_t* mutex) /*!< in: mutex */
{
#if defined(HAVE_ATOMIC_BUILTINS)
-# if defined(HAVE_ATOMIC_BUILTINS_BYTE)
- return(os_atomic_test_and_set_byte(&mutex->lock_word, 1));
-# else
- return(os_atomic_test_and_set_ulint(&mutex->lock_word, 1));
-# endif
+ return(os_atomic_test_and_set_byte_acquire(&mutex->lock_word, 1));
#else
ibool ret;
- ret = os_fast_mutex_trylock(&(mutex->os_fast_mutex));
+ ret = os_fast_mutex_trylock_full_barrier(&(mutex->os_fast_mutex));
if (ret == 0) {
/* We check that os_fast_mutex_trylock does not leak
@@ -99,7 +95,6 @@ ib_mutex_test_and_set(
ut_a(mutex->lock_word == 0);
mutex->lock_word = 1;
- os_wmb;
}
return((byte) ret);
@@ -116,11 +111,14 @@ mutex_reset_lock_word(
ib_mutex_t* mutex) /*!< in: mutex */
{
#if defined(HAVE_ATOMIC_BUILTINS)
- os_atomic_lock_release_byte(&mutex->lock_word);
+ /* In theory __sync_lock_release should be used to release the lock.
+ Unfortunately, it does not work properly alone. The workaround is
+ to use the more conservative __sync_lock_test_and_set() instead. */
+ os_atomic_test_and_set_byte_release(&mutex->lock_word, 0);
#else
mutex->lock_word = 0;
- os_fast_mutex_unlock(&(mutex->os_fast_mutex));
+ os_fast_mutex_unlock_full_barrier(&(mutex->os_fast_mutex));
#endif
}
@@ -152,7 +150,6 @@ mutex_get_waiters(
ptr = &(mutex->waiters);
- os_rmb;
return(*ptr); /* Here we assume that the read of a single
word from memory is atomic */
}
@@ -187,7 +184,6 @@ mutex_exit_func(
to wake up possible hanging threads if
they are missed in mutex_signal_object. */
- os_isync;
if (mutex_get_waiters(mutex) != 0) {
mutex_signal_object(mutex);
diff --git a/storage/xtradb/os/os0sync.cc b/storage/xtradb/os/os0sync.cc
index 779152a3a56..451ba5285e3 100644
--- a/storage/xtradb/os/os0sync.cc
+++ b/storage/xtradb/os/os0sync.cc
@@ -890,6 +890,25 @@ os_fast_mutex_unlock_func(
}
/**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
+/*=================*/
+ os_fast_mutex_t* fast_mutex) /*!< in: mutex to release */
+{
+#ifdef __WIN__
+ LeaveCriticalSection(&fast_mutex->mutex);
+#else
+ pthread_mutex_unlock(&fast_mutex->mutex);
+#ifdef __powerpc__
+ os_mb;
+#endif
+#endif
+}
+
+/**********************************************************//**
Frees a mutex object. */
UNIV_INTERN
void
diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc
index 67d4835ed95..ca630531086 100644
--- a/storage/xtradb/sync/sync0sync.cc
+++ b/storage/xtradb/sync/sync0sync.cc
@@ -536,8 +536,6 @@ mutex_set_waiters(
ptr = &(mutex->waiters);
- os_wmb;
-
*ptr = n; /* Here we assume that the write of a single
word in memory is atomic */
}