author     Davi Arnaut <davi.arnaut@oracle.com>    2010-07-23 09:37:10 -0300
committer  Davi Arnaut <davi.arnaut@oracle.com>    2010-07-23 09:37:10 -0300
commit     93e38e8a3ea9fdbd88a34da487cb2ccbfdaf8c11 (patch)
tree       49a56f15656733bfb1bab6fc971e59860daa8979 /include/atomic
parent     9deafdd2db229886f37bdfae719936757caa3e1d (diff)
download   mariadb-git-93e38e8a3ea9fdbd88a34da487cb2ccbfdaf8c11.tar.gz
Bug#22320: my_atomic-t unit test fails
Bug#52261: 64 bit atomic operations do not work on Solaris i386 gcc in debug compilation

One of the various problems was that the source operand to CMPXCHG8B was marked as an input/output operand, causing GCC to use the EBX register as the destination register for the CMPXCHG8B instruction. This could lead to crashes, as the EBX register is also used implicitly by the instruction, so the value could be garbled, followed by a protection fault once the value was used to access a position in memory.

Another problem was the lack of proper clobbers for the atomic operations and, also, a discrepancy between the implementations of the compare-and-swap (CAS) operation. The specific problems are described and fixed by Kristian Nielsen's patches:

Patch 1: Fix bugs in my_atomic_cas*(val,cmp,new) where *cmp is accessed after the CAS succeeds.

In the gcc builtin implementation, the problem was that *cmp was read again after the atomic CAS to check whether old *val == *cmp; this fails if the CAS is successful and another thread modifies *cmp in between. In the x86-gcc implementation, the problem was that *cmp was written even in the case of a successful CAS; this leaves a window where it can clobber a value written by another thread after the successful CAS.

Patch 2: Add a GCC asm "memory" clobber to primitives that imply a memory barrier.

This tells GCC that any potentially aliased memory must be flushed before the operation and re-read after it, so that reads or modifications of such memory in other threads work as intended. In effect, it makes these primitives work as memory barriers for the compiler as well as the CPU. This is better and more correct than adding "volatile" to variables.

include/atomic/gcc_builtins.h:
  Do not read from *cmp after the operation, as it might already be gone if the operation was successful.
include/atomic/nolock.h:
  Prefer system-provided atomics over the broken x86 asm.
include/atomic/x86-gcc.h:
  Do not mark source operands as input/output operands. Add proper memory clobbers.
include/my_atomic.h:
  Add notes about my_atomic_add and my_atomic_cas behaviors.
unittest/mysys/my_atomic-t.c:
  Remove the workaround: if the test fails, there is either a problem with the atomic operations code or the specific compiler version should be black-listed.
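To make Patch 1 concrete: the contract is that a successful CAS stores `set` into *a and leaves *cmp alone, while a failed CAS reports the value actually found in *a through *cmp. Since another thread may legitimately overwrite *cmp the moment the CAS succeeds, *cmp has to be read exactly once, before the operation. A minimal standalone sketch of the corrected pattern (illustrative only, not the MariaDB macro itself; it assumes a compiler that provides the __sync builtins):

#include <stdint.h>

/* Sketch of the corrected compare-and-swap pattern: read *cmp once,
   and write back to *cmp only when the CAS failed. */
static int cas32_sketch(volatile int32_t *a, int32_t *cmp, int32_t set)
{
  int32_t expected= *cmp;    /* single read of *cmp, before the CAS */
  int32_t found= __sync_val_compare_and_swap(a, expected, set);
  int ret= (found == expected);
  if (!ret)
    *cmp= found;             /* failure: report the value seen in *a */
  return ret;
}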
Diffstat (limited to 'include/atomic')
-rw-r--r--  include/atomic/gcc_builtins.h |  5
-rw-r--r--  include/atomic/nolock.h       | 17
-rw-r--r--  include/atomic/x86-gcc.h      | 41
3 files changed, 41 insertions(+), 22 deletions(-)
diff --git a/include/atomic/gcc_builtins.h b/include/atomic/gcc_builtins.h
index 100ff80cacd..d03d28f572e 100644
--- a/include/atomic/gcc_builtins.h
+++ b/include/atomic/gcc_builtins.h
@@ -22,8 +22,9 @@
v= __sync_lock_test_and_set(a, v);
#define make_atomic_cas_body(S) \
int ## S sav; \
- sav= __sync_val_compare_and_swap(a, *cmp, set); \
- if (!(ret= (sav == *cmp))) *cmp= sav;
+ int ## S cmp_val= *cmp; \
+ sav= __sync_val_compare_and_swap(a, cmp_val, set);\
+ if (!(ret= (sav == cmp_val))) *cmp= sav
#ifdef MY_ATOMIC_MODE_DUMMY
#define make_atomic_load_body(S) ret= *a
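For readability, here is how the fixed body above expands for S=32 (a sketch assuming the a, cmp, set and ret variables that the my_atomic.h wrappers declare around this macro):

  int32 sav;
  int32 cmp_val= *cmp;
  sav= __sync_val_compare_and_swap(a, cmp_val, set);
  if (!(ret= (sav == cmp_val))) *cmp= sav;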
diff --git a/include/atomic/nolock.h b/include/atomic/nolock.h
index 5a0c41d9078..4c871473b60 100644
--- a/include/atomic/nolock.h
+++ b/include/atomic/nolock.h
@@ -29,21 +29,22 @@
We choose implementation as follows:
------------------------------------
On Windows using Visual C++ the native implementation should be
- preferrable. When using gcc we prefer the native x86 implementation,
- we prefer the Solaris implementation before the gcc because of
- stability preference, we choose gcc implementation if nothing else
- works on gcc. If neither Visual C++ or gcc we still choose the
- Solaris implementation on Solaris (mainly for SunStudio compiles.
+ preferrable. When using gcc we prefer the Solaris implementation
+ before the gcc because of stability preference, we choose gcc
+ builtins if available, otherwise we choose the somewhat broken
+ native x86 implementation. If neither Visual C++ or gcc we still
+ choose the Solaris implementation on Solaris (mainly for SunStudio
+ compilers).
*/
# if defined(_MSV_VER)
# include "generic-msvc.h"
# elif __GNUC__
-# if defined(__i386__) || defined(__x86_64__)
-# include "x86-gcc.h"
-# elif defined(HAVE_SOLARIS_ATOMIC)
+# if defined(HAVE_SOLARIS_ATOMIC)
# include "solaris.h"
# elif defined(HAVE_GCC_ATOMIC_BUILTINS)
# include "gcc_builtins.h"
+# elif defined(__i386__) || defined(__x86_64__)
+# include "x86-gcc.h"
# endif
# elif defined(HAVE_SOLARIS_ATOMIC)
# include "solaris.h"
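The reordering above makes HAVE_GCC_ATOMIC_BUILTINS the preferred non-Solaris path under gcc, so it is worth recalling how that macro typically gets defined: the build runs a small compile-and-execute probe for the __sync builtins. A sketch of such a probe (the exact test in MariaDB's configure machinery may differ; this is just the shape of it):

/* If this program compiles, links and returns 0, the compiler provides
   usable __sync_* atomic builtins for the target. */
int main(void)
{
  int foo= -10, bar= 10;
  if (!__sync_fetch_and_add(&foo, bar) || foo)      /* foo: -10 -> 0    */
    return -1;
  bar= __sync_lock_test_and_set(&foo, bar);         /* foo: 0 -> 10     */
  if (bar || foo != 10)
    return -1;
  bar= __sync_val_compare_and_swap(&bar, foo, 15);  /* no swap: 0 != 10 */
  if (bar)
    return -1;
  return 0;
}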
diff --git a/include/atomic/x86-gcc.h b/include/atomic/x86-gcc.h
index 61b94a48568..8baa84e110e 100644
--- a/include/atomic/x86-gcc.h
+++ b/include/atomic/x86-gcc.h
@@ -53,18 +53,29 @@
#endif
#define make_atomic_add_body32 \
- asm volatile (LOCK_prefix "; xadd %0, %1;" : "+r" (v) , "+m" (*a))
+ asm volatile (LOCK_prefix "; xadd %0, %1;" \
+ : "+r" (v), "=m" (*a) \
+ : "m" (*a) \
+ : "memory")
#define make_atomic_cas_body32 \
+ __typeof__(*cmp) sav; \
asm volatile (LOCK_prefix "; cmpxchg %3, %0; setz %2;" \
- : "+m" (*a), "+a" (*cmp), "=q" (ret): "r" (set))
+ : "=m" (*a), "=a" (sav), "=q" (ret) \
+ : "r" (set), "m" (*a), "a" (*cmp) \
+ : "memory"); \
+ if (!ret) \
+ *cmp= sav
#ifdef __x86_64__
#define make_atomic_add_body64 make_atomic_add_body32
#define make_atomic_cas_body64 make_atomic_cas_body32
-#define make_atomic_fas_body(S) \
- asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (*a))
+#define make_atomic_fas_body(S) \
+ asm volatile ("xchg %0, %1;" \
+ : "+r" (v), "=m" (*a) \
+ : "m" (*a) \
+ : "memory")
/*
Actually 32-bit reads/writes are always atomic on x86
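The "memory" clobbers added in the hunk above are what Patch 2 in the commit message is about: they tell GCC that the asm may read or write any memory, so values cached in registers must be stored before the asm and reloaded after it. A standalone sketch of that compiler-level effect (not from the tree), using an empty asm as a pure compiler barrier; the real primitives additionally get CPU ordering from the LOCK-prefixed instruction itself:

static int data;     /* written by a producer thread before `flag` is set */
static int flag;

int consumer_sketch(void)
{
  while (!flag)
    /* The "memory" clobber forces GCC to re-read `flag` (and later `data`)
       from memory on every iteration; without it the compiler could keep
       both in registers and never observe the producer's stores. */
    __asm__ __volatile__("" : : : "memory");
  return data;
}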
@@ -73,9 +84,14 @@
#define make_atomic_load_body(S) \
ret=0; \
asm volatile (LOCK_prefix "; cmpxchg %2, %0" \
- : "+m" (*a), "+a" (ret): "r" (ret))
+ : "=m" (*a), "=a" (ret) \
+ : "r" (ret), "m" (*a) \
+ : "memory")
#define make_atomic_store_body(S) \
- asm volatile ("; xchg %0, %1;" : "+m" (*a), "+r" (v))
+ asm volatile ("; xchg %0, %1;" \
+ : "=m" (*a), "+r" (v) \
+ : "m" (*a) \
+ : "memory")
#else
/*
@@ -104,12 +120,13 @@
platforms the much simpler make_atomic_cas_body32 will work
fine.
*/
-#define make_atomic_cas_body64 \
- int32 ebx=(set & 0xFFFFFFFF), ecx=(set >> 32); \
- asm volatile ("push %%ebx; movl %3, %%ebx;" \
- LOCK_prefix "; cmpxchg8b %0; setz %2; pop %%ebx"\
- : "+m" (*a), "+A" (*cmp), "=c" (ret) \
- :"m" (ebx), "c" (ecx))
+#define make_atomic_cas_body64 \
+ int32 ebx=(set & 0xFFFFFFFF), ecx=(set >> 32); \
+ asm volatile ("push %%ebx; movl %3, %%ebx;" \
+ LOCK_prefix "; cmpxchg8b %0; setz %2; pop %%ebx" \
+ : "=m" (*a), "+A" (*cmp), "=c" (ret) \
+ : "m" (ebx), "c" (ecx), "m" (*a) \
+ : "memory", "esp")
#endif
/*
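With the operand and clobber fixes above, the workaround in unittest/mysys/my_atomic-t.c mentioned in the commit message can indeed go. For reference, the behaviour being relied on can be smoke-tested single-threaded like this (a hypothetical check, assuming a lock-free my_atomic build; the real multi-threaded coverage lives in the unit test):

#include <assert.h>

void cas64_smoke(void)
{
  int64 a= 5, cmp= 5;
  assert(my_atomic_cas64(&a, &cmp, 10));    /* succeeds: a becomes 10    */
  assert(a == 10 && cmp == 5);              /* ...and cmp is untouched   */
  cmp= 7;
  assert(!my_atomic_cas64(&a, &cmp, 42));   /* fails: a stays 10         */
  assert(a == 10 && cmp == 10);             /* ...and cmp reports a's 10 */
}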