summary | refs | log | tree | commit | diff
path: root/include/atomic/x86-gcc.h
diff options
context:
space:
mode:
author: unknown <serg@serg.mylan> 2006-06-17 16:20:39 +0200
committer: unknown <serg@serg.mylan> 2006-06-17 16:20:39 +0200
commit: d1fb292251fc6ba5a455e39d59dad5f7d0d0023f (patch)
tree: 1ed4d5cf639665eb2901d57998183b18637eab1a /include/atomic/x86-gcc.h
parent: 0d8d39c8c6ff44e54e0b7869c7497e79ff4d697f (diff)
download: mariadb-git-d1fb292251fc6ba5a455e39d59dad5f7d0d0023f.tar.gz
atomic ops:
my_atomic_XX_t -> intXX, no implicit locking anymore simplified framework, support for requested cleanups dbug/dbug.c: compiler warning include/atomic/nolock.h: my_atomic_XX_t -> intXX include/atomic/rwlock.h: my_atomic_XX_t -> intXX, no implicit locking anymore include/atomic/x86-gcc.h: my_atomic_XX_t -> intXX, no implicit locking anymore include/atomic/x86-msvc.h: my_atomic_XX_t -> intXX simplified defines support for cleanups include/my_atomic.h: my_atomic_XX_t -> intXX, no implicit locking anymore simplified framework, support for requested cleanups unittest/examples/no_plan-t.c: compiler warning unittest/mysys/Makefile.am: fix for dependencies unittest/mysys/my_atomic-t.c: my_atomic_XX_t -> intXX, no implicit locking anymore unittest/mytap/tap.c: cosmetic fix
Diffstat (limited to 'include/atomic/x86-gcc.h')
-rw-r--r--  include/atomic/x86-gcc.h  42
1 file changed, 18 insertions, 24 deletions
diff --git a/include/atomic/x86-gcc.h b/include/atomic/x86-gcc.h
index 7576db54d69..df6fcdc5ad2 100644
--- a/include/atomic/x86-gcc.h
+++ b/include/atomic/x86-gcc.h
@@ -16,44 +16,38 @@
/*
XXX 64-bit atomic operations can be implemented using
- cmpxchg8b, if necessary
+ cmpxchg8b, if necessary. Though I've heard that not all 64-bit
+ architectures support double-word (128-bit) cas.
*/
+#define MY_ATOMIC_MODE "gcc-x86" ## LOCK
+
/* fix -ansi errors while maintaining readability */
+#ifndef asm
#define asm __asm__
+#endif
-#define make_atomic_add_body8 \
- asm volatile (LOCK "xadd %0, %1;" : "+r" (v) , "+m" (a->val))
-#define make_atomic_swap_body8 \
- asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (a->val))
-#define make_atomic_cas_body8 \
+#define make_atomic_add_body(S) \
+ asm volatile (LOCK "xadd %0, %1;" : "+r" (v) , "+m" (*a))
+#define make_atomic_swap_body(S) \
+ asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (*a))
+#define make_atomic_cas_body(S) \
asm volatile (LOCK "cmpxchg %3, %0; setz %2;" \
- : "+m" (a->val), "+a" (*cmp), "=q" (ret): "r" (set))
+ : "+m" (*a), "+a" (*cmp), "=q" (ret): "r" (set))
#ifdef MY_ATOMIC_MODE_DUMMY
-#define make_atomic_load_body8 ret=a->val
-#define make_atomic_store_body8 a->val=v
+#define make_atomic_load_body(S) ret=*a
+#define make_atomic_store_body(S) *a=v
#else
/*
Actually 32-bit reads/writes are always atomic on x86
But we add LOCK here anyway to force memory barriers
*/
-#define make_atomic_load_body8 \
+#define make_atomic_load_body(S) \
ret=0; \
asm volatile (LOCK "cmpxchg %2, %0" \
- : "+m" (a->val), "+a" (ret): "r" (ret))
-#define make_atomic_store_body8 \
- asm volatile ("xchg %0, %1;" : "+m" (a->val) : "r" (v))
+ : "+m" (*a), "+a" (ret): "r" (ret))
+#define make_atomic_store_body(S) \
+ asm volatile ("xchg %0, %1;" : "+m" (*a) : "r" (v))
#endif
-#define make_atomic_add_body16 make_atomic_add_body8
-#define make_atomic_add_body32 make_atomic_add_body8
-#define make_atomic_cas_body16 make_atomic_cas_body8
-#define make_atomic_cas_body32 make_atomic_cas_body8
-#define make_atomic_load_body16 make_atomic_load_body8
-#define make_atomic_load_body32 make_atomic_load_body8
-#define make_atomic_store_body16 make_atomic_store_body8
-#define make_atomic_store_body32 make_atomic_store_body8
-#define make_atomic_swap_body16 make_atomic_swap_body8
-#define make_atomic_swap_body32 make_atomic_swap_body8
-