Diffstat (limited to 'include/atomic')
-rw-r--r--  include/atomic/nolock.h    |  4
-rw-r--r--  include/atomic/x86-gcc.h   | 18
-rw-r--r--  include/atomic/x86-msvc.h  | 10
3 files changed, 16 insertions, 16 deletions
diff --git a/include/atomic/nolock.h b/include/atomic/nolock.h
index 21f41484ac9..a696e008f03 100644
--- a/include/atomic/nolock.h
+++ b/include/atomic/nolock.h
@@ -17,9 +17,9 @@
#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__)
# ifdef MY_ATOMIC_MODE_DUMMY
-# define LOCK ""
+# define LOCK_prefix ""
# else
-# define LOCK "lock"
+# define LOCK_prefix "lock"
# endif
# ifdef __GNUC__
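The rename above only changes the macro name; its value is still a string literal that the preprocessor pastes into the gcc asm templates in the x86-gcc.h hunks below. A minimal standalone sketch of that mechanism (not part of this commit; fetch_and_add is a hypothetical name), assuming gcc on x86/x86-64:

    /* Illustration only: with MY_ATOMIC_MODE_DUMMY the prefix is the empty
       string and the very same xadd runs without the bus lock. */
    #define LOCK_prefix "lock"              /* "" under MY_ATOMIC_MODE_DUMMY */

    static int fetch_and_add(volatile int *a, int v)
    {
      /* String concatenation yields "lock; xadd %0, %1" (or just
         "; xadd %0, %1" in dummy mode); xadd leaves the old value in v. */
      __asm__ __volatile__ (LOCK_prefix "; xadd %0, %1"
                            : "+r" (v), "+m" (*a));
      return v;
    }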
diff --git a/include/atomic/x86-gcc.h b/include/atomic/x86-gcc.h
index 3f0a82a9400..0be8fdf9244 100644
--- a/include/atomic/x86-gcc.h
+++ b/include/atomic/x86-gcc.h
@@ -22,15 +22,15 @@
#ifdef __x86_64__
# ifdef MY_ATOMIC_NO_XADD
-# define MY_ATOMIC_MODE "gcc-amd64" LOCK "-no-xadd"
+# define MY_ATOMIC_MODE "gcc-amd64" LOCK_prefix "-no-xadd"
# else
-# define MY_ATOMIC_MODE "gcc-amd64" LOCK
+# define MY_ATOMIC_MODE "gcc-amd64" LOCK_prefix
# endif
#else
# ifdef MY_ATOMIC_NO_XADD
-# define MY_ATOMIC_MODE "gcc-x86" LOCK "-no-xadd"
+# define MY_ATOMIC_MODE "gcc-x86" LOCK_prefix "-no-xadd"
# else
-# define MY_ATOMIC_MODE "gcc-x86" LOCK
+# define MY_ATOMIC_MODE "gcc-x86" LOCK_prefix
# endif
#endif
@@ -41,12 +41,12 @@
#ifndef MY_ATOMIC_NO_XADD
#define make_atomic_add_body(S) \
- asm volatile (LOCK "; xadd %0, %1;" : "+r" (v) , "+m" (*a))
+ asm volatile (LOCK_prefix "; xadd %0, %1;" : "+r" (v) , "+m" (*a))
#endif
#define make_atomic_swap_body(S) \
- asm volatile ("; xchg %0, %1;" : "+r" (v) , "+m" (*a))
+ asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (*a))
#define make_atomic_cas_body(S) \
- asm volatile (LOCK "; cmpxchg %3, %0; setz %2;" \
+ asm volatile (LOCK_prefix "; cmpxchg %3, %0; setz %2;" \
: "+m" (*a), "+a" (*cmp), "=q" (ret): "r" (set))
#ifdef MY_ATOMIC_MODE_DUMMY
@@ -55,11 +55,11 @@
#else
/*
Actually 32-bit reads/writes are always atomic on x86
- But we add LOCK here anyway to force memory barriers
+ But we add LOCK_prefix here anyway to force memory barriers
*/
#define make_atomic_load_body(S) \
ret=0; \
- asm volatile (LOCK "; cmpxchg %2, %0" \
+ asm volatile (LOCK_prefix "; cmpxchg %2, %0" \
: "+m" (*a), "+a" (ret): "r" (ret))
#define make_atomic_store_body(S) \
asm volatile ("; xchg %0, %1;" : "+m" (*a), "+r" (v))
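The comment in the hunk above notes that plain 32-bit loads are already atomic on x86 and that the lock prefix is only there to act as a memory barrier. A minimal standalone sketch of that load-via-cmpxchg trick (barrier_load is a hypothetical name, not part of this commit): with eax preset to 0, cmpxchg either writes 0 over an existing 0 or fails and copies the current value into eax, so eax always ends up holding *a, while the lock'ed instruction orders surrounding memory accesses.

    static int barrier_load(volatile int *a)
    {
      int ret = 0;
      /* If *a == 0 the exchange stores 0 back (no visible change);
         otherwise the compare fails and *a is loaded into eax/ret. */
      __asm__ __volatile__ ("lock; cmpxchg %2, %0"
                            : "+m" (*a), "+a" (ret)
                            : "r" (ret));
      return ret;
    }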
diff --git a/include/atomic/x86-msvc.h b/include/atomic/x86-msvc.h
index d4024a854fb..8f3e55aaed7 100644
--- a/include/atomic/x86-msvc.h
+++ b/include/atomic/x86-msvc.h
@@ -26,19 +26,19 @@
#ifndef _atomic_h_cleanup_
#define _atomic_h_cleanup_ "atomic/x86-msvc.h"
-#define MY_ATOMIC_MODE "msvc-x86" LOCK
+#define MY_ATOMIC_MODE "msvc-x86" LOCK_prefix
#define make_atomic_add_body(S) \
_asm { \
_asm mov reg_ ## S, v \
- _asm LOCK xadd *a, reg_ ## S \
+ _asm LOCK_prefix xadd *a, reg_ ## S \
_asm movzx v, reg_ ## S \
}
#define make_atomic_cas_body(S) \
_asm { \
_asm mov areg_ ## S, *cmp \
_asm mov reg2_ ## S, set \
- _asm LOCK cmpxchg *a, reg2_ ## S \
+ _asm LOCK_prefix cmpxchg *a, reg2_ ## S \
_asm mov *cmp, areg_ ## S \
_asm setz al \
_asm movzx ret, al \
@@ -56,13 +56,13 @@
#else
/*
Actually 32-bit reads/writes are always atomic on x86
- But we add LOCK here anyway to force memory barriers
+ But we add LOCK_prefix here anyway to force memory barriers
*/
#define make_atomic_load_body(S) \
_asm { \
_asm mov areg_ ## S, 0 \
_asm mov reg2_ ## S, areg_ ## S \
- _asm LOCK cmpxchg *a, reg2_ ## S \
+ _asm LOCK_prefix cmpxchg *a, reg2_ ## S \
_asm mov ret, areg_ ## S \
}
#define make_atomic_store_body(S) \