Diffstat (limited to 'include/atomic')
-rw-r--r--  include/atomic/gcc_builtins.h   88
-rw-r--r--  include/atomic/gcc_sync.h      106
-rw-r--r--  include/atomic/generic-msvc.h  197
-rw-r--r--  include/atomic/solaris.h       143
-rw-r--r--  include/atomic/x86-gcc.h       151
5 files changed, 375 insertions, 310 deletions
diff --git a/include/atomic/gcc_builtins.h b/include/atomic/gcc_builtins.h
index 56a0323aedf..e2c3b10c267 100644
--- a/include/atomic/gcc_builtins.h
+++ b/include/atomic/gcc_builtins.h
@@ -1,7 +1,7 @@
#ifndef ATOMIC_GCC_BUILTINS_INCLUDED
#define ATOMIC_GCC_BUILTINS_INCLUDED
-/* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2017 MariaDB Foundation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,33 +16,63 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-#define make_atomic_add_body(S) \
- v= __sync_fetch_and_add(a, v);
-#define make_atomic_fas_body(S) \
- v= __sync_lock_test_and_set(a, v);
-#define make_atomic_cas_body(S) \
- int ## S sav; \
- int ## S cmp_val= *cmp; \
- sav= __sync_val_compare_and_swap(a, cmp_val, set);\
- if (!(ret= (sav == cmp_val))) *cmp= sav
-
-#ifdef MY_ATOMIC_MODE_DUMMY
-#define make_atomic_load_body(S) ret= *a
-#define make_atomic_store_body(S) *a= v
-#define MY_ATOMIC_MODE "gcc-builtins-up"
-
-#elif defined(HAVE_GCC_C11_ATOMICS)
-#define MY_ATOMIC_MODE "gcc-atomics-smp"
-#define make_atomic_load_body(S) \
- ret= __atomic_load_n(a, __ATOMIC_SEQ_CST)
-#define make_atomic_store_body(S) \
- __atomic_store_n(a, v, __ATOMIC_SEQ_CST)
-#else
-#define MY_ATOMIC_MODE "gcc-builtins-smp"
-#define make_atomic_load_body(S) \
- ret= __sync_fetch_and_or(a, 0);
-#define make_atomic_store_body(S) \
- (void) __sync_lock_test_and_set(a, v);
-#endif
+
+#define MY_MEMORY_ORDER_RELAXED __ATOMIC_RELAXED
+#define MY_MEMORY_ORDER_CONSUME __ATOMIC_CONSUME
+#define MY_MEMORY_ORDER_ACQUIRE __ATOMIC_ACQUIRE
+#define MY_MEMORY_ORDER_RELEASE __ATOMIC_RELEASE
+#define MY_MEMORY_ORDER_ACQ_REL __ATOMIC_ACQ_REL
+#define MY_MEMORY_ORDER_SEQ_CST __ATOMIC_SEQ_CST
+
+#define my_atomic_store32_explicit(P, D, O) __atomic_store_n((P), (D), (O))
+#define my_atomic_store64_explicit(P, D, O) __atomic_store_n((P), (D), (O))
+#define my_atomic_storeptr_explicit(P, D, O) __atomic_store_n((P), (D), (O))
+
+#define my_atomic_load32_explicit(P, O) __atomic_load_n((P), (O))
+#define my_atomic_load64_explicit(P, O) __atomic_load_n((P), (O))
+#define my_atomic_loadptr_explicit(P, O) __atomic_load_n((P), (O))
+
+#define my_atomic_fas32_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
+#define my_atomic_fas64_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
+#define my_atomic_fasptr_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
+
+#define my_atomic_add32_explicit(P, A, O) __atomic_fetch_add((P), (A), (O))
+#define my_atomic_add64_explicit(P, A, O) __atomic_fetch_add((P), (A), (O))
+
+#define my_atomic_cas32_weak_explicit(P, E, D, S, F) \
+ __atomic_compare_exchange_n((P), (E), (D), 1, (S), (F))
+#define my_atomic_cas64_weak_explicit(P, E, D, S, F) \
+ __atomic_compare_exchange_n((P), (E), (D), 1, (S), (F))
+#define my_atomic_casptr_weak_explicit(P, E, D, S, F) \
+ __atomic_compare_exchange_n((P), (E), (D), 1, (S), (F))
+
+#define my_atomic_cas32_strong_explicit(P, E, D, S, F) \
+ __atomic_compare_exchange_n((P), (E), (D), 0, (S), (F))
+#define my_atomic_cas64_strong_explicit(P, E, D, S, F) \
+ __atomic_compare_exchange_n((P), (E), (D), 0, (S), (F))
+#define my_atomic_casptr_strong_explicit(P, E, D, S, F) \
+ __atomic_compare_exchange_n((P), (E), (D), 0, (S), (F))
+
+#define my_atomic_store32(P, D) __atomic_store_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_store64(P, D) __atomic_store_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_storeptr(P, D) __atomic_store_n((P), (D), __ATOMIC_SEQ_CST)
+
+#define my_atomic_load32(P) __atomic_load_n((P), __ATOMIC_SEQ_CST)
+#define my_atomic_load64(P) __atomic_load_n((P), __ATOMIC_SEQ_CST)
+#define my_atomic_loadptr(P) __atomic_load_n((P), __ATOMIC_SEQ_CST)
+
+#define my_atomic_fas32(P, D) __atomic_exchange_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_fas64(P, D) __atomic_exchange_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_fasptr(P, D) __atomic_exchange_n((P), (D), __ATOMIC_SEQ_CST)
+
+#define my_atomic_add32(P, A) __atomic_fetch_add((P), (A), __ATOMIC_SEQ_CST)
+#define my_atomic_add64(P, A) __atomic_fetch_add((P), (A), __ATOMIC_SEQ_CST)
+
+#define my_atomic_cas32(P, E, D) \
+ __atomic_compare_exchange_n((P), (E), (D), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+#define my_atomic_cas64(P, E, D) \
+ __atomic_compare_exchange_n((P), (E), (D), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+#define my_atomic_casptr(P, E, D) \
+ __atomic_compare_exchange_n((P), (E), (D), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#endif /* ATOMIC_GCC_BUILTINS_INCLUDED */
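For illustration, a minimal usage sketch of the new explicit-memory-order macros. It is not part of the commit: the int32 typedef normally comes from my_global.h (typedef'd locally here so the sketch stands alone), and the macro definitions are copied from the lines added above; a C11-capable GCC or clang is assumed.

/* sketch: relaxed add plus strong CAS, mirroring the macro expansions above */
#include <stdint.h>
#include <stdio.h>

typedef int32_t int32;              /* normally from my_global.h */

#define MY_MEMORY_ORDER_RELAXED __ATOMIC_RELAXED
#define my_atomic_add32_explicit(P, A, O) __atomic_fetch_add((P), (A), (O))
#define my_atomic_load32(P) __atomic_load_n((P), __ATOMIC_SEQ_CST)
#define my_atomic_cas32_strong_explicit(P, E, D, S, F) \
  __atomic_compare_exchange_n((P), (E), (D), 0, (S), (F))

static int32 counter;

int main(void)
{
  /* fetch-and-add: returns the value before the increment */
  int32 before= my_atomic_add32_explicit(&counter, 1, MY_MEMORY_ORDER_RELAXED);

  /* strong CAS: on failure the expected value is updated in place */
  int32 expected= before + 1;
  int ok= my_atomic_cas32_strong_explicit(&counter, &expected, 42,
                                          __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);

  printf("before=%d ok=%d now=%d\n", (int)before, ok, (int)my_atomic_load32(&counter));
  return 0;
}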
diff --git a/include/atomic/gcc_sync.h b/include/atomic/gcc_sync.h
new file mode 100644
index 00000000000..82eea35b2ce
--- /dev/null
+++ b/include/atomic/gcc_sync.h
@@ -0,0 +1,106 @@
+#ifndef GCC_SYNC_INCLUDED
+#define GCC_SYNC_INCLUDED
+
+/* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+/* Old GCC __sync builtins introduced in GCC 4.1 */
+
+static inline int my_atomic_cas32(int32 volatile *a, int32 *cmp, int32 set)
+{
+ int32 cmp_val= *cmp;
+ int32 sav= __sync_val_compare_and_swap(a, cmp_val, set);
+ int ret= (sav == cmp_val);
+ if (!ret)
+ *cmp = sav;
+ return ret;
+}
+
+static inline int my_atomic_cas64(int64 volatile *a, int64 *cmp, int64 set)
+{
+ int64 cmp_val= *cmp;
+ int64 sav= __sync_val_compare_and_swap(a, cmp_val, set);
+ int ret= (sav == cmp_val);
+ if (!ret)
+ *cmp = sav;
+ return ret;
+}
+
+static inline int my_atomic_casptr(void * volatile *a, void **cmp, void *set)
+{
+ void *cmp_val= *cmp;
+ void *sav= __sync_val_compare_and_swap(a, cmp_val, set);
+ int ret= (sav == cmp_val);
+ if (!ret)
+ *cmp = sav;
+ return ret;
+}
+
+static inline int32 my_atomic_add32(int32 volatile *a, int32 v)
+{
+ return __sync_fetch_and_add(a, v);
+}
+
+static inline int64 my_atomic_add64(int64 volatile *a, int64 v)
+{
+ return __sync_fetch_and_add(a, v);
+}
+
+static inline int32 my_atomic_fas32(int32 volatile *a, int32 v)
+{
+ return __sync_lock_test_and_set(a, v);
+}
+
+static inline int64 my_atomic_fas64(int64 volatile *a, int64 v)
+{
+ return __sync_lock_test_and_set(a, v);
+}
+
+static inline void * my_atomic_fasptr(void * volatile *a, void * v)
+{
+ return __sync_lock_test_and_set(a, v);
+}
+
+static inline int32 my_atomic_load32(int32 volatile *a)
+{
+ return __sync_fetch_and_or(a, 0);
+}
+
+static inline int64 my_atomic_load64(int64 volatile *a)
+{
+ return __sync_fetch_and_or(a, 0);
+}
+
+static inline void* my_atomic_loadptr(void * volatile *a)
+{
+ return __sync_fetch_and_or(a, 0);
+}
+
+static inline void my_atomic_store32(int32 volatile *a, int32 v)
+{
+ (void) __sync_lock_test_and_set(a, v);
+}
+
+static inline void my_atomic_store64(int64 volatile *a, int64 v)
+{
+ (void) __sync_lock_test_and_set(a, v);
+}
+
+static inline void my_atomic_storeptr(void * volatile *a, void *v)
+{
+ (void) __sync_lock_test_and_set(a, v);
+}
+
+#endif /* GCC_SYNC_INCLUDED */
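The __sync wrappers keep the old my_atomic CAS convention: nonzero on success, and on failure the observed value is written back into *cmp, which is what makes the usual retry loop cheap. A minimal sketch of that loop follows; the helper name atomic_fetch_max32 is invented for illustration, the wrapper body is the one added above, and the int32 typedef is assumed to come from my_global.h.

/* sketch: CAS retry loop in the style used throughout the server */
#include <stdint.h>

typedef int32_t int32;              /* normally from my_global.h */

static inline int my_atomic_cas32(int32 volatile *a, int32 *cmp, int32 set)
{
  int32 cmp_val= *cmp;
  int32 sav= __sync_val_compare_and_swap(a, cmp_val, set);
  int ret= (sav == cmp_val);
  if (!ret)
    *cmp= sav;                      /* report the value actually seen */
  return ret;
}

/* illustrative helper: atomically raise *a to at least v, return old value */
static int32 atomic_fetch_max32(int32 volatile *a, int32 v)
{
  int32 old= *a;
  while (old < v && !my_atomic_cas32(a, &old, v))
    ;                               /* old was refreshed by the failed CAS */
  return old;
}

int main(void)
{
  int32 volatile hwm= 10;
  atomic_fetch_max32(&hwm, 42);     /* hwm is now 42 */
  atomic_fetch_max32(&hwm, 7);      /* no change: 7 < 42 */
  return (int)(hwm != 42);
}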
diff --git a/include/atomic/generic-msvc.h b/include/atomic/generic-msvc.h
index 8daa497036f..d5eaa4738c7 100644
--- a/include/atomic/generic-msvc.h
+++ b/include/atomic/generic-msvc.h
@@ -1,5 +1,7 @@
-/* Copyright (c) 2006-2008 MySQL AB, 2009 Sun Microsystems, Inc.
- Use is subject to license terms.
+#ifndef ATOMIC_MSC_INCLUDED
+#define ATOMIC_MSC_INCLUDED
+
+/* Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -14,92 +16,125 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-#ifndef _atomic_h_cleanup_
-#define _atomic_h_cleanup_ "atomic/generic-msvc.h"
+#include <windows.h>
+
+static inline int my_atomic_cas32(int32 volatile *a, int32 *cmp, int32 set)
+{
+ int32 initial_cmp= *cmp;
+ int32 initial_a= InterlockedCompareExchange((volatile LONG*)a,
+ set, initial_cmp);
+ int ret= (initial_a == initial_cmp);
+ if (!ret)
+ *cmp= initial_a;
+ return ret;
+}
+
+static inline int my_atomic_cas64(int64 volatile *a, int64 *cmp, int64 set)
+{
+ int64 initial_cmp= *cmp;
+ int64 initial_a= InterlockedCompareExchange64((volatile LONGLONG*)a,
+ (LONGLONG)set,
+ (LONGLONG)initial_cmp);
+ int ret= (initial_a == initial_cmp);
+ if (!ret)
+ *cmp= initial_a;
+ return ret;
+}
+
+static inline int my_atomic_casptr(void * volatile *a, void **cmp, void *set)
+{
+ void *initial_cmp= *cmp;
+ void *initial_a= InterlockedCompareExchangePointer(a, set, initial_cmp);
+ int ret= (initial_a == initial_cmp);
+ if (!ret)
+ *cmp= initial_a;
+ return ret;
+}
+
+static inline int32 my_atomic_add32(int32 volatile *a, int32 v)
+{
+ return (int32)InterlockedExchangeAdd((volatile LONG*)a, v);
+}
+
+static inline int64 my_atomic_add64(int64 volatile *a, int64 v)
+{
+ return (int64)InterlockedExchangeAdd64((volatile LONGLONG*)a, (LONGLONG)v);
+}
-/*
- We don't implement anything specific for MY_ATOMIC_MODE_DUMMY, always use
- intrinsics.
- 8 and 16-bit atomics are not implemented, but it can be done if necessary.
-*/
-#undef MY_ATOMIC_HAS_8_16
-#include <windows.h>
/*
- x86 compilers (both VS2003 or VS2005) never use instrinsics, but generate
- function calls to kernel32 instead, even in the optimized build.
- We force intrinsics as described in MSDN documentation for
- _InterlockedCompareExchange.
+ According to MSDN:
+
+ Simple reads and writes to properly-aligned 32-bit variables are atomic
+ operations.
+ ...
+ Simple reads and writes to properly aligned 64-bit variables are atomic on
+ 64-bit Windows. Reads and writes to 64-bit values are not guaranteed to be
+ atomic on 32-bit Windows.
+
+ https://msdn.microsoft.com/en-us/library/windows/desktop/ms684122(v=vs.85).aspx
*/
-#ifdef _M_IX86
-#if (_MSC_VER >= 1500)
-#include <intrin.h>
+static inline int32 my_atomic_load32(int32 volatile *a)
+{
+ int32 value= *a;
+ MemoryBarrier();
+ return value;
+}
+
+static inline int64 my_atomic_load64(int64 volatile *a)
+{
+#ifdef _M_X64
+ int64 value= *a;
+ MemoryBarrier();
+ return value;
#else
-C_MODE_START
-/*Visual Studio 2003 and earlier do not have prototypes for atomic intrinsics*/
-LONG _InterlockedCompareExchange (LONG volatile *Target, LONG Value, LONG Comp);
-LONGLONG _InterlockedCompareExchange64 (LONGLONG volatile *Target,
- LONGLONG Value, LONGLONG Comp);
-C_MODE_END
-
-#pragma intrinsic(_InterlockedCompareExchange)
-#pragma intrinsic(_InterlockedCompareExchange64)
+ return (int64) InterlockedCompareExchange64((volatile LONGLONG *) a, 0, 0);
#endif
-
-#define InterlockedCompareExchange _InterlockedCompareExchange
-#define InterlockedCompareExchange64 _InterlockedCompareExchange64
-/*
- No need to do something special for InterlockedCompareExchangePointer
- as it is a #define to InterlockedCompareExchange. The same applies to
- InterlockedExchangePointer.
-*/
-#endif /*_M_IX86*/
-
-#define MY_ATOMIC_MODE "msvc-intrinsics"
-/* Implement using CAS on WIN32 */
-#define IL_COMP_EXCHG32(X,Y,Z) \
- InterlockedCompareExchange((volatile LONG *)(X),(Y),(Z))
-#define IL_COMP_EXCHG64(X,Y,Z) \
- InterlockedCompareExchange64((volatile LONGLONG *)(X), \
- (LONGLONG)(Y),(LONGLONG)(Z))
-#define IL_COMP_EXCHGptr InterlockedCompareExchangePointer
-
-#define make_atomic_cas_body(S) \
- int ## S initial_cmp= *cmp; \
- int ## S initial_a= IL_COMP_EXCHG ## S (a, set, initial_cmp); \
- if (!(ret= (initial_a == initial_cmp))) *cmp= initial_a;
-
-#ifndef _M_IX86
-/* Use full set of optimised functions on WIN64 */
-#define IL_EXCHG_ADD32(X,Y) \
- InterlockedExchangeAdd((volatile LONG *)(X),(Y))
-#define IL_EXCHG_ADD64(X,Y) \
- InterlockedExchangeAdd64((volatile LONGLONG *)(X),(LONGLONG)(Y))
-#define IL_EXCHG32(X,Y) \
- InterlockedExchange((volatile LONG *)(X),(Y))
-#define IL_EXCHG64(X,Y) \
- InterlockedExchange64((volatile LONGLONG *)(X),(LONGLONG)(Y))
-#define IL_EXCHGptr InterlockedExchangePointer
-
-#define make_atomic_add_body(S) \
- v= IL_EXCHG_ADD ## S (a, v)
-#define make_atomic_swap_body(S) \
- v= IL_EXCHG ## S (a, v)
-#define make_atomic_load_body(S) \
- ret= 0; /* avoid compiler warning */ \
- ret= IL_COMP_EXCHG ## S (a, ret, ret);
+}
+
+static inline void* my_atomic_loadptr(void * volatile *a)
+{
+ void *value= *a;
+ MemoryBarrier();
+ return value;
+}
+
+static inline int32 my_atomic_fas32(int32 volatile *a, int32 v)
+{
+ return (int32)InterlockedExchange((volatile LONG*)a, v);
+}
+
+static inline int64 my_atomic_fas64(int64 volatile *a, int64 v)
+{
+ return (int64)InterlockedExchange64((volatile LONGLONG*)a, v);
+}
+
+static inline void * my_atomic_fasptr(void * volatile *a, void * v)
+{
+ return InterlockedExchangePointer(a, v);
+}
+
+static inline void my_atomic_store32(int32 volatile *a, int32 v)
+{
+ MemoryBarrier();
+ *a= v;
+}
+
+static inline void my_atomic_store64(int64 volatile *a, int64 v)
+{
+#ifdef _M_X64
+ MemoryBarrier();
+ *a= v;
+#else
+ (void) InterlockedExchange64((volatile LONGLONG *) a, v);
#endif
+}
-#else /* cleanup */
-
-#undef IL_EXCHG_ADD32
-#undef IL_EXCHG_ADD64
-#undef IL_COMP_EXCHG32
-#undef IL_COMP_EXCHG64
-#undef IL_COMP_EXCHGptr
-#undef IL_EXCHG32
-#undef IL_EXCHG64
-#undef IL_EXCHGptr
+static inline void my_atomic_storeptr(void * volatile *a, void *v)
+{
+ MemoryBarrier();
+ *a= v;
+}
-#endif
+#endif /* ATOMIC_MSC_INCLUDED */
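On 32-bit Windows a plain 64-bit read or write is not atomic, so the new code falls back to the Interlocked 64-bit operations there, while 64-bit builds rely on the MSDN guarantee for aligned accesses plus MemoryBarrier() for ordering. A Windows-only sketch of the same 64-bit load emulation, illustrative only; the load64 helper name and big_counter variable are invented here, the bodies mirror my_atomic_load64() above.

/* sketch (Windows only): atomic 64-bit load on 32-bit targets via a
   compare-exchange that never changes the stored value */
#include <windows.h>
#include <stdio.h>

static volatile LONGLONG big_counter;

static LONGLONG load64(volatile LONGLONG *a)
{
#ifdef _M_X64
  /* aligned 64-bit reads are atomic on 64-bit Windows; the barrier
     only constrains ordering, as in my_atomic_load64() above */
  LONGLONG value= *a;
  MemoryBarrier();
  return value;
#else
  /* CAS with (0, 0): returns the current value, stores 0 only if it was 0 */
  return InterlockedCompareExchange64(a, 0, 0);
#endif
}

int main(void)
{
  InterlockedExchange64(&big_counter, 42);
  printf("%lld\n", load64(&big_counter));
  return 0;
}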
diff --git a/include/atomic/solaris.h b/include/atomic/solaris.h
index 7b6f0561ff0..5be36ec5e77 100644
--- a/include/atomic/solaris.h
+++ b/include/atomic/solaris.h
@@ -1,4 +1,7 @@
-/* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+#ifndef ATOMIC_SOLARIS_INCLUDED
+#define ATOMIC_SOLARIS_INCLUDED
+
+/* Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -13,60 +16,102 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-#ifndef _atomic_h_cleanup_
-#define _atomic_h_cleanup_ "atomic/solaris.h"
-
#include <atomic.h>
-#define MY_ATOMIC_MODE "solaris-atomic"
-
#if defined(__GNUC__)
#define atomic_typeof(T,V) __typeof__(V)
#else
#define atomic_typeof(T,V) T
#endif
-#define uintptr_t void *
-#define atomic_or_ptr_nv(X,Y) (void *)atomic_or_ulong_nv((volatile ulong_t *)X, Y)
-
-#define make_atomic_cas_body(S) \
- atomic_typeof(uint ## S ## _t, *cmp) sav; \
- sav = atomic_cas_ ## S( \
- (volatile uint ## S ## _t *)a, \
- (uint ## S ## _t)*cmp, \
- (uint ## S ## _t)set); \
- if (! (ret= (sav == *cmp))) \
+static inline int my_atomic_cas32(int32 volatile *a, int32 *cmp, int32 set)
+{
+ int ret;
+ atomic_typeof(uint32_t, *cmp) sav;
+ sav= atomic_cas_32((volatile uint32_t *)a, (uint32_t)*cmp, (uint32_t)set);
+ ret= (sav == *cmp);
+ if (!ret)
*cmp= sav;
-
-#define make_atomic_add_body(S) \
- int ## S nv; /* new value */ \
- nv= atomic_add_ ## S ## _nv((volatile uint ## S ## _t *)a, v); \
- v= nv - v
-
-/* ------------------------------------------------------------------------ */
-
-#ifdef MY_ATOMIC_MODE_DUMMY
-
-#define make_atomic_load_body(S) ret= *a
-#define make_atomic_store_body(S) *a= v
-
-#else /* MY_ATOMIC_MODE_DUMMY */
-
-#define make_atomic_load_body(S) \
- ret= atomic_or_ ## S ## _nv((volatile uint ## S ## _t *)a, 0)
-
-#define make_atomic_store_body(S) \
- (void) atomic_swap_ ## S((volatile uint ## S ## _t *)a, (uint ## S ## _t)v)
-
-#endif
-
-#define make_atomic_fas_body(S) \
- v= atomic_swap_ ## S((volatile uint ## S ## _t *)a, (uint ## S ## _t)v)
-
-#else /* cleanup */
-
-#undef uintptr_t
-#undef atomic_or_ptr_nv
-
-#endif
-
+ return ret;
+}
+
+static inline int my_atomic_cas64(int64 volatile *a, int64 *cmp, int64 set)
+{
+ int ret;
+ atomic_typeof(uint64_t, *cmp) sav;
+ sav= atomic_cas_64((volatile uint64_t *)a, (uint64_t)*cmp, (uint64_t)set);
+ ret= (sav == *cmp);
+ if (!ret)
+ *cmp= sav;
+ return ret;
+}
+
+static inline int my_atomic_casptr(void * volatile *a, void **cmp, void *set)
+{
+ int ret;
+ atomic_typeof(void *, *cmp) sav;
+ sav= atomic_cas_ptr((volatile void **)a, (void *)*cmp, (void *)set);
+ ret= (sav == *cmp);
+ if (!ret)
+ *cmp= sav;
+ return ret;
+}
+
+static inline int32 my_atomic_add32(int32 volatile *a, int32 v)
+{
+ int32 nv= atomic_add_32_nv((volatile uint32_t *)a, v);
+ return nv - v;
+}
+
+static inline int64 my_atomic_add64(int64 volatile *a, int64 v)
+{
+ int64 nv= atomic_add_64_nv((volatile uint64_t *)a, v);
+ return nv - v;
+}
+
+static inline int32 my_atomic_fas32(int32 volatile *a, int32 v)
+{
+ return atomic_swap_32((volatile uint32_t *)a, (uint32_t)v);
+}
+
+static inline int64 my_atomic_fas64(int64 volatile *a, int64 v)
+{
+ return atomic_swap_64((volatile uint64_t *)a, (uint64_t)v);
+}
+
+static inline void * my_atomic_fasptr(void * volatile *a, void * v)
+{
+ return atomic_swap_ptr(a, v);
+}
+
+static inline int32 my_atomic_load32(int32 volatile *a)
+{
+ return atomic_or_32_nv((volatile uint32_t *)a, 0);
+}
+
+static inline int64 my_atomic_load64(int64 volatile *a)
+{
+ return atomic_or_64_nv((volatile uint64_t *)a, 0);
+}
+
+static inline void* my_atomic_loadptr(void * volatile *a)
+{
+ return atomic_add_ptr_nv(a, 0);
+}
+
+static inline void my_atomic_store32(int32 volatile *a, int32 v)
+{
+ (void) atomic_swap_32((volatile uint32_t *)a, (uint32_t)v);
+}
+
+static inline void my_atomic_store64(int64 volatile *a, int64 v)
+{
+ (void) atomic_swap_64((volatile uint64_t *)a, (uint64_t)v);
+}
+
+static inline void my_atomic_storeptr(void * volatile *a, void *v)
+{
+ (void) atomic_swap_ptr((volatile void **)a, (void *)v);
+}
+
+#endif /* ATOMIC_SOLARIS_INCLUDED */
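Note the add wrappers: Solaris's atomic_add_32_nv() returns the value after the add, so the code subtracts v to preserve the fetch-and-add (return old value) contract of my_atomic_add32(). A Solaris-only sketch of that adjustment, illustrative only; the fetch_add32 helper and hits variable are invented here.

/* sketch (Solaris only): old-value add on top of <atomic.h> */
#include <atomic.h>
#include <inttypes.h>
#include <stdio.h>

static volatile uint32_t hits;

static uint32_t fetch_add32(volatile uint32_t *a, uint32_t v)
{
  /* atomic_add_32_nv() returns the value *after* the add ... */
  uint32_t nv= atomic_add_32_nv(a, (int32_t)v);
  return nv - v;                    /* ... so subtract to get the old value */
}

int main(void)
{
  uint32_t before= fetch_add32(&hits, 5);
  printf("before=%" PRIu32 " after=%" PRIu32 "\n", before, hits);
  return 0;
}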
diff --git a/include/atomic/x86-gcc.h b/include/atomic/x86-gcc.h
deleted file mode 100644
index 3a081d9bbc5..00000000000
--- a/include/atomic/x86-gcc.h
+++ /dev/null
@@ -1,151 +0,0 @@
-#ifndef ATOMIC_X86_GCC_INCLUDED
-#define ATOMIC_X86_GCC_INCLUDED
-
-/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-
-/*
- XXX 64-bit atomic operations can be implemented using
- cmpxchg8b, if necessary. Though I've heard that not all 64-bit
- architectures support double-word (128-bit) cas.
-*/
-
-/*
- No special support of 8 and 16 bit operations are implemented here
- currently.
-*/
-#undef MY_ATOMIC_HAS_8_AND_16
-
-#ifdef MY_ATOMIC_MODE_DUMMY
-#define LOCK_prefix ""
-#else
-#define LOCK_prefix "lock"
-#endif
-
-#ifdef __x86_64__
-# ifdef MY_ATOMIC_NO_XADD
-# define MY_ATOMIC_MODE "gcc-amd64" LOCK_prefix "-no-xadd"
-# else
-# define MY_ATOMIC_MODE "gcc-amd64" LOCK_prefix
-# endif
-#else
-# ifdef MY_ATOMIC_NO_XADD
-# define MY_ATOMIC_MODE "gcc-x86" LOCK_prefix "-no-xadd"
-# else
-# define MY_ATOMIC_MODE "gcc-x86" LOCK_prefix
-# endif
-#endif
-
-/* fix -ansi errors while maintaining readability */
-#ifndef asm
-#define asm __asm__
-#endif
-
-#ifndef MY_ATOMIC_NO_XADD
-#define make_atomic_add_body(S) make_atomic_add_body ## S
-#define make_atomic_cas_body(S) make_atomic_cas_body ## S
-#endif
-
-#define make_atomic_add_body32 \
- asm volatile (LOCK_prefix "; xadd %0, %1;" \
- : "+r" (v), "=m" (*a) \
- : "m" (*a) \
- : "memory")
-
-#define make_atomic_cas_body32 \
- __typeof__(*cmp) sav; \
- asm volatile (LOCK_prefix "; cmpxchg %3, %0; setz %2;" \
- : "=m" (*a), "=a" (sav), "=q" (ret) \
- : "r" (set), "m" (*a), "a" (*cmp) \
- : "memory"); \
- if (!ret) \
- *cmp= sav
-
-#ifdef __x86_64__
-#define make_atomic_add_body64 make_atomic_add_body32
-#define make_atomic_cas_body64 make_atomic_cas_body32
-
-#define make_atomic_fas_body(S) \
- asm volatile ("xchg %0, %1;" \
- : "+r" (v), "=m" (*a) \
- : "m" (*a) \
- : "memory")
-
-/*
- Actually 32/64-bit reads/writes are always atomic on x86_64,
- nonetheless issue memory barriers as appropriate.
-*/
-#define make_atomic_load_body(S) \
- /* Serialize prior load and store operations. */ \
- asm volatile ("mfence" ::: "memory"); \
- ret= *a; \
- /* Prevent compiler from reordering instructions. */ \
- asm volatile ("" ::: "memory")
-#define make_atomic_store_body(S) \
- asm volatile ("; xchg %0, %1;" \
- : "=m" (*a), "+r" (v) \
- : "m" (*a) \
- : "memory")
-
-#else
-/*
- Use default implementations of 64-bit operations since we solved
- the 64-bit problem on 32-bit platforms for CAS, no need to solve it
- once more for ADD, LOAD, STORE and FAS as well.
- Since we already added add32 support, we need to define add64
- here, but we haven't defined fas, load and store at all, so
- we can fallback on default implementations.
-*/
-#define make_atomic_add_body64 \
- int64 tmp=*a; \
- while (!my_atomic_cas64(a, &tmp, tmp+v)) ; \
- v=tmp;
-
-/*
- On some platforms (e.g. Mac OS X and Solaris) the ebx register
- is held as a pointer to the global offset table. Thus we're not
- allowed to use the b-register on those platforms when compiling
- PIC code, to avoid this we push ebx and pop ebx. The new value
- is copied directly from memory to avoid problems with a implicit
- manipulation of the stack pointer by the push.
-
- cmpxchg8b works on both 32-bit platforms and 64-bit platforms but
- the code here is only used on 32-bit platforms, on 64-bit
- platforms the much simpler make_atomic_cas_body32 will work
- fine.
-*/
-#define make_atomic_cas_body64 \
- asm volatile ("push %%ebx;" \
- "movl (%%ecx), %%ebx;" \
- "movl 4(%%ecx), %%ecx;" \
- LOCK_prefix "; cmpxchg8b (%%esi);" \
- "setz %2; pop %%ebx" \
- : "+S" (a), "+A" (*cmp), "=c" (ret) \
- : "c" (&set) \
- : "memory", "esp")
-#endif
-
-/*
- The implementation of make_atomic_cas_body32 is adaptable to
- the OS word size, so on 64-bit platforms it will automatically
- adapt to 64-bits and so it will work also on 64-bit platforms
-*/
-#define make_atomic_cas_bodyptr make_atomic_cas_body32
-
-#ifdef MY_ATOMIC_MODE_DUMMY
-#define make_atomic_load_body(S) ret=*a
-#define make_atomic_store_body(S) *a=v
-#endif
-#endif /* ATOMIC_X86_GCC_INCLUDED */
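The hand-written x86 assembly deleted here (lock xadd, lock cmpxchg, and the PIC-safe cmpxchg8b sequence for 64-bit CAS on 32-bit targets) is what GCC already emits for the __sync/__atomic builtins the remaining headers use, which is why the file can go. A minimal sketch of the fetch-and-add expressed with the builtin instead of inline asm, illustrative only:

/* sketch: the lock xadd above, written as the builtin that replaces it */
#include <stdint.h>
#include <stdio.h>

static int32_t counter;

int main(void)
{
  /* compiles to "lock xadd" on x86/x86_64, like make_atomic_add_body32 did */
  int32_t old= __atomic_fetch_add(&counter, 7, __ATOMIC_SEQ_CST);
  printf("old=%d new=%d\n", (int)old, (int)counter);
  return 0;
}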