path: root/include/atomic/generic-msvc.h
Diffstat (limited to 'include/atomic/generic-msvc.h')
-rw-r--r--  include/atomic/generic-msvc.h  206
1 file changed, 122 insertions, 84 deletions
diff --git a/include/atomic/generic-msvc.h b/include/atomic/generic-msvc.h
index d06229ce5ef..56fa4f66fcd 100644
--- a/include/atomic/generic-msvc.h
+++ b/include/atomic/generic-msvc.h
@@ -1,5 +1,7 @@
-/* Copyright (c) 2006-2008 MySQL AB, 2009 Sun Microsystems, Inc.
- Use is subject to license terms.
+#ifndef ATOMIC_MSC_INCLUDED
+#define ATOMIC_MSC_INCLUDED
+
+/* Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -14,79 +16,132 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-#ifndef _atomic_h_cleanup_
-#define _atomic_h_cleanup_ "atomic/generic-msvc.h"
-
#include <windows.h>
+
+static inline int my_atomic_cas32(int32 volatile *a, int32 *cmp, int32 set)
+{
+ int32 initial_cmp= *cmp;
+ int32 initial_a= InterlockedCompareExchange((volatile LONG*)a,
+ set, initial_cmp);
+ int ret= (initial_a == initial_cmp);
+ if (!ret)
+ *cmp= initial_a;
+ return ret;
+}
+
+static inline int my_atomic_cas64(int64 volatile *a, int64 *cmp, int64 set)
+{
+ int64 initial_cmp= *cmp;
+ int64 initial_a= InterlockedCompareExchange64((volatile LONGLONG*)a,
+ (LONGLONG)set,
+ (LONGLONG)initial_cmp);
+ int ret= (initial_a == initial_cmp);
+ if (!ret)
+ *cmp= initial_a;
+ return ret;
+}
+
+static inline int my_atomic_casptr(void * volatile *a, void **cmp, void *set)
+{
+ void *initial_cmp= *cmp;
+ void *initial_a= InterlockedCompareExchangePointer(a, set, initial_cmp);
+ int ret= (initial_a == initial_cmp);
+ if (!ret)
+ *cmp= initial_a;
+ return ret;
+}
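
    For illustration only (not part of the patch): callers typically wrap
    these CAS primitives in a retry loop, relying on the failure path having
    refreshed *cmp with the observed value, so no extra load is needed
    between attempts. A minimal sketch, with atomic_fetch_or32 as a
    hypothetical helper name:

    static int32 atomic_fetch_or32(int32 volatile *a, int32 mask)
    {
      int32 expected= *a;               /* initial guess, may be stale */
      while (!my_atomic_cas32(a, &expected, expected | mask))
      {
        /* CAS failed: `expected` now holds the observed value; retry */
      }
      return expected;                  /* the value before our OR */
    }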
+
+static inline int32 my_atomic_add32(int32 volatile *a, int32 v)
+{
+ return (int32)InterlockedExchangeAdd((volatile LONG*)a, v);
+}
+
+static inline int64 my_atomic_add64(int64 volatile *a, int64 v)
+{
+ return (int64)InterlockedExchangeAdd64((volatile LONGLONG*)a, (LONGLONG)v);
+}
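
    Not from the patch, for illustration: InterlockedExchangeAdd returns the
    value the variable held before the addition, so these wrappers are
    fetch-and-add primitives. A hedged reference-counting sketch (my_release
    is a hypothetical name; assumes <stdlib.h> for free()):

    #include <stdlib.h>

    static void my_release(int32 volatile *refcount, void *obj)
    {
      /* my_atomic_add32 returns the value *before* the addition */
      if (my_atomic_add32(refcount, -1) == 1)
        free(obj);                      /* we just dropped the last ref */
    }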
+
+
/*
- x86 compilers (both VS2003 and VS2005) never use intrinsics, but generate
- function calls to kernel32 instead, even in the optimized build.
- We force intrinsics as described in MSDN documentation for
- _InterlockedCompareExchange.
+ According to MSDN:
+
+ Simple reads and writes to properly-aligned 32-bit variables are atomic
+ operations.
+ ...
+ Simple reads and writes to properly aligned 64-bit variables are atomic on
+ 64-bit Windows. Reads and writes to 64-bit values are not guaranteed to be
+ atomic on 32-bit Windows.
+
+ https://msdn.microsoft.com/en-us/library/windows/desktop/ms684122(v=vs.85).aspx
*/
-#ifdef _M_IX86
-#if (_MSC_VER >= 1500)
-#include <intrin.h>
+static inline int32 my_atomic_load32(int32 volatile *a)
+{
+ int32 value= *a;
+ MemoryBarrier();
+ return value;
+}
+
+static inline int64 my_atomic_load64(int64 volatile *a)
+{
+#ifdef _M_X64
+ int64 value= *a;
+ MemoryBarrier();
+ return value;
#else
-C_MODE_START
-/*Visual Studio 2003 and earlier do not have prototypes for atomic intrinsics*/
-LONG _InterlockedCompareExchange (LONG volatile *Target, LONG Value, LONG Comp);
-LONGLONG _InterlockedCompareExchange64 (LONGLONG volatile *Target,
- LONGLONG Value, LONGLONG Comp);
-C_MODE_END
-
-#pragma intrinsic(_InterlockedCompareExchange)
-#pragma intrinsic(_InterlockedCompareExchange64)
+ return (int64) InterlockedCompareExchange64((volatile LONGLONG *) a, 0, 0);
#endif
+}
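
    As an aside (not in the patch): on 32-bit Windows the #else branch turns
    the load into a no-op InterlockedCompareExchange64 with comparand and
    exchange both 0, which returns the current contents atomically instead
    of risking a torn two-instruction read. A hedged usage sketch
    (bytes_sent, on_send and stats_snapshot are hypothetical):

    static int64 volatile bytes_sent;

    static void on_send(int64 n)            /* writer threads */
    {
      (void) my_atomic_add64(&bytes_sent, n);
    }

    static int64 stats_snapshot(void)       /* monitor thread */
    {
      return my_atomic_load64(&bytes_sent); /* no torn reads, even on x86 */
    }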
-#define InterlockedCompareExchange _InterlockedCompareExchange
-#define InterlockedCompareExchange64 _InterlockedCompareExchange64
-/*
- No need to do something special for InterlockedCompareExchangePointer
- as it is a #define to InterlockedCompareExchange. The same applies to
- InterlockedExchangePointer.
-*/
-#endif /*_M_IX86*/
-
-#define MY_ATOMIC_MODE "msvc-intrinsics"
-/* Implement using CAS on WIN32 */
-#define IL_COMP_EXCHG32(X,Y,Z) \
- InterlockedCompareExchange((volatile LONG *)(X),(Y),(Z))
-#define IL_COMP_EXCHG64(X,Y,Z) \
- InterlockedCompareExchange64((volatile LONGLONG *)(X), \
- (LONGLONG)(Y),(LONGLONG)(Z))
-#define IL_COMP_EXCHGptr InterlockedCompareExchangePointer
-
-#define make_atomic_cas_body(S) \
- int ## S initial_cmp= *cmp; \
- int ## S initial_a= IL_COMP_EXCHG ## S (a, set, initial_cmp); \
- if (!(ret= (initial_a == initial_cmp))) *cmp= initial_a;
-
-#ifndef _M_IX86
-/* Use full set of optimised functions on WIN64 */
-#define IL_EXCHG_ADD32(X,Y) \
- InterlockedExchangeAdd((volatile LONG *)(X),(Y))
-#define IL_EXCHG_ADD64(X,Y) \
- InterlockedExchangeAdd64((volatile LONGLONG *)(X),(LONGLONG)(Y))
-#define IL_EXCHG32(X,Y) \
- InterlockedExchange((volatile LONG *)(X),(Y))
-#define IL_EXCHG64(X,Y) \
- InterlockedExchange64((volatile LONGLONG *)(X),(LONGLONG)(Y))
-#define IL_EXCHGptr InterlockedExchangePointer
-
-#define make_atomic_add_body(S) \
- v= IL_EXCHG_ADD ## S (a, v)
-#define make_atomic_swap_body(S) \
- v= IL_EXCHG ## S (a, v)
-#define make_atomic_load_body(S) \
- ret= 0; /* avoid compiler warning */ \
- ret= IL_COMP_EXCHG ## S (a, ret, ret);
+static inline void* my_atomic_loadptr(void * volatile *a)
+{
+ void *value= *a;
+ MemoryBarrier();
+ return value;
+}
+
+static inline int32 my_atomic_fas32(int32 volatile *a, int32 v)
+{
+ return (int32)InterlockedExchange((volatile LONG*)a, v);
+}
+
+static inline int64 my_atomic_fas64(int64 volatile *a, int64 v)
+{
+ return (int64)InterlockedExchange64((volatile LONGLONG*)a, v);
+}
+
+static inline void * my_atomic_fasptr(void * volatile *a, void * v)
+{
+ return InterlockedExchangePointer(a, v);
+}
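
    For illustration only (not part of the patch): the fetch-and-store
    ("fas") wrappers return the previous contents, which makes single-slot
    pointer hand-off race-free: exactly one thread receives each old value.
    A hedged sketch (struct my_config and swap_config are hypothetical):

    struct my_config;   /* hypothetical type; pointers need no definition */

    static struct my_config *swap_config(void * volatile *slot,
                                         struct my_config *new_cfg)
    {
      /* the previous pointer comes back exactly once, safe to dispose */
      return (struct my_config *) my_atomic_fasptr(slot, (void *) new_cfg);
    }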
+
+static inline void my_atomic_store32(int32 volatile *a, int32 v)
+{
+ MemoryBarrier();
+ *a= v;
+}
+
+static inline void my_atomic_store64(int64 volatile *a, int64 v)
+{
+#ifdef _M_X64
+ MemoryBarrier();
+ *a= v;
+#else
+ (void) InterlockedExchange64((volatile LONGLONG *) a, v);
#endif
+}
+
+static inline void my_atomic_storeptr(void * volatile *a, void *v)
+{
+ MemoryBarrier();
+ *a= v;
+}
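
    Not from the patch, for illustration: because each store issues
    MemoryBarrier() before the write and each load issues it after the read,
    a storeptr/loadptr pair gives safe single-pointer publication. A hedged
    sketch (struct my_data, publish and try_consume are hypothetical):

    struct my_data;     /* hypothetical payload type */

    static void publish(void * volatile *slot, struct my_data *d)
    {
      /* all initialization of *d must precede this call; the barrier
         inside my_atomic_storeptr keeps those writes from sinking below */
      my_atomic_storeptr(slot, (void *) d);
    }

    static struct my_data *try_consume(void * volatile *slot)
    {
      /* the barrier inside my_atomic_loadptr keeps later reads of the
         payload from being hoisted above the pointer load */
      return (struct my_data *) my_atomic_loadptr(slot);
    }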
+
+
/*
my_yield_processor (equivalent of x86 PAUSE instruction) should be used
to improve performance on hyperthreaded CPUs. Intel recommends to use it in
- spin loops also on non-HT machines to reduce power consumption (see e.g
+ spin loops also on non-HT machines to reduce power consumption (see e.g
http://softwarecommunity.intel.com/articles/eng/2004.htm)
Running benchmarks for spinlocks implemented with InterlockedCompareExchange
@@ -94,35 +149,18 @@ C_MODE_END
YieldProcessor in a loop - that is, yielding longer. On Intel boxes setting
loop count in the range 200-300 brought best results.
*/
-#ifndef YIELD_LOOPS
#define YIELD_LOOPS 200
-#endif
-static __inline int my_yield_processor()
+static inline int my_yield_processor()
{
int i;
- for(i=0; i<YIELD_LOOPS; i++)
+ for (i=0; i<YIELD_LOOPS; i++)
{
-#if (_MSC_VER <= 1310)
- /* On older compilers YieldProcessor is not available, use inline assembly*/
- __asm { rep nop }
-#else
YieldProcessor();
-#endif
}
return 1;
}
#define LF_BACKOFF my_yield_processor()
-#else /* cleanup */
-
-#undef IL_EXCHG_ADD32
-#undef IL_EXCHG_ADD64
-#undef IL_COMP_EXCHG32
-#undef IL_COMP_EXCHG64
-#undef IL_COMP_EXCHGptr
-#undef IL_EXCHG32
-#undef IL_EXCHG64
-#undef IL_EXCHGptr
-#endif
+#endif /* ATOMIC_MSC_INCLUDED */
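
    For illustration only (not part of the patch): a hedged sketch of how
    LF_BACKOFF is meant to be used, pairing my_atomic_cas32 with the yield
    loop in a test-and-set style spinlock (spin_acquire and spin_release
    are hypothetical names):

    static void spin_acquire(int32 volatile *lock)
    {
      for (;;)
      {
        int32 expected= 0;             /* CAS overwrites this on failure */
        if (my_atomic_cas32(lock, &expected, 1))
          return;                      /* flipped 0 -> 1: lock acquired */
        (void) LF_BACKOFF;             /* ~YIELD_LOOPS x YieldProcessor() */
      }
    }

    static void spin_release(int32 volatile *lock)
    {
      my_atomic_store32(lock, 0);      /* barrier precedes the store */
    }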