/* Copyright (C) 2006-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifndef _atomic_h_cleanup_
#define _atomic_h_cleanup_ "atomic/generic-msvc.h"
/*
  We don't implement anything specific for MY_ATOMIC_MODE_DUMMY; we always use
  intrinsics.
  8- and 16-bit atomics are not implemented, but they can be added if necessary.
*/
/*
  x86 compilers (both VS2003 and VS2005) do not use intrinsics by default, but
  generate function calls to kernel32 instead, even in optimized builds.
  We force intrinsics as described in the MSDN documentation for
  _InterlockedCompareExchange.
*/
#ifdef _M_IX86
#if (_MSC_VER >= 1400)
#include <intrin.h>
#else
/* Visual Studio 2003 and earlier do not have prototypes for atomic intrinsics */
LONG _InterlockedExchange (LONG volatile *Target,LONG Value);
LONG _InterlockedCompareExchange (LONG volatile *Target, LONG Value, LONG Comp);
LONG _InterlockedExchangeAdd (LONG volatile *Addend, LONG Value);
#pragma intrinsic(_InterlockedExchangeAdd)
#pragma intrinsic(_InterlockedCompareExchange)
#pragma intrinsic(_InterlockedExchange)
#endif
#define InterlockedExchange _InterlockedExchange
#define InterlockedExchangeAdd _InterlockedExchangeAdd
#define InterlockedCompareExchange _InterlockedCompareExchange
/*
  No need to do anything special for InterlockedCompareExchangePointer, as it
  is a #define to InterlockedCompareExchange. The same applies to
  InterlockedExchangePointer.
*/
#endif /*_M_IX86*/
#define MY_ATOMIC_MODE "msvc-intrinsics"
#define IL_EXCHG_ADD32(X,Y) InterlockedExchangeAdd((volatile LONG *)(X),(Y))
#define IL_COMP_EXCHG32(X,Y,Z) InterlockedCompareExchange((volatile LONG *)(X),(Y),(Z))
#define IL_COMP_EXCHGptr InterlockedCompareExchangePointer
#define IL_EXCHG32(X,Y) InterlockedExchange((volatile LONG *)(X),(Y))
#define IL_EXCHGptr InterlockedExchangePointer
#define make_atomic_add_body(S) \
  v= IL_EXCHG_ADD ## S (a, v)
#define make_atomic_cas_body(S) \
  int ## S initial_cmp= *cmp; \
  int ## S initial_a= IL_COMP_EXCHG ## S (a, set, initial_cmp); \
  if (!(ret= (initial_a == initial_cmp))) *cmp= initial_a;
#define make_atomic_swap_body(S) \
  v= IL_EXCHG ## S (a, v)
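/*
  Illustrative sketch, not part of this header: my_atomic.h expands the
  make_atomic_*_body macros above into typed wrappers (names such as
  my_atomic_cas32() are assumed from the surrounding my_atomic framework).
  A typical compare-and-swap retry loop built on such a wrapper:

    int32 old_v= counter, new_v;
    do
    {
      new_v= old_v + 1;          // recompute from the latest observed value
    } while (!my_atomic_cas32(&counter, &old_v, new_v));

  On failure the CAS body stores the value it actually observed into *cmp
  (old_v here), so the loop retries with fresh data without a separate reload.
*/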
/*
  my_yield_processor (the equivalent of the x86 PAUSE instruction) should be
  used to improve performance on hyperthreaded CPUs. Intel recommends using it
  in spin loops on non-HT machines as well, to reduce power consumption (see
  e.g. http://softwarecommunity.intel.com/articles/eng/2004.htm).
  Running benchmarks for spinlocks implemented with InterlockedCompareExchange
  and YieldProcessor shows that much better performance is achieved by calling
  YieldProcessor in a loop, that is, by yielding longer. On Intel boxes a loop
  count in the range 200-300 gave the best results.
*/
#ifndef YIELD_LOOPS
#define YIELD_LOOPS 200
#endif
static __inline int my_yield_processor()
{
  int i;
  for (i= 0; i < YIELD_LOOPS; i++)
  {
#if (_MSC_VER <= 1310)
    /* On older compilers YieldProcessor is not available; use inline assembly */
    __asm { rep nop }
#else
    YieldProcessor();
#endif
  }
  return 1;
}
#define LF_BACKOFF my_yield_processor()
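/*
  Illustrative sketch, not part of this header: a minimal test-and-test-and-set
  spinlock built from the macros above. The lock word "lw" and the function
  names are hypothetical. InterlockedCompareExchange returns the value the
  lock word held before the call, so a return of 0 means we installed the 1:

    static int32 volatile lw= 0;

    static __inline void example_spin_lock()
    {
      while (lw != 0 || IL_COMP_EXCHG32(&lw, 1, 0) != 0)
        (void) LF_BACKOFF;         // ~YIELD_LOOPS pause slots per retry
    }

    static __inline void example_spin_unlock()
    {
      lw= 0;                       // plain store; x86 stores have release semantics
    }

  Testing lw before attempting the interlocked operation keeps the cache line
  in shared state while the lock is held, reducing bus traffic under contention.
*/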
#else /* cleanup */
#undef IL_EXCHG_ADD32
#undef IL_COMP_EXCHG32
#undef IL_COMP_EXCHGptr
#undef IL_EXCHG32
#undef IL_EXCHGptr
#endif