/* ----------------------------------------------------------------------------
*
* (c) The GHC Team, 2006-2009
*
* Spin locks
*
* These are simple spin-only locks as opposed to Mutexes which
* probably spin for a while before blocking in the kernel. We use
* these when we are sure that all our threads are actively running on
 * a CPU, e.g. in the GC.
*
* TODO: measure whether we really need these, or whether Mutexes
* would do (and be a bit safer if a CPU becomes loaded).
*
* Do not #include this file directly: #include "Rts.h" instead.
*
* To understand the structure of the RTS headers, see the wiki:
* https://gitlab.haskell.org/ghc/ghc/wikis/commentary/source-tree/includes
*
* -------------------------------------------------------------------------- */
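
/* For context, a hedged sketch of how a SpinLock is typically used by RTS
 * code. initSpinLock, ACQUIRE_SPIN_LOCK and RELEASE_SPIN_LOCK are assumed
 * here to be the public interface declared in the SpinLock header; this
 * file provides only the contended slow path behind that interface:
 *
 *     SpinLock gc_alloc_block_sync;          // e.g. protecting a GC structure
 *     initSpinLock(&gc_alloc_block_sync);    // lock starts out available
 *
 *     ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
 *     // ... short critical section; every thread is busy on a CPU ...
 *     RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
 */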
#include "PosixSource.h"
#include "Rts.h"
#if defined(THREADED_RTS)
#if defined(PROF_SPIN)
ATTR_ALWAYS_INLINE static inline bool try_acquire_spin_slow_path(SpinLock * p)
{
    StgWord r;
    // cas returns the previous value of p->lock: 1 means the lock was
    // available and we now own it; 0 means it was already held.
    r = cas((StgVolatilePtr)&(p->lock), 1, 0);
    // Count the failed acquisition attempt for spin-lock profiling.
    if (r == 0) RELAXED_ADD(&p->spin, 1);
    return r != 0;
}
#else /* !PROF_SPIN */

ATTR_ALWAYS_INLINE static inline bool try_acquire_spin_slow_path(SpinLock * p)
{
    StgWord r;
    // Note
    //
    // Here we first check whether we can obtain the lock without trying to
    // cas. The cas instruction adds extra inter-CPU traffic on most CPU
    // architectures, as it has to invalidate cache lines. Rather than
    // generating this traffic on every iteration of the spin loop, we
    // restrict it to times when the lock might actually be available.
    //
    // We do not need to do this when PROF_SPIN is enabled, since we write to
    // the lock in both cases (acquired/not acquired).
    r = RELAXED_LOAD(&p->lock);
    if (r != 0) {
        r = cas((StgVolatilePtr)&(p->lock), 1, 0);
    }
    return r != 0;
}
#endif /* PROF_SPIN */
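
/* The pattern above is test-and-test-and-set (TTAS). As a standalone
 * illustration, independent of the RTS types and macros, the same idea in
 * portable C11 atomics looks roughly like this (a sketch, not RTS code;
 * ttas_try_lock is a hypothetical name):
 *
 *     #include <stdatomic.h>
 *     #include <stdbool.h>
 *
 *     static bool ttas_try_lock(atomic_uint *lock)   // 1 = free, 0 = held
 *     {
 *         // Cheap read first: spinning on a plain load keeps the cache
 *         // line in the shared state; only attempt the read-modify-write
 *         // when the lock looks free.
 *         if (atomic_load_explicit(lock, memory_order_relaxed) == 0)
 *             return false;
 *         unsigned expected = 1;
 *         return atomic_compare_exchange_strong(lock, &expected, 0);
 *     }
 */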
void acquire_spin_lock_slow_path(SpinLock * p)
{
    do {
        // Spin for a bounded number of iterations while the lock is
        // contended...
        for (uint32_t i = 0; i < SPIN_COUNT; i++) {
            if (try_acquire_spin_slow_path(p)) return;
            busy_wait_nop();
        }
        // ...then yield the OS thread and start another round of spinning.
        IF_PROF_SPIN(RELAXED_ADD(&p->yield, 1));
        yieldThread();
    } while (1);
}
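
/* Callers do not normally invoke acquire_spin_lock_slow_path directly: the
 * header-side fast path tries once inline and falls back here only on
 * contention. A sketch of that split (the exact names in the SpinLock
 * header may differ slightly):
 *
 *     INLINE_HEADER void ACQUIRE_SPIN_LOCK(SpinLock * p)
 *     {
 *         // Uncontended case: a single cas, no spinning, no function call.
 *         if (RTS_UNLIKELY(!try_acquire_spin_fast_path(p)))
 *             acquire_spin_lock_slow_path(p);
 *     }
 */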
#endif /* THREADED_RTS */