author | Simon Marlow <marlowsd@gmail.com> | 2013-12-13 20:06:40 +0000
---|---|---
committer | Simon Marlow <marlowsd@gmail.com> | 2014-12-15 22:40:04 +0000
commit | 192128da38b0aa9b23cd2df45c92eb20529f1c75 (patch) |
tree | 943f80ccc33efaac3d01d2d6e543921c057a60b0 /includes |
parent | a30dbc64e21c09008e8500f6ff41d76a52e57d46 (diff) |
download | haskell-192128da38b0aa9b23cd2df45c92eb20529f1c75.tar.gz |
Diffstat (limited to 'includes')
-rw-r--r-- | includes/stg/SMP.h | 12 |
1 file changed, 11 insertions(+), 1 deletion(-)
```diff
diff --git a/includes/stg/SMP.h b/includes/stg/SMP.h
index f6fd394087..5ded05d05e 100644
--- a/includes/stg/SMP.h
+++ b/includes/stg/SMP.h
@@ -312,12 +312,22 @@ atomic_dec(StgVolatilePtr p)
 #endif
 }
 
+/*
+ * Some architectures have a way to tell the CPU that we're in a
+ * busy-wait loop, and the processor should look for something else to
+ * do (such as run another hardware thread).
+ */
 EXTERN_INLINE void
 busy_wait_nop(void)
 {
 #if defined(i386_HOST_ARCH) || defined(x86_64_HOST_ARCH)
+    // On Intel, the busy-wait-nop instruction is called "pause",
+    // which is actually represented as a nop with the rep prefix.
+    // On processors before the P4 this behaves as a nop; on P4 and
+    // later it might do something clever like yield to another
+    // hyperthread. In any case, Intel recommends putting one
+    // of these in a spin lock loop.
     __asm__ __volatile__ ("rep; nop");
-    //
 #else
     // nothing
 #endif
```
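
The added comment notes that Intel recommends placing this instruction inside a spin lock loop. For context, here is a minimal sketch of such a loop in plain C with GCC builtins; it is not GHC's actual SpinLock code, and every name in it (`spinlock_sketch`, `spin_lock_sketch`, `busy_wait_nop_sketch`) is illustrative only:

```c
/* Minimal sketch: a test-and-test-and-set spinlock whose retry loop
 * issues the busy-wait nop.  All names here are hypothetical, not GHC's. */
#include <stdint.h>

typedef volatile uintptr_t spinlock_sketch;

static inline void busy_wait_nop_sketch(void)
{
#if defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__ ("rep; nop");  /* i.e. the "pause" instruction */
#else
    /* nothing: plain spinning on other architectures */
#endif
}

static inline void spin_lock_sketch(spinlock_sketch *l)
{
    /* Atomically set the lock word to 1; a nonzero return means it was
     * already held, so poll it read-only until it looks free, pausing
     * between reads, then retry the atomic swap. */
    while (__sync_lock_test_and_set(l, 1) != 0) {
        while (*l != 0) {
            busy_wait_nop_sketch();
        }
    }
}

static inline void spin_unlock_sketch(spinlock_sketch *l)
{
    __sync_lock_release(l);  /* store 0 with release semantics */
}
```

The pause goes inside the inner polling loop: while the lock is contended it lets the core hand execution resources to a sibling hardware thread instead of hammering the cache line, which is exactly the behaviour the patch's comment describes.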