Diffstat (limited to 'compiler/GHC/CmmToAsm/X86/CodeGen.hs')
-rw-r--r--  compiler/GHC/CmmToAsm/X86/CodeGen.hs  7
1 file changed, 5 insertions, 2 deletions
diff --git a/compiler/GHC/CmmToAsm/X86/CodeGen.hs b/compiler/GHC/CmmToAsm/X86/CodeGen.hs
index 5e7c261cbb..1ab24c4a25 100644
--- a/compiler/GHC/CmmToAsm/X86/CodeGen.hs
+++ b/compiler/GHC/CmmToAsm/X86/CodeGen.hs
@@ -2595,10 +2595,11 @@ genCCall' _ _ (PrimTarget (MO_AtomicWrite width)) [] [addr, val] _ = do
code <- assignMem_IntCode (intFormat width) addr val
return $ code `snocOL` MFENCE
-genCCall' _ is32Bit (PrimTarget (MO_Cmpxchg width)) [dst] [addr, old, new] _ = do
+genCCall' _ is32Bit (PrimTarget (MO_Cmpxchg width)) [dst] [addr, old, new] _
-- On x86 we don't have enough registers to use cmpxchg with a
-- complicated addressing mode, so on that architecture we
-- pre-compute the address first.
+ | not (is32Bit && width == W64) = do
Amode amode addr_code <- getSimpleAmode is32Bit addr
newval <- getNewRegNat format
newval_code <- getAnyReg new
@@ -3441,7 +3442,9 @@ outOfLineCmmOp bid mop res args
MO_AtomicRMW _ _ -> fsLit "atomicrmw"
MO_AtomicRead _ -> fsLit "atomicread"
MO_AtomicWrite _ -> fsLit "atomicwrite"
- MO_Cmpxchg _ -> fsLit "cmpxchg"
+ MO_Cmpxchg w -> cmpxchgLabel w -- for W64 on 32-bit
+ -- TODO: implement
+ -- cmpxchg8b instr
MO_Xchg _ -> should_be_inline
MO_UF_Conv _ -> unsupported
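The change has two halves. On 32-bit x86 a 64-bit compare-and-swap cannot be produced by the inline CMPXCHG path (it would need the CMPXCHG8B instruction, which this code generator does not emit yet, as the TODO notes), so the new guard not (is32Bit && width == W64) makes the inline equation of genCCall' skip that case and fall through to the generic path, which emits a call to an out-of-line helper via outOfLineCmmOp. The second hunk then makes outOfLineCmmOp pick a width-specific symbol through cmpxchgLabel instead of the single "cmpxchg" label, so the W64-on-32-bit case resolves to a real helper.

cmpxchgLabel itself is not part of the hunks shown above. Below is a minimal sketch of what such a helper could look like, assuming it maps each width to the hs_cmpxchgN C functions shipped with ghc-prim (cbits/atomic.c); the actual definition in CodeGen.hs may differ, and in practice it would likely be a local binding rather than a top-level one with its own imports.

    -- Sketch only: pick the out-of-line compare-and-swap helper for a width.
    -- Assumes the hs_cmpxchg{8,16,32,64} symbols provided by ghc-prim.
    import GHC.Cmm.Type (Width (..))
    import GHC.Data.FastString (FastString, fsLit)
    import GHC.Utils.Panic (panic)

    cmpxchgLabel :: Width -> FastString
    cmpxchgLabel W8  = fsLit "hs_cmpxchg8"
    cmpxchgLabel W16 = fsLit "hs_cmpxchg16"
    cmpxchgLabel W32 = fsLit "hs_cmpxchg32"
    cmpxchgLabel W64 = fsLit "hs_cmpxchg64"   -- the case the new guard routes out of line
    cmpxchgLabel _   = panic "cmpxchgLabel: unsupported width"

With something like this in place, MO_Cmpxchg W64 on a 32-bit target compiles to a call to an out-of-line helper instead of hitting the inline code it cannot support; emitting CMPXCHG8B directly, as the TODO suggests, would later remove the call overhead for that case.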