author    David Terei <davidterei@gmail.com>  2012-01-03 18:07:05 +1100
committer David Terei <davidterei@gmail.com>  2012-01-09 17:00:55 -0800
commit    4384e146640230399b38cd62e8e5df391f72c3a7 (patch)
tree      b619fb40002d7e66d9ba38c7232111e66872abbf
parent    43178674471928560dc645983ce6f185b20a5a26 (diff)
download  haskell-4384e146640230399b38cd62e8e5df391f72c3a7.tar.gz
Track STG live register information for use in LLVM
We now carry around with CmmJump statements a list of the STG registers that are live at that jump site. This is used by the LLVM backend so it can avoid unnecessarily passing around dead registers, improving performance. This gives us the framework to finally fix trac #4308.
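To make the idea concrete, here is a minimal standalone Haskell sketch of the scheme (the GlobalReg, Jump and regsToPass names are simplified stand-ins for illustration, not the real GHC Cmm/LLVM types): a jump carries a Maybe [GlobalReg] liveness annotation, and a backend only materialises registers that are either always live or explicitly listed as live at that jump site.

import Data.Maybe (fromMaybe)

-- Toy stand-in for GHC's GlobalReg; only a few registers for illustration.
data GlobalReg = BaseReg | Sp | SpLim | Hp | HpLim | R1 | R2 | F1 | D1
  deriving (Eq, Show)

-- Registers always treated as live (mirrors the patch's 'alwaysLive',
-- where 'node' is R1).
alwaysLive :: [GlobalReg]
alwaysLive = [BaseReg, Sp, Hp, SpLim, HpLim, R1]

-- A jump target plus the liveness annotation this patch introduces:
-- Nothing  -> no information, assume every register is live;
-- Just rs  -> only 'rs' (plus the always-live set) matter.
data Jump = Jump { jumpTarget :: String, jumpLive :: Maybe [GlobalReg] }

-- Which of the active STG registers a backend would actually pass
-- at this jump site.
regsToPass :: [GlobalReg] -> Jump -> [GlobalReg]
regsToPass activeRegs (Jump _ live) = filter keep activeRegs
  where
    keep r = r `elem` alwaysLive || r `elem` fromMaybe activeRegs live

main :: IO ()
main = do
  let active = [BaseReg, Sp, SpLim, Hp, HpLim, R1, R2, F1, D1]
  -- No liveness info: every active register is passed.
  print (regsToPass active (Jump "f_entry" Nothing))
  -- Only R2 live: dead argument registers (F1, D1) are dropped.
  print (regsToPass active (Jump "g_entry" (Just [R2])))

With no liveness information every active register is passed, while with Just [R2] the dead argument registers F1 and D1 are dropped; this is the filtering the LLVM backend's funEpilogue performs in the patch below.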
-rw-r--r--  compiler/cmm/CmmCvt.hs                    4
-rw-r--r--  compiler/cmm/CmmLint.hs                   2
-rw-r--r--  compiler/cmm/CmmOpt.hs                    6
-rw-r--r--  compiler/cmm/CmmParse.y                   8
-rw-r--r--  compiler/cmm/OldCmm.hs                   54
-rw-r--r--  compiler/cmm/OldPprCmm.hs                28
-rw-r--r--  compiler/cmm/PprC.hs                      4
-rw-r--r--  compiler/codeGen/CgClosure.lhs           10
-rw-r--r--  compiler/codeGen/CgCon.lhs               13
-rw-r--r--  compiler/codeGen/CgExpr.lhs               8
-rw-r--r--  compiler/codeGen/CgHeapery.lhs           90
-rw-r--r--  compiler/codeGen/CgInfoTbls.hs            8
-rw-r--r--  compiler/codeGen/CgMonad.lhs              2
-rw-r--r--  compiler/codeGen/CgTailCall.lhs          88
-rw-r--r--  compiler/codeGen/CgUtils.hs               2
-rw-r--r--  compiler/llvmGen/LlvmCodeGen/CodeGen.hs  32
-rw-r--r--  compiler/llvmGen/LlvmCodeGen/Regs.hs      8
-rw-r--r--  compiler/nativeGen/AsmCodeGen.lhs         4
-rw-r--r--  compiler/nativeGen/PPC/CodeGen.hs         2
-rw-r--r--  compiler/nativeGen/SPARC/CodeGen.hs       2
-rw-r--r--  compiler/nativeGen/X86/CodeGen.hs         2
21 files changed, 221 insertions, 156 deletions
diff --git a/compiler/cmm/CmmCvt.hs b/compiler/cmm/CmmCvt.hs
index 42aaabc305..1c09599156 100644
--- a/compiler/cmm/CmmCvt.hs
+++ b/compiler/cmm/CmmCvt.hs
@@ -105,8 +105,10 @@ ofZgraph g = Old.ListGraph $ mapMaybe convert_block $ postorderDfs g
, Just expr' <- maybeInvertCmmExpr expr -> Old.CmmCondBranch expr' fid : tail_of tid
| otherwise -> [Old.CmmCondBranch expr tid, Old.CmmBranch fid]
CmmSwitch arg ids -> [Old.CmmSwitch arg ids]
- CmmCall e _ _ _ _ -> [Old.CmmJump e]
+ -- ToDo: STG Live
+ CmmCall e _ _ _ _ -> [Old.CmmJump e Nothing]
CmmForeignCall {} -> panic "ofZgraph: CmmForeignCall"
tail_of bid = case foldBlockNodesB3 (first, middle, last) block () of
Old.BasicBlock _ stmts -> stmts
where Just block = mapLookup bid $ toBlockMap g
+
diff --git a/compiler/cmm/CmmLint.hs b/compiler/cmm/CmmLint.hs
index a99e5a50a8..bed3b18b8e 100644
--- a/compiler/cmm/CmmLint.hs
+++ b/compiler/cmm/CmmLint.hs
@@ -143,7 +143,7 @@ lintCmmStmt platform labels = lint
then return ()
else cmmLintErr (text "switch scrutinee is not a word: " <> pprPlatform platform e <>
text " :: " <> ppr erep)
- lint (CmmJump e) = lintCmmExpr platform e >> return ()
+ lint (CmmJump e _) = lintCmmExpr platform e >> return ()
lint (CmmReturn) = return ()
lint (CmmBranch id) = checkTarget id
checkTarget id = if setMember id labels then return ()
diff --git a/compiler/cmm/CmmOpt.hs b/compiler/cmm/CmmOpt.hs
index 84f106980e..ae715a9eb7 100644
--- a/compiler/cmm/CmmOpt.hs
+++ b/compiler/cmm/CmmOpt.hs
@@ -65,7 +65,7 @@ cmmEliminateDeadBlocks blocks@(BasicBlock base_id _:_) =
stmt m (CmmBranch b) = b:m
stmt m (CmmCondBranch e b) = b:(expr m e)
stmt m (CmmSwitch e bs) = catMaybes bs ++ expr m e
- stmt m (CmmJump e) = expr m e
+ stmt m (CmmJump e _) = expr m e
stmt m (CmmReturn) = m
actuals m as = foldl' (\m h -> expr m (hintlessCmm h)) m as
-- We have to do a deep fold into CmmExpr because
@@ -273,7 +273,7 @@ inlineStmt u a (CmmCall target regs es ret)
es' = [ (CmmHinted (inlineExpr u a e) hint) | (CmmHinted e hint) <- es ]
inlineStmt u a (CmmCondBranch e d) = CmmCondBranch (inlineExpr u a e) d
inlineStmt u a (CmmSwitch e d) = CmmSwitch (inlineExpr u a e) d
-inlineStmt u a (CmmJump e) = CmmJump (inlineExpr u a e)
+inlineStmt u a (CmmJump e live) = CmmJump (inlineExpr u a e) live
inlineStmt _ _ other_stmt = other_stmt
inlineExpr :: Unique -> CmmExpr -> CmmExpr -> CmmExpr
@@ -669,7 +669,7 @@ cmmLoopifyForC (CmmProc (Just info@(Statics info_lbl _)) entry_lbl
where blocks' = [ BasicBlock id (map do_stmt stmts)
| BasicBlock id stmts <- blocks ]
- do_stmt (CmmJump (CmmLit (CmmLabel lbl))) | lbl == jump_lbl
+ do_stmt (CmmJump (CmmLit (CmmLabel lbl)) _) | lbl == jump_lbl
= CmmBranch top_id
do_stmt stmt = stmt
diff --git a/compiler/cmm/CmmParse.y b/compiler/cmm/CmmParse.y
index f20a05f40f..029c3323db 100644
--- a/compiler/cmm/CmmParse.y
+++ b/compiler/cmm/CmmParse.y
@@ -411,8 +411,8 @@ stmt :: { ExtCode }
{ do as <- sequence $5; doSwitch $2 $3 as $6 }
| 'goto' NAME ';'
{ do l <- lookupLabel $2; stmtEC (CmmBranch l) }
- | 'jump' expr ';'
- { do e <- $2; stmtEC (CmmJump e) }
+ | 'jump' expr vols ';'
+ { do e <- $2; stmtEC (CmmJump e $3) }
| 'return' ';'
{ stmtEC CmmReturn }
| 'if' bool_expr 'goto' NAME
@@ -940,12 +940,12 @@ doStore rep addr_code val_code
emitRetUT :: [(CgRep,CmmExpr)] -> Code
emitRetUT args = do
tickyUnboxedTupleReturn (length args) -- TICK
- (sp, stmts) <- pushUnboxedTuple 0 args
+ (sp, stmts, live) <- pushUnboxedTuple 0 args
emitSimultaneously stmts -- NB. the args might overlap with the stack slots
-- or regs that we assign to, so better use
-- simultaneous assignments here (#3546)
when (sp /= 0) $ stmtC (CmmAssign spReg (cmmRegOffW spReg (-sp)))
- stmtC $ CmmJump (entryCode (CmmLoad (cmmRegOffW spReg sp) bWord))
+ stmtC $ CmmJump (entryCode (CmmLoad (cmmRegOffW spReg sp) bWord)) (Just live)
-- -----------------------------------------------------------------------------
-- If-then-else and boolean expressions
diff --git a/compiler/cmm/OldCmm.hs b/compiler/cmm/OldCmm.hs
index 98e6db627f..7b5917d3bf 100644
--- a/compiler/cmm/OldCmm.hs
+++ b/compiler/cmm/OldCmm.hs
@@ -146,32 +146,46 @@ data CmmStmt
= CmmNop
| CmmComment FastString
- | CmmAssign CmmReg CmmExpr -- Assign to register
+ | CmmAssign CmmReg CmmExpr -- Assign to register
- | CmmStore CmmExpr CmmExpr -- Assign to memory location. Size is
- -- given by cmmExprType of the rhs.
+ | CmmStore CmmExpr CmmExpr -- Assign to memory location. Size is
+ -- given by cmmExprType of the rhs.
- | CmmCall -- A call (foreign, native or primitive), with
- CmmCallTarget
- [HintedCmmFormal] -- zero or more results
- [HintedCmmActual] -- zero or more arguments
- CmmReturnInfo
- -- Some care is necessary when handling the arguments of these, see
- -- [Register parameter passing] and the hack in cmm/CmmOpt.hs
+ | CmmCall -- A call (foreign, native or primitive), with
+ CmmCallTarget
+ [HintedCmmFormal] -- zero or more results
+ [HintedCmmActual] -- zero or more arguments
+ CmmReturnInfo
+ -- Some care is necessary when handling the arguments of these, see
+ -- [Register parameter passing] and the hack in cmm/CmmOpt.hs
| CmmBranch BlockId -- branch to another BB in this fn
| CmmCondBranch CmmExpr BlockId -- conditional branch
- | CmmSwitch CmmExpr [Maybe BlockId] -- Table branch
- -- The scrutinee is zero-based;
- -- zero -> first block
- -- one -> second block etc
- -- Undefined outside range, and when there's a Nothing
-
- | CmmJump CmmExpr -- Jump to another C-- function,
-
- | CmmReturn -- Return from a native C-- function,
+ | CmmSwitch -- Table branch
+ CmmExpr -- The scrutinee is zero-based;
+ [Maybe BlockId] -- zero -> first block
+ -- one -> second block etc
+ -- Undefined outside range, and when
+ -- there's a Nothing
+
+ | CmmJump -- Jump to another C-- function,
+ CmmExpr -- Target
+ (Maybe [GlobalReg]) -- Live registers at call site;
+ -- Nothing -> no information, assume
+ -- all live
+ -- Just .. -> info on liveness, []
+ -- means no live registers
+ -- This isn't all 'live' registers, just
+ -- the argument STG registers that are live
+ -- AND also possibly mapped to machine
+ -- registers. (So Sp, Hp, HpLim... etc
+ -- are never included here as they are
+ -- always live, only R2.., D1.. are
+ -- on this list)
+
+ | CmmReturn -- Return from a native C-- function,
data CmmHinted a
= CmmHinted {
@@ -201,7 +215,7 @@ instance UserOfLocalRegs CmmStmt where
stmt (CmmBranch _) = id
stmt (CmmCondBranch e _) = gen e
stmt (CmmSwitch e _) = gen e
- stmt (CmmJump e) = gen e
+ stmt (CmmJump e _) = gen e
stmt (CmmReturn) = id
gen :: UserOfLocalRegs a => a -> b -> b
diff --git a/compiler/cmm/OldPprCmm.hs b/compiler/cmm/OldPprCmm.hs
index 44692d45ac..4b1da0b242 100644
--- a/compiler/cmm/OldPprCmm.hs
+++ b/compiler/cmm/OldPprCmm.hs
@@ -32,12 +32,11 @@
-- A useful example pass over Cmm is in nativeGen/MachCodeGen.hs
--
-module OldPprCmm
- ( pprStmt
- , module PprCmmDecl
- , module PprCmmExpr
- )
-where
+module OldPprCmm (
+ pprStmt,
+ module PprCmmDecl,
+ module PprCmmExpr
+ ) where
import BlockId
import CLabel
@@ -46,7 +45,6 @@ import OldCmm
import PprCmmDecl
import PprCmmExpr
-
import BasicTypes
import ForeignCall
import Outputable
@@ -109,7 +107,7 @@ pprStmt platform stmt = case stmt of
-- ;
CmmNop -> semi
- -- // text
+ -- // text
CmmComment s -> text "//" <+> ftext s
-- reg = expr;
@@ -153,7 +151,7 @@ pprStmt platform stmt = case stmt of
CmmBranch ident -> genBranch ident
CmmCondBranch expr ident -> genCondBranch platform expr ident
- CmmJump expr -> genJump platform expr
+ CmmJump expr live -> genJump platform expr live
CmmReturn -> genReturn platform
CmmSwitch arg ids -> genSwitch platform arg ids
@@ -176,7 +174,6 @@ pprUpdateFrame platform (UpdateFrame expr args) =
, space
, parens ( commafy $ map (pprPlatform platform) args ) ]
-
-- --------------------------------------------------------------------------
-- goto local label. [1], section 6.6
--
@@ -203,17 +200,17 @@ genCondBranch platform expr ident =
--
-- jump foo(a, b, c);
--
-genJump :: Platform -> CmmExpr -> SDoc
-genJump platform expr =
+genJump :: Platform -> CmmExpr -> Maybe [GlobalReg] -> SDoc
+genJump platform expr live =
hcat [ ptext (sLit "jump")
, space
, if isTrivialCmmExpr expr
then pprExpr platform expr
else case expr of
CmmLoad (CmmReg _) _ -> pprExpr platform expr
- _ -> parens (pprExpr platform expr)
- , semi ]
-
+ _ -> parens (pprExpr platform expr)
+ , semi <+> ptext (sLit "// ")
+ , maybe empty ppr live]
-- --------------------------------------------------------------------------
-- Return from a function. [1], Section 6.8.2 of version 1.128
@@ -264,3 +261,4 @@ genSwitch platform expr maybe_ids
commafy :: [SDoc] -> SDoc
commafy xs = fsep $ punctuate comma xs
+
diff --git a/compiler/cmm/PprC.hs b/compiler/cmm/PprC.hs
index 330d09082b..658e3ca5d8 100644
--- a/compiler/cmm/PprC.hs
+++ b/compiler/cmm/PprC.hs
@@ -248,7 +248,7 @@ pprStmt platform stmt = case stmt of
CmmBranch ident -> pprBranch ident
CmmCondBranch expr ident -> pprCondBranch platform expr ident
- CmmJump lbl -> mkJMP_(pprExpr platform lbl) <> semi
+ CmmJump lbl _ -> mkJMP_(pprExpr platform lbl) <> semi
CmmSwitch arg ids -> pprSwitch platform arg ids
pprCFunType :: SDoc -> CCallConv -> [HintedCmmFormal] -> [HintedCmmActual] -> SDoc
@@ -930,7 +930,7 @@ te_Stmt (CmmCall _ rs es _) = mapM_ (te_temp.hintlessCmm) rs >>
mapM_ (te_Expr.hintlessCmm) es
te_Stmt (CmmCondBranch e _) = te_Expr e
te_Stmt (CmmSwitch e _) = te_Expr e
-te_Stmt (CmmJump e) = te_Expr e
+te_Stmt (CmmJump e _) = te_Expr e
te_Stmt _ = return ()
te_Expr :: CmmExpr -> TE ()
diff --git a/compiler/codeGen/CgClosure.lhs b/compiler/codeGen/CgClosure.lhs
index 8e599c3fb5..d6537c27e5 100644
--- a/compiler/codeGen/CgClosure.lhs
+++ b/compiler/codeGen/CgClosure.lhs
@@ -362,6 +362,7 @@ mkSlowEntryCode cl_info reg_args
= mapAccumL (\off (rep,_) -> (off + cgRepSizeW rep, off))
0 reps_w_regs
+
load_assts = zipWithEqual "mk_load" mk_load reps_w_regs stk_offsets
mk_load (rep,reg) offset = CmmAssign (CmmGlobal reg)
(CmmLoad (cmmRegOffW spReg offset)
@@ -374,7 +375,8 @@ mkSlowEntryCode cl_info reg_args
stk_adj_pop = CmmAssign spReg (cmmRegOffW spReg final_stk_offset)
stk_adj_push = CmmAssign spReg (cmmRegOffW spReg (- final_stk_offset))
- jump_to_entry = CmmJump (mkLblExpr (entryLabelFromCI cl_info))
+ live_regs = Just $ map snd reps_w_regs
+ jump_to_entry = CmmJump (mkLblExpr (entryLabelFromCI cl_info)) live_regs
\end{code}
@@ -412,6 +414,7 @@ funWrapper :: ClosureInfo -- Closure whose code body this is
-> Code
funWrapper closure_info arg_regs reg_save_code fun_body = do
{ let node_points = nodeMustPointToIt (closureLFInfo closure_info)
+ live = Just $ map snd arg_regs
{-
-- Debugging: check that R1 has the correct tag
@@ -431,8 +434,7 @@ funWrapper closure_info arg_regs reg_save_code fun_body = do
; granYield arg_regs node_points
-- Heap and/or stack checks wrap the function body
- ; funEntryChecks closure_info reg_save_code
- fun_body
+ ; funEntryChecks closure_info reg_save_code live fun_body
}
\end{code}
@@ -590,7 +592,7 @@ link_caf cl_info _is_upd = do
-- assuming lots of things, like the stack pointer hasn't
-- moved since we entered the CAF.
let target = entryCode (closureInfoPtr (CmmReg nodeReg)) in
- stmtC (CmmJump target)
+ stmtC (CmmJump target $ Just [node])
; returnFC hp_rel }
where
diff --git a/compiler/codeGen/CgCon.lhs b/compiler/codeGen/CgCon.lhs
index 99690945cb..9049504dca 100644
--- a/compiler/codeGen/CgCon.lhs
+++ b/compiler/codeGen/CgCon.lhs
@@ -116,7 +116,7 @@ buildDynCon :: Id -- Name of the thing to which this constr will
-> CostCentreStack -- Where to grab cost centre from;
-- current CCS if currentOrSubsumedCCS
-> DataCon -- The data constructor
- -> [(CgRep,CmmExpr)] -- Its args
+ -> [(CgRep,CmmExpr)] -- Its args
-> FCode CgIdInfo -- Return details about how to find it
buildDynCon binder ccs con args
= do dflags <- getDynFlags
@@ -348,12 +348,15 @@ cgReturnDataCon con amodes
| otherwise -> build_it_then (jump_to deflt_lbl) }
_otherwise -- The usual case
- -> build_it_then emitReturnInstr
+ -> build_it_then $ emitReturnInstr node_live
}
where
+ node_live = Just [node]
enter_it = stmtsC [ CmmAssign nodeReg (cmmUntag (CmmReg nodeReg)),
- CmmJump (entryCode (closureInfoPtr (CmmReg nodeReg)))]
- jump_to lbl = stmtC (CmmJump (CmmLit lbl))
+ CmmJump (entryCode $ closureInfoPtr $ CmmReg nodeReg)
+ node_live
+ ]
+ jump_to lbl = stmtC $ CmmJump (CmmLit lbl) node_live
build_it_then return_code
= do { -- BUILD THE OBJECT IN THE HEAP
-- The first "con" says that the name bound to this
@@ -472,7 +475,7 @@ cgDataCon data_con
-- The case continuation code is expecting a tagged pointer
; stmtC (CmmAssign nodeReg
(tagCons data_con (CmmReg nodeReg)))
- ; performReturn emitReturnInstr }
+ ; performReturn $ emitReturnInstr (Just []) }
-- noStmts: Ptr to thing already in Node
; whenC (not (isNullaryRepDataCon data_con))
diff --git a/compiler/codeGen/CgExpr.lhs b/compiler/codeGen/CgExpr.lhs
index e69db9f61b..cb3a86ef7f 100644
--- a/compiler/codeGen/CgExpr.lhs
+++ b/compiler/codeGen/CgExpr.lhs
@@ -149,7 +149,7 @@ cgExpr (StgOpApp (StgPrimOp TagToEnumOp) [arg] res_ty)
; amode' <- assignTemp amode -- We're going to use it twice,
-- so save in a temp if non-trivial
; stmtC (CmmAssign nodeReg (tagToClosure tycon amode'))
- ; performReturn emitReturnInstr }
+ ; performReturn $ emitReturnInstr (Just [node]) }
where
-- If you're reading this code in the attempt to figure
-- out why the compiler panic'ed here, it is probably because
@@ -172,7 +172,8 @@ cgExpr (StgOpApp (StgPrimOp primop) args res_ty)
| ReturnsPrim VoidRep <- result_info
= do cgPrimOp [] primop args emptyVarSet
- performReturn emitReturnInstr
+ -- ToDo: STG Live -- worried about this
+ performReturn $ emitReturnInstr (Just [])
| ReturnsPrim rep <- result_info
= do res <- newTemp (typeCmmType res_ty)
@@ -191,7 +192,8 @@ cgExpr (StgOpApp (StgPrimOp primop) args res_ty)
stmtC (CmmAssign nodeReg
(tagToClosure tycon
(CmmReg (CmmLocal tag_reg))))
- performReturn emitReturnInstr
+ -- ToDo: STG Live -- worried about this
+ performReturn $ emitReturnInstr (Just [node])
where
result_info = getPrimOpResultInfo primop
diff --git a/compiler/codeGen/CgHeapery.lhs b/compiler/codeGen/CgHeapery.lhs
index d8ac298b58..dfe146dfc8 100644
--- a/compiler/codeGen/CgHeapery.lhs
+++ b/compiler/codeGen/CgHeapery.lhs
@@ -54,6 +54,7 @@ import Outputable
import FastString
import Data.List
+import Data.Maybe (fromMaybe)
\end{code}
@@ -273,21 +274,22 @@ an automatic context switch is done.
A heap/stack check at a function or thunk entry point.
\begin{code}
-funEntryChecks :: ClosureInfo -> CmmStmts -> Code -> Code
-funEntryChecks cl_info reg_save_code code
- = hpStkCheck cl_info True reg_save_code code
+funEntryChecks :: ClosureInfo -> CmmStmts -> Maybe [GlobalReg] -> Code -> Code
+funEntryChecks cl_info reg_save_code live code
+ = hpStkCheck cl_info True reg_save_code live code
thunkEntryChecks :: ClosureInfo -> Code -> Code
thunkEntryChecks cl_info code
- = hpStkCheck cl_info False noStmts code
+ = hpStkCheck cl_info False noStmts (Just [node]) code
hpStkCheck :: ClosureInfo -- Function closure
-> Bool -- Is a function? (not a thunk)
-> CmmStmts -- Register saves
+ -> Maybe [GlobalReg] -- Live registers
-> Code
-> Code
-hpStkCheck cl_info is_fun reg_save_code code
+hpStkCheck cl_info is_fun reg_save_code live code
= getFinalStackHW $ \ spHw -> do
{ sp <- getRealSp
; let stk_words = spHw - sp
@@ -295,17 +297,18 @@ hpStkCheck cl_info is_fun reg_save_code code
{ -- Emit heap checks, but be sure to do it lazily so
-- that the conditionals on hpHw don't cause a black hole
codeOnly $ do
- { do_checks stk_words hpHw full_save_code rts_label
+ { do_checks stk_words hpHw full_save_code rts_label full_live
; tickyAllocHeap hpHw }
; setRealHp hpHw
; code }
}
where
- node_asst
+ (node_asst, full_live)
| nodeMustPointToIt (closureLFInfo cl_info)
- = noStmts
+ = (noStmts, live)
| otherwise
- = oneStmt (CmmAssign nodeReg (CmmLit (CmmLabel closure_lbl)))
+ = (oneStmt (CmmAssign nodeReg (CmmLit (CmmLabel closure_lbl)))
+ ,Just $ node : fromMaybe [] live)
-- Strictly speaking, we should tag node here. But if
-- node doesn't point to the closure, the code for the closure
-- cannot depend on the value of R1 anyway, so we're safe.
@@ -349,12 +352,17 @@ altHeapCheck alt_type code
{ codeOnly $ do
{ do_checks 0 {- no stack chk -} hpHw
noStmts {- nothign to save -}
- (rts_label alt_type)
+ rts_label live
; tickyAllocHeap hpHw }
; setRealHp hpHw
; code }
where
- rts_label PolyAlt = CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_unpt_r1")))
+ (rts_label, live) = gc_info alt_type
+
+ mkL l = CmmLit . CmmLabel $ mkCmmCodeLabel rtsPackageId (fsLit l)
+
+ gc_info PolyAlt = (mkL "stg_gc_unpt_r1" , Just [node])
+
-- Do *not* enter R1 after a heap check in
-- a polymorphic case. It might be a function
-- and the entry code for a function (currently)
@@ -362,22 +370,21 @@ altHeapCheck alt_type code
--
-- However R1 is guaranteed to be a pointer
- rts_label (AlgAlt _) = stg_gc_enter1
+ gc_info (AlgAlt _) = (stg_gc_enter1, Just [node])
-- Enter R1 after the heap check; it's a pointer
- rts_label (PrimAlt tc)
- = CmmLit $ CmmLabel $
- case primRepToCgRep (tyConPrimRep tc) of
- VoidArg -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_noregs")
- FloatArg -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_f1")
- DoubleArg -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_d1")
- LongArg -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_l1")
+ gc_info (PrimAlt tc)
+ = case primRepToCgRep (tyConPrimRep tc) of
+ VoidArg -> (mkL "stg_gc_noregs", Just [])
+ FloatArg -> (mkL "stg_gc_f1", Just [FloatReg 1])
+ DoubleArg -> (mkL "stg_gc_d1", Just [DoubleReg 1])
+ LongArg -> (mkL "stg_gc_l1", Just [LongReg 1])
-- R1 is boxed but unlifted:
- PtrArg -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_unpt_r1")
+ PtrArg -> (mkL "stg_gc_unpt_r1", Just [node])
-- R1 is unboxed:
- NonPtrArg -> mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_unbx_r1")
+ NonPtrArg -> (mkL "stg_gc_unbx_r1", Just [node])
- rts_label (UbxTupAlt _) = panic "altHeapCheck"
+ gc_info (UbxTupAlt _) = panic "altHeapCheck"
\end{code}
@@ -404,7 +411,7 @@ unbxTupleHeapCheck regs ptrs nptrs fail_code code
| otherwise
= initHeapUsage $ \ hpHw -> do
{ codeOnly $ do { do_checks 0 {- no stack check -} hpHw
- full_fail_code rts_label
+ full_fail_code rts_label live
; tickyAllocHeap hpHw }
; setRealHp hpHw
; code }
@@ -413,6 +420,7 @@ unbxTupleHeapCheck regs ptrs nptrs fail_code code
assign_liveness = CmmAssign (CmmGlobal (VanillaReg 9 VNonGcPtr)) -- Ho ho ho!
(CmmLit (mkWordCLit liveness))
liveness = mkRegLiveness regs ptrs nptrs
+ live = Just $ map snd regs
rts_label = CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_ut")))
\end{code}
@@ -434,14 +442,15 @@ again on re-entry because someone else might have stolen the resource
in the meantime.
\begin{code}
-do_checks :: WordOff -- Stack headroom
- -> WordOff -- Heap headroom
- -> CmmStmts -- Assignments to perform on failure
- -> CmmExpr -- Rts address to jump to on failure
+do_checks :: WordOff -- Stack headroom
+ -> WordOff -- Heap headroom
+ -> CmmStmts -- Assignments to perform on failure
+ -> CmmExpr -- Rts address to jump to on failure
+ -> Maybe [GlobalReg] -- Live registers
-> Code
-do_checks 0 0 _ _ = nopC
+do_checks 0 0 _ _ _ = nopC
-do_checks _ hp _ _
+do_checks _ hp _ _ _
| hp > bLOCKS_PER_MBLOCK * bLOCK_SIZE_W
= sorry (unlines [
"Trying to allocate more than " ++ show (bLOCKS_PER_MBLOCK * bLOCK_SIZE) ++ " bytes.",
@@ -450,21 +459,22 @@ do_checks _ hp _ _
"Suggestion: read data from a file instead of having large static data",
"structures in the code."])
-do_checks stk hp reg_save_code rts_lbl
+do_checks stk hp reg_save_code rts_lbl live
= do_checks' (CmmLit (mkIntCLit (stk*wORD_SIZE)))
(CmmLit (mkIntCLit (hp*wORD_SIZE)))
- (stk /= 0) (hp /= 0) reg_save_code rts_lbl
+ (stk /= 0) (hp /= 0) reg_save_code rts_lbl live
-- The offsets are now in *bytes*
-do_checks' :: CmmExpr -> CmmExpr -> Bool -> Bool -> CmmStmts -> CmmExpr -> Code
-do_checks' stk_expr hp_expr stk_nonzero hp_nonzero reg_save_code rts_lbl
+do_checks' :: CmmExpr -> CmmExpr -> Bool -> Bool -> CmmStmts -> CmmExpr
+ -> Maybe [GlobalReg] -> Code
+do_checks' stk_expr hp_expr stk_nonzero hp_nonzero reg_save_code rts_lbl live
= do { doGranAllocate hp_expr
-- The failure block: this saves the registers and jumps to
-- the appropriate RTS stub.
; exit_blk_id <- forkLabelledCode $ do {
; emitStmts reg_save_code
- ; stmtC (CmmJump rts_lbl) }
+ ; stmtC (CmmJump rts_lbl live) }
-- In the case of a heap-check failure, we must also set
-- HpAlloc. NB. HpAlloc is *only* set if Hp has been
@@ -514,7 +524,8 @@ do_checks' stk_expr hp_expr stk_nonzero hp_nonzero reg_save_code rts_lbl
\begin{code}
hpChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
hpChkGen bytes liveness reentry
- = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assigns stg_gc_gen
+ = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assigns
+ stg_gc_gen (Just activeStgRegs)
where
assigns = mkStmts [ mk_vanilla_assignment 9 liveness,
mk_vanilla_assignment 10 reentry ]
@@ -523,12 +534,14 @@ hpChkGen bytes liveness reentry
-- we want to assign to Sp[0] on failure (used in AutoApply.cmm:BUILD_PAP).
hpChkNodePointsAssignSp0 :: CmmExpr -> CmmExpr -> Code
hpChkNodePointsAssignSp0 bytes sp0
- = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assign stg_gc_enter1
+ = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assign
+ stg_gc_enter1 (Just [node])
where assign = oneStmt (CmmStore (CmmReg spReg) sp0)
stkChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
stkChkGen bytes liveness reentry
- = do_checks' bytes (CmmLit (mkIntCLit 0)) True False assigns stg_gc_gen
+ = do_checks' bytes (CmmLit (mkIntCLit 0)) True False assigns
+ stg_gc_gen (Just activeStgRegs)
where
assigns = mkStmts [ mk_vanilla_assignment 9 liveness,
mk_vanilla_assignment 10 reentry ]
@@ -539,7 +552,8 @@ mk_vanilla_assignment n e
stkChkNodePoints :: CmmExpr -> Code
stkChkNodePoints bytes
- = do_checks' bytes (CmmLit (mkIntCLit 0)) True False noStmts stg_gc_enter1
+ = do_checks' bytes (CmmLit (mkIntCLit 0)) True False noStmts
+ stg_gc_enter1 (Just [node])
stg_gc_gen :: CmmExpr
stg_gc_gen = CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "stg_gc_gen")))
diff --git a/compiler/codeGen/CgInfoTbls.hs b/compiler/codeGen/CgInfoTbls.hs
index 9f003a2302..1e80616887 100644
--- a/compiler/codeGen/CgInfoTbls.hs
+++ b/compiler/codeGen/CgInfoTbls.hs
@@ -250,10 +250,10 @@ emitAlgReturnTarget name branches mb_deflt fam_sz
-- global labels, so we can't use them at the 'call site'
--------------------------------
-emitReturnInstr :: Code
-emitReturnInstr
- = do { info_amode <- getSequelAmode
- ; stmtC (CmmJump (entryCode info_amode)) }
+emitReturnInstr :: Maybe [GlobalReg] -> Code
+emitReturnInstr live
+ = do { info_amode <- getSequelAmode
+ ; stmtC (CmmJump (entryCode info_amode) live) }
-----------------------------------------------------------------------------
--
diff --git a/compiler/codeGen/CgMonad.lhs b/compiler/codeGen/CgMonad.lhs
index c05019e3ac..c0e3e3be8b 100644
--- a/compiler/codeGen/CgMonad.lhs
+++ b/compiler/codeGen/CgMonad.lhs
@@ -249,7 +249,7 @@ flattenCgStmts id stmts =
where (block,blocks) = flatten ss
isJump :: CmmStmt -> Bool
-isJump (CmmJump _ ) = True
+isJump (CmmJump _ _) = True
isJump (CmmBranch _ ) = True
isJump (CmmSwitch _ _) = True
isJump (CmmReturn ) = True
diff --git a/compiler/codeGen/CgTailCall.lhs b/compiler/codeGen/CgTailCall.lhs
index 07be7f23fa..499529d841 100644
--- a/compiler/codeGen/CgTailCall.lhs
+++ b/compiler/codeGen/CgTailCall.lhs
@@ -45,6 +45,7 @@ import Outputable
import StaticFlags
import Control.Monad
+import Data.Maybe
-----------------------------------------------------------------------------
-- Tail Calls
@@ -103,17 +104,19 @@ performTailCall fun_info arg_amodes pending_assts
-- to make the heap check easier. The tail-call sequence
-- is very similar to returning an unboxed tuple, so we
-- share some code.
- do { (final_sp, arg_assts) <- pushUnboxedTuple join_sp arg_amodes
+ do { (final_sp, arg_assts, live) <- pushUnboxedTuple join_sp arg_amodes
; emitSimultaneously (pending_assts `plusStmts` arg_assts)
; let lbl = enterReturnPtLabel (idUnique (cgIdInfoId fun_info))
- ; doFinalJump final_sp True {- Is LNE -} (jumpToLbl lbl) }
+ ; doFinalJump final_sp True $ jumpToLbl lbl (Just live) }
| otherwise
= do { fun_amode <- idInfoToAmode fun_info
; let assignSt = CmmAssign nodeReg fun_amode
node_asst = oneStmt assignSt
- opt_node_asst | nodeMustPointToIt lf_info = node_asst
- | otherwise = noStmts
+ node_live = Just [node]
+ (opt_node_asst, opt_node_live)
+ | nodeMustPointToIt lf_info = (node_asst, node_live)
+ | otherwise = (noStmts, Just [])
; EndOfBlockInfo sp _ <- getEndOfBlockInfo
; dflags <- getDynFlags
@@ -122,8 +125,8 @@ performTailCall fun_info arg_amodes pending_assts
-- Node must always point to things we enter
EnterIt -> do
{ emitSimultaneously (node_asst `plusStmts` pending_assts)
- ; let target = entryCode (closureInfoPtr (CmmReg nodeReg))
- enterClosure = stmtC (CmmJump target)
+ ; let target = entryCode (closureInfoPtr (CmmReg nodeReg))
+ enterClosure = stmtC (CmmJump target node_live)
-- If this is a scrutinee
-- let's check if the closure is a constructor
-- so we can directly jump to the alternatives switch
@@ -137,18 +140,18 @@ performTailCall fun_info arg_amodes pending_assts
-- As with any return, Node must point to it.
ReturnIt -> do
{ emitSimultaneously (node_asst `plusStmts` pending_assts)
- ; doFinalJump sp False emitReturnInstr }
+ ; doFinalJump sp False $ emitReturnInstr node_live }
-- A real constructor. Don't bother entering it,
-- just do the right sort of return instead.
-- As with any return, Node must point to it.
ReturnCon _ -> do
{ emitSimultaneously (node_asst `plusStmts` pending_assts)
- ; doFinalJump sp False emitReturnInstr }
+ ; doFinalJump sp False $ emitReturnInstr node_live }
JumpToIt lbl -> do
{ emitSimultaneously (opt_node_asst `plusStmts` pending_assts)
- ; doFinalJump sp False (jumpToLbl lbl) }
+ ; doFinalJump sp False $ jumpToLbl lbl opt_node_live }
-- A slow function call via the RTS apply routines
-- Node must definitely point to the thing
@@ -163,7 +166,7 @@ performTailCall fun_info arg_amodes pending_assts
; let (apply_lbl, args, extra_args)
= constructSlowCall arg_amodes
- ; directCall sp apply_lbl args extra_args
+ ; directCall sp apply_lbl args extra_args node_live
(node_asst `plusStmts` pending_assts)
}
@@ -179,7 +182,7 @@ performTailCall fun_info arg_amodes pending_assts
-- The args beyond the arity go straight on the stack
(arity_args, extra_args) = splitAt arity arg_amodes
- ; directCall sp lbl arity_args extra_args
+ ; directCall sp lbl arity_args extra_args opt_node_live
(opt_node_asst `plusStmts` pending_assts)
}
}
@@ -203,7 +206,8 @@ performTailCall fun_info arg_amodes pending_assts
-- No, enter the closure.
; enterClosure
; labelC is_constr
- ; stmtC (CmmJump (entryCode $ CmmLit (CmmLabel lbl)))
+ ; stmtC (CmmJump (entryCode $
+ CmmLit (CmmLabel lbl)) (Just [node]))
}
{-
-- This is a scrutinee for a case expression
@@ -243,9 +247,9 @@ performTailCall fun_info arg_amodes pending_assts
-}
directCall :: VirtualSpOffset -> CLabel -> [(CgRep, CmmExpr)]
- -> [(CgRep, CmmExpr)] -> CmmStmts
+ -> [(CgRep, CmmExpr)] -> Maybe [GlobalReg] -> CmmStmts
-> Code
-directCall sp lbl args extra_args assts = do
+directCall sp lbl args extra_args live_node assts = do
let
-- First chunk of args go in registers
(reg_arg_amodes, stk_args) = assignCallRegs args
@@ -255,14 +259,12 @@ directCall sp lbl args extra_args assts = do
slow_stk_args = slowArgs extra_args
reg_assts = assignToRegs reg_arg_amodes
+ live_args = map snd reg_arg_amodes
+ live_regs = Just $ (fromMaybe [] live_node) ++ live_args
--
(final_sp, stk_assts) <- mkStkAmodes sp (stk_args ++ slow_stk_args)
-
- emitSimultaneously (reg_assts `plusStmts`
- stk_assts `plusStmts`
- assts)
-
- doFinalJump final_sp False (jumpToLbl lbl)
+ emitSimultaneously $ reg_assts `plusStmts` stk_assts `plusStmts` assts
+ doFinalJump final_sp False $ jumpToLbl lbl live_regs
-- -----------------------------------------------------------------------------
-- The final clean-up before we do a jump at the end of a basic block.
@@ -296,20 +298,27 @@ performReturn :: Code -- The code to execute to actually do the return
performReturn finish_code
= do { EndOfBlockInfo args_sp _sequel <- getEndOfBlockInfo
- ; doFinalJump args_sp False{-not a LNE-} finish_code }
+ ; doFinalJump args_sp False finish_code }
-- ----------------------------------------------------------------------------
-- Primitive Returns
-- Just load the return value into the right register, and return.
-performPrimReturn :: CgRep -> CmmExpr -- The thing to return
- -> Code
-performPrimReturn rep amode
- = do { whenC (not (isVoidArg rep))
- (stmtC (CmmAssign ret_reg amode))
- ; performReturn emitReturnInstr }
+performPrimReturn :: CgRep -> CmmExpr -> Code
+
+-- non-void return value
+performPrimReturn rep amode | not (isVoidArg rep)
+ = do { stmtC (CmmAssign ret_reg amode)
+ ; performReturn $ emitReturnInstr live_regs }
where
- ret_reg = dataReturnConvPrim rep
+ -- careful here as 'dataReturnConvPrim' will panic if given a Void rep
+ ret_reg@(CmmGlobal r) = dataReturnConvPrim rep
+ live_regs = Just [r]
+
+-- void return value
+performPrimReturn _ _
+ = performReturn $ emitReturnInstr (Just [])
+
-- ---------------------------------------------------------------------------
-- Unboxed tuple returns
@@ -329,19 +338,21 @@ returnUnboxedTuple :: [(CgRep, CmmExpr)] -> Code
returnUnboxedTuple amodes
= do { (EndOfBlockInfo args_sp _sequel) <- getEndOfBlockInfo
; tickyUnboxedTupleReturn (length amodes)
- ; (final_sp, assts) <- pushUnboxedTuple args_sp amodes
+ ; (final_sp, assts, live_regs) <- pushUnboxedTuple args_sp amodes
; emitSimultaneously assts
- ; doFinalJump final_sp False{-not a LNE-} emitReturnInstr }
+ ; doFinalJump final_sp False $ emitReturnInstr (Just live_regs) }
pushUnboxedTuple :: VirtualSpOffset -- Sp at which to start pushing
-> [(CgRep, CmmExpr)] -- amodes of the components
-> FCode (VirtualSpOffset, -- final Sp
- CmmStmts) -- assignments (regs+stack)
+ CmmStmts, -- assignments (regs+stack)
+ [GlobalReg]) -- registers used (liveness)
pushUnboxedTuple sp []
- = return (sp, noStmts)
+ = return (sp, noStmts, [])
pushUnboxedTuple sp amodes
= do { let (reg_arg_amodes, stk_arg_amodes) = assignReturnRegs amodes
+ live_regs = map snd reg_arg_amodes
-- separate the rest of the args into pointers and non-pointers
(ptr_args, nptr_args) = separateByPtrFollowness stk_arg_amodes
@@ -352,8 +363,8 @@ pushUnboxedTuple sp amodes
; (final_sp, nptr_assts) <- mkStkAmodes ptr_sp nptr_args
; returnFC (final_sp,
- reg_arg_assts `plusStmts`
- ptr_assts `plusStmts` nptr_assts) }
+ reg_arg_assts `plusStmts` ptr_assts `plusStmts` nptr_assts,
+ live_regs) }
-- -----------------------------------------------------------------------------
@@ -403,13 +414,14 @@ tailCallPrim lbl args
-- Hence the ASSERT( null leftovers )
arg_amodes <- getArgAmodes args
; let (arg_regs, leftovers) = assignPrimOpCallRegs arg_amodes
- jump_to_primop = jumpToLbl lbl
+ live_regs = Just $ map snd arg_regs
+ jump_to_primop = jumpToLbl lbl live_regs
; ASSERT(null leftovers) -- no stack-resident args
emitSimultaneously (assignToRegs arg_regs)
; EndOfBlockInfo args_sp _ <- getEndOfBlockInfo
- ; doFinalJump args_sp False{-not a LNE-} jump_to_primop }
+ ; doFinalJump args_sp False jump_to_primop }
-- -----------------------------------------------------------------------------
-- Return Addresses
@@ -439,8 +451,8 @@ pushReturnAddress _ = nopC
-- Misc.
-- Passes no argument to the destination procedure
-jumpToLbl :: CLabel -> Code
-jumpToLbl lbl = stmtC (CmmJump (CmmLit (CmmLabel lbl)))
+jumpToLbl :: CLabel -> Maybe [GlobalReg] -> Code
+jumpToLbl lbl live = stmtC $ CmmJump (CmmLit $ CmmLabel lbl) live
assignToRegs :: [(CmmExpr, GlobalReg)] -> CmmStmts
assignToRegs reg_args
diff --git a/compiler/codeGen/CgUtils.hs b/compiler/codeGen/CgUtils.hs
index 2a524a182c..2bd35c8796 100644
--- a/compiler/codeGen/CgUtils.hs
+++ b/compiler/codeGen/CgUtils.hs
@@ -1020,7 +1020,7 @@ fixStgRegStmt stmt
CmmSwitch expr ids -> CmmSwitch (fixStgRegExpr expr) ids
- CmmJump addr -> CmmJump (fixStgRegExpr addr)
+ CmmJump addr live -> CmmJump (fixStgRegExpr addr) live
-- CmmNop, CmmComment, CmmBranch, CmmReturn
_other -> stmt
diff --git a/compiler/llvmGen/LlvmCodeGen/CodeGen.hs b/compiler/llvmGen/LlvmCodeGen/CodeGen.hs
index b8a44447fa..07ccbb1348 100644
--- a/compiler/llvmGen/LlvmCodeGen/CodeGen.hs
+++ b/compiler/llvmGen/LlvmCodeGen/CodeGen.hs
@@ -127,7 +127,7 @@ stmtToInstrs env stmt = case stmt of
-> genCall env target res args ret
-- Tail call
- CmmJump arg -> genJump env arg
+ CmmJump arg live -> genJump env arg live
-- CPS, only tail calls, no return's
-- Actually, there are a few return statements that occur because of hand
@@ -470,19 +470,19 @@ cmmPrimOpFunctions env mop
-- | Tail function calls
-genJump :: LlvmEnv -> CmmExpr -> UniqSM StmtData
+genJump :: LlvmEnv -> CmmExpr -> Maybe [GlobalReg] -> UniqSM StmtData
-- Call to known function
-genJump env (CmmLit (CmmLabel lbl)) = do
+genJump env (CmmLit (CmmLabel lbl)) live = do
(env', vf, stmts, top) <- getHsFunc env lbl
- (stgRegs, stgStmts) <- funEpilogue
+ (stgRegs, stgStmts) <- funEpilogue live
let s1 = Expr $ Call TailCall vf stgRegs llvmStdFunAttrs
let s2 = Return Nothing
return (env', stmts `appOL` stgStmts `snocOL` s1 `snocOL` s2, top)
-- Call to unknown function / address
-genJump env expr = do
+genJump env expr live = do
let fty = llvmFunTy
(env', vf, stmts, top) <- exprToVar env expr
@@ -494,7 +494,7 @@ genJump env expr = do
++ show (ty) ++ ")"
(v1, s1) <- doExpr (pLift fty) $ Cast cast vf (pLift fty)
- (stgRegs, stgStmts) <- funEpilogue
+ (stgRegs, stgStmts) <- funEpilogue live
let s2 = Expr $ Call TailCall v1 stgRegs llvmStdFunAttrs
let s3 = Return Nothing
return (env', stmts `snocOL` s1 `appOL` stgStmts `snocOL` s2 `snocOL` s3,
@@ -1197,15 +1197,29 @@ funPrologue = concat $ map getReg activeStgRegs
-- | Function epilogue. Load STG variables to use as argument for call.
-funEpilogue :: UniqSM ([LlvmVar], LlvmStatements)
-funEpilogue = do
- let loadExpr r = do
+funEpilogue :: Maybe [GlobalReg] -> UniqSM ([LlvmVar], LlvmStatements)
+funEpilogue Nothing = do
+ loads <- mapM loadExpr activeStgRegs
+ let (vars, stmts) = unzip loads
+ return (vars, concatOL stmts)
+ where
+ loadExpr r = do
let reg = lmGlobalRegVar r
(v,s) <- doExpr (pLower $ getVarType reg) $ Load reg
return (v, unitOL s)
+
+funEpilogue (Just live) = do
loads <- mapM loadExpr activeStgRegs
let (vars, stmts) = unzip loads
return (vars, concatOL stmts)
+ where
+ loadExpr r | r `elem` alwaysLive || r `elem` live = do
+ let reg = lmGlobalRegVar r
+ (v,s) <- doExpr (pLower $ getVarType reg) $ Load reg
+ return (v, unitOL s)
+ loadExpr r = do
+ let ty = (pLower . getVarType $ lmGlobalRegVar r)
+ return (LMLitVar $ LMUndefLit ty, unitOL Nop)
-- | A serries of statements to trash all the STG registers.
diff --git a/compiler/llvmGen/LlvmCodeGen/Regs.hs b/compiler/llvmGen/LlvmCodeGen/Regs.hs
index b0c63a4c34..ecce7a317b 100644
--- a/compiler/llvmGen/LlvmCodeGen/Regs.hs
+++ b/compiler/llvmGen/LlvmCodeGen/Regs.hs
@@ -3,7 +3,7 @@
--
module LlvmCodeGen.Regs (
- lmGlobalRegArg, lmGlobalRegVar
+ lmGlobalRegArg, lmGlobalRegVar, alwaysLive
) where
#include "HsVersions.h"
@@ -24,7 +24,7 @@ lmGlobalRegArg = lmGlobalReg "_Arg"
{- Need to make sure the names here can't conflict with the unique generated
names. Uniques generated names containing only base62 chars. So using say
- the '_' char guarantees this.
+ the '_' char guarantees this.
-}
lmGlobalReg :: String -> GlobalReg -> LlvmVar
lmGlobalReg suf reg
@@ -55,3 +55,7 @@ lmGlobalReg suf reg
floatGlobal name = LMNLocalVar (fsLit name) LMFloat
doubleGlobal name = LMNLocalVar (fsLit name) LMDouble
+-- | A list of STG Registers that should always be considered alive
+alwaysLive :: [GlobalReg]
+alwaysLive = [BaseReg, Sp, Hp, SpLim, HpLim, node]
+
diff --git a/compiler/nativeGen/AsmCodeGen.lhs b/compiler/nativeGen/AsmCodeGen.lhs
index b404e87f31..02878bfff5 100644
--- a/compiler/nativeGen/AsmCodeGen.lhs
+++ b/compiler/nativeGen/AsmCodeGen.lhs
@@ -878,9 +878,9 @@ cmmStmtConFold stmt
src' <- cmmExprConFold DataReference src
return $ CmmStore addr' src'
- CmmJump addr
+ CmmJump addr live
-> do addr' <- cmmExprConFold JumpReference addr
- return $ CmmJump addr'
+ return $ CmmJump addr' live
CmmCall target regs args returns
-> do target' <- case target of
diff --git a/compiler/nativeGen/PPC/CodeGen.hs b/compiler/nativeGen/PPC/CodeGen.hs
index 8b96f7140a..7b704cbe8f 100644
--- a/compiler/nativeGen/PPC/CodeGen.hs
+++ b/compiler/nativeGen/PPC/CodeGen.hs
@@ -141,7 +141,7 @@ stmtToInstrs stmt = do
CmmBranch id -> genBranch id
CmmCondBranch arg id -> genCondJump id arg
CmmSwitch arg ids -> genSwitch arg ids
- CmmJump arg -> genJump arg
+ CmmJump arg _ -> genJump arg
CmmReturn ->
panic "stmtToInstrs: return statement should have been cps'd away"
diff --git a/compiler/nativeGen/SPARC/CodeGen.hs b/compiler/nativeGen/SPARC/CodeGen.hs
index 0022e043ee..4c295f11d5 100644
--- a/compiler/nativeGen/SPARC/CodeGen.hs
+++ b/compiler/nativeGen/SPARC/CodeGen.hs
@@ -141,7 +141,7 @@ stmtToInstrs stmt = case stmt of
CmmBranch id -> genBranch id
CmmCondBranch arg id -> genCondJump id arg
CmmSwitch arg ids -> genSwitch arg ids
- CmmJump arg -> genJump arg
+ CmmJump arg _ -> genJump arg
CmmReturn
-> panic "stmtToInstrs: return statement should have been cps'd away"
diff --git a/compiler/nativeGen/X86/CodeGen.hs b/compiler/nativeGen/X86/CodeGen.hs
index b7356ea3fd..c68519522d 100644
--- a/compiler/nativeGen/X86/CodeGen.hs
+++ b/compiler/nativeGen/X86/CodeGen.hs
@@ -166,7 +166,7 @@ stmtToInstrs stmt = do
CmmBranch id -> genBranch id
CmmCondBranch arg id -> genCondJump id arg
CmmSwitch arg ids -> genSwitch arg ids
- CmmJump arg -> genJump arg
+ CmmJump arg _ -> genJump arg
CmmReturn ->
panic "stmtToInstrs: return statement should have been cps'd away"