author     Ömer Sinan Ağacan <omeragacan@gmail.com>    2016-08-10 16:47:47 +0000
committer  Ömer Sinan Ağacan <omeragacan@gmail.com>    2016-08-10 16:48:17 +0000
commit     9684dbb1d776a8e086f8db4191b3bcf826867dda
tree       9ecdca8cca16090aeb5cccf135f84c6120f7a805 /compiler/codeGen
parent     e79bb2c10d66ec8f0c037e66c7d0a22aa887fdc1
download   haskell-9684dbb1d776a8e086f8db4191b3bcf826867dda.tar.gz
Remove StgRubbishArg and CmmArg
The idea behind adding special "rubbish" arguments was this: in unboxed sum types,
depending on the tag, some arguments are unused, and we don't want to move special
values (like 0 for literals and a special pointer for boxed slots) into those
argument positions (stack locations or registers). "StgRubbishArg" was an indicator
to the code generator that the value would never be used, so during Stg-to-Cmm we
generated no move or store instructions for it at all.
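For reference, here is a simplified sketch of the two argument types involved (not
the exact GHC definitions; StgVarArg and StgLitArg are the real remaining
constructors, and Id, Literal, CmmExpr and Type are GHC's usual types):

    data StgArg
      = StgVarArg Id
      | StgLitArg Literal
      | StgRubbishArg Type      -- removed by this patch: marks a slot that is never used

    data CmmArg
      = CmmExprArg CmmExpr      -- an ordinary Cmm expression
      | CmmRubbishArg Type      -- removed by this patch

After this patch only the ordinary constructors remain on the STG side, and the Cmm
side works with plain CmmExpr again.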
This caused problems in the register allocator, because some variables were
initialized only on some code paths. As an example, suppose we have this STG
(after unarise):
    Lib.$WT =
        \r [dt_sit]
            case
                case dt_sit of {
                  Lib.F dt_siv [Occ=Once] ->
                      (#,,#) [1# dt_siv StgRubbishArg::GHC.Prim.Int#];
                  Lib.I dt_siw [Occ=Once] ->
                      (#,,#) [2# StgRubbishArg::GHC.Types.Any dt_siw];
                }
            of
            dt_six
            { (#,,#) us_giC us_giD us_giE -> Lib.T [us_giC us_giD us_giE];
            };
This basically unpacks a sum type to an unboxed sum with 3 fields, and then
moves the unboxed sum to a constructor (`Lib.T`).
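For readers unfamiliar with unboxed sums, here is a small, self-contained example
of the same situation (illustrative only; the real Lib module is not part of this
patch):

    {-# LANGUAGE MagicHash, UnboxedSums #-}
    module RubbishDemo where

    import GHC.Exts (Int#)

    -- One boxed and one unboxed alternative.  After unarise, a value of this
    -- sum is represented by three slots: a tag, a pointer slot and an Int# slot.
    mkBoxed :: String -> (# String | Int# #)
    mkBoxed s = (# s | #)      -- uses the pointer slot; the Int# slot is unused

    mkUnboxed :: Int# -> (# String | Int# #)
    mkUnboxed n = (# | n #)    -- uses the Int# slot; the pointer slot is unused

Whichever alternative is built, the slot belonging to the other alternative carries
no meaningful value; that unused slot is exactly what StgRubbishArg used to mark.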
This is the Cmm for the inner case expression (case expression in the scrutinee
position of the outer case):
    ciN:
        ...
        -- look at dt_sit's tag
        if (_ciT::P64 != 1) goto ciS; else goto ciR;
    ciS: -- Tag is 2, i.e. Lib.I
        _siw::I64 = I64[_siu::P64 + 6];
        _giE::I64 = _siw::I64;
        _giD::P64 = stg_RUBBISH_ENTRY_info;
        _giC::I64 = 2;
        goto ciU;
    ciR: -- Tag is 1, i.e. Lib.F
        _siv::P64 = P64[_siu::P64 + 7];
        _giD::P64 = _siv::P64;
        _giC::I64 = 1;
        goto ciU;
Here one of the blocks `ciS` and `ciR` is executed and then execution continues at
`ciU`, but only `ciS` initializes `_giE`; in the other branch `_giE` is not
initialized, because it is "rubbish" in the STG and so we generate no assignment
for it during code generation. The code generator then panics during register
allocation:
ghc-stage1: panic! (the 'impossible' happened)
(GHC version 8.1.20160722 for x86_64-unknown-linux):
LocalReg's live-in to graph ciY {_giE::I64}
(`_giD` is also "rubbish" in `ciS`, but it is still initialized because it is a
pointer slot: we have to initialize it, otherwise the garbage collector would
follow the pointer to some random place. So we only skip the assignment when the
"rubbish" arg has an unboxed type.)
This patch removes `StgRubbishArg` and `CmmArg`. We now always initialize rubbish
slots: if the slot is for a boxed type we use the existing `absentError`, otherwise
we initialize the slot with literal 0.
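As a self-contained illustration of the new policy (simplified, assumed names; the
code that actually fills the slots is not among the codeGen files listed below):

    module RubbishFill where

    -- Stand-ins for GHC's PrimRep and StgArg, just enough to show the rule.
    data SlotKind = PtrSlot | WordSlot
    data Arg      = VarArg String | LitArg Integer
      deriving Show

    -- A boxed rubbish slot is filled with the existing absentError closure, so
    -- the garbage collector always sees a valid pointer; an unboxed rubbish
    -- slot simply becomes literal 0.
    fillRubbish :: SlotKind -> Arg
    fillRubbish PtrSlot  = VarArg "absentError"
    fillRubbish WordSlot = LitArg 0

Either way every slot is assigned on every code path, so the uninitialized-variable
problem above can no longer arise.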
Reviewers: simonpj, erikd, austin, simonmar, bgamari
Reviewed By: erikd
Subscribers: thomie
Differential Revision: https://phabricator.haskell.org/D2446
Diffstat (limited to 'compiler/codeGen')
-rw-r--r--  compiler/codeGen/StgCmm.hs        |  2
-rw-r--r--  compiler/codeGen/StgCmmBind.hs    |  2
-rw-r--r--  compiler/codeGen/StgCmmCon.hs     |  2
-rw-r--r--  compiler/codeGen/StgCmmEnv.hs     | 28
-rw-r--r--  compiler/codeGen/StgCmmExpr.hs    | 14
-rw-r--r--  compiler/codeGen/StgCmmForeign.hs |  6
-rw-r--r--  compiler/codeGen/StgCmmHeap.hs    | 20
-rw-r--r--  compiler/codeGen/StgCmmLayout.hs  | 28
-rw-r--r--  compiler/codeGen/StgCmmMonad.hs   | 17
-rw-r--r--  compiler/codeGen/StgCmmPrim.hs    | 25
-rw-r--r--  compiler/codeGen/StgCmmUtils.hs   | 22
11 files changed, 67 insertions, 99 deletions
diff --git a/compiler/codeGen/StgCmm.hs b/compiler/codeGen/StgCmm.hs
index d6e0cf2f72..85f8845c8a 100644
--- a/compiler/codeGen/StgCmm.hs
+++ b/compiler/codeGen/StgCmm.hs
@@ -241,7 +241,7 @@ cgDataCon data_con do { _ <- ticky_code ; ldvEnter (CmmReg nodeReg) ; tickyReturnOldCon (length arg_things) - ; void $ emitReturn [CmmExprArg (cmmOffsetB dflags (CmmReg nodeReg) (tagForCon dflags data_con))] + ; void $ emitReturn [cmmOffsetB dflags (CmmReg nodeReg) (tagForCon dflags data_con)] } -- The case continuation code expects a tagged pointer

diff --git a/compiler/codeGen/StgCmmBind.hs b/compiler/codeGen/StgCmmBind.hs
index 745fd33f73..93756ec406 100644
--- a/compiler/codeGen/StgCmmBind.hs
+++ b/compiler/codeGen/StgCmmBind.hs
@@ -551,7 +551,7 @@ mkSlowEntryCode bndr cl_info arg_regs -- function closure is already in `Node' -- mkDirectJump does not clobber `Node' containing function closure jump = mkJump dflags NativeNodeCall (mkLblExpr fast_lbl) - (map (CmmExprArg . CmmReg . CmmLocal) (node : arg_regs)) + (map (CmmReg . CmmLocal) (node : arg_regs)) (initUpdFrameOff dflags) tscope <- getTickScope emitProcWithConvention Slow Nothing slow_lbl

diff --git a/compiler/codeGen/StgCmmCon.hs b/compiler/codeGen/StgCmmCon.hs
index c77816a819..4255f10201 100644
--- a/compiler/codeGen/StgCmmCon.hs
+++ b/compiler/codeGen/StgCmmCon.hs
@@ -88,7 +88,7 @@ cgTopRhsCon dflags id con args = -- needs to poke around inside it. info_tbl = mkDataConInfoTable dflags con True ptr_wds nonptr_wds - get_lit (arg, _offset) = do { CmmExprArg (CmmLit lit) <- getArgAmode arg + get_lit (arg, _offset) = do { CmmLit lit <- getArgAmode arg ; return lit } ; payload <- mapM get_lit nv_args_w_offsets

diff --git a/compiler/codeGen/StgCmmEnv.hs b/compiler/codeGen/StgCmmEnv.hs
index ec4c75f0bc..44d3df84ee 100644
--- a/compiler/codeGen/StgCmmEnv.hs
+++ b/compiler/codeGen/StgCmmEnv.hs
@@ -19,8 +19,7 @@ module StgCmmEnv ( bindArgsToRegs, bindToReg, rebindToReg, bindArgToReg, idToReg, - getArgAmode, getArgAmode_no_rubbish, - getNonVoidArgAmodes, getNonVoidArgAmodes_no_rubbish, + getArgAmode, getNonVoidArgAmodes, getCgIdInfo, maybeLetNoEscape, ) where
@@ -37,7 +36,6 @@ import CLabel import BlockId import CmmExpr import CmmUtils -import Control.Monad import DynFlags import Id import MkGraph
@@ -166,19 +164,11 @@ cgLookupPanic id -------------------- -getArgAmode :: NonVoid StgArg -> FCode CmmArg -getArgAmode (NonVoid (StgVarArg var)) = - do { info <- getCgIdInfo var; return (CmmExprArg (idInfoToAmode info)) } -getArgAmode (NonVoid (StgLitArg lit)) = liftM (CmmExprArg . CmmLit) $ cgLit lit -getArgAmode (NonVoid (StgRubbishArg ty)) = return (CmmRubbishArg ty) - -getArgAmode_no_rubbish :: NonVoid StgArg -> FCode CmmExpr -getArgAmode_no_rubbish (NonVoid (StgVarArg var)) = - do { info <- getCgIdInfo var; return (idInfoToAmode info) } -getArgAmode_no_rubbish (NonVoid (StgLitArg lit)) = liftM CmmLit $ cgLit lit -getArgAmode_no_rubbish arg@(NonVoid (StgRubbishArg _)) = pprPanic "getArgAmode_no_rubbish" (ppr arg) - -getNonVoidArgAmodes :: [StgArg] -> FCode [CmmArg] +getArgAmode :: NonVoid StgArg -> FCode CmmExpr +getArgAmode (NonVoid (StgVarArg var)) = idInfoToAmode <$> getCgIdInfo var +getArgAmode (NonVoid (StgLitArg lit)) = CmmLit <$> cgLit lit + +getNonVoidArgAmodes :: [StgArg] -> FCode [CmmExpr] -- NB: Filters out void args, -- so the result list may be shorter than the argument list getNonVoidArgAmodes [] = return []
@@ -188,12 +178,6 @@ getNonVoidArgAmodes (arg:args) ; amodes <- getNonVoidArgAmodes args ; return ( amode : amodes ) } --- This version assumes arguments are not rubbish. I think this assumption holds --- as long as we don't pass unboxed sums to primops and foreign fns. -getNonVoidArgAmodes_no_rubbish :: [StgArg] -> FCode [CmmExpr] -getNonVoidArgAmodes_no_rubbish - = mapM (getArgAmode_no_rubbish . NonVoid) . filter (not . isVoidRep . argPrimRep) - ------------------------------------------------------------------------ -- Interface functions for binding and re-binding names

diff --git a/compiler/codeGen/StgCmmExpr.hs b/compiler/codeGen/StgCmmExpr.hs
index 005e332d07..91cfba6bd0 100644
--- a/compiler/codeGen/StgCmmExpr.hs
+++ b/compiler/codeGen/StgCmmExpr.hs
@@ -68,7 +68,7 @@ cgExpr (StgOpApp op args ty) = cgOpApp op args ty cgExpr (StgConApp con args _)= cgConApp con args cgExpr (StgTick t e) = cgTick t >> cgExpr e cgExpr (StgLit lit) = do cmm_lit <- cgLit lit - emitReturn [CmmExprArg (CmmLit cmm_lit)] + emitReturn [CmmLit cmm_lit] cgExpr (StgLet binds expr) = do { cgBind binds; cgExpr expr } cgExpr (StgLetNoEscape binds expr) =
@@ -309,7 +309,7 @@ cgCase (StgOpApp (StgPrimOp op) args _) bndr (AlgAlt tycon) alts where do_enum_primop :: PrimOp -> [StgArg] -> FCode CmmExpr do_enum_primop TagToEnumOp [arg] -- No code! - = getArgAmode_no_rubbish (NonVoid arg) + = getArgAmode (NonVoid arg) do_enum_primop primop args = do dflags <- getDynFlags tmp <- newTemp (bWord dflags)
@@ -517,7 +517,7 @@ isSimpleOp :: StgOp -> [StgArg] -> FCode Bool -- True iff the op cannot block or allocate isSimpleOp (StgFCallOp (CCall (CCallSpec _ _ safe)) _) _ = return $! not (playSafe safe) isSimpleOp (StgPrimOp op) stg_args = do - arg_exprs <- getNonVoidArgAmodes_no_rubbish stg_args + arg_exprs <- getNonVoidArgAmodes stg_args dflags <- getDynFlags -- See Note [Inlining out-of-line primops and heap checks] return $! isJust $ shouldInlinePrimOp dflags op arg_exprs
@@ -684,7 +684,7 @@ cgConApp con stg_args ; emit =<< fcode_init ; tickyReturnNewCon (length stg_args) - ; emitReturn [CmmExprArg (idInfoToAmode idinfo)] } + ; emitReturn [idInfoToAmode idinfo] } cgIdApp :: Id -> [StgArg] -> FCode ReturnKind cgIdApp fun_id [] | isVoidTy (idType fun_id) = emitReturn []
@@ -707,7 +707,7 @@ cgIdApp fun_id args = do case getCallMethod dflags fun_name cg_fun_id lf_info n_args v_args (cg_loc fun_info) self_loop_info of -- A value in WHNF, so we can just return it. - ReturnIt -> emitReturn [CmmExprArg fun] -- ToDo: does ReturnIt guarantee tagged? + ReturnIt -> emitReturn [fun] -- ToDo: does ReturnIt guarantee tagged? EnterIt -> ASSERT( null args ) -- Discarding arguments emitEnter fun
@@ -857,7 +857,7 @@ emitEnter fun = do Return -> do { let entry = entryCode dflags $ closureInfoPtr dflags $ CmmReg nodeReg ; emit $ mkJump dflags NativeNodeCall entry - [CmmExprArg (cmmUntag dflags fun)] updfr_off + [cmmUntag dflags fun] updfr_off ; return AssignedDirectly }
@@ -893,7 +893,7 @@ emitEnter fun = do ; updfr_off <- getUpdFrameOff ; let area = Young lret ; let (outArgs, regs, copyout) = copyOutOflow dflags NativeNodeCall Call area - [CmmExprArg fun] updfr_off [] + [fun] updfr_off [] -- refer to fun via nodeReg after the copyout, to avoid having -- both live simultaneously; this sometimes enables fun to be -- inlined in the RHS of the R1 assignment.

diff --git a/compiler/codeGen/StgCmmForeign.hs b/compiler/codeGen/StgCmmForeign.hs
index eb14e8cce6..fdfdb77375 100644
--- a/compiler/codeGen/StgCmmForeign.hs
+++ b/compiler/codeGen/StgCmmForeign.hs
@@ -111,7 +111,7 @@ cgForeignCall (CCall (CCallSpec target cconv safety)) stg_args res_ty _something_else -> do { _ <- emitForeignCall safety res_regs call_target call_args - ; emitReturn (map (CmmExprArg . CmmReg . CmmLocal) res_regs) + ; emitReturn (map (CmmReg . CmmLocal) res_regs) } }
@@ -524,12 +524,10 @@ getFCallArgs args = do { mb_cmms <- mapM get args ; return (catMaybes mb_cmms) } where - get arg@(StgRubbishArg{}) - = pprPanic "getFCallArgs" (text "Rubbish arg in foreign call:" <+> ppr arg) get arg | isVoidRep arg_rep = return Nothing | otherwise - = do { cmm <- getArgAmode_no_rubbish (NonVoid arg) + = do { cmm <- getArgAmode (NonVoid arg) ; dflags <- getDynFlags ; return (Just (add_shim dflags arg_ty cmm, hint)) } where

diff --git a/compiler/codeGen/StgCmmHeap.hs b/compiler/codeGen/StgCmmHeap.hs
index fa1780449d..ebff4402d0 100644
--- a/compiler/codeGen/StgCmmHeap.hs
+++ b/compiler/codeGen/StgCmmHeap.hs
@@ -72,7 +72,7 @@ allocDynClosure allocDynClosureCmm :: Maybe Id -> CmmInfoTable -> LambdaFormInfo -> CmmExpr -> CmmExpr - -> [(CmmArg, ByteOff)] + -> [(CmmExpr, ByteOff)] -> FCode CmmExpr -- returns Hp+n -- allocDynClosure allocates the thing in the heap,
@@ -113,7 +113,7 @@ allocHeapClosure :: SMRep -- ^ representation of the object -> CmmExpr -- ^ info pointer -> CmmExpr -- ^ cost centre - -> [(CmmArg,ByteOff)] -- ^ payload + -> [(CmmExpr,ByteOff)] -- ^ payload -> FCode CmmExpr -- ^ returns the address of the object allocHeapClosure rep info_ptr use_cc payload = do profDynAlloc rep use_cc
@@ -144,7 +144,7 @@ allocHeapClosure rep info_ptr use_cc payload = do emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> FCode () emitSetDynHdr base info_ptr ccs = do dflags <- getDynFlags - hpStore base (zip (map CmmExprArg (header dflags)) [0, wORD_SIZE dflags ..]) + hpStore base (zip (header dflags) [0, wORD_SIZE dflags ..]) where header :: DynFlags -> [CmmExpr] header dflags = [info_ptr] ++ dynProfHdr dflags ccs
@@ -152,11 +152,11 @@ emitSetDynHdr base info_ptr ccs -- No ticky header -- Store the item (expr,off) in base[off] -hpStore :: CmmExpr -> [(CmmArg, ByteOff)] -> FCode () +hpStore :: CmmExpr -> [(CmmExpr, ByteOff)] -> FCode () hpStore base vals = do dflags <- getDynFlags sequence_ $ - [ emitStore (cmmOffsetB dflags base off) val | (CmmExprArg val,off) <- vals ] + [ emitStore (cmmOffsetB dflags base off) val | (val,off) <- vals ] ----------------------------------------------------------- -- Layout of static closures
@@ -364,7 +364,7 @@ entryHeapCheck' is_fastf node arity args code = do dflags <- getDynFlags let is_thunk = arity == 0 - args' = map (CmmExprArg . CmmReg . CmmLocal) args + args' = map (CmmReg . CmmLocal) args stg_gc_fun = CmmReg (CmmGlobal GCFun) stg_gc_enter1 = CmmReg (CmmGlobal GCEnter1)
@@ -376,13 +376,13 @@ entryHeapCheck' is_fastf node arity args code -} gc_call upd | is_thunk - = mkJump dflags NativeNodeCall stg_gc_enter1 [CmmExprArg node] upd + = mkJump dflags NativeNodeCall stg_gc_enter1 [node] upd | is_fastf - = mkJump dflags NativeNodeCall stg_gc_fun (CmmExprArg node : args') upd + = mkJump dflags NativeNodeCall stg_gc_fun (node : args') upd | otherwise - = mkJump dflags Slow stg_gc_fun (CmmExprArg node : args') upd + = mkJump dflags Slow stg_gc_fun (node : args') upd updfr_sz <- getUpdFrameOff
@@ -446,7 +446,7 @@ cannedGCReturnsTo checkYield cont_on_stack gc regs lret off code updfr_sz <- getUpdFrameOff heapCheck False checkYield (gc_call dflags gc updfr_sz) code where - reg_exprs = map (CmmExprArg . CmmReg . CmmLocal) regs + reg_exprs = map (CmmReg . CmmLocal) regs -- Note [stg_gc arguments] -- NB. we use the NativeReturn convention for passing arguments

diff --git a/compiler/codeGen/StgCmmLayout.hs b/compiler/codeGen/StgCmmLayout.hs
index 39f3cd7fa3..59bbc8d5ea 100644
--- a/compiler/codeGen/StgCmmLayout.hs
+++ b/compiler/codeGen/StgCmmLayout.hs
@@ -68,7 +68,7 @@ import Control.Monad -- -- > p=x; q=y; -- -emitReturn :: [CmmArg] -> FCode ReturnKind +emitReturn :: [CmmExpr] -> FCode ReturnKind emitReturn results = do { dflags <- getDynFlags ; sequel <- getSequel
@@ -90,7 +90,7 @@ emitReturn results -- using the call/return convention @conv@, passing @args@, and -- returning the results to the current sequel. -- -emitCall :: (Convention, Convention) -> CmmExpr -> [CmmArg] -> FCode ReturnKind +emitCall :: (Convention, Convention) -> CmmExpr -> [CmmExpr] -> FCode ReturnKind emitCall convs fun args = emitCallWithExtraStack convs fun args noExtraStack
@@ -101,8 +101,8 @@ emitCall convs fun args -- @stack@, and returning the results to the current sequel. -- emitCallWithExtraStack - :: (Convention, Convention) -> CmmExpr -> [CmmArg] - -> [CmmArg] -> FCode ReturnKind + :: (Convention, Convention) -> CmmExpr -> [CmmExpr] + -> [CmmExpr] -> FCode ReturnKind emitCallWithExtraStack (callConv, retConv) fun args extra_stack = do { dflags <- getDynFlags ; adjustHpBackwards
@@ -187,7 +187,7 @@ slowCall fun stg_args (r, slow_code) <- getCodeR $ do r <- direct_call "slow_call" NativeNodeCall - (mkRtsApFastLabel rts_fun) arity ((P,Just (CmmExprArg fun)):argsreps) + (mkRtsApFastLabel rts_fun) arity ((P,Just fun):argsreps) emitComment $ mkFastString ("slow_call for " ++ showSDoc dflags (ppr fun) ++ " with pat " ++ unpackFS rts_fun)
@@ -213,7 +213,7 @@ slowCall fun stg_args fast_code <- getCode $ emitCall (NativeNodeCall, NativeReturn) (entryCode dflags fun_iptr) - (nonVArgs ((P,Just (CmmExprArg funv)):argsreps)) + (nonVArgs ((P,Just funv):argsreps)) slow_lbl <- newLabelC fast_lbl <- newLabelC
@@ -271,7 +271,7 @@ slowCall fun stg_args direct_call :: String -> Convention -- e.g. NativeNodeCall or NativeDirectCall -> CLabel -> RepArity - -> [(ArgRep,Maybe CmmArg)] -> FCode ReturnKind + -> [(ArgRep,Maybe CmmExpr)] -> FCode ReturnKind direct_call caller call_conv lbl arity args | debugIsOn && real_arity > length args -- Too few args = do -- Caller should ensure that there enough args!
@@ -299,11 +299,11 @@ direct_call caller call_conv lbl arity args -- When constructing calls, it is easier to keep the ArgReps and the --- CmmArgs zipped together. However, a void argument has no --- representation, so we need to use Maybe CmmArg (the alternative of +-- CmmExprs zipped together. However, a void argument has no +-- representation, so we need to use Maybe CmmExpr (the alternative of -- using zeroCLit or even undefined would work, but would be ugly). -- -getArgRepsAmodes :: [StgArg] -> FCode [(ArgRep, Maybe CmmArg)] +getArgRepsAmodes :: [StgArg] -> FCode [(ArgRep, Maybe CmmExpr)] getArgRepsAmodes = mapM getArgRepAmode where getArgRepAmode arg | V <- rep = return (V, Nothing)
@@ -311,7 +311,7 @@ getArgRepsAmodes = mapM getArgRepAmode return (rep, Just expr) where rep = toArgRep (argPrimRep arg) -nonVArgs :: [(ArgRep, Maybe CmmArg)] -> [CmmArg] +nonVArgs :: [(ArgRep, Maybe CmmExpr)] -> [CmmExpr] nonVArgs [] = [] nonVArgs ((_,Nothing) : args) = nonVArgs args nonVArgs ((_,Just arg) : args) = arg : nonVArgs args
@@ -354,7 +354,7 @@ just more arguments that we are passing on the stack (cml_args). -- | 'slowArgs' takes a list of function arguments and prepares them for -- pushing on the stack for "extra" arguments to a function which requires -- fewer arguments than we currently have. -slowArgs :: DynFlags -> [(ArgRep, Maybe CmmArg)] -> [(ArgRep, Maybe CmmArg)] +slowArgs :: DynFlags -> [(ArgRep, Maybe CmmExpr)] -> [(ArgRep, Maybe CmmExpr)] slowArgs _ [] = [] slowArgs dflags args -- careful: reps contains voids (V), but args does not | gopt Opt_SccProfilingOn dflags
@@ -365,8 +365,8 @@ slowArgs dflags args -- careful: reps contains voids (V), but args does not (call_args, rest_args) = splitAt n args stg_ap_pat = mkCmmRetInfoLabel rtsUnitId arg_pat - this_pat = (N, Just (CmmExprArg (mkLblExpr stg_ap_pat))) : call_args - save_cccs = [(N, Just (CmmExprArg (mkLblExpr save_cccs_lbl))), (N, Just (CmmExprArg curCCS))] + this_pat = (N, Just (mkLblExpr stg_ap_pat)) : call_args + save_cccs = [(N, Just (mkLblExpr save_cccs_lbl)), (N, Just curCCS)] save_cccs_lbl = mkCmmRetInfoLabel rtsUnitId (fsLit "stg_restore_cccs") -------------------------------------------------------------------------

diff --git a/compiler/codeGen/StgCmmMonad.hs b/compiler/codeGen/StgCmmMonad.hs
index 471a94df64..836bf30f29 100644
--- a/compiler/codeGen/StgCmmMonad.hs
+++ b/compiler/codeGen/StgCmmMonad.hs
@@ -19,7 +19,7 @@ module StgCmmMonad ( emit, emitDecl, emitProc, emitProcWithConvention, emitProcWithStackFrame, - emitOutOfLine, emitAssign, emitAssign', emitStore, + emitOutOfLine, emitAssign, emitStore, emitComment, emitTick, emitUnwind, getCmm, aGraphToGraph,
@@ -76,7 +76,6 @@ import Unique import UniqSupply import FastString import Outputable -import RepType (typePrimRep) import Control.Monad import Data.List
@@ -743,14 +742,6 @@ emitUnwind g e = do emitAssign :: CmmReg -> CmmExpr -> FCode () emitAssign l r = emitCgStmt (CgStmt (CmmAssign l r)) -emitAssign' :: CmmReg -> CmmArg -> FCode () -emitAssign' l (CmmExprArg r) = emitAssign l r -emitAssign' l (CmmRubbishArg ty) - | isGcPtrRep (typePrimRep ty) - = emitAssign l rubbishExpr - | otherwise - = return () - emitStore :: CmmExpr -> CmmExpr -> FCode () emitStore l r = emitCgStmt (CgStmt (CmmStore l r))
@@ -866,8 +857,8 @@ mkCmmIfThen e tbranch = do , mkLabel tid tscp, tbranch, mkLabel endif tscp ] -mkCall :: CmmExpr -> (Convention, Convention) -> [CmmFormal] -> [CmmArg] - -> UpdFrameOffset -> [CmmArg] -> FCode CmmAGraph +mkCall :: CmmExpr -> (Convention, Convention) -> [CmmFormal] -> [CmmExpr] + -> UpdFrameOffset -> [CmmExpr] -> FCode CmmAGraph mkCall f (callConv, retConv) results actuals updfr_off extra_stack = do dflags <- getDynFlags k <- newLabelC
@@ -877,7 +868,7 @@ mkCall f (callConv, retConv) results actuals updfr_off extra_stack = do copyout = mkCallReturnsTo dflags f callConv actuals k off updfr_off extra_stack return $ catAGraphs [copyout, mkLabel k tscp, copyin] -mkCmmCall :: CmmExpr -> [CmmFormal] -> [CmmArg] -> UpdFrameOffset +mkCmmCall :: CmmExpr -> [CmmFormal] -> [CmmExpr] -> UpdFrameOffset -> FCode CmmAGraph mkCmmCall f results actuals updfr_off = mkCall f (NativeDirectCall, NativeReturn) results actuals updfr_off []

diff --git a/compiler/codeGen/StgCmmPrim.hs b/compiler/codeGen/StgCmmPrim.hs
index c02f992bed..d3c09c584e 100644
--- a/compiler/codeGen/StgCmmPrim.hs
+++ b/compiler/codeGen/StgCmmPrim.hs
@@ -46,7 +46,6 @@ import Util import Prelude hiding ((<*>)) import Data.Bits ((.&.), bit) -import Data.Bifunctor (first) import Control.Monad (liftM, when) ------------------------------------------------------------------------
@@ -80,10 +79,10 @@ cgOpApp (StgFCallOp fcall _) stg_args res_ty cgOpApp (StgPrimOp TagToEnumOp) [arg] res_ty = ASSERT(isEnumerationTyCon tycon) do { dflags <- getDynFlags - ; args' <- getNonVoidArgAmodes_no_rubbish [arg] + ; args' <- getNonVoidArgAmodes [arg] ; let amode = case args' of [amode] -> amode _ -> panic "TagToEnumOp had void arg" - ; emitReturn [CmmExprArg (tagToClosure dflags tycon amode)] } + ; emitReturn [tagToClosure dflags tycon amode] } where -- If you're reading this code in the attempt to figure -- out why the compiler panic'ed here, it is probably because
@@ -94,11 +93,11 @@ cgOpApp (StgPrimOp TagToEnumOp) [arg] res_ty cgOpApp (StgPrimOp primop) args res_ty = do dflags <- getDynFlags - cmm_args <- getNonVoidArgAmodes_no_rubbish args + cmm_args <- getNonVoidArgAmodes args case shouldInlinePrimOp dflags primop cmm_args of Nothing -> do -- out-of-line let fun = CmmLit (CmmLabel (mkRtsPrimOpLabel primop)) - emitCall (NativeNodeCall, NativeReturn) fun (map CmmExprArg cmm_args) + emitCall (NativeNodeCall, NativeReturn) fun cmm_args Just f -- inline | ReturnsPrim VoidRep <- result_info
@@ -109,12 +108,12 @@ cgOpApp (StgPrimOp primop) args res_ty = do -> do dflags <- getDynFlags res <- newTemp (primRepCmmType dflags rep) f [res] - emitReturn [CmmExprArg (CmmReg (CmmLocal res))] + emitReturn [CmmReg (CmmLocal res)] | ReturnsAlg tycon <- result_info, isUnboxedTupleTyCon tycon -> do (regs, _hints) <- newUnboxedTupleRegs res_ty f regs - emitReturn (map (CmmExprArg . CmmReg . CmmLocal) regs) + emitReturn (map (CmmReg . CmmLocal) regs) | otherwise -> panic "cgPrimop" where
@@ -257,7 +256,7 @@ cgPrimOp :: [LocalReg] -- where to put the results cgPrimOp results op args = do dflags <- getDynFlags - arg_exprs <- getNonVoidArgAmodes_no_rubbish args + arg_exprs <- getNonVoidArgAmodes args emitPrimOp dflags results op arg_exprs
@@ -1658,7 +1657,7 @@ doNewByteArrayOp res_r n = do let hdr_size = fixedHdrSize dflags base <- allocHeapClosure rep info_ptr curCCS - [ (CmmExprArg (mkIntExpr dflags n), + [ (mkIntExpr dflags n, hdr_size + oFFSET_StgArrBytes_bytes dflags) ]
@@ -1771,7 +1770,7 @@ doNewArrayOp res_r rep info payload n init = do (mkIntExpr dflags (nonHdrSize dflags rep)) (zeroExpr dflags) - base <- allocHeapClosure rep info_ptr curCCS (map (first CmmExprArg) payload) + base <- allocHeapClosure rep info_ptr curCCS payload arr <- CmmLocal `fmap` newTemp (bWord dflags) emit $ mkAssign arr base
@@ -1954,9 +1953,9 @@ emitCloneArray info_p res_r src src_off n = do let hdr_size = fixedHdrSize dflags base <- allocHeapClosure rep info_ptr curCCS - [ (CmmExprArg (mkIntExpr dflags n), + [ (mkIntExpr dflags n, hdr_size + oFFSET_StgMutArrPtrs_ptrs dflags) - , (CmmExprArg (mkIntExpr dflags (nonHdrSizeW rep)), + , (mkIntExpr dflags (nonHdrSizeW rep), hdr_size + oFFSET_StgMutArrPtrs_size dflags) ]
@@ -1993,7 +1992,7 @@ emitCloneSmallArray info_p res_r src src_off n = do let hdr_size = fixedHdrSize dflags base <- allocHeapClosure rep info_ptr curCCS - [ (CmmExprArg (mkIntExpr dflags n), + [ (mkIntExpr dflags n, hdr_size + oFFSET_StgSmallMutArrPtrs_ptrs dflags) ]

diff --git a/compiler/codeGen/StgCmmUtils.hs b/compiler/codeGen/StgCmmUtils.hs
index f1437eb640..7372ab9102 100644
--- a/compiler/codeGen/StgCmmUtils.hs
+++ b/compiler/codeGen/StgCmmUtils.hs
@@ -38,7 +38,7 @@ module StgCmmUtils ( addToMem, addToMemE, addToMemLblE, addToMemLbl, mkWordCLit, newStringCLit, newByteStringCLit, - blankWord, rubbishExpr + blankWord, ) where #include "HsVersions.h"
@@ -194,7 +194,7 @@ emitRtsCallGen res lbl args safe where call updfr_off = if safe then - emit =<< mkCmmCall fun_expr res' (map CmmExprArg args') updfr_off + emit =<< mkCmmCall fun_expr res' args' updfr_off else do let conv = ForeignConvention CCallConv arg_hints res_hints CmmMayReturn emit $ mkUnsafeCall (ForeignTarget fun_expr conv) res' args'
@@ -374,14 +374,14 @@ newUnboxedTupleRegs res_ty -- emitMultiAssign ------------------------------------------------------------------------- -emitMultiAssign :: [LocalReg] -> [CmmArg] -> FCode () +emitMultiAssign :: [LocalReg] -> [CmmExpr] -> FCode () -- Emit code to perform the assignments in the -- input simultaneously, using temporary variables when necessary. type Key = Int type Vrtx = (Key, Stmt) -- Give each vertex a unique number, -- for fast comparison -type Stmt = (LocalReg, CmmArg) -- r := e +type Stmt = (LocalReg, CmmExpr) -- r := e -- We use the strongly-connected component algorithm, in which -- * the vertices are the statements
@@ -390,7 +390,7 @@ type Stmt = (LocalReg, CmmArg) -- r := e -- that is, if s1 should *follow* s2 in the final order emitMultiAssign [] [] = return () -emitMultiAssign [reg] [rhs] = emitAssign' (CmmLocal reg) rhs +emitMultiAssign [reg] [rhs] = emitAssign (CmmLocal reg) rhs emitMultiAssign regs rhss = do dflags <- getDynFlags ASSERT2( equalLength regs rhss, ppr regs $$ ppr rhss )
@@ -429,20 +429,16 @@ unscramble dflags vertices = mapM_ do_component components split :: DynFlags -> Unique -> Stmt -> (Stmt, Stmt) split dflags uniq (reg, rhs) - = ((tmp, rhs), (reg, CmmExprArg (CmmReg (CmmLocal tmp)))) + = ((tmp, rhs), (reg, CmmReg (CmmLocal tmp))) where - rep = cmmArgType dflags rhs + rep = cmmExprType dflags rhs tmp = LocalReg uniq rep mk_graph :: Stmt -> FCode () - mk_graph (reg, rhs) = emitAssign' (CmmLocal reg) rhs + mk_graph (reg, rhs) = emitAssign (CmmLocal reg) rhs mustFollow :: Stmt -> Stmt -> Bool - (reg, _) `mustFollow` (_, rhs) = regUsedIn' dflags (CmmLocal reg) rhs - -regUsedIn' :: DynFlags -> CmmReg -> CmmArg -> Bool -regUsedIn' _ _ (CmmRubbishArg _) = False -regUsedIn' dflags reg (CmmExprArg expr) = regUsedIn dflags reg expr + (reg, _) `mustFollow` (_, rhs) = regUsedIn dflags (CmmLocal reg) rhs ------------------------------------------------------------------------- -- mkSwitch