author     Ömer Sinan Ağacan <omeragacan@gmail.com>    2016-07-21 08:07:41 +0000
committer  Ömer Sinan Ağacan <omeragacan@gmail.com>    2016-07-21 08:11:27 +0000
commit     714bebff44076061d0a719c4eda2cfd213b7ac3d (patch)
tree       b697e786a8f5f25e8a47886bc5d5487c01678ec6 /compiler/codeGen
parent     83e4f49577665278fe08fbaafe2239553f3c448e (diff)
download   haskell-714bebff44076061d0a719c4eda2cfd213b7ac3d.tar.gz
Implement unboxed sum primitive type
Summary:

This patch implements primitive unboxed sum types, as described in
https://ghc.haskell.org/trac/ghc/wiki/UnpackedSumTypes.

Main changes are:

- Add new syntax for unboxed sum types, terms and patterns. Hidden behind
  `-XUnboxedSums`.

- Add unlifted unboxed sum type constructors and data constructors, and extend
  the type and pattern checkers and the desugarer.

- Add a new RuntimeRep for unboxed sums.

- Extend the unarise pass to translate unboxed sums to unboxed tuples right
  before code generation.

- Add `StgRubbishArg` to `StgArg`, and a new type `CmmArg`, for better code
  generation when sum values are involved.

- Add a user manual section for unboxed sums.

Some other changes:

- Generalize `UbxTupleRep` to `MultiRep` and `UbxTupAlt` to `MultiValAlt` so
  that they can be used with both sums and tuples.

- Don't use `tyConPrimRep` in `isVoidTy`: `tyConPrimRep` is really wrong here;
  given an `Any` `TyCon`, there's no way to tell what its kind is, yet
  `kindPrimRep` (and in turn `tyConPrimRep`) returns `PtrRep`.

- Fix some bugs on the way: #12375.

Not included in this patch:

- Update Haddock for the new unboxed sum syntax.

- `TemplateHaskell` support is left as future work.

For reviewers:

- Front-end code is mostly trivial and adapted from the unboxed tuple code for
  type checking, pattern checking, renaming, desugaring etc.

- Main translation routines are in `RepType` and `UnariseStg`. The
  documentation in `UnariseStg` should be enough to understand what's going on.

Credits:

- Johan Tibell wrote the initial front-end and interface file extensions.

- Simon Peyton Jones reviewed this patch many times, wrote some code, and
  helped with debugging.

Reviewers: bgamari, alanz, goldfire, RyanGlScott, simonpj, austin, simonmar, hvr, erikd

Reviewed By: simonpj

Subscribers: Iceland_jack, ggreif, ezyang, RyanGlScott, goldfire, thomie, mpickering

Differential Revision: https://phabricator.haskell.org/D2259
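
As an aside (not part of the patch itself), here is a minimal sketch of the surface
syntax that `-XUnboxedSums` enables; the module and names below are hypothetical,
purely for illustration:

    {-# LANGUAGE UnboxedSums, MagicHash #-}
    module SumDemo where   -- hypothetical example module

    import GHC.Exts (Int#)

    -- An unboxed sum with two alternatives: an unboxed Int# or a lifted Bool.
    type IntOrBool = (# Int# | Bool #)

    mkLeft :: Int# -> IntOrBool
    mkLeft i = (# i | #)       -- construct the first alternative

    mkRight :: Bool -> IntOrBool
    mkRight b = (# | b #)      -- construct the second alternative

    describe :: IntOrBool -> String
    describe (# _ | #) = "an Int#"   -- patterns select one alternative
    describe (# | _ #) = "a Bool"

After the unarise pass (see `UnariseStg`), such a sum is rewritten into an unboxed
tuple of a tag plus field slots, with `StgRubbishArg` (and `CmmRubbishArg` in the
code below) filling the slots that the chosen alternative does not use.
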
Diffstat (limited to 'compiler/codeGen')
-rw-r--r--  compiler/codeGen/StgCmm.hs          8
-rw-r--r--  compiler/codeGen/StgCmmBind.hs      6
-rw-r--r--  compiler/codeGen/StgCmmClosure.hs   5
-rw-r--r--  compiler/codeGen/StgCmmCon.hs      10
-rw-r--r--  compiler/codeGen/StgCmmEnv.hs      37
-rw-r--r--  compiler/codeGen/StgCmmExpr.hs     30
-rw-r--r--  compiler/codeGen/StgCmmForeign.hs   7
-rw-r--r--  compiler/codeGen/StgCmmHeap.hs     20
-rw-r--r--  compiler/codeGen/StgCmmLayout.hs   28
-rw-r--r--  compiler/codeGen/StgCmmMonad.hs    19
-rw-r--r--  compiler/codeGen/StgCmmPrim.hs     25
-rw-r--r--  compiler/codeGen/StgCmmUtils.hs    35
12 files changed, 133 insertions, 97 deletions
diff --git a/compiler/codeGen/StgCmm.hs b/compiler/codeGen/StgCmm.hs
index 73b9bf62ff..d6e0cf2f72 100644
--- a/compiler/codeGen/StgCmm.hs
+++ b/compiler/codeGen/StgCmm.hs
@@ -33,7 +33,7 @@ import HscTypes
import CostCentre
import Id
import IdInfo
-import Type
+import RepType
import DataCon
import Name
import TyCon
@@ -241,13 +241,13 @@ cgDataCon data_con
do { _ <- ticky_code
; ldvEnter (CmmReg nodeReg)
; tickyReturnOldCon (length arg_things)
- ; void $ emitReturn [cmmOffsetB dflags (CmmReg nodeReg)
- (tagForCon dflags data_con)]
+ ; void $ emitReturn [CmmExprArg (cmmOffsetB dflags (CmmReg nodeReg) (tagForCon dflags data_con))]
}
-- The case continuation code expects a tagged pointer
arg_reps :: [(PrimRep, UnaryType)]
- arg_reps = [(typePrimRep rep_ty, rep_ty) | ty <- dataConRepArgTys data_con, rep_ty <- flattenRepType (repType ty)]
+ arg_reps = [(typePrimRep rep_ty, rep_ty) | ty <- dataConRepArgTys data_con
+ , rep_ty <- repTypeArgs ty]
-- Dynamic closure code for non-nullary constructors only
; when (not (isNullaryRepDataCon data_con))
diff --git a/compiler/codeGen/StgCmmBind.hs b/compiler/codeGen/StgCmmBind.hs
index 8adf3b088e..e8fd8f8d9b 100644
--- a/compiler/codeGen/StgCmmBind.hs
+++ b/compiler/codeGen/StgCmmBind.hs
@@ -210,9 +210,9 @@ cgRhs id (StgRhsCon cc con args)
buildDynCon id True cc con args
{- See Note [GC recovery] in compiler/codeGen/StgCmmClosure.hs -}
-cgRhs name (StgRhsClosure cc bi fvs upd_flag args body)
+cgRhs id (StgRhsClosure cc bi fvs upd_flag args body)
= do dflags <- getDynFlags
- mkRhsClosure dflags name cc bi (nonVoidIds fvs) upd_flag args body
+ mkRhsClosure dflags id cc bi (nonVoidIds fvs) upd_flag args body
------------------------------------------------------------------------
-- Non-constructor right hand sides
@@ -551,7 +551,7 @@ mkSlowEntryCode bndr cl_info arg_regs -- function closure is already in `Node'
-- mkDirectJump does not clobber `Node' containing function closure
jump = mkJump dflags NativeNodeCall
(mkLblExpr fast_lbl)
- (map (CmmReg . CmmLocal) (node : arg_regs))
+ (map (CmmExprArg . CmmReg . CmmLocal) (node : arg_regs))
(initUpdFrameOff dflags)
tscope <- getTickScope
emitProcWithConvention Slow Nothing slow_lbl
diff --git a/compiler/codeGen/StgCmmClosure.hs b/compiler/codeGen/StgCmmClosure.hs
index 8c1aeef55d..f831789454 100644
--- a/compiler/codeGen/StgCmmClosure.hs
+++ b/compiler/codeGen/StgCmmClosure.hs
@@ -78,6 +78,7 @@ import Type
import TyCoRep
import TcType
import TyCon
+import RepType
import BasicTypes
import Outputable
import DynFlags
@@ -286,14 +287,12 @@ mkLFImported id
| otherwise
= mkLFArgument id -- Not sure of exact arity
where
- arity = idRepArity id
+ arity = idFunRepArity id
-----------------------------------------------------
-- Dynamic pointer tagging
-----------------------------------------------------
-type ConTagZ = Int -- A *zero-indexed* constructor tag
-
type DynTag = Int -- The tag on a *pointer*
-- (from the dynamic-tagging paper)
diff --git a/compiler/codeGen/StgCmmCon.hs b/compiler/codeGen/StgCmmCon.hs
index 04257dd991..c77816a819 100644
--- a/compiler/codeGen/StgCmmCon.hs
+++ b/compiler/codeGen/StgCmmCon.hs
@@ -38,6 +38,7 @@ import DataCon
import DynFlags
import FastString
import Id
+import RepType (countConRepArgs)
import Literal
import PrelInfo
import Outputable
@@ -72,7 +73,7 @@ cgTopRhsCon dflags id con args =
; when (platformOS (targetPlatform dflags) == OSMinGW32) $
-- Windows DLLs have a problem with static cross-DLL refs.
ASSERT( not (isDllConApp dflags this_mod con args) ) return ()
- ; ASSERT( args `lengthIs` dataConRepRepArity con ) return ()
+ ; ASSERT( args `lengthIs` countConRepArgs con ) return ()
-- LAY IT OUT
; let
@@ -87,12 +88,13 @@ cgTopRhsCon dflags id con args =
-- needs to poke around inside it.
info_tbl = mkDataConInfoTable dflags con True ptr_wds nonptr_wds
- get_lit (arg, _offset) = do { CmmLit lit <- getArgAmode arg
+ get_lit (arg, _offset) = do { CmmExprArg (CmmLit lit) <- getArgAmode arg
; return lit }
; payload <- mapM get_lit nv_args_w_offsets
-- NB1: nv_args_w_offsets is sorted into ptrs then non-ptrs
-- NB2: all the amodes should be Lits!
+ -- TODO (osa): Why?
; let closure_rep = mkStaticClosureFields
dflags
@@ -113,7 +115,8 @@ cgTopRhsCon dflags id con args =
buildDynCon :: Id -- Name of the thing to which this constr will
-- be bound
- -> Bool -- is it genuinely bound to that name, or just for profiling?
+ -> Bool -- is it genuinely bound to that name, or just
+ -- for profiling?
-> CostCentreStack -- Where to grab cost centre from;
-- current CCS if currentOrSubsumedCCS
-> DataCon -- The data constructor
@@ -155,6 +158,7 @@ premature looking at the args will cause the compiler to black-hole!
-- at all.
buildDynCon' dflags _ binder _ _cc con []
+ | isNullaryRepDataCon con
= return (litIdInfo dflags binder (mkConLFInfo con)
(CmmLabel (mkClosureLabel (dataConName con) (idCafInfo binder))),
return mkNop)
diff --git a/compiler/codeGen/StgCmmEnv.hs b/compiler/codeGen/StgCmmEnv.hs
index d60828cd0d..ec4c75f0bc 100644
--- a/compiler/codeGen/StgCmmEnv.hs
+++ b/compiler/codeGen/StgCmmEnv.hs
@@ -19,7 +19,8 @@ module StgCmmEnv (
bindArgsToRegs, bindToReg, rebindToReg,
bindArgToReg, idToReg,
- getArgAmode, getNonVoidArgAmodes,
+ getArgAmode, getArgAmode_no_rubbish,
+ getNonVoidArgAmodes, getNonVoidArgAmodes_no_rubbish,
getCgIdInfo,
maybeLetNoEscape,
) where
@@ -33,18 +34,18 @@ import StgCmmClosure
import CLabel
-import DynFlags
-import MkGraph
import BlockId
import CmmExpr
import CmmUtils
-import Id
-import VarEnv
import Control.Monad
+import DynFlags
+import Id
+import MkGraph
import Name
-import StgSyn
import Outputable
+import StgSyn
import UniqFM
+import VarEnv
-------------------------------------
-- Non-void types
@@ -165,20 +166,34 @@ cgLookupPanic id
--------------------
-getArgAmode :: NonVoid StgArg -> FCode CmmExpr
+getArgAmode :: NonVoid StgArg -> FCode CmmArg
getArgAmode (NonVoid (StgVarArg var)) =
+ do { info <- getCgIdInfo var; return (CmmExprArg (idInfoToAmode info)) }
+getArgAmode (NonVoid (StgLitArg lit)) = liftM (CmmExprArg . CmmLit) $ cgLit lit
+getArgAmode (NonVoid (StgRubbishArg ty)) = return (CmmRubbishArg ty)
+
+getArgAmode_no_rubbish :: NonVoid StgArg -> FCode CmmExpr
+getArgAmode_no_rubbish (NonVoid (StgVarArg var)) =
do { info <- getCgIdInfo var; return (idInfoToAmode info) }
-getArgAmode (NonVoid (StgLitArg lit)) = liftM CmmLit $ cgLit lit
+getArgAmode_no_rubbish (NonVoid (StgLitArg lit)) = liftM CmmLit $ cgLit lit
+getArgAmode_no_rubbish arg@(NonVoid (StgRubbishArg _)) = pprPanic "getArgAmode_no_rubbish" (ppr arg)
-getNonVoidArgAmodes :: [StgArg] -> FCode [CmmExpr]
+getNonVoidArgAmodes :: [StgArg] -> FCode [CmmArg]
-- NB: Filters out void args,
-- so the result list may be shorter than the argument list
getNonVoidArgAmodes [] = return []
getNonVoidArgAmodes (arg:args)
| isVoidRep (argPrimRep arg) = getNonVoidArgAmodes args
| otherwise = do { amode <- getArgAmode (NonVoid arg)
- ; amodes <- getNonVoidArgAmodes args
- ; return ( amode : amodes ) }
+ ; amodes <- getNonVoidArgAmodes args
+ ; return ( amode : amodes ) }
+
+-- This version assumes arguments are not rubbish. I think this assumption holds
+-- as long as we don't pass unboxed sums to primops and foreign fns.
+getNonVoidArgAmodes_no_rubbish :: [StgArg] -> FCode [CmmExpr]
+getNonVoidArgAmodes_no_rubbish
+ = mapM (getArgAmode_no_rubbish . NonVoid) . filter (not . isVoidRep . argPrimRep)
+
------------------------------------------------------------------------
-- Interface functions for binding and re-binding names
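
(Editorial note: the `CmmArg` type itself is defined outside compiler/codeGen and is
not shown in this diffstat. Reconstructed from the way `CmmExprArg` and `CmmRubbishArg`
are used above, its shape is roughly the following sketch; see the Cmm part of the
patch for the real definition.)

    data CmmArg
      = CmmExprArg    CmmExpr   -- an ordinary Cmm expression argument
      | CmmRubbishArg Type      -- placeholder for an unboxed-sum slot that the
                                -- current alternative does not use; only its
                                -- PrimRep (GC pointer or not) matters for codegen
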
diff --git a/compiler/codeGen/StgCmmExpr.hs b/compiler/codeGen/StgCmmExpr.hs
index 811ea3c44a..142d30cddb 100644
--- a/compiler/codeGen/StgCmmExpr.hs
+++ b/compiler/codeGen/StgCmmExpr.hs
@@ -40,6 +40,7 @@ import Id
import PrimOp
import TyCon
import Type
+import RepType ( isVoidTy, countConRepArgs )
import CostCentre ( CostCentreStack, currentCCS )
import Maybes
import Util
@@ -64,10 +65,10 @@ cgExpr (StgOpApp (StgPrimOp SeqOp) [StgVarArg a, _] _res_ty) =
cgIdApp a []
cgExpr (StgOpApp op args ty) = cgOpApp op args ty
-cgExpr (StgConApp con args) = cgConApp con args
+cgExpr (StgConApp con args _)= cgConApp con args
cgExpr (StgTick t e) = cgTick t >> cgExpr e
cgExpr (StgLit lit) = do cmm_lit <- cgLit lit
- emitReturn [CmmLit cmm_lit]
+ emitReturn [CmmExprArg (CmmLit cmm_lit)]
cgExpr (StgLet binds expr) = do { cgBind binds; cgExpr expr }
cgExpr (StgLetNoEscape binds expr) =
@@ -142,7 +143,9 @@ cgLetNoEscapeRhsBody
cgLetNoEscapeRhsBody local_cc bndr (StgRhsClosure cc _bi _ _upd args body)
= cgLetNoEscapeClosure bndr local_cc cc (nonVoidIds args) body
cgLetNoEscapeRhsBody local_cc bndr (StgRhsCon cc con args)
- = cgLetNoEscapeClosure bndr local_cc cc [] (StgConApp con args)
+ = cgLetNoEscapeClosure bndr local_cc cc []
+ (StgConApp con args (pprPanic "cgLetNoEscapeRhsBody" $
+ text "StgRhsCon doesn't have type args"))
-- For a constructor RHS we want to generate a single chunk of
-- code which can be jumped to from many places, which will
-- return the constructor. It's easy; just behave as if it
@@ -306,7 +309,7 @@ cgCase (StgOpApp (StgPrimOp op) args _) bndr (AlgAlt tycon) alts
where
do_enum_primop :: PrimOp -> [StgArg] -> FCode CmmExpr
do_enum_primop TagToEnumOp [arg] -- No code!
- = getArgAmode (NonVoid arg)
+ = getArgAmode_no_rubbish (NonVoid arg)
do_enum_primop primop args
= do dflags <- getDynFlags
tmp <- newTemp (bWord dflags)
@@ -514,7 +517,7 @@ isSimpleOp :: StgOp -> [StgArg] -> FCode Bool
-- True iff the op cannot block or allocate
isSimpleOp (StgFCallOp (CCall (CCallSpec _ _ safe)) _) _ = return $! not (playSafe safe)
isSimpleOp (StgPrimOp op) stg_args = do
- arg_exprs <- getNonVoidArgAmodes stg_args
+ arg_exprs <- getNonVoidArgAmodes_no_rubbish stg_args
dflags <- getDynFlags
-- See Note [Inlining out-of-line primops and heap checks]
return $! isJust $ shouldInlinePrimOp dflags op arg_exprs
@@ -528,8 +531,9 @@ chooseReturnBndrs :: Id -> AltType -> [StgAlt] -> [NonVoid Id]
chooseReturnBndrs bndr (PrimAlt _) _alts
= nonVoidIds [bndr]
-chooseReturnBndrs _bndr (UbxTupAlt _) [(_, ids, _)]
- = nonVoidIds ids -- 'bndr' is not assigned!
+chooseReturnBndrs _bndr (MultiValAlt n) [(_, ids, _)]
+ = ASSERT2(n == length (nonVoidIds ids), ppr n $$ ppr ids $$ ppr _bndr)
+ nonVoidIds ids -- 'bndr' is not assigned!
chooseReturnBndrs bndr (AlgAlt _) _alts
= nonVoidIds [bndr] -- Only 'bndr' is assigned
@@ -547,7 +551,7 @@ cgAlts :: (GcPlan,ReturnKind) -> NonVoid Id -> AltType -> [StgAlt]
cgAlts gc_plan _bndr PolyAlt [(_, _, rhs)]
= maybeAltHeapCheck gc_plan (cgExpr rhs)
-cgAlts gc_plan _bndr (UbxTupAlt _) [(_, _, rhs)]
+cgAlts gc_plan _bndr (MultiValAlt _) [(_, _, rhs)]
= maybeAltHeapCheck gc_plan (cgExpr rhs)
-- Here bndrs are *already* in scope, so don't rebind them
@@ -671,7 +675,7 @@ cgConApp con stg_args
; emitReturn arg_exprs }
| otherwise -- Boxed constructors; allocate and return
- = ASSERT2( stg_args `lengthIs` dataConRepRepArity con, ppr con <+> ppr stg_args )
+ = ASSERT2( stg_args `lengthIs` countConRepArgs con, ppr con <> parens (ppr (countConRepArgs con)) <+> ppr stg_args )
do { (idinfo, fcode_init) <- buildDynCon (dataConWorkId con) False
currentCCS con stg_args
-- The first "con" says that the name bound to this
@@ -680,7 +684,7 @@ cgConApp con stg_args
; emit =<< fcode_init
; tickyReturnNewCon (length stg_args)
- ; emitReturn [idInfoToAmode idinfo] }
+ ; emitReturn [CmmExprArg (idInfoToAmode idinfo)] }
cgIdApp :: Id -> [StgArg] -> FCode ReturnKind
cgIdApp fun_id [] | isVoidTy (idType fun_id) = emitReturn []
@@ -703,7 +707,7 @@ cgIdApp fun_id args = do
case getCallMethod dflags fun_name cg_fun_id lf_info n_args v_args (cg_loc fun_info) self_loop_info of
-- A value in WHNF, so we can just return it.
- ReturnIt -> emitReturn [fun] -- ToDo: does ReturnIt guarantee tagged?
+ ReturnIt -> emitReturn [CmmExprArg fun] -- ToDo: does ReturnIt guarantee tagged?
EnterIt -> ASSERT( null args ) -- Discarding arguments
emitEnter fun
@@ -853,7 +857,7 @@ emitEnter fun = do
Return _ -> do
{ let entry = entryCode dflags $ closureInfoPtr dflags $ CmmReg nodeReg
; emit $ mkJump dflags NativeNodeCall entry
- [cmmUntag dflags fun] updfr_off
+ [CmmExprArg (cmmUntag dflags fun)] updfr_off
; return AssignedDirectly
}
@@ -889,7 +893,7 @@ emitEnter fun = do
; updfr_off <- getUpdFrameOff
; let area = Young lret
; let (outArgs, regs, copyout) = copyOutOflow dflags NativeNodeCall Call area
- [fun] updfr_off []
+ [CmmExprArg fun] updfr_off []
-- refer to fun via nodeReg after the copyout, to avoid having
-- both live simultaneously; this sometimes enables fun to be
-- inlined in the RHS of the R1 assignment.
diff --git a/compiler/codeGen/StgCmmForeign.hs b/compiler/codeGen/StgCmmForeign.hs
index c8db8644db..eb14e8cce6 100644
--- a/compiler/codeGen/StgCmmForeign.hs
+++ b/compiler/codeGen/StgCmmForeign.hs
@@ -34,6 +34,7 @@ import Cmm
import CmmUtils
import MkGraph
import Type
+import RepType
import TysPrim
import CLabel
import SMRep
@@ -110,7 +111,7 @@ cgForeignCall (CCall (CCallSpec target cconv safety)) stg_args res_ty
_something_else ->
do { _ <- emitForeignCall safety res_regs call_target call_args
- ; emitReturn (map (CmmReg . CmmLocal) res_regs)
+ ; emitReturn (map (CmmExprArg . CmmReg . CmmLocal) res_regs)
}
}
@@ -523,10 +524,12 @@ getFCallArgs args
= do { mb_cmms <- mapM get args
; return (catMaybes mb_cmms) }
where
+ get arg@(StgRubbishArg{})
+ = pprPanic "getFCallArgs" (text "Rubbish arg in foreign call:" <+> ppr arg)
get arg | isVoidRep arg_rep
= return Nothing
| otherwise
- = do { cmm <- getArgAmode (NonVoid arg)
+ = do { cmm <- getArgAmode_no_rubbish (NonVoid arg)
; dflags <- getDynFlags
; return (Just (add_shim dflags arg_ty cmm, hint)) }
where
diff --git a/compiler/codeGen/StgCmmHeap.hs b/compiler/codeGen/StgCmmHeap.hs
index ebff4402d0..fa1780449d 100644
--- a/compiler/codeGen/StgCmmHeap.hs
+++ b/compiler/codeGen/StgCmmHeap.hs
@@ -72,7 +72,7 @@ allocDynClosure
allocDynClosureCmm
:: Maybe Id -> CmmInfoTable -> LambdaFormInfo -> CmmExpr -> CmmExpr
- -> [(CmmExpr, ByteOff)]
+ -> [(CmmArg, ByteOff)]
-> FCode CmmExpr -- returns Hp+n
-- allocDynClosure allocates the thing in the heap,
@@ -113,7 +113,7 @@ allocHeapClosure
:: SMRep -- ^ representation of the object
-> CmmExpr -- ^ info pointer
-> CmmExpr -- ^ cost centre
- -> [(CmmExpr,ByteOff)] -- ^ payload
+ -> [(CmmArg,ByteOff)] -- ^ payload
-> FCode CmmExpr -- ^ returns the address of the object
allocHeapClosure rep info_ptr use_cc payload = do
profDynAlloc rep use_cc
@@ -144,7 +144,7 @@ allocHeapClosure rep info_ptr use_cc payload = do
emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> FCode ()
emitSetDynHdr base info_ptr ccs
= do dflags <- getDynFlags
- hpStore base (zip (header dflags) [0, wORD_SIZE dflags ..])
+ hpStore base (zip (map CmmExprArg (header dflags)) [0, wORD_SIZE dflags ..])
where
header :: DynFlags -> [CmmExpr]
header dflags = [info_ptr] ++ dynProfHdr dflags ccs
@@ -152,11 +152,11 @@ emitSetDynHdr base info_ptr ccs
-- No ticky header
-- Store the item (expr,off) in base[off]
-hpStore :: CmmExpr -> [(CmmExpr, ByteOff)] -> FCode ()
+hpStore :: CmmExpr -> [(CmmArg, ByteOff)] -> FCode ()
hpStore base vals = do
dflags <- getDynFlags
sequence_ $
- [ emitStore (cmmOffsetB dflags base off) val | (val,off) <- vals ]
+ [ emitStore (cmmOffsetB dflags base off) val | (CmmExprArg val,off) <- vals ]
-----------------------------------------------------------
-- Layout of static closures
@@ -364,7 +364,7 @@ entryHeapCheck' is_fastf node arity args code
= do dflags <- getDynFlags
let is_thunk = arity == 0
- args' = map (CmmReg . CmmLocal) args
+ args' = map (CmmExprArg . CmmReg . CmmLocal) args
stg_gc_fun = CmmReg (CmmGlobal GCFun)
stg_gc_enter1 = CmmReg (CmmGlobal GCEnter1)
@@ -376,13 +376,13 @@ entryHeapCheck' is_fastf node arity args code
-}
gc_call upd
| is_thunk
- = mkJump dflags NativeNodeCall stg_gc_enter1 [node] upd
+ = mkJump dflags NativeNodeCall stg_gc_enter1 [CmmExprArg node] upd
| is_fastf
- = mkJump dflags NativeNodeCall stg_gc_fun (node : args') upd
+ = mkJump dflags NativeNodeCall stg_gc_fun (CmmExprArg node : args') upd
| otherwise
- = mkJump dflags Slow stg_gc_fun (node : args') upd
+ = mkJump dflags Slow stg_gc_fun (CmmExprArg node : args') upd
updfr_sz <- getUpdFrameOff
@@ -446,7 +446,7 @@ cannedGCReturnsTo checkYield cont_on_stack gc regs lret off code
updfr_sz <- getUpdFrameOff
heapCheck False checkYield (gc_call dflags gc updfr_sz) code
where
- reg_exprs = map (CmmReg . CmmLocal) regs
+ reg_exprs = map (CmmExprArg . CmmReg . CmmLocal) regs
-- Note [stg_gc arguments]
-- NB. we use the NativeReturn convention for passing arguments
diff --git a/compiler/codeGen/StgCmmLayout.hs b/compiler/codeGen/StgCmmLayout.hs
index 47ee370212..713d542bdc 100644
--- a/compiler/codeGen/StgCmmLayout.hs
+++ b/compiler/codeGen/StgCmmLayout.hs
@@ -68,7 +68,7 @@ import Control.Monad
--
-- > p=x; q=y;
--
-emitReturn :: [CmmExpr] -> FCode ReturnKind
+emitReturn :: [CmmArg] -> FCode ReturnKind
emitReturn results
= do { dflags <- getDynFlags
; sequel <- getSequel
@@ -90,7 +90,7 @@ emitReturn results
-- using the call/return convention @conv@, passing @args@, and
-- returning the results to the current sequel.
--
-emitCall :: (Convention, Convention) -> CmmExpr -> [CmmExpr] -> FCode ReturnKind
+emitCall :: (Convention, Convention) -> CmmExpr -> [CmmArg] -> FCode ReturnKind
emitCall convs fun args
= emitCallWithExtraStack convs fun args noExtraStack
@@ -101,8 +101,8 @@ emitCall convs fun args
-- @stack@, and returning the results to the current sequel.
--
emitCallWithExtraStack
- :: (Convention, Convention) -> CmmExpr -> [CmmExpr]
- -> [CmmExpr] -> FCode ReturnKind
+ :: (Convention, Convention) -> CmmExpr -> [CmmArg]
+ -> [CmmArg] -> FCode ReturnKind
emitCallWithExtraStack (callConv, retConv) fun args extra_stack
= do { dflags <- getDynFlags
; adjustHpBackwards
@@ -187,7 +187,7 @@ slowCall fun stg_args
(r, slow_code) <- getCodeR $ do
r <- direct_call "slow_call" NativeNodeCall
- (mkRtsApFastLabel rts_fun) arity ((P,Just fun):argsreps)
+ (mkRtsApFastLabel rts_fun) arity ((P,Just (CmmExprArg fun)):argsreps)
emitComment $ mkFastString ("slow_call for " ++
showSDoc dflags (ppr fun) ++
" with pat " ++ unpackFS rts_fun)
@@ -213,7 +213,7 @@ slowCall fun stg_args
fast_code <- getCode $
emitCall (NativeNodeCall, NativeReturn)
(entryCode dflags fun_iptr)
- (nonVArgs ((P,Just funv):argsreps))
+ (nonVArgs ((P,Just (CmmExprArg funv)):argsreps))
slow_lbl <- newLabelC
fast_lbl <- newLabelC
@@ -271,7 +271,7 @@ slowCall fun stg_args
direct_call :: String
-> Convention -- e.g. NativeNodeCall or NativeDirectCall
-> CLabel -> RepArity
- -> [(ArgRep,Maybe CmmExpr)] -> FCode ReturnKind
+ -> [(ArgRep,Maybe CmmArg)] -> FCode ReturnKind
direct_call caller call_conv lbl arity args
| debugIsOn && real_arity > length args -- Too few args
= do -- Caller should ensure that there are enough args!
@@ -299,11 +299,11 @@ direct_call caller call_conv lbl arity args
-- When constructing calls, it is easier to keep the ArgReps and the
--- CmmExprs zipped together. However, a void argument has no
--- representation, so we need to use Maybe CmmExpr (the alternative of
+-- CmmArgs zipped together. However, a void argument has no
+-- representation, so we need to use Maybe CmmArg (the alternative of
-- using zeroCLit or even undefined would work, but would be ugly).
--
-getArgRepsAmodes :: [StgArg] -> FCode [(ArgRep, Maybe CmmExpr)]
+getArgRepsAmodes :: [StgArg] -> FCode [(ArgRep, Maybe CmmArg)]
getArgRepsAmodes = mapM getArgRepAmode
where getArgRepAmode arg
| V <- rep = return (V, Nothing)
@@ -311,7 +311,7 @@ getArgRepsAmodes = mapM getArgRepAmode
return (rep, Just expr)
where rep = toArgRep (argPrimRep arg)
-nonVArgs :: [(ArgRep, Maybe CmmExpr)] -> [CmmExpr]
+nonVArgs :: [(ArgRep, Maybe CmmArg)] -> [CmmArg]
nonVArgs [] = []
nonVArgs ((_,Nothing) : args) = nonVArgs args
nonVArgs ((_,Just arg) : args) = arg : nonVArgs args
@@ -354,7 +354,7 @@ just more arguments that we are passing on the stack (cml_args).
-- | 'slowArgs' takes a list of function arguments and prepares them for
-- pushing on the stack for "extra" arguments to a function which requires
-- fewer arguments than we currently have.
-slowArgs :: DynFlags -> [(ArgRep, Maybe CmmExpr)] -> [(ArgRep, Maybe CmmExpr)]
+slowArgs :: DynFlags -> [(ArgRep, Maybe CmmArg)] -> [(ArgRep, Maybe CmmArg)]
slowArgs _ [] = []
slowArgs dflags args -- careful: reps contains voids (V), but args does not
| gopt Opt_SccProfilingOn dflags
@@ -365,8 +365,8 @@ slowArgs dflags args -- careful: reps contains voids (V), but args does not
(call_args, rest_args) = splitAt n args
stg_ap_pat = mkCmmRetInfoLabel rtsUnitId arg_pat
- this_pat = (N, Just (mkLblExpr stg_ap_pat)) : call_args
- save_cccs = [(N, Just (mkLblExpr save_cccs_lbl)), (N, Just curCCS)]
+ this_pat = (N, Just (CmmExprArg (mkLblExpr stg_ap_pat))) : call_args
+ save_cccs = [(N, Just (CmmExprArg (mkLblExpr save_cccs_lbl))), (N, Just (CmmExprArg curCCS))]
save_cccs_lbl = mkCmmRetInfoLabel rtsUnitId (fsLit "stg_restore_cccs")
-------------------------------------------------------------------------
diff --git a/compiler/codeGen/StgCmmMonad.hs b/compiler/codeGen/StgCmmMonad.hs
index 2742acdcdb..8f66cfaa91 100644
--- a/compiler/codeGen/StgCmmMonad.hs
+++ b/compiler/codeGen/StgCmmMonad.hs
@@ -19,8 +19,8 @@ module StgCmmMonad (
emit, emitDecl, emitProc,
emitProcWithConvention, emitProcWithStackFrame,
- emitOutOfLine, emitAssign, emitStore, emitComment,
- emitTick, emitUnwind,
+ emitOutOfLine, emitAssign, emitAssign', emitStore,
+ emitComment, emitTick, emitUnwind,
getCmm, aGraphToGraph,
getCodeR, getCode, getCodeScoped, getHeapUsage,
@@ -76,6 +76,7 @@ import Unique
import UniqSupply
import FastString
import Outputable
+import RepType (typePrimRep)
import Control.Monad
import Data.List
@@ -743,6 +744,14 @@ emitUnwind g e = do
emitAssign :: CmmReg -> CmmExpr -> FCode ()
emitAssign l r = emitCgStmt (CgStmt (CmmAssign l r))
+emitAssign' :: CmmReg -> CmmArg -> FCode ()
+emitAssign' l (CmmExprArg r) = emitAssign l r
+emitAssign' l (CmmRubbishArg ty)
+ | isGcPtrRep (typePrimRep ty)
+ = emitAssign l rubbishExpr
+ | otherwise
+ = return ()
+
emitStore :: CmmExpr -> CmmExpr -> FCode ()
emitStore l r = emitCgStmt (CgStmt (CmmStore l r))
@@ -858,8 +867,8 @@ mkCmmIfThen e tbranch = do
, mkLabel tid tscp, tbranch, mkLabel endif tscp ]
-mkCall :: CmmExpr -> (Convention, Convention) -> [CmmFormal] -> [CmmActual]
- -> UpdFrameOffset -> [CmmActual] -> FCode CmmAGraph
+mkCall :: CmmExpr -> (Convention, Convention) -> [CmmFormal] -> [CmmArg]
+ -> UpdFrameOffset -> [CmmArg] -> FCode CmmAGraph
mkCall f (callConv, retConv) results actuals updfr_off extra_stack = do
dflags <- getDynFlags
k <- newLabelC
@@ -869,7 +878,7 @@ mkCall f (callConv, retConv) results actuals updfr_off extra_stack = do
copyout = mkCallReturnsTo dflags f callConv actuals k off updfr_off extra_stack
return $ catAGraphs [copyout, mkLabel k tscp, copyin]
-mkCmmCall :: CmmExpr -> [CmmFormal] -> [CmmActual] -> UpdFrameOffset
+mkCmmCall :: CmmExpr -> [CmmFormal] -> [CmmArg] -> UpdFrameOffset
-> FCode CmmAGraph
mkCmmCall f results actuals updfr_off
= mkCall f (NativeDirectCall, NativeReturn) results actuals updfr_off []
diff --git a/compiler/codeGen/StgCmmPrim.hs b/compiler/codeGen/StgCmmPrim.hs
index d3c09c584e..c02f992bed 100644
--- a/compiler/codeGen/StgCmmPrim.hs
+++ b/compiler/codeGen/StgCmmPrim.hs
@@ -46,6 +46,7 @@ import Util
import Prelude hiding ((<*>))
import Data.Bits ((.&.), bit)
+import Data.Bifunctor (first)
import Control.Monad (liftM, when)
------------------------------------------------------------------------
@@ -79,10 +80,10 @@ cgOpApp (StgFCallOp fcall _) stg_args res_ty
cgOpApp (StgPrimOp TagToEnumOp) [arg] res_ty
= ASSERT(isEnumerationTyCon tycon)
do { dflags <- getDynFlags
- ; args' <- getNonVoidArgAmodes [arg]
+ ; args' <- getNonVoidArgAmodes_no_rubbish [arg]
; let amode = case args' of [amode] -> amode
_ -> panic "TagToEnumOp had void arg"
- ; emitReturn [tagToClosure dflags tycon amode] }
+ ; emitReturn [CmmExprArg (tagToClosure dflags tycon amode)] }
where
-- If you're reading this code in the attempt to figure
-- out why the compiler panic'ed here, it is probably because
@@ -93,11 +94,11 @@ cgOpApp (StgPrimOp TagToEnumOp) [arg] res_ty
cgOpApp (StgPrimOp primop) args res_ty = do
dflags <- getDynFlags
- cmm_args <- getNonVoidArgAmodes args
+ cmm_args <- getNonVoidArgAmodes_no_rubbish args
case shouldInlinePrimOp dflags primop cmm_args of
Nothing -> do -- out-of-line
let fun = CmmLit (CmmLabel (mkRtsPrimOpLabel primop))
- emitCall (NativeNodeCall, NativeReturn) fun cmm_args
+ emitCall (NativeNodeCall, NativeReturn) fun (map CmmExprArg cmm_args)
Just f -- inline
| ReturnsPrim VoidRep <- result_info
@@ -108,12 +109,12 @@ cgOpApp (StgPrimOp primop) args res_ty = do
-> do dflags <- getDynFlags
res <- newTemp (primRepCmmType dflags rep)
f [res]
- emitReturn [CmmReg (CmmLocal res)]
+ emitReturn [CmmExprArg (CmmReg (CmmLocal res))]
| ReturnsAlg tycon <- result_info, isUnboxedTupleTyCon tycon
-> do (regs, _hints) <- newUnboxedTupleRegs res_ty
f regs
- emitReturn (map (CmmReg . CmmLocal) regs)
+ emitReturn (map (CmmExprArg . CmmReg . CmmLocal) regs)
| otherwise -> panic "cgPrimop"
where
@@ -256,7 +257,7 @@ cgPrimOp :: [LocalReg] -- where to put the results
cgPrimOp results op args
= do dflags <- getDynFlags
- arg_exprs <- getNonVoidArgAmodes args
+ arg_exprs <- getNonVoidArgAmodes_no_rubbish args
emitPrimOp dflags results op arg_exprs
@@ -1657,7 +1658,7 @@ doNewByteArrayOp res_r n = do
let hdr_size = fixedHdrSize dflags
base <- allocHeapClosure rep info_ptr curCCS
- [ (mkIntExpr dflags n,
+ [ (CmmExprArg (mkIntExpr dflags n),
hdr_size + oFFSET_StgArrBytes_bytes dflags)
]
@@ -1770,7 +1771,7 @@ doNewArrayOp res_r rep info payload n init = do
(mkIntExpr dflags (nonHdrSize dflags rep))
(zeroExpr dflags)
- base <- allocHeapClosure rep info_ptr curCCS payload
+ base <- allocHeapClosure rep info_ptr curCCS (map (first CmmExprArg) payload)
arr <- CmmLocal `fmap` newTemp (bWord dflags)
emit $ mkAssign arr base
@@ -1953,9 +1954,9 @@ emitCloneArray info_p res_r src src_off n = do
let hdr_size = fixedHdrSize dflags
base <- allocHeapClosure rep info_ptr curCCS
- [ (mkIntExpr dflags n,
+ [ (CmmExprArg (mkIntExpr dflags n),
hdr_size + oFFSET_StgMutArrPtrs_ptrs dflags)
- , (mkIntExpr dflags (nonHdrSizeW rep),
+ , (CmmExprArg (mkIntExpr dflags (nonHdrSizeW rep)),
hdr_size + oFFSET_StgMutArrPtrs_size dflags)
]
@@ -1992,7 +1993,7 @@ emitCloneSmallArray info_p res_r src src_off n = do
let hdr_size = fixedHdrSize dflags
base <- allocHeapClosure rep info_ptr curCCS
- [ (mkIntExpr dflags n,
+ [ (CmmExprArg (mkIntExpr dflags n),
hdr_size + oFFSET_StgSmallMutArrPtrs_ptrs dflags)
]
diff --git a/compiler/codeGen/StgCmmUtils.hs b/compiler/codeGen/StgCmmUtils.hs
index 5d6710197b..f1437eb640 100644
--- a/compiler/codeGen/StgCmmUtils.hs
+++ b/compiler/codeGen/StgCmmUtils.hs
@@ -38,7 +38,7 @@ module StgCmmUtils (
addToMem, addToMemE, addToMemLblE, addToMemLbl,
mkWordCLit,
newStringCLit, newByteStringCLit,
- blankWord
+ blankWord, rubbishExpr
) where
#include "HsVersions.h"
@@ -67,6 +67,7 @@ import UniqSupply (MonadUnique(..))
import DynFlags
import FastString
import Outputable
+import RepType
import qualified Data.ByteString as BS
import qualified Data.Map as M
@@ -193,7 +194,7 @@ emitRtsCallGen res lbl args safe
where
call updfr_off =
if safe then
- emit =<< mkCmmCall fun_expr res' args' updfr_off
+ emit =<< mkCmmCall fun_expr res' (map CmmExprArg args') updfr_off
else do
let conv = ForeignConvention CCallConv arg_hints res_hints CmmMayReturn
emit $ mkUnsafeCall (ForeignTarget fun_expr conv) res' args'
@@ -251,7 +252,7 @@ callerSaveVolatileRegs dflags = (caller_save, caller_load)
callerRestoreGlobalReg reg
= mkAssign (CmmGlobal reg)
- (CmmLoad (get_GlobalReg_addr dflags reg) (globalRegType dflags reg))
+ (CmmLoad (get_GlobalReg_addr dflags reg) (globalRegType dflags reg))
-- -----------------------------------------------------------------------------
-- Global registers
@@ -361,15 +362,11 @@ newUnboxedTupleRegs res_ty
; sequel <- getSequel
; regs <- choose_regs dflags sequel
; ASSERT( regs `equalLength` reps )
- return (regs, map primRepForeignHint reps) }
+ return (regs, map slotForeignHint reps) }
where
- UbxTupleRep ty_args = repType res_ty
- reps = [ rep
- | ty <- ty_args
- , let rep = typePrimRep ty
- , not (isVoidRep rep) ]
+ MultiRep reps = repType res_ty
choose_regs _ (AssignTo regs _) = return regs
- choose_regs dflags _ = mapM (newTemp . primRepCmmType dflags) reps
+ choose_regs dflags _ = mapM (newTemp . slotCmmType dflags) reps
@@ -377,14 +374,14 @@ newUnboxedTupleRegs res_ty
-- emitMultiAssign
-------------------------------------------------------------------------
-emitMultiAssign :: [LocalReg] -> [CmmExpr] -> FCode ()
+emitMultiAssign :: [LocalReg] -> [CmmArg] -> FCode ()
-- Emit code to perform the assignments in the
-- input simultaneously, using temporary variables when necessary.
type Key = Int
type Vrtx = (Key, Stmt) -- Give each vertex a unique number,
-- for fast comparison
-type Stmt = (LocalReg, CmmExpr) -- r := e
+type Stmt = (LocalReg, CmmArg) -- r := e
-- We use the strongly-connected component algorithm, in which
-- * the vertices are the statements
@@ -393,7 +390,7 @@ type Stmt = (LocalReg, CmmExpr) -- r := e
-- that is, if s1 should *follow* s2 in the final order
emitMultiAssign [] [] = return ()
-emitMultiAssign [reg] [rhs] = emitAssign (CmmLocal reg) rhs
+emitMultiAssign [reg] [rhs] = emitAssign' (CmmLocal reg) rhs
emitMultiAssign regs rhss = do
dflags <- getDynFlags
ASSERT2( equalLength regs rhss, ppr regs $$ ppr rhss )
@@ -432,16 +429,20 @@ unscramble dflags vertices = mapM_ do_component components
split :: DynFlags -> Unique -> Stmt -> (Stmt, Stmt)
split dflags uniq (reg, rhs)
- = ((tmp, rhs), (reg, CmmReg (CmmLocal tmp)))
+ = ((tmp, rhs), (reg, CmmExprArg (CmmReg (CmmLocal tmp))))
where
- rep = cmmExprType dflags rhs
+ rep = cmmArgType dflags rhs
tmp = LocalReg uniq rep
mk_graph :: Stmt -> FCode ()
- mk_graph (reg, rhs) = emitAssign (CmmLocal reg) rhs
+ mk_graph (reg, rhs) = emitAssign' (CmmLocal reg) rhs
mustFollow :: Stmt -> Stmt -> Bool
- (reg, _) `mustFollow` (_, rhs) = regUsedIn dflags (CmmLocal reg) rhs
+ (reg, _) `mustFollow` (_, rhs) = regUsedIn' dflags (CmmLocal reg) rhs
+
+regUsedIn' :: DynFlags -> CmmReg -> CmmArg -> Bool
+regUsedIn' _ _ (CmmRubbishArg _) = False
+regUsedIn' dflags reg (CmmExprArg expr) = regUsedIn dflags reg expr
-------------------------------------------------------------------------
-- mkSwitch