Diffstat (limited to 'compiler/cmm/CmmProcPointZ.hs')
-rw-r--r--  compiler/cmm/CmmProcPointZ.hs  717
1 file changed, 593 insertions(+), 124 deletions(-)
diff --git a/compiler/cmm/CmmProcPointZ.hs b/compiler/cmm/CmmProcPointZ.hs
index 82d3e26452..cedb9ef726 100644
--- a/compiler/cmm/CmmProcPointZ.hs
+++ b/compiler/cmm/CmmProcPointZ.hs
@@ -1,16 +1,19 @@
module CmmProcPointZ
( callProcPoints, minimalProcPointSet
- , addProcPointProtocols
- , splitAtProcPoints
+ , addProcPointProtocols, splitAtProcPoints, procPointAnalysis
+ , liveSlotAnal, cafAnal, layout, manifestSP, igraph, areaBuilder
)
where
+import Constants
+import qualified Prelude as P
import Prelude hiding (zip, unzip, last)
+import Util (sortLe)
import BlockId
+import Bitmap
import CLabel
---import ClosureInfo
import Cmm hiding (blockId)
import CmmExpr
import CmmContFlowOpt
@@ -18,13 +21,17 @@ import CmmLiveZ
import CmmTx
import DFMonad
import FiniteMap
-import MachOp (MachHint(NoHint))
+import IdInfo
+import List (sortBy)
import Maybes
-import MkZipCfgCmm hiding (CmmBlock, CmmGraph)
+import MkZipCfgCmm hiding (CmmBlock, CmmGraph, CmmTopZ)
import Monad
import Name
import Outputable
import Panic
+import SMRep (rET_SMALL)
+import StgCmmClosure
+import StgCmmUtils
import UniqFM
import UniqSet
import UniqSupply
@@ -66,7 +73,7 @@ be the start of a new procedure to which the continuations can jump:
You might think then that a criterion to make a node a proc point is
that it is directly reached by two distinct proc points. (Note
-[Direct reachability].) But this criterion is a bit two simple; for
+[Direct reachability].) But this criterion is a bit too simple; for
example, 'return x' is also reached by two proc points, yet there is
no point in pulling it out of k_join. A good criterion would be to
say that a node should be made a proc point if it is reached by a set
@@ -123,7 +130,7 @@ forward = ForwardTransfers first middle last exit
where first ProcPoint id = ReachedBy $ unitUniqSet id
first x _ = x
middle x _ = x
- last _ (LastCall _ (Just id)) = LastOutFacts [(id, ProcPoint)]
+ last _ (LastCall _ (Just id) _) = LastOutFacts [(id, ProcPoint)]
last x l = LastOutFacts $ map (\id -> (id, x)) (succs l)
exit x = x
@@ -136,32 +143,31 @@ minimalProcPointSet :: ProcPointSet -> CmmGraph -> FuelMonad ProcPointSet
callProcPoints g = fold_blocks add entryPoint g
where entryPoint = unitUniqSet (lg_entry g)
add b set = case last $ unzip b of
- LastOther (LastCall _ (Just k)) -> extendBlockSet set k
+ LastOther (LastCall _ (Just k) _) -> extendBlockSet set k
_ -> set
minimalProcPointSet callProcPoints g = extendPPSet g (postorder_dfs g) callProcPoints
type PPFix = FuelMonad (ForwardFixedPoint Middle Last Status ())
-procPointAnalysis :: ProcPointSet -> CmmGraph -> FuelMonad PPFix
+procPointAnalysis :: ProcPointSet -> CmmGraph -> FuelMonad (BlockEnv Status)
procPointAnalysis procPoints g =
let addPP env id = extendBlockEnv env id ProcPoint
initProcPoints = foldl addPP emptyBlockEnv (uniqSetToList procPoints)
- in runDFM lattice $ -- init with old facts and solve
- return $ (zdfSolveFrom initProcPoints "proc-point reachability" lattice
+ in liftM zdfFpFacts $
+ (zdfSolveFrom initProcPoints "proc-point reachability" lattice
forward (fact_bot lattice) $ graphOfLGraph g :: PPFix)
extendPPSet :: CmmGraph -> [CmmBlock] -> ProcPointSet -> FuelMonad ProcPointSet
extendPPSet g blocks procPoints =
- do res <- procPointAnalysis procPoints g
- env <- liftM zdfFpFacts res
+ do env <- procPointAnalysis procPoints g
let add block pps = let id = blockId block
in case lookupBlockEnv env id of
Just ProcPoint -> extendBlockSet pps id
_ -> pps
procPoints' = fold_blocks add emptyBlockSet g
newPoint = listToMaybe (mapMaybe ppSuccessor blocks)
- ppSuccessor b@(Block id _) =
+ ppSuccessor b@(Block id _ _) =
let nreached id = case lookupBlockEnv env id `orElse` panic "no ppt" of
ProcPoint -> 1
ReachedBy ps -> sizeUniqSet ps
@@ -178,8 +184,6 @@ extendPPSet g blocks procPoints =
Nothing -> return procPoints'
-
-
------------------------------------------------------------------------
-- Computing Proc-Point Protocols --
------------------------------------------------------------------------
@@ -243,12 +247,13 @@ addProcPointProtocols callPPs procPoints g =
do liveness <- cmmLivenessZ g
(protos, g') <- return $ optimize_calls liveness g
blocks'' <- add_CopyOuts protos procPoints g'
- return $ LGraph (lg_entry g) blocks''
+ return $ LGraph (lg_entry g) (lg_argoffset g) blocks''
where optimize_calls liveness g = -- see Note [Separate Adams optimization]
let (protos, blocks') =
fold_blocks maybe_add_call (init_protocols, emptyBlockEnv) g
protos' = add_unassigned liveness procPoints protos
- g' = LGraph (lg_entry g) $ add_CopyIns callPPs protos' blocks'
+ g' = LGraph (lg_entry g) (lg_argoffset g) $
+ add_CopyIns callPPs protos' blocks'
in (protos', runTx removeUnreachableBlocksZ g')
maybe_add_call :: CmmBlock -> (BlockEnv Protocol, BlockEnv CmmBlock)
-> (BlockEnv Protocol, BlockEnv CmmBlock)
@@ -257,11 +262,10 @@ addProcPointProtocols callPPs procPoints g =
-- redirect the call (cf 'newblock') and set the protocol if necessary
maybe_add_call block (protos, blocks) =
case goto_end $ unzip block of
- (h, LastOther (LastCall tgt (Just k)))
+ (h, LastOther (LastCall tgt (Just k) s))
| Just proto <- lookupBlockEnv protos k,
- Just pee <- jumpsToProcPoint k
- -> let newblock =
- zipht h (tailOfLast (LastCall tgt (Just pee)))
+ Just pee <- branchesToProcPoint k
+ -> let newblock = zipht h (tailOfLast (LastCall tgt (Just pee) s))
changed_blocks = insertBlock newblock blocks
unchanged_blocks = insertBlock block blocks
in case lookupBlockEnv protos pee of
@@ -271,21 +275,20 @@ addProcPointProtocols callPPs procPoints g =
else (protos, unchanged_blocks)
_ -> (protos, insertBlock block blocks)
- jumpsToProcPoint :: BlockId -> Maybe BlockId
- -- ^ Tells whether the named block is just a jump to a proc point
- jumpsToProcPoint id =
- let (Block _ t) = lookupBlockEnv (lg_blocks g) id `orElse`
- panic "jump out of graph"
+ branchesToProcPoint :: BlockId -> Maybe BlockId
+ -- ^ Tells whether the named block is just a branch to a proc point
+ branchesToProcPoint id =
+ let (Block _ _ t) = lookupBlockEnv (lg_blocks g) id `orElse`
+ panic "branch out of graph"
in case t of
- ZTail (CopyIn {}) (ZLast (LastOther (LastBranch pee)))
+ ZLast (LastOther (LastBranch pee))
| elemBlockSet pee procPoints -> Just pee
_ -> Nothing
init_protocols = fold_blocks maybe_add_proto emptyBlockEnv g
maybe_add_proto :: CmmBlock -> BlockEnv Protocol -> BlockEnv Protocol
- maybe_add_proto (Block id (ZTail (CopyIn c fs _srt) _)) env =
- extendBlockEnv env id (Protocol c fs $ toArea id fs)
+ --maybe_add_proto (Block id (ZTail (CopyIn c _ fs _srt) _)) env =
+ -- extendBlockEnv env id (Protocol c fs $ toArea id fs)
maybe_add_proto _ env = env
- toArea id fs = mkCallArea id fs $ Just fs
-- | For now, following a suggestion by Ben Lippmeier, we pass all
-- live variables as arguments, hoping that a clever register
@@ -297,17 +300,17 @@ add_unassigned = pass_live_vars_as_args
pass_live_vars_as_args :: BlockEnv CmmLive -> ProcPointSet ->
BlockEnv Protocol -> BlockEnv Protocol
-pass_live_vars_as_args liveness procPoints protos = protos'
+pass_live_vars_as_args _liveness procPoints protos = protos'
where protos' = foldUniqSet addLiveVars protos procPoints
addLiveVars :: BlockId -> BlockEnv Protocol -> BlockEnv Protocol
addLiveVars id protos =
case lookupBlockEnv protos id of
Just _ -> protos
- Nothing -> let live = lookupBlockEnv liveness id `orElse`
- panic ("no liveness at block " ++ show id)
- formals = map (\x -> CmmKinded x NoHint) $ uniqSetToList live
- prot = Protocol ConventionPrivate formals $
- mkCallArea id formals $ Just formals
+ Nothing -> let live = emptyBlockEnv
+ --lookupBlockEnv _liveness id `orElse`
+ --panic ("no liveness at block " ++ show id)
+ formals = uniqSetToList live
+ prot = Protocol Private formals $ CallArea $ Young id
in extendBlockEnv protos id prot
@@ -315,131 +318,597 @@ pass_live_vars_as_args liveness procPoints protos = protos'
-- instruction. (Proc-points that arise from calls already have their copy-in instructions.)
add_CopyIns :: ProcPointSet -> BlockEnv Protocol -> BlockEnv CmmBlock -> BlockEnv CmmBlock
-add_CopyIns callPPs protos = mapUFM maybe_insert_CopyIns
+add_CopyIns callPPs protos blocks = mapUFM maybe_insert_CopyIns blocks
where maybe_insert_CopyIns :: CmmBlock -> CmmBlock
- maybe_insert_CopyIns b@(Block id t) | not $ elementOfUniqSet id callPPs =
- case lookupBlockEnv protos id of
- Nothing -> b
- Just (Protocol c fs area) ->
- case t of
- --ZTail (CopyIn c' fs' _) _ ->
- -- if c == c' && fs == fs' then b
- -- else panic ("mismatched protocols for block " ++ show id)
- _ -> Block id -- (ZTail (CopyIn c fs NoC_SRT) t)
- $ foldr ZTail t (copyIn c area fs)
+ maybe_insert_CopyIns b@(Block id off t) | not $ elementOfUniqSet id callPPs =
+ case (off, lookupBlockEnv protos id) of
+ (Just _, _) -> panic "shouldn't copy arguments twice into a block"
+ (_, Just (Protocol c fs area)) -> Block id (Just off) $ foldr ZTail t copies
+ where (off, copies) = copyIn c False area fs
+ (_, Nothing) -> b
maybe_insert_CopyIns b = b
-- | Add a CopyOut node before each procpoint.
--- If the predecessor is a call, then the CopyOut should already exist (in the callee).
+-- If the predecessor is a call, then the copy outs should already be done by the callee.
+-- Note: If we need to add copy-out instructions, they may require stack space,
+-- so we accumulate a map from the successors to the necessary stack space,
+-- then update the successors after we have finished inserting the copy-outs.
add_CopyOuts :: BlockEnv Protocol -> ProcPointSet -> CmmGraph ->
FuelMonad (BlockEnv CmmBlock)
-add_CopyOuts protos procPoints g = fold_blocks maybe_insert_CopyOut (return emptyBlockEnv) g
- where maybe_insert_CopyOut :: CmmBlock -> FuelMonad (BlockEnv CmmBlock) ->
- FuelMonad (BlockEnv CmmBlock)
- maybe_insert_CopyOut b@(Block bid _) blocks | bid == lg_entry g = skip b blocks
- maybe_insert_CopyOut b blocks =
+add_CopyOuts protos procPoints g = fold_blocks mb_copy_out (return emptyBlockEnv) g
+ where mb_copy_out :: CmmBlock -> FuelMonad (BlockEnv CmmBlock) ->
+ FuelMonad (BlockEnv CmmBlock)
+ mb_copy_out b@(Block bid _ _) z | bid == lg_entry g = skip b z
+ mb_copy_out b z =
case last $ unzip b of
- LastOther (LastCall _ _) -> skip b blocks -- copy out done by callee
- _ -> maybe_insert_CopyOut' b blocks
- maybe_insert_CopyOut' b blocks = fold_succs trySucc b init >>= finish
- where init = blocks >>= (\bmap -> return (b, bmap))
+ LastOther (LastCall _ _ _) -> skip b z -- copy out done by callee
+ _ -> mb_copy_out' b z
+ mb_copy_out' b z = fold_succs trySucc b init >>= finish
+ where init = z >>= (\bmap -> return (b, bmap))
trySucc succId z =
if elemBlockSet succId procPoints then
case lookupBlockEnv protos succId of
Nothing -> z
Just (Protocol c fs area) ->
- insert z succId $ copyOut c area $ map fetch fs
- -- CopyOut c $ map fetch fs
+ let (_, copies) = copyOut c Jump area $ map (CmmReg . CmmLocal) fs
+ in insert z succId copies
else z
- fetch k = k {kindlessCmm = CmmReg $ CmmLocal $ kindlessCmm k}
insert z succId m =
do (b, bmap) <- z
(b, bs) <- insertBetween b m succId
- return $ (b, foldl (flip insertBlock) bmap bs)
- finish (b@(Block bid _), bmap) = return $ extendBlockEnv bmap bid b
- skip b@(Block bid _) bs = bs >>= (\bmap -> return $ extendBlockEnv bmap bid b)
-
-
-
+ pprTrace "insert for succ" (ppr succId <> ppr m) $
+ return $ (b, foldl (flip insertBlock) bmap bs)
+ finish (b@(Block bid _ _), bmap) =
+ return $ (extendBlockEnv bmap bid b)
+ skip b@(Block bid _ _) bs =
+ bs >>= (\bmap -> return (extendBlockEnv bmap bid b))
+
+-- At this point, we have found a set of procpoints, each of which should be
+-- the entry point of a procedure.
+-- Now, we create the procedure for each proc point,
+-- which requires that we:
+-- 1. build a map from proc points to the blocks reachable from the proc point
+-- 2. turn each branch to a proc point into a jump
+-- 3. turn calls and returns into jumps
+-- 4. build info tables for the procedures -- and update the info table for
+-- the SRTs in the entry procedure as well.
-- Input invariant: A block should only be reachable from a single ProcPoint.
--- If you want to duplicate blocks, do it before this gets called.
-splitAtProcPoints :: CmmFormalsWithoutKinds -> CLabel -> ProcPointSet ->
- CmmGraph -> FuelMonad [CmmGraph]
-splitAtProcPoints formals entry_label procPoints g@(LGraph entry _) =
- do let layout = layout_stack formals g
- pprTrace "stack layout" (ppr layout) $ return ()
- res <- procPointAnalysis procPoints g
- procMap <- liftM zdfFpFacts res
- let addBlock b@(Block bid _) graphEnv =
- case lookupBlockEnv procMap bid of
- Just ProcPoint -> add graphEnv bid bid b
- Just (ReachedBy set) ->
- case uniqSetToList set of
- [] -> graphEnv
- [id] -> add graphEnv id bid b
- _ -> panic "Each block should be reachable from only one ProcPoint"
- Nothing -> panic "block not reached by a proc point?"
+splitAtProcPoints :: CLabel -> ProcPointSet -> ProcPointSet -> BlockEnv Status ->
+ BlockEnv SubAreaSet -> AreaMap -> CmmTopZ -> FuelMonad [CmmTopZ]
+splitAtProcPoints entry_label callPPs procPoints procMap slotEnv areaMap
+ (CmmProc top_info top_l top_args g@(LGraph entry e_off blocks)) =
+ do -- Build a map from procpoints to the blocks they reach
+ let addBlock b@(Block bid _ _) graphEnv =
+ case lookupBlockEnv procMap bid of
+ Just ProcPoint -> add graphEnv bid bid b
+ Just (ReachedBy set) ->
+ case uniqSetToList set of
+ [] -> graphEnv
+ [id] -> add graphEnv id bid b
+ _ -> panic "Each block should be reachable from only one ProcPoint"
+ Nothing -> pprPanic "block not reached by a proc point?" (ppr bid)
add graphEnv procId bid b = extendBlockEnv graphEnv procId graph'
where graph = lookupBlockEnv graphEnv procId `orElse` emptyBlockEnv
graph' = extendBlockEnv graph bid b
graphEnv <- return $ fold_blocks addBlock emptyBlockEnv g
-- Build a map from proc point BlockId to labels for their new procedures
- let add_label map pp = clabel pp >>= (\l -> return $ (pp, l) : map)
- clabel procPoint = if procPoint == entry then return entry_label
- else getUniqueM >>= return . to_label
- to_label u = mkEntryLabel (mkFCallName u "procpoint")
- procLabels <- foldM add_label [] (uniqSetToList procPoints)
+ let add_label map pp = return $ addToFM map pp lbl
+ where lbl = if pp == entry then entry_label else blockLbl pp
+ procLabels <- foldM add_label emptyFM (uniqSetToList procPoints)
+ -- Convert call and return instructions to jumps.
+ let last (LastCall e _ n) = LastJump e n
+ last l = l
+ graphEnv <- return $ mapUFM (mapUFM (map_one_block id id last)) graphEnv
-- In each new graph, add blocks jumping off to the new procedures,
-- and replace branches to procpoints with branches to the jump-off blocks
let add_jump_block (env, bs) (pp, l) =
do bid <- liftM mkBlockId getUniqueM
- let b = Block bid (ZLast (LastOther (LastJump $ CmmLit $ CmmLabel l)))
+ let b = Block bid Nothing (ZLast (LastOther jump))
+ argSpace = case lookupBlockEnv blocks pp of
+ Just (Block _ (Just s) _) -> s
+ Just (Block _ Nothing _) -> panic "no args at procpoint"
+ _ -> panic "can't find procpoint block"
+ jump = LastJump (CmmLit (CmmLabel l)) argSpace
return $ (extendBlockEnv env pp bid, b : bs)
add_jumps newGraphEnv (guniq, blockEnv) =
- do (jumpEnv, jumpBlocks) <- foldM add_jump_block (emptyBlockEnv, []) procLabels
+ do (jumpEnv, jumpBlocks) <- foldM add_jump_block (emptyBlockEnv, [])
+ $ fmToList procLabels
let ppId = mkBlockId guniq
- LGraph _ blockEnv' = replaceLabelsZ jumpEnv $ LGraph ppId blockEnv
- blockEnv'' = foldl (flip insertBlock) blockEnv' jumpBlocks
+ (b_off, b) =
+ case lookupBlockEnv blockEnv ppId of
+ Just (Block id (Just b_off) t) -> (b_off, Block id Nothing t)
+ Just b@(Block _ Nothing _) -> (0, b)
+ Nothing -> panic "couldn't find entry block while splitting"
+ off = if ppId == entry then e_off else b_off
+ LGraph _ _ blockEnv' = pprTrace "jumpEnv" (ppr jumpEnv) $
+ replaceLabelsZ jumpEnv $ LGraph ppId off blockEnv
+ blockEnv'' = foldl (flip insertBlock) (extendBlockEnv blockEnv' ppId b)
+ jumpBlocks
return $ extendBlockEnv newGraphEnv ppId $
- runTx cmmCfgOptsZ $ LGraph ppId blockEnv''
- _ <- return $ replaceLabelsZ
+ runTx cmmCfgOptsZ $ LGraph ppId off blockEnv''
+ upd_info_tbl srt' (CmmInfoTable p t typeinfo) = CmmInfoTable p t typeinfo'
+ where typeinfo' = case typeinfo of
+ t@(ConstrInfo _ _ _) -> t
+ (FunInfo c _ a d e) -> FunInfo c srt' a d e
+ (ThunkInfo c _) -> ThunkInfo c srt'
+ (ThunkSelectorInfo s _) -> ThunkSelectorInfo s srt'
+ (ContInfo vars _) -> ContInfo vars srt'
+ upd_info_tbl _ CmmNonInfoTable = CmmNonInfoTable
+ to_proc cafMap (ppUniq, g) | elementOfUniqSet bid callPPs =
+ if bid == entry then
+ CmmProc (CmmInfo gc upd_fr (upd_info_tbl srt' info_tbl)) top_l top_args g
+ else
+ pprTrace "adding infotable for" (ppr bid) $
+ CmmProc (CmmInfo Nothing Nothing $ infoTbl) lbl [] g
+ where bid = mkBlockId ppUniq
+ lbl = expectJust "pp label" $ lookupFM procLabels bid
+ infoTbl = CmmInfoTable (ProfilingInfo zero zero) rET_SMALL
+ (ContInfo stack_vars srt')
+ stack_vars = pprTrace "slotEnv" (ppr slotEnv) $
+ live_vars slotEnv areaMap bid
+ zero = CmmInt 0 wordWidth
+ srt' = expectJust "procpoint.infoTbl" $ lookupBlockEnv cafMap bid
+ CmmInfo gc upd_fr info_tbl = top_info
+ to_proc _ (ppUniq, g) =
+ pprTrace "not adding infotable for" (ppr bid) $
+ CmmProc (CmmInfo Nothing Nothing CmmNonInfoTable) lbl [] g
+ where bid = mkBlockId ppUniq
+ lbl = expectJust "pp label" $ lookupFM procLabels bid
graphEnv <- foldM add_jumps emptyBlockEnv $ ufmToList graphEnv
+ cafEnv <- cafAnal g
+ (cafTable, blockCafs) <- buildCafs cafEnv
+ procs <- return $ map (to_proc blockCafs) $ ufmToList graphEnv
return $ pprTrace "procLabels" (ppr procLabels) $
- pprTrace "splitting graphs" (ppr graphEnv) $ [g]
+ pprTrace "splitting graphs" (ppr graphEnv) $ cafTable ++ procs
+splitAtProcPoints _ _ _ _ _ _ t@(CmmData _ _) = return [t]
------------------------------------------------------------------------
--- Stack Layout (completely bogus for now) --
+-- Stack Layout --
------------------------------------------------------------------------
--- At some point, we'll do stack layout properly.
--- But for now, we can move forward on generating code by just producing
--- a brain dead layout, giving a separate slot to every variable,
--- and (incorrectly) assuming that all parameters are passed on the stack.
-
--- For now, variables are placed at explicit offsets from a virtual
--- frame pointer.
--- We may want to use abstract stack slots at some point.
-data Placement = VFPMinus Int
-
-instance Outputable Placement where
- ppr (VFPMinus k) = text "VFP - " <> int k
-
--- Build a map from registers to stack locations.
--- Return that map along with the offset to the end of the block
--- containing local registers.
-layout_stack ::CmmFormalsWithoutKinds -> CmmGraph ->
- (Int, FiniteMap LocalReg Placement, FiniteMap LocalReg Placement)
-layout_stack formals g = (ix', incomingMap, localMap)
- where (ix, incomingMap) = foldl (flip place) (1, emptyFM) formals -- IGNORES CC'S
- -- 1 leaves space for the return infotable
- (ix', localMap) = foldUniqSet place (ix, emptyFM) regs
- place r (ix, map) = (ix', addToFM map r $ VFPMinus ix') where ix' = ix + 1
- regs = fold_blocks (fold_fwd_block (\_ y -> y) add addL) emptyRegSet g
- add x y = foldRegsDefd extendRegSet y x
- addL (LastOther l) z = add l z
- addL LastExit z = z
+-- | Before we lay out the stack, we need to know something about the
+-- liveness of the stack slots. In particular, to decide whether we can
+-- reuse a stack location to hold multiple stack slots, we need to know
+-- when each of the stack slots is used.
+-- Although tempted to use something simpler, we really need a full interference
+-- graph. Consider the following case:
+-- case <...> of
+-- 1 -> <spill x>; // y is dead out
+-- 2 -> <spill y>; // x is dead out
+-- 3 -> <spill x and y>
+-- If we consider the arms in order and we use just the deadness information given by a
+-- dataflow analysis, we might decide to allocate the stack slots for x and y
+-- to the same stack location, which will lead to incorrect code in the third arm.
+-- We won't make this mistake with an interference graph.
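The case above is the reason an interference graph is built at all: in the third arm both x and y are spilled while both are live out, so their spill slots must end up in distinct locations. A minimal, self-contained sketch of that idea (toy String nodes and Data.Map, not the SubArea/IGraph types used in this patch):

import qualified Data.Map as Map
import qualified Data.Set as Set

-- Toy interference graph: each node maps to the set of nodes it conflicts with.
type IG = Map.Map String (Set.Set String)

-- Record that two spill slots are defined/live at the same program point.
addEdge :: String -> String -> IG -> IG
addEdge a b = Map.insertWith Set.union a (Set.singleton b)
            . Map.insertWith Set.union b (Set.singleton a)

interferes :: IG -> String -> String -> Bool
interferes ig a b = maybe False (Set.member b) (Map.lookup a ig)

main :: IO ()
main = do
  let ig = addEdge "x" "y" Map.empty   -- edge recorded while scanning arm 3
  print (interferes ig "x" "y")        -- ==> True: x and y cannot share a slot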
+
+-- First, the liveness analysis.
+-- We represent a slot with an area, an offset into the area, and a width.
+-- Tracking the live slots is a bit tricky because there may be loads and stores
+-- into only a part of a stack slot (e.g. loading the low word of a 2-word long),
+-- e.g. Slot A 0 8 overlaps with Slot A 4 4.
+--
+-- The definition of a slot set is intended to reduce the number of overlap
+-- checks we have to make. There's no reason to check for overlap between
+-- slots in different areas, so we segregate the map by Area's.
+-- We expect few slots in each Area, so we collect them in an unordered list.
+-- To keep these lists short, any contiguous live slots are coalesced into
+-- a single slot, on insertion.
+
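A small, self-contained illustration of the coalescing rule just described, mirroring what liveGen below does on insertion. It uses toy (high offset, width) pairs standing in for sub-areas of a single Area; offsets grow downward, so a slot's low end is its high offset minus its width:

type Slot = (Int, Int)                 -- (high offset, width)

-- Insert a slot, merging it with any slot it overlaps.
coalesce :: Slot -> [Slot] -> [Slot]
coalesce s [] = [s]
coalesce s@(hi, w) (s'@(hi', w') : rest)
  | hi - w > hi' || hi < hi' - w' = s' : coalesce s rest   -- no overlap: keep looking
  | otherwise =                                            -- overlap: merge and continue
      let newHi = max hi hi'
          newLo = min (hi - w) (hi' - w')
      in coalesce (newHi, newHi - newLo) rest

main :: IO ()
main = print (coalesce (8, 4) [(4, 4), (16, 4)])
  -- ==> [(16,4),(8,8)]: bytes 4..8 merge with bytes 0..4 into one 8-byte slot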
+type SubAreaSet = FiniteMap Area [SubArea]
+fold_subareas :: (SubArea -> z -> z) -> SubAreaSet -> z -> z
+fold_subareas f m z = foldFM (\_ s z -> foldr (\a z -> f a z) z s) z m
+
+liveGen :: SubArea -> [SubArea] -> (Bool, [SubArea])
+liveGen s set = liveGen' s set []
+ where liveGen' s [] z = (True, s : z)
+ liveGen' s@(a, hi, w) (s'@(a', hi', w') : rst) z =
+ if a /= a' || hi < lo' || lo > hi' then -- no overlap
+ liveGen' s rst (s' : z)
+ else if s' `contains` s then -- old contains new
+ (False, set)
+ else -- overlap: coalesce the slots
+ let new_hi = max hi hi'
+ new_lo = min lo lo'
+ in liveGen' (a, new_hi, new_hi - new_lo) rst z
+ where lo = hi - w -- remember: areas grow down
+ lo' = hi' - w'
+ contains (a, hi, w) (a', hi', w') =
+ a == a' && hi >= hi' && hi - w <= hi' - w'
+
+liveKill :: SubArea -> [SubArea] -> [SubArea]
+liveKill (a, hi, w) set = pprTrace "killing slots in area" (ppr a) $ liveKill' set []
+ where liveKill' [] z = z
+ liveKill' (s'@(a', hi', w') : rst) z =
+ if a /= a' || hi < lo' || lo > hi' then -- no overlap
+ liveKill' rst (s' : z)
+ else -- overlap: split the old slot
+ let z' = if hi' > hi then (a, hi', hi' - hi) : z else z
+ z'' = if lo > lo' then (a, lo, lo - lo') : z' else z'
+ in liveKill' rst z''
+ where lo = hi - w -- remember: areas grow down
+ lo' = hi' - w'
+
+slotLattice :: DataflowLattice SubAreaSet
+slotLattice = DataflowLattice "live slots" emptyFM add True
+ where add new old = case foldFM addArea (False, old) new of
+ (True, x) -> aTx x
+ (False, x) -> noTx x
+ addArea a newSlots z = foldr (addSlot a) z newSlots
+ addSlot a slot (changed, map) =
+ let (c, live) = liveGen slot $ lookupWithDefaultFM map [] a
+ in (c || changed, addToFM map a live)
+
+liveInSlots :: (DefinerOfSlots s, UserOfSlots s) => SubAreaSet -> s -> SubAreaSet
+liveInSlots live x = foldSlotsUsed add (foldSlotsDefd remove live x) x
+ where add live (a, i, w) = liftToArea a (snd . liveGen (a, i, w)) live
+ remove live (a, i, w) = liftToArea a (liveKill (a, i, w)) live
+ liftToArea a f map = addToFM map a $ f (lookupWithDefaultFM map [] a)
+
+-- Unlike the liveness transfer functions @gen@ and @kill@, this function collects
+-- _any_ slot that is named.
+--addNamedSlots :: (DefinerOfSlots s, UserOfSlots s) => SubAreaSet -> s -> SubAreaSet
+--addNamedSlots live x = foldSlotsUsed add (foldSlotsDefd add live x) x
+-- where add live (a, i, w) = liftToArea a (snd . liveGen (a, i, w)) live
+-- liftToArea a f map = addToFM map a $ f (lookupWithDefaultFM map [] a)
+
+-- Note: the stack slots that hold variables returned on the stack are not
+-- considered live in to the block -- we treat the first node as a definition site.
+-- BEWARE: I'm being a little careless here in failing to check for the
+-- entry Id (which would use the CallArea Old).
+liveTransfers :: BackwardTransfers Middle Last SubAreaSet
+liveTransfers = BackwardTransfers first liveInSlots liveLastIn
+ where first live id = delFromFM live (CallArea (Young id))
+
+liveLastIn :: (BlockId -> SubAreaSet) -> Last -> SubAreaSet
+liveLastIn env l = liveInSlots (liveLastOut env l) l
+
+-- Don't forget to keep the outgoing parameters in the CallArea live.
+liveLastOut :: (BlockId -> SubAreaSet) -> Last -> SubAreaSet
+liveLastOut env l =
+ case l of
+ LastReturn n -> add_area (CallArea Old) n out
+ LastJump _ n -> add_area (CallArea Old) n out
+ LastCall _ Nothing n -> add_area (CallArea Old) n out
+ LastCall _ (Just k) n -> add_area (CallArea (Young k)) n out
+ _ -> out
+ where out = joinOuts slotLattice env l
+add_area :: Area -> Int -> SubAreaSet -> SubAreaSet
+add_area a n live =
+ addToFM live a $ snd $ liveGen (a, n, n) $ lookupWithDefaultFM live [] a
+
+type SlotFix a = FuelMonad (BackwardFixedPoint Middle Last SubAreaSet a)
+liveSlotAnal :: LGraph Middle Last -> FuelMonad (BlockEnv SubAreaSet)
+liveSlotAnal g = liftM zdfFpFacts (res :: SlotFix ())
+ where res = zdfSolveFromL emptyBlockEnv "live slot analysis" slotLattice
+ liveTransfers (fact_bot slotLattice) g
+
+-- The liveness analysis must be precise: otherwise, we won't know if a definition
+-- should really kill a live-out stack slot.
+-- But the interference graph does not have to be precise -- it might decide that
+-- any live areas interfere. To maintain both a precise analysis and an imprecise
+-- interference graph, we need to convert the live-out stack slots to graph nodes
+-- at each and every instruction; rather than reconstruct a new list of nodes
+-- every time, I provide a function to fold over the nodes, which should be a
+-- reasonably efficient approach for the implementations we envision.
+-- Of course, it will probably be much easier to program if we just return a list...
+type Set x = FiniteMap x ()
+type AreaMap = FiniteMap Area Int
+data IGraphBuilder n =
+ Builder { foldNodes :: forall z. SubArea -> (n -> z -> z) -> z -> z
+ , _wordsOccupied :: AreaMap -> AreaMap -> n -> [Int]
+ }
+
+areaBuilder :: IGraphBuilder Area
+areaBuilder = Builder fold words
+ where fold (a, _, _) f z = f a z
+ words areaSize areaMap a =
+ case lookupFM areaMap a of
+ Just addr -> [addr .. addr + (lookupFM areaSize a `orElse`
+ pprPanic "wordsOccupied: unknown area" (ppr a))]
+ Nothing -> []
+
+--slotBuilder :: IGraphBuilder (Area, Int)
+--slotBuilder = undefined
+
+-- Now, we can build the interference graph.
+-- The usual story: a definition interferes with all live outs and all other
+-- definitions.
+type IGraph x = FiniteMap x (Set x)
+type IGPair x = (IGraph x, IGraphBuilder x)
+igraph :: (Ord x) => IGraphBuilder x -> BlockEnv SubAreaSet -> LGraph Middle Last -> IGraph x
+igraph builder env g = foldr interfere emptyFM (postorder_dfs g)
+ where foldN = foldNodes builder
+ interfere block igraph =
+ let (h, l) = goto_end (unzip block)
+ --heads :: ZHead Middle -> (IGraph x, SubAreaSet) -> IGraph x
+ heads (ZFirst _ _) (igraph, _) = igraph
+ heads (ZHead h m) (igraph, liveOut) =
+ heads h (addEdges igraph m liveOut, liveInSlots liveOut m)
+ -- add edges between a def and the other defs and liveouts
+ addEdges igraph i out = fst $ foldSlotsDefd addDef (igraph, out) i
+ addDef (igraph, out) def@(a, _, _) =
+ (foldN def (addDefN out) igraph,
+ addToFM out a (snd $ liveGen def (lookupWithDefaultFM out [] a)))
+ addDefN out n igraph =
+ let addEdgeNO o igraph = foldN o addEdgeNN igraph
+ addEdgeNN n' igraph = addEdgeNN' n n' $ addEdgeNN' n' n igraph
+ addEdgeNN' n n' igraph = addToFM igraph n (addToFM set n' ())
+ where set = lookupWithDefaultFM igraph emptyFM n
+ in foldFM (\ _ os igraph -> foldr addEdgeNO igraph os) igraph out
+ env' bid = lookupBlockEnv env bid `orElse` panic "unknown blockId in igraph"
+ in heads h $ case l of LastExit -> (igraph, emptyFM)
+ LastOther l -> (addEdges igraph l $ liveLastOut env' l,
+ liveLastIn env' l)
+
+-- Before allocating stack slots, we need to collect one more piece of information:
+-- what's the highest offset (in bytes) used in each Area?
+-- We'll need to allocate that much space for each Area.
+getAreaSize :: LGraph Middle Last -> AreaMap
+getAreaSize g@(LGraph _ off _) =
+ fold_blocks (fold_fwd_block first add add) (unitFM (CallArea Old) off) g
+ where first _ z = z
+ add x z = foldSlotsUsed addSlot (foldSlotsDefd addSlot z x) x
+ addSlot z (a, off, _) = addToFM z a $ max off $ lookupWithDefaultFM z 0 a
+
+
+-- Find the Stack slots occupied by the subarea's conflicts
+conflictSlots :: Ord x => IGPair x -> AreaMap -> AreaMap -> SubArea -> Set Int
+conflictSlots (ig, Builder foldNodes wordsOccupied) areaSize areaMap subarea =
+ foldNodes subarea foldNode emptyFM
+ where foldNode n set = foldFM conflict set $ lookupWithDefaultFM ig emptyFM n
+ conflict n' () set = liveInSlots areaMap n' set
+ -- Add stack slots occupied by igraph node n
+ liveInSlots areaMap n set = foldr setAdd set (wordsOccupied areaSize areaMap n)
+ setAdd w s = addToFM s w ()
+
+-- Find any open space on the stack, starting from the offset.
+freeSlotFrom :: Ord x => IGPair x -> AreaMap -> Int -> AreaMap -> Area -> Int
+freeSlotFrom ig areaSize offset areaMap area =
+ let size = lookupFM areaSize area `orElse` 0
+ conflicts = conflictSlots ig areaSize areaMap (area, size, size)
+ -- Find a space big enough to hold the area
+ findSpace curr 0 = curr
+ findSpace curr cnt = -- target slot, considerand, # left to check
+ if elemFM curr conflicts then
+ findSpace (curr + size) size
+ else findSpace (curr - 1) (cnt - 1)
+ in findSpace (offset + size) size
+
+-- Find an open space on the stack, and assign it to the area.
+allocSlotFrom :: Ord x => IGPair x -> AreaMap -> Int -> AreaMap -> Area -> AreaMap
+allocSlotFrom ig areaSize from areaMap area =
+ if elemFM area areaMap then areaMap
+ else addToFM areaMap area $ freeSlotFrom ig areaSize from areaMap area
+
+-- | Greedy stack layout.
+-- Compute liveness, build the interference graph, and allocate slots for the areas.
+-- We visit each basic block in a (generally) forward order.
+-- At each instruction that names a register subarea r, we immediately allocate
+-- any available slot on the stack by the following procedure:
+-- 1. Find the nodes N' that conflict with r
+-- 2. Find the stack slots used for N'
+-- 3. Choose a contiguous stack space s not in N' (s must be large enough to hold r)
+-- For a CallArea, we allocate the stack space only when we reach a function
+-- call that returns to the CallArea's blockId.
+-- We use a similar procedure, with one exception: the stack space
+-- must be allocated below the youngest stack slot that is live out.
+
+-- Note: The stack pointer only has to be younger than the youngest live stack slot
+-- at proc points. Otherwise, the stack pointer can point anywhere.
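As a rough illustration of step 3, the following self-contained toy (hypothetical names, a Data.Set of occupied word numbers instead of the IGPair/AreaMap machinery used by freeSlotFrom below) scans for a contiguous run of free words; the real code works on downward-growing byte offsets and starts from the youngest live slot:

import qualified Data.Set as Set

-- Find the first base >= from such that [base .. base+size-1] avoids all conflicts.
allocFrom :: Set.Set Int -> Int -> Int -> Int
allocFrom conflicts size from = go from
  where go base
          | any (`Set.member` conflicts) [base .. base + size - 1] = go (base + 1)
          | otherwise = base

main :: IO ()
main = print (allocFrom (Set.fromList [0, 1, 4]) 2 0)   -- ==> 2: words 2 and 3 are free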
+layout :: ProcPointSet -> BlockEnv SubAreaSet -> LGraph Middle Last -> AreaMap
+layout procPoints env g@(LGraph _ entrySp _) =
+ let builder = areaBuilder
+ ig = (igraph builder env g, builder)
+ env' bid = lookupBlockEnv env bid `orElse` panic "unknown blockId in igraph"
+ areaSize = getAreaSize g
+ -- Find the slots that are live-in to the block
+ live_in (ZTail m l) = liveInSlots (live_in l) m
+ live_in (ZLast (LastOther l)) = liveLastIn env' l
+ live_in (ZLast LastExit) = emptyFM
+ -- Find the youngest live stack slot
+ youngest_live areaMap live = fold_subareas young_slot live 0
+ where young_slot (a, o, _) z = case lookupFM areaMap a of
+ Just top -> max z $ top + o
+ Nothing -> z
+ -- Allocate space for spill slots and call areas
+ allocVarSlot = allocSlotFrom ig areaSize 0
+ allocCallSlot areaMap (Block id _ t) | elemBlockSet id procPoints =
+ allocSlotFrom ig areaSize (youngest_live areaMap $ live_in t)
+ areaMap (CallArea (Young id))
+ allocCallSlot areaMap _ = areaMap
+ alloc i areaMap = foldSlotsDefd alloc' (foldSlotsUsed alloc' areaMap i) i
+ where alloc' areaMap (a@(RegSlot _), _, _) = allocVarSlot areaMap a
+ alloc' areaMap _ = areaMap
+ layoutAreas areaMap b@(Block _ _ t) = layout areaMap t
+ where layout areaMap (ZTail m t) = layout (alloc m areaMap) t
+ layout areaMap (ZLast _) = allocCallSlot areaMap b
+ areaMap = foldl layoutAreas (addToFM emptyFM (CallArea Old) 0) $ postorder_dfs g
+ in pprTrace "ProcPoints" (ppr procPoints) $
+ pprTrace "Area SizeMap" (ppr areaSize) $
+ pprTrace "Entry SP" (ppr entrySp) $
+ pprTrace "Area Map" (ppr areaMap) $ areaMap
+
+-- After determining the stack layout, we can:
+-- 1. Replace references to stack Areas with addresses relative to the stack
+-- pointer.
+-- 2. Insert adjustments to the stack pointer to ensure that it is at a
+-- conventional location at each proc point.
+-- Because we don't take interrupts on the execution stack, we only need the
+-- stack pointer to be younger than the live values on the stack at proc points.
+-- 3. At some point, we should check for stack overflow, but not just yet.
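A small worked sketch of steps 1 and 2, using plain Ints for byte offsets and hypothetical helper names rather than the Cmm datatypes: a slot at offset i within an Area allocated at address areaAddr becomes Sp-relative via spOff - (areaAddr + i), as replSlot does below, and a mismatched proc-point successor gets an Sp adjustment as setSpMid does:

-- Rewrite an abstract stack slot as an offset from the virtual stack pointer.
spRelative :: Int -> Int -> Int -> Int
spRelative spOff areaAddr i = spOff - (areaAddr + i)

-- Constant to add to Sp on an edge whose successor expects a different Sp,
-- or Nothing if the stack pointer is already conventional.
spAdjust :: Int -> Int -> Maybe Int
spAdjust spOff succSp
  | spOff == succSp = Nothing
  | otherwise       = Just (spOff - succSp)

main :: IO ()
main = print (spRelative 24 16 4, spAdjust 24 16)   -- ==> (4, Just 8)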
+manifestSP :: ProcPointSet -> BlockEnv Status -> AreaMap ->
+ LGraph Middle Last -> FuelMonad (LGraph Middle Last)
+manifestSP procPoints procMap areaMap g@(LGraph entry args blocks) =
+ liftM (LGraph entry args) blocks'
+ where blocks' = foldl replB (return emptyBlockEnv) (postorder_dfs g)
+ slot a = pprTrace "slot" (ppr a) $ lookupFM areaMap a `orElse` panic "unallocated Area"
+ slot' id = pprTrace "slot'" (ppr id)$ slot $ CallArea (Young id)
+ sp_on_entry id | id == entry = slot (CallArea Old) + args
+ sp_on_entry id | elemBlockSet id procPoints =
+ case lookupBlockEnv blocks id of
+ Just (Block _ (Just o) _) -> slot' id + o
+ Just (Block _ Nothing _) -> slot' id
+ Nothing -> panic "procpoint dropped from block env"
+ sp_on_entry id =
+ case lookupBlockEnv procMap id of
+ Just (ReachedBy pp) -> case uniqSetToList pp of
+ [id] -> sp_on_entry id
+ _ -> panic "block not reached by single proc point"
+ Just ProcPoint -> panic "procpoint not in procpoint set"
+ Nothing -> panic "block not found in procmap"
+ -- On entry to procpoints, the stack pointer is conventional;
+ -- otherwise, we check the SP set by predecessors.
+ replB :: FuelMonad (BlockEnv CmmBlock) -> CmmBlock -> FuelMonad (BlockEnv CmmBlock)
+ replB blocks (Block id o t) =
+ do bs <- replTail (Block id o) spIn t
+ pprTrace "spIn" (ppr id <+> ppr spIn)$ liftM (flip (foldr insertBlock) bs) blocks
+ where spIn = sp_on_entry id
+ replTail :: (ZTail Middle Last -> CmmBlock) -> Int -> (ZTail Middle Last) ->
+ FuelMonad ([CmmBlock])
+ replTail h spOff (ZTail m t) = replTail (h . ZTail (middle spOff m)) spOff t
+ replTail h spOff (ZLast (LastOther l)) = fixSp h spOff l
+ replTail h _ l@(ZLast LastExit) = return [h l]
+ middle spOff m = mapExpDeepMiddle (replSlot spOff) m
+ last spOff l = mapExpDeepLast (replSlot spOff) l
+ replSlot spOff (CmmStackSlot a i) = CmmRegOff (CmmGlobal Sp) (spOff - (slot a + i))
+ replSlot _ e = e
+ -- The block must establish the SP expected at each successor.
+ fixSp :: (ZTail Middle Last -> CmmBlock) -> Int -> Last -> FuelMonad ([CmmBlock])
+ fixSp h spOff l@(LastReturn n) = updSp h spOff (slot (CallArea Old) + n) l
+ fixSp h spOff l@(LastJump _ n) = updSp h spOff (slot (CallArea Old) + n) l
+ fixSp h spOff l@(LastCall _ (Just k) n) = updSp h spOff (slot' k + n) l
+ fixSp h spOff l@(LastCall _ Nothing n) = updSp h spOff (slot (CallArea Old) + n) l
+ fixSp h spOff l@(LastBranch k) | elemBlockSet k procPoints =
+ pprTrace "updSp" (ppr k <> ppr spOff <> ppr (sp_on_entry k)) $ updSp h spOff (sp_on_entry k) l
+ fixSp h spOff l = liftM (uncurry (:)) $ fold_succs succ l $ return (b, [])
+ where b = h (ZLast (LastOther (last spOff l)))
+ succ succId z =
+ let succSp = sp_on_entry succId in
+ if elemBlockSet succId procPoints && succSp /= spOff then
+ do (b, bs) <- z
+ (b', bs') <- insertBetween b [setSpMid spOff succSp] succId
+ return (b', bs ++ bs')
+ else z
+ updSp h old new l = return [h $ setSp old new $ ZLast $ LastOther (last new l)]
+ setSpMid sp sp' = MidAssign (CmmGlobal Sp) e
+ where e = CmmMachOp (MO_Add wordWidth) [CmmReg (CmmGlobal Sp), off]
+ off = CmmLit $ CmmInt (toInteger $ sp - sp') wordWidth
+ setSp sp sp' t = if sp == sp' then t else ZTail (setSpMid sp sp') t
+----------------------------------------------------------------
+-- Building InfoTables
+
+type CAFSet = FiniteMap CLabel ()
+
+-- First, an analysis to find live CAFs.
+cafLattice :: DataflowLattice CAFSet
+cafLattice = DataflowLattice "live cafs" emptyFM add True
+ where add new old = if sizeFM new' > sizeFM old then aTx new' else noTx new'
+ where new' = new `plusFM` old
+
+cafTransfers :: BackwardTransfers Middle Last CAFSet
+cafTransfers = BackwardTransfers first middle last
+ where first live _ = live
+ middle live m = pprTrace "cafmiddle" (ppr m) $ foldExpDeepMiddle addCaf m live
+ last env l = foldExpDeepLast addCaf l (joinOuts cafLattice env l)
+ addCaf e set = case e of
+ CmmLit (CmmLabel c) -> add c set
+ CmmLit (CmmLabelOff c _) -> add c set
+ CmmLit (CmmLabelDiffOff c1 c2 _) -> add c1 $ add c2 set
+ _ -> set
+ add c s = pprTrace "CAF analysis saw label" (ppr c) $
+ if hasCAF c then (pprTrace "has caf" (ppr c) $ addToFM s c ()) else (pprTrace "no cafs" (ppr c) $ s)
+
+type CafFix a = FuelMonad (BackwardFixedPoint Middle Last CAFSet a)
+cafAnal :: LGraph Middle Last -> FuelMonad (BlockEnv CAFSet)
+cafAnal g = liftM zdfFpFacts (res :: CafFix ())
+ where res = zdfSolveFromL emptyBlockEnv "live CAF analysis" cafLattice
+ cafTransfers (fact_bot cafLattice) g
+
+-- Once we have found the CAFs, we need to do two things:
+-- 1. Build a table of all the CAFs used in the procedure.
+-- 2. Compute the C_SRT describing the subset of CAFs live at each procpoint.
+buildCafs :: (BlockEnv CAFSet) -> FuelMonad ([CmmTopZ], BlockEnv C_SRT)
+buildCafs blockCafs =
+ -- This is surely the wrong way to get names, as in BlockId
+ do top_lbl <- getUniqueM >>= \ u -> return $ mkSRTLabel (mkFCallName u "srt") MayHaveCafRefs
+ let allCafs = foldBlockEnv (\_ x y -> plusFM x y) emptyFM blockCafs
+ caf_entry (ix, map, tbl') caf = (ix + 1, addToFM map caf ix, entry : tbl')
+ where entry = CmmStaticLit $ CmmLabel caf
+ (_::Int, cafMap, tbl') = foldl caf_entry (0, emptyFM, []) $ keysFM allCafs
+ top_tbl = CmmData RelocatableReadOnlyData $ CmmDataLabel top_lbl : reverse tbl'
+ sub_srt id cafs z =
+ do (tbls, blocks) <- z
+ (top, srt) <- procpointSRT top_lbl cafMap cafs
+ let blocks' = extendBlockEnv blocks id srt
+ case top of Just t -> return (t:tbls, blocks')
+ Nothing -> return (tbls, blocks')
+ (sub_tbls, blockSRTs) <- foldBlockEnv sub_srt (return ([], emptyBlockEnv)) blockCafs
+ return (top_tbl : sub_tbls, blockSRTs)
+
+-- Construct an SRT bitmap.
+-- Adapted from simpleStg/SRT.lhs, which expects Id's.
+procpointSRT :: CLabel -> FiniteMap CLabel Int -> FiniteMap CLabel () ->
+ FuelMonad (Maybe CmmTopZ, C_SRT)
+procpointSRT top_srt top_table entries
+ | isEmptyFM entries = pprTrace "nil SRT" (ppr top_srt) $ return (Nothing, NoC_SRT)
+ | otherwise = pprTrace "non-nil SRT" (ppr top_srt) $ bitmap `seq` to_SRT top_srt offset len bitmap
+ where
+ ints = map (expectJust "constructSRT" . lookupFM top_table) (keysFM entries)
+ sorted_ints = sortLe (<=) ints
+ offset = head sorted_ints
+ bitmap_entries = map (subtract offset) sorted_ints
+ len = P.last bitmap_entries + 1
+ bitmap = intsToBitmap len bitmap_entries
+
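For concreteness, a worked example of the arithmetic above, with hypothetical table positions not taken from any real SRT: if the live CAFs sit at positions 3, 5 and 6 of the top-level table, then the offset is 3, the bitmap entries are 0, 2 and 3, the length is 4, and on a single word the bitmap has bits 0, 2 and 3 set, i.e. the value 13 under the usual least-significant-bit-first numbering:

srtBitsDemo :: (Int, [Int], Int, Integer)
srtBitsDemo = (offset, entries, len, value)
  where ints    = [3, 5, 6]                    -- positions in the top-level SRT
        offset  = minimum ints
        entries = map (subtract offset) ints   -- [0,2,3]
        len     = maximum entries + 1          -- 4
        value   = sum [2 ^ e | e <- entries]   -- 13 = 0b1101

main :: IO ()
main = print srtBitsDemo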
+-- Adapted from codeGen/StgCmmUtils, which converts from SRT to C_SRT.
+to_SRT :: CLabel -> Int -> Int -> Bitmap -> FuelMonad (Maybe CmmTopZ, C_SRT)
+to_SRT top_srt off len bmp
+ | len > widthInBits wordWidth `div` 2 || bmp == [fromIntegral srt_escape]
+ = do id <- getUniqueM
+ let srt_desc_lbl = mkLargeSRTLabel id
+ tbl = CmmData RelocatableReadOnlyData $
+ CmmDataLabel srt_desc_lbl : map CmmStaticLit
+ ( cmmLabelOffW top_srt off
+ : mkWordCLit (fromIntegral len)
+ : map mkWordCLit bmp)
+ return (Just tbl, C_SRT srt_desc_lbl 0 srt_escape)
+ | otherwise
+ = return (Nothing, C_SRT top_srt off (fromIntegral (head bmp)))
+ -- The fromIntegral converts to StgHalfWord
+
+-- Given a block ID, we return a representation of the layout of the stack.
+-- If the element is `Nothing`, then it represents an empty or dead
+-- word on the stack.
+-- If the element is `Just` a register, then it represents a live spill slot
+-- for the register; note that a register may occupy multiple words.
+-- The head of the list represents the young end of the stack where the infotable
+-- pointer for the block `Bid` is stored.
+-- The infotable pointer itself is not included in the list.
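A self-contained sketch of the list construction just described, with toy (byte offset, register name, width) triples already sorted from young to old in place of the SubAreaSet/AreaMap lookups that live_vars performs below; a 4-byte word is assumed:

wordSizeB :: Int
wordSizeB = 4

-- Walk from the young end (youngByte) towards older slots, emitting Just r for a
-- live spill slot and Nothing for each dead word in between.
toLayout :: Int -> [(Int, String, Int)] -> [Maybe String]
toLayout 0 [] = []
toLayout n slots@((off, r, w) : rest)
  | n == off  = Just r  : toLayout (n - w) rest
  | otherwise = Nothing : toLayout (n - wordSizeB) slots
toLayout n [] = Nothing : toLayout (n - wordSizeB) []

main :: IO ()
main = print (toLayout 12 [(12, "x", 4), (4, "y", 4)])
  -- ==> [Just "x", Nothing, Just "y"]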
+live_vars :: BlockEnv SubAreaSet -> AreaMap -> BlockId -> [Maybe LocalReg]
+live_vars slotEnv areaMap bid = slotsToList youngByte liveSlots
+ where slotsToList 0 [] = []
+ slotsToList 0 ((_, r, _) : _) = pprPanic "slot left off live_vars" (ppr r)
+ slotsToList n _ | n < 0 = panic "stack slots not allocated on word boundaries?"
+ slotsToList n ((n', r, w) : rst) =
+ if n == n' then Just r : slotsToList (n - w) rst
+ else Nothing : slotsToList (n - wORD_SIZE) rst
+ slotsToList n [] = Nothing : slotsToList (n - wORD_SIZE) []
+ liveSlots = sortBy (\ (_,off,_) (_,off',_) -> compare off' off)
+ (foldFM (\_ -> flip $ foldr add_slot) [] slots)
+ add_slot (a@(RegSlot r@(LocalReg _ ty)), off, w) rst =
+ if off == w && widthInBytes (typeWidth ty) == w then
+ (expectJust "add_slot" (lookupFM areaMap a), r, w) : rst
+ else panic "live_vars: only part of a variable live at a proc point"
+ add_slot (CallArea Old, off, w) rst =
+ if off == wORD_SIZE && w == wORD_SIZE then
+ rst -- the return infotable should be live
+ else pprPanic "CallAreas must not be live across function calls" (ppr bid)
+ add_slot (CallArea (Young _), _, _) _ =
+ pprPanic "CallAreas must not be live across function calls" (ppr bid)
+ slots = expectJust "live_vars slots" $ lookupBlockEnv slotEnv bid
+ youngByte = expectJust "live_vars bid_pos" $ lookupFM areaMap (CallArea (Young bid))
----------------------------------------------------------------