author     Ben.Lippmeier@anu.edu.au <unknown>    2009-02-03 04:05:40 +0000
committer  Ben.Lippmeier@anu.edu.au <unknown>    2009-02-03 04:05:40 +0000
commit     a842f3d5e7c9026a642589948ef67dbaf6272396 (patch)
tree       7991bdf943ac022bc33359b190d1cd58086651e6 /compiler/nativeGen/RegAlloc
parent     cbc96da034482b769889c109f6cc822f42b12027 (diff)
download   haskell-a842f3d5e7c9026a642589948ef67dbaf6272396.tar.gz
NCG: Split out joinToTargets from linear allocator into its own module.
* Also fix a nasty bug when creating fixup code that has a cyclic register movement graph.
Diffstat (limited to 'compiler/nativeGen/RegAlloc')
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/Base.hs           |   9
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs  | 332
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/Main.hs           | 273
3 files changed, 386 insertions, 228 deletions
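
The commit message above mentions a nasty bug in fixup code whose register-movement graph is cyclic. The following is a rough, self-contained sketch of the idea the new JoinToTargets module uses, not the GHC code itself: the names Reg, Move, Spill, Reload and fixupCode are made up, and it uses Data.Graph.stronglyConnComp where GHC uses its own Digraph module. Acyclic components are emitted as plain moves in dependency order; a cyclic component is broken by parking one source value in a spare slot and reloading it last.

module FixupSketch where

import Data.Graph (SCC (..), stronglyConnComp)

type Reg  = Int

data Move = Move Reg Reg      -- copy the value in the first reg into the second
          | Spill Reg         -- park this reg's value in the spare slot
          | Reload Reg        -- fetch the spare slot's value into this reg
  deriving Show

-- Assumes each reg is the source of at most one move and the destination of
-- at most one move, so a cyclic component is a simple cycle and one spare
-- slot suffices to break it.
fixupCode :: [(Reg, Reg)] -> [Move]
fixupCode = concatMap component . sccs
  where
    -- One node per pending move, keyed by its source reg, with an edge to
    -- its destination reg.  stronglyConnComp returns components in reverse
    -- topological order, so a chain r1 -> r2 -> r3 comes out as
    -- [Move 2 3, Move 1 2], i.e. each reg is read before it is overwritten.
    sccs ms = stronglyConnComp [ ((s, d), s, [d]) | (s, d) <- ms ]

    component (AcyclicSCC (s, d))         = [Move s d]
    component (CyclicSCC ((s, d) : rest)) =
        -- break the cycle: park one source value, emit the rest (now
        -- acyclic), then reload the parked value into its destination last.
        Spill s : concatMap component (sccs rest) ++ [Reload d]
    component (CyclicSCC [])              = []

For example, fixupCode [(1,2),(2,1)] produces something like [Spill 1, Move 2 1, Reload 2]: the two registers are swapped via the spare slot, without needing a scratch register.
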
diff --git a/compiler/nativeGen/RegAlloc/Linear/Base.hs b/compiler/nativeGen/RegAlloc/Linear/Base.hs
index 95c99653a8..a986c0ff32 100644
--- a/compiler/nativeGen/RegAlloc/Linear/Base.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/Base.hs
@@ -3,7 +3,9 @@
module RegAlloc.Linear.Base (
BlockAssignment,
+
Loc(..),
+ regsOfLoc,
-- for stats
SpillReason(..),
@@ -65,6 +67,13 @@ instance Outputable Loc where
ppr l = text (show l)
+-- | Get the reg numbers stored in this Loc.
+regsOfLoc :: Loc -> [RegNo]
+regsOfLoc (InReg r) = [r]
+regsOfLoc (InBoth r _) = [r]
+regsOfLoc (InMem _) = []
+
+
-- | Reasons why instructions might be inserted by the spiller.
-- Used when generating stats for -ddrop-asm-stats.
--
diff --git a/compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs b/compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs
new file mode 100644
index 0000000000..1dd7da2f6e
--- /dev/null
+++ b/compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs
@@ -0,0 +1,332 @@
+{-# OPTIONS -fno-warn-missing-signatures #-}
+
+
+-- | Handles joining of a jump instruction to its targets.
+
+-- The first time we encounter a jump to a particular basic block, we
+-- record the assignment of temporaries. The next time we encounter a
+-- jump to the same block, we compare our current assignment to the
+-- stored one. They might be different if spilling has occurred in one
+-- branch; so some fixup code will be required to match up the assignments.
+--
+module RegAlloc.Linear.JoinToTargets (
+ joinToTargets
+)
+
+where
+
+import RegAlloc.Linear.State
+import RegAlloc.Linear.Base
+import RegAlloc.Linear.FreeRegs
+
+import BlockId
+import MachInstrs
+import MachRegs
+import RegAllocInfo
+import RegLiveness
+import Cmm hiding (RegSet)
+
+import Digraph
+import Outputable
+import Unique
+import UniqFM
+import UniqSet
+
+
+-- | For a jump instruction at the end of a block, generate fixup code so its
+-- vregs are in the correct regs for its destination.
+--
+joinToTargets
+ :: BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
+ -- that are known to be live on the entry to each block.
+
+ -> BlockId -- ^ id of the current block
+ -> Instr -- ^ branch instr on the end of the source block.
+
+ -> RegM ([NatBasicBlock] -- fresh blocks of fixup code.
+ , Instr) -- the original branch instruction, but maybe patched to jump
+ -- to a fixup block first.
+
+joinToTargets block_live id instr
+
+ -- we only need to worry about jump instructions.
+ | not $ isJumpish instr
+ = return ([], instr)
+
+ | otherwise
+ = joinToTargets' block_live [] id instr (jumpDests instr [])
+
+-----
+joinToTargets'
+ :: BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
+ -- that are known to be live on the entry to each block.
+
+ -> [NatBasicBlock] -- ^ acc blocks of fixup code.
+
+ -> BlockId -- ^ id of the current block
+ -> Instr -- ^ branch instr on the end of the source block.
+
+ -> [BlockId] -- ^ branch destinations still to consider.
+
+ -> RegM ( [NatBasicBlock]
+ , Instr)
+
+-- no more targets to consider. all done.
+joinToTargets' _ new_blocks _ instr []
+ = return (new_blocks, instr)
+
+-- handle a branch target.
+joinToTargets' block_live new_blocks block_id instr (dest:dests)
+ = do
+ -- get the map of where the vregs are stored on entry to each basic block.
+ block_assig <- getBlockAssigR
+
+ -- get the assignment on entry to the branch instruction.
+ assig <- getAssigR
+
+ -- adjust the current assignment to remove any vregs that are not live
+ -- on entry to the destination block.
+ let Just live_set = lookupBlockEnv block_live dest
+ let still_live uniq _ = uniq `elemUniqSet_Directly` live_set
+ let adjusted_assig = filterUFM_Directly still_live assig
+
+ -- and free up those registers which are now free.
+ let to_free =
+ [ r | (reg, loc) <- ufmToList assig
+ , not (elemUniqSet_Directly reg live_set)
+ , r <- regsOfLoc loc ]
+
+ case lookupBlockEnv block_assig dest of
+ Nothing
+ -> joinToTargets_first
+ block_live new_blocks block_id instr dest dests
+ block_assig adjusted_assig to_free
+
+ Just (_, dest_assig)
+ -> joinToTargets_again
+ block_live new_blocks block_id instr dest dests
+ adjusted_assig dest_assig
+
+
+-- this is the first time we jumped to this block.
+joinToTargets_first block_live new_blocks block_id instr dest dests
+ block_assig src_assig to_free
+
+ = do -- free up the regs that are not live on entry to this block.
+ freeregs <- getFreeRegsR
+ let freeregs' = foldr releaseReg freeregs to_free
+
+ -- remember the current assignment on entry to this block.
+ setBlockAssigR (extendBlockEnv block_assig dest
+ (freeregs', src_assig))
+
+ joinToTargets' block_live new_blocks block_id instr dests
+
+
+-- we've jumped to this block before
+joinToTargets_again
+ block_live new_blocks block_id instr dest dests
+ src_assig dest_assig
+
+ -- the assignments already match, no problem.
+ | ufmToList dest_assig == ufmToList src_assig
+ = joinToTargets' block_live new_blocks block_id instr dests
+
+ -- assignments don't match, need fixup code
+ | otherwise
+ = do
+
+ -- make a graph of what things need to be moved where.
+ let graph = makeRegMovementGraph src_assig dest_assig
+
+ -- look for cycles in the graph. This can happen if regs need to be swapped.
+ -- Note that we depend on the fact that this function does a
+ -- bottom up traversal of the tree-like portions of the graph.
+ --
+ -- eg, if we have
+ -- R1 -> R2 -> R3
+ --
+ -- ie move value in R1 to R2 and value in R2 to R3.
+ --
+ -- We need to do the R2 -> R3 move before R1 -> R2.
+ --
+ let sccs = stronglyConnCompFromEdgedVerticesR graph
+
+ -- debugging
+{- pprTrace
+ ("joinToTargets: making fixup code")
+ (vcat [ text " in block: " <> ppr block_id
+ , text " jmp instruction: " <> ppr instr
+ , text " src assignment: " <> ppr src_assig
+ , text " dest assignment: " <> ppr dest_assig
+ , text " movement graph: " <> ppr graph
+ , text " sccs of graph: " <> ppr sccs
+ , text ""])
+ (return ())
+-}
+ delta <- getDeltaR
+ fixUpInstrs_ <- mapM (handleComponent delta instr) sccs
+ let fixUpInstrs = concat fixUpInstrs_
+
+ -- make a new basic block containing the fixup code.
+ -- At the end of the current block we will jump to the fixup one,
+ -- then that will jump to our original destination.
+ fixup_block_id <- getUniqueR
+ let block = BasicBlock (BlockId fixup_block_id)
+ $ fixUpInstrs ++ mkBranchInstr dest
+
+{- pprTrace
+ ("joinToTargets: fixup code is:")
+ (vcat [ ppr block
+ , text ""])
+ (return ())
+-}
+ -- if we didn't need any fixups, then don't include the block
+ case fixUpInstrs of
+ [] -> joinToTargets' block_live new_blocks block_id instr dests
+
+ -- patch the original branch instruction so it goes to our
+ -- fixup block instead.
+ _ -> let instr' = patchJump instr dest (BlockId fixup_block_id)
+ in joinToTargets' block_live (block : new_blocks) block_id instr' dests
+
+
+-- | Construct a graph of register\/spill movements.
+--
+-- Cyclic components seem to occur only very rarely.
+--
+-- We cut some corners by not handling memory-to-memory moves.
+-- This shouldn't happen because every temporary gets its own stack slot.
+--
+makeRegMovementGraph :: RegMap Loc -> RegMap Loc -> [(Unique, Loc, [Loc])]
+makeRegMovementGraph adjusted_assig dest_assig
+ = let
+ mkNodes src vreg
+ = expandNode vreg src
+ $ lookupWithDefaultUFM_Directly
+ dest_assig
+ (panic "RegAllocLinear.makeRegMovementGraph")
+ vreg
+
+ in [ node | (vreg, src) <- ufmToList adjusted_assig
+ , node <- mkNodes src vreg ]
+
+
+-- | Expand out the destination, so InBoth destinations turn into
+-- a combination of InReg and InMem.
+
+-- The InBoth handling is a little tricky here. If the destination is
+-- InBoth, then we must ensure that the value ends up in both locations.
+-- An InBoth destination must conflict with an InReg or InMem source, so
+-- we expand an InBoth destination as necessary.
+--
+-- An InBoth source is slightly different: we only care about the register
+-- that the source value is in, so that we can move it to the destinations.
+--
+expandNode
+ :: a
+ -> Loc -- ^ source of move
+ -> Loc -- ^ destination of move
+ -> [(a, Loc, [Loc])]
+
+expandNode vreg loc@(InReg src) (InBoth dst mem)
+ | src == dst = [(vreg, loc, [InMem mem])]
+ | otherwise = [(vreg, loc, [InReg dst, InMem mem])]
+
+expandNode vreg loc@(InMem src) (InBoth dst mem)
+ | src == mem = [(vreg, loc, [InReg dst])]
+ | otherwise = [(vreg, loc, [InReg dst, InMem mem])]
+
+expandNode _ (InBoth _ src) (InMem dst)
+ | src == dst = [] -- guaranteed to be true
+
+expandNode _ (InBoth src _) (InReg dst)
+ | src == dst = []
+
+expandNode vreg (InBoth src _) dst
+ = expandNode vreg (InReg src) dst
+
+expandNode vreg src dst
+ | src == dst = []
+ | otherwise = [(vreg, src, [dst])]
+
+
+-- | Generate fixup code for a particular component in the move graph
+-- This component tells us what values need to be moved to what
+-- destinations. We have eliminated any possibility of single-node
+-- cycles in expandNode above.
+--
+handleComponent :: Int -> Instr -> SCC (Unique, Loc, [Loc]) -> RegM [Instr]
+
+-- If the graph is acyclic then we won't get the swapping problem below.
+-- In this case we can just do the moves directly, and avoid having to
+-- go via a spill slot.
+--
+handleComponent delta _ (AcyclicSCC (vreg, src, dsts))
+ = mapM (makeMove delta vreg src) dsts
+
+
+-- Handle some cyclic moves.
+-- This can happen if we have two regs that need to be swapped.
+-- eg:
+-- vreg source loc dest loc
+-- (vreg1, InReg r1, [InReg r2])
+-- (vreg2, InReg r2, [InReg r1])
+--
+-- To avoid needing a temp register, we spill one of the source regs to its
+-- stack slot, fix up the rest of the cycle with ordinary moves, and then
+-- reload the spilled value into its destination reg.
+--
+-- Note that we cannot have cycles that involve memory locations as
+-- sources or as a single destination, because memory locations (stack
+-- slots) are allocated exclusively for a virtual register and therefore
+-- cannot require a fixup.
+--
+handleComponent delta instr
+ (CyclicSCC ( (vreg, InReg sreg, [InReg dreg]) : rest))
+ = do
+ -- spill the source into its slot
+ (instrSpill, slot)
+ <- spillR (RealReg sreg) vreg
+
+ -- reload into destination reg
+ instrLoad <- loadR (RealReg dreg) slot
+
+ remainingFixUps <- mapM (handleComponent delta instr)
+ (stronglyConnCompFromEdgedVerticesR rest)
+
+ -- make sure to do all the reloads after all the spills,
+ -- so we don't end up clobbering the source values.
+ return ([instrSpill] ++ concat remainingFixUps ++ [instrLoad])
+
+handleComponent _ _ (CyclicSCC _)
+ = panic "Register Allocator: handleComponent cyclic"
+
+
+-- | Move a vreg between these two locations.
+--
+makeMove
+ :: Int -- ^ current C stack delta.
+ -> Unique -- ^ unique of the vreg that we're moving.
+ -> Loc -- ^ source location.
+ -> Loc -- ^ destination location.
+ -> RegM Instr -- ^ move instruction.
+
+makeMove _ vreg (InReg src) (InReg dst)
+ = do recordSpill (SpillJoinRR vreg)
+ return $ mkRegRegMoveInstr (RealReg src) (RealReg dst)
+
+makeMove delta vreg (InMem src) (InReg dst)
+ = do recordSpill (SpillJoinRM vreg)
+ return $ mkLoadInstr (RealReg dst) delta src
+
+makeMove delta vreg (InReg src) (InMem dst)
+ = do recordSpill (SpillJoinRM vreg)
+ return $ mkSpillInstr (RealReg src) delta dst
+
+-- we don't handle memory to memory moves.
+-- they shouldn't happen because we don't share stack slots between vregs.
+makeMove _ vreg src dst
+ = panic $ "makeMove " ++ show vreg ++ " (" ++ show src ++ ") ("
+ ++ show dst ++ ")"
+ ++ " we don't handle mem->mem moves."
+
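
The header comment of the new JoinToTargets module describes the joining protocol: the first jump to a block records the current vreg assignment, and later jumps are compared against that recording, with fixup moves generated only when the two differ. Below is a minimal model of that protocol under simplifying assumptions: the VReg, RealReg, BlockId and Assignment types are made up, there are no spill slots or InBoth locations, and none of the free-register bookkeeping the real RegM code does is modelled.

module JoinSketch where

import qualified Data.Map as M

type VReg       = Int
type RealReg    = Int
type BlockId    = Int
type Assignment = M.Map VReg RealReg   -- which real reg each vreg currently lives in

-- On the first jump to 'dest' we record the assignment at the jump site;
-- on later jumps we compare against the recording and return the (src, dst)
-- real-reg moves needed to make the two agree.
joinTo :: M.Map BlockId Assignment        -- assignments recorded so far
       -> BlockId                         -- destination block of the jump
       -> Assignment                      -- assignment at the jump site
       -> (M.Map BlockId Assignment, [(RealReg, RealReg)])
joinTo recorded dest here =
  case M.lookup dest recorded of
    Nothing -> (M.insert dest here recorded, [])      -- first visit: just record
    Just there
      | there == here -> (recorded, [])               -- assignments already match
      | otherwise     -> (recorded, movesBetween here there)

-- One move per vreg whose real reg differs between the two assignments;
-- these (src, dst) pairs are exactly the register-movement graph that the
-- cycle-breaking sketch earlier on this page takes as input.
movesBetween :: Assignment -> Assignment -> [(RealReg, RealReg)]
movesBetween here there =
  [ (src, dst) | (vreg, src) <- M.toList here
               , Just dst    <- [M.lookup vreg there]
               , src /= dst ]
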
diff --git a/compiler/nativeGen/RegAlloc/Linear/Main.hs b/compiler/nativeGen/RegAlloc/Linear/Main.hs
index 6dde72a3c1..46954cec75 100644
--- a/compiler/nativeGen/RegAlloc/Linear/Main.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/Main.hs
@@ -95,6 +95,7 @@ import RegAlloc.Linear.Base
import RegAlloc.Linear.StackMap
import RegAlloc.Linear.FreeRegs
import RegAlloc.Linear.Stats
+import RegAlloc.Linear.JoinToTargets
import BlockId
import MachRegs
@@ -104,7 +105,7 @@ import RegLiveness
import Cmm hiding (RegSet)
import Digraph
-import Unique ( Uniquable(getUnique), Unique )
+import Unique
import UniqSet
import UniqFM
import UniqSupply
@@ -215,7 +216,7 @@ processBlock
processBlock block_live (BasicBlock id instrs)
= do initBlock id
(instrs', fixups)
- <- linearRA block_live [] [] instrs
+ <- linearRA block_live [] [] id instrs
return $ BasicBlock id instrs' : fixups
@@ -238,38 +239,51 @@ initBlock id
setAssigR assig
+-- | Do allocation for a sequence of instructions.
linearRA
- :: BlockMap RegSet
- -> [Instr] -> [NatBasicBlock] -> [LiveInstr]
- -> RegM ([Instr], [NatBasicBlock])
+ :: BlockMap RegSet -- ^ map of what vregs are live on entry to each block.
+ -> [Instr] -- ^ accumulator for instructions already processed.
+ -> [NatBasicBlock] -- ^ accumulator for blocks of fixup code.
+ -> BlockId -- ^ id of the current block, for debugging.
+ -> [LiveInstr] -- ^ liveness annotated instructions in this block.
-linearRA _ instr_acc fixups []
- = return (reverse instr_acc, fixups)
+ -> RegM ( [Instr] -- ^ instructions after register allocation
+ , [NatBasicBlock]) -- ^ fresh blocks of fixup code.
-linearRA block_live instr_acc fixups (instr:instrs)
- = do (instr_acc', new_fixups) <- raInsn block_live instr_acc instr
- linearRA block_live instr_acc' (new_fixups++fixups) instrs
--- -----------------------------------------------------------------------------
--- Register allocation for a single instruction
+linearRA _ accInstr accFixup _ []
+ = return
+ ( reverse accInstr -- instrs need to be returned in the correct order.
+ , accFixup) -- it doesn't matter what order the fixup blocks are returned in.
+
+
+linearRA block_live accInstr accFixups id (instr:instrs)
+ = do
+ (accInstr', new_fixups)
+ <- raInsn block_live accInstr id instr
+
+ linearRA block_live accInstr' (new_fixups ++ accFixups) id instrs
-raInsn :: BlockMap RegSet -- Live temporaries at each basic block
- -> [Instr] -- new instructions (accum.)
- -> LiveInstr -- the instruction (with "deaths")
- -> RegM (
- [Instr], -- new instructions
- [NatBasicBlock] -- extra fixup blocks
- )
-raInsn _ new_instrs (Instr (COMMENT _) Nothing)
+-- | Do allocation for a single instruction.
+raInsn
+ :: BlockMap RegSet -- ^ map of what vregs are live on entry to each block.
+ -> [Instr] -- ^ accumulator for instructions already processed.
+ -> BlockId -- ^ the id of the current block, for debugging
+ -> LiveInstr -- ^ the instr to have its regs allocated, with liveness info.
+ -> RegM
+ ( [Instr] -- new instructions
+ , [NatBasicBlock]) -- extra fixup blocks
+
+raInsn _ new_instrs _ (Instr (COMMENT _) Nothing)
= return (new_instrs, [])
-raInsn _ new_instrs (Instr (DELTA n) Nothing)
+raInsn _ new_instrs _ (Instr (DELTA n) Nothing)
= do
setDeltaR n
return (new_instrs, [])
-raInsn block_live new_instrs (Instr instr (Just live))
+raInsn block_live new_instrs id (Instr instr (Just live))
= do
assig <- getAssigR
@@ -299,20 +313,23 @@ raInsn block_live new_instrs (Instr instr (Just live))
{-
freeregs <- getFreeRegsR
assig <- getAssigR
- pprTrace "raInsn" (text "ELIMINATED: " <> docToSDoc (pprInstr instr) $$ ppr r_dying <+> ppr w_dying $$ text (show freeregs) $$ ppr assig) $ do
+ pprTrace "raInsn" (text "ELIMINATED: " <> docToSDoc (pprInstr instr)
+ $$ ppr r_dying <+> ppr w_dying $$ text (show freeregs) $$ ppr assig) $ do
-}
return (new_instrs, [])
- _ -> genRaInsn block_live new_instrs instr
+ _ -> genRaInsn block_live new_instrs id instr
(uniqSetToList $ liveDieRead live)
(uniqSetToList $ liveDieWrite live)
-raInsn _ _ li
- = pprPanic "raInsn" (text "no match for:" <> ppr li)
+raInsn _ _ id instr
+ = pprPanic "raInsn" (text "no match for:" <> ppr instr)
+
+
-genRaInsn block_live new_instrs instr r_dying w_dying =
+genRaInsn block_live new_instrs block_id instr r_dying w_dying =
case regUsage instr of { RU read written ->
case partition isRealReg written of { (real_written1,virt_written) ->
do
@@ -346,7 +363,7 @@ genRaInsn block_live new_instrs instr r_dying w_dying =
-- these dead regs might in fact be live in the jump targets (they're
-- only dead in the code that follows in the current basic block).
(fixup_blocks, adjusted_instr)
- <- joinToTargets block_live [] instr (jumpDests instr [])
+ <- joinToTargets block_live block_id instr
-- (e) Delete all register assignments for temps which are read
-- (only) and die here. Update the free register list.
@@ -613,203 +630,3 @@ loadTemp True vreg (Just (InMem slot)) hreg spills
loadTemp _ _ _ _ spills =
return spills
-
--- -----------------------------------------------------------------------------
--- Joining a jump instruction to its targets
-
--- The first time we encounter a jump to a particular basic block, we
--- record the assignment of temporaries. The next time we encounter a
--- jump to the same block, we compare our current assignment to the
--- stored one. They might be different if spilling has occrred in one
--- branch; so some fixup code will be required to match up the
--- assignments.
-
-joinToTargets
- :: BlockMap RegSet
- -> [NatBasicBlock]
- -> Instr
- -> [BlockId]
- -> RegM ([NatBasicBlock], Instr)
-
-joinToTargets _ new_blocks instr []
- = return (new_blocks, instr)
-
-joinToTargets block_live new_blocks instr (dest:dests) = do
- block_assig <- getBlockAssigR
- assig <- getAssigR
- let
- -- adjust the assignment to remove any registers which are not
- -- live on entry to the destination block.
- adjusted_assig = filterUFM_Directly still_live assig
-
- live_set = lookItUp "joinToTargets" block_live dest
- still_live uniq _ = uniq `elemUniqSet_Directly` live_set
-
- -- and free up those registers which are now free.
- to_free =
- [ r | (reg, loc) <- ufmToList assig,
- not (elemUniqSet_Directly reg live_set),
- r <- regsOfLoc loc ]
-
- regsOfLoc (InReg r) = [r]
- regsOfLoc (InBoth r _) = [r]
- regsOfLoc (InMem _) = []
- -- in
- case lookupBlockEnv block_assig dest of
- -- Nothing <=> this is the first time we jumped to this
- -- block.
- Nothing -> do
- freeregs <- getFreeRegsR
- let freeregs' = foldr releaseReg freeregs to_free
- setBlockAssigR (extendBlockEnv block_assig dest
- (freeregs',adjusted_assig))
- joinToTargets block_live new_blocks instr dests
-
- Just (_, dest_assig)
-
- -- the assignments match
- | ufmToList dest_assig == ufmToList adjusted_assig
- -> joinToTargets block_live new_blocks instr dests
-
- -- need fixup code
- | otherwise
- -> do
- delta <- getDeltaR
-
- let graph = makeRegMovementGraph adjusted_assig dest_assig
- let sccs = stronglyConnCompFromEdgedVerticesR graph
- fixUpInstrs <- mapM (handleComponent delta instr) sccs
-
- block_id <- getUniqueR
- let block = BasicBlock (BlockId block_id) $
- concat fixUpInstrs ++ mkBranchInstr dest
-
- let instr' = patchJump instr dest (BlockId block_id)
-
- joinToTargets block_live (block : new_blocks) instr' dests
-
-
--- | Construct a graph of register\/spill movements.
---
--- We cut some corners by
--- a) not handling cyclic components
--- b) not handling memory-to-memory moves.
---
--- Cyclic components seem to occur only very rarely,
--- and we don't need memory-to-memory moves because we
--- make sure that every temporary always gets its own
--- stack slot.
-
-makeRegMovementGraph :: RegMap Loc -> RegMap Loc -> [(Unique, Loc, [Loc])]
-makeRegMovementGraph adjusted_assig dest_assig
- = let
- mkNodes src vreg
- = expandNode vreg src
- $ lookupWithDefaultUFM_Directly
- dest_assig
- (panic "RegAllocLinear.makeRegMovementGraph")
- vreg
-
- in [ node | (vreg, src) <- ufmToList adjusted_assig
- , node <- mkNodes src vreg ]
-
--- The InBoth handling is a little tricky here. If
--- the destination is InBoth, then we must ensure that
--- the value ends up in both locations. An InBoth
--- destination must conflict with an InReg or InMem
--- source, so we expand an InBoth destination as
--- necessary. An InBoth source is slightly different:
--- we only care about the register that the source value
--- is in, so that we can move it to the destinations.
-
-expandNode vreg loc@(InReg src) (InBoth dst mem)
- | src == dst = [(vreg, loc, [InMem mem])]
- | otherwise = [(vreg, loc, [InReg dst, InMem mem])]
-
-expandNode vreg loc@(InMem src) (InBoth dst mem)
- | src == mem = [(vreg, loc, [InReg dst])]
- | otherwise = [(vreg, loc, [InReg dst, InMem mem])]
-
-expandNode _ (InBoth _ src) (InMem dst)
- | src == dst = [] -- guaranteed to be true
-
-expandNode _ (InBoth src _) (InReg dst)
- | src == dst = []
-
-expandNode vreg (InBoth src _) dst
- = expandNode vreg (InReg src) dst
-
-expandNode vreg src dst
- | src == dst = []
- | otherwise = [(vreg, src, [dst])]
-
-
--- | Make a move instruction between these two locations so we
--- can join together allocations for different basic blocks.
---
-makeMove :: Int -> Unique -> Loc -> Loc -> RegM Instr
-makeMove _ vreg (InReg src) (InReg dst)
- = do recordSpill (SpillJoinRR vreg)
- return $ mkRegRegMoveInstr (RealReg src) (RealReg dst)
-
-makeMove delta vreg (InMem src) (InReg dst)
- = do recordSpill (SpillJoinRM vreg)
- return $ mkLoadInstr (RealReg dst) delta src
-
-makeMove delta vreg (InReg src) (InMem dst)
- = do recordSpill (SpillJoinRM vreg)
- return $ mkSpillInstr (RealReg src) delta dst
-
-makeMove _ vreg src dst
- = panic $ "makeMove " ++ show vreg ++ " (" ++ show src ++ ") ("
- ++ show dst ++ ")"
- ++ " (workaround: use -fviaC)"
-
-
--- we have eliminated any possibility of single-node cylces
--- in expandNode above.
-handleComponent :: Int -> Instr -> SCC (Unique, Loc, [Loc]) -> RegM [Instr]
-handleComponent delta _ (AcyclicSCC (vreg,src,dsts))
- = mapM (makeMove delta vreg src) dsts
-
--- we can not have cycles that involve memory
--- locations as source nor as single destination
--- because memory locations (stack slots) are
--- allocated exclusively for a virtual register and
--- therefore can not require a fixup
-handleComponent delta instr (CyclicSCC ((vreg, (InReg sreg),dsts):rest))
- = do
- spill_id <- getUniqueR
- (_, slot) <- spillR (RealReg sreg) spill_id
- remainingFixUps <- mapM (handleComponent delta instr) (stronglyConnCompFromEdgedVerticesR rest)
- restoreAndFixInstr <- getRestoreMoves dsts slot
- return ([instr] ++ concat remainingFixUps ++ restoreAndFixInstr)
-
- where
- getRestoreMoves [r@(InReg reg), mem@(InMem _)] slot
- = do
- restoreToReg <- loadR (RealReg reg) slot
- moveInstr <- makeMove delta vreg r mem
- return $ [COMMENT (fsLit "spill join move"), restoreToReg, moveInstr]
-
- getRestoreMoves [InReg reg] slot
- = loadR (RealReg reg) slot >>= return . (:[])
-
- getRestoreMoves [InMem _] _ = panic "getRestoreMoves can not handle memory only restores"
- getRestoreMoves _ _ = panic "getRestoreMoves unknown case"
-
-
-handleComponent _ _ (CyclicSCC _)
- = panic "Register Allocator: handleComponent cyclic"
-
-
-
--- -----------------------------------------------------------------------------
--- Utils
-
-my_fromJust :: String -> SDoc -> Maybe a -> a
-my_fromJust _ _ (Just x) = x
-my_fromJust s p Nothing = pprPanic ("fromJust: " ++ s) p
-
-lookItUp :: String -> BlockMap a -> BlockId -> a
-lookItUp str fm x = my_fromJust str (ppr x) (lookupBlockEnv fm x)