author     Andreas Klebinger <klebinger.andreas@gmx.at>   2018-11-17 11:20:36 +0100
committer  Andreas Klebinger <klebinger.andreas@gmx.at>   2018-11-17 11:20:36 +0100
commit     912fd2b6ca0bc51076835b6e3d1f469b715e2760 (patch)
tree       ae1c96217e0eea77d0bfd53101d3fa868d45027d /compiler
parent     6ba9aa5dd0a539adf02690a9c71d1589f541b3c5 (diff)
NCG: New code layout algorithm.
Summary:
This patch implements a new code layout algorithm. It has been tested for
x86 and is disabled on other platforms.

Performance varies slightly by CPU/machine but in general seems to be better
by around 2%. Nofib shows only small differences of about +/- ~0.5% overall,
depending on flags/machine; performance in other benchmarks improved
significantly.

Other benchmarks include at least the benchmark suites of: aeson, vector,
megaparsec, attoparsec, containers, text and xeno.

While the magnitude of the gains differed, three different CPUs were tested,
with all getting faster although to differing degrees.
I tested: Sandy Bridge (Xeon), Haswell, Skylake.

* Library benchmark results summarized:
  * containers: ~1.5% faster
  * aeson: ~2% faster
  * megaparsec: ~2-5% faster
  * xml library benchmarks: 0.2%-1.1% faster
  * vector-benchmarks: 1-4% faster
  * text: 5.5% faster

On average GHC compile times go down, as the speedup from GHC itself being
compiled with the new layout outweighs the overhead of running the new
layout algorithm.

Things this patch does:

* Move the code responsible for block layout into its own module.
* Move the NcgImpl class into the NCGMonad module.
* Extract a control flow graph from the input Cmm.
* Update this CFG to keep it in sync with changes during asm codegen.
  This has been tested on x64 but should work on x86.
  Other platforms still use the old code layout.
* Assign weights to the edges in the CFG, based on type and limited static
  analysis, which are then used for block layout.
* Once we have the final code layout, eliminate some redundant jumps.
  In particular, turn a sequence of:

      jne .foo
      jmp .bar
    foo:

  into

      je .bar
    foo:
      ..

Test Plan: ci

Reviewers: bgamari, jmct, jrtc27, simonmar, simonpj, RyanGlScott

Reviewed By: RyanGlScott

Subscribers: RyanGlScott, trommler, jmct, carter, thomie, rwbarton

GHC Trac Issues: #15124

Differential Revision: https://phabricator.haskell.org/D4726
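To make the jump-inversion step concrete, here is a minimal, self-contained
Haskell sketch of the idea. These types are illustrative stand-ins only, not
the NCG's real instruction representation; the patch's actual implementation
lives in X86.CodeGen.invertCondBranches (see the diff below).

    -- Illustrative stand-ins, NOT the NCG's real types.
    data Cond = CEQ | CNE deriving (Eq, Show)

    invertCond :: Cond -> Cond
    invertCond CEQ = CNE
    invertCond CNE = CEQ

    data Instr lbl
      = JXX Cond lbl -- conditional jump
      | JMP lbl      -- unconditional jump
      deriving Show

    -- When a block ends in a conditional jump to the fall-through block
    -- followed by an unconditional jump, invert the condition and keep a
    -- single jump: [JXX CNE "foo", JMP "bar"] with fall-through "foo"
    -- becomes [JXX CEQ "bar"].
    invertJumpPair :: Eq lbl => lbl -> [Instr lbl] -> [Instr lbl]
    invertJumpPair fallthrough [JXX c t, JMP other]
      | t == fallthrough = [JXX (invertCond c) other]
    invertJumpPair _ instrs = instrs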
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/cmm/CmmMachOp.hs                           |  13
-rw-r--r--  compiler/cmm/CmmNode.hs                             |  23
-rw-r--r--  compiler/cmm/CmmPipeline.hs                         |   1
-rw-r--r--  compiler/cmm/Hoopl/Collections.hs                   |  11
-rw-r--r--  compiler/cmm/Hoopl/Label.hs                         |   7
-rw-r--r--  compiler/ghc.cabal.in                               |   2
-rw-r--r--  compiler/main/DynFlags.hs                           | 105
-rw-r--r--  compiler/nativeGen/AsmCodeGen.hs                    | 334
-rw-r--r--  compiler/nativeGen/BlockLayout.hs                   | 819
-rw-r--r--  compiler/nativeGen/CFG.hs                           | 635
-rw-r--r--  compiler/nativeGen/NCGMonad.hs                      |  89
-rw-r--r--  compiler/nativeGen/PPC/CodeGen.hs                   |   4
-rw-r--r--  compiler/nativeGen/PPC/Instr.hs                     |  10
-rw-r--r--  compiler/nativeGen/PPC/RegInfo.hs                   |   5
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/Base.hs          |  11
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs |  47
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/Main.hs          |   8
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/State.hs         |  14
-rw-r--r--  compiler/nativeGen/RegAlloc/Liveness.hs             |  44
-rw-r--r--  compiler/nativeGen/SPARC/CodeGen.hs                 |   2
-rw-r--r--  compiler/nativeGen/SPARC/ShortcutJump.hs            |   7
-rw-r--r--  compiler/nativeGen/X86/CodeGen.hs                   | 251
-rw-r--r--  compiler/nativeGen/X86/Cond.hs                      |  41
-rw-r--r--  compiler/nativeGen/X86/Instr.hs                     |  18
-rw-r--r--  compiler/nativeGen/X86/Regs.hs                      |   1
-rw-r--r--  compiler/utils/Digraph.hs                           |  94
-rw-r--r--  compiler/utils/OrdList.hs                           |  10
-rw-r--r--  compiler/utils/Util.hs                              |  22
28 files changed, 2295 insertions, 333 deletions
diff --git a/compiler/cmm/CmmMachOp.hs b/compiler/cmm/CmmMachOp.hs
index 70e53d2325..1441ecaa0f 100644
--- a/compiler/cmm/CmmMachOp.hs
+++ b/compiler/cmm/CmmMachOp.hs
@@ -2,7 +2,7 @@ module CmmMachOp
( MachOp(..)
, pprMachOp, isCommutableMachOp, isAssociativeMachOp
, isComparisonMachOp, maybeIntComparison, machOpResultType
- , machOpArgReps, maybeInvertComparison
+ , machOpArgReps, maybeInvertComparison, isFloatComparison
-- MachOp builders
, mo_wordAdd, mo_wordSub, mo_wordEq, mo_wordNe,mo_wordMul, mo_wordSQuot
@@ -322,6 +322,17 @@ maybeIntComparison mop =
MO_U_Lt w -> Just w
_ -> Nothing
+isFloatComparison :: MachOp -> Bool
+isFloatComparison mop =
+ case mop of
+ MO_F_Eq {} -> True
+ MO_F_Ne {} -> True
+ MO_F_Ge {} -> True
+ MO_F_Le {} -> True
+ MO_F_Gt {} -> True
+ MO_F_Lt {} -> True
+ _other -> False
+
-- -----------------------------------------------------------------------------
-- Inverting conditions
diff --git a/compiler/cmm/CmmNode.hs b/compiler/cmm/CmmNode.hs
index 286b1e306c..7ecfa468a2 100644
--- a/compiler/cmm/CmmNode.hs
+++ b/compiler/cmm/CmmNode.hs
@@ -7,6 +7,8 @@
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE UndecidableInstances #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+
-- CmmNode type for representation using Hoopl graphs.
@@ -16,7 +18,7 @@ module CmmNode (
ForeignConvention(..), ForeignTarget(..), foreignTargetHints,
CmmReturnInfo(..),
mapExp, mapExpDeep, wrapRecExp, foldExp, foldExpDeep, wrapRecExpf,
- mapExpM, mapExpDeepM, wrapRecExpM, mapSuccessors,
+ mapExpM, mapExpDeepM, wrapRecExpM, mapSuccessors, mapCollectSuccessors,
-- * Tick scopes
CmmTickScope(..), isTickSubScope, combineTickScopes,
@@ -37,6 +39,7 @@ import qualified Unique as U
import Hoopl.Block
import Hoopl.Graph
+import Hoopl.Collections
import Hoopl.Label
import Data.Maybe
import Data.List (tails,sortBy)
@@ -569,6 +572,24 @@ mapSuccessors f (CmmCondBranch p y n l) = CmmCondBranch p (f y) (f n) l
mapSuccessors f (CmmSwitch e ids) = CmmSwitch e (mapSwitchTargets f ids)
mapSuccessors _ n = n
+mapCollectSuccessors :: forall a. (Label -> (Label,a)) -> CmmNode O C
+ -> (CmmNode O C, [a])
+mapCollectSuccessors f (CmmBranch bid)
+ = let (bid', acc) = f bid in (CmmBranch bid', [acc])
+mapCollectSuccessors f (CmmCondBranch p y n l)
+ = let (bidt, acct) = f y
+ (bidf, accf) = f n
+ in (CmmCondBranch p bidt bidf l, [accf, acct])
+mapCollectSuccessors f (CmmSwitch e ids)
+ = let lbls = switchTargetsToList ids :: [Label]
+ lblMap = mapFromList $ zip lbls (map f lbls) :: LabelMap (Label, a)
+ in ( CmmSwitch e
+ (mapSwitchTargets
+ (\l -> fst $ mapFindWithDefault (error "impossible") l lblMap) ids)
+ , map snd (mapElems lblMap)
+ )
+mapCollectSuccessors _ n = (n, [])
+
-- -----------------------------------------------------------------------------
-- | Tickish in Cmm context (annotations only)
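A hedged usage sketch for the new mapCollectSuccessors above (the helper below
is hypothetical and not part of the patch): relabel successors through a
LabelMap while collecting, per edge, whether a renaming happened.

    -- Hypothetical helper, not part of the patch: rename successors via a
    -- LabelMap (falling back to the old label) and collect, for each
    -- successor, the old label if it was actually replaced.
    renameSuccs :: LabelMap Label -> CmmNode O C -> (CmmNode O C, [Maybe Label])
    renameSuccs env node = mapCollectSuccessors rename node
      where
        rename lbl = case mapLookup lbl env of
          Just lbl' -> (lbl', Just lbl)  -- successor was renamed
          Nothing   -> (lbl, Nothing)    -- successor left unchanged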
diff --git a/compiler/cmm/CmmPipeline.hs b/compiler/cmm/CmmPipeline.hs
index 77598a4b09..8c4f21452a 100644
--- a/compiler/cmm/CmmPipeline.hs
+++ b/compiler/cmm/CmmPipeline.hs
@@ -156,7 +156,6 @@ cpsTop hsc_env proc =
return g
else return g
-
-- we don't need to split proc points for the NCG, unless
-- tablesNextToCode is off. The latter is because we have no
-- label to put on info tables for basic blocks that are not
diff --git a/compiler/cmm/Hoopl/Collections.hs b/compiler/cmm/Hoopl/Collections.hs
index f8bdfda3d1..d7f53a0bad 100644
--- a/compiler/cmm/Hoopl/Collections.hs
+++ b/compiler/cmm/Hoopl/Collections.hs
@@ -35,6 +35,7 @@ class IsSet set where
setDifference :: set -> set -> set
setIntersection :: set -> set -> set
setIsSubsetOf :: set -> set -> Bool
+ setFilter :: (ElemOf set -> Bool) -> set -> set
setFoldl :: (b -> ElemOf set -> b) -> b -> set -> b
setFoldr :: (ElemOf set -> b -> b) -> b -> set -> b
@@ -69,6 +70,7 @@ class IsMap map where
mapInsertWith :: (a -> a -> a) -> KeyOf map -> a -> map a -> map a
mapDelete :: KeyOf map -> map a -> map a
mapAlter :: (Maybe a -> Maybe a) -> KeyOf map -> map a -> map a
+ mapAdjust :: (a -> a) -> KeyOf map -> map a -> map a
mapUnion :: map a -> map a -> map a
mapUnionWithKey :: (KeyOf map -> a -> a -> a) -> map a -> map a -> map a
@@ -81,7 +83,10 @@ class IsMap map where
mapFoldl :: (b -> a -> b) -> b -> map a -> b
mapFoldr :: (a -> b -> b) -> b -> map a -> b
mapFoldlWithKey :: (b -> KeyOf map -> a -> b) -> b -> map a -> b
+ mapFoldMapWithKey :: Monoid m => (KeyOf map -> a -> m) -> map a -> m
mapFilter :: (a -> Bool) -> map a -> map a
+ mapFilterWithKey :: (KeyOf map -> a -> Bool) -> map a -> map a
+
mapElems :: map a -> [a]
mapKeys :: map a -> [KeyOf map]
@@ -104,7 +109,7 @@ mapUnions maps = foldl1' mapUnion maps
-- Basic instances
-----------------------------------------------------------------------------
-newtype UniqueSet = US S.IntSet deriving (Eq, Ord, Show)
+newtype UniqueSet = US S.IntSet deriving (Eq, Ord, Show, Semigroup, Monoid)
instance IsSet UniqueSet where
type ElemOf UniqueSet = Int
@@ -122,6 +127,7 @@ instance IsSet UniqueSet where
setDifference (US x) (US y) = US (S.difference x y)
setIntersection (US x) (US y) = US (S.intersection x y)
setIsSubsetOf (US x) (US y) = S.isSubsetOf x y
+ setFilter f (US s) = US (S.filter f s)
setFoldl k z (US s) = S.foldl' k z s
setFoldr k z (US s) = S.foldr k z s
@@ -147,6 +153,7 @@ instance IsMap UniqueMap where
mapInsertWith f k v (UM m) = UM (M.insertWith f k v m)
mapDelete k (UM m) = UM (M.delete k m)
mapAlter f k (UM m) = UM (M.alter f k m)
+ mapAdjust f k (UM m) = UM (M.adjust f k m)
mapUnion (UM x) (UM y) = UM (M.union x y)
mapUnionWithKey f (UM x) (UM y) = UM (M.unionWithKey f x y)
@@ -159,7 +166,9 @@ instance IsMap UniqueMap where
mapFoldl k z (UM m) = M.foldl' k z m
mapFoldr k z (UM m) = M.foldr k z m
mapFoldlWithKey k z (UM m) = M.foldlWithKey' k z m
+ mapFoldMapWithKey f (UM m) = M.foldMapWithKey f m
mapFilter f (UM m) = UM (M.filter f m)
+ mapFilterWithKey f (UM m) = UM (M.filterWithKey f m)
mapElems (UM m) = M.elems m
mapKeys (UM m) = M.keys m
diff --git a/compiler/cmm/Hoopl/Label.hs b/compiler/cmm/Hoopl/Label.hs
index 7fddbf4c3f..2e75d97244 100644
--- a/compiler/cmm/Hoopl/Label.hs
+++ b/compiler/cmm/Hoopl/Label.hs
@@ -46,7 +46,7 @@ instance Outputable Label where
-----------------------------------------------------------------------------
-- LabelSet
-newtype LabelSet = LS UniqueSet deriving (Eq, Ord, Show)
+newtype LabelSet = LS UniqueSet deriving (Eq, Ord, Show, Monoid, Semigroup)
instance IsSet LabelSet where
type ElemOf LabelSet = Label
@@ -64,7 +64,7 @@ instance IsSet LabelSet where
setDifference (LS x) (LS y) = LS (setDifference x y)
setIntersection (LS x) (LS y) = LS (setIntersection x y)
setIsSubsetOf (LS x) (LS y) = setIsSubsetOf x y
-
+ setFilter f (LS s) = LS (setFilter (f . mkHooplLabel) s)
setFoldl k z (LS s) = setFoldl (\a v -> k a (mkHooplLabel v)) z s
setFoldr k z (LS s) = setFoldr (\v a -> k (mkHooplLabel v) a) z s
@@ -92,6 +92,7 @@ instance IsMap LabelMap where
mapInsertWith f (Label k) v (LM m) = LM (mapInsertWith f k v m)
mapDelete (Label k) (LM m) = LM (mapDelete k m)
mapAlter f (Label k) (LM m) = LM (mapAlter f k m)
+ mapAdjust f (Label k) (LM m) = LM (mapAdjust f k m)
mapUnion (LM x) (LM y) = LM (mapUnion x y)
mapUnionWithKey f (LM x) (LM y) = LM (mapUnionWithKey (f . mkHooplLabel) x y)
@@ -105,7 +106,9 @@ instance IsMap LabelMap where
mapFoldr k z (LM m) = mapFoldr k z m
mapFoldlWithKey k z (LM m) =
mapFoldlWithKey (\a v -> k a (mkHooplLabel v)) z m
+ mapFoldMapWithKey f (LM m) = mapFoldMapWithKey (\k v -> f (mkHooplLabel k) v) m
mapFilter f (LM m) = LM (mapFilter f m)
+ mapFilterWithKey f (LM m) = LM (mapFilterWithKey (f . mkHooplLabel) m)
mapElems (LM m) = mapElems m
mapKeys (LM m) = map mkHooplLabel (mapKeys m)
diff --git a/compiler/ghc.cabal.in b/compiler/ghc.cabal.in
index a9a8da5806..2844e2d56e 100644
--- a/compiler/ghc.cabal.in
+++ b/compiler/ghc.cabal.in
@@ -558,6 +558,8 @@ Library
TargetReg
NCGMonad
Instruction
+ BlockLayout
+ CFG
Format
Reg
RegClass
diff --git a/compiler/main/DynFlags.hs b/compiler/main/DynFlags.hs
index 8cc360fce2..d78b5984e1 100644
--- a/compiler/main/DynFlags.hs
+++ b/compiler/main/DynFlags.hs
@@ -171,7 +171,11 @@ module DynFlags (
FilesToClean(..), emptyFilesToClean,
-- * Include specifications
- IncludeSpecs(..), addGlobalInclude, addQuoteInclude, flattenIncludes
+ IncludeSpecs(..), addGlobalInclude, addQuoteInclude, flattenIncludes,
+
+
+ -- * Make use of the Cmm CFG
+ CfgWeights(..), backendMaintainsCfg
) where
#include "HsVersions.h"
@@ -344,6 +348,7 @@ data DumpFlag
| Opt_D_dump_cmm_info
| Opt_D_dump_cmm_cps
-- end cmm subflags
+ | Opt_D_dump_cfg_weights -- ^ Dump the cfg used for block layout.
| Opt_D_dump_asm
| Opt_D_dump_asm_native
| Opt_D_dump_asm_liveness
@@ -484,6 +489,8 @@ data GeneralFlag
| Opt_DictsStrict -- be strict in argument dictionaries
| Opt_DmdTxDictSel -- use a special demand transformer for dictionary selectors
| Opt_Loopification -- See Note [Self-recursive tail calls]
+ | Opt_CfgBlocklayout -- ^ Use the cfg based block layout algorithm.
+ | Opt_WeightlessBlocklayout -- ^ Layout based on last instruction per block.
| Opt_CprAnal
| Opt_WorkerWrapper
| Opt_SolveConstantDicts
@@ -689,6 +696,8 @@ optimisationFlags = EnumSet.fromList
, Opt_DictsStrict
, Opt_DmdTxDictSel
, Opt_Loopification
+ , Opt_CfgBlocklayout
+ , Opt_WeightlessBlocklayout
, Opt_CprAnal
, Opt_WorkerWrapper
, Opt_SolveConstantDicts
@@ -1127,9 +1136,86 @@ data DynFlags = DynFlags {
-- | Unique supply configuration for testing build determinism
initialUnique :: Int,
- uniqueIncrement :: Int
+ uniqueIncrement :: Int,
+
+ -- | Temporary: CFG Edge weights for fast iterations
+ cfgWeightInfo :: CfgWeights
}
+-- | Edge weights to use when generating a CFG from CMM
+data CfgWeights
+ = CFGWeights
+ { uncondWeight :: Int
+ , condBranchWeight :: Int
+ , switchWeight :: Int
+ , callWeight :: Int
+ , likelyCondWeight :: Int
+ , unlikelyCondWeight :: Int
+ , infoTablePenalty :: Int
+ , backEdgeBonus :: Int
+ }
+
+defaultCfgWeights :: CfgWeights
+defaultCfgWeights
+ = CFGWeights
+ { uncondWeight = 1000
+ , condBranchWeight = 800
+ , switchWeight = 1
+ , callWeight = -10
+ , likelyCondWeight = 900
+ , unlikelyCondWeight = 300
+ , infoTablePenalty = 300
+ , backEdgeBonus = 400
+ }
+
+parseCfgWeights :: String -> CfgWeights -> CfgWeights
+parseCfgWeights s oldWeights =
+ foldl' (\cfg (n,v) -> update n v cfg) oldWeights assignments
+ where
+ assignments = map assignment $ settings s
+ update "uncondWeight" n w =
+ w {uncondWeight = n}
+ update "condBranchWeight" n w =
+ w {condBranchWeight = n}
+ update "switchWeight" n w =
+ w {switchWeight = n}
+ update "callWeight" n w =
+ w {callWeight = n}
+ update "likelyCondWeight" n w =
+ w {likelyCondWeight = n}
+ update "unlikelyCondWeight" n w =
+ w {unlikelyCondWeight = n}
+ update "infoTablePenalty" n w =
+ w {infoTablePenalty = n}
+ update "backEdgeBonus" n w =
+ w {backEdgeBonus = n}
+ update other _ _
+ = panic $ other ++
+ " is not a cfg weight parameter. " ++
+ exampleString
+ settings s
+ | (s1,rest) <- break (== ',') s
+ , null rest
+ = [s1]
+ | (s1,rest) <- break (== ',') s
+ = [s1] ++ settings (drop 1 rest)
+ | otherwise = panic $ "Invalid cfg parameters." ++ exampleString
+ assignment as
+ | (name, _:val) <- break (== '=') as
+ = (name,read val)
+ | otherwise
+ = panic $ "Invalid cfg parameters." ++ exampleString
+ exampleString = "Example parameters: uncondWeight=1000," ++
+ "condBranchWeight=800,switchWeight=0,callWeight=300" ++
+ ",likelyCondWeight=900,unlikelyCondWeight=300" ++
+ ",infoTablePenalty=300,backEdgeBonus=400"
+
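A hedged usage sketch for the weight configuration above (the bindings below
are illustrative and not part of the patch; the example values are made up).
Weights can be overridden programmatically via record update, or via the
comma-separated key=value string that the -fblock-layout-weights flag (added
below) feeds to parseCfgWeights:

    -- Illustrative only: bump the back-edge bonus via record update ...
    tunedWeights :: CfgWeights
    tunedWeights = defaultCfgWeights { backEdgeBonus = 800 }

    -- ... or parse a key=value list as -fblock-layout-weights does;
    -- unknown keys cause a panic listing the accepted parameters.
    parsedWeights :: CfgWeights
    parsedWeights = parseCfgWeights "uncondWeight=500,callWeight=-5" defaultCfgWeights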
+backendMaintainsCfg :: DynFlags -> Bool
+backendMaintainsCfg dflags = case (platformArch $ targetPlatform dflags) of
+ -- ArchX86 -- Should work but not tested so disabled currently.
+ ArchX86_64 -> True
+ _otherwise -> False
+
class HasDynFlags m where
getDynFlags :: m DynFlags
@@ -1935,7 +2021,8 @@ defaultDynFlags mySettings (myLlvmTargets, myLlvmPasses) =
uniqueIncrement = 1,
reverseErrors = False,
- maxErrors = Nothing
+ maxErrors = Nothing,
+ cfgWeightInfo = defaultCfgWeights
}
defaultWays :: Settings -> [Way]
@@ -3117,6 +3204,8 @@ dynamic_flags_deps = [
(setDumpFlag Opt_D_dump_cmm_info)
, make_ord_flag defGhcFlag "ddump-cmm-cps"
(setDumpFlag Opt_D_dump_cmm_cps)
+ , make_ord_flag defGhcFlag "ddump-cfg-weights"
+ (setDumpFlag Opt_D_dump_cfg_weights)
, make_ord_flag defGhcFlag "ddump-core-stats"
(setDumpFlag Opt_D_dump_core_stats)
, make_ord_flag defGhcFlag "ddump-asm"
@@ -3430,8 +3519,10 @@ dynamic_flags_deps = [
(noArg (\d -> d { floatLamArgs = Nothing }))
, make_ord_flag defFlag "fproc-alignment"
(intSuffix (\n d -> d { cmmProcAlignment = Just n }))
-
-
+ , make_ord_flag defFlag "fblock-layout-weights"
+ (HasArg (\s ->
+ upd (\d -> d { cfgWeightInfo =
+ parseCfgWeights s (cfgWeightInfo d)})))
, make_ord_flag defFlag "fhistory-size"
(intSuffix (\n d -> d { historySize = n }))
, make_ord_flag defFlag "funfolding-creation-threshold"
@@ -3963,6 +4054,8 @@ fFlagsDeps = [
flagHiddenSpec "llvm-tbaa" Opt_LlvmTBAA,
flagHiddenSpec "llvm-fill-undef-with-garbage" Opt_LlvmFillUndefWithGarbage,
flagSpec "loopification" Opt_Loopification,
+ flagSpec "block-layout-cfg" Opt_CfgBlocklayout,
+ flagSpec "block-layout-weightless" Opt_WeightlessBlocklayout,
flagSpec "omit-interface-pragmas" Opt_OmitInterfacePragmas,
flagSpec "omit-yields" Opt_OmitYields,
flagSpec "optimal-applicative-do" Opt_OptimalApplicativeDo,
@@ -4452,6 +4545,8 @@ optLevelFlags -- see Note [Documenting optimisation flags]
, ([1,2], Opt_FullLaziness)
, ([1,2], Opt_IgnoreAsserts)
, ([1,2], Opt_Loopification)
+ , ([1,2], Opt_CfgBlocklayout) -- Experimental
+
, ([1,2], Opt_Specialise)
, ([1,2], Opt_CrossModuleSpecialise)
, ([1,2], Opt_Strictness)
diff --git a/compiler/nativeGen/AsmCodeGen.hs b/compiler/nativeGen/AsmCodeGen.hs
index 79c3440ff6..956528bf81 100644
--- a/compiler/nativeGen/AsmCodeGen.hs
+++ b/compiler/nativeGen/AsmCodeGen.hs
@@ -56,11 +56,13 @@ import qualified RegAlloc.Graph.TrivColorable as Color
import AsmUtils
import TargetReg
import Platform
+import BlockLayout
import Config
import Instruction
import PIC
import Reg
import NCGMonad
+import CFG
import Dwarf
import Debug
@@ -79,10 +81,8 @@ import UniqFM
import UniqSupply
import DynFlags
import Util
-import Unique
import BasicTypes ( Alignment )
-import Digraph
import qualified Pretty
import BufWrite
import Outputable
@@ -151,38 +151,14 @@ The machine-dependent bits break down as follows:
* ["RegisterAlloc"] The (machine-independent) register allocator.
-}
--- -----------------------------------------------------------------------------
--- Top-level of the native codegen
-
-data NcgImpl statics instr jumpDest = NcgImpl {
- cmmTopCodeGen :: RawCmmDecl -> NatM [NatCmmDecl statics instr],
- generateJumpTableForInstr :: instr -> Maybe (NatCmmDecl statics instr),
- getJumpDestBlockId :: jumpDest -> Maybe BlockId,
- canShortcut :: instr -> Maybe jumpDest,
- shortcutStatics :: (BlockId -> Maybe jumpDest) -> statics -> statics,
- shortcutJump :: (BlockId -> Maybe jumpDest) -> instr -> instr,
- pprNatCmmDecl :: NatCmmDecl statics instr -> SDoc,
- maxSpillSlots :: Int,
- allocatableRegs :: [RealReg],
- ncg_x86fp_kludge :: [NatCmmDecl statics instr] -> [NatCmmDecl statics instr],
- ncgExpandTop :: [NatCmmDecl statics instr] -> [NatCmmDecl statics instr],
- ncgAllocMoreStack :: Int -> NatCmmDecl statics instr -> UniqSM (NatCmmDecl statics instr),
- ncgMakeFarBranches :: LabelMap CmmStatics
- -> [NatBasicBlock instr] -> [NatBasicBlock instr],
- extractUnwindPoints :: [instr] -> [UnwindPoint]
- -- ^ given the instruction sequence of a block, produce a list of
- -- the block's 'UnwindPoint's
- -- See Note [What is this unwinding business?] in Debug
- -- and Note [Unwinding information in the NCG] in this module.
- }
-
--------------------
nativeCodeGen :: DynFlags -> Module -> ModLocation -> Handle -> UniqSupply
-> Stream IO RawCmmGroup ()
-> IO UniqSupply
nativeCodeGen dflags this_mod modLoc h us cmms
= let platform = targetPlatform dflags
- nCG' :: (Outputable statics, Outputable instr, Instruction instr)
+ nCG' :: ( Outputable statics, Outputable instr
+ , Outputable jumpDest, Instruction instr)
=> NcgImpl statics instr jumpDest -> IO UniqSupply
nCG' ncgImpl = nativeCodeGen' dflags this_mod modLoc ncgImpl h us cmms
in case platformArch platform of
@@ -200,11 +176,13 @@ nativeCodeGen dflags this_mod modLoc h us cmms
ArchUnknown -> panic "nativeCodeGen: No NCG for unknown arch"
ArchJavaScript-> panic "nativeCodeGen: No NCG for JavaScript"
-x86NcgImpl :: DynFlags -> NcgImpl (Alignment, CmmStatics) X86.Instr.Instr X86.Instr.JumpDest
+x86NcgImpl :: DynFlags -> NcgImpl (Alignment, CmmStatics)
+ X86.Instr.Instr X86.Instr.JumpDest
x86NcgImpl dflags
= (x86_64NcgImpl dflags) { ncg_x86fp_kludge = map x86fp_kludge }
-x86_64NcgImpl :: DynFlags -> NcgImpl (Alignment, CmmStatics) X86.Instr.Instr X86.Instr.JumpDest
+x86_64NcgImpl :: DynFlags -> NcgImpl (Alignment, CmmStatics)
+ X86.Instr.Instr X86.Instr.JumpDest
x86_64NcgImpl dflags
= NcgImpl {
cmmTopCodeGen = X86.CodeGen.cmmTopCodeGen
@@ -221,6 +199,7 @@ x86_64NcgImpl dflags
,ncgExpandTop = id
,ncgMakeFarBranches = const id
,extractUnwindPoints = X86.CodeGen.extractUnwindPoints
+ ,invertCondBranches = X86.CodeGen.invertCondBranches
}
where platform = targetPlatform dflags
@@ -241,6 +220,7 @@ ppcNcgImpl dflags
,ncgExpandTop = id
,ncgMakeFarBranches = PPC.Instr.makeFarBranches
,extractUnwindPoints = const []
+ ,invertCondBranches = \_ _ -> id
}
where platform = targetPlatform dflags
@@ -261,6 +241,7 @@ sparcNcgImpl dflags
,ncgExpandTop = map SPARC.CodeGen.Expand.expandTop
,ncgMakeFarBranches = const id
,extractUnwindPoints = const []
+ ,invertCondBranches = \_ _ -> id
}
--
@@ -269,7 +250,8 @@ sparcNcgImpl dflags
-- default to the panic below. To support allocating extra stack on
-- more platforms provide a definition of ncgAllocMoreStack.
--
-noAllocMoreStack :: Int -> NatCmmDecl statics instr -> UniqSM (NatCmmDecl statics instr)
+noAllocMoreStack :: Int -> NatCmmDecl statics instr
+ -> UniqSM (NatCmmDecl statics instr, [(BlockId,BlockId)])
noAllocMoreStack amount _
= panic $ "Register allocator: out of stack slots (need " ++ show amount ++ ")\n"
++ " If you are trying to compile SHA1.hs from the crypto library then this\n"
@@ -323,7 +305,8 @@ unwinding table).
See also Note [What is this unwinding business?] in Debug.
-}
-nativeCodeGen' :: (Outputable statics, Outputable instr, Instruction instr)
+nativeCodeGen' :: (Outputable statics, Outputable instr,Outputable jumpDest,
+ Instruction instr)
=> DynFlags
-> Module -> ModLocation
-> NcgImpl statics instr jumpDest
@@ -394,7 +377,8 @@ finishNativeGen dflags modLoc bufh@(BufHandle _ _ h) us ngs
where
dump_stats = dumpSDoc dflags alwaysQualify Opt_D_dump_asm_stats "NCG stats"
-cmmNativeGenStream :: (Outputable statics, Outputable instr, Instruction instr)
+cmmNativeGenStream :: (Outputable statics, Outputable instr
+ ,Outputable jumpDest, Instruction instr)
=> DynFlags
-> Module -> ModLocation
-> NcgImpl statics instr jumpDest
@@ -459,7 +443,8 @@ cmmNativeGenStream dflags this_mod modLoc ncgImpl h us cmm_stream ngs
-- | Do native code generation on all these cmms.
--
cmmNativeGens :: forall statics instr jumpDest.
- (Outputable statics, Outputable instr, Instruction instr)
+ (Outputable statics, Outputable instr
+ ,Outputable jumpDest, Instruction instr)
=> DynFlags
-> Module -> ModLocation
-> NcgImpl statics instr jumpDest
@@ -538,7 +523,8 @@ emitNativeCode dflags h sdoc = do
-- Dumping the output of each stage along the way.
-- Global conflict graph and NGC stats
cmmNativeGen
- :: (Outputable statics, Outputable instr, Instruction instr)
+ :: forall statics instr jumpDest. (Instruction instr,
+ Outputable statics, Outputable instr, Outputable jumpDest)
=> DynFlags
-> Module -> ModLocation
-> NcgImpl statics instr jumpDest
@@ -574,30 +560,43 @@ cmmNativeGen dflags this_mod modLoc ncgImpl us fileIds dbgMap cmm count
Opt_D_dump_opt_cmm "Optimised Cmm"
(pprCmmGroup [opt_cmm])
+ let cmmCfg = {-# SCC "getCFG" #-}
+ getCfgProc (cfgWeightInfo dflags) opt_cmm
+
-- generate native code from cmm
- let ((native, lastMinuteImports, fileIds'), usGen) =
+ let ((native, lastMinuteImports, fileIds', nativeCfgWeights), usGen) =
{-# SCC "genMachCode" #-}
initUs us $ genMachCode dflags this_mod modLoc
(cmmTopCodeGen ncgImpl)
- fileIds dbgMap opt_cmm
+ fileIds dbgMap opt_cmm cmmCfg
+
dumpIfSet_dyn dflags
Opt_D_dump_asm_native "Native code"
(vcat $ map (pprNatCmmDecl ncgImpl) native)
+ dumpIfSet_dyn dflags
+ Opt_D_dump_cfg_weights "CFG Weights"
+ (pprEdgeWeights nativeCfgWeights)
+
-- tag instructions with register liveness information
+ -- also drops dead code
+ let livenessCfg = if (backendMaintainsCfg dflags)
+ then Just nativeCfgWeights
+ else Nothing
let (withLiveness, usLive) =
{-# SCC "regLiveness" #-}
initUs usGen
$ mapM (regLiveness platform)
- $ map natCmmTopToLive native
+ -- TODO: Only use CFG for x86
+ $ map (natCmmTopToLive livenessCfg) native
dumpIfSet_dyn dflags
Opt_D_dump_asm_liveness "Liveness annotations added"
(vcat $ map ppr withLiveness)
-- allocate registers
- (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
+ (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear, raStats, stack_updt_blks) <-
if ( gopt Opt_RegsGraph dflags
|| gopt Opt_RegsIterative dflags )
then do
@@ -640,7 +639,8 @@ cmmNativeGen dflags this_mod modLoc ncgImpl us fileIds dbgMap cmm count
return ( alloced, usAlloc
, mPprStats
- , Nothing)
+ , Nothing
+ , [], [])
else do
-- do linear register allocation
@@ -648,15 +648,16 @@ cmmNativeGen dflags this_mod modLoc ncgImpl us fileIds dbgMap cmm count
(alloced, maybe_more_stack, ra_stats) <-
Linear.regAlloc dflags proc
case maybe_more_stack of
- Nothing -> return ( alloced, ra_stats )
+ Nothing -> return ( alloced, ra_stats, [] )
Just amount -> do
- alloced' <- ncgAllocMoreStack ncgImpl amount alloced
- return (alloced', ra_stats )
+ (alloced',stack_updt_blks) <-
+ ncgAllocMoreStack ncgImpl amount alloced
+ return (alloced', ra_stats, stack_updt_blks )
- let ((alloced, regAllocStats), usAlloc)
+ let ((alloced, regAllocStats, stack_updt_blks), usAlloc)
= {-# SCC "RegAlloc-linear" #-}
initUs usLive
- $ liftM unzip
+ $ liftM unzip3
$ mapM reg_alloc withLiveness
dumpIfSet_dyn dflags
@@ -672,7 +673,20 @@ cmmNativeGen dflags this_mod modLoc ncgImpl us fileIds dbgMap cmm count
return ( alloced, usAlloc
, Nothing
- , mPprStats)
+ , mPprStats, (catMaybes regAllocStats)
+ , concat stack_updt_blks )
+
+ -- Fixup blocks the register allocator inserted (from, regMoves, to)
+ let cfgRegAllocUpdates :: [(BlockId,BlockId,BlockId)]
+ cfgRegAllocUpdates = (concatMap Linear.ra_fixupList raStats)
+
+ let cfgWithFixupBlks =
+ addNodesBetween nativeCfgWeights cfgRegAllocUpdates
+
+ -- Insert stack update blocks
+ let postRegCFG =
+ foldl' (\m (from,to) -> addImmediateSuccessor from to m )
+ cfgWithFixupBlks stack_updt_blks
---- x86fp_kludge. This pass inserts ffree instructions to clear
---- the FPU stack on x86. The x86 ABI requires that the FPU stack
@@ -688,20 +702,59 @@ cmmNativeGen dflags this_mod modLoc ncgImpl us fileIds dbgMap cmm count
{-# SCC "generateJumpTables" #-}
generateJumpTables ncgImpl kludged
+ dumpIfSet_dyn dflags
+ Opt_D_dump_cfg_weights "CFG Update information"
+ ( text "stack:" <+> ppr stack_updt_blks $$
+ text "linearAlloc:" <+> ppr cfgRegAllocUpdates )
+
---- shortcut branches
- let shorted =
+ let (shorted, postShortCFG) =
{-# SCC "shortcutBranches" #-}
- shortcutBranches dflags ncgImpl tabled
+ shortcutBranches dflags ncgImpl tabled postRegCFG
+
+ let optimizedCFG =
+ optimizeCFG (cfgWeightInfo dflags) cmm postShortCFG
+
+ dumpIfSet_dyn dflags
+ Opt_D_dump_cfg_weights "CFG Final Weights"
+ ( pprEdgeWeights optimizedCFG )
+
+ --TODO: Partially check validity of the cfg.
+ let getBlks (CmmProc _info _lbl _live (ListGraph blocks)) = blocks
+ getBlks _ = []
+
+ when ( backendMaintainsCfg dflags &&
+ (gopt Opt_DoAsmLinting dflags || debugIsOn )) $ do
+ let blocks = concatMap getBlks shorted
+ let labels = setFromList $ fmap blockId blocks :: LabelSet
+ return $! seq (sanityCheckCfg optimizedCFG labels $
+ text "cfg not in lockstep") ()
---- sequence blocks
- let sequenced =
+ let sequenced :: [NatCmmDecl statics instr]
+ sequenced =
+ checkLayout shorted $
{-# SCC "sequenceBlocks" #-}
- map (sequenceTop ncgImpl) shorted
+ map (BlockLayout.sequenceTop
+ dflags
+ ncgImpl optimizedCFG)
+ shorted
+
+ let branchOpt :: [NatCmmDecl statics instr]
+ branchOpt =
+ {-# SCC "invertCondBranches" #-}
+ map invert sequenced
+ where
+ invertConds = (invertCondBranches ncgImpl) optimizedCFG
+ invert top@CmmData {} = top
+ invert (CmmProc info lbl live (ListGraph blocks)) =
+ CmmProc info lbl live (ListGraph $ invertConds info blocks)
---- expansion of SPARC synthetic instrs
let expanded =
{-# SCC "sparc_expand" #-}
- ncgExpandTop ncgImpl sequenced
+ ncgExpandTop ncgImpl branchOpt
+ --ncgExpandTop ncgImpl sequenced
dumpIfSet_dyn dflags
Opt_D_dump_asm_expanded "Synthetic instructions expanded"
@@ -724,6 +777,24 @@ cmmNativeGen dflags this_mod modLoc ncgImpl us fileIds dbgMap cmm count
, ppr_raStatsLinear
, unwinds )
+-- | Make sure all blocks we want the layout algorithm to place have been placed.
+checkLayout :: [NatCmmDecl statics instr] -> [NatCmmDecl statics instr]
+ -> [NatCmmDecl statics instr]
+checkLayout procsUnsequenced procsSequenced =
+ ASSERT2(setNull diff,
+ ppr "Block sequencing dropped blocks:" <> ppr diff)
+ procsSequenced
+ where
+ blocks1 = foldl' (setUnion) setEmpty $
+ map getBlockIds procsUnsequenced :: LabelSet
+ blocks2 = foldl' (setUnion) setEmpty $
+ map getBlockIds procsSequenced
+ diff = setDifference blocks1 blocks2
+
+ getBlockIds (CmmData _ _) = setEmpty
+ getBlockIds (CmmProc _ _ _ (ListGraph blocks)) =
+ setFromList $ map blockId blocks
+
x86fp_kludge :: NatCmmDecl (Alignment, CmmStatics) X86.Instr.Instr -> NatCmmDecl (Alignment, CmmStatics) X86.Instr.Instr
x86fp_kludge top@(CmmData _ _) = top
@@ -810,107 +881,6 @@ makeImportsDoc dflags imports
doPpr lbl = (lbl, renderWithStyle dflags (pprCLabel platform lbl) astyle)
astyle = mkCodeStyle AsmStyle
-
--- -----------------------------------------------------------------------------
--- Sequencing the basic blocks
-
--- Cmm BasicBlocks are self-contained entities: they always end in a
--- jump, either non-local or to another basic block in the same proc.
--- In this phase, we attempt to place the basic blocks in a sequence
--- such that as many of the local jumps as possible turn into
--- fallthroughs.
-
-sequenceTop
- :: Instruction instr
- => NcgImpl statics instr jumpDest -> NatCmmDecl statics instr -> NatCmmDecl statics instr
-
-sequenceTop _ top@(CmmData _ _) = top
-sequenceTop ncgImpl (CmmProc info lbl live (ListGraph blocks)) =
- CmmProc info lbl live (ListGraph $ ncgMakeFarBranches ncgImpl info $ sequenceBlocks info blocks)
-
--- The algorithm is very simple (and stupid): we make a graph out of
--- the blocks where there is an edge from one block to another iff the
--- first block ends by jumping to the second. Then we topologically
--- sort this graph. Then traverse the list: for each block, we first
--- output the block, then if it has an out edge, we move the
--- destination of the out edge to the front of the list, and continue.
-
--- FYI, the classic layout for basic blocks uses postorder DFS; this
--- algorithm is implemented in Hoopl.
-
-sequenceBlocks
- :: Instruction instr
- => LabelMap i
- -> [NatBasicBlock instr]
- -> [NatBasicBlock instr]
-
-sequenceBlocks _ [] = []
-sequenceBlocks infos (entry:blocks) =
- seqBlocks infos (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
- -- the first block is the entry point ==> it must remain at the start.
-
-
-sccBlocks
- :: Instruction instr
- => [NatBasicBlock instr]
- -> [SCC (Node BlockId (NatBasicBlock instr))]
-
-sccBlocks blocks = stronglyConnCompFromEdgedVerticesUniqR (map mkNode blocks)
-
--- we're only interested in the last instruction of
--- the block, and only if it has a single destination.
-getOutEdges
- :: Instruction instr
- => [instr] -> [BlockId]
-
-getOutEdges instrs
- = case jumpDestsOfInstr (last instrs) of
- [one] -> [one]
- _many -> []
-
-mkNode :: (Instruction t)
- => GenBasicBlock t
- -> Node BlockId (GenBasicBlock t)
-mkNode block@(BasicBlock id instrs) = DigraphNode block id (getOutEdges instrs)
-
-seqBlocks :: LabelMap i -> [Node BlockId (GenBasicBlock t1)]
- -> [GenBasicBlock t1]
-seqBlocks infos blocks = placeNext pullable0 todo0
- where
- -- pullable: Blocks that are not yet placed
- -- todo: Original order of blocks, to be followed if we have no good
- -- reason not to;
- -- may include blocks that have already been placed, but then
- -- these are not in pullable
- pullable0 = listToUFM [ (i,(b,n)) | DigraphNode b i n <- blocks ]
- todo0 = map node_key blocks
-
- placeNext _ [] = []
- placeNext pullable (i:rest)
- | Just (block, pullable') <- lookupDeleteUFM pullable i
- = place pullable' rest block
- | otherwise
- -- We already placed this block, so ignore
- = placeNext pullable rest
-
- place pullable todo (block,[])
- = block : placeNext pullable todo
- place pullable todo (block@(BasicBlock id instrs),[next])
- | mapMember next infos
- = block : placeNext pullable todo
- | Just (nextBlock, pullable') <- lookupDeleteUFM pullable next
- = BasicBlock id (init instrs) : place pullable' todo nextBlock
- | otherwise
- = block : placeNext pullable todo
- place _ _ (_,tooManyNextNodes)
- = pprPanic "seqBlocks" (ppr tooManyNextNodes)
-
-
-lookupDeleteUFM :: Uniquable key => UniqFM elt -> key -> Maybe (elt, UniqFM elt)
-lookupDeleteUFM m k = do -- Maybe monad
- v <- lookupUFM m k
- return (v, delFromUFM m k)
-
-- -----------------------------------------------------------------------------
-- Generate jump tables
@@ -928,26 +898,31 @@ generateJumpTables ncgImpl xs = concatMap f xs
-- Shortcut branches
shortcutBranches
- :: DynFlags
+ :: forall statics instr jumpDest. (Outputable jumpDest) => DynFlags
-> NcgImpl statics instr jumpDest
-> [NatCmmDecl statics instr]
- -> [NatCmmDecl statics instr]
+ -> CFG
+ -> ([NatCmmDecl statics instr],CFG)
-shortcutBranches dflags ncgImpl tops
+shortcutBranches dflags ncgImpl tops weights
| gopt Opt_AsmShortcutting dflags
- = map (apply_mapping ncgImpl mapping) tops'
+ = ( map (apply_mapping ncgImpl mapping) tops'
+ , shortcutWeightMap weights mappingBid )
| otherwise
- = tops
+ = (tops, weights)
where
(tops', mappings) = mapAndUnzip (build_mapping ncgImpl) tops
- mapping = plusUFMList mappings
+ mapping = mapUnions mappings :: LabelMap jumpDest
+ mappingBid = fmap (getJumpDestBlockId ncgImpl) mapping
-build_mapping :: NcgImpl statics instr jumpDest
+build_mapping :: forall instr t d statics jumpDest.
+ NcgImpl statics instr jumpDest
-> GenCmmDecl d (LabelMap t) (ListGraph instr)
- -> (GenCmmDecl d (LabelMap t) (ListGraph instr), UniqFM jumpDest)
-build_mapping _ top@(CmmData _ _) = (top, emptyUFM)
+ -> (GenCmmDecl d (LabelMap t) (ListGraph instr)
+ ,LabelMap jumpDest)
+build_mapping _ top@(CmmData _ _) = (top, mapEmpty)
build_mapping _ (CmmProc info lbl live (ListGraph []))
- = (CmmProc info lbl live (ListGraph []), emptyUFM)
+ = (CmmProc info lbl live (ListGraph []), mapEmpty)
build_mapping ncgImpl (CmmProc info lbl live (ListGraph (head:blocks)))
= (CmmProc info lbl live (ListGraph (head:others)), mapping)
-- drop the shorted blocks, but don't ever drop the first one,
@@ -956,17 +931,18 @@ build_mapping ncgImpl (CmmProc info lbl live (ListGraph (head:blocks)))
-- find all the blocks that just consist of a jump that can be
-- shorted.
-- Don't completely eliminate loops here -- that can leave a dangling jump!
+ shortcut_blocks :: [(BlockId, jumpDest)]
(_, shortcut_blocks, others) =
foldl' split (setEmpty :: LabelSet, [], []) blocks
split (s, shortcut_blocks, others) b@(BasicBlock id [insn])
- | Just jd <- canShortcut ncgImpl insn,
- Just dest <- getJumpDestBlockId ncgImpl jd,
- not (has_info id),
- (setMember dest s) || dest == id -- loop checks
+ | Just jd <- canShortcut ncgImpl insn
+ , Just dest <- getJumpDestBlockId ncgImpl jd
+ , not (has_info id)
+ , (setMember dest s) || dest == id -- loop checks
= (s, shortcut_blocks, b : others)
split (s, shortcut_blocks, others) (BasicBlock id [insn])
- | Just dest <- canShortcut ncgImpl insn,
- not (has_info id)
+ | Just dest <- canShortcut ncgImpl insn
+ , not (has_info id)
= (setInsert id s, (id,dest) : shortcut_blocks, others)
split (s, shortcut_blocks, others) other = (s, shortcut_blocks, other : others)
@@ -974,20 +950,19 @@ build_mapping ncgImpl (CmmProc info lbl live (ListGraph (head:blocks)))
has_info l = mapMember l info
-- build a mapping from BlockId to JumpDest for shorting branches
- mapping = foldl' add emptyUFM shortcut_blocks
- add ufm (id,dest) = addToUFM ufm id dest
+ mapping = mapFromList shortcut_blocks
apply_mapping :: NcgImpl statics instr jumpDest
- -> UniqFM jumpDest
+ -> LabelMap jumpDest
-> GenCmmDecl statics h (ListGraph instr)
-> GenCmmDecl statics h (ListGraph instr)
apply_mapping ncgImpl ufm (CmmData sec statics)
- = CmmData sec (shortcutStatics ncgImpl (lookupUFM ufm) statics)
+ = CmmData sec (shortcutStatics ncgImpl (\bid -> mapLookup bid ufm) statics)
apply_mapping ncgImpl ufm (CmmProc info lbl live (ListGraph blocks))
= CmmProc info lbl live (ListGraph $ map short_bb blocks)
where
short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
- short_insn i = shortcutJump ncgImpl (lookupUFM ufm) i
+ short_insn i = shortcutJump ncgImpl (\bid -> mapLookup bid ufm) i
-- shortcutJump should apply the mapping repeatedly,
-- just in case we can short multiple branches.
@@ -1021,20 +996,25 @@ genMachCode
-> DwarfFiles
-> LabelMap DebugBlock
-> RawCmmDecl
+ -> CFG
-> UniqSM
( [NatCmmDecl statics instr]
, [CLabel]
- , DwarfFiles)
+ , DwarfFiles
+ , CFG
+ )
-genMachCode dflags this_mod modLoc cmmTopCodeGen fileIds dbgMap cmm_top
+genMachCode dflags this_mod modLoc cmmTopCodeGen fileIds dbgMap cmm_top cmm_cfg
= do { initial_us <- getUniqueSupplyM
; let initial_st = mkNatM_State initial_us 0 dflags this_mod
- modLoc fileIds dbgMap
+ modLoc fileIds dbgMap cmm_cfg
(new_tops, final_st) = initNat initial_st (cmmTopCodeGen cmm_top)
final_delta = natm_delta final_st
final_imports = natm_imports final_st
+ final_cfg = natm_cfg final_st
; if final_delta == 0
- then return (new_tops, final_imports, natm_fileid final_st)
+ then return (new_tops, final_imports
+ , natm_fileid final_st, final_cfg)
else pprPanic "genMachCode: nonzero final delta" (int final_delta)
}
diff --git a/compiler/nativeGen/BlockLayout.hs b/compiler/nativeGen/BlockLayout.hs
new file mode 100644
index 0000000000..72aea5bf10
--- /dev/null
+++ b/compiler/nativeGen/BlockLayout.hs
@@ -0,0 +1,819 @@
+--
+-- Copyright (c) 2018 Andreas Klebinger
+--
+
+{-# LANGUAGE TypeFamilies, ScopedTypeVariables, CPP #-}
+
+{-# OPTIONS_GHC -fprof-auto #-}
+--{-# OPTIONS_GHC -ddump-simpl -ddump-to-file -ddump-cmm #-}
+
+module BlockLayout
+ ( sequenceTop )
+where
+
+#include "HsVersions.h"
+import GhcPrelude
+
+import Instruction
+import NCGMonad
+import CFG
+
+import BlockId
+import Cmm
+import Hoopl.Collections
+import Hoopl.Label
+import Hoopl.Block
+
+import DynFlags (gopt, GeneralFlag(..), DynFlags, backendMaintainsCfg)
+import UniqFM
+import Util
+import Unique
+
+import Digraph
+import Outputable
+import Maybes
+
+-- DEBUGGING ONLY
+--import Debug
+--import Debug.Trace
+import ListSetOps (removeDups)
+import PprCmm ()
+
+import OrdList
+import Data.List
+import Data.Foldable (toList)
+import Hoopl.Graph
+
+import qualified Data.Set as Set
+import Control.Applicative
+
+{-
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ~~~ Note [Chain based CFG serialization]
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ For additional information also look at
+ https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/CodeLayout
+
+ We have a CFG with edge weights based on which we try to place blocks next to
+ each other.
+
+ Edge weights not only represent the likelihood of control transfer between blocks
+ but also how much a block would benefit from being placed sequentially after
+ its predecessor.
+ For example, blocks which are preceded by an info table are more likely to end
+ up in a different cache line than their predecessor. So there is less benefit
+ in placing them sequentially.
+
+ Consider this example:
+
+ A: ...
+ jmp cond D (weak successor)
+ jmp B
+ B: ...
+ jmp C
+ C: ...
+ jmp X
+ D: ...
+ jmp B (weak successor)
+
+ We determine a block layout by building up chunks (calling them chains) of
+ possible control flows for which blocks will be placed sequentially.
+
+ Eg for our example we might end up with two chains like:
+ [A->B->C->X],[D]. Blocks inside chains will always be placed sequentially.
+ However there is no particular order in which chains are placed since
+ (hopefully) the blocks for which sequential placement is important have already
+ been placed in the same chain.
+
+ -----------------------------------------------------------------------------
+ First try to create a list of good chains.
+ -----------------------------------------------------------------------------
+
+ We do so by taking a block not yet placed in a chain and
+ looking at these cases:
+
+ *) Check if the best predecessor of the block is at the end of a chain.
+ If so add the current block to the end of that chain.
+
+ Eg if we look at block C and already have the chain (A -> B)
+ then we extend the chain to (A -> B -> C).
+
+ Combined with the fact that we process blocks in reverse post order
+ this means loop bodies and trivially sequential control flow already
+ ends up as a single chain.
+
+ *) Otherwise we create a singleton chain from the block we are looking at.
+ Eg if we have from the example above already constructed (A->B)
+ and look at D we create the chain (D) resulting in the chains [A->B, D]
+
+ -----------------------------------------------------------------------------
+ We then try to fuse chains.
+ -----------------------------------------------------------------------------
+
+ There are edge cases which result in two chains being created which trivially
+ represent linear control flow. For example we might have the chains
+ [(A-B-C),(D-E)] with a cfg triangle:
+
+ A----->C->D->E
+ \->B-/
+
+ We also get three independent chains if two branches end with a jump
+ to a common successor.
+
+ We take care of these cases by fusing chains which are connected by an
+ edge.
+
+ We do so by looking at the list of edges sorted by weight.
+ Given the edge (C -> D) we try to find two chains such that:
+ * C is at the end of chain one.
+ * D is in front of chain two.
+ * If two such chains exist we fuse them.
+ We then remove the edge and repeat the process for the rest of the edges.
+
+ -----------------------------------------------------------------------------
+ Place indirect successors (neighbours) after each other
+ -----------------------------------------------------------------------------
+
+ We might have chains [A,B,C,X],[E] in a CFG of the sort:
+
+ A ---> B ---> C --------> X(exit)
+ \- ->E- -/
+
+ While E does not follow X it's still beneficial to place them near each other.
+ This can be advantageous if eg C,X,E will end up in the same cache line.
+
+ TODO: If we remove edges as we use them (eg if we build up A->B remove A->B
+ from the list) we could save some more work in later phases.
+
+
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ~~~ Note [Triangle Control Flow]
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Checking if an argument is already evaluated leads to a somewhat
+ special case which looks like this:
+
+ A:
+ if (R1 & 7 != 0) goto Leval; else goto Lwork;
+ Leval: // global
+ call (I64[R1])(R1) returns to Lwork, args: 8, res: 8, upd: 8;
+ Lwork: // global
+ ...
+
+ A
+ |\
+ | Leval
+ |/ - (This edge can be missing because of optimizations)
+ Lwork
+
+ Once we hit the metal the call instruction is just 2-3 bytes long,
+ depending on the register used. So we lay out the assembly like this:
+
+ movq %rbx,%rax
+ andl $7,%eax
+ cmpq $1,%rax
+ jne Lwork
+ Leval:
+ jmp *(%rbx) # encoded in 2-3 bytes.
+ <info table>
+ Lwork:
+ ...
+
+ We could explicitly check for this control flow pattern.
+
+ This is advantageous because:
+ * It's optimal if the argument isn't evaluated.
+ * If it's evaluated we only have the extra cost of jumping over
+ the 2-3 bytes for the call.
+ * Guarantees the smaller encoding for the conditional jump.
+
+ However given that Lwork usually has an info table we
+ penalize this edge. So Leval should get placed first
+ either way and things work out for the best.
+
+ Optimizing for the evaluated case instead would penalize
+ the other code path. It adds a jump as we can't fall through
+ to Lwork because of the info table.
+ Assuming that Lwork is large the chance that the "call" ends up
+ in the same cache line is also fairly small.
+
+-}
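As a toy model of the chain building and fusing described in these notes (a
standalone, hedged sketch; the real implementation below works on BlockChain
values, LabelMaps and weighted edges):

    import Data.List (find)

    -- Chains as plain lists of block ids, in layout order (non-empty).
    type ToyChain = [Int]

    -- Fuse two chains when an edge (from, to) connects the end of one
    -- chain to the front of another, mirroring the fuseChains step:
    -- fuseOn (3,4) [[1,2,3],[4,5]] == [[1,2,3,4,5]]
    fuseOn :: (Int, Int) -> [ToyChain] -> [ToyChain]
    fuseOn (from, to) chains
      | Just c1 <- find (\c -> last c == from) chains
      , Just c2 <- find (\c -> head c == to) chains
      , c1 /= c2
      = (c1 ++ c2) : filter (\c -> c /= c1 && c /= c2) chains
    fuseOn _ chains = chains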
+
+
+-- | Look at X number of blocks in two chains to determine
+-- if they are "neighbours".
+neighbourOverlapp :: Int
+neighbourOverlapp = 2
+
+-- | Only edges heavier than this are considered
+-- for fusing two chains into a single chain.
+fuseEdgeThreshold :: EdgeWeight
+fuseEdgeThreshold = 0
+
+
+-- | A non empty ordered sequence of basic blocks.
+-- It is suitable for serialization in this order.
+data BlockChain
+ = BlockChain
+ { chainMembers :: !LabelSet
+ , chainBlocks :: !BlockSequence
+ }
+
+instance Eq (BlockChain) where
+ (BlockChain s1 _) == (BlockChain s2 _)
+ = s1 == s2
+
+instance Outputable (BlockChain) where
+ ppr (BlockChain _ blks) =
+ parens (text "Chain:" <+> ppr (seqToList $ blks) )
+
+data WeightedEdge = WeightedEdge !BlockId !BlockId EdgeWeight deriving (Eq)
+
+-- Useful for things like sets and debugging purposes, sorts by blocks
+-- in the chain.
+instance Ord (BlockChain) where
+ (BlockChain lbls1 _) `compare` (BlockChain lbls2 _)
+ = lbls1 `compare` lbls2
+
+-- | Non deterministic! (Uniques) Sorts edges by weight and nodes.
+instance Ord WeightedEdge where
+ compare (WeightedEdge from1 to1 weight1)
+ (WeightedEdge from2 to2 weight2)
+ | weight1 < weight2 || weight1 == weight2 && from1 < from2 ||
+ weight1 == weight2 && from1 == from2 && to1 < to2
+ = LT
+ | from1 == from2 && to1 == to2 && weight1 == weight2
+ = EQ
+ | otherwise
+ = GT
+
+instance Outputable WeightedEdge where
+ ppr (WeightedEdge from to info) =
+ ppr from <> text "->" <> ppr to <> brackets (ppr info)
+
+type WeightedEdgeList = [WeightedEdge]
+
+noDups :: [BlockChain] -> Bool
+noDups chains =
+ let chainBlocks = concatMap chainToBlocks chains :: [BlockId]
+ (_blocks, dups) = removeDups compare chainBlocks
+ in if null dups then True
+ else pprTrace "Duplicates:" (ppr (map toList dups) $$ text "chains" <+> ppr chains ) False
+
+inFront :: BlockId -> BlockChain -> Bool
+inFront bid (BlockChain _ seq)
+ = seqFront seq == bid
+
+chainMember :: BlockId -> BlockChain -> Bool
+chainMember bid chain
+ = setMember bid . chainMembers $ chain
+
+chainSingleton :: BlockId -> BlockChain
+chainSingleton lbl
+ = BlockChain (setSingleton lbl) (Singleton lbl)
+
+chainSnoc :: BlockChain -> BlockId -> BlockChain
+chainSnoc (BlockChain lbls blks) lbl
+ = BlockChain (setInsert lbl lbls) (seqSnoc blks lbl)
+
+chainConcat :: BlockChain -> BlockChain -> BlockChain
+chainConcat (BlockChain lbls1 blks1) (BlockChain lbls2 blks2)
+ = BlockChain (setUnion lbls1 lbls2) (blks1 `seqConcat` blks2)
+
+chainToBlocks :: BlockChain -> [BlockId]
+chainToBlocks (BlockChain _ blks) = seqToList blks
+
+-- | Given the Chain A -> B -> C -> D and we break at C
+-- we get the two Chains (A -> B, C -> D) as result.
+breakChainAt :: BlockId -> BlockChain
+ -> (BlockChain,BlockChain)
+breakChainAt bid (BlockChain lbls blks)
+ | not (setMember bid lbls)
+ = panic "Block not in chain"
+ | otherwise
+ = let (lblks, rblks) = break (\lbl -> lbl == bid)
+ (seqToList blks)
+ --TODO: Remove old
+ --lblSet :: [GenBasicBlock i] -> BlockChain
+ --lblSet blks =
+ -- setFromList
+ --(map (\(BasicBlock lbl _) -> lbl) $ toList blks)
+ in
+ (BlockChain (setFromList lblks) (seqFromBids lblks),
+ BlockChain (setFromList rblks) (seqFromBids rblks))
+
+takeR :: Int -> BlockChain -> [BlockId]
+takeR n (BlockChain _ blks) =
+ take n . seqToRList $ blks
+
+
+takeL :: Int -> BlockChain -> [BlockId]
+takeL n (BlockChain _ blks) = --error "TODO: takeLn"
+ take n . seqToList $ blks
+
+-- | For a given list of chains try to fuse chains with strong
+-- edges between them into a single chain.
+-- Returns the list of fused chains together with a set of
+-- used edges. The set of edges is indirectly encoded in the
+-- chains so doesn't need to be considered for later passes.
+fuseChains :: WeightedEdgeList -> LabelMap BlockChain
+ -> (LabelMap BlockChain, Set.Set WeightedEdge)
+fuseChains weights chains
+ = let fronts = mapFromList $
+ map (\chain -> (head $ takeL 1 chain,chain)) $
+ mapElems chains :: LabelMap BlockChain
+ (chains', used, _) = applyEdges weights chains fronts Set.empty
+ in (chains', used)
+ where
+ applyEdges :: WeightedEdgeList -> LabelMap BlockChain
+ -> LabelMap BlockChain -> Set.Set WeightedEdge
+ -> (LabelMap BlockChain, Set.Set WeightedEdge, LabelMap BlockChain)
+ applyEdges [] chainsEnd chainsFront used
+ = (chainsEnd, used, chainsFront)
+ applyEdges (edge@(WeightedEdge from to w):edges) chainsEnd chainsFront used
+ --Since we order edges descending by weight we can stop here
+ | w <= fuseEdgeThreshold
+ = ( chainsEnd, used, chainsFront)
+ --Fuse the two chains
+ | Just c1 <- mapLookup from chainsEnd
+ , Just c2 <- mapLookup to chainsFront
+ , c1 /= c2
+ = let newChain = chainConcat c1 c2
+ front = head $ takeL 1 newChain
+ end = head $ takeR 1 newChain
+ chainsFront' = mapInsert front newChain $
+ mapDelete to chainsFront
+ chainsEnd' = mapInsert end newChain $
+ mapDelete from chainsEnd
+ in applyEdges edges chainsEnd' chainsFront'
+ (Set.insert edge used)
+ | otherwise
+ --Check next edge
+ = applyEdges edges chainsEnd chainsFront used
+
+
+-- See also Note [Chain based CFG serialization]
+-- We have the chains (A-B-C-D) and (E-F) and an Edge C->E.
+--
+-- While placing the latter after the former doesn't result in sequential
+-- control flow, it is still beneficial since blocks C and E might end
+-- up in the same cache line.
+--
+-- So we place these chains next to each other even if we can't fuse them.
+--
+-- A -> B -> C -> D
+-- v
+-- - -> E -> F ...
+--
+-- Simple heuristic to chose which chains we want to combine:
+-- * Process edges in descending priority.
+-- * Check if there is an edge near the end of one chain which goes
+-- to a block near the start of another chain.
+--
+-- While we could take into account the space between the two blocks which
+-- share an edge this blows up compile times quite a bit. It requires
+-- us to find all edges between two chains, check the distance for all edges,
+-- rank them based on the distance, and only then could we select two chains
+-- to combine, which would add a lot of complexity for little gain.
+
+-- | For a given list of chains and edges try to combine chains with strong
+-- edges between them.
+combineNeighbourhood :: WeightedEdgeList -> [BlockChain]
+ -> [BlockChain]
+combineNeighbourhood edges chains
+ = -- pprTraceIt "Neigbours" $
+ applyEdges edges endFrontier startFrontier
+ where
+ --Build maps from chain ends to chains
+ endFrontier, startFrontier :: FrontierMap
+ endFrontier =
+ mapFromList $ concatMap (\chain ->
+ let ends = getEnds chain
+ entry = (ends,chain)
+ in map (\x -> (x,entry)) ends ) chains
+ startFrontier =
+ mapFromList $ concatMap (\chain ->
+ let front = getFronts chain
+ entry = (front,chain)
+ in map (\x -> (x,entry)) front) chains
+ applyEdges :: WeightedEdgeList -> FrontierMap -> FrontierMap
+ -> [BlockChain]
+ applyEdges [] chainEnds _chainFronts =
+ ordNub $ map snd $ mapElems chainEnds
+ applyEdges ((WeightedEdge from to _w):edges) chainEnds chainFronts
+ | Just (c1_e,c1) <- mapLookup from chainEnds
+ , Just (c2_f,c2) <- mapLookup to chainFronts
+ , c1 /= c2 -- Avoid trying to concat a short chain with itself.
+ = let newChain = chainConcat c1 c2
+ newChainFrontier = getFronts newChain
+ newChainEnds = getEnds newChain
+ newFronts :: FrontierMap
+ newFronts =
+ let withoutOld =
+ foldl' (\m b -> mapDelete b m :: FrontierMap) chainFronts (c2_f ++ getFronts c1)
+ entry =
+ (newChainFrontier,newChain) --let bound to ensure sharing
+ in foldl' (\m x -> mapInsert x entry m)
+ withoutOld newChainFrontier
+
+ newEnds =
+ let withoutOld = foldl' (\m b -> mapDelete b m) chainEnds (c1_e ++ getEnds c2)
+ entry = (newChainEnds,newChain) --let bound to ensure sharing
+ in foldl' (\m x -> mapInsert x entry m)
+ withoutOld newChainEnds
+ in
+ -- pprTrace "ApplyEdges"
+ -- (text "before" $$
+ -- text "fronts" <+> ppr chainFronts $$
+ -- text "ends" <+> ppr chainEnds $$
+
+ -- text "various" $$
+ -- text "newChain" <+> ppr newChain $$
+ -- text "newChainFrontier" <+> ppr newChainFrontier $$
+ -- text "newChainEnds" <+> ppr newChainEnds $$
+ -- text "drop" <+> ppr ((c2_f ++ getFronts c1) ++ (c1_e ++ getEnds c2)) $$
+
+ -- text "after" $$
+ -- text "fronts" <+> ppr newFronts $$
+ -- text "ends" <+> ppr newEnds
+ -- )
+ applyEdges edges newEnds newFronts
+ | otherwise
+ = --pprTrace "noNeigbours" (ppr ()) $
+ applyEdges edges chainEnds chainFronts
+ where
+
+ getFronts chain = takeL neighbourOverlapp chain
+ getEnds chain = takeR neighbourOverlapp chain
+
+
+
+-- See Note [Chain based CFG serialization]
+buildChains :: CFG -> [BlockId]
+ -> ( LabelMap BlockChain -- Resulting chains.
+ , Set.Set (BlockId, BlockId)) --List of fused edges.
+buildChains succWeights blocks
+ = let (_, fusedEdges, chains) = buildNext setEmpty mapEmpty blocks Set.empty
+ in (chains, fusedEdges)
+ where
+ -- We keep a map from the last block in a chain to the chain itself.
+ -- This way we can easily check if a block should be appended to an
+ -- existing chain!
+ buildNext :: LabelSet
+ -> LabelMap BlockChain -- Map from last element to chain.
+ -> [BlockId] -- Blocks to place
+ -> Set.Set (BlockId, BlockId)
+ -> ( [BlockChain] -- Placed Blocks
+ , Set.Set (BlockId, BlockId) --List of fused edges
+ , LabelMap BlockChain
+ )
+ buildNext _placed chains [] linked =
+ ([], linked, chains)
+ buildNext placed chains (block:todo) linked
+ | setMember block placed
+ = buildNext placed chains todo linked
+ | otherwise
+ = buildNext placed' chains' todo linked'
+ where
+ placed' = (foldl' (flip setInsert) placed placedBlocks)
+ linked' = Set.union linked linkedEdges
+ (placedBlocks, chains', linkedEdges) = findChain block
+
+ --Add the block to an existing or new chain
+ --Returns placed blocks, list of resulting chains
+ --and fused edges
+ findChain :: BlockId
+ -> ([BlockId],LabelMap BlockChain, Set.Set (BlockId, BlockId))
+ findChain block
+ -- B) place block at end of existing chain if
+ -- there is no better block to append.
+ | (pred:_) <- preds
+ , alreadyPlaced pred
+ , Just predChain <- mapLookup pred chains
+ , (best:_) <- filter (not . alreadyPlaced) $ getSuccs pred
+ , best == lbl
+ = --pprTrace "B.2)" (ppr (pred,lbl)) $
+ let newChain = chainSnoc predChain block
+ chainMap = mapInsert lbl newChain $ mapDelete pred chains
+ in ( [lbl]
+ , chainMap
+ , Set.singleton (pred,lbl) )
+
+ | otherwise
+ = --pprTrace "single" (ppr lbl)
+ ( [lbl]
+ , mapInsert lbl (chainSingleton lbl) chains
+ , Set.empty)
+ where
+ alreadyPlaced blkId = (setMember blkId placed)
+ lbl = block
+ getSuccs = map fst . getSuccEdgesSorted succWeights
+ preds = map fst $ getSuccEdgesSorted predWeights lbl
+ --For efficiency we also create the map to look up predecessors here
+ predWeights = reverseEdges succWeights
+
+
+
+-- We make the CFG a Hoopl Graph, so we can reuse revPostOrder.
+newtype BlockNode e x = BN (BlockId,[BlockId])
+instance NonLocal (BlockNode) where
+ entryLabel (BN (lbl,_)) = lbl
+ successors (BN (_,succs)) = succs
+
+fromNode :: BlockNode C C -> BlockId
+fromNode (BN x) = fst x
+
+sequenceChain :: forall a i. (Instruction i, Outputable i) => LabelMap a -> CFG
+ -> [GenBasicBlock i] -> [GenBasicBlock i]
+sequenceChain _info _weights [] = []
+sequenceChain _info _weights [x] = [x]
+sequenceChain info weights' blocks@((BasicBlock entry _):_) =
+ --Optimization, delete edges of weight <= 0.
+ --This significantly improves performance whenever
+ --we iterate over all edges, which is a few times!
+ let weights :: CFG
+ weights
+ = filterEdges (\_f _t edgeInfo -> edgeWeight edgeInfo > 0) weights'
+ blockMap :: LabelMap (GenBasicBlock i)
+ blockMap
+ = foldl' (\m blk@(BasicBlock lbl _ins) ->
+ mapInsert lbl blk m)
+ mapEmpty blocks
+
+ toNode :: BlockId -> BlockNode C C
+ toNode bid =
+ -- sorted such that heavier successors come first.
+ BN (bid,map fst . getSuccEdgesSorted weights' $ bid)
+
+ orderedBlocks :: [BlockId]
+ orderedBlocks
+ = map fromNode $
+ revPostorderFrom (fmap (toNode . blockId) blockMap) entry
+
+ (builtChains, builtEdges)
+ = {-# SCC "buildChains" #-}
+ --pprTraceIt "generatedChains" $
+ --pprTrace "orderedBlocks" (ppr orderedBlocks) $
+ buildChains weights orderedBlocks
+
+ rankedEdges :: WeightedEdgeList
+ -- Sort edges descending, remove fused eges
+ rankedEdges =
+ map (\(from, to, weight) -> WeightedEdge from to weight) .
+ filter (\(from, to, _)
+ -> not (Set.member (from,to) builtEdges)) .
+ sortWith (\(_,_,w) -> - w) $ weightedEdgeList weights
+
+ (fusedChains, fusedEdges)
+ = ASSERT(noDups $ mapElems builtChains)
+ {-# SCC "fuseChains" #-}
+ --(pprTrace "RankedEdges" $ ppr rankedEdges) $
+ --pprTraceIt "FusedChains" $
+ fuseChains rankedEdges builtChains
+
+ rankedEdges' =
+ filter (\edge -> not $ Set.member edge fusedEdges) $ rankedEdges
+
+ neighbourChains
+ = ASSERT(noDups $ mapElems fusedChains)
+ {-# SCC "groupNeighbourChains" #-}
+ --pprTraceIt "ResultChains" $
+ combineNeighbourhood rankedEdges' (mapElems fusedChains)
+
+ --Make sure the first block stays first
+ ([entryChain],chains')
+ = ASSERT(noDups $ neighbourChains)
+ partition (chainMember entry) neighbourChains
+ (entryChain':entryRest)
+ | inFront entry entryChain = [entryChain]
+ | (rest,entry) <- breakChainAt entry entryChain
+ = [entry,rest]
+ | otherwise = pprPanic "Entry point eliminated" $
+ ppr ([entryChain],chains')
+
+ prepedChains
+ = entryChain':(entryRest++chains') :: [BlockChain]
+ blockList
+ -- = (concatMap chainToBlocks prepedChains)
+ = (concatMap seqToList $ map chainBlocks prepedChains)
+
+ --chainPlaced = setFromList $ map blockId blockList :: LabelSet
+ chainPlaced = setFromList $ blockList :: LabelSet
+ unplaced =
+ let blocks = mapKeys blockMap
+ isPlaced b = setMember (b) chainPlaced
+ in filter (\block -> not (isPlaced block)) blocks
+
+ placedBlocks =
+ --pprTraceIt "placedBlocks" $
+ blockList ++ unplaced
+        getBlock bid = expectJust "Block placement" $ mapLookup bid blockMap
+ in
+ --Assert we placed all blocks given as input
+ ASSERT(all (\bid -> mapMember bid blockMap) placedBlocks)
+ dropJumps info $ map getBlock placedBlocks
+
+dropJumps :: forall a i. Instruction i => LabelMap a -> [GenBasicBlock i]
+ -> [GenBasicBlock i]
+dropJumps _ [] = []
+dropJumps info ((BasicBlock lbl ins):todo)
+    | not . null $ ins --Empty blocks can occur because of shortcutting
+ , [dest] <- jumpDestsOfInstr (last ins)
+ , ((BasicBlock nextLbl _) : _) <- todo
+ , not (mapMember dest info)
+ , nextLbl == dest
+ = BasicBlock lbl (init ins) : dropJumps info todo
+ | otherwise
+ = BasicBlock lbl ins : dropJumps info todo
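+
+-- For illustration (a sketch, labels hypothetical): given the layout
+--
+--    BasicBlock A [.., jmp B]
+--    BasicBlock B [..]
+--
+-- where B directly follows A and B has no info table, dropJumps
+-- removes the trailing jump so that A simply falls through to B.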
+
+
+-- -----------------------------------------------------------------------------
+-- Sequencing the basic blocks
+
+-- Cmm BasicBlocks are self-contained entities: they always end in a
+-- jump, either non-local or to another basic block in the same proc.
+-- In this phase, we attempt to place the basic blocks in a sequence
+-- such that as many of the local jumps as possible turn into
+-- fallthroughs.
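+--
+-- As a hedged illustration (labels hypothetical): given blocks A, B and C
+-- where A ends in a jump to C and B ends in a jump to A, the order
+-- [B, A, C] turns both local jumps into fallthroughs, while the order
+-- [A, B, C] keeps both as explicit jumps.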
+
+sequenceTop
+ :: (Instruction instr, Outputable instr)
+  => DynFlags -- Determines which layout algorithm to use
+ -> NcgImpl statics instr jumpDest -> CFG
+ -> NatCmmDecl statics instr -> NatCmmDecl statics instr
+
+sequenceTop _ _ _ top@(CmmData _ _) = top
+sequenceTop dflags ncgImpl edgeWeights
+ (CmmProc info lbl live (ListGraph blocks))
+ | (gopt Opt_CfgBlocklayout dflags) && backendMaintainsCfg dflags
+ --Use chain based algorithm
+ = CmmProc info lbl live ( ListGraph $ ncgMakeFarBranches ncgImpl info $
+ sequenceChain info edgeWeights blocks )
+ | otherwise
+ --Use old algorithm
+ = CmmProc info lbl live ( ListGraph $ ncgMakeFarBranches ncgImpl info $
+ sequenceBlocks cfg info blocks)
+ where
+ cfg
+ | (gopt Opt_WeightlessBlocklayout dflags) ||
+ (not $ backendMaintainsCfg dflags)
+ -- Don't make use of cfg in the old algorithm
+ = Nothing
+ -- Use cfg in the old algorithm
+ | otherwise = Just edgeWeights
+
+-- The old algorithm:
+-- It is very simple (and stupid): We make a graph out of
+-- the blocks where there is an edge from one block to another iff the
+-- first block ends by jumping to the second. Then we topologically
+-- sort this graph. Then traverse the list: for each block, we first
+-- output the block, then if it has an out edge, we move the
+-- destination of the out edge to the front of the list, and continue.
+
+-- FYI, the classic layout for basic blocks uses postorder DFS; this
+-- algorithm is implemented in Hoopl.
+
+sequenceBlocks :: Instruction inst => Maybe CFG -> LabelMap a
+ -> [GenBasicBlock inst] -> [GenBasicBlock inst]
+sequenceBlocks _edgeWeight _ [] = []
+sequenceBlocks edgeWeights infos (entry:blocks) =
+ let entryNode = mkNode edgeWeights entry
+ bodyNodes = reverse
+ (flattenSCCs (sccBlocks edgeWeights blocks))
+ in dropJumps infos . seqBlocks infos $ ( entryNode : bodyNodes)
+ -- the first block is the entry point ==> it must remain at the start.
+
+sccBlocks
+ :: Instruction instr
+ => Maybe CFG -> [NatBasicBlock instr]
+ -> [SCC (Node BlockId (NatBasicBlock instr))]
+sccBlocks edgeWeights blocks =
+ stronglyConnCompFromEdgedVerticesUniqR
+ (map (mkNode edgeWeights) blocks)
+
+mkNode :: (Instruction t)
+ => Maybe CFG -> GenBasicBlock t
+ -> Node BlockId (GenBasicBlock t)
+mkNode edgeWeights block@(BasicBlock id instrs) =
+ DigraphNode block id outEdges
+ where
+ outEdges :: [BlockId]
+ outEdges
+ --Select the heaviest successor, ignore weights <= zero
+ = successor
+ where
+ successor
+ | Just successors <- fmap (`getSuccEdgesSorted` id)
+ edgeWeights -- :: Maybe [(Label, EdgeInfo)]
+ = case successors of
+ [] -> []
+ ((target,info):_)
+ | length successors > 2 || edgeWeight info <= 0 -> []
+ | otherwise -> [target]
+ | otherwise
+ = case jumpDestsOfInstr (last instrs) of
+ [one] -> [one]
+ _many -> []
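+
+-- A hedged example: if the sorted successor edges of `id` are
+-- [(B,100),(C,50)] we emit the single out-edge [B]; with more than
+-- two successors, or a heaviest edge of non-positive weight, we emit
+-- no out-edge at all.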
+
+
+seqBlocks :: LabelMap i -> [Node BlockId (GenBasicBlock t1)]
+ -> [GenBasicBlock t1]
+seqBlocks infos blocks = placeNext pullable0 todo0
+ where
+ -- pullable: Blocks that are not yet placed
+ -- todo: Original order of blocks, to be followed if we have no good
+ -- reason not to;
+ -- may include blocks that have already been placed, but then
+ -- these are not in pullable
+ pullable0 = listToUFM [ (i,(b,n)) | DigraphNode b i n <- blocks ]
+ todo0 = map node_key blocks
+
+ placeNext _ [] = []
+ placeNext pullable (i:rest)
+ | Just (block, pullable') <- lookupDeleteUFM pullable i
+ = place pullable' rest block
+ | otherwise
+ -- We already placed this block, so ignore
+ = placeNext pullable rest
+
+ place pullable todo (block,[])
+ = block : placeNext pullable todo
+ place pullable todo (block@(BasicBlock id instrs),[next])
+ | mapMember next infos
+ = block : placeNext pullable todo
+ | Just (nextBlock, pullable') <- lookupDeleteUFM pullable next
+ = BasicBlock id instrs : place pullable' todo nextBlock
+ | otherwise
+ = block : placeNext pullable todo
+ place _ _ (_,tooManyNextNodes)
+ = pprPanic "seqBlocks" (ppr tooManyNextNodes)
+
+
+lookupDeleteUFM :: Uniquable key => UniqFM elt -> key
+ -> Maybe (elt, UniqFM elt)
+lookupDeleteUFM m k = do -- Maybe monad
+ v <- lookupUFM m k
+ return (v, delFromUFM m k)
+
+-- -------------------------------------------------------------------
+-- Some specialized data structures to speed things up:
+-- * BlockSequence: A specialized version of Data.Sequence.
+--    Better at indexing at the front/end but lacks the ability
+--    to do lookup by position.
+
+type FrontierMap = LabelMap ([BlockId],BlockChain)
+
+-- | A "reverse zipper" of sorts.
+-- We store a list of blocks in two parts: the initial part from left to right
+-- and the remaining part stored in reverse order. This makes it easy to look
+-- at the last/first element and to append on both sides.
+data BlockSequence
+ = Singleton !BlockId
+ | Pair (OrdList BlockId) (OrdList BlockId)
+  -- ^ For a non-empty pair there is at least one element in the left part.
+ | Empty
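+
+-- A small sketch of the invariant (hypothetical ids b1, b2, b3):
+--
+--    seqSnoc (seqSnoc (Singleton b1) b2) b3
+--      == Pair (unitOL b1) (b3 `consOL` unitOL b2)
+--
+-- The right part is kept reversed, so seqSnoc stays cheap and
+-- seqToList recovers [b1,b2,b3] by reversing it back.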
+
+seqFront :: BlockSequence -> BlockId
+seqFront Empty = panic "Empty sequence"
+seqFront (Singleton bid) = bid
+seqFront (Pair lefts rights) = expectJust "Seq invariant" $
+ listToMaybe (fromOL lefts) <|> listToMaybe (fromOL $ reverseOL rights)
+
+-- seqEnd :: BlockSequence -> BlockId
+-- seqEnd Empty = panic "Empty sequence"
+-- seqEnd (Singleton bid) = bid
+-- seqEnd (Pair lefts rights) = expectJust "Seq invariant" $
+-- listToMaybe (fromOL rights) <|> listToMaybe (fromOL $ reverseOL lefts)
+
+seqToList :: BlockSequence -> [BlockId]
+seqToList Empty = []
+seqToList (Singleton bid) = [bid]
+seqToList (Pair lefts rights) = fromOL $ lefts `appOL` reverseOL rights
+
+
+seqToRList :: BlockSequence -> [BlockId]
+seqToRList Empty = []
+seqToRList (Singleton bid) = [bid]
+seqToRList (Pair lefts rights) = fromOL $ rights `appOL` reverseOL lefts
+
+seqSnoc :: BlockSequence -> BlockId -> BlockSequence
+seqSnoc (Empty) bid = Singleton bid
+seqSnoc (Singleton s) bid = Pair (unitOL s) (unitOL bid)
+seqSnoc (Pair lefts rights) bid = Pair lefts (bid `consOL` rights)
+
+seqConcat :: BlockSequence -> BlockSequence -> BlockSequence
+seqConcat (Empty) x2 = x2
+seqConcat (Singleton b1) (Singleton b2) = Pair (unitOL b1) (unitOL b2)
+seqConcat x1 (Empty) = x1
+seqConcat (Singleton b1) (Pair lefts rights) = Pair (b1 `consOL` lefts) rights
+seqConcat (Pair lefts rights) (Singleton b2) = Pair lefts (b2 `consOL` rights)
+seqConcat (Pair lefts1 rights1) (Pair lefts2 rights2) =
+ Pair (lefts1 `appOL` (reverseOL rights1) `appOL` lefts2) rights2
+
+seqFromBids :: [BlockId] -> BlockSequence
+seqFromBids [] = Empty
+seqFromBids [b1] = Singleton b1
+seqFromBids [b1,b2] = Pair (unitOL b1) (unitOL b2)
+seqFromBids [b1,b2,b3] = Pair (consOL b1 $ unitOL b2) (unitOL b3)
+seqFromBids (b1:b2:b3:bs) = Pair (toOL [b1,b2,b3]) (toOL bs)
diff --git a/compiler/nativeGen/CFG.hs b/compiler/nativeGen/CFG.hs
new file mode 100644
index 0000000000..a52c92f429
--- /dev/null
+++ b/compiler/nativeGen/CFG.hs
@@ -0,0 +1,635 @@
+--
+-- Copyright (c) 2018 Andreas Klebinger
+--
+
+{-# LANGUAGE TypeFamilies, ScopedTypeVariables #-}
+{-# LANGUAGE TupleSections #-}
+{-# LANGUAGE GeneralizedNewtypeDeriving #-}
+{-# LANGUAGE CPP #-}
+
+module CFG
+ ( CFG, CfgEdge(..), EdgeInfo(..), EdgeWeight(..)
+ , TransitionSource(..)
+
+ --Modify the CFG
+ , addWeightEdge, addEdge, delEdge
+ , addNodesBetween, shortcutWeightMap
+ , reverseEdges, filterEdges
+ , addImmediateSuccessor
+ , mkWeightInfo, adjustEdgeWeight
+
+ --Query the CFG
+ , infoEdgeList, edgeList
+ , getSuccessorEdges, getSuccessors
+ , getSuccEdgesSorted, weightedEdgeList
+ , getEdgeInfo
+ , getCfgNodes, hasNode
+
+ --Construction/Misc
+ , getCfg, getCfgProc, pprEdgeWeights, sanityCheckCfg
+
+ --Find backedges and update their weight
+ , optimizeCFG )
+where
+
+#include "HsVersions.h"
+
+import GhcPrelude
+
+import BlockId
+import Cmm ( RawCmmDecl, GenCmmDecl( .. ), CmmBlock, succ, g_entry
+ , CmmGraph )
+import CmmNode
+import CmmUtils
+import CmmSwitch
+import Hoopl.Collections
+import Hoopl.Label
+import Hoopl.Block
+import qualified Hoopl.Graph as G
+
+import Util
+import Digraph
+
+import Outputable
+-- DEBUGGING ONLY
+--import Debug
+--import OrdList
+--import Debug.Trace
+import PprCmm ()
+import qualified DynFlags as D
+
+import Data.List
+
+-- import qualified Data.IntMap.Strict as M --TODO: LabelMap
+
+type Edge = (BlockId, BlockId)
+type Edges = [Edge]
+
+newtype EdgeWeight
+ = EdgeWeight Int
+ deriving (Eq,Ord,Enum,Num,Real,Integral)
+
+instance Outputable EdgeWeight where
+ ppr (EdgeWeight w) = ppr w
+
+type EdgeInfoMap edgeInfo = LabelMap (LabelMap edgeInfo)
+
+-- | A control flow graph where edges have been annotated with a weight.
+type CFG = EdgeInfoMap EdgeInfo
+
+data CfgEdge
+ = CfgEdge
+ { edgeFrom :: !BlockId
+ , edgeTo :: !BlockId
+ , edgeInfo :: !EdgeInfo
+ }
+
+-- | Careful! Since we assume there is at most one edge from A to B,
+-- the Eq instance does not consider weight.
+instance Eq CfgEdge where
+ (==) (CfgEdge from1 to1 _) (CfgEdge from2 to2 _)
+ = from1 == from2 && to1 == to2
+
+-- | Edges are ordered ascending by weight, then source, then destination.
+instance Ord CfgEdge where
+ compare (CfgEdge from1 to1 (EdgeInfo {edgeWeight = weight1}))
+ (CfgEdge from2 to2 (EdgeInfo {edgeWeight = weight2}))
+ | weight1 < weight2 || weight1 == weight2 && from1 < from2 ||
+ weight1 == weight2 && from1 == from2 && to1 < to2
+ = LT
+ | from1 == from2 && to1 == to2 && weight1 == weight2
+ = EQ
+ | otherwise
+ = GT
+
+instance Outputable CfgEdge where
+ ppr (CfgEdge from1 to1 edgeInfo)
+ = parens (ppr from1 <+> text "-(" <> ppr edgeInfo <> text ")->" <+> ppr to1)
+
+-- | Can we trace back an edge to a specific Cmm node,
+-- or was it introduced during codegen? We use this to maintain
+-- some information which would otherwise be lost during the
+-- Cmm <-> asm transition.
+-- See also Note [Inverting Conditional Branches]
+data TransitionSource
+ = CmmSource (CmmNode O C)
+ | AsmCodeGen
+ deriving (Eq)
+
+-- | Information about edges
+data EdgeInfo
+ = EdgeInfo
+ { transitionSource :: !TransitionSource
+ , edgeWeight :: !EdgeWeight
+ } deriving (Eq)
+
+instance Outputable EdgeInfo where
+ ppr edgeInfo = text "weight:" <+> ppr (edgeWeight edgeInfo)
+
+-- Allow specialization
+{-# INLINEABLE mkWeightInfo #-}
+-- | Convenience function; generate edge info for a weight
+-- that does not originate from Cmm.
+mkWeightInfo :: Integral n => n -> EdgeInfo
+mkWeightInfo = EdgeInfo AsmCodeGen . fromIntegral
+
+-- | Adjust the weight between the blocks using the given function.
+-- If there is no such edge, the original map is returned.
+adjustEdgeWeight :: CFG -> (EdgeWeight -> EdgeWeight)
+ -> BlockId -> BlockId -> CFG
+adjustEdgeWeight cfg f from to
+ | Just info <- getEdgeInfo from to cfg
+ , weight <- edgeWeight info
+ = addEdge from to (info { edgeWeight = f weight}) cfg
+ | otherwise = cfg
+
+getCfgNodes :: CFG -> LabelSet
+getCfgNodes m = mapFoldMapWithKey (\k v -> setFromList (k:mapKeys v)) m
+
+hasNode :: CFG -> BlockId -> Bool
+hasNode m node = mapMember node m || any (mapMember node) m
+
+-- | Check if the nodes in the cfg and the set of blocks are the same.
+-- In case of a mismatch we panic and show the difference.
+sanityCheckCfg :: CFG -> LabelSet -> SDoc -> Bool
+sanityCheckCfg m blockSet msg
+ | blockSet == cfgNodes
+ = True
+ | otherwise =
+ pprPanic "Block list and cfg nodes don't match" (
+ text "difference:" <+> ppr diff $$
+ text "blocks:" <+> ppr blockSet $$
+ text "cfg:" <+> ppr m $$
+ msg )
+ False
+ where
+ cfgNodes = getCfgNodes m :: LabelSet
+ diff = (setUnion cfgNodes blockSet) `setDifference` (setIntersection cfgNodes blockSet) :: LabelSet
+
+-- | Filter the CFG with a custom function f.
+-- Parameters are `f from to edgeInfo`.
+filterEdges :: (BlockId -> BlockId -> EdgeInfo -> Bool) -> CFG -> CFG
+filterEdges f cfg =
+ mapMapWithKey filterSources cfg
+ where
+ filterSources from m =
+ mapFilterWithKey (\to w -> f from to w) m
+
+
+{- Note [Updating the CFG during shortcutting]
+
+See Note [What is shortcutting] in the control flow optimization
+code (CmmContFlowOpt.hs) for a slightly more in-depth explanation of shortcutting.
+
+In the native backend we shortcut jumps at the assembly level. (AsmCodeGen.hs)
+This means we remove blocks containing only one jump from the code
+and instead redirect all jumps targeting this block to the deleted
+block's jump target.
+
+However we want to have an accurate representation of control
+flow in the CFG. So we add/remove edges accordingly to account
+for the eliminated blocks and new edges.
+
+If we shortcut A -> B -> C to A -> C:
+* We delete edges A -> B and B -> C
+* Replacing them with the edge A -> C
+
+We also try to preserve jump weights while doing so.
+
+Note that:
+* The edge B -> C can't have interesting weights since
+ the block B consists of a single unconditional jump without branching.
+* We delete the edge A -> B and add the edge A -> C.
+* The edge A -> B can be one of many edges originating from A so likely
+ has edge weights we want to preserve.
+
+For this reason we simply store the edge info from the original A -> B
+edge and apply this information to the new edge A -> C.
+
+Sometimes we have a scenario where jump target C is not represented by a
+BlockId but by an immediate value. I'm currently only aware of this
+happening without tables-next-to-code.
+
+Then we go from A ---> B - -> IMM to A - -> IMM where the dashed arrows
+are not stored in the CFG.
+
+In that case we simply delete the edge A -> B.
+
+In terms of implementation the native backend first builds a mapping
+from blocks suitable for shortcutting to their jump targets.
+Then it redirects all jump instructions to these blocks using the
+built up mapping.
+This function (shortcutWeightMap) takes the same mapping and
+applies it to the CFG in the way laid out above.
+
+-}
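+-- A hedged example (labels hypothetical): given the edges A -> B and
+-- B -> C in `cfg`,
+--
+--    shortcutWeightMap cfg (mapFromList [(B, Just C)])
+--
+-- removes B together with both of its edges and inserts A -> C,
+-- carrying over the EdgeInfo of the old A -> B edge. With an entry
+-- (B, Nothing) the edge A -> B is simply dropped instead.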
+shortcutWeightMap :: CFG -> LabelMap (Maybe BlockId) -> CFG
+shortcutWeightMap cfg cuts =
+ foldl' applyMapping cfg $ mapToList cuts
+ where
+    -- Takes the tuple (B,C) from the notation in
+    -- Note [Updating the CFG during shortcutting]
+ applyMapping :: CFG -> (BlockId,Maybe BlockId) -> CFG
+ --Shortcut immediate
+ applyMapping m (from, Nothing) =
+ mapDelete from .
+ fmap (mapDelete from) $ m
+ --Regular shortcut
+ applyMapping m (from, Just to) =
+ let updatedMap :: CFG
+ updatedMap
+ = fmap (shortcutEdge (from,to)) $
+ (mapDelete from m :: CFG )
+ --Sometimes we can shortcut multiple blocks like so:
+ -- A -> B -> C -> D -> E => A -> E
+ -- so we check for such chains.
+ in case mapLookup to cuts of
+ Nothing -> updatedMap
+ Just dest -> applyMapping updatedMap (to, dest)
+ --Redirect edge from B to C
+ shortcutEdge :: (BlockId, BlockId) -> LabelMap EdgeInfo -> LabelMap EdgeInfo
+ shortcutEdge (from, to) m =
+ case mapLookup from m of
+ Just info -> mapInsert to info $ mapDelete from m
+ Nothing -> m
+
+-- | Sometimes we insert a block which should unconditionally be executed
+-- after a given block. This function updates the CFG for these cases.
+-- So we get   A -> B    =>   A -> A' -> B
+--              \                     \
+--               -> C    =>            -> C
+--
+addImmediateSuccessor :: BlockId -> BlockId -> CFG -> CFG
+addImmediateSuccessor node follower cfg
+ = updateEdges . addWeightEdge node follower uncondWeight $ cfg
+ where
+ uncondWeight = fromIntegral . D.uncondWeight .
+ D.cfgWeightInfo $ D.unsafeGlobalDynFlags
+ targets = getSuccessorEdges cfg node
+ successors = map fst targets :: [BlockId]
+ updateEdges = addNewSuccs . remOldSuccs
+ remOldSuccs m = foldl' (flip (delEdge node)) m successors
+ addNewSuccs m =
+ foldl' (\m' (t,info) -> addEdge follower t info m') m targets
+
+-- | Adds a new edge, overwrites existing edges if present
+addEdge :: BlockId -> BlockId -> EdgeInfo -> CFG -> CFG
+addEdge from to info cfg =
+ mapAlter addDest from cfg
+ where
+ addDest Nothing = Just $ mapSingleton to info
+ addDest (Just wm) = Just $ mapInsert to info wm
+
+-- | Adds an edge with the given weight to the cfg.
+-- If an edge between these blocks already exists it is overwritten.
+-- `addWeightEdge from to weight cfg`
+addWeightEdge :: BlockId -> BlockId -> EdgeWeight -> CFG -> CFG
+addWeightEdge from to weight cfg =
+ addEdge from to (mkWeightInfo weight) cfg
+
+delEdge :: BlockId -> BlockId -> CFG -> CFG
+delEdge from to m =
+ mapAlter remDest from m
+ where
+ remDest Nothing = Nothing
+ remDest (Just wm) = Just $ mapDelete to wm
+
+-- | Destinations from bid ordered by weight (descending)
+getSuccEdgesSorted :: CFG -> BlockId -> [(BlockId,EdgeInfo)]
+getSuccEdgesSorted m bid =
+ let destMap = mapFindWithDefault mapEmpty bid m
+ cfgEdges = mapToList destMap
+ sortedEdges = sortWith (negate . edgeWeight . snd) cfgEdges
+ in --pprTrace "getSuccEdgesSorted" (ppr bid <+> text "map:" <+> ppr m)
+ sortedEdges
+
+-- | Get successors of a given node with edge weights.
+getSuccessorEdges :: CFG -> BlockId -> [(BlockId,EdgeInfo)]
+getSuccessorEdges m bid = maybe [] mapToList $ mapLookup bid m
+
+getEdgeInfo :: BlockId -> BlockId -> CFG -> Maybe EdgeInfo
+getEdgeInfo from to m
+ | Just wm <- mapLookup from m
+ , Just info <- mapLookup to wm
+ = Just $! info
+ | otherwise
+ = Nothing
+
+reverseEdges :: CFG -> CFG
+reverseEdges cfg = foldr add mapEmpty flatElems
+ where
+ elems = mapToList $ fmap mapToList cfg :: [(BlockId,[(BlockId,EdgeInfo)])]
+ flatElems =
+ concatMap (\(from,ws) -> map (\(to,info) -> (to,from,info)) ws ) elems
+ add (to,from,info) m = addEdge to from info m
+
+-- | Returns an unordered list of all edges with info
+infoEdgeList :: CFG -> [CfgEdge]
+infoEdgeList m =
+ mapFoldMapWithKey
+ (\from toMap ->
+ map (\(to,info) -> CfgEdge from to info) (mapToList toMap))
+ m
+
+-- | Unordered list of edges with weight as a tuple (from,to,weight)
+weightedEdgeList :: CFG -> [(BlockId,BlockId,EdgeWeight)]
+weightedEdgeList m =
+ mapFoldMapWithKey
+ (\from toMap ->
+ map (\(to,info) ->
+ (from,to, edgeWeight info)) (mapToList toMap))
+ m
+ -- (\(from, tos) -> map (\(to,info) -> (from,to, edgeWeight info)) tos )
+
+-- | Returns an unordered list of all edges without weights
+edgeList :: CFG -> [Edge]
+edgeList m =
+ mapFoldMapWithKey (\from toMap -> fmap (from,) (mapKeys toMap)) m
+
+-- | Get successors of a given node without edge weights.
+getSuccessors :: CFG -> BlockId -> [BlockId]
+getSuccessors m bid
+ | Just wm <- mapLookup bid m
+ = mapKeys wm
+ | otherwise = []
+
+pprEdgeWeights :: CFG -> SDoc
+pprEdgeWeights m =
+ let edges = sort $ weightedEdgeList m
+ printEdge (from, to, weight)
+ = text "\t" <> ppr from <+> text "->" <+> ppr to <>
+ text "[label=\"" <> ppr weight <> text "\",weight=\"" <>
+ ppr weight <> text "\"];\n"
+      --for the case that there are no edges from/to this node.
+      --This should rarely happen, but it can save a lot of time
+      --to immediately see it when it does.
+ printNode node
+ = text "\t" <> ppr node <> text ";\n"
+ getEdgeNodes (from, to, _weight) = [from,to]
+ edgeNodes = setFromList $ concatMap getEdgeNodes edges :: LabelSet
+ nodes = filter (\n -> (not . setMember n) edgeNodes) . mapKeys $ mapFilter null m
+ in
+ text "digraph {\n" <>
+ (foldl' (<>) empty (map printEdge edges)) <>
+ (foldl' (<>) empty (map printNode nodes)) <>
+ text "}\n"
+
+{-# INLINE updateEdgeWeight #-} --Allows eliminating the tuple when possible
+updateEdgeWeight :: (EdgeWeight -> EdgeWeight) -> Edge -> CFG -> CFG
+updateEdgeWeight f (from, to) cfg
+ | Just oldInfo <- getEdgeInfo from to cfg
+ = let oldWeight = edgeWeight oldInfo
+ newWeight = f oldWeight
+ in addEdge from to (oldInfo {edgeWeight = newWeight}) cfg
+ | otherwise
+ = panic "Trying to update invalid edge"
+
+-- from to oldWeight => newWeight
+mapWeights :: (BlockId -> BlockId -> EdgeWeight -> EdgeWeight) -> CFG -> CFG
+mapWeights f cfg =
+ foldl' (\cfg (CfgEdge from to info) ->
+ let oldWeight = edgeWeight info
+ newWeight = f from to oldWeight
+ in addEdge from to (info {edgeWeight = newWeight}) cfg)
+ cfg (infoEdgeList cfg)
+
+
+-- | Insert a block in the control flow between two other blocks.
+-- We pass a list of tuples (A,B,C) where
+-- * A -> C: Old edge
+-- * A -> B -> C : New Arc, where B is the new block.
+-- It's possible that a block has two jumps to the same block
+-- in the assembly code. However we still only store a single edge for
+-- these cases.
+-- We assign the old edge info to the edge A -> B and assign B -> C the
+-- weight of an unconditional jump.
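+-- For example (a sketch, labels hypothetical): for the update (A,B,C),
+-- where A -> C previously had weight w, the result contains A -> B
+-- with weight w and B -> C with the configured unconditional-jump
+-- weight, while the edge A -> C itself is deleted.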
+addNodesBetween :: CFG -> [(BlockId,BlockId,BlockId)] -> CFG
+addNodesBetween m updates =
+ foldl' updateWeight m .
+ weightUpdates $ updates
+ where
+ weight = fromIntegral . D.uncondWeight .
+ D.cfgWeightInfo $ D.unsafeGlobalDynFlags
+    -- We might add two blocks for different jumps along a single
+    -- edge, so we end up with edges: A -> B -> C, A -> D -> C.
+    -- In this case, after applying the first update, the weight for
+    -- A -> C is no longer available. So we calculate all future
+    -- weights before applying any updates.
+ weightUpdates = map getWeight
+ getWeight :: (BlockId,BlockId,BlockId) -> (BlockId,BlockId,BlockId,EdgeInfo)
+ getWeight (from,between,old)
+ | Just edgeInfo <- getEdgeInfo from old m
+ = (from,between,old,edgeInfo)
+ | otherwise
+ = pprPanic "Can't find weight for edge that should have one" (
+ text "triple" <+> ppr (from,between,old) $$
+ text "updates" <+> ppr updates )
+ updateWeight :: CFG -> (BlockId,BlockId,BlockId,EdgeInfo) -> CFG
+ updateWeight m (from,between,old,edgeInfo)
+ = addEdge from between edgeInfo .
+ addWeightEdge between old weight .
+ delEdge from old $ m
+
+{-
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ~~~ Note [CFG Edge Weights] ~~~
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  Edge weights assigned do not currently represent a specific
+  cost model. Rather they are just a ranking of which blocks should
+  be placed next to each other given their connection type in
+  the CFG.
+  This is especially relevant whenever two blocks will
+  jump to the same target.
+
+ A B
+ \ /
+ C
+
+ Should A or B be placed in front of C? The block layout algorithm
+ decides this based on which edge (A,C)/(B,C) is heavier. So we
+  make an educated guess at how often execution will transfer control
+  along each edge, as well as how much we gain by placing e.g. A before
+  C.
+
+ We rank edges in this order:
+ * Unconditional Control Transfer - They will always
+    transfer control to their target. Unless there is an info table
+ we can turn the jump into a fallthrough as well.
+ We use 20k as default, so it's easy to spot if values have been
+ modified but unlikely that we run into issues with overflow.
+ * If branches (likely) - We assume branches marked as likely
+ are taken more than 80% of the time.
+ By ranking them below unconditional jumps we make sure we
+ prefer the unconditional if there is a conditional and
+ unconditional edge towards a block.
+ * If branches (regular) - The false branch can potentially be turned
+ into a fallthrough so we prefer it slightly over the true branch.
+ * Unlikely branches - These can be assumed to be taken less than 20%
+    of the time. So we give them one of the lowest priorities.
+  * Switches - Switches at this level are implemented as jump tables
+    and hence have a larger number of successors. Without more
+    information we can only say that each individual successor is
+    unlikely to be jumped to, and we rank them accordingly.
+  * Calls - We currently ignore calls completely:
+    * By the time we return from a call there is a good chance
+      that the address we return to has already been evicted from
+      cache, eliminating a main advantage sequential placement brings.
+    * Calls always require an info table in front of their return
+      address. This further reduces the chance that we return to
+      the same cache line.
+
+
+-}
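+-- As a hedged illustration, the intended relative ordering of the
+-- configurable defaults (see D.cfgWeightInfo in DynFlags) is roughly:
+--
+--    uncondWeight > likelyCondWeight > condBranchWeight
+--      > unlikelyCondWeight > switchWeight > callWeight
+--
+-- The absolute values do not form a cost model; only their relative
+-- order matters for layout decisions.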
+-- | Generate weights for a Cmm proc based on some simple heuristics.
+getCfgProc :: D.CfgWeights -> RawCmmDecl -> CFG
+getCfgProc _ (CmmData {}) = mapEmpty
+getCfgProc weights (CmmProc _info _lab _live graph) =
+ getCfg weights graph
+
+
+getCfg :: D.CfgWeights -> CmmGraph -> CFG
+getCfg weights graph =
+ foldl' insertEdge edgelessCfg $ concatMap getBlockEdges blocks
+ where
+ D.CFGWeights
+ { D.uncondWeight = uncondWeight
+ , D.condBranchWeight = condBranchWeight
+ , D.switchWeight = switchWeight
+ , D.callWeight = callWeight
+ , D.likelyCondWeight = likelyCondWeight
+ , D.unlikelyCondWeight = unlikelyCondWeight
+ -- Last two are used in other places
+ --, D.infoTablePenalty = infoTablePenalty
+ --, D.backEdgeBonus = backEdgeBonus
+ } = weights
+    -- Explicitly add all blocks to the map, so that even blocks
+    -- without outgoing edges are part of the CFG.
+ edgelessCfg = mapFromList $ zip (map G.entryLabel blocks) (repeat mapEmpty)
+ insertEdge :: CFG -> ((BlockId,BlockId),EdgeInfo) -> CFG
+ insertEdge m ((from,to),weight) =
+ mapAlter f from m
+ where
+ f :: Maybe (LabelMap EdgeInfo) -> Maybe (LabelMap EdgeInfo)
+ f Nothing = Just $ mapSingleton to weight
+ f (Just destMap) = Just $ mapInsert to weight destMap
+ getBlockEdges :: CmmBlock -> [((BlockId,BlockId),EdgeInfo)]
+ getBlockEdges block =
+ case branch of
+ CmmBranch dest -> [mkEdge dest uncondWeight]
+ CmmCondBranch _c t f l
+ | l == Nothing ->
+ [mkEdge f condBranchWeight, mkEdge t condBranchWeight]
+ | l == Just True ->
+ [mkEdge f unlikelyCondWeight, mkEdge t likelyCondWeight]
+ | l == Just False ->
+ [mkEdge f likelyCondWeight, mkEdge t unlikelyCondWeight]
+ (CmmSwitch _e ids) ->
+ let switchTargets = switchTargetsToList ids
+ --Compiler performance hack - for very wide switches don't
+ --consider targets for layout.
+ adjustedWeight =
+ if (length switchTargets > 10) then -1 else switchWeight
+ in map (\x -> mkEdge x adjustedWeight) switchTargets
+ (CmmCall { cml_cont = Just cont}) -> [mkEdge cont callWeight]
+ (CmmForeignCall {Cmm.succ = cont}) -> [mkEdge cont callWeight]
+ (CmmCall { cml_cont = Nothing }) -> []
+ other ->
+            panic "getBlockEdges: unknown successor" $
+            ASSERT2(False, ppr "Unknown successor cause:" <>
+ (ppr branch <+> text "=>" <> ppr (G.successors other)))
+ map (\x -> ((bid,x),mkEdgeInfo 0)) $ G.successors other
+ where
+ bid = G.entryLabel block
+ mkEdgeInfo = EdgeInfo (CmmSource branch) . fromIntegral
+ mkEdge target weight = ((bid,target), mkEdgeInfo weight)
+ branch = lastNode block :: CmmNode O C
+
+ blocks = revPostorder graph :: [CmmBlock]
+
+--Find back edges by BFS
+findBackEdges :: BlockId -> CFG -> Edges
+findBackEdges root cfg =
+ --pprTraceIt "Backedges:" $
+ map fst .
+ filter (\x -> snd x == Backward) $ typedEdges
+ where
+ edges = edgeList cfg :: [(BlockId,BlockId)]
+ getSuccs = getSuccessors cfg :: BlockId -> [BlockId]
+ typedEdges =
+ classifyEdges root getSuccs edges :: [((BlockId,BlockId),EdgeType)]
+
+
+optimizeCFG :: D.CfgWeights -> RawCmmDecl -> CFG -> CFG
+optimizeCFG _ (CmmData {}) cfg = cfg
+optimizeCFG weights (CmmProc info _lab _live graph) cfg =
+ favourFewerPreds .
+ penalizeInfoTables info .
+ increaseBackEdgeWeight (g_entry graph) $ cfg
+ where
+
+    -- | Increase the weight of all backedges in the CFG.
+    -- This helps to make loop jump-backs the heaviest edges.
+ increaseBackEdgeWeight :: BlockId -> CFG -> CFG
+ increaseBackEdgeWeight root cfg =
+ let backedges = findBackEdges root cfg
+ update weight
+ --Keep irrelevant edges irrelevant
+ | weight <= 0 = 0
+ | otherwise
+ = weight + fromIntegral (D.backEdgeBonus weights)
+ in foldl' (\cfg edge -> updateEdgeWeight update edge cfg)
+ cfg backedges
+
+    -- | Since we can't fall through to info tables, we penalize edges to them.
+ penalizeInfoTables :: LabelMap a -> CFG -> CFG
+ penalizeInfoTables info cfg =
+ mapWeights fupdate cfg
+ where
+ fupdate :: BlockId -> BlockId -> EdgeWeight -> EdgeWeight
+ fupdate _ to weight
+ | mapMember to info
+ = weight - (fromIntegral $ D.infoTablePenalty weights)
+ | otherwise = weight
+
+
+{- Note [Optimize for Fallthrough]
+
+If a block has two successor edges of equal weight we nudge the
+weights such that the successor with fewer predecessors is preferred.
+That successor is more likely to end up as a fallthrough during
+layout; see favourFewerPreds below.
+
+-}
+ -- | If a block has two successors, favour the one with fewer
+ -- predecessors. (As that one is more likely to become a fallthrough)
+ favourFewerPreds :: CFG -> CFG
+ favourFewerPreds cfg =
+ let
+ revCfg =
+ reverseEdges $ filterEdges
+ (\_from -> fallthroughTarget) cfg
+
+ predCount n = length $ getSuccessorEdges revCfg n
+ nodes = getCfgNodes cfg
+
+ modifiers :: Int -> Int -> (EdgeWeight, EdgeWeight)
+ modifiers preds1 preds2
+ | preds1 < preds2 = ( 1,-1)
+ | preds1 == preds2 = ( 0, 0)
+ | otherwise = (-1, 1)
+
+ update cfg node
+ | [(s1,e1),(s2,e2)] <- getSuccessorEdges cfg node
+ , w1 <- edgeWeight e1
+ , w2 <- edgeWeight e2
+          --Only change the weights if there isn't already an ordering.
+ , w1 == w2
+ , (mod1,mod2) <- modifiers (predCount s1) (predCount s2)
+ = (\cfg' ->
+ (adjustEdgeWeight cfg' (+mod2) node s2))
+ (adjustEdgeWeight cfg (+mod1) node s1)
+ | otherwise
+ = cfg
+ in setFoldl update cfg nodes
+ where
+ fallthroughTarget :: BlockId -> EdgeInfo -> Bool
+ fallthroughTarget to (EdgeInfo source _weight)
+ | mapMember to info = False
+ | AsmCodeGen <- source = True
+ | CmmSource (CmmBranch {}) <- source = True
+ | CmmSource (CmmCondBranch {}) <- source = True
+ | otherwise = False
diff --git a/compiler/nativeGen/NCGMonad.hs b/compiler/nativeGen/NCGMonad.hs
index b9532e17b5..c22a656d2a 100644
--- a/compiler/nativeGen/NCGMonad.hs
+++ b/compiler/nativeGen/NCGMonad.hs
@@ -9,11 +9,15 @@
-- -----------------------------------------------------------------------------
module NCGMonad (
+ NcgImpl(..),
NatM_State(..), mkNatM_State,
NatM, -- instance Monad
initNat,
addImportNat,
+ addNodeBetweenNat,
+ addImmediateSuccessorNat,
+ updateCfgNat,
getUniqueNat,
mapAccumLNat,
setDeltaNat,
@@ -57,6 +61,39 @@ import Module
import Control.Monad ( liftM, ap )
+import Instruction
+import Outputable (SDoc, pprPanic, ppr)
+import Cmm (RawCmmDecl, CmmStatics)
+import CFG
+
+data NcgImpl statics instr jumpDest = NcgImpl {
+ cmmTopCodeGen :: RawCmmDecl -> NatM [NatCmmDecl statics instr],
+ generateJumpTableForInstr :: instr -> Maybe (NatCmmDecl statics instr),
+ getJumpDestBlockId :: jumpDest -> Maybe BlockId,
+ canShortcut :: instr -> Maybe jumpDest,
+ shortcutStatics :: (BlockId -> Maybe jumpDest) -> statics -> statics,
+ shortcutJump :: (BlockId -> Maybe jumpDest) -> instr -> instr,
+ pprNatCmmDecl :: NatCmmDecl statics instr -> SDoc,
+ maxSpillSlots :: Int,
+ allocatableRegs :: [RealReg],
+ ncg_x86fp_kludge :: [NatCmmDecl statics instr] -> [NatCmmDecl statics instr],
+ ncgExpandTop :: [NatCmmDecl statics instr] -> [NatCmmDecl statics instr],
+ ncgAllocMoreStack :: Int -> NatCmmDecl statics instr
+ -> UniqSM (NatCmmDecl statics instr, [(BlockId,BlockId)]),
+ -- ^ The list of block ids records the redirected jumps to allow us to update
+ -- the CFG.
+ ncgMakeFarBranches :: LabelMap CmmStatics -> [NatBasicBlock instr] -> [NatBasicBlock instr],
+ extractUnwindPoints :: [instr] -> [UnwindPoint],
+ -- ^ given the instruction sequence of a block, produce a list of
+ -- the block's 'UnwindPoint's
+ -- See Note [What is this unwinding business?] in Debug
+ -- and Note [Unwinding information in the NCG] in this module.
+ invertCondBranches :: CFG -> LabelMap CmmStatics -> [NatBasicBlock instr]
+ -> [NatBasicBlock instr]
+ -- ^ Turn the sequence of `jcc l1; jmp l2` into `jncc l2; <block_l1>`
+ -- when possible.
+ }
+
data NatM_State
= NatM_State {
natm_us :: UniqSupply,
@@ -67,7 +104,11 @@ data NatM_State
natm_this_module :: Module,
natm_modloc :: ModLocation,
natm_fileid :: DwarfFiles,
- natm_debug_map :: LabelMap DebugBlock
+ natm_debug_map :: LabelMap DebugBlock,
+ natm_cfg :: CFG
+      -- ^ Having a CFG with additional information is essential for
+      -- some operations. However, we can't reconstruct all information
+      -- once we have generated instructions. So instead we update the
+      -- CFG as we go.
}
type DwarfFiles = UniqFM (FastString, Int)
@@ -78,9 +119,21 @@ unNat :: NatM a -> NatM_State -> (a, NatM_State)
unNat (NatM a) = a
mkNatM_State :: UniqSupply -> Int -> DynFlags -> Module -> ModLocation ->
- DwarfFiles -> LabelMap DebugBlock -> NatM_State
+ DwarfFiles -> LabelMap DebugBlock -> CFG -> NatM_State
mkNatM_State us delta dflags this_mod
- = NatM_State us delta [] Nothing dflags this_mod
+ = \loc dwf dbg cfg ->
+ NatM_State
+ { natm_us = us
+ , natm_delta = delta
+ , natm_imports = []
+ , natm_pic = Nothing
+ , natm_dflags = dflags
+ , natm_this_module = this_mod
+ , natm_modloc = loc
+ , natm_fileid = dwf
+ , natm_debug_map = dbg
+ , natm_cfg = cfg
+ }
initNat :: NatM_State -> NatM a -> (a, NatM_State)
initNat init_st m
@@ -151,6 +204,36 @@ addImportNat :: CLabel -> NatM ()
addImportNat imp
= NatM $ \ st -> ((), st {natm_imports = imp : natm_imports st})
+updateCfgNat :: (CFG -> CFG) -> NatM ()
+updateCfgNat f
+ = NatM $ \ st -> ((), st { natm_cfg = f (natm_cfg st) })
+
+-- | Record that we added a block between `from` and `to`.
+addNodeBetweenNat :: BlockId -> BlockId -> BlockId -> NatM ()
+addNodeBetweenNat from between to
+ = do df <- getDynFlags
+ let jmpWeight = fromIntegral . uncondWeight .
+ cfgWeightInfo $ df
+ updateCfgNat (updateCfg jmpWeight from between to)
+ where
+ -- When transforming A -> B to A -> A' -> B
+ -- A -> A' keeps the old edge info while
+ -- A' -> B gets the info for an unconditional
+ -- jump.
+ updateCfg weight from between old m
+ | Just info <- getEdgeInfo from old m
+ = addEdge from between info .
+ addWeightEdge between old weight .
+ delEdge from old $ m
+ | otherwise
+          = pprPanic "Failed to update cfg: Untracked edge" (ppr (from,to))
+
+
+-- | Place `succ` after `block` and change any edges
+-- block -> X to `succ` -> X
+addImmediateSuccessorNat :: BlockId -> BlockId -> NatM ()
+addImmediateSuccessorNat block succ
+ = updateCfgNat (addImmediateSuccessor block succ)
getBlockIdNat :: NatM BlockId
getBlockIdNat
diff --git a/compiler/nativeGen/PPC/CodeGen.hs b/compiler/nativeGen/PPC/CodeGen.hs
index f246ec36f1..360b102654 100644
--- a/compiler/nativeGen/PPC/CodeGen.hs
+++ b/compiler/nativeGen/PPC/CodeGen.hs
@@ -32,7 +32,9 @@ import PPC.Instr
import PPC.Cond
import PPC.Regs
import CPrim
-import NCGMonad
+import NCGMonad ( NatM, getNewRegNat, getNewLabelNat
+ , getBlockIdNat, getPicBaseNat, getNewRegPairNat
+ , getPicBaseMaybeNat )
import Instruction
import PIC
import Format
diff --git a/compiler/nativeGen/PPC/Instr.hs b/compiler/nativeGen/PPC/Instr.hs
index 8eb5e8fa8d..ade39430c0 100644
--- a/compiler/nativeGen/PPC/Instr.hs
+++ b/compiler/nativeGen/PPC/Instr.hs
@@ -100,9 +100,9 @@ allocMoreStack
:: Platform
-> Int
-> NatCmmDecl statics PPC.Instr.Instr
- -> UniqSM (NatCmmDecl statics PPC.Instr.Instr)
+ -> UniqSM (NatCmmDecl statics PPC.Instr.Instr, [(BlockId,BlockId)])
-allocMoreStack _ _ top@(CmmData _ _) = return top
+allocMoreStack _ _ top@(CmmData _ _) = return (top,[])
allocMoreStack platform slots (CmmProc info lbl live (ListGraph code)) = do
let
infos = mapKeys info
@@ -121,8 +121,10 @@ allocMoreStack platform slots (CmmProc info lbl live (ListGraph code)) = do
alloc = mkStackAllocInstr platform delta
dealloc = mkStackDeallocInstr platform delta
+ retargetList = (zip entries (map mkBlockId uniqs))
+
new_blockmap :: LabelMap BlockId
- new_blockmap = mapFromList (zip entries (map mkBlockId uniqs))
+ new_blockmap = mapFromList retargetList
insert_stack_insns (BasicBlock id insns)
| Just new_blockid <- mapLookup id new_blockmap
@@ -156,7 +158,7 @@ allocMoreStack platform slots (CmmProc info lbl live (ListGraph code)) = do
= concatMap insert_stack_insns code
-- in
- return (CmmProc info lbl live (ListGraph new_code))
+ return (CmmProc info lbl live (ListGraph new_code),retargetList)
-- -----------------------------------------------------------------------------
diff --git a/compiler/nativeGen/PPC/RegInfo.hs b/compiler/nativeGen/PPC/RegInfo.hs
index 30a07b9440..c369cac179 100644
--- a/compiler/nativeGen/PPC/RegInfo.hs
+++ b/compiler/nativeGen/PPC/RegInfo.hs
@@ -29,9 +29,14 @@ import Cmm
import CLabel
import Unique
+import Outputable (ppr, text, Outputable, (<>))
data JumpDest = DestBlockId BlockId
+-- Debug Instance
+instance Outputable JumpDest where
+ ppr (DestBlockId bid) = text "jd<blk>:" <> ppr bid
+
getJumpDestBlockId :: JumpDest -> Maybe BlockId
getJumpDestBlockId (DestBlockId bid) = Just bid
diff --git a/compiler/nativeGen/RegAlloc/Linear/Base.hs b/compiler/nativeGen/RegAlloc/Linear/Base.hs
index 1172870729..ad0fafb3ed 100644
--- a/compiler/nativeGen/RegAlloc/Linear/Base.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/Base.hs
@@ -28,6 +28,7 @@ import Outputable
import Unique
import UniqFM
import UniqSupply
+import BlockId
-- | Used to store the register assignment on entry to a basic block.
@@ -98,7 +99,10 @@ data SpillReason
-- | Used to carry interesting stats out of the register allocator.
data RegAllocStats
= RegAllocStats
- { ra_spillInstrs :: UniqFM [Int] }
+ { ra_spillInstrs :: UniqFM [Int]
+ , ra_fixupList :: [(BlockId,BlockId,BlockId)]
+ -- ^ (from,fixup,to) : We inserted fixup code between from and to
+ }
-- | The register allocator state
@@ -129,6 +133,9 @@ data RA_State freeRegs
-- Just keep a list here instead of a map of regs -> reasons.
-- We don't want to slow down the allocator if we're not going to emit the stats.
, ra_spills :: [SpillReason]
- , ra_DynFlags :: DynFlags }
+ , ra_DynFlags :: DynFlags
+
+ -- | (from,fixup,to) : We inserted fixup code between from and to
+ , ra_fixups :: [(BlockId,BlockId,BlockId)] }
diff --git a/compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs b/compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs
index 89f496c409..fb002e2fe0 100644
--- a/compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs
@@ -33,7 +33,7 @@ import Data.Foldable (foldl')
-- vregs are in the correct regs for its destination.
--
joinToTargets
- :: (FR freeRegs, Instruction instr)
+ :: (FR freeRegs, Instruction instr, Outputable instr)
=> BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
-- that are known to be live on the entry to each block.
@@ -57,7 +57,7 @@ joinToTargets block_live id instr
-----
joinToTargets'
- :: (FR freeRegs, Instruction instr)
+ :: (FR freeRegs, Instruction instr, Outputable instr)
=> BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
-- that are known to be live on the entry to each block.
@@ -111,7 +111,7 @@ joinToTargets' block_live new_blocks block_id instr (dest:dests)
-- this is the first time we jumped to this block.
-joinToTargets_first :: (FR freeRegs, Instruction instr)
+joinToTargets_first :: (FR freeRegs, Instruction instr, Outputable instr)
=> BlockMap RegSet
-> [NatBasicBlock instr]
-> BlockId
@@ -140,7 +140,7 @@ joinToTargets_first block_live new_blocks block_id instr dest dests
-- we've jumped to this block before
-joinToTargets_again :: (Instruction instr, FR freeRegs)
+joinToTargets_again :: (Instruction instr, FR freeRegs, Outputable instr)
=> BlockMap RegSet
-> [NatBasicBlock instr]
-> BlockId
@@ -181,7 +181,8 @@ joinToTargets_again
--
let sccs = stronglyConnCompFromEdgedVerticesOrdR graph
-{- -- debugging
+ -- debugging
+ {-
pprTrace
("joinToTargets: making fixup code")
(vcat [ text " in block: " <> ppr block_id
@@ -192,7 +193,7 @@ joinToTargets_again
, text " sccs of graph: " <> ppr sccs
, text ""])
(return ())
--}
+ -}
delta <- getDeltaR
fixUpInstrs_ <- mapM (handleComponent delta instr) sccs
let fixUpInstrs = concat fixUpInstrs_
@@ -200,16 +201,10 @@ joinToTargets_again
-- make a new basic block containing the fixup code.
        -- At the end of the current block we will jump to the fixup one,
-- then that will jump to our original destination.
- fixup_block_id <- getUniqueR
- let block = BasicBlock (mkBlockId fixup_block_id)
+ fixup_block_id <- mkBlockId <$> getUniqueR
+ let block = BasicBlock fixup_block_id
$ fixUpInstrs ++ mkJumpInstr dest
-{- pprTrace
- ("joinToTargets: fixup code is:")
- (vcat [ ppr block
- , text ""])
- (return ())
--}
-- if we didn't need any fixups, then don't include the block
case fixUpInstrs of
[] -> joinToTargets' block_live new_blocks block_id instr dests
@@ -217,11 +212,25 @@ joinToTargets_again
-- patch the original branch instruction so it goes to our
-- fixup block instead.
_ -> let instr' = patchJumpInstr instr
- (\bid -> if bid == dest
- then mkBlockId fixup_block_id
- else bid) -- no change!
-
- in joinToTargets' block_live (block : new_blocks) block_id instr' dests
+ (\bid -> if bid == dest
+ then fixup_block_id
+ else bid) -- no change!
+
+ in do
+ {- --debugging
+ pprTrace "FixUpEdge info:"
+ (
+ text "inBlock:" <> ppr block_id $$
+ text "instr:" <> ppr instr $$
+ text "instr':" <> ppr instr' $$
+ text "fixup_block_id':" <>
+ ppr fixup_block_id $$
+ text "dest:" <> ppr dest
+ ) (return ())
+ -}
+ recordFixupBlock block_id fixup_block_id dest
+ joinToTargets' block_live (block : new_blocks)
+ block_id instr' dests
-- | Construct a graph of register\/spill movements.
diff --git a/compiler/nativeGen/RegAlloc/Linear/Main.hs b/compiler/nativeGen/RegAlloc/Linear/Main.hs
index 6171d8d20d..bcac084c5f 100644
--- a/compiler/nativeGen/RegAlloc/Linear/Main.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/Main.hs
@@ -147,7 +147,8 @@ regAlloc
-> UniqSM ( NatCmmDecl statics instr
, Maybe Int -- number of extra stack slots required,
-- beyond maxSpillSlots
- , Maybe RegAllocStats)
+ , Maybe RegAllocStats
+ )
regAlloc _ (CmmData sec d)
= return
@@ -523,6 +524,11 @@ genRaInsn block_live new_instrs block_id instr r_dying w_dying = do
(fixup_blocks, adjusted_instr)
<- joinToTargets block_live block_id instr
+ -- Debugging - show places where the reg alloc inserted
+ -- assignment fixup blocks.
+ -- when (not $ null fixup_blocks) $
+ -- pprTrace "fixup_blocks" (ppr fixup_blocks) (return ())
+
-- (e) Delete all register assignments for temps which are read
-- (only) and die here. Update the free register list.
releaseRegs r_dying
diff --git a/compiler/nativeGen/RegAlloc/Linear/State.hs b/compiler/nativeGen/RegAlloc/Linear/State.hs
index 6554188f41..34637b04c8 100644
--- a/compiler/nativeGen/RegAlloc/Linear/State.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/State.hs
@@ -27,7 +27,8 @@ module RegAlloc.Linear.State (
getUniqueR,
- recordSpill
+ recordSpill,
+ recordFixupBlock
)
where
@@ -39,6 +40,7 @@ import RegAlloc.Linear.Base
import RegAlloc.Liveness
import Instruction
import Reg
+import BlockId
import DynFlags
import Unique
@@ -84,7 +86,8 @@ runR dflags block_assig freeregs assig stack us thing =
, ra_stack = stack
, ra_us = us
, ra_spills = []
- , ra_DynFlags = dflags })
+ , ra_DynFlags = dflags
+ , ra_fixups = [] })
of
(# state'@RA_State
{ ra_blockassig = block_assig
@@ -98,7 +101,8 @@ runR dflags block_assig freeregs assig stack us thing =
makeRAStats :: RA_State freeRegs -> RegAllocStats
makeRAStats state
= RegAllocStats
- { ra_spillInstrs = binSpillReasons (ra_spills state) }
+ { ra_spillInstrs = binSpillReasons (ra_spills state)
+ , ra_fixupList = ra_fixups state }
spillR :: Instruction instr
@@ -161,3 +165,7 @@ recordSpill :: SpillReason -> RegM freeRegs ()
recordSpill spill
= RegM $ \s -> (# s { ra_spills = spill : ra_spills s}, () #)
+-- | Record a created fixup block
+recordFixupBlock :: BlockId -> BlockId -> BlockId -> RegM freeRegs ()
+recordFixupBlock from between to
+ = RegM $ \s -> (# s { ra_fixups = (from,between,to) : ra_fixups s}, () #)
diff --git a/compiler/nativeGen/RegAlloc/Liveness.hs b/compiler/nativeGen/RegAlloc/Liveness.hs
index 9d93564317..b7f8d1c871 100644
--- a/compiler/nativeGen/RegAlloc/Liveness.hs
+++ b/compiler/nativeGen/RegAlloc/Liveness.hs
@@ -41,6 +41,7 @@ import Reg
import Instruction
import BlockId
+import CFG
import Hoopl.Collections
import Hoopl.Label
import Cmm hiding (RegSet, emptyRegSet)
@@ -646,28 +647,35 @@ patchRegsLiveInstr patchF li
-- | Convert a NatCmmDecl to a LiveCmmDecl, with empty liveness information
natCmmTopToLive
- :: Instruction instr
- => NatCmmDecl statics instr
+ :: (Instruction instr, Outputable instr)
+ => Maybe CFG -> NatCmmDecl statics instr
-> LiveCmmDecl statics instr
-natCmmTopToLive (CmmData i d)
+natCmmTopToLive _ (CmmData i d)
= CmmData i d
-natCmmTopToLive (CmmProc info lbl live (ListGraph []))
+natCmmTopToLive _ (CmmProc info lbl live (ListGraph []))
= CmmProc (LiveInfo info [] Nothing mapEmpty) lbl live []
-natCmmTopToLive proc@(CmmProc info lbl live (ListGraph blocks@(first : _)))
- = let first_id = blockId first
+natCmmTopToLive mCfg proc@(CmmProc info lbl live (ListGraph blocks@(first : _)))
+ = CmmProc (LiveInfo info' (first_id : entry_ids) Nothing mapEmpty)
+ lbl live sccsLive
+ where
+ first_id = blockId first
all_entry_ids = entryBlocks proc
- sccs = sccBlocks blocks all_entry_ids
- entry_ids = filter (/= first_id) all_entry_ids
+ sccs = sccBlocks blocks all_entry_ids mCfg
sccsLive = map (fmap (\(BasicBlock l instrs) ->
- BasicBlock l (map (\i -> LiveInstr (Instr i) Nothing) instrs)))
+ BasicBlock l (map (\i -> LiveInstr (Instr i) Nothing) instrs)))
$ sccs
- in CmmProc (LiveInfo info (first_id : entry_ids) Nothing mapEmpty)
- lbl live sccsLive
-
+ entry_ids = filter (reachable_node) .
+ filter (/= first_id) $ all_entry_ids
+ info' = mapFilterWithKey (\node _ -> reachable_node node) info
+ reachable_node
+ | Just cfg <- mCfg
+ = hasNode cfg
+ | otherwise
+ = const True
--
-- Compute the liveness graph of the set of basic blocks. Important:
@@ -683,9 +691,10 @@ sccBlocks
:: forall instr . Instruction instr
=> [NatBasicBlock instr]
-> [BlockId]
+ -> Maybe CFG
-> [SCC (NatBasicBlock instr)]
-sccBlocks blocks entries = map (fmap node_payload) sccs
+sccBlocks blocks entries mcfg = map (fmap node_payload) sccs
where
nodes :: [ Node BlockId (NatBasicBlock instr) ]
nodes = [ DigraphNode block id (getOutEdges instrs)
@@ -694,7 +703,12 @@ sccBlocks blocks entries = map (fmap node_payload) sccs
g1 = graphFromEdgedVerticesUniq nodes
reachable :: LabelSet
- reachable = setFromList [ node_key node | node <- reachablesG g1 roots ]
+ reachable
+ | Just cfg <- mcfg
+ -- Our CFG only contains reachable nodes by construction.
+ = getCfgNodes cfg
+ | otherwise
+ = setFromList $ [ node_key node | node <- reachablesG g1 roots ]
g2 = graphFromEdgedVerticesUniq [ node | node <- nodes
, node_key node
@@ -714,8 +728,6 @@ sccBlocks blocks entries = map (fmap node_payload) sccs
roots = [DigraphNode (panic "sccBlocks") b (panic "sccBlocks")
| b <- entries ]
-
-
--------------------------------------------------------------------------------
-- Annotate code with register liveness information
--
diff --git a/compiler/nativeGen/SPARC/CodeGen.hs b/compiler/nativeGen/SPARC/CodeGen.hs
index a95a22274b..83402bb126 100644
--- a/compiler/nativeGen/SPARC/CodeGen.hs
+++ b/compiler/nativeGen/SPARC/CodeGen.hs
@@ -39,7 +39,7 @@ import SPARC.Regs
import SPARC.Stack
import Instruction
import Format
-import NCGMonad
+import NCGMonad ( NatM, getNewRegNat, getNewLabelNat )
-- Our intermediate code:
import BlockId
diff --git a/compiler/nativeGen/SPARC/ShortcutJump.hs b/compiler/nativeGen/SPARC/ShortcutJump.hs
index 83e366cb04..e2a8a71572 100644
--- a/compiler/nativeGen/SPARC/ShortcutJump.hs
+++ b/compiler/nativeGen/SPARC/ShortcutJump.hs
@@ -18,12 +18,17 @@ import BlockId
import Cmm
import Panic
-
+import Outputable
data JumpDest
= DestBlockId BlockId
| DestImm Imm
+-- Debug Instance
+instance Outputable JumpDest where
+ ppr (DestBlockId bid) = text "blk:" <> ppr bid
+ ppr (DestImm _bid) = text "imm:?"
+
getJumpDestBlockId :: JumpDest -> Maybe BlockId
getJumpDestBlockId (DestBlockId bid) = Just bid
getJumpDestBlockId _ = Nothing
diff --git a/compiler/nativeGen/X86/CodeGen.hs b/compiler/nativeGen/X86/CodeGen.hs
index 38dc76090d..37080b990e 100644
--- a/compiler/nativeGen/X86/CodeGen.hs
+++ b/compiler/nativeGen/X86/CodeGen.hs
@@ -20,6 +20,7 @@ module X86.CodeGen (
cmmTopCodeGen,
generateJumpTableForInstr,
extractUnwindPoints,
+ invertCondBranches,
InstrBlock
)
@@ -36,13 +37,21 @@ import X86.Instr
import X86.Cond
import X86.Regs
import X86.RegInfo
+
+--TODO: Remove - Just for development/debugging
+import X86.Ppr()
+
import CodeGen.Platform
import CPrim
import Debug ( DebugBlock(..), UnwindPoint(..), UnwindTable
, UnwindExpr(UwReg), toUnwindExpr )
import Instruction
import PIC
-import NCGMonad
+import NCGMonad ( NatM, getNewRegNat, getNewLabelNat, setDeltaNat
+ , getDeltaNat, getBlockIdNat, getPicBaseNat, getNewRegPairNat
+ , getPicBaseMaybeNat, getDebugBlock, getFileId
+ , addImmediateSuccessorNat, updateCfgNat)
+import CFG
import Format
import Reg
import Platform
@@ -56,7 +65,9 @@ import CmmUtils
import CmmSwitch
import Cmm
import Hoopl.Block
+import Hoopl.Collections
import Hoopl.Graph
+import Hoopl.Label
import CLabel
import CoreSyn ( Tickish(..) )
import SrcLoc ( srcSpanFile, srcSpanStartLine, srcSpanStartCol )
@@ -137,8 +148,8 @@ basicBlockCodeGen block = do
let line = srcSpanStartLine span; col = srcSpanStartCol span
return $ unitOL $ LOCATION fileId line col name
_ -> return nilOL
- mid_instrs <- stmtsToInstrs stmts
- tail_instrs <- stmtToInstrs tail
+ mid_instrs <- stmtsToInstrs id stmts
+ tail_instrs <- stmtToInstrs id tail
let instrs = loc_instrs `appOL` mid_instrs `appOL` tail_instrs
instrs' <- fold <$> traverse addSpUnwindings instrs
-- code generation may introduce new basic block boundaries, which
@@ -169,14 +180,15 @@ addSpUnwindings instr@(DELTA d) = do
else return (unitOL instr)
addSpUnwindings instr = return $ unitOL instr
-stmtsToInstrs :: [CmmNode e x] -> NatM InstrBlock
-stmtsToInstrs stmts
- = do instrss <- mapM stmtToInstrs stmts
+stmtsToInstrs :: BlockId -> [CmmNode e x] -> NatM InstrBlock
+stmtsToInstrs bid stmts
+ = do instrss <- mapM (stmtToInstrs bid) stmts
return (concatOL instrss)
-
-stmtToInstrs :: CmmNode e x -> NatM InstrBlock
-stmtToInstrs stmt = do
+-- | `bid` refers to the current block and is used to update the CFG
+-- if new blocks are inserted in the control flow.
+stmtToInstrs :: BlockId -> CmmNode e x -> NatM InstrBlock
+stmtToInstrs bid stmt = do
dflags <- getDynFlags
is32Bit <- is32BitPlatform
case stmt of
@@ -207,16 +219,13 @@ stmtToInstrs stmt = do
format = cmmTypeFormat ty
CmmUnsafeForeignCall target result_regs args
- -> genCCall dflags is32Bit target result_regs args
+ -> genCCall dflags is32Bit target result_regs args bid
- CmmBranch id -> genBranch id
+ CmmBranch id -> return $ genBranch id
--We try to arrange blocks such that the likely branch is the fallthrough
--in CmmContFlowOpt. So we can assume the condition is likely false here.
- CmmCondBranch arg true false _ -> do
- b1 <- genCondJump true arg
- b2 <- genBranch false
- return (b1 `appOL` b2)
+ CmmCondBranch arg true false _ -> genCondBranch bid true false arg
CmmSwitch arg ids -> do dflags <- getDynFlags
genSwitch dflags arg ids
CmmCall { cml_target = arg
@@ -1673,13 +1682,13 @@ genJump expr regs = do
-- -----------------------------------------------------------------------------
-- Unconditional branches
-genBranch :: BlockId -> NatM InstrBlock
-genBranch = return . toOL . mkJumpInstr
+genBranch :: BlockId -> InstrBlock
+genBranch = toOL . mkJumpInstr
-- -----------------------------------------------------------------------------
--- Conditional jumps
+-- Conditional jumps/branches
{-
Conditional jumps are always to local labels, so we can use branch
@@ -1690,19 +1699,24 @@ I386: First, we have to ensure that the condition
codes are set according to the supplied comparison operation.
-}
-genCondJump
- :: BlockId -- the branch target
+
+genCondBranch
+ :: BlockId -- the source of the jump
+ -> BlockId -- the true branch target
+ -> BlockId -- the false branch target
-> CmmExpr -- the condition on which to branch
- -> NatM InstrBlock
+ -> NatM InstrBlock -- Instructions
-genCondJump id expr = do
+genCondBranch bid id false expr = do
is32Bit <- is32BitPlatform
- genCondJump' is32Bit id expr
+ genCondBranch' is32Bit bid id false expr
-genCondJump' :: Bool -> BlockId -> CmmExpr -> NatM InstrBlock
+-- | We return the instructions generated.
+genCondBranch' :: Bool -> BlockId -> BlockId -> BlockId -> CmmExpr
+ -> NatM InstrBlock
-- 64-bit integer comparisons on 32-bit
-genCondJump' is32Bit true (CmmMachOp mop [e1,e2])
+genCondBranch' is32Bit _bid true false (CmmMachOp mop [e1,e2])
| is32Bit, Just W64 <- maybeIntComparison mop = do
ChildCode64 code1 r1_lo <- iselExpr64 e1
ChildCode64 code2 r2_lo <- iselExpr64 e2
@@ -1710,52 +1724,52 @@ genCondJump' is32Bit true (CmmMachOp mop [e1,e2])
r2_hi = getHiVRegFromLo r2_lo
cond = machOpToCond mop
Just cond' = maybeFlipCond cond
- false <- getBlockIdNat
- return $ code1 `appOL` code2 `appOL` toOL [
- CMP II32 (OpReg r2_hi) (OpReg r1_hi),
- JXX cond true,
- JXX cond' false,
- CMP II32 (OpReg r2_lo) (OpReg r1_lo),
- JXX cond true,
- NEWBLOCK false ]
-
-genCondJump' _ id bool = do
+ --TODO: Update CFG for x86
+ let code = code1 `appOL` code2 `appOL` toOL [
+ CMP II32 (OpReg r2_hi) (OpReg r1_hi),
+ JXX cond true,
+ JXX cond' false,
+ CMP II32 (OpReg r2_lo) (OpReg r1_lo),
+ JXX cond true] `appOL` genBranch false
+ return code
+
+genCondBranch' _ bid id false bool = do
CondCode is_float cond cond_code <- getCondCode bool
use_sse2 <- sse2Enabled
if not is_float || not use_sse2
then
- return (cond_code `snocOL` JXX cond id)
+ return (cond_code `snocOL` JXX cond id `appOL` genBranch false)
else do
- lbl <- getBlockIdNat
-
-- See Note [SSE Parity Checks]
- let code = case cond of
- NE -> or_unordered
- GU -> plain_test
- GEU -> plain_test
- -- Use ASSERT so we don't break releases if
- -- LTT/LE creep in somehow.
- LTT ->
- ASSERT2(False, ppr "Should have been turned into >")
- and_ordered
- LE ->
- ASSERT2(False, ppr "Should have been turned into >=")
- and_ordered
- _ -> and_ordered
+ let jmpFalse = genBranch false
+ code
+ = case cond of
+ NE -> or_unordered
+ GU -> plain_test
+ GEU -> plain_test
+ -- Use ASSERT so we don't break releases if
+ -- LTT/LE creep in somehow.
+ LTT ->
+ ASSERT2(False, ppr "Should have been turned into >")
+ and_ordered
+ LE ->
+ ASSERT2(False, ppr "Should have been turned into >=")
+ and_ordered
+ _ -> and_ordered
plain_test = unitOL (
JXX cond id
- )
+ ) `appOL` jmpFalse
or_unordered = toOL [
JXX cond id,
JXX PARITY id
- ]
+ ] `appOL` jmpFalse
and_ordered = toOL [
- JXX PARITY lbl,
+ JXX PARITY false,
JXX cond id,
- JXX ALWAYS lbl,
- NEWBLOCK lbl
+ JXX ALWAYS false
]
+ updateCfgNat (\cfg -> adjustEdgeWeight cfg (+3) bid false)
return (cond_code `appOL` code)
-- -----------------------------------------------------------------------------
@@ -1774,6 +1788,7 @@ genCCall
-> ForeignTarget -- function to call
-> [CmmFormal] -- where to put the result
-> [CmmActual] -- arguments (of mixed type)
+ -> BlockId -- The block we are in
-> NatM InstrBlock
-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -1782,7 +1797,7 @@ genCCall
-- least DWORD aligned and the number of bytes to copy isn't too
-- large. Otherwise, call C's memcpy.
genCCall dflags is32Bit (PrimTarget (MO_Memcpy align)) _
- [dst, src, CmmLit (CmmInt n _)]
+ [dst, src, CmmLit (CmmInt n _)] _
| fromInteger insns <= maxInlineMemcpyInsns dflags && align .&. 3 == 0 = do
code_dst <- getAnyReg dst
dst_r <- getNewRegNat format
@@ -1832,6 +1847,7 @@ genCCall dflags _ (PrimTarget (MO_Memset align)) _
[dst,
CmmLit (CmmInt c _),
CmmLit (CmmInt n _)]
+ _
| fromInteger insns <= maxInlineMemsetInsns dflags && align .&. 3 == 0 = do
code_dst <- getAnyReg dst
dst_r <- getNewRegNat format
@@ -1872,13 +1888,13 @@ genCCall dflags _ (PrimTarget (MO_Memset align)) _
dst_addr = AddrBaseIndex (EABaseReg dst) EAIndexNone
(ImmInteger (n - i))
-genCCall _ _ (PrimTarget MO_WriteBarrier) _ _ = return nilOL
+genCCall _ _ (PrimTarget MO_WriteBarrier) _ _ _ = return nilOL
-- write barrier compiles to no code on x86/x86-64;
-- we keep it this long in order to prevent earlier optimisations.
-genCCall _ _ (PrimTarget MO_Touch) _ _ = return nilOL
+genCCall _ _ (PrimTarget MO_Touch) _ _ _ = return nilOL
-genCCall _ is32bit (PrimTarget (MO_Prefetch_Data n )) _ [src] =
+genCCall _ is32bit (PrimTarget (MO_Prefetch_Data n )) _ [src] _ =
case n of
0 -> genPrefetch src $ PREFETCH NTA format
1 -> genPrefetch src $ PREFETCH Lvl2 format
@@ -1899,7 +1915,7 @@ genCCall _ is32bit (PrimTarget (MO_Prefetch_Data n )) _ [src] =
((AddrBaseIndex (EABaseReg src_r ) EAIndexNone (ImmInt 0)))) ))
-- prefetch always takes an address
-genCCall dflags is32Bit (PrimTarget (MO_BSwap width)) [dst] [src] = do
+genCCall dflags is32Bit (PrimTarget (MO_BSwap width)) [dst] [src] _ = do
let platform = targetPlatform dflags
let dst_r = getRegisterReg platform False (CmmLocal dst)
case width of
@@ -1922,7 +1938,7 @@ genCCall dflags is32Bit (PrimTarget (MO_BSwap width)) [dst] [src] = do
format = intFormat width
genCCall dflags is32Bit (PrimTarget (MO_PopCnt width)) dest_regs@[dst]
- args@[src] = do
+ args@[src] bid = do
sse4_2 <- sse4_2Enabled
let platform = targetPlatform dflags
if sse4_2
@@ -1947,13 +1963,13 @@ genCCall dflags is32Bit (PrimTarget (MO_PopCnt width)) dest_regs@[dst]
let target = ForeignTarget targetExpr (ForeignConvention CCallConv
[NoHint] [NoHint]
CmmMayReturn)
- genCCall dflags is32Bit target dest_regs args
+ genCCall dflags is32Bit target dest_regs args bid
where
format = intFormat width
lbl = mkCmmCodeLabel primUnitId (fsLit (popCntLabel width))
genCCall dflags is32Bit (PrimTarget (MO_Pdep width)) dest_regs@[dst]
- args@[src, mask] = do
+ args@[src, mask] bid = do
let platform = targetPlatform dflags
if isBmi2Enabled dflags
then do code_src <- getAnyReg src
@@ -1980,13 +1996,13 @@ genCCall dflags is32Bit (PrimTarget (MO_Pdep width)) dest_regs@[dst]
let target = ForeignTarget targetExpr (ForeignConvention CCallConv
[NoHint] [NoHint]
CmmMayReturn)
- genCCall dflags is32Bit target dest_regs args
+ genCCall dflags is32Bit target dest_regs args bid
where
format = intFormat width
lbl = mkCmmCodeLabel primUnitId (fsLit (pdepLabel width))
genCCall dflags is32Bit (PrimTarget (MO_Pext width)) dest_regs@[dst]
- args@[src, mask] = do
+ args@[src, mask] bid = do
let platform = targetPlatform dflags
if isBmi2Enabled dflags
then do code_src <- getAnyReg src
@@ -2013,19 +2029,19 @@ genCCall dflags is32Bit (PrimTarget (MO_Pext width)) dest_regs@[dst]
let target = ForeignTarget targetExpr (ForeignConvention CCallConv
[NoHint] [NoHint]
CmmMayReturn)
- genCCall dflags is32Bit target dest_regs args
+ genCCall dflags is32Bit target dest_regs args bid
where
format = intFormat width
lbl = mkCmmCodeLabel primUnitId (fsLit (pextLabel width))
-genCCall dflags is32Bit (PrimTarget (MO_Clz width)) dest_regs@[dst] args@[src]
+genCCall dflags is32Bit (PrimTarget (MO_Clz width)) dest_regs@[dst] args@[src] bid
| is32Bit && width == W64 = do
-- Fallback to `hs_clz64` on i386
targetExpr <- cmmMakeDynamicReference dflags CallReference lbl
let target = ForeignTarget targetExpr (ForeignConvention CCallConv
[NoHint] [NoHint]
CmmMayReturn)
- genCCall dflags is32Bit target dest_regs args
+ genCCall dflags is32Bit target dest_regs args bid
| otherwise = do
code_src <- getAnyReg src
@@ -2050,7 +2066,7 @@ genCCall dflags is32Bit (PrimTarget (MO_Clz width)) dest_regs@[dst] args@[src]
format = if width == W8 then II16 else intFormat width
lbl = mkCmmCodeLabel primUnitId (fsLit (clzLabel width))
-genCCall dflags is32Bit (PrimTarget (MO_Ctz width)) [dst] [src]
+genCCall dflags is32Bit (PrimTarget (MO_Ctz width)) [dst] [src] bid
| is32Bit, width == W64 = do
ChildCode64 vcode rlo <- iselExpr64 src
let rhi = getHiVRegFromLo rlo
@@ -2059,6 +2075,14 @@ genCCall dflags is32Bit (PrimTarget (MO_Ctz width)) [dst] [src]
lbl2 <- getBlockIdNat
tmp_r <- getNewRegNat format
+ -- New CFG Edges:
+ -- bid -> lbl2
+ -- bid -> lbl1 -> lbl2
+      -- We also change edges originating at bid to start at lbl2 instead.
+ updateCfgNat (addWeightEdge bid lbl1 110 .
+ addWeightEdge lbl1 lbl2 110 .
+ addImmediateSuccessor bid lbl2)
+
-- The following instruction sequence corresponds to the pseudo-code
--
-- if (src) {
@@ -2104,17 +2128,18 @@ genCCall dflags is32Bit (PrimTarget (MO_Ctz width)) [dst] [src]
platform = targetPlatform dflags
format = if width == W8 then II16 else intFormat width
-genCCall dflags is32Bit (PrimTarget (MO_UF_Conv width)) dest_regs args = do
+genCCall dflags is32Bit (PrimTarget (MO_UF_Conv width)) dest_regs args bid = do
targetExpr <- cmmMakeDynamicReference dflags
CallReference lbl
let target = ForeignTarget targetExpr (ForeignConvention CCallConv
[NoHint] [NoHint]
CmmMayReturn)
- genCCall dflags is32Bit target dest_regs args
+ genCCall dflags is32Bit target dest_regs args bid
where
lbl = mkCmmCodeLabel primUnitId (fsLit (word2FloatLabel width))
-genCCall dflags is32Bit (PrimTarget (MO_AtomicRMW width amop)) [dst] [addr, n] = do
+genCCall dflags is32Bit (PrimTarget (MO_AtomicRMW width amop))
+ [dst] [addr, n] bid = do
Amode amode addr_code <-
if amop `elem` [AMO_Add, AMO_Sub]
then getAmode addr
@@ -2157,6 +2182,11 @@ genCCall dflags is32Bit (PrimTarget (MO_AtomicRMW width amop)) [dst] [addr, n] =
cmpxchg_code instrs = do
lbl <- getBlockIdNat
tmp <- getNewRegNat format
+
+          -- Record the inserted block in the CFG
+ addImmediateSuccessorNat bid lbl
+ updateCfgNat (addWeightEdge lbl lbl 0)
+
return $ toOL
[ MOV format (OpAddr amode) (OpReg eax)
, JXX ALWAYS lbl
@@ -2172,17 +2202,17 @@ genCCall dflags is32Bit (PrimTarget (MO_AtomicRMW width amop)) [dst] [addr, n] =
format = intFormat width
-genCCall dflags _ (PrimTarget (MO_AtomicRead width)) [dst] [addr] = do
+genCCall dflags _ (PrimTarget (MO_AtomicRead width)) [dst] [addr] _ = do
load_code <- intLoadCode (MOV (intFormat width)) addr
let platform = targetPlatform dflags
use_sse2 <- sse2Enabled
return (load_code (getRegisterReg platform use_sse2 (CmmLocal dst)))
-genCCall _ _ (PrimTarget (MO_AtomicWrite width)) [] [addr, val] = do
+genCCall _ _ (PrimTarget (MO_AtomicWrite width)) [] [addr, val] _ = do
code <- assignMem_IntCode (intFormat width) addr val
return $ code `snocOL` MFENCE
-genCCall dflags is32Bit (PrimTarget (MO_Cmpxchg width)) [dst] [addr, old, new] = do
+genCCall dflags is32Bit (PrimTarget (MO_Cmpxchg width)) [dst] [addr, old, new] _ = do
-- On x86 we don't have enough registers to use cmpxchg with a
-- complicated addressing mode, so on that architecture we
-- pre-compute the address first.
@@ -2204,14 +2234,14 @@ genCCall dflags is32Bit (PrimTarget (MO_Cmpxchg width)) [dst] [addr, old, new] =
where
format = intFormat width
-genCCall _ is32Bit target dest_regs args = do
+genCCall _ is32Bit target dest_regs args bid = do
dflags <- getDynFlags
let platform = targetPlatform dflags
sse2 = isSse2Enabled dflags
case (target, dest_regs) of
-- void return type prim op
(PrimTarget op, []) ->
- outOfLineCmmOp op Nothing args
+ outOfLineCmmOp bid op Nothing args
-- we only cope with a single result for foreign calls
(PrimTarget op, [r])
| sse2 -> case op of
@@ -2224,12 +2254,12 @@ genCCall _ is32Bit target dest_regs args = do
MO_F32_Sqrt -> actuallyInlineSSE2Op (\fmt r -> SQRT fmt (OpReg r)) FF32 args
MO_F64_Sqrt -> actuallyInlineSSE2Op (\fmt r -> SQRT fmt (OpReg r)) FF64 args
- _other_op -> outOfLineCmmOp op (Just r) args
+ _other_op -> outOfLineCmmOp bid op (Just r) args
| otherwise -> do
l1 <- getNewLabelNat
l2 <- getNewLabelNat
if sse2
- then outOfLineCmmOp op (Just r) args
+ then outOfLineCmmOp bid op (Just r) args
else case op of
MO_F32_Sqrt -> actuallyInlineFloatOp GSQRT FF32 args
MO_F64_Sqrt -> actuallyInlineFloatOp GSQRT FF64 args
@@ -2243,7 +2273,7 @@ genCCall _ is32Bit target dest_regs args = do
MO_F32_Tan -> actuallyInlineFloatOp (\s -> GTAN s l1 l2) FF32 args
MO_F64_Tan -> actuallyInlineFloatOp (\s -> GTAN s l1 l2) FF64 args
- _other_op -> outOfLineCmmOp op (Just r) args
+ _other_op -> outOfLineCmmOp bid op (Just r) args
where
actuallyInlineFloatOp = actuallyInlineFloatOp' False
@@ -2813,15 +2843,16 @@ maybePromoteCArg dflags wto arg
where
wfrom = cmmExprWidth dflags arg
-outOfLineCmmOp :: CallishMachOp -> Maybe CmmFormal -> [CmmActual] -> NatM InstrBlock
-outOfLineCmmOp mop res args
+outOfLineCmmOp :: BlockId -> CallishMachOp -> Maybe CmmFormal -> [CmmActual]
+ -> NatM InstrBlock
+outOfLineCmmOp bid mop res args
= do
dflags <- getDynFlags
targetExpr <- cmmMakeDynamicReference dflags CallReference lbl
let target = ForeignTarget targetExpr
(ForeignConvention CCallConv [] [] CmmMayReturn)
- stmtToInstrs (CmmUnsafeForeignCall target (catMaybes [res]) args)
+ stmtToInstrs bid (CmmUnsafeForeignCall target (catMaybes [res]) args)
where
-- Assume we can call these functions directly, and that they're not in a dynamic library.
-- TODO: Why is this ok? Under linux this code will be in libm.so
@@ -3389,3 +3420,55 @@ needLlvm :: NatM a
needLlvm =
sorry $ unlines ["The native code generator does not support vector"
,"instructions. Please use -fllvm."]
+
+-- | This relies on the invariant that all jumps in the given blocks are required.
+-- Starting from there we try to make a few more jumps redundant by reordering
+-- them.
+invertCondBranches :: CFG -> LabelMap a -> [NatBasicBlock Instr]
+ -> [NatBasicBlock Instr]
+invertCondBranches cfg keep bs =
+ --trace "Foo" $
+ invert bs
+ where
+ invert :: [NatBasicBlock Instr] -> [NatBasicBlock Instr]
+ invert ((BasicBlock lbl1 ins@(_:_:_xs)):b2@(BasicBlock lbl2 _):bs)
+ | --pprTrace "Block" (ppr lbl1) True,
+ (jmp1,jmp2) <- last2 ins
+ , JXX cond1 target1 <- jmp1
+ , target1 == lbl2
+ --, pprTrace "CutChance" (ppr b1) True
+ , JXX ALWAYS target2 <- jmp2
+ -- We have enough information to check if we can perform the inversion
+            -- TODO: We could also check for the last asm instruction which sets
+            -- the status flags instead, which I suspect is worse for compiler
+            -- performance but might be applicable to more cases.
+ , Just edgeInfo1 <- getEdgeInfo lbl1 target1 cfg
+ , Just edgeInfo2 <- getEdgeInfo lbl1 target2 cfg
+ -- Both jumps come from the same cmm statement
+ , transitionSource edgeInfo1 == transitionSource edgeInfo2
+ , (CmmSource cmmCondBranch) <- transitionSource edgeInfo1
+
+    -- Int comparisons are invertible
+ , CmmCondBranch (CmmMachOp op _args) _ _ _ <- cmmCondBranch
+ , Just _ <- maybeIntComparison op
+ , Just invCond <- maybeInvertCond cond1
+
+ --Swap the last two jumps, invert the conditional jumps condition.
+ = let jumps =
+ case () of
+              -- We are free to eliminate the jmp, so we do so.
+ _ | not (mapMember target1 keep)
+ -> [JXX invCond target2]
+ -- If the conditional target is unlikely we put the other
+ -- target at the front.
+ | edgeWeight edgeInfo2 > edgeWeight edgeInfo1
+ -> [JXX invCond target2, JXX ALWAYS target1]
+ -- Keep things as-is otherwise
+ | otherwise
+ -> [jmp1, jmp2]
+ in --pprTrace "Cutable" (ppr [jmp1,jmp2] <+> text "=>" <+> ppr jumps) $
+ (BasicBlock lbl1
+ (dropTail 2 ins ++ jumps))
+ : invert (b2:bs)
+ invert (b:bs) = b : invert bs
+ invert [] = []
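-- A small sketch of the reordering case above, with hypothetical labels
-- and condition. Given a block ending in
--
--   JXX GE .L2          -- conditional jump; .L2 is the next block
--   JXX ALWAYS .L3
--
-- where .L2 has to be kept but the edge to .L3 carries more weight, we
-- rewrite this to
--
--   JXX LTT .L3         -- condition inverted via maybeInvertCond
--   JXX ALWAYS .L2
--
-- so the likelier target is now reached by the conditional jump at the
-- front.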
diff --git a/compiler/nativeGen/X86/Cond.hs b/compiler/nativeGen/X86/Cond.hs
index 35cbf943e1..49e3ce9254 100644
--- a/compiler/nativeGen/X86/Cond.hs
+++ b/compiler/nativeGen/X86/Cond.hs
@@ -3,7 +3,8 @@ module X86.Cond (
condUnsigned,
condToSigned,
condToUnsigned,
- maybeFlipCond
+ maybeFlipCond,
+ maybeInvertCond
)
where
@@ -68,3 +69,41 @@ maybeFlipCond cond = case cond of
LE -> Just GE
GE -> Just LE
_other -> Nothing
+
+-- | If we apply @maybeInvertCond@ to the condition of a jump we turn
+-- jumps taken into jumps not taken and vice versa.
+--
+-- Careful! If the used comparison and the conditional jump
+-- don't match, the above behaviour will NOT hold.
+-- When used for FP comparisons, this does not consider unordered
+-- numbers.
+-- Also inverting twice might return a synonym for the original condition.
+maybeInvertCond :: Cond -> Maybe Cond
+maybeInvertCond cond = case cond of
+ ALWAYS -> Nothing
+ EQQ -> Just NE
+ NE -> Just EQQ
+
+ NEG -> Just POS
+ POS -> Just NEG
+
+ GEU -> Just LU
+ LU -> Just GEU
+
+ GE -> Just LTT
+ LTT -> Just GE
+
+ GTT -> Just LE
+ LE -> Just GTT
+
+ GU -> Just LEU
+ LEU -> Just GU
+
+ --GEU "==" NOTCARRY, they are synonyms
+ --at the assembly level
+ CARRY -> Just GEU
+
+ OFLO -> Nothing
+
+ PARITY -> Just NOTPARITY
+ NOTPARITY -> Just PARITY
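-- A few illustrative reductions:
--
--   maybeInvertCond EQQ  == Just NE
--   maybeInvertCond OFLO == Nothing
--
-- The synonym caveat can be seen starting from CARRY:
--
--   maybeInvertCond CARRY == Just GEU
--   maybeInvertCond GEU   == Just LU
--
-- and LU is the assembly-level synonym of CARRY, not CARRY itself.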
diff --git a/compiler/nativeGen/X86/Instr.hs b/compiler/nativeGen/X86/Instr.hs
index 8cc61ed789..c47e1fae83 100644
--- a/compiler/nativeGen/X86/Instr.hs
+++ b/compiler/nativeGen/X86/Instr.hs
@@ -11,7 +11,7 @@
module X86.Instr (Instr(..), Operand(..), PrefetchVariant(..), JumpDest(..),
getJumpDestBlockId, canShortcut, shortcutStatics,
shortcutJump, i386_insert_ffrees, allocMoreStack,
- maxSpillSlots, archWordFormat)
+ maxSpillSlots, archWordFormat )
where
#include "HsVersions.h"
@@ -1061,9 +1061,9 @@ allocMoreStack
:: Platform
-> Int
-> NatCmmDecl statics X86.Instr.Instr
- -> UniqSM (NatCmmDecl statics X86.Instr.Instr)
+ -> UniqSM (NatCmmDecl statics X86.Instr.Instr, [(BlockId,BlockId)])
-allocMoreStack _ _ top@(CmmData _ _) = return top
+allocMoreStack _ _ top@(CmmData _ _) = return (top,[])
allocMoreStack platform slots proc@(CmmProc info lbl live (ListGraph code)) = do
let entries = entryBlocks proc
@@ -1076,8 +1076,10 @@ allocMoreStack platform slots proc@(CmmProc info lbl live (ListGraph code)) = do
alloc = mkStackAllocInstr platform delta
dealloc = mkStackDeallocInstr platform delta
+ retargetList = (zip entries (map mkBlockId uniqs))
+
new_blockmap :: LabelMap BlockId
- new_blockmap = mapFromList (zip entries (map mkBlockId uniqs))
+ new_blockmap = mapFromList retargetList
insert_stack_insns (BasicBlock id insns)
| Just new_blockid <- mapLookup id new_blockmap
@@ -1096,10 +1098,16 @@ allocMoreStack platform slots proc@(CmmProc info lbl live (ListGraph code)) = do
new_code = concatMap insert_stack_insns code
-- in
- return (CmmProc info lbl live (ListGraph new_code))
+ return (CmmProc info lbl live (ListGraph new_code), retargetList)
data JumpDest = DestBlockId BlockId | DestImm Imm
+-- Debug Instance
+instance Outputable JumpDest where
+ ppr (DestBlockId bid) = text "jd<blk>:" <> ppr bid
+ ppr (DestImm _imm) = text "jd<imm>:noShow"
+
+
getJumpDestBlockId :: JumpDest -> Maybe BlockId
getJumpDestBlockId (DestBlockId bid) = Just bid
getJumpDestBlockId _ = Nothing
diff --git a/compiler/nativeGen/X86/Regs.hs b/compiler/nativeGen/X86/Regs.hs
index 97c3b984e2..fcb0847ba9 100644
--- a/compiler/nativeGen/X86/Regs.hs
+++ b/compiler/nativeGen/X86/Regs.hs
@@ -132,7 +132,6 @@ data Imm
| ImmConstantSum Imm Imm
| ImmConstantDiff Imm Imm
-
strImmLit :: String -> Imm
strImmLit s = ImmLit (text s)
diff --git a/compiler/utils/Digraph.hs b/compiler/utils/Digraph.hs
index c420486ed1..bb118a65f6 100644
--- a/compiler/utils/Digraph.hs
+++ b/compiler/utils/Digraph.hs
@@ -1,6 +1,7 @@
-- (c) The University of Glasgow 2006
{-# LANGUAGE CPP, ScopedTypeVariables, ViewPatterns #-}
+{-# LANGUAGE GeneralizedNewtypeDeriving #-}
module Digraph(
Graph, graphFromEdgedVerticesOrd, graphFromEdgedVerticesUniq,
@@ -19,6 +20,9 @@ module Digraph(
stronglyConnCompFromEdgedVerticesOrdR,
stronglyConnCompFromEdgedVerticesUniq,
stronglyConnCompFromEdgedVerticesUniqR,
+
+ -- Simple way to classify edges
+ EdgeType(..), classifyEdges
) where
#include "HsVersions.h"
@@ -346,6 +350,7 @@ reachableG graph from = map (gr_vertex_to_node graph) result
where from_vertex = expectJust "reachableG" (gr_node_to_vertex graph from)
result = {-# SCC "Digraph.reachable" #-} reachable (gr_int_graph graph) [from_vertex]
+-- | Given a list of roots, return all reachable nodes.
reachablesG :: Graph node -> [node] -> [node]
reachablesG graph froms = map (gr_vertex_to_node graph) result
where result = {-# SCC "Digraph.reachable" #-}
@@ -420,3 +425,92 @@ preorderF ts = concat (map flatten ts)
-- This generalizes reachable which was found in Data.Graph
reachable :: IntGraph -> [Vertex] -> [Vertex]
reachable g vs = preorderF (dfs g vs)
+
+{-
+************************************************************************
+* *
+* Classify Edge Types
+* *
+************************************************************************
+-}
+
+-- Remark: While we could generalize this algorithm, doing so comes at a
+-- runtime cost and brings no advantages. If you find yourself using this
+-- with graphs not easily represented using Int nodes, please consider
+-- rewriting this using the more general Graph type.
+
+-- | Edge direction based on DFS Classification
+data EdgeType
+ = Forward
+ | Cross
+ | Backward -- ^ Loop back towards the root node.
+             -- E.g. backjumps in loops
+ | SelfLoop -- ^ v -> v
+ deriving (Eq,Ord)
+
+instance Outputable EdgeType where
+ ppr Forward = text "Forward"
+ ppr Cross = text "Cross"
+ ppr Backward = text "Backward"
+ ppr SelfLoop = text "SelfLoop"
+
+newtype Time = Time Int deriving (Eq,Ord,Num,Outputable)
+
+-- Allow for specialization
+{-# INLINEABLE classifyEdges #-}
+
+-- | Given a start vertex, a way to get successors from a node
+-- and a list of (directed) edges, classify the types of edges.
+classifyEdges :: forall key. Uniquable key => key -> (key -> [key])
+ -> [(key,key)] -> [((key, key), EdgeType)]
+classifyEdges root getSucc edges =
+ --let uqe (from,to) = (getUnique from, getUnique to)
+ --in pprTrace "Edges:" (ppr $ map uqe edges) $
+ zip edges $ map classify edges
+ where
+ (_time, starts, ends) = addTimes (0,emptyUFM,emptyUFM) root
+ classify :: (key,key) -> EdgeType
+ classify (from,to)
+ | startFrom < startTo
+ , endFrom > endTo
+ = Forward
+ | startFrom > startTo
+ , endFrom < endTo
+ = Backward
+ | startFrom > startTo
+ , endFrom > endTo
+ = Cross
+ | getUnique from == getUnique to
+ = SelfLoop
+ | otherwise
+ = pprPanic "Failed to classify edge of Graph"
+ (ppr (getUnique from, getUnique to))
+
+ where
+ getTime event node
+ | Just time <- lookupUFM event node
+ = time
+ | otherwise
+          = pprPanic "Failed to classify edge of CFG - node not timed"
+ (text "edges" <> ppr (getUnique from, getUnique to)
+ <+> ppr starts <+> ppr ends )
+ startFrom = getTime starts from
+ startTo = getTime starts to
+ endFrom = getTime ends from
+ endTo = getTime ends to
+
+ addTimes :: (Time, UniqFM Time, UniqFM Time) -> key
+ -> (Time, UniqFM Time, UniqFM Time)
+ addTimes (time,starts,ends) n
+      -- Don't re-enter nodes
+ | elemUFM n starts
+ = (time,starts,ends)
+ | otherwise =
+ let
+ starts' = addToUFM starts n time
+ time' = time + 1
+ succs = getSucc n :: [key]
+ (time'',starts'',ends') = foldl' addTimes (time',starts',ends) succs
+ ends'' = addToUFM ends' n time''
+ in
+ (time'' + 1, starts'', ends'')
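-- A worked example, assuming the Uniquable instance for Int: with root 1
-- and successors (visited in list order)
--
--   succs 1 = [2,3]; succs 2 = [4]; succs 3 = [4]; succs 4 = [2]
--
-- the DFS assigns (start,end) times 1:(0,7), 2:(1,4), 4:(2,3), 3:(5,6),
-- so classifyEdges 1 succs [(1,2),(2,4),(4,2),(3,4)] labels (1,2) and
-- (2,4) Forward, (4,2) Backward and (3,4) Cross.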
diff --git a/compiler/utils/OrdList.hs b/compiler/utils/OrdList.hs
index a5739764d4..8e4dae7561 100644
--- a/compiler/utils/OrdList.hs
+++ b/compiler/utils/OrdList.hs
@@ -12,7 +12,7 @@ can be appended in linear time.
module OrdList (
OrdList,
nilOL, isNilOL, unitOL, appOL, consOL, snocOL, concatOL, lastOL,
- mapOL, fromOL, toOL, foldrOL, foldlOL
+ mapOL, fromOL, toOL, foldrOL, foldlOL, reverseOL
) where
import GhcPrelude
@@ -124,3 +124,11 @@ toOL :: [a] -> OrdList a
toOL [] = None
toOL [x] = One x
toOL xs = Many xs
+
+reverseOL :: OrdList a -> OrdList a
+reverseOL None = None
+reverseOL (One x) = One x
+reverseOL (Cons a b) = Snoc (reverseOL b) a
+reverseOL (Snoc a b) = Cons b (reverseOL a)
+reverseOL (Two a b) = Two (reverseOL b) (reverseOL a)
+reverseOL (Many xs) = Many (reverse xs)
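-- For instance (illustrative):
--
--   fromOL (reverseOL (toOL [1,2,3])) == [3,2,1]
--
-- Each constructor is mirrored (Cons becomes Snoc and vice versa), so
-- the reversal is a single O(n) pass; the Many case simply reverses the
-- list it already holds.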
diff --git a/compiler/utils/Util.hs b/compiler/utils/Util.hs
index c6c5362112..84799aed0b 100644
--- a/compiler/utils/Util.hs
+++ b/compiler/utils/Util.hs
@@ -28,7 +28,7 @@ module Util (
mapAndUnzip, mapAndUnzip3, mapAccumL2,
nOfThem, filterOut, partitionWith,
- dropWhileEndLE, spanEnd,
+ dropWhileEndLE, spanEnd, last2,
foldl1', foldl2, count, all2,
@@ -61,7 +61,7 @@ module Util (
nTimes,
-- * Sorting
- sortWith, minWith, nubSort,
+ sortWith, minWith, nubSort, ordNub,
-- * Comparisons
isEqual, eqListBy, eqMaybeBy,
@@ -637,6 +637,18 @@ minWith get_key xs = ASSERT( not (null xs) )
nubSort :: Ord a => [a] -> [a]
nubSort = Set.toAscList . Set.fromList
+-- | Remove duplicates but keep elements in order.
+-- O(n * log n)
+ordNub :: Ord a => [a] -> [a]
+ordNub xs
+ = go Set.empty xs
+ where
+ go _ [] = []
+ go s (x:xs)
+ | Set.member x s = go s xs
+ | otherwise = x : go (Set.insert x s) xs
+
+
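-- Contrast with nubSort above, which gives up the original order:
--
--   ordNub  [3,1,3,2,1] == [3,1,2]
--   nubSort [3,1,3,2,1] == [1,2,3]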
{-
************************************************************************
* *
@@ -754,6 +766,12 @@ spanEnd p l = go l [] [] l
| p x = go yes (x : rev_yes) rev_no xs
| otherwise = go xs [] (x : rev_yes ++ rev_no) xs
+-- | Get the last two elements in a list. Partial!
+{-# INLINE last2 #-}
+last2 :: [a] -> (a,a)
+last2 = foldl' (\(_,x2) x -> (x2,x)) (partialError,partialError)
+ where
+ partialError = panic "last2 - list length less than two"
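-- For example, last2 [1,2,3] == (2,3). Forcing either component of
-- last2 [] (or the first component of last2 [x]) hits the panic above,
-- hence the "Partial!" warning.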
snocView :: [a] -> Maybe ([a],a)
-- Split off the last element