-rw-r--r--  compiler/cmm/CmmCallConv.hs               56
-rw-r--r--  compiler/cmm/CmmLayoutStack.hs             2
-rw-r--r--  compiler/cmm/MkGraph.hs                  103
-rw-r--r--  compiler/codeGen/CgCallConv.hs            76
-rw-r--r--  compiler/codeGen/CgClosure.lhs             8
-rw-r--r--  compiler/codeGen/CgCon.lhs                 6
-rw-r--r--  compiler/codeGen/CgTailCall.lhs            5
-rw-r--r--  compiler/codeGen/StgCmmBind.hs             5
-rw-r--r--  compiler/codeGen/StgCmmExpr.hs             6
-rw-r--r--  compiler/codeGen/StgCmmForeign.hs          3
-rw-r--r--  compiler/codeGen/StgCmmHeap.hs            23
-rw-r--r--  compiler/codeGen/StgCmmLayout.hs          16
-rw-r--r--  compiler/codeGen/StgCmmMonad.hs           10
-rw-r--r--  compiler/ghc.mk                            2
-rw-r--r--  compiler/iface/BinIface.hs                 5
-rw-r--r--  compiler/llvmGen/LlvmCodeGen.hs            2
-rw-r--r--  compiler/llvmGen/LlvmCodeGen/Base.hs      21
-rw-r--r--  compiler/llvmGen/LlvmCodeGen/CodeGen.hs    9
-rw-r--r--  compiler/main/DynFlags.hs                 44
-rw-r--r--  compiler/main/StaticFlagParser.hs         15
-rw-r--r--  compiler/main/StaticFlags.hs               3
-rw-r--r--  compiler/main/SysTools.lhs                24
-rw-r--r--  compiler/utils/Platform.hs                 1
-rw-r--r--  configure.ac                              23
-rw-r--r--  ghc/Main.hs                               12
-rw-r--r--  mk/build.mk.sample                        27
-rw-r--r--  mk/config.mk.in                           15
-rw-r--r--  settings.in                                1
28 files changed, 272 insertions(+), 251 deletions(-)
diff --git a/compiler/cmm/CmmCallConv.hs b/compiler/cmm/CmmCallConv.hs
index a76ad6f00a..c92ad0fa08 100644
--- a/compiler/cmm/CmmCallConv.hs
+++ b/compiler/cmm/CmmCallConv.hs
@@ -20,8 +20,9 @@ import PprCmm ()
import Constants
import qualified Data.List as L
-import StaticFlags (opt_Unregisterised)
+import DynFlags
import Outputable
+import Platform
-- Calculate the 'GlobalReg' or stack locations for function call
-- parameters as used by the Cmm calling convention.
@@ -37,22 +38,22 @@ instance Outputable ParamLocation where
-- | JD: For the new stack story, I want arguments passed on the stack to manifest as
-- positive offsets in a CallArea, not negative offsets from the stack pointer.
-- Also, I want byte offsets, not word offsets.
-assignArgumentsPos :: Convention -> (a -> CmmType) -> [a] ->
+assignArgumentsPos :: DynFlags -> Convention -> (a -> CmmType) -> [a] ->
[(a, ParamLocation)]
-- Given a list of arguments, and a function that tells their types,
-- return a list showing where each argument is passed
-assignArgumentsPos conv arg_ty reps = assignments
+assignArgumentsPos dflags conv arg_ty reps = assignments
where -- The calling conventions (CgCallConv.hs) are complicated, to say the least
regs = case (reps, conv) of
- (_, NativeNodeCall) -> getRegsWithNode
- (_, NativeDirectCall) -> getRegsWithoutNode
+ (_, NativeNodeCall) -> getRegsWithNode dflags
+ (_, NativeDirectCall) -> getRegsWithoutNode dflags
([_], NativeReturn) -> allRegs
- (_, NativeReturn) -> getRegsWithNode
+ (_, NativeReturn) -> getRegsWithNode dflags
-- GC calling convention *must* put values in registers
(_, GC) -> allRegs
(_, PrimOpCall) -> allRegs
([_], PrimOpReturn) -> allRegs
- (_, PrimOpReturn) -> getRegsWithNode
+ (_, PrimOpReturn) -> getRegsWithNode dflags
(_, Slow) -> noRegs
-- The calling conventions first assign arguments to registers,
-- then switch to the stack when we first run out of registers
@@ -110,25 +111,34 @@ type AvailRegs = ( [VGcPtr -> GlobalReg] -- available vanilla regs.
-- We take these register supplies from the *real* registers, i.e. those
-- that are guaranteed to map to machine registers.
-vanillaRegNos, floatRegNos, doubleRegNos, longRegNos :: [Int]
-vanillaRegNos | opt_Unregisterised = []
- | otherwise = regList mAX_Real_Vanilla_REG
-floatRegNos | opt_Unregisterised = []
- | otherwise = regList mAX_Real_Float_REG
-doubleRegNos | opt_Unregisterised = []
- | otherwise = regList mAX_Real_Double_REG
-longRegNos | opt_Unregisterised = []
- | otherwise = regList mAX_Real_Long_REG
+vanillaRegNos, floatRegNos, doubleRegNos, longRegNos :: DynFlags -> [Int]
+vanillaRegNos dflags
+ | platformUnregisterised (targetPlatform dflags) = []
+ | otherwise = regList mAX_Real_Vanilla_REG
+floatRegNos dflags
+ | platformUnregisterised (targetPlatform dflags) = []
+ | otherwise = regList mAX_Real_Float_REG
+doubleRegNos dflags
+ | platformUnregisterised (targetPlatform dflags) = []
+ | otherwise = regList mAX_Real_Double_REG
+longRegNos dflags
+ | platformUnregisterised (targetPlatform dflags) = []
+ | otherwise = regList mAX_Real_Long_REG
--
-getRegsWithoutNode, getRegsWithNode :: AvailRegs
-getRegsWithoutNode =
+getRegsWithoutNode, getRegsWithNode :: DynFlags -> AvailRegs
+getRegsWithoutNode dflags =
(filter (\r -> r VGcPtr /= node) intRegs,
- map FloatReg floatRegNos, map DoubleReg doubleRegNos, map LongReg longRegNos)
- where intRegs = map VanillaReg vanillaRegNos
-getRegsWithNode =
- (intRegs, map FloatReg floatRegNos, map DoubleReg doubleRegNos, map LongReg longRegNos)
- where intRegs = map VanillaReg vanillaRegNos
+ map FloatReg (floatRegNos dflags),
+ map DoubleReg (doubleRegNos dflags),
+ map LongReg (longRegNos dflags))
+ where intRegs = map VanillaReg (vanillaRegNos dflags)
+getRegsWithNode dflags =
+ (intRegs,
+ map FloatReg (floatRegNos dflags),
+ map DoubleReg (doubleRegNos dflags),
+ map LongReg (longRegNos dflags))
+ where intRegs = map VanillaReg (vanillaRegNos dflags)
allFloatRegs, allDoubleRegs, allLongRegs :: [GlobalReg]
allVanillaRegs :: [VGcPtr -> GlobalReg]
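
The hunk above shows the pattern that recurs through the rest of this patch: the global opt_Unregisterised static flag is replaced by a platformUnregisterised field on the target Platform, and DynFlags is threaded to every function that needs to consult it. A minimal, self-contained sketch of that pattern (plain Haskell with stand-in types, not the GHC modules themselves; the [1..8] bound stands in for mAX_Real_Vanilla_REG):

    data Platform = Platform { platformUnregisterised :: Bool }
    data DynFlags = DynFlags { targetPlatform :: Platform }

    -- Register selection becomes a per-target query instead of a global flag.
    realVanillaRegNos :: DynFlags -> [Int]
    realVanillaRegNos dflags
      | platformUnregisterised (targetPlatform dflags) = []      -- unregisterised: no real registers
      | otherwise                                      = [1..8]  -- stand-in for mAX_Real_Vanilla_REG

    main :: IO ()
    main = print (realVanillaRegNos (DynFlags (Platform False)))

Callers that previously used the constant now fetch DynFlags first and pass it down, which is why so many signatures below gain a leading DynFlags argument.
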
diff --git a/compiler/cmm/CmmLayoutStack.hs b/compiler/cmm/CmmLayoutStack.hs
index 02c90e6adc..5aca286001 100644
--- a/compiler/cmm/CmmLayoutStack.hs
+++ b/compiler/cmm/CmmLayoutStack.hs
@@ -929,7 +929,7 @@ lowerSafeForeignCall dflags block
caller_load <*>
loadThreadState dflags load_tso load_stack
- (ret_args, regs, copyout) = copyOutOflow NativeReturn Jump (Young succ)
+ (ret_args, regs, copyout) = copyOutOflow dflags NativeReturn Jump (Young succ)
(map (CmmReg . CmmLocal) res)
updfr (0, [])
diff --git a/compiler/cmm/MkGraph.hs b/compiler/cmm/MkGraph.hs
index 4703b47f42..60704b5b32 100644
--- a/compiler/cmm/MkGraph.hs
+++ b/compiler/cmm/MkGraph.hs
@@ -24,6 +24,7 @@ import CmmCallConv (assignArgumentsPos, ParamLocation(..))
import Compiler.Hoopl hiding (Unique, (<*>), mkFirst, mkMiddle, mkLast, mkLabel, mkBranch, Shape(..))
+import DynFlags
import FastString
import ForeignCall
import Outputable
@@ -172,31 +173,35 @@ mkStore :: CmmExpr -> CmmExpr -> CmmAGraph
mkStore l r = mkMiddle $ CmmStore l r
---------- Control transfer
-mkJump :: CmmExpr -> [CmmActual] -> UpdFrameOffset -> CmmAGraph
-mkJump e actuals updfr_off =
- lastWithArgs Jump Old NativeNodeCall actuals updfr_off $
+mkJump :: DynFlags -> CmmExpr -> [CmmActual] -> UpdFrameOffset
+ -> CmmAGraph
+mkJump dflags e actuals updfr_off =
+ lastWithArgs dflags Jump Old NativeNodeCall actuals updfr_off $
toCall e Nothing updfr_off 0
-mkDirectJump :: CmmExpr -> [CmmActual] -> UpdFrameOffset -> CmmAGraph
-mkDirectJump e actuals updfr_off =
- lastWithArgs Jump Old NativeDirectCall actuals updfr_off $
+mkDirectJump :: DynFlags -> CmmExpr -> [CmmActual] -> UpdFrameOffset
+ -> CmmAGraph
+mkDirectJump dflags e actuals updfr_off =
+ lastWithArgs dflags Jump Old NativeDirectCall actuals updfr_off $
toCall e Nothing updfr_off 0
-mkJumpGC :: CmmExpr -> [CmmActual] -> UpdFrameOffset -> CmmAGraph
-mkJumpGC e actuals updfr_off =
- lastWithArgs Jump Old GC actuals updfr_off $
+mkJumpGC :: DynFlags -> CmmExpr -> [CmmActual] -> UpdFrameOffset
+ -> CmmAGraph
+mkJumpGC dflags e actuals updfr_off =
+ lastWithArgs dflags Jump Old GC actuals updfr_off $
toCall e Nothing updfr_off 0
-mkForeignJump :: Convention -> CmmExpr -> [CmmActual] -> UpdFrameOffset
+mkForeignJump :: DynFlags
+ -> Convention -> CmmExpr -> [CmmActual] -> UpdFrameOffset
-> CmmAGraph
-mkForeignJump conv e actuals updfr_off =
- mkForeignJumpExtra conv e actuals updfr_off noExtraStack
+mkForeignJump dflags conv e actuals updfr_off =
+ mkForeignJumpExtra dflags conv e actuals updfr_off noExtraStack
-mkForeignJumpExtra :: Convention -> CmmExpr -> [CmmActual]
+mkForeignJumpExtra :: DynFlags -> Convention -> CmmExpr -> [CmmActual]
-> UpdFrameOffset -> (ByteOff, [(CmmExpr, ByteOff)])
-> CmmAGraph
-mkForeignJumpExtra conv e actuals updfr_off extra_stack =
- lastWithArgsAndExtraStack Jump Old conv actuals updfr_off extra_stack $
+mkForeignJumpExtra dflags conv e actuals updfr_off extra_stack =
+ lastWithArgsAndExtraStack dflags Jump Old conv actuals updfr_off extra_stack $
toCall e Nothing updfr_off 0
mkCbranch :: CmmExpr -> BlockId -> BlockId -> CmmAGraph
@@ -205,45 +210,47 @@ mkCbranch pred ifso ifnot = mkLast (CmmCondBranch pred ifso ifnot)
mkSwitch :: CmmExpr -> [Maybe BlockId] -> CmmAGraph
mkSwitch e tbl = mkLast $ CmmSwitch e tbl
-mkReturn :: CmmExpr -> [CmmActual] -> UpdFrameOffset -> CmmAGraph
-mkReturn e actuals updfr_off =
- lastWithArgs Ret Old NativeReturn actuals updfr_off $
+mkReturn :: DynFlags -> CmmExpr -> [CmmActual] -> UpdFrameOffset
+ -> CmmAGraph
+mkReturn dflags e actuals updfr_off =
+ lastWithArgs dflags Ret Old NativeReturn actuals updfr_off $
toCall e Nothing updfr_off 0
-mkReturnSimple :: [CmmActual] -> UpdFrameOffset -> CmmAGraph
-mkReturnSimple actuals updfr_off =
- mkReturn e actuals updfr_off
+mkReturnSimple :: DynFlags -> [CmmActual] -> UpdFrameOffset -> CmmAGraph
+mkReturnSimple dflags actuals updfr_off =
+ mkReturn dflags e actuals updfr_off
where e = CmmLoad (CmmStackSlot Old updfr_off) gcWord
mkBranch :: BlockId -> CmmAGraph
mkBranch bid = mkLast (CmmBranch bid)
-mkFinalCall :: CmmExpr -> CCallConv -> [CmmActual] -> UpdFrameOffset
+mkFinalCall :: DynFlags
+ -> CmmExpr -> CCallConv -> [CmmActual] -> UpdFrameOffset
-> CmmAGraph
-mkFinalCall f _ actuals updfr_off =
- lastWithArgs Call Old NativeDirectCall actuals updfr_off $
+mkFinalCall dflags f _ actuals updfr_off =
+ lastWithArgs dflags Call Old NativeDirectCall actuals updfr_off $
toCall f Nothing updfr_off 0
-mkCallReturnsTo :: CmmExpr -> Convention -> [CmmActual]
+mkCallReturnsTo :: DynFlags -> CmmExpr -> Convention -> [CmmActual]
-> BlockId
-> ByteOff
-> UpdFrameOffset
-> (ByteOff, [(CmmExpr,ByteOff)])
-> CmmAGraph
-mkCallReturnsTo f callConv actuals ret_lbl ret_off updfr_off extra_stack = do
- lastWithArgsAndExtraStack Call (Young ret_lbl) callConv actuals
+mkCallReturnsTo dflags f callConv actuals ret_lbl ret_off updfr_off extra_stack = do
+ lastWithArgsAndExtraStack dflags Call (Young ret_lbl) callConv actuals
updfr_off extra_stack $
toCall f (Just ret_lbl) updfr_off ret_off
-- Like mkCallReturnsTo, but does not push the return address (it is assumed to be
-- already on the stack).
-mkJumpReturnsTo :: CmmExpr -> Convention -> [CmmActual]
+mkJumpReturnsTo :: DynFlags -> CmmExpr -> Convention -> [CmmActual]
-> BlockId
-> ByteOff
-> UpdFrameOffset
-> CmmAGraph
-mkJumpReturnsTo f callConv actuals ret_lbl ret_off updfr_off = do
- lastWithArgs JumpRet (Young ret_lbl) callConv actuals updfr_off $
+mkJumpReturnsTo dflags f callConv actuals ret_lbl ret_off updfr_off = do
+ lastWithArgs dflags JumpRet (Young ret_lbl) callConv actuals updfr_off $
toCall f (Just ret_lbl) updfr_off ret_off
mkUnsafeCall :: ForeignTarget -> [CmmFormal] -> [CmmActual] -> CmmAGraph
@@ -269,25 +276,26 @@ stackStubExpr w = CmmLit (CmmInt 0 w)
-- the variables in their spill slots.
-- Therefore, for copying arguments and results, we provide different
-- functions to pass the arguments in an overflow area and to pass them in spill slots.
-copyInOflow :: Convention -> Area -> [CmmFormal] -> (Int, CmmAGraph)
+copyInOflow :: DynFlags -> Convention -> Area -> [CmmFormal]
+ -> (Int, CmmAGraph)
-copyInOflow conv area formals = (offset, catAGraphs $ map mkMiddle nodes)
- where (offset, nodes) = copyIn oneCopyOflowI conv area formals
+copyInOflow dflags conv area formals = (offset, catAGraphs $ map mkMiddle nodes)
+ where (offset, nodes) = copyIn dflags oneCopyOflowI conv area formals
type SlotCopier = Area -> (LocalReg, ByteOff) -> (ByteOff, [CmmNode O O]) ->
(ByteOff, [CmmNode O O])
-type CopyIn = SlotCopier -> Convention -> Area -> [CmmFormal] -> (ByteOff, [CmmNode O O])
+type CopyIn = DynFlags -> SlotCopier -> Convention -> Area -> [CmmFormal] -> (ByteOff, [CmmNode O O])
-- Return the number of bytes used for copying arguments, as well as the
-- instructions to copy the arguments.
copyIn :: CopyIn
-copyIn oflow conv area formals =
+copyIn dflags oflow conv area formals =
foldr ci (init_offset, []) args'
where ci (reg, RegisterParam r) (n, ms) =
(n, CmmAssign (CmmLocal reg) (CmmReg $ CmmGlobal r) : ms)
ci (r, StackParam off) (n, ms) = oflow area (r, off) (n, ms)
init_offset = widthInBytes wordWidth -- infotable
- args = assignArgumentsPos conv localRegType formals
+ args = assignArgumentsPos dflags conv localRegType formals
args' = foldl adjust [] args
where adjust rst (v, StackParam off) = (v, StackParam (off + init_offset)) : rst
adjust rst x@(_, RegisterParam _) = x : rst
@@ -303,7 +311,7 @@ oneCopyOflowI area (reg, off) (n, ms) =
data Transfer = Call | JumpRet | Jump | Ret deriving Eq
-copyOutOflow :: Convention -> Transfer -> Area -> [CmmActual]
+copyOutOflow :: DynFlags -> Convention -> Transfer -> Area -> [CmmActual]
-> UpdFrameOffset
-> (ByteOff, [(CmmExpr,ByteOff)]) -- extra stack stuff
-> (Int, [GlobalReg], CmmAGraph)
@@ -317,7 +325,7 @@ copyOutOflow :: Convention -> Transfer -> Area -> [CmmActual]
-- the info table for return and adjust the offsets of the other
-- parameters. If this is a call instruction, we adjust the offsets
-- of the other parameters.
-copyOutOflow conv transfer area actuals updfr_off
+copyOutOflow dflags conv transfer area actuals updfr_off
(extra_stack_off, extra_stack_stuff)
= foldr co (init_offset, [], mkNop) (args' ++ stack_params)
where
@@ -347,7 +355,7 @@ copyOutOflow conv transfer area actuals updfr_off
arg_offset = init_offset + extra_stack_off
args :: [(CmmExpr, ParamLocation)] -- The argument and where to put it
- args = assignArgumentsPos conv cmmExprType actuals
+ args = assignArgumentsPos dflags conv cmmExprType actuals
args' = foldl adjust setRA args
where adjust rst (v, StackParam off) = (v, StackParam (off + arg_offset)) : rst
@@ -355,26 +363,27 @@ copyOutOflow conv transfer area actuals updfr_off
-mkCallEntry :: Convention -> [CmmFormal] -> (Int, CmmAGraph)
-mkCallEntry conv formals = copyInOflow conv Old formals
+mkCallEntry :: DynFlags -> Convention -> [CmmFormal] -> (Int, CmmAGraph)
+mkCallEntry dflags conv formals = copyInOflow dflags conv Old formals
-lastWithArgs :: Transfer -> Area -> Convention -> [CmmActual]
+lastWithArgs :: DynFlags -> Transfer -> Area -> Convention -> [CmmActual]
-> UpdFrameOffset
-> (ByteOff -> [GlobalReg] -> CmmAGraph)
-> CmmAGraph
-lastWithArgs transfer area conv actuals updfr_off last =
- lastWithArgsAndExtraStack transfer area conv actuals
+lastWithArgs dflags transfer area conv actuals updfr_off last =
+ lastWithArgsAndExtraStack dflags transfer area conv actuals
updfr_off noExtraStack last
-lastWithArgsAndExtraStack :: Transfer -> Area -> Convention -> [CmmActual]
+lastWithArgsAndExtraStack :: DynFlags
+ -> Transfer -> Area -> Convention -> [CmmActual]
-> UpdFrameOffset -> (ByteOff, [(CmmExpr,ByteOff)])
-> (ByteOff -> [GlobalReg] -> CmmAGraph)
-> CmmAGraph
-lastWithArgsAndExtraStack transfer area conv actuals updfr_off
+lastWithArgsAndExtraStack dflags transfer area conv actuals updfr_off
extra_stack last =
copies <*> last outArgs regs
where
- (outArgs, regs, copies) = copyOutOflow conv transfer area actuals
+ (outArgs, regs, copies) = copyOutOflow dflags conv transfer area actuals
updfr_off extra_stack
diff --git a/compiler/codeGen/CgCallConv.hs b/compiler/codeGen/CgCallConv.hs
index 332ec0746a..9443e0e936 100644
--- a/compiler/codeGen/CgCallConv.hs
+++ b/compiler/codeGen/CgCallConv.hs
@@ -43,10 +43,10 @@ import Id
import Name
import Util
import DynFlags
-import StaticFlags
import Module
import FastString
import Outputable
+import Platform
import Data.Bits
-------------------------------------------------------------------------
@@ -255,16 +255,19 @@ getSequelAmode
-- registers. This is used for calling special RTS functions and PrimOps
-- which expect their arguments to always be in the same registers.
-assignCallRegs, assignPrimOpCallRegs, assignReturnRegs
- :: [(CgRep,a)] -- Arg or result values to assign
- -> ([(a, GlobalReg)], -- Register assignment in same order
- -- for *initial segment of* input list
- -- (but reversed; doesn't matter)
- -- VoidRep args do not appear here
- [(CgRep,a)]) -- Leftover arg or result values
+type AssignRegs a = [(CgRep,a)] -- Arg or result values to assign
+ -> ([(a, GlobalReg)], -- Register assignment in same order
+ -- for *initial segment of* input list
+ -- (but reversed; doesn't matter)
+ -- VoidRep args do not appear here
+ [(CgRep,a)]) -- Leftover arg or result values
-assignCallRegs args
- = assign_regs args (mkRegTbl [node])
+assignCallRegs :: DynFlags -> AssignRegs a
+assignPrimOpCallRegs :: AssignRegs a
+assignReturnRegs :: DynFlags -> AssignRegs a
+
+assignCallRegs dflags args
+ = assign_regs args (mkRegTbl dflags [node])
-- The entry convention for a function closure
-- never uses Node for argument passing; instead
-- Node points to the function closure itself
@@ -273,7 +276,7 @@ assignPrimOpCallRegs args
= assign_regs args (mkRegTbl_allRegs [])
-- For primops, *all* arguments must be passed in registers
-assignReturnRegs args
+assignReturnRegs dflags args
-- when we have a single non-void component to return, use the normal
-- unpointed return convention. This make various things simpler: it
-- means we can assume a consistent convention for IO, which is useful
@@ -285,7 +288,7 @@ assignReturnRegs args
| [(rep,arg)] <- non_void_args, CmmGlobal r <- dataReturnConvPrim rep
= ([(arg, r)], [])
| otherwise
- = assign_regs args (mkRegTbl [])
+ = assign_regs args (mkRegTbl dflags [])
-- For returning unboxed tuples etc,
-- we use all regs
where
@@ -327,24 +330,28 @@ assign_reg _ _ = Nothing
-- We take these register supplies from the *real* registers, i.e. those
-- that are guaranteed to map to machine registers.
-useVanillaRegs :: Int
-useVanillaRegs | opt_Unregisterised = 0
- | otherwise = mAX_Real_Vanilla_REG
-useFloatRegs :: Int
-useFloatRegs | opt_Unregisterised = 0
- | otherwise = mAX_Real_Float_REG
-useDoubleRegs :: Int
-useDoubleRegs | opt_Unregisterised = 0
- | otherwise = mAX_Real_Double_REG
-useLongRegs :: Int
-useLongRegs | opt_Unregisterised = 0
- | otherwise = mAX_Real_Long_REG
-
-vanillaRegNos, floatRegNos, doubleRegNos, longRegNos :: [Int]
-vanillaRegNos = regList useVanillaRegs
-floatRegNos = regList useFloatRegs
-doubleRegNos = regList useDoubleRegs
-longRegNos = regList useLongRegs
+useVanillaRegs :: DynFlags -> Int
+useVanillaRegs dflags
+ | platformUnregisterised (targetPlatform dflags) = 0
+ | otherwise = mAX_Real_Vanilla_REG
+useFloatRegs :: DynFlags -> Int
+useFloatRegs dflags
+ | platformUnregisterised (targetPlatform dflags) = 0
+ | otherwise = mAX_Real_Float_REG
+useDoubleRegs :: DynFlags -> Int
+useDoubleRegs dflags
+ | platformUnregisterised (targetPlatform dflags) = 0
+ | otherwise = mAX_Real_Double_REG
+useLongRegs :: DynFlags -> Int
+useLongRegs dflags
+ | platformUnregisterised (targetPlatform dflags) = 0
+ | otherwise = mAX_Real_Long_REG
+
+vanillaRegNos, floatRegNos, doubleRegNos, longRegNos :: DynFlags -> [Int]
+vanillaRegNos dflags = regList $ useVanillaRegs dflags
+floatRegNos dflags = regList $ useFloatRegs dflags
+doubleRegNos dflags = regList $ useDoubleRegs dflags
+longRegNos dflags = regList $ useLongRegs dflags
allVanillaRegNos, allFloatRegNos, allDoubleRegNos, allLongRegNos :: [Int]
allVanillaRegNos = regList mAX_Vanilla_REG
@@ -361,9 +368,12 @@ type AvailRegs = ( [Int] -- available vanilla regs.
, [Int] -- longs (int64 and word64)
)
-mkRegTbl :: [GlobalReg] -> AvailRegs
-mkRegTbl regs_in_use
- = mkRegTbl' regs_in_use vanillaRegNos floatRegNos doubleRegNos longRegNos
+mkRegTbl :: DynFlags -> [GlobalReg] -> AvailRegs
+mkRegTbl dflags regs_in_use
+ = mkRegTbl' regs_in_use (vanillaRegNos dflags)
+ (floatRegNos dflags)
+ (doubleRegNos dflags)
+ (longRegNos dflags)
mkRegTbl_allRegs :: [GlobalReg] -> AvailRegs
mkRegTbl_allRegs regs_in_use
diff --git a/compiler/codeGen/CgClosure.lhs b/compiler/codeGen/CgClosure.lhs
index 053314b966..f1da2d4235 100644
--- a/compiler/codeGen/CgClosure.lhs
+++ b/compiler/codeGen/CgClosure.lhs
@@ -273,10 +273,12 @@ Node points to closure is available. -- HWL
\begin{code}
closureCodeBody _binder_info cl_info cc args body
= ASSERT( length args > 0 )
- do { -- Get the current virtual Sp (it might not be zero,
+ do {
+ dflags <- getDynFlags
+ -- Get the current virtual Sp (it might not be zero,
-- eg. if we're compiling a let-no-escape).
- vSp <- getVirtSp
- ; let (reg_args, other_args) = assignCallRegs (addIdReps args)
+ ; vSp <- getVirtSp
+ ; let (reg_args, other_args) = assignCallRegs dflags (addIdReps args)
(sp_top, stk_args) = mkVirtStkOffsets vSp other_args
-- Allocate the global ticky counter
diff --git a/compiler/codeGen/CgCon.lhs b/compiler/codeGen/CgCon.lhs
index 15347de060..4c451ec339 100644
--- a/compiler/codeGen/CgCon.lhs
+++ b/compiler/codeGen/CgCon.lhs
@@ -271,11 +271,13 @@ bindUnboxedTupleComponents
bindUnboxedTupleComponents args
= do {
- vsp <- getVirtSp
+ dflags <- getDynFlags
+
+ ; vsp <- getVirtSp
; rsp <- getRealSp
-- Assign as many components as possible to registers
- ; let (reg_args, stk_args) = assignReturnRegs (addIdReps args)
+ ; let (reg_args, stk_args) = assignReturnRegs dflags (addIdReps args)
-- Separate the rest of the args into pointers and non-pointers
(ptr_args, nptr_args) = separateByPtrFollowness stk_args
diff --git a/compiler/codeGen/CgTailCall.lhs b/compiler/codeGen/CgTailCall.lhs
index 6f98e4a09c..6db1b46d77 100644
--- a/compiler/codeGen/CgTailCall.lhs
+++ b/compiler/codeGen/CgTailCall.lhs
@@ -255,7 +255,7 @@ directCall sp lbl args extra_args live_node assts = do
dflags <- getDynFlags
let
-- First chunk of args go in registers
- (reg_arg_amodes, stk_args) = assignCallRegs args
+ (reg_arg_amodes, stk_args) = assignCallRegs dflags args
-- Any "extra" arguments are placed in frames on the
-- stack after the other arguments.
@@ -354,7 +354,8 @@ pushUnboxedTuple :: VirtualSpOffset -- Sp at which to start pushing
pushUnboxedTuple sp []
= return (sp, noStmts, [])
pushUnboxedTuple sp amodes
- = do { let (reg_arg_amodes, stk_arg_amodes) = assignReturnRegs amodes
+ = do { dflags <- getDynFlags
+ ; let (reg_arg_amodes, stk_arg_amodes) = assignReturnRegs dflags amodes
live_regs = map snd reg_arg_amodes
-- separate the rest of the args into pointers and non-pointers
diff --git a/compiler/codeGen/StgCmmBind.hs b/compiler/codeGen/StgCmmBind.hs
index a38078a1c8..cb2b41d852 100644
--- a/compiler/codeGen/StgCmmBind.hs
+++ b/compiler/codeGen/StgCmmBind.hs
@@ -470,7 +470,8 @@ mkSlowEntryCode cl_info arg_regs -- function closure is already in `Node'
let slow_lbl = closureSlowEntryLabel cl_info
fast_lbl = closureLocalEntryLabel dflags cl_info
-- mkDirectJump does not clobber `Node' containing function closure
- jump = mkDirectJump (mkLblExpr fast_lbl)
+ jump = mkDirectJump dflags
+ (mkLblExpr fast_lbl)
(map (CmmReg . CmmLocal) arg_regs)
initUpdFrameOff
emitProcWithConvention Slow Nothing slow_lbl arg_regs jump
@@ -680,7 +681,7 @@ link_caf _is_upd = do
-- assuming lots of things, like the stack pointer hasn't
-- moved since we entered the CAF.
(let target = entryCode dflags (closureInfoPtr (CmmReg nodeReg)) in
- mkJump target [] updfr)
+ mkJump dflags target [] updfr)
; return hp_rel }
diff --git a/compiler/codeGen/StgCmmExpr.hs b/compiler/codeGen/StgCmmExpr.hs
index 35533ec933..1d016d6b3d 100644
--- a/compiler/codeGen/StgCmmExpr.hs
+++ b/compiler/codeGen/StgCmmExpr.hs
@@ -674,7 +674,7 @@ emitEnter fun = do
-- test, just generating an enter.
Return _ -> do
{ let entry = entryCode dflags $ closureInfoPtr $ CmmReg nodeReg
- ; emit $ mkForeignJump NativeNodeCall entry
+ ; emit $ mkForeignJump dflags NativeNodeCall entry
[cmmUntag fun] updfr_off
; return AssignedDirectly
}
@@ -706,11 +706,11 @@ emitEnter fun = do
--
AssignTo res_regs _ -> do
{ lret <- newLabelC
- ; let (off, copyin) = copyInOflow NativeReturn (Young lret) res_regs
+ ; let (off, copyin) = copyInOflow dflags NativeReturn (Young lret) res_regs
; lcall <- newLabelC
; updfr_off <- getUpdFrameOff
; let area = Young lret
- ; let (outArgs, regs, copyout) = copyOutOflow NativeNodeCall Call area
+ ; let (outArgs, regs, copyout) = copyOutOflow dflags NativeNodeCall Call area
[fun] updfr_off (0,[])
-- refer to fun via nodeReg after the copyout, to avoid having
-- both live simultaneously; this sometimes enables fun to be
diff --git a/compiler/codeGen/StgCmmForeign.hs b/compiler/codeGen/StgCmmForeign.hs
index 8fec067288..3976dee6f8 100644
--- a/compiler/codeGen/StgCmmForeign.hs
+++ b/compiler/codeGen/StgCmmForeign.hs
@@ -214,10 +214,11 @@ emitForeignCall safety results target args _ret
return AssignedDirectly
| otherwise = do
+ dflags <- getDynFlags
updfr_off <- getUpdFrameOff
temp_target <- load_target_into_temp target
k <- newLabelC
- let (off, copyout) = copyInOflow NativeReturn (Young k) results
+ let (off, copyout) = copyInOflow dflags NativeReturn (Young k) results
-- see Note [safe foreign call convention]
emit $
( mkStore (CmmStackSlot (Young k) (widthInBytes wordWidth))
diff --git a/compiler/codeGen/StgCmmHeap.hs b/compiler/codeGen/StgCmmHeap.hs
index ddb6dd01e4..d3bf17f7d7 100644
--- a/compiler/codeGen/StgCmmHeap.hs
+++ b/compiler/codeGen/StgCmmHeap.hs
@@ -348,7 +348,8 @@ entryHeapCheck :: ClosureInfo
-> FCode ()
entryHeapCheck cl_info offset nodeSet arity args code
- = do let is_thunk = arity == 0
+ = do dflags <- getDynFlags
+ let is_thunk = arity == 0
is_fastf = case closureFunInfo cl_info of
Just (_, ArgGen _) -> False
_otherwise -> True
@@ -365,9 +366,9 @@ entryHeapCheck cl_info offset nodeSet arity args code
Function (slow): Set R1 = node, call generic_gc -}
gc_call upd = setN <*> gc_lbl upd
gc_lbl upd
- | is_thunk = mkDirectJump (CmmReg $ CmmGlobal GCEnter1) [] sp
- | is_fastf = mkDirectJump (CmmReg $ CmmGlobal GCFun) [] sp
- | otherwise = mkForeignJump Slow (CmmReg $ CmmGlobal GCFun) args' upd
+ | is_thunk = mkDirectJump dflags (CmmReg $ CmmGlobal GCEnter1) [] sp
+ | is_fastf = mkDirectJump dflags (CmmReg $ CmmGlobal GCFun) [] sp
+ | otherwise = mkForeignJump dflags Slow (CmmReg $ CmmGlobal GCFun) args' upd
where sp = max offset upd
{- DT (12/08/10) This is a little fishy, mainly the sp fix up amount.
- This is since the ncg inserts spills before the stack/heap check.
@@ -447,8 +448,9 @@ altHeapCheck regs code
= case cannedGCEntryPoint regs of
Nothing -> genericGC code
Just gc -> do
+ dflags <- getDynFlags
lret <- newLabelC
- let (off, copyin) = copyInOflow NativeReturn (Young lret) regs
+ let (off, copyin) = copyInOflow dflags NativeReturn (Young lret) regs
lcont <- newLabelC
emitOutOfLine lret (copyin <*> mkBranch lcont)
emitLabel lcont
@@ -464,15 +466,16 @@ cannedGCReturnsTo :: Bool -> CmmExpr -> [LocalReg] -> Label -> ByteOff
-> FCode a
-> FCode a
cannedGCReturnsTo cont_on_stack gc regs lret off code
- = do updfr_sz <- getUpdFrameOff
- heapCheck False (gc_call gc updfr_sz) code
+ = do dflags <- getDynFlags
+ updfr_sz <- getUpdFrameOff
+ heapCheck False (gc_call dflags gc updfr_sz) code
where
reg_exprs = map (CmmReg . CmmLocal) regs
-- Note [stg_gc arguments]
- gc_call label sp
- | cont_on_stack = mkJumpReturnsTo label GC reg_exprs lret off sp
- | otherwise = mkCallReturnsTo label GC reg_exprs lret off sp (0,[])
+ gc_call dflags label sp
+ | cont_on_stack = mkJumpReturnsTo dflags label GC reg_exprs lret off sp
+ | otherwise = mkCallReturnsTo dflags label GC reg_exprs lret off sp (0,[])
genericGC :: FCode a -> FCode a
genericGC code
diff --git a/compiler/codeGen/StgCmmLayout.hs b/compiler/codeGen/StgCmmLayout.hs
index 4e2b478f77..e20e4a29bd 100644
--- a/compiler/codeGen/StgCmmLayout.hs
+++ b/compiler/codeGen/StgCmmLayout.hs
@@ -78,12 +78,13 @@ import FastString
--
emitReturn :: [CmmExpr] -> FCode ReturnKind
emitReturn results
- = do { sequel <- getSequel;
+ = do { dflags <- getDynFlags
+ ; sequel <- getSequel
; updfr_off <- getUpdFrameOff
; case sequel of
Return _ ->
do { adjustHpBackwards
- ; emit (mkReturnSimple results updfr_off) }
+ ; emit (mkReturnSimple dflags results updfr_off) }
AssignTo regs adjust ->
do { if adjust then adjustHpBackwards else return ()
; emitMultiAssign regs results }
@@ -109,18 +110,19 @@ emitCallWithExtraStack
:: (Convention, Convention) -> CmmExpr -> [CmmExpr]
-> (ByteOff, [(CmmExpr,ByteOff)]) -> FCode ReturnKind
emitCallWithExtraStack (callConv, retConv) fun args extra_stack
- = do { adjustHpBackwards
+ = do { dflags <- getDynFlags
+ ; adjustHpBackwards
; sequel <- getSequel
; updfr_off <- getUpdFrameOff
; case sequel of
Return _ -> do
- emit $ mkForeignJumpExtra callConv fun args updfr_off extra_stack
+ emit $ mkForeignJumpExtra dflags callConv fun args updfr_off extra_stack
return AssignedDirectly
AssignTo res_regs _ -> do
k <- newLabelC
let area = Young k
- (off, copyin) = copyInOflow retConv area res_regs
- copyout = mkCallReturnsTo fun callConv args k off updfr_off
+ (off, copyin) = copyInOflow dflags retConv area res_regs
+ copyout = mkCallReturnsTo dflags fun callConv args k off updfr_off
extra_stack
emit (copyout <*> mkLabel k <*> copyin)
return (ReturnedTo k off)
@@ -537,7 +539,7 @@ emitClosureProcAndInfoTable top_lvl bndr lf_info info_tbl args body
; let args' = if node_points then (node : arg_regs) else arg_regs
conv = if nodeMustPointToIt dflags lf_info then NativeNodeCall
else NativeDirectCall
- (offset, _) = mkCallEntry conv args'
+ (offset, _) = mkCallEntry dflags conv args'
; emitClosureAndInfoTable info_tbl conv args' $ body (offset, node, arg_regs)
}
diff --git a/compiler/codeGen/StgCmmMonad.hs b/compiler/codeGen/StgCmmMonad.hs
index 3d34cb9bdd..1819e44bb6 100644
--- a/compiler/codeGen/StgCmmMonad.hs
+++ b/compiler/codeGen/StgCmmMonad.hs
@@ -725,8 +725,9 @@ emitOutOfLine l stmts = emitCgStmt (CgFork l stmts)
emitProcWithConvention :: Convention -> Maybe CmmInfoTable -> CLabel
-> [CmmFormal] -> CmmAGraph -> FCode ()
emitProcWithConvention conv mb_info lbl args blocks
- = do { us <- newUniqSupply
- ; let (offset, entry) = mkCallEntry conv args
+ = do { dflags <- getDynFlags
+ ; us <- newUniqSupply
+ ; let (offset, entry) = mkCallEntry dflags conv args
blks = initUs_ us $ lgraphOfAGraph $ entry <*> blocks
; let sinfo = StackInfo {arg_space = offset, updfr_space = Just initUpdFrameOff}
tinfo = TopInfo {info_tbls = infos, stack_info=sinfo}
@@ -783,10 +784,11 @@ mkCmmIfThen e tbranch = do
mkCall :: CmmExpr -> (Convention, Convention) -> [CmmFormal] -> [CmmActual]
-> UpdFrameOffset -> (ByteOff,[(CmmExpr,ByteOff)]) -> FCode CmmAGraph
mkCall f (callConv, retConv) results actuals updfr_off extra_stack = do
+ dflags <- getDynFlags
k <- newLabelC
let area = Young k
- (off, copyin) = copyInOflow retConv area results
- copyout = mkCallReturnsTo f callConv actuals k off updfr_off extra_stack
+ (off, copyin) = copyInOflow dflags retConv area results
+ copyout = mkCallReturnsTo dflags f callConv actuals k off updfr_off extra_stack
return (copyout <*> mkLabel k <*> copyin)
mkCmmCall :: CmmExpr -> [CmmFormal] -> [CmmActual] -> UpdFrameOffset
diff --git a/compiler/ghc.mk b/compiler/ghc.mk
index de191eaa10..be2b631617 100644
--- a/compiler/ghc.mk
+++ b/compiler/ghc.mk
@@ -91,8 +91,6 @@ endif
@echo 'cGhcWithSMP = "$(GhcWithSMP)"' >> $@
@echo 'cGhcRTSWays :: String' >> $@
@echo 'cGhcRTSWays = "$(GhcRTSWays)"' >> $@
- @echo 'cGhcUnregisterised :: String' >> $@
- @echo 'cGhcUnregisterised = "$(GhcUnregisterised)"' >> $@
@echo 'cGhcEnableTablesNextToCode :: String' >> $@
@echo 'cGhcEnableTablesNextToCode = "$(GhcEnableTablesNextToCode)"' >> $@
@echo 'cLeadingUnderscore :: String' >> $@
diff --git a/compiler/iface/BinIface.hs b/compiler/iface/BinIface.hs
index 9298a0381a..965b1a96c3 100644
--- a/compiler/iface/BinIface.hs
+++ b/compiler/iface/BinIface.hs
@@ -44,7 +44,6 @@ import Panic
import Binary
import SrcLoc
import ErrUtils
-import Config
import FastMutInt
import Unique
import Outputable
@@ -572,8 +571,8 @@ instance Binary ModIface where
getWayDescr :: DynFlags -> String
getWayDescr dflags
- | cGhcUnregisterised == "YES" = 'u':tag
- | otherwise = tag
+ | platformUnregisterised (targetPlatform dflags) = 'u':tag
+ | otherwise = tag
where tag = buildTag dflags
-- if this is an unregisterised build, make sure our interfaces
-- can't be used by a registerised build.
diff --git a/compiler/llvmGen/LlvmCodeGen.hs b/compiler/llvmGen/LlvmCodeGen.hs
index a4c48058bb..a813433f64 100644
--- a/compiler/llvmGen/LlvmCodeGen.hs
+++ b/compiler/llvmGen/LlvmCodeGen.hs
@@ -45,7 +45,7 @@ llvmCodeGen dflags h us cmms
let lbl = strCLabel_llvm env $ case topInfoTable p of
Nothing -> l
Just (Statics info_lbl _) -> info_lbl
- env' = funInsert lbl llvmFunTy e
+ env' = funInsert lbl (llvmFunTy dflags) e
in (d,env')
in do
showPass dflags "LlVM CodeGen"
diff --git a/compiler/llvmGen/LlvmCodeGen/Base.hs b/compiler/llvmGen/LlvmCodeGen/Base.hs
index 19ca511f16..a9dfebb868 100644
--- a/compiler/llvmGen/LlvmCodeGen/Base.hs
+++ b/compiler/llvmGen/LlvmCodeGen/Base.hs
@@ -31,7 +31,6 @@ import LlvmCodeGen.Regs
import CLabel
import CgUtils ( activeStgRegs )
-import Config
import Constants
import DynFlags
import FastString
@@ -84,23 +83,25 @@ widthToLlvmInt :: Width -> LlvmType
widthToLlvmInt w = LMInt $ widthInBits w
-- | GHC Call Convention for LLVM
-llvmGhcCC :: LlvmCallConvention
-llvmGhcCC | cGhcUnregisterised == "NO" = CC_Ncc 10
- | otherwise = CC_Ccc
+llvmGhcCC :: DynFlags -> LlvmCallConvention
+llvmGhcCC dflags
+ | platformUnregisterised (targetPlatform dflags) = CC_Ncc 10
+ | otherwise = CC_Ccc
-- | Llvm Function type for Cmm function
-llvmFunTy :: LlvmType
-llvmFunTy = LMFunction $ llvmFunSig' (fsLit "a") ExternallyVisible
+llvmFunTy :: DynFlags -> LlvmType
+llvmFunTy dflags = LMFunction $ llvmFunSig' dflags (fsLit "a") ExternallyVisible
-- | Llvm Function signature
llvmFunSig :: LlvmEnv -> CLabel -> LlvmLinkageType -> LlvmFunctionDecl
-llvmFunSig env lbl link = llvmFunSig' (strCLabel_llvm env lbl) link
+llvmFunSig env lbl link
+ = llvmFunSig' (getDflags env) (strCLabel_llvm env lbl) link
-llvmFunSig' :: LMString -> LlvmLinkageType -> LlvmFunctionDecl
-llvmFunSig' lbl link
+llvmFunSig' :: DynFlags -> LMString -> LlvmLinkageType -> LlvmFunctionDecl
+llvmFunSig' dflags lbl link
= let toParams x | isPointer x = (x, [NoAlias, NoCapture])
| otherwise = (x, [])
- in LlvmFunctionDecl lbl link llvmGhcCC LMVoid FixedArgs
+ in LlvmFunctionDecl lbl link (llvmGhcCC dflags) LMVoid FixedArgs
(map (toParams . getVarType) llvmFunArgs) llvmFunAlign
-- | Create a Haskell function in LLVM.
diff --git a/compiler/llvmGen/LlvmCodeGen/CodeGen.hs b/compiler/llvmGen/LlvmCodeGen/CodeGen.hs
index 2a2104dac1..0bd1bb70ce 100644
--- a/compiler/llvmGen/LlvmCodeGen/CodeGen.hs
+++ b/compiler/llvmGen/LlvmCodeGen/CodeGen.hs
@@ -516,7 +516,7 @@ genJump env (CmmLit (CmmLabel lbl)) live = do
-- Call to unknown function / address
genJump env expr live = do
- let fty = llvmFunTy
+ let fty = llvmFunTy (getDflags env)
(env', vf, stmts, top) <- exprToVar env expr
let cast = case getVarType vf of
@@ -1293,7 +1293,8 @@ trashStmts = concatOL $ map trashReg activeStgRegs
-- with foreign functions.
getHsFunc :: LlvmEnv -> CLabel -> UniqSM ExprData
getHsFunc env lbl
- = let fn = strCLabel_llvm env lbl
+ = let dflags = getDflags env
+ fn = strCLabel_llvm env lbl
ty = funLookup fn env
in case ty of
-- Function in module in right form
@@ -1305,8 +1306,8 @@ getHsFunc env lbl
Just ty' -> do
let fun = LMGlobalVar fn (pLift ty') ExternallyVisible
Nothing Nothing False
- (v1, s1) <- doExpr (pLift llvmFunTy) $
- Cast LM_Bitcast fun (pLift llvmFunTy)
+ (v1, s1) <- doExpr (pLift (llvmFunTy dflags)) $
+ Cast LM_Bitcast fun (pLift (llvmFunTy dflags))
return (env, v1, unitOL s1, [])
-- label not in module, create external reference
diff --git a/compiler/main/DynFlags.hs b/compiler/main/DynFlags.hs
index c528402b7a..8abe664aa0 100644
--- a/compiler/main/DynFlags.hs
+++ b/compiler/main/DynFlags.hs
@@ -46,7 +46,7 @@ module DynFlags (
DynLibLoader(..),
fFlags, fWarningFlags, fLangFlags, xFlags,
wayNames, dynFlagDependencies,
- tablesNextToCode,
+ tablesNextToCode, mkTablesNextToCode,
printOutputForUser, printInfoForUser,
@@ -871,25 +871,28 @@ data PackageFlag
| DistrustPackage String
deriving Eq
-defaultHscTarget :: HscTarget
+defaultHscTarget :: Platform -> HscTarget
defaultHscTarget = defaultObjectTarget
-- | The 'HscTarget' value corresponding to the default way to create
-- object files on the current platform.
-defaultObjectTarget :: HscTarget
-defaultObjectTarget
- | cGhcUnregisterised == "YES" = HscC
+defaultObjectTarget :: Platform -> HscTarget
+defaultObjectTarget platform
+ | platformUnregisterised platform = HscC
| cGhcWithNativeCodeGen == "YES" = HscAsm
| otherwise = HscLlvm
--- Derived, not a real option. Determines whether we will be compiling
+tablesNextToCode :: DynFlags -> Bool
+tablesNextToCode dflags
+ = mkTablesNextToCode (platformUnregisterised (targetPlatform dflags))
+
+-- Determines whether we will be compiling
-- info tables that reside just before the entry code, or with an
-- indirection to the entry code. See TABLES_NEXT_TO_CODE in
-- includes/rts/storage/InfoTables.h.
-tablesNextToCode :: DynFlags -> Bool
-tablesNextToCode _ = not opt_Unregisterised
- && cGhcEnableTablesNextToCode == "YES"
-
+mkTablesNextToCode :: Bool -> Bool
+mkTablesNextToCode unregisterised
+ = not unregisterised && cGhcEnableTablesNextToCode == "YES"
data DynLibLoader
= Deployable
@@ -925,7 +928,7 @@ defaultDynFlags mySettings =
DynFlags {
ghcMode = CompManager,
ghcLink = LinkBinary,
- hscTarget = defaultHscTarget,
+ hscTarget = defaultHscTarget (sTargetPlatform mySettings),
hscOutName = "",
extCoreName = "",
verbosity = 0,
@@ -1866,7 +1869,7 @@ dynamic_flags = [
, Flag "fno-code" (NoArg (do upd $ \d -> d{ ghcLink=NoLink }
setTarget HscNothing))
, Flag "fbyte-code" (NoArg (setTarget HscInterpreted))
- , Flag "fobject-code" (NoArg (setTarget defaultHscTarget))
+ , Flag "fobject-code" (NoArg (setTargetWithPlatform defaultHscTarget))
, Flag "fglasgow-exts" (NoArg (enableGlasgowExts >> deprecate "Use individual extensions instead"))
, Flag "fno-glasgow-exts" (NoArg (disableGlasgowExts >> deprecate "Use individual extensions instead"))
@@ -2637,11 +2640,15 @@ setPackageName p s = s{ thisPackage = stringToPackageId p }
-- If we're linking a binary, then only targets that produce object
-- code are allowed (requests for other target types are ignored).
setTarget :: HscTarget -> DynP ()
-setTarget l = upd set
+setTarget l = setTargetWithPlatform (const l)
+
+setTargetWithPlatform :: (Platform -> HscTarget) -> DynP ()
+setTargetWithPlatform f = upd set
where
- set dfs
- | ghcLink dfs /= LinkBinary || isObjectTarget l = dfs{ hscTarget = l }
- | otherwise = dfs
+ set dfs = let l = f (targetPlatform dfs)
+ in if ghcLink dfs /= LinkBinary || isObjectTarget l
+ then dfs{ hscTarget = l }
+ else dfs
-- Changes the target only if we're compiling object code. This is
-- used by -fasm and -fllvm, which switch from one to the other, but
@@ -2654,7 +2661,7 @@ setObjTarget l = updM set
| isObjectTarget (hscTarget dflags)
= case l of
HscC
- | cGhcUnregisterised /= "YES" ->
+ | platformUnregisterised (targetPlatform dflags) ->
do addWarn ("Compiler not unregisterised, so ignoring " ++ flag)
return dflags
HscAsm
@@ -2679,7 +2686,7 @@ setFPIC :: DynP ()
setFPIC = updM set
where
set dflags
- | cGhcWithNativeCodeGen == "YES" || cGhcUnregisterised == "YES"
+ | cGhcWithNativeCodeGen == "YES" || platformUnregisterised (targetPlatform dflags)
= let platform = targetPlatform dflags
in case hscTarget dflags of
HscLlvm
@@ -2913,7 +2920,6 @@ compilerInfo dflags
("Object splitting supported", cSupportsSplitObjs),
("Have native code generator", cGhcWithNativeCodeGen),
("Support SMP", cGhcWithSMP),
- ("Unregisterised", cGhcUnregisterised),
("Tables next to code", cGhcEnableTablesNextToCode),
("RTS ways", cGhcRTSWays),
("Leading underscore", cLeadingUnderscore),
diff --git a/compiler/main/StaticFlagParser.hs b/compiler/main/StaticFlagParser.hs
index adda6f1505..2b7f95a910 100644
--- a/compiler/main/StaticFlagParser.hs
+++ b/compiler/main/StaticFlagParser.hs
@@ -21,7 +21,6 @@ import qualified StaticFlags as SF
import StaticFlags ( v_opt_C_ready, getWayFlags, WayName(..)
, opt_SimplExcessPrecision )
import CmdLineParser
-import Config
import SrcLoc
import Util
import Panic
@@ -69,14 +68,9 @@ parseStaticFlagsFull flagsAvailable args = do
way_flags <- getWayFlags
let way_flags' = map (mkGeneralLocated "in way flags") way_flags
- -- if we're unregisterised, add some more flags
- let unreg_flags | cGhcUnregisterised == "YES" = unregFlags
- | otherwise = []
-
-- as these are GHC generated flags, we parse them with all static flags
-- in scope, regardless of what availableFlags are passed in.
- (more_leftover, errs, warns2) <-
- processArgs flagsStatic (unreg_flags ++ way_flags')
+ (more_leftover, errs, warns2) <- processArgs flagsStatic way_flags'
-- see sanity code in staticOpts
writeIORef v_opt_C_ready True
@@ -174,7 +168,6 @@ isStaticFlag f =
"fexcess-precision",
"static",
"fhardwire-lib-paths",
- "funregisterised",
"fcpr-off",
"ferror-spans",
"fhpc"
@@ -190,12 +183,6 @@ isStaticFlag f =
"funfolding-keeness-factor"
]
-unregFlags :: [Located String]
-unregFlags = map (mkGeneralLocated "in unregFlags")
- [ "-optc-DNO_REGS"
- , "-optc-DUSE_MINIINTERPRETER"
- , "-funregisterised" ]
-
-----------------------------------------------------------------------------
-- convert sizes like "3.5M" into integers
diff --git a/compiler/main/StaticFlags.hs b/compiler/main/StaticFlags.hs
index f19497cb94..2334940492 100644
--- a/compiler/main/StaticFlags.hs
+++ b/compiler/main/StaticFlags.hs
@@ -72,7 +72,6 @@ module StaticFlags (
-- misc opts
opt_ErrorSpans,
opt_HistorySize,
- opt_Unregisterised,
v_Ld_inputs,
opt_StubDeadValues,
opt_Ticky,
@@ -309,8 +308,6 @@ opt_UF_DearOp = ( 40 :: Int)
-- Related to linking
opt_Static :: Bool
opt_Static = lookUp (fsLit "-static")
-opt_Unregisterised :: Bool
-opt_Unregisterised = lookUp (fsLit "-funregisterised")
-- Include full span info in error messages, instead of just the start position.
opt_ErrorSpans :: Bool
diff --git a/compiler/main/SysTools.lhs b/compiler/main/SysTools.lhs
index 295aa595e1..7d905d35c6 100644
--- a/compiler/main/SysTools.lhs
+++ b/compiler/main/SysTools.lhs
@@ -51,7 +51,6 @@ import Platform
import Util
import DynFlags
import Exception
-import StaticFlags
import Data.IORef
import Control.Monad
@@ -207,6 +206,7 @@ initSysTools mbMinusB
targetArch <- readSetting "target arch"
targetOS <- readSetting "target os"
targetWordSize <- readSetting "target word size"
+ targetUnregisterised <- getBooleanSetting "Unregisterised"
targetHasGnuNonexecStack <- readSetting "target has GNU nonexec stack"
targetHasIdentDirective <- readSetting "target has .ident directive"
targetHasSubsectionsViaSymbols <- readSetting "target has subsections via symbols"
@@ -218,12 +218,17 @@ initSysTools mbMinusB
-- to make that possible, so for now you can't.
gcc_prog <- getSetting "C compiler command"
gcc_args_str <- getSetting "C compiler flags"
- let
+ let unreg_gcc_args = if targetUnregisterised
+ then ["-DNO_REGS", "-DUSE_MINIINTERPRETER"]
+ else []
-- TABLES_NEXT_TO_CODE affects the info table layout.
tntc_gcc_args
- | tablesNextToCode' = ["-DTABLES_NEXT_TO_CODE"]
- | otherwise = []
- gcc_args = map Option (words gcc_args_str ++ tntc_gcc_args)
+ | mkTablesNextToCode targetUnregisterised
+ = ["-DTABLES_NEXT_TO_CODE"]
+ | otherwise = []
+ gcc_args = map Option (words gcc_args_str
+ ++ unreg_gcc_args
+ ++ tntc_gcc_args)
ldSupportsCompactUnwind <- getBooleanSetting "ld supports compact unwind"
ldSupportsBuildId <- getBooleanSetting "ld supports build-id"
ldIsGnuLd <- getBooleanSetting "ld is GNU ld"
@@ -280,6 +285,7 @@ initSysTools mbMinusB
platformArch = targetArch,
platformOS = targetOS,
platformWordSize = targetWordSize,
+ platformUnregisterised = targetUnregisterised,
platformHasGnuNonexecStack = targetHasGnuNonexecStack,
platformHasIdentDirective = targetHasIdentDirective,
platformHasSubsectionsViaSymbols = targetHasSubsectionsViaSymbols
@@ -322,14 +328,6 @@ initSysTools mbMinusB
sOpt_lo = [],
sOpt_lc = []
}
-
--- Derived, not a real option. Determines whether we will be compiling
--- info tables that reside just before the entry code, or with an
--- indirection to the entry code. See TABLES_NEXT_TO_CODE in
--- includes/rts/storage/InfoTables.h.
-tablesNextToCode' :: Bool
-tablesNextToCode' = not opt_Unregisterised
- && cGhcEnableTablesNextToCode == "YES"
\end{code}
\begin{code}
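
The SysTools.lhs change reads the new "Unregisterised" entry from the settings file and turns it into extra C compiler flags, replacing the -optc flags that StaticFlagParser used to inject. A small sketch of that flag construction (plain Haskell with a stand-in Settings record, not GHC's own types):

    data Settings = Settings { unregisterised :: Bool, tablesNextToCode :: Bool }

    -- Mirrors unreg_gcc_args and tntc_gcc_args in the hunk above.
    gccExtraArgs :: Settings -> [String]
    gccExtraArgs s =
         (if unregisterised s   then ["-DNO_REGS", "-DUSE_MINIINTERPRETER"] else [])
      ++ (if tablesNextToCode s then ["-DTABLES_NEXT_TO_CODE"]              else [])

    main :: IO ()
    main = print (gccExtraArgs (Settings True False))  -- ["-DNO_REGS","-DUSE_MINIINTERPRETER"]
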
diff --git a/compiler/utils/Platform.hs b/compiler/utils/Platform.hs
index 76bb38686f..b53ece9182 100644
--- a/compiler/utils/Platform.hs
+++ b/compiler/utils/Platform.hs
@@ -24,6 +24,7 @@ data Platform
-- Word size in bytes (i.e. normally 4 or 8,
-- for 32bit and 64bit platforms respectively)
platformWordSize :: {-# UNPACK #-} !Int,
+ platformUnregisterised :: Bool,
platformHasGnuNonexecStack :: Bool,
platformHasIdentDirective :: Bool,
platformHasSubsectionsViaSymbols :: Bool
diff --git a/configure.ac b/configure.ac
index 397e8a5de0..4ca64f2714 100644
--- a/configure.ac
+++ b/configure.ac
@@ -260,6 +260,29 @@ esac
AC_SUBST(SOLARIS_BROKEN_SHLD)
+dnl ** Do an unregisterised build?
+dnl --------------------------------------------------------------
+case "$HostArch" in
+ i386|x86_64|powerpc|arm)
+ UnregisterisedDefault=NO
+ ;;
+ *)
+ UnregisterisedDefault=YES
+ ;;
+esac
+AC_ARG_ENABLE(unregisterised,
+[AC_HELP_STRING([--enable-unregisterised],
+[Build an unregisterised compiler (enabled by default on platforms without registerised support) [default="$UnregisterisedDefault"]])],
+[ if test x"$enableval" = x"yes"; then
+ Unregisterised=YES
+ else
+ Unregisterised=NO
+ fi
+],
+[Unregisterised="$UnregisterisedDefault"]
+)
+AC_SUBST(Unregisterised)
+
AC_ARG_WITH(hc,
[AC_HELP_STRING([--with-hc=ARG],
[Use ARG as the path to the compiler for compiling ordinary
diff --git a/ghc/Main.hs b/ghc/Main.hs
index a53912c926..1e9d0a2a96 100644
--- a/ghc/Main.hs
+++ b/ghc/Main.hs
@@ -558,12 +558,12 @@ mode_flags =
]
setGenerateC :: String -> EwM ModeM ()
-setGenerateC f
- | cGhcUnregisterised /= "YES" = do
- addWarn ("Compiler not unregisterised, so ignoring " ++ f)
- | otherwise = do
- setMode (stopBeforeMode HCc) f
- addFlag "-fvia-C" f
+setGenerateC f = do -- TODO: We used to warn and ignore when
+ -- unregisterised, but we no longer know whether
+ -- we are unregisterised at this point. Should
+ -- we check later on?
+ setMode (stopBeforeMode HCc) f
+ addFlag "-fvia-C" f
setMode :: Mode -> String -> EwM ModeM ()
setMode newMode newFlag = liftEwM $ do
diff --git a/mk/build.mk.sample b/mk/build.mk.sample
index e979f3990e..83ff4b7c48 100644
--- a/mk/build.mk.sample
+++ b/mk/build.mk.sample
@@ -30,9 +30,6 @@
# A development build, working on the stage 2 compiler:
#BuildFlavour = devel2
-# An unregisterised, optimised build of ghc, for porting:
-#BuildFlavour = unreg
-
GhcLibWays = v
# -------- 1. A Performance/Distribution build--------------------------------
@@ -167,30 +164,6 @@ LAX_DEPENDENCIES = YES
endif
-# -------- A Unregisterised build) -------------------------------------------
-
-ifeq "$(BuildFlavour)" "unreg"
-
-# Note that the LLVM backend works in unregisterised mode as well as
-# registerised mode. This often makes it a good choice for porting
-# GHC.
-
-GhcUnregisterised = YES
-GhcWithNativeCodeGen = NO
-
-SRC_HC_OPTS = -O -H64m # -fllvm
-GhcStage1HcOpts = -O
-GhcStage2HcOpts = -O2
-GhcHcOpts = -Rghc-timing
-GhcLibHcOpts = -O2
-SplitObjs = NO
-HADDOCK_DOCS = NO
-BUILD_DOCBOOK_HTML = NO
-BUILD_DOCBOOK_PS = NO
-BUILD_DOCBOOK_PDF = NO
-
-endif
-
# -----------------------------------------------------------------------------
# Other settings that might be useful
diff --git a/mk/config.mk.in b/mk/config.mk.in
index 769bce8353..806d53df6d 100644
--- a/mk/config.mk.in
+++ b/mk/config.mk.in
@@ -139,11 +139,7 @@ PlatformSupportsSharedLibs = $(if $(filter $(TARGETPLATFORM),\
# the compiler you build with is generating registerised binaries), but
# the stage2 compiler will be an unregisterised binary.
#
-ifneq "$(findstring $(TargetArch_CPP), i386 x86_64 powerpc arm)" ""
-GhcUnregisterised=NO
-else
-GhcUnregisterised=YES
-endif
+GhcUnregisterised=@Unregisterised@
# Build a compiler with a native code generator backend
# (as well as a C backend)
@@ -154,8 +150,7 @@ endif
ArchSupportsNCG=$(strip $(patsubst $(TargetArch_CPP), YES, $(findstring $(TargetArch_CPP), i386 x86_64 powerpc sparc)))
OsSupportsNCG=$(strip $(patsubst $(TargetOS_CPP), YES, $(patsubst aix,,$(TargetOS_CPP))))
-# lazy test, because $(GhcUnregisterised) might be set in build.mk later.
-GhcWithNativeCodeGen=$(strip\
+GhcWithNativeCodeGen := $(strip\
$(if $(filter YESYESNO,\
$(OsSupportsNCG)$(ArchSupportsNCG)$(GhcUnregisterised)),YES,NO))
@@ -165,8 +160,7 @@ HaveLibDL = @HaveLibDL@
# includes/stg/SMP.h
ArchSupportsSMP=$(strip $(patsubst $(TargetArch_CPP), YES, $(findstring $(TargetArch_CPP), i386 x86_64 sparc powerpc arm)))
-# lazy test, because $(GhcUnregisterised) might be set in build.mk later.
-GhcWithSMP=$(strip $(if $(filter YESNO, $(ArchSupportsSMP)$(GhcUnregisterised)),YES,NO))
+GhcWithSMP := $(strip $(if $(filter YESNO, $(ArchSupportsSMP)$(GhcUnregisterised)),YES,NO))
# Whether to include GHCi in the compiler. Depends on whether the RTS linker
# has support for this OS/ARCH combination.
@@ -304,8 +298,7 @@ ArchSupportsSplitObjs=$(strip $(if $(filter $(TargetArch_CPP),i386 x86_64 powerp
OsSupportsSplitObjs=$(strip $(if $(filter $(TargetOS_CPP),mingw32 cygwin32 linux darwin solaris2 freebsd dragonfly netbsd openbsd),YES,NO))
SplitObjsBroken = @SplitObjsBroken@
-# lazy test, so that $(GhcUnregisterised) can be set in build.mk
-SupportsSplitObjs=$(strip \
+SupportsSplitObjs := $(strip \
$(if $(and $(filter YES,$(ArchSupportsSplitObjs)),\
$(filter YES,$(OsSupportsSplitObjs)),\
$(filter NO,$(SplitObjsBroken)),\
diff --git a/settings.in b/settings.in
index dbf15fd501..12583698a2 100644
--- a/settings.in
+++ b/settings.in
@@ -18,6 +18,7 @@
("target has GNU nonexec stack", "@HaskellHaveGnuNonexecStack@"),
("target has .ident directive", "@HaskellHaveIdentDirective@"),
("target has subsections via symbols", "@HaskellHaveSubsectionsViaSymbols@"),
+ ("Unregisterised", "@Unregisterised@"),
("LLVM llc command", "@SettingsLlcCommand@"),
("LLVM opt command", "@SettingsOptCommand@")
]