Diffstat (limited to 'compiler/nativeGen/SPARC/CodeGen.hs')
-rw-r--r-- compiler/nativeGen/SPARC/CodeGen.hs | 1545
1 files changed, 1545 insertions, 0 deletions
diff --git a/compiler/nativeGen/SPARC/CodeGen.hs b/compiler/nativeGen/SPARC/CodeGen.hs
new file mode 100644
index 0000000000..d921c12e7a
--- /dev/null
+++ b/compiler/nativeGen/SPARC/CodeGen.hs
@@ -0,0 +1,1545 @@
+{-# OPTIONS -w #-}
+-----------------------------------------------------------------------------
+--
+-- Generating machine code (instruction selection)
+--
+-- (c) The University of Glasgow 1996-2004
+--
+-----------------------------------------------------------------------------
+
+module SPARC.CodeGen (
+ cmmTopCodeGen,
+ InstrBlock
+)
+
+where
+
+#include "HsVersions.h"
+#include "nativeGen/NCG.h"
+#include "MachDeps.h"
+
+-- NCG stuff:
+import SPARC.Instr
+import SPARC.Cond
+import SPARC.Regs
+import SPARC.RegInfo
+import Instruction
+import Size
+import Reg
+import PIC
+import NCGMonad
+
+-- Our intermediate code:
+import BlockId
+import Cmm
+import CLabel
+
+-- The rest:
+import BasicTypes
+import StaticFlags ( opt_PIC )
+import OrdList
+import qualified Outputable as O
+import Outputable
+import FastString
+
+import Control.Monad ( mapAndUnzipM )
+import Data.Int
+import DynFlags
+
+-- | Top level code generation
+cmmTopCodeGen
+ :: DynFlags
+ -> RawCmmTop
+ -> NatM [NatCmmTop Instr]
+
+cmmTopCodeGen _
+ (CmmProc info lab params (ListGraph blocks))
+ = do
+ (nat_blocks,statics) <- mapAndUnzipM basicBlockCodeGen blocks
+
+-- picBaseMb <- getPicBaseMaybeNat
+ let proc = CmmProc info lab params (ListGraph $ concat nat_blocks)
+ let tops = proc : concat statics
+
+-- case picBaseMb of
+-- Just picBase -> initializePicBase picBase tops
+-- Nothing -> return tops
+
+ return tops
+
+
+cmmTopCodeGen _ (CmmData sec dat) = do
+ return [CmmData sec dat] -- no translation, we just use CmmStatic
+
+
+
+basicBlockCodeGen
+ :: CmmBasicBlock
+ -> NatM ( [NatBasicBlock Instr]
+ , [NatCmmTop Instr])
+
+basicBlockCodeGen (BasicBlock id stmts) = do
+ instrs <- stmtsToInstrs stmts
+ -- code generation may introduce new basic block boundaries, which
+ -- are indicated by the NEWBLOCK instruction. We must split up the
+ -- instruction stream into basic blocks again. Also, we extract
+ -- LDATAs here too.
+ let
+ (top,other_blocks,statics) = foldrOL mkBlocks ([],[],[]) instrs
+
+ mkBlocks (NEWBLOCK id) (instrs,blocks,statics)
+ = ([], BasicBlock id instrs : blocks, statics)
+ mkBlocks (LDATA sec dat) (instrs,blocks,statics)
+ = (instrs, blocks, CmmData sec dat:statics)
+ mkBlocks instr (instrs,blocks,statics)
+ = (instr:instrs, blocks, statics)
+ -- in
+ return (BasicBlock id top : other_blocks, statics)
+
+
+stmtsToInstrs :: [CmmStmt] -> NatM InstrBlock
+stmtsToInstrs stmts
+ = do instrss <- mapM stmtToInstrs stmts
+ return (concatOL instrss)
+
+
+stmtToInstrs :: CmmStmt -> NatM InstrBlock
+stmtToInstrs stmt = case stmt of
+ CmmNop -> return nilOL
+ CmmComment s -> return (unitOL (COMMENT s))
+
+ CmmAssign reg src
+ | isFloatType ty -> assignReg_FltCode size reg src
+ | isWord64 ty -> assignReg_I64Code reg src
+ | otherwise -> assignReg_IntCode size reg src
+ where ty = cmmRegType reg
+ size = cmmTypeSize ty
+
+ CmmStore addr src
+ | isFloatType ty -> assignMem_FltCode size addr src
+ | isWord64 ty -> assignMem_I64Code addr src
+ | otherwise -> assignMem_IntCode size addr src
+ where ty = cmmExprType src
+ size = cmmTypeSize ty
+
+ CmmCall target result_regs args _ _
+ -> genCCall target result_regs args
+
+ CmmBranch id -> genBranch id
+ CmmCondBranch arg id -> genCondJump id arg
+ CmmSwitch arg ids -> genSwitch arg ids
+ CmmJump arg _ -> genJump arg
+
+ CmmReturn _
+ -> panic "stmtToInstrs: return statement should have been cps'd away"
+
+
+--------------------------------------------------------------------------------
+-- | 'InstrBlock's are the insn sequences generated by the insn selectors.
+-- They are really trees of insns to facilitate fast appending, where a
+-- left-to-right traversal yields the insns in the correct order.
+--
+type InstrBlock
+ = OrdList Instr
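+
+-- A minimal illustrative sketch (not part of the original module) of why an
+-- OrdList suits this: appends are cheap, and a left-to-right traversal
+-- recovers program order.
+--
+-- > exampleBlock :: InstrBlock
+-- > exampleBlock = unitOL NOP `appOL` unitOL NOP `snocOL` NOP
+-- >
+-- > -- fromOL exampleBlock lists the three NOPs in the order appended.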
+
+
+-- | Condition codes passed up the tree.
+--
+data CondCode
+ = CondCode Bool Cond InstrBlock
+
+
+-- | a.k.a. "Register64"
+-- Reg is the lower 32-bit temporary which contains the result.
+-- Use getHiVRegFromLo to find the other VRegUnique.
+--
+-- The rule of this simplified insn selection game is that
+-- the returned Reg may be modified.
+--
+data ChildCode64
+ = ChildCode64
+ InstrBlock
+ Reg
+
+
+-- | Registers passed up the tree. If the stix code forces the register
+-- to live in a pre-decided machine register, it comes out as @Fixed@;
+-- otherwise, it comes out as @Any@, and the parent can decide which
+-- register to put it in.
+--
+data Register
+ = Fixed Size Reg InstrBlock
+ | Any Size (Reg -> InstrBlock)
+
+
+swizzleRegisterRep :: Register -> Size -> Register
+swizzleRegisterRep (Fixed _ reg code) size = Fixed size reg code
+swizzleRegisterRep (Any _ codefn) size = Any size codefn
+
+
+-- | Grab the Reg for a CmmReg
+getRegisterReg :: CmmReg -> Reg
+
+getRegisterReg (CmmLocal (LocalReg u pk))
+ = mkVReg u (cmmTypeSize pk)
+
+getRegisterReg (CmmGlobal mid)
+ = case get_GlobalReg_reg_or_addr mid of
+ Left (RealReg rrno) -> RealReg rrno
+ _other -> pprPanic "getRegisterReg-memory" (ppr $ CmmGlobal mid)
+ -- By this stage, the only MagicIds remaining should be the
+ -- ones which map to a real machine register on this
+ -- platform. Hence ...
+
+
+-- | Memory addressing modes passed up the tree.
+data Amode
+ = Amode AddrMode InstrBlock
+
+{-
+Now, given a tree (the argument to a CmmLoad) that references memory,
+produce a suitable addressing mode.
+
+A Rule of the Game (tm) for Amodes: use of the addr bit must
+immediately follow use of the code part, since the code part puts
+values in registers which the addr then refers to. So you can't put
+anything in between, lest it overwrite some of those registers. If
+you need to do some other computation between the code part and use of
+the addr bit, first store the effective address from the amode in a
+temporary, then do the other computation, and then use the temporary:
+
+ code
+ LEA amode, tmp
+ ... other computation ...
+ ... (tmp) ...
+-}
+
+
+-- | Check whether an integer will fit in 32 bits.
+-- A CmmInt is intended to be truncated to the appropriate
+-- number of bits, so here we truncate it to Int64. This is
+-- important because e.g. -1 as a CmmInt might be either
+-- -1 or 18446744073709551615.
+--
+is32BitInteger :: Integer -> Bool
+is32BitInteger i = i64 <= 0x7fffffff && i64 >= -0x80000000
+ where i64 = fromIntegral i :: Int64
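+
+-- For example (GHCi sketch, not part of this module): both of these are True,
+-- because 18446744073709551615 truncates to (-1) :: Int64, which is in range.
+--
+-- > is32BitInteger (-1)                    -- True
+-- > is32BitInteger 18446744073709551615   -- True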
+
+
+-- | Convert a BlockId to some CmmStatic data
+jumpTableEntry :: Maybe BlockId -> CmmStatic
+jumpTableEntry Nothing = CmmStaticLit (CmmInt 0 wordWidth)
+jumpTableEntry (Just (BlockId id)) = CmmStaticLit (CmmLabel blockLabel)
+ where blockLabel = mkAsmTempLabel id
+
+
+
+
+-- -----------------------------------------------------------------------------
+-- General things for putting together code sequences
+
+-- Expand CmmRegOff. ToDo: should we do it this way around, or convert
+-- CmmExprs into CmmRegOff?
+mangleIndexTree :: CmmExpr -> CmmExpr
+mangleIndexTree (CmmRegOff reg off)
+ = CmmMachOp (MO_Add width) [CmmReg reg, CmmLit (CmmInt (fromIntegral off) width)]
+ where width = typeWidth (cmmRegType reg)
+
+
+assignMem_I64Code :: CmmExpr -> CmmExpr -> NatM InstrBlock
+assignMem_I64Code addrTree valueTree = do
+     ChildCode64 vcode rlo <- iselExpr64 valueTree
+
+ (src, code) <- getSomeReg addrTree
+ let
+ rhi = getHiVRegFromLo rlo
+ -- Big-endian store
+ mov_hi = ST II32 rhi (AddrRegImm src (ImmInt 0))
+ mov_lo = ST II32 rlo (AddrRegImm src (ImmInt 4))
+
+ return (vcode `appOL` code `snocOL` mov_hi `snocOL` mov_lo)
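+
+-- The 0/4 offsets above encode SPARC's big-endian layout: the most
+-- significant word of a 64 bit value lives at the lower address. A
+-- standalone sketch of the split (illustrative only):
+--
+-- > import Data.Bits (shiftR)
+-- > import Data.Word (Word32, Word64)
+-- >
+-- > splitWord64 :: Word64 -> (Word32, Word32)   -- (word at +0, word at +4)
+-- > splitWord64 v = (fromIntegral (v `shiftR` 32), fromIntegral v)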
+
+
+assignReg_I64Code :: CmmReg -> CmmExpr -> NatM InstrBlock
+assignReg_I64Code (CmmLocal (LocalReg u_dst pk)) valueTree = do
+ ChildCode64 vcode r_src_lo <- iselExpr64 valueTree
+ let
+ r_dst_lo = mkVReg u_dst (cmmTypeSize pk)
+ r_dst_hi = getHiVRegFromLo r_dst_lo
+ r_src_hi = getHiVRegFromLo r_src_lo
+ mov_lo = mkMOV r_src_lo r_dst_lo
+ mov_hi = mkMOV r_src_hi r_dst_hi
+ mkMOV sreg dreg = OR False g0 (RIReg sreg) dreg
+ return (vcode `snocOL` mov_hi `snocOL` mov_lo)
+assignReg_I64Code lvalue valueTree
+ = panic "assignReg_I64Code(sparc): invalid lvalue"
+
+
+iselExpr64 :: CmmExpr -> NatM ChildCode64
+
+-- Load a 64 bit word
+iselExpr64 (CmmLoad addrTree ty)
+ | isWord64 ty
+ = do Amode amode addr_code <- getAmode addrTree
+ let result
+
+ | AddrRegReg r1 r2 <- amode
+ = do rlo <- getNewRegNat II32
+ tmp <- getNewRegNat II32
+ let rhi = getHiVRegFromLo rlo
+
+ return $ ChildCode64
+ ( addr_code
+ `appOL` toOL
+ [ ADD False False r1 (RIReg r2) tmp
+ , LD II32 (AddrRegImm tmp (ImmInt 0)) rhi
+ , LD II32 (AddrRegImm tmp (ImmInt 4)) rlo ])
+ rlo
+
+ | AddrRegImm r1 (ImmInt i) <- amode
+ = do rlo <- getNewRegNat II32
+ let rhi = getHiVRegFromLo rlo
+
+ return $ ChildCode64
+ ( addr_code
+ `appOL` toOL
+ [ LD II32 (AddrRegImm r1 (ImmInt $ 0 + i)) rhi
+ , LD II32 (AddrRegImm r1 (ImmInt $ 4 + i)) rlo ])
+ rlo
+
+ result
+
+
+-- Add a literal to a 64 bit integer
+iselExpr64 (CmmMachOp (MO_Add _) [e1, CmmLit (CmmInt i _)])
+ = do ChildCode64 code1 r1_lo <- iselExpr64 e1
+ let r1_hi = getHiVRegFromLo r1_lo
+
+ r_dst_lo <- getNewRegNat II32
+ let r_dst_hi = getHiVRegFromLo r_dst_lo
+
+        return $ ChildCode64
+                ( code1
+                `appOL` toOL
+                        [ ADD False False r1_lo (RIImm (ImmInteger i)) r_dst_lo
+                        , ADD True  False r1_hi (RIReg g0)   r_dst_hi ])
+                r_dst_lo
+
+
+-- Addition of II64
+iselExpr64 (CmmMachOp (MO_Add width) [e1, e2])
+ = do ChildCode64 code1 r1_lo <- iselExpr64 e1
+ let r1_hi = getHiVRegFromLo r1_lo
+
+ ChildCode64 code2 r2_lo <- iselExpr64 e2
+ let r2_hi = getHiVRegFromLo r2_lo
+
+ r_dst_lo <- getNewRegNat II32
+ let r_dst_hi = getHiVRegFromLo r_dst_lo
+
+ let code = code1
+ `appOL` code2
+ `appOL` toOL
+ [ ADD False False r1_lo (RIReg r2_lo) r_dst_lo
+ , ADD True False r1_hi (RIReg r2_hi) r_dst_hi ]
+
+ return $ ChildCode64 code r_dst_lo
+
+
+iselExpr64 (CmmReg (CmmLocal (LocalReg uq ty))) | isWord64 ty = do
+ r_dst_lo <- getNewRegNat II32
+ let r_dst_hi = getHiVRegFromLo r_dst_lo
+ r_src_lo = mkVReg uq II32
+ r_src_hi = getHiVRegFromLo r_src_lo
+ mov_lo = mkMOV r_src_lo r_dst_lo
+ mov_hi = mkMOV r_src_hi r_dst_hi
+ mkMOV sreg dreg = OR False g0 (RIReg sreg) dreg
+ return (
+ ChildCode64 (toOL [mov_hi, mov_lo]) r_dst_lo
+ )
+
+-- Convert something into II64
+iselExpr64 (CmmMachOp (MO_UU_Conv _ W64) [expr])
+ = do
+ r_dst_lo <- getNewRegNat II32
+ let r_dst_hi = getHiVRegFromLo r_dst_lo
+
+ -- compute expr and load it into r_dst_lo
+ (a_reg, a_code) <- getSomeReg expr
+
+ let code = a_code
+ `appOL` toOL
+ [ mkRegRegMoveInstr g0 r_dst_hi -- clear high 32 bits
+ , mkRegRegMoveInstr a_reg r_dst_lo ]
+
+ return $ ChildCode64 code r_dst_lo
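+
+-- That is, a zero extension. A source-level sketch (illustrative only):
+--
+-- > import Data.Word (Word32, Word64)
+-- >
+-- > zeroExtend32To64 :: Word32 -> Word64
+-- > zeroExtend32To64 = fromIntegral   -- high word 0, low word unchanged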
+
+
+iselExpr64 expr
+ = pprPanic "iselExpr64(sparc)" (ppr expr)
+
+
+-- | The dual to getAnyReg: compute an expression into a register, but
+-- we don't mind which one it is.
+getSomeReg :: CmmExpr -> NatM (Reg, InstrBlock)
+getSomeReg expr = do
+ r <- getRegister expr
+ case r of
+ Any rep code -> do
+ tmp <- getNewRegNat rep
+ return (tmp, code tmp)
+ Fixed _ reg code ->
+ return (reg, code)
+
+
+--
+getRegister :: CmmExpr -> NatM Register
+
+getRegister (CmmReg reg)
+ = return (Fixed (cmmTypeSize (cmmRegType reg))
+ (getRegisterReg reg) nilOL)
+
+getRegister tree@(CmmRegOff _ _)
+ = getRegister (mangleIndexTree tree)
+
+getRegister (CmmMachOp (MO_UU_Conv W64 W32)
+ [CmmMachOp (MO_U_Shr W64) [x,CmmLit (CmmInt 32 _)]]) = do
+ ChildCode64 code rlo <- iselExpr64 x
+ return $ Fixed II32 (getHiVRegFromLo rlo) code
+
+getRegister (CmmMachOp (MO_SS_Conv W64 W32)
+ [CmmMachOp (MO_U_Shr W64) [x,CmmLit (CmmInt 32 _)]]) = do
+ ChildCode64 code rlo <- iselExpr64 x
+ return $ Fixed II32 (getHiVRegFromLo rlo) code
+
+getRegister (CmmMachOp (MO_UU_Conv W64 W32) [x]) = do
+ ChildCode64 code rlo <- iselExpr64 x
+ return $ Fixed II32 rlo code
+
+getRegister (CmmMachOp (MO_SS_Conv W64 W32) [x]) = do
+ ChildCode64 code rlo <- iselExpr64 x
+ return $ Fixed II32 rlo code
+
+
+
+-- Load a literal float into a float register.
+-- The actual literal is stored in a new data area, and we load it
+-- at runtime.
+getRegister (CmmLit (CmmFloat f W32)) = do
+
+ -- a label for the new data area
+ lbl <- getNewLabelNat
+ tmp <- getNewRegNat II32
+
+ let code dst = toOL [
+ -- the data area
+ LDATA ReadOnlyData
+ [CmmDataLabel lbl,
+ CmmStaticLit (CmmFloat f W32)],
+
+ -- load the literal
+ SETHI (HI (ImmCLbl lbl)) tmp,
+ LD II32 (AddrRegImm tmp (LO (ImmCLbl lbl))) dst]
+
+ return (Any FF32 code)
+
+getRegister (CmmLit (CmmFloat d W64)) = do
+ lbl <- getNewLabelNat
+ tmp <- getNewRegNat II32
+ let code dst = toOL [
+ LDATA ReadOnlyData
+ [CmmDataLabel lbl,
+ CmmStaticLit (CmmFloat d W64)],
+ SETHI (HI (ImmCLbl lbl)) tmp,
+ LD II64 (AddrRegImm tmp (LO (ImmCLbl lbl))) dst]
+ return (Any FF64 code)
+
+getRegister (CmmMachOp mop [x]) -- unary MachOps
+ = case mop of
+ MO_F_Neg W32 -> trivialUFCode FF32 (FNEG FF32) x
+ MO_F_Neg W64 -> trivialUFCode FF64 (FNEG FF64) x
+
+ MO_S_Neg rep -> trivialUCode (intSize rep) (SUB False False g0) x
+ MO_Not rep -> trivialUCode (intSize rep) (XNOR False g0) x
+
+ MO_FF_Conv W64 W32-> coerceDbl2Flt x
+ MO_FF_Conv W32 W64-> coerceFlt2Dbl x
+
+ MO_FS_Conv from to -> coerceFP2Int from to x
+ MO_SF_Conv from to -> coerceInt2FP from to x
+
+ -- Conversions which are a nop on sparc
+ MO_UU_Conv from to
+ | from == to -> conversionNop (intSize to) x
+ MO_UU_Conv W32 W8 -> trivialCode W8 (AND False) x (CmmLit (CmmInt 255 W8))
+ MO_UU_Conv W32 to -> conversionNop (intSize to) x
+ MO_SS_Conv W32 to -> conversionNop (intSize to) x
+
+ MO_UU_Conv W8 to@W32 -> conversionNop (intSize to) x
+ MO_UU_Conv W16 to@W32 -> conversionNop (intSize to) x
+ MO_UU_Conv W8 to@W16 -> conversionNop (intSize to) x
+
+ -- sign extension
+ MO_SS_Conv W8 W32 -> integerExtend W8 W32 x
+ MO_SS_Conv W16 W32 -> integerExtend W16 W32 x
+ MO_SS_Conv W8 W16 -> integerExtend W8 W16 x
+
+ other_op -> panic ("Unknown unary mach op: " ++ show mop)
+ where
+
+ -- | sign extend and widen
+ integerExtend
+ :: Width -- ^ width of source expression
+ -> Width -- ^ width of result
+ -> CmmExpr -- ^ source expression
+ -> NatM Register
+
+ integerExtend from to expr
+ = do -- load the expr into some register
+ (reg, e_code) <- getSomeReg expr
+ tmp <- getNewRegNat II32
+ let bitCount
+ = case (from, to) of
+ (W8, W32) -> 24
+ (W16, W32) -> 16
+ (W8, W16) -> 24
+ let code dst
+ = e_code
+
+                       -- logical shift left to move the sign bit to the top
+                       `snocOL` SLL reg (RIImm (ImmInt bitCount)) tmp
+
+ -- arithmetic shift right to sign extend
+ `snocOL` SRA tmp (RIImm (ImmInt bitCount)) dst
+
+ return (Any (intSize to) code)
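+
+        -- The same trick in plain Haskell (illustrative sketch): shiftR on a
+        -- signed type is arithmetic, so shifting left then right by the same
+        -- amount replicates the sign bit of the narrow value.
+        --
+        -- > import Data.Bits (shiftL, shiftR)
+        -- > import Data.Int (Int32)
+        -- >
+        -- > signExtend8To32 :: Int32 -> Int32
+        -- > signExtend8To32 x = (x `shiftL` 24) `shiftR` 24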
+
+
+ conversionNop new_rep expr
+ = do e_code <- getRegister expr
+ return (swizzleRegisterRep e_code new_rep)
+
+getRegister (CmmMachOp mop [x, y]) -- dyadic PrimOps
+ = case mop of
+ MO_Eq rep -> condIntReg EQQ x y
+ MO_Ne rep -> condIntReg NE x y
+
+ MO_S_Gt rep -> condIntReg GTT x y
+ MO_S_Ge rep -> condIntReg GE x y
+ MO_S_Lt rep -> condIntReg LTT x y
+ MO_S_Le rep -> condIntReg LE x y
+
+      MO_U_Gt W32 -> condIntReg GU  x y
+      MO_U_Ge W32 -> condIntReg GEU x y
+      MO_U_Lt W32 -> condIntReg LU  x y
+      MO_U_Le W32 -> condIntReg LEU x y
+
+ MO_U_Gt W16 -> condIntReg GU x y
+ MO_U_Ge W16 -> condIntReg GEU x y
+ MO_U_Lt W16 -> condIntReg LU x y
+ MO_U_Le W16 -> condIntReg LEU x y
+
+ MO_Add W32 -> trivialCode W32 (ADD False False) x y
+ MO_Sub W32 -> trivialCode W32 (SUB False False) x y
+
+ MO_S_MulMayOflo rep -> imulMayOflo rep x y
+
+ MO_S_Quot W32 -> idiv True False x y
+ MO_U_Quot W32 -> idiv False False x y
+
+ MO_S_Rem W32 -> irem True x y
+ MO_U_Rem W32 -> irem False x y
+
+ MO_F_Eq w -> condFltReg EQQ x y
+ MO_F_Ne w -> condFltReg NE x y
+
+ MO_F_Gt w -> condFltReg GTT x y
+ MO_F_Ge w -> condFltReg GE x y
+ MO_F_Lt w -> condFltReg LTT x y
+ MO_F_Le w -> condFltReg LE x y
+
+ MO_F_Add w -> trivialFCode w FADD x y
+ MO_F_Sub w -> trivialFCode w FSUB x y
+ MO_F_Mul w -> trivialFCode w FMUL x y
+ MO_F_Quot w -> trivialFCode w FDIV x y
+
+ MO_And rep -> trivialCode rep (AND False) x y
+ MO_Or rep -> trivialCode rep (OR False) x y
+ MO_Xor rep -> trivialCode rep (XOR False) x y
+
+ MO_Mul rep -> trivialCode rep (SMUL False) x y
+
+ MO_Shl rep -> trivialCode rep SLL x y
+ MO_U_Shr rep -> trivialCode rep SRL x y
+ MO_S_Shr rep -> trivialCode rep SRA x y
+
+{-
+ MO_F32_Pwr -> getRegister (StCall (Left (fsLit "pow")) CCallConv FF64
+ [promote x, promote y])
+ where promote x = CmmMachOp MO_F32_to_Dbl [x]
+ MO_F64_Pwr -> getRegister (StCall (Left (fsLit "pow")) CCallConv FF64
+ [x, y])
+-}
+ other -> pprPanic "getRegister(sparc) - binary CmmMachOp (1)" (pprMachOp mop)
+ where
+ -- idiv fn x y = getRegister (StCall (Left fn) CCallConv II32 [x, y])
+
+
+ -- | Generate an integer division instruction.
+ idiv :: Bool -> Bool -> CmmExpr -> CmmExpr -> NatM Register
+
+ -- For unsigned division with a 32 bit numerator,
+ -- we can just clear the Y register.
+ idiv False cc x y = do
+ (a_reg, a_code) <- getSomeReg x
+ (b_reg, b_code) <- getSomeReg y
+
+ let code dst
+ = a_code
+ `appOL` b_code
+ `appOL` toOL
+ [ WRY g0 g0
+ , UDIV cc a_reg (RIReg b_reg) dst]
+
+ return (Any II32 code)
+
+
+ -- For _signed_ division with a 32 bit numerator,
+ -- we have to sign extend the numerator into the Y register.
+ idiv True cc x y = do
+ (a_reg, a_code) <- getSomeReg x
+ (b_reg, b_code) <- getSomeReg y
+
+ tmp <- getNewRegNat II32
+
+ let code dst
+ = a_code
+ `appOL` b_code
+ `appOL` toOL
+ [ SRA a_reg (RIImm (ImmInt 16)) tmp -- sign extend
+ , SRA tmp (RIImm (ImmInt 16)) tmp
+
+ , WRY tmp g0
+ , SDIV cc a_reg (RIReg b_reg) dst]
+
+ return (Any II32 code)
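+
+        -- The two arithmetic shifts compute the numerator's sign word for Y
+        -- (0 for non-negative values, -1 for negative ones). A plain Haskell
+        -- sketch, assuming 32 bit registers:
+        --
+        -- > import Data.Bits (shiftR)
+        -- > import Data.Int (Int32)
+        -- >
+        -- > signWord :: Int32 -> Int32
+        -- > signWord a = (a `shiftR` 16) `shiftR` 16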
+
+
+ -- | Do an integer remainder.
+ --
+ -- NOTE: The SPARC v8 architecture manual says that integer division
+ -- instructions _may_ generate a remainder, depending on the implementation.
+ -- If so it is _recommended_ that the remainder is placed in the Y register.
+ --
+ -- The UltraSparc 2007 manual says Y is _undefined_ after division.
+ --
+ -- The SPARC T2 doesn't store the remainder, not sure about the others.
+ -- It's probably best not to worry about it, and just generate our own
+ -- remainders.
+ --
+ irem :: Bool -> CmmExpr -> CmmExpr -> NatM Register
+
+ -- For unsigned operands:
+ -- Division is between a 64 bit numerator and a 32 bit denominator,
+ -- so we still have to clear the Y register.
+ irem False x y = do
+ (a_reg, a_code) <- getSomeReg x
+ (b_reg, b_code) <- getSomeReg y
+
+ tmp_reg <- getNewRegNat II32
+
+ let code dst
+ = a_code
+ `appOL` b_code
+ `appOL` toOL
+ [ WRY g0 g0
+ , UDIV False a_reg (RIReg b_reg) tmp_reg
+ , UMUL False tmp_reg (RIReg b_reg) tmp_reg
+ , SUB False False a_reg (RIReg tmp_reg) dst]
+
+ return (Any II32 code)
+
+
+ -- For signed operands:
+ -- Make sure to sign extend into the Y register, or the remainder
+ -- will have the wrong sign when the numerator is negative.
+ --
+ -- TODO: When sign extending, GCC only shifts the a_reg right by 17 bits,
+ -- not the full 32. Not sure why this is, something to do with overflow?
+ -- If anyone cares enough about the speed of signed remainder they
+ -- can work it out themselves (then tell me). -- BL 2009/01/20
+
+ irem True x y = do
+ (a_reg, a_code) <- getSomeReg x
+ (b_reg, b_code) <- getSomeReg y
+
+ tmp1_reg <- getNewRegNat II32
+ tmp2_reg <- getNewRegNat II32
+
+ let code dst
+ = a_code
+ `appOL` b_code
+ `appOL` toOL
+ [ SRA a_reg (RIImm (ImmInt 16)) tmp1_reg -- sign extend
+ , SRA tmp1_reg (RIImm (ImmInt 16)) tmp1_reg -- sign extend
+ , WRY tmp1_reg g0
+
+ , SDIV False a_reg (RIReg b_reg) tmp2_reg
+ , SMUL False tmp2_reg (RIReg b_reg) tmp2_reg
+ , SUB False False a_reg (RIReg tmp2_reg) dst]
+
+ return (Any II32 code)
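+
+        -- Both cases recover the remainder from the quotient via the identity
+        -- r = a - (a `quot` b) * b, which the DIV/MUL/SUB sequences above
+        -- implement. In plain Haskell (sketch):
+        --
+        -- > remViaQuot :: Int -> Int -> Int
+        -- > remViaQuot a b = a - (a `quot` b) * b   -- agrees with `rem`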
+
+
+ imulMayOflo :: Width -> CmmExpr -> CmmExpr -> NatM Register
+ imulMayOflo rep a b = do
+ (a_reg, a_code) <- getSomeReg a
+ (b_reg, b_code) <- getSomeReg b
+ res_lo <- getNewRegNat II32
+ res_hi <- getNewRegNat II32
+ let
+ shift_amt = case rep of
+ W32 -> 31
+ W64 -> 63
+ _ -> panic "shift_amt"
+ code dst = a_code `appOL` b_code `appOL`
+ toOL [
+ SMUL False a_reg (RIReg b_reg) res_lo,
+ RDY res_hi,
+ SRA res_lo (RIImm (ImmInt shift_amt)) res_lo,
+ SUB False False res_lo (RIReg res_hi) dst
+ ]
+ return (Any II32 code)
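+
+        -- The sequence flags overflow by comparing the high word of the
+        -- 64 bit product (read back from Y) with the sign extension of the
+        -- low word; they agree exactly when the product fits in 32 bits.
+        -- An illustrative sketch for the W32 case:
+        --
+        -- > import Data.Bits (shiftR)
+        -- > import Data.Int (Int32, Int64)
+        -- >
+        -- > mulMayOflo32 :: Int32 -> Int32 -> Bool
+        -- > mulMayOflo32 a b =
+        -- >     let p  = fromIntegral a * fromIntegral b :: Int64
+        -- >         lo = fromIntegral p :: Int32
+        -- >         hi = fromIntegral (p `shiftR` 32) :: Int32
+        -- >     in  (lo `shiftR` 31) - hi /= 0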
+
+getRegister (CmmLoad mem pk) = do
+ Amode src code <- getAmode mem
+ let
+ code__2 dst = code `snocOL` LD (cmmTypeSize pk) src dst
+ return (Any (cmmTypeSize pk) code__2)
+
+getRegister (CmmLit (CmmInt i _))
+ | fits13Bits i
+ = let
+ src = ImmInt (fromInteger i)
+ code dst = unitOL (OR False g0 (RIImm src) dst)
+ in
+ return (Any II32 code)
+
+getRegister (CmmLit lit)
+ = let rep = cmmLitType lit
+ imm = litToImm lit
+ code dst = toOL [
+ SETHI (HI imm) dst,
+ OR False dst (RIImm (LO imm)) dst]
+ in return (Any II32 code)
+
+
+
+getAmode :: CmmExpr -> NatM Amode
+getAmode tree@(CmmRegOff _ _) = getAmode (mangleIndexTree tree)
+
+getAmode (CmmMachOp (MO_Sub rep) [x, CmmLit (CmmInt i _)])
+ | fits13Bits (-i)
+ = do
+ (reg, code) <- getSomeReg x
+ let
+ off = ImmInt (-(fromInteger i))
+ return (Amode (AddrRegImm reg off) code)
+
+
+getAmode (CmmMachOp (MO_Add rep) [x, CmmLit (CmmInt i _)])
+ | fits13Bits i
+ = do
+ (reg, code) <- getSomeReg x
+ let
+ off = ImmInt (fromInteger i)
+ return (Amode (AddrRegImm reg off) code)
+
+getAmode (CmmMachOp (MO_Add rep) [x, y])
+ = do
+ (regX, codeX) <- getSomeReg x
+ (regY, codeY) <- getSomeReg y
+ let
+ code = codeX `appOL` codeY
+ return (Amode (AddrRegReg regX regY) code)
+
+getAmode (CmmLit lit)
+ = do
+ let imm__2 = litToImm lit
+ tmp1 <- getNewRegNat II32
+ tmp2 <- getNewRegNat II32
+
+ let code = toOL [ SETHI (HI imm__2) tmp1
+ , OR False tmp1 (RIImm (LO imm__2)) tmp2]
+
+ return (Amode (AddrRegReg tmp2 g0) code)
+
+getAmode other
+ = do
+ (reg, code) <- getSomeReg other
+ let
+ off = ImmInt 0
+ return (Amode (AddrRegImm reg off) code)
+
+
+getCondCode :: CmmExpr -> NatM CondCode
+getCondCode (CmmMachOp mop [x, y])
+ =
+ case mop of
+ MO_F_Eq W32 -> condFltCode EQQ x y
+ MO_F_Ne W32 -> condFltCode NE x y
+ MO_F_Gt W32 -> condFltCode GTT x y
+ MO_F_Ge W32 -> condFltCode GE x y
+ MO_F_Lt W32 -> condFltCode LTT x y
+ MO_F_Le W32 -> condFltCode LE x y
+
+ MO_F_Eq W64 -> condFltCode EQQ x y
+ MO_F_Ne W64 -> condFltCode NE x y
+ MO_F_Gt W64 -> condFltCode GTT x y
+ MO_F_Ge W64 -> condFltCode GE x y
+ MO_F_Lt W64 -> condFltCode LTT x y
+ MO_F_Le W64 -> condFltCode LE x y
+
+ MO_Eq rep -> condIntCode EQQ x y
+ MO_Ne rep -> condIntCode NE x y
+
+ MO_S_Gt rep -> condIntCode GTT x y
+ MO_S_Ge rep -> condIntCode GE x y
+ MO_S_Lt rep -> condIntCode LTT x y
+ MO_S_Le rep -> condIntCode LE x y
+
+ MO_U_Gt rep -> condIntCode GU x y
+ MO_U_Ge rep -> condIntCode GEU x y
+ MO_U_Lt rep -> condIntCode LU x y
+ MO_U_Le rep -> condIntCode LEU x y
+
+    other -> pprPanic "getCondCode(sparc)" (ppr (CmmMachOp mop [x,y]))
+
+getCondCode other = pprPanic "getCondCode(sparc)(2)" (ppr other)
+
+
+
+
+
+-- @cond(Int|Flt)Code@: Turn a boolean expression into a condition, to be
+-- passed back up the tree.
+
+condIntCode :: Cond -> CmmExpr -> CmmExpr -> NatM CondCode
+condIntCode cond x (CmmLit (CmmInt y rep))
+ | fits13Bits y
+ = do
+ (src1, code) <- getSomeReg x
+ let
+ src2 = ImmInt (fromInteger y)
+ code' = code `snocOL` SUB False True src1 (RIImm src2) g0
+ return (CondCode False cond code')
+
+condIntCode cond x y = do
+ (src1, code1) <- getSomeReg x
+ (src2, code2) <- getSomeReg y
+ let
+ code__2 = code1 `appOL` code2 `snocOL`
+ SUB False True src1 (RIReg src2) g0
+ return (CondCode False cond code__2)
+
+
+condFltCode :: Cond -> CmmExpr -> CmmExpr -> NatM CondCode
+condFltCode cond x y = do
+ (src1, code1) <- getSomeReg x
+ (src2, code2) <- getSomeReg y
+ tmp <- getNewRegNat FF64
+ let
+ promote x = FxTOy FF32 FF64 x tmp
+
+ pk1 = cmmExprType x
+ pk2 = cmmExprType y
+
+ code__2 =
+ if pk1 `cmmEqType` pk2 then
+ code1 `appOL` code2 `snocOL`
+ FCMP True (cmmTypeSize pk1) src1 src2
+ else if typeWidth pk1 == W32 then
+ code1 `snocOL` promote src1 `appOL` code2 `snocOL`
+ FCMP True FF64 tmp src2
+ else
+ code1 `appOL` code2 `snocOL` promote src2 `snocOL`
+ FCMP True FF64 src1 tmp
+ return (CondCode True cond code__2)
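+
+-- Mixed-width operands are promoted to FF64 before comparing, analogous to
+-- this source-level sketch (illustrative only):
+--
+-- > compareMixed :: Float -> Double -> Ordering
+-- > compareMixed f d = compare (realToFrac f :: Double) d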
+
+
+
+-- -----------------------------------------------------------------------------
+-- Generating assignments
+
+-- Assignments are really at the heart of the whole code generation
+-- business. Almost all top-level nodes of any real importance are
+-- assignments, which correspond to loads, stores, or register
+-- transfers. If we're really lucky, some of the register transfers
+-- will go away, because we can use the destination register to
+-- complete the code generation for the right hand side. This only
+-- fails when the right hand side is forced into a fixed register
+-- (e.g. the result of a call).
+
+assignMem_IntCode :: Size -> CmmExpr -> CmmExpr -> NatM InstrBlock
+assignMem_IntCode pk addr src = do
+ (srcReg, code) <- getSomeReg src
+ Amode dstAddr addr_code <- getAmode addr
+ return $ code `appOL` addr_code `snocOL` ST pk srcReg dstAddr
+
+
+assignReg_IntCode :: Size -> CmmReg -> CmmExpr -> NatM InstrBlock
+assignReg_IntCode pk reg src = do
+ r <- getRegister src
+ return $ case r of
+ Any _ code -> code dst
+ Fixed _ freg fcode -> fcode `snocOL` OR False g0 (RIReg freg) dst
+ where
+ dst = getRegisterReg reg
+
+
+
+-- Floating point assignment to memory
+assignMem_FltCode :: Size -> CmmExpr -> CmmExpr -> NatM InstrBlock
+assignMem_FltCode pk addr src = do
+ Amode dst__2 code1 <- getAmode addr
+ (src__2, code2) <- getSomeReg src
+ tmp1 <- getNewRegNat pk
+ let
+ pk__2 = cmmExprType src
+ code__2 = code1 `appOL` code2 `appOL`
+ if sizeToWidth pk == typeWidth pk__2
+ then unitOL (ST pk src__2 dst__2)
+ else toOL [ FxTOy (cmmTypeSize pk__2) pk src__2 tmp1
+ , ST pk tmp1 dst__2]
+ return code__2
+
+-- Floating point assignment to a register/temporary
+assignReg_FltCode :: Size -> CmmReg -> CmmExpr -> NatM InstrBlock
+assignReg_FltCode pk dstCmmReg srcCmmExpr = do
+ srcRegister <- getRegister srcCmmExpr
+ let dstReg = getRegisterReg dstCmmReg
+
+ return $ case srcRegister of
+ Any _ code -> code dstReg
+ Fixed _ srcFixedReg srcCode -> srcCode `snocOL` FMOV pk srcFixedReg dstReg
+
+
+
+
+genJump :: CmmExpr{-the branch target-} -> NatM InstrBlock
+
+genJump (CmmLit (CmmLabel lbl))
+ = return (toOL [CALL (Left target) 0 True, NOP])
+ where
+ target = ImmCLbl lbl
+
+genJump tree
+ = do
+ (target, code) <- getSomeReg tree
+ return (code `snocOL` JMP (AddrRegReg target g0) `snocOL` NOP)
+
+-- -----------------------------------------------------------------------------
+-- Unconditional branches
+
+genBranch :: BlockId -> NatM InstrBlock
+genBranch = return . toOL . mkJumpInstr
+
+
+-- -----------------------------------------------------------------------------
+-- Conditional jumps
+
+{-
+Conditional jumps are always to local labels, so we can use branch
+instructions. We peek at the arguments to decide what kind of
+comparison to do.
+
+SPARC: First, we have to ensure that the condition codes are set
+according to the supplied comparison operation. We generate slightly
+different code for floating point comparisons, because a floating
+point operation cannot directly precede a @BF@. We assume the worst
+and fill that slot with a @NOP@.
+
+SPARC: Do not fill the delay slots here; you will confuse the register
+allocator.
+-}
+
+
+genCondJump
+ :: BlockId -- the branch target
+ -> CmmExpr -- the condition on which to branch
+ -> NatM InstrBlock
+
+
+
+genCondJump bid bool = do
+ CondCode is_float cond code <- getCondCode bool
+ return (
+ code `appOL`
+ toOL (
+ if is_float
+ then [NOP, BF cond False bid, NOP]
+ else [BI cond False bid, NOP]
+ )
+ )
+
+
+
+-- -----------------------------------------------------------------------------
+-- Generating C calls
+
+-- Now the biggest nightmare---calls. Most of the nastiness is buried in
+-- @get_arg@, which moves the arguments to the correct registers/stack
+-- locations. Apart from that, the code is easy.
+--
+-- (If applicable) Do not fill the delay slots here; you will confuse the
+-- register allocator.
+
+genCCall
+ :: CmmCallTarget -- function to call
+ -> HintedCmmFormals -- where to put the result
+ -> HintedCmmActuals -- arguments (of mixed type)
+ -> NatM InstrBlock
+
+
+-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+{-
+ The SPARC calling convention is an absolute
+ nightmare. The first 6x32 bits of arguments are mapped into
+ %o0 through %o5, and the remaining arguments are dumped to the
+ stack, beginning at [%sp+92]. (Note that %o6 == %sp.)
+
+ If we have to put args on the stack, move %o6==%sp down by
+ the number of words to go on the stack, to ensure there's enough space.
+
+ According to Fraser and Hanson's lcc book, page 478, fig 17.2,
+ 16 words above the stack pointer is a word for the address of
+ a structure return value. I use this as a temporary location
+ for moving values from float to int regs. Certainly it isn't
+ safe to put anything in the 16 words starting at %sp, since
+ this area can get trashed at any time due to window overflows
+ caused by signal handlers.
+
+ A final complication (if the above isn't enough) is that
+ we can't blithely calculate the arguments one by one into
+ %o0 .. %o5. Consider the following nested calls:
+
+ fff a (fff b c)
+
+ Naive code moves a into %o0, and (fff b c) into %o1. Unfortunately
+ the inner call will itself use %o0, which trashes the value put there
+ in preparation for the outer call. Upshot: we need to calculate the
+ args into temporary regs, and move those to arg regs or onto the
+ stack only immediately prior to the call proper. Sigh.
+-}
+
+
+-- On SPARC under TSO (Total Store Ordering), stores earlier in the
+-- instruction stream are guaranteed to become visible before later stores
+-- (unlike on PowerPC).
+-- Ref: Section 8.4 of The SPARC Architecture Manual, Version 9.
+--
+-- In the SPARC case we don't need a barrier.
+--
+genCCall (CmmPrim (MO_WriteBarrier)) _ _
+ = return nilOL
+
+genCCall target dest_regs argsAndHints
+ = do
+ -- strip hints from the arg regs
+ let args :: [CmmExpr]
+ args = map hintlessCmm argsAndHints
+
+
+ -- work out the arguments, and assign them to integer regs
+ argcode_and_vregs <- mapM arg_to_int_vregs args
+ let (argcodes, vregss) = unzip argcode_and_vregs
+ let vregs = concat vregss
+
+ let n_argRegs = length allArgRegs
+ let n_argRegs_used = min (length vregs) n_argRegs
+
+
+ -- deal with static vs dynamic call targets
+ callinsns <- case target of
+ CmmCallee (CmmLit (CmmLabel lbl)) conv ->
+ return (unitOL (CALL (Left (litToImm (CmmLabel lbl))) n_argRegs_used False))
+
+ CmmCallee expr conv
+ -> do (dyn_c, [dyn_r]) <- arg_to_int_vregs expr
+ return (dyn_c `snocOL` CALL (Right dyn_r) n_argRegs_used False)
+
+ CmmPrim mop
+ -> do res <- outOfLineFloatOp mop
+ lblOrMopExpr <- case res of
+ Left lbl -> do
+ return (unitOL (CALL (Left (litToImm (CmmLabel lbl))) n_argRegs_used False))
+
+ Right mopExpr -> do
+ (dyn_c, [dyn_r]) <- arg_to_int_vregs mopExpr
+ return (dyn_c `snocOL` CALL (Right dyn_r) n_argRegs_used False)
+
+ return lblOrMopExpr
+
+ let argcode = concatOL argcodes
+
+ let (move_sp_down, move_sp_up)
+ = let diff = length vregs - n_argRegs
+ nn = if odd diff then diff + 1 else diff -- keep 8-byte alignment
+ in if nn <= 0
+ then (nilOL, nilOL)
+ else (unitOL (moveSp (-1*nn)), unitOL (moveSp (1*nn)))
+
+ let transfer_code
+ = toOL (move_final vregs allArgRegs eXTRA_STK_ARGS_HERE)
+
+ return
+ $ argcode `appOL`
+ move_sp_down `appOL`
+ transfer_code `appOL`
+ callinsns `appOL`
+ unitOL NOP `appOL`
+ move_sp_up `appOL`
+ assign_code dest_regs
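+
+-- The move_sp_down/move_sp_up pair above rounds the number of overflow
+-- argument words up to an even count, preserving 8-byte stack alignment.
+-- A sketch of the nn calculation (illustrative only):
+--
+-- > stackWordsNeeded :: Int -> Int
+-- > stackWordsNeeded diff = if odd diff then diff + 1 else diff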
+
+
+-- | Generate code to calculate an argument, and move it into one
+-- or two integer vregs.
+arg_to_int_vregs :: CmmExpr -> NatM (OrdList Instr, [Reg])
+arg_to_int_vregs arg
+
+ -- If the expr produces a 64 bit int, then we can just use iselExpr64
+ | isWord64 (cmmExprType arg)
+ = do (ChildCode64 code r_lo) <- iselExpr64 arg
+ let r_hi = getHiVRegFromLo r_lo
+ return (code, [r_hi, r_lo])
+
+ | otherwise
+ = do (src, code) <- getSomeReg arg
+ tmp <- getNewRegNat (cmmTypeSize $ cmmExprType arg)
+ let pk = cmmExprType arg
+
+ case cmmTypeSize pk of
+
+ -- Load a 64 bit float return value into two integer regs.
+ FF64 -> do
+ v1 <- getNewRegNat II32
+ v2 <- getNewRegNat II32
+
+ let Just f0_high = fPair f0
+
+ let code2 =
+ code `snocOL`
+ FMOV FF64 src f0 `snocOL`
+ ST FF32 f0 (spRel 16) `snocOL`
+ LD II32 (spRel 16) v1 `snocOL`
+ ST FF32 f0_high (spRel 16) `snocOL`
+ LD II32 (spRel 16) v2
+
+ return (code2, [v1,v2])
+
+ -- Load a 32 bit float return value into an integer reg
+ FF32 -> do
+ v1 <- getNewRegNat II32
+
+ let code2 =
+ code `snocOL`
+ ST FF32 src (spRel 16) `snocOL`
+ LD II32 (spRel 16) v1
+
+ return (code2, [v1])
+
+ -- Move an integer return value into its destination reg.
+ other -> do
+ v1 <- getNewRegNat II32
+
+ let code2 =
+ code `snocOL`
+ OR False g0 (RIReg src) v1
+
+ return (code2, [v1])
+
+
+-- | Move args from the integer vregs into which they have been
+-- marshalled, into %o0 .. %o5, and the rest onto the stack.
+--
+move_final :: [Reg] -> [Reg] -> Int -> [Instr]
+
+-- all args done
+move_final [] _ offset
+ = []
+
+-- out of aregs; move to stack
+move_final (v:vs) [] offset
+ = ST II32 v (spRel offset)
+ : move_final vs [] (offset+1)
+
+-- move into an arg (%o[0..5]) reg
+move_final (v:vs) (a:az) offset
+ = OR False g0 (RIReg v) a
+ : move_final vs az offset
+
+
+-- | Assign results returned from the call into their
+-- destination regs.
+--
+assign_code :: [CmmHinted LocalReg] -> OrdList Instr
+assign_code [] = nilOL
+
+assign_code [CmmHinted dest _hint]
+ = let rep = localRegType dest
+ width = typeWidth rep
+ r_dest = getRegisterReg (CmmLocal dest)
+
+ result
+ | isFloatType rep
+ , W32 <- width
+ = unitOL $ FMOV FF32 (RealReg $ fReg 0) r_dest
+
+ | isFloatType rep
+ , W64 <- width
+ = unitOL $ FMOV FF64 (RealReg $ fReg 0) r_dest
+
+ | not $ isFloatType rep
+ , W32 <- width
+ = unitOL $ mkRegRegMoveInstr (RealReg $ oReg 0) r_dest
+
+ | not $ isFloatType rep
+ , W64 <- width
+ , r_dest_hi <- getHiVRegFromLo r_dest
+ = toOL [ mkRegRegMoveInstr (RealReg $ oReg 0) r_dest_hi
+ , mkRegRegMoveInstr (RealReg $ oReg 1) r_dest]
+ in result
+
+
+-- | Generate a call to implement an out-of-line floating point operation
+outOfLineFloatOp
+ :: CallishMachOp
+ -> NatM (Either CLabel CmmExpr)
+
+outOfLineFloatOp mop
+ = do let functionName
+ = outOfLineFloatOp_table mop
+
+ dflags <- getDynFlagsNat
+ mopExpr <- cmmMakeDynamicReference dflags addImportNat CallReference
+ $ mkForeignLabel functionName Nothing True IsFunction
+
+ let mopLabelOrExpr
+ = case mopExpr of
+ CmmLit (CmmLabel lbl) -> Left lbl
+ _ -> Right mopExpr
+
+ return mopLabelOrExpr
+
+
+-- | Decide what C function to use to implement a CallishMachOp
+--
+outOfLineFloatOp_table
+ :: CallishMachOp
+ -> FastString
+
+outOfLineFloatOp_table mop
+ = case mop of
+ MO_F32_Exp -> fsLit "expf"
+ MO_F32_Log -> fsLit "logf"
+ MO_F32_Sqrt -> fsLit "sqrtf"
+ MO_F32_Pwr -> fsLit "powf"
+
+ MO_F32_Sin -> fsLit "sinf"
+ MO_F32_Cos -> fsLit "cosf"
+ MO_F32_Tan -> fsLit "tanf"
+
+ MO_F32_Asin -> fsLit "asinf"
+ MO_F32_Acos -> fsLit "acosf"
+ MO_F32_Atan -> fsLit "atanf"
+
+ MO_F32_Sinh -> fsLit "sinhf"
+ MO_F32_Cosh -> fsLit "coshf"
+ MO_F32_Tanh -> fsLit "tanhf"
+
+ MO_F64_Exp -> fsLit "exp"
+ MO_F64_Log -> fsLit "log"
+ MO_F64_Sqrt -> fsLit "sqrt"
+ MO_F64_Pwr -> fsLit "pow"
+
+ MO_F64_Sin -> fsLit "sin"
+ MO_F64_Cos -> fsLit "cos"
+ MO_F64_Tan -> fsLit "tan"
+
+ MO_F64_Asin -> fsLit "asin"
+ MO_F64_Acos -> fsLit "acos"
+ MO_F64_Atan -> fsLit "atan"
+
+ MO_F64_Sinh -> fsLit "sinh"
+ MO_F64_Cosh -> fsLit "cosh"
+ MO_F64_Tanh -> fsLit "tanh"
+
+ other -> pprPanic "outOfLineFloatOp(sparc): Unknown callish mach op "
+ (pprCallishMachOp mop)
+
+
+-- -----------------------------------------------------------------------------
+-- Generating a table-branch
+
+genSwitch :: CmmExpr -> [Maybe BlockId] -> NatM InstrBlock
+genSwitch expr ids
+ | opt_PIC
+ = error "MachCodeGen: sparc genSwitch PIC not finished\n"
+
+ | otherwise
+ = do (e_reg, e_code) <- getSomeReg expr
+
+ base_reg <- getNewRegNat II32
+ offset_reg <- getNewRegNat II32
+ dst <- getNewRegNat II32
+
+ label <- getNewLabelNat
+ let jumpTable = map jumpTableEntry ids
+
+ return $ e_code `appOL`
+ toOL
+ -- the jump table
+ [ LDATA ReadOnlyData (CmmDataLabel label : jumpTable)
+
+ -- load base of jump table
+ , SETHI (HI (ImmCLbl label)) base_reg
+ , OR False base_reg (RIImm $ LO $ ImmCLbl label) base_reg
+
+ -- the addrs in the table are 32 bits wide..
+ , SLL e_reg (RIImm $ ImmInt 2) offset_reg
+
+ -- load and jump to the destination
+ , LD II32 (AddrRegReg base_reg offset_reg) dst
+ , JMP_TBL (AddrRegImm dst (ImmInt 0)) [i | Just i <- ids]
+ , NOP ]
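+
+-- Each table entry is a 32 bit address, so the scrutinee is scaled by 4
+-- (the SLL by 2 above) to form a byte offset into the table. A sketch:
+--
+-- > tableByteOffset :: Int -> Int
+-- > tableByteOffset index = index * 4   -- what SLL by 2 computes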
+
+
+
+-- -----------------------------------------------------------------------------
+-- 'condIntReg' and 'condFltReg': condition codes into registers
+
+-- Turn those condition codes into integers now (when they appear on
+-- the right hand side of an assignment).
+--
+-- (If applicable) Do not fill the delay slots here; you will confuse the
+-- register allocator.
+
+condIntReg, condFltReg :: Cond -> CmmExpr -> CmmExpr -> NatM Register
+
+condIntReg EQQ x (CmmLit (CmmInt 0 d)) = do
+ (src, code) <- getSomeReg x
+ tmp <- getNewRegNat II32
+ let
+ code__2 dst = code `appOL` toOL [
+ SUB False True g0 (RIReg src) g0,
+ SUB True False g0 (RIImm (ImmInt (-1))) dst]
+ return (Any II32 code__2)
+
+condIntReg EQQ x y = do
+ (src1, code1) <- getSomeReg x
+ (src2, code2) <- getSomeReg y
+ tmp1 <- getNewRegNat II32
+ tmp2 <- getNewRegNat II32
+ let
+ code__2 dst = code1 `appOL` code2 `appOL` toOL [
+ XOR False src1 (RIReg src2) dst,
+ SUB False True g0 (RIReg dst) g0,
+ SUB True False g0 (RIImm (ImmInt (-1))) dst]
+ return (Any II32 code__2)
+
+condIntReg NE x (CmmLit (CmmInt 0 d)) = do
+ (src, code) <- getSomeReg x
+ tmp <- getNewRegNat II32
+ let
+ code__2 dst = code `appOL` toOL [
+ SUB False True g0 (RIReg src) g0,
+ ADD True False g0 (RIImm (ImmInt 0)) dst]
+ return (Any II32 code__2)
+
+condIntReg NE x y = do
+ (src1, code1) <- getSomeReg x
+ (src2, code2) <- getSomeReg y
+ tmp1 <- getNewRegNat II32
+ tmp2 <- getNewRegNat II32
+ let
+ code__2 dst = code1 `appOL` code2 `appOL` toOL [
+ XOR False src1 (RIReg src2) dst,
+ SUB False True g0 (RIReg dst) g0,
+ ADD True False g0 (RIImm (ImmInt 0)) dst]
+ return (Any II32 code__2)
+
+condIntReg cond x y = do
+ bid1@(BlockId lbl1) <- getBlockIdNat
+ bid2@(BlockId lbl2) <- getBlockIdNat
+ CondCode _ cond cond_code <- condIntCode cond x y
+ let
+ code__2 dst = cond_code `appOL` toOL [
+ BI cond False bid1, NOP,
+ OR False g0 (RIImm (ImmInt 0)) dst,
+ BI ALWAYS False bid2, NOP,
+ NEWBLOCK bid1,
+ OR False g0 (RIImm (ImmInt 1)) dst,
+ NEWBLOCK bid2]
+ return (Any II32 code__2)
+
+condFltReg cond x y = do
+ bid1@(BlockId lbl1) <- getBlockIdNat
+ bid2@(BlockId lbl2) <- getBlockIdNat
+ CondCode _ cond cond_code <- condFltCode cond x y
+ let
+ code__2 dst = cond_code `appOL` toOL [
+ NOP,
+ BF cond False bid1, NOP,
+ OR False g0 (RIImm (ImmInt 0)) dst,
+ BI ALWAYS False bid2, NOP,
+ NEWBLOCK bid1,
+ OR False g0 (RIImm (ImmInt 1)) dst,
+ NEWBLOCK bid2]
+ return (Any II32 code__2)
+
+
+
+-- -----------------------------------------------------------------------------
+-- 'trivial*Code': deal with trivial instructions
+
+-- Trivial (dyadic: 'trivialCode', floating-point: 'trivialFCode',
+-- unary: 'trivialUCode', unary fl-pt:'trivialUFCode') instructions.
+-- Only look for constants on the right hand side, because that's
+-- where the generic optimizer will have put them.
+
+-- Similarly, for unary instructions, we don't have to worry about
+-- matching an StInt as the argument, because genericOpt will already
+-- have handled the constant-folding.
+
+trivialCode
+        :: Width
+        -> (Reg -> RI -> Reg -> Instr)
+        -> CmmExpr -> CmmExpr
+        -> NatM Register
+
+trivialCode pk instr x (CmmLit (CmmInt y d))
+ | fits13Bits y
+ = do
+ (src1, code) <- getSomeReg x
+ tmp <- getNewRegNat II32
+ let
+ src2 = ImmInt (fromInteger y)
+ code__2 dst = code `snocOL` instr src1 (RIImm src2) dst
+ return (Any II32 code__2)
+
+trivialCode pk instr x y = do
+ (src1, code1) <- getSomeReg x
+ (src2, code2) <- getSomeReg y
+ tmp1 <- getNewRegNat II32
+ tmp2 <- getNewRegNat II32
+ let
+ code__2 dst = code1 `appOL` code2 `snocOL`
+ instr src1 (RIReg src2) dst
+ return (Any II32 code__2)
+
+------------
+trivialFCode
+        :: Width
+        -> (Size -> Reg -> Reg -> Reg -> Instr)
+        -> CmmExpr -> CmmExpr
+        -> NatM Register
+
+trivialFCode pk instr x y = do
+ (src1, code1) <- getSomeReg x
+ (src2, code2) <- getSomeReg y
+ tmp1 <- getNewRegNat (cmmTypeSize $ cmmExprType x)
+ tmp2 <- getNewRegNat (cmmTypeSize $ cmmExprType y)
+ tmp <- getNewRegNat FF64
+ let
+ promote x = FxTOy FF32 FF64 x tmp
+
+ pk1 = cmmExprType x
+ pk2 = cmmExprType y
+
+ code__2 dst =
+ if pk1 `cmmEqType` pk2 then
+ code1 `appOL` code2 `snocOL`
+ instr (floatSize pk) src1 src2 dst
+ else if typeWidth pk1 == W32 then
+ code1 `snocOL` promote src1 `appOL` code2 `snocOL`
+ instr FF64 tmp src2 dst
+ else
+ code1 `appOL` code2 `snocOL` promote src2 `snocOL`
+ instr FF64 src1 tmp dst
+ return (Any (cmmTypeSize $ if pk1 `cmmEqType` pk2 then pk1 else cmmFloat W64)
+ code__2)
+
+------------
+trivialUCode
+        :: Size
+        -> (RI -> Reg -> Instr)
+        -> CmmExpr
+        -> NatM Register
+
+trivialUCode size instr x = do
+ (src, code) <- getSomeReg x
+ tmp <- getNewRegNat size
+ let
+ code__2 dst = code `snocOL` instr (RIReg src) dst
+ return (Any size code__2)
+
+-------------
+trivialUFCode
+        :: Size
+        -> (Reg -> Reg -> Instr)
+        -> CmmExpr
+        -> NatM Register
+
+trivialUFCode pk instr x = do
+ (src, code) <- getSomeReg x
+ tmp <- getNewRegNat pk
+ let
+ code__2 dst = code `snocOL` instr src dst
+ return (Any pk code__2)
+
+
+
+coerceInt2FP :: Width -> Width -> CmmExpr -> NatM Register
+coerceFP2Int :: Width -> Width -> CmmExpr -> NatM Register
+coerceDbl2Flt :: CmmExpr -> NatM Register
+coerceFlt2Dbl :: CmmExpr -> NatM Register
+
+
+coerceInt2FP width1 width2 x = do
+ (src, code) <- getSomeReg x
+ let
+ code__2 dst = code `appOL` toOL [
+ ST (intSize width1) src (spRel (-2)),
+ LD (intSize width1) (spRel (-2)) dst,
+ FxTOy (intSize width1) (floatSize width2) dst dst]
+    return (Any (floatSize width2) code__2)
+
+
+-- | Coerce a floating point value to integer
+--
+-- NOTE: On SPARC V9 there are no instructions to move a value from an
+-- FP register directly to an int register, so we have to use a load/store.
+--
+coerceFP2Int width1 width2 x
+ = do let fsize1 = floatSize width1
+ fsize2 = floatSize width2
+
+ isize2 = intSize width2
+
+ (fsrc, code) <- getSomeReg x
+ fdst <- getNewRegNat fsize2
+
+ let code2 dst
+ = code
+ `appOL` toOL
+ -- convert float to int format, leaving it in a float reg.
+ [ FxTOy fsize1 isize2 fsrc fdst
+
+ -- store the int into mem, then load it back to move
+ -- it into an actual int reg.
+ , ST fsize2 fdst (spRel (-2))
+ , LD isize2 (spRel (-2)) dst]
+
+ return (Any isize2 code2)
+
+------------
+coerceDbl2Flt x = do
+ (src, code) <- getSomeReg x
+ return (Any FF32 (\dst -> code `snocOL` FxTOy FF64 FF32 src dst))
+
+------------
+coerceFlt2Dbl x = do
+ (src, code) <- getSomeReg x
+ return (Any FF64 (\dst -> code `snocOL` FxTOy FF32 FF64 src dst))
+
+
+
+-- eXTRA_STK_ARGS_HERE
+
+-- We (allegedly) put the first six C-call arguments in registers;
+-- where do we start putting the rest of them? At 23 words above %sp:
+-- 23 words * 4 bytes = 92 bytes, matching the [%sp+92] offset where
+-- stack arguments begin under the SPARC calling convention (see the
+-- notes in genCCall above).
+
+-- Moved from Instrs (SDM):
+
+eXTRA_STK_ARGS_HERE :: Int
+eXTRA_STK_ARGS_HERE
+ = 23