-----------------------------------------------------------------------------
--
-- Generating machine code (instruction selection)
--
-- (c) The University of Glasgow 1996-2004
--
-----------------------------------------------------------------------------
module SPARC.CodeGen (
cmmTopCodeGen,
generateJumpTableForInstr,
InstrBlock
)
where
#include "HsVersions.h"
#include "nativeGen/NCG.h"
#include "../includes/MachDeps.h"
-- NCG stuff:
import SPARC.CodeGen.Sanity
import SPARC.CodeGen.Amode
import SPARC.CodeGen.CondCode
import SPARC.CodeGen.Gen64
import SPARC.CodeGen.Gen32
import SPARC.CodeGen.CCall
import SPARC.CodeGen.Base
import SPARC.Ppr ()
import SPARC.Instr
import SPARC.Imm
import SPARC.AddrMode
import SPARC.Regs
import Instruction
import Size
import NCGMonad
-- Our intermediate code:
import BlockId
import OldCmm
import CLabel
-- The rest:
import StaticFlags ( opt_PIC )
import OrdList
import Outputable
import Unique
import Control.Monad ( mapAndUnzipM )
-- | Top level code generation
cmmTopCodeGen
:: RawCmmTop
-> NatM [NatCmmTop Instr]
cmmTopCodeGen
(CmmProc info lab (ListGraph blocks))
= do
(nat_blocks,statics) <- mapAndUnzipM basicBlockCodeGen blocks
let proc = CmmProc info lab (ListGraph $ concat nat_blocks)
let tops = proc : concat statics
return tops
cmmTopCodeGen (CmmData sec dat) = do
return [CmmData sec dat] -- no translation, we just use CmmStatic
-- | Do code generation on a single block of CMM code.
-- Code generation may introduce new basic block boundaries, which
-- are indicated by the NEWBLOCK instruction. We must split up the
-- instruction stream into basic blocks again. We also extract
-- LDATAs here.
basicBlockCodeGen
:: CmmBasicBlock
-> NatM ( [NatBasicBlock Instr]
, [NatCmmTop Instr])
basicBlockCodeGen cmm@(BasicBlock id stmts) = do
instrs <- stmtsToInstrs stmts
let
(top,other_blocks,statics)
= foldrOL mkBlocks ([],[],[]) instrs
mkBlocks (NEWBLOCK id) (instrs,blocks,statics)
= ([], BasicBlock id instrs : blocks, statics)
mkBlocks (LDATA sec dat) (instrs,blocks,statics)
= (instrs, blocks, CmmData sec dat:statics)
mkBlocks instr (instrs,blocks,statics)
= (instr:instrs, blocks, statics)
-- do intra-block sanity checking
blocksChecked
= map (checkBlock cmm)
$ BasicBlock id top : other_blocks
return (blocksChecked, statics)
-- | Convert some Cmm statements to SPARC instructions.
stmtsToInstrs :: [CmmStmt] -> NatM InstrBlock
stmtsToInstrs stmts
= do instrss <- mapM stmtToInstrs stmts
return (concatOL instrss)
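-- | Convert a single Cmm statement to SPARC instructions.
-- Assignments and stores are dispatched on the type of the value being
-- moved (float, 64-bit integer, or word-sized integer); control flow
-- statements are handled by the gen* functions below.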
stmtToInstrs :: CmmStmt -> NatM InstrBlock
stmtToInstrs stmt = case stmt of
CmmNop -> return nilOL
CmmComment s -> return (unitOL (COMMENT s))
CmmAssign reg src
| isFloatType ty -> assignReg_FltCode size reg src
| isWord64 ty -> assignReg_I64Code reg src
| otherwise -> assignReg_IntCode size reg src
where ty = cmmRegType reg
size = cmmTypeSize ty
CmmStore addr src
| isFloatType ty -> assignMem_FltCode size addr src
| isWord64 ty -> assignMem_I64Code addr src
| otherwise -> assignMem_IntCode size addr src
where ty = cmmExprType src
size = cmmTypeSize ty
CmmCall target result_regs args _ _
-> genCCall target result_regs args
CmmBranch id -> genBranch id
CmmCondBranch arg id -> genCondJump id arg
CmmSwitch arg ids -> genSwitch arg ids
CmmJump arg _ -> genJump arg
CmmReturn _
-> panic "stmtToInstrs: return statement should have been cps'd away"
{-
Now, given a tree (the argument to a CmmLoad) that references memory,
produce a suitable addressing mode.
A Rule of the Game (tm) for Amodes: use of the addr bit must
immediately follow use of the code part, since the code part puts
values in registers which the addr then refers to. So you can't put
anything in between, lest it overwrite some of those registers. If
you need to do some other computation between the code part and use of
the addr bit, first store the effective address from the amode in a
temporary, then do the other computation, and then use the temporary:
code
LEA amode, tmp
... other computation ...
... (tmp) ...
-}
-- | Convert a BlockId to some CmmStatic data
jumpTableEntry :: Maybe BlockId -> CmmStatic
jumpTableEntry Nothing = CmmStaticLit (CmmInt 0 wordWidth)
jumpTableEntry (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
where blockLabel = mkAsmTempLabel (getUnique blockid)
-- -----------------------------------------------------------------------------
-- Generating assignments
-- Assignments are really at the heart of the whole code generation
-- business. Almost all top-level nodes of any real importance are
-- assignments, which correspond to loads, stores, or register
-- transfers. If we're really lucky, some of the register transfers
-- will go away, because we can use the destination register to
-- complete the code generation for the right hand side. This only
-- fails when the right hand side is forced into a fixed register
-- (e.g. the result of a call).
assignMem_IntCode :: Size -> CmmExpr -> CmmExpr -> NatM InstrBlock
assignMem_IntCode pk addr src = do
(srcReg, code) <- getSomeReg src
Amode dstAddr addr_code <- getAmode addr
return $ code `appOL` addr_code `snocOL` ST pk srcReg dstAddr
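-- Integer assignment to a register. If the right hand side can be
-- generated into any register (Any), we generate it straight into the
-- destination; if it lives in a fixed register (Fixed), we copy it to
-- the destination with an OR against %g0.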
assignReg_IntCode :: Size -> CmmReg -> CmmExpr -> NatM InstrBlock
assignReg_IntCode _ reg src = do
r <- getRegister src
return $ case r of
Any _ code -> code dst
Fixed _ freg fcode -> fcode `snocOL` OR False g0 (RIReg freg) dst
where
dst = getRegisterReg reg
-- Floating point assignment to memory
assignMem_FltCode :: Size -> CmmExpr -> CmmExpr -> NatM InstrBlock
assignMem_FltCode pk addr src = do
Amode dst__2 code1 <- getAmode addr
(src__2, code2) <- getSomeReg src
tmp1 <- getNewRegNat pk
let
pk__2 = cmmExprType src
code__2 = code1 `appOL` code2 `appOL`
if sizeToWidth pk == typeWidth pk__2
then unitOL (ST pk src__2 dst__2)
else toOL [ FxTOy (cmmTypeSize pk__2) pk src__2 tmp1
, ST pk tmp1 dst__2]
return code__2
-- Floating point assignment to a register/temporary
assignReg_FltCode :: Size -> CmmReg -> CmmExpr -> NatM InstrBlock
assignReg_FltCode pk dstCmmReg srcCmmExpr = do
srcRegister <- getRegister srcCmmExpr
let dstReg = getRegisterReg dstCmmReg
return $ case srcRegister of
Any _ code -> code dstReg
Fixed _ srcFixedReg srcCode -> srcCode `snocOL` FMOV pk srcFixedReg dstReg
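-- | Generate an unconditional jump to an arbitrary target expression.
-- A jump to a known label is emitted as a CALL; any other target is
-- first evaluated into a register and jumped to with JMP.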
genJump :: CmmExpr{-the branch target-} -> NatM InstrBlock
genJump (CmmLit (CmmLabel lbl))
= return (toOL [CALL (Left target) 0 True, NOP])
where
target = ImmCLbl lbl
genJump tree
= do
(target, code) <- getSomeReg tree
return (code `snocOL` JMP (AddrRegReg target g0) `snocOL` NOP)
-- -----------------------------------------------------------------------------
-- Unconditional branches
genBranch :: BlockId -> NatM InstrBlock
genBranch = return . toOL . mkJumpInstr
-- -----------------------------------------------------------------------------
-- Conditional jumps
{-
Conditional jumps are always to local labels, so we can use branch
instructions. We peek at the arguments to decide what kind of
comparison to do.
SPARC: First, we have to ensure that the condition codes are set
according to the supplied comparison operation. We generate slightly
different code for floating point comparisons, because a floating
point operation cannot directly precede a @BF@. We assume the worst
and fill that slot with a @NOP@.
SPARC: Do not fill the delay slots here; you will confuse the register
allocator.
-}
genCondJump
:: BlockId -- the branch target
-> CmmExpr -- the condition on which to branch
-> NatM InstrBlock
genCondJump bid bool = do
CondCode is_float cond code <- getCondCode bool
return (
code `appOL`
toOL (
if is_float
then [NOP, BF cond False bid, NOP]
else [BI cond False bid, NOP]
)
)
-- -----------------------------------------------------------------------------
-- Generating a table-branch
genSwitch :: CmmExpr -> [Maybe BlockId] -> NatM InstrBlock
genSwitch expr ids
| opt_PIC
= error "MachCodeGen: sparc genSwitch PIC not finished\n"
| otherwise
= do (e_reg, e_code) <- getSomeReg expr
base_reg <- getNewRegNat II32
offset_reg <- getNewRegNat II32
dst <- getNewRegNat II32
label <- getNewLabelNat
return $ e_code `appOL`
toOL
[ -- load base of jump table
SETHI (HI (ImmCLbl label)) base_reg
, OR False base_reg (RIImm $ LO $ ImmCLbl label) base_reg
-- the addresses in the table are 32 bits wide, so scale the index by 4
, SLL e_reg (RIImm $ ImmInt 2) offset_reg
-- load and jump to the destination
, LD II32 (AddrRegReg base_reg offset_reg) dst
, JMP_TBL (AddrRegImm dst (ImmInt 0)) ids label
, NOP ]
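-- | Produce the read-only jump table referenced by a JMP_TBL
-- instruction. Instructions other than JMP_TBL have no table.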
generateJumpTableForInstr :: Instr -> Maybe (NatCmmTop Instr)
generateJumpTableForInstr (JMP_TBL _ ids label) =
let jumpTable = map jumpTableEntry ids
in Just (CmmData ReadOnlyData (CmmDataLabel label : jumpTable))
generateJumpTableForInstr _ = Nothing
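-- A missing alternative in the switch is padded with a zero word by
-- jumpTableEntry, so the table always has one entry per case value.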