From dfd27445308d1ed2df8826c2a045130e918e8192 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A9cate?= Date: Sat, 10 Oct 2020 21:15:36 +0200 Subject: Add the proper HLint rules and remove redundant keywords from compiler --- compiler/GHC/CmmToAsm/BlockLayout.hs | 12 ++--- compiler/GHC/CmmToAsm/CFG/Dominators.hs | 7 ++- compiler/GHC/CmmToAsm/PPC/CodeGen.hs | 16 +++---- compiler/GHC/CmmToAsm/Reg/Graph/Spill.hs | 77 +++++++++++++++----------------- compiler/GHC/CmmToAsm/SPARC/CodeGen.hs | 8 ++-- compiler/GHC/CmmToAsm/X86/CodeGen.hs | 41 ++++++++--------- 6 files changed, 76 insertions(+), 85 deletions(-) (limited to 'compiler/GHC/CmmToAsm') diff --git a/compiler/GHC/CmmToAsm/BlockLayout.hs b/compiler/GHC/CmmToAsm/BlockLayout.hs index 0a71d00449..d32357b5cc 100644 --- a/compiler/GHC/CmmToAsm/BlockLayout.hs +++ b/compiler/GHC/CmmToAsm/BlockLayout.hs @@ -475,7 +475,6 @@ combineNeighbourhood edges chains applyEdges edges newEnds newFronts (Set.insert (from,to) combined) | otherwise = applyEdges edges chainEnds chainFronts combined - where getFronts chain = takeL neighbourOverlapp chain getEnds chain = takeR neighbourOverlapp chain @@ -588,19 +587,14 @@ buildChains edges blocks , Just predChain <- mapLookup from chainEnds , Just succChain <- mapLookup to chainStarts , predChain /= succChain -- Otherwise we try to create a cycle. - = do - -- pprTraceM "Fusing edge" (ppr edge) - fuseChain predChain succChain + = fuseChain predChain succChain | (alreadyPlaced from) && (alreadyPlaced to) - = --pprTraceM "Skipping:" (ppr edge) >> - buildNext placed chainStarts chainEnds todo linked + = buildNext placed chainStarts chainEnds todo linked | otherwise - = do -- pprTraceM "Finding chain for:" (ppr edge $$ - -- text "placed" <+> ppr placed) - findChain + = findChain where from = edgeFrom edge to = edgeTo edge diff --git a/compiler/GHC/CmmToAsm/CFG/Dominators.hs b/compiler/GHC/CmmToAsm/CFG/Dominators.hs index d9edc86cee..92ef5d95ec 100644 --- a/compiler/GHC/CmmToAsm/CFG/Dominators.hs +++ b/compiler/GHC/CmmToAsm/CFG/Dominators.hs @@ -1,4 +1,7 @@ -{-# LANGUAGE RankNTypes, BangPatterns, FlexibleContexts, Strict #-} +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE Strict #-} {- | Module : GHC.CmmToAsm.CFG.Dominators @@ -250,7 +253,7 @@ link v w = do zw <- sizeM w store labelE s lw store sizeE v . 
(+zw) =<< sizeM v - let follow s = do + let follow s = when (s /= n0) (do store ancestorE s v follow =<< childM s) diff --git a/compiler/GHC/CmmToAsm/PPC/CodeGen.hs b/compiler/GHC/CmmToAsm/PPC/CodeGen.hs index b25e6187b9..01a3a67333 100644 --- a/compiler/GHC/CmmToAsm/PPC/CodeGen.hs +++ b/compiler/GHC/CmmToAsm/PPC/CodeGen.hs @@ -1,4 +1,5 @@ -{-# LANGUAGE CPP, GADTs #-} +{-# LANGUAGE CPP #-} +{-# LANGUAGE GADTs #-} ----------------------------------------------------------------------------- -- @@ -117,7 +118,7 @@ cmmTopCodeGen (CmmProc info lab live graph) = do return (CmmProc info lab live (ListGraph (b':blocks)) : statics) fixup_entry _ = panic "cmmTopCodegen: Broken CmmProc" -cmmTopCodeGen (CmmData sec dat) = do +cmmTopCodeGen (CmmData sec dat) = return [CmmData sec dat] -- no translation, we just use CmmStatic basicBlockCodeGen @@ -787,7 +788,7 @@ getAmode DS (CmmMachOp (MO_Sub W64) [x, CmmLit (CmmInt i _)]) (reg, code) <- getSomeReg x (reg', off', code') <- if i `mod` 4 == 0 - then do return (reg, off, code) + then return (reg, off, code) else do tmp <- getNewRegNat II64 return (tmp, ImmInt 0, @@ -800,7 +801,7 @@ getAmode DS (CmmMachOp (MO_Add W64) [x, CmmLit (CmmInt i _)]) (reg, code) <- getSomeReg x (reg', off', code') <- if i `mod` 4 == 0 - then do return (reg, off, code) + then return (reg, off, code) else do tmp <- getNewRegNat II64 return (tmp, ImmInt 0, @@ -882,8 +883,7 @@ getCondCode :: CmmExpr -> NatM CondCode -- extend small integers to 32 bit or 64 bit first getCondCode (CmmMachOp mop [x, y]) - = do - case mop of + = case mop of MO_F_Eq W32 -> condFltCode EQQ x y MO_F_Ne W32 -> condFltCode NE x y MO_F_Gt W32 -> condFltCode GTT x y @@ -1670,7 +1670,7 @@ genCCall' config gcp target dest_regs args codeAfter = move_sp_up finalStack `appOL` moveResult reduceToFF32 case labelOrExpr of - Left lbl -> do -- the linker does all the work for us + Left lbl -> -- the linker does all the work for us return ( codeBefore `snocOL` BL lbl usedRegs `appOL` maybeNOP -- some ABI require a NOP after BL @@ -1716,7 +1716,7 @@ genCCall' config gcp target dest_regs args where platform = ncgPlatform config - uses_pic_base_implicitly = do + uses_pic_base_implicitly = -- See Note [implicit register in PPC PIC code] -- on why we claim to use PIC register here when (ncgPIC config && target32Bit platform) $ do diff --git a/compiler/GHC/CmmToAsm/Reg/Graph/Spill.hs b/compiler/GHC/CmmToAsm/Reg/Graph/Spill.hs index b3c06cefcc..e290be505e 100644 --- a/compiler/GHC/CmmToAsm/Reg/Graph/Spill.hs +++ b/compiler/GHC/CmmToAsm/Reg/Graph/Spill.hs @@ -1,4 +1,3 @@ - -- | When there aren't enough registers to hold all the vregs we have to spill -- some of those vregs to slots on the stack. This module is used modify the -- code to use those slots. @@ -7,6 +6,7 @@ module GHC.CmmToAsm.Reg.Graph.Spill ( SpillStats(..), accSpillSL ) where + import GHC.Prelude import GHC.CmmToAsm.Reg.Liveness @@ -182,46 +182,41 @@ regSpill_instr -> UniqFM Reg Int -- ^ map of vregs to slots they're being spilled to. -> LiveInstr instr -> SpillM [LiveInstr instr] - -regSpill_instr _ _ li@(LiveInstr _ Nothing) - = do return [li] - -regSpill_instr platform regSlotMap - (LiveInstr instr (Just _)) - = do - -- work out which regs are read and written in this instr - let RU rlRead rlWritten = regUsageOfInstr platform instr - - -- sometimes a register is listed as being read more than once, - -- nub this so we don't end up inserting two lots of spill code. 
- let rsRead_ = nub rlRead - let rsWritten_ = nub rlWritten - - -- if a reg is modified, it appears in both lists, want to undo this.. - let rsRead = rsRead_ \\ rsWritten_ - let rsWritten = rsWritten_ \\ rsRead_ - let rsModify = intersect rsRead_ rsWritten_ - - -- work out if any of the regs being used are currently being spilled. - let rsSpillRead = filter (\r -> elemUFM r regSlotMap) rsRead - let rsSpillWritten = filter (\r -> elemUFM r regSlotMap) rsWritten - let rsSpillModify = filter (\r -> elemUFM r regSlotMap) rsModify - - -- rewrite the instr and work out spill code. - (instr1, prepost1) <- mapAccumLM (spillRead regSlotMap) instr rsSpillRead - (instr2, prepost2) <- mapAccumLM (spillWrite regSlotMap) instr1 rsSpillWritten - (instr3, prepost3) <- mapAccumLM (spillModify regSlotMap) instr2 rsSpillModify - - let (mPrefixes, mPostfixes) = unzip (prepost1 ++ prepost2 ++ prepost3) - let prefixes = concat mPrefixes - let postfixes = concat mPostfixes - - -- final code - let instrs' = prefixes - ++ [LiveInstr instr3 Nothing] - ++ postfixes - - return $ instrs' +regSpill_instr _ _ li@(LiveInstr _ Nothing) = return [li] +regSpill_instr platform regSlotMap (LiveInstr instr (Just _)) = do + -- work out which regs are read and written in this instr + let RU rlRead rlWritten = regUsageOfInstr platform instr + + -- sometimes a register is listed as being read more than once, + -- nub this so we don't end up inserting two lots of spill code. + let rsRead_ = nub rlRead + let rsWritten_ = nub rlWritten + + -- if a reg is modified, it appears in both lists, want to undo this.. + let rsRead = rsRead_ \\ rsWritten_ + let rsWritten = rsWritten_ \\ rsRead_ + let rsModify = intersect rsRead_ rsWritten_ + + -- work out if any of the regs being used are currently being spilled. + let rsSpillRead = filter (\r -> elemUFM r regSlotMap) rsRead + let rsSpillWritten = filter (\r -> elemUFM r regSlotMap) rsWritten + let rsSpillModify = filter (\r -> elemUFM r regSlotMap) rsModify + + -- rewrite the instr and work out spill code. 
+ (instr1, prepost1) <- mapAccumLM (spillRead regSlotMap) instr rsSpillRead + (instr2, prepost2) <- mapAccumLM (spillWrite regSlotMap) instr1 rsSpillWritten + (instr3, prepost3) <- mapAccumLM (spillModify regSlotMap) instr2 rsSpillModify + + let (mPrefixes, mPostfixes) = unzip (prepost1 ++ prepost2 ++ prepost3) + let prefixes = concat mPrefixes + let postfixes = concat mPostfixes + + -- final code + let instrs' = prefixes + ++ [LiveInstr instr3 Nothing] + ++ postfixes + + return instrs' -- | Add a RELOAD met a instruction to load a value for an instruction that diff --git a/compiler/GHC/CmmToAsm/SPARC/CodeGen.hs b/compiler/GHC/CmmToAsm/SPARC/CodeGen.hs index 13a9ef4f9e..c06d4178ad 100644 --- a/compiler/GHC/CmmToAsm/SPARC/CodeGen.hs +++ b/compiler/GHC/CmmToAsm/SPARC/CodeGen.hs @@ -73,7 +73,7 @@ cmmTopCodeGen (CmmProc info lab live graph) return tops -cmmTopCodeGen (CmmData sec dat) = do +cmmTopCodeGen (CmmData sec dat) = return [CmmData sec dat] -- no translation, we just use CmmStatic @@ -430,8 +430,8 @@ genCCall target dest_regs args PrimTarget mop -> do res <- outOfLineMachOp mop - lblOrMopExpr <- case res of - Left lbl -> do + case res of + Left lbl -> return (unitOL (CALL (Left (litToImm (CmmLabel lbl))) n_argRegs_used False)) Right mopExpr -> do @@ -441,8 +441,6 @@ genCCall target dest_regs args _ -> panic "SPARC.CodeGen.genCCall: arg_to_int" return (dyn_c `snocOL` CALL (Right dyn_r) n_argRegs_used False) - return lblOrMopExpr - let argcode = concatOL argcodes let (move_sp_down, move_sp_up) diff --git a/compiler/GHC/CmmToAsm/X86/CodeGen.hs b/compiler/GHC/CmmToAsm/X86/CodeGen.hs index aa4769f376..e59ddb01cc 100644 --- a/compiler/GHC/CmmToAsm/X86/CodeGen.hs +++ b/compiler/GHC/CmmToAsm/X86/CodeGen.hs @@ -1,6 +1,8 @@ -{-# LANGUAGE CPP, GADTs, NondecreasingIndentation #-} -{-# LANGUAGE TupleSections #-} {-# LANGUAGE BangPatterns #-} +{-# LANGUAGE CPP #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE NondecreasingIndentation #-} +{-# LANGUAGE TupleSections #-} #if __GLASGOW_HASKELL__ <= 808 -- GHC 8.10 deprecates this flag, but GHC 8.8 needs it @@ -131,7 +133,7 @@ cmmTopCodeGen (CmmProc info lab live graph) = do Just picBase -> initializePicBase_x86 ArchX86 os picBase tops Nothing -> return tops -cmmTopCodeGen (CmmData sec dat) = do +cmmTopCodeGen (CmmData sec dat) = return [CmmData sec (mkAlignment 1, dat)] -- no translation, we just use CmmStatic {- Note [Verifying basic blocks] @@ -750,11 +752,11 @@ getRegister' _ is32Bit (CmmMachOp (MO_SS_Conv W32 W64) [CmmLoad addr _]) getRegister' _ is32Bit (CmmMachOp (MO_Add W64) [CmmReg (CmmGlobal PicBaseReg), CmmLit displacement]) - | not is32Bit = do + | not is32Bit = return $ Any II64 (\dst -> unitOL $ LEA II64 (OpAddr (ripRel (litToImm displacement))) (OpReg dst)) -getRegister' platform is32Bit (CmmMachOp mop [x]) = do -- unary MachOps +getRegister' platform is32Bit (CmmMachOp mop [x]) = -- unary MachOps case mop of MO_F_Neg w -> sse2NegCode w x @@ -886,7 +888,7 @@ getRegister' platform is32Bit (CmmMachOp mop [x]) = do -- unary MachOps return (swizzleRegisterRep e_code new_format) -getRegister' _ is32Bit (CmmMachOp mop [x, y]) = do -- dyadic MachOps +getRegister' _ is32Bit (CmmMachOp mop [x, y]) = -- dyadic MachOps case mop of MO_F_Eq _ -> condFltReg is32Bit EQQ x y MO_F_Ne _ -> condFltReg is32Bit NE x y @@ -1371,17 +1373,16 @@ x86_complex_amode base index shift offset -- (see trivialCode where this function is used for an example). 
getNonClobberedOperand :: CmmExpr -> NatM (Operand, InstrBlock) -getNonClobberedOperand (CmmLit lit) = do +getNonClobberedOperand (CmmLit lit) = if isSuitableFloatingPointLit lit - then do - let CmmFloat _ w = lit - Amode addr code <- memConstant (mkAlignment $ widthInBytes w) lit - return (OpAddr addr, code) - else do - - is32Bit <- is32BitPlatform - platform <- getPlatform - if is32BitLit is32Bit lit && not (isFloatType (cmmLitType platform lit)) + then do + let CmmFloat _ w = lit + Amode addr code <- memConstant (mkAlignment $ widthInBytes w) lit + return (OpAddr addr, code) + else do + is32Bit <- is32BitPlatform + platform <- getPlatform + if is32BitLit is32Bit lit && not (isFloatType (cmmLitType platform lit)) then return (OpImm (litToImm lit), nilOL) else getNonClobberedOperand_generic (CmmLit lit) @@ -1407,7 +1408,7 @@ getNonClobberedOperand (CmmLoad mem pk) = do else return (src, nilOL) return (OpAddr src', mem_code `appOL` save_code) - else do + else -- if its a word or gcptr on 32bit? getNonClobberedOperand_generic (CmmLoad mem pk) @@ -1415,8 +1416,8 @@ getNonClobberedOperand e = getNonClobberedOperand_generic e getNonClobberedOperand_generic :: CmmExpr -> NatM (Operand, InstrBlock) getNonClobberedOperand_generic e = do - (reg, code) <- getNonClobberedReg e - return (OpReg reg, code) + (reg, code) <- getNonClobberedReg e + return (OpReg reg, code) amodeCouldBeClobbered :: Platform -> AddrMode -> Bool amodeCouldBeClobbered platform amode = any (regClobbered platform) (addrModeRegs amode) @@ -1795,7 +1796,7 @@ genJump (CmmLoad mem _) regs = do Amode target code <- getAmode mem return (code `snocOL` JMP (OpAddr target) regs) -genJump (CmmLit lit) regs = do +genJump (CmmLit lit) regs = return (unitOL (JMP (OpImm (litToImm lit)) regs)) genJump expr regs = do -- cgit v1.2.1
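
For context, here is a minimal, self-contained sketch (not taken from the patch; the
function names are made up for illustration) of the two mechanical patterns the hunks
above apply throughout GHC.CmmToAsm: listing one LANGUAGE pragma per line, and dropping
a `do` that merely wraps a single expression, which is what HLint reports as
"Redundant do". The shape loosely mirrors the `cmmTopCodeGen (CmmData sec dat)` hunks.

{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE ScopedTypeVariables #-}
-- The two pragmas above are only here to show the one-pragma-per-line
-- layout the commit adopts; the snippet itself does not depend on them.

-- Before the cleanup: the 'do' block contains a single statement,
-- so it adds nothing and HLint flags it as "Redundant do".
emitDataBefore :: Monad m => a -> m [a]
emitDataBefore dat = do
  return [dat]

-- After the cleanup: identical behaviour, one keyword fewer.
emitDataAfter :: Monad m => a -> m [a]
emitDataAfter dat = return [dat]

main :: IO ()
main = do
  xs <- emitDataBefore (41 :: Int)   -- this 'do' has several statements,
  ys <- emitDataAfter  (42 :: Int)   -- so it is not redundant
  print (xs ++ ys)

Running `hlint` over the "before" definition reports the "Redundant do" hint; the
"after" definition is the form the hunks above converge on.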