author     Herbert Valerio Riedel <hvr@gnu.org>  2018-08-06 12:53:06 -0400
committer  Ben Gamari <ben@smart-cactus.org>     2018-08-07 15:56:53 -0400
commit     aab8656ba0561e56048a1222c396d2d117aca5a7 (patch)
tree       8d14345e7f042ba5700b4275950e44dcc0ca1be9 /compiler/codeGen
parent     f22baa424aed66cd75ea05d4db7efdcd0e021217 (diff)
download   haskell-aab8656ba0561e56048a1222c396d2d117aca5a7.tar.gz
Turn on MonadFail desugaring by default
Summary:

This contains two commits:

----

Make GHC's code-base compatible w/ `MonadFail`

There were a couple of use-sites which implicitly used pattern-matches in `do`-notation even though the underlying `Monad` didn't explicitly support `fail`. This refactoring turns those use-sites into explicit case discriminations and adds a `MonadFail` instance for `UniqSM` (`UniqSM` was the worst offender, so this has been postponed for a follow-up refactoring).

----

Turn on MonadFail desugaring by default

This finally implements the phase scheduled for GHC 8.6 according to https://prime.haskell.org/wiki/Libraries/Proposals/MonadFail#Transitionalstrategy

This also preserves some tests that assumed MonadFail desugaring to be active; all GHC boot libraries were already made compatible with `MonadFail` long ago, so no changes were needed there.

Test Plan: Locally performed ./validate --fast

Reviewers: bgamari, simonmar, jrtc27, RyanGlScott

Reviewed By: bgamari

Subscribers: bgamari, RyanGlScott, rwbarton, thomie, carter

Differential Revision: https://phabricator.haskell.org/D5028
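For context (an editorial sketch, not part of this patch): with MonadFail desugaring enabled, a fallible pattern bind in `do`-notation desugars to a call to `Control.Monad.Fail.fail`, so the enclosing monad must have a `MonadFail` instance. The hypothetical `firstLine` functions below illustrate the fallible style and the explicit-case style that this patch rewrites GHC's own code into:

import Control.Monad.Fail (MonadFail)

-- Fallible pattern bind: under MonadFail desugaring the non-matching
-- case calls `fail`, so a `MonadFail m` constraint is required.
firstLine :: MonadFail m => m String -> m String
firstLine getText = do
  (l:_) <- lines <$> getText   -- fails on empty input, since lines "" == []
  return l

-- Explicit case discrimination, as done in this patch: only `Monad m`
-- is needed, and the non-matching case is handled by hand.
firstLine' :: Monad m => m String -> m String
firstLine' getText = do
  ls <- lines <$> getText
  case ls of
    (l:_) -> return l
    []    -> error "firstLine': empty input"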
Diffstat (limited to 'compiler/codeGen')
-rw-r--r--  compiler/codeGen/StgCmmCon.hs     6
-rw-r--r--  compiler/codeGen/StgCmmMonad.hs  11
-rw-r--r--  compiler/codeGen/StgCmmPrim.hs   29
3 files changed, 27 insertions, 19 deletions
diff --git a/compiler/codeGen/StgCmmCon.hs b/compiler/codeGen/StgCmmCon.hs
index f2287e0fbd..a8ec300157 100644
--- a/compiler/codeGen/StgCmmCon.hs
+++ b/compiler/codeGen/StgCmmCon.hs
@@ -86,8 +86,10 @@ cgTopRhsCon dflags id con args =
mk_payload (Padding len _) = return (CmmInt 0 (widthFromBytes len))
mk_payload (FieldOff arg _) = do
- CmmLit lit <- getArgAmode arg
- return lit
+ amode <- getArgAmode arg
+ case amode of
+ CmmLit lit -> return lit
+ _ -> panic "StgCmmCon.cgTopRhsCon"
nonptr_wds = tot_wds - ptr_wds
diff --git a/compiler/codeGen/StgCmmMonad.hs b/compiler/codeGen/StgCmmMonad.hs
index cc941a2e57..1a708670b3 100644
--- a/compiler/codeGen/StgCmmMonad.hs
+++ b/compiler/codeGen/StgCmmMonad.hs
@@ -29,7 +29,7 @@ module StgCmmMonad (
mkCall, mkCmmCall,
- forkClosureBody, forkLneBody, forkAlts, codeOnly,
+ forkClosureBody, forkLneBody, forkAlts, forkAltPair, codeOnly,
ConTagZ,
@@ -636,6 +636,15 @@ forkAlts branch_fcodes
-- NB foldl. state is the *left* argument to stateIncUsage
; return branch_results }
+forkAltPair :: FCode a -> FCode a -> FCode (a,a)
+-- Most common use of 'forkAlts'; having this helper function avoids
+-- accidental use of fallible pattern-matches in @do@-notation
+forkAltPair x y = do
+ xy' <- forkAlts [x,y]
+ case xy' of
+ [x',y'] -> return (x',y')
+ _ -> panic "forkAltPair"
+
-- collect the code emitted by an FCode computation
getCodeR :: FCode a -> FCode (a, CmmAGraph)
getCodeR fcode
diff --git a/compiler/codeGen/StgCmmPrim.hs b/compiler/codeGen/StgCmmPrim.hs
index da18949846..6ed3ca7402 100644
--- a/compiler/codeGen/StgCmmPrim.hs
+++ b/compiler/codeGen/StgCmmPrim.hs
@@ -1929,10 +1929,9 @@ doCopyMutableByteArrayOp = emitCopyByteArray copy
-- TODO: Optimize branch for common case of no aliasing.
copy src dst dst_p src_p bytes = do
dflags <- getDynFlags
- [moveCall, cpyCall] <- forkAlts [
- getCode $ emitMemmoveCall dst_p src_p bytes 1,
- getCode $ emitMemcpyCall dst_p src_p bytes 1
- ]
+ (moveCall, cpyCall) <- forkAltPair
+ (getCode $ emitMemmoveCall dst_p src_p bytes 1)
+ (getCode $ emitMemcpyCall dst_p src_p bytes 1)
emit =<< mkCmmIfThenElse (cmmEqWord dflags src dst) moveCall cpyCall
emitCopyByteArray :: (CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr
@@ -2073,12 +2072,11 @@ doCopyMutableArrayOp = emitCopyArray copy
-- TODO: Optimize branch for common case of no aliasing.
copy src dst dst_p src_p bytes = do
dflags <- getDynFlags
- [moveCall, cpyCall] <- forkAlts [
- getCode $ emitMemmoveCall dst_p src_p (mkIntExpr dflags bytes)
- (wORD_SIZE dflags),
- getCode $ emitMemcpyCall dst_p src_p (mkIntExpr dflags bytes)
- (wORD_SIZE dflags)
- ]
+ (moveCall, cpyCall) <- forkAltPair
+ (getCode $ emitMemmoveCall dst_p src_p (mkIntExpr dflags bytes)
+ (wORD_SIZE dflags))
+ (getCode $ emitMemcpyCall dst_p src_p (mkIntExpr dflags bytes)
+ (wORD_SIZE dflags))
emit =<< mkCmmIfThenElse (cmmEqWord dflags src dst) moveCall cpyCall
emitCopyArray :: (CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> ByteOff
@@ -2136,12 +2134,11 @@ doCopySmallMutableArrayOp = emitCopySmallArray copy
-- TODO: Optimize branch for common case of no aliasing.
copy src dst dst_p src_p bytes = do
dflags <- getDynFlags
- [moveCall, cpyCall] <- forkAlts
- [ getCode $ emitMemmoveCall dst_p src_p (mkIntExpr dflags bytes)
- (wORD_SIZE dflags)
- , getCode $ emitMemcpyCall dst_p src_p (mkIntExpr dflags bytes)
- (wORD_SIZE dflags)
- ]
+ (moveCall, cpyCall) <- forkAltPair
+ (getCode $ emitMemmoveCall dst_p src_p (mkIntExpr dflags bytes)
+ (wORD_SIZE dflags))
+ (getCode $ emitMemcpyCall dst_p src_p (mkIntExpr dflags bytes)
+ (wORD_SIZE dflags))
emit =<< mkCmmIfThenElse (cmmEqWord dflags src dst) moveCall cpyCall
emitCopySmallArray :: (CmmExpr -> CmmExpr -> CmmExpr -> CmmExpr -> ByteOff