summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSimon Peyton Jones <simonpj@microsoft.com>2014-09-26 12:58:41 +0100
committerSimon Peyton Jones <simonpj@microsoft.com>2014-09-26 13:55:11 +0100
commit0ef1cc67dc472493b7dee1a28dedbfe938536b8f (patch)
tree59aa09b676707607792fd8a0430ba23afc608839
parentac157de3cd959a18a71fa056403675e2c0563497 (diff)
downloadhaskell-0ef1cc67dc472493b7dee1a28dedbfe938536b8f.tar.gz
De-tabify and remove trailing whitespace
-rw-r--r--compiler/basicTypes/MkId.lhs270
-rw-r--r--compiler/basicTypes/OccName.lhs445
-rw-r--r--compiler/basicTypes/VarSet.lhs140
-rw-r--r--compiler/coreSyn/CoreArity.lhs372
-rw-r--r--compiler/coreSyn/CoreSyn.lhs518
-rw-r--r--compiler/coreSyn/CoreUnfold.lhs564
-rw-r--r--compiler/deSugar/DsArrows.lhs440
-rw-r--r--compiler/deSugar/DsBinds.lhs348
-rw-r--r--compiler/deSugar/DsUtils.lhs220
-rw-r--r--compiler/deSugar/MatchCon.lhs124
-rw-r--r--compiler/iface/BuildTyCl.lhs292
-rw-r--r--compiler/iface/IfaceEnv.lhs158
-rw-r--r--compiler/simplCore/FloatIn.lhs248
-rw-r--r--compiler/simplCore/FloatOut.lhs266
-rw-r--r--compiler/typecheck/FamInst.lhs88
-rw-r--r--compiler/typecheck/Inst.lhs204
-rw-r--r--compiler/typecheck/TcClassDcl.lhs236
-rw-r--r--compiler/typecheck/TcDefaults.lhs85
-rw-r--r--compiler/typecheck/TcErrors.lhs208
-rw-r--r--compiler/typecheck/TcHsType.lhs504
-rw-r--r--compiler/typecheck/TcMType.lhs214
-rw-r--r--compiler/typecheck/TcMatches.lhs436
-rw-r--r--compiler/typecheck/TcRules.lhs117
-rw-r--r--compiler/types/Class.lhs104
-rw-r--r--compiler/types/OptCoercion.lhs54
-rw-r--r--compiler/types/TypeRep.lhs236
26 files changed, 3366 insertions, 3525 deletions
diff --git a/compiler/basicTypes/MkId.lhs b/compiler/basicTypes/MkId.lhs
index 7816ad9005..5a317e2b6c 100644
--- a/compiler/basicTypes/MkId.lhs
+++ b/compiler/basicTypes/MkId.lhs
@@ -13,12 +13,6 @@ have a standard form, namely:
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module MkId (
mkDictFunId, mkDictFunTy, mkDictSelId, mkDictSelRhs,
@@ -39,8 +33,8 @@ module MkId (
nullAddrId, seqId, lazyId, lazyIdKey,
coercionTokenId, magicDictId, coerceId,
- -- Re-export error Ids
- module PrelRules
+ -- Re-export error Ids
+ module PrelRules
) where
#include "HsVersions.h"
@@ -54,7 +48,7 @@ import FamInstEnv
import Coercion
import TcType
import MkCore
-import CoreUtils ( exprType, mkCast )
+import CoreUtils ( exprType, mkCast )
import CoreUnfold
import Literal
import TyCon
@@ -106,8 +100,8 @@ There are several reasons why an Id might appear in the wiredInIds:
is 'open'; that is can be unified with an unboxed type
[The interface file format now carry such information, but there's
- no way yet of expressing at the definition site for these
- error-reporting functions that they have an 'open'
+ no way yet of expressing at the definition site for these
+ error-reporting functions that they have an 'open'
result type. -- sof 1/99]
(3) Other error functions (rUNTIME_ERROR_ID) are wired in (a) because
@@ -118,7 +112,7 @@ There are several reasons why an Id might appear in the wiredInIds:
strictness of the version defined in GHC.Base
In cases (2-4), the function has a definition in a library module, and
-can be called; but the wired-in version means that the details are
+can be called; but the wired-in version means that the details are
never read from that module's interface file; instead, the full definition
is right here.
@@ -126,7 +120,7 @@ is right here.
wiredInIds :: [Id]
wiredInIds
= [lazyId, dollarId]
- ++ errorIds -- Defined in MkCore
+ ++ errorIds -- Defined in MkCore
++ ghcPrimIds
-- These Ids are exported from GHC.Prim
@@ -159,7 +153,7 @@ We're going to build a constructor that looks like:
data (Data a, C b) => T a b = T1 !a !Int b
- T1 = /\ a b ->
+ T1 = /\ a b ->
\d1::Data a, d2::C b ->
\p q r -> case p of { p ->
case q of { q ->
@@ -175,7 +169,7 @@ Notice that
the types a and Int. Once we've done that we can throw d1 away too.
* We use (case p of q -> ...) to evaluate p, rather than "seq" because
- all that matters is that the arguments are evaluated. "seq" is
+ all that matters is that the arguments are evaluated. "seq" is
very careful to preserve evaluation order, which we don't need
to be here.
@@ -254,7 +248,7 @@ part of the theta-type, so all is well.
%************************************************************************
Selecting a field for a dictionary. If there is just one field, then
-there's nothing to do.
+there's nothing to do.
Dictionary selectors may get nested forall-types. Thus:
@@ -263,8 +257,8 @@ Dictionary selectors may get nested forall-types. Thus:
Then the top-level type for op is
- op :: forall a. Foo a =>
- forall b. Ord b =>
+ op :: forall a. Foo a =>
+ forall b. Ord b =>
a -> b -> b
This is unlike ordinary record selectors, which have all the for-alls
@@ -272,18 +266,18 @@ at the outside. When dealing with classes it's very convenient to
recover the original type signature from the class op selector.
\begin{code}
-mkDictSelId :: Name -- Name of one of the *value* selectors
- -- (dictionary superclass or method)
+mkDictSelId :: Name -- Name of one of the *value* selectors
+ -- (dictionary superclass or method)
-> Class -> Id
mkDictSelId name clas
= mkGlobalId (ClassOpId clas) name sel_ty info
where
- tycon = classTyCon clas
+ tycon = classTyCon clas
sel_names = map idName (classAllSelIds clas)
- new_tycon = isNewTyCon tycon
- [data_con] = tyConDataCons tycon
- tyvars = dataConUnivTyVars data_con
- arg_tys = dataConRepArgTys data_con -- Includes the dictionary superclasses
+ new_tycon = isNewTyCon tycon
+ [data_con] = tyConDataCons tycon
+ tyvars = dataConUnivTyVars data_con
+ arg_tys = dataConRepArgTys data_con -- Includes the dictionary superclasses
val_index = assoc "MkId.mkDictSelId" (sel_names `zip` [0..]) name
sel_ty = mkForAllTys tyvars (mkFunTy (mkClassPred clas (mkTyVarTys tyvars))
@@ -296,23 +290,23 @@ mkDictSelId name clas
info | new_tycon
= base_info `setInlinePragInfo` alwaysInlinePragma
`setUnfoldingInfo` mkInlineUnfolding (Just 1) (mkDictSelRhs clas val_index)
- -- See Note [Single-method classes] in TcInstDcls
- -- for why alwaysInlinePragma
+ -- See Note [Single-method classes] in TcInstDcls
+ -- for why alwaysInlinePragma
| otherwise
= base_info `setSpecInfo` mkSpecInfo [rule]
- -- Add a magic BuiltinRule, but no unfolding
- -- so that the rule is always available to fire.
- -- See Note [ClassOp/DFun selection] in TcInstDcls
+ -- Add a magic BuiltinRule, but no unfolding
+ -- so that the rule is always available to fire.
+ -- See Note [ClassOp/DFun selection] in TcInstDcls
n_ty_args = length tyvars
-- This is the built-in rule that goes
- -- op (dfT d1 d2) ---> opT d1 d2
- rule = BuiltinRule { ru_name = fsLit "Class op " `appendFS`
- occNameFS (getOccName name)
+ -- op (dfT d1 d2) ---> opT d1 d2
+ rule = BuiltinRule { ru_name = fsLit "Class op " `appendFS`
+ occNameFS (getOccName name)
, ru_fn = name
- , ru_nargs = n_ty_args + 1
+ , ru_nargs = n_ty_args + 1
, ru_try = dictSelRule val_index n_ty_args }
-- The strictness signature is of the form U(AAAVAAAA) -> T
@@ -332,22 +326,22 @@ mkDictSelRhs :: Class
mkDictSelRhs clas val_index
= mkLams tyvars (Lam dict_id rhs_body)
where
- tycon = classTyCon clas
- new_tycon = isNewTyCon tycon
- [data_con] = tyConDataCons tycon
- tyvars = dataConUnivTyVars data_con
- arg_tys = dataConRepArgTys data_con -- Includes the dictionary superclasses
+ tycon = classTyCon clas
+ new_tycon = isNewTyCon tycon
+ [data_con] = tyConDataCons tycon
+ tyvars = dataConUnivTyVars data_con
+ arg_tys = dataConRepArgTys data_con -- Includes the dictionary superclasses
the_arg_id = getNth arg_ids val_index
- pred = mkClassPred clas (mkTyVarTys tyvars)
- dict_id = mkTemplateLocal 1 pred
- arg_ids = mkTemplateLocalsNum 2 arg_tys
+ pred = mkClassPred clas (mkTyVarTys tyvars)
+ dict_id = mkTemplateLocal 1 pred
+ arg_ids = mkTemplateLocalsNum 2 arg_tys
rhs_body | new_tycon = unwrapNewTypeBody tycon (map mkTyVarTy tyvars) (Var dict_id)
| otherwise = Case (Var dict_id) dict_id (idType the_arg_id)
[(DataAlt data_con, arg_ids, varToCoreExpr the_arg_id)]
- -- varToCoreExpr needed for equality superclass selectors
- -- sel a b d = case x of { MkC _ (g:a~b) _ -> CO g }
+ -- varToCoreExpr needed for equality superclass selectors
+ -- sel a b d = case x of { MkC _ (g:a~b) _ -> CO g }
dictSelRule :: Int -> Arity -> RuleFun
-- Tries to persuade the argument to look like a constructor
@@ -403,7 +397,7 @@ mkDataConWorkId wkr_name data_con
-- the simplifier thinks that y is "sure to be evaluated" (because
-- $wMkT is strict) and drops the case. No, $wMkT is not strict.
--
- -- When the simplifer sees a pattern
+ -- When the simplifer sees a pattern
-- case e of MkT x -> ...
-- it uses the dataConRepStrictness of MkT to mark x as evaluated;
-- but that's fine... dataConRepStrictness comes from the data con
@@ -420,16 +414,16 @@ mkDataConWorkId wkr_name data_con
id_arg1 = mkTemplateLocal 1 (head nt_arg_tys)
newtype_unf = ASSERT2( isVanillaDataCon data_con &&
isSingleton nt_arg_tys, ppr data_con )
- -- Note [Newtype datacons]
- mkCompulsoryUnfolding $
- mkLams nt_tvs $ Lam id_arg1 $
+ -- Note [Newtype datacons]
+ mkCompulsoryUnfolding $
+ mkLams nt_tvs $ Lam id_arg1 $
wrapNewTypeBody tycon res_ty_args (Var id_arg1)
dataConCPR :: DataCon -> DmdResult
dataConCPR con
- | isDataTyCon tycon -- Real data types only; that is,
+ | isDataTyCon tycon -- Real data types only; that is,
-- not unboxed tuples or newtypes
- , isVanillaDataCon con -- No existentials
+ , isVanillaDataCon con -- No existentials
, wkr_arity > 0
, wkr_arity <= mAX_CPR_SIZE
= if is_prod then vanillaCprProdRes (dataConRepArity con)
@@ -444,9 +438,9 @@ dataConCPR con
mAX_CPR_SIZE :: Arity
mAX_CPR_SIZE = 10
-- We do not treat very big tuples as CPR-ish:
- -- a) for a start we get into trouble because there aren't
- -- "enough" unboxed tuple types (a tiresome restriction,
- -- but hard to fix),
+ -- a) for a start we get into trouble because there aren't
+ -- "enough" unboxed tuple types (a tiresome restriction,
+ -- but hard to fix),
-- b) more importantly, big unboxed tuples get returned mainly
-- on the stack, and are often then allocated in the heap
-- by the caller. So doing CPR for them may in fact make
@@ -455,8 +449,8 @@ dataConCPR con
-------------------------------------------------
-- Data constructor representation
---
--- This is where we decide how to wrap/unwrap the
+--
+-- This is where we decide how to wrap/unwrap the
-- constructor fields
--
--------------------------------------------------
@@ -480,39 +474,39 @@ mkDataConRep dflags fam_envs wrap_name data_con
| otherwise
= do { wrap_args <- mapM newLocal wrap_arg_tys
- ; wrap_body <- mk_rep_app (wrap_args `zip` dropList eq_spec unboxers)
+ ; wrap_body <- mk_rep_app (wrap_args `zip` dropList eq_spec unboxers)
initial_wrap_app
; let wrap_id = mkGlobalId (DataConWrapId data_con) wrap_name wrap_ty wrap_info
wrap_info = noCafIdInfo
- `setArityInfo` wrap_arity
- -- It's important to specify the arity, so that partial
- -- applications are treated as values
- `setInlinePragInfo` alwaysInlinePragma
- `setUnfoldingInfo` wrap_unf
- `setStrictnessInfo` wrap_sig
- -- We need to get the CAF info right here because TidyPgm
- -- does not tidy the IdInfo of implicit bindings (like the wrapper)
- -- so it not make sure that the CAF info is sane
-
- wrap_sig = mkClosedStrictSig wrap_arg_dmds (dataConCPR data_con)
- wrap_arg_dmds = map mk_dmd (dropList eq_spec wrap_bangs)
- mk_dmd str | isBanged str = evalDmd
- | otherwise = topDmd
- -- The Cpr info can be important inside INLINE rhss, where the
- -- wrapper constructor isn't inlined.
- -- And the argument strictness can be important too; we
- -- may not inline a contructor when it is partially applied.
- -- For example:
- -- data W = C !Int !Int !Int
- -- ...(let w = C x in ...(w p q)...)...
- -- we want to see that w is strict in its two arguments
-
- wrap_unf = mkInlineUnfolding (Just wrap_arity) wrap_rhs
+ `setArityInfo` wrap_arity
+ -- It's important to specify the arity, so that partial
+ -- applications are treated as values
+ `setInlinePragInfo` alwaysInlinePragma
+ `setUnfoldingInfo` wrap_unf
+ `setStrictnessInfo` wrap_sig
+ -- We need to get the CAF info right here because TidyPgm
+ -- does not tidy the IdInfo of implicit bindings (like the wrapper)
+ -- so it not make sure that the CAF info is sane
+
+ wrap_sig = mkClosedStrictSig wrap_arg_dmds (dataConCPR data_con)
+ wrap_arg_dmds = map mk_dmd (dropList eq_spec wrap_bangs)
+ mk_dmd str | isBanged str = evalDmd
+ | otherwise = topDmd
+ -- The Cpr info can be important inside INLINE rhss, where the
+ -- wrapper constructor isn't inlined.
+ -- And the argument strictness can be important too; we
+ -- may not inline a contructor when it is partially applied.
+ -- For example:
+ -- data W = C !Int !Int !Int
+ -- ...(let w = C x in ...(w p q)...)...
+ -- we want to see that w is strict in its two arguments
+
+ wrap_unf = mkInlineUnfolding (Just wrap_arity) wrap_rhs
wrap_tvs = (univ_tvs `minusList` map fst eq_spec) ++ ex_tvs
- wrap_rhs = mkLams wrap_tvs $
- mkLams wrap_args $
- wrapFamInstBody tycon res_ty_args $
+ wrap_rhs = mkLams wrap_tvs $
+ mkLams wrap_args $
+ wrapFamInstBody tycon res_ty_args $
wrap_body
; return (DCR { dcr_wrap_id = wrap_id
@@ -532,9 +526,9 @@ mkDataConRep dflags fam_envs wrap_name data_con
wrap_arg_tys = theta ++ orig_arg_tys
wrap_arity = length wrap_arg_tys
- -- The wrap_args are the arguments *other than* the eq_spec
- -- Because we are going to apply the eq_spec args manually in the
- -- wrapper
+ -- The wrap_args are the arguments *other than* the eq_spec
+ -- Because we are going to apply the eq_spec args manually in the
+ -- wrapper
(wrap_bangs, rep_tys_w_strs, wrappers)
= unzip3 (zipWith (dataConArgRep dflags fam_envs) all_arg_tys orig_bangs)
@@ -548,16 +542,16 @@ mkDataConRep dflags fam_envs wrap_name data_con
initial_wrap_app = Var (dataConWorkId data_con)
`mkTyApps` res_ty_args
- `mkVarApps` ex_tvs
- `mkCoApps` map (mkReflCo Nominal . snd) eq_spec
- -- Dont box the eq_spec coercions since they are
- -- marked as HsUnpack by mk_dict_strict_mark
+ `mkVarApps` ex_tvs
+ `mkCoApps` map (mkReflCo Nominal . snd) eq_spec
+ -- Dont box the eq_spec coercions since they are
+ -- marked as HsUnpack by mk_dict_strict_mark
mk_boxer :: [Boxer] -> DataConBoxer
- mk_boxer boxers = DCB (\ ty_args src_vars ->
+ mk_boxer boxers = DCB (\ ty_args src_vars ->
do { let ex_vars = takeList ex_tvs src_vars
subst1 = mkTopTvSubst (univ_tvs `zip` ty_args)
- subst2 = extendTvSubstList subst1 ex_tvs
+ subst2 = extendTvSubstList subst1 ex_tvs
(mkTyVarTys ex_vars)
; (rep_ids, binds) <- go subst2 boxers (dropList ex_tvs src_vars)
; return (ex_vars ++ rep_ids, binds) } )
@@ -573,21 +567,21 @@ mkDataConRep dflags fam_envs wrap_name data_con
go _ (_:_) [] = pprPanic "mk_boxer" (ppr data_con)
mk_rep_app :: [(Id,Unboxer)] -> CoreExpr -> UniqSM CoreExpr
- mk_rep_app [] con_app
+ mk_rep_app [] con_app
= return con_app
- mk_rep_app ((wrap_arg, unboxer) : prs) con_app
+ mk_rep_app ((wrap_arg, unboxer) : prs) con_app
= do { (rep_ids, unbox_fn) <- unboxer wrap_arg
; expr <- mk_rep_app prs (mkVarApps con_app rep_ids)
; return (unbox_fn expr) }
-------------------------
newLocal :: Type -> UniqSM Var
-newLocal ty = do { uniq <- getUniqueUs
+newLocal ty = do { uniq <- getUniqueUs
; return (mkSysLocal (fsLit "dt") uniq ty) }
-------------------------
dataConArgRep
- :: DynFlags
+ :: DynFlags
-> FamInstEnvs
-> Type -> HsBang
-> ( HsBang -- Like input but with HsUnpackFailed if necy
@@ -600,10 +594,10 @@ dataConArgRep _ _ arg_ty HsNoBang
dataConArgRep _ _ arg_ty (HsUserBang _ False) -- No '!'
= (HsNoBang, [(arg_ty, NotMarkedStrict)], (unitUnboxer, unitBoxer))
-dataConArgRep dflags fam_envs arg_ty
+dataConArgRep dflags fam_envs arg_ty
(HsUserBang unpk_prag True) -- {-# UNPACK #-} !
| not (gopt Opt_OmitInterfacePragmas dflags) -- Don't unpack if -fomit-iface-pragmas
- -- Don't unpack if we aren't optimising; rather arbitrarily,
+ -- Don't unpack if we aren't optimising; rather arbitrarily,
-- we use -fomit-iface-pragmas as the indication
, let mb_co = topNormaliseType_maybe fam_envs arg_ty
-- Unwrap type families and newtypes
@@ -612,7 +606,7 @@ dataConArgRep dflags fam_envs arg_ty
, (rep_tys, wrappers) <- dataConArgUnpack arg_ty'
, case unpk_prag of
Nothing -> gopt Opt_UnboxStrictFields dflags
- || (gopt Opt_UnboxSmallStrictFields dflags
+ || (gopt Opt_UnboxSmallStrictFields dflags
&& length rep_tys <= 1) -- See Note [Unpack one-wide fields]
Just unpack_me -> unpack_me
= case mb_co of
@@ -647,8 +641,8 @@ wrapCo co rep_ty (unbox_rep, box_rep) -- co :: arg_ty ~ rep_ty
; (rep_ids, rep_fn) <- unbox_rep rep_id
; let co_bind = NonRec rep_id (Var arg_id `Cast` co)
; return (rep_ids, Let co_bind . rep_fn) }
- boxer = Boxer $ \ subst ->
- do { (rep_ids, rep_expr)
+ boxer = Boxer $ \ subst ->
+ do { (rep_ids, rep_expr)
<- case box_rep of
UnitBox -> do { rep_id <- newLocal (TcType.substTy subst rep_ty)
; return ([rep_id], Var rep_id) }
@@ -676,7 +670,7 @@ dataConArgUnpack arg_ty
| Just (tc, tc_args) <- splitTyConApp_maybe arg_ty
, Just con <- tyConSingleAlgDataCon_maybe tc
-- NB: check for an *algebraic* data type
- -- A recursive newtype might mean that
+ -- A recursive newtype might mean that
-- 'arg_ty' is a newtype
, let rep_tys = dataConInstArgTys con tc_args
= ASSERT( isVanillaDataCon con )
@@ -697,7 +691,7 @@ dataConArgUnpack arg_ty
-- An interface file specified Unpacked, but we couldn't unpack it
isUnpackableType :: FamInstEnvs -> Type -> Bool
--- True if we can unpack the UNPACK the argument type
+-- True if we can unpack the UNPACK the argument type
-- See Note [Recursive unboxing]
-- We look "deeply" inside rather than relying on the DataCons
-- we encounter on the way, because otherwise we might well
@@ -721,12 +715,12 @@ isUnpackableType fam_envs ty
Just con | isVanillaDataCon con
-> ok_con_args (tcs `addOneToNameSet` getName tc) con
_ -> True
- | otherwise
+ | otherwise
= True
ok_con_args tcs con
= all (ok_arg tcs) (dataConOrigArgTys con `zip` dataConStrictMarks con)
- -- NB: dataConStrictMarks gives the *user* request;
+ -- NB: dataConStrictMarks gives the *user* request;
-- We'd get a black hole if we used dataConRepBangs
attempt_unpack (HsUnpack {}) = True
@@ -751,9 +745,9 @@ For example:
data G = G !F !F
All of these should have an Int# as their representation, except
-G which should have two Int#s.
+G which should have two Int#s.
-However
+However
data T = T !(S Int)
data S = S !a
@@ -769,22 +763,22 @@ The representation arguments of MkR are the *representation* arguments
of S (plus Int); the rep args of MkS are Int#. This is all fine.
But be careful not to try to unbox this!
- data T = MkT {-# UNPACK #-} !T Int
+ data T = MkT {-# UNPACK #-} !T Int
Because then we'd get an infinite number of arguments.
Here is a more complicated case:
- data S = MkS {-# UNPACK #-} !T Int
- data T = MkT {-# UNPACK #-} !S Int
+ data S = MkS {-# UNPACK #-} !T Int
+ data T = MkT {-# UNPACK #-} !S Int
Each of S and T must decide independendently whether to unpack
and they had better not both say yes. So they must both say no.
Also behave conservatively when there is no UNPACK pragma
- data T = MkS !T Int
+ data T = MkS !T Int
with -funbox-strict-fields or -funbox-small-strict-fields
we need to behave as if there was an UNPACK pragma there.
But it's the *argument* type that matters. This is fine:
- data S = MkS S !Int
+ data S = MkS S !Int
because Int is non-recursive.
@@ -800,8 +794,8 @@ space for each equality predicate, so it's pretty important!
\begin{code}
mk_pred_strict_mark :: PredType -> HsBang
-mk_pred_strict_mark pred
- | isEqPred pred = HsUnpack Nothing -- Note [Unpack equality predicates]
+mk_pred_strict_mark pred
+ | isEqPred pred = HsUnpack Nothing -- Note [Unpack equality predicates]
| otherwise = HsNoBang
\end{code}
@@ -824,7 +818,7 @@ wrapNewTypeBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr
-- e `cast` (CoT [a])
--
-- If a coercion constructor is provided in the newtype, then we use
--- it, otherwise the wrap/unwrap are both no-ops
+-- it, otherwise the wrap/unwrap are both no-ops
--
-- If the we are dealing with a newtype *instance*, we have a second coercion
-- identifying the family instance with the constructor of the newtype
@@ -895,39 +889,39 @@ unwrapTypeUnbranchedFamInstScrut axiom
\begin{code}
mkPrimOpId :: PrimOp -> Id
-mkPrimOpId prim_op
+mkPrimOpId prim_op
= id
where
(tyvars,arg_tys,res_ty, arity, strict_sig) = primOpSig prim_op
ty = mkForAllTys tyvars (mkFunTys arg_tys res_ty)
- name = mkWiredInName gHC_PRIM (primOpOcc prim_op)
+ name = mkWiredInName gHC_PRIM (primOpOcc prim_op)
(mkPrimOpIdUnique (primOpTag prim_op))
(AnId id) UserSyntax
id = mkGlobalId (PrimOpId prim_op) name ty info
-
+
info = noCafIdInfo
`setSpecInfo` mkSpecInfo (maybeToList $ primOpRules name prim_op)
`setArityInfo` arity
`setStrictnessInfo` strict_sig
`setInlinePragInfo` neverInlinePragma
-- We give PrimOps a NOINLINE pragma so that we don't
- -- get silly warnings from Desugar.dsRule (the inline_shadows_rule
+ -- get silly warnings from Desugar.dsRule (the inline_shadows_rule
-- test) about a RULE conflicting with a possible inlining
-- cf Trac #7287
-- For each ccall we manufacture a separate CCallOpId, giving it
-- a fresh unique, a type that is correct for this particular ccall,
-- and a CCall structure that gives the correct details about calling
--- convention etc.
+-- convention etc.
--
-- The *name* of this Id is a local name whose OccName gives the full
--- details of the ccall, type and all. This means that the interface
+-- details of the ccall, type and all. This means that the interface
-- file reader can reconstruct a suitable Id
mkFCallId :: DynFlags -> Unique -> ForeignCall -> Type -> Id
mkFCallId dflags uniq fcall ty
= ASSERT( isEmptyVarSet (tyVarsOfType ty) )
- -- A CCallOpId should have no free type variables;
+ -- A CCallOpId should have no free type variables;
-- when doing substitutions won't substitute over it
mkGlobalId (FCallId fcall) name ty info
where
@@ -966,7 +960,7 @@ NB: See also Note [Exported LocalIds] in Id
mkDictFunId :: Name -- Name to use for the dict fun;
-> [TyVar]
-> ThetaType
- -> Class
+ -> Class
-> [Type]
-> Id
-- Implements the DFun Superclass Invariant (see TcInstDcls)
@@ -985,8 +979,8 @@ mkDictFunTy tvs theta clas tys
= (length silent_theta, dfun_ty)
where
dfun_ty = mkSigmaTy tvs (silent_theta ++ theta) (mkClassPred clas tys)
- silent_theta
- | null tvs, null theta
+ silent_theta
+ | null tvs, null theta
= []
| otherwise
= filterOut discard $
@@ -1070,7 +1064,7 @@ unsafeCoerceId
where
info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma
`setUnfoldingInfo` mkCompulsoryUnfolding rhs
-
+
ty = mkForAllTys [openAlphaTyVar,openBetaTyVar]
(mkFunTy openAlphaTy openBetaTy)
@@ -1081,7 +1075,7 @@ unsafeCoerceId
------------------------------------------------
nullAddrId :: Id
-- nullAddr# :: Addr#
--- The reason is is here is because we don't provide
+-- The reason is is here is because we don't provide
-- a way to write this literal in Haskell.
nullAddrId = pcMiscPrelId nullAddrName addrPrimTy info
where
@@ -1089,13 +1083,13 @@ nullAddrId = pcMiscPrelId nullAddrName addrPrimTy info
`setUnfoldingInfo` mkCompulsoryUnfolding (Lit nullAddrLit)
------------------------------------------------
-seqId :: Id -- See Note [seqId magic]
+seqId :: Id -- See Note [seqId magic]
seqId = pcMiscPrelId seqName ty info
where
info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma
`setUnfoldingInfo` mkCompulsoryUnfolding rhs
`setSpecInfo` mkSpecInfo [seq_cast_rule]
-
+
ty = mkForAllTys [alphaTyVar,betaTyVar]
(mkFunTy alphaTy (mkFunTy betaTy betaTy))
@@ -1119,7 +1113,7 @@ match_seq_of_cast _ _ _ [Type _, Type res_ty, Cast scrut co, expr]
match_seq_of_cast _ _ _ _ = Nothing
------------------------------------------------
-lazyId :: Id -- See Note [lazyId magic]
+lazyId :: Id -- See Note [lazyId magic]
lazyId = pcMiscPrelId lazyIdName ty info
where
info = noCafIdInfo
@@ -1151,7 +1145,7 @@ coerceId = pcMiscPrelId coerceName ty info
[eqR,x,eq] = mkTemplateLocals [eqRTy, aTy,eqRPrimTy]
rhs = mkLams [kv,a,b,eqR,x] $
mkWildCase (Var eqR) eqRTy bTy $
- [(DataAlt coercibleDataCon, [eq], Cast (Var x) (CoVarCo eq))]
+ [(DataAlt coercibleDataCon, [eq], Cast (Var x) (CoVarCo eq))]
\end{code}
Note [dollarId magic]
@@ -1186,7 +1180,7 @@ it on unboxed things, (unsafeCoerce# 3#) :: Int. Its type is
Note [seqId magic]
~~~~~~~~~~~~~~~~~~
-'GHC.Prim.seq' is special in several ways.
+'GHC.Prim.seq' is special in several ways.
a) Its second arg can have an unboxed type
x `seq` (v +# w)
@@ -1194,7 +1188,7 @@ a) Its second arg can have an unboxed type
b) Its fixity is set in LoadIface.ghcPrimIface
-c) It has quite a bit of desugaring magic.
+c) It has quite a bit of desugaring magic.
See DsUtils.lhs Note [Desugaring seq (1)] and (2) and (3)
d) There is some special rule handing: Note [User-defined RULES for seq]
@@ -1231,10 +1225,10 @@ We also have the following built-in rule for seq
seq (x `cast` co) y = seq x y
This eliminates unnecessary casts and also allows other seq rules to
-match more often. Notably,
+match more often. Notably,
seq (f x `cast` co) y --> seq (f x) y
-
+
and now a user-defined rule for seq (see Note [User-defined RULES for seq])
may fire.
@@ -1250,7 +1244,7 @@ not from GHC.Base.hi. This is important, because the strictness
analyser will spot it as strict!
Also no unfolding in lazyId: it gets "inlined" by a HACK in CorePrep.
-It's very important to do this inlining *after* unfoldings are exposed
+It's very important to do this inlining *after* unfoldings are exposed
in the interface file. Otherwise, the unfolding for (say) pseq in the
interface file will not mention 'lazy', so if we inline 'pseq' we'll totally
miss the very thing that 'lazy' was there for in the first place.
@@ -1337,9 +1331,9 @@ voidPrimId = pcMiscPrelId voidPrimIdName voidPrimTy
voidArgId :: Id -- Local lambda-bound :: Void#
voidArgId = mkSysLocal (fsLit "void") voidArgIdKey voidPrimTy
-coercionTokenId :: Id -- :: () ~ ()
+coercionTokenId :: Id -- :: () ~ ()
coercionTokenId -- Used to replace Coercion terms when we go to STG
- = pcMiscPrelId coercionTokenName
+ = pcMiscPrelId coercionTokenName
(mkTyConApp eqPrimTyCon [liftedTypeKind, unitTy, unitTy])
noCafIdInfo
\end{code}
diff --git a/compiler/basicTypes/OccName.lhs b/compiler/basicTypes/OccName.lhs
index d942362db7..1f1fda8ae3 100644
--- a/compiler/basicTypes/OccName.lhs
+++ b/compiler/basicTypes/OccName.lhs
@@ -22,89 +22,82 @@
--
-- * 'Var.Var': see "Var#name_types"
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module OccName (
- -- * The 'NameSpace' type
- NameSpace, -- Abstract
+ -- * The 'NameSpace' type
+ NameSpace, -- Abstract
nameSpacesRelated,
-
- -- ** Construction
- -- $real_vs_source_data_constructors
- tcName, clsName, tcClsName, dataName, varName,
- tvName, srcDataName,
-
- -- ** Pretty Printing
- pprNameSpace, pprNonVarNameSpace, pprNameSpaceBrief,
-
- -- * The 'OccName' type
- OccName, -- Abstract, instance of Outputable
- pprOccName,
-
- -- ** Construction
- mkOccName, mkOccNameFS,
- mkVarOcc, mkVarOccFS,
- mkDataOcc, mkDataOccFS,
- mkTyVarOcc, mkTyVarOccFS,
- mkTcOcc, mkTcOccFS,
- mkClsOcc, mkClsOccFS,
+
+ -- ** Construction
+ -- $real_vs_source_data_constructors
+ tcName, clsName, tcClsName, dataName, varName,
+ tvName, srcDataName,
+
+ -- ** Pretty Printing
+ pprNameSpace, pprNonVarNameSpace, pprNameSpaceBrief,
+
+ -- * The 'OccName' type
+ OccName, -- Abstract, instance of Outputable
+ pprOccName,
+
+ -- ** Construction
+ mkOccName, mkOccNameFS,
+ mkVarOcc, mkVarOccFS,
+ mkDataOcc, mkDataOccFS,
+ mkTyVarOcc, mkTyVarOccFS,
+ mkTcOcc, mkTcOccFS,
+ mkClsOcc, mkClsOccFS,
mkDFunOcc,
- setOccNameSpace,
+ setOccNameSpace,
demoteOccName,
HasOccName(..),
- -- ** Derived 'OccName's
+ -- ** Derived 'OccName's
isDerivedOccName,
- mkDataConWrapperOcc, mkWorkerOcc, mkMatcherOcc, mkDefaultMethodOcc,
- mkGenDefMethodOcc,
- mkDerivedTyConOcc, mkNewTyCoOcc, mkClassOpAuxOcc,
+ mkDataConWrapperOcc, mkWorkerOcc, mkMatcherOcc, mkDefaultMethodOcc,
+ mkGenDefMethodOcc,
+ mkDerivedTyConOcc, mkNewTyCoOcc, mkClassOpAuxOcc,
mkCon2TagOcc, mkTag2ConOcc, mkMaxTagOcc,
- mkClassDataConOcc, mkDictOcc, mkIPOcc,
- mkSpecOcc, mkForeignExportOcc, mkRepEqOcc, mkGenOcc1, mkGenOcc2,
- mkGenD, mkGenR, mkGen1R, mkGenRCo, mkGenC, mkGenS,
+ mkClassDataConOcc, mkDictOcc, mkIPOcc,
+ mkSpecOcc, mkForeignExportOcc, mkRepEqOcc, mkGenOcc1, mkGenOcc2,
+ mkGenD, mkGenR, mkGen1R, mkGenRCo, mkGenC, mkGenS,
mkDataTOcc, mkDataCOcc, mkDataConWorkerOcc,
- mkSuperDictSelOcc, mkLocalOcc, mkMethodOcc, mkInstTyTcOcc,
- mkInstTyCoOcc, mkEqPredCoOcc,
+ mkSuperDictSelOcc, mkLocalOcc, mkMethodOcc, mkInstTyTcOcc,
+ mkInstTyCoOcc, mkEqPredCoOcc,
mkVectOcc, mkVectTyConOcc, mkVectDataConOcc, mkVectIsoOcc,
mkPDataTyConOcc, mkPDataDataConOcc,
- mkPDatasTyConOcc, mkPDatasDataConOcc,
- mkPReprTyConOcc,
+ mkPDatasTyConOcc, mkPDatasDataConOcc,
+ mkPReprTyConOcc,
mkPADFunOcc,
- -- ** Deconstruction
- occNameFS, occNameString, occNameSpace,
+ -- ** Deconstruction
+ occNameFS, occNameString, occNameSpace,
+
+ isVarOcc, isTvOcc, isTcOcc, isDataOcc, isDataSymOcc, isSymOcc, isValOcc,
+ parenSymOcc, startsWithUnderscore,
- isVarOcc, isTvOcc, isTcOcc, isDataOcc, isDataSymOcc, isSymOcc, isValOcc,
- parenSymOcc, startsWithUnderscore,
-
- isTcClsNameSpace, isTvNameSpace, isDataConNameSpace, isVarNameSpace, isValNameSpace,
+ isTcClsNameSpace, isTvNameSpace, isDataConNameSpace, isVarNameSpace, isValNameSpace,
- -- * The 'OccEnv' type
- OccEnv, emptyOccEnv, unitOccEnv, extendOccEnv, mapOccEnv,
- lookupOccEnv, mkOccEnv, mkOccEnv_C, extendOccEnvList, elemOccEnv,
- occEnvElts, foldOccEnv, plusOccEnv, plusOccEnv_C, extendOccEnv_C,
+ -- * The 'OccEnv' type
+ OccEnv, emptyOccEnv, unitOccEnv, extendOccEnv, mapOccEnv,
+ lookupOccEnv, mkOccEnv, mkOccEnv_C, extendOccEnvList, elemOccEnv,
+ occEnvElts, foldOccEnv, plusOccEnv, plusOccEnv_C, extendOccEnv_C,
extendOccEnv_Acc, filterOccEnv, delListFromOccEnv, delFromOccEnv,
alterOccEnv, pprOccEnv,
- -- * The 'OccSet' type
- OccSet, emptyOccSet, unitOccSet, mkOccSet, extendOccSet,
- extendOccSetList,
- unionOccSets, unionManyOccSets, minusOccSet, elemOccSet, occSetElts,
- foldOccSet, isEmptyOccSet, intersectOccSet, intersectsOccSet,
-
- -- * Tidying up
- TidyOccEnv, emptyTidyOccEnv, tidyOccName, initTidyOccEnv,
+ -- * The 'OccSet' type
+ OccSet, emptyOccSet, unitOccSet, mkOccSet, extendOccSet,
+ extendOccSetList,
+ unionOccSets, unionManyOccSets, minusOccSet, elemOccSet, occSetElts,
+ foldOccSet, isEmptyOccSet, intersectOccSet, intersectsOccSet,
- -- * Lexical characteristics of Haskell names
- isLexCon, isLexVar, isLexId, isLexSym,
- isLexConId, isLexConSym, isLexVarId, isLexVarSym,
- startsVarSym, startsVarId, startsConSym, startsConId,
+ -- * Tidying up
+ TidyOccEnv, emptyTidyOccEnv, tidyOccName, initTidyOccEnv,
+
+ -- * Lexical characteristics of Haskell names
+ isLexCon, isLexVar, isLexId, isLexSym,
+ isLexConId, isLexConSym, isLexVarId, isLexVarSym,
+ startsVarSym, startsVarId, startsConSym, startsConId,
-- FsEnv
FastStringEnv, emptyFsEnv, lookupFsEnv, extendFsEnv, mkFsEnv
@@ -123,9 +116,9 @@ import Data.Data
\end{code}
%************************************************************************
-%* *
+%* *
FastStringEnv
-%* *
+%* *
%************************************************************************
FastStringEnv can't be in FastString because the env depends on UniqFM
@@ -146,29 +139,29 @@ mkFsEnv = listToUFM
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{Name space}
-%* *
+%* *
%************************************************************************
\begin{code}
-data NameSpace = VarName -- Variables, including "real" data constructors
- | DataName -- "Source" data constructors
- | TvName -- Type variables
- | TcClsName -- Type constructors and classes; Haskell has them
- -- in the same name space for now.
- deriving( Eq, Ord )
+data NameSpace = VarName -- Variables, including "real" data constructors
+ | DataName -- "Source" data constructors
+ | TvName -- Type variables
+ | TcClsName -- Type constructors and classes; Haskell has them
+ -- in the same name space for now.
+ deriving( Eq, Ord )
{-! derive: Binary !-}
--- Note [Data Constructors]
+-- Note [Data Constructors]
-- see also: Note [Data Constructor Naming] in DataCon.lhs
--
-- $real_vs_source_data_constructors
-- There are two forms of data constructor:
--
--- [Source data constructors] The data constructors mentioned in Haskell source code
+-- [Source data constructors] The data constructors mentioned in Haskell source code
--
--- [Real data constructors] The data constructors of the representation type, which may not be the same as the source type
+-- [Real data constructors] The data constructors of the representation type, which may not be the same as the source type
--
-- For example:
--
@@ -185,13 +178,13 @@ tvName, varName :: NameSpace
-- Though type constructors and classes are in the same name space now,
-- the NameSpace type is abstract, so we can easily separate them later
-tcName = TcClsName -- Type constructors
-clsName = TcClsName -- Classes
-tcClsName = TcClsName -- Not sure which!
+tcName = TcClsName -- Type constructors
+clsName = TcClsName -- Classes
+tcClsName = TcClsName -- Not sure which!
dataName = DataName
-srcDataName = DataName -- Haskell-source data constructors should be
- -- in the Data name space
+srcDataName = DataName -- Haskell-source data constructors should be
+ -- in the Data name space
tvName = TvName
varName = VarName
@@ -208,7 +201,7 @@ isTvNameSpace :: NameSpace -> Bool
isTvNameSpace TvName = True
isTvNameSpace _ = False
-isVarNameSpace :: NameSpace -> Bool -- Variables or type variables, but not constructors
+isVarNameSpace :: NameSpace -> Bool -- Variables or type variables, but not constructors
isVarNameSpace TvName = True
isVarNameSpace VarName = True
isVarNameSpace _ = False
@@ -246,13 +239,13 @@ demoteNameSpace TcClsName = Just DataName
%************************************************************************
-%* *
+%* *
\subsection[Name-pieces-datatypes]{The @OccName@ datatypes}
-%* *
+%* *
%************************************************************************
\begin{code}
-data OccName = OccName
+data OccName = OccName
{ occNameSpace :: !NameSpace
, occNameFS :: !FastString
}
@@ -265,9 +258,9 @@ instance Eq OccName where
(OccName sp1 s1) == (OccName sp2 s2) = s1 == s2 && sp1 == sp2
instance Ord OccName where
- -- Compares lexicographically, *not* by Unique of the string
- compare (OccName sp1 s1) (OccName sp2 s2)
- = (s1 `compare` s2) `thenCmp` (sp1 `compare` sp2)
+ -- Compares lexicographically, *not* by Unique of the string
+ compare (OccName sp1 s1) (OccName sp2 s2)
+ = (s1 `compare` s2) `thenCmp` (sp1 `compare` sp2)
instance Data OccName where
-- don't traverse?
@@ -281,11 +274,11 @@ instance HasOccName OccName where
%************************************************************************
-%* *
+%* *
\subsection{Printing}
-%* *
+%* *
%************************************************************************
-
+
\begin{code}
instance Outputable OccName where
ppr = pprOccName
@@ -296,21 +289,21 @@ instance OutputableBndr OccName where
pprPrefixOcc n = pprPrefixVar (isSymOcc n) (ppr n)
pprOccName :: OccName -> SDoc
-pprOccName (OccName sp occ)
+pprOccName (OccName sp occ)
= getPprStyle $ \ sty ->
- if codeStyle sty
+ if codeStyle sty
then ztext (zEncodeFS occ)
else pp_occ <> pp_debug sty
where
pp_debug sty | debugStyle sty = braces (pprNameSpaceBrief sp)
- | otherwise = empty
+ | otherwise = empty
pp_occ = sdocWithDynFlags $ \dflags ->
if gopt Opt_SuppressUniques dflags
then text (strip_th_unique (unpackFS occ))
else ftext occ
- -- See Note [Suppressing uniques in OccNames]
+ -- See Note [Suppressing uniques in OccNames]
strip_th_unique ('[' : c : _) | isAlphaNum c = []
strip_th_unique (c : cs) = c : strip_th_unique cs
strip_th_unique [] = []
@@ -323,9 +316,9 @@ Template Haskell that have been turned into a string in the OccName.
See Note [Unique OccNames from Template Haskell] in Convert.hs
%************************************************************************
-%* *
+%* *
\subsection{Construction}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -393,9 +386,9 @@ class HasOccName name where
%************************************************************************
-%* *
- Environments
-%* *
+%* *
+ Environments
+%* *
%************************************************************************
OccEnvs are used mainly for the envts in ModIfaces.
@@ -403,11 +396,11 @@ OccEnvs are used mainly for the envts in ModIfaces.
Note [The Unique of an OccName]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
They are efficient, because FastStrings have unique Int# keys. We assume
-this key is less than 2^24, and indeed FastStrings are allocated keys
+this key is less than 2^24, and indeed FastStrings are allocated keys
sequentially starting at 0.
So we can make a Unique using
- mkUnique ns key :: Unique
+ mkUnique ns key :: Unique
where 'ns' is a Char representing the name space. This in turn makes it
easy to build an OccEnv.
@@ -436,25 +429,25 @@ extendOccEnv_Acc :: (a->b->b) -> (a->b) -> OccEnv b -> OccName -> a -> OccEnv b
plusOccEnv :: OccEnv a -> OccEnv a -> OccEnv a
plusOccEnv_C :: (a->a->a) -> OccEnv a -> OccEnv a -> OccEnv a
mapOccEnv :: (a->b) -> OccEnv a -> OccEnv b
-delFromOccEnv :: OccEnv a -> OccName -> OccEnv a
+delFromOccEnv :: OccEnv a -> OccName -> OccEnv a
delListFromOccEnv :: OccEnv a -> [OccName] -> OccEnv a
-filterOccEnv :: (elt -> Bool) -> OccEnv elt -> OccEnv elt
-alterOccEnv :: (Maybe elt -> Maybe elt) -> OccEnv elt -> OccName -> OccEnv elt
+filterOccEnv :: (elt -> Bool) -> OccEnv elt -> OccEnv elt
+alterOccEnv :: (Maybe elt -> Maybe elt) -> OccEnv elt -> OccName -> OccEnv elt
-emptyOccEnv = A emptyUFM
-unitOccEnv x y = A $ unitUFM x y
+emptyOccEnv = A emptyUFM
+unitOccEnv x y = A $ unitUFM x y
extendOccEnv (A x) y z = A $ addToUFM x y z
extendOccEnvList (A x) l = A $ addListToUFM x l
lookupOccEnv (A x) y = lookupUFM x y
mkOccEnv l = A $ listToUFM l
-elemOccEnv x (A y) = elemUFM x y
-foldOccEnv a b (A c) = foldUFM a b c
-occEnvElts (A x) = eltsUFM x
-plusOccEnv (A x) (A y) = A $ plusUFM x y
-plusOccEnv_C f (A x) (A y) = A $ plusUFM_C f x y
+elemOccEnv x (A y) = elemUFM x y
+foldOccEnv a b (A c) = foldUFM a b c
+occEnvElts (A x) = eltsUFM x
+plusOccEnv (A x) (A y) = A $ plusUFM x y
+plusOccEnv_C f (A x) (A y) = A $ plusUFM_C f x y
extendOccEnv_C f (A x) y z = A $ addToUFM_C f x y z
extendOccEnv_Acc f g (A x) y z = A $ addToUFM_Acc f g x y z
-mapOccEnv f (A x) = A $ mapUFM f x
+mapOccEnv f (A x) = A $ mapUFM f x
mkOccEnv_C comb l = A $ addListToUFM_C comb emptyUFM l
delFromOccEnv (A x) y = A $ delFromUFM x y
delListFromOccEnv (A x) y = A $ delListFromUFM x y
@@ -469,32 +462,32 @@ pprOccEnv ppr_elt (A env) = pprUniqFM ppr_elt env
type OccSet = UniqSet OccName
-emptyOccSet :: OccSet
-unitOccSet :: OccName -> OccSet
+emptyOccSet :: OccSet
+unitOccSet :: OccName -> OccSet
mkOccSet :: [OccName] -> OccSet
extendOccSet :: OccSet -> OccName -> OccSet
extendOccSetList :: OccSet -> [OccName] -> OccSet
-unionOccSets :: OccSet -> OccSet -> OccSet
+unionOccSets :: OccSet -> OccSet -> OccSet
unionManyOccSets :: [OccSet] -> OccSet
-minusOccSet :: OccSet -> OccSet -> OccSet
-elemOccSet :: OccName -> OccSet -> Bool
-occSetElts :: OccSet -> [OccName]
-foldOccSet :: (OccName -> b -> b) -> b -> OccSet -> b
-isEmptyOccSet :: OccSet -> Bool
+minusOccSet :: OccSet -> OccSet -> OccSet
+elemOccSet :: OccName -> OccSet -> Bool
+occSetElts :: OccSet -> [OccName]
+foldOccSet :: (OccName -> b -> b) -> b -> OccSet -> b
+isEmptyOccSet :: OccSet -> Bool
intersectOccSet :: OccSet -> OccSet -> OccSet
intersectsOccSet :: OccSet -> OccSet -> Bool
-emptyOccSet = emptyUniqSet
-unitOccSet = unitUniqSet
+emptyOccSet = emptyUniqSet
+unitOccSet = unitUniqSet
mkOccSet = mkUniqSet
-extendOccSet = addOneToUniqSet
+extendOccSet = addOneToUniqSet
extendOccSetList = addListToUniqSet
unionOccSets = unionUniqSets
unionManyOccSets = unionManyUniqSets
-minusOccSet = minusUniqSet
+minusOccSet = minusUniqSet
elemOccSet = elementOfUniqSet
occSetElts = uniqSetToList
-foldOccSet = foldUniqSet
+foldOccSet = foldUniqSet
isEmptyOccSet = isEmptyUniqSet
intersectOccSet = intersectUniqSets
intersectsOccSet s1 s2 = not (isEmptyOccSet (s1 `intersectOccSet` s2))
@@ -502,9 +495,9 @@ intersectsOccSet s1 s2 = not (isEmptyOccSet (s1 `intersectOccSet` s2))
%************************************************************************
-%* *
+%* *
\subsection{Predicates and taking them apart}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -525,7 +518,7 @@ isTvOcc _ = False
isTcOcc (OccName TcClsName _) = True
isTcOcc _ = False
--- | /Value/ 'OccNames's are those that are either in
+-- | /Value/ 'OccNames's are those that are either in
-- the variable or data constructor namespaces
isValOcc :: OccName -> Bool
isValOcc (OccName VarName _) = True
@@ -542,7 +535,7 @@ isDataSymOcc (OccName DataName s) = isLexConSym s
isDataSymOcc _ = False
-- Pretty inefficient!
--- | Test if the 'OccName' is that for any operator (whether
+-- | Test if the 'OccName' is that for any operator (whether
-- it is a data constructor or variable or whatever)
isSymOcc :: OccName -> Bool
isSymOcc (OccName DataName s) = isLexConSym s
@@ -554,7 +547,7 @@ isSymOcc (OccName TvName s) = isLexSym s
parenSymOcc :: OccName -> SDoc -> SDoc
-- ^ Wrap parens around an operator
parenSymOcc occ doc | isSymOcc occ = parens doc
- | otherwise = doc
+ | otherwise = doc
\end{code}
@@ -563,39 +556,39 @@ startsWithUnderscore :: OccName -> Bool
-- ^ Haskell 98 encourages compilers to suppress warnings about unsed
-- names in a pattern if they start with @_@: this implements that test
startsWithUnderscore occ = case occNameString occ of
- ('_' : _) -> True
- _other -> False
+ ('_' : _) -> True
+ _other -> False
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{Making system names}
-%* *
+%* *
%************************************************************************
Here's our convention for splitting up the interface file name space:
- d... dictionary identifiers
- (local variables, so no name-clash worries)
+ d... dictionary identifiers
+ (local variables, so no name-clash worries)
All of these other OccNames contain a mixture of alphabetic
and symbolic characters, and hence cannot possibly clash with
a user-written type or function name
- $f... Dict-fun identifiers (from inst decls)
- $dmop Default method for 'op'
- $pnC n'th superclass selector for class C
- $wf Worker for functtoin 'f'
- $sf.. Specialised version of f
- T:C Tycon for dictionary for class C
- D:C Data constructor for dictionary for class C
+ $f... Dict-fun identifiers (from inst decls)
+ $dmop Default method for 'op'
+ $pnC n'th superclass selector for class C
+ $wf Worker for functtoin 'f'
+ $sf.. Specialised version of f
+ T:C Tycon for dictionary for class C
+ D:C Data constructor for dictionary for class C
NTCo:T Coercion connecting newtype T with its representation type
TFCo:R Coercion connecting a data family to its respresentation type R
In encoded form these appear as Zdfxxx etc
- :... keywords (export:, letrec: etc.)
+ :... keywords (export:, letrec: etc.)
--- I THINK THIS IS WRONG!
This knowledge is encoded in the following functions.
@@ -604,15 +597,15 @@ This knowledge is encoded in the following functions.
NB: The string must already be encoded!
\begin{code}
-mk_deriv :: NameSpace
- -> String -- Distinguishes one sort of derived name from another
- -> String
- -> OccName
+mk_deriv :: NameSpace
+ -> String -- Distinguishes one sort of derived name from another
+ -> String
+ -> OccName
mk_deriv occ_sp sys_prefix str = mkOccName occ_sp (sys_prefix ++ str)
isDerivedOccName :: OccName -> Bool
-isDerivedOccName occ =
+isDerivedOccName occ =
case occNameString occ of
'$':c:_ | isAlphaNum c -> True
':':c:_ | isAlphaNum c -> True
@@ -622,10 +615,10 @@ isDerivedOccName occ =
\begin{code}
mkDataConWrapperOcc, mkWorkerOcc, mkMatcherOcc, mkDefaultMethodOcc,
mkGenDefMethodOcc, mkDerivedTyConOcc, mkClassDataConOcc, mkDictOcc,
- mkIPOcc, mkSpecOcc, mkForeignExportOcc, mkRepEqOcc, mkGenOcc1, mkGenOcc2,
- mkGenD, mkGenR, mkGen1R, mkGenRCo,
- mkDataTOcc, mkDataCOcc, mkDataConWorkerOcc, mkNewTyCoOcc,
- mkInstTyCoOcc, mkEqPredCoOcc, mkClassOpAuxOcc,
+ mkIPOcc, mkSpecOcc, mkForeignExportOcc, mkRepEqOcc, mkGenOcc1, mkGenOcc2,
+ mkGenD, mkGenR, mkGen1R, mkGenRCo,
+ mkDataTOcc, mkDataCOcc, mkDataConWorkerOcc, mkNewTyCoOcc,
+ mkInstTyCoOcc, mkEqPredCoOcc, mkClassOpAuxOcc,
mkCon2TagOcc, mkTag2ConOcc, mkMaxTagOcc
:: OccName -> OccName
@@ -636,17 +629,17 @@ mkMatcherOcc = mk_simple_deriv varName "$m"
mkDefaultMethodOcc = mk_simple_deriv varName "$dm"
mkGenDefMethodOcc = mk_simple_deriv varName "$gdm"
mkClassOpAuxOcc = mk_simple_deriv varName "$c"
-mkDerivedTyConOcc = mk_simple_deriv tcName ":" -- The : prefix makes sure it classifies as a tycon/datacon
-mkClassDataConOcc = mk_simple_deriv dataName "D:" -- We go straight to the "real" data con
- -- for datacons from classes
-mkDictOcc = mk_simple_deriv varName "$d"
-mkIPOcc = mk_simple_deriv varName "$i"
-mkSpecOcc = mk_simple_deriv varName "$s"
+mkDerivedTyConOcc = mk_simple_deriv tcName ":" -- The : prefix makes sure it classifies as a tycon/datacon
+mkClassDataConOcc = mk_simple_deriv dataName "D:" -- We go straight to the "real" data con
+ -- for datacons from classes
+mkDictOcc = mk_simple_deriv varName "$d"
+mkIPOcc = mk_simple_deriv varName "$i"
+mkSpecOcc = mk_simple_deriv varName "$s"
mkForeignExportOcc = mk_simple_deriv varName "$f"
mkRepEqOcc = mk_simple_deriv tvName "$r" -- In RULES involving Coercible
-mkNewTyCoOcc = mk_simple_deriv tcName "NTCo:" -- Coercion for newtypes
+mkNewTyCoOcc = mk_simple_deriv tcName "NTCo:" -- Coercion for newtypes
mkInstTyCoOcc = mk_simple_deriv tcName "TFCo:" -- Coercion for type functions
-mkEqPredCoOcc = mk_simple_deriv tcName "$co"
+mkEqPredCoOcc = mk_simple_deriv tcName "$co"
-- used in derived instances
mkCon2TagOcc = mk_simple_deriv varName "$con2tag_"
@@ -655,7 +648,7 @@ mkMaxTagOcc = mk_simple_deriv varName "$maxtag_"
-- Generic derivable classes (old)
mkGenOcc1 = mk_simple_deriv varName "$gfrom"
-mkGenOcc2 = mk_simple_deriv varName "$gto"
+mkGenOcc2 = mk_simple_deriv varName "$gto"
-- Generic deriving mechanism (new)
mkGenD = mk_simple_deriv tcName "D1"
@@ -671,9 +664,9 @@ mkGenR = mk_simple_deriv tcName "Rep_"
mkGen1R = mk_simple_deriv tcName "Rep1_"
mkGenRCo = mk_simple_deriv tcName "CoRep_"
--- data T = MkT ... deriving( Data ) needs definitions for
--- $tT :: Data.Generics.Basics.DataType
--- $cMkT :: Data.Generics.Basics.Constr
+-- data T = MkT ... deriving( Data ) needs definitions for
+-- $tT :: Data.Generics.Basics.DataType
+-- $cMkT :: Data.Generics.Basics.Constr
mkDataTOcc = mk_simple_deriv varName "$t"
mkDataCOcc = mk_simple_deriv varName "$c"
@@ -704,41 +697,41 @@ mk_simple_deriv_with sp px (Just with) occ = mk_deriv sp (px ++ with ++ "_") (oc
-- Data constructor workers are made by setting the name space
-- of the data constructor OccName (which should be a DataName)
-- to VarName
-mkDataConWorkerOcc datacon_occ = setOccNameSpace varName datacon_occ
+mkDataConWorkerOcc datacon_occ = setOccNameSpace varName datacon_occ
\end{code}
\begin{code}
-mkSuperDictSelOcc :: Int -- ^ Index of superclass, e.g. 3
- -> OccName -- ^ Class, e.g. @Ord@
- -> OccName -- ^ Derived 'Occname', e.g. @$p3Ord@
+mkSuperDictSelOcc :: Int -- ^ Index of superclass, e.g. 3
+ -> OccName -- ^ Class, e.g. @Ord@
+ -> OccName -- ^ Derived 'Occname', e.g. @$p3Ord@
mkSuperDictSelOcc index cls_tc_occ
= mk_deriv varName "$p" (show index ++ occNameString cls_tc_occ)
-mkLocalOcc :: Unique -- ^ Unique to combine with the 'OccName'
- -> OccName -- ^ Local name, e.g. @sat@
- -> OccName -- ^ Nice unique version, e.g. @$L23sat@
+mkLocalOcc :: Unique -- ^ Unique to combine with the 'OccName'
+ -> OccName -- ^ Local name, e.g. @sat@
+ -> OccName -- ^ Nice unique version, e.g. @$L23sat@
mkLocalOcc uniq occ
= mk_deriv varName ("$L" ++ show uniq) (occNameString occ)
- -- The Unique might print with characters
- -- that need encoding (e.g. 'z'!)
+ -- The Unique might print with characters
+ -- that need encoding (e.g. 'z'!)
\end{code}
\begin{code}
-- | Derive a name for the representation type constructor of a
-- @data@\/@newtype@ instance.
-mkInstTyTcOcc :: String -- ^ Family name, e.g. @Map@
+mkInstTyTcOcc :: String -- ^ Family name, e.g. @Map@
-> OccSet -- ^ avoid these Occs
- -> OccName -- ^ @R:Map@
+ -> OccName -- ^ @R:Map@
mkInstTyTcOcc str set =
chooseUniqueOcc tcName ('R' : ':' : str) set
\end{code}
\begin{code}
-mkDFunOcc :: String -- ^ Typically the class and type glommed together e.g. @OrdMaybe@.
- -- Only used in debug mode, for extra clarity
- -> Bool -- ^ Is this a hs-boot instance DFun?
+mkDFunOcc :: String -- ^ Typically the class and type glommed together e.g. @OrdMaybe@.
+ -- Only used in debug mode, for extra clarity
+ -> Bool -- ^ Is this a hs-boot instance DFun?
-> OccSet -- ^ avoid these Occs
- -> OccName -- ^ E.g. @$f3OrdMaybe@
+ -> OccName -- ^ E.g. @$f3OrdMaybe@
-- In hs-boot files we make dict funs like $fx7ClsTy, which get bound to the real
-- thing when we compile the mother module. Reason: we don't know exactly
@@ -748,7 +741,7 @@ mkDFunOcc info_str is_boot set
= chooseUniqueOcc VarName (prefix ++ info_str) set
where
prefix | is_boot = "$fx"
- | otherwise = "$f"
+ | otherwise = "$f"
\end{code}
Sometimes we need to pick an OccName that has not already been used,
@@ -777,9 +770,9 @@ because overloaded constructors (blarg) generate methods too.
And convert to VarName space
e.g. a call to constructor MkFoo where
- data (Ord a) => Foo a = MkFoo a
+ data (Ord a) => Foo a = MkFoo a
-If this is necessary, we do it by prefixing '$m'. These
+If this is necessary, we do it by prefixing '$m'. These
guys never show up in error messages. What a hack.
\begin{code}
@@ -790,9 +783,9 @@ mkMethodOcc occ = mk_simple_deriv varName "$m" occ
%************************************************************************
-%* *
+%* *
\subsection{Tidying them up}
-%* *
+%* *
%************************************************************************
Before we print chunks of code we like to rename it so that
@@ -802,7 +795,7 @@ OccName alone unless it accidentally clashes with one that is already
in scope; if so, we tack on '1' at the end and try again, then '2', and
so on till we find a unique one.
-There's a wrinkle for operators. Consider '>>='. We can't use '>>=1'
+There's a wrinkle for operators. Consider '>>='. We can't use '>>=1'
because that isn't a single lexeme. So we encode it to 'lle' and *then*
tack on the '1', if necessary.
@@ -814,7 +807,7 @@ type TidyOccEnv = UniqFM Int
make sure that we don't re-use
* Int, n = A plausible starting point for new guesses
- There is no guarantee that "FSn" is available;
+ There is no guarantee that "FSn" is available;
you must look that up in the TidyOccEnv. But
it's a good place to start looking.
@@ -822,13 +815,13 @@ type TidyOccEnv = UniqFM Int
with "foo". Otherwise if we tidy twice we get silly names like foo23.
\begin{code}
-type TidyOccEnv = UniqFM Int -- The in-scope OccNames
+type TidyOccEnv = UniqFM Int -- The in-scope OccNames
-- See Note [TidyOccEnv]
emptyTidyOccEnv :: TidyOccEnv
emptyTidyOccEnv = emptyUFM
-initTidyOccEnv :: [OccName] -> TidyOccEnv -- Initialise with names to avoid!
+initTidyOccEnv :: [OccName] -> TidyOccEnv -- Initialise with names to avoid!
initTidyOccEnv = foldl add emptyUFM
where
add env (OccName _ fs) = addToUFM env fs 1
@@ -836,13 +829,13 @@ initTidyOccEnv = foldl add emptyUFM
tidyOccName :: TidyOccEnv -> OccName -> (TidyOccEnv, OccName)
tidyOccName env occ@(OccName occ_sp fs)
= case lookupUFM env fs of
- Just n -> find n
- Nothing -> (addToUFM env fs 1, occ)
+ Just n -> find n
+ Nothing -> (addToUFM env fs 1, occ)
where
base :: String -- Drop trailing digits (see Note [TidyOccEnv])
base = reverse (dropWhile isDigit (reverse (unpackFS fs)))
-
- find n
+
+ find n
= case lookupUFM env new_fs of
Just n' -> find (n1 `max` n')
-- The max ensures that n increases, avoiding loops
@@ -857,9 +850,9 @@ tidyOccName env occ@(OccName occ_sp fs)
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{Lexical categories}
-%* *
+%* *
%************************************************************************
These functions test strings to see if they fit the lexical categories
@@ -886,21 +879,21 @@ isLexSym cs = isLexConSym cs || isLexVarSym cs
-------------
-isLexConId cs -- Prefix type or data constructors
- | nullFS cs = False -- e.g. "Foo", "[]", "(,)"
+isLexConId cs -- Prefix type or data constructors
+ | nullFS cs = False -- e.g. "Foo", "[]", "(,)"
| cs == (fsLit "[]") = True
- | otherwise = startsConId (headFS cs)
+ | otherwise = startsConId (headFS cs)
-isLexVarId cs -- Ordinary prefix identifiers
- | nullFS cs = False -- e.g. "x", "_x"
+isLexVarId cs -- Ordinary prefix identifiers
+ | nullFS cs = False -- e.g. "x", "_x"
| otherwise = startsVarId (headFS cs)
-isLexConSym cs -- Infix type or data constructors
- | nullFS cs = False -- e.g. ":-:", ":", "->"
+isLexConSym cs -- Infix type or data constructors
+ | nullFS cs = False -- e.g. ":-:", ":", "->"
| cs == (fsLit "->") = True
- | otherwise = startsConSym (headFS cs)
+ | otherwise = startsConSym (headFS cs)
-isLexVarSym fs -- Infix identifiers e.g. "+"
+isLexVarSym fs -- Infix identifiers e.g. "+"
| fs == (fsLit "~R#") = True
| otherwise
= case (if nullFS fs then [] else unpackFS fs) of
@@ -911,9 +904,9 @@ isLexVarSym fs -- Infix identifiers e.g. "+"
-------------
startsVarSym, startsVarId, startsConSym, startsConId :: Char -> Bool
startsVarSym c = isSymbolASCII c || (ord c > 0x7f && isSymbol c) -- Infix Ids
-startsConSym c = c == ':' -- Infix data constructors
-startsVarId c = isLower c || c == '_' -- Ordinary Ids
-startsConId c = isUpper c || c == '(' -- Ordinary type constructors and data constructors
+startsConSym c = c == ':' -- Infix data constructors
+startsVarId c = isLower c || c == '_' -- Ordinary Ids
+startsConId c = isUpper c || c == '(' -- Ordinary type constructors and data constructors
isSymbolASCII :: Char -> Bool
isSymbolASCII c = c `elem` "!#$%&*+./<=>?@\\^|~-"
@@ -923,36 +916,36 @@ isVarSymChar c = c == ':' || startsVarSym c
\end{code}
%************************************************************************
-%* *
- Binary instance
+%* *
+ Binary instance
Here rather than BinIface because OccName is abstract
-%* *
+%* *
%************************************************************************
\begin{code}
instance Binary NameSpace where
put_ bh VarName = do
- putByte bh 0
+ putByte bh 0
put_ bh DataName = do
- putByte bh 1
+ putByte bh 1
put_ bh TvName = do
- putByte bh 2
+ putByte bh 2
put_ bh TcClsName = do
- putByte bh 3
+ putByte bh 3
get bh = do
- h <- getByte bh
- case h of
- 0 -> do return VarName
- 1 -> do return DataName
- 2 -> do return TvName
- _ -> do return TcClsName
+ h <- getByte bh
+ case h of
+ 0 -> do return VarName
+ 1 -> do return DataName
+ 2 -> do return TvName
+ _ -> do return TcClsName
instance Binary OccName where
put_ bh (OccName aa ab) = do
- put_ bh aa
- put_ bh ab
+ put_ bh aa
+ put_ bh ab
get bh = do
- aa <- get bh
- ab <- get bh
- return (OccName aa ab)
+ aa <- get bh
+ ab <- get bh
+ return (OccName aa ab)
\end{code}
diff --git a/compiler/basicTypes/VarSet.lhs b/compiler/basicTypes/VarSet.lhs
index 368be68ceb..362f408d72 100644
--- a/compiler/basicTypes/VarSet.lhs
+++ b/compiler/basicTypes/VarSet.lhs
@@ -5,27 +5,21 @@
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module VarSet (
-- * Var, Id and TyVar set types
- VarSet, IdSet, TyVarSet, CoVarSet,
-
- -- ** Manipulating these sets
- emptyVarSet, unitVarSet, mkVarSet,
- extendVarSet, extendVarSetList, extendVarSet_C,
- elemVarSet, varSetElems, subVarSet,
- unionVarSet, unionVarSets, mapUnionVarSet,
- intersectVarSet, intersectsVarSet, disjointVarSet,
- isEmptyVarSet, delVarSet, delVarSetList, delVarSetByKey,
- minusVarSet, foldVarSet, filterVarSet, fixVarSet,
- lookupVarSet, mapVarSet, sizeVarSet, seqVarSet,
- elemVarSetByKey, partitionVarSet
+ VarSet, IdSet, TyVarSet, CoVarSet,
+
+ -- ** Manipulating these sets
+ emptyVarSet, unitVarSet, mkVarSet,
+ extendVarSet, extendVarSetList, extendVarSet_C,
+ elemVarSet, varSetElems, subVarSet,
+ unionVarSet, unionVarSets, mapUnionVarSet,
+ intersectVarSet, intersectsVarSet, disjointVarSet,
+ isEmptyVarSet, delVarSet, delVarSetList, delVarSetByKey,
+ minusVarSet, foldVarSet, filterVarSet, fixVarSet,
+ lookupVarSet, mapVarSet, sizeVarSet, seqVarSet,
+ elemVarSetByKey, partitionVarSet
) where
#include "HsVersions.h"
@@ -36,78 +30,78 @@ import UniqSet
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{@VarSet@s}
-%* *
+%* *
%************************************************************************
\begin{code}
type VarSet = UniqSet Var
-type IdSet = UniqSet Id
-type TyVarSet = UniqSet TyVar
+type IdSet = UniqSet Id
+type TyVarSet = UniqSet TyVar
type CoVarSet = UniqSet CoVar
-emptyVarSet :: VarSet
-intersectVarSet :: VarSet -> VarSet -> VarSet
-unionVarSet :: VarSet -> VarSet -> VarSet
-unionVarSets :: [VarSet] -> VarSet
+emptyVarSet :: VarSet
+intersectVarSet :: VarSet -> VarSet -> VarSet
+unionVarSet :: VarSet -> VarSet -> VarSet
+unionVarSets :: [VarSet] -> VarSet
mapUnionVarSet :: (a -> VarSet) -> [a] -> VarSet
-- ^ map the function oer the list, and union the results
-varSetElems :: VarSet -> [Var]
-unitVarSet :: Var -> VarSet
-extendVarSet :: VarSet -> Var -> VarSet
+varSetElems :: VarSet -> [Var]
+unitVarSet :: Var -> VarSet
+extendVarSet :: VarSet -> Var -> VarSet
extendVarSetList:: VarSet -> [Var] -> VarSet
-elemVarSet :: Var -> VarSet -> Bool
-delVarSet :: VarSet -> Var -> VarSet
-delVarSetList :: VarSet -> [Var] -> VarSet
-minusVarSet :: VarSet -> VarSet -> VarSet
-isEmptyVarSet :: VarSet -> Bool
-mkVarSet :: [Var] -> VarSet
-foldVarSet :: (Var -> a -> a) -> a -> VarSet -> a
-lookupVarSet :: VarSet -> Var -> Maybe Var
- -- Returns the set element, which may be
- -- (==) to the argument, but not the same as
-mapVarSet :: (Var -> Var) -> VarSet -> VarSet
-sizeVarSet :: VarSet -> Int
-filterVarSet :: (Var -> Bool) -> VarSet -> VarSet
+elemVarSet :: Var -> VarSet -> Bool
+delVarSet :: VarSet -> Var -> VarSet
+delVarSetList :: VarSet -> [Var] -> VarSet
+minusVarSet :: VarSet -> VarSet -> VarSet
+isEmptyVarSet :: VarSet -> Bool
+mkVarSet :: [Var] -> VarSet
+foldVarSet :: (Var -> a -> a) -> a -> VarSet -> a
+lookupVarSet :: VarSet -> Var -> Maybe Var
+ -- Returns the set element, which may be
+ -- (==) to the argument, but not the same as
+mapVarSet :: (Var -> Var) -> VarSet -> VarSet
+sizeVarSet :: VarSet -> Int
+filterVarSet :: (Var -> Bool) -> VarSet -> VarSet
extendVarSet_C :: (Var->Var->Var) -> VarSet -> Var -> VarSet
-delVarSetByKey :: VarSet -> Unique -> VarSet
+delVarSetByKey :: VarSet -> Unique -> VarSet
elemVarSetByKey :: Unique -> VarSet -> Bool
fixVarSet :: (VarSet -> VarSet) -> VarSet -> VarSet
partitionVarSet :: (Var -> Bool) -> VarSet -> (VarSet, VarSet)
-emptyVarSet = emptyUniqSet
-unitVarSet = unitUniqSet
-extendVarSet = addOneToUniqSet
+emptyVarSet = emptyUniqSet
+unitVarSet = unitUniqSet
+extendVarSet = addOneToUniqSet
extendVarSetList= addListToUniqSet
-intersectVarSet = intersectUniqSets
-
-intersectsVarSet:: VarSet -> VarSet -> Bool -- True if non-empty intersection
-disjointVarSet :: VarSet -> VarSet -> Bool -- True if empty intersection
-subVarSet :: VarSet -> VarSet -> Bool -- True if first arg is subset of second
- -- (s1 `intersectsVarSet` s2) doesn't compute s2 if s1 is empty;
- -- ditto disjointVarSet, subVarSet
-
-unionVarSet = unionUniqSets
-unionVarSets = unionManyUniqSets
-varSetElems = uniqSetToList
-elemVarSet = elementOfUniqSet
-minusVarSet = minusUniqSet
-delVarSet = delOneFromUniqSet
-delVarSetList = delListFromUniqSet
-isEmptyVarSet = isEmptyUniqSet
-mkVarSet = mkUniqSet
-foldVarSet = foldUniqSet
-lookupVarSet = lookupUniqSet
-mapVarSet = mapUniqSet
-sizeVarSet = sizeUniqSet
-filterVarSet = filterUniqSet
+intersectVarSet = intersectUniqSets
+
+intersectsVarSet:: VarSet -> VarSet -> Bool -- True if non-empty intersection
+disjointVarSet :: VarSet -> VarSet -> Bool -- True if empty intersection
+subVarSet :: VarSet -> VarSet -> Bool -- True if first arg is subset of second
+ -- (s1 `intersectsVarSet` s2) doesn't compute s2 if s1 is empty;
+ -- ditto disjointVarSet, subVarSet
+
+unionVarSet = unionUniqSets
+unionVarSets = unionManyUniqSets
+varSetElems = uniqSetToList
+elemVarSet = elementOfUniqSet
+minusVarSet = minusUniqSet
+delVarSet = delOneFromUniqSet
+delVarSetList = delListFromUniqSet
+isEmptyVarSet = isEmptyUniqSet
+mkVarSet = mkUniqSet
+foldVarSet = foldUniqSet
+lookupVarSet = lookupUniqSet
+mapVarSet = mapUniqSet
+sizeVarSet = sizeUniqSet
+filterVarSet = filterUniqSet
extendVarSet_C = addOneToUniqSet_C
-delVarSetByKey = delOneFromUniqSet_Directly
-elemVarSetByKey = elemUniqSet_Directly
+delVarSetByKey = delOneFromUniqSet_Directly
+elemVarSetByKey = elemUniqSet_Directly
partitionVarSet = partitionUniqSet
\end{code}
@@ -121,9 +115,9 @@ subVarSet s1 s2 = isEmptyVarSet (s1 `minusVarSet` s2)
-- Iterate f to a fixpoint
fixVarSet f s | new_s `subVarSet` s = s
- | otherwise = fixVarSet f new_s
- where
- new_s = f s
+ | otherwise = fixVarSet f new_s
+ where
+ new_s = f s
\end{code}
\begin{code}
diff --git a/compiler/coreSyn/CoreArity.lhs b/compiler/coreSyn/CoreArity.lhs
index 26669b6d32..37517d6190 100644
--- a/compiler/coreSyn/CoreArity.lhs
+++ b/compiler/coreSyn/CoreArity.lhs
@@ -3,21 +3,15 @@
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
%
- Arity and eta expansion
+ Arity and eta expansion
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
--- | Arit and eta expansion
+
+-- | Arity and eta expansion
module CoreArity (
- manifestArity, exprArity, typeArity, exprBotStrictness_maybe,
- exprEtaExpandArity, findRhsArity, CheapFun, etaExpand
+ manifestArity, exprArity, typeArity, exprBotStrictness_maybe,
+ exprEtaExpandArity, findRhsArity, CheapFun, etaExpand
) where
#include "HsVersions.h"
@@ -31,7 +25,7 @@ import Var
import VarEnv
import Id
import Type
-import TyCon ( initRecTc, checkRecTc )
+import TyCon ( initRecTc, checkRecTc )
import Coercion
import BasicTypes
import Unique
@@ -43,9 +37,9 @@ import Util ( debugIsOn )
\end{code}
%************************************************************************
-%* *
+%* *
manifestArity and exprArity
-%* *
+%* *
%************************************************************************
exprArity is a cheap-and-cheerful version of exprEtaExpandArity.
@@ -53,52 +47,52 @@ It tells how many things the expression can be applied to before doing
any work. It doesn't look inside cases, lets, etc. The idea is that
exprEtaExpandArity will do the hard work, leaving something that's easy
for exprArity to grapple with. In particular, Simplify uses exprArity to
-compute the ArityInfo for the Id.
+compute the ArityInfo for the Id.
Originally I thought that it was enough just to look for top-level lambdas, but
it isn't. I've seen this
- foo = PrelBase.timesInt
+ foo = PrelBase.timesInt
We want foo to get arity 2 even though the eta-expander will leave it
unchanged, in the expectation that it'll be inlined. But occasionally it
-isn't, because foo is blacklisted (used in a rule).
+isn't, because foo is blacklisted (used in a rule).
-Similarly, see the ok_note check in exprEtaExpandArity. So
- f = __inline_me (\x -> e)
+Similarly, see the ok_note check in exprEtaExpandArity. So
+ f = __inline_me (\x -> e)
won't be eta-expanded.
And in any case it seems more robust to have exprArity be a bit more intelligent.
-But note that (\x y z -> f x y z)
+But note that (\x y z -> f x y z)
should have arity 3, regardless of f's arity.
\begin{code}
manifestArity :: CoreExpr -> Arity
-- ^ manifestArity sees how many leading value lambdas there are,
-- after looking through casts
-manifestArity (Lam v e) | isId v = 1 + manifestArity e
- | otherwise = manifestArity e
+manifestArity (Lam v e) | isId v = 1 + manifestArity e
+ | otherwise = manifestArity e
manifestArity (Tick t e) | not (tickishIsCode t) = manifestArity e
-manifestArity (Cast e _) = manifestArity e
-manifestArity _ = 0
+manifestArity (Cast e _) = manifestArity e
+manifestArity _ = 0
---------------
exprArity :: CoreExpr -> Arity
-- ^ An approximate, fast, version of 'exprEtaExpandArity'
exprArity e = go e
where
- go (Var v) = idArity v
- go (Lam x e) | isId x = go e + 1
- | otherwise = go e
+ go (Var v) = idArity v
+ go (Lam x e) | isId x = go e + 1
+ | otherwise = go e
go (Tick t e) | not (tickishIsCode t) = go e
go (Cast e co) = trim_arity (go e) (pSnd (coercionKind co))
-- Note [exprArity invariant]
go (App e (Type _)) = go e
go (App f a) | exprIsTrivial a = (go f - 1) `max` 0
-- See Note [exprArity for applications]
- -- NB: coercions count as a value argument
+ -- NB: coercions count as a value argument
- go _ = 0
+ go _ = 0
trim_arity :: Arity -> Type -> Arity
trim_arity arity ty = arity `min` length (typeArity ty)
@@ -108,26 +102,26 @@ typeArity :: Type -> [OneShotInfo]
-- How many value arrows are visible in the type?
-- We look through foralls, and newtypes
-- See Note [exprArity invariant]
-typeArity ty
+typeArity ty
= go initRecTc ty
where
- go rec_nts ty
- | Just (_, ty') <- splitForAllTy_maybe ty
+ go rec_nts ty
+ | Just (_, ty') <- splitForAllTy_maybe ty
= go rec_nts ty'
- | Just (arg,res) <- splitFunTy_maybe ty
+ | Just (arg,res) <- splitFunTy_maybe ty
= typeOneShot arg : go rec_nts res
- | Just (tc,tys) <- splitTyConApp_maybe ty
+ | Just (tc,tys) <- splitTyConApp_maybe ty
, Just (ty', _) <- instNewTyCon_maybe tc tys
, Just rec_nts' <- checkRecTc rec_nts tc -- See Note [Expanding newtypes]
-- in TyCon
--- , not (isClassTyCon tc) -- Do not eta-expand through newtype classes
--- -- See Note [Newtype classes and eta expansion]
+-- , not (isClassTyCon tc) -- Do not eta-expand through newtype classes
+-- -- See Note [Newtype classes and eta expansion]
-- (no longer required)
= go rec_nts' ty'
- -- Important to look through non-recursive newtypes, so that, eg
- -- (f x) where f has arity 2, f :: Int -> IO ()
- -- Here we want to get arity 1 for the result!
+ -- Important to look through non-recursive newtypes, so that, eg
+ -- (f x) where f has arity 2, f :: Int -> IO ()
+ -- Here we want to get arity 1 for the result!
--
-- AND through a layer of recursive newtypes
-- e.g. newtype Stream m a b = Stream (m (Either b (a, Stream m a b)))
@@ -142,8 +136,8 @@ exprBotStrictness_maybe :: CoreExpr -> Maybe (Arity, StrictSig)
-- float-out
exprBotStrictness_maybe e
= case getBotArity (arityType env e) of
- Nothing -> Nothing
- Just ar -> Just (ar, sig ar)
+ Nothing -> Nothing
+ Just ar -> Just (ar, sig ar)
where
env = AE { ae_ped_bot = True, ae_cheap_fn = \ _ _ -> False }
sig ar = mkClosedStrictSig (replicate ar topDmd) botRes
@@ -156,19 +150,19 @@ exprArity has the following invariant:
(1) If typeArity (exprType e) = n,
then manifestArity (etaExpand e n) = n
-
+
That is, etaExpand can always expand as much as typeArity says
So the case analysis in etaExpand and in typeArity must match
-
- (2) exprArity e <= typeArity (exprType e)
+
+ (2) exprArity e <= typeArity (exprType e)
(3) Hence if (exprArity e) = n, then manifestArity (etaExpand e n) = n
- That is, if exprArity says "the arity is n" then etaExpand really
+ That is, if exprArity says "the arity is n" then etaExpand really
can get "n" manifest lambdas to the top.
-Why is this important? Because
- - In TidyPgm we use exprArity to fix the *final arity* of
+Why is this important? Because
+ - In TidyPgm we use exprArity to fix the *final arity* of
each top-level Id, and in
- In CorePrep we use etaExpand on each rhs, so that the visible lambdas
actually match that arity, which in turn means
@@ -186,9 +180,9 @@ Note [Newtype classes and eta expansion]
-------- Old out of date comments, just for interest -----------
We have to be careful when eta-expanding through newtypes. In general
-it's a good idea, but annoyingly it interacts badly with the class-op
+it's a good idea, but annoyingly it interacts badly with the class-op
rule mechanism. Consider
-
+
class C a where { op :: a -> a }
instance C b => C [b] where
op x = ...
@@ -206,7 +200,7 @@ These translate to
Now suppose we have:
- dCInt :: C Int
+ dCInt :: C Int
blah :: [Int] -> [Int]
blah = op ($dfList dCInt)
@@ -230,7 +224,7 @@ The test simplCore/should_compile/T3722 is an excellent example.
Note [exprArity for applications]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we come to an application we check that the arg is trivial.
- eg f (fac x) does not have arity 2,
+ eg f (fac x) does not have arity 2,
even if f has arity 3!
* We require that is trivial rather merely cheap. Suppose f has arity 2.
@@ -245,9 +239,9 @@ When we come to an application we check that the arg is trivial.
%************************************************************************
-%* *
- Computing the "arity" of an expression
-%* *
+%* *
+ Computing the "arity" of an expression
+%* *
%************************************************************************
Note [Definition of arity]
@@ -275,7 +269,7 @@ It's all a bit more subtle than it looks:
Note [One-shot lambdas]
~~~~~~~~~~~~~~~~~~~~~~~
Consider one-shot lambdas
- let x = expensive in \y z -> E
+ let x = expensive in \y z -> E
We want this to have arity 1 if the \y-abstraction is a 1-shot lambda.
Note [Dealing with bottom]
@@ -291,21 +285,21 @@ In this case we do eta-expand, in order to get that \s to the
top, and give f arity 2.
This isn't really right in the presence of seq. Consider
- (f bot) `seq` 1
+ (f bot) `seq` 1
This should diverge! But if we eta-expand, it won't. We ignore this
"problem" (unless -fpedantic-bottoms is on), because being scrupulous
-would lose an important transformation for many programs. (See
+would lose an important transformation for many programs. (See
Trac #5587 for an example.)
Consider also
- f = \x -> error "foo"
+ f = \x -> error "foo"
Here, arity 1 is fine. But if it is
- f = \x -> case x of
- True -> error "foo"
- False -> \y -> x+y
+ f = \x -> case x of
+ True -> error "foo"
+ False -> \y -> x+y
then we want to get arity 2. Technically, this isn't quite right, because
- (f True) `seq` 1
+ (f True) `seq` 1
should diverge, but it'll converge if we eta-expand f. Nevertheless, we
do so; it improves some programs significantly, and increasing convergence
isn't a bad thing. Hence the ABot/ATop in ArityType.
@@ -318,11 +312,11 @@ this transformation. So we try to limit it as much as possible:
case undefined of { (a,b) -> \y -> e }
This showed up in Trac #5557
- (2) Do NOT move a lambda outside a case if all the branches of
+ (2) Do NOT move a lambda outside a case if all the branches of
the case are known to return bottom.
case x of { (a,b) -> \y -> error "urk" }
- This case is less important, but the idea is that if the fn is
- going to diverge eventually anyway then getting the best arity
+ This case is less important, but the idea is that if the fn is
+ going to diverge eventually anyway then getting the best arity
isn't an issue, so we might as well play safe
(3) Do NOT move a lambda outside a case unless
@@ -337,34 +331,34 @@ Of course both (1) and (2) are readily defeated by disguising the bottoms.
Non-recursive newtypes are transparent, and should not get in the way.
We do (currently) eta-expand recursive newtypes too. So if we have, say
- newtype T = MkT ([T] -> Int)
+ newtype T = MkT ([T] -> Int)
Suppose we have
- e = coerce T f
-where f has arity 1. Then: etaExpandArity e = 1;
+ e = coerce T f
+where f has arity 1. Then: etaExpandArity e = 1;
that is, etaExpandArity looks through the coerce.
When we eta-expand e to arity 1: eta_expand 1 e T
-we want to get: coerce T (\x::[T] -> (coerce ([T]->Int) e) x)
+we want to get: coerce T (\x::[T] -> (coerce ([T]->Int) e) x)
HOWEVER, note that if you use coerce bogusly you can ge
- coerce Int negate
+ coerce Int negate
And since negate has arity 2, you might try to eta expand. But you can't
decopose Int to a function type. Hence the final case in eta_expand.
-
+
Note [The state-transformer hack]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Suppose we have
- f = e
+Suppose we have
+ f = e
where e has arity n. Then, if we know from the context that f has
a usage type like
- t1 -> ... -> tn -1-> t(n+1) -1-> ... -1-> tm -> ...
+ t1 -> ... -> tn -1-> t(n+1) -1-> ... -1-> tm -> ...
then we can expand the arity to m. This usage type says that
any application (x e1 .. en) will be applied to uniquely to (m-n) more args
-Consider f = \x. let y = <expensive>
- in case x of
- True -> foo
- False -> \(s:RealWorld) -> e
+Consider f = \x. let y = <expensive>
+ in case x of
+ True -> foo
+ False -> \(s:RealWorld) -> e
where foo has arity 1. Then we want the state hack to
apply to foo too, so we can eta expand the case.
@@ -409,16 +403,16 @@ This arose in another guise in Trac #3959. Here we had
catch# (throw exn >> return ())
Note that (throw :: forall a e. Exn e => e -> a) is called with [a = IO ()].
-After inlining (>>) we get
+After inlining (>>) we get
catch# (\_. throw {IO ()} exn)
-We must *not* eta-expand to
+We must *not* eta-expand to
catch# (\_ _. throw {...} exn)
because 'catch#' expects to get a (# _,_ #) after applying its argument to
-a State#, not another function!
+a State#, not another function!
In short, we use the state hack to allow us to push let inside a lambda,
but not to introduce a new lambda.
@@ -430,24 +424,24 @@ ArityType is the result of a compositional analysis on expressions,
from which we can decide the real arity of the expression (extracted
with function exprEtaExpandArity).
-Here is what the fields mean. If an arbitrary expression 'f' has
+Here is what the fields mean. If an arbitrary expression 'f' has
ArityType 'at', then
* If at = ABot n, then (f x1..xn) definitely diverges. Partial
applications to fewer than n args may *or may not* diverge.
We allow ourselves to eta-expand bottoming functions, even
- if doing so may lose some `seq` sharing,
+ if doing so may lose some `seq` sharing,
let x = <expensive> in \y. error (g x y)
==> \y. let x = <expensive> in error (g x y)
- * If at = ATop as, and n=length as,
- then expanding 'f' to (\x1..xn. f x1 .. xn) loses no sharing,
+ * If at = ATop as, and n=length as,
+ then expanding 'f' to (\x1..xn. f x1 .. xn) loses no sharing,
assuming the calls of f respect the one-shot-ness of of
- its definition.
+ its definition.
NB 'f' is an arbitary expression, eg (f = g e1 e2). This 'f'
- can have ArityType as ATop, with length as > 0, only if e1 e2 are
+ can have ArityType as ATop, with length as > 0, only if e1 e2 are
themselves.
* In both cases, f, (f x1), ... (f x1 ... f(n-1)) are definitely
@@ -457,8 +451,8 @@ ArityType 'at', then
So eta expansion is dynamically ok; see Note [State hack and
bottoming functions], the part about catch#
-Example:
- f = \x\y. let v = <expensive> in
+Example:
+ f = \x\y. let v = <expensive> in
\s(one-shot) \t(one-shot). blah
'f' has ArityType [ManyShot,ManyShot,OneShot,OneShot]
The one-shot-ness means we can, in effect, push that
@@ -467,8 +461,8 @@ Example:
Suppose f = \xy. x+y
Then f :: AT [False,False] ATop
- f v :: AT [False] ATop
- f <expensive> :: AT [] ATop
+ f v :: AT [False] ATop
+ f <expensive> :: AT [] ATop
-------------------- Main arity code ----------------------------
\begin{code}
@@ -478,13 +472,13 @@ data ArityType = ATop [OneShotInfo] | ABot Arity
-- to justify the [OneShot], or the Arity
vanillaArityType :: ArityType
-vanillaArityType = ATop [] -- Totally uninformative
+vanillaArityType = ATop [] -- Totally uninformative
-- ^ The Arity returned is the number of value args the
-- expression can be applied to without doing much work
exprEtaExpandArity :: DynFlags -> CoreExpr -> Arity
-- exprEtaExpandArity is used when eta expanding
--- e ==> \xy -> e x y
+-- e ==> \xy -> e x y
exprEtaExpandArity dflags e
= case (arityType env e) of
ATop oss -> length oss
@@ -548,11 +542,11 @@ findRhsArity dflags bndr rhs old_arity
-- expression can be applied to without doing much work
rhsEtaExpandArity :: DynFlags -> CheapAppFun -> CoreExpr -> Arity
-- exprEtaExpandArity is used when eta expanding
--- e ==> \xy -> e x y
+-- e ==> \xy -> e x y
rhsEtaExpandArity dflags cheap_app e
= case (arityType env e) of
ATop (os:oss)
- | isOneShotInfo os || has_lam e -> 1 + length oss
+ | isOneShotInfo os || has_lam e -> 1 + length oss
-- Don't expand PAPs/thunks
-- Note [Eta expanding thunks]
| otherwise -> 0
@@ -602,13 +596,13 @@ dictionary bindings. This improves arities. Thereby, it also
means that full laziness is less prone to floating out the
application of a function to its dictionary arguments, which
can thereby lose opportunities for fusion. Example:
- foo :: Ord a => a -> ...
+ foo :: Ord a => a -> ...
foo = /\a \(d:Ord a). let d' = ...d... in \(x:a). ....
- -- So foo has arity 1
+ -- So foo has arity 1
f = \x. foo dInt $ bar x
-The (foo DInt) is floated out, and makes ineffective a RULE
+The (foo DInt) is floated out, and makes ineffective a RULE
foo (bar x) = ...
One could go further and make exprIsCheap reply True to any
@@ -626,12 +620,12 @@ We don't eta-expand
When we see
f = case y of p -> \x -> blah
-should we eta-expand it? Well, if 'x' is a one-shot state token
+should we eta-expand it? Well, if 'x' is a one-shot state token
then 'yes' because 'f' will only be applied once. But otherwise
we (conservatively) say no. My main reason is to avoid expanding
PAPSs
- f = g d ==> f = \x. g d x
-because that might in turn make g inline (if it has an inline pragma),
+ f = g d ==> f = \x. g d x
+because that might in turn make g inline (if it has an inline pragma),
which we might not want. After all, INLINE pragmas say "inline only
when saturated" so we don't want to be too gung-ho about saturating!
@@ -662,7 +656,7 @@ andArityType (ABot n1) (ABot n2)
andArityType (ATop as) (ABot _) = ATop as
andArityType (ABot _) (ATop bs) = ATop bs
andArityType (ATop as) (ATop bs) = ATop (as `combine` bs)
- where -- See Note [Combining case branches]
+ where -- See Note [Combining case branches]
combine (a:as) (b:bs) = (a `bestOneShot` b) : combine as bs
combine [] bs = takeWhile isOneShotInfo bs
combine as [] = takeWhile isOneShotInfo as
@@ -689,11 +683,11 @@ basis that if we know one branch is one-shot, then they all must be.
\begin{code}
---------------------------
type CheapFun = CoreExpr -> Maybe Type -> Bool
- -- How to decide if an expression is cheap
- -- If the Maybe is Just, the type is the type
- -- of the expression; Nothing means "don't know"
+ -- How to decide if an expression is cheap
+ -- If the Maybe is Just, the type is the type
+ -- of the expression; Nothing means "don't know"
-data ArityEnv
+data ArityEnv
= AE { ae_cheap_fn :: CheapFun
, ae_ped_bot :: Bool -- True <=> be pedantic about bottoms
}
@@ -723,37 +717,37 @@ arityType _ (Var v)
| otherwise
= ATop (take (idArity v) one_shots)
where
- one_shots :: [OneShotInfo] -- One-shot-ness derived from the type
+ one_shots :: [OneShotInfo] -- One-shot-ness derived from the type
one_shots = typeArity (idType v)
- -- Lambdas; increase arity
+ -- Lambdas; increase arity
arityType env (Lam x e)
| isId x = arityLam x (arityType env e)
| otherwise = arityType env e
- -- Applications; decrease arity, except for types
+ -- Applications; decrease arity, except for types
arityType env (App fun (Type _))
= arityType env fun
arityType env (App fun arg )
= arityApp (arityType env fun) (ae_cheap_fn env arg Nothing)
- -- Case/Let; keep arity if either the expression is cheap
- -- or it's a 1-shot lambda
- -- The former is not really right for Haskell
- -- f x = case x of { (a,b) -> \y. e }
- -- ===>
- -- f x y = case x of { (a,b) -> e }
- -- The difference is observable using 'seq'
- --
+ -- Case/Let; keep arity if either the expression is cheap
+ -- or it's a 1-shot lambda
+ -- The former is not really right for Haskell
+ -- f x = case x of { (a,b) -> \y. e }
+ -- ===>
+ -- f x y = case x of { (a,b) -> e }
+ -- The difference is observable using 'seq'
+ --
arityType env (Case scrut _ _ alts)
| exprIsBottom scrut || null alts
= ABot 0 -- Do not eta expand
-- See Note [Dealing with bottom (1)]
| otherwise
= case alts_type of
- ABot n | n>0 -> ATop [] -- Don't eta expand
- | otherwise -> ABot 0 -- if RHS is bottomming
- -- See Note [Dealing with bottom (2)]
+ ABot n | n>0 -> ATop [] -- Don't eta expand
+ | otherwise -> ABot 0 -- if RHS is bottoming
+ -- See Note [Dealing with bottom (2)]
ATop as | not (ae_ped_bot env) -- See Note [Dealing with bottom (3)]
, ae_cheap_fn env scrut Nothing -> ATop as
@@ -762,7 +756,7 @@ arityType env (Case scrut _ _ alts)
where
alts_type = foldr1 andArityType [arityType env rhs | (_,_,rhs) <- alts]
-arityType env (Let b e)
+arityType env (Let b e)
= floatIn (cheap_bind b) (arityType env e)
where
cheap_bind (NonRec b e) = is_cheap (b,e)
@@ -774,32 +768,32 @@ arityType env (Tick t e)
arityType _ _ = vanillaArityType
\end{code}
-
-
+
+
%************************************************************************
-%* *
- The main eta-expander
-%* *
+%* *
+ The main eta-expander
+%* *
%************************************************************************
We go for:
f = \x1..xn -> N ==> f = \x1..xn y1..ym -> N y1..ym
- (n >= 0)
+ (n >= 0)
-where (in both cases)
+where (in both cases)
- * The xi can include type variables
+ * The xi can include type variables
- * The yi are all value variables
+ * The yi are all value variables
- * N is a NORMAL FORM (i.e. no redexes anywhere)
- wanting a suitable number of extra args.
+ * N is a NORMAL FORM (i.e. no redexes anywhere)
+ wanting a suitable number of extra args.
The biggest reason for doing this is for cases like
- f = \x -> case x of
- True -> \y -> e1
- False -> \y -> e2
+ f = \x -> case x of
+ True -> \y -> e1
+ False -> \y -> e2
Here we want to get the lambdas together. A good example is the nofib
program fibheaps, which gets 25% more allocation if you don't do this
@@ -818,15 +812,15 @@ returns a CoreExpr satisfying the same invariant. See Note [Eta
expansion and the CorePrep invariants] in CorePrep.
This means the eta-expander has to do a bit of on-the-fly
-simplification but it's not too hard. The alernative, of relying on
+simplification but it's not too hard. The alternative, of relying on
a subsequent clean-up phase of the Simplifier to de-crapify the result,
means you can't really use it in CorePrep, which is painful.
Note [Eta expansion and SCCs]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Note that SCCs are not treated specially by etaExpand. If we have
- etaExpand 2 (\x -> scc "foo" e)
- = (\xy -> (scc "foo" e) y)
+ etaExpand 2 (\x -> scc "foo" e)
+ = (\xy -> (scc "foo" e) y)
So the costs of evaluating 'e' (not 'e y') are attributed to "foo"
\begin{code}
@@ -840,14 +834,14 @@ So the costs of evaluating 'e' (not 'e y') are attributed to "foo"
-- We should have that:
--
-- > ty = exprType e = exprType e'
-etaExpand :: Arity -- ^ Result should have this number of value args
- -> CoreExpr -- ^ Expression to expand
- -> CoreExpr
+etaExpand :: Arity -- ^ Result should have this number of value args
+ -> CoreExpr -- ^ Expression to expand
+ -> CoreExpr
-- etaExpand deals with for-alls. For example:
--- etaExpand 1 E
+-- etaExpand 1 E
-- where E :: forall a. a -> a
-- would return
--- (/\b. \y::a -> E b y)
+-- (/\b. \y::a -> E b y)
--
-- It deals with coerces too, though they are now rare
-- so perhaps the extra code isn't worth it
@@ -859,20 +853,20 @@ etaExpand n orig_expr
-- Note [Eta expansion and SCCs]
go 0 expr = expr
go n (Lam v body) | isTyVar v = Lam v (go n body)
- | otherwise = Lam v (go (n-1) body)
+ | otherwise = Lam v (go (n-1) body)
go n (Cast expr co) = Cast (go n expr) co
go n expr = -- pprTrace "ee" (vcat [ppr orig_expr, ppr expr, ppr etas]) $
- etaInfoAbs etas (etaInfoApp subst' expr etas)
- where
- in_scope = mkInScopeSet (exprFreeVars expr)
- (in_scope', etas) = mkEtaWW n orig_expr in_scope (exprType expr)
- subst' = mkEmptySubst in_scope'
+ etaInfoAbs etas (etaInfoApp subst' expr etas)
+ where
+ in_scope = mkInScopeSet (exprFreeVars expr)
+ (in_scope', etas) = mkEtaWW n orig_expr in_scope (exprType expr)
+ subst' = mkEmptySubst in_scope'
- -- Wrapper Unwrapper
+ -- Wrapper Unwrapper
--------------
-data EtaInfo = EtaVar Var -- /\a. [], [] a
- -- \x. [], [] x
- | EtaCo Coercion -- [] |> co, [] |> (sym co)
+data EtaInfo = EtaVar Var -- /\a. [], [] a
+ -- \x. [], [] x
+ | EtaCo Coercion -- [] |> co, [] |> (sym co)
instance Outputable EtaInfo where
ppr (EtaVar v) = ptext (sLit "EtaVar") <+> ppr v
@@ -881,7 +875,7 @@ instance Outputable EtaInfo where
pushCoercion :: Coercion -> [EtaInfo] -> [EtaInfo]
pushCoercion co1 (EtaCo co2 : eis)
| isReflCo co = eis
- | otherwise = EtaCo co : eis
+ | otherwise = EtaCo co : eis
where
co = co1 `mkTransCo` co2
@@ -895,10 +889,10 @@ etaInfoAbs (EtaCo co : eis) expr = Cast (etaInfoAbs eis expr) (mkSymCo co)
--------------
etaInfoApp :: Subst -> CoreExpr -> [EtaInfo] -> CoreExpr
--- (etaInfoApp s e eis) returns something equivalent to
--- ((substExpr s e) `appliedto` eis)
+-- (etaInfoApp s e eis) returns something equivalent to
+-- ((substExpr s e) `appliedto` eis)
-etaInfoApp subst (Lam v1 e) (EtaVar v2 : eis)
+etaInfoApp subst (Lam v1 e) (EtaVar v2 : eis)
= etaInfoApp (CoreSubst.extendSubstWithVar subst v1 v2) e eis
etaInfoApp subst (Cast e co1) eis
@@ -906,20 +900,20 @@ etaInfoApp subst (Cast e co1) eis
where
co' = CoreSubst.substCo subst co1
-etaInfoApp subst (Case e b ty alts) eis
+etaInfoApp subst (Case e b ty alts) eis
= Case (subst_expr subst e) b1 (mk_alts_ty (CoreSubst.substTy subst ty) eis) alts'
where
(subst1, b1) = substBndr subst b
alts' = map subst_alt alts
- subst_alt (con, bs, rhs) = (con, bs', etaInfoApp subst2 rhs eis)
- where
- (subst2,bs') = substBndrs subst1 bs
+ subst_alt (con, bs, rhs) = (con, bs', etaInfoApp subst2 rhs eis)
+ where
+ (subst2,bs') = substBndrs subst1 bs
mk_alts_ty ty [] = ty
mk_alts_ty ty (EtaVar v : eis) = mk_alts_ty (applyTypeToArg ty (varToCoreExpr v)) eis
mk_alts_ty _ (EtaCo co : eis) = mk_alts_ty (pSnd (coercionKind co)) eis
-
-etaInfoApp subst (Let b e) eis
+
+etaInfoApp subst (Let b e) eis
= Let b' (etaInfoApp subst' e eis)
where
(subst', b') = subst_bind subst b
@@ -936,18 +930,18 @@ etaInfoApp subst e eis
--------------
mkEtaWW :: Arity -> CoreExpr -> InScopeSet -> Type
- -> (InScopeSet, [EtaInfo])
- -- EtaInfo contains fresh variables,
- -- not free in the incoming CoreExpr
- -- Outgoing InScopeSet includes the EtaInfo vars
- -- and the original free vars
+ -> (InScopeSet, [EtaInfo])
+ -- EtaInfo contains fresh variables,
+ -- not free in the incoming CoreExpr
+ -- Outgoing InScopeSet includes the EtaInfo vars
+ -- and the original free vars
mkEtaWW orig_n orig_expr in_scope orig_ty
= go orig_n empty_subst orig_ty []
where
empty_subst = TvSubst in_scope emptyTvSubstEnv
- go n subst ty eis -- See Note [exprArity invariant]
+ go n subst ty eis -- See Note [exprArity invariant]
| n == 0
= (getTvInScope subst, reverse eis)
@@ -957,29 +951,29 @@ mkEtaWW orig_n orig_expr in_scope orig_ty
= go n subst' ty' (EtaVar tv' : eis)
| Just (arg_ty, res_ty) <- splitFunTy_maybe ty
- , let (subst', eta_id') = freshEtaId n subst arg_ty
+ , let (subst', eta_id') = freshEtaId n subst arg_ty
-- Avoid free vars of the original expression
= go (n-1) subst' res_ty (EtaVar eta_id' : eis)
-
+
| Just (co, ty') <- topNormaliseNewType_maybe ty
- = -- Given this:
- -- newtype T = MkT ([T] -> Int)
- -- Consider eta-expanding this
- -- eta_expand 1 e T
- -- We want to get
- -- coerce T (\x::[T] -> (coerce ([T]->Int) e) x)
+ = -- Given this:
+ -- newtype T = MkT ([T] -> Int)
+ -- Consider eta-expanding this
+ -- eta_expand 1 e T
+ -- We want to get
+ -- coerce T (\x::[T] -> (coerce ([T]->Int) e) x)
go n subst ty' (EtaCo co : eis)
- | otherwise -- We have an expression of arity > 0,
- -- but its type isn't a function.
+ | otherwise -- We have an expression of arity > 0,
+ -- but its type isn't a function.
= WARN( True, (ppr orig_n <+> ppr orig_ty) $$ ppr orig_expr )
(getTvInScope subst, reverse eis)
- -- This *can* legitmately happen:
- -- e.g. coerce Int (\x. x) Essentially the programmer is
- -- playing fast and loose with types (Happy does this a lot).
- -- So we simply decline to eta-expand. Otherwise we'd end up
- -- with an explicit lambda having a non-function type
-
+ -- This *can* legitimately happen:
+ -- e.g. coerce Int (\x. x) Essentially the programmer is
+ -- playing fast and loose with types (Happy does this a lot).
+ -- So we simply decline to eta-expand. Otherwise we'd end up
+ -- with an explicit lambda having a non-function type
+
--------------
-- Avoiding unnecessary substitution; use short-cutting versions
@@ -997,14 +991,14 @@ freshEtaId :: Int -> TvSubst -> Type -> (TvSubst, Id)
-- It should be "fresh" in the sense that it's not in the in-scope set
-- of the TvSubstEnv; and it should itself then be added to the in-scope
-- set of the TvSubstEnv
---
+--
-- The Int is just a reasonable starting point for generating a unique;
-- it does not necessarily have to be unique itself.
freshEtaId n subst ty
= (subst', eta_id')
where
ty' = Type.substTy subst ty
- eta_id' = uniqAway (getTvInScope subst) $
- mkSysLocal (fsLit "eta") (mkBuiltinUnique n) ty'
- subst' = extendTvInScope subst eta_id'
+ eta_id' = uniqAway (getTvInScope subst) $
+ mkSysLocal (fsLit "eta") (mkBuiltinUnique n) ty'
+ subst' = extendTvInScope subst eta_id'
\end{code}
diff --git a/compiler/coreSyn/CoreSyn.lhs b/compiler/coreSyn/CoreSyn.lhs
index d739738676..47418e22ec 100644
--- a/compiler/coreSyn/CoreSyn.lhs
+++ b/compiler/coreSyn/CoreSyn.lhs
@@ -5,39 +5,33 @@
\begin{code}
{-# LANGUAGE CPP, DeriveDataTypeable, DeriveFunctor #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-- | CoreSyn holds all the main data types for use by for the Glasgow Haskell Compiler midsection
module CoreSyn (
- -- * Main data types
+ -- * Main data types
Expr(..), Alt, Bind(..), AltCon(..), Arg, Tickish(..),
CoreProgram, CoreExpr, CoreAlt, CoreBind, CoreArg, CoreBndr,
TaggedExpr, TaggedAlt, TaggedBind, TaggedArg, TaggedBndr(..), deTagExpr,
-- ** 'Expr' construction
- mkLets, mkLams,
- mkApps, mkTyApps, mkCoApps, mkVarApps,
-
- mkIntLit, mkIntLitInt,
- mkWordLit, mkWordLitWord,
- mkWord64LitWord64, mkInt64LitInt64,
- mkCharLit, mkStringLit,
- mkFloatLit, mkFloatLitFloat,
- mkDoubleLit, mkDoubleLitDouble,
-
- mkConApp, mkConApp2, mkTyBind, mkCoBind,
- varToCoreExpr, varsToCoreExprs,
+ mkLets, mkLams,
+ mkApps, mkTyApps, mkCoApps, mkVarApps,
+
+ mkIntLit, mkIntLitInt,
+ mkWordLit, mkWordLitWord,
+ mkWord64LitWord64, mkInt64LitInt64,
+ mkCharLit, mkStringLit,
+ mkFloatLit, mkFloatLitFloat,
+ mkDoubleLit, mkDoubleLitDouble,
+
+ mkConApp, mkConApp2, mkTyBind, mkCoBind,
+ varToCoreExpr, varsToCoreExprs,
isId, cmpAltCon, cmpAlt, ltAlt,
-
- -- ** Simple 'Expr' access functions and predicates
- bindersOf, bindersOfBinds, rhssOfBind, rhssOfAlts,
- collectBinders, collectTyBinders, collectValBinders, collectTyAndValBinders,
+
+ -- ** Simple 'Expr' access functions and predicates
+ bindersOf, bindersOfBinds, rhssOfBind, rhssOfAlts,
+ collectBinders, collectTyBinders, collectValBinders, collectTyAndValBinders,
collectArgs, flattenBinds,
isValArg, isTypeArg, isTyCoArg, valArgCount, valBndrCount,
@@ -49,42 +43,42 @@ module CoreSyn (
-- * Unfolding data types
Unfolding(..), UnfoldingGuidance(..), UnfoldingSource(..),
- -- ** Constructing 'Unfolding's
- noUnfolding, evaldUnfolding, mkOtherCon,
+ -- ** Constructing 'Unfolding's
+ noUnfolding, evaldUnfolding, mkOtherCon,
unSaturatedOk, needSaturated, boringCxtOk, boringCxtNotOk,
-
- -- ** Predicates and deconstruction on 'Unfolding'
- unfoldingTemplate, expandUnfolding_maybe,
- maybeUnfoldingTemplate, otherCons,
- isValueUnfolding, isEvaldUnfolding, isCheapUnfolding,
+
+ -- ** Predicates and deconstruction on 'Unfolding'
+ unfoldingTemplate, expandUnfolding_maybe,
+ maybeUnfoldingTemplate, otherCons,
+ isValueUnfolding, isEvaldUnfolding, isCheapUnfolding,
isExpandableUnfolding, isConLikeUnfolding, isCompulsoryUnfolding,
isStableUnfolding, hasStableCoreUnfolding_maybe,
- isClosedUnfolding, hasSomeUnfolding,
- canUnfold, neverUnfoldGuidance, isStableSource,
+ isClosedUnfolding, hasSomeUnfolding,
+ canUnfold, neverUnfoldGuidance, isStableSource,
+
+ -- * Strictness
+ seqExpr, seqExprs, seqUnfolding,
- -- * Strictness
- seqExpr, seqExprs, seqUnfolding,
+ -- * Annotated expression data types
+ AnnExpr, AnnExpr'(..), AnnBind(..), AnnAlt,
- -- * Annotated expression data types
- AnnExpr, AnnExpr'(..), AnnBind(..), AnnAlt,
-
-- ** Operations on annotated expressions
collectAnnArgs,
- -- ** Operations on annotations
- deAnnotate, deAnnotate', deAnnAlt, collectAnnBndrs,
+ -- ** Operations on annotations
+ deAnnotate, deAnnotate', deAnnAlt, collectAnnBndrs,
+
+ -- * Core rule data types
+ CoreRule(..), -- CoreSubst, CoreTidy, CoreFVs, PprCore only
+ RuleName, RuleFun, IdUnfoldingFun, InScopeEnv,
- -- * Core rule data types
- CoreRule(..), -- CoreSubst, CoreTidy, CoreFVs, PprCore only
- RuleName, RuleFun, IdUnfoldingFun, InScopeEnv,
-
- -- ** Operations on 'CoreRule's
- seqRules, ruleArity, ruleName, ruleIdName, ruleActivation,
- setRuleIdName,
- isBuiltinRule, isLocalRule, isAutoRule,
+ -- ** Operations on 'CoreRule's
+ seqRules, ruleArity, ruleName, ruleIdName, ruleActivation,
+ setRuleIdName,
+ isBuiltinRule, isLocalRule, isAutoRule,
- -- * Core vectorisation declarations data type
- CoreVect(..)
+ -- * Core vectorisation declarations data type
+ CoreVect(..)
) where
#include "HsVersions.h"
@@ -114,9 +108,9 @@ infixl 4 `mkApps`, `mkTyApps`, `mkVarApps`, `App`, `mkCoApps`
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{The main data types}
-%* *
+%* *
%************************************************************************
These data types are the heart of the compiler
@@ -132,7 +126,7 @@ These data types are the heart of the compiler
-- by the data type 'HsExpr.HsExpr' with the names being 'RdrName.RdrNames'
--
-- 2. This syntax tree is /renamed/, which attaches a 'Unique.Unique' to every 'RdrName.RdrName'
--- (yielding a 'Name.Name') to disambiguate identifiers which are lexically identical.
+-- (yielding a 'Name.Name') to disambiguate identifiers which are lexically identical.
-- For example, this program:
--
-- @
@@ -172,24 +166,24 @@ These data types are the heart of the compiler
-- * Recursive and non recursive @let@s. Operationally
-- this corresponds to allocating a thunk for the things
-- bound and then executing the sub-expression.
---
+--
-- #top_level_invariant#
-- #letrec_invariant#
---
+--
-- The right hand sides of all top-level and recursive @let@s
-- /must/ be of lifted type (see "Type#type_classification" for
-- the meaning of /lifted/ vs. /unlifted/).
---
+--
-- See Note [CoreSyn let/app invariant]
--
-- #type_let#
-- We allow a /non-recursive/ let to bind a type variable, thus:
---
+--
-- > Let (NonRec tv (Type ty)) body
---
+--
-- This can be very convenient for postponing type substitutions until
-- the next run of the simplifier.
---
+--
-- At the moment, the rest of the compiler only deals with type-let
-- in a Let expression, rather than at top level. We may want to revist
-- this choice.
@@ -198,43 +192,43 @@ These data types are the heart of the compiler
-- the scrutinee (expression examined) to weak head normal form
-- and then examining at most one level of resulting constructor (i.e. you
-- cannot do nested pattern matching directly with this).
---
+--
-- The binder gets bound to the value of the scrutinee,
-- and the 'Type' must be that of all the case alternatives
---
+--
-- #case_invariants#
--- This is one of the more complicated elements of the Core language,
+-- This is one of the more complicated elements of the Core language,
-- and comes with a number of restrictions:
---
--- 1. The list of alternatives may be empty;
+--
+-- 1. The list of alternatives may be empty;
-- See Note [Empty case alternatives]
--
--- 2. The 'DEFAULT' case alternative must be first in the list,
+-- 2. The 'DEFAULT' case alternative must be first in the list,
-- if it occurs at all.
---
--- 3. The remaining cases are in order of increasing
--- tag (for 'DataAlts') or
--- lit (for 'LitAlts').
--- This makes finding the relevant constructor easy,
+--
+-- 3. The remaining cases are in order of increasing
+-- tag (for 'DataAlts') or
+-- lit (for 'LitAlts').
+-- This makes finding the relevant constructor easy,
-- and makes comparison easier too.
---
--- 4. The list of alternatives must be exhaustive. An /exhaustive/ case
+--
+-- 4. The list of alternatives must be exhaustive. An /exhaustive/ case
-- does not necessarily mention all constructors:
---
--- @
--- data Foo = Red | Green | Blue
--- ... case x of
--- Red -> True
--- other -> f (case x of
--- Green -> ...
--- Blue -> ... ) ...
--- @
---
--- The inner case does not need a @Red@ alternative, because @x@
--- can't be @Red@ at that program point.
--
--- * Cast an expression to a particular type.
--- This is used to implement @newtype@s (a @newtype@ constructor or
+-- @
+-- data Foo = Red | Green | Blue
+-- ... case x of
+-- Red -> True
+-- other -> f (case x of
+-- Green -> ...
+-- Blue -> ... ) ...
+-- @
+--
+-- The inner case does not need a @Red@ alternative, because @x@
+-- can't be @Red@ at that program point.
+--
+-- * Cast an expression to a particular type.
+-- This is used to implement @newtype@s (a @newtype@ constructor or
-- destructor just becomes a 'Cast' in Core) and GADTs.
--
-- * Notes. These allow general information to be added to expressions
@@ -247,12 +241,12 @@ These data types are the heart of the compiler
-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism] in coreSyn/CoreLint.lhs
data Expr b
- = Var Id
+ = Var Id
| Lit Literal
| App (Expr b) (Arg b)
| Lam b (Expr b)
| Let (Bind b) (Expr b)
- | Case (Expr b) b Type [Alt b] -- See #case_invariant#
+ | Case (Expr b) b Type [Alt b] -- See #case_invariant#
| Cast (Expr b) Coercion
| Tick (Tickish Id) (Expr b)
| Type Type
@@ -275,14 +269,14 @@ type Alt b = (AltCon, [b], Expr b)
-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism] in coreSyn/CoreLint.lhs
-data AltCon
+data AltCon
= DataAlt DataCon -- ^ A plain data constructor: @case e of { Foo x -> ... }@.
-- Invariant: the 'DataCon' is always from a @data@ type, and never from a @newtype@
| LitAlt Literal -- ^ A literal: @case e of { 1 -> ... }@
-- Invariant: always an *unlifted* literal
- -- See Note [Literal alternatives]
-
+ -- See Note [Literal alternatives]
+
| DEFAULT -- ^ Trivial alternative: @case e of { _ -> ... }@
deriving (Eq, Ord, Data, Typeable)
@@ -291,7 +285,7 @@ data AltCon
-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism] in coreSyn/CoreLint.lhs
data Bind b = NonRec b (Expr b)
- | Rec [(b, (Expr b))]
+ | Rec [(b, (Expr b))]
deriving (Data, Typeable)
\end{code}
@@ -385,7 +379,7 @@ The alternatives of a case expression should be exhaustive. A case expression
can have empty alternatives if (and only if) the scrutinee is bound to raise
an exception or diverge. So:
Case (error Int "Hello") b Bool []
-is fine, and has type Bool. This is one reason we need a type on
+is fine, and has type Bool. This is one reason we need a type on
the case expression: if the alternatives are empty we can't get the type
from the alternatives! I'll write this
case (error Int "Hello") of Bool {}
@@ -402,7 +396,7 @@ degnerate situation but we do NOT want to replace
case x of Bool {} --> error Bool "Inaccessible case"
because x might raise an exception, and *that*'s what we want to see!
(Trac #6067 is an example.) To preserve semantics we'd have to say
- x `seq` error Bool "Inaccessible case"
+ x `seq` error Bool "Inaccessible case"
but the 'seq' is just a case, so we are back to square 1. Or I suppose
we could say
x |> UnsafeCoerce T Bool
@@ -414,7 +408,7 @@ one type to another. For example
f :: Int -> Int
f n = error "urk"
-
+
g :: Int -> (# Char, Bool #)
g x = case f x of { 0 -> ..., n -> ... }
@@ -424,14 +418,14 @@ and we can discard the alternatives since the scrutinee is bottom to give
case (error Int "urk") of (# Char, Bool #) {}
This is nicer than using an unsafe coerce between Int ~ (# Char,Bool #),
-if for no other reason that we don't need to instantiate the (~) at an
+if for no other reason that we don't need to instantiate the (~) at an
unboxed type.
%************************************************************************
-%* *
+%* *
Ticks
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -523,9 +517,9 @@ tickishCanSplit _ = True
%************************************************************************
-%* *
+%* *
\subsection{Transformation rules}
-%* *
+%* *
%************************************************************************
The CoreRule type and its friends are dealt with mainly in CoreRules,
@@ -540,52 +534,52 @@ but CoreFVs, Subst, PprCore, CoreTidy also inspect the representation.
-- * \"Orphan\" if nothing on the LHS is defined in the same module
-- as the rule itself
data CoreRule
- = Rule {
- ru_name :: RuleName, -- ^ Name of the rule, for communication with the user
- ru_act :: Activation, -- ^ When the rule is active
-
- -- Rough-matching stuff
- -- see comments with InstEnv.ClsInst( is_cls, is_rough )
- ru_fn :: Name, -- ^ Name of the 'Id.Id' at the head of this rule
- ru_rough :: [Maybe Name], -- ^ Name at the head of each argument to the left hand side
-
- -- Proper-matching stuff
- -- see comments with InstEnv.ClsInst( is_tvs, is_tys )
- ru_bndrs :: [CoreBndr], -- ^ Variables quantified over
- ru_args :: [CoreExpr], -- ^ Left hand side arguments
-
- -- And the right-hand side
- ru_rhs :: CoreExpr, -- ^ Right hand side of the rule
- -- Occurrence info is guaranteed correct
- -- See Note [OccInfo in unfoldings and rules]
-
- -- Locality
- ru_auto :: Bool, -- ^ @True@ <=> this rule is auto-generated
- -- @False@ <=> generated at the users behest
- -- Main effect: reporting of orphan-hood
-
- ru_local :: Bool -- ^ @True@ iff the fn at the head of the rule is
- -- defined in the same module as the rule
- -- and is not an implicit 'Id' (like a record selector,
- -- class operation, or data constructor)
-
- -- NB: ru_local is *not* used to decide orphan-hood
- -- c.g. MkIface.coreRuleToIfaceRule
+ = Rule {
+ ru_name :: RuleName, -- ^ Name of the rule, for communication with the user
+ ru_act :: Activation, -- ^ When the rule is active
+
+ -- Rough-matching stuff
+ -- see comments with InstEnv.ClsInst( is_cls, is_rough )
+ ru_fn :: Name, -- ^ Name of the 'Id.Id' at the head of this rule
+ ru_rough :: [Maybe Name], -- ^ Name at the head of each argument to the left hand side
+
+ -- Proper-matching stuff
+ -- see comments with InstEnv.ClsInst( is_tvs, is_tys )
+ ru_bndrs :: [CoreBndr], -- ^ Variables quantified over
+ ru_args :: [CoreExpr], -- ^ Left hand side arguments
+
+ -- And the right-hand side
+ ru_rhs :: CoreExpr, -- ^ Right hand side of the rule
+ -- Occurrence info is guaranteed correct
+ -- See Note [OccInfo in unfoldings and rules]
+
+ -- Locality
+ ru_auto :: Bool, -- ^ @True@ <=> this rule is auto-generated
+ -- @False@ <=> generated at the users behest
+ -- Main effect: reporting of orphan-hood
+
+ ru_local :: Bool -- ^ @True@ iff the fn at the head of the rule is
+ -- defined in the same module as the rule
+ -- and is not an implicit 'Id' (like a record selector,
+ -- class operation, or data constructor)
+
+ -- NB: ru_local is *not* used to decide orphan-hood
+ -- c.g. MkIface.coreRuleToIfaceRule
}
-- | Built-in rules are used for constant folding
-- and suchlike. They have no free variables.
- | BuiltinRule {
- ru_name :: RuleName, -- ^ As above
- ru_fn :: Name, -- ^ As above
- ru_nargs :: Int, -- ^ Number of arguments that 'ru_try' consumes,
- -- if it fires, including type arguments
- ru_try :: RuleFun
- -- ^ This function does the rewrite. It given too many
- -- arguments, it simply discards them; the returned 'CoreExpr'
- -- is just the rewrite of 'ru_fn' applied to the first 'ru_nargs' args
+ | BuiltinRule {
+ ru_name :: RuleName, -- ^ As above
+ ru_fn :: Name, -- ^ As above
+ ru_nargs :: Int, -- ^ Number of arguments that 'ru_try' consumes,
+ -- if it fires, including type arguments
+ ru_try :: RuleFun
+ -- ^ This function does the rewrite. It given too many
+ -- arguments, it simply discards them; the returned 'CoreExpr'
+ -- is just the rewrite of 'ru_fn' applied to the first 'ru_nargs' args
}
- -- See Note [Extra args in rule matching] in Rules.lhs
+ -- See Note [Extra args in rule matching] in Rules.lhs
type RuleFun = DynFlags -> InScopeEnv -> Id -> [CoreExpr] -> Maybe CoreExpr
type InScopeEnv = (InScopeSet, IdUnfoldingFun)
@@ -597,13 +591,13 @@ type IdUnfoldingFun = Id -> Unfolding
isBuiltinRule :: CoreRule -> Bool
isBuiltinRule (BuiltinRule {}) = True
-isBuiltinRule _ = False
+isBuiltinRule _ = False
isAutoRule :: CoreRule -> Bool
isAutoRule (BuiltinRule {}) = False
isAutoRule (Rule { ru_auto = is_auto }) = is_auto
--- | The number of arguments the 'ru_fn' must be applied
+-- | The number of arguments the 'ru_fn' must be applied
-- to before the rule can match on it
ruleArity :: CoreRule -> Int
ruleArity (BuiltinRule {ru_nargs = n}) = n
@@ -663,49 +657,49 @@ data Unfolding
= NoUnfolding -- ^ We have no information about the unfolding
| OtherCon [AltCon] -- ^ It ain't one of these constructors.
- -- @OtherCon xs@ also indicates that something has been evaluated
- -- and hence there's no point in re-evaluating it.
- -- @OtherCon []@ is used even for non-data-type values
- -- to indicated evaluated-ness. Notably:
- --
- -- > data C = C !(Int -> Int)
- -- > case x of { C f -> ... }
- --
- -- Here, @f@ gets an @OtherCon []@ unfolding.
-
- | DFunUnfolding { -- The Unfolding of a DFunId
- -- See Note [DFun unfoldings]
- -- df = /\a1..am. \d1..dn. MkD t1 .. tk
+ -- @OtherCon xs@ also indicates that something has been evaluated
+ -- and hence there's no point in re-evaluating it.
+ -- @OtherCon []@ is used even for non-data-type values
+ -- to indicated evaluated-ness. Notably:
+ --
+ -- > data C = C !(Int -> Int)
+ -- > case x of { C f -> ... }
+ --
+ -- Here, @f@ gets an @OtherCon []@ unfolding.
+
+ | DFunUnfolding { -- The Unfolding of a DFunId
+ -- See Note [DFun unfoldings]
+ -- df = /\a1..am. \d1..dn. MkD t1 .. tk
-- (op1 a1..am d1..dn)
- -- (op2 a1..am d1..dn)
+ -- (op2 a1..am d1..dn)
df_bndrs :: [Var], -- The bound variables [a1..m],[d1..dn]
df_con :: DataCon, -- The dictionary data constructor (never a newtype datacon)
df_args :: [CoreExpr] -- Args of the data con: types, superclasses and methods,
} -- in positional order
- | CoreUnfolding { -- An unfolding for an Id with no pragma,
+ | CoreUnfolding { -- An unfolding for an Id with no pragma,
-- or perhaps a NOINLINE pragma
- -- (For NOINLINE, the phase, if any, is in the
+ -- (For NOINLINE, the phase, if any, is in the
-- InlinePragInfo for this Id.)
- uf_tmpl :: CoreExpr, -- Template; occurrence info is correct
- uf_src :: UnfoldingSource, -- Where the unfolding came from
- uf_is_top :: Bool, -- True <=> top level binding
- uf_is_value :: Bool, -- exprIsHNF template (cached); it is ok to discard
- -- a `seq` on this variable
+ uf_tmpl :: CoreExpr, -- Template; occurrence info is correct
+ uf_src :: UnfoldingSource, -- Where the unfolding came from
+ uf_is_top :: Bool, -- True <=> top level binding
+ uf_is_value :: Bool, -- exprIsHNF template (cached); it is ok to discard
+ -- a `seq` on this variable
uf_is_conlike :: Bool, -- True <=> applicn of constructor or CONLIKE function
-- Cached version of exprIsConLike
- uf_is_work_free :: Bool, -- True <=> doesn't waste (much) work to expand
+ uf_is_work_free :: Bool, -- True <=> doesn't waste (much) work to expand
-- inside an inlining
- -- Cached version of exprIsCheap
- uf_expandable :: Bool, -- True <=> can expand in RULE matching
- -- Cached version of exprIsExpandable
- uf_guidance :: UnfoldingGuidance -- Tells about the *size* of the template.
+ -- Cached version of exprIsCheap
+ uf_expandable :: Bool, -- True <=> can expand in RULE matching
+ -- Cached version of exprIsExpandable
+ uf_guidance :: UnfoldingGuidance -- Tells about the *size* of the template.
}
-- ^ An unfolding with redundant cached information. Parameters:
--
- -- uf_tmpl: Template used to perform unfolding;
- -- NB: Occurrence info is guaranteed correct:
- -- see Note [OccInfo in unfoldings and rules]
+ -- uf_tmpl: Template used to perform unfolding;
+ -- NB: Occurrence info is guaranteed correct:
+ -- see Note [OccInfo in unfoldings and rules]
--
-- uf_is_top: Is this a top level binding?
--
@@ -721,11 +715,11 @@ data Unfolding
------------------------------------------------
data UnfoldingSource
= -- See also Note [Historical note: unfoldings for wrappers]
-
+
InlineRhs -- The current rhs of the function
- -- Replace uf_tmpl each time around
+ -- Replace uf_tmpl each time around
- | InlineStable -- From an INLINE or INLINABLE pragma
+ | InlineStable -- From an INLINE or INLINABLE pragma
-- INLINE if guidance is UnfWhen
-- INLINABLE if guidance is UnfIfGoodArgs/UnfoldNever
-- (well, technically an INLINABLE might be made
@@ -735,15 +729,15 @@ data UnfoldingSource
-- work so it is consistent with the intended
-- meaning of INLINABLE).
--
- -- uf_tmpl may change, but only as a result of
+ -- uf_tmpl may change, but only as a result of
-- gentle simplification, it doesn't get updated
-- to the current RHS during compilation as with
-- InlineRhs.
--
- -- See Note [InlineRules]
+ -- See Note [InlineRules]
| InlineCompulsory -- Something that *has* no binding, so you *must* inline it
- -- Only a few primop-like things have this property
+ -- Only a few primop-like things have this property
-- (see MkId.lhs, calls to mkCompulsoryUnfolding).
-- Inline absolutely always, however boring the context.
@@ -751,31 +745,31 @@ data UnfoldingSource
-- | 'UnfoldingGuidance' says when unfolding should take place
data UnfoldingGuidance
- = UnfWhen { -- Inline without thinking about the *size* of the uf_tmpl
- -- Used (a) for small *and* cheap unfoldings
- -- (b) for INLINE functions
+ = UnfWhen { -- Inline without thinking about the *size* of the uf_tmpl
+ -- Used (a) for small *and* cheap unfoldings
+ -- (b) for INLINE functions
-- See Note [INLINE for small functions] in CoreUnfold
- ug_arity :: Arity, -- Number of value arguments expected
+ ug_arity :: Arity, -- Number of value arguments expected
- ug_unsat_ok :: Bool, -- True <=> ok to inline even if unsaturated
+ ug_unsat_ok :: Bool, -- True <=> ok to inline even if unsaturated
ug_boring_ok :: Bool -- True <=> ok to inline even if the context is boring
- -- So True,True means "always"
+ -- So True,True means "always"
}
- | UnfIfGoodArgs { -- Arose from a normal Id; the info here is the
- -- result of a simple analysis of the RHS
+ | UnfIfGoodArgs { -- Arose from a normal Id; the info here is the
+ -- result of a simple analysis of the RHS
ug_args :: [Int], -- Discount if the argument is evaluated.
- -- (i.e., a simplification will definitely
- -- be possible). One elt of the list per *value* arg.
+ -- (i.e., a simplification will definitely
+ -- be possible). One elt of the list per *value* arg.
- ug_size :: Int, -- The "size" of the unfolding.
+ ug_size :: Int, -- The "size" of the unfolding.
- ug_res :: Int -- Scrutinee discount: the discount to substract if the thing is in
- } -- a context (case (thing args) of ...),
- -- (where there are the right number of arguments.)
+ ug_res :: Int -- Scrutinee discount: the discount to substract if the thing is in
+ } -- a context (case (thing args) of ...),
+ -- (where there are the right number of arguments.)
- | UnfNever -- The RHS is big, so don't inline it
+ | UnfNever -- The RHS is big, so don't inline it
\end{code}
Note [Historical note: unfoldings for wrappers]
@@ -801,7 +795,7 @@ an Id, so, eg, substitutions need not traverse them.
Note [DFun unfoldings]
~~~~~~~~~~~~~~~~~~~~~~
The Arity in a DFunUnfolding is total number of args (type and value)
-that the DFun needs to produce a dictionary. That's not necessarily
+that the DFun needs to produce a dictionary. That's not necessarily
related to the ordinary arity of the dfun Id, esp if the class has
one method, so the dictionary is represented by a newtype. Example
@@ -812,7 +806,7 @@ The instance translates to
$dfCList :: forall a. C a => C [a] -- Arity 2!
$dfCList = /\a.\d. $copList {a} d |> co
-
+
$copList :: forall a. C a => [a] -> Int -- Arity 2!
$copList = /\a.\d.\xs. op {a} d (head xs)
@@ -848,9 +842,9 @@ mkOtherCon :: [AltCon] -> Unfolding
mkOtherCon = OtherCon
seqUnfolding :: Unfolding -> ()
-seqUnfolding (CoreUnfolding { uf_tmpl = e, uf_is_top = top,
- uf_is_value = b1, uf_is_work_free = b2,
- uf_expandable = b3, uf_is_conlike = b4,
+seqUnfolding (CoreUnfolding { uf_tmpl = e, uf_is_top = top,
+ uf_is_value = b1, uf_is_work_free = b2,
+ uf_expandable = b3, uf_is_conlike = b4,
uf_guidance = g})
= seqExpr e `seq` top `seq` b1 `seq` b2 `seq` b3 `seq` b4 `seq` seqGuidance g
@@ -884,7 +878,7 @@ maybeUnfoldingTemplate (DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args
maybeUnfoldingTemplate _
= Nothing
--- | The constructors that the unfolding could never be:
+-- | The constructors that the unfolding could never be:
-- returns @[]@ if no information is available
otherCons :: Unfolding -> [AltCon]
otherCons (OtherCon cons) = cons
@@ -893,7 +887,7 @@ otherCons _ = []
-- | Determines if it is certainly the case that the unfolding will
-- yield a value (something in HNF): returns @False@ if unsure
isValueUnfolding :: Unfolding -> Bool
- -- Returns False for OtherCon
+ -- Returns False for OtherCon
isValueUnfolding (CoreUnfolding { uf_is_value = is_evald }) = is_evald
isValueUnfolding _ = False
@@ -901,8 +895,8 @@ isValueUnfolding _ = False
-- yield a value. Unlike 'isValueUnfolding' it returns @True@
-- for 'OtherCon'
isEvaldUnfolding :: Unfolding -> Bool
- -- Returns True for OtherCon
-isEvaldUnfolding (OtherCon _) = True
+ -- Returns True for OtherCon
+isEvaldUnfolding (OtherCon _) = True
isEvaldUnfolding (CoreUnfolding { uf_is_value = is_evald }) = is_evald
isEvaldUnfolding _ = False
@@ -923,7 +917,7 @@ isExpandableUnfolding (CoreUnfolding { uf_expandable = is_expable }) = is_expabl
isExpandableUnfolding _ = False
expandUnfolding_maybe :: Unfolding -> Maybe CoreExpr
--- Expand an expandable unfolding; this is used in rule matching
+-- Expand an expandable unfolding; this is used in rule matching
-- See Note [Expanding variables] in Rules.lhs
-- The key point here is that CONLIKE things can be expanded
expandUnfolding_maybe (CoreUnfolding { uf_expandable = True, uf_tmpl = rhs }) = Just rhs
@@ -946,13 +940,13 @@ isCompulsoryUnfolding (CoreUnfolding { uf_src = InlineCompulsory }) = True
isCompulsoryUnfolding _ = False
isStableUnfolding :: Unfolding -> Bool
--- True of unfoldings that should not be overwritten
+-- True of unfoldings that should not be overwritten
-- by a CoreUnfolding for the RHS of a let-binding
isStableUnfolding (CoreUnfolding { uf_src = src }) = isStableSource src
-isStableUnfolding (DFunUnfolding {}) = True
+isStableUnfolding (DFunUnfolding {}) = True
isStableUnfolding _ = False
-isClosedUnfolding :: Unfolding -> Bool -- No free variables
+isClosedUnfolding :: Unfolding -> Bool -- No free variables
isClosedUnfolding (CoreUnfolding {}) = False
isClosedUnfolding (DFunUnfolding {}) = False
isClosedUnfolding _ = True
@@ -968,28 +962,28 @@ neverUnfoldGuidance _ = False
canUnfold :: Unfolding -> Bool
canUnfold (CoreUnfolding { uf_guidance = g }) = not (neverUnfoldGuidance g)
-canUnfold _ = False
+canUnfold _ = False
\end{code}
Note [InlineRules]
~~~~~~~~~~~~~~~~~
-When you say
+When you say
{-# INLINE f #-}
f x = <rhs>
you intend that calls (f e) are replaced by <rhs>[e/x] So we
should capture (\x.<rhs>) in the Unfolding of 'f', and never meddle
with it. Meanwhile, we can optimise <rhs> to our heart's content,
leaving the original unfolding intact in Unfolding of 'f'. For example
- all xs = foldr (&&) True xs
- any p = all . map p {-# INLINE any #-}
+ all xs = foldr (&&) True xs
+ any p = all . map p {-# INLINE any #-}
We optimise any's RHS fully, but leave the InlineRule saying "all . map p",
which deforests well at the call site.
So INLINE pragma gives rise to an InlineRule, which captures the original RHS.
Moreover, it's only used when 'f' is applied to the
-specified number of arguments; that is, the number of argument on
-the LHS of the '=' sign in the original source definition.
+specified number of arguments; that is, the number of argument on
+the LHS of the '=' sign in the original source definition.
For example, (.) is now defined in the libraries like this
{-# INLINE (.) #-}
(.) f g = \x -> f (g x)
@@ -1015,9 +1009,9 @@ the occurrence info is wrong
%************************************************************************
-%* *
+%* *
AltCon
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -1039,7 +1033,7 @@ ltAlt a1 a2 = (a1 `cmpAlt` a2) == LT
cmpAltCon :: AltCon -> AltCon -> Ordering
-- ^ Compares 'AltCon's within a single list of alternatives
-cmpAltCon DEFAULT DEFAULT = EQ
+cmpAltCon DEFAULT DEFAULT = EQ
cmpAltCon DEFAULT _ = LT
cmpAltCon (DataAlt d1) (DataAlt d2) = dataConTag d1 `compare` dataConTag d2
@@ -1047,15 +1041,15 @@ cmpAltCon (DataAlt _) DEFAULT = GT
cmpAltCon (LitAlt l1) (LitAlt l2) = l1 `compare` l2
cmpAltCon (LitAlt _) DEFAULT = GT
-cmpAltCon con1 con2 = WARN( True, text "Comparing incomparable AltCons" <+>
- ppr con1 <+> ppr con2 )
- LT
+cmpAltCon con1 con2 = WARN( True, text "Comparing incomparable AltCons" <+>
+ ppr con1 <+> ppr con2 )
+ LT
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{Useful synonyms}
-%* *
+%* *
%************************************************************************
Note [CoreProgram]
@@ -1076,13 +1070,13 @@ a list of CoreBind
on each Rec binding, and splits it into a sequence of smaller
bindings where possible. So the program typically starts life as a
single giant Rec, which is then dependency-analysed into smaller
- chunks.
+ chunks.
\begin{code}
-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism] in coreSyn/CoreLint.lhs
-type CoreProgram = [CoreBind] -- See Note [CoreProgram]
+type CoreProgram = [CoreBind] -- See Note [CoreProgram]
-- | The common case for the type of binders and variables when
-- we are manipulating the Core language within GHC
@@ -1098,14 +1092,14 @@ type CoreAlt = Alt CoreBndr
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{Tagging}
-%* *
+%* *
%************************************************************************
\begin{code}
-- | Binders are /tagged/ with a t
-data TaggedBndr t = TB CoreBndr t -- TB for "tagged binder"
+data TaggedBndr t = TB CoreBndr t -- TB for "tagged binder"
type TaggedBind t = Bind (TaggedBndr t)
type TaggedExpr t = Expr (TaggedBndr t)
@@ -1116,7 +1110,7 @@ instance Outputable b => Outputable (TaggedBndr b) where
ppr (TB b l) = char '<' <> ppr b <> comma <> ppr l <> char '>'
instance Outputable b => OutputableBndr (TaggedBndr b) where
- pprBndr _ b = ppr b -- Simple
+ pprBndr _ b = ppr b -- Simple
pprInfixOcc b = ppr b
pprPrefixOcc b = ppr b
@@ -1142,9 +1136,9 @@ deTagAlt (con, bndrs, rhs) = (con, [b | TB b _ <- bndrs], deTagExpr rhs)
%************************************************************************
-%* *
+%* *
\subsection{Core-constructing functions with checking}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -1161,14 +1155,14 @@ mkVarApps :: Expr b -> [Var] -> Expr b
-- use 'MkCore.mkCoreConApps' if possible
mkConApp :: DataCon -> [Arg b] -> Expr b
-mkApps f args = foldl App f args
+mkApps f args = foldl App f args
mkTyApps f args = foldl (\ e a -> App e (Type a)) f args
mkCoApps f args = foldl (\ e a -> App e (Coercion a)) f args
mkVarApps f vars = foldl (\ e a -> App e (varToCoreExpr a)) f vars
mkConApp con args = mkApps (Var (dataConWorkId con)) args
mkConApp2 :: DataCon -> [Type] -> [Var] -> Expr b
-mkConApp2 con tys arg_ids = Var (dataConWorkId con)
+mkConApp2 con tys arg_ids = Var (dataConWorkId con)
`mkApps` map Type tys
`mkApps` map varToCoreExpr arg_ids
@@ -1232,10 +1226,10 @@ mkDoubleLitDouble d = Lit (mkMachDouble (toRational d))
-- | Bind all supplied binding groups over an expression in a nested let expression. Assumes
-- that the rhs satisfies the let/app invariant. Prefer to use 'MkCore.mkCoreLets' if
-- possible, which does guarantee the invariant
-mkLets :: [Bind b] -> Expr b -> Expr b
+mkLets :: [Bind b] -> Expr b -> Expr b
-- | Bind all supplied binders over an expression in a nested lambda expression. Prefer to
-- use 'MkCore.mkCoreLams' if possible
-mkLams :: [b] -> Expr b -> Expr b
+mkLams :: [b] -> Expr b -> Expr b
mkLams binders body = foldr Lam body binders
mkLets binds body = foldr Let body binds
@@ -1263,9 +1257,9 @@ varsToCoreExprs vs = map varToCoreExpr vs
%************************************************************************
-%* *
+%* *
\subsection{Simple access functions}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -1292,27 +1286,27 @@ rhssOfAlts alts = [e | (_,_,e) <- alts]
flattenBinds :: [Bind b] -> [(b, Expr b)]
flattenBinds (NonRec b r : binds) = (b,r) : flattenBinds binds
flattenBinds (Rec prs1 : binds) = prs1 ++ flattenBinds binds
-flattenBinds [] = []
+flattenBinds [] = []
\end{code}
\begin{code}
-- | We often want to strip off leading lambdas before getting down to
-- business. This function is your friend.
-collectBinders :: Expr b -> ([b], Expr b)
+collectBinders :: Expr b -> ([b], Expr b)
-- | Collect as many type bindings as possible from the front of a nested lambda
-collectTyBinders :: CoreExpr -> ([TyVar], CoreExpr)
+collectTyBinders :: CoreExpr -> ([TyVar], CoreExpr)
-- | Collect as many value bindings as possible from the front of a nested lambda
-collectValBinders :: CoreExpr -> ([Id], CoreExpr)
--- | Collect type binders from the front of the lambda first,
+collectValBinders :: CoreExpr -> ([Id], CoreExpr)
+-- | Collect type binders from the front of the lambda first,
-- then follow up by collecting as many value bindings as possible
-- from the resulting stripped expression
-collectTyAndValBinders :: CoreExpr -> ([TyVar], [Id], CoreExpr)
+collectTyAndValBinders :: CoreExpr -> ([TyVar], [Id], CoreExpr)
collectBinders expr
= go [] expr
where
go bs (Lam b e) = go (b:bs) e
- go bs e = (reverse bs, e)
+ go bs e = (reverse bs, e)
collectTyAndValBinders expr
= (tvs, ids, body)
@@ -1324,13 +1318,13 @@ collectTyBinders expr
= go [] expr
where
go tvs (Lam b e) | isTyVar b = go (b:tvs) e
- go tvs e = (reverse tvs, e)
+ go tvs e = (reverse tvs, e)
collectValBinders expr
= go [] expr
where
go ids (Lam b e) | isId b = go (b:ids) e
- go ids body = (reverse ids, body)
+ go ids body = (reverse ids, body)
\end{code}
\begin{code}
@@ -1341,24 +1335,24 @@ collectArgs expr
= go expr []
where
go (App f a) as = go f (a:as)
- go e as = (e, as)
+ go e as = (e, as)
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{Predicates}
-%* *
+%* *
%************************************************************************
At one time we optionally carried type arguments through to runtime.
@isRuntimeVar v@ returns if (Lam v _) really becomes a lambda at runtime,
i.e. if type applications are actual lambdas because types are kept around
-at runtime. Similarly isRuntimeArg.
+at runtime. Similarly isRuntimeArg.
\begin{code}
-- | Will this variable exist at runtime?
isRuntimeVar :: Var -> Bool
-isRuntimeVar = isId
+isRuntimeVar = isId
-- | Will this argument expression exist at runtime?
isRuntimeArg :: CoreExpr -> Bool
@@ -1394,9 +1388,9 @@ valArgCount = count isValArg
%************************************************************************
-%* *
+%* *
\subsection{Seq stuff}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -1442,15 +1436,15 @@ seqAlts ((c,bs,e):alts) = c `seq` seqBndrs bs `seq` seqExpr e `seq` seqAlts alts
seqRules :: [CoreRule] -> ()
seqRules [] = ()
-seqRules (Rule { ru_bndrs = bndrs, ru_args = args, ru_rhs = rhs } : rules)
+seqRules (Rule { ru_bndrs = bndrs, ru_args = args, ru_rhs = rhs } : rules)
= seqBndrs bndrs `seq` seqExprs (rhs:args) `seq` seqRules rules
seqRules (BuiltinRule {} : rules) = seqRules rules
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{Annotated core}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -1459,16 +1453,16 @@ type AnnExpr bndr annot = (annot, AnnExpr' bndr annot)
-- | A clone of the 'Expr' type but allowing annotation at every tree node
data AnnExpr' bndr annot
- = AnnVar Id
- | AnnLit Literal
- | AnnLam bndr (AnnExpr bndr annot)
- | AnnApp (AnnExpr bndr annot) (AnnExpr bndr annot)
- | AnnCase (AnnExpr bndr annot) bndr Type [AnnAlt bndr annot]
- | AnnLet (AnnBind bndr annot) (AnnExpr bndr annot)
+ = AnnVar Id
+ | AnnLit Literal
+ | AnnLam bndr (AnnExpr bndr annot)
+ | AnnApp (AnnExpr bndr annot) (AnnExpr bndr annot)
+ | AnnCase (AnnExpr bndr annot) bndr Type [AnnAlt bndr annot]
+ | AnnLet (AnnBind bndr annot) (AnnExpr bndr annot)
| AnnCast (AnnExpr bndr annot) (annot, Coercion)
- -- Put an annotation on the (root of) the coercion
+ -- Put an annotation on the (root of) the coercion
| AnnTick (Tickish Id) (AnnExpr bndr annot)
- | AnnType Type
+ | AnnType Type
| AnnCoercion Coercion
-- | A clone of the 'Alt' type but allowing annotation at every tree node
@@ -1488,7 +1482,7 @@ collectAnnArgs expr
= go expr []
where
go (_, AnnApp f a) as = go f (a:as)
- go e as = (e, as)
+ go e as = (e, as)
\end{code}
\begin{code}
@@ -1525,5 +1519,5 @@ collectAnnBndrs e
= collect [] e
where
collect bs (_, AnnLam b body) = collect (b:bs) body
- collect bs body = (reverse bs, body)
+ collect bs body = (reverse bs, body)
\end{code}
diff --git a/compiler/coreSyn/CoreUnfold.lhs b/compiler/coreSyn/CoreUnfold.lhs
index 3c9a1c8f15..fd485ae2b7 100644
--- a/compiler/coreSyn/CoreUnfold.lhs
+++ b/compiler/coreSyn/CoreUnfold.lhs
@@ -16,29 +16,23 @@ find, unsurprisingly, a Core expression.
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module CoreUnfold (
- Unfolding, UnfoldingGuidance, -- Abstract types
+ Unfolding, UnfoldingGuidance, -- Abstract types
- noUnfolding, mkImplicitUnfolding,
+ noUnfolding, mkImplicitUnfolding,
mkUnfolding, mkCoreUnfolding,
- mkTopUnfolding, mkSimpleUnfolding, mkWorkerUnfolding,
- mkInlineUnfolding, mkInlinableUnfolding, mkWwInlineRule,
- mkCompulsoryUnfolding, mkDFunUnfolding,
+ mkTopUnfolding, mkSimpleUnfolding, mkWorkerUnfolding,
+ mkInlineUnfolding, mkInlinableUnfolding, mkWwInlineRule,
+ mkCompulsoryUnfolding, mkDFunUnfolding,
specUnfolding,
- interestingArg, ArgSummary(..),
+ interestingArg, ArgSummary(..),
- couldBeSmallEnoughToInline, inlineBoringOk,
- certainlyWillInline, smallEnoughToInline,
+ couldBeSmallEnoughToInline, inlineBoringOk,
+ certainlyWillInline, smallEnoughToInline,
- callSiteInline, CallCtxt(..),
+ callSiteInline, CallCtxt(..),
-- Reexport from CoreSubst (it only live there so it can be used
-- by the Very Simple Optimiser)
@@ -49,7 +43,7 @@ module CoreUnfold (
import DynFlags
import CoreSyn
-import PprCore () -- Instances
+import PprCore () -- Instances
import OccurAnal ( occurAnalyseExpr )
import CoreSubst hiding( substTy )
import CoreArity ( manifestArity, exprBotStrictness_maybe )
@@ -59,7 +53,7 @@ import DataCon
import Literal
import PrimOp
import IdInfo
-import BasicTypes ( Arity )
+import BasicTypes ( Arity )
import Type
import PrelNames
import TysPrim ( realWorldStatePrimTy )
@@ -76,9 +70,9 @@ import Data.Maybe
%************************************************************************
-%* *
+%* *
\subsection{Making unfoldings}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -114,7 +108,7 @@ mkWwInlineRule expr arity
, ug_boring_ok = boringCxtNotOk })
mkCompulsoryUnfolding :: CoreExpr -> Unfolding
-mkCompulsoryUnfolding expr -- Used for things that absolutely must be unfolded
+mkCompulsoryUnfolding expr -- Used for things that absolutely must be unfolded
= mkCoreUnfolding InlineCompulsory True
(simpleOptExpr expr)
(UnfWhen { ug_arity = 0 -- Arity of unfolding doesn't matter
@@ -136,7 +130,7 @@ mkWorkerUnfolding _ _ _ = noUnfolding
mkInlineUnfolding :: Maybe Arity -> CoreExpr -> Unfolding
mkInlineUnfolding mb_arity expr
= mkCoreUnfolding InlineStable
- True -- Note [Top-level flag on inline rules]
+ True -- Note [Top-level flag on inline rules]
expr' guide
where
expr' = simpleOptExpr expr
@@ -227,15 +221,15 @@ mkCoreUnfolding :: UnfoldingSource -> Bool -> CoreExpr
-> UnfoldingGuidance -> Unfolding
-- Occurrence-analyses the expression before capturing it
mkCoreUnfolding src top_lvl expr guidance
- = CoreUnfolding { uf_tmpl = occurAnalyseExpr expr,
+ = CoreUnfolding { uf_tmpl = occurAnalyseExpr expr,
-- See Note [Occurrrence analysis of unfoldings]
- uf_src = src,
- uf_is_top = top_lvl,
- uf_is_value = exprIsHNF expr,
+ uf_src = src,
+ uf_is_top = top_lvl,
+ uf_is_value = exprIsHNF expr,
uf_is_conlike = exprIsConLike expr,
- uf_is_work_free = exprIsWorkFree expr,
- uf_expandable = exprIsExpandable expr,
- uf_guidance = guidance }
+ uf_is_work_free = exprIsWorkFree expr,
+ uf_expandable = exprIsExpandable expr,
+ uf_guidance = guidance }
mkUnfolding :: DynFlags -> UnfoldingSource -> Bool -> Bool -> CoreExpr
-> Unfolding
@@ -246,19 +240,19 @@ mkUnfolding dflags src top_lvl is_bottoming expr
, not (exprIsTrivial expr)
= NoUnfolding -- See Note [Do not inline top-level bottoming functions]
| otherwise
- = CoreUnfolding { uf_tmpl = occurAnalyseExpr expr,
+ = CoreUnfolding { uf_tmpl = occurAnalyseExpr expr,
-- See Note [Occurrrence analysis of unfoldings]
- uf_src = src,
- uf_is_top = top_lvl,
- uf_is_value = exprIsHNF expr,
+ uf_src = src,
+ uf_is_top = top_lvl,
+ uf_is_value = exprIsHNF expr,
uf_is_conlike = exprIsConLike expr,
- uf_expandable = exprIsExpandable expr,
- uf_is_work_free = exprIsWorkFree expr,
- uf_guidance = guidance }
+ uf_expandable = exprIsExpandable expr,
+ uf_is_work_free = exprIsWorkFree expr,
+ uf_guidance = guidance }
where
guidance = calcUnfoldingGuidance dflags expr
-- NB: *not* (calcUnfoldingGuidance (occurAnalyseExpr expr))!
- -- See Note [Calculate unfolding guidance on the non-occ-anal'd expression]
+ -- See Note [Calculate unfolding guidance on the non-occ-anal'd expression]
\end{code}
Note [Occurrence analysis of unfoldings]
@@ -289,13 +283,13 @@ let-bound thing which has been substituted, and so is now dead; so
expression doesn't.
Nevertheless, we *don't* and *must not* occ-analyse before computing
-the size because
+the size because
a) The size computation bales out after a while, whereas occurrence
analysis does not.
-b) Residency increases sharply if you occ-anal first. I'm not
- 100% sure why, but it's a large effect. Compiling Cabal went
+b) Residency increases sharply if you occ-anal first. I'm not
+ 100% sure why, but it's a large effect. Compiling Cabal went
from residency of 534M to over 800M with this one change.
This can occasionally mean that the guidance is very pessimistic;
@@ -304,15 +298,15 @@ let-bound things that are dead are usually caught by preInlineUnconditionally
%************************************************************************
-%* *
+%* *
\subsection{The UnfoldingGuidance type}
-%* *
+%* *
%************************************************************************
\begin{code}
inlineBoringOk :: CoreExpr -> Bool
-- See Note [INLINE for small functions]
--- True => the result of inlining the expression is
+-- True => the result of inlining the expression is
-- no bigger than the expression itself
-- eg (\x y -> f y x)
-- This is a quick and dirty version. It doesn't attempt
@@ -325,12 +319,12 @@ inlineBoringOk e
go credit (Lam x e) | isId x = go (credit+1) e
| otherwise = go credit e
go credit (App f (Type {})) = go credit f
- go credit (App f a) | credit > 0
+ go credit (App f a) | credit > 0
, exprIsTrivial a = go (credit-1) f
go credit (Tick _ e) = go credit e -- dubious
- go credit (Cast e _) = go credit e
- go _ (Var {}) = boringCxtOk
- go _ _ = boringCxtNotOk
+ go credit (Cast e _) = go credit e
+ go _ (Var {}) = boringCxtOk
+ go _ _ = boringCxtNotOk
calcUnfoldingGuidance
:: DynFlags
@@ -347,7 +341,7 @@ calcUnfoldingGuidance dflags expr
| otherwise
-> UnfIfGoodArgs { ug_args = map (mk_discount cased_bndrs) val_bndrs
, ug_size = iBox size
- , ug_res = iBox scrut_discount }
+ , ug_res = iBox scrut_discount }
where
(bndrs, body) = collectBinders expr
@@ -387,17 +381,17 @@ heuristics right has taken a long time. Here's the basic strategy:
Examples
- Size Term
+ Size Term
--------------
- 0 42#
- 0 x
+ 0 42#
+ 0 x
0 True
- 2 f x
- 1 Just x
- 4 f (g x)
+ 2 f x
+ 1 Just x
+ 4 f (g x)
Notice that 'x' counts 0, while (f x) counts 2. That's deliberate: there's
-a function call to account for. Notice also that constructor applications
+a function call to account for. Notice also that constructor applications
are very cheap, because exposing them to a caller is so valuable.
[25/5/11] All sizes are now multiplied by 10, except for primops
@@ -407,14 +401,14 @@ result of #4978.
Note [Do not inline top-level bottoming functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The FloatOut pass has gone to some trouble to float out calls to 'error'
+The FloatOut pass has gone to some trouble to float out calls to 'error'
and similar friends. See Note [Bottoming floats] in SetLevels.
Do not re-inline them! But we *do* still inline if they are very small
(the uncondInline stuff).
Note [INLINE for small functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Consider {-# INLINE f #-}
+Consider {-# INLINE f #-}
f x = Just x
g y = f y
Then f's RHS is no larger than its LHS, so we should inline it into
@@ -426,11 +420,11 @@ Things to note:
(1) We inline *unconditionally* if inlined thing is smaller (using sizeExpr)
than the thing it's replacing. Notice that
- (f x) --> (g 3) -- YES, unconditionally
- (f x) --> x : [] -- YES, *even though* there are two
- -- arguments to the cons
- x --> g 3 -- NO
- x --> Just v -- NO
+ (f x) --> (g 3) -- YES, unconditionally
+ (f x) --> x : [] -- YES, *even though* there are two
+ -- arguments to the cons
+ x --> g 3 -- NO
+ x --> Just v -- NO
It's very important not to unconditionally replace a variable by
a non-atomic term.
@@ -469,7 +463,7 @@ uncondInline :: CoreExpr -> Arity -> Int -> Bool
-- Inline unconditionally if there no size increase
-- Size of call is arity (+1 for the function)
-- See Note [INLINE for small functions]
-uncondInline rhs arity size
+uncondInline rhs arity size
| arity > 0 = size <= 10 * (arity + 1) -- See Note [INLINE for small functions] (1)
| otherwise = exprIsTrivial rhs -- See Note [INLINE for small functions] (4)
\end{code}
@@ -477,11 +471,11 @@ uncondInline rhs arity size
\begin{code}
sizeExpr :: DynFlags
- -> FastInt -- Bomb out if it gets bigger than this
- -> [Id] -- Arguments; we're interested in which of these
- -- get case'd
- -> CoreExpr
- -> ExprSize
+ -> FastInt -- Bomb out if it gets bigger than this
+ -> [Id] -- Arguments; we're interested in which of these
+ -- get case'd
+ -> CoreExpr
+ -> ExprSize
-- Note [Computing the size of an expression]
@@ -508,40 +502,40 @@ sizeExpr dflags bOMB_OUT_SIZE top_args expr
| otherwise = size_up e
size_up (Let (NonRec binder rhs) body)
- = size_up rhs `addSizeNSD`
- size_up body `addSizeN`
+ = size_up rhs `addSizeNSD`
+ size_up body `addSizeN`
(if isUnLiftedType (idType binder) then 0 else 10)
- -- For the allocation
- -- If the binder has an unlifted type there is no allocation
+ -- For the allocation
+ -- If the binder has an unlifted type there is no allocation
size_up (Let (Rec pairs) body)
- = foldr (addSizeNSD . size_up . snd)
+ = foldr (addSizeNSD . size_up . snd)
(size_up body `addSizeN` (10 * length pairs)) -- (length pairs) for the allocation
pairs
- size_up (Case (Var v) _ _ alts)
- | v `elem` top_args -- We are scrutinising an argument variable
- = alts_size (foldr addAltSize sizeZero alt_sizes)
- (foldr maxSize sizeZero alt_sizes)
- -- Good to inline if an arg is scrutinised, because
- -- that may eliminate allocation in the caller
- -- And it eliminates the case itself
- where
- alt_sizes = map size_up_alt alts
-
- -- alts_size tries to compute a good discount for
- -- the case when we are scrutinising an argument variable
- alts_size (SizeIs tot tot_disc tot_scrut) -- Size of all alternatives
- (SizeIs max _ _) -- Size of biggest alternative
+ size_up (Case (Var v) _ _ alts)
+ | v `elem` top_args -- We are scrutinising an argument variable
+ = alts_size (foldr addAltSize sizeZero alt_sizes)
+ (foldr maxSize sizeZero alt_sizes)
+ -- Good to inline if an arg is scrutinised, because
+ -- that may eliminate allocation in the caller
+ -- And it eliminates the case itself
+ where
+ alt_sizes = map size_up_alt alts
+
+ -- alts_size tries to compute a good discount for
+ -- the case when we are scrutinising an argument variable
+ alts_size (SizeIs tot tot_disc tot_scrut) -- Size of all alternatives
+ (SizeIs max _ _) -- Size of biggest alternative
= SizeIs tot (unitBag (v, iBox (_ILIT(20) +# tot -# max)) `unionBags` tot_disc) tot_scrut
- -- If the variable is known, we produce a discount that
- -- will take us back to 'max', the size of the largest alternative
- -- The 1+ is a little discount for reduced allocation in the caller
- --
- -- Notice though, that we return tot_disc, the total discount from
- -- all branches. I think that's right.
+ -- If the variable is known, we produce a discount that
+ -- will take us back to 'max', the size of the largest alternative
+ -- The 1+ is a little discount for reduced allocation in the caller
+ --
+ -- Notice though, that we return tot_disc, the total discount from
+ -- all branches. I think that's right.
- alts_size tot_size _ = tot_size
+ alts_size tot_size _ = tot_size
size_up (Case e _ _ alts) = size_up e `addSizeNSD`
foldr (addAltSize . size_up_alt) case_size alts
@@ -579,56 +573,56 @@ sizeExpr dflags bOMB_OUT_SIZE top_args expr
| otherwise
= False
- ------------
+ ------------
-- size_up_app is used when there's ONE OR MORE value args
size_up_app (App fun arg) args voids
- | isTyCoArg arg = size_up_app fun args voids
- | isRealWorldExpr arg = size_up_app fun (arg:args) (voids + 1)
- | otherwise = size_up arg `addSizeNSD`
+ | isTyCoArg arg = size_up_app fun args voids
+ | isRealWorldExpr arg = size_up_app fun (arg:args) (voids + 1)
+ | otherwise = size_up arg `addSizeNSD`
size_up_app fun (arg:args) voids
size_up_app (Var fun) args voids = size_up_call fun args voids
size_up_app other args voids = size_up other `addSizeN` (length args - voids)
- ------------
+ ------------
size_up_call :: Id -> [CoreExpr] -> Int -> ExprSize
size_up_call fun val_args voids
= case idDetails fun of
FCallId _ -> sizeN (10 * (1 + length val_args))
DataConWorkId dc -> conSize dc (length val_args)
PrimOpId op -> primOpSize op (length val_args)
- ClassOpId _ -> classOpSize dflags top_args val_args
- _ -> funSize dflags top_args fun (length val_args) voids
+ ClassOpId _ -> classOpSize dflags top_args val_args
+ _ -> funSize dflags top_args fun (length val_args) voids
- ------------
+ ------------
size_up_alt (_con, _bndrs, rhs) = size_up rhs `addSizeN` 10
- -- Don't charge for args, so that wrappers look cheap
- -- (See comments about wrappers with Case)
- --
- -- IMPORATANT: *do* charge 1 for the alternative, else we
- -- find that giant case nests are treated as practically free
- -- A good example is Foreign.C.Error.errrnoToIOError
+ -- Don't charge for args, so that wrappers look cheap
+ -- (See comments about wrappers with Case)
+ --
+ -- IMPORATANT: *do* charge 1 for the alternative, else we
+ -- find that giant case nests are treated as practically free
+ -- A good example is Foreign.C.Error.errrnoToIOError
------------
- -- These addSize things have to be here because
- -- I don't want to give them bOMB_OUT_SIZE as an argument
+ -- These addSize things have to be here because
+ -- I don't want to give them bOMB_OUT_SIZE as an argument
addSizeN TooBig _ = TooBig
- addSizeN (SizeIs n xs d) m = mkSizeIs bOMB_OUT_SIZE (n +# iUnbox m) xs d
-
+ addSizeN (SizeIs n xs d) m = mkSizeIs bOMB_OUT_SIZE (n +# iUnbox m) xs d
+
-- addAltSize is used to add the sizes of case alternatives
- addAltSize TooBig _ = TooBig
- addAltSize _ TooBig = TooBig
- addAltSize (SizeIs n1 xs d1) (SizeIs n2 ys d2)
- = mkSizeIs bOMB_OUT_SIZE (n1 +# n2)
- (xs `unionBags` ys)
+ addAltSize TooBig _ = TooBig
+ addAltSize _ TooBig = TooBig
+ addAltSize (SizeIs n1 xs d1) (SizeIs n2 ys d2)
+ = mkSizeIs bOMB_OUT_SIZE (n1 +# n2)
+ (xs `unionBags` ys)
(d1 +# d2) -- Note [addAltSize result discounts]
-- This variant ignores the result discount from its LEFT argument
- -- It's used when the second argument isn't part of the result
- addSizeNSD TooBig _ = TooBig
- addSizeNSD _ TooBig = TooBig
- addSizeNSD (SizeIs n1 xs _) (SizeIs n2 ys d2)
- = mkSizeIs bOMB_OUT_SIZE (n1 +# n2)
- (xs `unionBags` ys)
+ -- It's used when the second argument isn't part of the result
+ addSizeNSD TooBig _ = TooBig
+ addSizeNSD _ TooBig = TooBig
+ addSizeNSD (SizeIs n1 xs _) (SizeIs n2 ys d2)
+ = mkSizeIs bOMB_OUT_SIZE (n1 +# n2)
+ (xs `unionBags` ys)
d2 -- Ignore d1
isRealWorldId id = idType id `eqType` realWorldStatePrimTy
@@ -643,14 +637,14 @@ sizeExpr dflags bOMB_OUT_SIZE top_args expr
-- | Finds a nominal size of a string literal.
litSize :: Literal -> Int
-- Used by CoreUnfold.sizeExpr
-litSize (LitInteger {}) = 100 -- Note [Size of literal integers]
+litSize (LitInteger {}) = 100 -- Note [Size of literal integers]
litSize (MachStr str) = 10 + 10 * ((BS.length str + 3) `div` 4)
- -- If size could be 0 then @f "x"@ might be too small
- -- [Sept03: make literal strings a bit bigger to avoid fruitless
- -- duplication of little strings]
+ -- If size could be 0 then @f "x"@ might be too small
+ -- [Sept03: make literal strings a bit bigger to avoid fruitless
+ -- duplication of little strings]
litSize _other = 0 -- Must match size of nullary constructors
- -- Key point: if x |-> 4, then x must inline unconditionally
- -- (eg via case binding)
+ -- Key point: if x |-> 4, then x must inline unconditionally
+ -- (eg via case binding)
classOpSize :: DynFlags -> [Id] -> [CoreExpr] -> ExprSize
-- See Note [Conlike is interesting]
@@ -664,10 +658,10 @@ classOpSize dflags top_args (arg1 : other_args)
-- give it a discount, to encourage the inlining of this function
-- The actual discount is rather arbitrarily chosen
arg_discount = case arg1 of
- Var dict | dict `elem` top_args
- -> unitBag (dict, ufDictDiscount dflags)
- _other -> emptyBag
-
+ Var dict | dict `elem` top_args
+ -> unitBag (dict, ufDictDiscount dflags)
+ _other -> emptyBag
+
funSize :: DynFlags -> [Id] -> Id -> Int -> Int -> ExprSize
-- Size for functions that are not constructors or primops
-- Note [Function applications]
@@ -680,20 +674,20 @@ funSize dflags top_args fun n_val_args voids
size | some_val_args = 10 * (1 + n_val_args - voids)
| otherwise = 0
- -- The 1+ is for the function itself
- -- Add 1 for each non-trivial arg;
- -- the allocation cost, as in let(rec)
-
+ -- The 1+ is for the function itself
+ -- Add 1 for each non-trivial arg;
+ -- the allocation cost, as in let(rec)
+
-- DISCOUNTS
-- See Note [Function and non-function discounts]
arg_discount | some_val_args && fun `elem` top_args
- = unitBag (fun, ufFunAppDiscount dflags)
- | otherwise = emptyBag
- -- If the function is an argument and is applied
- -- to some values, give it an arg-discount
+ = unitBag (fun, ufFunAppDiscount dflags)
+ | otherwise = emptyBag
+ -- If the function is an argument and is applied
+ -- to some values, give it an arg-discount
res_discount | idArity fun > n_val_args = ufFunAppDiscount dflags
- | otherwise = 0
+ | otherwise = 0
-- If the function is partially applied, show a result discount
conSize :: DataCon -> Int -> ExprSize
@@ -722,7 +716,7 @@ charge it to the function. So the discount should at least match the
cost of the constructor application, namely 10. But to give a bit
of extra incentive we give a discount of 10*(1 + n_val_args).
-Simon M tried a MUCH bigger discount: (10 * (10 + n_val_args)),
+Simon M tried a MUCH bigger discount: (10 * (10 + n_val_args)),
and said it was an "unambiguous win", but its terribly dangerous
because a fuction with many many case branches, each finishing with
a constructor, can have an arbitrarily large discount. This led to
@@ -730,8 +724,8 @@ terrible code bloat: see Trac #6099.
Note [Unboxed tuple size and result discount]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-However, unboxed tuples count as size zero. I found occasions where we had
- f x y z = case op# x y z of { s -> (# s, () #) }
+However, unboxed tuples count as size zero. I found occasions where we had
+ f x y z = case op# x y z of { s -> (# s, () #) }
and f wasn't getting inlined.
I tried giving unboxed tuples a *result discount* of zero (see the
@@ -752,7 +746,7 @@ monadic combinators with continuation arguments, where inlining is
quite important.
But we don't want a big discount when a function is called many times
-(see the detailed comments with Trac #6048) because if the function is
+(see the detailed comments with Trac #6048) because if the function is
big it won't be inlined at its many call sites and no benefit results.
Indeed, we can get exponentially big inlinings this way; that is what
Trac #6048 is about.
@@ -790,17 +784,17 @@ primOpSize op n_val_args
buildSize :: ExprSize
buildSize = SizeIs (_ILIT(0)) emptyBag (_ILIT(40))
- -- We really want to inline applications of build
- -- build t (\cn -> e) should cost only the cost of e (because build will be inlined later)
- -- Indeed, we should add a result_discount becuause build is
- -- very like a constructor. We don't bother to check that the
- -- build is saturated (it usually is). The "-2" discounts for the \c n,
- -- The "4" is rather arbitrary.
+ -- We really want to inline applications of build
+ -- build t (\cn -> e) should cost only the cost of e (because build will be inlined later)
+ -- Indeed, we should add a result_discount becuause build is
+ -- very like a constructor. We don't bother to check that the
+ -- build is saturated (it usually is). The "-2" discounts for the \c n,
+ -- The "4" is rather arbitrary.
augmentSize :: ExprSize
augmentSize = SizeIs (_ILIT(0)) emptyBag (_ILIT(40))
- -- Ditto (augment t (\cn -> e) ys) should cost only the cost of
- -- e plus ys. The -2 accounts for the \cn
+ -- Ditto (augment t (\cn -> e) ys) should cost only the cost of
+ -- e plus ys. The -2 accounts for the \cn
-- When we return a lambda, give a discount if it's used (applied)
lamScrutDiscount :: DynFlags -> ExprSize -> ExprSize
@@ -813,7 +807,7 @@ Note [addAltSize result discounts]
When adding the size of alternatives, we *add* the result discounts
too, rather than take the *maximum*. For a multi-branch case, this
gives a discount for each branch that returns a constructor, making us
-keener to inline. I did try using 'max' instead, but it makes nofib
+keener to inline. I did try using 'max' instead, but it makes nofib
'rewrite' and 'puzzle' allocate significantly more, and didn't make
binary sizes shrink significantly either.
@@ -831,7 +825,7 @@ ufUseThreshold
this, then it's small enough inline
ufKeenessFactor
- Factor by which the discounts are multiplied before
+ Factor by which the discounts are multiplied before
subtracting from size
ufDictDiscount
@@ -851,22 +845,22 @@ Note [Function applications]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In a function application (f a b)
- - If 'f' is an argument to the function being analysed,
+ - If 'f' is an argument to the function being analysed,
and there's at least one value arg, record a FunAppDiscount for f
- If the application if a PAP (arity > 2 in this example)
record a *result* discount (because inlining
- with "extra" args in the call may mean that we now
+ with "extra" args in the call may mean that we now
get a saturated application)
Code for manipulating sizes
\begin{code}
data ExprSize = TooBig
- | SizeIs FastInt -- Size found
- !(Bag (Id,Int)) -- Arguments cased herein, and discount for each such
- FastInt -- Size to subtract if result is scrutinised
- -- by a case expression
+ | SizeIs FastInt -- Size found
+ !(Bag (Id,Int)) -- Arguments cased herein, and discount for each such
+ FastInt -- Size to subtract if result is scrutinised
+ -- by a case expression
instance Outputable ExprSize where
ppr TooBig = ptext (sLit "TooBig")
@@ -874,18 +868,18 @@ instance Outputable ExprSize where
-- subtract the discount before deciding whether to bale out. eg. we
-- want to inline a large constructor application into a selector:
--- tup = (a_1, ..., a_99)
--- x = case tup of ...
+-- tup = (a_1, ..., a_99)
+-- x = case tup of ...
--
mkSizeIs :: FastInt -> FastInt -> Bag (Id, Int) -> FastInt -> ExprSize
mkSizeIs max n xs d | (n -# d) ># max = TooBig
- | otherwise = SizeIs n xs d
-
+ | otherwise = SizeIs n xs d
+
maxSize :: ExprSize -> ExprSize -> ExprSize
-maxSize TooBig _ = TooBig
-maxSize _ TooBig = TooBig
+maxSize TooBig _ = TooBig
+maxSize _ TooBig = TooBig
maxSize s1@(SizeIs n1 _ _) s2@(SizeIs n2 _ _) | n1 ># n2 = s1
- | otherwise = s2
+ | otherwise = s2
sizeZero :: ExprSize
sizeN :: Int -> ExprSize
@@ -896,9 +890,9 @@ sizeN n = SizeIs (iUnbox n) emptyBag (_ILIT(0))
%************************************************************************
-%* *
+%* *
\subsection[considerUnfolding]{Given all the info, do (not) do the unfolding}
-%* *
+%* *
%************************************************************************
We use 'couldBeSmallEnoughToInline' to avoid exporting inlinings that
@@ -908,7 +902,7 @@ actual arguments.
\begin{code}
couldBeSmallEnoughToInline :: DynFlags -> Int -> CoreExpr -> Bool
-couldBeSmallEnoughToInline dflags threshold rhs
+couldBeSmallEnoughToInline dflags threshold rhs
= case sizeExpr dflags (iUnbox threshold) [] body of
TooBig -> False
_ -> True
@@ -962,13 +956,13 @@ duplication. Even if the work duplication is not great (eg is_cheap
holds), it can make a big difference in an inner loop In Trac #5623 we
found that the WorkWrap phase thought that
y = case x of F# v -> F# (v +# v)
-was certainlyWillInline, so the addition got duplicated.
+was certainlyWillInline, so the addition got duplicated.
%************************************************************************
-%* *
+%* *
\subsection{callSiteInline}
-%* *
+%* *
%************************************************************************
This is the key function. It decides whether to inline a variable at a call site
@@ -976,25 +970,25 @@ This is the key function. It decides whether to inline a variable at a call sit
callSiteInline is used at call sites, so it is a bit more generous.
It's a very important function that embodies lots of heuristics.
A non-WHNF can be inlined if it doesn't occur inside a lambda,
-and occurs exactly once or
+and occurs exactly once or
occurs once in each branch of a case and is small
-If the thing is in WHNF, there's no danger of duplicating work,
+If the thing is in WHNF, there's no danger of duplicating work,
so we can inline if it occurs once, or is small
NOTE: we don't want to inline top-level functions that always diverge.
It just makes the code bigger. Tt turns out that the convenient way to prevent
-them inlining is to give them a NOINLINE pragma, which we do in
+them inlining is to give them a NOINLINE pragma, which we do in
StrictAnal.addStrictnessInfoToTopId
\begin{code}
callSiteInline :: DynFlags
- -> Id -- The Id
- -> Bool -- True <=> unfolding is active
- -> Bool -- True if there are are no arguments at all (incl type args)
- -> [ArgSummary] -- One for each value arg; True if it is interesting
- -> CallCtxt -- True <=> continuation is interesting
- -> Maybe CoreExpr -- Unfolding, if any
+ -> Id -- The Id
+ -> Bool -- True <=> unfolding is active
+ -> Bool -- True if there are are no arguments at all (incl type args)
+ -> [ArgSummary] -- One for each value arg; True if it is interesting
+ -> CallCtxt -- True <=> continuation is interesting
+ -> Maybe CoreExpr -- Unfolding, if any
instance Outputable ArgSummary where
ppr TrivArg = ptext (sLit "TrivArg")
@@ -1005,17 +999,17 @@ data CallCtxt
= BoringCtxt
| RhsCtxt -- Rhs of a let-binding; see Note [RHS of lets]
| DiscArgCtxt -- Argument of a fuction with non-zero arg discount
- | RuleArgCtxt -- We are somewhere in the argument of a function with rules
+ | RuleArgCtxt -- We are somewhere in the argument of a function with rules
- | ValAppCtxt -- We're applied to at least one value arg
- -- This arises when we have ((f x |> co) y)
- -- Then the (f x) has argument 'x' but in a ValAppCtxt
+ | ValAppCtxt -- We're applied to at least one value arg
+ -- This arises when we have ((f x |> co) y)
+ -- Then the (f x) has argument 'x' but in a ValAppCtxt
- | CaseCtxt -- We're the scrutinee of a case
- -- that decomposes its scrutinee
+ | CaseCtxt -- We're the scrutinee of a case
+ -- that decomposes its scrutinee
instance Outputable CallCtxt where
- ppr CaseCtxt = ptext (sLit "CaseCtxt")
+ ppr CaseCtxt = ptext (sLit "CaseCtxt")
ppr ValAppCtxt = ptext (sLit "ValAppCtxt")
ppr BoringCtxt = ptext (sLit "BoringCtxt")
ppr RhsCtxt = ptext (sLit "RhsCtxt")
@@ -1023,20 +1017,20 @@ instance Outputable CallCtxt where
ppr RuleArgCtxt = ptext (sLit "RuleArgCtxt")
callSiteInline dflags id active_unfolding lone_variable arg_infos cont_info
- = case idUnfolding id of
+ = case idUnfolding id of
-- idUnfolding checks for loop-breakers, returning NoUnfolding
- -- Things with an INLINE pragma may have an unfolding *and*
+ -- Things with an INLINE pragma may have an unfolding *and*
-- be a loop breaker (maybe the knot is not yet untied)
- CoreUnfolding { uf_tmpl = unf_template, uf_is_top = is_top
- , uf_is_work_free = is_wf
+ CoreUnfolding { uf_tmpl = unf_template, uf_is_top = is_top
+ , uf_is_work_free = is_wf
, uf_guidance = guidance, uf_expandable = is_exp }
- | active_unfolding -> tryUnfolding dflags id lone_variable
- arg_infos cont_info unf_template is_top
+ | active_unfolding -> tryUnfolding dflags id lone_variable
+ arg_infos cont_info unf_template is_top
is_wf is_exp guidance
| otherwise -> traceInline dflags "Inactive unfolding:" (ppr id) Nothing
- NoUnfolding -> Nothing
- OtherCon {} -> Nothing
- DFunUnfolding {} -> Nothing -- Never unfold a DFun
+ NoUnfolding -> Nothing
+ OtherCon {} -> Nothing
+ DFunUnfolding {} -> Nothing -- Never unfold a DFun
traceInline :: DynFlags -> String -> SDoc -> a -> a
traceInline dflags str doc result
@@ -1047,7 +1041,7 @@ traceInline dflags str doc result
tryUnfolding :: DynFlags -> Id -> Bool -> [ArgSummary] -> CallCtxt
-> CoreExpr -> Bool -> Bool -> Bool -> UnfoldingGuidance
- -> Maybe CoreExpr
+ -> Maybe CoreExpr
tryUnfolding dflags id lone_variable
arg_infos cont_info unf_template is_top
is_wf is_exp guidance
@@ -1080,7 +1074,7 @@ tryUnfolding dflags id lone_variable
where
mk_doc some_benefit extra_doc yes_or_no
= vcat [ text "arg infos" <+> ppr arg_infos
- , text "interesting continuation" <+> ppr cont_info
+ , text "interesting continuation" <+> ppr cont_info
, text "some_benefit" <+> ppr some_benefit
, text "is exp:" <+> ppr is_exp
, text "is work-free:" <+> ppr is_wf
@@ -1099,17 +1093,17 @@ tryUnfolding dflags id lone_variable
calc_some_benefit :: Arity -> Bool -- The Arity is the number of args
-- expected by the unfolding
calc_some_benefit uf_arity
- | not saturated = interesting_args -- Under-saturated
- -- Note [Unsaturated applications]
- | otherwise = interesting_args -- Saturated or over-saturated
+ | not saturated = interesting_args -- Under-saturated
+ -- Note [Unsaturated applications]
+ | otherwise = interesting_args -- Saturated or over-saturated
|| interesting_call
where
saturated = n_val_args >= uf_arity
over_saturated = n_val_args > uf_arity
interesting_args = any nonTriv arg_infos
- -- NB: (any nonTriv arg_infos) looks at the
- -- over-saturated args too which is "wrong";
- -- but if over-saturated we inline anyway.
+ -- NB: (any nonTriv arg_infos) looks at the
+ -- over-saturated args too which is "wrong";
+ -- but if over-saturated we inline anyway.
interesting_call
| over_saturated
@@ -1117,7 +1111,7 @@ tryUnfolding dflags id lone_variable
| otherwise
= case cont_info of
CaseCtxt -> not (lone_variable && is_wf) -- Note [Lone variables]
- ValAppCtxt -> True -- Note [Cast then apply]
+ ValAppCtxt -> True -- Note [Cast then apply]
RuleArgCtxt -> uf_arity > 0 -- See Note [Unfold info lazy contexts]
DiscArgCtxt -> uf_arity > 0 --
RhsCtxt -> uf_arity > 0 --
@@ -1147,9 +1141,9 @@ A good example is the Ord instance for Bool in Base:
Rec {
$fOrdBool =GHC.Classes.D:Ord
- @ Bool
- ...
- $cmin_ajX
+ @ Bool
+ ...
+ $cmin_ajX
$cmin_ajX [Occ=LoopBreaker] :: Bool -> Bool -> Bool
$cmin_ajX = GHC.Classes.$dmmin @ Bool $fOrdBool
@@ -1171,11 +1165,11 @@ Note [Things to watch]
~~~~~~~~~~~~~~~~~~~~~~
* { y = I# 3; x = y `cast` co; ...case (x `cast` co) of ... }
Assume x is exported, so not inlined unconditionally.
- Then we want x to inline unconditionally; no reason for it
+ Then we want x to inline unconditionally; no reason for it
not to, and doing so avoids an indirection.
* { x = I# 3; ....f x.... }
- Make sure that x does not inline unconditionally!
+ Make sure that x does not inline unconditionally!
Lest we get extra allocation.
Note [Inlining an InlineRule]
@@ -1188,7 +1182,7 @@ For (a) the RHS may be large, and our contract is that we *only* inline
when the function is applied to all the arguments on the LHS of the
source-code defn. (The uf_arity in the rule.)
-However for worker/wrapper it may be worth inlining even if the
+However for worker/wrapper it may be worth inlining even if the
arity is not satisfied (as we do in the CoreUnfolding case) so we don't
require saturation.
@@ -1224,44 +1218,44 @@ we end up inlining top-level stuff into useless places; eg
This can make a very big difference: it adds 16% to nofib 'integer' allocs,
and 20% to 'power'.
-At one stage I replaced this condition by 'True' (leading to the above
+At one stage I replaced this condition by 'True' (leading to the above
slow-down). The motivation was test eyeball/inline1.hs; but that seems
to work ok now.
NOTE: arguably, we should inline in ArgCtxt only if the result of the
call is at least CONLIKE. At least for the cases where we use ArgCtxt
-for the RHS of a 'let', we only profit from the inlining if we get a
+for the RHS of a 'let', we only profit from the inlining if we get a
CONLIKE thing (modulo lets).
-Note [Lone variables] See also Note [Interaction of exprIsWorkFree and lone variables]
+Note [Lone variables] See also Note [Interaction of exprIsWorkFree and lone variables]
~~~~~~~~~~~~~~~~~~~~~ which appears below
The "lone-variable" case is important. I spent ages messing about
with unsatisfactory varaints, but this is nice. The idea is that if a
variable appears all alone
- as an arg of lazy fn, or rhs BoringCtxt
- as scrutinee of a case CaseCtxt
- as arg of a fn ArgCtxt
+ as an arg of lazy fn, or rhs BoringCtxt
+ as scrutinee of a case CaseCtxt
+ as arg of a fn ArgCtxt
AND
- it is bound to a cheap expression
+ it is bound to a cheap expression
then we should not inline it (unless there is some other reason,
-e.g. is is the sole occurrence). That is what is happening at
+e.g. is is the sole occurrence). That is what is happening at
the use of 'lone_variable' in 'interesting_call'.
Why? At least in the case-scrutinee situation, turning
- let x = (a,b) in case x of y -> ...
+ let x = (a,b) in case x of y -> ...
into
- let x = (a,b) in case (a,b) of y -> ...
-and thence to
- let x = (a,b) in let y = (a,b) in ...
+ let x = (a,b) in case (a,b) of y -> ...
+and thence to
+ let x = (a,b) in let y = (a,b) in ...
is bad if the binding for x will remain.
Another example: I discovered that strings
were getting inlined straight back into applications of 'error'
because the latter is strict.
- s = "foo"
- f = \x -> ...(error s)...
+ s = "foo"
+ f = \x -> ...(error s)...
Fundamentally such contexts should not encourage inlining because the
context can ``see'' the unfolding of the variable (e.g. case or a
@@ -1270,13 +1264,13 @@ RULE) so there's no gain. If the thing is bound to a value.
However, watch out:
* Consider this:
- foo = _inline_ (\n. [n])
- bar = _inline_ (foo 20)
- baz = \n. case bar of { (m:_) -> m + n }
+ foo = _inline_ (\n. [n])
+ bar = _inline_ (foo 20)
+ baz = \n. case bar of { (m:_) -> m + n }
Here we really want to inline 'bar' so that we can inline 'foo'
- and the whole thing unravels as it should obviously do. This is
+ and the whole thing unravels as it should obviously do. This is
important: in the NDP project, 'bar' generates a closure data
- structure rather than a list.
+ structure rather than a list.
So the non-inlining of lone_variables should only apply if the
unfolding is regarded as cheap; because that is when exprIsConApp_maybe
@@ -1285,24 +1279,24 @@ However, watch out:
* Even a type application or coercion isn't a lone variable.
Consider
- case $fMonadST @ RealWorld of { :DMonad a b c -> c }
+ case $fMonadST @ RealWorld of { :DMonad a b c -> c }
We had better inline that sucker! The case won't see through it.
- For now, I'm treating treating a variable applied to types
+ For now, I'm treating treating a variable applied to types
in a *lazy* context "lone". The motivating example was
- f = /\a. \x. BIG
- g = /\a. \y. h (f a)
+ f = /\a. \x. BIG
+ g = /\a. \y. h (f a)
There's no advantage in inlining f here, and perhaps
a significant disadvantage. Hence some_val_args in the Stop case
Note [Interaction of exprIsWorkFree and lone variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The lone-variable test says "don't inline if a case expression
-scrutines a lone variable whose unfolding is cheap". It's very
+scrutines a lone variable whose unfolding is cheap". It's very
important that, under these circumstances, exprIsConApp_maybe
can spot a constructor application. So, for example, we don't
consider
- let x = e in (x,x)
+ let x = e in (x,x)
to be cheap, and that's good because exprIsConApp_maybe doesn't
think that expression is a constructor application.
@@ -1312,8 +1306,8 @@ expression responds True to exprIsHNF, which is what sets is_value.
This kind of thing can occur if you have
- {-# INLINE foo #-}
- foo = let x = e in (x,x)
+ {-# INLINE foo #-}
+ foo = let x = e in (x,x)
which Roman did.
@@ -1321,26 +1315,26 @@ which Roman did.
computeDiscount :: DynFlags -> [Int] -> Int -> [ArgSummary] -> CallCtxt
-> Int
computeDiscount dflags arg_discounts res_discount arg_infos cont_info
- -- We multiple the raw discounts (args_discount and result_discount)
- -- ty opt_UnfoldingKeenessFactor because the former have to do with
- -- *size* whereas the discounts imply that there's some extra
- -- *efficiency* to be gained (e.g. beta reductions, case reductions)
- -- by inlining.
+ -- We multiple the raw discounts (args_discount and result_discount)
+ -- ty opt_UnfoldingKeenessFactor because the former have to do with
+ -- *size* whereas the discounts imply that there's some extra
+ -- *efficiency* to be gained (e.g. beta reductions, case reductions)
+ -- by inlining.
= 10 -- Discount of 10 because the result replaces the call
- -- so we count 10 for the function itself
+ -- so we count 10 for the function itself
+ 10 * length actual_arg_discounts
- -- Discount of 10 for each arg supplied,
- -- because the result replaces the call
+ -- Discount of 10 for each arg supplied,
+ -- because the result replaces the call
+ round (ufKeenessFactor dflags *
- fromIntegral (total_arg_discount + res_discount'))
+ fromIntegral (total_arg_discount + res_discount'))
where
actual_arg_discounts = zipWith mk_arg_discount arg_discounts arg_infos
total_arg_discount = sum actual_arg_discounts
- mk_arg_discount _ TrivArg = 0
+ mk_arg_discount _ TrivArg = 0
mk_arg_discount _ NonTrivArg = 10
mk_arg_discount discount ValueArg = discount
@@ -1349,10 +1343,10 @@ computeDiscount dflags arg_discounts res_discount arg_infos cont_info
= res_discount -- Over-saturated
| otherwise
= case cont_info of
- BoringCtxt -> 0
- CaseCtxt -> res_discount -- Presumably a constructor
- ValAppCtxt -> res_discount -- Presumably a function
- _ -> 40 `min` res_discount
+ BoringCtxt -> 0
+ CaseCtxt -> res_discount -- Presumably a constructor
+ ValAppCtxt -> res_discount -- Presumably a function
+ _ -> 40 `min` res_discount
-- ToDo: this 40 `min` res_discount doesn't seem right
-- for DiscArgCtxt it shouldn't matter because the function will
-- get the arg discount for any non-triv arg
@@ -1361,18 +1355,18 @@ computeDiscount dflags arg_discounts res_discount arg_infos cont_info
-- for RhsCtxt I suppose that exposing a data con is good in general
-- And 40 seems very arbitrary
--
- -- res_discount can be very large when a function returns
- -- constructors; but we only want to invoke that large discount
- -- when there's a case continuation.
- -- Otherwise we, rather arbitrarily, threshold it. Yuk.
- -- But we want to aovid inlining large functions that return
- -- constructors into contexts that are simply "interesting"
+ -- res_discount can be very large when a function returns
+ -- constructors; but we only want to invoke that large discount
+ -- when there's a case continuation.
+ -- Otherwise we, rather arbitrarily, threshold it. Yuk.
+ -- But we want to aovid inlining large functions that return
+ -- constructors into contexts that are simply "interesting"
\end{code}
%************************************************************************
-%* *
- Interesting arguments
-%* *
+%* *
+ Interesting arguments
+%* *
%************************************************************************
Note [Interesting arguments]
@@ -1398,33 +1392,33 @@ to now!
Note [Conlike is interesting]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
- f d = ...((*) d x y)...
- ... f (df d')...
+ f d = ...((*) d x y)...
+ ... f (df d')...
where df is con-like. Then we'd really like to inline 'f' so that the
-rule for (*) (df d) can fire. To do this
+rule for (*) (df d) can fire. To do this
a) we give a discount for being an argument of a class-op (eg (*) d)
b) we say that a con-like argument (eg (df d)) is interesting
\begin{code}
-data ArgSummary = TrivArg -- Nothing interesting
- | NonTrivArg -- Arg has structure
- | ValueArg -- Arg is a con-app or PAP
- -- ..or con-like. Note [Conlike is interesting]
+data ArgSummary = TrivArg -- Nothing interesting
+ | NonTrivArg -- Arg has structure
+ | ValueArg -- Arg is a con-app or PAP
+ -- ..or con-like. Note [Conlike is interesting]
interestingArg :: CoreExpr -> ArgSummary
-- See Note [Interesting arguments]
interestingArg e = go e 0
where
-- n is # value args to which the expression is applied
- go (Lit {}) _ = ValueArg
+ go (Lit {}) _ = ValueArg
go (Var v) n
- | isConLikeId v = ValueArg -- Experimenting with 'conlike' rather that
- -- data constructors here
- | idArity v > n = ValueArg -- Catches (eg) primops with arity but no unfolding
- | n > 0 = NonTrivArg -- Saturated or unknown call
- | conlike_unfolding = ValueArg -- n==0; look for an interesting unfolding
+ | isConLikeId v = ValueArg -- Experimenting with 'conlike' rather that
+ -- data constructors here
+ | idArity v > n = ValueArg -- Catches (eg) primops with arity but no unfolding
+ | n > 0 = NonTrivArg -- Saturated or unknown call
+ | conlike_unfolding = ValueArg -- n==0; look for an interesting unfolding
-- See Note [Conlike is interesting]
- | otherwise = TrivArg -- n==0, no useful unfolding
+ | otherwise = TrivArg -- n==0, no useful unfolding
where
conlike_unfolding = isConLikeUnfolding (idUnfolding v)
@@ -1434,13 +1428,13 @@ interestingArg e = go e 0
go (App fn (Coercion _)) n = go fn n
go (App fn _) n = go fn (n+1)
go (Tick _ a) n = go a n
- go (Cast e _) n = go e n
- go (Lam v e) n
- | isTyVar v = go e n
- | n>0 = go e (n-1)
- | otherwise = ValueArg
- go (Let _ e) n = case go e n of { ValueArg -> ValueArg; _ -> NonTrivArg }
- go (Case {}) _ = NonTrivArg
+ go (Cast e _) n = go e n
+ go (Lam v e) n
+ | isTyVar v = go e n
+ | n>0 = go e (n-1)
+ | otherwise = ValueArg
+ go (Let _ e) n = case go e n of { ValueArg -> ValueArg; _ -> NonTrivArg }
+ go (Case {}) _ = NonTrivArg
nonTriv :: ArgSummary -> Bool
nonTriv TrivArg = False
diff --git a/compiler/deSugar/DsArrows.lhs b/compiler/deSugar/DsArrows.lhs
index 35a2477fd5..8f8e2d9f16 100644
--- a/compiler/deSugar/DsArrows.lhs
+++ b/compiler/deSugar/DsArrows.lhs
@@ -7,12 +7,6 @@ Desugaring arrow commands
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module DsArrows ( dsProcExpr ) where
@@ -22,7 +16,7 @@ import Match
import DsUtils
import DsMonad
-import HsSyn hiding (collectPatBinders, collectPatsBinders, collectLStmtsBinders, collectLStmtBinders, collectStmtBinders )
+import HsSyn hiding (collectPatBinders, collectPatsBinders, collectLStmtsBinders, collectLStmtBinders, collectStmtBinders )
import TcHsSyn
-- NB: The desugarer, which straddles the source and Core worlds, sometimes
@@ -58,7 +52,7 @@ import Data.List
\begin{code}
data DsCmdEnv = DsCmdEnv {
- arr_id, compose_id, first_id, app_id, choice_id, loop_id :: CoreExpr
+ arr_id, compose_id, first_id, app_id, choice_id, loop_id :: CoreExpr
}
mkCmdEnv :: CmdSyntaxTable Id -> DsM ([CoreBind], DsCmdEnv)
@@ -78,7 +72,7 @@ mkCmdEnv tc_meths
= do { rhs <- dsExpr expr
; id <- newSysLocalDs (exprType rhs)
; return (NonRec id rhs, (std_name, id)) }
-
+
find_meth prs std_name
= assocDefault (mk_panic std_name) prs std_name
mk_panic std_name = pprPanic "mkCmdEnv" (ptext (sLit "Not found:") <+> ppr std_name)
@@ -89,7 +83,7 @@ do_arr ids b_ty c_ty f = mkApps (arr_id ids) [Type b_ty, Type c_ty, f]
-- (>>>) :: forall b c d. a b c -> a c d -> a b d
do_compose :: DsCmdEnv -> Type -> Type -> Type ->
- CoreExpr -> CoreExpr -> CoreExpr
+ CoreExpr -> CoreExpr -> CoreExpr
do_compose ids b_ty c_ty d_ty f g
= mkApps (compose_id ids) [Type b_ty, Type c_ty, Type d_ty, f, g]
@@ -105,7 +99,7 @@ do_app ids b_ty c_ty = mkApps (app_id ids) [Type b_ty, Type c_ty]
-- (|||) :: forall b d c. a b d -> a c d -> a (Either b c) d
-- note the swapping of d and c
do_choice :: DsCmdEnv -> Type -> Type -> Type ->
- CoreExpr -> CoreExpr -> CoreExpr
+ CoreExpr -> CoreExpr -> CoreExpr
do_choice ids b_ty c_ty d_ty f g
= mkApps (choice_id ids) [Type b_ty, Type d_ty, Type c_ty, f, g]
@@ -118,7 +112,7 @@ do_loop ids b_ty c_ty d_ty f
-- premap :: forall b c d. (b -> c) -> a c d -> a b d
-- premap f g = arr f >>> g
do_premap :: DsCmdEnv -> Type -> Type -> Type ->
- CoreExpr -> CoreExpr -> CoreExpr
+ CoreExpr -> CoreExpr -> CoreExpr
do_premap ids b_ty c_ty d_ty f g
= do_compose ids b_ty c_ty d_ty (do_arr ids b_ty c_ty f) g
@@ -150,7 +144,7 @@ because the list of variables is typically not yet defined.
\begin{code}
-- coreCaseTuple [u1..] v [x1..xn] body
--- = case v of v { (x1, .., xn) -> body }
+-- = case v of v { (x1, .., xn) -> body }
-- But the matching may be nested if the tuple is very big
coreCaseTuple :: UniqSupply -> Id -> [Id] -> CoreExpr -> CoreExpr
@@ -178,7 +172,7 @@ The input is divided into a local environment, which is a flat tuple
(unless it's too big), and a stack, which is a right-nested pair.
In general, the input has the form
- ((x1,...,xn), (s1,...(sk,())...))
+ ((x1,...,xn), (s1,...(sk,())...))
where xi are the environment values, and si the ones on the stack,
with s1 being the "top", the first one to be matched with a lambda.
@@ -196,28 +190,28 @@ splitTypeAt n ty
_ -> pprPanic "splitTypeAt" (ppr ty)
----------------------------------------------
--- buildEnvStack
+-- buildEnvStack
--
--- ((x1,...,xn),stk)
+-- ((x1,...,xn),stk)
buildEnvStack :: [Id] -> Id -> CoreExpr
buildEnvStack env_ids stack_id
= mkCorePairExpr (mkBigCoreVarTup env_ids) (Var stack_id)
----------------------------------------------
--- matchEnvStack
+-- matchEnvStack
--
--- \ ((x1,...,xn),stk) -> body
--- =>
--- \ pair ->
--- case pair of (tup,stk) ->
--- case tup of (x1,...,xn) ->
--- body
-
-matchEnvStack :: [Id] -- x1..xn
- -> Id -- stk
- -> CoreExpr -- e
- -> DsM CoreExpr
+-- \ ((x1,...,xn),stk) -> body
+-- =>
+-- \ pair ->
+-- case pair of (tup,stk) ->
+-- case tup of (x1,...,xn) ->
+-- body
+
+matchEnvStack :: [Id] -- x1..xn
+ -> Id -- stk
+ -> CoreExpr -- e
+ -> DsM CoreExpr
matchEnvStack env_ids stack_id body = do
uniqs <- newUniqueSupply
tup_var <- newSysLocalDs (mkBigCoreVarTupTy env_ids)
@@ -226,30 +220,30 @@ matchEnvStack env_ids stack_id body = do
return (Lam pair_id (coreCasePair pair_id tup_var stack_id match_env))
----------------------------------------------
--- matchEnv
+-- matchEnv
--
--- \ (x1,...,xn) -> body
--- =>
--- \ tup ->
--- case tup of (x1,...,xn) ->
--- body
-
-matchEnv :: [Id] -- x1..xn
- -> CoreExpr -- e
- -> DsM CoreExpr
+-- \ (x1,...,xn) -> body
+-- =>
+-- \ tup ->
+-- case tup of (x1,...,xn) ->
+-- body
+
+matchEnv :: [Id] -- x1..xn
+ -> CoreExpr -- e
+ -> DsM CoreExpr
matchEnv env_ids body = do
uniqs <- newUniqueSupply
tup_id <- newSysLocalDs (mkBigCoreVarTupTy env_ids)
return (Lam tup_id (coreCaseTuple uniqs tup_id env_ids body))
----------------------------------------------
--- matchVarStack
+-- matchVarStack
--
--- case (x1, ...(xn, s)...) -> e
--- =>
--- case z0 of (x1,z1) ->
--- case zn-1 of (xn,s) ->
--- e
+-- case (x1, ...(xn, s)...) -> e
+-- =>
+-- case z0 of (x1,z1) ->
+-- case zn-1 of (xn,s) ->
+-- e
matchVarStack :: [Id] -> Id -> CoreExpr -> DsM (Id, CoreExpr)
matchVarStack [] stack_id body = return (stack_id, body)
matchVarStack (param_id:param_ids) stack_id body = do
@@ -268,16 +262,16 @@ Translation of arrow abstraction
\begin{code}
--- D; xs |-a c : () --> t' ---> c'
+-- D; xs |-a c : () --> t' ---> c'
-- --------------------------
--- D |- proc p -> c :: a t t' ---> premap (\ p -> ((xs),())) c'
+-- D |- proc p -> c :: a t t' ---> premap (\ p -> ((xs),())) c'
--
--- where (xs) is the tuple of variables bound by p
+-- where (xs) is the tuple of variables bound by p
dsProcExpr
- :: LPat Id
- -> LHsCmdTop Id
- -> DsM CoreExpr
+ :: LPat Id
+ -> LHsCmdTop Id
+ -> DsM CoreExpr
dsProcExpr pat (L _ (HsCmdTop cmd _unitTy cmd_ty ids)) = do
(meth_binds, meth_ids) <- mkCmdEnv ids
let locals = mkVarSet (collectPatBinders pat)
@@ -297,11 +291,11 @@ dsProcExpr pat (L _ (HsCmdTop cmd _unitTy cmd_ty ids)) = do
Translation of a command judgement of the form
- D; xs |-a c : stk --> t
+ D; xs |-a c : stk --> t
to an expression e such that
- D |- e :: a (xs, stk) t
+ D |- e :: a (xs, stk) t
\begin{code}
dsLCmd :: DsCmdEnv -> IdSet -> Type -> Type -> LHsCmd Id -> [Id]
@@ -309,23 +303,23 @@ dsLCmd :: DsCmdEnv -> IdSet -> Type -> Type -> LHsCmd Id -> [Id]
dsLCmd ids local_vars stk_ty res_ty cmd env_ids
= dsCmd ids local_vars stk_ty res_ty (unLoc cmd) env_ids
-dsCmd :: DsCmdEnv -- arrow combinators
- -> IdSet -- set of local vars available to this command
- -> Type -- type of the stack (right-nested tuple)
- -> Type -- return type of the command
- -> HsCmd Id -- command to desugar
- -> [Id] -- list of vars in the input to this command
- -- This is typically fed back,
- -- so don't pull on it too early
- -> DsM (CoreExpr, -- desugared expression
- IdSet) -- subset of local vars that occur free
+dsCmd :: DsCmdEnv -- arrow combinators
+ -> IdSet -- set of local vars available to this command
+ -> Type -- type of the stack (right-nested tuple)
+ -> Type -- return type of the command
+ -> HsCmd Id -- command to desugar
+ -> [Id] -- list of vars in the input to this command
+ -- This is typically fed back,
+ -- so don't pull on it too early
+ -> DsM (CoreExpr, -- desugared expression
+ IdSet) -- subset of local vars that occur free
-- D |- fun :: a t1 t2
-- D, xs |- arg :: t1
-- -----------------------------
-- D; xs |-a fun -< arg : stk --> t2
--
--- ---> premap (\ ((xs), _stk) -> arg) fun
+-- ---> premap (\ ((xs), _stk) -> arg) fun
dsCmd ids local_vars stack_ty res_ty
(HsCmdArrApp arrow arg arrow_ty HsFirstOrderApp _)
@@ -350,7 +344,7 @@ dsCmd ids local_vars stack_ty res_ty
-- ------------------------------
-- D; xs |-a fun -<< arg : stk --> t2
--
--- ---> premap (\ ((xs), _stk) -> (fun, arg)) app
+-- ---> premap (\ ((xs), _stk) -> (fun, arg)) app
dsCmd ids local_vars stack_ty res_ty
(HsCmdArrApp arrow arg arrow_ty HsHigherOrderApp _)
@@ -358,7 +352,7 @@ dsCmd ids local_vars stack_ty res_ty
let
(a_arg_ty, _res_ty') = tcSplitAppTy arrow_ty
(_a_ty, arg_ty) = tcSplitAppTy a_arg_ty
-
+
core_arrow <- dsLExpr arrow
core_arg <- dsLExpr arg
stack_id <- newSysLocalDs stack_ty
@@ -379,7 +373,7 @@ dsCmd ids local_vars stack_ty res_ty
-- ------------------------
-- D; xs |-a cmd exp : stk --> t'
--
--- ---> premap (\ ((xs),stk) -> ((ys),(e,stk))) cmd
+-- ---> premap (\ ((xs),stk) -> ((ys),(e,stk))) cmd
dsCmd ids local_vars stack_ty res_ty (HsCmdApp cmd arg) env_ids = do
core_arg <- dsLExpr arg
@@ -392,9 +386,9 @@ dsCmd ids local_vars stack_ty res_ty (HsCmdApp cmd arg) env_ids = do
arg_id <- newSysLocalDs arg_ty
-- push the argument expression onto the stack
let
- stack' = mkCorePairExpr (Var arg_id) (Var stack_id)
+ stack' = mkCorePairExpr (Var arg_id) (Var stack_id)
core_body = bindNonRec arg_id core_arg
- (mkCorePairExpr (mkBigCoreVarTup env_ids') stack')
+ (mkCorePairExpr (mkBigCoreVarTup env_ids') stack')
-- match the environment and stack against the input
core_map <- matchEnvStack env_ids stack_id core_body
@@ -411,7 +405,7 @@ dsCmd ids local_vars stack_ty res_ty (HsCmdApp cmd arg) env_ids = do
-- -----------------------------------------------
-- D; xs |-a \ p1 ... pk -> cmd : (t1,...(tk,stk)...) t'
--
--- ---> premap (\ ((xs), (p1, ... (pk,stk)...)) -> ((ys),stk)) cmd
+-- ---> premap (\ ((xs), (p1, ... (pk,stk)...)) -> ((ys),stk)) cmd
dsCmd ids local_vars stack_ty res_ty
(HsCmdLam (MG { mg_alts = [L _ (Match pats _ (GRHSs [L _ (GRHS [] body)] _ ))] }))
@@ -419,7 +413,7 @@ dsCmd ids local_vars stack_ty res_ty
let
pat_vars = mkVarSet (collectPatsBinders pats)
local_vars' = pat_vars `unionVarSet` local_vars
- (pat_tys, stack_ty') = splitTypeAt (length pats) stack_ty
+ (pat_tys, stack_ty') = splitTypeAt (length pats) stack_ty
(core_body, free_vars, env_ids') <- dsfixCmd ids local_vars' stack_ty' res_ty body
param_ids <- mapM newSysLocalDs pat_tys
stack_id' <- newSysLocalDs stack_ty'
@@ -432,7 +426,7 @@ dsCmd ids local_vars stack_ty res_ty
core_expr = buildEnvStack env_ids' stack_id'
in_ty = envStackType env_ids stack_ty
in_ty' = envStackType env_ids' stack_ty'
-
+
fail_expr <- mkFailExpr LambdaExpr in_ty'
-- match the patterns against the parameters
match_code <- matchSimplys (map Var param_ids) LambdaExpr pats core_expr fail_expr
@@ -452,9 +446,9 @@ dsCmd ids local_vars stack_ty res_ty (HsCmdPar cmd) env_ids
-- ----------------------------------------
-- D; xs |-a if e then c1 else c2 : stk --> t
--
--- ---> premap (\ ((xs),stk) ->
--- if e then Left ((xs1),stk) else Right ((xs2),stk))
--- (c1 ||| c2)
+-- ---> premap (\ ((xs),stk) ->
+-- if e then Left ((xs1),stk) else Right ((xs2),stk))
+-- (c1 ||| c2)
dsCmd ids local_vars stack_ty res_ty (HsCmdIf mb_fun cond then_cmd else_cmd)
env_ids = do
@@ -474,11 +468,11 @@ dsCmd ids local_vars stack_ty res_ty (HsCmdIf mb_fun cond then_cmd else_cmd)
else_ty = envStackType else_ids stack_ty
sum_ty = mkTyConApp either_con [then_ty, else_ty]
fvs_cond = exprFreeIds core_cond `intersectVarSet` local_vars
-
+
core_left = mk_left_expr then_ty else_ty (buildEnvStack then_ids stack_id)
core_right = mk_right_expr then_ty else_ty (buildEnvStack else_ids stack_id)
- core_if <- case mb_fun of
+ core_if <- case mb_fun of
Just fun -> do { core_fun <- dsExpr fun
; matchEnvStack env_ids stack_id $
mkCoreApps core_fun [core_cond, core_left, core_right] }
@@ -494,15 +488,15 @@ dsCmd ids local_vars stack_ty res_ty (HsCmdIf mb_fun cond then_cmd else_cmd)
Case commands are treated in much the same way as if commands
(see above) except that there are more alternatives. For example
- case e of { p1 -> c1; p2 -> c2; p3 -> c3 }
+ case e of { p1 -> c1; p2 -> c2; p3 -> c3 }
is translated to
- premap (\ ((xs)*ts) -> case e of
- p1 -> (Left (Left (xs1)*ts))
- p2 -> Left ((Right (xs2)*ts))
- p3 -> Right ((xs3)*ts))
- ((c1 ||| c2) ||| c3)
+ premap (\ ((xs)*ts) -> case e of
+ p1 -> (Left (Left (xs1)*ts))
+ p2 -> Left ((Right (xs2)*ts))
+ p3 -> Right ((xs3)*ts))
+ ((c1 ||| c2) ||| c3)
The idea is to extract the commands from the case, build a balanced tree
of choices, and replace the commands with expressions that build tagged
@@ -517,7 +511,7 @@ case bodies, containing the following fields:
bodies with |||.
\begin{code}
-dsCmd ids local_vars stack_ty res_ty
+dsCmd ids local_vars stack_ty res_ty
(HsCmdCase exp (MG { mg_alts = matches, mg_arg_tys = arg_tys, mg_origin = origin }))
env_ids = do
stack_id <- newSysLocalDs stack_ty
@@ -533,7 +527,7 @@ dsCmd ids local_vars stack_ty res_ty
return ([mkHsEnvStackExpr leaf_ids stack_id],
envStackType leaf_ids stack_ty,
core_leaf)
-
+
branches <- mapM make_branch leaves
either_con <- dsLookupTyCon eitherTyConName
left_con <- dsLookupDataCon leftDataConName
@@ -574,13 +568,13 @@ dsCmd ids local_vars stack_ty res_ty
-- ----------------------------------
-- D; xs |-a let binds in cmd : stk --> t
--
--- ---> premap (\ ((xs),stk) -> let binds in ((ys),stk)) c
+-- ---> premap (\ ((xs),stk) -> let binds in ((ys),stk)) c
dsCmd ids local_vars stack_ty res_ty (HsCmdLet binds body) env_ids = do
let
defined_vars = mkVarSet (collectLocalBinders binds)
local_vars' = defined_vars `unionVarSet` local_vars
-
+
(core_body, _free_vars, env_ids') <- dsfixCmd ids local_vars' stack_ty res_ty body
stack_id <- newSysLocalDs stack_ty
-- build a new environment, plus the stack, using the let bindings
@@ -599,24 +593,24 @@ dsCmd ids local_vars stack_ty res_ty (HsCmdLet binds body) env_ids = do
-- ----------------------------------
-- D; xs |-a do { ss } : () --> t
--
--- ---> premap (\ (env,stk) -> env) c
+-- ---> premap (\ (env,stk) -> env) c
dsCmd ids local_vars stack_ty res_ty (HsCmdDo stmts _) env_ids = do
(core_stmts, env_ids') <- dsCmdDo ids local_vars res_ty stmts env_ids
let env_ty = mkBigCoreVarTupTy env_ids
core_fst <- mkFstExpr env_ty stack_ty
return (do_premap ids
- (mkCorePairTy env_ty stack_ty)
- env_ty
- res_ty
- core_fst
- core_stmts,
- env_ids')
+ (mkCorePairTy env_ty stack_ty)
+ env_ty
+ res_ty
+ core_fst
+ core_stmts,
+ env_ids')
-- D |- e :: forall e. a1 (e,stk1) t1 -> ... an (e,stkn) tn -> a (e,stk) t
-- D; xs |-a ci :: stki --> ti
-- -----------------------------------
--- D; xs |-a (|e c1 ... cn|) :: stk --> t ---> e [t_xs] c1 ... cn
+-- D; xs |-a (|e c1 ... cn|) :: stk --> t ---> e [t_xs] c1 ... cn
dsCmd _ids local_vars _stack_ty _res_ty (HsCmdArrForm op _ args) env_ids = do
let env_ty = mkBigCoreVarTupTy env_ids
@@ -632,16 +626,16 @@ dsCmd ids local_vars stack_ty res_ty (HsCmdCast coercion cmd) env_ids = do
dsCmd _ _ _ _ _ c = pprPanic "dsCmd" (ppr c)
--- D; ys |-a c : stk --> t (ys <= xs)
+-- D; ys |-a c : stk --> t (ys <= xs)
-- ---------------------
--- D; xs |-a c : stk --> t ---> premap (\ ((xs),stk) -> ((ys),stk)) c
+-- D; xs |-a c : stk --> t ---> premap (\ ((xs),stk) -> ((ys),stk)) c
dsTrimCmdArg
- :: IdSet -- set of local vars available to this command
- -> [Id] -- list of vars in the input to this command
- -> LHsCmdTop Id -- command argument to desugar
- -> DsM (CoreExpr, -- desugared expression
- IdSet) -- subset of local vars that occur free
+ :: IdSet -- set of local vars available to this command
+ -> [Id] -- list of vars in the input to this command
+ -> LHsCmdTop Id -- command argument to desugar
+ -> DsM (CoreExpr, -- desugared expression
+ IdSet) -- subset of local vars that occur free
dsTrimCmdArg local_vars env_ids (L _ (HsCmdTop cmd stack_ty cmd_ty ids)) = do
(meth_binds, meth_ids) <- mkCmdEnv ids
(core_cmd, free_vars, env_ids') <- dsfixCmd meth_ids local_vars stack_ty cmd_ty cmd
@@ -658,14 +652,14 @@ dsTrimCmdArg local_vars env_ids (L _ (HsCmdTop cmd stack_ty cmd_ty ids)) = do
-- Typically needs to be prefixed with arr (\(p, stk) -> ((xs),stk))
dsfixCmd
- :: DsCmdEnv -- arrow combinators
- -> IdSet -- set of local vars available to this command
- -> Type -- type of the stack (right-nested tuple)
- -> Type -- return type of the command
- -> LHsCmd Id -- command to desugar
- -> DsM (CoreExpr, -- desugared expression
- IdSet, -- subset of local vars that occur free
- [Id]) -- the same local vars as a list, fed back
+ :: DsCmdEnv -- arrow combinators
+ -> IdSet -- set of local vars available to this command
+ -> Type -- type of the stack (right-nested tuple)
+ -> Type -- return type of the command
+ -> LHsCmd Id -- command to desugar
+ -> DsM (CoreExpr, -- desugared expression
+ IdSet, -- subset of local vars that occur free
+ [Id]) -- the same local vars as a list, fed back
dsfixCmd ids local_vars stk_ty cmd_ty cmd
= trimInput (dsLCmd ids local_vars stk_ty cmd_ty cmd)
@@ -673,12 +667,12 @@ dsfixCmd ids local_vars stk_ty cmd_ty cmd
-- for use as the input tuple of the generated arrow.
trimInput
- :: ([Id] -> DsM (CoreExpr, IdSet))
- -> DsM (CoreExpr, -- desugared expression
- IdSet, -- subset of local vars that occur free
- [Id]) -- same local vars as a list, fed back to
- -- the inner function to form the tuple of
- -- inputs to the arrow.
+ :: ([Id] -> DsM (CoreExpr, IdSet))
+ -> DsM (CoreExpr, -- desugared expression
+ IdSet, -- subset of local vars that occur free
+ [Id]) -- same local vars as a list, fed back to
+ -- the inner function to form the tuple of
+ -- inputs to the arrow.
trimInput build_arrow
= fixDs (\ ~(_,_,env_ids) -> do
(core_cmd, free_vars) <- build_arrow env_ids
@@ -688,19 +682,19 @@ trimInput build_arrow
Translation of command judgements of the form
- D |-a do { ss } : t
+ D |-a do { ss } : t
\begin{code}
-dsCmdDo :: DsCmdEnv -- arrow combinators
- -> IdSet -- set of local vars available to this statement
- -> Type -- return type of the statement
- -> [CmdLStmt Id] -- statements to desugar
- -> [Id] -- list of vars in the input to this statement
- -- This is typically fed back,
- -- so don't pull on it too early
- -> DsM (CoreExpr, -- desugared expression
- IdSet) -- subset of local vars that occur free
+dsCmdDo :: DsCmdEnv -- arrow combinators
+ -> IdSet -- set of local vars available to this statement
+ -> Type -- return type of the statement
+ -> [CmdLStmt Id] -- statements to desugar
+ -> [Id] -- list of vars in the input to this statement
+ -- This is typically fed back,
+ -- so don't pull on it too early
+ -> DsM (CoreExpr, -- desugared expression
+ IdSet) -- subset of local vars that occur free
dsCmdDo _ _ _ [] _ = panic "dsCmdDo"
@@ -708,7 +702,7 @@ dsCmdDo _ _ _ [] _ = panic "dsCmdDo"
-- --------------------------
-- D; xs |-a do { c } : t
--
--- ---> premap (\ (xs) -> ((xs), ())) c
+-- ---> premap (\ (xs) -> ((xs), ())) c
dsCmdDo ids local_vars res_ty [L _ (LastStmt body _)] env_ids = do
(core_body, env_ids') <- dsLCmd ids local_vars unitTy res_ty body env_ids
@@ -717,11 +711,11 @@ dsCmdDo ids local_vars res_ty [L _ (LastStmt body _)] env_ids = do
let core_map = Lam env_var (mkCorePairExpr (Var env_var) mkCoreUnitExpr)
return (do_premap ids
env_ty
- (mkCorePairTy env_ty unitTy)
+ (mkCorePairTy env_ty unitTy)
res_ty
core_map
core_body,
- env_ids')
+ env_ids')
dsCmdDo ids local_vars res_ty (stmt:stmts) env_ids = do
let
@@ -748,50 +742,50 @@ dsCmdLStmt ids local_vars out_ids cmd env_ids
= dsCmdStmt ids local_vars out_ids (unLoc cmd) env_ids
dsCmdStmt
- :: DsCmdEnv -- arrow combinators
- -> IdSet -- set of local vars available to this statement
- -> [Id] -- list of vars in the output of this statement
- -> CmdStmt Id -- statement to desugar
- -> [Id] -- list of vars in the input to this statement
- -- This is typically fed back,
- -- so don't pull on it too early
- -> DsM (CoreExpr, -- desugared expression
- IdSet) -- subset of local vars that occur free
+ :: DsCmdEnv -- arrow combinators
+ -> IdSet -- set of local vars available to this statement
+ -> [Id] -- list of vars in the output of this statement
+ -> CmdStmt Id -- statement to desugar
+ -> [Id] -- list of vars in the input to this statement
+ -- This is typically fed back,
+ -- so don't pull on it too early
+ -> DsM (CoreExpr, -- desugared expression
+ IdSet) -- subset of local vars that occur free
-- D; xs1 |-a c : () --> t
-- D; xs' |-a do { ss } : t'
-- ------------------------------
-- D; xs |-a do { c; ss } : t'
--
--- ---> premap (\ ((xs)) -> (((xs1),()),(xs')))
--- (first c >>> arr snd) >>> ss
+-- ---> premap (\ ((xs)) -> (((xs1),()),(xs')))
+-- (first c >>> arr snd) >>> ss
dsCmdStmt ids local_vars out_ids (BodyStmt cmd _ _ c_ty) env_ids = do
(core_cmd, fv_cmd, env_ids1) <- dsfixCmd ids local_vars unitTy c_ty cmd
core_mux <- matchEnv env_ids
(mkCorePairExpr
- (mkCorePairExpr (mkBigCoreVarTup env_ids1) mkCoreUnitExpr)
- (mkBigCoreVarTup out_ids))
+ (mkCorePairExpr (mkBigCoreVarTup env_ids1) mkCoreUnitExpr)
+ (mkBigCoreVarTup out_ids))
let
- in_ty = mkBigCoreVarTupTy env_ids
- in_ty1 = mkCorePairTy (mkBigCoreVarTupTy env_ids1) unitTy
- out_ty = mkBigCoreVarTupTy out_ids
- before_c_ty = mkCorePairTy in_ty1 out_ty
- after_c_ty = mkCorePairTy c_ty out_ty
+ in_ty = mkBigCoreVarTupTy env_ids
+ in_ty1 = mkCorePairTy (mkBigCoreVarTupTy env_ids1) unitTy
+ out_ty = mkBigCoreVarTupTy out_ids
+ before_c_ty = mkCorePairTy in_ty1 out_ty
+ after_c_ty = mkCorePairTy c_ty out_ty
snd_fn <- mkSndExpr c_ty out_ty
return (do_premap ids in_ty before_c_ty out_ty core_mux $
- do_compose ids before_c_ty after_c_ty out_ty
- (do_first ids in_ty1 c_ty out_ty core_cmd) $
- do_arr ids after_c_ty out_ty snd_fn,
- extendVarSetList fv_cmd out_ids)
+ do_compose ids before_c_ty after_c_ty out_ty
+ (do_first ids in_ty1 c_ty out_ty core_cmd) $
+ do_arr ids after_c_ty out_ty snd_fn,
+ extendVarSetList fv_cmd out_ids)
-- D; xs1 |-a c : () --> t
--- D; xs' |-a do { ss } : t' xs2 = xs' - defs(p)
+-- D; xs' |-a do { ss } : t' xs2 = xs' - defs(p)
-- -----------------------------------
-- D; xs |-a do { p <- c; ss } : t'
--
--- ---> premap (\ (xs) -> (((xs1),()),(xs2)))
--- (first c >>> arr (\ (p, (xs2)) -> (xs'))) >>> ss
+-- ---> premap (\ (xs) -> (((xs1),()),(xs2)))
+-- (first c >>> arr (\ (p, (xs2)) -> (xs'))) >>> ss
--
-- It would be simpler and more consistent to do this using second,
-- but that's likely to be defined in terms of first.
@@ -799,53 +793,53 @@ dsCmdStmt ids local_vars out_ids (BodyStmt cmd _ _ c_ty) env_ids = do
dsCmdStmt ids local_vars out_ids (BindStmt pat cmd _ _) env_ids = do
(core_cmd, fv_cmd, env_ids1) <- dsfixCmd ids local_vars unitTy (hsLPatType pat) cmd
let
- pat_ty = hsLPatType pat
- pat_vars = mkVarSet (collectPatBinders pat)
- env_ids2 = varSetElems (mkVarSet out_ids `minusVarSet` pat_vars)
- env_ty2 = mkBigCoreVarTupTy env_ids2
+ pat_ty = hsLPatType pat
+ pat_vars = mkVarSet (collectPatBinders pat)
+ env_ids2 = varSetElems (mkVarSet out_ids `minusVarSet` pat_vars)
+ env_ty2 = mkBigCoreVarTupTy env_ids2
-- multiplexing function
- -- \ (xs) -> (((xs1),()),(xs2))
+ -- \ (xs) -> (((xs1),()),(xs2))
core_mux <- matchEnv env_ids
(mkCorePairExpr
- (mkCorePairExpr (mkBigCoreVarTup env_ids1) mkCoreUnitExpr)
- (mkBigCoreVarTup env_ids2))
+ (mkCorePairExpr (mkBigCoreVarTup env_ids1) mkCoreUnitExpr)
+ (mkBigCoreVarTup env_ids2))
-- projection function
- -- \ (p, (xs2)) -> (zs)
+ -- \ (p, (xs2)) -> (zs)
env_id <- newSysLocalDs env_ty2
uniqs <- newUniqueSupply
let
- after_c_ty = mkCorePairTy pat_ty env_ty2
- out_ty = mkBigCoreVarTupTy out_ids
- body_expr = coreCaseTuple uniqs env_id env_ids2 (mkBigCoreVarTup out_ids)
-
+ after_c_ty = mkCorePairTy pat_ty env_ty2
+ out_ty = mkBigCoreVarTupTy out_ids
+ body_expr = coreCaseTuple uniqs env_id env_ids2 (mkBigCoreVarTup out_ids)
+
fail_expr <- mkFailExpr (StmtCtxt DoExpr) out_ty
pat_id <- selectSimpleMatchVarL pat
match_code <- matchSimply (Var pat_id) (StmtCtxt DoExpr) pat body_expr fail_expr
pair_id <- newSysLocalDs after_c_ty
let
- proj_expr = Lam pair_id (coreCasePair pair_id pat_id env_id match_code)
+ proj_expr = Lam pair_id (coreCasePair pair_id pat_id env_id match_code)
-- put it all together
let
- in_ty = mkBigCoreVarTupTy env_ids
- in_ty1 = mkCorePairTy (mkBigCoreVarTupTy env_ids1) unitTy
- in_ty2 = mkBigCoreVarTupTy env_ids2
- before_c_ty = mkCorePairTy in_ty1 in_ty2
+ in_ty = mkBigCoreVarTupTy env_ids
+ in_ty1 = mkCorePairTy (mkBigCoreVarTupTy env_ids1) unitTy
+ in_ty2 = mkBigCoreVarTupTy env_ids2
+ before_c_ty = mkCorePairTy in_ty1 in_ty2
return (do_premap ids in_ty before_c_ty out_ty core_mux $
- do_compose ids before_c_ty after_c_ty out_ty
- (do_first ids in_ty1 pat_ty in_ty2 core_cmd) $
- do_arr ids after_c_ty out_ty proj_expr,
- fv_cmd `unionVarSet` (mkVarSet out_ids `minusVarSet` pat_vars))
+ do_compose ids before_c_ty after_c_ty out_ty
+ (do_first ids in_ty1 pat_ty in_ty2 core_cmd) $
+ do_arr ids after_c_ty out_ty proj_expr,
+ fv_cmd `unionVarSet` (mkVarSet out_ids `minusVarSet` pat_vars))
-- D; xs' |-a do { ss } : t
-- --------------------------------------
-- D; xs |-a do { let binds; ss } : t
--
--- ---> arr (\ (xs) -> let binds in (xs')) >>> ss
+-- ---> arr (\ (xs) -> let binds in (xs')) >>> ss
dsCmdStmt ids local_vars out_ids (LetStmt binds) env_ids = do
-- build a new environment using the let bindings
@@ -853,24 +847,24 @@ dsCmdStmt ids local_vars out_ids (LetStmt binds) env_ids = do
-- match the old environment against the input
core_map <- matchEnv env_ids core_binds
return (do_arr ids
- (mkBigCoreVarTupTy env_ids)
- (mkBigCoreVarTupTy out_ids)
- core_map,
- exprFreeIds core_binds `intersectVarSet` local_vars)
+ (mkBigCoreVarTupTy env_ids)
+ (mkBigCoreVarTupTy out_ids)
+ core_map,
+ exprFreeIds core_binds `intersectVarSet` local_vars)
-- D; ys |-a do { ss; returnA -< ((xs1), (ys2)) } : ...
-- D; xs' |-a do { ss' } : t
-- ------------------------------------
-- D; xs |-a do { rec ss; ss' } : t
--
--- xs1 = xs' /\ defs(ss)
--- xs2 = xs' - defs(ss)
--- ys1 = ys - defs(ss)
--- ys2 = ys /\ defs(ss)
+-- xs1 = xs' /\ defs(ss)
+-- xs2 = xs' - defs(ss)
+-- ys1 = ys - defs(ss)
+-- ys2 = ys /\ defs(ss)
--
--- ---> arr (\(xs) -> ((ys1),(xs2))) >>>
--- first (loop (arr (\((ys1),~(ys2)) -> (ys)) >>> ss)) >>>
--- arr (\((xs1),(xs2)) -> (xs')) >>> ss'
+-- ---> arr (\(xs) -> ((ys1),(xs2))) >>>
+-- first (loop (arr (\((ys1),~(ys2)) -> (ys)) >>> ss)) >>>
+-- arr (\((xs1),(xs2)) -> (xs')) >>> ss'
dsCmdStmt ids local_vars out_ids
(RecStmt { recS_stmts = stmts
@@ -925,20 +919,20 @@ dsCmdStmt ids local_vars out_ids
dsCmdStmt _ _ _ _ s = pprPanic "dsCmdStmt" (ppr s)
--- loop (premap (\ ((env1_ids), ~(rec_ids)) -> (env_ids))
--- (ss >>> arr (\ (out_ids) -> ((later_rets),(rec_rets))))) >>>
+-- loop (premap (\ ((env1_ids), ~(rec_ids)) -> (env_ids))
+-- (ss >>> arr (\ (out_ids) -> ((later_rets),(rec_rets))))) >>>
dsRecCmd
- :: DsCmdEnv -- arrow combinators
- -> IdSet -- set of local vars available to this statement
+ :: DsCmdEnv -- arrow combinators
+ -> IdSet -- set of local vars available to this statement
-> [CmdLStmt Id] -- list of statements inside the RecCmd
- -> [Id] -- list of vars defined here and used later
- -> [HsExpr Id] -- expressions corresponding to later_ids
- -> [Id] -- list of vars fed back through the loop
- -> [HsExpr Id] -- expressions corresponding to rec_ids
- -> DsM (CoreExpr, -- desugared statement
- IdSet, -- subset of local vars that occur free
- [Id]) -- same local vars as a list
+ -> [Id] -- list of vars defined here and used later
+ -> [HsExpr Id] -- expressions corresponding to later_ids
+ -> [Id] -- list of vars fed back through the loop
+ -> [HsExpr Id] -- expressions corresponding to rec_ids
+ -> DsM (CoreExpr, -- desugared statement
+ IdSet, -- subset of local vars that occur free
+ [Id]) -- same local vars as a list
dsRecCmd ids local_vars stmts later_ids later_rets rec_ids rec_rets = do
let
@@ -1006,25 +1000,25 @@ two environments (no stack)
\begin{code}
dsfixCmdStmts
- :: DsCmdEnv -- arrow combinators
- -> IdSet -- set of local vars available to this statement
- -> [Id] -- output vars of these statements
- -> [CmdLStmt Id] -- statements to desugar
- -> DsM (CoreExpr, -- desugared expression
- IdSet, -- subset of local vars that occur free
- [Id]) -- same local vars as a list
+ :: DsCmdEnv -- arrow combinators
+ -> IdSet -- set of local vars available to this statement
+ -> [Id] -- output vars of these statements
+ -> [CmdLStmt Id] -- statements to desugar
+ -> DsM (CoreExpr, -- desugared expression
+ IdSet, -- subset of local vars that occur free
+ [Id]) -- same local vars as a list
dsfixCmdStmts ids local_vars out_ids stmts
= trimInput (dsCmdStmts ids local_vars out_ids stmts)
dsCmdStmts
- :: DsCmdEnv -- arrow combinators
- -> IdSet -- set of local vars available to this statement
- -> [Id] -- output vars of these statements
- -> [CmdLStmt Id] -- statements to desugar
- -> [Id] -- list of vars in the input to these statements
- -> DsM (CoreExpr, -- desugared expression
- IdSet) -- subset of local vars that occur free
+ :: DsCmdEnv -- arrow combinators
+ -> IdSet -- set of local vars available to this statement
+ -> [Id] -- output vars of these statements
+ -> [CmdLStmt Id] -- statements to desugar
+ -> [Id] -- list of vars in the input to these statements
+ -> DsM (CoreExpr, -- desugared expression
+ IdSet) -- subset of local vars that occur free
dsCmdStmts ids local_vars out_ids [stmt] env_ids
= dsCmdLStmt ids local_vars out_ids stmt env_ids
@@ -1050,11 +1044,11 @@ Match a list of expressions against a list of patterns, left-to-right.
\begin{code}
matchSimplys :: [CoreExpr] -- Scrutinees
- -> HsMatchContext Name -- Match kind
- -> [LPat Id] -- Patterns they should match
- -> CoreExpr -- Return this if they all match
- -> CoreExpr -- Return this if they don't
- -> DsM CoreExpr
+ -> HsMatchContext Name -- Match kind
+ -> [LPat Id] -- Patterns they should match
+ -> CoreExpr -- Return this if they all match
+ -> CoreExpr -- Return this if they don't
+ -> DsM CoreExpr
matchSimplys [] _ctxt [] result_expr _fail_expr = return result_expr
matchSimplys (exp:exps) ctxt (pat:pats) result_expr fail_expr = do
match_code <- matchSimplys exps ctxt pats result_expr fail_expr
@@ -1068,13 +1062,13 @@ List of leaf expressions, with set of variables bound in each
leavesMatch :: LMatch Id (Located (body Id)) -> [(Located (body Id), IdSet)]
leavesMatch (L _ (Match pats _ (GRHSs grhss binds)))
= let
- defined_vars = mkVarSet (collectPatsBinders pats)
- `unionVarSet`
- mkVarSet (collectLocalBinders binds)
+ defined_vars = mkVarSet (collectPatsBinders pats)
+ `unionVarSet`
+ mkVarSet (collectLocalBinders binds)
in
- [(body,
- mkVarSet (collectLStmtsBinders stmts)
- `unionVarSet` defined_vars)
+ [(body,
+ mkVarSet (collectLStmtsBinders stmts)
+ `unionVarSet` defined_vars)
| L _ (GRHS stmts body) <- grhss]
\end{code}
@@ -1089,7 +1083,7 @@ replaceLeavesMatch
LMatch Id (Located (body' Id))) -- updated match
replaceLeavesMatch _res_ty leaves (L loc (Match pat mt (GRHSs grhss binds)))
= let
- (leaves', grhss') = mapAccumL replaceLeavesGRHS leaves grhss
+ (leaves', grhss') = mapAccumL replaceLeavesGRHS leaves grhss
in
(leaves', L loc (Match pat mt (GRHSs grhss' binds)))
diff --git a/compiler/deSugar/DsBinds.lhs b/compiler/deSugar/DsBinds.lhs
index 37c16325e0..a8d37a4bdd 100644
--- a/compiler/deSugar/DsBinds.lhs
+++ b/compiler/deSugar/DsBinds.lhs
@@ -11,12 +11,6 @@ lower levels it is preserved with @let@/@letrec@s).
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module DsBinds ( dsTopLHsBinds, dsLHsBinds, decomposeRuleLhs, dsSpec,
dsHsWrapper, dsTcEvBinds, dsEvBinds
@@ -24,15 +18,15 @@ module DsBinds ( dsTopLHsBinds, dsLHsBinds, decomposeRuleLhs, dsSpec,
#include "HsVersions.h"
-import {-# SOURCE #-} DsExpr( dsLExpr )
-import {-# SOURCE #-} Match( matchWrapper )
+import {-# SOURCE #-} DsExpr( dsLExpr )
+import {-# SOURCE #-} Match( matchWrapper )
import DsMonad
import DsGRHSs
import DsUtils
-import HsSyn -- lots of things
-import CoreSyn -- lots of things
+import HsSyn -- lots of things
+import CoreSyn -- lots of things
import Literal ( Literal(MachStr) )
import CoreSubst
import OccurAnal ( occurAnalyseExpr )
@@ -54,9 +48,9 @@ import Coercion hiding (substCo)
import TysWiredIn ( eqBoxDataCon, coercibleDataCon, tupleCon )
import Id
import Class
-import DataCon ( dataConWorkId )
+import DataCon ( dataConWorkId )
import Name
-import MkId ( seqId )
+import MkId ( seqId )
import Var
import VarSet
import Rules
@@ -78,9 +72,9 @@ import Control.Monad(liftM)
\end{code}
%************************************************************************
-%* *
+%* *
\subsection[dsMonoBinds]{Desugaring a @MonoBinds@}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -106,17 +100,17 @@ dsHsBind (VarBind { var_id = var, var_rhs = expr, var_inline = inline_regardless
= do { dflags <- getDynFlags
; core_expr <- dsLExpr expr
- -- Dictionary bindings are always VarBinds,
- -- so we only need do this here
+ -- Dictionary bindings are always VarBinds,
+ -- so we only need do this here
; let var' | inline_regardless = var `setIdUnfolding` mkCompulsoryUnfolding core_expr
- | otherwise = var
+ | otherwise = var
; return (unitOL (makeCorePair dflags var' False 0 core_expr)) }
dsHsBind (FunBind { fun_id = L _ fun, fun_matches = matches
, fun_co_fn = co_fn, fun_tick = tick
, fun_infix = inf })
- = do { dflags <- getDynFlags
+ = do { dflags <- getDynFlags
; (args, body) <- matchWrapper (FunRhs (idName fun) inf) matches
; let body' = mkOptTickBox tick body
; rhs <- dsHsWrapper co_fn (mkLams args body')
@@ -125,17 +119,17 @@ dsHsBind (FunBind { fun_id = L _ fun, fun_matches = matches
dsHsBind (PatBind { pat_lhs = pat, pat_rhs = grhss, pat_rhs_ty = ty
, pat_ticks = (rhs_tick, var_ticks) })
- = do { body_expr <- dsGuarded grhss ty
+ = do { body_expr <- dsGuarded grhss ty
; let body' = mkOptTickBox rhs_tick body_expr
; sel_binds <- mkSelectorBinds var_ticks pat body'
- -- We silently ignore inline pragmas; no makeCorePair
- -- Not so cool, but really doesn't matter
+ -- We silently ignore inline pragmas; no makeCorePair
+ -- Not so cool, but really doesn't matter
; return (toOL sel_binds) }
- -- A common case: one exported variable
- -- Non-recursive bindings come through this way
- -- So do self-recursive bindings, and recursive bindings
- -- that have been chopped up with type signatures
+ -- A common case: one exported variable
+ -- Non-recursive bindings come through this way
+ -- So do self-recursive bindings, and recursive bindings
+ -- that have been chopped up with type signatures
dsHsBind (AbsBinds { abs_tvs = tyvars, abs_ev_vars = dicts
, abs_exports = [export]
, abs_ev_binds = ev_binds, abs_binds = binds })
@@ -143,21 +137,21 @@ dsHsBind (AbsBinds { abs_tvs = tyvars, abs_ev_vars = dicts
, abe_mono = local, abe_prags = prags } <- export
= do { dflags <- getDynFlags
; bind_prs <- ds_lhs_binds binds
- ; let core_bind = Rec (fromOL bind_prs)
+ ; let core_bind = Rec (fromOL bind_prs)
; ds_binds <- dsTcEvBinds ev_binds
; rhs <- dsHsWrapper wrap $ -- Usually the identity
- mkLams tyvars $ mkLams dicts $
- mkCoreLets ds_binds $
+ mkLams tyvars $ mkLams dicts $
+ mkCoreLets ds_binds $
Let core_bind $
Var local
-
- ; (spec_binds, rules) <- dsSpecs rhs prags
- ; let global' = addIdSpecialisations global rules
- main_bind = makeCorePair dflags global' (isDefaultMethod prags)
- (dictArity dicts) rhs
-
- ; return (main_bind `consOL` spec_binds) }
+ ; (spec_binds, rules) <- dsSpecs rhs prags
+
+ ; let global' = addIdSpecialisations global rules
+ main_bind = makeCorePair dflags global' (isDefaultMethod prags)
+ (dictArity dicts) rhs
+
+ ; return (main_bind `consOL` spec_binds) }
dsHsBind (AbsBinds { abs_tvs = tyvars, abs_ev_vars = dicts
, abs_exports = exports, abs_ev_binds = ev_binds
@@ -167,39 +161,39 @@ dsHsBind (AbsBinds { abs_tvs = tyvars, abs_ev_vars = dicts
; bind_prs <- ds_lhs_binds binds
; let core_bind = Rec [ makeCorePair dflags (add_inline lcl_id) False 0 rhs
| (lcl_id, rhs) <- fromOL bind_prs ]
- -- Monomorphic recursion possible, hence Rec
+ -- Monomorphic recursion possible, hence Rec
- locals = map abe_mono exports
- tup_expr = mkBigCoreVarTup locals
- tup_ty = exprType tup_expr
+ locals = map abe_mono exports
+ tup_expr = mkBigCoreVarTup locals
+ tup_ty = exprType tup_expr
; ds_binds <- dsTcEvBinds ev_binds
- ; let poly_tup_rhs = mkLams tyvars $ mkLams dicts $
- mkCoreLets ds_binds $
- Let core_bind $
- tup_expr
+ ; let poly_tup_rhs = mkLams tyvars $ mkLams dicts $
+ mkCoreLets ds_binds $
+ Let core_bind $
+ tup_expr
- ; poly_tup_id <- newSysLocalDs (exprType poly_tup_rhs)
+ ; poly_tup_id <- newSysLocalDs (exprType poly_tup_rhs)
- ; let mk_bind (ABE { abe_wrap = wrap, abe_poly = global
+ ; let mk_bind (ABE { abe_wrap = wrap, abe_poly = global
, abe_mono = local, abe_prags = spec_prags })
- = do { tup_id <- newSysLocalDs tup_ty
- ; rhs <- dsHsWrapper wrap $
+ = do { tup_id <- newSysLocalDs tup_ty
+ ; rhs <- dsHsWrapper wrap $
mkLams tyvars $ mkLams dicts $
- mkTupleSelector locals local tup_id $
- mkVarApps (Var poly_tup_id) (tyvars ++ dicts)
+ mkTupleSelector locals local tup_id $
+ mkVarApps (Var poly_tup_id) (tyvars ++ dicts)
; let rhs_for_spec = Let (NonRec poly_tup_id poly_tup_rhs) rhs
- ; (spec_binds, rules) <- dsSpecs rhs_for_spec spec_prags
- ; let global' = (global `setInlinePragma` defaultInlinePragma)
+ ; (spec_binds, rules) <- dsSpecs rhs_for_spec spec_prags
+ ; let global' = (global `setInlinePragma` defaultInlinePragma)
`addIdSpecialisations` rules
-- Kill the INLINE pragma because it applies to
-- the user written (local) function. The global
- -- Id is just the selector. Hmm.
- ; return ((global', rhs) `consOL` spec_binds) }
+ -- Id is just the selector. Hmm.
+ ; return ((global', rhs) `consOL` spec_binds) }
; export_binds_s <- mapM mk_bind exports
- ; return ((poly_tup_id, poly_tup_rhs) `consOL`
- concatOL export_binds_s) }
+ ; return ((poly_tup_id, poly_tup_rhs) `consOL`
+ concatOL export_binds_s) }
where
inline_env :: IdEnv Id -- Maps a monomorphic local Id to one with
-- the inline pragma from the source
@@ -217,14 +211,14 @@ dsHsBind (PatSynBind{}) = panic "dsHsBind: PatSynBind"
------------------------
makeCorePair :: DynFlags -> Id -> Bool -> Arity -> CoreExpr -> (Id, CoreExpr)
makeCorePair dflags gbl_id is_default_method dict_arity rhs
- | is_default_method -- Default methods are *always* inlined
+ | is_default_method -- Default methods are *always* inlined
= (gbl_id `setIdUnfolding` mkCompulsoryUnfolding rhs, rhs)
| otherwise
= case inlinePragmaSpec inline_prag of
- EmptyInlineSpec -> (gbl_id, rhs)
- NoInline -> (gbl_id, rhs)
- Inlinable -> (gbl_id `setIdUnfolding` inlinable_unf, rhs)
+ EmptyInlineSpec -> (gbl_id, rhs)
+ NoInline -> (gbl_id, rhs)
+ Inlinable -> (gbl_id `setIdUnfolding` inlinable_unf, rhs)
Inline -> inline_pair
where
@@ -232,8 +226,8 @@ makeCorePair dflags gbl_id is_default_method dict_arity rhs
inlinable_unf = mkInlinableUnfolding dflags rhs
inline_pair
| Just arity <- inlinePragmaSat inline_prag
- -- Add an Unfolding for an INLINE (but not for NOINLINE)
- -- And eta-expand the RHS; see Note [Eta-expanding INLINE things]
+ -- Add an Unfolding for an INLINE (but not for NOINLINE)
+ -- And eta-expand the RHS; see Note [Eta-expanding INLINE things]
, let real_arity = dict_arity + arity
-- NB: The arity in the InlineRule takes account of the dictionaries
= ( gbl_id `setIdUnfolding` mkInlineUnfolding (Just real_arity) rhs
@@ -264,22 +258,22 @@ Note [Rules and inlining]
Common special case: no type or dictionary abstraction
This is a bit less trivial than you might suppose
The naive way woudl be to desguar to something like
- f_lcl = ...f_lcl... -- The "binds" from AbsBinds
- M.f = f_lcl -- Generated from "exports"
+ f_lcl = ...f_lcl... -- The "binds" from AbsBinds
+ M.f = f_lcl -- Generated from "exports"
But we don't want that, because if M.f isn't exported,
-it'll be inlined unconditionally at every call site (its rhs is
-trivial). That would be ok unless it has RULES, which would
+it'll be inlined unconditionally at every call site (its rhs is
+trivial). That would be ok unless it has RULES, which would
thereby be completely lost. Bad, bad, bad.
Instead we want to generate
- M.f = ...f_lcl...
- f_lcl = M.f
-Now all is cool. The RULES are attached to M.f (by SimplCore),
+ M.f = ...f_lcl...
+ f_lcl = M.f
+Now all is cool. The RULES are attached to M.f (by SimplCore),
and f_lcl is rapidly inlined away.
This does not happen in the same way to polymorphic binds,
because they desugar to
- M.f = /\a. let f_lcl = ...f_lcl... in f_lcl
+ M.f = /\a. let f_lcl = ...f_lcl... in f_lcl
Although I'm a bit worried about whether full laziness might
float the f_lcl binding out and then inline M.f at its call site
@@ -297,7 +291,7 @@ So the overloading is in the nested AbsBinds. A good example is in GHC.Float:
instance RealFrac Float where
{-# SPECIALIZE round :: Float -> Int #-}
-The top-level AbsBinds for $cround has no tyvars or dicts (because the
+The top-level AbsBinds for $cround has no tyvars or dicts (because the
instance does not). But the method is locally overloaded!
Note [Abstracting over tyvars only]
@@ -305,36 +299,36 @@ Note [Abstracting over tyvars only]
When abstracting over type variable only (not dictionaries), we don't really need to
built a tuple and select from it, as we do in the general case. Instead we can take
- AbsBinds [a,b] [ ([a,b], fg, fl, _),
- ([b], gg, gl, _) ]
- { fl = e1
- gl = e2
- h = e3 }
+ AbsBinds [a,b] [ ([a,b], fg, fl, _),
+ ([b], gg, gl, _) ]
+ { fl = e1
+ gl = e2
+ h = e3 }
and desugar it to
- fg = /\ab. let B in e1
- gg = /\b. let a = () in let B in S(e2)
- h = /\ab. let B in e3
+ fg = /\ab. let B in e1
+ gg = /\b. let a = () in let B in S(e2)
+ h = /\ab. let B in e3
where B is the *non-recursive* binding
- fl = fg a b
- gl = gg b
- h = h a b -- See (b); note shadowing!
+ fl = fg a b
+ gl = gg b
+ h = h a b -- See (b); note shadowing!
Notice (a) g has a different number of type variables to f, so we must
- use the mkArbitraryType thing to fill in the gaps.
- We use a type-let to do that.
+ use the mkArbitraryType thing to fill in the gaps.
+ We use a type-let to do that.
- (b) The local variable h isn't in the exports, and rather than
- clone a fresh copy we simply replace h by (h a b), where
- the two h's have different types! Shadowing happens here,
- which looks confusing but works fine.
+ (b) The local variable h isn't in the exports, and rather than
+ clone a fresh copy we simply replace h by (h a b), where
+ the two h's have different types! Shadowing happens here,
+ which looks confusing but works fine.
- (c) The result is *still* quadratic-sized if there are a lot of
- small bindings. So if there are more than some small
- number (10), we filter the binding set B by the free
- variables of the particular RHS. Tiresome.
+ (c) The result is *still* quadratic-sized if there are a lot of
+ small bindings. So if there are more than some small
+ number (10), we filter the binding set B by the free
+ variables of the particular RHS. Tiresome.
Why got to this trouble? It's a common case, and it removes the
quadratic-sized tuple desugaring. Less clutter, hopefullly faster
@@ -350,13 +344,13 @@ Consider
foo x = ...
If (foo d) ever gets floated out as a common sub-expression (which can
-happen as a result of method sharing), there's a danger that we never
+happen as a result of method sharing), there's a danger that we never
get to do the inlining, which is a Terribly Bad thing given that the
user said "inline"!
To avoid this we pre-emptively eta-expand the definition, so that foo
has the arity with which it is declared in the source code. In this
-example it has arity 2 (one for the Eq and one for x). Doing this
+example it has arity 2 (one for the Eq and one for x). Doing this
should mean that (foo d) is a PAP and we don't share it.
Note [Nested arities]
@@ -379,8 +373,8 @@ thought!
Note [Implementing SPECIALISE pragmas]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Example:
- f :: (Eq a, Ix b) => a -> b -> Bool
- {-# SPECIALISE f :: (Ix p, Ix q) => Int -> (p,q) -> Bool #-}
+ f :: (Eq a, Ix b) => a -> b -> Bool
+ {-# SPECIALISE f :: (Ix p, Ix q) => Int -> (p,q) -> Bool #-}
f = <poly_rhs>
From this the typechecker generates
@@ -390,7 +384,7 @@ From this the typechecker generates
SpecPrag (wrap_fn :: forall a b. (Eq a, Ix b) => XXX
-> forall p q. (Ix p, Ix q) => XXX[ Int/a, (p,q)/b ])
-Note that wrap_fn can transform *any* function with the right type prefix
+Note that wrap_fn can transform *any* function with the right type prefix
forall ab. (Eq a, Ix b) => XXX
regardless of XXX. It's sort of polymorphic in XXX. This is
useful: we use the same wrapper to transform each of the class ops, as
@@ -398,26 +392,26 @@ well as the dict.
From these we generate:
- Rule: forall p, q, (dp:Ix p), (dq:Ix q).
+ Rule: forall p, q, (dp:Ix p), (dq:Ix q).
f Int (p,q) dInt ($dfInPair dp dq) = f_spec p q dp dq
- Spec bind: f_spec = wrap_fn <poly_rhs>
+ Spec bind: f_spec = wrap_fn <poly_rhs>
-Note that
+Note that
* The LHS of the rule may mention dictionary *expressions* (eg
$dfIxPair dp dq), and that is essential because the dp, dq are
needed on the RHS.
- * The RHS of f_spec, <poly_rhs> has a *copy* of 'binds', so that it
+ * The RHS of f_spec, <poly_rhs> has a *copy* of 'binds', so that it
can fully specialise it.
\begin{code}
------------------------
dsSpecs :: CoreExpr -- Its rhs
-> TcSpecPrags
- -> DsM ( OrdList (Id,CoreExpr) -- Binding for specialised Ids
- , [CoreRule] ) -- Rules for the Global Ids
+ -> DsM ( OrdList (Id,CoreExpr) -- Binding for specialised Ids
+ , [CoreRule] ) -- Rules for the Global Ids
-- See Note [Implementing SPECIALISE pragmas]
dsSpecs _ IsDefaultMethod = return (nilOL, [])
dsSpecs poly_rhs (SpecPrags sps)
@@ -425,29 +419,29 @@ dsSpecs poly_rhs (SpecPrags sps)
; let (spec_binds_s, rules) = unzip pairs
; return (concatOL spec_binds_s, rules) }
-dsSpec :: Maybe CoreExpr -- Just rhs => RULE is for a local binding
- -- Nothing => RULE is for an imported Id
- -- rhs is in the Id's unfolding
+dsSpec :: Maybe CoreExpr -- Just rhs => RULE is for a local binding
+ -- Nothing => RULE is for an imported Id
+ -- rhs is in the Id's unfolding
-> Located TcSpecPrag
-> DsM (Maybe (OrdList (Id,CoreExpr), CoreRule))
dsSpec mb_poly_rhs (L loc (SpecPrag poly_id spec_co spec_inl))
| isJust (isClassOpId_maybe poly_id)
- = putSrcSpanDs loc $
- do { warnDs (ptext (sLit "Ignoring useless SPECIALISE pragma for class method selector")
+ = putSrcSpanDs loc $
+ do { warnDs (ptext (sLit "Ignoring useless SPECIALISE pragma for class method selector")
<+> quotes (ppr poly_id))
; return Nothing } -- There is no point in trying to specialise a class op
- -- Moreover, classops don't (currently) have an inl_sat arity set
- -- (it would be Just 0) and that in turn makes makeCorePair bleat
+ -- Moreover, classops don't (currently) have an inl_sat arity set
+ -- (it would be Just 0) and that in turn makes makeCorePair bleat
- | no_act_spec && isNeverActive rule_act
- = putSrcSpanDs loc $
+ | no_act_spec && isNeverActive rule_act
+ = putSrcSpanDs loc $
do { warnDs (ptext (sLit "Ignoring useless SPECIALISE pragma for NOINLINE function:")
<+> quotes (ppr poly_id))
; return Nothing } -- Function is NOINLINE, and the specialiation inherits that
- -- See Note [Activation pragmas for SPECIALISE]
+ -- See Note [Activation pragmas for SPECIALISE]
| otherwise
- = putSrcSpanDs loc $
+ = putSrcSpanDs loc $
do { uniq <- newUnique
; let poly_name = idName poly_id
spec_occ = mkSpecOcc (getOccName poly_name)
@@ -467,14 +461,14 @@ dsSpec mb_poly_rhs (L loc (SpecPrag poly_id spec_co spec_inl))
unf_fvs = stableUnfoldingVars fn_unf `orElse` emptyVarSet
in_scope = mkInScopeSet (unf_fvs `unionVarSet` exprsFreeVars args)
spec_unf = specUnfolding dflags (mkEmptySubst in_scope) bndrs args fn_unf
- spec_id = mkLocalId spec_name spec_ty
- `setInlinePragma` inl_prag
- `setIdUnfolding` spec_unf
+ spec_id = mkLocalId spec_name spec_ty
+ `setInlinePragma` inl_prag
+ `setIdUnfolding` spec_unf
rule = mkRule False {- Not auto -} is_local_id
(mkFastString ("SPEC " ++ showPpr dflags poly_name))
- rule_act poly_name
- rule_bndrs args
- (mkVarApps (Var spec_id) bndrs)
+ rule_act poly_name
+ rule_bndrs args
+ (mkVarApps (Var spec_id) bndrs)
; spec_rhs <- dsHsWrapper spec_co poly_rhs
@@ -489,21 +483,21 @@ dsSpec mb_poly_rhs (L loc (SpecPrag poly_id spec_co spec_inl))
where
is_local_id = isJust mb_poly_rhs
poly_rhs | Just rhs <- mb_poly_rhs
- = rhs -- Local Id; this is its rhs
+ = rhs -- Local Id; this is its rhs
| Just unfolding <- maybeUnfoldingTemplate (realIdUnfolding poly_id)
= unfolding -- Imported Id; this is its unfolding
- -- Use realIdUnfolding so we get the unfolding
- -- even when it is a loop breaker.
- -- We want to specialise recursive functions!
+ -- Use realIdUnfolding so we get the unfolding
+ -- even when it is a loop breaker.
+ -- We want to specialise recursive functions!
| otherwise = pprPanic "dsImpSpecs" (ppr poly_id)
- -- The type checker has checked that it *has* an unfolding
+ -- The type checker has checked that it *has* an unfolding
id_inl = idInlinePragma poly_id
-- See Note [Activation pragmas for SPECIALISE]
inl_prag | not (isDefaultInlinePragma spec_inl) = spec_inl
| not is_local_id -- See Note [Specialising imported functions]
- -- in OccurAnal
+ -- in OccurAnal
, isStrongLoopBreaker (idOccInfo poly_id) = neverInlinePragma
| otherwise = id_inl
-- Get the INLINE pragma from SPECIALISE declaration, or,
@@ -522,7 +516,7 @@ dsSpec mb_poly_rhs (L loc (SpecPrag poly_id spec_co spec_inl))
specOnInline :: Name -> MsgDoc
-specOnInline f = ptext (sLit "SPECIALISE pragma on INLINE function probably won't fire:")
+specOnInline f = ptext (sLit "SPECIALISE pragma on INLINE function probably won't fire:")
<+> quotes (ppr f)
\end{code}
@@ -535,7 +529,7 @@ From a user SPECIALISE pragma for f, we generate
We need two pragma-like things:
-* spec_fn's inline pragma: inherited from f's inline pragma (ignoring
+* spec_fn's inline pragma: inherited from f's inline pragma (ignoring
activation on SPEC), unless overriden by SPEC INLINE
* Activation of RULE: from SPECIALISE pragma (if activation given)
@@ -557,7 +551,7 @@ SPEC [n] f :: ty [n] NOINLINE [k]
copy f's prag
INLINE [k] f
-SPEC [n] f :: ty [n] INLINE [k]
+SPEC [n] f :: ty [n] INLINE [k]
copy f's prag
SPEC INLINE [n] f :: ty [n] INLINE [n]
@@ -569,9 +563,9 @@ SPEC f :: ty [n] INLINE [k]
%************************************************************************
-%* *
+%* *
\subsection{Adding inline pragmas}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -598,11 +592,11 @@ decomposeRuleLhs orig_bndrs orig_lhs
Right (bndrs1, fn_var, args)
| Case scrut bndr ty [(DEFAULT, _, body)] <- fun
- , isDeadBinder bndr -- Note [Matching seqId]
+ , isDeadBinder bndr -- Note [Matching seqId]
, let args' = [Type (idType bndr), Type ty, scrut, body]
= Right (bndrs1, seqId, args' ++ args)
- | otherwise
+ | otherwise
= Left bad_shape_msg
where
lhs1 = drop_dicts orig_lhs
@@ -623,7 +617,7 @@ decomposeRuleLhs orig_bndrs orig_lhs
2 (vcat [ text "Optimised lhs:" <+> ppr lhs2
, text "Orig lhs:" <+> ppr orig_lhs])
dead_msg bndr = hang (sep [ ptext (sLit "Forall'd") <+> pp_bndr bndr
- , ptext (sLit "is not bound in RULE lhs")])
+ , ptext (sLit "is not bound in RULE lhs")])
2 (vcat [ text "Orig bndrs:" <+> ppr orig_bndrs
, text "Orig lhs:" <+> ppr orig_lhs
, text "optimised lhs:" <+> ppr lhs2 ])
@@ -633,12 +627,12 @@ decomposeRuleLhs orig_bndrs orig_lhs
| otherwise = ptext (sLit "variable") <+> quotes (ppr bndr)
drop_dicts :: CoreExpr -> CoreExpr
- drop_dicts e
+ drop_dicts e
= wrap_lets needed bnds body
where
needed = orig_bndr_set `minusVarSet` exprFreeVars body
(bnds, body) = split_lets (occurAnalyseExpr e)
- -- The occurAnalyseExpr drops dead bindings which is
+ -- The occurAnalyseExpr drops dead bindings which is
-- crucial to ensure that every binding is used later;
-- which in turn makes wrap_lets work right
@@ -663,22 +657,22 @@ decomposeRuleLhs orig_bndrs orig_lhs
Note [Decomposing the left-hand side of a RULE]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-There are several things going on here.
+There are several things going on here.
* drop_dicts: see Note [Drop dictionary bindings on rule LHS]
* simpleOptExpr: see Note [Simplify rule LHS]
* extra_dict_bndrs: see Note [Free dictionaries]
Note [Drop dictionary bindings on rule LHS]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-drop_dicts drops dictionary bindings on the LHS where possible.
+drop_dicts drops dictionary bindings on the LHS where possible.
E.g. let d:Eq [Int] = $fEqList $fEqInt in f d
--> f d
- Reasoning here is that there is only one d:Eq [Int], and so we can
+ Reasoning here is that there is only one d:Eq [Int], and so we can
quantify over it. That makes 'd' free in the LHS, but that is later
picked up by extra_dict_bndrs (Note [Dead spec binders]).
NB 1: We can only drop the binding if the RHS doesn't bind
- one of the orig_bndrs, which we assume occur on RHS.
+ one of the orig_bndrs, which we assume occur on RHS.
Example
f :: (Eq a) => b -> a -> a
{-# SPECIALISE f :: Eq a => b -> [a] -> [a] #-}
@@ -687,7 +681,7 @@ drop_dicts drops dictionary bindings on the LHS where possible.
Of course, the ($dfEqlist d) in the pattern makes it less likely
to match, but ther is no other way to get d:Eq a
- NB 2: We do drop_dicts *before* simplOptEpxr, so that we expect all
+ NB 2: We do drop_dicts *before* simplOptEpxr, so that we expect all
the evidence bindings to be wrapped around the outside of the
LHS. (After simplOptExpr they'll usually have been inlined.)
dsHsWrapper does dependency analysis, so that civilised ones
@@ -728,39 +722,39 @@ Note [Simplify rule LHS]
~~~~~~~~~~~~~~~~~~~~~~~~
simplOptExpr occurrence-analyses and simplifies the LHS:
- (a) Inline any remaining dictionary bindings (which hopefully
+ (a) Inline any remaining dictionary bindings (which hopefully
occur just once)
(b) Substitute trivial lets so that they don't get in the way
- Note that we substitute the function too; we might
+ Note that we substitute the function too; we might
have this as a LHS: let f71 = M.f Int in f71
- (c) Do eta reduction. To see why, consider the fold/build rule,
+ (c) Do eta reduction. To see why, consider the fold/build rule,
which without simplification looked like:
fold k z (build (/\a. g a)) ==> ...
This doesn't match unless you do eta reduction on the build argument.
Similarly for a LHS like
- augment g (build h)
+ augment g (build h)
we do not want to get
- augment (\a. g a) (build h)
+ augment (\a. g a) (build h)
otherwise we don't match when given an argument like
augment (\a. h a a) (build h)
Note [Matching seqId]
~~~~~~~~~~~~~~~~~~~
The desugarer turns (seq e r) into (case e of _ -> r), via a special-case hack
-and this code turns it back into an application of seq!
+and this code turns it back into an application of seq!
See Note [Rules for seq] in MkId for the details.
Note [Unused spec binders]
~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
- f :: a -> a
- {-# SPECIALISE f :: Eq a => a -> a #-}
+ f :: a -> a
+ {-# SPECIALISE f :: Eq a => a -> a #-}
It's true that this *is* a more specialised type, but the rule
we get is something like this:
- f_spec d = f
- RULE: f = f_spec d
+ f_spec d = f
+ RULE: f = f_spec d
Note that the rule is bogus, because it mentions a 'd' that is
not bound on the LHS! But it's a silly specialisation anyway, because
the constraint is unused. We could bind 'd' to (error "unused")
@@ -769,22 +763,22 @@ a mistake. That's what the isDeadBinder call detects.
Note [Free dictionaries]
~~~~~~~~~~~~~~~~~~~~~~~~
-When the LHS of a specialisation rule, (/\as\ds. f es) has a free dict,
-which is presumably in scope at the function definition site, we can quantify
+When the LHS of a specialisation rule, (/\as\ds. f es) has a free dict,
+which is presumably in scope at the function definition site, we can quantify
over it too. *Any* dict with that type will do.
So for example when you have
- f :: Eq a => a -> a
- f = <rhs>
- {-# SPECIALISE f :: Int -> Int #-}
+ f :: Eq a => a -> a
+ f = <rhs>
+ {-# SPECIALISE f :: Int -> Int #-}
Then we get the SpecPrag
- SpecPrag (f Int dInt)
+ SpecPrag (f Int dInt)
And from that we want the rule
-
- RULE forall dInt. f Int dInt = f_spec
- f_spec = let f = <rhs> in f Int dInt
+
+ RULE forall dInt. f Int dInt = f_spec
+ f_spec = let f = <rhs> in f Int dInt
But be careful! That dInt might be GHC.Base.$fOrdInt, which is an External
Name, and you can't bind them in a lambda or forall without getting things
@@ -794,23 +788,23 @@ as the old one, but with an Internal name and no IdInfo.
%************************************************************************
-%* *
- Desugaring evidence
-%* *
+%* *
+ Desugaring evidence
+%* *
%************************************************************************
\begin{code}
dsHsWrapper :: HsWrapper -> CoreExpr -> DsM CoreExpr
-dsHsWrapper WpHole e = return e
+dsHsWrapper WpHole e = return e
dsHsWrapper (WpTyApp ty) e = return $ App e (Type ty)
dsHsWrapper (WpLet ev_binds) e = do bs <- dsTcEvBinds ev_binds
return (mkCoreLets bs e)
dsHsWrapper (WpCompose c1 c2) e = dsHsWrapper c1 =<< dsHsWrapper c2 e
dsHsWrapper (WpCast co) e = ASSERT(tcCoercionRole co == Representational)
dsTcCoercion co (mkCast e)
-dsHsWrapper (WpEvLam ev) e = return $ Lam ev e
-dsHsWrapper (WpTyLam tv) e = return $ Lam tv e
+dsHsWrapper (WpEvLam ev) e = return $ Lam ev e
+dsHsWrapper (WpTyLam tv) e = return $ Lam tv e
dsHsWrapper (WpEvApp evtrm) e = liftM (App e) (dsEvTerm evtrm)
--------------------------------------
@@ -830,7 +824,7 @@ sccEvBinds :: Bag EvBind -> [SCC EvBind]
sccEvBinds bs = stronglyConnCompFromEdgedVertices edges
where
edges :: [(EvBind, EvVar, [EvVar])]
- edges = foldrBag ((:) . mk_node) [] bs
+ edges = foldrBag ((:) . mk_node) [] bs
mk_node :: EvBind -> (EvBind, EvVar, [EvVar])
mk_node b@(EvBind var term) = (b, var, varSetElems (evVarsOfTerm term))
@@ -840,7 +834,7 @@ sccEvBinds bs = stronglyConnCompFromEdgedVertices edges
dsEvTerm :: EvTerm -> DsM CoreExpr
dsEvTerm (EvId v) = return (Var v)
-dsEvTerm (EvCast tm co)
+dsEvTerm (EvCast tm co)
= do { tm' <- dsEvTerm tm
; dsTcCoercion co $ mkCast tm' }
-- 'v' is always a lifted evidence variable so it is
@@ -856,29 +850,29 @@ dsEvTerm (EvTupleSel v n)
= do { tm' <- dsEvTerm v
; let scrut_ty = exprType tm'
(tc, tys) = splitTyConApp scrut_ty
- Just [dc] = tyConDataCons_maybe tc
- xs = mkTemplateLocals tys
+ Just [dc] = tyConDataCons_maybe tc
+ xs = mkTemplateLocals tys
the_x = getNth xs n
; ASSERT( isTupleTyCon tc )
return $
Case tm' (mkWildValBinder scrut_ty) (idType the_x) [(DataAlt dc, xs, Var the_x)] }
-dsEvTerm (EvTupleMk tms)
+dsEvTerm (EvTupleMk tms)
= do { tms' <- mapM dsEvTerm tms
; let tys = map exprType tms'
; return $ Var (dataConWorkId dc) `mkTyApps` tys `mkApps` tms' }
- where
+ where
dc = tupleCon ConstraintTuple (length tms)
dsEvTerm (EvSuperClass d n)
= do { d' <- dsEvTerm d
; let (cls, tys) = getClassPredTys (exprType d')
- sc_sel_id = classSCSelId cls n -- Zero-indexed
+ sc_sel_id = classSCSelId cls n -- Zero-indexed
; return $ Var sc_sel_id `mkTyApps` tys `App` d' }
where
dsEvTerm (EvDelayedError ty msg) = return $ Var errorId `mkTyApps` [ty] `mkApps` [litMsg]
- where
+ where
errorId = rUNTIME_ERROR_ID
litMsg = Lit (MachStr (fastStringToByteString msg))
@@ -889,7 +883,7 @@ dsEvTerm (EvLit l) =
---------------------------------------
dsTcCoercion :: TcCoercion -> (Coercion -> CoreExpr) -> DsM CoreExpr
--- This is the crucial function that moves
+-- This is the crucial function that moves
-- from TcCoercions to Coercions; see Note [TcCoercions] in Coercion
-- e.g. dsTcCoercion (trans g1 g2) k
-- = case g1 of EqBox g1# ->
@@ -927,7 +921,7 @@ ds_tc_coercion :: CvSubst -> TcCoercion -> Coercion
-- If the incoming TcCoercion if of type (a ~ b) (resp. Coercible a b)
-- the result is of type (a ~# b) (reps. a ~# b)
-- The VarEnv maps EqVars of type (a ~ b) to Coercions of type (a ~# b) (resp. and so on)
--- No need for InScope set etc because the
+-- No need for InScope set etc because the
ds_tc_coercion subst tc_co
= go tc_co
where
@@ -978,7 +972,7 @@ Note [Simple coercions]
We have a special case for coercions that are simple variables.
Suppose cv :: a ~ b is in scope
Lacking the special case, if we see
- f a b cv
+ f a b cv
we'd desguar to
f a b (case cv of EqBox (cv# :: a ~# b) -> EqBox cv#)
which is a bit stupid. The special case does the obvious thing.
@@ -990,7 +984,7 @@ This turns out to be important when desugaring the LHS of a RULE
{-# RULES "normalise" normalise = normalise_Double #-}
Then the RULE we want looks like
- forall a, (cv:a~Scalar a).
+ forall a, (cv:a~Scalar a).
normalise a cv = normalise_Double
But without the special case we generate the redundant box/unbox,
which simpleOpt (currently) doesn't remove. So the rule never matches.
diff --git a/compiler/deSugar/DsUtils.lhs b/compiler/deSugar/DsUtils.lhs
index c52b917efd..a269374bed 100644
--- a/compiler/deSugar/DsUtils.lhs
+++ b/compiler/deSugar/DsUtils.lhs
@@ -9,28 +9,22 @@ This module exports some utility functions of no great interest.
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-- | Utility functions for constructing Core syntax, principally for desugaring
module DsUtils (
- EquationInfo(..),
- firstPat, shiftEqns,
+ EquationInfo(..),
+ firstPat, shiftEqns,
- MatchResult(..), CanItFail(..), CaseAlt(..),
- cantFailMatchResult, alwaysFailMatchResult,
- extractMatchResult, combineMatchResults,
- adjustMatchResult, adjustMatchResultDs,
- mkCoLetMatchResult, mkViewMatchResult, mkGuardedMatchResult,
- matchCanFail, mkEvalMatchResult,
- mkCoPrimCaseMatchResult, mkCoAlgCaseMatchResult, mkCoSynCaseMatchResult,
- wrapBind, wrapBinds,
+ MatchResult(..), CanItFail(..), CaseAlt(..),
+ cantFailMatchResult, alwaysFailMatchResult,
+ extractMatchResult, combineMatchResults,
+ adjustMatchResult, adjustMatchResultDs,
+ mkCoLetMatchResult, mkViewMatchResult, mkGuardedMatchResult,
+ matchCanFail, mkEvalMatchResult,
+ mkCoPrimCaseMatchResult, mkCoAlgCaseMatchResult, mkCoSynCaseMatchResult,
+ wrapBind, wrapBinds,
- mkErrorAppDs, mkCoreAppDs, mkCoreAppsDs,
+ mkErrorAppDs, mkCoreAppDs, mkCoreAppsDs,
seqVar,
@@ -40,13 +34,13 @@ module DsUtils (
mkSelectorBinds,
- selectSimpleMatchVarL, selectMatchVars, selectMatchVar,
+ selectSimpleMatchVarL, selectMatchVars, selectMatchVar,
mkOptTickBox, mkBinaryTickBox
) where
#include "HsVersions.h"
-import {-# SOURCE #-} Match ( matchSimply )
+import {-# SOURCE #-} Match ( matchSimply )
import HsSyn
import TcHsSyn
@@ -85,9 +79,9 @@ import Control.Monad ( zipWithM )
%************************************************************************
-%* *
+%* *
\subsection{ Selecting match variables}
-%* *
+%* *
%************************************************************************
We're about to match against some patterns. We want to make some
@@ -105,13 +99,13 @@ selectSimpleMatchVarL pat = selectMatchVar (unLoc pat)
--
-- OLD, but interesting note:
-- But even if it is a variable, its type might not match. Consider
--- data T a where
--- T1 :: Int -> T Int
--- T2 :: a -> T a
+-- data T a where
+-- T1 :: Int -> T Int
+-- T2 :: a -> T a
--
--- f :: T a -> a -> Int
--- f (T1 i) (x::Int) = x
--- f (T2 i) (y::a) = 0
+-- f :: T a -> a -> Int
+-- f (T1 i) (x::Int) = x
+-- f (T2 i) (y::a) = 0
-- Then we must not choose (x::Int) as the matching variable!
-- And nowadays we won't, because the (x::Int) will be wrapped in a CoPat
@@ -125,7 +119,7 @@ selectMatchVar (ParPat pat) = selectMatchVar (unLoc pat)
selectMatchVar (VarPat var) = return (localiseId var) -- Note [Localise pattern binders]
selectMatchVar (AsPat var _) = return (unLoc var)
selectMatchVar other_pat = newSysLocalDs (hsPatType other_pat)
- -- OK, better make up one...
+ -- OK, better make up one...
\end{code}
Note [Localise pattern binders]
@@ -147,7 +141,7 @@ different *unique* by then (the simplifier is good about maintaining
proper scoping), but it's BAD to have two top-level bindings with the
External Name M.a, because that turns into two linker symbols for M.a.
It's quite rare for this to actually *happen* -- the only case I know
-of is tc003 compiled with the 'hpc' way -- but that only makes it
+of is tc003 compiled with the 'hpc' way -- but that only makes it
all the more annoying.
To avoid this, we craftily call 'localiseId' in the desugarer, which
@@ -167,9 +161,9 @@ the desugaring pass.
%************************************************************************
-%* *
-%* type synonym EquationInfo and access functions for its pieces *
-%* *
+%* *
+%* type synonym EquationInfo and access functions for its pieces *
+%* *
%************************************************************************
\subsection[EquationInfo-synonym]{@EquationInfo@: a useful synonym}
@@ -234,13 +228,13 @@ wrapBinds [] e = e
wrapBinds ((new,old):prs) e = wrapBind new old (wrapBinds prs e)
wrapBind :: Var -> Var -> CoreExpr -> CoreExpr
-wrapBind new old body -- NB: this function must deal with term
- | new==old = body -- variables, type variables or coercion variables
+wrapBind new old body -- NB: this function must deal with term
+ | new==old = body -- variables, type variables or coercion variables
| otherwise = Let (NonRec new (varToCoreExpr old)) body
seqVar :: Var -> CoreExpr -> CoreExpr
seqVar var body = Case (Var var) var (exprType body)
- [(DEFAULT, [], body)]
+ [(DEFAULT, [], body)]
mkCoLetMatchResult :: CoreBind -> MatchResult -> MatchResult
mkCoLetMatchResult bind = adjustMatchResult (mkCoreLet bind)
@@ -248,22 +242,22 @@ mkCoLetMatchResult bind = adjustMatchResult (mkCoreLet bind)
-- (mkViewMatchResult var' viewExpr var mr) makes the expression
-- let var' = viewExpr var in mr
mkViewMatchResult :: Id -> CoreExpr -> Id -> MatchResult -> MatchResult
-mkViewMatchResult var' viewExpr var =
+mkViewMatchResult var' viewExpr var =
adjustMatchResult (mkCoreLet (NonRec var' (mkCoreAppDs viewExpr (Var var))))
mkEvalMatchResult :: Id -> Type -> MatchResult -> MatchResult
mkEvalMatchResult var ty
- = adjustMatchResult (\e -> Case (Var var) var ty [(DEFAULT, [], e)])
+ = adjustMatchResult (\e -> Case (Var var) var ty [(DEFAULT, [], e)])
mkGuardedMatchResult :: CoreExpr -> MatchResult -> MatchResult
mkGuardedMatchResult pred_expr (MatchResult _ body_fn)
= MatchResult CanFail (\fail -> do body <- body_fn fail
return (mkIfThenElse pred_expr body fail))
-mkCoPrimCaseMatchResult :: Id -- Scrutinee
+mkCoPrimCaseMatchResult :: Id -- Scrutinee
-> Type -- Type of the case
- -> [(Literal, MatchResult)] -- Alternatives
- -> MatchResult -- Literals are all unlifted
+ -> [(Literal, MatchResult)] -- Alternatives
+ -> MatchResult -- Literals are all unlifted
mkCoPrimCaseMatchResult var ty match_alts
= MatchResult CanFail mk_case
where
@@ -271,7 +265,7 @@ mkCoPrimCaseMatchResult var ty match_alts
alts <- mapM (mk_alt fail) sorted_alts
return (Case (Var var) var ty ((DEFAULT, [], fail) : alts))
- sorted_alts = sortWith fst match_alts -- Right order for a Case
+ sorted_alts = sortWith fst match_alts -- Right order for a Case
mk_alt fail (lit, MatchResult _ body_fn)
= ASSERT( not (litIsLifted lit) )
do body <- body_fn fail
@@ -282,13 +276,13 @@ data CaseAlt a = MkCaseAlt{ alt_pat :: a,
alt_wrapper :: HsWrapper,
alt_result :: MatchResult }
-mkCoAlgCaseMatchResult
+mkCoAlgCaseMatchResult
:: DynFlags
-> Id -- Scrutinee
-> Type -- Type of exp
-> [CaseAlt DataCon] -- Alternatives (bndrs *include* tyvars, dicts)
-> MatchResult
-mkCoAlgCaseMatchResult dflags var ty match_alts
+mkCoAlgCaseMatchResult dflags var ty match_alts
| isNewtype -- Newtype case; use a let
= ASSERT( null (tail match_alts) && null (tail arg_ids1) )
mkCoLetMatchResult (NonRec arg_id1 newtype_rhs) match_result1
@@ -300,36 +294,36 @@ mkCoAlgCaseMatchResult dflags var ty match_alts
where
isNewtype = isNewTyCon (dataConTyCon (alt_pat alt1))
- -- [Interesting: because of GADTs, we can't rely on the type of
- -- the scrutinised Id to be sufficiently refined to have a TyCon in it]
+ -- [Interesting: because of GADTs, we can't rely on the type of
+ -- the scrutinised Id to be sufficiently refined to have a TyCon in it]
alt1@MkCaseAlt{ alt_bndrs = arg_ids1, alt_result = match_result1 }
= ASSERT( notNull match_alts ) head match_alts
-- Stuff for newtype
arg_id1 = ASSERT( notNull arg_ids1 ) head arg_ids1
var_ty = idType var
- (tc, ty_args) = tcSplitTyConApp var_ty -- Don't look through newtypes
- -- (not that splitTyConApp does, these days)
+ (tc, ty_args) = tcSplitTyConApp var_ty -- Don't look through newtypes
+ -- (not that splitTyConApp does, these days)
newtype_rhs = unwrapNewTypeBody tc ty_args (Var var)
--- Stuff for parallel arrays
--
- -- Concerning `isPArrFakeAlts':
- --
- -- * it is *not* sufficient to just check the type of the type
- -- constructor, as we have to be careful not to confuse the real
- -- representation of parallel arrays with the fake constructors;
- -- moreover, a list of alternatives must not mix fake and real
- -- constructors (this is checked earlier on)
- --
- -- FIXME: We actually go through the whole list and make sure that
- -- either all or none of the constructors are fake parallel
- -- array constructors. This is to spot equations that mix fake
- -- constructors with the real representation defined in
- -- `PrelPArr'. It would be nicer to spot this situation
- -- earlier and raise a proper error message, but it can really
- -- only happen in `PrelPArr' anyway.
- --
+ -- Concerning `isPArrFakeAlts':
+ --
+ -- * it is *not* sufficient to just check the type of the type
+ -- constructor, as we have to be careful not to confuse the real
+ -- representation of parallel arrays with the fake constructors;
+ -- moreover, a list of alternatives must not mix fake and real
+ -- constructors (this is checked earlier on)
+ --
+ -- FIXME: We actually go through the whole list and make sure that
+ -- either all or none of the constructors are fake parallel
+ -- array constructors. This is to spot equations that mix fake
+ -- constructors with the real representation defined in
+ -- `PrelPArr'. It would be nicer to spot this situation
+ -- earlier and raise a proper error message, but it can really
+ -- only happen in `PrelPArr' anyway.
+ --
isPArrFakeAlts :: [CaseAlt DataCon] -> Bool
isPArrFakeAlts [alt] = isPArrFakeCon (alt_pat alt)
@@ -454,16 +448,16 @@ mkPArrCase dflags var ty sorted_alts fail = do
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{Desugarer's versions of some Core functions}
-%* *
+%* *
%************************************************************************
\begin{code}
-mkErrorAppDs :: Id -- The error function
- -> Type -- Type to which it should be applied
- -> SDoc -- The error message string to pass
- -> DsM CoreExpr
+mkErrorAppDs :: Id -- The error function
+ -> Type -- Type to which it should be applied
+ -> SDoc -- The error message string to pass
+ -> DsM CoreExpr
mkErrorAppDs err_id ty msg = do
src_loc <- getSrcSpanDs
@@ -481,13 +475,13 @@ Note [Desugaring seq (1)] cf Trac #1031
~~~~~~~~~~~~~~~~~~~~~~~~~
f x y = x `seq` (y `seq` (# x,y #))
-The [CoreSyn let/app invariant] means that, other things being equal, because
+The [CoreSyn let/app invariant] means that, other things being equal, because
the argument to the outer 'seq' has an unlifted type, we'll use call-by-value thus:
f x y = case (y `seq` (# x,y #)) of v -> x `seq` v
-But that is bad for two reasons:
- (a) we now evaluate y before x, and
+But that is bad for two reasons:
+ (a) we now evaluate y before x, and
(b) we can't bind v to an unboxed pair
Seq is very, very special! So we recognise it right here, and desugar to
@@ -531,15 +525,15 @@ So we desugar our example to:
And now all is well.
The reason it's a hack is because if you define mySeq=seq, the hack
-won't work on mySeq.
+won't work on mySeq.
Note [Desugaring seq (3)] cf Trac #2409
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The isLocalId ensures that we don't turn
+The isLocalId ensures that we don't turn
True `seq` e
into
case True of True { ... }
-which stupidly tries to bind the datacon 'True'.
+which stupidly tries to bind the datacon 'True'.
\begin{code}
mkCoreAppDs :: CoreExpr -> CoreExpr -> CoreExpr
@@ -551,7 +545,7 @@ mkCoreAppDs (Var f `App` Type ty1 `App` Type ty2 `App` arg1) arg2
Var v1 | isLocalId v1 -> v1 -- Note [Desugaring seq (2) and (3)]
_ -> mkWildValBinder ty1
-mkCoreAppDs fun arg = mkCoreApp fun arg -- The rest is done in MkCore
+mkCoreAppDs fun arg = mkCoreApp fun arg -- The rest is done in MkCore
mkCoreAppsDs :: CoreExpr -> [CoreExpr] -> CoreExpr
mkCoreAppsDs fun args = foldl mkCoreAppDs fun args
@@ -559,9 +553,9 @@ mkCoreAppsDs fun args = foldl mkCoreAppDs fun args
%************************************************************************
-%* *
+%* *
\subsection[mkSelectorBind]{Make a selector bind}
-%* *
+%* *
%************************************************************************
This is used in various places to do with lazy patterns.
@@ -593,12 +587,12 @@ OR (B) t = case e of p -> (x,y)
x = case t of (x,_) -> x
y = case t of (_,y) -> y
-We do (A) when
+We do (A) when
* Matching the pattern is cheap so we don't mind
- doing it twice.
+ doing it twice.
* Or if the pattern binds only one variable (so we'll only
match once)
- * AND the pattern can't fail (else we tiresomely get two inexhaustive
+ * AND the pattern can't fail (else we tiresomely get two inexhaustive
pattern warning messages)
Otherwise we do (B). Really (A) is just an optimisation for very common
@@ -609,8 +603,8 @@ cases like
\begin{code}
mkSelectorBinds :: [Maybe (Tickish Id)] -- ticks to add, possibly
-> LPat Id -- The pattern
- -> CoreExpr -- Expression to which the pattern is bound
- -> DsM [(Id,CoreExpr)]
+ -> CoreExpr -- Expression to which the pattern is bound
+ -> DsM [(Id,CoreExpr)]
mkSelectorBinds ticks (L _ (VarPat v)) val_expr
= return [(v, case ticks of
@@ -618,7 +612,7 @@ mkSelectorBinds ticks (L _ (VarPat v)) val_expr
_ -> val_expr)]
mkSelectorBinds ticks pat val_expr
- | null binders
+ | null binders
= return []
| isSingleton binders || is_simple_lpat pat
@@ -626,7 +620,7 @@ mkSelectorBinds ticks pat val_expr
= do { val_var <- newSysLocalDs (hsLPatType pat)
-- Make up 'v' in Note [mkSelectorBinds]
-- NB: give it the type of *pattern* p, not the type of the *rhs* e.
- -- This does not matter after desugaring, but there's a subtle
+ -- This does not matter after desugaring, but there's a subtle
-- issue with implicit parameters. Consider
-- (x,y) = ?i
-- Then, ?i is given type {?i :: Int}, a PredType, which is opaque
@@ -701,8 +695,8 @@ which is whey they are not in HsUtils.
mkLHsPatTup :: [LPat Id] -> LPat Id
mkLHsPatTup [] = noLoc $ mkVanillaTuplePat [] Boxed
mkLHsPatTup [lpat] = lpat
-mkLHsPatTup lpats = L (getLoc (head lpats)) $
- mkVanillaTuplePat lpats Boxed
+mkLHsPatTup lpats = L (getLoc (head lpats)) $
+ mkVanillaTuplePat lpats Boxed
mkLHsVarPatTup :: [Id] -> LPat Id
mkLHsVarPatTup bs = mkLHsPatTup (map nlVarPat bs)
@@ -727,21 +721,21 @@ mkBigLHsPatTup = mkChunkified mkLHsPatTup
\end{code}
%************************************************************************
-%* *
+%* *
\subsection[mkFailurePair]{Code for pattern-matching and other failures}
-%* *
+%* *
%************************************************************************
Generally, we handle pattern matching failure like this: let-bind a
fail-variable, and use that variable if the thing fails:
\begin{verbatim}
- let fail.33 = error "Help"
- in
- case x of
- p1 -> ...
- p2 -> fail.33
- p3 -> fail.33
- p4 -> ...
+ let fail.33 = error "Help"
+ in
+ case x of
+ p1 -> ...
+ p2 -> fail.33
+ p3 -> fail.33
+ p4 -> ...
\end{verbatim}
Then
\begin{itemize}
@@ -760,31 +754,31 @@ There's a problem when the result of the case expression is of
unboxed type. Then the type of @fail.33@ is unboxed too, and
there is every chance that someone will change the let into a case:
\begin{verbatim}
- case error "Help" of
- fail.33 -> case ....
+ case error "Help" of
+ fail.33 -> case ....
\end{verbatim}
which is of course utterly wrong. Rather than drop the condition that
only boxed types can be let-bound, we just turn the fail into a function
for the primitive case:
\begin{verbatim}
- let fail.33 :: Void -> Int#
- fail.33 = \_ -> error "Help"
- in
- case x of
- p1 -> ...
- p2 -> fail.33 void
- p3 -> fail.33 void
- p4 -> ...
+ let fail.33 :: Void -> Int#
+ fail.33 = \_ -> error "Help"
+ in
+ case x of
+ p1 -> ...
+ p2 -> fail.33 void
+ p3 -> fail.33 void
+ p4 -> ...
\end{verbatim}
Now @fail.33@ is a function, so it can be let-bound.
\begin{code}
-mkFailurePair :: CoreExpr -- Result type of the whole case expression
- -> DsM (CoreBind, -- Binds the newly-created fail variable
- -- to \ _ -> expression
- CoreExpr) -- Fail variable applied to realWorld#
+mkFailurePair :: CoreExpr -- Result type of the whole case expression
+ -> DsM (CoreBind, -- Binds the newly-created fail variable
+ -- to \ _ -> expression
+ CoreExpr) -- Fail variable applied to realWorld#
-- See Note [Failure thunks and CPR]
mkFailurePair expr
= do { fail_fun_var <- newFailLocalDs (voidPrimTy `mkFunTy` ty)
@@ -802,10 +796,10 @@ When we make a failure point we ensure that it
does not look like a thunk. Example:
let fail = \rw -> error "urk"
- in case x of
+ in case x of
[] -> fail realWorld#
(y:ys) -> case ys of
- [] -> fail realWorld#
+ [] -> fail realWorld#
(z:zs) -> (y,z)
Reason: we know that a failure point is always a "join point" and is
@@ -821,7 +815,7 @@ mkOptTickBox (Just tickish) e = Tick tickish e
mkBinaryTickBox :: Int -> Int -> CoreExpr -> DsM CoreExpr
mkBinaryTickBox ixT ixF e = do
- uq <- newUnique
+ uq <- newUnique
this_mod <- getModule
let bndr1 = mkSysLocal (fsLit "t1") uq boolTy
let
diff --git a/compiler/deSugar/MatchCon.lhs b/compiler/deSugar/MatchCon.lhs
index 8e581f66e2..611d48e456 100644
--- a/compiler/deSugar/MatchCon.lhs
+++ b/compiler/deSugar/MatchCon.lhs
@@ -7,18 +7,12 @@ Pattern-matching constructors
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module MatchCon ( matchConFamily, matchPatSyn ) where
#include "HsVersions.h"
-import {-# SOURCE #-} Match ( match )
+import {-# SOURCE #-} Match ( match )
import HsSyn
import DsBinds
@@ -92,8 +86,8 @@ have-we-used-all-the-constructors? question; the local function
\begin{code}
matchConFamily :: [Id]
-> Type
- -> [[EquationInfo]]
- -> DsM MatchResult
+ -> [[EquationInfo]]
+ -> DsM MatchResult
-- Each group of eqns is for a single constructor
matchConFamily (var:vars) ty groups
= do dflags <- getDynFlags
@@ -124,17 +118,17 @@ matchOneConLike :: [Id]
-> Type
-> [EquationInfo]
-> DsM (CaseAlt ConLike)
-matchOneConLike vars ty (eqn1 : eqns) -- All eqns for a single constructor
- = do { arg_vars <- selectConMatchVars val_arg_tys args1
- -- Use the first equation as a source of
- -- suggestions for the new variables
+matchOneConLike vars ty (eqn1 : eqns) -- All eqns for a single constructor
+ = do { arg_vars <- selectConMatchVars val_arg_tys args1
+ -- Use the first equation as a source of
+ -- suggestions for the new variables
- -- Divide into sub-groups; see Note [Record patterns]
+ -- Divide into sub-groups; see Note [Record patterns]
; let groups :: [[(ConArgPats, EquationInfo)]]
- groups = runs compatible_pats [ (pat_args (firstPat eqn), eqn)
- | eqn <- eqn1:eqns ]
+ groups = runs compatible_pats [ (pat_args (firstPat eqn), eqn)
+ | eqn <- eqn1:eqns ]
- ; match_results <- mapM (match_group arg_vars) groups
+ ; match_results <- mapM (match_group arg_vars) groups
; return $ MkCaseAlt{ alt_pat = con1,
alt_bndrs = tvs1 ++ dicts1 ++ arg_vars,
@@ -142,19 +136,19 @@ matchOneConLike vars ty (eqn1 : eqns) -- All eqns for a single constructor
alt_result = foldr1 combineMatchResults match_results } }
where
ConPatOut { pat_con = L _ con1, pat_arg_tys = arg_tys, pat_wrap = wrapper1,
- pat_tvs = tvs1, pat_dicts = dicts1, pat_args = args1 }
- = firstPat eqn1
+ pat_tvs = tvs1, pat_dicts = dicts1, pat_args = args1 }
+ = firstPat eqn1
fields1 = case con1 of
- RealDataCon dcon1 -> dataConFieldLabels dcon1
- PatSynCon{} -> []
+ RealDataCon dcon1 -> dataConFieldLabels dcon1
+ PatSynCon{} -> []
val_arg_tys = case con1 of
RealDataCon dcon1 -> dataConInstOrigArgTys dcon1 inst_tys
PatSynCon psyn1 -> patSynInstArgTys psyn1 inst_tys
inst_tys = ASSERT( tvs1 `equalLength` ex_tvs )
arg_tys ++ mkTyVarTys tvs1
- -- dataConInstOrigArgTys takes the univ and existential tyvars
- -- and returns the types of the *value* args, which is what we want
+ -- dataConInstOrigArgTys takes the univ and existential tyvars
+ -- and returns the types of the *value* args, which is what we want
ex_tvs = case con1 of
RealDataCon dcon1 -> dataConExTyVars dcon1
@@ -165,13 +159,13 @@ matchOneConLike vars ty (eqn1 : eqns) -- All eqns for a single constructor
match_group arg_vars arg_eqn_prs
= ASSERT( notNull arg_eqn_prs )
do { (wraps, eqns') <- liftM unzip (mapM shift arg_eqn_prs)
- ; let group_arg_vars = select_arg_vars arg_vars arg_eqn_prs
- ; match_result <- match (group_arg_vars ++ vars) ty eqns'
- ; return (adjustMatchResult (foldr1 (.) wraps) match_result) }
+ ; let group_arg_vars = select_arg_vars arg_vars arg_eqn_prs
+ ; match_result <- match (group_arg_vars ++ vars) ty eqns'
+ ; return (adjustMatchResult (foldr1 (.) wraps) match_result) }
- shift (_, eqn@(EqnInfo { eqn_pats = ConPatOut{ pat_tvs = tvs, pat_dicts = ds,
- pat_binds = bind, pat_args = args
- } : pats }))
+ shift (_, eqn@(EqnInfo { eqn_pats = ConPatOut{ pat_tvs = tvs, pat_dicts = ds,
+ pat_binds = bind, pat_args = args
+ } : pats }))
= do ds_bind <- dsTcEvBinds bind
return ( wrapBinds (tvs `zip` tvs1)
. wrapBinds (ds `zip` dicts1)
@@ -184,17 +178,17 @@ matchOneConLike vars ty (eqn1 : eqns) -- All eqns for a single constructor
-- Note [Record patterns]
select_arg_vars arg_vars ((arg_pats, _) : _)
| RecCon flds <- arg_pats
- , let rpats = rec_flds flds
+ , let rpats = rec_flds flds
, not (null rpats) -- Treated specially; cf conArgPats
- = ASSERT2( length fields1 == length arg_vars,
+ = ASSERT2( length fields1 == length arg_vars,
ppr con1 $$ ppr fields1 $$ ppr arg_vars )
map lookup_fld rpats
| otherwise
= arg_vars
where
fld_var_env = mkNameEnv $ zipEqual "get_arg_vars" fields1 arg_vars
- lookup_fld rpat = lookupNameEnv_NF fld_var_env
- (idName (unLoc (hsRecFieldId rpat)))
+ lookup_fld rpat = lookupNameEnv_NF fld_var_env
+ (idName (unLoc (hsRecFieldId rpat)))
select_arg_vars _ [] = panic "matchOneCon/select_arg_vars []"
matchOneConLike _ _ [] = panic "matchOneCon []"
@@ -208,9 +202,9 @@ compatible_pats _ (RecCon flds2, _) = null (rec_flds flds2)
compatible_pats _ _ = True -- Prefix or infix con
same_fields :: HsRecFields Id (LPat Id) -> HsRecFields Id (LPat Id) -> Bool
-same_fields flds1 flds2
+same_fields flds1 flds2
= all2 (\f1 f2 -> unLoc (hsRecFieldId f1) == unLoc (hsRecFieldId f2))
- (rec_flds flds1) (rec_flds flds2)
+ (rec_flds flds1) (rec_flds flds2)
-----------------
@@ -219,38 +213,38 @@ selectConMatchVars arg_tys (RecCon {}) = newSysLocalsDs arg_tys
selectConMatchVars _ (PrefixCon ps) = selectMatchVars (map unLoc ps)
selectConMatchVars _ (InfixCon p1 p2) = selectMatchVars [unLoc p1, unLoc p2]
-conArgPats :: [Type] -- Instantiated argument types
- -- Used only to fill in the types of WildPats, which
- -- are probably never looked at anyway
- -> ConArgPats
- -> [Pat Id]
+conArgPats :: [Type] -- Instantiated argument types
+ -- Used only to fill in the types of WildPats, which
+ -- are probably never looked at anyway
+ -> ConArgPats
+ -> [Pat Id]
conArgPats _arg_tys (PrefixCon ps) = map unLoc ps
conArgPats _arg_tys (InfixCon p1 p2) = [unLoc p1, unLoc p2]
conArgPats arg_tys (RecCon (HsRecFields { rec_flds = rpats }))
| null rpats = map WildPat arg_tys
- -- Important special case for C {}, which can be used for a
- -- datacon that isn't declared to have fields at all
+ -- Important special case for C {}, which can be used for a
+ -- datacon that isn't declared to have fields at all
| otherwise = map (unLoc . hsRecFieldArg) rpats
\end{code}
Note [Record patterns]
~~~~~~~~~~~~~~~~~~~~~~
-Consider
- data T = T { x,y,z :: Bool }
+Consider
+ data T = T { x,y,z :: Bool }
- f (T { y=True, x=False }) = ...
+ f (T { y=True, x=False }) = ...
We must match the patterns IN THE ORDER GIVEN, thus for the first
-one we match y=True before x=False. See Trac #246; or imagine
+one we match y=True before x=False. See Trac #246; or imagine
matching against (T { y=False, x=undefined }): should fail without
-touching the undefined.
+touching the undefined.
Now consider:
- f (T { y=True, x=False }) = ...
- f (T { x=True, y= False}) = ...
+ f (T { y=True, x=False }) = ...
+ f (T { x=True, y= False}) = ...
-In the first we must test y first; in the second we must test x
+In the first we must test y first; in the second we must test x
first. So we must divide even the equations for a single constructor
T into sub-goups, based on whether they match the same field in the
same order. That's what the (runs compatible_pats) grouping.
@@ -264,31 +258,31 @@ Hence the (null rpats) checks here and there.
Note [Existentials in shift_con_pat]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
- data T = forall a. Ord a => T a (a->Int)
+ data T = forall a. Ord a => T a (a->Int)
- f (T x f) True = ...expr1...
- f (T y g) False = ...expr2..
+ f (T x f) True = ...expr1...
+ f (T y g) False = ...expr2..
When we put in the tyvars etc we get
- f (T a (d::Ord a) (x::a) (f::a->Int)) True = ...expr1...
- f (T b (e::Ord b) (y::a) (g::a->Int)) True = ...expr2...
+ f (T a (d::Ord a) (x::a) (f::a->Int)) True = ...expr1...
+ f (T b (e::Ord b) (y::a) (g::a->Int)) True = ...expr2...
After desugaring etc we'll get a single case:
- f = \t::T b::Bool ->
- case t of
- T a (d::Ord a) (x::a) (f::a->Int)) ->
- case b of
- True -> ...expr1...
- False -> ...expr2...
+ f = \t::T b::Bool ->
+ case t of
+ T a (d::Ord a) (x::a) (f::a->Int)) ->
+ case b of
+ True -> ...expr1...
+ False -> ...expr2...
*** We have to substitute [a/b, d/e] in expr2! **
Hence
- False -> ....((/\b\(e:Ord b).expr2) a d)....
+ False -> ....((/\b\(e:Ord b).expr2) a d)....
-Originally I tried to use
- (\b -> let e = d in expr2) a
+Originally I tried to use
+ (\b -> let e = d in expr2) a
to do this substitution. While this is "correct" in a way, it fails
-Lint, because e::Ord b but d::Ord a.
+Lint, because e::Ord b but d::Ord a.
diff --git a/compiler/iface/BuildTyCl.lhs b/compiler/iface/BuildTyCl.lhs
index 46091adf80..2a66de28ac 100644
--- a/compiler/iface/BuildTyCl.lhs
+++ b/compiler/iface/BuildTyCl.lhs
@@ -5,21 +5,15 @@
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module BuildTyCl (
buildSynTyCon,
- buildAlgTyCon,
+ buildAlgTyCon,
buildDataCon,
buildPatSyn,
TcMethInfo, buildClass,
distinctAbstractTyConRhs, totallyAbstractTyConRhs,
- mkNewTyConRhs, mkDataTyConRhs,
+ mkNewTyConRhs, mkDataTyConRhs,
newImplicitBinder
) where
@@ -47,16 +41,16 @@ import UniqSupply
import Util
import Outputable
\end{code}
-
+
\begin{code}
------------------------------------------------------
-buildSynTyCon :: Name -> [TyVar] -> [Role]
+buildSynTyCon :: Name -> [TyVar] -> [Role]
-> SynTyConRhs
-> Kind -- ^ Kind of the RHS
-> TyConParent
-> TcRnIf m n TyCon
-buildSynTyCon tc_name tvs roles rhs rhs_kind parent
+buildSynTyCon tc_name tvs roles rhs rhs_kind parent
= return (mkSynTyCon tc_name kind tvs roles rhs parent)
where kind = mkPiKinds tvs rhs_kind
@@ -71,7 +65,7 @@ mkDataTyConRhs cons
= DataTyCon {
data_cons = cons,
is_enum = not (null cons) && all is_enum_con cons
- -- See Note [Enumeration types] in TyCon
+ -- See Note [Enumeration types] in TyCon
}
where
is_enum_con con
@@ -83,14 +77,14 @@ mkNewTyConRhs :: Name -> TyCon -> DataCon -> TcRnIf m n AlgTyConRhs
-- ^ Monadic because it makes a Name for the coercion TyCon
-- We pass the Name of the parent TyCon, as well as the TyCon itself,
-- because the latter is part of a knot, whereas the former is not.
-mkNewTyConRhs tycon_name tycon con
- = do { co_tycon_name <- newImplicitBinder tycon_name mkNewTyCoOcc
- ; let co_tycon = mkNewTypeCo co_tycon_name tycon etad_tvs etad_roles etad_rhs
- ; traceIf (text "mkNewTyConRhs" <+> ppr co_tycon)
- ; return (NewTyCon { data_con = con,
- nt_rhs = rhs_ty,
- nt_etad_rhs = (etad_tvs, etad_rhs),
- nt_co = co_tycon } ) }
+mkNewTyConRhs tycon_name tycon con
+ = do { co_tycon_name <- newImplicitBinder tycon_name mkNewTyCoOcc
+ ; let co_tycon = mkNewTypeCo co_tycon_name tycon etad_tvs etad_roles etad_rhs
+ ; traceIf (text "mkNewTyConRhs" <+> ppr co_tycon)
+ ; return (NewTyCon { data_con = con,
+ nt_rhs = rhs_ty,
+ nt_etad_rhs = (etad_tvs, etad_rhs),
+ nt_co = co_tycon } ) }
-- Coreview looks through newtypes with a Nothing
-- for nt_co, or uses explicit coercions otherwise
where
@@ -98,89 +92,89 @@ mkNewTyConRhs tycon_name tycon con
roles = tyConRoles tycon
inst_con_ty = applyTys (dataConUserType con) (mkTyVarTys tvs)
rhs_ty = ASSERT( isFunTy inst_con_ty ) funArgTy inst_con_ty
- -- Instantiate the data con with the
- -- type variables from the tycon
- -- NB: a newtype DataCon has a type that must look like
- -- forall tvs. <arg-ty> -> T tvs
- -- Note that we *can't* use dataConInstOrigArgTys here because
- -- the newtype arising from class Foo a => Bar a where {}
- -- has a single argument (Foo a) that is a *type class*, so
- -- dataConInstOrigArgTys returns [].
+ -- Instantiate the data con with the
+ -- type variables from the tycon
+ -- NB: a newtype DataCon has a type that must look like
+ -- forall tvs. <arg-ty> -> T tvs
+ -- Note that we *can't* use dataConInstOrigArgTys here because
+ -- the newtype arising from class Foo a => Bar a where {}
+ -- has a single argument (Foo a) that is a *type class*, so
+ -- dataConInstOrigArgTys returns [].
etad_tvs :: [TyVar] -- Matched lazily, so that mkNewTypeCo can
etad_roles :: [Role] -- return a TyCon without pulling on rhs_ty
etad_rhs :: Type -- See Note [Tricky iface loop] in LoadIface
(etad_tvs, etad_roles, etad_rhs) = eta_reduce (reverse tvs) (reverse roles) rhs_ty
-
- eta_reduce :: [TyVar] -- Reversed
+
+ eta_reduce :: [TyVar] -- Reversed
-> [Role] -- also reversed
- -> Type -- Rhs type
- -> ([TyVar], [Role], Type) -- Eta-reduced version
+ -> Type -- Rhs type
+ -> ([TyVar], [Role], Type) -- Eta-reduced version
-- (tyvars in normal order)
eta_reduce (a:as) (_:rs) ty | Just (fun, arg) <- splitAppTy_maybe ty,
- Just tv <- getTyVar_maybe arg,
- tv == a,
- not (a `elemVarSet` tyVarsOfType fun)
- = eta_reduce as rs fun
+ Just tv <- getTyVar_maybe arg,
+ tv == a,
+ not (a `elemVarSet` tyVarsOfType fun)
+ = eta_reduce as rs fun
eta_reduce tvs rs ty = (reverse tvs, reverse rs, ty)
-
+
------------------------------------------------------
-buildDataCon :: FamInstEnvs
+buildDataCon :: FamInstEnvs
-> Name -> Bool
- -> [HsBang]
- -> [Name] -- Field labels
- -> [TyVar] -> [TyVar] -- Univ and ext
+ -> [HsBang]
+ -> [Name] -- Field labels
+ -> [TyVar] -> [TyVar] -- Univ and ext
-> [(TyVar,Type)] -- Equality spec
- -> ThetaType -- Does not include the "stupid theta"
- -- or the GADT equalities
- -> [Type] -> Type -- Argument and result types
- -> TyCon -- Rep tycon
- -> TcRnIf m n DataCon
+ -> ThetaType -- Does not include the "stupid theta"
+ -- or the GADT equalities
+ -> [Type] -> Type -- Argument and result types
+ -> TyCon -- Rep tycon
+ -> TcRnIf m n DataCon
-- A wrapper for DataCon.mkDataCon that
-- a) makes the worker Id
-- b) makes the wrapper Id if necessary, including
--- allocating its unique (hence monadic)
+-- allocating its unique (hence monadic)
buildDataCon fam_envs src_name declared_infix arg_stricts field_lbls
- univ_tvs ex_tvs eq_spec ctxt arg_tys res_ty rep_tycon
- = do { wrap_name <- newImplicitBinder src_name mkDataConWrapperOcc
- ; work_name <- newImplicitBinder src_name mkDataConWorkerOcc
- -- This last one takes the name of the data constructor in the source
- -- code, which (for Haskell source anyway) will be in the DataName name
- -- space, and puts it into the VarName name space
+ univ_tvs ex_tvs eq_spec ctxt arg_tys res_ty rep_tycon
+ = do { wrap_name <- newImplicitBinder src_name mkDataConWrapperOcc
+ ; work_name <- newImplicitBinder src_name mkDataConWorkerOcc
+ -- This last one takes the name of the data constructor in the source
+ -- code, which (for Haskell source anyway) will be in the DataName name
+ -- space, and puts it into the VarName name space
; us <- newUniqueSupply
; dflags <- getDynFlags
- ; let
- stupid_ctxt = mkDataConStupidTheta rep_tycon arg_tys univ_tvs
- data_con = mkDataCon src_name declared_infix
- arg_stricts field_lbls
- univ_tvs ex_tvs eq_spec ctxt
- arg_tys res_ty rep_tycon
- stupid_ctxt dc_wrk dc_rep
+ ; let
+ stupid_ctxt = mkDataConStupidTheta rep_tycon arg_tys univ_tvs
+ data_con = mkDataCon src_name declared_infix
+ arg_stricts field_lbls
+ univ_tvs ex_tvs eq_spec ctxt
+ arg_tys res_ty rep_tycon
+ stupid_ctxt dc_wrk dc_rep
dc_wrk = mkDataConWorkId work_name data_con
dc_rep = initUs_ us (mkDataConRep dflags fam_envs wrap_name data_con)
- ; return data_con }
+ ; return data_con }
-- The stupid context for a data constructor should be limited to
-- the type variables mentioned in the arg_tys
--- ToDo: Or functionally dependent on?
--- This whole stupid theta thing is, well, stupid.
+-- ToDo: Or functionally dependent on?
+-- This whole stupid theta thing is, well, stupid.
mkDataConStupidTheta :: TyCon -> [Type] -> [TyVar] -> [PredType]
mkDataConStupidTheta tycon arg_tys univ_tvs
- | null stupid_theta = [] -- The common case
- | otherwise = filter in_arg_tys stupid_theta
+ | null stupid_theta = [] -- The common case
+ | otherwise = filter in_arg_tys stupid_theta
where
- tc_subst = zipTopTvSubst (tyConTyVars tycon) (mkTyVarTys univ_tvs)
+ tc_subst = zipTopTvSubst (tyConTyVars tycon) (mkTyVarTys univ_tvs)
stupid_theta = substTheta tc_subst (tyConStupidTheta tycon)
- -- Start by instantiating the master copy of the
- -- stupid theta, taken from the TyCon
+ -- Start by instantiating the master copy of the
+ -- stupid theta, taken from the TyCon
arg_tyvars = tyVarsOfTypes arg_tys
- in_arg_tys pred = not $ isEmptyVarSet $
- tyVarsOfType pred `intersectVarSet` arg_tyvars
+ in_arg_tys pred = not $ isEmptyVarSet $
+ tyVarsOfType pred `intersectVarSet` arg_tyvars
------------------------------------------------------
@@ -217,121 +211,121 @@ buildPatSyn src_name declared_infix matcher wrapper
------------------------------------------------------
\begin{code}
-type TcMethInfo = (Name, DefMethSpec, Type)
- -- A temporary intermediate, to communicate between
+type TcMethInfo = (Name, DefMethSpec, Type)
+ -- A temporary intermediate, to communicate between
-- tcClassSigs and buildClass.
buildClass :: Name -> [TyVar] -> [Role] -> ThetaType
- -> [FunDep TyVar] -- Functional dependencies
- -> [ClassATItem] -- Associated types
- -> [TcMethInfo] -- Method info
- -> ClassMinimalDef -- Minimal complete definition
- -> RecFlag -- Info for type constructor
- -> TcRnIf m n Class
+ -> [FunDep TyVar] -- Functional dependencies
+ -> [ClassATItem] -- Associated types
+ -> [TcMethInfo] -- Method info
+ -> ClassMinimalDef -- Minimal complete definition
+ -> RecFlag -- Info for type constructor
+ -> TcRnIf m n Class
buildClass tycon_name tvs roles sc_theta fds at_items sig_stuff mindef tc_isrec
- = fixM $ \ rec_clas -> -- Only name generation inside loop
- do { traceIf (text "buildClass")
+ = fixM $ \ rec_clas -> -- Only name generation inside loop
+ do { traceIf (text "buildClass")
- ; datacon_name <- newImplicitBinder tycon_name mkClassDataConOcc
- -- The class name is the 'parent' for this datacon, not its tycon,
- -- because one should import the class to get the binding for
- -- the datacon
+ ; datacon_name <- newImplicitBinder tycon_name mkClassDataConOcc
+ -- The class name is the 'parent' for this datacon, not its tycon,
+ -- because one should import the class to get the binding for
+ -- the datacon
- ; op_items <- mapM (mk_op_item rec_clas) sig_stuff
- -- Build the selector id and default method id
+ ; op_items <- mapM (mk_op_item rec_clas) sig_stuff
+ -- Build the selector id and default method id
- -- Make selectors for the superclasses
- ; sc_sel_names <- mapM (newImplicitBinder tycon_name . mkSuperDictSelOcc)
- [1..length sc_theta]
- ; let sc_sel_ids = [ mkDictSelId sc_name rec_clas
+ -- Make selectors for the superclasses
+ ; sc_sel_names <- mapM (newImplicitBinder tycon_name . mkSuperDictSelOcc)
+ [1..length sc_theta]
+ ; let sc_sel_ids = [ mkDictSelId sc_name rec_clas
| sc_name <- sc_sel_names]
- -- We number off the Dict superclass selectors, 1, 2, 3 etc so that we
- -- can construct names for the selectors. Thus
- -- class (C a, C b) => D a b where ...
- -- gives superclass selectors
- -- D_sc1, D_sc2
- -- (We used to call them D_C, but now we can have two different
- -- superclasses both called C!)
-
- ; let use_newtype = isSingleton arg_tys
- -- Use a newtype if the data constructor
- -- (a) has exactly one value field
- -- i.e. exactly one operation or superclass taken together
+ -- We number off the Dict superclass selectors, 1, 2, 3 etc so that we
+ -- can construct names for the selectors. Thus
+ -- class (C a, C b) => D a b where ...
+ -- gives superclass selectors
+ -- D_sc1, D_sc2
+ -- (We used to call them D_C, but now we can have two different
+ -- superclasses both called C!)
+
+ ; let use_newtype = isSingleton arg_tys
+ -- Use a newtype if the data constructor
+ -- (a) has exactly one value field
+ -- i.e. exactly one operation or superclass taken together
-- (b) that value is of lifted type (which they always are, because
-- we box equality superclasses)
- -- See note [Class newtypes and equality predicates]
+ -- See note [Class newtypes and equality predicates]
- -- We treat the dictionary superclasses as ordinary arguments.
+ -- We treat the dictionary superclasses as ordinary arguments.
-- That means that in the case of
- -- class C a => D a
- -- we don't get a newtype with no arguments!
- args = sc_sel_names ++ op_names
- op_tys = [ty | (_,_,ty) <- sig_stuff]
- op_names = [op | (op,_,_) <- sig_stuff]
- arg_tys = sc_theta ++ op_tys
+ -- class C a => D a
+ -- we don't get a newtype with no arguments!
+ args = sc_sel_names ++ op_names
+ op_tys = [ty | (_,_,ty) <- sig_stuff]
+ op_names = [op | (op,_,_) <- sig_stuff]
+ arg_tys = sc_theta ++ op_tys
rec_tycon = classTyCon rec_clas
-
- ; dict_con <- buildDataCon (panic "buildClass: FamInstEnvs")
+
+ ; dict_con <- buildDataCon (panic "buildClass: FamInstEnvs")
datacon_name
- False -- Not declared infix
- (map (const HsNoBang) args)
- [{- No fields -}]
- tvs [{- no existentials -}]
- [{- No GADT equalities -}]
+ False -- Not declared infix
+ (map (const HsNoBang) args)
+ [{- No fields -}]
+ tvs [{- no existentials -}]
+ [{- No GADT equalities -}]
[{- No theta -}]
arg_tys
- (mkTyConApp rec_tycon (mkTyVarTys tvs))
- rec_tycon
-
- ; rhs <- if use_newtype
- then mkNewTyConRhs tycon_name rec_tycon dict_con
- else return (mkDataTyConRhs [dict_con])
-
- ; let { clas_kind = mkPiKinds tvs constraintKind
-
- ; tycon = mkClassTyCon tycon_name clas_kind tvs roles
- rhs rec_clas tc_isrec
- -- A class can be recursive, and in the case of newtypes
- -- this matters. For example
- -- class C a where { op :: C b => a -> b -> Int }
- -- Because C has only one operation, it is represented by
- -- a newtype, and it should be a *recursive* newtype.
- -- [If we don't make it a recursive newtype, we'll expand the
- -- newtype like a synonym, but that will lead to an infinite
- -- type]
-
- ; result = mkClass tvs fds
- sc_theta sc_sel_ids at_items
- op_items mindef tycon
- }
- ; traceIf (text "buildClass" <+> ppr tycon)
- ; return result }
+ (mkTyConApp rec_tycon (mkTyVarTys tvs))
+ rec_tycon
+
+ ; rhs <- if use_newtype
+ then mkNewTyConRhs tycon_name rec_tycon dict_con
+ else return (mkDataTyConRhs [dict_con])
+
+ ; let { clas_kind = mkPiKinds tvs constraintKind
+
+ ; tycon = mkClassTyCon tycon_name clas_kind tvs roles
+ rhs rec_clas tc_isrec
+ -- A class can be recursive, and in the case of newtypes
+ -- this matters. For example
+ -- class C a where { op :: C b => a -> b -> Int }
+ -- Because C has only one operation, it is represented by
+ -- a newtype, and it should be a *recursive* newtype.
+ -- [If we don't make it a recursive newtype, we'll expand the
+ -- newtype like a synonym, but that will lead to an infinite
+ -- type]
+
+ ; result = mkClass tvs fds
+ sc_theta sc_sel_ids at_items
+ op_items mindef tycon
+ }
+ ; traceIf (text "buildClass" <+> ppr tycon)
+ ; return result }
where
mk_op_item :: Class -> TcMethInfo -> TcRnIf n m ClassOpItem
- mk_op_item rec_clas (op_name, dm_spec, _)
+ mk_op_item rec_clas (op_name, dm_spec, _)
= do { dm_info <- case dm_spec of
NoDM -> return NoDefMeth
GenericDM -> do { dm_name <- newImplicitBinder op_name mkGenDefMethodOcc
- ; return (GenDefMeth dm_name) }
+ ; return (GenDefMeth dm_name) }
VanillaDM -> do { dm_name <- newImplicitBinder op_name mkDefaultMethodOcc
- ; return (DefMeth dm_name) }
+ ; return (DefMeth dm_name) }
; return (mkDictSelId op_name rec_clas, dm_info) }
\end{code}
Note [Class newtypes and equality predicates]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
- class (a ~ F b) => C a b where
- op :: a -> b
+ class (a ~ F b) => C a b where
+ op :: a -> b
We cannot represent this by a newtype, even though it's not
existential, because there are two value fields (the equality
predicate and op. See Trac #2238
Moreover,
- class (a ~ F b) => C a b where {}
+ class (a ~ F b) => C a b where {}
Here we can't use a newtype either, even though there is only
one field, because equality predicates are unboxed, and classes
are boxed.
diff --git a/compiler/iface/IfaceEnv.lhs b/compiler/iface/IfaceEnv.lhs
index c29778dc23..6c93f50456 100644
--- a/compiler/iface/IfaceEnv.lhs
+++ b/compiler/iface/IfaceEnv.lhs
@@ -2,25 +2,19 @@
\begin{code}
{-# LANGUAGE CPP, RankNTypes #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module IfaceEnv (
- newGlobalBinder, newImplicitBinder,
- lookupIfaceTop,
- lookupOrig, lookupOrigNameCache, extendNameCache,
- newIfaceName, newIfaceNames,
- extendIfaceIdEnv, extendIfaceTyVarEnv,
- tcIfaceLclId, tcIfaceTyVar, lookupIfaceTyVar,
+ newGlobalBinder, newImplicitBinder,
+ lookupIfaceTop,
+ lookupOrig, lookupOrigNameCache, extendNameCache,
+ newIfaceName, newIfaceNames,
+ extendIfaceIdEnv, extendIfaceTyVarEnv,
+ tcIfaceLclId, tcIfaceTyVar, lookupIfaceTyVar,
- ifaceExportNames,
+ ifaceExportNames,
- -- Name-cache stuff
- allocateGlobalBinder, initNameCache, updNameCache,
+ -- Name-cache stuff
+ allocateGlobalBinder, initNameCache, updNameCache,
getNameCache, mkNameCacheUpdater, NameCacheUpdater(..)
) where
@@ -48,9 +42,9 @@ import Data.IORef ( atomicModifyIORef, readIORef )
%*********************************************************
-%* *
- Allocating new Names in the Name Cache
-%* *
+%* *
+ Allocating new Names in the Name Cache
+%* *
%*********************************************************
Note [The Name Cache]
@@ -80,13 +74,13 @@ newGlobalBinder :: Module -> OccName -> SrcSpan -> TcRnIf a b Name
-- moment when we know its Module and SrcLoc in their full glory
newGlobalBinder mod occ loc
- = do mod `seq` occ `seq` return () -- See notes with lookupOrig
+ = do mod `seq` occ `seq` return () -- See notes with lookupOrig
-- traceIf (text "newGlobalBinder" <+> ppr mod <+> ppr occ <+> ppr loc)
updNameCache $ \name_cache ->
allocateGlobalBinder name_cache mod occ loc
allocateGlobalBinder
- :: NameCache
+ :: NameCache
-> Module -> OccName -> SrcSpan
-> (NameCache, Name)
-- See Note [The Name Cache]
@@ -100,13 +94,13 @@ allocateGlobalBinder name_supply mod occ loc
-- get different SrcLocs can can be reported as such.
--
-- Possible other reason: it might be in the cache because we
- -- encountered an occurrence before the binding site for an
- -- implicitly-imported Name. Perhaps the current SrcLoc is
- -- better... but not really: it'll still just say 'imported'
+ -- encountered an occurrence before the binding site for an
+ -- implicitly-imported Name. Perhaps the current SrcLoc is
+ -- better... but not really: it'll still just say 'imported'
--
-- IMPORTANT: Don't mess with wired-in names.
- -- Their wired-in-ness is in their NameSort
- -- and their Module is correct.
+ -- Their wired-in-ness is in their NameSort
+ -- and their Module is correct.
Just name | isWiredInName name
-> (name_supply, name)
@@ -128,20 +122,20 @@ allocateGlobalBinder name_supply mod occ loc
new_cache = extendNameCache (nsNames name_supply) mod occ name
new_name_supply = name_supply {nsUniqs = us', nsNames = new_cache}
-newImplicitBinder :: Name -- Base name
- -> (OccName -> OccName) -- Occurrence name modifier
- -> TcRnIf m n Name -- Implicit name
+newImplicitBinder :: Name -- Base name
+ -> (OccName -> OccName) -- Occurrence name modifier
+ -> TcRnIf m n Name -- Implicit name
-- Called in BuildTyCl to allocate the implicit binders of type/class decls
-- For source type/class decls, this is the first occurrence
-- For iface ones, the LoadIface has alrady allocated a suitable name in the cache
newImplicitBinder base_name mk_sys_occ
| Just mod <- nameModule_maybe base_name
= newGlobalBinder mod occ loc
- | otherwise -- When typechecking a [d| decl bracket |],
- -- TH generates types, classes etc with Internal names,
- -- so we follow suit for the implicit binders
- = do { uniq <- newUnique
- ; return (mkInternalName uniq occ loc) }
+ | otherwise -- When typechecking a [d| decl bracket |],
+ -- TH generates types, classes etc with Internal names,
+ -- so we follow suit for the implicit binders
+ = do { uniq <- newUnique
+ ; return (mkInternalName uniq occ loc) }
where
occ = mk_sys_occ (nameOccName base_name)
loc = nameSrcSpan base_name
@@ -151,19 +145,19 @@ ifaceExportNames exports = return exports
lookupOrig :: Module -> OccName -> TcRnIf a b Name
lookupOrig mod occ
- = do { -- First ensure that mod and occ are evaluated
- -- If not, chaos can ensue:
- -- we read the name-cache
- -- then pull on mod (say)
- -- which does some stuff that modifies the name cache
- -- This did happen, with tycon_mod in TcIface.tcIfaceAlt (DataAlt..)
- mod `seq` occ `seq` return ()
--- ; traceIf (text "lookup_orig" <+> ppr mod <+> ppr occ)
+ = do { -- First ensure that mod and occ are evaluated
+ -- If not, chaos can ensue:
+ -- we read the name-cache
+ -- then pull on mod (say)
+ -- which does some stuff that modifies the name cache
+ -- This did happen, with tycon_mod in TcIface.tcIfaceAlt (DataAlt..)
+ mod `seq` occ `seq` return ()
+-- ; traceIf (text "lookup_orig" <+> ppr mod <+> ppr occ)
; updNameCache $ \name_cache ->
case lookupOrigNameCache (nsNames name_cache) mod occ of {
- Just name -> (name_cache, name);
- Nothing ->
+ Just name -> (name_cache, name);
+ Nothing ->
case takeUniqFromSupply (nsUniqs name_cache) of {
(uniq, us) ->
let
@@ -174,9 +168,9 @@ lookupOrig mod occ
\end{code}
%************************************************************************
-%* *
- Name cache access
-%* *
+%* *
+ Name cache access
+%* *
%************************************************************************
See Note [The Name Cache] above.
@@ -192,7 +186,7 @@ them up in the original name cache.
However, there are two reasons why we might look up an Orig RdrName:
* If you use setRdrNameSpace on an Exact RdrName it may be
- turned into an Orig RdrName.
+ turned into an Orig RdrName.
* Template Haskell turns a BuiltInSyntax Name into a TH.NameG
(DsMeta.globalVar), and parses a NameG into an Orig RdrName
@@ -203,19 +197,19 @@ However, there are two reasons why we might look up an Orig RdrName:
lookupOrigNameCache :: OrigNameCache -> Module -> OccName -> Maybe Name
lookupOrigNameCache nc mod occ
| Just name <- isBuiltInOcc_maybe occ
- = -- See Note [Known-key names], 3(c) in PrelNames
+ = -- See Note [Known-key names], 3(c) in PrelNames
-- Special case for tuples; there are too many
- -- of them to pre-populate the original-name cache
+ -- of them to pre-populate the original-name cache
Just name
| otherwise
= case lookupModuleEnv nc mod of
- Nothing -> Nothing
- Just occ_env -> lookupOccEnv occ_env occ
+ Nothing -> Nothing
+ Just occ_env -> lookupOccEnv occ_env occ
extendOrigNameCache :: OrigNameCache -> Name -> OrigNameCache
-extendOrigNameCache nc name
- = ASSERT2( isExternalName name, ppr name )
+extendOrigNameCache nc name
+ = ASSERT2( isExternalName name, ppr name )
extendNameCache nc (nameModule name) (nameOccName name) name
extendNameCache :: OrigNameCache -> Module -> OccName -> Name -> OrigNameCache
@@ -225,8 +219,8 @@ extendNameCache nc mod occ name
combine _ occ_env = extendOccEnv occ_env occ name
getNameCache :: TcRnIf a b NameCache
-getNameCache = do { HscEnv { hsc_NC = nc_var } <- getTopEnv;
- readMutVar nc_var }
+getNameCache = do { HscEnv { hsc_NC = nc_var } <- getTopEnv;
+ readMutVar nc_var }
updNameCache :: (NameCache -> (NameCache, c)) -> TcRnIf a b c
updNameCache upd_fn = do
@@ -253,7 +247,7 @@ mkNameCacheUpdater = do
initNameCache :: UniqSupply -> [Name] -> NameCache
initNameCache us names
= NameCache { nsUniqs = us,
- nsNames = initOrigNames names }
+ nsNames = initOrigNames names }
initOrigNames :: [Name] -> OrigNameCache
initOrigNames names = foldl extendOrigNameCache emptyModuleEnv names
@@ -262,70 +256,70 @@ initOrigNames names = foldl extendOrigNameCache emptyModuleEnv names
%************************************************************************
-%* *
- Type variables and local Ids
-%* *
+%* *
+ Type variables and local Ids
+%* *
%************************************************************************
\begin{code}
tcIfaceLclId :: FastString -> IfL Id
tcIfaceLclId occ
- = do { lcl <- getLclEnv
- ; case (lookupUFM (if_id_env lcl) occ) of
+ = do { lcl <- getLclEnv
+ ; case (lookupUFM (if_id_env lcl) occ) of
Just ty_var -> return ty_var
Nothing -> failIfM (text "Iface id out of scope: " <+> ppr occ)
}
extendIfaceIdEnv :: [Id] -> IfL a -> IfL a
extendIfaceIdEnv ids thing_inside
- = do { env <- getLclEnv
- ; let { id_env' = addListToUFM (if_id_env env) pairs
- ; pairs = [(occNameFS (getOccName id), id) | id <- ids] }
- ; setLclEnv (env { if_id_env = id_env' }) thing_inside }
+ = do { env <- getLclEnv
+ ; let { id_env' = addListToUFM (if_id_env env) pairs
+ ; pairs = [(occNameFS (getOccName id), id) | id <- ids] }
+ ; setLclEnv (env { if_id_env = id_env' }) thing_inside }
tcIfaceTyVar :: FastString -> IfL TyVar
tcIfaceTyVar occ
- = do { lcl <- getLclEnv
- ; case (lookupUFM (if_tv_env lcl) occ) of
+ = do { lcl <- getLclEnv
+ ; case (lookupUFM (if_tv_env lcl) occ) of
Just ty_var -> return ty_var
Nothing -> failIfM (text "Iface type variable out of scope: " <+> ppr occ)
}
lookupIfaceTyVar :: FastString -> IfL (Maybe TyVar)
lookupIfaceTyVar occ
- = do { lcl <- getLclEnv
- ; return (lookupUFM (if_tv_env lcl) occ) }
+ = do { lcl <- getLclEnv
+ ; return (lookupUFM (if_tv_env lcl) occ) }
extendIfaceTyVarEnv :: [TyVar] -> IfL a -> IfL a
extendIfaceTyVarEnv tyvars thing_inside
- = do { env <- getLclEnv
- ; let { tv_env' = addListToUFM (if_tv_env env) pairs
- ; pairs = [(occNameFS (getOccName tv), tv) | tv <- tyvars] }
- ; setLclEnv (env { if_tv_env = tv_env' }) thing_inside }
+ = do { env <- getLclEnv
+ ; let { tv_env' = addListToUFM (if_tv_env env) pairs
+ ; pairs = [(occNameFS (getOccName tv), tv) | tv <- tyvars] }
+ ; setLclEnv (env { if_tv_env = tv_env' }) thing_inside }
\end{code}
%************************************************************************
-%* *
- Getting from RdrNames to Names
-%* *
+%* *
+ Getting from RdrNames to Names
+%* *
%************************************************************************
\begin{code}
lookupIfaceTop :: OccName -> IfL Name
-- Look up a top-level name from the current Iface module
lookupIfaceTop occ
- = do { env <- getLclEnv; lookupOrig (if_mod env) occ }
+ = do { env <- getLclEnv; lookupOrig (if_mod env) occ }
newIfaceName :: OccName -> IfL Name
newIfaceName occ
- = do { uniq <- newUnique
- ; return $! mkInternalName uniq occ noSrcSpan }
+ = do { uniq <- newUnique
+ ; return $! mkInternalName uniq occ noSrcSpan }
newIfaceNames :: [OccName] -> IfL [Name]
newIfaceNames occs
- = do { uniqs <- newUniqueSupply
- ; return [ mkInternalName uniq occ noSrcSpan
- | (occ,uniq) <- occs `zip` uniqsFromSupply uniqs] }
+ = do { uniqs <- newUniqueSupply
+ ; return [ mkInternalName uniq occ noSrcSpan
+ | (occ,uniq) <- occs `zip` uniqsFromSupply uniqs] }
\end{code}
diff --git a/compiler/simplCore/FloatIn.lhs b/compiler/simplCore/FloatIn.lhs
index a90d59cf77..3527702b84 100644
--- a/compiler/simplCore/FloatIn.lhs
+++ b/compiler/simplCore/FloatIn.lhs
@@ -2,9 +2,9 @@
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
%
%************************************************************************
-%* *
+%* *
\section[FloatIn]{Floating Inwards pass}
-%* *
+%* *
%************************************************************************
The main purpose of @floatInwards@ is floating into branches of a
@@ -13,12 +13,6 @@ then discover that they aren't needed in the chosen branch.
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module FloatIn ( floatInwards ) where
@@ -26,11 +20,11 @@ module FloatIn ( floatInwards ) where
import CoreSyn
import MkCore
-import CoreUtils ( exprIsDupable, exprIsExpandable, exprType, exprOkForSideEffects )
-import CoreFVs ( CoreExprWithFVs, freeVars, freeVarsOf, idRuleAndUnfoldingVars )
-import Id ( isOneShotBndr, idType )
+import CoreUtils ( exprIsDupable, exprIsExpandable, exprType, exprOkForSideEffects )
+import CoreFVs ( CoreExprWithFVs, freeVars, freeVarsOf, idRuleAndUnfoldingVars )
+import Id ( isOneShotBndr, idType )
import Var
-import Type ( Type, isUnLiftedType, splitFunTy, applyTy )
+import Type ( Type, isUnLiftedType, splitFunTy, applyTy )
import VarSet
import Util
import UniqFM
@@ -53,9 +47,9 @@ floatInwards dflags = map fi_top_bind
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{Mail from Andr\'e [edited]}
-%* *
+%* *
%************************************************************************
{\em Will wrote: What??? I thought the idea was to float as far
@@ -117,9 +111,9 @@ still left as a let, if the branch is not taken (or b is not entered)
the closure for a is not built.
%************************************************************************
-%* *
+%* *
\subsection{Main floating-inwards code}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -127,12 +121,12 @@ type FreeVarSet = IdSet
type BoundVarSet = IdSet
data FloatInBind = FB BoundVarSet FreeVarSet FloatBind
- -- The FreeVarSet is the free variables of the binding. In the case
- -- of recursive bindings, the set doesn't include the bound
- -- variables.
+ -- The FreeVarSet is the free variables of the binding. In the case
+ -- of recursive bindings, the set doesn't include the bound
+ -- variables.
type FloatInBinds = [FloatInBind]
- -- In reverse dependency order (innermost binder first)
+ -- In reverse dependency order (innermost binder first)
fiExpr :: DynFlags
-> FloatInBinds -- Binds we're trying to drop
@@ -194,7 +188,7 @@ unlifted function arguments to be ok-for-speculation.
Note [Floating in past a lambda group]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-* We must be careful about floating inside inside a value lambda.
+* We must be careful about floating inside inside a value lambda.
That risks losing laziness.
The float-out pass might rescue us, but then again it might not.
@@ -202,41 +196,41 @@ Note [Floating in past a lambda group]
there is no risk of duplicating work thereby, but we do need to be
careful. In particular, here is a bad case (it happened in the
cichelli benchmark:
- let v = ...
- in let f = /\t -> \a -> ...
- ==>
- let f = /\t -> let v = ... in \a -> ...
+ let v = ...
+ in let f = /\t -> \a -> ...
+ ==>
+ let f = /\t -> let v = ... in \a -> ...
This is bad as now f is an updatable closure (update PAP)
and has arity 0.
-* Hack alert! We only float in through one-shot lambdas,
- not (as you might guess) through lone big lambdas.
+* Hack alert! We only float in through one-shot lambdas,
+ not (as you might guess) through lone big lambdas.
Reason: we float *out* past big lambdas (see the test in the Lam
case of FloatOut.floatExpr) and we don't want to float straight
back in again.
-
+
It *is* important to float into one-shot lambdas, however;
see the remarks with noFloatIntoRhs.
So we treat lambda in groups, using the following rule:
- Float in if (a) there is at least one Id,
+ Float in if (a) there is at least one Id,
and (b) there are no non-one-shot Ids
Otherwise drop all the bindings outside the group.
This is what the 'go' function in the AnnLam case is doing.
-Urk! if all are tyvars, and we don't float in, we may miss an
+Urk! if all are tyvars, and we don't float in, we may miss an
opportunity to float inside a nested case branch
\begin{code}
fiExpr dflags to_drop lam@(_, AnnLam _ _)
- | okToFloatInside bndrs -- Float in
+ | okToFloatInside bndrs -- Float in
-- NB: Must line up with noFloatIntoRhs (AnnLam...); see Trac #7088
= mkLams bndrs (fiExpr dflags to_drop body)
- | otherwise -- Dump it all here
+ | otherwise -- Dump it all here
= wrapFloats to_drop (mkLams bndrs (fiExpr dflags [] body))
where
@@ -244,9 +238,9 @@ fiExpr dflags to_drop lam@(_, AnnLam _ _)
\end{code}
We don't float lets inwards past an SCC.
- ToDo: keep info on current cc, and when passing
- one, if it is not the same, annotate all lets in binds with current
- cc, change current cc to the new one and float binds into expr.
+ ToDo: keep info on current cc, and when passing
+ one, if it is not the same, annotate all lets in binds with current
+ cc, change current cc to the new one and float binds into expr.
\begin{code}
fiExpr dflags to_drop (_, AnnTick tickish expr)
@@ -282,16 +276,16 @@ course.
Note [extra_fvs (1): avoid floating into RHS]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Consider let x=\y....t... in body. We do not necessarily want to float
+Consider let x=\y....t... in body. We do not necessarily want to float
a binding for t into the RHS, because it'll immediately be floated out
again. (It won't go inside the lambda else we risk losing work.)
In letrec, we need to be more careful still. We don't want to transform
- let x# = y# +# 1#
- in
- letrec f = \z. ...x#...f...
- in ...
+ let x# = y# +# 1#
+ in
+ letrec f = \z. ...x#...f...
+ in ...
into
- letrec f = let x# = y# +# 1# in \z. ...x#...f... in ...
+ letrec f = let x# = y# +# 1# in \z. ...x#...f... in ...
because now we can't float the let out again, because a letrec
can't have unboxed bindings.
@@ -315,62 +309,62 @@ fiExpr dflags to_drop (_,AnnLet (AnnNonRec id rhs@(rhs_fvs, ann_rhs)) body)
body_fvs = freeVarsOf body `delVarSet` id
rhs_ty = idType id
- rule_fvs = idRuleAndUnfoldingVars id -- See Note [extra_fvs (2): free variables of rules]
+ rule_fvs = idRuleAndUnfoldingVars id -- See Note [extra_fvs (2): free variables of rules]
extra_fvs | noFloatIntoRhs ann_rhs rhs_ty = rule_fvs `unionVarSet` rhs_fvs
- | otherwise = rule_fvs
- -- See Note [extra_fvs (1): avoid floating into RHS]
- -- No point in floating in only to float straight out again
- -- Ditto ok-for-speculation unlifted RHSs
+ | otherwise = rule_fvs
+ -- See Note [extra_fvs (1): avoid floating into RHS]
+ -- No point in floating in only to float straight out again
+ -- Ditto ok-for-speculation unlifted RHSs
- [shared_binds, extra_binds, rhs_binds, body_binds]
- = sepBindsByDropPoint dflags False [extra_fvs, rhs_fvs, body_fvs] to_drop
+ [shared_binds, extra_binds, rhs_binds, body_binds]
+ = sepBindsByDropPoint dflags False [extra_fvs, rhs_fvs, body_fvs] to_drop
- new_to_drop = body_binds ++ -- the bindings used only in the body
- [FB (unitVarSet id) rhs_fvs'
- (FloatLet (NonRec id rhs'))] ++ -- the new binding itself
- extra_binds ++ -- bindings from extra_fvs
- shared_binds -- the bindings used both in rhs and body
+ new_to_drop = body_binds ++ -- the bindings used only in the body
+ [FB (unitVarSet id) rhs_fvs'
+ (FloatLet (NonRec id rhs'))] ++ -- the new binding itself
+ extra_binds ++ -- bindings from extra_fvs
+ shared_binds -- the bindings used both in rhs and body
- -- Push rhs_binds into the right hand side of the binding
+ -- Push rhs_binds into the right hand side of the binding
rhs' = fiExpr dflags rhs_binds rhs
rhs_fvs' = rhs_fvs `unionVarSet` floatedBindsFVs rhs_binds `unionVarSet` rule_fvs
- -- Don't forget the rule_fvs; the binding mentions them!
+ -- Don't forget the rule_fvs; the binding mentions them!
fiExpr dflags to_drop (_,AnnLet (AnnRec bindings) body)
= fiExpr dflags new_to_drop body
where
(ids, rhss) = unzip bindings
rhss_fvs = map freeVarsOf rhss
- body_fvs = freeVarsOf body
+ body_fvs = freeVarsOf body
- -- See Note [extra_fvs (1,2)]
+ -- See Note [extra_fvs (1,2)]
rule_fvs = mapUnionVarSet idRuleAndUnfoldingVars ids
- extra_fvs = rule_fvs `unionVarSet`
- unionVarSets [ fvs | (fvs, rhs) <- rhss
- , noFloatIntoExpr rhs ]
+ extra_fvs = rule_fvs `unionVarSet`
+ unionVarSets [ fvs | (fvs, rhs) <- rhss
+ , noFloatIntoExpr rhs ]
- (shared_binds:extra_binds:body_binds:rhss_binds)
- = sepBindsByDropPoint dflags False (extra_fvs:body_fvs:rhss_fvs) to_drop
+ (shared_binds:extra_binds:body_binds:rhss_binds)
+ = sepBindsByDropPoint dflags False (extra_fvs:body_fvs:rhss_fvs) to_drop
- new_to_drop = body_binds ++ -- the bindings used only in the body
- [FB (mkVarSet ids) rhs_fvs'
+ new_to_drop = body_binds ++ -- the bindings used only in the body
+ [FB (mkVarSet ids) rhs_fvs'
(FloatLet (Rec (fi_bind rhss_binds bindings)))] ++
- -- The new binding itself
- extra_binds ++ -- Note [extra_fvs (1,2)]
- shared_binds -- Used in more than one place
+ -- The new binding itself
+ extra_binds ++ -- Note [extra_fvs (1,2)]
+ shared_binds -- Used in more than one place
rhs_fvs' = unionVarSets rhss_fvs `unionVarSet`
- unionVarSets (map floatedBindsFVs rhss_binds) `unionVarSet`
- rule_fvs -- Don't forget the rule variables!
+ unionVarSets (map floatedBindsFVs rhss_binds) `unionVarSet`
+ rule_fvs -- Don't forget the rule variables!
-- Push rhs_binds into the right hand side of the binding
- fi_bind :: [FloatInBinds] -- one per "drop pt" conjured w/ fvs_of_rhss
- -> [(Id, CoreExprWithFVs)]
- -> [(Id, CoreExpr)]
+ fi_bind :: [FloatInBinds] -- one per "drop pt" conjured w/ fvs_of_rhss
+ -> [(Id, CoreExprWithFVs)]
+ -> [(Id, CoreExpr)]
fi_bind to_drops pairs
- = [ (binder, fiExpr dflags to_drop rhs)
- | ((binder, rhs), to_drop) <- zipEqual "fi_bind" pairs to_drops ]
+ = [ (binder, fiExpr dflags to_drop rhs)
+ | ((binder, rhs), to_drop) <- zipEqual "fi_bind" pairs to_drops ]
\end{code}
For @Case@, the possible ``drop points'' for the \tr{to_drop}
@@ -393,7 +387,7 @@ fiExpr dflags to_drop (_, AnnCase scrut case_bndr _ [(con,alt_bndrs,rhs)])
= wrapFloats shared_binds $
fiExpr dflags (case_float : rhs_binds) rhs
where
- case_float = FB (mkVarSet (case_bndr : alt_bndrs)) scrut_fvs
+ case_float = FB (mkVarSet (case_bndr : alt_bndrs)) scrut_fvs
(FloatCase scrut' case_bndr con alt_bndrs)
scrut' = fiExpr dflags scrut_binds scrut
[shared_binds, scrut_binds, rhs_binds]
@@ -405,21 +399,21 @@ fiExpr dflags to_drop (_, AnnCase scrut case_bndr ty alts)
= wrapFloats drop_here1 $
wrapFloats drop_here2 $
Case (fiExpr dflags scrut_drops scrut) case_bndr ty
- (zipWith fi_alt alts_drops_s alts)
+ (zipWith fi_alt alts_drops_s alts)
where
- -- Float into the scrut and alts-considered-together just like App
- [drop_here1, scrut_drops, alts_drops]
+ -- Float into the scrut and alts-considered-together just like App
+ [drop_here1, scrut_drops, alts_drops]
= sepBindsByDropPoint dflags False [scrut_fvs, all_alts_fvs] to_drop
- -- Float into the alts with the is_case flag set
+ -- Float into the alts with the is_case flag set
(drop_here2 : alts_drops_s) = sepBindsByDropPoint dflags True alts_fvs alts_drops
scrut_fvs = freeVarsOf scrut
alts_fvs = map alt_fvs alts
all_alts_fvs = unionVarSets alts_fvs
alt_fvs (_con, args, rhs) = foldl delVarSet (freeVarsOf rhs) (case_bndr:args)
- -- Delete case_bndr and args from free vars of rhs
- -- to get free vars of alt
+ -- Delete case_bndr and args from free vars of rhs
+ -- to get free vars of alt
fi_alt to_drop (con, args, rhs) = (con, args, fiExpr dflags to_drop rhs)
@@ -442,14 +436,14 @@ noFloatIntoExpr (AnnLam bndr e)
-- NB: Must line up with fiExpr (AnnLam...); see Trac #7088
where
(bndrs, _) = collectAnnBndrs e
- -- IMPORTANT: don't say 'True' for a RHS with a one-shot lambda at the top.
- -- This makes a big difference for things like
- -- f x# = let x = I# x#
- -- in let j = \() -> ...x...
- -- in if <condition> then normal-path else j ()
- -- If x is used only in the error case join point, j, we must float the
- -- boxing constructor into it, else we box it every time which is very bad
- -- news indeed.
+ -- IMPORTANT: don't say 'True' for a RHS with a one-shot lambda at the top.
+ -- This makes a big difference for things like
+ -- f x# = let x = I# x#
+ -- in let j = \() -> ...x...
+ -- in if <condition> then normal-path else j ()
+ -- If x is used only in the error case join point, j, we must float the
+ -- boxing constructor into it, else we box it every time which is very bad
+ -- news indeed.
noFloatIntoExpr rhs = exprIsExpandable (deAnnotate' rhs)
-- We'd just float right back out again...
@@ -458,9 +452,9 @@ noFloatIntoExpr rhs = exprIsExpandable (deAnnotate' rhs)
%************************************************************************
-%* *
+%* *
\subsection{@sepBindsByDropPoint@}
-%* *
+%* *
%************************************************************************
This is the crucial function. The idea is: We have a wad of bindings
@@ -482,11 +476,11 @@ We have to maintain the order on these drop-point-related lists.
sepBindsByDropPoint
:: DynFlags
-> Bool -- True <=> is case expression
- -> [FreeVarSet] -- One set of FVs per drop point
- -> FloatInBinds -- Candidate floaters
+ -> [FreeVarSet] -- One set of FVs per drop point
+ -> FloatInBinds -- Candidate floaters
-> [FloatInBinds] -- FIRST one is bindings which must not be floated
- -- inside any drop point; the rest correspond
- -- one-to-one with the input list of FV sets
+ -- inside any drop point; the rest correspond
+ -- one-to-one with the input list of FV sets
-- Every input floater is returned somewhere in the result;
-- none are dropped, not even ones which don't seem to be
@@ -497,56 +491,56 @@ sepBindsByDropPoint
type DropBox = (FreeVarSet, FloatInBinds)
sepBindsByDropPoint _ _is_case drop_pts []
- = [] : [[] | _ <- drop_pts] -- cut to the chase scene; it happens
+ = [] : [[] | _ <- drop_pts] -- cut to the chase scene; it happens
sepBindsByDropPoint dflags is_case drop_pts floaters
= go floaters (map (\fvs -> (fvs, [])) (emptyVarSet : drop_pts))
where
go :: FloatInBinds -> [DropBox] -> [FloatInBinds]
- -- The *first* one in the argument list is the drop_here set
- -- The FloatInBinds in the lists are in the reverse of
- -- the normal FloatInBinds order; that is, they are the right way round!
+ -- The *first* one in the argument list is the drop_here set
+ -- The FloatInBinds in the lists are in the reverse of
+ -- the normal FloatInBinds order; that is, they are the right way round!
go [] drop_boxes = map (reverse . snd) drop_boxes
go (bind_w_fvs@(FB bndrs bind_fvs bind) : binds) drop_boxes@(here_box : fork_boxes)
- = go binds new_boxes
- where
- -- "here" means the group of bindings dropped at the top of the fork
+ = go binds new_boxes
+ where
+ -- "here" means the group of bindings dropped at the top of the fork
- (used_here : used_in_flags) = [ fvs `intersectsVarSet` bndrs
- | (fvs, _) <- drop_boxes]
+ (used_here : used_in_flags) = [ fvs `intersectsVarSet` bndrs
+ | (fvs, _) <- drop_boxes]
- drop_here = used_here || not can_push
+ drop_here = used_here || not can_push
- -- For case expressions we duplicate the binding if it is
- -- reasonably small, and if it is not used in all the RHSs
- -- This is good for situations like
- -- let x = I# y in
- -- case e of
- -- C -> error x
- -- D -> error x
- -- E -> ...not mentioning x...
+ -- For case expressions we duplicate the binding if it is
+ -- reasonably small, and if it is not used in all the RHSs
+ -- This is good for situations like
+ -- let x = I# y in
+ -- case e of
+ -- C -> error x
+ -- D -> error x
+ -- E -> ...not mentioning x...
- n_alts = length used_in_flags
- n_used_alts = count id used_in_flags -- returns number of Trues in list.
+ n_alts = length used_in_flags
+ n_used_alts = count id used_in_flags -- returns number of Trues in list.
- can_push = n_used_alts == 1 -- Used in just one branch
- || (is_case && -- We are looking at case alternatives
- n_used_alts > 1 && -- It's used in more than one
- n_used_alts < n_alts && -- ...but not all
- floatIsDupable dflags bind) -- and we can duplicate the binding
+ can_push = n_used_alts == 1 -- Used in just one branch
+ || (is_case && -- We are looking at case alternatives
+ n_used_alts > 1 && -- It's used in more than one
+ n_used_alts < n_alts && -- ...but not all
+ floatIsDupable dflags bind) -- and we can duplicate the binding
- new_boxes | drop_here = (insert here_box : fork_boxes)
- | otherwise = (here_box : new_fork_boxes)
+ new_boxes | drop_here = (insert here_box : fork_boxes)
+ | otherwise = (here_box : new_fork_boxes)
- new_fork_boxes = zipWithEqual "FloatIn.sepBinds" insert_maybe fork_boxes used_in_flags
+ new_fork_boxes = zipWithEqual "FloatIn.sepBinds" insert_maybe fork_boxes used_in_flags
- insert :: DropBox -> DropBox
- insert (fvs,drops) = (fvs `unionVarSet` bind_fvs, bind_w_fvs:drops)
+ insert :: DropBox -> DropBox
+ insert (fvs,drops) = (fvs `unionVarSet` bind_fvs, bind_w_fvs:drops)
- insert_maybe box True = insert box
- insert_maybe box False = box
+ insert_maybe box True = insert box
+ insert_maybe box False = box
go _ _ = panic "sepBindsByDropPoint/go"
diff --git a/compiler/simplCore/FloatOut.lhs b/compiler/simplCore/FloatOut.lhs
index 37d6dc8568..55ed111a70 100644
--- a/compiler/simplCore/FloatOut.lhs
+++ b/compiler/simplCore/FloatOut.lhs
@@ -8,25 +8,19 @@
\begin{code}
{-# LANGUAGE CPP #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module FloatOut ( floatOutwards ) where
import CoreSyn
import CoreUtils
import MkCore
-import CoreArity ( etaExpand )
-import CoreMonad ( FloatOutSwitches(..) )
+import CoreArity ( etaExpand )
+import CoreMonad ( FloatOutSwitches(..) )
import DynFlags
-import ErrUtils ( dumpIfSet_dyn )
-import Id ( Id, idArity, isBottomingId )
-import Var ( Var )
+import ErrUtils ( dumpIfSet_dyn )
+import Id ( Id, idArity, isBottomingId )
+import Var ( Var )
import SetLevels
import UniqSupply ( UniqSupply )
import Bag
@@ -39,33 +33,33 @@ import qualified Data.IntMap as M
#include "HsVersions.h"
\end{code}
- -----------------
- Overall game plan
- -----------------
+ -----------------
+ Overall game plan
+ -----------------
The Big Main Idea is:
- To float out sub-expressions that can thereby get outside
- a non-one-shot value lambda, and hence may be shared.
+ To float out sub-expressions that can thereby get outside
+ a non-one-shot value lambda, and hence may be shared.
To achieve this we may need to do two thing:
a) Let-bind the sub-expression:
- f (g x) ==> let lvl = f (g x) in lvl
+ f (g x) ==> let lvl = f (g x) in lvl
- Now we can float the binding for 'lvl'.
+ Now we can float the binding for 'lvl'.
b) More than that, we may need to abstract wrt a type variable
- \x -> ... /\a -> let v = ...a... in ....
+ \x -> ... /\a -> let v = ...a... in ....
Here the binding for v mentions 'a' but not 'x'. So we
abstract wrt 'a', to give this binding for 'v':
- vp = /\a -> ...a...
- v = vp a
+ vp = /\a -> ...a...
+ v = vp a
Now the binding for vp can float out unimpeded.
I can't remember why this case seemed important enough to
@@ -86,9 +80,9 @@ At the moment we never float a binding out to between two adjacent
lambdas. For example:
@
- \x y -> let t = x+x in ...
+ \x y -> let t = x+x in ...
===>
- \x -> let t = x+x in \y -> ...
+ \x -> let t = x+x in \y -> ...
@
Reason: this is less efficient in the case where the original lambda
is never partially applied.
@@ -98,49 +92,49 @@ But there's a case I've seen where this might not be true. Consider:
elEm2 x ys
= elem' x ys
where
- elem' _ [] = False
- elem' x (y:ys) = x==y || elem' x ys
+ elem' _ [] = False
+ elem' x (y:ys) = x==y || elem' x ys
@
It turns out that this generates a subexpression of the form
@
- \deq x ys -> let eq = eqFromEqDict deq in ...
+ \deq x ys -> let eq = eqFromEqDict deq in ...
@
vwhich might usefully be separated to
@
- \deq -> let eq = eqFromEqDict deq in \xy -> ...
+ \deq -> let eq = eqFromEqDict deq in \xy -> ...
@
Well, maybe. We don't do this at the moment.
%************************************************************************
-%* *
+%* *
\subsection[floatOutwards]{@floatOutwards@: let-floating interface function}
-%* *
+%* *
%************************************************************************
\begin{code}
floatOutwards :: FloatOutSwitches
- -> DynFlags
- -> UniqSupply
- -> CoreProgram -> IO CoreProgram
+ -> DynFlags
+ -> UniqSupply
+ -> CoreProgram -> IO CoreProgram
floatOutwards float_sws dflags us pgm
= do {
- let { annotated_w_levels = setLevels float_sws pgm us ;
- (fss, binds_s') = unzip (map floatTopBind annotated_w_levels)
- } ;
+ let { annotated_w_levels = setLevels float_sws pgm us ;
+ (fss, binds_s') = unzip (map floatTopBind annotated_w_levels)
+ } ;
- dumpIfSet_dyn dflags Opt_D_verbose_core2core "Levels added:"
- (vcat (map ppr annotated_w_levels));
+ dumpIfSet_dyn dflags Opt_D_verbose_core2core "Levels added:"
+ (vcat (map ppr annotated_w_levels));
- let { (tlets, ntlets, lams) = get_stats (sum_stats fss) };
+ let { (tlets, ntlets, lams) = get_stats (sum_stats fss) };
- dumpIfSet_dyn dflags Opt_D_dump_simpl_stats "FloatOut stats:"
- (hcat [ int tlets, ptext (sLit " Lets floated to top level; "),
- int ntlets, ptext (sLit " Lets floated elsewhere; from "),
- int lams, ptext (sLit " Lambda groups")]);
+ dumpIfSet_dyn dflags Opt_D_dump_simpl_stats "FloatOut stats:"
+ (hcat [ int tlets, ptext (sLit " Lets floated to top level; "),
+ int ntlets, ptext (sLit " Lets floated elsewhere; from "),
+ int lams, ptext (sLit " Lambda groups")]);
- return (bagToList (unionManyBags binds_s'))
+ return (bagToList (unionManyBags binds_s'))
}
floatTopBind :: LevelledBind -> (FloatStats, Bag CoreBind)
@@ -153,9 +147,9 @@ floatTopBind bind
\end{code}
%************************************************************************
-%* *
+%* *
\subsection[FloatOut-Bind]{Floating in a binding (the business end)}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -163,10 +157,10 @@ floatBind :: LevelledBind -> (FloatStats, FloatBinds, CoreBind)
floatBind (NonRec (TB var _) rhs)
= case (floatExpr rhs) of { (fs, rhs_floats, rhs') ->
- -- A tiresome hack:
- -- see Note [Bottoming floats: eta expansion] in SetLevels
+ -- A tiresome hack:
+ -- see Note [Bottoming floats: eta expansion] in SetLevels
let rhs'' | isBottomingId var = etaExpand (idArity var) rhs'
- | otherwise = rhs'
+ | otherwise = rhs'
in (fs, rhs_floats, NonRec var rhs'') }
@@ -178,7 +172,7 @@ floatBind (Rec pairs)
| isTopLvl dest_lvl -- See Note [floatBind for top level]
= case (floatExpr rhs) of { (fs, rhs_floats, rhs') ->
(fs, emptyFloats, addTopFloatPairs (flattenTopFloats rhs_floats) [(name, rhs')])}
- | otherwise -- Note [Floating out of Rec rhss]
+ | otherwise -- Note [Floating out of Rec rhss]
= case (floatExpr rhs) of { (fs, rhs_floats, rhs') ->
case (partitionByLevel dest_lvl rhs_floats) of { (rhs_floats', heres) ->
case (splitRecFloats heres) of { (pairs, case_heres) ->
@@ -208,9 +202,9 @@ installUnderLambdas floats e
---------------
floatList :: (a -> (FloatStats, FloatBinds, b)) -> [a] -> (FloatStats, FloatBinds, [b])
floatList _ [] = (zeroStats, emptyFloats, [])
-floatList f (a:as) = case f a of { (fs_a, binds_a, b) ->
- case floatList f as of { (fs_as, binds_as, bs) ->
- (fs_a `add_stats` fs_as, binds_a `plusFloats` binds_as, b:bs) }}
+floatList f (a:as) = case f a of { (fs_a, binds_a, b) ->
+ case floatList f as of { (fs_as, binds_as, bs) ->
+ (fs_a `add_stats` fs_as, binds_a `plusFloats` binds_as, b:bs) }}
\end{code}
Note [Floating out of Rec rhss]
@@ -219,19 +213,19 @@ Consider Rec { f<1,0> = \xy. body }
From the body we may get some floats. The ones with level <1,0> must
stay here, since they may mention f. Ideally we'd like to make them
part of the Rec block pairs -- but we can't if there are any
-FloatCases involved.
+FloatCases involved.
Nor is it a good idea to dump them in the rhs, but outside the lambda
f = case x of I# y -> \xy. body
because now f's arity might get worse, which is Not Good. (And if
-there's an SCC around the RHS it might not get better again.
+there's an SCC around the RHS it might not get better again.
See Trac #5342.)
-So, gruesomely, we split the floats into
- * the outer FloatLets, which can join the Rec, and
+So, gruesomely, we split the floats into
+ * the outer FloatLets, which can join the Rec, and
* an inner batch starting in a FloatCase, which are then
- pushed *inside* the lambdas.
-This loses full-laziness the rare situation where there is a
+ pushed *inside* the lambdas.
+This loses full-laziness the rare situation where there is a
FloatCase and a Rec interacting.
Note [floatBind for top level]
@@ -239,7 +233,7 @@ Note [floatBind for top level]
We may have a *nested* binding whose destination level is (FloatMe tOP_LEVEL), thus
letrec { foo <0,0> = .... (let bar<0,0> = .. in ..) .... }
The binding for bar will be in the "tops" part of the floating binds,
-and thus not partioned by floatBody.
+and thus not partioned by floatBody.
We could perhaps get rid of the 'tops' component of the floating binds,
but this case works just as well.
@@ -248,28 +242,28 @@ but this case works just as well.
%************************************************************************
\subsection[FloatOut-Expr]{Floating in expressions}
-%* *
+%* *
%************************************************************************
\begin{code}
floatBody :: Level
-> LevelledExpr
- -> (FloatStats, FloatBinds, CoreExpr)
+ -> (FloatStats, FloatBinds, CoreExpr)
-floatBody lvl arg -- Used rec rhss, and case-alternative rhss
+floatBody lvl arg -- Used rec rhss, and case-alternative rhss
= case (floatExpr arg) of { (fsa, floats, arg') ->
case (partitionByLevel lvl floats) of { (floats', heres) ->
- -- Dump bindings are bound here
+ -- Dump bindings are bound here
(fsa, floats', install heres arg') }}
-----------------
floatExpr :: LevelledExpr
- -> (FloatStats, FloatBinds, CoreExpr)
+ -> (FloatStats, FloatBinds, CoreExpr)
floatExpr (Var v) = (zeroStats, emptyFloats, Var v)
floatExpr (Type ty) = (zeroStats, emptyFloats, Type ty)
floatExpr (Coercion co) = (zeroStats, emptyFloats, Coercion co)
floatExpr (Lit lit) = (zeroStats, emptyFloats, Lit lit)
-
+
floatExpr (App e a)
= case (floatExpr e) of { (fse, floats_e, e') ->
case (floatExpr a) of { (fsa, floats_a, a') ->
@@ -277,10 +271,10 @@ floatExpr (App e a)
floatExpr lam@(Lam (TB _ lam_spec) _)
= let (bndrs_w_lvls, body) = collectBinders lam
- bndrs = [b | TB b _ <- bndrs_w_lvls]
+ bndrs = [b | TB b _ <- bndrs_w_lvls]
bndr_lvl = floatSpecLevel lam_spec
- -- All the binders have the same level
- -- See SetLevels.lvlLamBndrs
+ -- All the binders have the same level
+ -- See SetLevels.lvlLamBndrs
in
case (floatBody bndr_lvl body) of { (fs, floats, body') ->
(add_to_stats fs floats, floats, mkLams bndrs body') }
@@ -289,8 +283,8 @@ floatExpr (Tick tickish expr)
| tickishScoped tickish
= case (floatExpr expr) of { (fs, floating_defns, expr') ->
let
- -- Annotate bindings floated outwards past an scc expression
- -- with the cc. We mark that cc as "duplicated", though.
+ -- Annotate bindings floated outwards past an scc expression
+ -- with the cc. We mark that cc as "duplicated", though.
annotated_defns = wrapTick (mkNoCount tickish) floating_defns
in
(fs, annotated_defns, Tick tickish expr') }
@@ -305,62 +299,62 @@ floatExpr (Cast expr co)
floatExpr (Let bind body)
= case bind_spec of
- FloatMe dest_lvl
+ FloatMe dest_lvl
-> case (floatBind bind) of { (fsb, bind_floats, bind') ->
- case (floatExpr body) of { (fse, body_floats, body') ->
- ( add_stats fsb fse
- , bind_floats `plusFloats` unitLetFloat dest_lvl bind'
+ case (floatExpr body) of { (fse, body_floats, body') ->
+ ( add_stats fsb fse
+ , bind_floats `plusFloats` unitLetFloat dest_lvl bind'
`plusFloats` body_floats
- , body') }}
+ , body') }}
StayPut bind_lvl -- See Note [Avoiding unnecessary floating]
-> case (floatBind bind) of { (fsb, bind_floats, bind') ->
- case (floatBody bind_lvl body) of { (fse, body_floats, body') ->
- ( add_stats fsb fse
- , bind_floats `plusFloats` body_floats
- , Let bind' body') }}
+ case (floatBody bind_lvl body) of { (fse, body_floats, body') ->
+ ( add_stats fsb fse
+ , bind_floats `plusFloats` body_floats
+ , Let bind' body') }}
where
- bind_spec = case bind of
- NonRec (TB _ s) _ -> s
- Rec ((TB _ s, _) : _) -> s
+ bind_spec = case bind of
+ NonRec (TB _ s) _ -> s
+ Rec ((TB _ s, _) : _) -> s
Rec [] -> panic "floatExpr:rec"
floatExpr (Case scrut (TB case_bndr case_spec) ty alts)
= case case_spec of
- FloatMe dest_lvl -- Case expression moves
+ FloatMe dest_lvl -- Case expression moves
| [(con@(DataAlt {}), bndrs, rhs)] <- alts
-> case floatExpr scrut of { (fse, fde, scrut') ->
- case floatExpr rhs of { (fsb, fdb, rhs') ->
- let
- float = unitCaseFloat dest_lvl scrut'
+ case floatExpr rhs of { (fsb, fdb, rhs') ->
+ let
+ float = unitCaseFloat dest_lvl scrut'
case_bndr con [b | TB b _ <- bndrs]
- in
- (add_stats fse fsb, fde `plusFloats` float `plusFloats` fdb, rhs') }}
+ in
+ (add_stats fse fsb, fde `plusFloats` float `plusFloats` fdb, rhs') }}
| otherwise
-> pprPanic "Floating multi-case" (ppr alts)
StayPut bind_lvl -- Case expression stays put
- -> case floatExpr scrut of { (fse, fde, scrut') ->
- case floatList (float_alt bind_lvl) alts of { (fsa, fda, alts') ->
- (add_stats fse fsa, fda `plusFloats` fde, Case scrut' case_bndr ty alts')
- }}
+ -> case floatExpr scrut of { (fse, fde, scrut') ->
+ case floatList (float_alt bind_lvl) alts of { (fsa, fda, alts') ->
+ (add_stats fse fsa, fda `plusFloats` fde, Case scrut' case_bndr ty alts')
+ }}
where
float_alt bind_lvl (con, bs, rhs)
- = case (floatBody bind_lvl rhs) of { (fs, rhs_floats, rhs') ->
- (fs, rhs_floats, (con, [b | TB b _ <- bs], rhs')) }
+ = case (floatBody bind_lvl rhs) of { (fs, rhs_floats, rhs') ->
+ (fs, rhs_floats, (con, [b | TB b _ <- bs], rhs')) }
\end{code}
Note [Avoiding unnecessary floating]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In general we want to avoid floating a let unnecessarily, because
it might worsen strictness:
- let
+ let
x = ...(let y = e in y+y)....
Here y is demanded. If we float it outside the lazy 'x=..' then
we'd have to zap its demand info, and it may never be restored.
So at a 'let' we leave the binding right where the are unless
-the binding will escape a value lambda, e.g.
+the binding will escape a value lambda, e.g.
(\x -> let y = fac 100 in y)
@@ -374,25 +368,25 @@ We used instead to do the partitionByMajorLevel on the RHS of an '=',
in floatRhs. But that was quite tiresome. We needed to test for
values or trival rhss, because (in particular) we don't want to insert
new bindings between the "=" and the "\". E.g.
- f = \x -> let <bind> in <body>
+ f = \x -> let <bind> in <body>
We do not want
- f = let <bind> in \x -> <body>
+ f = let <bind> in \x -> <body>
(a) The simplifier will immediately float it further out, so we may
- as well do so right now; in general, keeping rhss as manifest
- values is good
+ as well do so right now; in general, keeping rhss as manifest
+ values is good
(b) If a float-in pass follows immediately, it might add yet more
- bindings just after the '='. And some of them might (correctly)
- be strict even though the 'let f' is lazy, because f, being a value,
- gets its demand-info zapped by the simplifier.
+ bindings just after the '='. And some of them might (correctly)
+ be strict even though the 'let f' is lazy, because f, being a value,
+ gets its demand-info zapped by the simplifier.
And even all that turned out to be very fragile, and broke
altogether when profiling got in the way.
So now we do the partition right at the (Let..) itself.
%************************************************************************
-%* *
+%* *
\subsection{Utility bits for floating stats}
-%* *
+%* *
%************************************************************************
I didn't implement this with unboxed numbers. I don't want to be too
@@ -400,9 +394,9 @@ strict in this stuff, as it is rarely turned on. (WDP 95/09)
\begin{code}
data FloatStats
- = FlS Int -- Number of top-floats * lambda groups they've been past
- Int -- Number of non-top-floats * lambda groups they've been past
- Int -- Number of lambda (groups) seen
+ = FlS Int -- Number of top-floats * lambda groups they've been past
+ Int -- Number of non-top-floats * lambda groups they've been past
+ Int -- Number of lambda (groups) seen
get_stats :: FloatStats -> (Int, Int, Int)
get_stats (FlS a b c) = (a, b, c)
@@ -424,9 +418,9 @@ add_to_stats (FlS a b c) (FB tops others)
%************************************************************************
-%* *
+%* *
\subsection{Utility bits for floating}
-%* *
+%* *
%************************************************************************
Note [Representation of FloatBinds]
@@ -435,10 +429,10 @@ The FloatBinds types is somewhat important. We can get very large numbers
of floating bindings, often all destined for the top level. A typical example
is x = [4,2,5,2,5, .... ]
Then we get lots of small expressions like (fromInteger 4), which all get
-lifted to top level.
+lifted to top level.
-The trouble is that
- (a) we partition these floating bindings *at every binding site*
+The trouble is that
+ (a) we partition these floating bindings *at every binding site*
(b) SetLevels introduces a new bindings site for every float
So we had better not look at each binding at each binding site!
@@ -450,24 +444,24 @@ partitionByMajorLevel.
\begin{code}
-type FloatLet = CoreBind -- INVARIANT: a FloatLet is always lifted
-type MajorEnv = M.IntMap MinorEnv -- Keyed by major level
+type FloatLet = CoreBind -- INVARIANT: a FloatLet is always lifted
+type MajorEnv = M.IntMap MinorEnv -- Keyed by major level
type MinorEnv = M.IntMap (Bag FloatBind) -- Keyed by minor level
-data FloatBinds = FB !(Bag FloatLet) -- Destined for top level
- !MajorEnv -- Levels other than top
+data FloatBinds = FB !(Bag FloatLet) -- Destined for top level
+ !MajorEnv -- Levels other than top
-- See Note [Representation of FloatBinds]
instance Outputable FloatBinds where
- ppr (FB fbs defs)
+ ppr (FB fbs defs)
= ptext (sLit "FB") <+> (braces $ vcat
[ ptext (sLit "tops =") <+> ppr fbs
, ptext (sLit "non-tops =") <+> ppr defs ])
flattenTopFloats :: FloatBinds -> Bag CoreBind
-flattenTopFloats (FB tops defs)
+flattenTopFloats (FB tops defs)
= ASSERT2( isEmptyBag (flattenMajor defs), ppr defs )
- tops
+ tops
addTopFloatPairs :: Bag CoreBind -> [(Id,CoreExpr)] -> [(Id,CoreExpr)]
addTopFloatPairs float_bag prs
@@ -486,18 +480,18 @@ emptyFloats :: FloatBinds
emptyFloats = FB emptyBag M.empty
unitCaseFloat :: Level -> CoreExpr -> Id -> AltCon -> [Var] -> FloatBinds
-unitCaseFloat (Level major minor) e b con bs
+unitCaseFloat (Level major minor) e b con bs
= FB emptyBag (M.singleton major (M.singleton minor (unitBag (FloatCase e b con bs))))
unitLetFloat :: Level -> FloatLet -> FloatBinds
-unitLetFloat lvl@(Level major minor) b
+unitLetFloat lvl@(Level major minor) b
| isTopLvl lvl = FB (unitBag b) M.empty
| otherwise = FB emptyBag (M.singleton major (M.singleton minor floats))
where
floats = unitBag (FloatLet b)
plusFloats :: FloatBinds -> FloatBinds -> FloatBinds
-plusFloats (FB t1 l1) (FB t2 l2)
+plusFloats (FB t1 l1) (FB t2 l2)
= FB (t1 `unionBags` t2) (l1 `plusMajor` l2)
plusMajor :: MajorEnv -> MajorEnv -> MajorEnv
@@ -511,20 +505,20 @@ install defn_groups expr
= foldrBag wrapFloat expr defn_groups
partitionByLevel
- :: Level -- Partitioning level
- -> FloatBinds -- Defns to be divided into 2 piles...
- -> (FloatBinds, -- Defns with level strictly < partition level,
- Bag FloatBind) -- The rest
+ :: Level -- Partitioning level
+ -> FloatBinds -- Defns to be divided into 2 piles...
+ -> (FloatBinds, -- Defns with level strictly < partition level,
+ Bag FloatBind) -- The rest
{-
--- ---- partitionByMajorLevel ----
--- Float it if we escape a value lambda,
+-- ---- partitionByMajorLevel ----
+-- Float it if we escape a value lambda,
-- *or* if we get to the top level
-- *or* if it's a case-float and its minor level is < current
---
--- If we can get to the top level, say "yes" anyway. This means that
--- x = f e
--- transforms to
+--
+-- If we can get to the top level, say "yes" anyway. This means that
+-- x = f e
+-- transforms to
-- lvl = e
-- x = f lvl
-- which is as it should be
@@ -533,14 +527,14 @@ partitionByMajorLevel (Level major _) (FB tops defns)
= (FB tops outer, heres `unionBags` flattenMajor inner)
where
(outer, mb_heres, inner) = M.splitLookup major defns
- heres = case mb_heres of
+ heres = case mb_heres of
Nothing -> emptyBag
Just h -> flattenMinor h
-}
partitionByLevel (Level major minor) (FB tops defns)
= (FB tops (outer_maj `plusMajor` M.singleton major outer_min),
- here_min `unionBags` flattenMinor inner_min
+ here_min `unionBags` flattenMinor inner_min
`unionBags` flattenMajor inner_maj)
where
@@ -554,7 +548,7 @@ wrapTick :: Tickish Id -> FloatBinds -> FloatBinds
wrapTick t (FB tops defns)
= FB (mapBag wrap_bind tops) (M.map (M.map wrap_defns) defns)
where
- wrap_defns = mapBag wrap_one
+ wrap_defns = mapBag wrap_one
wrap_bind (NonRec binder rhs) = NonRec binder (maybe_tick rhs)
wrap_bind (Rec pairs) = Rec (mapSnd maybe_tick pairs)
diff --git a/compiler/typecheck/FamInst.lhs b/compiler/typecheck/FamInst.lhs
index b917491af2..016dc08a20 100644
--- a/compiler/typecheck/FamInst.lhs
+++ b/compiler/typecheck/FamInst.lhs
@@ -2,17 +2,11 @@ The @FamInst@ type: family instance heads
\begin{code}
{-# LANGUAGE CPP, GADTs #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module FamInst (
FamInstEnvs, tcGetFamInstEnvs,
checkFamInstConsistency, tcExtendLocalFamInstEnv,
- tcLookupFamInst,
+ tcLookupFamInst,
tcLookupDataFamInst, tcInstNewTyConTF_maybe, tcInstNewTyCon_maybe,
newFamInst
) where
@@ -46,9 +40,9 @@ import qualified Data.Map as Map
\end{code}
%************************************************************************
-%* *
+%* *
Making a FamInst
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -81,9 +75,9 @@ newFamInst flavor axiom@(CoAxiom { co_ax_branches = FirstBranch branch
%************************************************************************
-%* *
- Optimised overlap checking for family instances
-%* *
+%* *
+ Optimised overlap checking for family instances
+%* *
%************************************************************************
For any two family instance modules that we import directly or indirectly, we
@@ -91,12 +85,12 @@ check whether the instances in the two modules are consistent, *unless* we can
be certain that the instances of the two modules have already been checked for
consistency during the compilation of modules that we import.
-Why do we need to check? Consider
- module X1 where module X2 where
- data T1 data T2
- type instance F T1 b = Int type instance F a T2 = Char
- f1 :: F T1 a -> Int f2 :: Char -> F a T2
- f1 x = x f2 x = x
+Why do we need to check? Consider
+ module X1 where module X2 where
+ data T1 data T2
+ type instance F T1 b = Int type instance F a T2 = Char
+ f1 :: F T1 a -> Int f2 :: Char -> F a T2
+ f1 x = x f2 x = x
Now if we import both X1 and X2 we could make (f2 . f1) :: Int -> Char.
Notice that neither instance is an orphan.
@@ -117,7 +111,7 @@ data ModulePair = ModulePair Module Module
--
canon :: ModulePair -> (Module, Module)
canon (ModulePair m1 m2) | m1 < m2 = (m1, m2)
- | otherwise = (m2, m1)
+ | otherwise = (m2, m1)
instance Eq ModulePair where
mp1 == mp2 = canon mp1 == canon mp2
@@ -141,26 +135,26 @@ checkFamInstConsistency famInstMods directlyImpMods
; (eps, hpt) <- getEpsAndHpt
; let { -- Fetch the iface of a given module. Must succeed as
- -- all directly imported modules must already have been loaded.
- modIface mod =
- case lookupIfaceByModule dflags hpt (eps_PIT eps) mod of
+ -- all directly imported modules must already have been loaded.
+ modIface mod =
+ case lookupIfaceByModule dflags hpt (eps_PIT eps) mod of
Nothing -> panic "FamInst.checkFamInstConsistency"
Just iface -> iface
; hmiModule = mi_module . hm_iface
- ; hmiFamInstEnv = extendFamInstEnvList emptyFamInstEnv
+ ; hmiFamInstEnv = extendFamInstEnvList emptyFamInstEnv
. md_fam_insts . hm_details
- ; hpt_fam_insts = mkModuleEnv [ (hmiModule hmi, hmiFamInstEnv hmi)
- | hmi <- eltsUFM hpt]
- ; groups = map (dep_finsts . mi_deps . modIface)
- directlyImpMods
- ; okPairs = listToSet $ concatMap allPairs groups
- -- instances of okPairs are consistent
- ; criticalPairs = listToSet $ allPairs famInstMods
- -- all pairs that we need to consider
+ ; hpt_fam_insts = mkModuleEnv [ (hmiModule hmi, hmiFamInstEnv hmi)
+ | hmi <- eltsUFM hpt]
+ ; groups = map (dep_finsts . mi_deps . modIface)
+ directlyImpMods
+ ; okPairs = listToSet $ concatMap allPairs groups
+ -- instances of okPairs are consistent
+ ; criticalPairs = listToSet $ allPairs famInstMods
+ -- all pairs that we need to consider
; toCheckPairs = Map.keys $ criticalPairs `Map.difference` okPairs
- -- the difference gives us the pairs we need to check now
- }
+ -- the difference gives us the pairs we need to check now
+ }
; mapM_ (check hpt_fam_insts) toCheckPairs
}
@@ -171,7 +165,7 @@ checkFamInstConsistency famInstMods directlyImpMods
check hpt_fam_insts (ModulePair m1 m2)
= do { env1 <- getFamInsts hpt_fam_insts m1
; env2 <- getFamInsts hpt_fam_insts m2
- ; mapM_ (checkForConflicts (emptyFamInstEnv, env2))
+ ; mapM_ (checkForConflicts (emptyFamInstEnv, env2))
(famInstEnvElts env1) }
getFamInsts :: ModuleEnv FamInstEnv -> Module -> TcM FamInstEnv
@@ -186,9 +180,9 @@ getFamInsts hpt_fam_insts mod
\end{code}
%************************************************************************
-%* *
- Lookup
-%* *
+%* *
+ Lookup
+%* *
%************************************************************************
Look up the instance tycon of a family instance.
@@ -270,9 +264,9 @@ tcInstNewTyConTF_maybe fam_envs ty
%************************************************************************
-%* *
- Extending the family instance environment
-%* *
+%* *
+ Extending the family instance environment
+%* *
%************************************************************************
\begin{code}
@@ -280,11 +274,11 @@ tcInstNewTyConTF_maybe fam_envs ty
tcExtendLocalFamInstEnv :: [FamInst] -> TcM a -> TcM a
tcExtendLocalFamInstEnv fam_insts thing_inside
= do { env <- getGblEnv
- ; (inst_env', fam_insts') <- foldlM addLocalFamInst
+ ; (inst_env', fam_insts') <- foldlM addLocalFamInst
(tcg_fam_inst_env env, tcg_fam_insts env)
fam_insts
; let env' = env { tcg_fam_insts = fam_insts'
- , tcg_fam_inst_env = inst_env' }
+ , tcg_fam_inst_env = inst_env' }
; setGblEnv env' thing_inside
}
@@ -325,9 +319,9 @@ addLocalFamInst (home_fie, my_fis) fam_inst
\end{code}
%************************************************************************
-%* *
- Checking an instance against conflicts with an instance env
-%* *
+%* *
+ Checking an instance against conflicts with an instance env
+%* *
%************************************************************************
Check whether a single family instance conflicts with those in two instance
@@ -351,7 +345,7 @@ conflictInstErr fam_inst conflictingMatch
| (FamInstMatch { fim_instance = confInst }) : _ <- conflictingMatch
= addFamInstsErr (ptext (sLit "Conflicting family instance declarations:"))
[fam_inst, confInst]
- | otherwise
+ | otherwise
= panic "conflictInstErr"
addFamInstsErr :: SDoc -> [FamInst] -> TcRn ()
@@ -373,7 +367,7 @@ addFamInstsErr herald insts
tcGetFamInstEnvs :: TcM FamInstEnvs
-- Gets both the external-package inst-env
-- and the home-pkg inst env (includes module being compiled)
-tcGetFamInstEnvs
+tcGetFamInstEnvs
= do { eps <- getEps; env <- getGblEnv
; return (eps_fam_inst_env eps, tcg_fam_inst_env env) }
\end{code}
diff --git a/compiler/typecheck/Inst.lhs b/compiler/typecheck/Inst.lhs
index de2f26af85..9998a1e4bc 100644
--- a/compiler/typecheck/Inst.lhs
+++ b/compiler/typecheck/Inst.lhs
@@ -7,27 +7,21 @@ The @Inst@ type: dictionaries or method instances
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
-module Inst (
- deeplySkolemise,
+
+module Inst (
+ deeplySkolemise,
deeplyInstantiate, instCall, instStupidTheta,
emitWanted, emitWanteds,
- newOverloadedLit, mkOverLit,
-
+ newOverloadedLit, mkOverLit,
+
tcGetInsts, tcGetInstEnvs, getOverlapFlag,
tcExtendLocalInstEnv, instCallConstraints, newMethodFromName,
tcSyntaxName,
-- Simple functions over evidence variables
- tyVarsOfWC, tyVarsOfBag,
- tyVarsOfCt, tyVarsOfCts,
+ tyVarsOfWC, tyVarsOfBag,
+ tyVarsOfCt, tyVarsOfCts,
tidyEvVar, tidyCt, tidySkolemInfo
) where
@@ -68,9 +62,9 @@ import Data.List( mapAccumL )
%************************************************************************
-%* *
- Emitting constraints
-%* *
+%* *
+ Emitting constraints
+%* *
%************************************************************************
\begin{code}
@@ -78,7 +72,7 @@ emitWanteds :: CtOrigin -> TcThetaType -> TcM [EvVar]
emitWanteds origin theta = mapM (emitWanted origin) theta
emitWanted :: CtOrigin -> TcPredType -> TcM EvVar
-emitWanted origin pred
+emitWanted origin pred
= do { loc <- getCtLoc origin
; ev <- newWantedEvVar pred
; emitFlat $ mkNonCanonical $
@@ -89,15 +83,15 @@ newMethodFromName :: CtOrigin -> Name -> TcRhoType -> TcM (HsExpr TcId)
-- Used when Name is the wired-in name for a wired-in class method,
-- so the caller knows its type for sure, which should be of form
-- forall a. C a => <blah>
--- newMethodFromName is supposed to instantiate just the outer
+-- newMethodFromName is supposed to instantiate just the outer
-- type variable and constraint
newMethodFromName origin name inst_ty
= do { id <- tcLookupId name
- -- Use tcLookupId not tcLookupGlobalId; the method is almost
- -- always a class op, but with -XRebindableSyntax GHC is
- -- meant to find whatever thing is in scope, and that may
- -- be an ordinary function.
+ -- Use tcLookupId not tcLookupGlobalId; the method is almost
+ -- always a class op, but with -XRebindableSyntax GHC is
+ -- meant to find whatever thing is in scope, and that may
+ -- be an ordinary function.
; let (tvs, theta, _caller_knows_this) = tcSplitSigmaTy (idType id)
(the_tv:rest) = tvs
@@ -110,9 +104,9 @@ newMethodFromName origin name inst_ty
%************************************************************************
-%* *
- Deep instantiation and skolemisation
-%* *
+%* *
+ Deep instantiation and skolemisation
+%* *
%************************************************************************
Note [Deep skolemisation]
@@ -122,11 +116,11 @@ with all its arrows visible (ie not buried under foralls)
Examples:
- deeplySkolemise (Int -> forall a. Ord a => blah)
+ deeplySkolemise (Int -> forall a. Ord a => blah)
= ( wp, [a], [d:Ord a], Int -> blah )
where wp = \x:Int. /\a. \(d:Ord a). <hole> x
- deeplySkolemise (forall a. Ord a => Maybe a -> forall b. Eq b => blah)
+ deeplySkolemise (forall a. Ord a => Maybe a -> forall b. Eq b => blah)
= ( wp, [a,b], [d1:Ord a,d2:Eq b], Maybe a -> blah )
where wp = /\a.\(d1:Ord a).\(x:Maybe a)./\b.\(d2:Ord b). <hole> x
@@ -177,9 +171,9 @@ deeplyInstantiate orig ty
; ids1 <- newSysLocalIds (fsLit "di") (substTys subst arg_tys)
; wrap1 <- instCall orig tys (substTheta subst theta)
; (wrap2, rho2) <- deeplyInstantiate orig (substTy subst rho)
- ; return (mkWpLams ids1
+ ; return (mkWpLams ids1
<.> wrap2
- <.> wrap1
+ <.> wrap1
<.> mkWpEvVarApps ids1,
mkFunTys arg_tys rho2) }
@@ -188,23 +182,23 @@ deeplyInstantiate orig ty
%************************************************************************
-%* *
+%* *
Instantiating a call
-%* *
+%* *
%************************************************************************
\begin{code}
----------------
instCall :: CtOrigin -> [TcType] -> TcThetaType -> TcM HsWrapper
-- Instantiate the constraints of a call
--- (instCall o tys theta)
+-- (instCall o tys theta)
-- (a) Makes fresh dictionaries as necessary for the constraints (theta)
-- (b) Throws these dictionaries into the LIE
-- (c) Returns an HsWrapper ([.] tys dicts)
-instCall orig tys theta
- = do { dict_app <- instCallConstraints orig theta
- ; return (dict_app <.> mkWpTyApps tys) }
+instCall orig tys theta
+ = do { dict_app <- instCallConstraints orig theta
+ ; return (dict_app <.> mkWpTyApps tys) }
----------------
instCallConstraints :: CtOrigin -> TcThetaType -> TcM HsWrapper
@@ -212,34 +206,34 @@ instCallConstraints :: CtOrigin -> TcThetaType -> TcM HsWrapper
-- into the LIE, and returns a HsWrapper to enclose the call site.
instCallConstraints orig preds
- | null preds
+ | null preds
= return idHsWrapper
| otherwise
= do { evs <- mapM go preds
; traceTc "instCallConstraints" (ppr evs)
; return (mkWpEvApps evs) }
where
- go pred
+ go pred
| Just (Nominal, ty1, ty2) <- getEqPredTys_maybe pred -- Try short-cut
= do { co <- unifyType ty1 ty2
; return (EvCoercion co) }
| otherwise
= do { ev_var <- emitWanted orig pred
- ; return (EvId ev_var) }
+ ; return (EvId ev_var) }
----------------
instStupidTheta :: CtOrigin -> TcThetaType -> TcM ()
-- Similar to instCall, but only emit the constraints in the LIE
-- Used exclusively for the 'stupid theta' of a data constructor
instStupidTheta orig theta
- = do { _co <- instCallConstraints orig theta -- Discard the coercion
- ; return () }
+ = do { _co <- instCallConstraints orig theta -- Discard the coercion
+ ; return () }
\end{code}
%************************************************************************
-%* *
- Literals
-%* *
+%* *
+ Literals
+%* *
%************************************************************************
In newOverloadedLit we convert directly to an Int or Integer if we
@@ -263,38 +257,38 @@ newOverloadedLit' :: DynFlags
-> TcM (HsOverLit TcId)
newOverloadedLit' dflags orig
lit@(OverLit { ol_val = val, ol_rebindable = rebindable
- , ol_witness = meth_name }) res_ty
+ , ol_witness = meth_name }) res_ty
| not rebindable
- , Just expr <- shortCutLit dflags val res_ty
- -- Do not generate a LitInst for rebindable syntax.
- -- Reason: If we do, tcSimplify will call lookupInst, which
- -- will call tcSyntaxName, which does unification,
- -- which tcSimplify doesn't like
+ , Just expr <- shortCutLit dflags val res_ty
+ -- Do not generate a LitInst for rebindable syntax.
+ -- Reason: If we do, tcSimplify will call lookupInst, which
+ -- will call tcSyntaxName, which does unification,
+ -- which tcSimplify doesn't like
= return (lit { ol_witness = expr, ol_type = res_ty
, ol_rebindable = rebindable })
| otherwise
- = do { hs_lit <- mkOverLit val
- ; let lit_ty = hsLitType hs_lit
- ; fi' <- tcSyntaxOp orig meth_name (mkFunTy lit_ty res_ty)
- -- Overloaded literals must have liftedTypeKind, because
- -- we're instantiating an overloaded function here,
- -- whereas res_ty might be openTypeKind. This was a bug in 6.2.2
- -- However this'll be picked up by tcSyntaxOp if necessary
- ; let witness = HsApp (noLoc fi') (noLoc (HsLit hs_lit))
- ; return (lit { ol_witness = witness, ol_type = res_ty
+ = do { hs_lit <- mkOverLit val
+ ; let lit_ty = hsLitType hs_lit
+ ; fi' <- tcSyntaxOp orig meth_name (mkFunTy lit_ty res_ty)
+ -- Overloaded literals must have liftedTypeKind, because
+ -- we're instantiating an overloaded function here,
+ -- whereas res_ty might be openTypeKind. This was a bug in 6.2.2
+ -- However this'll be picked up by tcSyntaxOp if necessary
+ ; let witness = HsApp (noLoc fi') (noLoc (HsLit hs_lit))
+ ; return (lit { ol_witness = witness, ol_type = res_ty
, ol_rebindable = rebindable }) }
------------
mkOverLit :: OverLitVal -> TcM HsLit
-mkOverLit (HsIntegral i)
- = do { integer_ty <- tcMetaTy integerTyConName
- ; return (HsInteger i integer_ty) }
+mkOverLit (HsIntegral i)
+ = do { integer_ty <- tcMetaTy integerTyConName
+ ; return (HsInteger i integer_ty) }
mkOverLit (HsFractional r)
- = do { rat_ty <- tcMetaTy rationalTyConName
- ; return (HsRat r rat_ty) }
+ = do { rat_ty <- tcMetaTy rationalTyConName
+ ; return (HsRat r rat_ty) }
mkOverLit (HsIsString s) = return (HsString s)
\end{code}
@@ -303,11 +297,11 @@ mkOverLit (HsIsString s) = return (HsString s)
%************************************************************************
-%* *
- Re-mappable syntax
-
+%* *
+ Re-mappable syntax
+
Used only for arrow syntax -- find a way to nuke this
-%* *
+%* *
%************************************************************************
Suppose we are doing the -XRebindableSyntax thing, and we encounter
@@ -320,23 +314,23 @@ this:
So the idea is to generate a local binding for (>>), thus:
- let then72 :: forall a b. m a -> m b -> m b
- then72 = ...something involving the user's (>>)...
- in
- ...the do-expression...
+ let then72 :: forall a b. m a -> m b -> m b
+ then72 = ...something involving the user's (>>)...
+ in
+ ...the do-expression...
Now the do-expression can proceed using then72, which has exactly
the expected type.
In fact tcSyntaxName just generates the RHS for then72, because we only
-want an actual binding in the do-expression case. For literals, we can
+want an actual binding in the do-expression case. For literals, we can
just use the expression inline.
\begin{code}
tcSyntaxName :: CtOrigin
- -> TcType -- Type to instantiate it at
- -> (Name, HsExpr Name) -- (Standard name, user name)
- -> TcM (Name, HsExpr TcId) -- (Standard name, suitable expression)
+ -> TcType -- Type to instantiate it at
+ -> (Name, HsExpr Name) -- (Standard name, user name)
+ -> TcM (Name, HsExpr TcId) -- (Standard name, suitable expression)
-- USED ONLY FOR CmdTop (sigh) ***
-- See Note [CmdSyntaxTable] in HsExpr
@@ -347,18 +341,18 @@ tcSyntaxName orig ty (std_nm, HsVar user_nm)
tcSyntaxName orig ty (std_nm, user_nm_expr) = do
std_id <- tcLookupId std_nm
- let
- -- C.f. newMethodAtLoc
- ([tv], _, tau) = tcSplitSigmaTy (idType std_id)
- sigma1 = substTyWith [tv] [ty] tau
- -- Actually, the "tau-type" might be a sigma-type in the
- -- case of locally-polymorphic methods.
+ let
+ -- C.f. newMethodAtLoc
+ ([tv], _, tau) = tcSplitSigmaTy (idType std_id)
+ sigma1 = substTyWith [tv] [ty] tau
+ -- Actually, the "tau-type" might be a sigma-type in the
+ -- case of locally-polymorphic methods.
addErrCtxtM (syntaxNameCtxt user_nm_expr orig sigma1) $ do
- -- Check that the user-supplied thing has the
- -- same type as the standard one.
- -- Tiresome jiggling because tcCheckSigma takes a located expression
+ -- Check that the user-supplied thing has the
+ -- same type as the standard one.
+ -- Tiresome jiggling because tcCheckSigma takes a located expression
span <- getSrcSpanM
expr <- tcPolyExpr (L span user_nm_expr) sigma1
return (std_nm, unLoc expr)
@@ -368,18 +362,18 @@ syntaxNameCtxt :: HsExpr Name -> CtOrigin -> Type -> TidyEnv
syntaxNameCtxt name orig ty tidy_env
= do { inst_loc <- getCtLoc orig
; let msg = vcat [ ptext (sLit "When checking that") <+> quotes (ppr name)
- <+> ptext (sLit "(needed by a syntactic construct)")
- , nest 2 (ptext (sLit "has the required type:")
+ <+> ptext (sLit "(needed by a syntactic construct)")
+ , nest 2 (ptext (sLit "has the required type:")
<+> ppr (tidyType tidy_env ty))
- , nest 2 (pprArisingAt inst_loc) ]
+ , nest 2 (pprArisingAt inst_loc) ]
; return (tidy_env, msg) }
\end{code}
%************************************************************************
-%* *
- Instances
-%* *
+%* *
+ Instances
+%* *
%************************************************************************
\begin{code}
@@ -400,7 +394,7 @@ tcGetInstEnvs :: TcM (InstEnv, InstEnv)
-- Gets both the external-package inst-env
-- and the home-pkg inst env (includes module being compiled)
tcGetInstEnvs = do { eps <- getEps; env <- getGblEnv;
- return (eps_inst_env eps, tcg_inst_env env) }
+ return (eps_inst_env eps, tcg_inst_env env) }
tcGetInsts :: TcM [ClsInst]
-- Gets the local class instances.
@@ -415,7 +409,7 @@ tcExtendLocalInstEnv dfuns thing_inside
(tcg_inst_env env, tcg_insts env)
dfuns
; let env' = env { tcg_insts = cls_insts'
- , tcg_inst_env = inst_env' }
+ , tcg_inst_env = inst_env' }
; setGblEnv env' thing_inside }
addLocalInst :: (InstEnv, [ClsInst]) -> ClsInst -> TcM (InstEnv, [ClsInst])
@@ -476,7 +470,7 @@ traceDFuns ispecs
where
pp ispec = hang (ppr (instanceDFunId ispec) <+> colon)
2 (ppr ispec)
- -- Print the dfun name itself too
+ -- Print the dfun name itself too
funDepErr :: ClsInst -> [ClsInst] -> TcRn ()
funDepErr ispec ispecs
@@ -486,7 +480,7 @@ funDepErr ispec ispecs
dupInstErr :: ClsInst -> ClsInst -> TcRn ()
dupInstErr ispec dup_ispec
= addClsInstsErr (ptext (sLit "Duplicate instance declarations:"))
- [ispec, dup_ispec]
+ [ispec, dup_ispec]
addClsInstsErr :: SDoc -> [ClsInst] -> TcRn ()
addClsInstsErr herald ispecs
@@ -500,18 +494,18 @@ addClsInstsErr herald ispecs
\end{code}
%************************************************************************
-%* *
- Simple functions over evidence variables
-%* *
+%* *
+ Simple functions over evidence variables
+%* *
%************************************************************************
\begin{code}
---------------- Getting free tyvars -------------------------
tyVarsOfCt :: Ct -> TcTyVarSet
--- NB: the
+-- NB: the
tyVarsOfCt (CTyEqCan { cc_tyvar = tv, cc_rhs = xi }) = extendVarSet (tyVarsOfType xi) tv
tyVarsOfCt (CFunEqCan { cc_tyargs = tys, cc_rhs = xi }) = tyVarsOfTypes (xi:tys)
-tyVarsOfCt (CDictCan { cc_tyargs = tys }) = tyVarsOfTypes tys
+tyVarsOfCt (CDictCan { cc_tyargs = tys }) = tyVarsOfTypes tys
tyVarsOfCt (CIrredEvCan { cc_ev = ev }) = tyVarsOfType (ctEvPred ev)
tyVarsOfCt (CHoleCan { cc_ev = ev }) = tyVarsOfType (ctEvPred ev)
tyVarsOfCt (CNonCanonical { cc_ev = ev }) = tyVarsOfType (ctEvPred ev)
@@ -541,14 +535,14 @@ tyVarsOfBag tvs_of = foldrBag (unionVarSet . tvs_of) emptyVarSet
tidyCt :: TidyEnv -> Ct -> Ct
-- Used only in error reporting
-- Also converts it to non-canonical
-tidyCt env ct
+tidyCt env ct
= case ct of
CHoleCan { cc_ev = ev }
-> ct { cc_ev = tidy_ev env ev }
_ -> mkNonCanonical (tidy_ev env (ctEvidence ct))
- where
+ where
tidy_ev :: TidyEnv -> CtEvidence -> CtEvidence
- -- NB: we do not tidy the ctev_evtm/var field because we don't
+ -- NB: we do not tidy the ctev_evtm/var field because we don't
-- show it in error messages
tidy_ev env ctev@(CtGiven { ctev_pred = pred })
= ctev { ctev_pred = tidyType env pred }
@@ -561,12 +555,12 @@ tidyEvVar :: TidyEnv -> EvVar -> EvVar
tidyEvVar env var = setVarType var (tidyType env (varType var))
tidySkolemInfo :: TidyEnv -> SkolemInfo -> (TidyEnv, SkolemInfo)
-tidySkolemInfo env (SigSkol cx ty)
+tidySkolemInfo env (SigSkol cx ty)
= (env', SigSkol cx ty')
where
(env', ty') = tidyOpenType env ty
-tidySkolemInfo env (InferSkol ids)
+tidySkolemInfo env (InferSkol ids)
= (env', InferSkol ids')
where
(env', ids') = mapAccumL do_one env ids
@@ -574,7 +568,7 @@ tidySkolemInfo env (InferSkol ids)
where
(env', ty') = tidyOpenType env ty
-tidySkolemInfo env (UnifyForAllSkol skol_tvs ty)
+tidySkolemInfo env (UnifyForAllSkol skol_tvs ty)
= (env1, UnifyForAllSkol skol_tvs' ty')
where
env1 = tidyFreeTyVars env (tyVarsOfType ty `delVarSetList` skol_tvs)
diff --git a/compiler/typecheck/TcClassDcl.lhs b/compiler/typecheck/TcClassDcl.lhs
index be5a74f294..29d47b42d8 100644
--- a/compiler/typecheck/TcClassDcl.lhs
+++ b/compiler/typecheck/TcClassDcl.lhs
@@ -7,19 +7,13 @@ Typechecking class declarations
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
-module TcClassDcl ( tcClassSigs, tcClassDecl2,
- findMethodBind, instantiateMethod, tcInstanceMethodBody,
- tcClassMinimalDef,
+
+module TcClassDcl ( tcClassSigs, tcClassDecl2,
+ findMethodBind, instantiateMethod, tcInstanceMethodBody,
+ tcClassMinimalDef,
HsSigFun, mkHsSigFun, lookupHsSig, emptyHsSigs,
- tcMkDeclCtxt, tcAddDeclCtxt, badMethodErr
- ) where
+ tcMkDeclCtxt, tcAddDeclCtxt, badMethodErr
+ ) where
#include "HsVersions.h"
@@ -59,45 +53,45 @@ Dictionary handling
Every class implicitly declares a new data type, corresponding to dictionaries
of that class. So, for example:
- class (D a) => C a where
- op1 :: a -> a
- op2 :: forall b. Ord b => a -> b -> b
+ class (D a) => C a where
+ op1 :: a -> a
+ op2 :: forall b. Ord b => a -> b -> b
would implicitly declare
- data CDict a = CDict (D a)
- (a -> a)
- (forall b. Ord b => a -> b -> b)
+ data CDict a = CDict (D a)
+ (a -> a)
+ (forall b. Ord b => a -> b -> b)
(We could use a record decl, but that means changing more of the existing apparatus.
One step at at time!)
For classes with just one superclass+method, we use a newtype decl instead:
- class C a where
- op :: forallb. a -> b -> b
+ class C a where
+ op :: forallb. a -> b -> b
generates
- newtype CDict a = CDict (forall b. a -> b -> b)
+ newtype CDict a = CDict (forall b. a -> b -> b)
-Now DictTy in Type is just a form of type synomym:
- DictTy c t = TyConTy CDict `AppTy` t
+Now DictTy in Type is just a form of type synomym:
+ DictTy c t = TyConTy CDict `AppTy` t
Death to "ExpandingDicts".
%************************************************************************
-%* *
- Type-checking the class op signatures
-%* *
+%* *
+ Type-checking the class op signatures
+%* *
%************************************************************************
\begin{code}
-tcClassSigs :: Name -- Name of the class
- -> [LSig Name]
- -> LHsBinds Name
- -> TcM ([TcMethInfo], -- Exactly one for each method
+tcClassSigs :: Name -- Name of the class
+ -> [LSig Name]
+ -> LHsBinds Name
+ -> TcM ([TcMethInfo], -- Exactly one for each method
NameEnv Type) -- Types of the generic-default methods
tcClassSigs clas sigs def_methods
= do { traceTc "tcClassSigs 1" (ppr clas)
@@ -110,23 +104,23 @@ tcClassSigs clas sigs def_methods
; let op_names = mkNameSet [ n | (n,_,_) <- op_info ]
; sequence_ [ failWithTc (badMethodErr clas n)
| n <- dm_bind_names, not (n `elemNameSet` op_names) ]
- -- Value binding for non class-method (ie no TypeSig)
+ -- Value binding for non class-method (ie no TypeSig)
; sequence_ [ failWithTc (badGenericMethod clas n)
| (n,_) <- gen_dm_prs, not (n `elem` dm_bind_names) ]
- -- Generic signature without value binding
+ -- Generic signature without value binding
; traceTc "tcClassSigs 2" (ppr clas)
; return (op_info, gen_dm_env) }
where
vanilla_sigs = [L loc (nm,ty) | L loc (TypeSig nm ty) <- sigs]
gen_sigs = [L loc (nm,ty) | L loc (GenericSig nm ty) <- sigs]
- dm_bind_names :: [Name] -- These ones have a value binding in the class decl
+ dm_bind_names :: [Name] -- These ones have a value binding in the class decl
dm_bind_names = [op | L _ (FunBind {fun_id = L _ op}) <- bagToList def_methods]
tc_sig genop_env (op_names, op_hs_ty)
= do { traceTc "ClsSig 1" (ppr op_names)
- ; op_ty <- tcClassSigType op_hs_ty -- Class tyvars already in scope
+ ; op_ty <- tcClassSigType op_hs_ty -- Class tyvars already in scope
; traceTc "ClsSig 2" (ppr op_names)
; return [ (op_name, f op_name, op_ty) | L _ op_name <- op_names ] }
where
@@ -141,64 +135,64 @@ tcClassSigs clas sigs def_methods
%************************************************************************
-%* *
- Class Declarations
-%* *
+%* *
+ Class Declarations
+%* *
%************************************************************************
\begin{code}
-tcClassDecl2 :: LTyClDecl Name -- The class declaration
- -> TcM (LHsBinds Id)
+tcClassDecl2 :: LTyClDecl Name -- The class declaration
+ -> TcM (LHsBinds Id)
-tcClassDecl2 (L loc (ClassDecl {tcdLName = class_name, tcdSigs = sigs,
- tcdMeths = default_binds}))
- = recoverM (return emptyLHsBinds) $
- setSrcSpan loc $
+tcClassDecl2 (L loc (ClassDecl {tcdLName = class_name, tcdSigs = sigs,
+ tcdMeths = default_binds}))
+ = recoverM (return emptyLHsBinds) $
+ setSrcSpan loc $
do { clas <- tcLookupLocatedClass class_name
- -- We make a separate binding for each default method.
- -- At one time I used a single AbsBinds for all of them, thus
- -- AbsBind [d] [dm1, dm2, dm3] { dm1 = ...; dm2 = ...; dm3 = ... }
- -- But that desugars into
- -- ds = \d -> (..., ..., ...)
- -- dm1 = \d -> case ds d of (a,b,c) -> a
- -- And since ds is big, it doesn't get inlined, so we don't get good
- -- default methods. Better to make separate AbsBinds for each
- ; let
- (tyvars, _, _, op_items) = classBigSig clas
+ -- We make a separate binding for each default method.
+ -- At one time I used a single AbsBinds for all of them, thus
+ -- AbsBind [d] [dm1, dm2, dm3] { dm1 = ...; dm2 = ...; dm3 = ... }
+ -- But that desugars into
+ -- ds = \d -> (..., ..., ...)
+ -- dm1 = \d -> case ds d of (a,b,c) -> a
+ -- And since ds is big, it doesn't get inlined, so we don't get good
+ -- default methods. Better to make separate AbsBinds for each
+ ; let
+ (tyvars, _, _, op_items) = classBigSig clas
prag_fn = mkPragFun sigs default_binds
- sig_fn = mkHsSigFun sigs
+ sig_fn = mkHsSigFun sigs
clas_tyvars = snd (tcSuperSkolTyVars tyvars)
- pred = mkClassPred clas (mkTyVarTys clas_tyvars)
- ; this_dict <- newEvVar pred
+ pred = mkClassPred clas (mkTyVarTys clas_tyvars)
+ ; this_dict <- newEvVar pred
- ; traceTc "TIM2" (ppr sigs)
- ; let tc_dm = tcDefMeth clas clas_tyvars
- this_dict default_binds
- sig_fn prag_fn
+ ; traceTc "TIM2" (ppr sigs)
+ ; let tc_dm = tcDefMeth clas clas_tyvars
+ this_dict default_binds
+ sig_fn prag_fn
- ; dm_binds <- tcExtendTyVarEnv clas_tyvars $
+ ; dm_binds <- tcExtendTyVarEnv clas_tyvars $
mapM tc_dm op_items
- ; return (unionManyBags dm_binds) }
+ ; return (unionManyBags dm_binds) }
tcClassDecl2 d = pprPanic "tcClassDecl2" (ppr d)
-
+
tcDefMeth :: Class -> [TyVar] -> EvVar -> LHsBinds Name
-> HsSigFun -> PragFun -> ClassOpItem
-> TcM (LHsBinds TcId)
-- Generate code for polymorphic default methods only (hence DefMeth)
-- (Generic default methods have turned into instance decls by now.)
--- This is incompatible with Hugs, which expects a polymorphic
--- default method for every class op, regardless of whether or not
--- the programmer supplied an explicit default decl for the class.
+-- This is incompatible with Hugs, which expects a polymorphic
+-- default method for every class op, regardless of whether or not
+-- the programmer supplied an explicit default decl for the class.
-- (If necessary we can fix that, but we don't have a convenient Id to hand.)
tcDefMeth clas tyvars this_dict binds_in hs_sig_fn prag_fn (sel_id, dm_info)
= case dm_info of
NoDefMeth -> do { mapM_ (addLocM (badDmPrag sel_id)) prags
; return emptyBag }
- DefMeth dm_name -> tc_dm dm_name
- GenDefMeth dm_name -> tc_dm dm_name
+ DefMeth dm_name -> tc_dm dm_name
+ GenDefMeth dm_name -> tc_dm dm_name
where
sel_name = idName sel_id
prags = prag_fn sel_name
@@ -207,27 +201,27 @@ tcDefMeth clas tyvars this_dict binds_in hs_sig_fn prag_fn (sel_id, dm_info)
-- Eg. class C a where
-- op :: forall b. Eq b => a -> [b] -> a
- -- gen_op :: a -> a
- -- generic gen_op :: D a => a -> a
+ -- gen_op :: a -> a
+ -- generic gen_op :: D a => a -> a
-- The "local_dm_ty" is precisely the type in the above
-- type signatures, ie with no "forall a. C a =>" prefix
- tc_dm dm_name
+ tc_dm dm_name
= do { dm_id <- tcLookupId dm_name
- ; local_dm_name <- setSrcSpan bndr_loc (newLocalName sel_name)
- -- Base the local_dm_name on the selector name, because
- -- type errors from tcInstanceMethodBody come from here
+ ; local_dm_name <- setSrcSpan bndr_loc (newLocalName sel_name)
+ -- Base the local_dm_name on the selector name, because
+ -- type errors from tcInstanceMethodBody come from here
; dm_id_w_inline <- addInlinePrags dm_id prags
; spec_prags <- tcSpecPrags dm_id prags
; let local_dm_ty = instantiateMethod clas dm_id (mkTyVarTys tyvars)
- hs_ty = lookupHsSig hs_sig_fn sel_name
+ hs_ty = lookupHsSig hs_sig_fn sel_name
`orElse` pprPanic "tc_dm" (ppr sel_name)
; local_dm_sig <- instTcTySig hs_ty local_dm_ty local_dm_name
; warnTc (not (null spec_prags))
- (ptext (sLit "Ignoring SPECIALISE pragmas on default method")
+ (ptext (sLit "Ignoring SPECIALISE pragmas on default method")
<+> quotes (ppr sel_name))
; tc_bind <- tcInstanceMethodBody (ClsSkol clas) tyvars [this_dict]
@@ -239,18 +233,18 @@ tcDefMeth clas tyvars this_dict binds_in hs_sig_fn prag_fn (sel_id, dm_info)
---------------
tcInstanceMethodBody :: SkolemInfo -> [TcTyVar] -> [EvVar]
-> Id -> TcSigInfo
- -> TcSpecPrags -> LHsBind Name
- -> TcM (LHsBind Id)
+ -> TcSpecPrags -> LHsBind Name
+ -> TcM (LHsBind Id)
tcInstanceMethodBody skol_info tyvars dfun_ev_vars
meth_id local_meth_sig
- specs (L loc bind)
- = do { let local_meth_id = sig_id local_meth_sig
+ specs (L loc bind)
+ = do { let local_meth_id = sig_id local_meth_sig
lm_bind = L loc (bind { fun_id = L loc (idName local_meth_id) })
-- Substitute the local_meth_name for the binder
- -- NB: the binding is always a FunBind
- ; (ev_binds, (tc_bind, _, _))
+ -- NB: the binding is always a FunBind
+ ; (ev_binds, (tc_bind, _, _))
<- checkConstraints skol_info tyvars dfun_ev_vars $
- tcPolyCheck NonRecursive no_prag_fn local_meth_sig lm_bind
+ tcPolyCheck NonRecursive no_prag_fn local_meth_sig lm_bind
; let export = ABE { abe_wrap = idHsWrapper, abe_poly = meth_id
, abe_mono = local_meth_id, abe_prags = specs }
@@ -259,10 +253,10 @@ tcInstanceMethodBody skol_info tyvars dfun_ev_vars
, abs_ev_binds = ev_binds
, abs_binds = tc_bind }
- ; return (L loc full_bind) }
+ ; return (L loc full_bind) }
where
- no_prag_fn _ = [] -- No pragmas for local_meth_id;
- -- they are all for meth_id
+ no_prag_fn _ = [] -- No pragmas for local_meth_id;
+ -- they are all for meth_id
---------------
tcClassMinimalDef :: Name -> [LSig Name] -> [TcMethInfo] -> TcM ClassMinimalDef
@@ -278,7 +272,7 @@ tcClassMinimalDef _clas sigs op_info
(\bf -> addWarnTc (warningMinimalDefIncomplete bf))
return mindef
where
- -- By default require all methods without a default
+ -- By default require all methods without a default
-- implementation whose names don't start with '_'
defMindef :: ClassMinimalDef
defMindef = mkAnd [ mkVar name
@@ -288,26 +282,26 @@ tcClassMinimalDef _clas sigs op_info
\begin{code}
instantiateMethod :: Class -> Id -> [TcType] -> TcType
--- Take a class operation, say
--- op :: forall ab. C a => forall c. Ix c => (b,c) -> a
+-- Take a class operation, say
+-- op :: forall ab. C a => forall c. Ix c => (b,c) -> a
-- Instantiate it at [ty1,ty2]
--- Return the "local method type":
--- forall c. Ix x => (ty2,c) -> ty1
+-- Return the "local method type":
+-- forall c. Ix x => (ty2,c) -> ty1
instantiateMethod clas sel_id inst_tys
= ASSERT( ok_first_pred ) local_meth_ty
where
(sel_tyvars,sel_rho) = tcSplitForAllTys (idType sel_id)
rho_ty = ASSERT( length sel_tyvars == length inst_tys )
- substTyWith sel_tyvars inst_tys sel_rho
+ substTyWith sel_tyvars inst_tys sel_rho
(first_pred, local_meth_ty) = tcSplitPredFunTy_maybe rho_ty
- `orElse` pprPanic "tcInstanceMethod" (ppr sel_id)
+ `orElse` pprPanic "tcInstanceMethod" (ppr sel_id)
ok_first_pred = case getClassPredTys_maybe first_pred of
- Just (clas1, _tys) -> clas == clas1
+ Just (clas1, _tys) -> clas == clas1
Nothing -> False
- -- The first predicate should be of form (C a b)
- -- where C is the class in question
+ -- The first predicate should be of form (C a b)
+ -- where C is the class in question
---------------------------
@@ -317,7 +311,7 @@ emptyHsSigs :: HsSigFun
emptyHsSigs = emptyNameEnv
mkHsSigFun :: [LSig Name] -> HsSigFun
-mkHsSigFun sigs = mkNameEnv [(n, hs_ty)
+mkHsSigFun sigs = mkNameEnv [(n, hs_ty)
| L _ (TypeSig ns hs_ty) <- sigs
, L _ n <- ns ]
@@ -325,17 +319,17 @@ lookupHsSig :: HsSigFun -> Name -> Maybe (LHsType Name)
lookupHsSig = lookupNameEnv
---------------------------
-findMethodBind :: Name -- Selector name
- -> LHsBinds Name -- A group of bindings
- -> Maybe (LHsBind Name, SrcSpan)
- -- Returns the binding, and the binding
+findMethodBind :: Name -- Selector name
+ -> LHsBinds Name -- A group of bindings
+ -> Maybe (LHsBind Name, SrcSpan)
+ -- Returns the binding, and the binding
-- site of the method binder
findMethodBind sel_name binds
= foldlBag mplus Nothing (mapBag f binds)
where
f bind@(L _ (FunBind { fun_id = L bndr_loc op_name }))
| op_name == sel_name
- = Just (bind, bndr_loc)
+ = Just (bind, bndr_loc)
f _other = Nothing
---------------------------
@@ -351,7 +345,7 @@ Note [Polymorphic methods]
~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
class Foo a where
- op :: forall b. Ord b => a -> b -> b -> b
+ op :: forall b. Ord b => a -> b -> b -> b
instance Foo c => Foo [c] where
op = e
@@ -359,30 +353,30 @@ When typechecking the binding 'op = e', we'll have a meth_id for op
whose type is
op :: forall c. Foo c => forall b. Ord b => [c] -> b -> b -> b
-So tcPolyBinds must be capable of dealing with nested polytypes;
+So tcPolyBinds must be capable of dealing with nested polytypes;
and so it is. See TcBinds.tcMonoBinds (with type-sig case).
Note [Silly default-method bind]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we pass the default method binding to the type checker, it must
look like op2 = e
-not $dmop2 = e
+not $dmop2 = e
otherwise the "$dm" stuff comes out error messages. But we want the
"$dm" to come out in the interface file. So we typecheck the former,
and wrap it in a let, thus
- $dmop2 = let op2 = e in op2
+ $dmop2 = let op2 = e in op2
This makes the error messages right.
%************************************************************************
-%* *
- Error messages
-%* *
+%* *
+ Error messages
+%* *
%************************************************************************
\begin{code}
tcMkDeclCtxt :: TyClDecl Name -> SDoc
-tcMkDeclCtxt decl = hsep [ptext (sLit "In the"), pprTyClDeclFlavour decl,
+tcMkDeclCtxt decl = hsep [ptext (sLit "In the"), pprTyClDeclFlavour decl,
ptext (sLit "declaration for"), quotes (ppr (tcdName decl))]
tcAddDeclCtxt :: TyClDecl Name -> TcM a -> TcM a
@@ -391,37 +385,37 @@ tcAddDeclCtxt decl thing_inside
badMethodErr :: Outputable a => a -> Name -> SDoc
badMethodErr clas op
- = hsep [ptext (sLit "Class"), quotes (ppr clas),
- ptext (sLit "does not have a method"), quotes (ppr op)]
+ = hsep [ptext (sLit "Class"), quotes (ppr clas),
+ ptext (sLit "does not have a method"), quotes (ppr op)]
badGenericMethod :: Outputable a => a -> Name -> SDoc
badGenericMethod clas op
- = hsep [ptext (sLit "Class"), quotes (ppr clas),
- ptext (sLit "has a generic-default signature without a binding"), quotes (ppr op)]
+ = hsep [ptext (sLit "Class"), quotes (ppr clas),
+ ptext (sLit "has a generic-default signature without a binding"), quotes (ppr op)]
{-
badGenericInstanceType :: LHsBinds Name -> SDoc
badGenericInstanceType binds
= vcat [ptext (sLit "Illegal type pattern in the generic bindings"),
- nest 2 (ppr binds)]
+ nest 2 (ppr binds)]
missingGenericInstances :: [Name] -> SDoc
missingGenericInstances missing
= ptext (sLit "Missing type patterns for") <+> pprQuotedList missing
-
+
dupGenericInsts :: [(TyCon, InstInfo a)] -> SDoc
dupGenericInsts tc_inst_infos
= vcat [ptext (sLit "More than one type pattern for a single generic type constructor:"),
- nest 2 (vcat (map ppr_inst_ty tc_inst_infos)),
- ptext (sLit "All the type patterns for a generic type constructor must be identical")
+ nest 2 (vcat (map ppr_inst_ty tc_inst_infos)),
+ ptext (sLit "All the type patterns for a generic type constructor must be identical")
]
- where
+ where
ppr_inst_ty (_,inst) = ppr (simpleInstInfoTy inst)
-}
badDmPrag :: Id -> Sig Name -> TcM ()
badDmPrag sel_id prag
- = addErrTc (ptext (sLit "The") <+> hsSigDoc prag <+> ptext (sLit "for default method")
- <+> quotes (ppr sel_id)
+ = addErrTc (ptext (sLit "The") <+> hsSigDoc prag <+> ptext (sLit "for default method")
+ <+> quotes (ppr sel_id)
<+> ptext (sLit "lacks an accompanying binding"))
warningMinimalDefIncomplete :: ClassMinimalDef -> SDoc
diff --git a/compiler/typecheck/TcDefaults.lhs b/compiler/typecheck/TcDefaults.lhs
index 7b5bd27321..0153e5a9a4 100644
--- a/compiler/typecheck/TcDefaults.lhs
+++ b/compiler/typecheck/TcDefaults.lhs
@@ -5,13 +5,6 @@
\section[TcDefaults]{Typechecking \tr{default} declarations}
\begin{code}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module TcDefaults ( tcDefaults ) where
import HsSyn
@@ -32,37 +25,37 @@ import FastString
\begin{code}
tcDefaults :: [LDefaultDecl Name]
- -> TcM (Maybe [Type]) -- Defaulting types to heave
- -- into Tc monad for later use
- -- in Disambig.
-
-tcDefaults []
- = getDeclaredDefaultTys -- No default declaration, so get the
- -- default types from the envt;
- -- i.e. use the current ones
- -- (the caller will put them back there)
- -- It's important not to return defaultDefaultTys here (which
- -- we used to do) because in a TH program, tcDefaults [] is called
- -- repeatedly, once for each group of declarations between top-level
- -- splices. We don't want to carefully set the default types in
- -- one group, only for the next group to ignore them and install
- -- defaultDefaultTys
+ -> TcM (Maybe [Type]) -- Defaulting types to heave
+ -- into Tc monad for later use
+ -- in Disambig.
+
+tcDefaults []
+ = getDeclaredDefaultTys -- No default declaration, so get the
+ -- default types from the envt;
+ -- i.e. use the current ones
+ -- (the caller will put them back there)
+ -- It's important not to return defaultDefaultTys here (which
+ -- we used to do) because in a TH program, tcDefaults [] is called
+ -- repeatedly, once for each group of declarations between top-level
+ -- splices. We don't want to carefully set the default types in
+ -- one group, only for the next group to ignore them and install
+ -- defaultDefaultTys
tcDefaults [L _ (DefaultDecl [])]
- = return (Just []) -- Default declaration specifying no types
+ = return (Just []) -- Default declaration specifying no types
tcDefaults [L locn (DefaultDecl mono_tys)]
- = setSrcSpan locn $
- addErrCtxt defaultDeclCtxt $
- do { ovl_str <- xoptM Opt_OverloadedStrings
- ; num_class <- tcLookupClass numClassName
- ; is_str_class <- tcLookupClass isStringClassName
- ; let deflt_clss | ovl_str = [num_class, is_str_class]
- | otherwise = [num_class]
-
- ; tau_tys <- mapM (tc_default_ty deflt_clss) mono_tys
-
- ; return (Just tau_tys) }
+ = setSrcSpan locn $
+ addErrCtxt defaultDeclCtxt $
+ do { ovl_str <- xoptM Opt_OverloadedStrings
+ ; num_class <- tcLookupClass numClassName
+ ; is_str_class <- tcLookupClass isStringClassName
+ ; let deflt_clss | ovl_str = [num_class, is_str_class]
+ | otherwise = [num_class]
+
+ ; tau_tys <- mapM (tc_default_ty deflt_clss) mono_tys
+
+ ; return (Just tau_tys) }
tcDefaults decls@(L locn (DefaultDecl _) : _)
= setSrcSpan locn $
@@ -70,22 +63,22 @@ tcDefaults decls@(L locn (DefaultDecl _) : _)
tc_default_ty :: [Class] -> LHsType Name -> TcM Type
-tc_default_ty deflt_clss hs_ty
- = do { ty <- tcHsSigType DefaultDeclCtxt hs_ty
- ; checkTc (isTauTy ty) (polyDefErr hs_ty)
+tc_default_ty deflt_clss hs_ty
+ = do { ty <- tcHsSigType DefaultDeclCtxt hs_ty
+ ; checkTc (isTauTy ty) (polyDefErr hs_ty)
- -- Check that the type is an instance of at least one of the deflt_clss
- ; oks <- mapM (check_instance ty) deflt_clss
- ; checkTc (or oks) (badDefaultTy ty deflt_clss)
- ; return ty }
+ -- Check that the type is an instance of at least one of the deflt_clss
+ ; oks <- mapM (check_instance ty) deflt_clss
+ ; checkTc (or oks) (badDefaultTy ty deflt_clss)
+ ; return ty }
check_instance :: Type -> Class -> TcM Bool
-- Check that ty is an instance of cls
-- We only care about whether it worked or not; return a boolean
check_instance ty cls
- = do { (_, mb_res) <- tryTc (simplifyDefault [mkClassPred cls [ty]])
- ; return (isJust mb_res) }
-
+ = do { (_, mb_res) <- tryTc (simplifyDefault [mkClassPred cls [ty]])
+ ; return (isJust mb_res) }
+
defaultDeclCtxt :: SDoc
defaultDeclCtxt = ptext (sLit "When checking the types in a default declaration")
@@ -98,8 +91,8 @@ dupDefaultDeclErr (L _ (DefaultDecl _) : dup_things)
dupDefaultDeclErr [] = panic "dupDefaultDeclErr []"
polyDefErr :: LHsType Name -> SDoc
-polyDefErr ty
- = hang (ptext (sLit "Illegal polymorphic type in default declaration") <> colon) 2 (ppr ty)
+polyDefErr ty
+ = hang (ptext (sLit "Illegal polymorphic type in default declaration") <> colon) 2 (ppr ty)
badDefaultTy :: Type -> [Class] -> SDoc
badDefaultTy ty deflt_clss
diff --git a/compiler/typecheck/TcErrors.lhs b/compiler/typecheck/TcErrors.lhs
index b1165a5e18..210bd79599 100644
--- a/compiler/typecheck/TcErrors.lhs
+++ b/compiler/typecheck/TcErrors.lhs
@@ -1,13 +1,7 @@
\begin{code}
{-# LANGUAGE CPP, ScopedTypeVariables #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
-module TcErrors(
+
+module TcErrors(
reportUnsolved, reportAllUnsolved,
warnDefaulting,
@@ -34,13 +28,13 @@ import TcEvidence
import TysWiredIn ( coercibleClass )
import Name
import RdrName ( lookupGRE_Name )
-import Id
+import Id
import Var
import VarSet
import VarEnv
import Bag
import ErrUtils ( ErrMsg, makeIntoWarning, pprLocErrMsg )
-import BasicTypes
+import BasicTypes
import Util
import FastString
import Outputable
@@ -53,9 +47,9 @@ import Data.List ( partition, mapAccumL, zip4, nub )
\end{code}
%************************************************************************
-%* *
+%* *
\section{Errors and contexts}
-%* *
+%* *
%************************************************************************
ToDo: for these error messages, should we note the location as coming
@@ -125,7 +119,7 @@ report_unsolved mb_binds_var defer wanted
= do { traceTc "reportUnsolved (before unflattening)" (ppr wanted)
; env0 <- tcInitTidyEnv
-
+
-- If we are deferring we are going to need /all/ evidence around,
-- including the evidence produced by unflattening (zonkWC)
; let tidy_env = tidyFreeTyVars env0 free_tvs
@@ -136,7 +130,7 @@ report_unsolved mb_binds_var defer wanted
, cec_suppress = False -- See Note [Suppressing error messages]
, cec_binds = mb_binds_var }
- ; traceTc "reportUnsolved (after unflattening):" $
+ ; traceTc "reportUnsolved (after unflattening):" $
vcat [ pprTvBndrs (varSetElems free_tvs)
, ppr wanted ]
@@ -146,12 +140,12 @@ report_unsolved mb_binds_var defer wanted
-- Internal functions
--------------------------------------------
-data ReportErrCtxt
+data ReportErrCtxt
= CEC { cec_encl :: [Implication] -- Enclosing implications
- -- (innermost first)
+ -- (innermost first)
-- ic_skols and givens are tidied, rest are not
, cec_tidy :: TidyEnv
- , cec_binds :: Maybe EvBindsVar
+ , cec_binds :: Maybe EvBindsVar
-- Nothinng <=> Report all errors, including holes; no bindings
-- Just ev <=> make some errors (depending on cec_defer)
-- into warnings, and emit evidence bindings
@@ -173,7 +167,7 @@ evidence bindings (as usual). It's used when more important errors have occurre
Specifically (see reportWanteds)
* If there are insoluble Givens, then we are in unreachable code and all bets
are off. So don't report any further errors.
- * If there are any insolubles (eg Int~Bool), here or in a nested implication,
+ * If there are any insolubles (eg Int~Bool), here or in a nested implication,
then suppress errors from the flat constraints here. Sometimes the
flat-constraint errors are a knock-on effect of the insolubles.
@@ -411,21 +405,21 @@ pprWithArising :: [Ct] -> (CtLoc, SDoc)
-- (Show a) arising from a use of p at q
-- Also return a location for the error message
-- Works for Wanted/Derived only
-pprWithArising []
+pprWithArising []
= panic "pprWithArising"
pprWithArising (ct:cts)
| null cts
- = (loc, addArising (ctLocOrigin loc)
+ = (loc, addArising (ctLocOrigin loc)
(pprTheta [ctPred ct]))
| otherwise
= (loc, vcat (map ppr_one (ct:cts)))
where
loc = ctLoc ct
- ppr_one ct' = hang (parens (pprType (ctPred ct')))
+ ppr_one ct' = hang (parens (pprType (ctPred ct')))
2 (pprArisingAt (ctLoc ct'))
mkErrorMsg :: ReportErrCtxt -> Ct -> SDoc -> TcM ErrMsg
-mkErrorMsg ctxt ct msg
+mkErrorMsg ctxt ct msg
= do { let tcl_env = ctLocEnv (ctLoc ct)
; err_info <- mkErrInfo (cec_tidy ctxt) (tcl_ctxt tcl_env)
; mkLongErrAt (tcl_loc tcl_env) msg err_info }
@@ -436,7 +430,7 @@ getUserGivens :: ReportErrCtxt -> [UserGiven]
-- One item for each enclosing implication
getUserGivens (CEC {cec_encl = ctxt})
= reverse $
- [ (givens, info, tcl_loc env)
+ [ (givens, info, tcl_loc env)
| Implic {ic_given = givens, ic_env = env, ic_info = info } <- ctxt
, not (null givens) ]
\end{code}
@@ -447,13 +441,13 @@ When -fdefer-type-errors is on we warn about *all* type errors, even
if cec_suppress is on. This can lead to a lot more warnings than you
would get errors without -fdefer-type-errors, but if we suppress any of
them you might get a runtime error that wasn't warned about at compile
-time.
+time.
This is an easy design choice to change; just flip the order of the
first two equations for maybeReportError
To be consistent, we should also report multiple warnings from a single
-location in mkGroupReporter, when -fdefer-type-errors is on. But that
+location in mkGroupReporter, when -fdefer-type-errors is on. But that
is perhaps a bit *over*-consistent! Again, an easy choice to change.
@@ -478,7 +472,7 @@ these as errors:
For wanteds, something similar
data T a where
- MkT :: C Int b => a -> b -> T a
+ MkT :: C Int b => a -> b -> T a
g :: C Int c => c -> ()
f :: T a -> ()
f (MkT x y) = g x
@@ -488,23 +482,23 @@ these as errors:
(We leave the Deriveds in wc_flat until reportErrors, so that we don't lose
derived superclasses between iterations of the solver.)
-For functional dependencies, here is a real example,
+For functional dependencies, here is a real example,
stripped off from libraries/utf8-string/Codec/Binary/UTF8/Generic.hs
class C a b | a -> b
- g :: C a b => a -> b -> ()
- f :: C a b => a -> b -> ()
- f xa xb =
- let loop = g xa
+ g :: C a b => a -> b -> ()
+ f :: C a b => a -> b -> ()
+ f xa xb =
+ let loop = g xa
in loop xb
We will first try to infer a type for loop, and we will succeed:
C a b' => b' -> ()
-Subsequently, we will type check (loop xb) and all is good. But,
-recall that we have to solve a final implication constraint:
- C a b => (C a b' => .... cts from body of loop .... ))
-And now we have a problem as we will generate an equality b ~ b' and fail to
-solve it.
+Subsequently, we will type check (loop xb) and all is good. But,
+recall that we have to solve a final implication constraint:
+ C a b => (C a b' => .... cts from body of loop .... ))
+And now we have a problem as we will generate an equality b ~ b' and fail to
+solve it.
%************************************************************************
@@ -515,7 +509,7 @@ solve it.
\begin{code}
mkIrredErr :: ReportErrCtxt -> [Ct] -> TcM ErrMsg
-mkIrredErr ctxt cts
+mkIrredErr ctxt cts
= do { (ctxt, binds_msg) <- relevantBindings True ctxt ct1
; mkErrorMsg ctxt ct1 (msg $$ binds_msg) }
where
@@ -536,12 +530,12 @@ mkHoleError ctxt ct@(CHoleCan { cc_occ = occ })
-- The 'False' means "don't filter the bindings; see Trac #8191
; mkErrorMsg ctxt ct (msg $$ binds_doc) }
where
- loc_msg tv
+ loc_msg tv
= case tcTyVarDetails tv of
SkolemTv {} -> quotes (ppr tv) <+> skol_msg
MetaTv {} -> quotes (ppr tv) <+> ptext (sLit "is an ambiguous type variable")
det -> pprTcTyVarDetails det
- where
+ where
skol_msg = pprSkol (getSkolemInfo (cec_encl ctxt) tv) (getSrcLoc tv)
mkHoleError _ ct = pprPanic "mkHoleError" (ppr ct)
@@ -559,16 +553,16 @@ mkIPErr ctxt cts
msg | null givens
= addArising orig $
sep [ ptext (sLit "Unbound implicit parameter") <> plural cts
- , nest 2 (pprTheta preds) ]
+ , nest 2 (pprTheta preds) ]
| otherwise
= couldNotDeduce givens (preds, orig)
\end{code}
%************************************************************************
-%* *
+%* *
Equality errors
-%* *
+%* *
%************************************************************************
Note [Inaccessible code]
@@ -603,7 +597,7 @@ mkEqErr1 ctxt ct
= do { (ctxt, binds_msg) <- relevantBindings True ctxt ct
; let (given_loc, given_msg) = mk_given (cec_encl ctxt)
; dflags <- getDynFlags
- ; mkEqErr_help dflags ctxt (given_msg $$ binds_msg)
+ ; mkEqErr_help dflags ctxt (given_msg $$ binds_msg)
(ct { cc_ev = ev {ctev_loc = given_loc}}) -- Note [Inaccessible code]
Nothing ty1 ty2 }
@@ -679,10 +673,10 @@ mkTyVarEqErr dflags ctxt extra ct oriented tv1 ty2
, extraTyVarInfo ctxt tv1 ty2
, extra ])
- -- So tv is a meta tyvar (or started that way before we
- -- generalised it). So presumably it is an *untouchable*
+ -- So tv is a meta tyvar (or started that way before we
+ -- generalised it). So presumably it is an *untouchable*
-- meta tyvar or a SigTv, else it'd have been unified
- | not (k2 `tcIsSubKind` k1) -- Kind error
+ | not (k2 `tcIsSubKind` k1) -- Kind error
= mkErrorMsg ctxt ct $ (kindErrorMsg (mkTyVarTy tv1) ty2 $$ extra)
| OC_Occurs <- occ_check_expand
@@ -721,7 +715,7 @@ mkTyVarEqErr dflags ctxt extra ct oriented tv1 ty2
if isSingleton esc_skols then ptext (sLit "its scope")
else ptext (sLit "their scope") ]
tv_extra = vcat [ nest 2 $ esc_doc
- , sep [ (if isSingleton esc_skols
+ , sep [ (if isSingleton esc_skols
then ptext (sLit "This (rigid, skolem) type variable is")
else ptext (sLit "These (rigid, skolem) type variables are"))
<+> ptext (sLit "bound by")
@@ -748,15 +742,15 @@ mkTyVarEqErr dflags ctxt extra ct oriented tv1 ty2
-- This *can* happen (Trac #6123, and test T2627b)
-- Consider an ambiguous top-level constraint (a ~ F a)
-- Not an occurs check, because F is a type function.
- where
+ where
occ_check_expand = occurCheckExpand dflags tv1 ty2
- k1 = tyVarKind tv1
- k2 = typeKind ty2
+ k1 = tyVarKind tv1
+ k2 = typeKind ty2
ty1 = mkTyVarTy tv1
mkEqInfoMsg :: Ct -> TcType -> TcType -> SDoc
-- Report (a) ambiguity if either side is a type function application
--- e.g. F a0 ~ Int
+-- e.g. F a0 ~ Int
-- (b) warning about injectivity if both sides are the same
-- type function application F a ~ F b
-- See Note [Non-injective type functions]
@@ -766,14 +760,14 @@ mkEqInfoMsg ct ty1 ty2
mb_fun1 = isTyFun_maybe ty1
mb_fun2 = isTyFun_maybe ty2
- ambig_msg | isJust mb_fun1 || isJust mb_fun2
+ ambig_msg | isJust mb_fun1 || isJust mb_fun2
= snd (mkAmbigMsg ct)
| otherwise = empty
tyfun_msg | Just tc1 <- mb_fun1
, Just tc2 <- mb_fun2
- , tc1 == tc2
- = ptext (sLit "NB:") <+> quotes (ppr tc1)
+ , tc1 == tc2
+ = ptext (sLit "NB:") <+> quotes (ppr tc1)
<+> ptext (sLit "is a type function, and may not be injective")
| otherwise = empty
@@ -791,13 +785,13 @@ isUserSkolem ctxt tv
misMatchOrCND :: ReportErrCtxt -> Ct -> Maybe SwapFlag -> TcType -> TcType -> SDoc
-- If oriented then ty1 is actual, ty2 is expected
misMatchOrCND ctxt ct oriented ty1 ty2
- | null givens ||
- (isRigid ty1 && isRigid ty2) ||
+ | null givens ||
+ (isRigid ty1 && isRigid ty2) ||
isGivenCt ct
-- If the equality is unconditionally insoluble
-- or there is no context, don't report the context
= misMatchMsg oriented ty1 ty2
- | otherwise
+ | otherwise
= couldNotDeduce givens ([mkTcEqPred ty1 ty2], orig)
where
givens = getUserGivens ctxt
@@ -809,12 +803,12 @@ couldNotDeduce givens (wanteds, orig)
, vcat (pp_givens givens)]
pp_givens :: [UserGiven] -> [SDoc]
-pp_givens givens
+pp_givens givens
= case givens of
[] -> []
(g:gs) -> ppr_given (ptext (sLit "from the context")) g
: map (ppr_given (ptext (sLit "or from"))) gs
- where
+ where
ppr_given herald (gs, skol_info, loc)
= hang (herald <+> pprEvVarTheta gs)
2 (sep [ ptext (sLit "bound by") <+> ppr skol_info
@@ -870,7 +864,7 @@ kindErrorMsg ty1 ty2
k2 = typeKind ty2
--------------------
-misMatchMsg :: Maybe SwapFlag -> TcType -> TcType -> SDoc -- Types are already tidy
+misMatchMsg :: Maybe SwapFlag -> TcType -> TcType -> SDoc -- Types are already tidy
-- If oriented then ty1 is actual, ty2 is expected
misMatchMsg oriented ty1 ty2
| Just IsSwapped <- oriented
@@ -980,9 +974,9 @@ Warn of loopy local equalities that were dropped.
%************************************************************************
-%* *
+%* *
Type-class errors
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -1031,7 +1025,7 @@ mk_dict_err :: FamInstEnvs -> ReportErrCtxt -> (Ct, ClsInstLookupResult)
-> TcM (ReportErrCtxt, SDoc)
-- Report an overlap error if this class constraint results
-- from an overlap (returning Left clas), otherwise return (Right pred)
-mk_dict_err fam_envs ctxt (ct, (matches, unifiers, safe_haskell))
+mk_dict_err fam_envs ctxt (ct, (matches, unifiers, safe_haskell))
| null matches -- No matches but perhaps several unifiers
= do { let (is_ambig, ambig_msg) = mkAmbigMsg ct
; (ctxt, binds_msg) <- relevantBindings True ctxt ct
@@ -1061,10 +1055,10 @@ mk_dict_err fam_envs ctxt (ct, (matches, unifiers, safe_haskell))
potential_msg
= ppWhen (not (null unifiers) && want_potential orig) $
- hang (if isSingleton unifiers
+ hang (if isSingleton unifiers
then ptext (sLit "Note: there is a potential instance available:")
else ptext (sLit "Note: there are several potential instances:"))
- 2 (ppr_insts unifiers)
+ 2 (ppr_insts unifiers)
-- Report "potential instances" only when the constraint arises
-- directly from the user's use of an overloaded function
@@ -1076,16 +1070,16 @@ mk_dict_err fam_envs ctxt (ct, (matches, unifiers, safe_haskell))
, (orig:origs) <- mapMaybe get_good_orig (cec_encl ctxt)
= [sep [ ptext (sLit "add") <+> pprParendType pred
<+> ptext (sLit "to the context of")
- , nest 2 $ ppr_skol orig $$
- vcat [ ptext (sLit "or") <+> ppr_skol orig
+ , nest 2 $ ppr_skol orig $$
+ vcat [ ptext (sLit "or") <+> ppr_skol orig
| orig <- origs ] ] ]
| otherwise = []
ppr_skol (PatSkol dc _) = ptext (sLit "the data constructor") <+> quotes (ppr dc)
ppr_skol skol_info = ppr skol_info
- -- Do not suggest adding constraints to an *inferred* type signature!
- get_good_orig ic = case ic_info ic of
+ -- Do not suggest adding constraints to an *inferred* type signature!
+ get_good_orig ic = case ic_info ic of
SigSkol (InfSigCtxt {}) _ -> Nothing
origin -> Just origin
@@ -1112,15 +1106,15 @@ mk_dict_err fam_envs ctxt (ct, (matches, unifiers, safe_haskell))
-- Normal overlap error
overlap_msg
= ASSERT( not (null matches) )
- vcat [ addArising orig (ptext (sLit "Overlapping instances for")
- <+> pprType (mkClassPred clas tys))
+ vcat [ addArising orig (ptext (sLit "Overlapping instances for")
+ <+> pprType (mkClassPred clas tys))
, ppUnless (null matching_givens) $
- sep [ptext (sLit "Matching givens (or their superclasses):")
+ sep [ptext (sLit "Matching givens (or their superclasses):")
, nest 2 (vcat matching_givens)]
- , sep [ptext (sLit "Matching instances:"),
- nest 2 (vcat [pprInstances ispecs, pprInstances unifiers])]
+ , sep [ptext (sLit "Matching instances:"),
+ nest 2 (vcat [pprInstances ispecs, pprInstances unifiers])]
, ppWhen (null matching_givens && isSingleton matches && null unifiers) $
-- Intuitively, some given matched the wanted in their
@@ -1129,15 +1123,15 @@ mk_dict_err fam_envs ctxt (ct, (matches, unifiers, safe_haskell))
-- constraints are non-flat and non-rewritten so we
-- simply report back the whole given
-- context. Accelerate Smart.hs showed this problem.
- sep [ ptext (sLit "There exists a (perhaps superclass) match:")
+ sep [ ptext (sLit "There exists a (perhaps superclass) match:")
, nest 2 (vcat (pp_givens givens))]
- , ppWhen (isSingleton matches) $
- parens (vcat [ ptext (sLit "The choice depends on the instantiation of") <+>
- quotes (pprWithCommas ppr (varSetElems (tyVarsOfTypes tys)))
- , ppWhen (null (matching_givens)) $
+ , ppWhen (isSingleton matches) $
+ parens (vcat [ ptext (sLit "The choice depends on the instantiation of") <+>
+ quotes (pprWithCommas ppr (varSetElems (tyVarsOfTypes tys)))
+ , ppWhen (null (matching_givens)) $
vcat [ ptext (sLit "To pick the first instance above, use IncoherentInstances")
- , ptext (sLit "when compiling the other instance declarations")]
+ , ptext (sLit "when compiling the other instance declarations")]
])]
where
ispecs = [ispec | (ispec, _) <- matches]
@@ -1145,7 +1139,7 @@ mk_dict_err fam_envs ctxt (ct, (matches, unifiers, safe_haskell))
givens = getUserGivens ctxt
matching_givens = mapMaybe matchable givens
- matchable (evvars,skol_info,loc)
+ matchable (evvars,skol_info,loc)
= case ev_vars_matching of
[] -> Nothing
_ -> Just $ hang (pprTheta ev_vars_matching)
@@ -1156,16 +1150,16 @@ mk_dict_err fam_envs ctxt (ct, (matches, unifiers, safe_haskell))
Just (clas', tys')
| clas' == clas
, Just _ <- tcMatchTys (tyVarsOfTypes tys) tys tys'
- -> True
+ -> True
| otherwise
-> any ev_var_matches (immSuperClasses clas' tys')
Nothing -> False
- -- Overlap error because of Safe Haskell (first
+ -- Overlap error because of Safe Haskell (first
-- match should be the most specific match)
safe_haskell_msg
= ASSERT( length matches > 1 )
- vcat [ addArising orig (ptext (sLit "Unsafe overlapping instances for")
+ vcat [ addArising orig (ptext (sLit "Unsafe overlapping instances for")
<+> pprType (mkClassPred clas tys))
, sep [ptext (sLit "The matching instance is:"),
nest 2 (pprInstance $ head ispecs)]
@@ -1233,9 +1227,9 @@ ppr_insts insts
= pprInstances (take 3 insts) $$ dot_dot_message
where
n_extra = length insts - 3
- dot_dot_message
+ dot_dot_message
| n_extra <= 0 = empty
- | otherwise = ptext (sLit "...plus")
+ | otherwise = ptext (sLit "...plus")
<+> speakNOf n_extra (ptext (sLit "other"))
----------------------
@@ -1254,7 +1248,7 @@ quickFlattenTy (FunTy ty1 ty2) = do { fy1 <- quickFlattenTy ty1
; return (FunTy fy1 fy2) }
quickFlattenTy (TyConApp tc tys)
| not (isSynFamilyTyCon tc)
- = do { fys <- mapM quickFlattenTy tys
+ = do { fys <- mapM quickFlattenTy tys
; return (TyConApp tc fys) }
| otherwise
= do { let (funtys,resttys) = splitAt (tyConArity tc) tys
@@ -1333,12 +1327,12 @@ mkAmbigMsg ct
pprSkol :: SkolemInfo -> SrcLoc -> SDoc
pprSkol UnkSkol _
= ptext (sLit "is an unknown type variable")
-pprSkol skol_info tv_loc
+pprSkol skol_info tv_loc
= sep [ ptext (sLit "is a rigid type variable bound by"),
sep [ppr skol_info, ptext (sLit "at") <+> ppr tv_loc]]
getSkolemInfo :: [Implication] -> TcTyVar -> SkolemInfo
--- Get the skolem info for a type variable
+-- Get the skolem info for a type variable
-- from the implication constraint that binds it
getSkolemInfo [] tv
= pprPanic "No skolem info:" (ppr tv)
@@ -1353,7 +1347,7 @@ getSkolemInfo (implic:implics) tv
-- careful to zonk the Id's type first, so it has to be in the monad.
-- We must be careful to pass it a zonked type variable, too.
--
--- We always remove closed top-level bindings, though,
+-- We always remove closed top-level bindings, though,
-- since they are never relevant (cf Trac #8233)
relevantBindings :: Bool -- True <=> filter by tyvar; False <=> no filtering
@@ -1362,24 +1356,24 @@ relevantBindings :: Bool -- True <=> filter by tyvar; False <=> no filtering
-> TcM (ReportErrCtxt, SDoc)
relevantBindings want_filtering ctxt ct
= do { dflags <- getDynFlags
- ; (tidy_env', docs, discards)
- <- go (cec_tidy ctxt) (maxRelevantBinds dflags)
+ ; (tidy_env', docs, discards)
+ <- go (cec_tidy ctxt) (maxRelevantBinds dflags)
emptyVarSet [] False
(tcl_bndrs lcl_env)
- -- tcl_bndrs has the innermost bindings first,
+ -- tcl_bndrs has the innermost bindings first,
-- which are probably the most relevant ones
; traceTc "relevantBindings" (ppr ct $$ ppr [id | TcIdBndr id _ <- tcl_bndrs lcl_env])
- ; let doc = hang (ptext (sLit "Relevant bindings include"))
+ ; let doc = hang (ptext (sLit "Relevant bindings include"))
2 (vcat docs $$ max_msg)
- max_msg | discards
+ max_msg | discards
= ptext (sLit "(Some bindings suppressed; use -fmax-relevant-binds=N or -fno-max-relevant-binds)")
| otherwise = empty
- ; if null docs
+ ; if null docs
then return (ctxt, empty)
else do { traceTc "rb" doc
- ; return (ctxt { cec_tidy = tidy_env' }, doc) } }
+ ; return (ctxt { cec_tidy = tidy_env' }, doc) } }
where
loc = ctLoc ct
lcl_env = ctLocEnv loc
@@ -1398,9 +1392,9 @@ relevantBindings want_filtering ctxt ct
dec_max :: Maybe Int -> Maybe Int
dec_max = fmap (\n -> n - 1)
- go :: TidyEnv -> Maybe Int -> TcTyVarSet -> [SDoc]
+ go :: TidyEnv -> Maybe Int -> TcTyVarSet -> [SDoc]
-> Bool -- True <=> some filtered out due to lack of fuel
- -> [TcIdBinder]
+ -> [TcIdBinder]
-> TcM (TidyEnv, [SDoc], Bool) -- The bool says if we filtered any out
-- because of lack of fuel
go tidy_env _ _ docs discards []
@@ -1410,8 +1404,8 @@ relevantBindings want_filtering ctxt ct
; traceTc "relevantBindings 1" (ppr id <+> dcolon <+> ppr tidy_ty)
; let id_tvs = tyVarsOfType tidy_ty
doc = sep [ pprPrefixOcc id <+> dcolon <+> ppr tidy_ty
- , nest 2 (parens (ptext (sLit "bound at")
- <+> ppr (getSrcLoc id)))]
+ , nest 2 (parens (ptext (sLit "bound at")
+ <+> ppr (getSrcLoc id)))]
new_seen = tvs_seen `unionVarSet` id_tvs
; if (want_filtering && id_tvs `disjointVarSet` ct_tvs)
@@ -1451,13 +1445,13 @@ Note [Runtime skolems]
~~~~~~~~~~~~~~~~~~~~~~
We want to give a reasonably helpful error message for ambiguity
arising from *runtime* skolems in the debugger. These
-are created by in RtClosureInspect.zonkRTTIType.
+are created by in RtClosureInspect.zonkRTTIType.
%************************************************************************
-%* *
+%* *
Error from the canonicaliser
- These ones are called *during* constraint simplification
-%* *
+ These ones are called *during* constraint simplification
+%* *
%************************************************************************
\begin{code}
@@ -1482,9 +1476,9 @@ solverDepthErrorTcS cnt ev
\end{code}
%************************************************************************
-%* *
+%* *
Tidying
-%* *
+%* *
%************************************************************************
\begin{code}
diff --git a/compiler/typecheck/TcHsType.lhs b/compiler/typecheck/TcHsType.lhs
index c4ed2a60b7..c9f0e2f870 100644
--- a/compiler/typecheck/TcHsType.lhs
+++ b/compiler/typecheck/TcHsType.lhs
@@ -6,37 +6,31 @@
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module TcHsType (
- tcHsSigType, tcHsSigTypeNC, tcHsDeriv, tcHsVectInst,
- tcHsInstHead,
- UserTypeCtxt(..),
+ tcHsSigType, tcHsSigTypeNC, tcHsDeriv, tcHsVectInst,
+ tcHsInstHead,
+ UserTypeCtxt(..),
-- Type checking type and class decls
- kcLookupKind, kcTyClTyVars, tcTyClTyVars,
- tcHsConArgType, tcDataKindSig,
+ kcLookupKind, kcTyClTyVars, tcTyClTyVars,
+ tcHsConArgType, tcDataKindSig,
tcClassSigType,
- -- Kind-checking types
+ -- Kind-checking types
-- No kind generalisation, no checkValidType
- kcHsTyVarBndrs, tcHsTyVarBndrs,
+ kcHsTyVarBndrs, tcHsTyVarBndrs,
tcHsLiftedType, tcHsOpenType,
- tcLHsType, tcCheckLHsType,
+ tcLHsType, tcCheckLHsType,
tcHsContext, tcInferApps, tcHsArgTys,
kindGeneralize, checkKind,
- -- Sort-checking kinds
- tcLHsKind,
+ -- Sort-checking kinds
+ tcLHsKind,
- -- Pattern type signatures
- tcHsPatSigType, tcPatSig
+ -- Pattern type signatures
+ tcHsPatSigType, tcPatSig
) where
#include "HsVersions.h"
@@ -79,31 +73,31 @@ import PrelNames( ipClassName, funTyConKey, allNameStrings )
\end{code}
- ----------------------------
- General notes
- ----------------------------
+ ----------------------------
+ General notes
+ ----------------------------
Generally speaking we now type-check types in three phases
1. kcHsType: kind check the HsType
- *includes* performing any TH type splices;
- so it returns a translated, and kind-annotated, type
+ *includes* performing any TH type splices;
+ so it returns a translated, and kind-annotated, type
2. dsHsType: convert from HsType to Type:
- perform zonking
- expand type synonyms [mkGenTyApps]
- hoist the foralls [tcHsType]
+ perform zonking
+ expand type synonyms [mkGenTyApps]
+ hoist the foralls [tcHsType]
3. checkValidType: check the validity of the resulting type
Often these steps are done one after the other (tcHsSigType).
But in mutually recursive groups of type and class decls we do
- 1 kind-check the whole group
- 2 build TyCons/Classes in a knot-tied way
- 3 check the validity of types in the now-unknotted TyCons/Classes
+ 1 kind-check the whole group
+ 2 build TyCons/Classes in a knot-tied way
+ 3 check the validity of types in the now-unknotted TyCons/Classes
For example, when we find
- (forall a m. m a -> m a)
+ (forall a m. m a -> m a)
we bind a,m to kind varibles and kind-check (m a -> m a). This makes
a get kind *, and m get kind *->*. Now we typecheck (m a -> m a) in
an environment that binds a and m suitably.
@@ -111,29 +105,29 @@ an environment that binds a and m suitably.
The kind checker passed to tcHsTyVars needs to look at enough to
establish the kind of the tyvar:
* For a group of type and class decls, it's just the group, not
- the rest of the program
+ the rest of the program
* For a tyvar bound in a pattern type signature, its the types
- mentioned in the other type signatures in that bunch of patterns
+ mentioned in the other type signatures in that bunch of patterns
* For a tyvar bound in a RULE, it's the type signatures on other
- universally quantified variables in the rule
+ universally quantified variables in the rule
Note that this may occasionally give surprising results. For example:
- data T a b = MkT (a b)
+ data T a b = MkT (a b)
-Here we deduce a::*->*, b::*
-But equally valid would be a::(*->*)-> *, b::*->*
+Here we deduce a::*->*, b::*
+But equally valid would be a::(*->*)-> *, b::*->*
Validity checking
~~~~~~~~~~~~~~~~~
-Some of the validity check could in principle be done by the kind checker,
+Some of the validity check could in principle be done by the kind checker,
but not all:
- During desugaring, we normalise by expanding type synonyms. Only
after this step can we check things like type-synonym saturation
- e.g. type T k = k Int
- type S a = a
+ e.g. type T k = k Int
+ type S a = a
Then (T S) is ok, because T is saturated; (T S) expands to (S Int);
and then S is saturated. This is a GHC extension.
@@ -156,15 +150,15 @@ the TyCon being defined.
%************************************************************************
-%* *
+%* *
Check types AND do validity checking
-%* *
+%* *
%************************************************************************
\begin{code}
tcHsSigType, tcHsSigTypeNC :: UserTypeCtxt -> LHsType Name -> TcM Type
-- NB: it's important that the foralls that come from the top-level
- -- HsForAllTy in hs_ty occur *first* in the returned type.
+ -- HsForAllTy in hs_ty occur *first* in the returned type.
-- See Note [Scoped] with TcSigInfo
tcHsSigType ctxt hs_ty
= addErrCtxt (pprHsSigCtxt ctxt hs_ty) $
@@ -200,7 +194,7 @@ tcHsInstHead user_ctxt lhs_ty@(L loc hs_ty)
tc_inst_head :: HsType Name -> TcM TcType
tc_inst_head (HsForAllTy _ hs_tvs hs_ctxt hs_ty)
- = tcHsTyVarBndrs hs_tvs $ \ tvs ->
+ = tcHsTyVarBndrs hs_tvs $ \ tvs ->
do { ctxt <- tcHsContext hs_ctxt
; ty <- tc_lhs_type hs_ty ekConstraint -- Body for forall has kind Constraint
; return (mkSigmaTy tvs ctxt ty) }
@@ -239,18 +233,18 @@ tcHsVectInst ty
= failWithTc $ ptext (sLit "Malformed instance type")
\end{code}
- These functions are used during knot-tying in
- type and class declarations, when we have to
- separate kind-checking, desugaring, and validity checking
+ These functions are used during knot-tying in
+ type and class declarations, when we have to
+ separate kind-checking, desugaring, and validity checking
%************************************************************************
-%* *
+%* *
The main kind checker: no validity checks here
-%* *
+%* *
%************************************************************************
-
- First a couple of simple wrappers for kcHsType
+
+ First a couple of simple wrappers for kcHsType
\begin{code}
tcClassSigType :: LHsType Name -> TcM Type
@@ -293,7 +287,7 @@ tcHsLiftedType ty = addTypeCtxt ty $ tc_lhs_type ty ekLifted
-- Like tcHsType, but takes an expected kind
tcCheckLHsType :: LHsType Name -> Kind -> TcM Type
tcCheckLHsType hs_ty exp_kind
- = addTypeCtxt hs_ty $
+ = addTypeCtxt hs_ty $
tc_lhs_type hs_ty (EK exp_kind expectedKindMsg)
tcLHsType :: LHsType Name -> TcM (TcType, TcKind)
@@ -308,7 +302,7 @@ tcCheckHsTypeAndGen :: HsType Name -> Kind -> TcM Type
tcCheckHsTypeAndGen hs_ty kind
= do { ty <- tc_hs_type hs_ty (EK kind expectedKindMsg)
; traceTc "tcCheckHsTypeAndGen" (ppr hs_ty)
- ; kvs <- zonkTcTypeAndFV ty
+ ; kvs <- zonkTcTypeAndFV ty
; kvs <- kindGeneralize kvs
; return (mkForAllTys kvs ty) }
\end{code}
@@ -336,7 +330,7 @@ tc_lhs_types tys_w_kinds = mapM (uncurry tc_lhs_type) tys_w_kinds
------------------------------------------
tc_fun_type :: HsType Name -> LHsType Name -> LHsType Name -> ExpKind -> TcM TcType
--- We need to recognise (->) so that we can construct a FunTy,
+-- We need to recognise (->) so that we can construct a FunTy,
-- *and* we need to do by looking at the Name, not the TyCon
-- (see Note [Zonking inside the knot]). For example,
-- consider f :: (->) Int Int (Trac #7312)
@@ -350,14 +344,14 @@ tc_fun_type ty ty1 ty2 exp_kind@(EK _ ctxt)
tc_hs_type :: HsType Name -> ExpKind -> TcM TcType
tc_hs_type (HsParTy ty) exp_kind = tc_lhs_type ty exp_kind
tc_hs_type (HsDocTy ty _) exp_kind = tc_lhs_type ty exp_kind
-tc_hs_type (HsQuasiQuoteTy {}) _ = panic "tc_hs_type: qq" -- Eliminated by renamer
+tc_hs_type (HsQuasiQuoteTy {}) _ = panic "tc_hs_type: qq" -- Eliminated by renamer
tc_hs_type ty@(HsBangTy {}) _
-- While top-level bangs at this point are eliminated (eg !(Maybe Int)),
-- other kinds of bangs are not (eg ((!Maybe) Int)). These kinds of
-- bangs are invalid, so fail. (#7210)
= failWithTc (ptext (sLit "Unexpected strictness annotation:") <+> ppr ty)
tc_hs_type (HsRecTy _) _ = panic "tc_hs_type: record" -- Unwrapped by con decls
- -- Record types (which only show up temporarily in constructor
+ -- Record types (which only show up temporarily in constructor
-- signatures) should have been removed by now
---------- Functions and applications
@@ -388,7 +382,7 @@ tc_hs_type hs_ty@(HsAppTy ty1 ty2) exp_kind
; arg_tys' <- tcCheckApps hs_ty fun_ty fun_kind arg_tys exp_kind
; return (mkNakedAppTys fun_ty' arg_tys') }
-- mkNakedAppTys: see Note [Zonking inside the knot]
- -- This looks fragile; how do we *know* that fun_ty isn't
+ -- This looks fragile; how do we *know* that fun_ty isn't
-- a TyConApp, say (which is never supposed to appear in the
-- function position of an AppTy)?
where
@@ -414,7 +408,7 @@ tc_hs_type hs_ty@(HsForAllTy _ hs_tvs context ty) exp_kind@(EK exp_k _)
; return (mkSigmaTy tvs' ctxt' ty') }
--------- Lists, arrays, and tuples
-tc_hs_type hs_ty@(HsListTy elt_ty) exp_kind
+tc_hs_type hs_ty@(HsListTy elt_ty) exp_kind
= do { tau_ty <- tc_lhs_type elt_ty ekLifted
; checkExpectedKind hs_ty liftedTypeKind exp_kind
; checkWiredInTyCon listTyCon
@@ -495,7 +489,7 @@ tc_hs_type ipTy@(HsIParamTy n ty) exp_kind
; return (mkClassPred ipClass [n',ty'])
}
-tc_hs_type ty@(HsEqTy ty1 ty2) exp_kind
+tc_hs_type ty@(HsEqTy ty1 ty2) exp_kind
= do { (ty1', kind1) <- tc_infer_lhs_type ty1
; (ty2', kind2) <- tc_infer_lhs_type ty2
; checkExpectedKind ty2 kind2
@@ -507,12 +501,12 @@ tc_hs_type ty@(HsEqTy ty1 ty2) exp_kind
<+> quotes (pprKind pkind)
--------- Misc
-tc_hs_type (HsKindSig ty sig_k) exp_kind
+tc_hs_type (HsKindSig ty sig_k) exp_kind
= do { sig_k' <- tcLHsKind sig_k
; checkExpectedKind ty sig_k' exp_kind
; tc_lhs_type ty (EK sig_k' msg_fn) }
where
- msg_fn pkind = ptext (sLit "The signature specified kind")
+ msg_fn pkind = ptext (sLit "The signature specified kind")
<+> quotes (pprKind pkind)
tc_hs_type (HsCoreTy ty) exp_kind
@@ -572,21 +566,21 @@ finish_tuple hs_ty tup_sort tau_tys exp_kind
---------------------------
tcInferApps :: Outputable a
- => a
- -> TcKind -- Function kind
- -> [LHsType Name] -- Arg types
- -> TcM ([TcType], TcKind) -- Kind-checked args
+ => a
+ -> TcKind -- Function kind
+ -> [LHsType Name] -- Arg types
+ -> TcM ([TcType], TcKind) -- Kind-checked args
tcInferApps the_fun fun_kind args
= do { (args_w_kinds, res_kind) <- splitFunKind (ppr the_fun) fun_kind args
; args' <- tc_lhs_types args_w_kinds
; return (args', res_kind) }
-tcCheckApps :: Outputable a
+tcCheckApps :: Outputable a
=> HsType Name -- The type being checked (for err messages only)
-> a -- The function
-> TcKind -> [LHsType Name] -- Fun kind and arg types
- -> ExpKind -- Expected kind
- -> TcM [TcType]
+ -> ExpKind -- Expected kind
+ -> TcM [TcType]
tcCheckApps hs_ty the_fun fun_kind args exp_kind
= do { (arg_tys, res_kind) <- tcInferApps the_fun fun_kind args
; checkExpectedKind hs_ty res_kind exp_kind
@@ -601,13 +595,13 @@ splitFunKind the_fun fun_kind args
go arg_no fk (arg:args)
= do { mb_fk <- matchExpectedFunKind fk
; case mb_fk of
- Nothing -> failWithTc too_many_args
+ Nothing -> failWithTc too_many_args
Just (ak,fk') -> do { (aks, rk) <- go (arg_no+1) fk' args
; let exp_kind = expArgKind (quotes the_fun) ak arg_no
; return ((arg, exp_kind) : aks, rk) } }
-
+
too_many_args = quotes the_fun <+>
- ptext (sLit "is applied to too many type arguments")
+ ptext (sLit "is applied to too many type arguments")
---------------------------
@@ -625,7 +619,7 @@ tcTyVar name -- Could be a tyvar, a tycon, or a datacon
= do { traceTc "lk1" (ppr name)
; thing <- tcLookup name
; case thing of
- ATyVar _ tv
+ ATyVar _ tv
| isKindVar tv
-> failWithTc (ptext (sLit "Kind variable") <+> quotes (ppr tv)
<+> ptext (sLit "used as a type"))
@@ -644,7 +638,7 @@ tcTyVar name -- Could be a tyvar, a tycon, or a datacon
; unless data_kinds $ promotionErr name NoDataKinds
; inst_tycon (mkTyConApp tc) (tyConKind tc) }
| otherwise -> failWithTc (ptext (sLit "Data constructor") <+> quotes (ppr dc)
- <+> ptext (sLit "comes from an un-promotable type")
+ <+> ptext (sLit "comes from an un-promotable type")
<+> quotes (ppr (dataConTyCon dc)))
APromotionErr err -> promotionErr name err
@@ -661,22 +655,22 @@ tcTyVar name -- Could be a tyvar, a tycon, or a datacon
-- Instantiate the polymorphic kind
-- Lazy in the TyCon
inst_tycon mk_tc_app kind
- | null kvs
+ | null kvs
= return (mk_tc_app [], ki_body)
| otherwise
= do { traceTc "lk4" (ppr name <+> dcolon <+> ppr kind)
; ks <- mapM (const newMetaKindVar) kvs
; return (mk_tc_app ks, substKiWith kvs ks ki_body) }
- where
+ where
(kvs, ki_body) = splitForAllTys kind
tcClass :: Name -> TcM (Class, TcKind)
-tcClass cls -- Must be a class
+tcClass cls -- Must be a class
= do { thing <- tcLookup cls
; case thing of
AThing kind -> return (aThingErr "tcClass" cls, kind)
AGlobal (ATyCon tc)
- | Just cls <- tyConClass_maybe tc
+ | Just cls <- tyConClass_maybe tc
-> return (cls, tyConKind tc)
_ -> wrongThingErr "class" thing cls }
@@ -694,14 +688,14 @@ Suppose we are checking the argument types of a data constructor. We
must zonk the types before making the DataCon, because once built we
can't change it. So we must traverse the type.
-BUT the parent TyCon is knot-tied, so we can't look at it yet.
+BUT the parent TyCon is knot-tied, so we can't look at it yet.
So we must be careful not to use "smart constructors" for types that
-look at the TyCon or Class involved.
+look at the TyCon or Class involved.
- * Hence the use of mkNakedXXX functions. These do *not* enforce
- the invariants (for example that we use (FunTy s t) rather
- than (TyConApp (->) [s,t])).
+ * Hence the use of mkNakedXXX functions. These do *not* enforce
+ the invariants (for example that we use (FunTy s t) rather
+ than (TyConApp (->) [s,t])).
* Ditto in zonkTcType (which may be applied more than once, eg to
squeeze out kind meta-variables), we are careful not to look at
@@ -720,7 +714,7 @@ delicate it is can be seen in Trac #7903.
\begin{code}
mkNakedTyConApp :: TyCon -> [Type] -> Type
--- Builds a TyConApp
+-- Builds a TyConApp
-- * without being strict in TyCon,
-- * without satisfying the invariants of TyConApp
-- A subsequent zonking will establish the invariants
@@ -754,14 +748,14 @@ zonkSigType ty
go (AppTy fun arg) = do fun' <- go fun
arg' <- go arg
return (mkAppTy fun' arg')
- -- NB the mkAppTy; we might have instantiated a
- -- type variable to a type constructor, so we need
- -- to pull the TyConApp to the top.
+ -- NB the mkAppTy; we might have instantiated a
+ -- type variable to a type constructor, so we need
+ -- to pull the TyConApp to the top.
- -- The two interesting cases!
+ -- The two interesting cases!
go (TyVarTy tyvar) | isTcTyVar tyvar = zonkTcTyVar tyvar
- | otherwise = TyVarTy <$> updateTyVarKindM go tyvar
- -- Ordinary (non Tc) tyvars occur inside quantified types
+ | otherwise = TyVarTy <$> updateTyVarKindM go tyvar
+ -- Ordinary (non Tc) tyvars occur inside quantified types
go (ForAllTy tv ty) = do { tv' <- zonkTcTyVarBndr tv
; ty' <- go ty
@@ -773,11 +767,11 @@ Note [Body kind of a forall]
The body of a forall is usually a type, but in principle
there's no reason to prohibit *unlifted* types.
In fact, GHC can itself construct a function with an
-unboxed tuple inside a for-all (via CPR analyis; see
+unboxed tuple inside a for-all (via CPR analyis; see
typecheck/should_compile/tc170).
Moreover in instance heads we get forall-types with
-kind Constraint.
+kind Constraint.
Moreover if we have a signature
f :: Int#
@@ -812,7 +806,7 @@ so that we do kind generalisation on it.
Really we should check that it's a type of value kind
{*, Constraint, #}, but I'm not doing that yet
-Example that should be rejected:
+Example that should be rejected:
f :: (forall (a:*->*). a) Int
Note [Inferring tuple kinds]
@@ -843,9 +837,9 @@ The type desugarer is phase 2 of dealing with HsTypes. Specifically:
* It zonks any kinds. The returned type should have no mutable kind
or type variables (hence returning Type not TcType):
- - any unconstrained kind variables are defaulted to AnyK just
- as in TcHsSyn.
- - there are no mutable type variables because we are
+ - any unconstrained kind variables are defaulted to AnyK just
+ as in TcHsSyn.
+ - there are no mutable type variables because we are
kind-checking a type
Reason: the returned type may be put in a TyCon or DataCon where
it will never subsequently be zonked.
@@ -861,11 +855,11 @@ delicate point, this. If it becomes an issue we might need to
distinguish top-level from nested uses.
Moreover
- * it cannot fail,
+ * it cannot fail,
* it does no unifications
* it does no validity checking, except for structural matters, such as
- (a) spurious ! annotations.
- (b) a class used as a type
+ (a) spurious ! annotations.
+ (b) a class used as a type
Note [Kind of a type splice]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -874,7 +868,7 @@ Consider these terms, each with TH type splice inside:
[| e2 :: $(..blah..) |]
When kind-checking the type signature, we'll kind-check the splice
$(..blah..); we want to give it a kind that can fit in any context,
-as if $(..blah..) :: forall k. k.
+as if $(..blah..) :: forall k. k.
In the e1 example, the context of the splice fixes kappa to *. But
in the e2 example, we'll desugar the type, zonking the kind unification
@@ -887,25 +881,25 @@ Help functions for type applications
\begin{code}
addTypeCtxt :: LHsType Name -> TcM a -> TcM a
- -- Wrap a context around only if we want to show that contexts.
- -- Omit invisble ones and ones user's won't grok
-addTypeCtxt (L _ ty) thing
+ -- Wrap a context around only if we want to show that contexts.
+ -- Omit invisble ones and ones user's won't grok
+addTypeCtxt (L _ ty) thing
= addErrCtxt doc thing
where
doc = ptext (sLit "In the type") <+> quotes (ppr ty)
\end{code}
%************************************************************************
-%* *
- Type-variable binders
-%* *
+%* *
+ Type-variable binders
+%* *
%************************************************************************
\begin{code}
mkKindSigVar :: Name -> TcM KindVar
-- Use the specified name; don't clone it
-mkKindSigVar n
+mkKindSigVar n
= do { mb_thing <- tcLookupLcl_maybe n
; case mb_thing of
Just (AThing k)
@@ -917,19 +911,19 @@ kcScopedKindVars :: [Name] -> TcM a -> TcM a
-- Given some tyvar binders like [a (b :: k -> *) (c :: k)]
-- bind each scoped kind variable (k in this case) to a fresh
-- kind skolem variable
-kcScopedKindVars kv_ns thing_inside
+kcScopedKindVars kv_ns thing_inside
= do { kvs <- mapM (\n -> newSigTyVar n superKind) kv_ns
-- NB: use mutable signature variables
- ; tcExtendTyVarEnv2 (kv_ns `zip` kvs) thing_inside }
+ ; tcExtendTyVarEnv2 (kv_ns `zip` kvs) thing_inside }
-- | Kind-check a 'LHsTyVarBndrs'. If the decl under consideration has a complete,
-- user-supplied kind signature (CUSK), generalise the result. Used in 'getInitialKind'
-- and in kind-checking. See also Note [Complete user-supplied kind signatures] in
-- HsDecls.
kcHsTyVarBndrs :: Bool -- ^ True <=> the decl being checked has a CUSK
- -> LHsTyVarBndrs Name
- -> TcM (Kind, r) -- ^ the result kind, possibly with other info
- -> TcM (Kind, r) -- ^ The full kind of the thing being declared,
+ -> LHsTyVarBndrs Name
+ -> TcM (Kind, r) -- ^ the result kind, possibly with other info
+ -> TcM (Kind, r) -- ^ The full kind of the thing being declared,
-- with the other info
kcHsTyVarBndrs cusk (HsQTvs { hsq_kvs = kv_ns, hsq_tvs = hs_tvs }) thing_inside
= do { kvs <- if cusk
@@ -950,13 +944,13 @@ kcHsTyVarBndrs cusk (HsQTvs { hsq_kvs = kv_ns, hsq_tvs = hs_tvs }) thing_inside
kc_hs_tv (UserTyVar n)
= do { mb_thing <- tcLookupLcl_maybe n
; kind <- case mb_thing of
- Just (AThing k) -> return k
- _ | cusk -> return liftedTypeKind
- | otherwise -> newMetaKindVar
+ Just (AThing k) -> return k
+ _ | cusk -> return liftedTypeKind
+ | otherwise -> newMetaKindVar
; return (n, kind) }
- kc_hs_tv (KindedTyVar n k)
+ kc_hs_tv (KindedTyVar n k)
= do { kind <- tcLHsKind k
- -- In an associated type decl, the type variable may already
+ -- In an associated type decl, the type variable may already
-- be in scope; in that case we want to make sure its kind
-- matches the one declared here
; mb_thing <- tcLookupLcl_maybe n
@@ -966,14 +960,14 @@ kcHsTyVarBndrs cusk (HsQTvs { hsq_kvs = kv_ns, hsq_tvs = hs_tvs }) thing_inside
Just thing -> pprPanic "check_in_scope" (ppr thing)
; return (n, kind) }
-tcHsTyVarBndrs :: LHsTyVarBndrs Name
- -> ([TcTyVar] -> TcM r)
- -> TcM r
+tcHsTyVarBndrs :: LHsTyVarBndrs Name
+ -> ([TcTyVar] -> TcM r)
+ -> TcM r
-- Bind the kind variables to fresh skolem variables
-- and type variables to skolems, each with a meta-kind variable kind
tcHsTyVarBndrs (HsQTvs { hsq_kvs = kv_ns, hsq_tvs = hs_tvs }) thing_inside
= do { kvs <- mapM mkKindSigVar kv_ns
- ; tcExtendTyVarEnv kvs $ do
+ ; tcExtendTyVarEnv kvs $ do
{ tvs <- mapM tcHsTyVarBndr hs_tvs
; traceTc "tcHsTyVarBndrs {" (vcat [ text "Hs kind vars:" <+> ppr kv_ns
, text "Hs type vars:" <+> ppr hs_tvs
@@ -987,13 +981,13 @@ tcHsTyVarBndrs (HsQTvs { hsq_kvs = kv_ns, hsq_tvs = hs_tvs }) thing_inside
; return res } }
tcHsTyVarBndr :: LHsTyVarBndr Name -> TcM TcTyVar
--- Return a type variable
+-- Return a type variable
-- initialised with a kind variable.
--- Typically the Kind inside the HsTyVarBndr will be a tyvar with a mutable kind
+-- Typically the Kind inside the HsTyVarBndr will be a tyvar with a mutable kind
-- in it.
--
-- If the variable is already in scope return it, instead of introducing a new
--- one. This can occur in
+-- one. This can occur in
-- instance C (a,b) where
-- type F (a,b) c = ...
-- Here a,b will be in scope when processing the associated type instance for F.
@@ -1018,9 +1012,9 @@ kindGeneralize tkvs
-- Any type variables in tkvs will be in scope,
-- and hence in gbl_tvs, so after removing gbl_tvs
-- we should only have kind variables left
- --
- -- BUT there is a smelly case (to be fixed when TH is reorganised)
- -- f t = [| e :: $t |]
+ --
+ -- BUT there is a smelly case (to be fixed when TH is reorganised)
+ -- f t = [| e :: $t |]
-- When typechecking the body of the bracket, we typecheck $t to a
-- unification variable 'alpha', with no biding forall. We don't
-- want to kind-quantify it!
@@ -1052,12 +1046,12 @@ must return type variables whose kinds are zonked too. Example
(a :: k7) where k7 := k9 -> k9
We must return
[k9, a:k9->k9]
-and NOT
+and NOT
[k9, a:k7]
-Reason: we're going to turn this into a for-all type,
+Reason: we're going to turn this into a for-all type,
forall k9. forall (a:k7). blah
which the type checker will then instantiate, and instantiate does not
-look through unification variables!
+look through unification variables!
Hence using zonked_kinds when forming tvs'.
@@ -1066,10 +1060,10 @@ Hence using zonked_kinds when forming tvs'.
-- getInitialKind has made a suitably-shaped kind for the type or class
-- Unpack it, and attribute those kinds to the type variables
-- Extend the env with bindings for the tyvars, taken from
--- the kind of the tycon/class. Give it to the thing inside, and
+-- the kind of the tycon/class. Give it to the thing inside, and
-- check the result kind matches
kcLookupKind :: Name -> TcM Kind
-kcLookupKind nm
+kcLookupKind nm
= do { tc_ty_thing <- tcLookup nm
; case tc_ty_thing of
AThing k -> return k
@@ -1078,11 +1072,11 @@ kcLookupKind nm
kcTyClTyVars :: Name -> LHsTyVarBndrs Name -> TcM a -> TcM a
-- Used for the type variables of a type or class decl,
--- when doing the initial kind-check.
+-- when doing the initial kind-check.
kcTyClTyVars name (HsQTvs { hsq_kvs = kvs, hsq_tvs = hs_tvs }) thing_inside
= kcScopedKindVars kvs $
- do { tc_kind <- kcLookupKind name
- ; let (_, mono_kind) = splitForAllTys tc_kind
+ do { tc_kind <- kcLookupKind name
+ ; let (_, mono_kind) = splitForAllTys tc_kind
-- if we have a FullKindSignature, the tc_kind may already
-- be generalized. The kvs get matched up while kind-checking
-- the types in kc_tv, below
@@ -1093,11 +1087,11 @@ kcTyClTyVars name (HsQTvs { hsq_kvs = kvs, hsq_tvs = hs_tvs }) thing_inside
; tcExtendKindEnv name_ks thing_inside }
where
-- getInitialKind has already gotten the kinds of these type
- -- variables, but tiresomely we need to check them *again*
- -- to match the kind variables they mention against the ones
+ -- variables, but tiresomely we need to check them *again*
+ -- to match the kind variables they mention against the ones
-- we've freshly brought into scope
kc_tv :: LHsTyVarBndr Name -> Kind -> TcM (Name, Kind)
- kc_tv (L _ (UserTyVar n)) exp_k
+ kc_tv (L _ (UserTyVar n)) exp_k
= return (n, exp_k)
kc_tv (L _ (KindedTyVar n hs_k)) exp_k
= do { k <- tcLHsKind hs_k
@@ -1105,18 +1099,18 @@ kcTyClTyVars name (HsQTvs { hsq_kvs = kvs, hsq_tvs = hs_tvs }) thing_inside
; return (n, exp_k) }
-----------------------
-tcTyClTyVars :: Name -> LHsTyVarBndrs Name -- LHS of the type or class decl
+tcTyClTyVars :: Name -> LHsTyVarBndrs Name -- LHS of the type or class decl
-> ([TyVar] -> Kind -> TcM a) -> TcM a
-- Used for the type variables of a type or class decl,
-- on the second pass when constructing the final result
--- (tcTyClTyVars T [a,b] thing_inside)
+-- (tcTyClTyVars T [a,b] thing_inside)
-- where T : forall k1 k2 (a:k1 -> *) (b:k1). k2 -> *
-- calls thing_inside with arguments
-- [k1,k2,a,b] (k2 -> *)
--- having also extended the type environment with bindings
+-- having also extended the type environment with bindings
-- for k1,k2,a,b
--
--- No need to freshen the k's because they are just skolem
+-- No need to freshen the k's because they are just skolem
-- constants here, and we are at top level anyway.
tcTyClTyVars tycon (HsQTvs { hsq_kvs = hs_kvs, hsq_tvs = hs_tvs }) thing_inside
= kcScopedKindVars hs_kvs $ -- Bind scoped kind vars to fresh kind univ vars
@@ -1147,32 +1141,32 @@ tcTyClTyVars tycon (HsQTvs { hsq_kvs = hs_kvs, hsq_tvs = hs_tvs }) thing_inside
-----------------------------------
tcDataKindSig :: Kind -> TcM [TyVar]
-- GADT decls can have a (perhaps partial) kind signature
--- e.g. data T :: * -> * -> * where ...
--- This function makes up suitable (kinded) type variables for
+-- e.g. data T :: * -> * -> * where ...
+-- This function makes up suitable (kinded) type variables for
-- the argument kinds, and checks that the result kind is indeed *.
-- We use it also to make up argument type variables for for data instances.
tcDataKindSig kind
- = do { checkTc (isLiftedTypeKind res_kind) (badKindSig kind)
- ; span <- getSrcSpanM
- ; us <- newUniqueSupply
+ = do { checkTc (isLiftedTypeKind res_kind) (badKindSig kind)
+ ; span <- getSrcSpanM
+ ; us <- newUniqueSupply
; rdr_env <- getLocalRdrEnv
- ; let uniqs = uniqsFromSupply us
+ ; let uniqs = uniqsFromSupply us
occs = [ occ | str <- allNameStrings
, let occ = mkOccName tvName str
, isNothing (lookupLocalRdrOcc rdr_env occ) ]
-- Note [Avoid name clashes for associated data types]
- ; return [ mk_tv span uniq occ kind
- | ((kind, occ), uniq) <- arg_kinds `zip` occs `zip` uniqs ] }
+ ; return [ mk_tv span uniq occ kind
+ | ((kind, occ), uniq) <- arg_kinds `zip` occs `zip` uniqs ] }
where
(arg_kinds, res_kind) = splitKindFunTys kind
- mk_tv loc uniq occ kind
+ mk_tv loc uniq occ kind
= mkTyVar (mkInternalName uniq occ loc) kind
-
+
badKindSig :: Kind -> SDoc
-badKindSig kind
+badKindSig kind
= hang (ptext (sLit "Kind signature on data type declaration has non-* return kind"))
- 2 (ppr kind)
+ 2 (ppr kind)
\end{code}
Note [Avoid name clashes for associated data types]
@@ -1183,7 +1177,7 @@ When typechecking the decl for D, we'll invent an extra type variable
for D, to fill out its kind. Ideally we don't want this type variable
to be 'a', because when pretty printing we'll get
class C a b where
- data D b a0
+ data D b a0
(NB: the tidying happens in the conversion to IfaceSyn, which happens
as part of pretty-printing a TyThing.)
@@ -1193,15 +1187,15 @@ It isn't essential for correctness.
%************************************************************************
-%* *
- Scoped type variables
-%* *
+%* *
+ Scoped type variables
+%* *
%************************************************************************
tcAddScopedTyVars is used for scoped type variables added by pattern
type signatures
- e.g. \ ((x::a), (y::a)) -> x+y
+ e.g. \ ((x::a), (y::a)) -> x+y
They never have explicit kinds (because this is source-code only)
They are mutable (because they can get bound to a more specific type).
@@ -1216,42 +1210,42 @@ The current not-very-good plan is to
* do kind inference
* bring the kinded type vars into scope
* BUT throw away the kind-checked type
- (we'll kind-check it again when we type-check the pattern)
+ (we'll kind-check it again when we type-check the pattern)
This is bad because throwing away the kind checked type throws away
its splices. But too bad for now. [July 03]
Historical note:
- We no longer specify that these type variables must be univerally
- quantified (lots of email on the subject). If you want to put that
+ We no longer specify that these type variables must be univerally
+ quantified (lots of email on the subject). If you want to put that
back in, you need to
- a) Do a checkSigTyVars after thing_inside
- b) More insidiously, don't pass in expected_ty, else
- we unify with it too early and checkSigTyVars barfs
- Instead you have to pass in a fresh ty var, and unify
- it with expected_ty afterwards
+ a) Do a checkSigTyVars after thing_inside
+ b) More insidiously, don't pass in expected_ty, else
+ we unify with it too early and checkSigTyVars barfs
+ Instead you have to pass in a fresh ty var, and unify
+ it with expected_ty afterwards
\begin{code}
tcHsPatSigType :: UserTypeCtxt
- -> HsWithBndrs Name (LHsType Name) -- The type signature
- -> TcM ( Type -- The signature
+ -> HsWithBndrs Name (LHsType Name) -- The type signature
+ -> TcM ( Type -- The signature
, [(Name, TcTyVar)] ) -- The new bit of type environment, binding
- -- the scoped type variables
+ -- the scoped type variables
-- Used for type-checking type signatures in
--- (a) patterns e.g f (x::Int) = e
+-- (a) patterns e.g f (x::Int) = e
-- (b) result signatures e.g. g x :: Int = e
-- (c) RULE forall bndrs e.g. forall (x::Int). f x = x
tcHsPatSigType ctxt (HsWB { hswb_cts = hs_ty, hswb_kvs = sig_kvs, hswb_tvs = sig_tvs })
= addErrCtxt (pprHsSigCtxt ctxt hs_ty) $
- do { kvs <- mapM new_kv sig_kvs
+ do { kvs <- mapM new_kv sig_kvs
; tvs <- mapM new_tv sig_tvs
; let ktv_binds = (sig_kvs `zip` kvs) ++ (sig_tvs `zip` tvs)
- ; sig_ty <- tcExtendTyVarEnv2 ktv_binds $
+ ; sig_ty <- tcExtendTyVarEnv2 ktv_binds $
tcHsLiftedType hs_ty
; sig_ty <- zonkSigType sig_ty
- ; checkValidType ctxt sig_ty
- ; return (sig_ty, ktv_binds) }
+ ; checkValidType ctxt sig_ty
+ ; return (sig_ty, ktv_binds) }
where
new_kv name = new_tkv name superKind
new_tv name = do { kind <- newMetaKindVar
@@ -1263,54 +1257,54 @@ tcHsPatSigType ctxt (HsWB { hswb_cts = hs_ty, hswb_kvs = sig_kvs, hswb_tvs = sig
_ -> newSigTyVar name kind -- See Note [Unifying SigTvs]
tcPatSig :: UserTypeCtxt
- -> HsWithBndrs Name (LHsType Name)
- -> TcSigmaType
- -> TcM (TcType, -- The type to use for "inside" the signature
- [(Name, TcTyVar)], -- The new bit of type environment, binding
- -- the scoped type variables
+ -> HsWithBndrs Name (LHsType Name)
+ -> TcSigmaType
+ -> TcM (TcType, -- The type to use for "inside" the signature
+ [(Name, TcTyVar)], -- The new bit of type environment, binding
+ -- the scoped type variables
HsWrapper) -- Coercion due to unification with actual ty
-- Of shape: res_ty ~ sig_ty
tcPatSig ctxt sig res_ty
- = do { (sig_ty, sig_tvs) <- tcHsPatSigType ctxt sig
- -- sig_tvs are the type variables free in 'sig',
- -- and not already in scope. These are the ones
- -- that should be brought into scope
+ = do { (sig_ty, sig_tvs) <- tcHsPatSigType ctxt sig
+ -- sig_tvs are the type variables free in 'sig',
+ -- and not already in scope. These are the ones
+ -- that should be brought into scope
- ; if null sig_tvs then do {
- -- Just do the subsumption check and return
+ ; if null sig_tvs then do {
+ -- Just do the subsumption check and return
wrap <- tcSubType PatSigOrigin ctxt res_ty sig_ty
- ; return (sig_ty, [], wrap)
+ ; return (sig_ty, [], wrap)
} else do
- -- Type signature binds at least one scoped type variable
-
- -- A pattern binding cannot bind scoped type variables
+ -- Type signature binds at least one scoped type variable
+
+ -- A pattern binding cannot bind scoped type variables
-- It is more convenient to make the test here
-- than in the renamer
- { let in_pat_bind = case ctxt of
- BindPatSigCtxt -> True
- _ -> False
- ; when in_pat_bind (addErr (patBindSigErr sig_tvs))
-
- -- Check that all newly-in-scope tyvars are in fact
- -- constrained by the pattern. This catches tiresome
- -- cases like
- -- type T a = Int
- -- f :: Int -> Int
- -- f (x :: T a) = ...
- -- Here 'a' doesn't get a binding. Sigh
- ; let bad_tvs = [ tv | (_, tv) <- sig_tvs
+ { let in_pat_bind = case ctxt of
+ BindPatSigCtxt -> True
+ _ -> False
+ ; when in_pat_bind (addErr (patBindSigErr sig_tvs))
+
+ -- Check that all newly-in-scope tyvars are in fact
+ -- constrained by the pattern. This catches tiresome
+ -- cases like
+ -- type T a = Int
+ -- f :: Int -> Int
+ -- f (x :: T a) = ...
+ -- Here 'a' doesn't get a binding. Sigh
+ ; let bad_tvs = [ tv | (_, tv) <- sig_tvs
, not (tv `elemVarSet` exactTyVarsOfType sig_ty) ]
- ; checkTc (null bad_tvs) (badPatSigTvs sig_ty bad_tvs)
+ ; checkTc (null bad_tvs) (badPatSigTvs sig_ty bad_tvs)
- -- Now do a subsumption check of the pattern signature against res_ty
- ; wrap <- tcSubType PatSigOrigin ctxt res_ty sig_ty
+ -- Now do a subsumption check of the pattern signature against res_ty
+ ; wrap <- tcSubType PatSigOrigin ctxt res_ty sig_ty
- -- Phew!
+ -- Phew!
; return (sig_ty, sig_tvs, wrap)
} }
patBindSigErr :: [(Name, TcTyVar)] -> SDoc
-patBindSigErr sig_tvs
+patBindSigErr sig_tvs
= hang (ptext (sLit "You cannot bind scoped type variable") <> plural sig_tvs
<+> pprQuotedList (map fst sig_tvs))
2 (ptext (sLit "in a pattern binding signature"))
@@ -1322,19 +1316,19 @@ Consider
data T = forall a. T a (a->Int)
f (T x (f :: a->Int) = blah)
-Here
- * The pattern (T p1 p2) creates a *skolem* type variable 'a_sk',
- It must be a skolem so that that it retains its identity, and
+Here
+ * The pattern (T p1 p2) creates a *skolem* type variable 'a_sk',
+ It must be a skolem so that that it retains its identity, and
TcErrors.getSkolemInfo can thereby find the binding site for the skolem.
* The type signature pattern (f :: a->Int) binds "a" -> a_sig in the envt
* Then unificaiton makes a_sig := a_sk
-That's why we must make a_sig a MetaTv (albeit a SigTv),
+That's why we must make a_sig a MetaTv (albeit a SigTv),
not a SkolemTv, so that it can unify to a_sk.
-For RULE binders, though, things are a bit different (yuk).
+For RULE binders, though, things are a bit different (yuk).
RULE "foo" forall (x::a) (y::[a]). f x y = ...
Here this really is the binding site of the type variable so we'd like
to use a skolem, so that we get a complaint if we unify two of them
@@ -1342,7 +1336,7 @@ together.
Note [Unifying SigTvs]
~~~~~~~~~~~~~~~~~~~~~~
-ALAS we have no decent way of avoiding two SigTvs getting unified.
+ALAS we have no decent way of avoiding two SigTvs getting unified.
Consider
f (x::(a,b)) (y::c)) = [fst x, y]
Here we'd really like to complain that 'a' and 'c' are unified. But
@@ -1353,9 +1347,9 @@ are just SigTvs that can unify. And indeed, this would be ok,
(x1 :: a2, False) -> [x,y,y]
Here the type of x's first component is called 'a1' in one branch and
'a2' in the other. We could try insisting on the same OccName, but
-they definitely won't have the sane lexical Name.
+they definitely won't have the sane lexical Name.
-I think we could solve this by recording in a SigTv a list of all the
+I think we could solve this by recording in a SigTv a list of all the
in-scope varaibles that it should not unify with, but it's fiddly.
@@ -1372,11 +1366,11 @@ We would like to get a decent error message from
f :: Int x -> Int x
\begin{code}
--- The ExpKind datatype means "expected kind" and contains
+-- The ExpKind datatype means "expected kind" and contains
-- some info about just why that kind is expected, to improve
-- the error message on a mis-match
data ExpKind = EK TcKind (TcKind -> SDoc)
- -- The second arg is function that takes a *tidied* version
+ -- The second arg is function that takes a *tidied* version
-- of the first arg, and produces something like
-- "Expected kind k"
-- "Expected a constraint"
@@ -1400,16 +1394,16 @@ expectedKindMsg pkind
expArgKind :: SDoc -> TcKind -> Int -> ExpKind
expArgKind exp kind arg_no = EK kind msg_fn
where
- msg_fn pkind
- = sep [ ptext (sLit "The") <+> speakNth arg_no
+ msg_fn pkind
+ = sep [ ptext (sLit "The") <+> speakNth arg_no
<+> ptext (sLit "argument of") <+> exp
- , nest 2 $ ptext (sLit "should have kind")
+ , nest 2 $ ptext (sLit "should have kind")
<+> quotes (pprKind pkind) ]
unifyKinds :: SDoc -> [(TcType, TcKind)] -> TcM TcKind
unifyKinds fun act_kinds
= do { kind <- newMetaKindVar
- ; let check (arg_no, (ty, act_kind))
+ ; let check (arg_no, (ty, act_kind))
= checkExpectedKind ty act_kind (expArgKind (quotes fun) kind arg_no)
; mapM_ check (zip [1..] act_kinds)
; return kind }
@@ -1453,12 +1447,12 @@ checkExpectedKind ty act_kind (EK exp_kind ek_ctxt)
(env1, tidy_exp_kind) = tidyOpenKind env0 exp_kind
(env2, tidy_act_kind) = tidyOpenKind env1 act_kind
- occurs_check
+ occurs_check
| Just act_tv <- tcGetTyVar_maybe act_kind
= check_occ act_tv exp_kind
| Just exp_tv <- tcGetTyVar_maybe exp_kind
= check_occ exp_tv act_kind
- | otherwise
+ | otherwise
= False
check_occ tv k = case occurCheckExpand dflags tv k of
@@ -1537,7 +1531,7 @@ tc_hs_kind (HsTupleTy _ kis) =
do kappas <- mapM tc_lhs_kind kis
checkWiredInTyCon tycon
return $ mkTyConApp tycon kappas
- where
+ where
tycon = promotedTupleTyCon BoxedTuple (length kis)
-- Argument not kind-shaped
@@ -1548,7 +1542,7 @@ tc_kind_app :: HsKind Name -> [LHsKind Name] -> TcM Kind
tc_kind_app (HsAppTy ki1 ki2) kis = tc_kind_app (unLoc ki1) (ki2:kis)
tc_kind_app (HsTyVar tc) kis = do { arg_kis <- mapM tc_lhs_kind kis
; tc_kind_var_app tc arg_kis }
-tc_kind_app ki _ = failWithTc (quotes (ppr ki) <+>
+tc_kind_app ki _ = failWithTc (quotes (ppr ki) <+>
ptext (sLit "is not a kind constructor"))
tc_kind_var_app :: Name -> [Kind] -> TcM Kind
@@ -1568,43 +1562,43 @@ tc_kind_var_app name arg_kis
tc_kind_var_app name arg_kis
= do { thing <- tcLookup name
; case thing of
- AGlobal (ATyCon tc)
- -> do { data_kinds <- xoptM Opt_DataKinds
- ; unless data_kinds $ addErr (dataKindsErr name)
- ; case promotableTyCon_maybe tc of
- Just prom_tc | arg_kis `lengthIs` tyConArity prom_tc
- -> return (mkTyConApp prom_tc arg_kis)
- Just _ -> tycon_err tc "is not fully applied"
- Nothing -> tycon_err tc "is not promotable" }
-
- -- A lexically scoped kind variable
- ATyVar _ kind_var
- | not (isKindVar kind_var)
+ AGlobal (ATyCon tc)
+ -> do { data_kinds <- xoptM Opt_DataKinds
+ ; unless data_kinds $ addErr (dataKindsErr name)
+ ; case promotableTyCon_maybe tc of
+ Just prom_tc | arg_kis `lengthIs` tyConArity prom_tc
+ -> return (mkTyConApp prom_tc arg_kis)
+ Just _ -> tycon_err tc "is not fully applied"
+ Nothing -> tycon_err tc "is not promotable" }
+
+ -- A lexically scoped kind variable
+ ATyVar _ kind_var
+ | not (isKindVar kind_var)
-> failWithTc (ptext (sLit "Type variable") <+> quotes (ppr kind_var)
<+> ptext (sLit "used as a kind"))
- | not (null arg_kis) -- Kind variables always have kind BOX,
+ | not (null arg_kis) -- Kind variables always have kind BOX,
-- so cannot be applied to anything
-> failWithTc (ptext (sLit "Kind variable") <+> quotes (ppr name)
<+> ptext (sLit "cannot appear in a function position"))
- | otherwise
+ | otherwise
-> return (mkAppTys (mkTyVarTy kind_var) arg_kis)
- -- It is in scope, but not what we expected
- AThing _
- | isTyVarName name
+ -- It is in scope, but not what we expected
+ AThing _
+ | isTyVarName name
-> failWithTc (ptext (sLit "Type variable") <+> quotes (ppr name)
<+> ptext (sLit "used in a kind"))
- | otherwise
+ | otherwise
-> failWithTc (hang (ptext (sLit "Type constructor") <+> quotes (ppr name)
<+> ptext (sLit "used in a kind"))
- 2 (ptext (sLit "inside its own recursive group")))
+ 2 (ptext (sLit "inside its own recursive group")))
APromotionErr err -> promotionErr name err
- _ -> wrongThingErr "promoted type" thing name
+ _ -> wrongThingErr "promoted type" thing name
-- This really should not happen
}
- where
+ where
tycon_err tc msg = failWithTc (quotes (ppr tc) <+> ptext (sLit "of kind")
<+> quotes (ppr (tyConKind tc)) <+> ptext (sLit msg))
@@ -1625,15 +1619,15 @@ promotionErr name err
\end{code}
%************************************************************************
-%* *
- Scoped type variables
-%* *
+%* *
+ Scoped type variables
+%* *
%************************************************************************
\begin{code}
pprHsSigCtxt :: UserTypeCtxt -> LHsType Name -> SDoc
-pprHsSigCtxt ctxt hs_ty = sep [ ptext (sLit "In") <+> pprUserTypeCtxt ctxt <> colon,
- nest 2 (pp_sig ctxt) ]
+pprHsSigCtxt ctxt hs_ty = sep [ ptext (sLit "In") <+> pprUserTypeCtxt ctxt <> colon,
+ nest 2 (pp_sig ctxt) ]
where
pp_sig (FunSigCtxt n) = pp_n_colon n
pp_sig (ConArgCtxt n) = pp_n_colon n
@@ -1644,11 +1638,11 @@ pprHsSigCtxt ctxt hs_ty = sep [ ptext (sLit "In") <+> pprUserTypeCtxt ctxt <> co
badPatSigTvs :: TcType -> [TyVar] -> SDoc
badPatSigTvs sig_ty bad_tvs
- = vcat [ fsep [ptext (sLit "The type variable") <> plural bad_tvs,
- quotes (pprWithCommas ppr bad_tvs),
- ptext (sLit "should be bound by the pattern signature") <+> quotes (ppr sig_ty),
- ptext (sLit "but are actually discarded by a type synonym") ]
- , ptext (sLit "To fix this, expand the type synonym")
+ = vcat [ fsep [ptext (sLit "The type variable") <> plural bad_tvs,
+ quotes (pprWithCommas ppr bad_tvs),
+ ptext (sLit "should be bound by the pattern signature") <+> quotes (ppr sig_ty),
+ ptext (sLit "but are actually discarded by a type synonym") ]
+ , ptext (sLit "To fix this, expand the type synonym")
, ptext (sLit "[Note: I hope to lift this restriction in due course]") ]
unifyKindMisMatch :: TcKind -> TcKind -> TcM a
diff --git a/compiler/typecheck/TcMType.lhs b/compiler/typecheck/TcMType.lhs
index 65bc0b7653..301801ab91 100644
--- a/compiler/typecheck/TcMType.lhs
+++ b/compiler/typecheck/TcMType.lhs
@@ -10,12 +10,6 @@ mutable type variables
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module TcMType (
TcTyVar, TcKind, TcType, TcTauType, TcThetaType, TcTyVarSet,
@@ -23,11 +17,11 @@ module TcMType (
--------------------------------
-- Creating new mutable type variables
newFlexiTyVar,
- newFlexiTyVarTy, -- Kind -> TcM TcType
- newFlexiTyVarTys, -- Int -> Kind -> TcM [TcType]
+ newFlexiTyVarTy, -- Kind -> TcM TcType
+ newFlexiTyVarTys, -- Int -> Kind -> TcM [TcType]
newPolyFlexiTyVarTy,
newMetaKindVar, newMetaKindVars,
- mkTcTyVarName, cloneMetaTyVar,
+ mkTcTyVarName, cloneMetaTyVar,
newMetaTyVar, readMetaTyVar, writeMetaTyVar, writeMetaTyVarRef,
newMetaDetails, isFilledMetaTyVar, isFlexiMetaTyVar,
@@ -50,16 +44,16 @@ module TcMType (
--------------------------------
-- Zonking
- zonkTcPredType,
+ zonkTcPredType,
skolemiseUnboundMetaTyVar,
zonkTcTyVar, zonkTcTyVars, zonkTyVarsAndFV, zonkTcTypeAndFV,
zonkQuantifiedTyVar, quantifyTyVars,
- zonkTcTyVarBndr, zonkTcType, zonkTcTypes, zonkTcThetaType,
+ zonkTcTyVarBndr, zonkTcType, zonkTcTypes, zonkTcThetaType,
zonkTcKind, defaultKindVarToStar,
zonkEvVar, zonkWC, zonkFlats, zonkId, zonkCt, zonkCts, zonkSkolemInfo,
- tcGetGlobalTyVars,
+ tcGetGlobalTyVars,
) where
#include "HsVersions.h"
@@ -92,24 +86,24 @@ import Data.List ( partition, mapAccumL )
%************************************************************************
-%* *
- Kind variables
-%* *
+%* *
+ Kind variables
+%* *
%************************************************************************
\begin{code}
mkKindName :: Unique -> Name
mkKindName unique = mkSystemName unique kind_var_occ
-kind_var_occ :: OccName -- Just one for all MetaKindVars
- -- They may be jiggled by tidying
+kind_var_occ :: OccName -- Just one for all MetaKindVars
+ -- They may be jiggled by tidying
kind_var_occ = mkOccName tvName "k"
newMetaKindVar :: TcM TcKind
newMetaKindVar = do { uniq <- newUnique
- ; details <- newMetaDetails TauTv
+ ; details <- newMetaDetails TauTv
; let kv = mkTcTyVar (mkKindName uniq) superKind details
- ; return (mkTyVarTy kv) }
+ ; return (mkTyVarTy kv) }
newMetaKindVars :: Int -> TcM [TcKind]
newMetaKindVars n = mapM (\ _ -> newMetaKindVar) (nOfThem n ())
@@ -117,26 +111,26 @@ newMetaKindVars n = mapM (\ _ -> newMetaKindVar) (nOfThem n ())
%************************************************************************
-%* *
+%* *
Evidence variables; range over constraints we can abstract over
-%* *
+%* *
%************************************************************************
\begin{code}
newEvVars :: TcThetaType -> TcM [EvVar]
newEvVars theta = mapM newEvVar theta
-newWantedEvVar :: TcPredType -> TcM EvVar
+newWantedEvVar :: TcPredType -> TcM EvVar
newWantedEvVar = newEvVar
-newWantedEvVars :: TcThetaType -> TcM [EvVar]
-newWantedEvVars theta = mapM newWantedEvVar theta
+newWantedEvVars :: TcThetaType -> TcM [EvVar]
+newWantedEvVars theta = mapM newWantedEvVar theta
--------------
newEvVar :: TcPredType -> TcM EvVar
-- Creates new *rigid* variables for predicates
-newEvVar ty = do { name <- newSysName (predTypeOccName ty)
+newEvVar ty = do { name <- newSysName (predTypeOccName ty)
; return (mkLocalId name ty) }
newEq :: TcType -> TcType -> TcM EvVar
@@ -145,7 +139,7 @@ newEq ty1 ty2
; return (mkLocalId name (mkTcEqPred ty1 ty2)) }
newDict :: Class -> [TcType] -> TcM DictId
-newDict cls tys
+newDict cls tys
= do { name <- newSysName (mkDictOcc (getOccName cls))
; return (mkLocalId name (mkClassPred cls tys)) }
@@ -158,7 +152,7 @@ predTypeOccName ty = case classifyPredType ty of
\end{code}
*********************************************************************************
-* *
+* *
* Wanted constraints
* *
*********************************************************************************
@@ -178,30 +172,30 @@ newFlatWanteds orig = mapM (newFlatWanted orig)
\end{code}
%************************************************************************
-%* *
- SkolemTvs (immutable)
-%* *
+%* *
+ SkolemTvs (immutable)
+%* *
%************************************************************************
\begin{code}
tcInstType :: ([TyVar] -> TcM (TvSubst, [TcTyVar])) -- How to instantiate the type variables
- -> TcType -- Type to instantiate
- -> TcM ([TcTyVar], TcThetaType, TcType) -- Result
- -- (type vars (excl coercion vars), preds (incl equalities), rho)
+ -> TcType -- Type to instantiate
+ -> TcM ([TcTyVar], TcThetaType, TcType) -- Result
+ -- (type vars (excl coercion vars), preds (incl equalities), rho)
tcInstType inst_tyvars ty
= case tcSplitForAllTys ty of
- ([], rho) -> let -- There may be overloading despite no type variables;
- -- (?x :: Int) => Int -> Int
- (theta, tau) = tcSplitPhiTy rho
- in
- return ([], theta, tau)
+ ([], rho) -> let -- There may be overloading despite no type variables;
+ -- (?x :: Int) => Int -> Int
+ (theta, tau) = tcSplitPhiTy rho
+ in
+ return ([], theta, tau)
- (tyvars, rho) -> do { (subst, tyvars') <- inst_tyvars tyvars
- ; let (theta, tau) = tcSplitPhiTy (substTy subst rho)
- ; return (tyvars', theta, tau) }
+ (tyvars, rho) -> do { (subst, tyvars') <- inst_tyvars tyvars
+ ; let (theta, tau) = tcSplitPhiTy (substTy subst rho)
+ ; return (tyvars', theta, tau) }
tcSkolDFunType :: Type -> TcM ([TcTyVar], TcThetaType, TcType)
--- Instantiate a type signature with skolem constants, but
+-- Instantiate a type signature with skolem constants, but
-- do *not* give them fresh names, because we want the name to
-- be in the type environment: it is lexically scoped.
tcSkolDFunType ty = tcInstType (\tvs -> return (tcSuperSkolTyVars tvs)) ty
@@ -222,8 +216,8 @@ tcSuperSkolTyVar subst tv
tcInstSkolTyVar :: SrcSpan -> Bool -> TvSubst -> TyVar
-> TcRnIf gbl lcl (TvSubst, TcTyVar)
--- Instantiate the tyvar, using
--- * the occ-name and kind of the supplied tyvar,
+-- Instantiate the tyvar, using
+-- * the occ-name and kind of the supplied tyvar,
-- * the unique from the monad,
-- * the location either from the tyvar (skol_info = SigSkol)
-- or from the monad (otherwise)
@@ -283,7 +277,7 @@ newSigTyVar name kind
; return (mkTcTyVar name' kind details) }
newMetaDetails :: MetaInfo -> TcM TcTyVarDetails
-newMetaDetails info
+newMetaDetails info
= do { ref <- newMutVar Flexi
; untch <- getUntouchables
; return (MetaTv { mtv_info = info, mtv_ref = ref, mtv_untch = untch }) }
@@ -306,31 +300,31 @@ instead of the buggous
%************************************************************************
-%* *
- MetaTvs (meta type variables; mutable)
-%* *
+%* *
+ MetaTvs (meta type variables; mutable)
+%* *
%************************************************************************
\begin{code}
newMetaTyVar :: MetaInfo -> Kind -> TcM TcTyVar
-- Make a new meta tyvar out of thin air
newMetaTyVar meta_info kind
- = do { uniq <- newUnique
+ = do { uniq <- newUnique
; let name = mkTcTyVarName uniq s
s = case meta_info of
PolyTv -> fsLit "s"
TauTv -> fsLit "t"
SigTv -> fsLit "a"
; details <- newMetaDetails meta_info
- ; return (mkTcTyVar name kind details) }
+ ; return (mkTcTyVar name kind details) }
cloneMetaTyVar :: TcTyVar -> TcM TcTyVar
cloneMetaTyVar tv
= ASSERT( isTcTyVar tv )
- do { uniq <- newUnique
+ do { uniq <- newUnique
; ref <- newMutVar Flexi
; let name' = setNameUnique (tyVarName tv) uniq
- details' = case tcTyVarDetails tv of
+ details' = case tcTyVarDetails tv of
details@(MetaTv {}) -> details { mtv_ref = ref }
_ -> pprPanic "cloneMetaTyVar" (ppr tv)
; return (mkTcTyVar name' (tyVarKind tv) details') }
@@ -343,15 +337,15 @@ mkTcTyVarName uniq str = mkSysTvName uniq str
-- Works for both type and kind variables
readMetaTyVar :: TyVar -> TcM MetaDetails
readMetaTyVar tyvar = ASSERT2( isMetaTyVar tyvar, ppr tyvar )
- readMutVar (metaTvRef tyvar)
+ readMutVar (metaTvRef tyvar)
isFilledMetaTyVar :: TyVar -> TcM Bool
-- True of a filled-in (Indirect) meta type variable
isFilledMetaTyVar tv
| not (isTcTyVar tv) = return False
| MetaTv { mtv_ref = ref } <- tcTyVarDetails tv
- = do { details <- readMutVar ref
- ; return (isIndirect details) }
+ = do { details <- readMutVar ref
+ ; return (isIndirect details) }
| otherwise = return False
isFlexiMetaTyVar :: TyVar -> TcM Bool
@@ -359,8 +353,8 @@ isFlexiMetaTyVar :: TyVar -> TcM Bool
isFlexiMetaTyVar tv
| not (isTcTyVar tv) = return False
| MetaTv { mtv_ref = ref } <- tcTyVarDetails tv
- = do { details <- readMutVar ref
- ; return (isFlexi details) }
+ = do { details <- readMutVar ref
+ ; return (isFlexi details) }
| otherwise = return False
--------------------
@@ -369,7 +363,7 @@ writeMetaTyVar :: TcTyVar -> TcType -> TcM ()
-- Write into a currently-empty MetaTyVar
writeMetaTyVar tyvar ty
- | not debugIsOn
+ | not debugIsOn
= writeMetaTyVarRef tyvar (metaTvRef tyvar) ty
-- Everything from here on only happens if DEBUG is on
@@ -422,9 +416,9 @@ writeMetaTyVarRef tyvar ref ty
%************************************************************************
-%* *
- MetaTvs: TauTvs
-%* *
+%* *
+ MetaTvs: TauTvs
+%* *
%************************************************************************
\begin{code}
@@ -467,15 +461,15 @@ tcInstTyVarX subst tyvar
; details <- newMetaDetails TauTv
; let name = mkSystemName uniq (getOccName tyvar)
kind = substTy subst (tyVarKind tyvar)
- new_tv = mkTcTyVar name kind details
+ new_tv = mkTcTyVar name kind details
; return (extendTvSubst subst tyvar (mkTyVarTy new_tv), new_tv) }
\end{code}
%************************************************************************
-%* *
+%* *
Quantification
-%* *
+%* *
%************************************************************************
Note [quantifyTyVars]
@@ -483,7 +477,7 @@ Note [quantifyTyVars]
quantifyTyVars is give the free vars of a type that we
are about to wrap in a forall.
-It takes these free type/kind variables and
+It takes these free type/kind variables and
1. Zonks them and remove globals
2. Partitions into type and kind variables (kvs1, tvs)
3. Extends kvs1 with free kind vars in the kinds of tvs (removing globals)
@@ -499,7 +493,7 @@ has free vars {f,a}, but we must add 'k' as well! Hence step (3).
\begin{code}
quantifyTyVars :: TcTyVarSet -> TcTyVarSet -> TcM [TcTyVar]
-- See Note [quantifyTyVars]
--- The input is a mixture of type and kind variables; a kind variable k
+-- The input is a mixture of type and kind variables; a kind variable k
-- may occur *after* a tyvar mentioning k in its kind
-- Can be given a mixture of TcTyVars and TyVars, in the case of
-- associated type declarations
@@ -510,7 +504,7 @@ quantifyTyVars gbl_tvs tkvs
; let (kvs, tvs) = partitionVarSet isKindVar (closeOverKinds tkvs `minusVarSet` gbl_tvs)
-- NB kinds of tvs are zonked by zonkTyVarsAndFV
kvs2 = varSetElems kvs
- qtvs = varSetElems tvs
+ qtvs = varSetElems tvs
-- In the non-PolyKinds case, default the kind variables
-- to *, and zonk the tyvars as usual. Notice that this
@@ -524,7 +518,7 @@ quantifyTyVars gbl_tvs tkvs
; mapM_ defaultKindVarToStar meta_kvs
; return skolem_kvs } -- should be empty
- ; mapM zonk_quant (qkvs ++ qtvs) }
+ ; mapM zonk_quant (qkvs ++ qtvs) }
-- Because of the order, any kind variables
-- mentioned in the kinds of the type variables refer to
-- the now-quantified versions
@@ -532,16 +526,16 @@ quantifyTyVars gbl_tvs tkvs
zonk_quant tkv
| isTcTyVar tkv = zonkQuantifiedTyVar tkv
| otherwise = return tkv
- -- For associated types, we have the class variables
+ -- For associated types, we have the class variables
-- in scope, and they are TyVars not TcTyVars
zonkQuantifiedTyVar :: TcTyVar -> TcM TcTyVar
-- The quantified type variables often include meta type variables
-- we want to freeze them into ordinary type variables, and
-- default their kind (e.g. from OpenTypeKind to TypeKind)
--- -- see notes with Kind.defaultKind
--- The meta tyvar is updated to point to the new skolem TyVar. Now any
--- bound occurrences of the original type variable will get zonked to
+-- -- see notes with Kind.defaultKind
+-- The meta tyvar is updated to point to the new skolem TyVar. Now any
+-- bound occurrences of the original type variable will get zonked to
-- the immutable version.
--
-- We leave skolem TyVars alone; they are immutable.
@@ -549,12 +543,12 @@ zonkQuantifiedTyVar :: TcTyVar -> TcM TcTyVar
-- This function is called on both kind and type variables,
-- but kind variables *only* if PolyKinds is on.
zonkQuantifiedTyVar tv
- = ASSERT2( isTcTyVar tv, ppr tv )
+ = ASSERT2( isTcTyVar tv, ppr tv )
case tcTyVarDetails tv of
SkolemTv {} -> do { kind <- zonkTcKind (tyVarKind tv)
; return $ setTyVarKind tv kind }
- -- It might be a skolem type variable,
- -- for example from a user type signature
+ -- It might be a skolem type variable,
+ -- for example from a user type signature
MetaTv { mtv_ref = ref } ->
do when debugIsOn $ do
@@ -570,7 +564,7 @@ zonkQuantifiedTyVar tv
defaultKindVarToStar :: TcTyVar -> TcM Kind
-- We have a meta-kind: unify it with '*'
-defaultKindVarToStar kv
+defaultKindVarToStar kv
= do { ASSERT( isKindVar kv && isMetaTyVar kv )
writeMetaTyVar kv liftedTypeKind
; return liftedTypeKind }
@@ -582,7 +576,7 @@ skolemiseUnboundMetaTyVar :: TcTyVar -> TcTyVarDetails -> TcM TyVar
-- We create a skolem TyVar, not a regular TyVar
-- See Note [Zonking to Skolem]
skolemiseUnboundMetaTyVar tv details
- = ASSERT2( isMetaTyVar tv, ppr tv )
+ = ASSERT2( isMetaTyVar tv, ppr tv )
do { span <- getSrcSpanM -- Get the location from "here"
-- ie where we are generalising
; uniq <- newUnique -- Remove it from TcMetaTyVar unique land
@@ -629,28 +623,28 @@ simplifier knows how to deal with.
Note [Silly Type Synonyms]
~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this:
- type C u a = u -- Note 'a' unused
+ type C u a = u -- Note 'a' unused
- foo :: (forall a. C u a -> C u a) -> u
- foo x = ...
+ foo :: (forall a. C u a -> C u a) -> u
+ foo x = ...
- bar :: Num u => u
- bar = foo (\t -> t + t)
+ bar :: Num u => u
+ bar = foo (\t -> t + t)
* From the (\t -> t+t) we get type {Num d} => d -> d
where d is fresh.
* Now unify with type of foo's arg, and we get:
- {Num (C d a)} => C d a -> C d a
+ {Num (C d a)} => C d a -> C d a
where a is fresh.
* Now abstract over the 'a', but float out the Num (C d a) constraint
because it does not 'really' mention a. (see exactTyVarsOfType)
The arg to foo becomes
- \/\a -> \t -> t+t
+ \/\a -> \t -> t+t
* So we get a dict binding for Num (C d a), which is zonked to give
- a = ()
+ a = ()
[Note Sept 04: now that we are zonking quantified type variables
on construction, the 'a' will be frozen as a regular tyvar on
quantification, so the floated dict will still have type (C d a).
@@ -662,9 +656,9 @@ All very silly. I think its harmless to ignore the problem. We'll end up with
a \/\a in the final result but all the occurrences of a will be zonked to ()
%************************************************************************
-%* *
+%* *
Zonking
-%* *
+%* *
%************************************************************************
@tcGetGlobalTyVars@ returns a fully-zonked set of tyvars free in the environment.
@@ -699,7 +693,7 @@ zonkTyVar :: TyVar -> TcM TcType
zonkTyVar tv | isTcTyVar tv = zonkTcTyVar tv
| otherwise = return (mkTyVarTy tv)
-- Hackily, when typechecking type and class decls
- -- we have TyVars in scopeadded (only) in
+ -- we have TyVars in scopeadded (only) in
-- TcHsType.tcTyClTyVars, but it seems
-- painful to make them into TcTyVars there
@@ -739,7 +733,7 @@ zonkImplication implic@(Implic { ic_untch = untch
; given' <- mapM zonkEvVar given
; info' <- zonkSkolemInfo info
; wanted' <- zonkWCRec binds_var untch wanted
- ; if isEmptyWC wanted'
+ ; if isEmptyWC wanted'
then return emptyBag
else return $ unitBag $
implic { ic_fsks = [] -- Zonking removes all FlatSkol tyvars
@@ -777,7 +771,7 @@ zonkFlats binds_var untch cts
; zonkCts cts }
where
unflatten_one orig_ct cts
- = do { zct <- zonkCt orig_ct -- First we need to fully zonk
+ = do { zct <- zonkCt orig_ct -- First we need to fully zonk
; mct <- try_zonk_fun_eq orig_ct zct -- Then try to solve if family equation
; return $ maybe cts (`consBag` cts) mct }
@@ -835,7 +829,7 @@ Note [How to unflatten]
How do we unflatten during zonking. Consider a bunch of flat constraints.
Consider them one by one. For each such constraint C
* Zonk C (to apply current substitution)
- * If C is of form F tys ~ alpha,
+ * If C is of form F tys ~ alpha,
where alpha is touchable
and alpha is not mentioned in tys
then unify alpha := F tys
@@ -862,7 +856,7 @@ zonkCt ct
; return (mkNonCanonical fl') }
zonkCtEvidence :: CtEvidence -> TcM CtEvidence
-zonkCtEvidence ctev@(CtGiven { ctev_pred = pred })
+zonkCtEvidence ctev@(CtGiven { ctev_pred = pred })
= do { pred' <- zonkTcType pred
; return (ctev { ctev_pred = pred'}) }
zonkCtEvidence ctev@(CtWanted { ctev_pred = pred })
@@ -885,11 +879,11 @@ zonkSkolemInfo skol_info = return skol_info
%************************************************************************
-%* *
+%* *
\subsection{Zonking -- the main work-horses: zonkTcType, zonkTcTyVar}
-%* *
-%* For internal use only! *
-%* *
+%* *
+%* For internal use only! *
+%* *
%************************************************************************
\begin{code}
@@ -901,7 +895,7 @@ zonkId id
-- For unbound, mutable tyvars, zonkType uses the function given to it
-- For tyvars bound at a for-all, zonkType zonks them to an immutable
--- type variable and zonks the kind too
+-- type variable and zonks the kind too
zonkTcType :: TcType -> TcM TcType
zonkTcType ty
@@ -922,17 +916,17 @@ zonkTcType ty
go (AppTy fun arg) = do fun' <- go fun
arg' <- go arg
return (mkAppTy fun' arg')
- -- NB the mkAppTy; we might have instantiated a
- -- type variable to a type constructor, so we need
- -- to pull the TyConApp to the top.
+ -- NB the mkAppTy; we might have instantiated a
+ -- type variable to a type constructor, so we need
+ -- to pull the TyConApp to the top.
-- OK to do this because only strict in the structure
-- not in the TyCon.
-- See Note [Zonking inside the knot] in TcHsType
- -- The two interesting cases!
+ -- The two interesting cases!
go (TyVarTy tyvar) | isTcTyVar tyvar = zonkTcTyVar tyvar
- | otherwise = TyVarTy <$> updateTyVarKindM go tyvar
- -- Ordinary (non Tc) tyvars occur inside quantified types
+ | otherwise = TyVarTy <$> updateTyVarKindM go tyvar
+ -- Ordinary (non Tc) tyvars occur inside quantified types
go (ForAllTy tv ty) = do { tv' <- zonkTcTyVarBndr tv
; ty' <- go ty
@@ -940,7 +934,7 @@ zonkTcType ty
zonkTcTyVarBndr :: TcTyVar -> TcM TcTyVar
-- A tyvar binder is never a unification variable (MetaTv),
--- rather it is always a skolems. BUT it may have a kind
+-- rather it is always a skolems. BUT it may have a kind
-- that has not yet been zonked, and may include kind
-- unification variables.
zonkTcTyVarBndr tyvar
@@ -958,8 +952,8 @@ zonkTcTyVar tv
MetaTv { mtv_ref = ref }
-> do { cts <- readMutVar ref
; case cts of
- Flexi -> zonk_kind_and_return
- Indirect ty -> zonkTcType ty }
+ Flexi -> zonk_kind_and_return
+ Indirect ty -> zonkTcType ty }
where
zonk_kind_and_return = do { z_tv <- zonkTyVarKind tv
; return (TyVarTy z_tv) }
@@ -968,15 +962,15 @@ zonkTcTyVar tv
%************************************************************************
-%* *
- Zonking kinds
-%* *
+%* *
+ Zonking kinds
+%* *
%************************************************************************
\begin{code}
zonkTcKind :: TcKind -> TcM TcKind
zonkTcKind k = zonkTcType k
\end{code}
-
+
diff --git a/compiler/typecheck/TcMatches.lhs b/compiler/typecheck/TcMatches.lhs
index 32b6d1e326..b4e31801ee 100644
--- a/compiler/typecheck/TcMatches.lhs
+++ b/compiler/typecheck/TcMatches.lhs
@@ -7,20 +7,14 @@ TcMatches: Typecheck some @Matches@
\begin{code}
{-# LANGUAGE CPP, RankNTypes #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module TcMatches ( tcMatchesFun, tcGRHS, tcGRHSsPat, tcMatchesCase, tcMatchLambda,
- TcMatchCtxt(..), TcStmtChecker, TcExprStmtChecker, TcCmdStmtChecker,
- tcStmts, tcStmtsAndThen, tcDoStmts, tcBody,
- tcDoStmt, tcGuardStmt
+ TcMatchCtxt(..), TcStmtChecker, TcExprStmtChecker, TcCmdStmtChecker,
+ tcStmts, tcStmtsAndThen, tcDoStmts, tcBody,
+ tcDoStmt, tcGuardStmt
) where
-import {-# SOURCE #-} TcExpr( tcSyntaxOp, tcInferRhoNC, tcInferRho, tcCheckId,
+import {-# SOURCE #-} TcExpr( tcSyntaxOp, tcInferRhoNC, tcInferRho, tcCheckId,
tcMonoExpr, tcMonoExprNC, tcPolyExpr )
import HsSyn
@@ -52,9 +46,9 @@ import Control.Monad
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{tcMatchesFun, tcMatchesCase}
-%* *
+%* *
%************************************************************************
@tcMatchesFun@ typechecks a @[Match]@ list which occurs in a
@@ -75,20 +69,20 @@ tcMatchesFun :: Name -> Bool
-> TcM (HsWrapper, MatchGroup TcId (LHsExpr TcId))
-- Returns type of body
tcMatchesFun fun_name inf matches exp_ty
- = do { -- Check that they all have the same no of arguments
- -- Location is in the monad, set the caller so that
- -- any inter-equation error messages get some vaguely
- -- sensible location. Note: we have to do this odd
- -- ann-grabbing, because we don't always have annotations in
- -- hand when we call tcMatchesFun...
+ = do { -- Check that they all have the same no of arguments
+ -- Location is in the monad, set the caller so that
+ -- any inter-equation error messages get some vaguely
+ -- sensible location. Note: we have to do this odd
+ -- ann-grabbing, because we don't always have annotations in
+ -- hand when we call tcMatchesFun...
traceTc "tcMatchesFun" (ppr fun_name $$ ppr exp_ty)
- ; checkArgs fun_name matches
+ ; checkArgs fun_name matches
- ; (wrap_gen, (wrap_fun, group))
+ ; (wrap_gen, (wrap_fun, group))
<- tcGen (FunSigCtxt fun_name) exp_ty $ \ _ exp_rho ->
- -- Note [Polymorphic expected type for tcMatchesFun]
- matchFunTys herald arity exp_rho $ \ pat_tys rhs_ty ->
- tcMatches match_ctxt pat_tys rhs_ty matches
+ -- Note [Polymorphic expected type for tcMatchesFun]
+ matchFunTys herald arity exp_rho $ \ pat_tys rhs_ty ->
+ tcMatches match_ctxt pat_tys rhs_ty matches
; return (wrap_gen <.> wrap_fun, group) }
where
arity = matchGroupArity matches
@@ -101,7 +95,7 @@ tcMatchesFun fun_name inf matches exp_ty
parser guarantees that each equation has exactly one argument.
\begin{code}
-tcMatchesCase :: (Outputable (body Name)) =>
+tcMatchesCase :: (Outputable (body Name)) =>
TcMatchCtxt body -- Case context
-> TcRhoType -- Type of scrutinee
-> MatchGroup Name (Located (body Name)) -- The case alternatives
@@ -115,20 +109,20 @@ tcMatchesCase ctxt scrut_ty matches res_ty
| otherwise
= tcMatches ctxt [scrut_ty] res_ty matches
-tcMatchLambda :: MatchGroup Name (LHsExpr Name) -> TcRhoType
+tcMatchLambda :: MatchGroup Name (LHsExpr Name) -> TcRhoType
-> TcM (HsWrapper, MatchGroup TcId (LHsExpr TcId))
-tcMatchLambda match res_ty
+tcMatchLambda match res_ty
= matchFunTys herald n_pats res_ty $ \ pat_tys rhs_ty ->
tcMatches match_ctxt pat_tys rhs_ty match
where
n_pats = matchGroupArity match
herald = sep [ ptext (sLit "The lambda expression")
- <+> quotes (pprSetDepth (PartWay 1) $
+ <+> quotes (pprSetDepth (PartWay 1) $
pprMatches (LambdaExpr :: HsMatchContext Name) match),
- -- The pprSetDepth makes the abstraction print briefly
- ptext (sLit "has")]
+ -- The pprSetDepth makes the abstraction print briefly
+ ptext (sLit "has")]
match_ctxt = MC { mc_what = LambdaExpr,
- mc_body = tcBody }
+ mc_body = tcBody }
\end{code}
@tcGRHSsPat@ typechecks @[GRHSs]@ that occur in a @PatMonoBind@.
@@ -140,31 +134,31 @@ tcGRHSsPat :: GRHSs Name (LHsExpr Name) -> TcRhoType
tcGRHSsPat grhss res_ty = tcGRHSs match_ctxt grhss res_ty
where
match_ctxt = MC { mc_what = PatBindRhs,
- mc_body = tcBody }
+ mc_body = tcBody }
\end{code}
\begin{code}
matchFunTys
- :: SDoc -- See Note [Herald for matchExpecteFunTys] in TcUnify
+ :: SDoc -- See Note [Herald for matchExpecteFunTys] in TcUnify
-> Arity
-> TcRhoType
-> ([TcSigmaType] -> TcRhoType -> TcM a)
-> TcM (HsWrapper, a)
--- Written in CPS style for historical reasons;
+-- Written in CPS style for historical reasons;
-- could probably be un-CPSd, like matchExpectedTyConApp
matchFunTys herald arity res_ty thing_inside
- = do { (co, pat_tys, res_ty) <- matchExpectedFunTys herald arity res_ty
- ; res <- thing_inside pat_tys res_ty
+ = do { (co, pat_tys, res_ty) <- matchExpectedFunTys herald arity res_ty
+ ; res <- thing_inside pat_tys res_ty
; return (coToHsWrapper (mkTcSymCo co), res) }
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{tcMatch}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -182,9 +176,9 @@ data TcMatchCtxt body -- c.f. TcStmtCtxt, also in this module
-> TcM (Located (body TcId)) }
tcMatches ctxt pat_tys rhs_ty (MG { mg_alts = matches, mg_origin = origin })
- = ASSERT( not (null matches) ) -- Ensure that rhs_ty is filled in
- do { matches' <- mapM (tcMatch ctxt pat_tys rhs_ty) matches
- ; return (MG { mg_alts = matches', mg_arg_tys = pat_tys, mg_res_ty = rhs_ty, mg_origin = origin }) }
+ = ASSERT( not (null matches) ) -- Ensure that rhs_ty is filled in
+ do { matches' <- mapM (tcMatch ctxt pat_tys rhs_ty) matches
+ ; return (MG { mg_alts = matches', mg_arg_tys = pat_tys, mg_res_ty = rhs_ty, mg_origin = origin }) }
-------------
tcMatch :: (Outputable (body Name)) => TcMatchCtxt body
@@ -193,44 +187,44 @@ tcMatch :: (Outputable (body Name)) => TcMatchCtxt body
-> LMatch Name (Located (body Name))
-> TcM (LMatch TcId (Located (body TcId)))
-tcMatch ctxt pat_tys rhs_ty match
+tcMatch ctxt pat_tys rhs_ty match
= wrapLocM (tc_match ctxt pat_tys rhs_ty) match
where
tc_match ctxt pat_tys rhs_ty match@(Match pats maybe_rhs_sig grhss)
= add_match_ctxt match $
do { (pats', grhss') <- tcPats (mc_what ctxt) pats pat_tys $
- tc_grhss ctxt maybe_rhs_sig grhss rhs_ty
- ; return (Match pats' Nothing grhss') }
+ tc_grhss ctxt maybe_rhs_sig grhss rhs_ty
+ ; return (Match pats' Nothing grhss') }
- tc_grhss ctxt Nothing grhss rhs_ty
- = tcGRHSs ctxt grhss rhs_ty -- No result signature
+ tc_grhss ctxt Nothing grhss rhs_ty
+ = tcGRHSs ctxt grhss rhs_ty -- No result signature
- -- Result type sigs are no longer supported
+ -- Result type sigs are no longer supported
tc_grhss _ (Just {}) _ _
- = panic "tc_ghrss" -- Rejected by renamer
+ = panic "tc_ghrss" -- Rejected by renamer
- -- For (\x -> e), tcExpr has already said "In the expresssion \x->e"
- -- so we don't want to add "In the lambda abstraction \x->e"
+ -- For (\x -> e), tcExpr has already said "In the expresssion \x->e"
+ -- so we don't want to add "In the lambda abstraction \x->e"
add_match_ctxt match thing_inside
- = case mc_what ctxt of
- LambdaExpr -> thing_inside
- m_ctxt -> addErrCtxt (pprMatchInCtxt m_ctxt match) thing_inside
+ = case mc_what ctxt of
+ LambdaExpr -> thing_inside
+ m_ctxt -> addErrCtxt (pprMatchInCtxt m_ctxt match) thing_inside
-------------
tcGRHSs :: TcMatchCtxt body -> GRHSs Name (Located (body Name)) -> TcRhoType
- -> TcM (GRHSs TcId (Located (body TcId)))
+ -> TcM (GRHSs TcId (Located (body TcId)))
-- Notice that we pass in the full res_ty, so that we get
-- good inference from simple things like
--- f = \(x::forall a.a->a) -> <stuff>
+-- f = \(x::forall a.a->a) -> <stuff>
-- We used to force it to be a monotype when there was more than one guard
-- but we don't need to do that any more
tcGRHSs ctxt (GRHSs grhss binds) res_ty
- = do { (binds', grhss') <- tcLocalBinds binds $
- mapM (wrapLocM (tcGRHS ctxt res_ty)) grhss
+ = do { (binds', grhss') <- tcLocalBinds binds $
+ mapM (wrapLocM (tcGRHS ctxt res_ty)) grhss
- ; return (GRHSs grhss' binds') }
+ ; return (GRHSs grhss' binds') }
-------------
tcGRHS :: TcMatchCtxt body -> TcRhoType -> GRHS Name (Located (body Name))
@@ -238,63 +232,63 @@ tcGRHS :: TcMatchCtxt body -> TcRhoType -> GRHS Name (Located (body Name))
tcGRHS ctxt res_ty (GRHS guards rhs)
= do { (guards', rhs') <- tcStmtsAndThen stmt_ctxt tcGuardStmt guards res_ty $
- mc_body ctxt rhs
- ; return (GRHS guards' rhs') }
+ mc_body ctxt rhs
+ ; return (GRHS guards' rhs') }
where
stmt_ctxt = PatGuard (mc_what ctxt)
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{@tcDoStmts@ typechecks a {\em list} of do statements}
-%* *
+%* *
%************************************************************************
\begin{code}
-tcDoStmts :: HsStmtContext Name
- -> [LStmt Name (LHsExpr Name)]
- -> TcRhoType
- -> TcM (HsExpr TcId) -- Returns a HsDo
+tcDoStmts :: HsStmtContext Name
+ -> [LStmt Name (LHsExpr Name)]
+ -> TcRhoType
+ -> TcM (HsExpr TcId) -- Returns a HsDo
tcDoStmts ListComp stmts res_ty
- = do { (co, elt_ty) <- matchExpectedListTy res_ty
+ = do { (co, elt_ty) <- matchExpectedListTy res_ty
; let list_ty = mkListTy elt_ty
- ; stmts' <- tcStmts ListComp (tcLcStmt listTyCon) stmts elt_ty
- ; return $ mkHsWrapCo co (HsDo ListComp stmts' list_ty) }
+ ; stmts' <- tcStmts ListComp (tcLcStmt listTyCon) stmts elt_ty
+ ; return $ mkHsWrapCo co (HsDo ListComp stmts' list_ty) }
tcDoStmts PArrComp stmts res_ty
- = do { (co, elt_ty) <- matchExpectedPArrTy res_ty
+ = do { (co, elt_ty) <- matchExpectedPArrTy res_ty
; let parr_ty = mkPArrTy elt_ty
- ; stmts' <- tcStmts PArrComp (tcLcStmt parrTyCon) stmts elt_ty
- ; return $ mkHsWrapCo co (HsDo PArrComp stmts' parr_ty) }
+ ; stmts' <- tcStmts PArrComp (tcLcStmt parrTyCon) stmts elt_ty
+ ; return $ mkHsWrapCo co (HsDo PArrComp stmts' parr_ty) }
tcDoStmts DoExpr stmts res_ty
- = do { stmts' <- tcStmts DoExpr tcDoStmt stmts res_ty
- ; return (HsDo DoExpr stmts' res_ty) }
+ = do { stmts' <- tcStmts DoExpr tcDoStmt stmts res_ty
+ ; return (HsDo DoExpr stmts' res_ty) }
tcDoStmts MDoExpr stmts res_ty
= do { stmts' <- tcStmts MDoExpr tcDoStmt stmts res_ty
; return (HsDo MDoExpr stmts' res_ty) }
tcDoStmts MonadComp stmts res_ty
- = do { stmts' <- tcStmts MonadComp tcMcStmt stmts res_ty
+ = do { stmts' <- tcStmts MonadComp tcMcStmt stmts res_ty
; return (HsDo MonadComp stmts' res_ty) }
tcDoStmts ctxt _ _ = pprPanic "tcDoStmts" (pprStmtContext ctxt)
tcBody :: LHsExpr Name -> TcRhoType -> TcM (LHsExpr TcId)
tcBody body res_ty
- = do { traceTc "tcBody" (ppr res_ty)
- ; body' <- tcMonoExpr body res_ty
- ; return body'
- }
+ = do { traceTc "tcBody" (ppr res_ty)
+ ; body' <- tcMonoExpr body res_ty
+ ; return body'
+ }
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{tcStmts}
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -330,55 +324,55 @@ tcStmtsAndThen :: (Outputable (body Name)) => HsStmtContext Name
-- types in the equations for tcStmts
tcStmtsAndThen _ _ [] res_ty thing_inside
- = do { thing <- thing_inside res_ty
- ; return ([], thing) }
+ = do { thing <- thing_inside res_ty
+ ; return ([], thing) }
-- LetStmts are handled uniformly, regardless of context
tcStmtsAndThen ctxt stmt_chk (L loc (LetStmt binds) : stmts) res_ty thing_inside
- = do { (binds', (stmts',thing)) <- tcLocalBinds binds $
- tcStmtsAndThen ctxt stmt_chk stmts res_ty thing_inside
- ; return (L loc (LetStmt binds') : stmts', thing) }
+ = do { (binds', (stmts',thing)) <- tcLocalBinds binds $
+ tcStmtsAndThen ctxt stmt_chk stmts res_ty thing_inside
+ ; return (L loc (LetStmt binds') : stmts', thing) }
-- For the vanilla case, handle the location-setting part
tcStmtsAndThen ctxt stmt_chk (L loc stmt : stmts) res_ty thing_inside
- = do { (stmt', (stmts', thing)) <-
- setSrcSpan loc $
- addErrCtxt (pprStmtInCtxt ctxt stmt) $
- stmt_chk ctxt stmt res_ty $ \ res_ty' ->
- popErrCtxt $
- tcStmtsAndThen ctxt stmt_chk stmts res_ty' $
- thing_inside
- ; return (L loc stmt' : stmts', thing) }
+ = do { (stmt', (stmts', thing)) <-
+ setSrcSpan loc $
+ addErrCtxt (pprStmtInCtxt ctxt stmt) $
+ stmt_chk ctxt stmt res_ty $ \ res_ty' ->
+ popErrCtxt $
+ tcStmtsAndThen ctxt stmt_chk stmts res_ty' $
+ thing_inside
+ ; return (L loc stmt' : stmts', thing) }
---------------------------------------------------
--- Pattern guards
+-- Pattern guards
---------------------------------------------------
tcGuardStmt :: TcExprStmtChecker
tcGuardStmt _ (BodyStmt guard _ _ _) res_ty thing_inside
- = do { guard' <- tcMonoExpr guard boolTy
- ; thing <- thing_inside res_ty
- ; return (BodyStmt guard' noSyntaxExpr noSyntaxExpr boolTy, thing) }
+ = do { guard' <- tcMonoExpr guard boolTy
+ ; thing <- thing_inside res_ty
+ ; return (BodyStmt guard' noSyntaxExpr noSyntaxExpr boolTy, thing) }
tcGuardStmt ctxt (BindStmt pat rhs _ _) res_ty thing_inside
- = do { (rhs', rhs_ty) <- tcInferRhoNC rhs -- Stmt has a context already
- ; (pat', thing) <- tcPat (StmtCtxt ctxt) pat rhs_ty $
+ = do { (rhs', rhs_ty) <- tcInferRhoNC rhs -- Stmt has a context already
+ ; (pat', thing) <- tcPat (StmtCtxt ctxt) pat rhs_ty $
thing_inside res_ty
- ; return (BindStmt pat' rhs' noSyntaxExpr noSyntaxExpr, thing) }
+ ; return (BindStmt pat' rhs' noSyntaxExpr noSyntaxExpr, thing) }
tcGuardStmt _ stmt _ _
= pprPanic "tcGuardStmt: unexpected Stmt" (ppr stmt)
---------------------------------------------------
--- List comprehensions and PArrays
--- (no rebindable syntax)
+-- List comprehensions and PArrays
+-- (no rebindable syntax)
---------------------------------------------------
-- Dealt with separately, rather than by tcMcStmt, because
-- a) PArr isn't (yet) an instance of Monad, so the generality seems overkill
-- b) We have special desugaring rules for list comprehensions,
--- which avoid creating intermediate lists. They in turn
+-- which avoid creating intermediate lists. They in turn
-- assume that the bind/return operations are the regular
-- polymorphic ones, and in particular don't have any
-- coercion matching stuff in them. It's hard to avoid the
@@ -394,45 +388,45 @@ tcLcStmt _ _ (LastStmt body _) elt_ty thing_inside
-- A generator, pat <- rhs
tcLcStmt m_tc ctxt (BindStmt pat rhs _ _) elt_ty thing_inside
- = do { pat_ty <- newFlexiTyVarTy liftedTypeKind
+ = do { pat_ty <- newFlexiTyVarTy liftedTypeKind
; rhs' <- tcMonoExpr rhs (mkTyConApp m_tc [pat_ty])
- ; (pat', thing) <- tcPat (StmtCtxt ctxt) pat pat_ty $
+ ; (pat', thing) <- tcPat (StmtCtxt ctxt) pat pat_ty $
thing_inside elt_ty
- ; return (BindStmt pat' rhs' noSyntaxExpr noSyntaxExpr, thing) }
+ ; return (BindStmt pat' rhs' noSyntaxExpr noSyntaxExpr, thing) }
-- A boolean guard
tcLcStmt _ _ (BodyStmt rhs _ _ _) elt_ty thing_inside
- = do { rhs' <- tcMonoExpr rhs boolTy
- ; thing <- thing_inside elt_ty
- ; return (BodyStmt rhs' noSyntaxExpr noSyntaxExpr boolTy, thing) }
+ = do { rhs' <- tcMonoExpr rhs boolTy
+ ; thing <- thing_inside elt_ty
+ ; return (BodyStmt rhs' noSyntaxExpr noSyntaxExpr boolTy, thing) }
-- ParStmt: See notes with tcMcStmt
tcLcStmt m_tc ctxt (ParStmt bndr_stmts_s _ _) elt_ty thing_inside
- = do { (pairs', thing) <- loop bndr_stmts_s
- ; return (ParStmt pairs' noSyntaxExpr noSyntaxExpr, thing) }
+ = do { (pairs', thing) <- loop bndr_stmts_s
+ ; return (ParStmt pairs' noSyntaxExpr noSyntaxExpr, thing) }
where
-- loop :: [([LStmt Name], [Name])] -> TcM ([([LStmt TcId], [TcId])], thing)
loop [] = do { thing <- thing_inside elt_ty
- ; return ([], thing) } -- matching in the branches
+ ; return ([], thing) } -- matching in the branches
loop (ParStmtBlock stmts names _ : pairs)
= do { (stmts', (ids, pairs', thing))
- <- tcStmtsAndThen ctxt (tcLcStmt m_tc) stmts elt_ty $ \ _elt_ty' ->
- do { ids <- tcLookupLocalIds names
- ; (pairs', thing) <- loop pairs
- ; return (ids, pairs', thing) }
- ; return ( ParStmtBlock stmts' ids noSyntaxExpr : pairs', thing ) }
+ <- tcStmtsAndThen ctxt (tcLcStmt m_tc) stmts elt_ty $ \ _elt_ty' ->
+ do { ids <- tcLookupLocalIds names
+ ; (pairs', thing) <- loop pairs
+ ; return (ids, pairs', thing) }
+ ; return ( ParStmtBlock stmts' ids noSyntaxExpr : pairs', thing ) }
tcLcStmt m_tc ctxt (TransStmt { trS_form = form, trS_stmts = stmts
, trS_bndrs = bindersMap
, trS_by = by, trS_using = using }) elt_ty thing_inside
= do { let (bndr_names, n_bndr_names) = unzip bindersMap
unused_ty = pprPanic "tcLcStmt: inner ty" (ppr bindersMap)
- -- The inner 'stmts' lack a LastStmt, so the element type
- -- passed in to tcStmtsAndThen is never looked at
+ -- The inner 'stmts' lack a LastStmt, so the element type
+ -- passed in to tcStmtsAndThen is never looked at
; (stmts', (bndr_ids, by'))
<- tcStmtsAndThen (TransStmtCtxt ctxt) (tcLcStmt m_tc) stmts unused_ty $ \_ -> do
- { by' <- case by of
+ { by' <- case by of
Nothing -> return Nothing
Just e -> do { e_ty <- tcInferRho e; return (Just e_ty) }
; bndr_ids <- tcLookupLocalIds bndr_names
@@ -447,7 +441,7 @@ tcLcStmt m_tc ctxt (TransStmt { trS_form = form, trS_stmts = stmts
-- n_app :: Type -> Type -- Wraps a 'ty' into '[ty]' for GroupForm
; let n_app = case form of
ThenForm -> (\ty -> ty)
- _ -> m_app
+ _ -> m_app
by_arrow :: Type -> Type -- Wraps 'ty' to '(a->t) -> ty' if the By is present
by_arrow = case by' of
@@ -456,40 +450,40 @@ tcLcStmt m_tc ctxt (TransStmt { trS_form = form, trS_stmts = stmts
tup_ty = mkBigCoreVarTupTy bndr_ids
poly_arg_ty = m_app alphaTy
- poly_res_ty = m_app (n_app alphaTy)
- using_poly_ty = mkForAllTy alphaTyVar $ by_arrow $
+ poly_res_ty = m_app (n_app alphaTy)
+ using_poly_ty = mkForAllTy alphaTyVar $ by_arrow $
poly_arg_ty `mkFunTy` poly_res_ty
; using' <- tcPolyExpr using using_poly_ty
- ; let final_using = fmap (HsWrap (WpTyApp tup_ty)) using'
+ ; let final_using = fmap (HsWrap (WpTyApp tup_ty)) using'
- -- 'stmts' returns a result of type (m1_ty tuple_ty),
- -- typically something like [(Int,Bool,Int)]
- -- We don't know what tuple_ty is yet, so we use a variable
+ -- 'stmts' returns a result of type (m1_ty tuple_ty),
+ -- typically something like [(Int,Bool,Int)]
+ -- We don't know what tuple_ty is yet, so we use a variable
; let mk_n_bndr :: Name -> TcId -> TcId
mk_n_bndr n_bndr_name bndr_id = mkLocalId n_bndr_name (n_app (idType bndr_id))
-- Ensure that every old binder of type `b` is linked up with its
-- new binder which should have type `n b`
- -- See Note [GroupStmt binder map] in HsExpr
+ -- See Note [GroupStmt binder map] in HsExpr
n_bndr_ids = zipWith mk_n_bndr n_bndr_names bndr_ids
bindersMap' = bndr_ids `zip` n_bndr_ids
- -- Type check the thing in the environment with
+ -- Type check the thing in the environment with
-- these new binders and return the result
; thing <- tcExtendIdEnv n_bndr_ids (thing_inside elt_ty)
- ; return (emptyTransStmt { trS_stmts = stmts', trS_bndrs = bindersMap'
- , trS_by = fmap fst by', trS_using = final_using
+ ; return (emptyTransStmt { trS_stmts = stmts', trS_bndrs = bindersMap'
+ , trS_by = fmap fst by', trS_using = final_using
, trS_form = form }, thing) }
-
+
tcLcStmt _ _ stmt _ _
= pprPanic "tcLcStmt: unexpected Stmt" (ppr stmt)
---------------------------------------------------
--- Monad comprehensions
--- (supports rebindable syntax)
+-- Monad comprehensions
+-- (supports rebindable syntax)
---------------------------------------------------
tcMcStmt :: TcExprStmtChecker
@@ -500,7 +494,7 @@ tcMcStmt _ (LastStmt body return_op) res_ty thing_inside
(a_ty `mkFunTy` res_ty)
; body' <- tcMonoExprNC body a_ty
; thing <- thing_inside (panic "tcMcStmt: thing_inside")
- ; return (LastStmt body' return_op', thing) }
+ ; return (LastStmt body' return_op', thing) }
-- Generators for monad comprehensions ( pat <- rhs )
--
@@ -513,12 +507,12 @@ tcMcStmt ctxt (BindStmt pat rhs bind_op fail_op) res_ty thing_inside
; pat_ty <- newFlexiTyVarTy liftedTypeKind
; new_res_ty <- newFlexiTyVarTy liftedTypeKind
- -- (>>=) :: rhs_ty -> (pat_ty -> new_res_ty) -> res_ty
- ; bind_op' <- tcSyntaxOp MCompOrigin bind_op
+ -- (>>=) :: rhs_ty -> (pat_ty -> new_res_ty) -> res_ty
+ ; bind_op' <- tcSyntaxOp MCompOrigin bind_op
(mkFunTys [rhs_ty, mkFunTy pat_ty new_res_ty] res_ty)
-- If (but only if) the pattern can fail, typecheck the 'fail' operator
- ; fail_op' <- if isIrrefutableHsPat pat
+ ; fail_op' <- if isIrrefutableHsPat pat
then return noSyntaxExpr
else tcSyntaxOp MCompOrigin fail_op (mkFunTy stringTy new_res_ty)
@@ -533,7 +527,7 @@ tcMcStmt ctxt (BindStmt pat rhs bind_op fail_op) res_ty thing_inside
-- [ body | stmts, expr ] -> expr :: m Bool
--
tcMcStmt _ (BodyStmt rhs then_op guard_op _) res_ty thing_inside
- = do { -- Deal with rebindable syntax:
+ = do { -- Deal with rebindable syntax:
-- guard_op :: test_ty -> rhs_ty
-- then_op :: rhs_ty -> new_res_ty -> res_ty
-- Where test_ty is, for example, Bool
@@ -544,9 +538,9 @@ tcMcStmt _ (BodyStmt rhs then_op guard_op _) res_ty thing_inside
; guard_op' <- tcSyntaxOp MCompOrigin guard_op
(mkFunTy test_ty rhs_ty)
; then_op' <- tcSyntaxOp MCompOrigin then_op
- (mkFunTys [rhs_ty, new_res_ty] res_ty)
- ; thing <- thing_inside new_res_ty
- ; return (BodyStmt rhs' then_op' guard_op' rhs_ty, thing) }
+ (mkFunTys [rhs_ty, new_res_ty] res_ty)
+ ; thing <- thing_inside new_res_ty
+ ; return (BodyStmt rhs' then_op' guard_op' rhs_ty, thing) }
-- Grouping statements
--
@@ -560,14 +554,14 @@ tcMcStmt _ (BodyStmt rhs then_op guard_op _) res_ty thing_inside
-- f <optional by> [ (a,b,c) | stmts ] >>= \(a,b,c) -> ...body....
--
-- We type the functions as follows:
--- f <optional by> :: m1 (a,b,c) -> m2 (a,b,c) (ThenForm)
--- :: m1 (a,b,c) -> m2 (n (a,b,c)) (GroupForm)
--- (>>=) :: m2 (a,b,c) -> ((a,b,c) -> res) -> res (ThenForm)
--- :: m2 (n (a,b,c)) -> (n (a,b,c) -> res) -> res (GroupForm)
---
+-- f <optional by> :: m1 (a,b,c) -> m2 (a,b,c) (ThenForm)
+-- :: m1 (a,b,c) -> m2 (n (a,b,c)) (GroupForm)
+-- (>>=) :: m2 (a,b,c) -> ((a,b,c) -> res) -> res (ThenForm)
+-- :: m2 (n (a,b,c)) -> (n (a,b,c) -> res) -> res (GroupForm)
+--
tcMcStmt ctxt (TransStmt { trS_stmts = stmts, trS_bndrs = bindersMap
, trS_by = by, trS_using = using, trS_form = form
- , trS_ret = return_op, trS_bind = bind_op
+ , trS_ret = return_op, trS_bind = bind_op
, trS_fmap = fmap_op }) res_ty thing_inside
= do { let star_star_kind = liftedTypeKind `mkArrowKind` liftedTypeKind
; m1_ty <- newFlexiTyVarTy star_star_kind
@@ -578,29 +572,29 @@ tcMcStmt ctxt (TransStmt { trS_stmts = stmts, trS_bndrs = bindersMap
-- n_app :: Type -> Type -- Wraps a 'ty' into '(n ty)' for GroupForm
; n_app <- case form of
ThenForm -> return (\ty -> ty)
- _ -> do { n_ty <- newFlexiTyVarTy star_star_kind
- ; return (n_ty `mkAppTy`) }
- ; let by_arrow :: Type -> Type
+ _ -> do { n_ty <- newFlexiTyVarTy star_star_kind
+ ; return (n_ty `mkAppTy`) }
+ ; let by_arrow :: Type -> Type
-- (by_arrow res) produces ((alpha->e_ty) -> res) ('by' present)
- -- or res ('by' absent)
+ -- or res ('by' absent)
by_arrow = case by of
Nothing -> \res -> res
Just {} -> \res -> (alphaTy `mkFunTy` by_e_ty) `mkFunTy` res
poly_arg_ty = m1_ty `mkAppTy` alphaTy
using_arg_ty = m1_ty `mkAppTy` tup_ty
- poly_res_ty = m2_ty `mkAppTy` n_app alphaTy
- using_res_ty = m2_ty `mkAppTy` n_app tup_ty
- using_poly_ty = mkForAllTy alphaTyVar $ by_arrow $
+ poly_res_ty = m2_ty `mkAppTy` n_app alphaTy
+ using_res_ty = m2_ty `mkAppTy` n_app tup_ty
+ using_poly_ty = mkForAllTy alphaTyVar $ by_arrow $
poly_arg_ty `mkFunTy` poly_res_ty
- -- 'stmts' returns a result of type (m1_ty tuple_ty),
- -- typically something like [(Int,Bool,Int)]
- -- We don't know what tuple_ty is yet, so we use a variable
+ -- 'stmts' returns a result of type (m1_ty tuple_ty),
+ -- typically something like [(Int,Bool,Int)]
+ -- We don't know what tuple_ty is yet, so we use a variable
; let (bndr_names, n_bndr_names) = unzip bindersMap
; (stmts', (bndr_ids, by', return_op')) <-
tcStmtsAndThen (TransStmtCtxt ctxt) tcMcStmt stmts using_arg_ty $ \res_ty' -> do
- { by' <- case by of
+ { by' <- case by of
Nothing -> return Nothing
Just e -> do { e' <- tcMonoExpr e by_e_ty; return (Just e') }
@@ -609,7 +603,7 @@ tcMcStmt ctxt (TransStmt { trS_stmts = stmts, trS_bndrs = bindersMap
-- 'return' is only used for the binders, so we know its type.
-- return :: (a,b,c,..) -> m (a,b,c,..)
- ; return_op' <- tcSyntaxOp MCompOrigin return_op $
+ ; return_op' <- tcSyntaxOp MCompOrigin return_op $
(mkBigCoreVarTupTy bndr_ids) `mkFunTy` res_ty'
; return (bndr_ids, by', return_op') }
@@ -634,7 +628,7 @@ tcMcStmt ctxt (TransStmt { trS_stmts = stmts, trS_bndrs = bindersMap
-- using :: ((a,b,c)->t) -> m1 (a,b,c) -> m2 (n (a,b,c))
; using' <- tcPolyExpr using using_poly_ty
- ; let final_using = fmap (HsWrap (WpTyApp tup_ty)) using'
+ ; let final_using = fmap (HsWrap (WpTyApp tup_ty)) using'
--------------- Bulding the bindersMap ----------------
; let mk_n_bndr :: Name -> TcId -> TcId
@@ -642,36 +636,36 @@ tcMcStmt ctxt (TransStmt { trS_stmts = stmts, trS_bndrs = bindersMap
-- Ensure that every old binder of type `b` is linked up with its
-- new binder which should have type `n b`
- -- See Note [GroupStmt binder map] in HsExpr
+ -- See Note [GroupStmt binder map] in HsExpr
n_bndr_ids = zipWith mk_n_bndr n_bndr_names bndr_ids
bindersMap' = bndr_ids `zip` n_bndr_ids
- -- Type check the thing in the environment with
+ -- Type check the thing in the environment with
-- these new binders and return the result
; thing <- tcExtendIdEnv n_bndr_ids (thing_inside new_res_ty)
- ; return (TransStmt { trS_stmts = stmts', trS_bndrs = bindersMap'
- , trS_by = by', trS_using = final_using
+ ; return (TransStmt { trS_stmts = stmts', trS_bndrs = bindersMap'
+ , trS_by = by', trS_using = final_using
, trS_ret = return_op', trS_bind = bind_op'
, trS_fmap = fmap_op', trS_form = form }, thing) }
-- A parallel set of comprehensions
--- [ (g x, h x) | ... ; let g v = ...
--- | ... ; let h v = ... ]
+-- [ (g x, h x) | ... ; let g v = ...
+-- | ... ; let h v = ... ]
--
-- It's possible that g,h are overloaded, so we need to feed the LIE from the
-- (g x, h x) up through both lots of bindings (so we get the bindLocalMethods).
-- Similarly if we had an existential pattern match:
--
--- data T = forall a. Show a => C a
+-- data T = forall a. Show a => C a
--
--- [ (show x, show y) | ... ; C x <- ...
--- | ... ; C y <- ... ]
+-- [ (show x, show y) | ... ; C x <- ...
+-- | ... ; C y <- ... ]
--
-- Then we need the LIE from (show x, show y) to be simplified against
--- the bindings for x and y.
---
--- It's difficult to do this in parallel, so we rely on the renamer to
+-- the bindings for x and y.
+--
+-- It's difficult to do this in parallel, so we rely on the renamer to
-- ensure that g,h and x,y don't duplicate, and simply grow the environment.
-- So the binders of the first parallel group will be in scope in the second
-- group. But that's fine; there's no shadowing to worry about.
@@ -679,7 +673,7 @@ tcMcStmt ctxt (TransStmt { trS_stmts = stmts, trS_bndrs = bindersMap
-- Note: The `mzip` function will get typechecked via:
--
-- ParStmt [st1::t1, st2::t2, st3::t3]
---
+--
-- mzip :: m st1
-- -> (m st2 -> m st3 -> m (st2, st3)) -- recursive call
-- -> m (st1, (st2, st3))
@@ -709,7 +703,7 @@ tcMcStmt ctxt (ParStmt bndr_stmts_s mzip_op bind_op) res_ty thing_inside
; return (ParStmt blocks' mzip_op' bind_op', thing) }
- where
+ where
mk_tuple_ty tys = foldr1 (\tn tm -> mkBoxedTupleTy [tn, tm]) tys
-- loop :: Type -- m_ty
@@ -725,7 +719,7 @@ tcMcStmt ctxt (ParStmt bndr_stmts_s mzip_op bind_op) res_ty thing_inside
; (stmts', (ids, return_op', pairs', thing))
<- tcStmtsAndThen ctxt tcMcStmt stmts m_tup_ty $ \m_tup_ty' ->
do { ids <- tcLookupLocalIds names
- ; let tup_ty = mkBigCoreVarTupTy ids
+ ; let tup_ty = mkBigCoreVarTupTy ids
; return_op' <- tcSyntaxOp MCompOrigin return_op
(tup_ty `mkFunTy` m_tup_ty')
; (pairs', thing) <- loop m_ty pairs
@@ -737,8 +731,8 @@ tcMcStmt _ stmt _ _
---------------------------------------------------
--- Do-notation
--- (supports rebindable syntax)
+-- Do-notation
+-- (supports rebindable syntax)
---------------------------------------------------
tcDoStmt :: TcExprStmtChecker
@@ -749,82 +743,82 @@ tcDoStmt _ (LastStmt body _) res_ty thing_inside
; return (LastStmt body' noSyntaxExpr, thing) }
tcDoStmt ctxt (BindStmt pat rhs bind_op fail_op) res_ty thing_inside
- = do { -- Deal with rebindable syntax:
- -- (>>=) :: rhs_ty -> (pat_ty -> new_res_ty) -> res_ty
- -- This level of generality is needed for using do-notation
- -- in full generality; see Trac #1537
-
- -- I'd like to put this *after* the tcSyntaxOp
- -- (see Note [Treat rebindable syntax first], but that breaks
- -- the rigidity info for GADTs. When we move to the new story
+ = do { -- Deal with rebindable syntax:
+ -- (>>=) :: rhs_ty -> (pat_ty -> new_res_ty) -> res_ty
+ -- This level of generality is needed for using do-notation
+ -- in full generality; see Trac #1537
+
+ -- I'd like to put this *after* the tcSyntaxOp
+ -- (see Note [Treat rebindable syntax first], but that breaks
+ -- the rigidity info for GADTs. When we move to the new story
-- for GADTs, we can move this after tcSyntaxOp
rhs_ty <- newFlexiTyVarTy liftedTypeKind
; pat_ty <- newFlexiTyVarTy liftedTypeKind
; new_res_ty <- newFlexiTyVarTy liftedTypeKind
- ; bind_op' <- tcSyntaxOp DoOrigin bind_op
- (mkFunTys [rhs_ty, mkFunTy pat_ty new_res_ty] res_ty)
+ ; bind_op' <- tcSyntaxOp DoOrigin bind_op
+ (mkFunTys [rhs_ty, mkFunTy pat_ty new_res_ty] res_ty)
- -- If (but only if) the pattern can fail,
- -- typecheck the 'fail' operator
- ; fail_op' <- if isIrrefutableHsPat pat
- then return noSyntaxExpr
- else tcSyntaxOp DoOrigin fail_op (mkFunTy stringTy new_res_ty)
+ -- If (but only if) the pattern can fail,
+ -- typecheck the 'fail' operator
+ ; fail_op' <- if isIrrefutableHsPat pat
+ then return noSyntaxExpr
+ else tcSyntaxOp DoOrigin fail_op (mkFunTy stringTy new_res_ty)
; rhs' <- tcMonoExprNC rhs rhs_ty
- ; (pat', thing) <- tcPat (StmtCtxt ctxt) pat pat_ty $
+ ; (pat', thing) <- tcPat (StmtCtxt ctxt) pat pat_ty $
thing_inside new_res_ty
- ; return (BindStmt pat' rhs' bind_op' fail_op', thing) }
+ ; return (BindStmt pat' rhs' bind_op' fail_op', thing) }
tcDoStmt _ (BodyStmt rhs then_op _ _) res_ty thing_inside
- = do { -- Deal with rebindable syntax;
+ = do { -- Deal with rebindable syntax;
-- (>>) :: rhs_ty -> new_res_ty -> res_ty
- -- See also Note [Treat rebindable syntax first]
+ -- See also Note [Treat rebindable syntax first]
rhs_ty <- newFlexiTyVarTy liftedTypeKind
; new_res_ty <- newFlexiTyVarTy liftedTypeKind
- ; then_op' <- tcSyntaxOp DoOrigin then_op
- (mkFunTys [rhs_ty, new_res_ty] res_ty)
+ ; then_op' <- tcSyntaxOp DoOrigin then_op
+ (mkFunTys [rhs_ty, new_res_ty] res_ty)
; rhs' <- tcMonoExprNC rhs rhs_ty
- ; thing <- thing_inside new_res_ty
- ; return (BodyStmt rhs' then_op' noSyntaxExpr rhs_ty, thing) }
+ ; thing <- thing_inside new_res_ty
+ ; return (BodyStmt rhs' then_op' noSyntaxExpr rhs_ty, thing) }
tcDoStmt ctxt (RecStmt { recS_stmts = stmts, recS_later_ids = later_names
, recS_rec_ids = rec_names, recS_ret_fn = ret_op
- , recS_mfix_fn = mfix_op, recS_bind_fn = bind_op })
+ , recS_mfix_fn = mfix_op, recS_bind_fn = bind_op })
res_ty thing_inside
= do { let tup_names = rec_names ++ filterOut (`elem` rec_names) later_names
; tup_elt_tys <- newFlexiTyVarTys (length tup_names) liftedTypeKind
; let tup_ids = zipWith mkLocalId tup_names tup_elt_tys
- tup_ty = mkBigCoreTupTy tup_elt_tys
+ tup_ty = mkBigCoreTupTy tup_elt_tys
; tcExtendIdEnv tup_ids $ do
{ stmts_ty <- newFlexiTyVarTy liftedTypeKind
; (stmts', (ret_op', tup_rets))
<- tcStmtsAndThen ctxt tcDoStmt stmts stmts_ty $ \ inner_res_ty ->
do { tup_rets <- zipWithM tcCheckId tup_names tup_elt_tys
- -- Unify the types of the "final" Ids (which may
+ -- Unify the types of the "final" Ids (which may
-- be polymorphic) with those of "knot-tied" Ids
- ; ret_op' <- tcSyntaxOp DoOrigin ret_op (mkFunTy tup_ty inner_res_ty)
+ ; ret_op' <- tcSyntaxOp DoOrigin ret_op (mkFunTy tup_ty inner_res_ty)
; return (ret_op', tup_rets) }
- ; mfix_res_ty <- newFlexiTyVarTy liftedTypeKind
+ ; mfix_res_ty <- newFlexiTyVarTy liftedTypeKind
; mfix_op' <- tcSyntaxOp DoOrigin mfix_op
(mkFunTy (mkFunTy tup_ty stmts_ty) mfix_res_ty)
- ; new_res_ty <- newFlexiTyVarTy liftedTypeKind
- ; bind_op' <- tcSyntaxOp DoOrigin bind_op
- (mkFunTys [mfix_res_ty, mkFunTy tup_ty new_res_ty] res_ty)
+ ; new_res_ty <- newFlexiTyVarTy liftedTypeKind
+ ; bind_op' <- tcSyntaxOp DoOrigin bind_op
+ (mkFunTys [mfix_res_ty, mkFunTy tup_ty new_res_ty] res_ty)
; thing <- thing_inside new_res_ty
-
+
; let rec_ids = takeList rec_names tup_ids
- ; later_ids <- tcLookupLocalIds later_names
- ; traceTc "tcdo" $ vcat [ppr rec_ids <+> ppr (map idType rec_ids),
+ ; later_ids <- tcLookupLocalIds later_names
+ ; traceTc "tcdo" $ vcat [ppr rec_ids <+> ppr (map idType rec_ids),
ppr later_ids <+> ppr (map idType later_ids)]
; return (RecStmt { recS_stmts = stmts', recS_later_ids = later_ids
- , recS_rec_ids = rec_ids, recS_ret_fn = ret_op'
+ , recS_rec_ids = rec_ids, recS_ret_fn = ret_op'
, recS_mfix_fn = mfix_op', recS_bind_fn = bind_op'
, recS_later_rets = [], recS_rec_rets = tup_rets
, recS_ret_ty = stmts_ty }, thing)
@@ -837,7 +831,7 @@ tcDoStmt _ stmt _ _
Note [Treat rebindable syntax first]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When typechecking
- do { bar; ... } :: IO ()
+ do { bar; ... } :: IO ()
we want to typecheck 'bar' in the knowledge that it should be an IO thing,
pushing info from the context into the RHS. To do this, we check the
rebindable syntax first, and push that information into (tcMonoExprNC rhs).
@@ -846,9 +840,9 @@ the expected/inferred stuff is back to front (see Trac #3613).
%************************************************************************
-%* *
+%* *
\subsection{Errors and contexts}
-%* *
+%* *
%************************************************************************
@sameNoOfArgs@ takes a @[RenamedMatch]@ and decides whether the same
@@ -859,13 +853,13 @@ checkArgs :: Name -> MatchGroup Name body -> TcM ()
checkArgs _ (MG { mg_alts = [] })
= return ()
checkArgs fun (MG { mg_alts = match1:matches })
- | null bad_matches
+ | null bad_matches
= return ()
| otherwise
- = failWithTc (vcat [ptext (sLit "Equations for") <+> quotes (ppr fun) <+>
- ptext (sLit "have different numbers of arguments"),
- nest 2 (ppr (getLoc match1)),
- nest 2 (ppr (getLoc (head bad_matches)))])
+ = failWithTc (vcat [ptext (sLit "Equations for") <+> quotes (ppr fun) <+>
+ ptext (sLit "have different numbers of arguments"),
+ nest 2 (ppr (getLoc match1)),
+ nest 2 (ppr (getLoc (head bad_matches)))])
where
n_args1 = args_in_match match1
bad_matches = [m | m <- matches, args_in_match m /= n_args1]
diff --git a/compiler/typecheck/TcRules.lhs b/compiler/typecheck/TcRules.lhs
index 47b38f114b..3b405b3dda 100644
--- a/compiler/typecheck/TcRules.lhs
+++ b/compiler/typecheck/TcRules.lhs
@@ -6,13 +6,6 @@
TcRules: Typechecking transformation rules
\begin{code}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
module TcRules ( tcRules ) where
import HsSyn
@@ -35,7 +28,7 @@ import Data.List( partition )
Note [Typechecking rules]
~~~~~~~~~~~~~~~~~~~~~~~~~
-We *infer* the typ of the LHS, and use that type to *check* the type of
+We *infer* the typ of the LHS, and use that type to *check* the type of
the RHS. That means that higher-rank rules work reasonably well. Here's
an example (test simplCore/should_compile/rule2.hs) produced by Roman:
@@ -62,41 +55,41 @@ amount of simplification, so simplifyRuleLhs just sets the flag
appropriately.
Example. Consider the following left-hand side of a rule
- f (x == y) (y > z) = ...
+ f (x == y) (y > z) = ...
If we typecheck this expression we get constraints
- d1 :: Ord a, d2 :: Eq a
+ d1 :: Ord a, d2 :: Eq a
We do NOT want to "simplify" to the LHS
- forall x::a, y::a, z::a, d1::Ord a.
- f ((==) (eqFromOrd d1) x y) ((>) d1 y z) = ...
-Instead we want
- forall x::a, y::a, z::a, d1::Ord a, d2::Eq a.
- f ((==) d2 x y) ((>) d1 y z) = ...
+ forall x::a, y::a, z::a, d1::Ord a.
+ f ((==) (eqFromOrd d1) x y) ((>) d1 y z) = ...
+Instead we want
+ forall x::a, y::a, z::a, d1::Ord a, d2::Eq a.
+ f ((==) d2 x y) ((>) d1 y z) = ...
Here is another example:
- fromIntegral :: (Integral a, Num b) => a -> b
- {-# RULES "foo" fromIntegral = id :: Int -> Int #-}
+ fromIntegral :: (Integral a, Num b) => a -> b
+ {-# RULES "foo" fromIntegral = id :: Int -> Int #-}
In the rule, a=b=Int, and Num Int is a superclass of Integral Int. But
we *dont* want to get
- forall dIntegralInt.
- fromIntegral Int Int dIntegralInt (scsel dIntegralInt) = id Int
+ forall dIntegralInt.
+ fromIntegral Int Int dIntegralInt (scsel dIntegralInt) = id Int
because the scsel will mess up RULE matching. Instead we want
- forall dIntegralInt, dNumInt.
- fromIntegral Int Int dIntegralInt dNumInt = id Int
+ forall dIntegralInt, dNumInt.
+ fromIntegral Int Int dIntegralInt dNumInt = id Int
-Even if we have
- g (x == y) (y == z) = ..
+Even if we have
+ g (x == y) (y == z) = ..
where the two dictionaries are *identical*, we do NOT WANT
- forall x::a, y::a, z::a, d1::Eq a
- f ((==) d1 x y) ((>) d1 y z) = ...
+ forall x::a, y::a, z::a, d1::Eq a
+ f ((==) d1 x y) ((>) d1 y z) = ...
because that will only match if the dict args are (visibly) equal.
Instead we want to quantify over the dictionaries separately.
In short, simplifyRuleLhs must *only* squash equalities, leaving
-all dicts unchanged, with absolutely no sharing.
+all dicts unchanged, with absolutely no sharing.
Also note that we can't solve the LHS constraints in isolation:
Example foo :: Ord a => a -> a
- foo_spec :: Int -> Int
+ foo_spec :: Int -> Int
{-# RULE "foo" foo = foo_spec #-}
Here, it's the RHS that fixes the type variable
@@ -107,8 +100,8 @@ Consider
f b True = ...
#-}
Here we *must* solve the wanted (Eq a) from the given (Eq a)
-resulting from skolemising the agument type of g. So we
-revert to SimplCheck when going under an implication.
+resulting from skolemising the agument type of g. So we
+revert to SimplCheck when going under an implication.
------------------------ So the plan is this -----------------------
@@ -131,10 +124,10 @@ tcRules decls = mapM (wrapLocM tcRule) decls
tcRule :: RuleDecl Name -> TcM (RuleDecl TcId)
tcRule (HsRule name act hs_bndrs lhs fv_lhs rhs fv_rhs)
- = addErrCtxt (ruleCtxt name) $
+ = addErrCtxt (ruleCtxt name) $
do { traceTc "---- Rule ------" (ppr name)
- -- Note [Typechecking rules]
+ -- Note [Typechecking rules]
; vars <- tcRuleBndrs hs_bndrs
; let (id_bndrs, tv_bndrs) = partition isId vars
; (lhs', lhs_wanted, rhs', rhs_wanted, rule_ty)
@@ -146,17 +139,17 @@ tcRule (HsRule name act hs_bndrs lhs fv_lhs rhs fv_rhs)
; (lhs_evs, other_lhs_wanted) <- simplifyRule name lhs_wanted rhs_wanted
- -- Now figure out what to quantify over
- -- c.f. TcSimplify.simplifyInfer
- -- We quantify over any tyvars free in *either* the rule
- -- *or* the bound variables. The latter is important. Consider
- -- ss (x,(y,z)) = (x,z)
- -- RULE: forall v. fst (ss v) = fst v
- -- The type of the rhs of the rule is just a, but v::(a,(b,c))
- --
- -- We also need to get the completely-uconstrained tyvars of
- -- the LHS, lest they otherwise get defaulted to Any; but we do that
- -- during zonking (see TcHsSyn.zonkRule)
+ -- Now figure out what to quantify over
+ -- c.f. TcSimplify.simplifyInfer
+ -- We quantify over any tyvars free in *either* the rule
+ -- *or* the bound variables. The latter is important. Consider
+ -- ss (x,(y,z)) = (x,z)
+ -- RULE: forall v. fst (ss v) = fst v
+ -- The type of the rhs of the rule is just a, but v::(a,(b,c))
+ --
+ -- We also need to get the completely-uconstrained tyvars of
+ -- the LHS, lest they otherwise get defaulted to Any; but we do that
+ -- during zonking (see TcHsSyn.zonkRule)
; let tpl_ids = lhs_evs ++ id_bndrs
forall_tvs = tyVarsOfTypes (rule_ty : map idType tpl_ids)
@@ -167,7 +160,7 @@ tcRule (HsRule name act hs_bndrs lhs fv_lhs rhs fv_rhs)
, ppr forall_tvs
, ppr qtkvs
, ppr rule_ty
- , vcat [ ppr id <+> dcolon <+> ppr (idType id) | id <- tpl_ids ]
+ , vcat [ ppr id <+> dcolon <+> ppr (idType id) | id <- tpl_ids ]
])
-- Simplify the RHS constraints
@@ -182,7 +175,7 @@ tcRule (HsRule name act hs_bndrs lhs fv_lhs rhs fv_rhs)
, ic_insol = insolubleWC rhs_wanted
, ic_binds = rhs_binds_var
, ic_info = RuleSkol name
- , ic_env = lcl_env }
+ , ic_env = lcl_env }
-- For the LHS constraints we must solve the remaining constraints
-- (a) so that we report insoluble ones
@@ -197,39 +190,39 @@ tcRule (HsRule name act hs_bndrs lhs fv_lhs rhs fv_rhs)
, ic_insol = insolubleWC other_lhs_wanted
, ic_binds = lhs_binds_var
, ic_info = RuleSkol name
- , ic_env = lcl_env }
+ , ic_env = lcl_env }
; return (HsRule name act
- (map (RuleBndr . noLoc) (qtkvs ++ tpl_ids))
- (mkHsDictLet (TcEvBinds lhs_binds_var) lhs') fv_lhs
- (mkHsDictLet (TcEvBinds rhs_binds_var) rhs') fv_rhs) }
+ (map (RuleBndr . noLoc) (qtkvs ++ tpl_ids))
+ (mkHsDictLet (TcEvBinds lhs_binds_var) lhs') fv_lhs
+ (mkHsDictLet (TcEvBinds rhs_binds_var) rhs') fv_rhs) }
tcRuleBndrs :: [RuleBndr Name] -> TcM [Var]
-tcRuleBndrs []
+tcRuleBndrs []
= return []
tcRuleBndrs (RuleBndr (L _ name) : rule_bndrs)
- = do { ty <- newFlexiTyVarTy openTypeKind
+ = do { ty <- newFlexiTyVarTy openTypeKind
; vars <- tcRuleBndrs rule_bndrs
- ; return (mkLocalId name ty : vars) }
+ ; return (mkLocalId name ty : vars) }
tcRuleBndrs (RuleBndrSig (L _ name) rn_ty : rule_bndrs)
--- e.g x :: a->a
+-- e.g x :: a->a
-- The tyvar 'a' is brought into scope first, just as if you'd written
--- a::*, x :: a->a
- = do { let ctxt = RuleSigCtxt name
- ; (id_ty, tv_prs) <- tcHsPatSigType ctxt rn_ty
+-- a::*, x :: a->a
+ = do { let ctxt = RuleSigCtxt name
+ ; (id_ty, tv_prs) <- tcHsPatSigType ctxt rn_ty
; let id = mkLocalId name id_ty
- tvs = map snd tv_prs
+ tvs = map snd tv_prs
-- tcHsPatSigType returns (Name,TyVar) pairs
-- for for RuleSigCtxt their Names are not
-- cloned, so we get (n, tv-with-name-n) pairs
-- See Note [Pattern signature binders] in TcHsType
- -- The type variables scope over subsequent bindings; yuk
- ; vars <- tcExtendTyVarEnv tvs $
- tcRuleBndrs rule_bndrs
- ; return (tvs ++ id : vars) }
+ -- The type variables scope over subsequent bindings; yuk
+ ; vars <- tcExtendTyVarEnv tvs $
+ tcRuleBndrs rule_bndrs
+ ; return (tvs ++ id : vars) }
ruleCtxt :: FastString -> SDoc
-ruleCtxt name = ptext (sLit "When checking the transformation rule") <+>
- doubleQuotes (ftext name)
+ruleCtxt name = ptext (sLit "When checking the transformation rule") <+>
+ doubleQuotes (ftext name)
\end{code}
diff --git a/compiler/types/Class.lhs b/compiler/types/Class.lhs
index 9863b8d98f..5fa1c946cc 100644
--- a/compiler/types/Class.lhs
+++ b/compiler/types/Class.lhs
@@ -7,32 +7,26 @@ The @Class@ datatype
\begin{code}
{-# LANGUAGE CPP, DeriveDataTypeable #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
module Class (
- Class,
+ Class,
ClassOpItem, DefMeth (..),
ClassATItem(..),
ClassMinimalDef,
- defMethSpecOfDefMeth,
+ defMethSpecOfDefMeth,
- FunDep, pprFundeps, pprFunDep,
+ FunDep, pprFundeps, pprFunDep,
- mkClass, classTyVars, classArity,
- classKey, className, classATs, classATItems, classTyCon, classMethods,
- classOpItems, classBigSig, classExtraBigSig, classTvsFds, classSCTheta,
+ mkClass, classTyVars, classArity,
+ classKey, className, classATs, classATItems, classTyCon, classMethods,
+ classOpItems, classBigSig, classExtraBigSig, classTvsFds, classSCTheta,
classAllSelIds, classSCSelId, classMinimalDef
) where
#include "HsVersions.h"
-import {-# SOURCE #-} TyCon ( TyCon, tyConName, tyConUnique )
-import {-# SOURCE #-} TypeRep ( Type, PredType )
+import {-# SOURCE #-} TyCon ( TyCon, tyConName, tyConUnique )
+import {-# SOURCE #-} TypeRep ( Type, PredType )
import Var
import Name
import BasicTypes
@@ -47,9 +41,9 @@ import qualified Data.Data as Data
\end{code}
%************************************************************************
-%* *
+%* *
\subsection[Class-basic]{@Class@: basic definition}
-%* *
+%* *
%************************************************************************
A @Class@ corresponds to a Greek kappa in the static semantics:
@@ -57,46 +51,46 @@ A @Class@ corresponds to a Greek kappa in the static semantics:
\begin{code}
data Class
= Class {
- classTyCon :: TyCon, -- The data type constructor for
- -- dictionaries of this class
+ classTyCon :: TyCon, -- The data type constructor for
+ -- dictionaries of this class
-- See Note [ATyCon for classes] in TypeRep
- className :: Name, -- Just the cached name of the TyCon
- classKey :: Unique, -- Cached unique of TyCon
-
- classTyVars :: [TyVar], -- The class kind and type variables;
- -- identical to those of the TyCon
+ className :: Name, -- Just the cached name of the TyCon
+ classKey :: Unique, -- Cached unique of TyCon
- classFunDeps :: [FunDep TyVar], -- The functional dependencies
+ classTyVars :: [TyVar], -- The class kind and type variables;
+ -- identical to those of the TyCon
- -- Superclasses: eg: (F a ~ b, F b ~ G a, Eq a, Show b)
- -- We need value-level selectors for both the dictionary
- -- superclasses and the equality superclasses
- classSCTheta :: [PredType], -- Immediate superclasses,
- classSCSels :: [Id], -- Selector functions to extract the
- -- superclasses from a
- -- dictionary of this class
- -- Associated types
- classATStuff :: [ClassATItem], -- Associated type families
+ classFunDeps :: [FunDep TyVar], -- The functional dependencies
+
+ -- Superclasses: eg: (F a ~ b, F b ~ G a, Eq a, Show b)
+ -- We need value-level selectors for both the dictionary
+ -- superclasses and the equality superclasses
+ classSCTheta :: [PredType], -- Immediate superclasses,
+ classSCSels :: [Id], -- Selector functions to extract the
+ -- superclasses from a
+ -- dictionary of this class
+ -- Associated types
+ classATStuff :: [ClassATItem], -- Associated type families
-- Class operations (methods, not superclasses)
- classOpStuff :: [ClassOpItem], -- Ordered by tag
+ classOpStuff :: [ClassOpItem], -- Ordered by tag
- -- Minimal complete definition
- classMinimalDef :: ClassMinimalDef
+ -- Minimal complete definition
+ classMinimalDef :: ClassMinimalDef
}
deriving Typeable
type FunDep a = ([a],[a]) -- e.g. class C a b c | a b -> c, a c -> b where...
- -- Here fun-deps are [([a,b],[c]), ([a,c],[b])]
+ -- Here fun-deps are [([a,b],[c]), ([a,c],[b])]
type ClassOpItem = (Id, DefMeth)
-- Selector function; contains unfolding
- -- Default-method info
+ -- Default-method info
-data DefMeth = NoDefMeth -- No default method
- | DefMeth Name -- A polymorphic default method
- | GenDefMeth Name -- A generic default method
+data DefMeth = NoDefMeth -- No default method
+ | DefMeth Name -- A polymorphic default method
+ | GenDefMeth Name -- A generic default method
deriving Eq
data ClassATItem
@@ -111,9 +105,9 @@ type ClassMinimalDef = BooleanFormula Name -- Required methods
defMethSpecOfDefMeth :: DefMeth -> DefMethSpec
defMethSpecOfDefMeth meth
= case meth of
- NoDefMeth -> NoDM
- DefMeth _ -> VanillaDM
- GenDefMeth _ -> GenericDM
+ NoDefMeth -> NoDM
+ DefMeth _ -> VanillaDM
+ GenDefMeth _ -> GenericDM
\end{code}
Note [Associated type defaults]
@@ -181,7 +175,7 @@ parent class. Thus
type F b x a :: *
We make F use the same Name for 'a' as C does, and similary 'b'.
-The reason for this is when checking instances it's easier to match
+The reason for this is when checking instances it's easier to match
them up, to ensure they match. Eg
instance C Int [d] where
type F [d] x Int = ....
@@ -193,9 +187,9 @@ Having the same variables for class and tycon is also used in checkValidRoles
%************************************************************************
-%* *
+%* *
\subsection[Class-selectors]{@Class@: simple selectors}
-%* *
+%* *
%************************************************************************
The rest of these functions are just simple selectors.
@@ -203,7 +197,7 @@ The rest of these functions are just simple selectors.
\begin{code}
classArity :: Class -> Arity
classArity clas = length (classTyVars clas)
- -- Could memoise this
+ -- Could memoise this
classAllSelIds :: Class -> [Id]
-- Both superclass-dictionary and method selectors
@@ -212,7 +206,7 @@ classAllSelIds c@(Class {classSCSels = sc_sels})
classSCSelId :: Class -> Int -> Id
-- Get the n'th superclass selector Id
--- where n is 0-indexed, and counts
+-- where n is 0-indexed, and counts
-- *all* superclasses including equalities
classSCSelId (Class { classSCSels = sc_sels }) n
= ASSERT( n >= 0 && n < length sc_sels )
@@ -237,22 +231,22 @@ classTvsFds c
= (classTyVars c, classFunDeps c)
classBigSig :: Class -> ([TyVar], [PredType], [Id], [ClassOpItem])
-classBigSig (Class {classTyVars = tyvars, classSCTheta = sc_theta,
- classSCSels = sc_sels, classOpStuff = op_stuff})
+classBigSig (Class {classTyVars = tyvars, classSCTheta = sc_theta,
+ classSCSels = sc_sels, classOpStuff = op_stuff})
= (tyvars, sc_theta, sc_sels, op_stuff)
classExtraBigSig :: Class -> ([TyVar], [FunDep TyVar], [PredType], [Id], [ClassATItem], [ClassOpItem])
classExtraBigSig (Class {classTyVars = tyvars, classFunDeps = fundeps,
- classSCTheta = sc_theta, classSCSels = sc_sels,
- classATStuff = ats, classOpStuff = op_stuff})
+ classSCTheta = sc_theta, classSCSels = sc_sels,
+ classATStuff = ats, classOpStuff = op_stuff})
= (tyvars, fundeps, sc_theta, sc_sels, ats, op_stuff)
\end{code}
%************************************************************************
-%* *
+%* *
\subsection[Class-instances]{Instance declarations for @Class@}
-%* *
+%* *
%************************************************************************
We compare @Classes@ by their keys (which include @Uniques@).
diff --git a/compiler/types/OptCoercion.lhs b/compiler/types/OptCoercion.lhs
index 5cc2e64afa..1c88f46d6a 100644
--- a/compiler/types/OptCoercion.lhs
+++ b/compiler/types/OptCoercion.lhs
@@ -4,14 +4,8 @@
\begin{code}
{-# LANGUAGE CPP #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-module OptCoercion ( optCoercion, checkAxInstCo ) where
+module OptCoercion ( optCoercion, checkAxInstCo ) where
#include "HsVersions.h"
@@ -24,7 +18,7 @@ import Var
import VarSet
import FamInstEnv ( flattenTys )
import VarEnv
-import StaticFlags ( opt_NoOptCoercion )
+import StaticFlags ( opt_NoOptCoercion )
import Outputable
import Pair
import FastString
@@ -37,7 +31,7 @@ import Control.Monad ( zipWithM )
%************************************************************************
%* *
- Optimising coercions
+ Optimising coercions
%* *
%************************************************************************
@@ -56,7 +50,7 @@ to return
forall (co_B1:t1~t2). ...co_B1...
because now the co_B1 (which is really free) has been captured, and
subsequent substitutions will go wrong. That's why we can't use
-mkCoPredTy in the ForAll case, where this note appears.
+mkCoPredTy in the ForAll case, where this note appears.
Note [Optimising coercion optimisation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -76,14 +70,14 @@ opt_co2.
\begin{code}
optCoercion :: CvSubst -> Coercion -> NormalCo
--- ^ optCoercion applies a substitution to a coercion,
+-- ^ optCoercion applies a substitution to a coercion,
-- *and* optimises it to reduce its size
-optCoercion env co
+optCoercion env co
| opt_NoOptCoercion = substCo env co
| otherwise = opt_co1 env False co
type NormalCo = Coercion
- -- Invariants:
+ -- Invariants:
-- * The substitution has been fully applied
-- * For trans coercions (co1 `trans` co2)
-- co1 is not a trans, and neither co1 nor co2 is identity
@@ -248,7 +242,7 @@ opt_co4 env sym rep r (InstCo co ty)
-- See if it is a forall after optimization
-- If so, do an inefficient one-variable substitution
| Just (tv, co'_body) <- splitForAllCo_maybe co'
- = substCoWithTy (getCvInScope env) tv ty' co'_body
+ = substCoWithTy (getCvInScope env) tv ty' co'_body
| otherwise = InstCo co' ty'
where
@@ -363,9 +357,9 @@ opt_trans2 :: InScopeSet -> NormalNonIdCo -> NormalNonIdCo -> NormalCo
-- Neither arg is the identity
opt_trans2 is (TransCo co1a co1b) co2
-- Don't know whether the sub-coercions are the identity
- = opt_trans is co1a (opt_trans is co1b co2)
+ = opt_trans is co1a (opt_trans is co1b co2)
-opt_trans2 is co1 co2
+opt_trans2 is co1 co2
| Just co <- opt_trans_rule is co1 co2
= co
@@ -401,10 +395,10 @@ opt_trans_rule is in_co1@(InstCo co1 ty1) in_co2@(InstCo co2 ty2)
, co1 `compatible_co` co2
= fireTransRule "TrPushInst" in_co1 in_co2 $
mkInstCo (opt_trans is co1 co2) ty1
-
+
-- Push transitivity down through matching top-level constructors.
opt_trans_rule is in_co1@(TyConAppCo r1 tc1 cos1) in_co2@(TyConAppCo r2 tc2 cos2)
- | tc1 == tc2
+ | tc1 == tc2
= ASSERT( r1 == r2 )
fireTransRule "PushTyConApp" in_co1 in_co2 $
TyConAppCo r1 tc1 (opt_transList is cos1 cos2)
@@ -480,7 +474,7 @@ opt_trans_rule is co1 co2
, Nothing <- checkAxInstCo newAxInst
= fireTransRule "TrPushSymAxL" co1 co2 $ SymCo newAxInst
- -- TrPushAxL
+ -- TrPushAxL
| Just (sym, con, ind, cos2) <- co2_is_axiom_maybe
, Just cos1 <- matchAxiom (not sym) con ind co1
, False <- sym
@@ -509,7 +503,7 @@ opt_trans_rule is co1 co2
co2_is_axiom_maybe = isAxiom_maybe co2
role = coercionRole co1 -- should be the same as coercionRole co2!
-opt_trans_rule _ co1 co2 -- Identity rule
+opt_trans_rule _ co1 co2 -- Identity rule
| (Pair ty1 _, r) <- coercionKindRole co1
, Pair _ ty2 <- coercionKind co2
, ty1 `eqType` ty2
@@ -592,7 +586,7 @@ checkAxInstCo (AxiomInstCo ax ind cos)
= let branch = coAxiomNthBranch ax ind
tvs = coAxBranchTyVars branch
incomps = coAxBranchIncomps branch
- tys = map (pFst . coercionKind) cos
+ tys = map (pFst . coercionKind) cos
subst = zipOpenTvSubst tvs tys
target = Type.substTys subst (coAxBranchLHS branch)
in_scope = mkInScopeSet $
@@ -636,14 +630,14 @@ substTyVarBndr2 :: CvSubst -> TyVar -> TyVar
substTyVarBndr2 env tv1 tv2
= case substTyVarBndr env tv1 of
(env1, tv1') -> (env1, extendTvSubstAndInScope env tv2 (mkTyVarTy tv1'), tv1')
-
+
zapCvSubstEnv2 :: CvSubst -> CvSubst -> CvSubst
zapCvSubstEnv2 env1 env2 = mkCvSubst (is1 `unionInScope` is2) []
where is1 = getCvInScope env1
is2 = getCvInScope env2
-----------
isAxiom_maybe :: Coercion -> Maybe (Bool, CoAxiom Branched, Int, [Coercion])
-isAxiom_maybe (SymCo co)
+isAxiom_maybe (SymCo co)
| Just (sym, con, ind, cos) <- isAxiom_maybe co
= Just (not sym, con, ind, cos)
isAxiom_maybe (AxiomInstCo con ind cos)
@@ -667,7 +661,7 @@ matchAxiom sym ax@(CoAxiom { co_ax_tc = tc }) ind co
compatible_co :: Coercion -> Coercion -> Bool
-- Check whether (co1 . co2) will be well-kinded
compatible_co co1 co2
- = x1 `eqType` x2
+ = x1 `eqType` x2
where
Pair _ x1 = coercionKind co1
Pair x2 _ = coercionKind co2
@@ -704,9 +698,9 @@ etaAppCo_maybe co
= Nothing
etaTyConAppCo_maybe :: TyCon -> Coercion -> Maybe [Coercion]
--- If possible, split a coercion
+-- If possible, split a coercion
-- g :: T s1 .. sn ~ T t1 .. tn
--- into [ Nth 0 g :: s1~t1, ..., Nth (n-1) g :: sn~tn ]
+-- into [ Nth 0 g :: s1~t1, ..., Nth (n-1) g :: sn~tn ]
etaTyConAppCo_maybe tc (TyConAppCo _ tc2 cos2)
= ASSERT( tc == tc2 ) Just cos2
@@ -717,7 +711,7 @@ etaTyConAppCo_maybe tc co
, Just (tc2, tys2) <- splitTyConApp_maybe ty2
, tc1 == tc2
, let n = length tys1
- = ASSERT( tc == tc1 )
+ = ASSERT( tc == tc1 )
ASSERT( n == length tys2 )
Just (decomposeCo n co)
-- NB: n might be <> tyConArity tc
@@ -726,11 +720,11 @@ etaTyConAppCo_maybe tc co
| otherwise
= Nothing
-\end{code}
+\end{code}
Note [Eta for AppCo]
~~~~~~~~~~~~~~~~~~~~
-Suppose we have
+Suppose we have
g :: s1 t1 ~ s2 t2
Then we can't necessarily make
@@ -742,7 +736,7 @@ because it's possible that
and in that case (left g) does not have the same
kind on either side.
-It's enough to check that
+It's enough to check that
kind t1 = kind t2
because if g is well-kinded then
kind (s1 t2) = kind (s2 t2)
diff --git a/compiler/types/TypeRep.lhs b/compiler/types/TypeRep.lhs
index 45acb86b64..cc7202f995 100644
--- a/compiler/types/TypeRep.lhs
+++ b/compiler/types/TypeRep.lhs
@@ -8,7 +8,7 @@ Note [The Type-related module hierarchy]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Class
TyCon imports Class
- TypeRep
+ TypeRep
TysPrim imports TypeRep ( including mkTyConTy )
Kind imports TysPrim ( mainly for primitive kinds )
Type imports Kind
@@ -16,18 +16,12 @@ Note [The Type-related module hierarchy]
\begin{code}
{-# LANGUAGE CPP, DeriveDataTypeable, DeriveFunctor, DeriveFoldable, DeriveTraversable #-}
-{-# OPTIONS_GHC -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
{-# OPTIONS_HADDOCK hide #-}
-- We expose the relevant stuff from this module via the Type module
module TypeRep (
- TyThing(..),
- Type(..),
+ TyThing(..),
+ Type(..),
TyLit(..),
KindOrType, Kind, SuperKind,
PredType, ThetaType, -- Synonyms
@@ -35,14 +29,14 @@ module TypeRep (
-- Functions over types
mkTyConTy, mkTyVarTy, mkTyVarTys,
isLiftedTypeKind, isSuperKind, isTypeVar, isKindVar,
-
+
-- Pretty-printing
- pprType, pprParendType, pprTypeApp, pprTvBndr, pprTvBndrs,
- pprTyThing, pprTyThingCategory, pprSigmaType,
- pprTheta, pprForAll, pprUserForAll,
+ pprType, pprParendType, pprTypeApp, pprTvBndr, pprTvBndrs,
+ pprTyThing, pprTyThingCategory, pprSigmaType,
+ pprTheta, pprForAll, pprUserForAll,
pprThetaArrowTy, pprClassPred,
pprKind, pprParendKind, pprTyLit, suppressKinds,
- TyPrec(..), maybeParen, pprTcApp,
+ TyPrec(..), maybeParen, pprTcApp,
pprPrefixApp, pprArrowChain, ppr_type,
-- Free variables
@@ -56,7 +50,7 @@ module TypeRep (
tidyOpenTyVar, tidyOpenTyVars,
tidyTyVarOcc,
tidyTopType,
- tidyKind,
+ tidyKind,
-- Substitutions
TvSubst(..), TvSubstEnv
@@ -92,9 +86,9 @@ import qualified Data.Data as Data hiding ( TyCon )
%************************************************************************
-%* *
+%* *
\subsection{The data type}
-%* *
+%* *
%************************************************************************
@@ -104,42 +98,42 @@ import qualified Data.Data as Data hiding ( TyCon )
-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism] in coreSyn/CoreLint.lhs
data Type
- = TyVarTy Var -- ^ Vanilla type or kind variable (*never* a coercion variable)
+ = TyVarTy Var -- ^ Vanilla type or kind variable (*never* a coercion variable)
| AppTy -- See Note [AppTy invariant]
- Type
- Type -- ^ Type application to something other than a 'TyCon'. Parameters:
- --
+ Type
+ Type -- ^ Type application to something other than a 'TyCon'. Parameters:
+ --
-- 1) Function: must /not/ be a 'TyConApp',
-- must be another 'AppTy', or 'TyVarTy'
- --
- -- 2) Argument type
+ --
+ -- 2) Argument type
| TyConApp -- See Note [AppTy invariant]
- TyCon
- [KindOrType] -- ^ Application of a 'TyCon', including newtypes /and/ synonyms.
- -- Invariant: saturated appliations of 'FunTyCon' must
- -- use 'FunTy' and saturated synonyms must use their own
+ TyCon
+ [KindOrType] -- ^ Application of a 'TyCon', including newtypes /and/ synonyms.
+ -- Invariant: saturated appliations of 'FunTyCon' must
+ -- use 'FunTy' and saturated synonyms must use their own
-- constructors. However, /unsaturated/ 'FunTyCon's
-- do appear as 'TyConApp's.
- -- Parameters:
- --
- -- 1) Type constructor being applied to.
- --
+ -- Parameters:
+ --
+ -- 1) Type constructor being applied to.
+ --
-- 2) Type arguments. Might not have enough type arguments
-- here to saturate the constructor.
-- Even type synonyms are not necessarily saturated;
-- for example unsaturated type synonyms
- -- can appear as the right hand side of a type synonym.
+ -- can appear as the right hand side of a type synonym.
| FunTy
- Type
- Type -- ^ Special case of 'TyConApp': @TyConApp FunTyCon [t1, t2]@
- -- See Note [Equality-constrained types]
+ Type
+ Type -- ^ Special case of 'TyConApp': @TyConApp FunTyCon [t1, t2]@
+ -- See Note [Equality-constrained types]
| ForAllTy
- Var -- Type or kind variable
- Type -- ^ A polymorphic type
+ Var -- Type or kind variable
+ Type -- ^ A polymorphic type
| LitTy TyLit -- ^ Type literals are similar to type constructors.
@@ -186,7 +180,7 @@ has a UnliftedTypeKind or ArgTypeKind underneath an arrow.
Nor can we abstract over a type variable with any of these kinds.
- k :: = kk | # | ArgKind | (#) | OpenKind
+ k :: = kk | # | ArgKind | (#) | OpenKind
kk :: = * | kk -> kk | T kk1 ... kkn
So a type variable can only be abstracted kk.
@@ -224,17 +218,17 @@ is encoded like this:
blah
-------------------------------------
- Note [PredTy]
+ Note [PredTy]
\begin{code}
-- | A type of the form @p@ of kind @Constraint@ represents a value whose type is
--- the Haskell predicate @p@, where a predicate is what occurs before
+-- the Haskell predicate @p@, where a predicate is what occurs before
-- the @=>@ in a Haskell type.
--
-- We use 'PredType' as documentation to mark those types that we guarantee to have
-- this kind.
--
--- It can be expanded into its representation, but:
+-- It can be expanded into its representation, but:
--
-- * The type checker must treat it as opaque
--
@@ -257,18 +251,18 @@ type ThetaType = [PredType]
to expand to allow them.)
A Haskell qualified type, such as that for f,g,h above, is
-represented using
- * a FunTy for the double arrow
- * with a type of kind Constraint as the function argument
+represented using
+ * a FunTy for the double arrow
+ * with a type of kind Constraint as the function argument
The predicate really does turn into a real extra argument to the
function. If the argument has type (p :: Constraint) then the predicate p is
represented by evidence of type p.
%************************************************************************
-%* *
+%* *
Simple constructors
-%* *
+%* *
%************************************************************************
These functions are here so that they can be used by TysPrim,
@@ -301,15 +295,15 @@ isSuperKind _ = False
isTypeVar :: Var -> Bool
isTypeVar v = isTKVar v && not (isSuperKind (varType v))
-isKindVar :: Var -> Bool
+isKindVar :: Var -> Bool
isKindVar v = isTKVar v && isSuperKind (varType v)
\end{code}
%************************************************************************
-%* *
- Free variables of types and coercions
-%* *
+%* *
+ Free variables of types and coercions
+%* *
%************************************************************************
\begin{code}
@@ -333,7 +327,7 @@ closeOverKinds :: TyVarSet -> TyVarSet
-- Add the kind variables free in the kinds
-- of the tyvars in the given set
closeOverKinds tvs
- = foldVarSet (\tv ktvs -> tyVarsOfType (tyVarKind tv) `unionVarSet` ktvs)
+ = foldVarSet (\tv ktvs -> tyVarsOfType (tyVarKind tv) `unionVarSet` ktvs)
tvs tvs
varSetElemsKvsFirst :: VarSet -> [TyVar]
@@ -345,12 +339,12 @@ varSetElemsKvsFirst set
\end{code}
%************************************************************************
-%* *
- TyThing
-%* *
+%* *
+ TyThing
+%* *
%************************************************************************
-Despite the fact that DataCon has to be imported via a hi-boot route,
+Despite the fact that DataCon has to be imported via a hi-boot route,
this module seems the right place for TyThing, because it's needed for
funTyCon and all the types in TysPrim.
@@ -364,14 +358,14 @@ The Class and its associated TyCon have the same Name.
\begin{code}
-- | A typecheckable-thing, essentially anything that has a name
-data TyThing
+data TyThing
= AnId Id
| AConLike ConLike
| ATyCon TyCon -- TyCons and classes; see Note [ATyCon for classes]
| ACoAxiom (CoAxiom Branched)
deriving (Eq, Ord)
-instance Outputable TyThing where
+instance Outputable TyThing where
ppr = pprTyThing
pprTyThing :: TyThing -> SDoc
@@ -387,9 +381,9 @@ pprTyThingCategory (AConLike (RealDataCon _)) = ptext (sLit "Data constructor")
pprTyThingCategory (AConLike (PatSynCon _)) = ptext (sLit "Pattern synonym")
-instance NamedThing TyThing where -- Can't put this with the type
- getName (AnId id) = getName id -- decl, because the DataCon instance
- getName (ATyCon tc) = getName tc -- isn't visible there
+instance NamedThing TyThing where -- Can't put this with the type
+ getName (AnId id) = getName id -- decl, because the DataCon instance
+ getName (ATyCon tc) = getName tc -- isn't visible there
getName (ACoAxiom cc) = getName cc
getName (AConLike cl) = getName cl
@@ -397,10 +391,10 @@ instance NamedThing TyThing where -- Can't put this with the type
%************************************************************************
-%* *
- Substitutions
+%* *
+ Substitutions
Data type defined here to avoid unnecessary mutual recursion
-%* *
+%* *
%************************************************************************
\begin{code}
@@ -408,46 +402,46 @@ instance NamedThing TyThing where -- Can't put this with the type
--
-- #tvsubst_invariant#
-- The following invariants must hold of a 'TvSubst':
---
+--
-- 1. The in-scope set is needed /only/ to
-- guide the generation of fresh uniques
--
--- 2. In particular, the /kind/ of the type variables in
+-- 2. In particular, the /kind/ of the type variables in
-- the in-scope set is not relevant
--
-- 3. The substitution is only applied ONCE! This is because
-- in general such application will not reached a fixed point.
-data TvSubst
- = TvSubst InScopeSet -- The in-scope type and kind variables
- TvSubstEnv -- Substitutes both type and kind variables
- -- See Note [Apply Once]
- -- and Note [Extending the TvSubstEnv]
+data TvSubst
+ = TvSubst InScopeSet -- The in-scope type and kind variables
+ TvSubstEnv -- Substitutes both type and kind variables
+ -- See Note [Apply Once]
+ -- and Note [Extending the TvSubstEnv]
-- | A substitution of 'Type's for 'TyVar's
-- and 'Kind's for 'KindVar's
type TvSubstEnv = TyVarEnv Type
- -- A TvSubstEnv is used both inside a TvSubst (with the apply-once
- -- invariant discussed in Note [Apply Once]), and also independently
- -- in the middle of matching, and unification (see Types.Unify)
- -- So you have to look at the context to know if it's idempotent or
- -- apply-once or whatever
+ -- A TvSubstEnv is used both inside a TvSubst (with the apply-once
+ -- invariant discussed in Note [Apply Once]), and also independently
+ -- in the middle of matching, and unification (see Types.Unify)
+ -- So you have to look at the context to know if it's idempotent or
+ -- apply-once or whatever
\end{code}
Note [Apply Once]
~~~~~~~~~~~~~~~~~
We use TvSubsts to instantiate things, and we might instantiate
- forall a b. ty
+ forall a b. ty
\with the types
- [a, b], or [b, a].
+ [a, b], or [b, a].
So the substitution might go [a->b, b->a]. A similar situation arises in Core
when we find a beta redex like
- (/\ a /\ b -> e) b a
+ (/\ a /\ b -> e) b a
Then we also end up with a substitution that permutes type variables. Other
-variations happen to; for example [a -> (a, b)].
+variations happen too; for example [a -> (a, b)].
- ***************************************************
- *** So a TvSubst must be applied precisely once ***
- ***************************************************
+ ***************************************************
+ *** So a TvSubst must be applied precisely once ***
+ ***************************************************
A TvSubst is not idempotent, but, unlike the non-idempotent substitution
we use during unifications, it must not be repeatedly applied.
@@ -461,15 +455,15 @@ if the TvSubstEnv is empty --- i.e. (isEmptyTvSubt subst) holds ---
then (substTy subst ty) does nothing.
For example, consider:
- (/\a. /\b:(a~Int). ...b..) Int
+ (/\a. /\b:(a~Int). ...b..) Int
We substitute Int for 'a'. The Unique of 'b' does not change, but
nevertheless we add 'b' to the TvSubstEnv, because b's kind does change
This invariant has several crucial consequences:
-* In substTyVarBndr, we need extend the TvSubstEnv
- - if the unique has changed
- - or if the kind has changed
+* In substTyVarBndr, we need to extend the TvSubstEnv
+ - if the unique has changed
+ - or if the kind has changed
* In substTyVar, we do not need to consult the in-scope set;
the TvSubstEnv is enough
@@ -479,7 +473,7 @@ This invariant has several crucial consequences:
%************************************************************************
-%* *
+%* *
Pretty-printing types
Defined very early because of debug printing in assertions
@@ -518,7 +512,7 @@ data TyPrec -- See Note [Prededence in types]
maybeParen :: TyPrec -> TyPrec -> SDoc -> SDoc
maybeParen ctxt_prec inner_prec pretty
| ctxt_prec < inner_prec = pretty
- | otherwise = parens pretty
+ | otherwise = parens pretty
------------------
pprType, pprParendType :: Type -> SDoc
@@ -538,7 +532,7 @@ pprClassPred clas tys = pprTypeApp (classTyCon clas) tys
------------
pprTheta :: ThetaType -> SDoc
--- pprTheta [pred] = pprPred pred -- I'm in two minds about this
+-- pprTheta [pred] = pprPred pred -- I'm in two minds about this
pprTheta theta = parens (sep (punctuate comma (map (ppr_type TopPrec) theta)))
pprThetaArrowTy :: ThetaType -> SDoc
@@ -576,16 +570,16 @@ instance Outputable TyLit where
ppr = pprTyLit
------------------
- -- OK, here's the main printer
+ -- OK, here's the main printer
ppr_type :: TyPrec -> Type -> SDoc
-ppr_type _ (TyVarTy tv) = ppr_tvar tv
+ppr_type _ (TyVarTy tv) = ppr_tvar tv
ppr_type p (TyConApp tc tys) = pprTyTcApp p tc tys
ppr_type p (LitTy l) = ppr_tylit p l
ppr_type p ty@(ForAllTy {}) = ppr_forall_type p ty
ppr_type p (AppTy t1 t2) = maybeParen p TyConPrec $
- ppr_type FunPrec t1 <+> ppr_type TyConPrec t2
+ ppr_type FunPrec t1 <+> ppr_type TyConPrec t2
ppr_type p fun_ty@(FunTy ty1 ty2)
| isPredTy ty1
@@ -654,11 +648,11 @@ pprTvBndrs :: [TyVar] -> SDoc
pprTvBndrs tvs = sep (map pprTvBndr tvs)
pprTvBndr :: TyVar -> SDoc
-pprTvBndr tv
+pprTvBndr tv
| isLiftedTypeKind kind = ppr_tvar tv
- | otherwise = parens (ppr_tvar tv <+> dcolon <+> pprKind kind)
- where
- kind = tyVarKind tv
+ | otherwise = parens (ppr_tvar tv <+> dcolon <+> pprKind kind)
+ where
+ kind = tyVarKind tv
\end{code}
Note [When to print foralls]
@@ -669,7 +663,7 @@ too much information; see Trac #9018.
So I'm trying out this rule: print explicit foralls if
a) User specifies -fprint-explicit-foralls, or
- b) Any of the quantified type variables has a kind
+ b) Any of the quantified type variables has a kind
that mentions a kind variable
This catches common situations, such as a type siguature
@@ -734,7 +728,7 @@ pprTcApp p pp tc tys
| Just dc <- isPromotedDataCon_maybe tc
, let dc_tc = dataConTyCon dc
- , isTupleTyCon dc_tc
+ , isTupleTyCon dc_tc
, let arity = tyConArity dc_tc -- E.g. 3 for (,,) k1 k2 k3 t1 t2 t3
ty_args = drop arity tys -- Drop the kind args
, ty_args `lengthIs` arity -- Result is saturated
@@ -755,8 +749,8 @@ pprTcApp_help p pp tc tys dflags
-- we know nothing of precedence though
= pprInfixApp p pp (ppr tc) ty1 ty2
- | tc `hasKey` liftedTypeKindTyConKey
- || tc `hasKey` unliftedTypeKindTyConKey
+ | tc `hasKey` liftedTypeKindTyConKey
+ || tc `hasKey` unliftedTypeKindTyConKey
= ASSERT( null tys ) ppr tc -- Do not wrap *, # in parens
| otherwise
@@ -779,11 +773,11 @@ suppressKinds dflags kind xs
----------------
pprTyList :: TyPrec -> Type -> Type -> SDoc
--- Given a type-level list (t1 ': t2), see if we can print
--- it in list notation [t1, ...].
+-- Given a type-level list (t1 ': t2), see if we can print
+-- it in list notation [t1, ...].
pprTyList p ty1 ty2
= case gather ty2 of
- (arg_tys, Nothing) -> char '\'' <> brackets (fsep (punctuate comma
+ (arg_tys, Nothing) -> char '\'' <> brackets (fsep (punctuate comma
(map (ppr_type TopPrec) (ty1:arg_tys))))
(arg_tys, Just tl) -> maybeParen p FunPrec $
hang (ppr_type FunPrec ty1)
@@ -808,7 +802,7 @@ pprInfixApp p pp pp_tc ty1 ty2
sep [pp TyOpPrec ty1, pprInfixVar True pp_tc <+> pp TyOpPrec ty2]
pprPrefixApp :: TyPrec -> SDoc -> [SDoc] -> SDoc
-pprPrefixApp p pp_fun pp_tys
+pprPrefixApp p pp_fun pp_tys
| null pp_tys = pp_fun
| otherwise = maybeParen p TyConPrec $
hang pp_fun 2 (sep pp_tys)
@@ -822,9 +816,9 @@ pprArrowChain p (arg:args) = maybeParen p FunPrec $
\end{code}
%************************************************************************
-%* *
+%* *
\subsection{TidyType}
-%* *
+%* *
%************************************************************************
Tidying is here because it has a special case for FlatSkol
@@ -832,7 +826,7 @@ Tidying is here because it has a special case for FlatSkol
\begin{code}
-- | This tidies up a type for printing in an error message, or in
-- an interface file.
---
+--
-- It doesn't change the uniques at all, just the print names.
tidyTyVarBndrs :: TidyEnv -> [TyVar] -> (TidyEnv, [TyVar])
tidyTyVarBndrs env tvs = mapAccumL tidyTyVarBndr env tvs
@@ -841,7 +835,7 @@ tidyTyVarBndr :: TidyEnv -> TyVar -> (TidyEnv, TyVar)
tidyTyVarBndr tidy_env@(occ_env, subst) tyvar
= case tidyOccName occ_env occ1 of
(tidy', occ') -> ((tidy', subst'), tyvar')
- where
+ where
subst' = extendVarEnv subst tyvar tyvar'
tyvar' = setTyVarKind (setTyVarName tyvar name') kind'
name' = tidyNameOcc name occ'
@@ -860,7 +854,7 @@ tidyTyVarBndr tidy_env@(occ_env, subst) tyvar
tidyFreeTyVars :: TidyEnv -> TyVarSet -> TidyEnv
-- ^ Add the free 'TyVar's to the env in tidy form,
-- so that we can tidy the type they are free in
-tidyFreeTyVars (full_occ_env, var_env) tyvars
+tidyFreeTyVars (full_occ_env, var_env) tyvars
= fst (tidyOpenTyVars (full_occ_env, var_env) (varSetElems tyvars))
---------------
@@ -874,15 +868,15 @@ tidyOpenTyVar :: TidyEnv -> TyVar -> (TidyEnv, TyVar)
-- also 'tidyTyVarBndr'
tidyOpenTyVar env@(_, subst) tyvar
= case lookupVarEnv subst tyvar of
- Just tyvar' -> (env, tyvar') -- Already substituted
- Nothing -> tidyTyVarBndr env tyvar -- Treat it as a binder
+ Just tyvar' -> (env, tyvar') -- Already substituted
+ Nothing -> tidyTyVarBndr env tyvar -- Treat it as a binder
---------------
tidyTyVarOcc :: TidyEnv -> TyVar -> TyVar
tidyTyVarOcc (_, subst) tv
= case lookupVarEnv subst tv of
- Nothing -> tv
- Just tv' -> tv'
+ Nothing -> tv
+ Just tv' -> tv'
---------------
tidyTypes :: TidyEnv -> [Type] -> [Type]
@@ -891,14 +885,14 @@ tidyTypes env tys = map (tidyType env) tys
---------------
tidyType :: TidyEnv -> Type -> Type
tidyType _ (LitTy n) = LitTy n
-tidyType env (TyVarTy tv) = TyVarTy (tidyTyVarOcc env tv)
+tidyType env (TyVarTy tv) = TyVarTy (tidyTyVarOcc env tv)
tidyType env (TyConApp tycon tys) = let args = tidyTypes env tys
- in args `seqList` TyConApp tycon args
-tidyType env (AppTy fun arg) = (AppTy $! (tidyType env fun)) $! (tidyType env arg)
-tidyType env (FunTy fun arg) = (FunTy $! (tidyType env fun)) $! (tidyType env arg)
-tidyType env (ForAllTy tv ty) = ForAllTy tvp $! (tidyType envp ty)
- where
- (envp, tvp) = tidyTyVarBndr env tv
+ in args `seqList` TyConApp tycon args
+tidyType env (AppTy fun arg) = (AppTy $! (tidyType env fun)) $! (tidyType env arg)
+tidyType env (FunTy fun arg) = (FunTy $! (tidyType env fun)) $! (tidyType env arg)
+tidyType env (ForAllTy tv ty) = ForAllTy tvp $! (tidyType envp ty)
+ where
+ (envp, tvp) = tidyTyVarBndr env tv
---------------
-- | Grabs the free type variables, tidies them
@@ -909,7 +903,7 @@ tidyOpenType env ty
where
(env'@(_, var_env), tvs') = tidyOpenTyVars env (varSetElems (tyVarsOfType ty))
trimmed_occ_env = initTidyOccEnv (map getOccName tvs')
- -- The idea here was that we restrict the new TidyEnv to the
+ -- The idea here was that we restrict the new TidyEnv to the
-- _free_ vars of the type, so that we don't gratuitously rename
-- the _bound_ variables of the type.