author     Simon Marlow <marlowsd@gmail.com>  2014-05-04 20:27:42 +0100
committer  Simon Marlow <marlowsd@gmail.com>  2014-05-04 20:28:58 +0100
commit     f0fcc41d755876a1b02d1c7c79f57515059f6417 (patch)
tree       89cce0cfc61744b1c7b732619ea9de04f21fdcfe
parent     5141baf76132fe0d8f88cfa0a62698cc3b37e48a (diff)
download   haskell-f0fcc41d755876a1b02d1c7c79f57515059f6417.tar.gz
Revert "Per-thread allocation counters and limits"
Problems were found on 32-bit platforms; I'll commit again when I have a fix.

This reverts the following commits:

    54b31f744848da872c7c6366dea840748e01b5cf
    b0534f78a73f972e279eed4447a5687bd6a8308e
-rw-r--r--  compiler/cmm/CmmLayoutStack.hs | 9
-rw-r--r--  compiler/codeGen/StgCmmForeign.hs | 268
-rw-r--r--  includes/CodeGen.Platform.hs | 4
-rw-r--r--  includes/rts/Constants.h | 6
-rw-r--r--  includes/rts/Flags.h | 8
-rw-r--r--  includes/rts/Threads.h | 8
-rw-r--r--  includes/rts/storage/TSO.h | 31
-rw-r--r--  libraries/base/Control/Exception.hs | 1
-rw-r--r--  libraries/base/Control/Exception/Base.hs | 1
-rw-r--r--  libraries/base/GHC/Conc.lhs | 6
-rw-r--r--  libraries/base/GHC/Conc/Sync.lhs | 92
-rw-r--r--  libraries/base/GHC/IO/Exception.hs | 21
-rw-r--r--  rts/HeapStackCheck.cmm | 4
-rw-r--r--  rts/Linker.c | 4
-rw-r--r--  rts/Prelude.h | 2
-rw-r--r--  rts/RaiseAsync.c | 54
-rw-r--r--  rts/RaiseAsync.h | 6
-rw-r--r--  rts/RtsFlags.c | 10
-rw-r--r--  rts/RtsStartup.c | 1
-rw-r--r--  rts/Schedule.c | 19
-rw-r--r--  rts/Threads.c | 77
-rw-r--r--  rts/package.conf.in | 2
-rw-r--r--  rts/sm/Storage.c | 8
-rw-r--r--  rts/win32/libHSbase.def | 1
-rw-r--r--  testsuite/tests/concurrent/should_run/all.T | 7
-rw-r--r--  testsuite/tests/concurrent/should_run/allocLimit1.hs | 9
-rw-r--r--  testsuite/tests/concurrent/should_run/allocLimit1.stderr | 1
-rw-r--r--  testsuite/tests/concurrent/should_run/allocLimit2.hs | 17
-rw-r--r--  testsuite/tests/concurrent/should_run/allocLimit3.hs | 15
-rw-r--r--  testsuite/tests/concurrent/should_run/allocLimit3.stderr | 1
-rw-r--r--  testsuite/tests/concurrent/should_run/allocLimit3.stdout | 1
-rw-r--r--  testsuite/tests/concurrent/should_run/allocLimit4.hs | 31
-rw-r--r--  utils/deriveConstants/DeriveConstants.hs | 1
33 files changed, 146 insertions(+), 580 deletions(-)
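
For context, the user-facing API removed by this revert (setAllocationCounter, enableAllocationLimit, and the AllocationLimitExceeded exception) is exercised by the deleted allocLimit tests at the bottom of this patch. A minimal sketch of that usage, mirroring the deleted allocLimit2.hs and assuming a GHC build that still carries the allocation-limit feature:

    module Main (main) where

    import Control.Concurrent (forkFinally, newEmptyMVar, putMVar, takeMVar)
    import Control.Exception (AllocationLimitExceeded (..), fromException)
    import GHC.Conc (enableAllocationLimit, setAllocationCounter)
    import System.Exit (exitFailure)

    main :: IO ()
    main = do
      m <- newEmptyMVar
      let action = do
            setAllocationCounter (10 * 1024)  -- counter counts *down* from ~10 KB
            enableAllocationLimit             -- arm AllocationLimitExceeded for this thread
            print (length [1 ..])             -- allocate without bound
      _ <- forkFinally action (putMVar m)
      r <- takeMVar m
      case r of
        Left e | Just AllocationLimitExceeded <- fromException e -> putStrLn "limit hit"
        _ -> print r >> exitFailure

With the feature present, the child thread's unbounded allocation trips the limit and the forked computation ends with an AllocationLimitExceeded exception instead of running forever.
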
diff --git a/compiler/cmm/CmmLayoutStack.hs b/compiler/cmm/CmmLayoutStack.hs
index e7d57d550a..bdc947829d 100644
--- a/compiler/cmm/CmmLayoutStack.hs
+++ b/compiler/cmm/CmmLayoutStack.hs
@@ -988,12 +988,9 @@ lowerSafeForeignCall dflags block
id <- newTemp (bWord dflags)
new_base <- newTemp (cmmRegType dflags (CmmGlobal BaseReg))
let (caller_save, caller_load) = callerSaveVolatileRegs dflags
+ load_tso <- newTemp (gcWord dflags)
load_stack <- newTemp (gcWord dflags)
- tso <- newTemp (gcWord dflags)
- cn <- newTemp (bWord dflags)
- bdfree <- newTemp (bWord dflags)
- bdstart <- newTemp (bWord dflags)
- let suspend = saveThreadState dflags tso cn <*>
+ let suspend = saveThreadState dflags <*>
caller_save <*>
mkMiddle (callSuspendThread dflags id intrbl)
midCall = mkUnsafeCall tgt res args
@@ -1002,7 +999,7 @@ lowerSafeForeignCall dflags block
-- might now have a different Capability!
mkAssign (CmmGlobal BaseReg) (CmmReg (CmmLocal new_base)) <*>
caller_load <*>
- loadThreadState dflags tso load_stack cn bdfree bdstart
+ loadThreadState dflags load_tso load_stack
(_, regs, copyout) =
copyOutOflow dflags NativeReturn Jump (Young succ)
diff --git a/compiler/codeGen/StgCmmForeign.hs b/compiler/codeGen/StgCmmForeign.hs
index 2730275d34..bf88f1ccb3 100644
--- a/compiler/codeGen/StgCmmForeign.hs
+++ b/compiler/codeGen/StgCmmForeign.hs
@@ -7,15 +7,12 @@
-----------------------------------------------------------------------------
module StgCmmForeign (
- cgForeignCall,
+ cgForeignCall, loadThreadState, saveThreadState,
emitPrimCall, emitCCall,
emitForeignCall, -- For CmmParse
- emitSaveThreadState,
- saveThreadState,
- emitLoadThreadState,
- loadThreadState,
- emitOpenNursery,
- emitCloseNursery,
+ emitSaveThreadState, -- will be needed by the Cmm parser
+ emitLoadThreadState, -- ditto
+ emitCloseNursery, emitOpenNursery
) where
#include "HsVersions.h"
@@ -267,215 +264,94 @@ maybe_assign_temp e = do
-- This stuff can't be done in suspendThread/resumeThread, because it
-- refers to global registers which aren't available in the C world.
+saveThreadState :: DynFlags -> CmmAGraph
+saveThreadState dflags =
+ -- CurrentTSO->stackobj->sp = Sp;
+ mkStore (cmmOffset dflags (CmmLoad (cmmOffset dflags stgCurrentTSO (tso_stackobj dflags)) (bWord dflags)) (stack_SP dflags)) stgSp
+ <*> closeNursery dflags
+ -- and save the current cost centre stack in the TSO when profiling:
+ <*> if gopt Opt_SccProfilingOn dflags then
+ mkStore (cmmOffset dflags stgCurrentTSO (tso_CCCS dflags)) curCCS
+ else mkNop
+
emitSaveThreadState :: FCode ()
emitSaveThreadState = do
dflags <- getDynFlags
- tso <- newTemp (gcWord dflags)
- cn <- newTemp (bWord dflags)
- emit $ saveThreadState dflags tso cn
-
-
--- saveThreadState must be usable from the stack layout pass, where we
--- don't have FCode. Therefore it takes LocalRegs as arguments, so
--- the caller can create these.
-saveThreadState :: DynFlags -> LocalReg -> LocalReg -> CmmAGraph
-saveThreadState dflags tso cn =
- catAGraphs [
- -- tso = CurrentTSO;
- mkAssign (CmmLocal tso) stgCurrentTSO,
- -- tso->stackobj->sp = Sp;
- mkStore (cmmOffset dflags (CmmLoad (cmmOffset dflags (CmmReg (CmmLocal tso)) (tso_stackobj dflags)) (bWord dflags)) (stack_SP dflags)) stgSp,
- closeNursery dflags tso cn,
- -- and save the current cost centre stack in the TSO when profiling:
- if gopt Opt_SccProfilingOn dflags then
- mkStore (cmmOffset dflags (CmmReg (CmmLocal tso)) (tso_CCCS dflags)) curCCS
- else mkNop
- ]
+ emit (saveThreadState dflags)
emitCloseNursery :: FCode ()
emitCloseNursery = do
- dflags <- getDynFlags
- tso <- newTemp (gcWord dflags)
- cn <- newTemp (bWord dflags)
- emit $ mkAssign (CmmLocal tso) stgCurrentTSO <*>
- closeNursery dflags tso cn
-
-{-
-Closing the nursery corresponds to the following code:
-
- tso = CurrentTSO;
- cn = CurrentNuresry;
+ df <- getDynFlags
+ emit (closeNursery df)
- // Update the allocation limit for the current thread. We don't
- // check to see whether it has overflowed at this point, that check is
- // made when we run out of space in the current heap block (stg_gc_noregs)
- // and in the scheduler when context switching (schedulePostRunThread).
- tso->alloc_limit -= Hp + WDS(1) - cn->start;
+ -- CurrentNursery->free = Hp+1;
+closeNursery :: DynFlags -> CmmAGraph
+closeNursery dflags = mkStore (nursery_bdescr_free dflags) (cmmOffsetW dflags stgHp 1)
- // Set cn->free to the next unoccupied word in the block
- cn->free = Hp + WDS(1);
--}
-
-closeNursery :: DynFlags -> LocalReg -> LocalReg -> CmmAGraph
-closeNursery df tso cn =
- let
- tsoreg = CmmLocal tso
- cnreg = CmmLocal cn
- in
+loadThreadState :: DynFlags -> LocalReg -> LocalReg -> CmmAGraph
+loadThreadState dflags tso stack = do
catAGraphs [
- mkAssign cnreg stgCurrentNursery,
-
- let alloc =
- CmmMachOp (mo_wordSub df)
- [ cmmOffsetW df stgHp 1
- , CmmLoad (nursery_bdescr_start df cnreg) (bWord df)
- ]
-
- alloc_limit = cmmOffset df (CmmReg tsoreg) (tso_alloc_limit df)
- in
-
- -- tso->alloc_limit += alloc
- mkStore alloc_limit (CmmMachOp (mo_wordSub df)
- [ CmmLoad alloc_limit b64
- , alloc ]),
-
- -- CurrentNursery->free = Hp+1;
- mkStore (nursery_bdescr_free df cnreg) (cmmOffsetW df stgHp 1)
- ]
+ -- tso = CurrentTSO;
+ mkAssign (CmmLocal tso) stgCurrentTSO,
+ -- stack = tso->stackobj;
+ mkAssign (CmmLocal stack) (CmmLoad (cmmOffset dflags (CmmReg (CmmLocal tso)) (tso_stackobj dflags)) (bWord dflags)),
+ -- Sp = stack->sp;
+ mkAssign sp (CmmLoad (cmmOffset dflags (CmmReg (CmmLocal stack)) (stack_SP dflags)) (bWord dflags)),
+ -- SpLim = stack->stack + RESERVED_STACK_WORDS;
+ mkAssign spLim (cmmOffsetW dflags (cmmOffset dflags (CmmReg (CmmLocal stack)) (stack_STACK dflags))
+ (rESERVED_STACK_WORDS dflags)),
+ -- HpAlloc = 0;
+ -- HpAlloc is assumed to be set to non-zero only by a failed
+ -- a heap check, see HeapStackCheck.cmm:GC_GENERIC
+ mkAssign hpAlloc (zeroExpr dflags),
+
+ openNursery dflags,
+ -- and load the current cost centre stack from the TSO when profiling:
+ if gopt Opt_SccProfilingOn dflags then
+ storeCurCCS
+ (CmmLoad (cmmOffset dflags (CmmReg (CmmLocal tso)) (tso_CCCS dflags)) (ccsType dflags))
+ else mkNop]
emitLoadThreadState :: FCode ()
emitLoadThreadState = do
dflags <- getDynFlags
- tso <- newTemp (gcWord dflags)
- stack <- newTemp (gcWord dflags)
- cn <- newTemp (bWord dflags)
- bdfree <- newTemp (bWord dflags)
- bdstart <- newTemp (bWord dflags)
- emit $ loadThreadState dflags tso stack cn bdfree bdstart
-
--- loadThreadState must be usable from the stack layout pass, where we
--- don't have FCode. Therefore it takes LocalRegs as arguments, so
--- the caller can create these.
-loadThreadState :: DynFlags
- -> LocalReg -> LocalReg -> LocalReg -> LocalReg -> LocalReg
- -> CmmAGraph
-loadThreadState dflags tso stack cn bdfree bdstart =
- catAGraphs [
- -- tso = CurrentTSO;
- mkAssign (CmmLocal tso) stgCurrentTSO,
- -- stack = tso->stackobj;
- mkAssign (CmmLocal stack) (CmmLoad (cmmOffset dflags (CmmReg (CmmLocal tso)) (tso_stackobj dflags)) (bWord dflags)),
- -- Sp = stack->sp;
- mkAssign sp (CmmLoad (cmmOffset dflags (CmmReg (CmmLocal stack)) (stack_SP dflags)) (bWord dflags)),
- -- SpLim = stack->stack + RESERVED_STACK_WORDS;
- mkAssign spLim (cmmOffsetW dflags (cmmOffset dflags (CmmReg (CmmLocal stack)) (stack_STACK dflags))
- (rESERVED_STACK_WORDS dflags)),
- -- HpAlloc = 0;
- -- HpAlloc is assumed to be set to non-zero only by a failed
- -- a heap check, see HeapStackCheck.cmm:GC_GENERIC
- mkAssign hpAlloc (zeroExpr dflags),
- openNursery dflags tso cn bdfree bdstart,
- -- and load the current cost centre stack from the TSO when profiling:
- if gopt Opt_SccProfilingOn dflags
- then storeCurCCS
- (CmmLoad (cmmOffset dflags (CmmReg (CmmLocal tso))
- (tso_CCCS dflags)) (ccsType dflags))
- else mkNop
- ]
-
+ load_tso <- newTemp (gcWord dflags)
+ load_stack <- newTemp (gcWord dflags)
+ emit $ loadThreadState dflags load_tso load_stack
emitOpenNursery :: FCode ()
emitOpenNursery = do
- dflags <- getDynFlags
- tso <- newTemp (gcWord dflags)
- cn <- newTemp (bWord dflags)
- bdfree <- newTemp (bWord dflags)
- bdstart <- newTemp (bWord dflags)
- emit $ mkAssign (CmmLocal tso) stgCurrentTSO <*>
- openNursery dflags tso cn bdfree bdstart
-
-{-
-Opening the nursery corresponds to the following code:
-
- tso = CurrentTSO;
- cn = CurrentNursery;
- bdfree = CurrentNuresry->free;
- bdstart = CurrentNuresry->start;
-
- // We *add* the currently occupied portion of the nursery block to
- // the allocation limit, because we will subtract it again in
- // closeNursery.
- tso->alloc_limit += bdfree - bdstart;
-
- // Set Hp to the last occupied word of the heap block. Why not the
- // next unocupied word? Doing it this way means that we get to use
- // an offset of zero more often, which might lead to slightly smaller
- // code on some architectures.
- Hp = bdfree - WDS(1);
-
- // Set HpLim to the end of the current nursery block (note that this block
- // might be a block group, consisting of several adjacent blocks.
- HpLim = bdstart + CurrentNursery->blocks*BLOCK_SIZE_W - 1;
--}
-
-openNursery :: DynFlags
- -> LocalReg -> LocalReg -> LocalReg -> LocalReg
- -> CmmAGraph
-openNursery df tso cn bdfree bdstart =
- let
- tsoreg = CmmLocal tso
- cnreg = CmmLocal cn
- bdfreereg = CmmLocal bdfree
- bdstartreg = CmmLocal bdstart
- in
- catAGraphs [
- mkAssign cnreg stgCurrentNursery,
- mkAssign bdfreereg (CmmLoad (nursery_bdescr_free df cnreg) (bWord df)),
- mkAssign bdstartreg (CmmLoad (nursery_bdescr_start df cnreg) (bWord df)),
-
- -- alloc = bd->free - bd->start
- let alloc =
- CmmMachOp (mo_wordSub df) [CmmReg bdfreereg, CmmReg bdstartreg]
-
- alloc_limit = cmmOffset df (CmmReg tsoreg) (tso_alloc_limit df)
- in
-
- -- tso->alloc_limit += alloc
- mkStore alloc_limit (CmmMachOp (mo_wordAdd df)
- [ CmmLoad alloc_limit b64
- , alloc ]),
-
- -- Hp = CurrentNursery->free - 1;
- mkAssign hp (cmmOffsetW df (CmmReg bdfreereg) (-1)),
-
- -- HpLim = CurrentNursery->start +
- -- CurrentNursery->blocks*BLOCK_SIZE_W - 1;
- mkAssign hpLim
- (cmmOffsetExpr df
- (CmmReg bdstartreg)
- (cmmOffset df
- (CmmMachOp (mo_wordMul df) [
- CmmMachOp (MO_SS_Conv W32 (wordWidth df))
- [CmmLoad (nursery_bdescr_blocks df cnreg) b32],
- mkIntExpr df (bLOCK_SIZE df)
- ])
- (-1)
- )
- )
+ df <- getDynFlags
+ emit (openNursery df)
+
+openNursery :: DynFlags -> CmmAGraph
+openNursery dflags = catAGraphs [
+ -- Hp = CurrentNursery->free - 1;
+ mkAssign hp (cmmOffsetW dflags (CmmLoad (nursery_bdescr_free dflags) (bWord dflags)) (-1)),
+
+ -- HpLim = CurrentNursery->start +
+ -- CurrentNursery->blocks*BLOCK_SIZE_W - 1;
+ mkAssign hpLim
+ (cmmOffsetExpr dflags
+ (CmmLoad (nursery_bdescr_start dflags) (bWord dflags))
+ (cmmOffset dflags
+ (CmmMachOp (mo_wordMul dflags) [
+ CmmMachOp (MO_SS_Conv W32 (wordWidth dflags))
+ [CmmLoad (nursery_bdescr_blocks dflags) b32],
+ mkIntExpr dflags (bLOCK_SIZE dflags)
+ ])
+ (-1)
+ )
+ )
]
-nursery_bdescr_free, nursery_bdescr_start, nursery_bdescr_blocks
- :: DynFlags -> CmmReg -> CmmExpr
-nursery_bdescr_free dflags cn =
- cmmOffset dflags (CmmReg cn) (oFFSET_bdescr_free dflags)
-nursery_bdescr_start dflags cn =
- cmmOffset dflags (CmmReg cn) (oFFSET_bdescr_start dflags)
-nursery_bdescr_blocks dflags cn =
- cmmOffset dflags (CmmReg cn) (oFFSET_bdescr_blocks dflags)
+nursery_bdescr_free, nursery_bdescr_start, nursery_bdescr_blocks :: DynFlags -> CmmExpr
+nursery_bdescr_free dflags = cmmOffset dflags stgCurrentNursery (oFFSET_bdescr_free dflags)
+nursery_bdescr_start dflags = cmmOffset dflags stgCurrentNursery (oFFSET_bdescr_start dflags)
+nursery_bdescr_blocks dflags = cmmOffset dflags stgCurrentNursery (oFFSET_bdescr_blocks dflags)
-tso_stackobj, tso_CCCS, tso_alloc_limit, stack_STACK, stack_SP :: DynFlags -> ByteOff
+tso_stackobj, tso_CCCS, stack_STACK, stack_SP :: DynFlags -> ByteOff
tso_stackobj dflags = closureField dflags (oFFSET_StgTSO_stackobj dflags)
-tso_alloc_limit dflags = closureField dflags (oFFSET_StgTSO_alloc_limit dflags)
tso_CCCS dflags = closureField dflags (oFFSET_StgTSO_cccs dflags)
stack_STACK dflags = closureField dflags (oFFSET_StgStack_stack dflags)
stack_SP dflags = closureField dflags (oFFSET_StgStack_sp dflags)
diff --git a/includes/CodeGen.Platform.hs b/includes/CodeGen.Platform.hs
index f3abb3d275..3d6dd41ae4 100644
--- a/includes/CodeGen.Platform.hs
+++ b/includes/CodeGen.Platform.hs
@@ -741,8 +741,10 @@ globalRegMaybe CurrentTSO = Just (RealRegSingle REG_CurrentTSO)
# ifdef REG_CurrentNursery
globalRegMaybe CurrentNursery = Just (RealRegSingle REG_CurrentNursery)
# endif
-#endif
globalRegMaybe _ = Nothing
+#else
+globalRegMaybe = panic "globalRegMaybe not defined for this platform"
+#endif
freeReg :: RegNo -> FastBool
diff --git a/includes/rts/Constants.h b/includes/rts/Constants.h
index a7eef0fb26..842c37b369 100644
--- a/includes/rts/Constants.h
+++ b/includes/rts/Constants.h
@@ -275,12 +275,6 @@
#define TSO_SQUEEZED 128
/*
- * Enables the AllocationLimitExceeded exception when the thread's
- * allocation limit goes negative.
- */
-#define TSO_ALLOC_LIMIT 256
-
-/*
* The number of times we spin in a spin lock before yielding (see
* #3758). To tune this value, use the benchmark in #3758: run the
* server with -N2 and the client both on a dual-core. Also make sure
diff --git a/includes/rts/Flags.h b/includes/rts/Flags.h
index ec542701df..bf6a7f3c5c 100644
--- a/includes/rts/Flags.h
+++ b/includes/rts/Flags.h
@@ -56,14 +56,6 @@ struct GC_FLAGS {
rtsBool doIdleGC;
StgWord heapBase; /* address to ask the OS for memory */
-
- StgWord allocLimitGrace; /* units: *blocks*
- * After an AllocationLimitExceeded
- * exception has been raised, how much
- * extra space is given to the thread
- * to handle the exception before we
- * raise it again.
- */
};
struct DEBUG_FLAGS {
diff --git a/includes/rts/Threads.h b/includes/rts/Threads.h
index f6264adfb9..da6f7a4add 100644
--- a/includes/rts/Threads.h
+++ b/includes/rts/Threads.h
@@ -42,12 +42,8 @@ StgRegTable * resumeThread (void *);
//
// Thread operations from Threads.c
//
-int cmp_thread (StgPtr tso1, StgPtr tso2);
-int rts_getThreadId (StgPtr tso);
-HsInt64 rts_getThreadAllocationCounter (StgPtr tso);
-void rts_setThreadAllocationCounter (StgPtr tso, HsInt64 i);
-void rts_enableThreadAllocationLimit (StgPtr tso);
-void rts_disableThreadAllocationLimit (StgPtr tso);
+int cmp_thread (StgPtr tso1, StgPtr tso2);
+int rts_getThreadId (StgPtr tso);
#if !defined(mingw32_HOST_OS)
pid_t forkProcess (HsStablePtr *entry);
diff --git a/includes/rts/storage/TSO.h b/includes/rts/storage/TSO.h
index b933067574..187b6682e0 100644
--- a/includes/rts/storage/TSO.h
+++ b/includes/rts/storage/TSO.h
@@ -145,18 +145,15 @@ typedef struct StgTSO_ {
*/
struct StgBlockingQueue_ *bq;
- /*
- * The allocation limit for this thread, which is updated as the
- * thread allocates. If the value drops below zero, and
- * TSO_ALLOC_LIMIT is set in flags, we raise an exception in the
- * thread, and give the thread a little more space to handle the
- * exception before we raise the exception again.
- *
- * This is an integer, because we might update it in a place where
- * it isn't convenient to raise the exception, so we want it to
- * stay negative until we get around to checking it.
- */
- StgInt64 alloc_limit; /* in bytes */
+#ifdef TICKY_TICKY
+ /* TICKY-specific stuff would go here. */
+#endif
+#ifdef PROFILING
+ StgTSOProfInfo prof;
+#endif
+#ifdef mingw32_HOST_OS
+ StgWord32 saved_winerror;
+#endif
/*
* sum of the sizes of all stack chunks (in words), used to decide
@@ -171,16 +168,6 @@ typedef struct StgTSO_ {
*/
StgWord32 tot_stack_size;
-#ifdef TICKY_TICKY
- /* TICKY-specific stuff would go here. */
-#endif
-#ifdef PROFILING
- StgTSOProfInfo prof;
-#endif
-#ifdef mingw32_HOST_OS
- StgWord32 saved_winerror;
-#endif
-
} *StgTSOPtr;
typedef struct StgStack_ {
diff --git a/libraries/base/Control/Exception.hs b/libraries/base/Control/Exception.hs
index e13a0e978b..7c019eb5ca 100644
--- a/libraries/base/Control/Exception.hs
+++ b/libraries/base/Control/Exception.hs
@@ -48,7 +48,6 @@ module Control.Exception (
NestedAtomically(..),
BlockedIndefinitelyOnMVar(..),
BlockedIndefinitelyOnSTM(..),
- AllocationLimitExceeded(..),
Deadlock(..),
NoMethodError(..),
PatternMatchFail(..),
diff --git a/libraries/base/Control/Exception/Base.hs b/libraries/base/Control/Exception/Base.hs
index be9f4e5e41..d8a0d9635f 100644
--- a/libraries/base/Control/Exception/Base.hs
+++ b/libraries/base/Control/Exception/Base.hs
@@ -31,7 +31,6 @@ module Control.Exception.Base (
NestedAtomically(..),
BlockedIndefinitelyOnMVar(..),
BlockedIndefinitelyOnSTM(..),
- AllocationLimitExceeded(..),
Deadlock(..),
NoMethodError(..),
PatternMatchFail(..),
diff --git a/libraries/base/GHC/Conc.lhs b/libraries/base/GHC/Conc.lhs
index 804fd1539d..1ba17f2912 100644
--- a/libraries/base/GHC/Conc.lhs
+++ b/libraries/base/GHC/Conc.lhs
@@ -60,12 +60,6 @@ module GHC.Conc
, threadWaitWriteSTM
, closeFdWith
- -- * Allocation counter and limit
- , setAllocationCounter
- , getAllocationCounter
- , enableAllocationLimit
- , disableAllocationLimit
-
-- * TVars
, STM(..)
, atomically
diff --git a/libraries/base/GHC/Conc/Sync.lhs b/libraries/base/GHC/Conc/Sync.lhs
index 6d786f5530..ebb7226d09 100644
--- a/libraries/base/GHC/Conc/Sync.lhs
+++ b/libraries/base/GHC/Conc/Sync.lhs
@@ -61,12 +61,6 @@ module GHC.Conc.Sync
, threadStatus
, threadCapability
- -- * Allocation counter and quota
- , setAllocationCounter
- , getAllocationCounter
- , enableAllocationLimit
- , disableAllocationLimit
-
-- * TVars
, STM(..)
, atomically
@@ -183,92 +177,16 @@ instance Eq ThreadId where
instance Ord ThreadId where
compare = cmpThread
--- | Every thread has an allocation counter that tracks how much
--- memory has been allocated by the thread. The counter is
--- initialized to zero, and 'setAllocationCounter' sets the current
--- value. The allocation counter counts *down*, so in the absence of
--- a call to 'setAllocationCounter' its value is the negation of the
--- number of bytes of memory allocated by the thread.
---
--- There are two things that you can do with this counter:
---
--- * Use it as a simple profiling mechanism, with
--- 'getAllocationCounter'.
---
--- * Use it as a resource limit. See 'enableAllocationLimit'.
---
--- Allocation accounting is accurate only to about 4Kbytes.
---
-setAllocationCounter :: Int64 -> IO ()
-setAllocationCounter i = do
- ThreadId t <- myThreadId
- rts_setThreadAllocationCounter t i
-
--- | Return the current value of the allocation counter for the
--- current thread.
-getAllocationCounter :: IO Int64
-getAllocationCounter = do
- ThreadId t <- myThreadId
- rts_getThreadAllocationCounter t
-
--- | Enables the allocation counter to be treated as a limit for the
--- current thread. When the allocation limit is enabled, if the
--- allocation counter counts down below zero, the thread will be sent
--- the 'AllocationLimitExceeded' asynchronous exception. When this
--- happens, the counter is reinitialised (by default
--- to 100K, but tunable with the @+RTS -xq@ option) so that it can handle
--- the exception and perform any necessary clean up. If it exhausts
--- this additional allowance, another 'AllocationLimitExceeded' exception
--- is sent, and so forth.
---
--- Note that memory allocation is unrelated to /live memory/, also
--- known as /heap residency/. A thread can allocate a large amount of
--- memory and retain anything between none and all of it. It is
--- better to think of the allocation limit as a limit on
--- /CPU time/, rather than a limit on memory.
---
--- Compared to using timeouts, allocation limits don't count time
--- spent blocked or in foreign calls.
---
-enableAllocationLimit :: IO ()
-enableAllocationLimit = do
- ThreadId t <- myThreadId
- rts_enableThreadAllocationLimit t
-
--- | Disable allocation limit processing for the current thread.
-disableAllocationLimit :: IO ()
-disableAllocationLimit = do
- ThreadId t <- myThreadId
- rts_disableThreadAllocationLimit t
-
--- We cannot do these operations safely on another thread, because on
--- a 32-bit machine we cannot do atomic operations on a 64-bit value.
--- Therefore, we only expose APIs that allow getting and setting the
--- limit of the current thread.
-foreign import ccall unsafe "rts_setThreadAllocationCounter"
- rts_setThreadAllocationCounter :: ThreadId# -> Int64 -> IO ()
-
-foreign import ccall unsafe "rts_getThreadAllocationCounter"
- rts_getThreadAllocationCounter :: ThreadId# -> IO Int64
-
-foreign import ccall unsafe "rts_enableThreadAllocationLimit"
- rts_enableThreadAllocationLimit :: ThreadId# -> IO ()
-
-foreign import ccall unsafe "rts_disableThreadAllocationLimit"
- rts_disableThreadAllocationLimit :: ThreadId# -> IO ()
-
{- |
-Creates a new thread to run the 'IO' computation passed as the
+Sparks off a new thread to run the 'IO' computation passed as the
first argument, and returns the 'ThreadId' of the newly created
thread.
-The new thread will be a lightweight, /unbound/ thread. Foreign calls
-made by this thread are not guaranteed to be made by any particular OS
-thread; if you need foreign calls to be made by a particular OS
-thread, then use 'Control.Concurrent.forkOS' instead.
+The new thread will be a lightweight thread; if you want to use a foreign
+library that uses thread-local storage, use 'Control.Concurrent.forkOS' instead.
-The new thread inherits the /masked/ state of the parent (see
-'Control.Exception.mask').
+GHC note: the new thread inherits the /masked/ state of the parent
+(see 'Control.Exception.mask').
The newly created thread has an exception handler that discards the
exceptions 'BlockedIndefinitelyOnMVar', 'BlockedIndefinitelyOnSTM', and
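
The haddocks deleted above also describe using the counter as a simple profiling mechanism via getAllocationCounter. A hedged sketch of that idea, again assuming the reverted API is available; the helper name allocatedBy is invented for illustration, and accuracy is only to about 4 KB, as the removed comment notes:

    import Data.Int (Int64)
    import GHC.Conc (getAllocationCounter, setAllocationCounter)

    -- Approximate bytes allocated by an action on the current thread.
    allocatedBy :: IO a -> IO (a, Int64)
    allocatedBy act = do
      setAllocationCounter 0
      x <- act
      c <- getAllocationCounter
      -- The counter counts down, so negate it to recover bytes allocated.
      return (x, negate c)
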
diff --git a/libraries/base/GHC/IO/Exception.hs b/libraries/base/GHC/IO/Exception.hs
index af9e7661e0..7f5bc4ef18 100644
--- a/libraries/base/GHC/IO/Exception.hs
+++ b/libraries/base/GHC/IO/Exception.hs
@@ -22,7 +22,6 @@ module GHC.IO.Exception (
BlockedIndefinitelyOnMVar(..), blockedIndefinitelyOnMVar,
BlockedIndefinitelyOnSTM(..), blockedIndefinitelyOnSTM,
Deadlock(..),
- AllocationLimitExceeded(..), allocationLimitExceeded,
AssertionFailed(..),
SomeAsyncException(..),
@@ -100,23 +99,6 @@ instance Show Deadlock where
-----
--- |This thread has exceeded its allocation limit. See
--- 'GHC.Conc.setAllocationCounter' and
--- 'GHC.Conc.enableAllocationLimit'.
-data AllocationLimitExceeded = AllocationLimitExceeded
- deriving Typeable
-
-instance Exception AllocationLimitExceeded
-
-instance Show AllocationLimitExceeded where
- showsPrec _ AllocationLimitExceeded =
- showString "allocation limit exceeded"
-
-allocationLimitExceeded :: SomeException -- for the RTS
-allocationLimitExceeded = toException AllocationLimitExceeded
-
------
-
-- |'assert' was applied to 'False'.
data AssertionFailed = AssertionFailed String
deriving Typeable
@@ -193,8 +175,7 @@ data ArrayException
instance Exception ArrayException
--- for the RTS
-stackOverflow, heapOverflow :: SomeException
+stackOverflow, heapOverflow :: SomeException -- for the RTS
stackOverflow = toException StackOverflow
heapOverflow = toException HeapOverflow
diff --git a/rts/HeapStackCheck.cmm b/rts/HeapStackCheck.cmm
index 280820334a..12bcfb26df 100644
--- a/rts/HeapStackCheck.cmm
+++ b/rts/HeapStackCheck.cmm
@@ -100,9 +100,7 @@ stg_gc_noregs
CurrentNursery = bdescr_link(CurrentNursery);
OPEN_NURSERY();
if (Capability_context_switch(MyCapability()) != 0 :: CInt ||
- Capability_interrupt(MyCapability()) != 0 :: CInt ||
- (StgTSO_alloc_limit(CurrentTSO) `lt` 0 &&
- (TO_W_(StgTSO_flags(CurrentTSO)) & TSO_ALLOC_LIMIT) != 0)) {
+ Capability_interrupt(MyCapability()) != 0 :: CInt) {
ret = ThreadYielding;
goto sched;
} else {
diff --git a/rts/Linker.c b/rts/Linker.c
index 6ddf4bef9d..ea7c1c67b9 100644
--- a/rts/Linker.c
+++ b/rts/Linker.c
@@ -1230,10 +1230,6 @@ typedef struct _RtsSymbolVal {
SymI_HasProto(rts_getFunPtr) \
SymI_HasProto(rts_getStablePtr) \
SymI_HasProto(rts_getThreadId) \
- SymI_HasProto(rts_getThreadAllocationCounter) \
- SymI_HasProto(rts_setThreadAllocationCounter) \
- SymI_HasProto(rts_enableThreadAllocationLimit) \
- SymI_HasProto(rts_disableThreadAllocationLimit) \
SymI_HasProto(rts_getWord) \
SymI_HasProto(rts_getWord8) \
SymI_HasProto(rts_getWord16) \
diff --git a/rts/Prelude.h b/rts/Prelude.h
index ca08e2c84e..89e80a0a3d 100644
--- a/rts/Prelude.h
+++ b/rts/Prelude.h
@@ -37,7 +37,6 @@ extern StgClosure ZCMain_main_closure;
PRELUDE_CLOSURE(base_GHCziIOziException_stackOverflow_closure);
PRELUDE_CLOSURE(base_GHCziIOziException_heapOverflow_closure);
-PRELUDE_CLOSURE(base_GHCziIOziException_allocationLimitExceeded_closure);
PRELUDE_CLOSURE(base_GHCziIOziException_blockedIndefinitelyOnThrowTo_closure);
PRELUDE_CLOSURE(base_GHCziIOziException_blockedIndefinitelyOnMVar_closure);
PRELUDE_CLOSURE(base_GHCziIOziException_blockedIndefinitelyOnSTM_closure);
@@ -101,7 +100,6 @@ PRELUDE_INFO(base_GHCziStable_StablePtr_con_info);
#define stackOverflow_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_stackOverflow_closure)
#define heapOverflow_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_heapOverflow_closure)
-#define allocationLimitExceeded_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_allocationLimitExceeded_closure)
#define blockedIndefinitelyOnMVar_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_blockedIndefinitelyOnMVar_closure)
#define blockedIndefinitelyOnSTM_closure DLL_IMPORT_DATA_REF(base_GHCziIOziException_blockedIndefinitelyOnSTM_closure)
#define nonTermination_closure DLL_IMPORT_DATA_REF(base_ControlziExceptionziBase_nonTermination_closure)
diff --git a/rts/RaiseAsync.c b/rts/RaiseAsync.c
index 847076bc6f..a5440e40ad 100644
--- a/rts/RaiseAsync.c
+++ b/rts/RaiseAsync.c
@@ -89,60 +89,6 @@ suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
}
/* -----------------------------------------------------------------------------
- throwToSelf
-
- Useful for throwing an async exception in a thread from the
- runtime. It handles unlocking the throwto message returned by
- throwTo().
-
- Note [Throw to self when masked]
-
- When a StackOverflow occurs when the thread is masked, we want to
- defer the exception to when the thread becomes unmasked/hits an
- interruptible point. We already have a mechanism for doing this,
- the blocked_exceptions list, but the use here is a bit unusual,
- because an exception is normally only added to this list upon
- an asynchronous 'throwTo' call (with all of the relevant
- multithreaded nonsense). Morally, a stack overflow should be an
- asynchronous exception sent by a thread to itself, and it should
- have the same semantics. But there are a few key differences:
-
- - If you actually tried to send an asynchronous exception to
- yourself using throwTo, the exception would actually immediately
- be delivered. This is because throwTo itself is considered an
- interruptible point, so the exception is always deliverable. Thus,
- ordinarily, we never end up with a message to onesself in the
- blocked_exceptions queue.
-
- - In the case of a StackOverflow, we don't actually care about the
- wakeup semantics; when an exception is delivered, the thread that
- originally threw the exception should be woken up, since throwTo
- blocks until the exception is successfully thrown. Fortunately,
- it is harmless to wakeup a thread that doesn't actually need waking
- up, e.g. ourselves.
-
- - No synchronization is necessary, because we own the TSO and the
- capability. You can observe this by tracing through the execution
- of throwTo. We skip synchronizing the message and inter-capability
- communication.
-
- We think this doesn't break any invariants, but do be careful!
- -------------------------------------------------------------------------- */
-
-void
-throwToSelf (Capability *cap, StgTSO *tso, StgClosure *exception)
-{
- MessageThrowTo *m;
-
- m = throwTo(cap, tso, tso, exception);
-
- if (m != NULL) {
- // throwTo leaves it locked
- unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
- }
-}
-
-/* -----------------------------------------------------------------------------
throwTo
This function may be used to throw an exception from one thread to
diff --git a/rts/RaiseAsync.h b/rts/RaiseAsync.h
index 65ca4f5103..1f61b8c72d 100644
--- a/rts/RaiseAsync.h
+++ b/rts/RaiseAsync.h
@@ -28,11 +28,7 @@ void throwToSingleThreaded_ (Capability *cap,
StgClosure *exception,
rtsBool stop_at_atomically);
-void throwToSelf (Capability *cap,
- StgTSO *tso,
- StgClosure *exception);
-
-void suspendComputation (Capability *cap,
+void suspendComputation (Capability *cap,
StgTSO *tso,
StgUpdateFrame *stop_here);
diff --git a/rts/RtsFlags.c b/rts/RtsFlags.c
index fb1e2ec07b..af1b2049f6 100644
--- a/rts/RtsFlags.c
+++ b/rts/RtsFlags.c
@@ -137,7 +137,6 @@ void initRtsFlagsDefaults(void)
#else
RtsFlags.GcFlags.heapBase = 0; /* means don't care */
#endif
- RtsFlags.GcFlags.allocLimitGrace = (100*1024) / BLOCK_SIZE;
#ifdef DEBUG
RtsFlags.DebugFlags.scheduler = rtsFalse;
@@ -403,8 +402,6 @@ usage_text[] = {
" +PAPI_EVENT - collect papi preset event PAPI_EVENT",
" #NATIVE_EVENT - collect native event NATIVE_EVENT (in hex)",
#endif
-" -xq The allocation limit given to a thread after it receives",
-" an AllocationLimitExceeded exception. (default: 100k)",
"",
"RTS options may also be specified using the GHCRTS environment variable.",
"",
@@ -1363,13 +1360,6 @@ error = rtsTrue;
/* The option prefix '-xx' is reserved for future extension. KSW 1999-11. */
- case 'q':
- OPTION_UNSAFE;
- RtsFlags.GcFlags.allocLimitGrace
- = decodeSize(rts_argv[arg], 3, BLOCK_SIZE, HS_INT_MAX)
- / BLOCK_SIZE;
- break;
-
default:
OPTION_SAFE;
errorBelch("unknown RTS option: %s",rts_argv[arg]);
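
For reference, the removed -xq option was passed like any other RTS flag; the deleted allocLimit4 test below supplies it via extra_run_opts('+RTS -xq300k -RTS'), which amounts to an invocation along these lines (program name taken from that test):

    ./allocLimit4 +RTS -xq300k -RTS
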
diff --git a/rts/RtsStartup.c b/rts/RtsStartup.c
index 640811ff17..aa7306f88a 100644
--- a/rts/RtsStartup.c
+++ b/rts/RtsStartup.c
@@ -208,7 +208,6 @@ hs_init_ghc(int *argc, char **argv[], RtsConfig rts_config)
getStablePtr((StgPtr)blockedIndefinitelyOnMVar_closure);
getStablePtr((StgPtr)nonTermination_closure);
getStablePtr((StgPtr)blockedIndefinitelyOnSTM_closure);
- getStablePtr((StgPtr)allocationLimitExceeded_closure);
getStablePtr((StgPtr)nestedAtomically_closure);
getStablePtr((StgPtr)runSparks_closure);
diff --git a/rts/Schedule.c b/rts/Schedule.c
index b1b489a6d1..adf2b5cb39 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -481,10 +481,6 @@ run_thread:
// happened. So find the new location:
t = cap->r.rCurrentTSO;
- // cap->r.rCurrentTSO is charged for calls to allocate(), so we
- // don't want it set during scheduler operations.
- cap->r.rCurrentTSO = NULL;
-
// And save the current errno in this thread.
// XXX: possibly bogus for SMP because this thread might already
// be running again, see code below.
@@ -1082,21 +1078,6 @@ schedulePostRunThread (Capability *cap, StgTSO *t)
}
}
- //
- // If the current thread's allocation limit has run out, send it
- // the AllocationLimitExceeded exception.
-
- if (t->alloc_limit < 0 && (t->flags & TSO_ALLOC_LIMIT)) {
- // Use a throwToSelf rather than a throwToSingleThreaded, because
- // it correctly handles the case where the thread is currently
- // inside mask. Also the thread might be blocked (e.g. on an
- // MVar), and throwToSingleThreaded doesn't unblock it
- // correctly in that case.
- throwToSelf(cap, t, allocationLimitExceeded_closure);
- t->alloc_limit = (StgInt64)RtsFlags.GcFlags.allocLimitGrace
- * BLOCK_SIZE;
- }
-
/* some statistics gathering in the parallel case */
}
diff --git a/rts/Threads.c b/rts/Threads.c
index b82295284b..af4353fc49 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -110,8 +110,6 @@ createThread(Capability *cap, W_ size)
tso->stackobj = stack;
tso->tot_stack_size = stack->stack_size;
- tso->alloc_limit = 0;
-
tso->trec = NO_TREC;
#ifdef PROFILING
@@ -166,31 +164,6 @@ rts_getThreadId(StgPtr tso)
return ((StgTSO *)tso)->id;
}
-/* ---------------------------------------------------------------------------
- * Getting & setting the thread allocation limit
- * ------------------------------------------------------------------------ */
-HsInt64 rts_getThreadAllocationCounter(StgPtr tso)
-{
- // NB. doesn't take into account allocation in the current nursery
- // block, so it might be off by up to 4k.
- return ((StgTSO *)tso)->alloc_limit;
-}
-
-void rts_setThreadAllocationCounter(StgPtr tso, HsInt64 i)
-{
- ((StgTSO *)tso)->alloc_limit = i;
-}
-
-void rts_enableThreadAllocationLimit(StgPtr tso)
-{
- ((StgTSO *)tso)->flags |= TSO_ALLOC_LIMIT;
-}
-
-void rts_disableThreadAllocationLimit(StgPtr tso)
-{
- ((StgTSO *)tso)->flags &= ~TSO_ALLOC_LIMIT;
-}
-
/* -----------------------------------------------------------------------------
Remove a thread from a queue.
Fails fatally if the TSO is not on the queue.
@@ -551,8 +524,21 @@ threadStackOverflow (Capability *cap, StgTSO *tso)
stg_min(tso->stackobj->stack + tso->stackobj->stack_size,
tso->stackobj->sp+64)));
- // Note [Throw to self when masked], also #767 and #8303.
- throwToSelf(cap, tso, (StgClosure *)stackOverflow_closure);
+ if (tso->flags & TSO_BLOCKEX) {
+ // NB. StackOverflow exceptions must be deferred if the thread is
+ // inside Control.Exception.mask. See bug #767 and bug #8303.
+ // This implementation is a minor hack, see Note [Throw to self when masked]
+ MessageThrowTo *msg = (MessageThrowTo*)allocate(cap, sizeofW(MessageThrowTo));
+ SET_HDR(msg, &stg_MSG_THROWTO_info, CCS_SYSTEM);
+ msg->source = tso;
+ msg->target = tso;
+ msg->exception = (StgClosure *)stackOverflow_closure;
+ blockedThrowTo(cap, tso, msg);
+ } else {
+ // Send this thread the StackOverflow exception
+ throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
+ return;
+ }
}
@@ -683,6 +669,39 @@ threadStackOverflow (Capability *cap, StgTSO *tso)
// IF_DEBUG(scheduler,printTSO(new_tso));
}
+/* Note [Throw to self when masked]
+ *
+ * When a StackOverflow occurs when the thread is masked, we want to
+ * defer the exception to when the thread becomes unmasked/hits an
+ * interruptible point. We already have a mechanism for doing this,
+ * the blocked_exceptions list, but the use here is a bit unusual,
+ * because an exception is normally only added to this list upon
+ * an asynchronous 'throwTo' call (with all of the relevant
+ * multithreaded nonsense). Morally, a stack overflow should be an
+ * asynchronous exception sent by a thread to itself, and it should
+ * have the same semantics. But there are a few key differences:
+ *
+ * - If you actually tried to send an asynchronous exception to
+ * yourself using throwTo, the exception would actually immediately
+ * be delivered. This is because throwTo itself is considered an
+ * interruptible point, so the exception is always deliverable. Thus,
+ * ordinarily, we never end up with a message to onesself in the
+ * blocked_exceptions queue.
+ *
+ * - In the case of a StackOverflow, we don't actually care about the
+ * wakeup semantics; when an exception is delivered, the thread that
+ * originally threw the exception should be woken up, since throwTo
+ * blocks until the exception is successfully thrown. Fortunately,
+ * it is harmless to wakeup a thread that doesn't actually need waking
+ * up, e.g. ourselves.
+ *
+ * - No synchronization is necessary, because we own the TSO and the
+ * capability. You can observe this by tracing through the execution
+ * of throwTo. We skip synchronizing the message and inter-capability
+ * communication.
+ *
+ * We think this doesn't break any invariants, but do be careful!
+ */
/* ---------------------------------------------------------------------------
diff --git a/rts/package.conf.in b/rts/package.conf.in
index 914dd9c156..4c8686f262 100644
--- a/rts/package.conf.in
+++ b/rts/package.conf.in
@@ -98,7 +98,6 @@ ld-options:
, "-Wl,-u,_base_ControlziExceptionziBase_nonTermination_closure"
, "-Wl,-u,_base_GHCziIOziException_blockedIndefinitelyOnMVar_closure"
, "-Wl,-u,_base_GHCziIOziException_blockedIndefinitelyOnSTM_closure"
- , "-Wl,-u,_base_GHCziIOziException_allocationLimitExceeded_closure"
, "-Wl,-u,_base_ControlziExceptionziBase_nestedAtomically_closure"
, "-Wl,-u,_base_GHCziWeak_runFinalizzerBatch_closure"
, "-Wl,-u,_base_GHCziTopHandler_flushStdHandles_closure"
@@ -139,7 +138,6 @@ ld-options:
, "-Wl,-u,base_ControlziExceptionziBase_nonTermination_closure"
, "-Wl,-u,base_GHCziIOziException_blockedIndefinitelyOnMVar_closure"
, "-Wl,-u,base_GHCziIOziException_blockedIndefinitelyOnSTM_closure"
- , "-Wl,-u,base_GHCziIOziException_allocationLimitExceeded_closure"
, "-Wl,-u,base_ControlziExceptionziBase_nestedAtomically_closure"
, "-Wl,-u,base_GHCziWeak_runFinalizzerBatch_closure"
, "-Wl,-u,base_GHCziTopHandler_flushStdHandles_closure"
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 865a890fcc..86bd1c2bb3 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -684,10 +684,7 @@ StgPtr allocate (Capability *cap, W_ n)
TICK_ALLOC_HEAP_NOCTR(WDS(n));
CCS_ALLOC(cap->r.rCCCS,n);
- if (cap->r.rCurrentTSO != NULL) {
- cap->r.rCurrentTSO->alloc_limit -= n*sizeof(W_);
- }
-
+
if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
W_ req_blocks = (W_)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
@@ -824,9 +821,6 @@ allocatePinned (Capability *cap, W_ n)
TICK_ALLOC_HEAP_NOCTR(WDS(n));
CCS_ALLOC(cap->r.rCCCS,n);
- if (cap->r.rCurrentTSO != NULL) {
- cap->r.rCurrentTSO->alloc_limit -= n*sizeof(W_);
- }
bd = cap->pinned_object_block;
diff --git a/rts/win32/libHSbase.def b/rts/win32/libHSbase.def
index 6bb19da569..119237b652 100644
--- a/rts/win32/libHSbase.def
+++ b/rts/win32/libHSbase.def
@@ -36,7 +36,6 @@ EXPORTS
base_GHCziPack_unpackCString_closure
base_GHCziIOziException_blockedIndefinitelyOnMVar_closure
base_GHCziIOziException_blockedIndefinitelyOnSTM_closure
- base_GHCziIOziException_allocationLimitExceeded_closure
base_GHCziIOziException_stackOverflow_closure
base_ControlziExceptionziBase_nonTermination_closure
diff --git a/testsuite/tests/concurrent/should_run/all.T b/testsuite/tests/concurrent/should_run/all.T
index f8f583ec8a..d4e76c6b1e 100644
--- a/testsuite/tests/concurrent/should_run/all.T
+++ b/testsuite/tests/concurrent/should_run/all.T
@@ -81,12 +81,6 @@ test('tryReadMVar1', normal, compile_and_run, [''])
test('T7970', normal, compile_and_run, [''])
-test('allocLimit1', exit_code(1), compile_and_run, [''])
-test('allocLimit2', normal, compile_and_run, [''])
-test('allocLimit3', exit_code(1), compile_and_run, [''])
-test('allocLimit4', [ extra_run_opts('+RTS -xq300k -RTS') ],
- compile_and_run, [''])
-
# -----------------------------------------------------------------------------
# These tests we only do for a full run
@@ -251,4 +245,3 @@ test('setnumcapabilities001',
# omit ghci, which can't handle unboxed tuples:
test('compareAndSwap', [omit_ways(['ghci','hpc']), reqlib('primitive')], compile_and_run, [''])
-
diff --git a/testsuite/tests/concurrent/should_run/allocLimit1.hs b/testsuite/tests/concurrent/should_run/allocLimit1.hs
deleted file mode 100644
index b1c8fa6035..0000000000
--- a/testsuite/tests/concurrent/should_run/allocLimit1.hs
+++ /dev/null
@@ -1,9 +0,0 @@
-module Main (main) where
-
-import GHC.Conc
-
-main = do
- setAllocationCounter (10*1024)
- enableAllocationLimit
- print (length [1..])
-
diff --git a/testsuite/tests/concurrent/should_run/allocLimit1.stderr b/testsuite/tests/concurrent/should_run/allocLimit1.stderr
deleted file mode 100644
index 2133e14ce1..0000000000
--- a/testsuite/tests/concurrent/should_run/allocLimit1.stderr
+++ /dev/null
@@ -1 +0,0 @@
-allocLimit1: allocation limit exceeded
diff --git a/testsuite/tests/concurrent/should_run/allocLimit2.hs b/testsuite/tests/concurrent/should_run/allocLimit2.hs
deleted file mode 100644
index 4fd117b615..0000000000
--- a/testsuite/tests/concurrent/should_run/allocLimit2.hs
+++ /dev/null
@@ -1,17 +0,0 @@
-module Main (main) where
-
-import GHC.Conc
-import Control.Concurrent
-import Control.Exception
-import System.Exit
-
-main = do
- m <- newEmptyMVar
- let action = do setAllocationCounter (10*1024)
- enableAllocationLimit
- print (length [1..])
- forkFinally action (putMVar m)
- r <- takeMVar m
- case r of
- Left e | Just AllocationLimitExceeded <- fromException e -> return ()
- _ -> print r >> exitFailure
diff --git a/testsuite/tests/concurrent/should_run/allocLimit3.hs b/testsuite/tests/concurrent/should_run/allocLimit3.hs
deleted file mode 100644
index 28881dc016..0000000000
--- a/testsuite/tests/concurrent/should_run/allocLimit3.hs
+++ /dev/null
@@ -1,15 +0,0 @@
-module Main (main) where
-
-import GHC.Conc
-import Control.Concurrent
-import Control.Exception
-
-main = do
- setAllocationCounter (10*1024)
- enableAllocationLimit
-
- -- alloc limit overflow while masked: should successfully print the
- -- result, and then immediately raise the exception
- r <- mask_ $ try $ print (length [1..100000])
-
- print (r :: Either SomeException ())
diff --git a/testsuite/tests/concurrent/should_run/allocLimit3.stderr b/testsuite/tests/concurrent/should_run/allocLimit3.stderr
deleted file mode 100644
index 27ae0a9480..0000000000
--- a/testsuite/tests/concurrent/should_run/allocLimit3.stderr
+++ /dev/null
@@ -1 +0,0 @@
-allocLimit3: allocation limit exceeded
diff --git a/testsuite/tests/concurrent/should_run/allocLimit3.stdout b/testsuite/tests/concurrent/should_run/allocLimit3.stdout
deleted file mode 100644
index f7393e847d..0000000000
--- a/testsuite/tests/concurrent/should_run/allocLimit3.stdout
+++ /dev/null
@@ -1 +0,0 @@
-100000
diff --git a/testsuite/tests/concurrent/should_run/allocLimit4.hs b/testsuite/tests/concurrent/should_run/allocLimit4.hs
deleted file mode 100644
index b589ffa4af..0000000000
--- a/testsuite/tests/concurrent/should_run/allocLimit4.hs
+++ /dev/null
@@ -1,31 +0,0 @@
-module Main (main) where
-
-import GHC.Conc
-import Control.Concurrent
-import Control.Exception
-import System.Exit
-import Control.Monad
-
--- check that +RTS -xq is doing the right thing: the test requires
--- +RTS -xq300k
-
-main = do
- m <- newEmptyMVar
- let action = do
- e <- try $ do
- setAllocationCounter (10*1024)
- enableAllocationLimit
- print (length [1..])
- case e of
- Left AllocationLimitExceeded{} -> do
- c <- getAllocationCounter
- when (c < 250*1024 || c > 350*1024) $ fail "wrong limit grace"
- print (length [2..])
- Right _ ->
- fail "didn't catch AllocationLimitExceeded"
-
- forkFinally action (putMVar m)
- r <- takeMVar m
- case r of
- Left e | Just AllocationLimitExceeded <- fromException e -> return ()
- _ -> print r >> exitFailure
diff --git a/utils/deriveConstants/DeriveConstants.hs b/utils/deriveConstants/DeriveConstants.hs
index 0ed18bcf40..8c943f0584 100644
--- a/utils/deriveConstants/DeriveConstants.hs
+++ b/utils/deriveConstants/DeriveConstants.hs
@@ -411,7 +411,6 @@ wanteds = concat
,closureField C "StgTSO" "flags"
,closureField C "StgTSO" "dirty"
,closureField C "StgTSO" "bq"
- ,closureField Both "StgTSO" "alloc_limit"
,closureField_ Both "StgTSO_cccs" "StgTSO" "prof.cccs"
,closureField Both "StgTSO" "stackobj"