-rw-r--r--  ghc/includes/ClosureTypes.h        |   1
-rw-r--r--  ghc/includes/Closures.h            |   7
-rw-r--r--  ghc/includes/SchedAPI.h            |   3
-rw-r--r--  ghc/includes/StgMiscClosures.h     |   2
-rw-r--r--  ghc/includes/Storage.h             |  14
-rw-r--r--  ghc/includes/Updates.h             |  68
-rw-r--r--  ghc/includes/mkDerivedConstants.c  |   3
-rw-r--r--  ghc/rts/Capability.c               | 117
-rw-r--r--  ghc/rts/Capability.h               |   7
-rw-r--r--  ghc/rts/GC.c                       |  62
-rw-r--r--  ghc/rts/GCCompact.c                |   2
-rw-r--r--  ghc/rts/HeapStackCheck.cmm         |  12
-rw-r--r--  ghc/rts/LdvProfile.c               |   1
-rw-r--r--  ghc/rts/Linker.c                   |   1
-rw-r--r--  ghc/rts/Printer.c                  |  10
-rw-r--r--  ghc/rts/ProfHeap.c                 |   2
-rw-r--r--  ghc/rts/RetainerProfile.c          |   8
-rw-r--r--  ghc/rts/RtsAPI.c                   |   4
-rw-r--r--  ghc/rts/RtsStartup.c               |   1
-rw-r--r--  ghc/rts/Sanity.c                   |  14
-rw-r--r--  ghc/rts/Schedule.c                 | 139
-rw-r--r--  ghc/rts/Schedule.h                 |  13
-rw-r--r--  ghc/rts/Sparks.c                   |   3
-rw-r--r--  ghc/rts/Sparks.h                   |   2
-rw-r--r--  ghc/rts/Stable.c                   |   1
-rw-r--r--  ghc/rts/Stats.c                    |  33
-rw-r--r--  ghc/rts/StgMiscClosures.cmm        |  97
-rw-r--r--  ghc/rts/StgStartup.cmm             |   2
-rw-r--r--  ghc/rts/Storage.c                  |  28
-rw-r--r--  ghc/rts/Task.c                     |  22
-rw-r--r--  ghc/rts/Task.h                     |   2
31 files changed, 272 insertions(+), 409 deletions(-)
diff --git a/ghc/includes/ClosureTypes.h b/ghc/includes/ClosureTypes.h
index 243111e330..d79b4f92fb 100644
--- a/ghc/includes/ClosureTypes.h
+++ b/ghc/includes/ClosureTypes.h
@@ -59,7 +59,6 @@
#define STOP_FRAME 44
#define CAF_BLACKHOLE 45
#define BLACKHOLE 46
-#define BLACKHOLE_BQ 47
#define SE_BLACKHOLE 48
#define SE_CAF_BLACKHOLE 49
#define MVAR 50
diff --git a/ghc/includes/Closures.h b/ghc/includes/Closures.h
index a0447316c9..066fe91d3e 100644
--- a/ghc/includes/Closures.h
+++ b/ghc/includes/Closures.h
@@ -403,13 +403,6 @@ typedef struct StgRBH_ {
struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
} StgRBH;
-#else
-
-typedef struct StgBlockingQueue_ {
- StgHeader header;
- struct StgTSO_ *blocking_queue;
-} StgBlockingQueue;
-
#endif
#if defined(PAR)
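
The struct removed above was the whole reason BLACKHOLE_BQ existed: in the non-GRAN, non-PAR build, a black hole that acquired waiters was rewritten into a closure carrying its own queue of blocked TSOs. The rest of this patch replaces that with a single scheduler-owned list, so no per-closure queue type is needed. A minimal sketch of the two shapes, using simplified stand-in types rather than the real RTS definitions:

    /* stand-in for StgTSO; threads chain through the link field */
    typedef struct TSO_ {
        struct TSO_ *link;
    } TSO;

    #define END_TSO_QUEUE ((TSO *) 0)

    /* old shape: the black hole itself owned a queue (just removed) */
    typedef struct {
        const void *info;           /* stand-in for StgHeader */
        TSO        *blocking_queue; /* waiters on this closure only */
    } OldBlockingQueue;

    /* new shape: one global list for all black-hole waiters */
    static TSO *blackhole_queue = END_TSO_QUEUE;

    static void block_on_black_hole(TSO *tso)
    {
        tso->link       = blackhole_queue;  /* cons onto the global list */
        blackhole_queue = tso;
    }

The trade-off is visible throughout the patch: blocking no longer mutates the black hole (no info-table rewrite, no recordMutable call), at the cost of scanning one global list when looking for threads to wake.
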
diff --git a/ghc/includes/SchedAPI.h b/ghc/includes/SchedAPI.h
index 8fdf17ed3f..3814b6f4a7 100644
--- a/ghc/includes/SchedAPI.h
+++ b/ghc/includes/SchedAPI.h
@@ -26,9 +26,6 @@ extern StgTSO *createThread(nat stack_size, StgInt pri);
#else
extern StgTSO *createThread(nat stack_size);
#endif
-#if defined(PAR) || defined(SMP)
-extern void taskStart(void);
-#endif
extern void scheduleThread(StgTSO *tso);
extern SchedulerStatus scheduleWaitThread(StgTSO *tso, /*out*/HaskellObj* ret,
Capability *initialCapability);
diff --git a/ghc/includes/StgMiscClosures.h b/ghc/includes/StgMiscClosures.h
index 16d1483bd9..f8332aad2e 100644
--- a/ghc/includes/StgMiscClosures.h
+++ b/ghc/includes/StgMiscClosures.h
@@ -97,7 +97,6 @@ RTS_INFO(stg_CAF_UNENTERED_info);
RTS_INFO(stg_CAF_ENTERED_info);
RTS_INFO(stg_BLACKHOLE_info);
RTS_INFO(stg_CAF_BLACKHOLE_info);
-RTS_INFO(stg_BLACKHOLE_BQ_info);
#ifdef TICKY_TICKY
RTS_INFO(stg_SE_BLACKHOLE_info);
RTS_INFO(stg_SE_CAF_BLACKHOLE_info);
@@ -158,7 +157,6 @@ RTS_ENTRY(stg_CAF_UNENTERED_entry);
RTS_ENTRY(stg_CAF_ENTERED_entry);
RTS_ENTRY(stg_BLACKHOLE_entry);
RTS_ENTRY(stg_CAF_BLACKHOLE_entry);
-RTS_ENTRY(stg_BLACKHOLE_BQ_entry);
#ifdef TICKY_TICKY
RTS_ENTRY(stg_SE_BLACKHOLE_entry);
RTS_ENTRY(stg_SE_CAF_BLACKHOLE_entry);
diff --git a/ghc/includes/Storage.h b/ghc/includes/Storage.h
index 7ed9b91061..229ee33264 100644
--- a/ghc/includes/Storage.h
+++ b/ghc/includes/Storage.h
@@ -187,18 +187,6 @@ extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
-------------------------------------------------------------------------- */
-/*
- * Storage manager mutex
- */
-#if defined(SMP)
-extern Mutex sm_mutex;
-#define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex)
-#define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex)
-#else
-#define ACQUIRE_SM_LOCK
-#define RELEASE_SM_LOCK
-#endif
-
/* ToDo: shouldn't recordMutable acquire some
* kind of lock in the SMP case? Or do we need per-processor
* mutable lists?
@@ -277,7 +265,7 @@ INLINE_HEADER StgOffset THUNK_SELECTOR_sizeW ( void )
{ return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgSelector)); }
INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void )
-{ return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgBlockingQueue)); }
+{ return sizeofW(StgHeader)+MIN_UPD_SIZE; }
/* --------------------------------------------------------------------------
Sizes of closures
diff --git a/ghc/includes/Updates.h b/ghc/includes/Updates.h
index cf8b8cdcd7..a748a376ce 100644
--- a/ghc/includes/Updates.h
+++ b/ghc/includes/Updates.h
@@ -64,8 +64,7 @@
BLOCK_BEGIN \
DECLARE_IPTR(info); \
info = GET_INFO(updclosure); \
- AWAKEN_BQ(info,updclosure); \
- updateWithIndirection(GET_INFO(updclosure), ind_info, \
+ updateWithIndirection(ind_info, \
updclosure, \
heapptr, \
and_then); \
@@ -74,11 +73,7 @@
#if defined(PROFILING) || defined(TICKY_TICKY)
#define UPD_PERM_IND(updclosure, heapptr) \
BLOCK_BEGIN \
- DECLARE_IPTR(info); \
- info = GET_INFO(updclosure); \
- AWAKEN_BQ(info,updclosure); \
- updateWithPermIndirection(info, \
- updclosure, \
+ updateWithPermIndirection(updclosure, \
heapptr); \
BLOCK_END
#endif
@@ -88,20 +83,13 @@
# ifdef TICKY_TICKY
# define UPD_IND_NOLOCK(updclosure, heapptr) \
BLOCK_BEGIN \
- DECLARE_IPTR(info); \
- info = GET_INFO(updclosure); \
- AWAKEN_BQ_NOLOCK(info,updclosure); \
- updateWithPermIndirection(info, \
- updclosure, \
+ updateWithPermIndirection(updclosure, \
heapptr); \
BLOCK_END
# else
# define UPD_IND_NOLOCK(updclosure, heapptr) \
BLOCK_BEGIN \
- DECLARE_IPTR(info); \
- info = GET_INFO(updclosure); \
- AWAKEN_BQ_NOLOCK(info,updclosure); \
- updateWithIndirection(info, INFO_PTR(stg_IND_info), \
+ updateWithIndirection(INFO_PTR(stg_IND_info), \
updclosure, \
heapptr,); \
BLOCK_END
@@ -167,31 +155,6 @@ extern void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
DO_AWAKEN_BQ(((StgBlockingQueue *)closure)->blocking_queue, closure); \
}
-
-#else /* !GRAN && !PAR */
-
-#define DO_AWAKEN_BQ(closure) \
- FCALL awakenBlockedQueue(StgBlockingQueue_blocking_queue(closure) ARG_PTR);
-
-#define AWAKEN_BQ(info,closure) \
- if (info == INFO_PTR(stg_BLACKHOLE_BQ_info)) { \
- DO_AWAKEN_BQ(closure); \
- }
-
-#define AWAKEN_STATIC_BQ(info,closure) \
- if (info == INFO_PTR(stg_BLACKHOLE_BQ_STATIC_info)) { \
- DO_AWAKEN_BQ(closure); \
- }
-
-#ifdef RTS_SUPPORTS_THREADS
-#define DO_AWAKEN_BQ_NOLOCK(closure) \
- FCALL awakenBlockedQueueNoLock(StgBlockingQueue_blocking_queue(closure) ARG_PTR);
-
-#define AWAKEN_BQ_NOLOCK(info,closure) \
- if (info == INFO_PTR(stg_BLACKHOLE_BQ_info)) { \
- DO_AWAKEN_BQ_NOLOCK(closure); \
- }
-#endif
#endif /* GRAN || PAR */
/* -----------------------------------------------------------------------------
@@ -279,7 +242,7 @@ DEBUG_FILL_SLOP(StgClosure *p)
*/
#ifdef CMINUSMINUS
#define generation(n) (W_[generations] + n*SIZEOF_generation)
-#define updateWithIndirection(info, ind_info, p1, p2, and_then) \
+#define updateWithIndirection(ind_info, p1, p2, and_then) \
W_ bd; \
\
/* ASSERT( p1 != p2 && !closure_IND(p1) ); \
@@ -292,11 +255,9 @@ DEBUG_FILL_SLOP(StgClosure *p)
TICK_UPD_NEW_IND(); \
and_then; \
} else { \
- if (info != stg_BLACKHOLE_BQ_info) { \
- DEBUG_FILL_SLOP(p1); \
- foreign "C" recordMutableGen(p1 "ptr", \
+ DEBUG_FILL_SLOP(p1); \
+ foreign "C" recordMutableGen(p1 "ptr", \
generation(TO_W_(bdescr_gen_no(bd))) "ptr"); \
- } \
StgInd_indirectee(p1) = p2; \
SET_INFO(p1, stg_IND_OLDGEN_info); \
LDV_RECORD_CREATE(p1); \
@@ -304,7 +265,7 @@ DEBUG_FILL_SLOP(StgClosure *p)
and_then; \
}
#else
-#define updateWithIndirection(_info, ind_info, p1, p2, and_then) \
+#define updateWithIndirection(ind_info, p1, p2, and_then) \
{ \
bdescr *bd; \
\
@@ -318,10 +279,8 @@ DEBUG_FILL_SLOP(StgClosure *p)
TICK_UPD_NEW_IND(); \
and_then; \
} else { \
- if (_info != &stg_BLACKHOLE_BQ_info) { \
- DEBUG_FILL_SLOP(p1); \
- recordMutableGen(p1, &generations[bd->gen_no]); \
- } \
+ DEBUG_FILL_SLOP(p1); \
+ recordMutableGen(p1, &generations[bd->gen_no]); \
((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1, &stg_IND_OLDGEN_info); \
TICK_UPD_OLD_IND(); \
@@ -335,8 +294,7 @@ DEBUG_FILL_SLOP(StgClosure *p)
*/
#ifndef CMINUSMINUS
INLINE_HEADER void
-updateWithPermIndirection(const StgInfoTable *info,
- StgClosure *p1,
+updateWithPermIndirection(StgClosure *p1,
StgClosure *p2)
{
bdescr *bd;
@@ -361,9 +319,7 @@ updateWithPermIndirection(const StgInfoTable *info,
LDV_RECORD_CREATE(p1);
TICK_UPD_NEW_PERM_IND(p1);
} else {
- if (info != &stg_BLACKHOLE_BQ_info) {
- recordMutableGen(p1, &generations[bd->gen_no]);
- }
+ recordMutableGen(p1, &generations[bd->gen_no]);
((StgInd *)p1)->indirectee = p2;
SET_INFO(p1, &stg_IND_OLDGEN_PERM_info);
/*
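
With AWAKEN_BQ gone, the update code never inspects the updatee's old info pointer: it unconditionally overwrites the thunk with an indirection, recording the mutation whenever the thunk lives in an old generation (the old code skipped the record for BLACKHOLE_BQ, which was already on a mutable list). A simplified C model of the new control flow; the types, info pointers, and helpers below are illustrative stand-ins, not the real RTS definitions:

    typedef struct Closure_ {
        const void      *info;
        struct Closure_ *indirectee;
    } Closure;

    extern const void *IND_info;        /* stand-in for stg_IND_info */
    extern const void *IND_OLDGEN_info; /* stand-in for stg_IND_OLDGEN_info */

    extern unsigned gen_of(Closure *p); /* stand-in for Bdescr(p)->gen_no */
    extern void record_mutable(Closure *p, unsigned gen);

    static void update_with_indirection(Closure *p1, Closure *p2)
    {
        if (gen_of(p1) == 0) {
            /* young generation: a plain indirection suffices */
            p1->indirectee = p2;
            p1->info       = IND_info;
        } else {
            /* old generation: the write makes an old-to-new pointer,
             * so it must be recorded -- now done unconditionally */
            record_mutable(p1, gen_of(p1));
            p1->indirectee = p2;
            p1->info       = IND_OLDGEN_info;
        }
    }
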
diff --git a/ghc/includes/mkDerivedConstants.c b/ghc/includes/mkDerivedConstants.c
index d1e8755fa5..e150b15180 100644
--- a/ghc/includes/mkDerivedConstants.c
+++ b/ghc/includes/mkDerivedConstants.c
@@ -273,9 +273,6 @@ main(int argc, char *argv[])
opt_struct_size(StgTSOGranInfo,GRAN);
opt_struct_size(StgTSODistInfo,DIST);
- closure_size(StgBlockingQueue);
- closure_field(StgBlockingQueue, blocking_queue);
-
closure_field(StgUpdateFrame, updatee);
closure_field(StgCatchFrame, handler);
diff --git a/ghc/rts/Capability.c b/ghc/rts/Capability.c
index e839a6c4da..8a93dc97cc 100644
--- a/ghc/rts/Capability.c
+++ b/ghc/rts/Capability.c
@@ -28,10 +28,10 @@
Capability MainCapability; /* for non-SMP, we have one global capability */
#endif
-#if defined(RTS_SUPPORTS_THREADS)
-
nat rts_n_free_capabilities;
+#if defined(RTS_SUPPORTS_THREADS)
+
/* returning_worker_cond: when a worker thread returns from executing an
* external call, it needs to wait for an RTS Capability before passing
* on the result of the call to the Haskell thread that made it.
@@ -76,6 +76,13 @@ static Condition *passTarget = NULL;
static rtsBool passingCapability = rtsFalse;
#endif
+#if defined(SMP)
+/*
+ * Free capability list.
+ */
+Capability *free_capabilities;
+#endif
+
#ifdef SMP
#define UNUSED_IF_NOT_SMP
#else
@@ -83,9 +90,9 @@ static rtsBool passingCapability = rtsFalse;
#endif
#if defined(RTS_USER_SIGNALS)
-#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted || signals_pending())
+#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted || blackholes_need_checking || signals_pending())
#else
-#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted)
+#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted || blackholes_need_checking)
#endif
/* ----------------------------------------------------------------------------
@@ -99,9 +106,34 @@ initCapability( Capability *cap )
cap->f.stgGCFun = (F_)__stg_gc_fun;
}
+/* -----------------------------------------------------------------------------
+ * Function: initCapabilities_(nat)
+ *
+ * Purpose: upon startup, allocate and fill in a table
+ * holding 'n' Capabilities. Only for SMP, since
+ * it is the only build that supports multiple
+ * capabilities within the RTS.
+ * -------------------------------------------------------------------------- */
#if defined(SMP)
-static void initCapabilities_(nat n);
-#endif
+static void
+initCapabilities_(nat n)
+{
+ nat i;
+ Capability *cap, *prev;
+ cap = NULL;
+ prev = NULL;
+ for (i = 0; i < n; i++) {
+ cap = stgMallocBytes(sizeof(Capability), "initCapabilities");
+ initCapability(cap);
+ cap->link = prev;
+ prev = cap;
+ }
+ free_capabilities = cap;
+ rts_n_free_capabilities = n;
+ IF_DEBUG(scheduler,
+ sched_belch("allocated %d capabilities", rts_n_free_capabilities));
+}
+#endif /* SMP */
/* ---------------------------------------------------------------------------
* Function: initCapabilities()
@@ -123,19 +155,11 @@ initCapabilities( void )
#if defined(RTS_SUPPORTS_THREADS)
initCondition(&returning_worker_cond);
initCondition(&thread_ready_cond);
- rts_n_free_capabilities = 1;
#endif
- return;
+ rts_n_free_capabilities = 1;
}
-#if defined(SMP)
-/* Free capability list. */
-static Capability *free_capabilities; /* Available capabilities for running threads */
-static Capability *returning_capabilities;
- /* Capabilities being passed to returning worker threads */
-#endif
-
/* ----------------------------------------------------------------------------
grabCapability( Capability** )
@@ -149,17 +173,18 @@ static
void
grabCapability( Capability** cap )
{
-#if !defined(SMP)
-#if defined(RTS_SUPPORTS_THREADS)
+#if defined(SMP)
+ ASSERT(rts_n_free_capabilities > 0);
+ *cap = free_capabilities;
+ free_capabilities = (*cap)->link;
+ rts_n_free_capabilities--;
+#else
+# if defined(RTS_SUPPORTS_THREADS)
ASSERT(rts_n_free_capabilities == 1);
rts_n_free_capabilities = 0;
-#endif
+# endif
*cap = &MainCapability;
handleSignalsInThisThread();
-#else
- *cap = free_capabilities;
- free_capabilities = (*cap)->link;
- rts_n_free_capabilities--;
#endif
#if defined(RTS_SUPPORTS_THREADS)
IF_DEBUG(scheduler, sched_belch("worker: got capability"));
@@ -179,7 +204,7 @@ releaseCapability( Capability* cap UNUSED_IF_NOT_SMP )
{
// Precondition: sched_mutex is held.
#if defined(RTS_SUPPORTS_THREADS)
-#ifndef SMP
+#if !defined(SMP)
ASSERT(rts_n_free_capabilities == 0);
#endif
// Check to see whether a worker thread can be given
@@ -191,8 +216,8 @@ releaseCapability( Capability* cap UNUSED_IF_NOT_SMP )
#if defined(SMP)
// SMP variant untested
- cap->link = returning_capabilities;
- returning_capabilities = cap;
+ cap->link = free_capabilities;
+ free_capabilities = cap;
#endif
rts_n_waiting_workers--;
@@ -272,13 +297,14 @@ waitForReturnCapability( Mutex* pMutex, Capability** pCap )
context_switch = 1; // make sure it's our turn soon
waitCondition(&returning_worker_cond, pMutex);
#if defined(SMP)
- *pCap = returning_capabilities;
- returning_capabilities = (*pCap)->link;
+ *pCap = free_capabilities;
+ ASSERT(*pCap != NULL);
+ free_capabilities = (*pCap)->link;
#else
*pCap = &MainCapability;
ASSERT(rts_n_free_capabilities == 0);
- handleSignalsInThisThread();
#endif
+ handleSignalsInThisThread();
} else {
grabCapability(pCap);
}
@@ -313,7 +339,7 @@ yieldCapability( Capability** pCap )
*pCap = NULL;
}
- // Post-condition: pMutex is assumed held, and either:
+ // Post-condition: either:
//
// 1. *pCap is NULL, in which case the current thread does not
// hold a capability now, or
@@ -418,36 +444,3 @@ threadRunnable ( void )
startSchedulerTaskIfNecessary();
#endif
}
-
-/* ------------------------------------------------------------------------- */
-
-#if defined(SMP)
-/*
- * Function: initCapabilities_(nat)
- *
- * Purpose: upon startup, allocate and fill in table
- * holding 'n' Capabilities. Only for SMP, since
- * it is the only build that supports multiple
- * capabilities within the RTS.
- */
-static void
-initCapabilities_(nat n)
-{
- nat i;
- Capability *cap, *prev;
- cap = NULL;
- prev = NULL;
- for (i = 0; i < n; i++) {
- cap = stgMallocBytes(sizeof(Capability), "initCapabilities");
- initCapability(cap);
- cap->link = prev;
- prev = cap;
- }
- free_capabilities = cap;
- rts_n_free_capabilities = n;
- returning_capabilities = NULL;
- IF_DEBUG(scheduler,
- sched_belch("allocated %d capabilities", n_free_capabilities));
-}
-#endif /* SMP */
-
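
With the separate returning_capabilities list folded into free_capabilities, grabbing and releasing a capability reduce to a pop and a push on one singly linked list; a returning worker simply takes the head of the same list after being signalled on returning_worker_cond. A minimal sketch with a simplified Capability (stand-in names, not the RTS ones):

    typedef struct Cap_ {
        struct Cap_ *link;   /* chains free capabilities together */
    } Cap;

    static Cap     *free_caps = 0;  /* stand-in for free_capabilities */
    static unsigned n_free    = 0;  /* stand-in for rts_n_free_capabilities */

    static Cap *grab_capability(void)        /* pop */
    {
        Cap *cap  = free_caps;
        free_caps = cap->link;
        n_free--;
        return cap;
    }

    static void release_capability(Cap *cap) /* push */
    {
        cap->link = free_caps;
        free_caps = cap;
        n_free++;
    }
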
diff --git a/ghc/rts/Capability.h b/ghc/rts/Capability.h
index b82ec09e1c..c575335b0e 100644
--- a/ghc/rts/Capability.h
+++ b/ghc/rts/Capability.h
@@ -80,6 +80,9 @@ extern void passCapability(Condition *pTargetThreadCond);
extern void passCapabilityToWorker( void );
extern nat rts_n_free_capabilities;
+
+extern Capability *free_capabilities;
+
/* number of worker threads waiting for a return capability
*/
extern nat rts_n_waiting_workers;
@@ -101,7 +104,11 @@ static inline rtsBool noCapabilities (void)
static inline rtsBool allFreeCapabilities (void)
{
+#if defined(SMP)
+ return (rts_n_free_capabilities == RTS_DEREF(RtsFlags).ParFlags.nNodes);
+#else
return (rts_n_free_capabilities == 1);
+#endif
}
#else // !RTS_SUPPORTS_THREADS
diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c
index ea88b21c66..64cfacdb4a 100644
--- a/ghc/rts/GC.c
+++ b/ghc/rts/GC.c
@@ -11,6 +11,7 @@
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Apply.h"
+#include "OSThreads.h"
#include "Storage.h"
#include "LdvProfile.h"
#include "Updates.h"
@@ -1624,7 +1625,9 @@ evacuate_large(StgPtr p)
REGPARM1 static StgClosure *
evacuate(StgClosure *q)
{
+#if defined(PAR)
StgClosure *to;
+#endif
bdescr *bd = NULL;
step *stp;
const StgInfoTable *info;
@@ -1755,10 +1758,6 @@ loop:
case BLACKHOLE:
return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
- case BLACKHOLE_BQ:
- to = copy(q,BLACKHOLE_sizeW(),stp);
- return to;
-
case THUNK_SELECTOR:
{
StgClosure *p;
@@ -1919,7 +1918,7 @@ loop:
}
#if defined(PAR)
- case RBH: // cf. BLACKHOLE_BQ
+ case RBH:
{
//StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
to = copy(q,BLACKHOLE_sizeW(),stp);
@@ -2167,7 +2166,6 @@ selector_loop:
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
- case BLACKHOLE_BQ:
#if defined(PAR)
case RBH:
case BLOCKED_FETCH:
@@ -2614,16 +2612,6 @@ scavenge(step *stp)
p += BLACKHOLE_sizeW();
break;
- case BLACKHOLE_BQ:
- {
- StgBlockingQueue *bh = (StgBlockingQueue *)p;
- bh->blocking_queue =
- (StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
- failed_to_evac = rtsTrue;
- p += BLACKHOLE_sizeW();
- break;
- }
-
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
@@ -2697,7 +2685,7 @@ scavenge(step *stp)
}
#if defined(PAR)
- case RBH: // cf. BLACKHOLE_BQ
+ case RBH:
{
#if 0
nat size, ptrs, nonptrs, vhs;
@@ -2740,7 +2728,7 @@ scavenge(step *stp)
p += sizeofW(StgFetchMe);
break; // nothing to do in this case
- case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
+ case FETCH_ME_BQ:
{
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
@@ -2969,15 +2957,6 @@ linear_scan:
case ARR_WORDS:
break;
- case BLACKHOLE_BQ:
- {
- StgBlockingQueue *bh = (StgBlockingQueue *)p;
- bh->blocking_queue =
- (StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
- failed_to_evac = rtsTrue;
- break;
- }
-
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
@@ -3039,7 +3018,7 @@ linear_scan:
}
#if defined(PAR)
- case RBH: // cf. BLACKHOLE_BQ
+ case RBH:
{
#if 0
nat size, ptrs, nonptrs, vhs;
@@ -3078,7 +3057,7 @@ linear_scan:
case FETCH_ME:
break; // nothing to do in this case
- case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
+ case FETCH_ME_BQ:
{
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
@@ -3271,16 +3250,6 @@ scavenge_one(StgPtr p)
case BLACKHOLE:
break;
- case BLACKHOLE_BQ:
- {
- StgBlockingQueue *bh = (StgBlockingQueue *)p;
- evac_gen = 0; // repeatedly mutable
- bh->blocking_queue =
- (StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
- failed_to_evac = rtsTrue;
- break;
- }
-
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
@@ -3347,7 +3316,7 @@ scavenge_one(StgPtr p)
}
#if defined(PAR)
- case RBH: // cf. BLACKHOLE_BQ
+ case RBH:
{
#if 0
nat size, ptrs, nonptrs, vhs;
@@ -3387,7 +3356,7 @@ scavenge_one(StgPtr p)
case FETCH_ME:
break; // nothing to do in this case
- case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
+ case FETCH_ME_BQ:
{
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
@@ -3941,7 +3910,7 @@ threadLazyBlackHole(StgTSO *tso)
{
StgClosure *frame;
StgRetInfoTable *info;
- StgBlockingQueue *bh;
+ StgClosure *bh;
StgPtr stack_end;
stack_end = &tso->stack[tso->stack_size];
@@ -3954,7 +3923,7 @@ threadLazyBlackHole(StgTSO *tso)
switch (info->i.type) {
case UPDATE_FRAME:
- bh = (StgBlockingQueue *)((StgUpdateFrame *)frame)->updatee;
+ bh = ((StgUpdateFrame *)frame)->updatee;
/* if the thunk is already blackholed, it means we've also
* already blackholed the rest of the thunks on this stack,
@@ -3967,8 +3936,7 @@ threadLazyBlackHole(StgTSO *tso)
return;
}
- if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
- bh->header.info != &stg_CAF_BLACKHOLE_info) {
+ if (bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
@@ -4072,7 +4040,6 @@ threadSqueezeStack(StgTSO *tso)
* screw us up if we don't check.
*/
if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
- // this wakes the threads up
UPD_IND_NOLOCK(upd->updatee, updatee);
}
@@ -4090,11 +4057,10 @@ threadSqueezeStack(StgTSO *tso)
// single update frame, or the topmost update frame in a series
else {
- StgBlockingQueue *bh = (StgBlockingQueue *)upd->updatee;
+ StgClosure *bh = upd->updatee;
// Do lazy black-holing
if (bh->header.info != &stg_BLACKHOLE_info &&
- bh->header.info != &stg_BLACKHOLE_BQ_info &&
bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh);
diff --git a/ghc/rts/GCCompact.c b/ghc/rts/GCCompact.c
index c6a8f6e73b..f126cfee2c 100644
--- a/ghc/rts/GCCompact.c
+++ b/ghc/rts/GCCompact.c
@@ -10,6 +10,7 @@
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
+#include "OSThreads.h"
#include "Storage.h"
#include "BlockAlloc.h"
#include "MBlock.h"
@@ -548,7 +549,6 @@ thread_obj (StgInfoTable *info, StgPtr p)
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
- case BLACKHOLE_BQ:
{
StgPtr end;
diff --git a/ghc/rts/HeapStackCheck.cmm b/ghc/rts/HeapStackCheck.cmm
index 409e74466a..e9236f19dc 100644
--- a/ghc/rts/HeapStackCheck.cmm
+++ b/ghc/rts/HeapStackCheck.cmm
@@ -109,18 +109,6 @@ __stg_gc_enter_1
GC_GENERIC
}
-#ifdef SMP
-stg_gc_enter_1_hponly
-{
- Sp_adj(-1);
- Sp(0) = R1;
- R1 = HeapOverflow;
- SAVE_THREAD_STATE();
- TSO_what_next(CurrentTSO) = ThreadRunGHC::I16;
- jump StgReturn;
-}
-#endif
-
#if defined(GRAN)
/*
ToDo: merge the block and yield macros, calling something like BLOCK(N)
diff --git a/ghc/rts/LdvProfile.c b/ghc/rts/LdvProfile.c
index e46f4d7c66..d945008b59 100644
--- a/ghc/rts/LdvProfile.c
+++ b/ghc/rts/LdvProfile.c
@@ -180,7 +180,6 @@ processHeapClosureForDead( StgClosure *c )
case FUN_1_1:
case FUN_0_2:
- case BLACKHOLE_BQ:
case BLACKHOLE:
case SE_BLACKHOLE:
case CAF_BLACKHOLE:
diff --git a/ghc/rts/Linker.c b/ghc/rts/Linker.c
index 11c17836a4..2e37d7178e 100644
--- a/ghc/rts/Linker.c
+++ b/ghc/rts/Linker.c
@@ -528,7 +528,6 @@ typedef struct _RtsSymbolVal {
SymX(stable_ptr_table) \
SymX(stackOverflow) \
SymX(stg_CAF_BLACKHOLE_info) \
- SymX(stg_BLACKHOLE_BQ_info) \
SymX(awakenBlockedQueue) \
SymX(stg_CHARLIKE_closure) \
SymX(stg_EMPTY_MVAR_info) \
diff --git a/ghc/rts/Printer.c b/ghc/rts/Printer.c
index d5c9386174..1bbdb35cb5 100644
--- a/ghc/rts/Printer.c
+++ b/ghc/rts/Printer.c
@@ -290,21 +290,13 @@ printClosure( StgClosure *obj )
}
case CAF_BLACKHOLE:
- debugBelch("CAF_BH(");
- printPtr((StgPtr)stgCast(StgBlockingQueue*,obj)->blocking_queue);
- debugBelch(")\n");
+ debugBelch("CAF_BH");
break;
case BLACKHOLE:
debugBelch("BH\n");
break;
- case BLACKHOLE_BQ:
- debugBelch("BQ(");
- printPtr((StgPtr)stgCast(StgBlockingQueue*,obj)->blocking_queue);
- debugBelch(")\n");
- break;
-
case SE_BLACKHOLE:
debugBelch("SE_BH\n");
break;
diff --git a/ghc/rts/ProfHeap.c b/ghc/rts/ProfHeap.c
index c47554521a..482895f98b 100644
--- a/ghc/rts/ProfHeap.c
+++ b/ghc/rts/ProfHeap.c
@@ -150,7 +150,6 @@ static char *type_names[] = {
, "STOP_FRAME"
, "BLACKHOLE"
- , "BLACKHOLE_BQ"
, "MVAR"
, "ARR_WORDS"
@@ -878,7 +877,6 @@ heapCensusChain( Census *census, bdescr *bd )
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
- case BLACKHOLE_BQ:
case CONSTR_INTLIKE:
case CONSTR_CHARLIKE:
case FUN_1_0:
diff --git a/ghc/rts/RetainerProfile.c b/ghc/rts/RetainerProfile.c
index 10bed44931..dfa77b0980 100644
--- a/ghc/rts/RetainerProfile.c
+++ b/ghc/rts/RetainerProfile.c
@@ -466,11 +466,6 @@ push( StgClosure *c, retainer c_child_r, StgClosure **first_child )
case MUT_VAR:
*first_child = ((StgMutVar *)c)->var;
return;
- case BLACKHOLE_BQ:
- // blocking_queue must be TSO and the head of a linked list of TSOs.
- // Shoule it be a child? Seems to be yes.
- *first_child = (StgClosure *)((StgBlockingQueue *)c)->blocking_queue;
- return;
case THUNK_SELECTOR:
*first_child = ((StgSelector *)c)->selectee;
return;
@@ -894,7 +889,6 @@ pop( StgClosure **c, StgClosure **cp, retainer *r )
case ARR_WORDS:
// one child (fixed), no SRT
case MUT_VAR:
- case BLACKHOLE_BQ:
case THUNK_SELECTOR:
case IND_PERM:
case IND_OLDGEN_PERM:
@@ -1042,7 +1036,6 @@ isRetainer( StgClosure *c )
case BLACKHOLE:
case SE_BLACKHOLE:
case SE_CAF_BLACKHOLE:
- case BLACKHOLE_BQ:
// indirection
case IND_PERM:
case IND_OLDGEN_PERM:
@@ -2102,7 +2095,6 @@ sanityCheckHeapClosure( StgClosure *c )
case BLACKHOLE:
case SE_BLACKHOLE:
case SE_CAF_BLACKHOLE:
- case BLACKHOLE_BQ:
case IND_PERM:
case IND_OLDGEN:
case IND_OLDGEN_PERM:
diff --git a/ghc/rts/RtsAPI.c b/ghc/rts/RtsAPI.c
index 4ca1225beb..d196c919d6 100644
--- a/ghc/rts/RtsAPI.c
+++ b/ghc/rts/RtsAPI.c
@@ -8,13 +8,13 @@
#include "PosixSource.h"
#include "Rts.h"
+#include "OSThreads.h"
#include "Storage.h"
#include "RtsAPI.h"
#include "SchedAPI.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Prelude.h"
-#include "OSThreads.h"
#include "Schedule.h"
#include "Capability.h"
@@ -501,6 +501,8 @@ rts_lock()
// b) wake the current worker thread from awaitEvent()
// (so that a thread started by rts_eval* will start immediately)
waitForReturnCapability(&sched_mutex,&rtsApiCapability);
+#else
+ grabCapability(&rtsApiCapability);
#endif
}
diff --git a/ghc/rts/RtsStartup.c b/ghc/rts/RtsStartup.c
index 05d403f9fb..98e1459573 100644
--- a/ghc/rts/RtsStartup.c
+++ b/ghc/rts/RtsStartup.c
@@ -11,6 +11,7 @@
#include "RtsAPI.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
+#include "OSThreads.h"
#include "Storage.h" /* initStorage, exitStorage */
#include "Schedule.h" /* initScheduler */
#include "Stats.h" /* initStats */
diff --git a/ghc/rts/Sanity.c b/ghc/rts/Sanity.c
index 7bc2f833d7..410df7450d 100644
--- a/ghc/rts/Sanity.c
+++ b/ghc/rts/Sanity.c
@@ -249,9 +249,6 @@ checkClosure( StgClosure* p )
return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
}
- case BLACKHOLE_BQ:
- checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
- /* fall through to basic ptr check */
case FUN:
case FUN_1_0:
case FUN_0_1:
@@ -395,6 +392,7 @@ checkClosure( StgClosure* p )
case MUT_ARR_PTRS:
case MUT_ARR_PTRS_FROZEN:
+ case MUT_ARR_PTRS_FROZEN0:
{
StgMutArrPtrs* a = (StgMutArrPtrs *)p;
nat i;
@@ -644,8 +642,7 @@ checkTSO(StgTSO *tso)
break;
case BlockedOnBlackHole:
checkClosureShallow(tso->block_info.closure);
- ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
- get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
+ ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
get_itbl(tso->block_info.closure)->type==RBH);
break;
case BlockedOnRead:
@@ -830,8 +827,7 @@ checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
rtsBool end = rtsFalse;
StgInfoTable *info = get_itbl(closure);
- ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
- || info->type == FETCH_ME_BQ || info->type == RBH);
+ ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
do {
switch (get_itbl(bqe)->type) {
@@ -860,7 +856,7 @@ checkBQ (StgTSO *bqe, StgClosure *closure)
rtsBool end = rtsFalse;
StgInfoTable *info = get_itbl(closure);
- ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
+ ASSERT(info->type == MVAR);
do {
switch (get_itbl(bqe)->type) {
@@ -884,7 +880,7 @@ checkBQ (StgTSO *bqe, StgClosure *closure)
rtsBool end = rtsFalse;
StgInfoTable *info = get_itbl(closure);
- ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
+ ASSERT(info->type == MVAR);
do {
switch (get_itbl(bqe)->type) {
diff --git a/ghc/rts/Schedule.c b/ghc/rts/Schedule.c
index 5b0cd03639..6e363a6765 100644
--- a/ghc/rts/Schedule.c
+++ b/ghc/rts/Schedule.c
@@ -145,10 +145,16 @@ StgTSO *run_queue_hd = NULL;
StgTSO *run_queue_tl = NULL;
StgTSO *blocked_queue_hd = NULL;
StgTSO *blocked_queue_tl = NULL;
+StgTSO *blackhole_queue = NULL;
StgTSO *sleeping_queue = NULL; /* perhaps replace with a hash table? */
#endif
+/* The blackhole_queue should be checked for threads to wake up. See
+ * Schedule.h for a more thorough comment.
+ */
+rtsBool blackholes_need_checking = rtsFalse;
+
/* Linked list of all threads.
* Used for detecting garbage collected threads.
*/
@@ -270,6 +276,7 @@ static void schedulePreLoop(void);
static void scheduleHandleInterrupt(void);
static void scheduleStartSignalHandlers(void);
static void scheduleCheckBlockedThreads(void);
+static void scheduleCheckBlackHoles(void);
static void scheduleDetectDeadlock(void);
#if defined(GRAN)
static StgTSO *scheduleProcessEvent(rtsEvent *event);
@@ -293,6 +300,7 @@ static void scheduleDoHeapProfile(void);
static void scheduleDoGC(void);
static void unblockThread(StgTSO *tso);
+static rtsBool checkBlackHoles(void);
static SchedulerStatus waitThread_(/*out*/StgMainThread* m,
Capability *initialCapability
);
@@ -526,6 +534,12 @@ schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
scheduleStartSignalHandlers();
+ // Only check the black holes here if we've nothing else to do.
+ // During normal execution, the black hole list only gets checked
+ // at GC time, to avoid repeatedly traversing this possibly long
+ // list each time around the scheduler.
+ if (EMPTY_RUN_QUEUE()) { scheduleCheckBlackHoles(); }
+
scheduleCheckBlockedThreads();
scheduleDetectDeadlock();
@@ -652,9 +666,9 @@ run_thread:
startHeapProfTimer();
#endif
- /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
- /* Run the current thread
- */
+ // ----------------------------------------------------------------------
+ // Run the current thread
+
prev_what_next = t->what_next;
errno = t->saved_errno;
@@ -680,6 +694,12 @@ run_thread:
barf("schedule: invalid what_next field");
}
+ // We have run some Haskell code: there might be blackhole-blocked
+ // threads to wake up now.
+ if ( blackhole_queue != END_TSO_QUEUE ) {
+ blackholes_need_checking = rtsTrue;
+ }
+
in_haskell = rtsFalse;
// The TSO might have moved, eg. if it re-entered the RTS and a GC
@@ -689,7 +709,7 @@ run_thread:
// And save the current errno in this thread.
t->saved_errno = errno;
- /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+ // ----------------------------------------------------------------------
/* Costs for the scheduler are assigned to CCS_SYSTEM */
#if defined(PROFILING)
@@ -834,7 +854,22 @@ scheduleCheckBlockedThreads(void)
// We shouldn't be here...
barf("schedule: awaitEvent() in threaded RTS");
#endif
- awaitEvent( EMPTY_RUN_QUEUE() );
+ awaitEvent( EMPTY_RUN_QUEUE() && !blackholes_need_checking );
+ }
+}
+
+
+/* ----------------------------------------------------------------------------
+ * Check for threads blocked on BLACKHOLEs that can be woken up
+ * ASSUMES: sched_mutex
+ * ------------------------------------------------------------------------- */
+static void
+scheduleCheckBlackHoles( void )
+{
+ if ( blackholes_need_checking )
+ {
+ checkBlackHoles();
+ blackholes_need_checking = rtsFalse;
}
}
@@ -848,18 +883,13 @@ scheduleDetectDeadlock(void)
{
/*
* Detect deadlock: when we have no threads to run, there are no
- * threads waiting on I/O or sleeping, and all the other tasks are
- * waiting for work, we must have a deadlock of some description.
- *
- * We first try to find threads blocked on themselves (ie. black
- * holes), and generate NonTermination exceptions where necessary.
- *
- * If no threads are black holed, we have a deadlock situation, so
- * inform all the main threads.
+ * threads blocked, waiting for I/O, or sleeping, and all the
+ * other tasks are waiting for work, we must have a deadlock of
+ * some description.
*/
-#if !defined(PARALLEL_HASKELL) && !defined(RTS_SUPPORTS_THREADS)
if ( EMPTY_THREAD_QUEUES() )
{
+#if !defined(PARALLEL_HASKELL) && !defined(RTS_SUPPORTS_THREADS)
IF_DEBUG(scheduler, sched_belch("deadlocked, forcing major GC..."));
// Garbage collection can release some new threads due to
@@ -910,13 +940,13 @@ scheduleDetectDeadlock(void)
barf("deadlock: main thread blocked in a strange way");
}
}
- }
#elif defined(RTS_SUPPORTS_THREADS)
// ToDo: add deadlock detection in threaded RTS
#elif defined(PARALLEL_HASKELL)
// ToDo: add deadlock detection in GUM (similar to SMP) -- HWL
#endif
+ }
}
/* ----------------------------------------------------------------------------
@@ -1883,6 +1913,9 @@ scheduleDoGC(void)
}
}
+ // so this happens periodically:
+ scheduleCheckBlackHoles();
+
/* everybody back, start the GC.
* Could do it in this thread, or signal a condition var
* to do it in another thread. Either way, we need to
@@ -2036,6 +2069,7 @@ deleteAllThreads ( void )
// being GC'd, and we don't want the "main thread has been GC'd" panic.
ASSERT(blocked_queue_hd == END_TSO_QUEUE);
+ ASSERT(blackhole_queue == END_TSO_QUEUE);
ASSERT(sleeping_queue == END_TSO_QUEUE);
}
@@ -2547,6 +2581,7 @@ initScheduler(void)
blocked_queue_hds[i] = END_TSO_QUEUE;
blocked_queue_tls[i] = END_TSO_QUEUE;
ccalling_threadss[i] = END_TSO_QUEUE;
+ blackhole_queue[i] = END_TSO_QUEUE;
sleeping_queue = END_TSO_QUEUE;
}
#else
@@ -2554,6 +2589,7 @@ initScheduler(void)
run_queue_tl = END_TSO_QUEUE;
blocked_queue_hd = END_TSO_QUEUE;
blocked_queue_tl = END_TSO_QUEUE;
+ blackhole_queue = END_TSO_QUEUE;
sleeping_queue = END_TSO_QUEUE;
#endif
@@ -2709,6 +2745,10 @@ GetRoots( evac_fn evac )
}
#endif
+ if (blackhole_queue != END_TSO_QUEUE) {
+ evac((StgClosure **)&blackhole_queue);
+ }
+
if (suspended_ccalling_threads != END_TSO_QUEUE) {
evac((StgClosure **)&suspended_ccalling_threads);
}
@@ -3365,12 +3405,9 @@ unblockThread(StgTSO *tso)
}
case BlockedOnBlackHole:
- ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
{
- StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
-
- last = &bq->blocking_queue;
- for (t = bq->blocking_queue; t != END_TSO_QUEUE;
+ last = &blackhole_queue;
+ for (t = blackhole_queue; t != END_TSO_QUEUE;
last = &t->link, t = t->link) {
if (t == tso) {
*last = tso->link;
@@ -3462,6 +3499,49 @@ unblockThread(StgTSO *tso)
#endif
/* -----------------------------------------------------------------------------
+ * checkBlackHoles()
+ *
+ * Check the blackhole_queue for threads that can be woken up. We do
+ * this periodically: before every GC, and whenever the run queue is
+ * empty.
+ *
+ * An elegant solution might be to just wake up all the blocked
+ * threads with awakenBlockedQueue occasionally: they'll go back to
+ * sleep again if the object is still a BLACKHOLE. Unfortunately this
+ * doesn't give us a way to tell whether we've actually managed to
+ * wake up any threads, so we would be busy-waiting.
+ *
+ * -------------------------------------------------------------------------- */
+
+static rtsBool
+checkBlackHoles( void )
+{
+ StgTSO **prev, *t;
+ rtsBool any_woke_up = rtsFalse;
+ StgHalfWord type;
+
+ IF_DEBUG(scheduler, sched_belch("checking threads blocked on black holes"));
+
+ // ASSUMES: sched_mutex
+ prev = &blackhole_queue;
+ t = blackhole_queue;
+ while (t != END_TSO_QUEUE) {
+ ASSERT(t->why_blocked == BlockedOnBlackHole);
+ type = get_itbl(t->block_info.closure)->type;
+ if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
+ t = unblockOneLocked(t);
+ *prev = t;
+ any_woke_up = rtsTrue;
+ } else {
+ prev = &t->link;
+ t = t->link;
+ }
+ }
+
+ return any_woke_up;
+}
+
+/* -----------------------------------------------------------------------------
* raiseAsync()
*
* The following function implements the magic for raising an
@@ -4163,25 +4243,6 @@ print_bq (StgClosure *node)
} /* for */
debugBelch("\n");
}
-#else
-/*
- Nice and easy: only TSOs on the blocking queue
-*/
-void
-print_bq (StgClosure *node)
-{
- StgTSO *tso;
-
- ASSERT(node!=(StgClosure*)NULL); // sanity check
- for (tso = ((StgBlockingQueue*)node)->blocking_queue;
- tso != END_TSO_QUEUE;
- tso=tso->link) {
- ASSERT(tso!=NULL && tso!=END_TSO_QUEUE); // sanity check
- ASSERT(get_itbl(tso)->type == TSO); // guess what, sanity check
- debugBelch(" TSO %d (%p),", tso->id, tso);
- }
- debugBelch("\n");
-}
# endif
#if defined(PARALLEL_HASKELL)
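
Taken together, the scheduler changes implement a cheap wakeup protocol: running any Haskell code conservatively sets blackholes_need_checking (an update may have overwritten some black hole), and the queue is only walked when the run queue is empty or a GC is about to happen. In outline, with stand-in helpers (the real code does this under sched_mutex):

    #include <stdbool.h>

    extern bool run_queue_empty(void);   /* stand-in for EMPTY_RUN_QUEUE() */
    extern bool check_black_holes(void); /* stand-in for checkBlackHoles() */

    static bool blackholes_need_checking = false;

    static void after_running_haskell(void)
    {
        /* some thunk may have been updated while Haskell code ran */
        blackholes_need_checking = true;
    }

    static void scheduler_idle(void)
    {
        if (run_queue_empty() && blackholes_need_checking) {
            check_black_holes();              /* one walk of the queue */
            blackholes_need_checking = false; /* quiet until next update */
        }
        /* only sleep in awaitEvent() when there is nothing to check */
    }
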
diff --git a/ghc/rts/Schedule.h b/ghc/rts/Schedule.h
index bd744f0355..d8c1643a20 100644
--- a/ghc/rts/Schedule.h
+++ b/ghc/rts/Schedule.h
@@ -146,11 +146,20 @@ extern lnat RTS_VAR(timestamp);
#else
extern StgTSO *RTS_VAR(run_queue_hd), *RTS_VAR(run_queue_tl);
extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
+extern StgTSO *RTS_VAR(blackhole_queue);
extern StgTSO *RTS_VAR(sleeping_queue);
#endif
/* Linked list of all threads. */
extern StgTSO *RTS_VAR(all_threads);
+/* Set to rtsTrue if there are threads on the blackhole_queue, and
+ * it is possible that one or more of them may be available to run.
+ * This flag is set to rtsFalse after we've checked the queue, and
+ * set to rtsTrue just before we run some Haskell code. It is used
+ * to decide whether we should yield the Capability or not.
+ */
+extern rtsBool blackholes_need_checking;
+
#if defined(RTS_SUPPORTS_THREADS)
/* Schedule.c has detailed info on what these do */
extern Mutex RTS_VAR(sched_mutex);
@@ -198,11 +207,7 @@ typedef struct StgMainThread_ {
SchedulerStatus stat;
StgClosure ** ret;
#if defined(RTS_SUPPORTS_THREADS)
-#if defined(THREADED_RTS)
Condition bound_thread_cond;
-#else
- Condition wakeup;
-#endif
#endif
struct StgMainThread_ *prev;
struct StgMainThread_ *link;
diff --git a/ghc/rts/Sparks.c b/ghc/rts/Sparks.c
index 07b3b6e23f..6e638d4106 100644
--- a/ghc/rts/Sparks.c
+++ b/ghc/rts/Sparks.c
@@ -42,7 +42,7 @@
static void slide_spark_pool( StgSparkPool *pool );
-rtsBool
+void
initSparkPools( void )
{
Capability *cap;
@@ -65,7 +65,6 @@ initSparkPools( void )
pool->hd = pool->base;
pool->tl = pool->base;
}
- return rtsTrue; /* Qapla' */
}
/*
diff --git a/ghc/rts/Sparks.h b/ghc/rts/Sparks.h
index f9cce177a3..44a00f1524 100644
--- a/ghc/rts/Sparks.h
+++ b/ghc/rts/Sparks.h
@@ -25,7 +25,7 @@ void markSparkQueue(void);
#elif defined(PAR) || defined(SMP)
StgClosure *findSpark( rtsBool );
-rtsBool initSparkPools( void );
+void initSparkPools( void );
void markSparkQueue( void );
#if defined(PAR)
StgTSO *activateSpark (rtsSpark spark) ;
diff --git a/ghc/rts/Stable.c b/ghc/rts/Stable.c
index eadfa684a9..30d17c04fb 100644
--- a/ghc/rts/Stable.c
+++ b/ghc/rts/Stable.c
@@ -13,6 +13,7 @@
#include "Rts.h"
#include "Hash.h"
#include "RtsUtils.h"
+#include "OSThreads.h"
#include "Storage.h"
#include "RtsAPI.h"
#include "RtsFlags.h"
diff --git a/ghc/rts/Stats.c b/ghc/rts/Stats.c
index 42c49db565..7c14590bd1 100644
--- a/ghc/rts/Stats.c
+++ b/ghc/rts/Stats.c
@@ -19,6 +19,7 @@
#include "ParTicky.h" /* ToDo: move into Rts.h */
#include "Profiling.h"
#include "Storage.h"
+#include "Task.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
@@ -477,15 +478,15 @@ stat_endGC(lnat alloc, lnat collect, lnat live, lnat copied, lnat gen)
GC_tot_time += gc_time;
GCe_tot_time += gc_etime;
-#ifdef SMP
+#if defined(SMP)
{
nat i;
pthread_t me = pthread_self();
for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
- if (me == task_ids[i].id) {
- task_ids[i].gc_time += gc_time;
- task_ids[i].gc_etime += gc_etime;
+ if (me == taskTable[i].id) {
+ taskTable[i].gc_time += gc_time;
+ taskTable[i].gc_etime += gc_etime;
break;
}
}
@@ -578,7 +579,7 @@ stat_endHeapCensus(void)
stat_workerStop
Called under SMP when a worker thread finishes. We drop the timing
- stats for this thread into the task_ids struct for that thread.
+ stats for this thread into the taskTable struct for that thread.
-------------------------------------------------------------------------- */
#if defined(SMP)
@@ -591,13 +592,13 @@ stat_workerStop(void)
getTimes();
for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
- if (task_ids[i].id == me) {
- task_ids[i].mut_time = CurrentUserTime - task_ids[i].gc_time;
- task_ids[i].mut_etime = CurrentElapsedTime
+ if (taskTable[i].id == me) {
+ taskTable[i].mut_time = CurrentUserTime - taskTable[i].gc_time;
+ taskTable[i].mut_etime = CurrentElapsedTime
- GCe_tot_time
- - task_ids[i].elapsedtimestart;
- if (task_ids[i].mut_time < 0.0) { task_ids[i].mut_time = 0.0; }
- if (task_ids[i].mut_etime < 0.0) { task_ids[i].mut_etime = 0.0; }
+ - taskTable[i].elapsedtimestart;
+ if (taskTable[i].mut_time < 0.0) { taskTable[i].mut_time = 0.0; }
+ if (taskTable[i].mut_etime < 0.0) { taskTable[i].mut_etime = 0.0; }
}
}
}
@@ -650,7 +651,7 @@ stat_exit(int alloc)
{ nat i;
MutUserTime = 0.0;
for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
- MutUserTime += task_ids[i].mut_time;
+ MutUserTime += taskTable[i].mut_time;
}
}
time = MutUserTime + GC_tot_time + InitUserTime + ExitUserTime;
@@ -696,10 +697,10 @@ stat_exit(int alloc)
statsPrintf(" Task %2d: MUT time: %6.2fs (%6.2fs elapsed)\n"
" GC time: %6.2fs (%6.2fs elapsed)\n\n",
i,
- TICK_TO_DBL(task_ids[i].mut_time),
- TICK_TO_DBL(task_ids[i].mut_etime),
- TICK_TO_DBL(task_ids[i].gc_time),
- TICK_TO_DBL(task_ids[i].gc_etime));
+ TICK_TO_DBL(taskTable[i].mut_time),
+ TICK_TO_DBL(taskTable[i].mut_etime),
+ TICK_TO_DBL(taskTable[i].gc_time),
+ TICK_TO_DBL(taskTable[i].gc_etime));
}
}
#endif
diff --git a/ghc/rts/StgMiscClosures.cmm b/ghc/rts/StgMiscClosures.cmm
index 07a5ff21e8..4e2c0fbe46 100644
--- a/ghc/rts/StgMiscClosures.cmm
+++ b/ghc/rts/StgMiscClosures.cmm
@@ -325,11 +325,9 @@ INFO_TABLE(stg_IND_OLDGEN_PERM,1,0,IND_OLDGEN_PERM,"IND_OLDGEN_PERM","IND_OLDGEN
waiting for the evaluation of the closure to finish.
------------------------------------------------------------------------- */
-/* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
+/* Note: a BLACKHOLE must be big enough to be
* overwritten with an indirection/evacuee/catch. Thus we claim it
- * has 1 non-pointer word of payload (in addition to the pointer word
- * for the blocking queue in a BQ), which should be big enough for an
- * old-generation indirection.
+ * has 1 non-pointer word of payload.
*/
INFO_TABLE(stg_BLACKHOLE,0,1,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
{
@@ -343,73 +341,18 @@ INFO_TABLE(stg_BLACKHOLE,0,1,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
/* Actually this is not necessary because R1 is about to be destroyed. */
LDV_ENTER(R1);
- /* Put ourselves on the blocking queue for this black hole */
- StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
- StgBlockingQueue_blocking_queue(R1) = CurrentTSO;
+ /* Put ourselves on the blackhole queue */
+ StgTSO_link(CurrentTSO) = W_[blackhole_queue];
+ W_[blackhole_queue] = CurrentTSO;
/* jot down why and on what closure we are blocked */
StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
StgTSO_block_info(CurrentTSO) = R1;
- /* Change the BLACKHOLE into a BLACKHOLE_BQ */
-#ifdef PROFILING
- /* The size remains the same, so we call LDV_recordDead() -
- no need to fill slop. */
- foreign "C" LDV_recordDead(R1 "ptr", BYTES_TO_WDS(SIZEOF_StgBlockingQueue));
-#endif
- /*
- * Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
- */
- StgHeader_info(R1) = stg_BLACKHOLE_BQ_info;
-#ifdef PROFILING
- foreign "C" LDV_RECORD_CREATE(R1);
-#endif
-
- /* closure is mutable since something has just been added to its BQ */
- foreign "C" recordMutable(R1 "ptr");
-
- /* PAR: dumping of event now done in blockThread -- HWL */
-
/* stg_gen_block is too heavyweight, use a specialised one */
jump stg_block_1;
}
-INFO_TABLE(stg_BLACKHOLE_BQ,1,0,BLACKHOLE_BQ,"BLACKHOLE_BQ","BLACKHOLE_BQ")
-{
-#if defined(GRAN)
- /* Before overwriting TSO_LINK */
- STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1 /*Node*/);
-#endif
-
- TICK_ENT_BH();
- LDV_ENTER(R1);
-
- /* Put ourselves on the blocking queue for this black hole */
- StgTSO_link(CurrentTSO) = StgBlockingQueue_blocking_queue(R1);
- StgBlockingQueue_blocking_queue(R1) = CurrentTSO;
-
- /* jot down why and on what closure we are blocked */
- StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
- StgTSO_block_info(CurrentTSO) = R1;
-
- /* PAR: dumping of event now done in blockThread -- HWL */
-
- /* stg_gen_block is too heavyweight, use a specialised one */
- jump stg_block_1;
-}
-
-/*
- Revertible black holes are needed in the parallel world, to handle
- negative acknowledgements of messages containing updatable closures.
- The idea is that when the original message is transmitted, the closure
- is turned into a revertible black hole...an object which acts like a
- black hole when local threads try to enter it, but which can be reverted
- back to the original closure if necessary.
-
- It's actually a lot like a blocking queue (BQ) entry, because revertible
- black holes are initially set up with an empty blocking queue.
-*/
-
#if defined(PAR) || defined(GRAN)
INFO_TABLE(stg_RBH,1,1,RBH,"RBH","RBH")
@@ -455,40 +398,24 @@ INFO_TABLE(stg_CAF_BLACKHOLE,0,1,CAF_BLACKHOLE,"CAF_BLACKHOLE","CAF_BLACKHOLE")
TICK_ENT_BH();
LDV_ENTER(R1);
- /* Put ourselves on the blocking queue for this black hole */
- StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
- StgBlockingQueue_blocking_queue(R1) = CurrentTSO;
+ /* Put ourselves on the blackhole queue */
+ StgTSO_link(CurrentTSO) = W_[blackhole_queue];
+ W_[blackhole_queue] = CurrentTSO;
/* jot down why and on what closure we are blocked */
StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
StgTSO_block_info(CurrentTSO) = R1;
- /* Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC */
- StgHeader_info(R1) = stg_BLACKHOLE_BQ_info;
-
- /* closure is mutable since something has just been added to its BQ */
- foreign "C" recordMutable(R1 "ptr");
-
- /* PAR: dumping of event now done in blockThread -- HWL */
-
/* stg_gen_block is too heavyweight, use a specialised one */
jump stg_block_1;
}
#ifdef EAGER_BLACKHOLING
-INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,1,SE_BLACKHOLE,,IF_,"SE_BLACKHOLE","SE_BLACKHOLE");
-IF_(stg_SE_BLACKHOLE_entry)
-{
- STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1);
- STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
-}
+INFO_TABLE(stg_SE_BLACKHOLE,0,1,SE_BLACKHOLE,"SE_BLACKHOLE","SE_BLACKHOLE")
+{ foreign "C" barf("SE_BLACKHOLE object entered!"); }
-INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, SE_CAF_BLACKHOLE_entry,0,1,SE_CAF_BLACKHOLE,,IF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
-IF_(stg_SE_CAF_BLACKHOLE_entry)
-{
- STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1);
- STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
-}
+INFO_TABLE(stg_SE_CAF_BLACKHOLE,0,1,SE_CAF_BLACKHOLE,"SE_CAF_BLACKHOLE","SE_CAF_BLACKHOLE")
+{ foreign "C" barf("SE_CAF_BLACKHOLE object entered!"); }
#endif
/* ----------------------------------------------------------------------------
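
Both entry paths above (BLACKHOLE and CAF_BLACKHOLE) now end identically: push the current TSO onto the global queue and record the closure it is blocked on. The matching wake-up test in checkBlackHoles() is then a bare info-pointer comparison. Rendered as C for clarity (simplified stand-ins; the authoritative versions are the C-- above and the Schedule.c code earlier):

    typedef struct Closure_ { const void *info; } Closure;

    typedef struct TSO_ {
        struct TSO_ *link;
        Closure     *block_info;  /* the closure this TSO is blocked on */
    } TSO;

    extern const void *BLACKHOLE_info, *CAF_BLACKHOLE_info; /* stand-ins */
    static TSO *blackhole_queue = 0;

    static void enter_black_hole(TSO *current, Closure *bh)
    {
        current->link       = blackhole_queue; /* was: bh's own queue */
        blackhole_queue     = current;
        current->block_info = bh;              /* kept for the wake test */
    }

    static int can_wake(TSO *t)
    {
        /* runnable again once the closure has been overwritten with
         * anything else, e.g. an indirection to the computed value */
        const void *i = t->block_info->info;
        return i != BLACKHOLE_info && i != CAF_BLACKHOLE_info;
    }
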
diff --git a/ghc/rts/StgStartup.cmm b/ghc/rts/StgStartup.cmm
index d9308d6e52..2d5d4d8941 100644
--- a/ghc/rts/StgStartup.cmm
+++ b/ghc/rts/StgStartup.cmm
@@ -179,7 +179,7 @@ stg_init_finish
stg_init
{
W_ next;
- Sp = W_[MainCapability + OFFSET_Capability_r + OFFSET_StgRegTable_rSp];
+ Sp = W_[BaseReg + OFFSET_StgRegTable_rSp];
next = W_[Sp];
Sp_adj(1);
jump next;
diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c
index 45d94aea01..0e25b4200a 100644
--- a/ghc/rts/Storage.c
+++ b/ghc/rts/Storage.c
@@ -17,11 +17,10 @@
#include "Weak.h"
#include "Sanity.h"
#include "Arena.h"
-
+#include "OSThreads.h"
+#include "Capability.h"
#include "Storage.h"
#include "Schedule.h"
-#include "OSThreads.h"
-
#include "RetainerProfile.h" // for counting memory blocks (memInventory)
#include <stdlib.h>
@@ -61,6 +60,18 @@ static void *stgAllocForGMP (size_t size_in_bytes);
static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
static void stgDeallocForGMP (void *ptr, size_t size);
+/*
+ * Storage manager mutex
+ */
+#if defined(SMP)
+extern Mutex sm_mutex;
+#define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex)
+#define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex)
+#else
+#define ACQUIRE_SM_LOCK
+#define RELEASE_SM_LOCK
+#endif
+
void
initStorage( void )
{
@@ -335,19 +346,12 @@ allocNurseries( void )
{
#ifdef SMP
Capability *cap;
- bdescr *bd;
g0s0->blocks = NULL;
g0s0->n_blocks = 0;
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
cap->r.rCurrentNursery = cap->r.rNursery;
- /* Set the back links to be equal to the Capability,
- * so we can do slightly better informed locking.
- */
- for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
- bd->u.back = (bdescr *)cap;
- }
}
#else /* SMP */
g0s0->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
@@ -368,7 +372,7 @@ resetNurseries( void )
Capability *cap;
/* All tasks must be stopped */
- ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes);
+ ASSERT(rts_n_free_capabilities == RtsFlags.ParFlags.nNodes);
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
for (bd = cap->r.rNursery; bd; bd = bd->link) {
@@ -695,7 +699,7 @@ calcAllocated( void )
/* ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes); */
allocated =
- n_free_capabilities * RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W
+ rts_n_free_capabilities * RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W
+ allocated_bytes();
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
diff --git a/ghc/rts/Task.c b/ghc/rts/Task.c
index ad05208d5e..42dc9c943e 100644
--- a/ghc/rts/Task.c
+++ b/ghc/rts/Task.c
@@ -29,6 +29,10 @@
#include "RtsFlags.h"
#include "Schedule.h"
+#if HAVE_SIGNAL_H
+#include <signal.h>
+#endif
+
/* There's not all that much code that is shared between the
* SMP and threads version of the 'task manager.' A sign
* that the code ought to be structured differently..(Maybe ToDo).
@@ -39,12 +43,13 @@
* accessed with the RTS lock in hand.
*/
#if defined(SMP)
-static TaskInfo* taskTable;
+TaskInfo* taskTable;
#endif
/* upper bound / the number of tasks created. */
static nat maxTasks;
/* number of tasks currently created */
static nat taskCount;
+static nat awaitDeath;
#if defined(SMP)
void
@@ -73,7 +78,7 @@ startTaskManager( nat maxCount, void (*taskStart)(void) )
}
}
-void
+rtsBool
startTask ( void (*taskStart)(void) )
{
int r;
@@ -92,11 +97,11 @@ startTask ( void (*taskStart)(void) )
taskTable[taskCount].elapsedtimestart = stat_getElapsedTime();
IF_DEBUG(scheduler,debugBelch("scheduler: Started task: %ld\n",tid););
- return;
+ return rtsTrue;
}
void
-stopTaskManager ()
+stopTaskManager (void)
{
nat i;
OSThreadId tid = osThreadId();
@@ -120,14 +125,14 @@ stopTaskManager ()
#endif
/* Send 'em all a SIGHUP. That should shut 'em up. */
- await_death = maxCount - 1;
- for (i = 0; i < maxCount; i++) {
+ awaitDeath = taskCount==0 ? 0 : taskCount-1;
+ for (i = 0; i < taskCount; i++) {
/* don't cancel the thread running this piece of code. */
if ( taskTable[i].id != tid ) {
pthread_kill(taskTable[i].id,SIGTERM);
}
}
- while (await_death > 0) {
+ while (awaitDeath > 0) {
sched_yield();
}
@@ -135,7 +140,7 @@ stopTaskManager ()
}
void
-resetTaskManagerAfterFork ()
+resetTaskManagerAfterFork (void)
{
barf("resetTaskManagerAfterFork not implemented for SMP");
}
@@ -180,7 +185,6 @@ startTask ( void (*taskStart)(void) )
return rtsFalse;
}
-
r = createOSThread(&tid,taskStart);
if (r != 0) {
barf("startTask: Can't create new task");
diff --git a/ghc/rts/Task.h b/ghc/rts/Task.h
index b5a4dd27b2..7dd29ad4df 100644
--- a/ghc/rts/Task.h
+++ b/ghc/rts/Task.h
@@ -24,7 +24,7 @@ typedef struct _TaskInfo {
double gc_etime;
} TaskInfo;
-extern TaskInfo *taskIds;
+extern TaskInfo *taskTable;
extern void startTaskManager ( nat maxTasks, void (*taskStart)(void) );
extern void stopTaskManager ( void );