path: root/ghc/rts/Sparks.c
author     hwloidl <unknown>  2001-03-22 03:51:13 +0000
committer  hwloidl <unknown>  2001-03-22 03:51:13 +0000
commit     20fc2f0ced64a12d8e44956931b2ac341ed2186f
tree       d11231f7dac6d1e918764c7894781175cd36bb5d  /ghc/rts/Sparks.c
parent     982fe3c72ef579a955271b772c14fd7a10a6144a
download   haskell-20fc2f0ced64a12d8e44956931b2ac341ed2186f.tar.gz
[project @ 2001-03-22 03:51:08 by hwloidl]
-*- outline -*- Time-stamp: <Thu Mar 22 2001 03:50:16 Stardate: [-30]6365.79 hwloidl>

This commit covers changes in GHC to get GUM (way=mp) and GUM/GdH (way=md) working. It is a merge of my working version of GUM, based on GHC 4.06, with GHC 4.11. Almost all changes are in the RTS (see below).

GUM is reasonably stable; we used the 4.06 version in large-ish programs for recent papers. There are a couple of things I want to change, but nothing urgent. GUM/GdH has just been merged and needs more testing; I hope to do that in the next few weeks. It works in our working build but needs tweaking to run. GranSim doesn't work yet (*sigh*); most of the code should be in, but it needs more debugging.

ToDo: I still want to make the following minor modifications before the release:
- A better wrapper script for parallel execution [ghc/compiler/main]
- Update the parallel documentation: started on it, but it's minimal [ghc/docs/users_guide]
- Clean up [nofib/parallel]: it's a real mess right now (*sigh*)
- Update the visualisation tools (minor things only IIRC) [ghc/utils/parallel]
- Add a Klingon-English glossary

* RTS: Almost all changes are restricted to ghc/rts/parallel and should not interfere with the rest. I only comment on changes outside the parallel dir:
- Several changes in Schedule.c (scheduling loop; createThreads etc); these should only affect parallel code
- Added ghc/rts/hooks/ShutdownEachPEHook.c
- ghc/rts/Linker.[ch]: GUM doesn't know about Stable Names (ifdefs)!!
- StgMiscClosures.h: END_TSO_QUEUE etc are now defined here (moved from StgMiscClosures.hc); END_ECAF_LIST was missing a leading stg_
- SchedAPI.h: taskStart is now defined in here; it's only a wrapper around scheduleThread for now, but might do some init and shutdown later
- RtsAPI.h: I have nuked the def of rts_evalNothing

* Compiler:
- ghc/compiler/main/DriverState.hs: added PVM-ish flags to the parallel way; added new ways for parallel ticky profiling and distributed execution
- ghc/compiler/main/DriverPipeline.hs: added a function run_phase_MoveBinary, which is called with way=mp after linking; it moves the binary into a PVM directory and produces a wrapper script for parallel execution. It might be cleaner to add a MoveBinary phase in DriverPhases.hs, but this way it's less intrusive, and MoveBinary probably only makes sense for mp anyway.

* Nofib:
- nofib/spectral/Makefile, nofib/real/Makefile, ghc/tests/programs/Makefile: modified to skip some tests if HWL_NOFIB_HACK is set; this is only temporary, to record which test programs cause problems in my working build right now.
Diffstat (limited to 'ghc/rts/Sparks.c')
-rw-r--r--  ghc/rts/Sparks.c  82
1 file changed, 67 insertions, 15 deletions
diff --git a/ghc/rts/Sparks.c b/ghc/rts/Sparks.c
index 4a9bf005fd..9a37d6970b 100644
--- a/ghc/rts/Sparks.c
+++ b/ghc/rts/Sparks.c
@@ -1,5 +1,5 @@
/* ---------------------------------------------------------------------------
- * $Id: Sparks.c,v 1.2 2000/03/31 03:09:36 hwloidl Exp $
+ * $Id: Sparks.c,v 1.3 2001/03/22 03:51:10 hwloidl Exp $
*
* (c) The GHC Team, 2000
*
@@ -15,6 +15,7 @@
//* GUM code::
//* GranSim code::
//@end menu
+//*/
//@node Includes, GUM code, Spark Management Routines, Spark Management Routines
//@subsection Includes
@@ -25,8 +26,10 @@
#include "Storage.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
+#include "ParTicky.h"
# if defined(PAR)
# include "ParallelRts.h"
+# include "GranSimRts.h" // for GR_...
# elif defined(GRAN)
# include "GranSimRts.h"
# endif
@@ -39,7 +42,7 @@
static void slide_spark_pool( StgSparkPool *pool );
-void
+rtsBool
initSparkPools( void )
{
Capability *cap;
@@ -62,14 +65,21 @@ initSparkPools( void )
pool->hd = pool->base;
pool->tl = pool->base;
}
+ return rtsTrue; /* Qapla' */
}
+/*
+ We traverse the spark pool until we find the 2nd usable (i.e. non-NF)
+ spark. Rationale: we don't want to give away the only work a PE has.
+ ToDo: introduce low- and high-water-marks for load balancing.
+*/
StgClosure *
-findSpark( void )
+findSpark( rtsBool for_export )
{
Capability *cap;
StgSparkPool *pool;
- StgClosure *spark;
+ StgClosure *spark, *first=NULL;
+ rtsBool isIdlePE = EMPTY_RUN_QUEUE();
#ifdef SMP
/* walk over the capabilities, allocating a spark pool for each one */
@@ -82,14 +92,36 @@ findSpark( void )
pool = &(cap->rSparks);
while (pool->hd < pool->tl) {
spark = *pool->hd++;
- if (closure_SHOULD_SPARK(spark))
- return spark;
+ if (closure_SHOULD_SPARK(spark)) {
+ if (for_export && isIdlePE) {
+ if (first==NULL) {
+ first = spark; // keep the first usable spark if PE is idle
+ } else {
+ pool->hd--; // found a second spark; keep it in the pool
+ ASSERT(*pool->hd==spark);
+ if (RtsFlags.ParFlags.ParStats.Sparks)
+ DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
+ GR_STEALING, ((StgTSO *)NULL), first,
+ 0, 0 /* spark_queue_len(ADVISORY_POOL) */);
+ return first; // and return the *first* spark found
+ }
+ } else {
+ if (RtsFlags.ParFlags.ParStats.Sparks && for_export)
+ DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
+ GR_STEALING, ((StgTSO *)NULL), spark,
+ 0, 0 /* spark_queue_len(ADVISORY_POOL) */);
+ return spark; // return first spark found
+ }
+ }
}
slide_spark_pool(pool);
}
return NULL;
}
+/*
+ activateSpark is defined in Schedule.c
+*/
rtsBool
add_to_spark_queue( StgClosure *closure, StgSparkPool *pool )
{
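
The findSpark change above adds an export mode: an idle PE only hands a spark to another PE if it can find a second usable one, so it never gives away its last piece of work, and it logs a GR_STEALING event when it does export. Below is a minimal standalone sketch of that retention rule, under the assumption that plain ints can stand in for closures and a hypothetical is_usable() for closure_SHOULD_SPARK(); it is not part of the patch.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>
#include <stddef.h>

typedef int Spark;                        /* stand-in for StgClosure*        */

static int is_usable(Spark s) { return s != 0; }   /* 0 ~ already in NF      */

/* Return a spark to export, or -1 if an idle PE would otherwise have to
 * give away its only usable spark. */
static Spark find_spark_for_export(const Spark *pool, size_t len, int pe_is_idle)
{
    Spark first = -1;
    for (size_t i = 0; i < len; i++) {
        if (!is_usable(pool[i]))
            continue;
        if (!pe_is_idle)
            return pool[i];               /* busy PE: export the first spark */
        if (first == -1)
            first = pool[i];              /* idle PE: remember the first ... */
        else
            return first;                 /* ... export it only once a second
                                             usable spark has been found     */
    }
    return -1;                            /* keep the only usable spark, if any */
}

int main(void)
{
    Spark two[] = { 0, 7, 0, 9 };
    Spark one[] = { 0, 7 };
    printf("%d\n", find_spark_for_export(two, 4, 1));  /* 7: a second spark exists */
    printf("%d\n", find_spark_for_export(one, 2, 1));  /* -1: keep the last spark  */
    return 0;
}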
@@ -99,8 +131,25 @@ add_to_spark_queue( StgClosure *closure, StgSparkPool *pool )
if (closure_SHOULD_SPARK(closure) &&
pool->tl < pool->lim) {
*(pool->tl++) = closure;
+
+#if defined(PAR)
+ // collect parallel global statistics (currently done together with GC stats)
+ if (RtsFlags.ParFlags.ParStats.Global &&
+ RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
+ // fprintf(stderr, "Creating spark for %x @ %11.2f\n", closure, usertime());
+ globalParStats.tot_sparks_created++;
+ }
+#endif
return rtsTrue;
} else {
+#if defined(PAR)
+ // collect parallel global statistics (currently done together with GC stats)
+ if (RtsFlags.ParFlags.ParStats.Global &&
+ RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
+ //fprintf(stderr, "Ignoring spark for %x @ %11.2f\n", closure, usertime());
+ globalParStats.tot_sparks_ignored++;
+ }
+#endif
return rtsFalse;
}
}
@@ -141,12 +190,12 @@ void
markSparkQueue( void )
{
StgClosure **sparkp, **to_sparkp;
-#ifdef DEBUG
- nat n, pruned_sparks;
-#endif
+ nat n, pruned_sparks; // stats only
StgSparkPool *pool;
Capability *cap;
+ PAR_TICKY_MARK_SPARK_QUEUE_START();
+
#ifdef SMP
/* walk over the capabilities, allocating a spark pool for each one */
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
@@ -156,8 +205,9 @@ markSparkQueue( void )
{
#endif
pool = &(cap->rSparks);
-
-#ifdef DEBUG
+
+#if defined(PAR)
+ // stats only
n = 0;
pruned_sparks = 0;
#endif
@@ -172,11 +222,11 @@ markSparkQueue( void )
if (closure_SHOULD_SPARK(*sparkp)) {
*to_sparkp = MarkRoot(*sparkp);
to_sparkp++;
-#ifdef DEBUG
+#ifdef PAR
n++;
#endif
} else {
-#ifdef DEBUG
+#ifdef PAR
pruned_sparks++;
#endif
}
@@ -185,6 +235,8 @@ markSparkQueue( void )
pool->hd = pool->base;
pool->tl = to_sparkp;
+ PAR_TICKY_MARK_SPARK_QUEUE_END(n);
+
#if defined(SMP)
IF_DEBUG(scheduler,
belch("markSparkQueue: marked %d sparks and pruned %d sparks on [%x]",
@@ -420,7 +472,7 @@ activateSpark (rtsEvent *event, rtsSparkQ spark)
IF_GRAN_DEBUG(pri,
belch("++ No high priority spark available; low priority (%u) spark chosen: node=%p; name=%u\n",
spark->gran_info,
- spark->node, spark->name);)
+ spark->node, spark->name));
}
CurrentTime[proc] += RtsFlags.GranFlags.Costs.threadcreatetime;
@@ -441,7 +493,7 @@ activateSpark (rtsEvent *event, rtsSparkQ spark)
FindWork,
(StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
barf("//// activateSpark: out of heap ; ToDo: call GarbageCollect()");
- GarbageCollect(GetRoots);
+ GarbageCollect(GetRoots, rtsFalse);
// HWL old: ReallyPerformThreadGC(TSO_HS+TSO_CTS_SIZE,rtsFalse);
// HWL old: SAVE_Hp -= TSO_HS+TSO_CTS_SIZE;
spark = NULL;