Diffstat (limited to 'rts/Schedule.c')
-rw-r--r--  rts/Schedule.c | 35
1 file changed, 13 insertions(+), 22 deletions(-)
diff --git a/rts/Schedule.c b/rts/Schedule.c
index c3911afbe5..ee2d7dbb0d 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -574,6 +574,7 @@ removeFromRunQueue (Capability *cap, StgTSO *tso)
setTSOPrev(cap, tso->_link, tso->block_info.prev);
}
tso->_link = tso->block_info.prev = END_TSO_QUEUE;
+ cap->n_run_queue--;
IF_DEBUG(sanity, checkRunQueue(cap));
}
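
The hunk above shows the commit's core pattern: every structural change to the run queue is paired with an update to a cached length field (cap->n_run_queue), so the queue length can be read in O(1) instead of walking the list. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real Capability/StgTSO structures:

/* Sketch: keep a cached length in sync with an intrusive list.
 * Types here are simplified stand-ins, not the real RTS structures. */
#include <assert.h>
#include <stddef.h>

typedef struct Node { struct Node *next; } Node;

typedef struct {
    Node *head;
    size_t n;        /* cached length, kept in sync with the list */
} Queue;

static void push(Queue *q, Node *t) {
    t->next = q->head;
    q->head = t;
    q->n++;          /* counter moves with the structural change */
}

static Node *pop(Queue *q) {
    Node *t = q->head;
    if (t != NULL) {
        q->head = t->next;
        t->next = NULL;
        q->n--;      /* mirror of the increment in push() */
    }
    return t;
}

int main(void) {
    Queue q = { NULL, 0 };
    Node a, b;
    push(&q, &a); push(&q, &b);
    assert(q.n == 2);
    pop(&q);
    assert(q.n == 1);
    return 0;
}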
@@ -639,7 +640,7 @@ shouldYieldCapability (Capability *cap, Task *task, rtsBool didGcLast)
// progress at all.
return ((pending_sync && !didGcLast) ||
- cap->returning_tasks_hd != NULL ||
+ cap->n_returning_tasks != 0 ||
(!emptyRunQueue(cap) && (task->incall->tso == NULL
? peekRunQueue(cap)->bound != NULL
: peekRunQueue(cap)->bound != task->incall)));
@@ -700,31 +701,15 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
Capability *free_caps[n_capabilities], *cap0;
uint32_t i, n_wanted_caps, n_free_caps;
- StgTSO *t;
// migration can be turned off with +RTS -qm
if (!RtsFlags.ParFlags.migrate) return;
- // Check whether we have more threads on our run queue, or sparks
- // in our pool, that we could hand to another Capability.
- if (emptyRunQueue(cap)) {
- if (sparkPoolSizeCap(cap) < 2) return;
- } else {
- if (singletonRunQueue(cap) &&
- sparkPoolSizeCap(cap) < 1) return;
- }
-
// Figure out how many capabilities we want to wake up. We need at least
// sparkPoolSize(cap) plus the number of spare threads we have.
- t = cap->run_queue_hd;
- n_wanted_caps = sparkPoolSizeCap(cap);
- if (t != END_TSO_QUEUE) {
- do {
- t = t->_link;
- if (t == END_TSO_QUEUE) break;
- n_wanted_caps++;
- } while (n_wanted_caps < n_capabilities-1);
- }
+ n_wanted_caps = sparkPoolSizeCap(cap) + cap->n_run_queue - 1;
+
+ if (n_wanted_caps == 0) return;
// First grab as many free Capabilities as we can. ToDo: we should use
// capabilities on the same NUMA node preferably, but not exclusively.
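
The new arithmetic replaces the removed traversal: the old loop counted one wanted capability per spark plus one per runnable thread beyond the one this capability keeps (capped at n_capabilities-1), while the new expression computes the same sum directly from the cached cap->n_run_queue. A hedged sketch of the equivalence, ignoring the cap and assuming a non-empty run queue (at n_run_queue == 0 the unsigned subtraction in the formula would wrap):

/* Sketch: the removed loop and the new formula agree for a non-empty
 * run queue. Simplified stand-ins, not the real RTS API. */
#include <assert.h>
#include <stdint.h>

static uint32_t wanted_loop(uint32_t sparks, uint32_t n_run_queue) {
    uint32_t n = sparks;
    for (uint32_t t = 1; t < n_run_queue; t++)  /* skip the thread we keep */
        n++;
    return n;
}

static uint32_t wanted_formula(uint32_t sparks, uint32_t n_run_queue) {
    return sparks + n_run_queue - 1;
}

int main(void) {
    for (uint32_t s = 0; s < 4; s++)
        for (uint32_t q = 1; q < 4; q++)        /* queue assumed non-empty */
            assert(wanted_loop(s, q) == wanted_formula(s, q));
    return 0;
}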
@@ -734,7 +719,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
cap0 = capabilities[i];
if (cap != cap0 && !cap0->disabled && tryGrabCapability(cap0,task)) {
if (!emptyRunQueue(cap0)
- || cap0->returning_tasks_hd != NULL
+ || cap0->n_returning_tasks != 0
|| cap0->inbox != (Message*)END_TSO_QUEUE) {
// it already has some work, we just grabbed it at
// the wrong moment. Or maybe it's deadlocked!
@@ -765,7 +750,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
debugTrace(DEBUG_sched,
"cap %d: %s and %d free capabilities, sharing...",
cap->no,
- (!emptyRunQueue(cap) && !singletonRunQueue(cap))?
+ (cap->n_run_queue > 1)?
"excess threads on run queue":"sparks to share (>=2)",
n_free_caps);
@@ -797,6 +782,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
prev = t;
} else {
appendToRunQueue(free_caps[i],t);
+ cap->n_run_queue--;
traceEventMigrateThread (cap, t, free_caps[i]->no);
@@ -2039,10 +2025,12 @@ forkProcess(HsStablePtr *entry
// bound threads for which the corresponding Task does not
// exist.
truncateRunQueue(cap);
+ cap->n_run_queue = 0;
// Any suspended C-calling Tasks are no more, their OS threads
// don't exist now:
cap->suspended_ccalls = NULL;
+ cap->n_suspended_ccalls = 0;
#if defined(THREADED_RTS)
// Wipe our spare workers list, they no longer exist. New
@@ -2051,6 +2039,7 @@ forkProcess(HsStablePtr *entry
cap->n_spare_workers = 0;
cap->returning_tasks_hd = NULL;
cap->returning_tasks_tl = NULL;
+ cap->n_returning_tasks = 0;
#endif
// Release all caps except 0, we'll use that for starting
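
In the child of fork(), the forkProcess hunks above wipe inherited queues whose threads and tasks no longer exist, and the commit extends each wipe so the new cached counters are zeroed in the same step. A minimal sketch of that invariant, with simplified stand-ins for the real fields:

/* Sketch: when a list is wiped after fork(), its cached counter must be
 * zeroed with it, or every later read of the counter is wrong. */
#include <assert.h>
#include <stddef.h>

typedef struct Task { struct Task *next; } Task;

typedef struct {
    Task *returning_hd;
    Task *returning_tl;
    unsigned n_returning;   /* cached count of the list above */
} Cap;

static void wipe_after_fork(Cap *cap) {
    cap->returning_hd = NULL;
    cap->returning_tl = NULL;
    cap->n_returning = 0;   /* zeroed together with the list it counts */
}

int main(void) {
    Task t = { NULL };
    Cap cap = { &t, &t, 1 };
    wipe_after_fork(&cap);
    assert(cap.returning_hd == NULL && cap.n_returning == 0);
    return 0;
}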
@@ -2277,6 +2266,7 @@ suspendTask (Capability *cap, Task *task)
cap->suspended_ccalls->prev = incall;
}
cap->suspended_ccalls = incall;
+ cap->n_suspended_ccalls++;
}
STATIC_INLINE void
@@ -2295,6 +2285,7 @@ recoverSuspendedTask (Capability *cap, Task *task)
incall->next->prev = incall->prev;
}
incall->next = incall->prev = NULL;
+ cap->n_suspended_ccalls--;
}
/* ---------------------------------------------------------------------------
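
suspendTask and recoverSuspendedTask form the matched pair for the third new counter: pushing an InCall onto the doubly linked suspended_ccalls list increments n_suspended_ccalls, and unlinking it decrements, so the counter always equals the list length. A standalone sketch of that pairing, with simplified types in place of the real InCall/Capability:

/* Sketch: head insertion bumps the counter, unlinking drops it.
 * Simplified stand-ins, not the real RTS structures. */
#include <assert.h>
#include <stddef.h>

typedef struct InCall {
    struct InCall *prev, *next;
} InCall;

typedef struct {
    InCall *suspended;  /* head of the doubly linked list */
    unsigned n;         /* cached length */
} Cap;

static void suspend(Cap *cap, InCall *ic) {
    ic->prev = NULL;
    ic->next = cap->suspended;
    if (cap->suspended != NULL) cap->suspended->prev = ic;
    cap->suspended = ic;
    cap->n++;
}

static void recover(Cap *cap, InCall *ic) {
    if (ic->prev != NULL) ic->prev->next = ic->next;
    else cap->suspended = ic->next;
    if (ic->next != NULL) ic->next->prev = ic->prev;
    ic->prev = ic->next = NULL;
    cap->n--;
}

int main(void) {
    Cap cap = { NULL, 0 };
    InCall a, b;
    suspend(&cap, &a);
    suspend(&cap, &b);
    assert(cap.n == 2);
    recover(&cap, &a);
    assert(cap.n == 1 && cap.suspended == &b);
    return 0;
}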