path: root/rts/Schedule.c
author     Simon Marlow <marlowsd@gmail.com>  2016-04-24 21:31:55 +0100
committer  Simon Marlow <smarlow@fb.com>      2016-05-04 05:30:30 -0700
commit     1fa92ca9b1ed4cf44e2745830c9e9ccc2bee12d5 (patch)
tree       767363253d30e007c1d78441b7caa7d83c193802 /rts/Schedule.c
parent     f703fd6b50f0ae58bc5f5ddb927a2ce28eeaddf6 (diff)
download   haskell-1fa92ca9b1ed4cf44e2745830c9e9ccc2bee12d5.tar.gz
schedulePushWork: avoid unnecessary wakeups
This function had some pathologically bad behaviour: if we had 2 threads
on the current capability and 23 other idle capabilities, we would

 * grab all 23 capabilities
 * migrate one Haskell thread to one of them
 * wake up a worker on *all* 23 other capabilities

This led to a lot of unnecessary wakeups when using large -N values.

Now, we

 * Count how many capabilities we need to wake up
 * Start from cap->no+1, so that we don't overload low-numbered
   capabilities
 * Only wake up capabilities that we migrated a thread to (unless we
   have sparks to steal)

This results in a pretty dramatic improvement in our production system.
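As a rough illustration of the scan described above (not the RTS code itself), the toy C program below models how many peers a capability might want to wake and then walks the other capabilities round-robin starting at cap->no+1. All names here (toy_cap_t, N_CAPS, wanted_caps, the run-queue and spark-pool counters) are made-up stand-ins for the real Capability machinery, chosen only to show the counting and scan order.

    /* Toy model of the wakeup-counting and round-robin scan described above.
     * These types and numbers are illustrative stand-ins, not the RTS's
     * Capability structures. */
    #include <stdio.h>

    #define N_CAPS 24                /* pretend we run with +RTS -N24 */

    typedef struct {
        int no;                      /* capability number */
        int run_queue_len;           /* runnable Haskell threads queued here */
        int spark_pool_size;         /* sparks available for stealing */
    } toy_cap_t;

    /* We want at most one peer per spare thread (beyond the one we keep)
     * plus one per spark, capped at the number of other capabilities. */
    static int wanted_caps(const toy_cap_t *cap)
    {
        int spare_threads = cap->run_queue_len > 1 ? cap->run_queue_len - 1 : 0;
        int n = spare_threads + cap->spark_pool_size;
        return n < N_CAPS - 1 ? n : N_CAPS - 1;
    }

    int main(void)
    {
        toy_cap_t caps[N_CAPS] = {{0}};
        for (int i = 0; i < N_CAPS; i++) caps[i].no = i;

        toy_cap_t *cap = &caps[5];   /* the capability doing the pushing */
        cap->run_queue_len = 2;      /* one thread to keep, one to push */
        cap->spark_pool_size = 0;

        int want = wanted_caps(cap);
        printf("capability %d wants to wake %d peer(s)\n", cap->no, want);

        /* Scan round-robin from cap->no+1 so low-numbered capabilities are
         * not always hit first; stop once we have enough or we wrap around. */
        int found = 0;
        for (int i = (cap->no + 1) % N_CAPS;
             found < want && i != cap->no;
             i = (i + 1) % N_CAPS) {
            printf("  would try capability %d\n", i);
            found++;
        }
        return 0;
    }

With one spare thread and an empty spark pool, the scan stops after trying a single capability instead of touching all 23 others, which is the behaviour the patch is after.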
Diffstat (limited to 'rts/Schedule.c')
-rw-r--r--  rts/Schedule.c | 32
1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/rts/Schedule.c b/rts/Schedule.c
index abf3be58af..3eb9624231 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -699,7 +699,8 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
 #if defined(THREADED_RTS)

     Capability *free_caps[n_capabilities], *cap0;
-    nat i, n_free_caps;
+    nat i, n_wanted_caps, n_free_caps;
+    StgTSO *t;

     // migration can be turned off with +RTS -qm
     if (!RtsFlags.ParFlags.migrate) return;
@@ -713,8 +714,22 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
             sparkPoolSizeCap(cap) < 1) return;
     }

-    // First grab as many free Capabilities as we can.
-    for (i=0, n_free_caps=0; i < n_capabilities; i++) {
+    // Figure out how many capabilities we want to wake up.  We need at least
+    // sparkPoolSize(cap) plus the number of spare threads we have.
+    t = cap->run_queue_hd;
+    n_wanted_caps = sparkPoolSizeCap(cap);
+    if (t != END_TSO_QUEUE) {
+        do {
+            t = t->_link;
+            if (t == END_TSO_QUEUE) break;
+            n_wanted_caps++;
+        } while (n_wanted_caps < n_capabilities-1);
+    }
+
+    // Grab free capabilities, starting from cap->no+1.
+    for (i = (cap->no + 1) % n_capabilities, n_free_caps=0;
+         n_free_caps < n_wanted_caps && i != cap->no;
+         i = (i + 1) % n_capabilities) {
         cap0 = capabilities[i];
         if (cap != cap0 && !cap0->disabled && tryGrabCapability(cap0,task)) {
             if (!emptyRunQueue(cap0)
@@ -824,10 +839,13 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
         // release the capabilities
         for (i = 0; i < n_free_caps; i++) {
             task->cap = free_caps[i];
-            // The idea behind waking up the capability unconditionally is that
-            // it might be able to steal sparks. Perhaps we should only do this
-            // if there were sparks to steal?
-            releaseAndWakeupCapability(free_caps[i]);
+            if (sparkPoolSizeCap(cap) > 0) {
+                // If we have sparks to steal, wake up a worker on the
+                // capability, even if it has no threads to run.
+                releaseAndWakeupCapability(free_caps[i]);
+            } else {
+                releaseCapability(free_caps[i]);
+            }
         }
     }
     task->cap = cap; // reset to point to our Capability.