author    | Simon Marlow <marlowsd@gmail.com> | 2011-12-01 10:53:28 +0000
committer | Simon Marlow <marlowsd@gmail.com> | 2011-12-01 11:29:53 +0000
commit    | 6d18141d880d55958c3392f6a7ae621dc33ee5c1 (patch)
tree      | edd5607a50a92ffad4707cef792a06246b5b41e4 /rts/Schedule.c
parent    | 1d012e31577951ff5fe74d0277fabdb08c27929d (diff)
download  | haskell-6d18141d880d55958c3392f6a7ae621dc33ee5c1.tar.gz
Fix a scheduling bug in the threaded RTS
The parallel GC was using setContextSwitches() to stop all the other
threads, which sets the context_switch flag on every Capability. That
had the side effect of causing every Capability to also switch
threads, and since GCs can be much more frequent than context
switches, this increased the context switch frequency. When context
switches are expensive (because the switch is between two bound
threads or a bound and unbound thread), the difference is quite
noticeable.
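
To make the failure mode concrete, here is a minimal sketch of the pre-patch coupling between stopping a Capability and switching threads. The types and the array of Capabilities are illustrative stand-ins, not the real RTS declarations; only the flag name and the function name come from the patch.

```c
/* Sketch only: stand-in declarations, not the real rts/Capability.h. */
#define N_CAPS 4

typedef struct {
    /* Pre-patch there is a single flag, and it means BOTH "stop at the
     * next safe point" and "switch to another thread when you stop". */
    int context_switch;
} Capability;

static Capability capabilities[N_CAPS];

/* How the parallel GC stopped every Capability before this patch: each
 * stop request was therefore also a thread-switch request. */
void setContextSwitches(void)
{
    for (int i = 0; i < N_CAPS; i++) {
        capabilities[i].context_switch = 1;
    }
}
```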
The fix is to have a separate flag to indicate that a Capability
should stop and return to the scheduler, but not switch threads. I've
called this the "interrupt" flag.
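
And a matching sketch of the split this patch introduces, again with stand-in types rather than the real Capability.c code: interruptAllCapabilities() touches only the new interrupt flag, so a GC can stop every Capability without also scheduling a thread switch. The Schedule.c hunks below are where the scheduler starts telling the two requests apart.

```c
/* Sketch of the post-patch flags, again with stand-in declarations. */
#define N_CAPS 4

typedef struct {
    int context_switch;  /* stop at the next safe point AND switch threads    */
    int interrupt;       /* stop at the next safe point, keep the same thread */
} Capability;

static Capability capabilities[N_CAPS];

/* What the GC (and interruptStgRts) call after this patch: only the new
 * flag is set, so context_switch -- and hence the thread-switch rate --
 * stays entirely under the timer's control. */
void interruptAllCapabilities(void)
{
    for (int i = 0; i < N_CAPS; i++) {
        capabilities[i].interrupt = 1;
    }
}
```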
Diffstat (limited to 'rts/Schedule.c')
-rw-r--r-- | rts/Schedule.c | 20
1 file changed, 13 insertions, 7 deletions
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 04a66e31df..cd704d2871 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -415,6 +415,9 @@ run_thread:
     SetLastError(t->saved_winerror);
 #endif
 
+    // reset the interrupt flag before running Haskell code
+    cap->interrupt = 0;
+
     cap->in_haskell = rtsTrue;
 
     dirty_TSO(cap,t);
@@ -521,7 +524,7 @@ run_thread:
         break;
 
     case ThreadYielding:
-        if (scheduleHandleYield(cap, t, prev_what_next)) {
+        if (scheduleHandleYield(cap, t, prev_what_next)) {
             // shortcut for switching between compiler/interpreter:
             goto run_thread;
         }
@@ -1167,14 +1170,17 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
     // the CPU because the tick always arrives during GC). This way
     // penalises threads that do a lot of allocation, but that seems
     // better than the alternative.
-    cap->context_switch = 0;
-
+    if (cap->context_switch != 0) {
+        cap->context_switch = 0;
+        appendToRunQueue(cap,t);
+    } else {
+        pushOnRunQueue(cap,t);
+    }
+
     IF_DEBUG(sanity,
              //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
              checkTSO(t));
 
-    appendToRunQueue(cap,t);
-
     return rtsFalse;
 }
 
@@ -1371,7 +1377,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
         return cap;  // NOTE: task->cap might have changed here
     }
 
-    setContextSwitches();
+    interruptAllCapabilities();
 
     // The final shutdown GC is always single-threaded, because it's
     // possible that some of the Capabilities have no worker threads.
@@ -2145,7 +2151,7 @@ void
 interruptStgRts(void)
 {
     sched_state = SCHED_INTERRUPTING;
-    setContextSwitches();
+    interruptAllCapabilities();
 #if defined(THREADED_RTS)
     wakeUpRts();
 #endif
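
Putting the hunks together: run_thread now clears cap->interrupt before re-entering Haskell code, and scheduleHandleYield only rotates the run queue when a context switch was genuinely requested. The toy model below restates that decision in a self-contained form; appendToRunQueue and pushOnRunQueue are the real names from the diff, but their signatures and the array-based queue are simplifications for illustration.

```c
/* Toy model of the new yield decision in scheduleHandleYield; the real
 * run queue is a linked list of TSOs and the real helpers take
 * (Capability *, StgTSO *), so treat this purely as illustration. */
#include <stdio.h>

typedef struct {
    int context_switch;  /* set by the timer when a real switch is wanted */
    int interrupt;       /* set by interruptAllCapabilities(); cleared by
                            run_thread before re-entering Haskell code    */
} Capability;

static int run_queue[8];
static int rq_len = 0;

static void appendToRunQueue(int t)      /* back of the queue */
{
    run_queue[rq_len++] = t;
}

static void pushOnRunQueue(int t)        /* front of the queue */
{
    for (int i = rq_len; i > 0; i--) run_queue[i] = run_queue[i - 1];
    run_queue[0] = t;
    rq_len++;
}

/* Post-patch behaviour: only an explicit context-switch request rotates
 * the queue; a plain interrupt (e.g. for GC) puts the thread back on the
 * front so it resumes as soon as the scheduler is done. */
static void handleYield(Capability *cap, int t)
{
    if (cap->context_switch != 0) {
        cap->context_switch = 0;
        appendToRunQueue(t);
    } else {
        pushOnRunQueue(t);
    }
}

int main(void)
{
    Capability cap = { .context_switch = 0, .interrupt = 1 };  /* stopped for GC only */
    appendToRunQueue(1);       /* some other runnable thread                */
    handleYield(&cap, 7);      /* thread 7 yielded because of the interrupt */
    printf("next to run: thread %d\n", run_queue[0]);  /* prints 7, not 1   */
    return 0;
}
```

With the old code this same yield would have appended thread 7 behind thread 1, forcing a switch on every GC; keeping it at the front is what restores the intended context-switch frequency.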