| author | Simon Marlow <marlowsd@gmail.com> | 2010-04-01 09:16:05 +0000 |
|---|---|---|
| committer | Simon Marlow <marlowsd@gmail.com> | 2010-04-01 09:16:05 +0000 |
| commit | f4692220c7cbdadaa633f50eb2b30b59edb30183 (patch) | |
| tree | 3d29f1b4770bedb7c69c31b2828f9b5acca3e2c3 /rts/Schedule.c | |
| parent | 7c4cb84efd774a21f11fb03118feb0434282ecf3 (diff) | |
| download | haskell-f4692220c7cbdadaa633f50eb2b30b59edb30183.tar.gz | |
Change the representation of the MVar blocked queue
The list of threads blocked on an MVar is now represented as a list of
separately allocated objects rather than being linked through the TSOs
themselves. This lets us remove a TSO from the list in O(1) time
rather than O(n) time, by marking the list object. Removing this
linear component fixes some pathological performance cases where many
threads were blocked on an MVar and became unreachable simultaneously
(nofib/smp/threads007), or when sending an asynchronous exception to a
TSO in a long list of threads blocked on an MVar.
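
To make the new representation concrete, here is a minimal self-contained sketch of the idea; the names (`Thread`, `MVarQueueCell`, `removeBlockedThread`, `dequeueBlockedThread`) are hypothetical stand-ins, not the RTS's actual types:

```c
#include <stddef.h>

/* Hypothetical stand-in for the RTS's TSO (thread state object). */
typedef struct Thread_ Thread;

/* Each blocked thread gets its own separately allocated queue cell,
 * instead of being linked into the queue through the TSO itself. */
typedef struct MVarQueueCell_ {
    struct MVarQueueCell_ *link;    /* next cell in the blocked queue */
    Thread                *thread;  /* blocked thread, or NULL once removed */
} MVarQueueCell;

/* O(1) removal: mark the cell dead instead of unlinking it.  Whoever
 * walks the queue later skips cells whose thread is NULL, so no O(n)
 * traversal is needed at removal time. */
void removeBlockedThread (MVarQueueCell *cell)
{
    cell->thread = NULL;
}

/* Queue walkers discard dead cells lazily as they go. */
Thread *dequeueBlockedThread (MVarQueueCell **head)
{
    while (*head != NULL) {
        MVarQueueCell *cell = *head;
        *head = cell->link;
        if (cell->thread != NULL) {
            return cell->thread;    /* first live entry */
        }
        /* dead cell: already removed in O(1), just skip it */
    }
    return NULL;
}
```
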
MVar performance has actually improved by a few percent as a result of
this change, slightly to my surprise.
This is the final cleanup in the sequence, which let me remove the old
way of waking up threads (unblockOne(), MSG_WAKEUP) in favour of the
new way (tryWakeupThread and MSG_TRY_WAKEUP, which is idempotent). It
is now the case that only the Capability that owns a TSO may modify
its state (well, almost), and this simplifies various things. More of
the RTS is based on message-passing between Capabilities now.
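
For the flavour of the idempotent wakeup mentioned above, a self-contained sketch under assumed names (`Thread`, `RunQueue`, `tryWakeup` are hypothetical, not the RTS's tryWakeupThread itself): only a thread that is still blocked is moved to the run queue, so a duplicate or stale MSG_TRY_WAKEUP-style message is harmless.

```c
typedef enum { BLOCKED, RUNNABLE } ThreadState;

typedef struct Thread_ {
    ThreadState     state;
    struct Thread_ *next;   /* run-queue link */
} Thread;

typedef struct {
    Thread *head;           /* simplistic LIFO run queue */
} RunQueue;

/* Idempotent wakeup: safe to call any number of times for the same
 * thread.  Only a thread that is still BLOCKED is enqueued; a second
 * (stale) wakeup finds it RUNNABLE and does nothing, which is why a
 * message-based protocol can afford to send duplicate wakeups. */
void tryWakeup (RunQueue *rq, Thread *t)
{
    if (t->state != BLOCKED) {
        return;             /* already awake: ignore the stale message */
    }
    t->state = RUNNABLE;
    t->next  = rq->head;
    rq->head = t;
}
```
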
Diffstat (limited to 'rts/Schedule.c')
-rw-r--r-- | rts/Schedule.c | 39 |
1 file changed, 6 insertions, 33 deletions
```diff
diff --git a/rts/Schedule.c b/rts/Schedule.c
index d7d57411b7..f7b26a4876 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -125,7 +125,7 @@ static Capability *schedule (Capability *initialCapability, Task *task);
 static void schedulePreLoop (void);
 static void scheduleFindWork (Capability *cap);
 #if defined(THREADED_RTS)
-static void scheduleYield (Capability **pcap, Task *task, rtsBool);
+static void scheduleYield (Capability **pcap, Task *task);
 #endif
 static void scheduleStartSignalHandlers (Capability *cap);
 static void scheduleCheckBlockedThreads (Capability *cap);
@@ -204,7 +204,6 @@ schedule (Capability *initialCapability, Task *task)
     rtsBool ready_to_gc;
 #if defined(THREADED_RTS)
     rtsBool first = rtsTrue;
-    rtsBool force_yield = rtsFalse;
 #endif
 
     cap = initialCapability;
@@ -328,9 +327,7 @@ schedule (Capability *initialCapability, Task *task)
     // ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     }
 
-  yield:
-    scheduleYield(&cap,task,force_yield);
-    force_yield = rtsFalse;
+    scheduleYield(&cap,task);
 
     if (emptyRunQueue(cap)) continue; // look for work again
 #endif
@@ -490,19 +487,6 @@ run_thread:
 
     traceEventStopThread(cap, t, ret);
 
-#if defined(THREADED_RTS)
-    // If ret is ThreadBlocked, and this Task is bound to the TSO that
-    // blocked, we are in limbo - the TSO is now owned by whatever it
-    // is blocked on, and may in fact already have been woken up,
-    // perhaps even on a different Capability.  It may be the case
-    // that task->cap != cap.  We better yield this Capability
-    // immediately and return to normaility.
-    if (ret == ThreadBlocked) {
-        force_yield = rtsTrue;
-        goto yield;
-    }
-#endif
-
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     ASSERT(t->cap == cap);
 
@@ -639,23 +623,13 @@ shouldYieldCapability (Capability *cap, Task *task)
 // and also check the benchmarks in nofib/parallel for regressions.
 
 static void
-scheduleYield (Capability **pcap, Task *task, rtsBool force_yield)
+scheduleYield (Capability **pcap, Task *task)
 {
     Capability *cap = *pcap;
 
     // if we have work, and we don't need to give up the Capability, continue.
     //
-    // The force_yield flag is used when a bound thread blocks.  This
-    // is a particularly tricky situation: the current Task does not
-    // own the TSO any more, since it is on some queue somewhere, and
-    // might be woken up or manipulated by another thread at any time.
-    // The TSO and Task might be migrated to another Capability.
-    // Certain invariants might be in doubt, such as task->bound->cap
-    // == cap.  We have to yield the current Capability immediately,
-    // no messing around.
-    //
-    if (!force_yield &&
-        !shouldYieldCapability(cap,task) &&
+    if (!shouldYieldCapability(cap,task) &&
         (!emptyRunQueue(cap) ||
          !emptyInbox(cap) ||
          sched_state >= SCHED_INTERRUPTING))
@@ -1891,8 +1865,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
         appendToRunQueue(cap,tso);
     } else {
-        traceEventMigrateThread (cap, tso, capabilities[cpu].no);
-        wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
+        migrateThread(cap, tso, &capabilities[cpu]);
     }
 #else
     appendToRunQueue(cap,tso);
@@ -2372,8 +2345,8 @@ deleteThread_(Capability *cap, StgTSO *tso)
 
     if (tso->why_blocked == BlockedOnCCall ||
         tso->why_blocked == BlockedOnCCall_NoUnblockExc) {
-        unblockOne(cap,tso);
         tso->what_next = ThreadKilled;
+        appendToRunQueue(tso->cap, tso);
     } else {
         deleteThread(cap,tso);
     }
```