author | Simon Marlow <marlowsd@gmail.com> | 2010-03-09 14:31:11 +0000
---|---|---
committer | Simon Marlow <marlowsd@gmail.com> | 2010-03-09 14:31:11 +0000
commit | 7effbbbbdfe7eb05c6402fa9337e358e7e9fadde (patch) |
tree | d5896a7858db38265b77fc799fc54d5151737aab /rts/Capability.c |
parent | 4e8b07dbc753a5132c574926468ba886728c9049 (diff) |
download | haskell-7effbbbbdfe7eb05c6402fa9337e358e7e9fadde.tar.gz |
Split part of the Task struct into a separate struct InCall
The idea is that this leaves Tasks and OS threads in one-to-one
correspondence. The part of a Task that represents a call into
Haskell from C is split into a separate struct InCall, pointed to by
the Task and the TSO bound to it. A given OSThread/Task thus always
uses the same mutex and condition variable, rather than getting a new
one for each callback. Conceptually it is simpler, although there are
more types and indirections in a few places now.
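
For orientation, here is a minimal sketch of the resulting layout, reconstructed only from the field accesses visible in the patch below (task->incall->tso, tso->bound->task, incall->suspended_tso, incall->next); the authoritative declarations live in rts/Task.h and contain more fields than shown:

/* Sketch, not the real GHC declarations. */
typedef struct InCall_ {
    StgTSO         *tso;            // the bound TSO, if this in-call is bound
    StgTSO         *suspended_tso;  // set while the thread is out in a C call
    struct Task_   *task;           // the Task (OS thread) servicing the call
    struct InCall_ *next;           // chains cap->suspended_ccalls
} InCall;

typedef struct Task_ {
    OSThreadId          id;         // one Task per OS thread, for its lifetime
    struct Capability_ *cap;
    struct InCall_     *incall;     // the in-call currently being served
    Condition           cond;       // reused across callbacks now,
    Mutex               lock;       //   not re-allocated per in-call
    rtsBool             stopped;
    struct Task_       *next;       // links returning_tasks_* / spare_workers
} Task;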
This improves callback performance by removing some of the locks that
we had to take when making in-calls. Now we also keep the current Task
in a thread-local variable if supported by the OS and gcc (currently
only Linux).
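
A hypothetical illustration of that thread-local cache, assuming gcc's __thread storage class; the actual variable and accessor names in rts/Task.h may differ:

#if defined(THREADED_RTS) && defined(linux_HOST_OS) && defined(__GNUC__)
extern __thread Task *my_task;       // one cell per OS thread, no lock needed
#define myTask()      (my_task)
#define setMyTask(t)  (my_task = (t))
#else
// elsewhere, fall back to a pthread_key_t / TlsAlloc slot
extern Task *myTask(void);
extern void setMyTask(Task *task);
#endif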
Diffstat (limited to 'rts/Capability.c')
-rw-r--r-- | rts/Capability.c | 40
1 file changed, 19 insertions(+), 21 deletions(-)
diff --git a/rts/Capability.c b/rts/Capability.c
index cf85372ce0..ce6ecebd72 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -173,10 +173,10 @@ STATIC_INLINE void
 newReturningTask (Capability *cap, Task *task)
 {
     ASSERT_LOCK_HELD(&cap->lock);
-    ASSERT(task->return_link == NULL);
+    ASSERT(task->next == NULL);
     if (cap->returning_tasks_hd) {
-        ASSERT(cap->returning_tasks_tl->return_link == NULL);
-        cap->returning_tasks_tl->return_link = task;
+        ASSERT(cap->returning_tasks_tl->next == NULL);
+        cap->returning_tasks_tl->next = task;
     } else {
         cap->returning_tasks_hd = task;
     }
@@ -190,11 +190,11 @@ popReturningTask (Capability *cap)
     Task *task;
     task = cap->returning_tasks_hd;
     ASSERT(task);
-    cap->returning_tasks_hd = task->return_link;
+    cap->returning_tasks_hd = task->next;
     if (!cap->returning_tasks_hd) {
         cap->returning_tasks_tl = NULL;
     }
-    task->return_link = NULL;
+    task->next = NULL;
     return task;
 }
 #endif
@@ -220,7 +220,7 @@ initCapability( Capability *cap, nat i )
     initMutex(&cap->lock);
     cap->running_task = NULL; // indicates cap is free
     cap->spare_workers = NULL;
-    cap->suspended_ccalling_tasks = NULL;
+    cap->suspended_ccalls = NULL;
     cap->returning_tasks_hd = NULL;
     cap->returning_tasks_tl = NULL;
     cap->wakeup_queue_hd = END_TSO_QUEUE;
@@ -342,7 +342,7 @@ giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
     ASSERT_LOCK_HELD(&cap->lock);
     ASSERT(task->cap == cap);
     debugTrace(DEBUG_sched, "passing capability %d to %s %p",
-               cap->no, task->tso ? "bound task" : "worker",
+               cap->no, task->incall->tso ? "bound task" : "worker",
                (void *)task->id);
     ACQUIRE_LOCK(&task->lock);
     task->wakeup = rtsTrue;
@@ -398,7 +398,7 @@ releaseCapability_ (Capability* cap,
         // assertion is false: in schedule() we force a yield after
         // ThreadBlocked, but the thread may be back on the run queue
         // by now.
-        task = cap->run_queue_hd->bound;
+        task = cap->run_queue_hd->bound->task;
         giveCapabilityToTask(cap,task);
         return;
     }
@@ -411,7 +411,7 @@ releaseCapability_ (Capability* cap,
         if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
             debugTrace(DEBUG_sched,
                        "starting new worker on capability %d", cap->no);
-            startWorkerTask(cap, workerStart);
+            startWorkerTask(cap);
             return;
         }
     }
@@ -462,9 +462,7 @@ releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
     // in which case it is not replaced on the spare_worker queue.
     // This happens when the system is shutting down (see
     // Schedule.c:workerStart()).
-    // Also, be careful to check that this task hasn't just exited
-    // Haskell to do a foreign call (task->suspended_tso).
-    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
+    if (!isBoundTask(task) && !task->stopped) {
         task->next = cap->spare_workers;
         cap->spare_workers = task;
     }
@@ -612,7 +610,7 @@ yieldCapability (Capability** pCap, Task *task)
                 continue;
             }
-            if (task->tso == NULL) {
+            if (task->incall->tso == NULL) {
                 ASSERT(cap->spare_workers != NULL);
                 // if we're not at the front of the queue, release it
                 // again. This is unlikely to happen.
@@ -655,12 +653,12 @@ wakeupThreadOnCapability (Capability *my_cap,
     // ASSUMES: cap->lock is held (asserted in wakeupThreadOnCapability)
     if (tso->bound) {
-        ASSERT(tso->bound->cap == tso->cap);
-        tso->bound->cap = other_cap;
+        ASSERT(tso->bound->task->cap == tso->cap);
+        tso->bound->task->cap = other_cap;
     }
     tso->cap = other_cap;

-    ASSERT(tso->bound ? tso->bound->cap == other_cap : 1);
+    ASSERT(tso->bound ? tso->bound->task->cap == other_cap : 1);

     if (other_cap->running_task == NULL) {
         // nobody is running this Capability, we can add our thread
@@ -781,7 +779,7 @@ shutdownCapability (Capability *cap, Task *task, rtsBool safe)
             // that will try to return to code that has been unloaded.
             // We can be a bit more relaxed when this is a standalone
             // program that is about to terminate, and let safe=false.
-            if (cap->suspended_ccalling_tasks && safe) {
+            if (cap->suspended_ccalls && safe) {
                 debugTrace(DEBUG_sched,
                            "thread(s) are involved in foreign calls, yielding");
                 cap->running_task = NULL;
@@ -871,7 +869,7 @@ markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
 {
     nat i;
     Capability *cap;
-    Task *task;
+    InCall *incall;

     // Each GC thread is responsible for following roots from the
     // Capability of the same number. There will usually be the same
@@ -886,9 +884,9 @@ markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
       evac(user, (StgClosure **)(void *)&cap->wakeup_queue_hd);
       evac(user, (StgClosure **)(void *)&cap->wakeup_queue_tl);
 #endif
-      for (task = cap->suspended_ccalling_tasks; task != NULL;
-           task=task->next) {
-          evac(user, (StgClosure **)(void *)&task->suspended_tso);
+      for (incall = cap->suspended_ccalls; incall != NULL;
+           incall=incall->next) {
+          evac(user, (StgClosure **)(void *)&incall->suspended_tso);
       }
 #if defined(THREADED_RTS)
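
The extra indirection introduced by the split is visible in the wakeupThreadOnCapability hunk: tso->bound now points to an InCall rather than a Task, so the Task is reached via tso->bound->task. A hypothetical accessor (not part of this patch), written against the struct sketch above, makes the new path explicit:

/* Hypothetical helper: fetch the Task servicing a bound TSO,
 * going through the InCall indirection this commit introduces. */
STATIC_INLINE Task *
boundTask (StgTSO *tso)
{
    return tso->bound ? tso->bound->task : NULL;
}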