summaryrefslogtreecommitdiff
path: root/rts/sm/MarkWeak.c
diff options
context:
space:
mode:
authorSimon Marlow <marlowsd@gmail.com>2010-03-29 14:44:56 +0000
committerSimon Marlow <marlowsd@gmail.com>2010-03-29 14:44:56 +0000
commit5d52d9b64c21dcf77849866584744722f8121389 (patch)
tree25aeafc9b761e73714c24ae414c0b1c41765c99f /rts/sm/MarkWeak.c
parent79957d77c1bff767f1041d3fabdeb94d92a52878 (diff)
downloadhaskell-5d52d9b64c21dcf77849866584744722f8121389.tar.gz
New implementation of BLACKHOLEs
This replaces the global blackhole_queue with a clever scheme that enables us to queue up blocked threads on the closure that they are blocked on, while still avoiding atomic instructions in the common case. Advantages: - gets rid of a locked global data structure and some tricky GC code (replacing it with some per-thread data structures and different tricky GC code :) - wakeups are more prompt: parallel/concurrent performance should benefit. I haven't seen anything dramatic in the parallel benchmarks so far, but a couple of threading benchmarks do improve a bit. - waking up a thread blocked on a blackhole is now O(1) (e.g. if it is the target of throwTo). - less sharing and better separation of Capabilities: communication is done with messages, the data structures are strictly owned by a Capability and cannot be modified except by sending messages. - this change will ultimately enable us to do more intelligent scheduling when threads block on each other. This is what started off the whole thing, but it isn't done yet (#3838). I'll be documenting all this on the wiki in due course.
Diffstat (limited to 'rts/sm/MarkWeak.c')
-rw-r--r--rts/sm/MarkWeak.c58
1 file changed, 0 insertions, 58 deletions
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index 0ac807ff79..e65c176c0a 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -210,21 +210,6 @@ traverseWeakPtrList(void)
}
}
- /* Finally, we can update the blackhole_queue. This queue
- * simply strings together TSOs blocked on black holes, it is
- * not intended to keep anything alive. Hence, we do not follow
- * pointers on the blackhole_queue until now, when we have
- * determined which TSOs are otherwise reachable. We know at
- * this point that all TSOs have been evacuated, however.
- */
- {
- StgTSO **pt;
- for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->_link)) {
- *pt = (StgTSO *)isAlive((StgClosure *)*pt);
- ASSERT(*pt != NULL);
- }
- }
-
weak_stage = WeakDone; // *now* we're done,
return rtsTrue; // but one more round of scavenging, please
}
@@ -310,49 +295,6 @@ static rtsBool tidyThreadList (generation *gen)
}
/* -----------------------------------------------------------------------------
- The blackhole queue
-
- Threads on this list behave like weak pointers during the normal
- phase of garbage collection: if the blackhole is reachable, then
- the thread is reachable too.
- -------------------------------------------------------------------------- */
-rtsBool
-traverseBlackholeQueue (void)
-{
- StgTSO *prev, *t, *tmp;
- rtsBool flag;
- nat type;
-
- flag = rtsFalse;
- prev = NULL;
-
- for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->_link) {
- // if the thread is not yet alive...
- if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
- // if the closure it is blocked on is either (a) a
- // reachable BLACKHOLE or (b) not a BLACKHOLE, then we
- // make the thread alive.
- if (!isAlive(t->block_info.closure)) {
- type = get_itbl(t->block_info.closure)->type;
- if (type == BLACKHOLE || type == CAF_BLACKHOLE) {
- continue;
- }
- }
- evacuate((StgClosure **)&t);
- if (prev) {
- prev->_link = t;
- } else {
- blackhole_queue = t;
- }
- // no write barrier when on the blackhole queue,
- // because we traverse the whole queue on every GC.
- flag = rtsTrue;
- }
- }
- return flag;
-}
-
-/* -----------------------------------------------------------------------------
Evacuate every weak pointer object on the weak_ptr_list, and update
the link fields.