summaryrefslogtreecommitdiff
path: root/rts/sm/MarkWeak.c
diff options
context:
space:
mode:
authorSimon Marlow <marlowsd@gmail.com>2009-12-01 16:03:21 +0000
committerSimon Marlow <marlowsd@gmail.com>2009-12-01 16:03:21 +0000
commit5270423a6afe69f1dc57e5e5a474812182718d40 (patch)
tree254cc0d910b315c47723a7a7fdd393fffbdee5ea /rts/sm/MarkWeak.c
parent063b822bb68f84dd9729327bb1765637c25aceb4 (diff)
downloadhaskell-5270423a6afe69f1dc57e5e5a474812182718d40.tar.gz
Make allocatePinned use local storage, and other refactorings
This is a batch of refactoring to remove some of the GC's global state, as we move towards CPU-local GC. - allocateLocal() now allocates large objects into the local nursery, rather than taking a global lock and allocating them in gen 0 step 0. - allocatePinned() was still allocating from global storage and taking a lock each time, now it uses local storage. (mallocForeignPtrBytes should be faster with -threaded). - We had a gen 0 step 0, distinct from the nurseries, which are stored in a separate nurseries[] array. This is slightly strange. I removed the g0s0 global that pointed to gen 0 step 0, and removed all uses of it. I think now we don't use gen 0 step 0 at all, except possibly when there is only one generation. Possibly more tidying up is needed here. - I removed the global allocate() function, and renamed allocateLocal() to allocate(). - the alloc_blocks global is gone. MAYBE_GC() and doYouWantToGC() now check the local nursery only.
Diffstat (limited to 'rts/sm/MarkWeak.c')
-rw-r--r--rts/sm/MarkWeak.c224
1 file changed, 123 insertions, 101 deletions
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index 4f0a7a451b..2f5964f529 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -21,6 +21,7 @@
#include "Trace.h"
#include "Schedule.h"
#include "Weak.h"
+#include "Storage.h"
/* -----------------------------------------------------------------------------
Weak Pointers
@@ -82,6 +83,9 @@ StgTSO *resurrected_threads;
// List of blocked threads found to have pending throwTos
StgTSO *exception_threads;
+static void resurrectUnreachableThreads (step *stp);
+static rtsBool tidyThreadList (step *stp);
+
void
initWeakForGC(void)
{
@@ -182,85 +186,23 @@ traverseWeakPtrList(void)
return rtsTrue;
case WeakThreads:
- /* Now deal with the all_threads list, which behaves somewhat like
+ /* Now deal with the step->threads lists, which behave somewhat like
* the weak ptr list. If we discover any threads that are about to
* become garbage, we wake them up and administer an exception.
*/
- {
- StgTSO *t, *tmp, *next, **prev;
- nat g, s;
- step *stp;
+ {
+ nat g, s, n;
- // Traverse thread lists for generations we collected...
- for (g = 0; g <= N; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- stp = &generations[g].steps[s];
-
- prev = &stp->old_threads;
-
- for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
-
- tmp = (StgTSO *)isAlive((StgClosure *)t);
-
- if (tmp != NULL) {
- t = tmp;
- }
-
- ASSERT(get_itbl(t)->type == TSO);
- if (t->what_next == ThreadRelocated) {
- next = t->_link;
- *prev = next;
- continue;
- }
-
- next = t->global_link;
-
- // This is a good place to check for blocked
- // exceptions. It might be the case that a thread is
- // blocked on delivering an exception to a thread that
- // is also blocked - we try to ensure that this
- // doesn't happen in throwTo(), but it's too hard (or
- // impossible) to close all the race holes, so we
- // accept that some might get through and deal with
- // them here. A GC will always happen at some point,
- // even if the system is otherwise deadlocked.
- //
- // If an unreachable thread has blocked
- // exceptions, we really want to perform the
- // blocked exceptions rather than throwing
- // BlockedIndefinitely exceptions. This is the
- // only place we can discover such threads.
- // The target thread might even be
- // ThreadFinished or ThreadKilled. Bugs here
- // will only be seen when running on a
- // multiprocessor.
- if (t->blocked_exceptions != END_TSO_QUEUE) {
- if (tmp == NULL) {
- evacuate((StgClosure **)&t);
- flag = rtsTrue;
- }
- t->global_link = exception_threads;
- exception_threads = t;
- *prev = next;
- continue;
- }
-
- if (tmp == NULL) {
- // not alive (yet): leave this thread on the
- // old_all_threads list.
- prev = &(t->global_link);
- }
- else {
- // alive
- *prev = next;
-
- // move this thread onto the correct threads list.
- step *new_step;
- new_step = Bdescr((P_)t)->step;
- t->global_link = new_step->threads;
- new_step->threads = t;
- }
- }
+ // Traverse thread lists for generations we collected...
+ for (n = 0; n < n_capabilities; n++) {
+ if (tidyThreadList(&nurseries[n])) {
+ flag = rtsTrue;
+ }
+ }
+ for (g = 0; g <= N; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ if (tidyThreadList(&generations[g].steps[s])) {
+ flag = rtsTrue;
}
}
}
@@ -272,36 +214,18 @@ traverseWeakPtrList(void)
/* And resurrect any threads which were about to become garbage.
*/
{
- nat g, s;
- step *stp;
- StgTSO *t, *tmp, *next;
+ nat g, s, n;
+ for (n = 0; n < n_capabilities; n++) {
+ resurrectUnreachableThreads(&nurseries[n]);
+ }
for (g = 0; g <= N; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
- stp = &generations[g].steps[s];
-
- for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
- next = t->global_link;
-
- // ThreadFinished and ThreadComplete: we have to keep
- // these on the all_threads list until they
- // become garbage, because they might get
- // pending exceptions.
- switch (t->what_next) {
- case ThreadKilled:
- case ThreadComplete:
- continue;
- default:
- tmp = t;
- evacuate((StgClosure **)&tmp);
- tmp->global_link = resurrected_threads;
- resurrected_threads = tmp;
- }
- }
+ resurrectUnreachableThreads(&generations[g].steps[s]);
}
}
}
-
+
/* Finally, we can update the blackhole_queue. This queue
* simply strings together TSOs blocked on black holes, it is
* not intended to keep anything alive. Hence, we do not follow
@@ -316,15 +240,113 @@ traverseWeakPtrList(void)
ASSERT(*pt != NULL);
}
}
-
+
weak_stage = WeakDone; // *now* we're done,
return rtsTrue; // but one more round of scavenging, please
-
+ }
+
default:
barf("traverse_weak_ptr_list");
return rtsTrue;
}
+}
+
+ static void resurrectUnreachableThreads (step *stp)
+{
+ StgTSO *t, *tmp, *next;
+
+ for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->global_link;
+
+ // ThreadFinished and ThreadComplete: we have to keep
+ // these on the all_threads list until they
+ // become garbage, because they might get
+ // pending exceptions.
+ switch (t->what_next) {
+ case ThreadKilled:
+ case ThreadComplete:
+ continue;
+ default:
+ tmp = t;
+ evacuate((StgClosure **)&tmp);
+ tmp->global_link = resurrected_threads;
+ resurrected_threads = tmp;
+ }
+ }
+}
+
+static rtsBool tidyThreadList (step *stp)
+{
+ StgTSO *t, *tmp, *next, **prev;
+ rtsBool flag = rtsFalse;
+ prev = &stp->old_threads;
+
+ for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
+
+ tmp = (StgTSO *)isAlive((StgClosure *)t);
+
+ if (tmp != NULL) {
+ t = tmp;
+ }
+
+ ASSERT(get_itbl(t)->type == TSO);
+ if (t->what_next == ThreadRelocated) {
+ next = t->_link;
+ *prev = next;
+ continue;
+ }
+
+ next = t->global_link;
+
+ // This is a good place to check for blocked
+ // exceptions. It might be the case that a thread is
+ // blocked on delivering an exception to a thread that
+ // is also blocked - we try to ensure that this
+ // doesn't happen in throwTo(), but it's too hard (or
+ // impossible) to close all the race holes, so we
+ // accept that some might get through and deal with
+ // them here. A GC will always happen at some point,
+ // even if the system is otherwise deadlocked.
+ //
+ // If an unreachable thread has blocked
+ // exceptions, we really want to perform the
+ // blocked exceptions rather than throwing
+ // BlockedIndefinitely exceptions. This is the
+ // only place we can discover such threads.
+ // The target thread might even be
+ // ThreadFinished or ThreadKilled. Bugs here
+ // will only be seen when running on a
+ // multiprocessor.
+ if (t->blocked_exceptions != END_TSO_QUEUE) {
+ if (tmp == NULL) {
+ evacuate((StgClosure **)&t);
+ flag = rtsTrue;
+ }
+ t->global_link = exception_threads;
+ exception_threads = t;
+ *prev = next;
+ continue;
+ }
+
+ if (tmp == NULL) {
+ // not alive (yet): leave this thread on the
+ // old_all_threads list.
+ prev = &(t->global_link);
+ }
+ else {
+ // alive
+ *prev = next;
+
+ // move this thread onto the correct threads list.
+ step *new_step;
+ new_step = Bdescr((P_)t)->step;
+ t->global_link = new_step->threads;
+ new_step->threads = t;
+ }
+ }
+
+ return flag;
}
/* -----------------------------------------------------------------------------