author    Simon Marlow <simonmar@microsoft.com>  2007-10-31 12:51:36 +0000
committer Simon Marlow <simonmar@microsoft.com>  2007-10-31 12:51:36 +0000
commit    d5bd3e829c47c03157cf41cad581d2df44dfd81b (patch)
tree      98fb99c2713190f77d1999345888b2dcdabe5bf2 /rts
parent    9e5fe6be620eaf03a86f1321bef603ca43699a3c (diff)
download  haskell-d5bd3e829c47c03157cf41cad581d2df44dfd81b.tar.gz
Refactoring of the GC in preparation for parallel GC
This patch localises the state of the GC into a gc_thread structure, and reorganises the inner loop of the GC to scavenge one block at a time from global work lists in each "step". The gc_thread structure has a "workspace" for each step, in which it collects evacuated objects until it has a full block to push out to the step's global list. Details of the algorithm will be on the wiki in due course.

At the moment, THREADED_RTS does not compile, but the single-threaded GC works (and is 10-20% slower than before).
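To make the shape of the change easier to follow, here is a minimal C sketch of the data layout the message describes: one gc_thread per GC OS thread, holding a step_workspace per (generation, step), each with its own "todo" block. The field names and types below are simplified stand-ins, not the definitions added in the rts/sm/GC.h hunk further down.

    typedef struct bdescr_ { struct bdescr_ *link; } bdescr;  /* stand-in */

    typedef struct step_workspace_ {
        bdescr *todo_bd;      /* block currently being filled by evacuation */
        bdescr *scavd_list;   /* blocks that need no further scavenging     */
    } step_workspace;

    typedef struct gc_thread_ {
        unsigned int    thread_index;
        step_workspace **steps;            /* steps[gen_no][step_no]        */
    } gc_thread;

    /* Every evacuation picks the workspace for the destination step: */
    static step_workspace *
    workspace_for(gc_thread *gct, unsigned int gen_no, unsigned int step_no)
    {
        return &gct->steps[gen_no][step_no];
    }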
Diffstat (limited to 'rts')
-rw-r--r--  rts/Makefile          2
-rw-r--r--  rts/RtsFlags.c       14
-rw-r--r--  rts/sm/Evac.c       168
-rw-r--r--  rts/sm/GC.c        1242
-rw-r--r--  rts/sm/GC.h         140
-rw-r--r--  rts/sm/GCUtils.c    165
-rw-r--r--  rts/sm/GCUtils.h     28
-rw-r--r--  rts/sm/MarkWeak.c     2
-rw-r--r--  rts/sm/Scav.c       504
-rw-r--r--  rts/sm/Scav.h         7
-rw-r--r--  rts/sm/Storage.c     18
11 files changed, 1446 insertions, 844 deletions
diff --git a/rts/Makefile b/rts/Makefile
index 19a7a2288f..9437253d77 100644
--- a/rts/Makefile
+++ b/rts/Makefile
@@ -390,7 +390,7 @@ sm/Compact_HC_OPTS += -optc-finline-limit=2500
# use a variety of types to represent closure pointers (StgPtr,
# StgClosure, StgMVar, etc.), and without -fno-strict-aliasing gcc is
# allowed to assume that these pointers do not alias. eg. without
-# this flag we get problems in GC.c:copy() with gcc 3.4.3, the
+# this flag we get problems in sm/Evac.c:copy() with gcc 3.4.3, the
# upd_evacee() assigments get moved before the object copy.
SRC_CC_OPTS += -fno-strict-aliasing
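A minimal illustration, outside the RTS, of the aliasing pattern this comment is about: the collector views the same heap words through unrelated struct pointer types, so without -fno-strict-aliasing gcc may assume the accesses are independent and reorder them (as happened with the evacuee-update store under gcc 3.4.3). The type names below are simplified stand-ins, not the real Stg types.

    #include <stdio.h>

    typedef unsigned long StgWord;
    typedef struct { StgWord header; StgWord payload; } StgClosure;
    typedef struct { StgWord header; StgWord value;   } StgMVar;

    /* Write the object through one view, read it back through another.
     * Under strict aliasing the compiler may assume the StgMVar load is
     * unaffected by the StgClosure store and rearrange the two accesses. */
    static void    write_as_closure(void *p, StgWord w) { ((StgClosure *)p)->payload = w; }
    static StgWord read_as_mvar(void *p)                { return ((StgMVar *)p)->value; }

    int main(void)
    {
        StgWord obj[2] = { 0, 0 };          /* one heap object, two views */
        write_as_closure(obj, 42);
        printf("%lu\n", (unsigned long)read_as_mvar(obj));
        return 0;
    }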
diff --git a/rts/RtsFlags.c b/rts/RtsFlags.c
index f8c8403328..69064ea743 100644
--- a/rts/RtsFlags.c
+++ b/rts/RtsFlags.c
@@ -212,6 +212,7 @@ void initRtsFlagsDefaults(void)
RtsFlags.ParFlags.nNodes = 1;
RtsFlags.ParFlags.migrate = rtsTrue;
RtsFlags.ParFlags.wakeupMigrate = rtsFalse;
+ RtsFlags.ParFlags.gcThreads = 1;
#endif
#ifdef PAR
@@ -445,6 +446,7 @@ usage_text[] = {
#endif /* DEBUG */
#if defined(THREADED_RTS) && !defined(NOSMP)
" -N<n> Use <n> OS threads (default: 1)",
+" -g<n> Use <n> OS threads for GC (default: 1)",
" -qm Don't automatically migrate threads between CPUs",
" -qw Migrate a thread to the current CPU when it is woken up",
#endif
@@ -1117,6 +1119,18 @@ error = rtsTrue;
errorBelch("bad value for -N");
error = rtsTrue;
}
+ }
+ ) break;
+
+ case 'g':
+ THREADED_BUILD_ONLY(
+ if (rts_argv[arg][2] != '\0') {
+ RtsFlags.ParFlags.gcThreads
+ = strtol(rts_argv[arg]+2, (char **) NULL, 10);
+ if (RtsFlags.ParFlags.nNodes <= 0) {
+ errorBelch("bad value for -g");
+ error = rtsTrue;
+ }
#if defined(PROFILING)
if (RtsFlags.ParFlags.nNodes > 1) {
errorBelch("bad option %s: only -N1 is supported with profiling", rts_argv[arg]);
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 42b6b1f666..5b37729749 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -23,7 +23,6 @@
/* Used to avoid long recursion due to selector thunks
*/
-lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 16
static StgClosure * eval_thunk_selector (StgSelector * p, rtsBool);
@@ -43,10 +42,8 @@ copy_tag(StgClosure *src, nat size, step *stp,StgWord tag)
{
StgPtr to, from;
nat i;
-#ifdef PROFILING
- // @LDV profiling
- nat size_org = size;
-#endif
+ step_workspace *ws;
+ bdescr *bd;
TICK_GC_WORDS_COPIED(size);
/* Find out where we're going, using the handy "to" pointer in
@@ -54,24 +51,28 @@ copy_tag(StgClosure *src, nat size, step *stp,StgWord tag)
* evacuate to an older generation, adjust it here (see comment
* by evacuate()).
*/
- if (stp->gen_no < evac_gen) {
- if (eager_promotion) {
- stp = &generations[evac_gen].steps[0];
+ if (stp->gen_no < gct->evac_gen) {
+ if (gct->eager_promotion) {
+ stp = &generations[gct->evac_gen].steps[0];
} else {
- failed_to_evac = rtsTrue;
+ gct->failed_to_evac = rtsTrue;
}
}
+ ws = &gct->steps[stp->gen_no][stp->no];
+
/* chain a new block onto the to-space for the destination step if
* necessary.
*/
- if (stp->hp + size >= stp->hpLim) {
- gc_alloc_block(stp);
+ bd = ws->todo_bd;
+ to = bd->free;
+ if (to + size >= bd->start + BLOCK_SIZE_W) {
+ bd = gc_alloc_todo_block(ws);
+ to = bd->free;
}
- to = stp->hp;
from = (StgPtr)src;
- stp->hp = to + size;
+ bd->free = to + size;
for (i = 0; i < size; i++) { // unroll for small i
to[i] = from[i];
}
@@ -84,7 +85,7 @@ copy_tag(StgClosure *src, nat size, step *stp,StgWord tag)
#ifdef PROFILING
// We store the size of the just evacuated object in the LDV word so that
// the profiler can guess the position of the next object later.
- SET_EVACUAEE_FOR_LDV(from, size_org);
+ SET_EVACUAEE_FOR_LDV(from, size);
#endif
return (StgClosure *)to;
}
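The heart of the change to copy_tag() is that allocation now goes through the thread's workspace instead of the step's shared hp/hpLim. A condensed sketch of that path, with simplified stand-ins for bdescr and step_workspace and the block-chaining details elided:

    #define BLOCK_SIZE_W 1024               /* assumed block size in words */

    typedef unsigned long StgWord;

    typedef struct bdescr_ {
        StgWord *start;                     /* first word of the block      */
        StgWord *free;                      /* next unallocated word        */
    } bdescr;

    typedef struct {
        bdescr *todo_bd;                    /* block currently being filled */
    } step_workspace;

    /* Stand-in for gc_alloc_todo_block(): push the full block to the
     * step's global todo list (omitted) and chain on a fresh one.          */
    bdescr *gc_alloc_todo_block_sketch(step_workspace *ws);

    static StgWord *alloc_for_copy(step_workspace *ws, unsigned int size)
    {
        bdescr  *bd = ws->todo_bd;
        StgWord *to = bd->free;
        if (to + size >= bd->start + BLOCK_SIZE_W) {   /* todo block full? */
            bd = gc_alloc_todo_block_sketch(ws);
            to = bd->free;
        }
        bd->free = to + size;                          /* bump-allocate    */
        return to;                                     /* copy goes here   */
    }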
@@ -97,10 +98,8 @@ copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
{
StgPtr to, from;
nat i;
-#ifdef PROFILING
- // @LDV profiling
- nat size_org = size;
-#endif
+ step_workspace *ws;
+ bdescr *bd;
TICK_GC_WORDS_COPIED(size);
/* Find out where we're going, using the handy "to" pointer in
@@ -108,24 +107,28 @@ copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
* evacuate to an older generation, adjust it here (see comment
* by evacuate()).
*/
- if (stp->gen_no < evac_gen) {
- if (eager_promotion) {
- stp = &generations[evac_gen].steps[0];
+ if (stp->gen_no < gct->evac_gen) {
+ if (gct->eager_promotion) {
+ stp = &generations[gct->evac_gen].steps[0];
} else {
- failed_to_evac = rtsTrue;
+ gct->failed_to_evac = rtsTrue;
}
}
+ ws = &gct->steps[stp->gen_no][stp->no];
+
/* chain a new block onto the to-space for the destination step if
* necessary.
*/
- if (stp->scavd_hp + size >= stp->scavd_hpLim) {
- gc_alloc_scavd_block(stp);
+ bd = ws->scavd_list;
+ to = bd->free;
+ if (to + size >= bd->start + BLOCK_SIZE_W) {
+ bd = gc_alloc_scavd_block(ws);
+ to = bd->free;
}
- to = stp->scavd_hp;
from = (StgPtr)src;
- stp->scavd_hp = to + size;
+ bd->free = to + size;
for (i = 0; i < size; i++) { // unroll for small i
to[i] = from[i];
}
@@ -138,57 +141,59 @@ copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
#ifdef PROFILING
// We store the size of the just evacuated object in the LDV word so that
// the profiler can guess the position of the next object later.
- SET_EVACUAEE_FOR_LDV(from, size_org);
+ SET_EVACUAEE_FOR_LDV(from, size);
#endif
return (StgClosure *)to;
}
+
/* Special version of copy() for when we only want to copy the info
* pointer of an object, but reserve some padding after it. This is
* used to optimise evacuation of BLACKHOLEs.
*/
-
-
static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
- P_ dest, to, from;
-#ifdef PROFILING
- // @LDV profiling
- nat size_to_copy_org = size_to_copy;
-#endif
+ StgPtr to, from;
+ nat i;
+ step_workspace *ws;
+ bdescr *bd;
TICK_GC_WORDS_COPIED(size_to_copy);
- if (stp->gen_no < evac_gen) {
- if (eager_promotion) {
- stp = &generations[evac_gen].steps[0];
+ if (stp->gen_no < gct->evac_gen) {
+ if (gct->eager_promotion) {
+ stp = &generations[gct->evac_gen].steps[0];
} else {
- failed_to_evac = rtsTrue;
+ gct->failed_to_evac = rtsTrue;
}
}
- if (stp->hp + size_to_reserve >= stp->hpLim) {
- gc_alloc_block(stp);
+ ws = &gct->steps[stp->gen_no][stp->no];
+
+ bd = ws->todo_bd;
+ to = bd->free;
+ if (to + size_to_reserve >= bd->start + BLOCK_SIZE_W) {
+ bd = gc_alloc_todo_block(ws);
+ to = bd->free;
}
- for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
- *to++ = *from++;
+ from = (StgPtr)src;
+ bd->free = to + size_to_reserve;
+ for (i = 0; i < size_to_copy; i++) { // unroll for small i
+ to[i] = from[i];
}
- dest = stp->hp;
- stp->hp += size_to_reserve;
- upd_evacuee(src,(StgClosure *)dest);
+ upd_evacuee((StgClosure *)from,(StgClosure *)to);
+
#ifdef PROFILING
// We store the size of the just evacuated object in the LDV word so that
// the profiler can guess the position of the next object later.
- // size_to_copy_org is wrong because the closure already occupies size_to_reserve
- // words.
- SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
+ SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
// fill the slop
- if (size_to_reserve - size_to_copy_org > 0)
- LDV_FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
+ if (size_to_reserve - size_to_copy > 0)
+ LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
#endif
- return (StgClosure *)dest;
+ return (StgClosure *)to;
}
@@ -222,6 +227,7 @@ evacuate_large(StgPtr p)
{
bdescr *bd = Bdescr(p);
step *stp;
+ step_workspace *ws;
// object must be at the beginning of the block (or be a ByteArray)
ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
@@ -229,17 +235,19 @@ evacuate_large(StgPtr p)
// already evacuated?
if (bd->flags & BF_EVACUATED) {
- /* Don't forget to set the failed_to_evac flag if we didn't get
+ /* Don't forget to set the gct->failed_to_evac flag if we didn't get
* the desired destination (see comments in evacuate()).
*/
- if (bd->gen_no < evac_gen) {
- failed_to_evac = rtsTrue;
+ if (bd->gen_no < gct->evac_gen) {
+ gct->failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
return;
}
stp = bd->step;
+
+ ACQUIRE_SPIN_LOCK(&stp->sync_large_objects);
// remove from large_object list
if (bd->u.back) {
bd->u.back->link = bd->link;
@@ -249,22 +257,24 @@ evacuate_large(StgPtr p)
if (bd->link) {
bd->link->u.back = bd->u.back;
}
+ RELEASE_SPIN_LOCK(&stp->sync_large_objects);
/* link it on to the evacuated large object list of the destination step
*/
stp = bd->step->to;
- if (stp->gen_no < evac_gen) {
- if (eager_promotion) {
- stp = &generations[evac_gen].steps[0];
+ if (stp->gen_no < gct->evac_gen) {
+ if (gct->eager_promotion) {
+ stp = &generations[gct->evac_gen].steps[0];
} else {
- failed_to_evac = rtsTrue;
+ gct->failed_to_evac = rtsTrue;
}
}
+ ws = &gct->steps[stp->gen_no][stp->no];
bd->step = stp;
bd->gen_no = stp->gen_no;
- bd->link = stp->new_large_objects;
- stp->new_large_objects = bd;
+ bd->link = ws->todo_large_objects;
+ ws->todo_large_objects = bd;
bd->flags |= BF_EVACUATED;
}
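The new ACQUIRE_SPIN_LOCK/RELEASE_SPIN_LOCK pair guards the step's shared large-object list now that several GC threads may unlink from it. Purely as an illustration, here is a minimal spin lock of that flavour in C11 atomics; the RTS has its own SpinLock type in its headers, not this one.

    #include <stdatomic.h>

    typedef atomic_flag SpinLock;   /* illustrative stand-in only;
                                     * initialise with ATOMIC_FLAG_INIT */

    static void spin_lock_acquire(SpinLock *l)
    {
        /* busy-wait until we observe the flag clear, setting it atomically */
        while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
            ;
    }

    static void spin_lock_release(SpinLock *l)
    {
        atomic_flag_clear_explicit(l, memory_order_release);
    }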
@@ -274,22 +284,22 @@ evacuate_large(StgPtr p)
This is called (eventually) for every live object in the system.
The caller to evacuate specifies a desired generation in the
- evac_gen global variable. The following conditions apply to
+ gct->evac_gen thread-local variable. The following conditions apply to
evacuating an object which resides in generation M when we're
collecting up to generation N
- if M >= evac_gen
+ if M >= gct->evac_gen
if M > N do nothing
else evac to step->to
- if M < evac_gen evac to evac_gen, step 0
+ if M < gct->evac_gen evac to gct->evac_gen, step 0
if the object is already evacuated, then we check which generation
it now resides in.
- if M >= evac_gen do nothing
- if M < evac_gen set failed_to_evac flag to indicate that we
- didn't manage to evacuate this object into evac_gen.
+ if M >= gct->evac_gen do nothing
+ if M < gct->evac_gen set gct->failed_to_evac flag to indicate that we
+ didn't manage to evacuate this object into gct->evac_gen.
OPTIMISATION NOTES:
@@ -385,12 +395,12 @@ loop:
if (bd->gen_no > N) {
/* Can't evacuate this object, because it's in a generation
* older than the ones we're collecting. Let's hope that it's
- * in evac_gen or older, or we will have to arrange to track
+ * in gct->evac_gen or older, or we will have to arrange to track
* this pointer using the mutable list.
*/
- if (bd->gen_no < evac_gen) {
+ if (bd->gen_no < gct->evac_gen) {
// nope
- failed_to_evac = rtsTrue;
+ gct->failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
return TAG_CLOSURE(tag,q);
@@ -404,8 +414,8 @@ loop:
* object twice, for example).
*/
if (bd->flags & BF_EVACUATED) {
- if (bd->gen_no < evac_gen) {
- failed_to_evac = rtsTrue;
+ if (bd->gen_no < gct->evac_gen) {
+ gct->failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
return TAG_CLOSURE(tag,q);
@@ -556,10 +566,10 @@ loop:
case EVACUATED:
/* Already evacuated, just return the forwarding address.
- * HOWEVER: if the requested destination generation (evac_gen) is
+ * HOWEVER: if the requested destination generation (gct->evac_gen) is
* older than the actual generation (because the object was
* already evacuated to a younger generation) then we have to
- * set the failed_to_evac flag to indicate that we couldn't
+ * set the gct->failed_to_evac flag to indicate that we couldn't
* manage to promote the object to the desired generation.
*/
/*
@@ -571,10 +581,10 @@ loop:
* current object would be evacuated to, so we only do the full
* check if stp is too low.
*/
- if (evac_gen > 0 && stp->gen_no < evac_gen) { // optimisation
+ if (gct->evac_gen > 0 && stp->gen_no < gct->evac_gen) { // optimisation
StgClosure *p = ((StgEvacuated*)q)->evacuee;
- if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
- failed_to_evac = rtsTrue;
+ if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < gct->evac_gen) {
+ gct->failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
}
@@ -832,16 +842,16 @@ selector_loop:
// recursively evaluate this selector. We don't want to
// recurse indefinitely, so we impose a depth bound.
- if (thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
+ if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
goto bale_out;
}
- thunk_selector_depth++;
+ gct->thunk_selector_depth++;
// rtsFalse says "don't evacuate the result". It will,
// however, update any THUNK_SELECTORs that are evaluated
// along the way.
val = eval_thunk_selector((StgSelector *)selectee, rtsFalse);
- thunk_selector_depth--;
+ gct->thunk_selector_depth--;
// did we actually manage to evaluate it?
if (val == selectee) goto bale_out;
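The depth bound on selector evaluation is unchanged in spirit; only the counter has moved into gc_thread. Reduced to its essentials, with a plain counter standing in for gct->thunk_selector_depth and a toy recursion standing in for eval_thunk_selector:

    #define MAX_THUNK_SELECTOR_DEPTH 16

    static int thunk_selector_depth = 0;    /* lives in gc_thread in the patch */

    /* Toy stand-in for eval_thunk_selector: follow a chain of length n,
     * but refuse to recurse past the depth bound ("bale out").             */
    static int eval_selector_chain(int n)
    {
        if (thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH)
            return 0;                       /* bale out, leave thunk as-is   */
        if (n == 0)
            return 1;                       /* reached the selected value    */
        thunk_selector_depth++;
        {
            int ok = eval_selector_chain(n - 1);
            thunk_selector_depth--;
            return ok;
        }
    }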
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index f686c6dd5f..17bc2041ef 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -39,6 +39,7 @@
#include "Trace.h"
#include "RetainerProfile.h"
#include "RaiseAsync.h"
+#include "Sparks.h"
#include "GC.h"
#include "Compact.h"
@@ -49,6 +50,10 @@
#include <string.h> // for memset()
+/* -----------------------------------------------------------------------------
+ Global variables
+ -------------------------------------------------------------------------- */
+
/* STATIC OBJECT LIST.
*
* During GC:
@@ -85,6 +90,9 @@
*/
StgClosure* static_objects; // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
+#ifdef THREADED_RTS
+SpinLock static_objects_sync;
+#endif
/* N is the oldest generation being collected, where the generations
* are numbered starting at 0. A major GC (indicated by the major_gc
@@ -94,25 +102,8 @@ StgClosure* scavenged_static_objects; // static objects scavenged so far
nat N;
rtsBool major_gc;
-/* Youngest generation that objects should be evacuated to in
- * evacuate(). (Logically an argument to evacuate, but it's static
- * a lot of the time so we optimise it into a global variable).
- */
-nat evac_gen;
-
-/* Whether to do eager promotion or not.
- */
-rtsBool eager_promotion;
-
-/* Flag indicating failure to evacuate an object to the desired
- * generation.
- */
-rtsBool failed_to_evac;
-
/* Data used for allocation area sizing.
*/
-lnat new_blocks; // blocks allocated during this GC
-lnat new_scavd_blocks; // ditto, but depth-first blocks
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
/* Mut-list stats */
@@ -123,20 +114,36 @@ nat mutlist_MUTVARS,
mutlist_OTHERS;
#endif
+/* Thread-local data for each GC thread
+ */
+gc_thread *gc_threads = NULL;
+gc_thread *gct = NULL; // this thread's gct TODO: make thread-local
+
+// For stats:
+long copied; // *words* copied & scavenged during this GC
+long scavd_copied; // *words* copied only during this GC
+
/* -----------------------------------------------------------------------------
Static function declarations
-------------------------------------------------------------------------- */
-static void mark_root ( StgClosure **root );
-
-static void zero_static_object_list ( StgClosure* first_static );
+static void mark_root (StgClosure **root);
+static void zero_static_object_list (StgClosure* first_static);
+static void initialise_N (rtsBool force_major_gc);
+static void alloc_gc_threads (void);
+static void init_collected_gen (nat g, nat threads);
+static void init_uncollected_gen (nat g, nat threads);
+static void init_gc_thread (gc_thread *t);
+static void update_task_list (void);
+static void resize_generations (void);
+static void resize_nursery (void);
#if 0 && defined(DEBUG)
-static void gcCAFs ( void );
+static void gcCAFs (void);
#endif
/* -----------------------------------------------------------------------------
- inline functions etc. for dealing with the mark bitmap & stack.
+ The mark bitmap & stack.
-------------------------------------------------------------------------- */
#define MARK_STACK_BLOCKS 4
@@ -153,37 +160,9 @@ bdescr *oldgen_scan_bd;
StgPtr oldgen_scan;
/* -----------------------------------------------------------------------------
- GarbageCollect
-
- Rough outline of the algorithm: for garbage collecting generation N
- (and all younger generations):
-
- - follow all pointers in the root set. the root set includes all
- mutable objects in all generations (mutable_list).
-
- - for each pointer, evacuate the object it points to into either
-
- + to-space of the step given by step->to, which is the next
- highest step in this generation or the first step in the next
- generation if this is the last step.
-
- + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
- When we evacuate an object we attempt to evacuate
- everything it points to into the same generation - this is
- achieved by setting evac_gen to the desired generation. If
- we can't do this, then an entry in the mut list has to
- be made for the cross-generation pointer.
-
- + if the object is already in a generation > N, then leave
- it alone.
-
- - repeatedly scavenge to-space from each step in each generation
- being collected until no more objects can be evacuated.
-
- - free from-space in each step, and set from-space = to-space.
+ GarbageCollect: the main entry point to the garbage collector.
Locks held: all capabilities are held throughout GarbageCollect().
-
-------------------------------------------------------------------------- */
void
@@ -191,9 +170,11 @@ GarbageCollect ( rtsBool force_major_gc )
{
bdescr *bd;
step *stp;
- lnat live, allocated, copied = 0, scavd_copied = 0;
+ lnat live, allocated;
lnat oldgen_saved_blocks = 0;
- nat g, s, i;
+ nat n_threads; // number of threads participating in GC
+
+ nat g, s, t;
#ifdef PROFILING
CostCentreStack *prev_CCS;
@@ -210,7 +191,7 @@ GarbageCollect ( rtsBool force_major_gc )
}
#endif
- // tell the STM to discard any cached closures its hoping to re-use
+ // tell the STM to discard any cached closures it's hoping to re-use
stmPreGCHook();
// tell the stats department that we've started a GC
@@ -240,20 +221,24 @@ GarbageCollect ( rtsBool force_major_gc )
/* Figure out which generation to collect
*/
- if (force_major_gc) {
- N = RtsFlags.GcFlags.generations - 1;
- major_gc = rtsTrue;
+ initialise_N(force_major_gc);
+
+ /* Allocate + initialise the gc_thread structures.
+ */
+ alloc_gc_threads();
+
+ /* How many threads will be participating in this GC?
+ * We don't try to parallelise minor GC.
+ */
+#if defined(THREADED_RTS)
+ if (N == 0) {
+ n_threads = 1;
} else {
- N = 0;
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- if (generations[g].steps[0].n_blocks +
- generations[g].steps[0].n_large_blocks
- >= generations[g].max_blocks) {
- N = g;
- }
- }
- major_gc = (N == RtsFlags.GcFlags.generations-1);
+ n_threads = RtsFlags.ParFlags.gcThreads;
}
+#else
+ n_threads = 1;
+#endif
#ifdef RTS_GTK_FRONTPANEL
if (RtsFlags.GcFlags.frontpanel) {
@@ -268,143 +253,18 @@ GarbageCollect ( rtsBool force_major_gc )
*/
static_objects = END_OF_STATIC_LIST;
scavenged_static_objects = END_OF_STATIC_LIST;
+#ifdef THREADED_RTS
+ initSpinLock(&static_objects_sync);
+#endif
- /* Keep a count of how many new blocks we allocated during this GC
- * (used for resizing the allocation area, later).
- */
- new_blocks = 0;
- new_scavd_blocks = 0;
-
- // Initialise to-space in all the generations/steps that we're
- // collecting.
- //
+ // Initialise all the generations/steps that we're collecting.
for (g = 0; g <= N; g++) {
-
- // throw away the mutable list. Invariant: the mutable list
- // always has at least one block; this means we can avoid a check for
- // NULL in recordMutable().
- if (g != 0) {
- freeChain(generations[g].mut_list);
- generations[g].mut_list = allocBlock();
- for (i = 0; i < n_capabilities; i++) {
- freeChain(capabilities[i].mut_lists[g]);
- capabilities[i].mut_lists[g] = allocBlock();
- }
- }
-
- for (s = 0; s < generations[g].n_steps; s++) {
-
- // generation 0, step 0 doesn't need to-space
- if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
- continue;
- }
-
- stp = &generations[g].steps[s];
- ASSERT(stp->gen_no == g);
-
- // start a new to-space for this step.
- stp->old_blocks = stp->blocks;
- stp->n_old_blocks = stp->n_blocks;
-
- // allocate the first to-space block; extra blocks will be
- // chained on as necessary.
- stp->hp_bd = NULL;
- bd = gc_alloc_block(stp);
- stp->blocks = bd;
- stp->n_blocks = 1;
- stp->scan = bd->start;
- stp->scan_bd = bd;
-
- // allocate a block for "already scavenged" objects. This goes
- // on the front of the stp->blocks list, so it won't be
- // traversed by the scavenging sweep.
- gc_alloc_scavd_block(stp);
-
- // initialise the large object queues.
- stp->new_large_objects = NULL;
- stp->scavenged_large_objects = NULL;
- stp->n_scavenged_large_blocks = 0;
-
- // mark the large objects as not evacuated yet
- for (bd = stp->large_objects; bd; bd = bd->link) {
- bd->flags &= ~BF_EVACUATED;
- }
-
- // for a compacted step, we need to allocate the bitmap
- if (stp->is_compacted) {
- nat bitmap_size; // in bytes
- bdescr *bitmap_bdescr;
- StgWord *bitmap;
-
- bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
-
- if (bitmap_size > 0) {
- bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
- / BLOCK_SIZE);
- stp->bitmap = bitmap_bdescr;
- bitmap = bitmap_bdescr->start;
-
- debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
- bitmap_size, bitmap);
-
- // don't forget to fill it with zeros!
- memset(bitmap, 0, bitmap_size);
-
- // For each block in this step, point to its bitmap from the
- // block descriptor.
- for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
- bd->u.bitmap = bitmap;
- bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
-
- // Also at this point we set the BF_COMPACTED flag
- // for this block. The invariant is that
- // BF_COMPACTED is always unset, except during GC
- // when it is set on those blocks which will be
- // compacted.
- bd->flags |= BF_COMPACTED;
- }
- }
- }
- }
+ init_collected_gen(g,n_threads);
}
-
- /* make sure the older generations have at least one block to
- * allocate into (this makes things easier for copy(), see below).
- */
+
+ // Initialise all the generations/steps that we're *not* collecting.
for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- stp = &generations[g].steps[s];
- if (stp->hp_bd == NULL) {
- ASSERT(stp->blocks == NULL);
- bd = gc_alloc_block(stp);
- stp->blocks = bd;
- stp->n_blocks = 1;
- }
- if (stp->scavd_hp == NULL) {
- gc_alloc_scavd_block(stp);
- stp->n_blocks++;
- }
- /* Set the scan pointer for older generations: remember we
- * still have to scavenge objects that have been promoted. */
- stp->scan = stp->hp;
- stp->scan_bd = stp->hp_bd;
- stp->new_large_objects = NULL;
- stp->scavenged_large_objects = NULL;
- stp->n_scavenged_large_blocks = 0;
- }
-
- /* Move the private mutable lists from each capability onto the
- * main mutable list for the generation.
- */
- for (i = 0; i < n_capabilities; i++) {
- for (bd = capabilities[i].mut_lists[g];
- bd->link != NULL; bd = bd->link) {
- /* nothing */
- }
- bd->link = generations[g].mut_list;
- generations[g].mut_list = capabilities[i].mut_lists[g];
- capabilities[i].mut_lists[g] = allocBlock();
- }
+ init_uncollected_gen(g,n_threads);
}
/* Allocate a mark stack if we're doing a major collection.
@@ -418,55 +278,51 @@ GarbageCollect ( rtsBool force_major_gc )
mark_stack_bdescr = NULL;
}
- eager_promotion = rtsTrue; // for now
+ // Initialise all our gc_thread structures
+ for (t = 0; t < n_threads; t++) {
+ init_gc_thread(&gc_threads[t]);
+ }
+
+ // Initialise stats
+ copied = 0;
+ scavd_copied = 0;
+
+ // start threads etc.
+ // For now, we just have one thread, and set gct to gc_threads[0]
+ gct = &gc_threads[0];
/* -----------------------------------------------------------------------
* follow all the roots that we know about:
* - mutable lists from each generation > N
* we want to *scavenge* these roots, not evacuate them: they're not
* going to move in this GC.
- * Also: do them in reverse generation order. This is because we
- * often want to promote objects that are pointed to by older
- * generations early, so we don't have to repeatedly copy them.
- * Doing the generations in reverse order ensures that we don't end
- * up in the situation where we want to evac an object to gen 3 and
- * it has already been evaced to gen 2.
+ * Also do them in reverse generation order, for the usual reason:
+ * namely to reduce the likelihood of spurious old->new pointers.
*/
{
- int st;
for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
generations[g].saved_mut_list = generations[g].mut_list;
generations[g].mut_list = allocBlock();
// mut_list always has at least one block.
}
-
for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
scavenge_mutable_list(&generations[g]);
- evac_gen = g;
- for (st = generations[g].n_steps-1; st >= 0; st--) {
- scavenge(&generations[g].steps[st]);
- }
}
}
- /* follow roots from the CAF list (used by GHCi)
- */
- evac_gen = 0;
+ // follow roots from the CAF list (used by GHCi)
+ gct->evac_gen = 0;
markCAFs(mark_root);
- /* follow all the roots that the application knows about.
- */
- evac_gen = 0;
+ // follow all the roots that the application knows about.
+ gct->evac_gen = 0;
GetRoots(mark_root);
- /* Mark the weak pointer list, and prepare to detect dead weak
- * pointers.
- */
+ // Mark the weak pointer list, and prepare to detect dead weak pointers.
markWeakPtrList();
initWeakForGC();
- /* Mark the stable pointer table.
- */
+ // Mark the stable pointer table.
markStablePtrTable(mark_root);
/* -------------------------------------------------------------------------
@@ -478,54 +334,7 @@ GarbageCollect ( rtsBool force_major_gc )
loop:
flag = rtsFalse;
- // scavenge static objects
- if (major_gc && static_objects != END_OF_STATIC_LIST) {
- IF_DEBUG(sanity, checkStaticObjects(static_objects));
- scavenge_static();
- }
-
- /* When scavenging the older generations: Objects may have been
- * evacuated from generations <= N into older generations, and we
- * need to scavenge these objects. We're going to try to ensure that
- * any evacuations that occur move the objects into at least the
- * same generation as the object being scavenged, otherwise we
- * have to create new entries on the mutable list for the older
- * generation.
- */
-
- // scavenge each step in generations 0..maxgen
- {
- long gen;
- int st;
-
- loop2:
- // scavenge objects in compacted generation
- if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
- (mark_stack_bdescr != NULL && !mark_stack_empty())) {
- scavenge_mark_stack();
- flag = rtsTrue;
- }
-
- for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
- for (st = generations[gen].n_steps; --st >= 0; ) {
- if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
- continue;
- }
- stp = &generations[gen].steps[st];
- evac_gen = gen;
- if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
- scavenge(stp);
- flag = rtsTrue;
- goto loop2;
- }
- if (stp->new_large_objects != NULL) {
- scavenge_large(stp);
- flag = rtsTrue;
- goto loop2;
- }
- }
- }
- }
+ scavenge_loop();
// if any blackholes are alive, make the threads that wait on
// them alive too.
@@ -541,49 +350,12 @@ GarbageCollect ( rtsBool force_major_gc )
}
}
- /* Update the pointers from the task list - these are
- * treated as weak pointers because we want to allow a main thread
- * to get a BlockedOnDeadMVar exception in the same way as any other
- * thread. Note that the threads should all have been retained by
- * GC by virtue of being on the all_threads list, we're just
- * updating pointers here.
- */
- {
- Task *task;
- StgTSO *tso;
- for (task = all_tasks; task != NULL; task = task->all_link) {
- if (!task->stopped && task->tso) {
- ASSERT(task->tso->bound == task);
- tso = (StgTSO *) isAlive((StgClosure *)task->tso);
- if (tso == NULL) {
- barf("task %p: main thread %d has been GC'd",
-#ifdef THREADED_RTS
- (void *)task->id,
-#else
- (void *)task,
-#endif
- task->tso->id);
- }
- task->tso = tso;
- }
- }
- }
+ // Update pointers from the Task list
+ update_task_list();
// Now see which stable names are still alive.
gcStablePtrTable();
- // Tidy the end of the to-space chains
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- stp = &generations[g].steps[s];
- if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
- ASSERT(Bdescr(stp->hp) == stp->hp_bd);
- stp->hp_bd->free = stp->hp;
- Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
- }
- }
- }
-
#ifdef PROFILING
// We call processHeapClosureForDead() on every closure destroyed during
// the current garbage collection, so we invoke LdvCensusForDead().
@@ -602,10 +374,73 @@ GarbageCollect ( rtsBool force_major_gc )
IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
+ // Two-space collector: free the old to-space.
+ // g0s0->old_blocks is the old nursery
+ // g0s0->blocks is to-space from the previous GC
+ if (RtsFlags.GcFlags.generations == 1) {
+ if (g0s0->blocks != NULL) {
+ freeChain(g0s0->blocks);
+ g0s0->blocks = NULL;
+ }
+ }
+
+ // For each workspace, in each thread:
+ // * clear the BF_EVACUATED flag from each copied block
+ // * move the copied blocks to the step
+ {
+ gc_thread *thr;
+ step_workspace *ws;
+ bdescr *prev;
+
+ for (t = 0; t < n_threads; t++) {
+ thr = &gc_threads[t];
+
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ ws = &thr->steps[g][s];
+ if (g==0 && s==0) continue;
+
+ ASSERT( ws->scan_bd == ws->todo_bd );
+ ASSERT( ws->scan_bd ? ws->scan == ws->scan_bd->free : 1 );
+
+ // Push the final block
+ if (ws->scan_bd) { push_scan_block(ws->scan_bd, ws); }
+
+ // update stats: we haven't counted the block at the
+ // front of the scavd_list yet.
+ scavd_copied += ws->scavd_list->free - ws->scavd_list->start;
+
+ ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks);
+
+ prev = ws->scavd_list;
+ for (bd = ws->scavd_list; bd != NULL; bd = bd->link) {
+ bd->flags &= ~BF_EVACUATED; // now from-space
+ prev = bd;
+ }
+ prev->link = ws->stp->blocks;
+ ws->stp->blocks = ws->scavd_list;
+ ws->stp->n_blocks += ws->n_scavd_blocks;
+ ASSERT(countBlocks(ws->stp->blocks) == ws->stp->n_blocks);
+ }
+ }
+ }
+ }
+
+ // Two-space collector: swap the semi-spaces around.
+ // Currently: g0s0->old_blocks is the old nursery
+ // g0s0->blocks is to-space from this GC
+ // We want these the other way around.
+ if (RtsFlags.GcFlags.generations == 1) {
+ bdescr *nursery_blocks = g0s0->old_blocks;
+ nat n_nursery_blocks = g0s0->n_old_blocks;
+ g0s0->old_blocks = g0s0->blocks;
+ g0s0->n_old_blocks = g0s0->n_blocks;
+ g0s0->blocks = nursery_blocks;
+ g0s0->n_blocks = n_nursery_blocks;
+ }
+
/* run through all the generations/steps and tidy up
*/
- copied = new_blocks * BLOCK_SIZE_W;
- scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
if (g <= N) {
@@ -631,15 +466,6 @@ GarbageCollect ( rtsBool force_major_gc )
bdescr *next;
stp = &generations[g].steps[s];
- if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
- // stats information: how much we copied
- if (g <= N) {
- copied -= stp->hp_bd->start + BLOCK_SIZE_W -
- stp->hp_bd->free;
- scavd_copied -= stp->scavd_hpLim - stp->scavd_hp;
- }
- }
-
// for generations we collected...
if (g <= N) {
@@ -648,7 +474,8 @@ GarbageCollect ( rtsBool force_major_gc )
* freed blocks will probaby be quickly recycled.
*/
if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
- if (stp->is_compacted) {
+ if (stp->is_compacted)
+ {
// for a compacted step, just shift the new to-space
// onto the front of the now-compacted existing blocks.
for (bd = stp->blocks; bd != NULL; bd = bd->link) {
@@ -672,11 +499,10 @@ GarbageCollect ( rtsBool force_major_gc )
// add the new blocks to the block tally
stp->n_blocks += stp->n_old_blocks;
ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
- } else {
+ }
+ else // not compacted
+ {
freeChain(stp->old_blocks);
- for (bd = stp->blocks; bd != NULL; bd = bd->link) {
- bd->flags &= ~BF_EVACUATED; // now from-space
- }
}
stp->old_blocks = NULL;
stp->n_old_blocks = 0;
@@ -700,9 +526,9 @@ GarbageCollect ( rtsBool force_major_gc )
stp->large_objects = stp->scavenged_large_objects;
stp->n_large_blocks = stp->n_scavenged_large_blocks;
- } else {
- // for older generations...
-
+ }
+ else // for older generations...
+ {
/* For older generations, we need to append the
* scavenged_large_object list (i.e. large objects that have been
* promoted during this GC) to the large_object list for that step.
@@ -719,88 +545,14 @@ GarbageCollect ( rtsBool force_major_gc )
}
}
- /* Reset the sizes of the older generations when we do a major
- * collection.
- *
- * CURRENT STRATEGY: make all generations except zero the same size.
- * We have to stay within the maximum heap size, and leave a certain
- * percentage of the maximum heap size available to allocate into.
- */
- if (major_gc && RtsFlags.GcFlags.generations > 1) {
- nat live, size, min_alloc;
- nat max = RtsFlags.GcFlags.maxHeapSize;
- nat gens = RtsFlags.GcFlags.generations;
-
- // live in the oldest generations
- live = oldest_gen->steps[0].n_blocks +
- oldest_gen->steps[0].n_large_blocks;
-
- // default max size for all generations except zero
- size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
- RtsFlags.GcFlags.minOldGenSize);
-
- // minimum size for generation zero
- min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
- RtsFlags.GcFlags.minAllocAreaSize);
-
- // Auto-enable compaction when the residency reaches a
- // certain percentage of the maximum heap size (default: 30%).
- if (RtsFlags.GcFlags.generations > 1 &&
- (RtsFlags.GcFlags.compact ||
- (max > 0 &&
- oldest_gen->steps[0].n_blocks >
- (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
- oldest_gen->steps[0].is_compacted = 1;
-// debugBelch("compaction: on\n", live);
- } else {
- oldest_gen->steps[0].is_compacted = 0;
-// debugBelch("compaction: off\n", live);
- }
-
- // if we're going to go over the maximum heap size, reduce the
- // size of the generations accordingly. The calculation is
- // different if compaction is turned on, because we don't need
- // to double the space required to collect the old generation.
- if (max != 0) {
-
- // this test is necessary to ensure that the calculations
- // below don't have any negative results - we're working
- // with unsigned values here.
- if (max < min_alloc) {
- heapOverflow();
- }
-
- if (oldest_gen->steps[0].is_compacted) {
- if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
- size = (max - min_alloc) / ((gens - 1) * 2 - 1);
- }
- } else {
- if ( (size * (gens - 1) * 2) + min_alloc > max ) {
- size = (max - min_alloc) / ((gens - 1) * 2);
- }
- }
-
- if (size < live) {
- heapOverflow();
- }
- }
-
-#if 0
- debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
- min_alloc, size, max);
-#endif
-
- for (g = 0; g < gens; g++) {
- generations[g].max_blocks = size;
- }
- }
-
+ // update the max size of older generations after a major GC
+ resize_generations();
+
// Guess the amount of live data for stats.
live = calcLive();
- /* Free the small objects allocated via allocate(), since this will
- * all have been copied into G0S1 now.
- */
+ // Free the small objects allocated via allocate(), since this will
+ // all have been copied into G0S1 now.
if (RtsFlags.GcFlags.generations > 1) {
if (g0s0->blocks != NULL) {
freeChain(g0s0->blocks);
@@ -814,14 +566,12 @@ GarbageCollect ( rtsBool force_major_gc )
// Start a new pinned_object_block
pinned_object_block = NULL;
- /* Free the mark stack.
- */
+ // Free the mark stack.
if (mark_stack_bdescr != NULL) {
freeGroup(mark_stack_bdescr);
}
- /* Free any bitmaps.
- */
+ // Free any bitmaps.
for (g = 0; g <= N; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
stp = &generations[g].steps[s];
@@ -832,101 +582,7 @@ GarbageCollect ( rtsBool force_major_gc )
}
}
- /* Two-space collector:
- * Free the old to-space, and estimate the amount of live data.
- */
- if (RtsFlags.GcFlags.generations == 1) {
- nat blocks;
-
- /* For a two-space collector, we need to resize the nursery. */
-
- /* set up a new nursery. Allocate a nursery size based on a
- * function of the amount of live data (by default a factor of 2)
- * Use the blocks from the old nursery if possible, freeing up any
- * left over blocks.
- *
- * If we get near the maximum heap size, then adjust our nursery
- * size accordingly. If the nursery is the same size as the live
- * data (L), then we need 3L bytes. We can reduce the size of the
- * nursery to bring the required memory down near 2L bytes.
- *
- * A normal 2-space collector would need 4L bytes to give the same
- * performance we get from 3L bytes, reducing to the same
- * performance at 2L bytes.
- */
- blocks = g0s0->n_blocks;
-
- if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
- blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
- RtsFlags.GcFlags.maxHeapSize ) {
- long adjusted_blocks; // signed on purpose
- int pc_free;
-
- adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
-
- debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
- RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);
-
- pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
- if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
- heapOverflow();
- }
- blocks = adjusted_blocks;
-
- } else {
- blocks *= RtsFlags.GcFlags.oldGenFactor;
- if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
- blocks = RtsFlags.GcFlags.minAllocAreaSize;
- }
- }
- resizeNurseries(blocks);
-
- } else {
- /* Generational collector:
- * If the user has given us a suggested heap size, adjust our
- * allocation area to make best use of the memory available.
- */
-
- if (RtsFlags.GcFlags.heapSizeSuggestion) {
- long blocks;
- nat needed = calcNeeded(); // approx blocks needed at next GC
-
- /* Guess how much will be live in generation 0 step 0 next time.
- * A good approximation is obtained by finding the
- * percentage of g0s0 that was live at the last minor GC.
- */
- if (N == 0) {
- g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
- }
-
- /* Estimate a size for the allocation area based on the
- * information available. We might end up going slightly under
- * or over the suggested heap size, but we should be pretty
- * close on average.
- *
- * Formula: suggested - needed
- * ----------------------------
- * 1 + g0s0_pcnt_kept/100
- *
- * where 'needed' is the amount of memory needed at the next
- * collection for collecting all steps except g0s0.
- */
- blocks =
- (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
- (100 + (long)g0s0_pcnt_kept);
-
- if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
- blocks = RtsFlags.GcFlags.minAllocAreaSize;
- }
-
- resizeNurseries((nat)blocks);
-
- } else {
- // we might have added extra large blocks to the nursery, so
- // resize back to minAllocAreaSize again.
- resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
- }
- }
+ resize_nursery();
// mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
@@ -1165,6 +821,326 @@ isAlive(StgClosure *p)
}
}
+/* -----------------------------------------------------------------------------
+ Figure out which generation to collect, initialise N and major_gc.
+ -------------------------------------------------------------------------- */
+
+static void
+initialise_N (rtsBool force_major_gc)
+{
+ nat g;
+
+ if (force_major_gc) {
+ N = RtsFlags.GcFlags.generations - 1;
+ major_gc = rtsTrue;
+ } else {
+ N = 0;
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ if (generations[g].steps[0].n_blocks +
+ generations[g].steps[0].n_large_blocks
+ >= generations[g].max_blocks) {
+ N = g;
+ }
+ }
+ major_gc = (N == RtsFlags.GcFlags.generations-1);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ Initialise the gc_thread structures.
+ -------------------------------------------------------------------------- */
+
+static void
+alloc_gc_thread (gc_thread *t, int n)
+{
+ nat g, s;
+ step_workspace *ws;
+
+ t->thread_index = n;
+ t->free_blocks = NULL;
+ t->gc_count = 0;
+
+ init_gc_thread(t);
+
+ t->steps = stgMallocBytes(RtsFlags.GcFlags.generations *
+ sizeof(step_workspace *),
+ "initialise_gc_thread");
+
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++)
+ {
+ t->steps[g] = stgMallocBytes(generations[g].n_steps *
+ sizeof(step_workspace),
+ "initialise_gc_thread/2");
+
+ for (s = 0; s < generations[g].n_steps; s++)
+ {
+ ws = &t->steps[g][s];
+ ws->stp = &generations[g].steps[s];
+ ws->gct = t;
+
+ ws->scan_bd = NULL;
+ ws->scan = NULL;
+
+ ws->todo_bd = NULL;
+ ws->buffer_todo_bd = NULL;
+
+ ws->scavd_list = NULL;
+ ws->n_scavd_blocks = 0;
+ }
+ }
+}
+
+
+static void
+alloc_gc_threads (void)
+{
+ if (gc_threads == NULL) {
+#if defined(THREADED_RTS)
+ nat i;
+
+ gc_threads = stgMallocBytes (RtsFlags.ParFlags.gcThreads *
+ sizeof(gc_thread),
+ "alloc_gc_threads");
+
+ for (i = 0; i < RtsFlags.ParFlags.gcThreads; i++) {
+ alloc_gc_thread(&gc_threads[i], i);
+ }
+#else
+ gc_threads = stgMallocBytes (sizeof(gc_thread),
+ "alloc_gc_threads");
+
+ alloc_gc_thread(gc_threads, 0);
+#endif
+ }
+}
+
+/* ----------------------------------------------------------------------------
+ Initialise a generation that is to be collected
+ ------------------------------------------------------------------------- */
+
+static void
+init_collected_gen (nat g, nat n_threads)
+{
+ nat s, t, i;
+ step_workspace *ws;
+ step *stp;
+ bdescr *bd;
+
+ // Throw away the current mutable list. Invariant: the mutable
+ // list always has at least one block; this means we can avoid a
+ // check for NULL in recordMutable().
+ if (g != 0) {
+ freeChain(generations[g].mut_list);
+ generations[g].mut_list = allocBlock();
+ for (i = 0; i < n_capabilities; i++) {
+ freeChain(capabilities[i].mut_lists[g]);
+ capabilities[i].mut_lists[g] = allocBlock();
+ }
+ }
+
+ for (s = 0; s < generations[g].n_steps; s++) {
+
+ // generation 0, step 0 doesn't need to-space
+ if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
+ continue;
+ }
+
+ stp = &generations[g].steps[s];
+ ASSERT(stp->gen_no == g);
+
+ // deprecate the existing blocks
+ stp->old_blocks = stp->blocks;
+ stp->n_old_blocks = stp->n_blocks;
+ stp->blocks = NULL;
+ stp->n_blocks = 0;
+
+ // we don't have any to-be-scavenged blocks yet
+ stp->todos = NULL;
+ stp->n_todos = 0;
+
+ // initialise the large object queues.
+ stp->scavenged_large_objects = NULL;
+ stp->n_scavenged_large_blocks = 0;
+
+ // mark the large objects as not evacuated yet
+ for (bd = stp->large_objects; bd; bd = bd->link) {
+ bd->flags &= ~BF_EVACUATED;
+ }
+
+ // for a compacted step, we need to allocate the bitmap
+ if (stp->is_compacted) {
+ nat bitmap_size; // in bytes
+ bdescr *bitmap_bdescr;
+ StgWord *bitmap;
+
+ bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
+
+ if (bitmap_size > 0) {
+ bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
+ / BLOCK_SIZE);
+ stp->bitmap = bitmap_bdescr;
+ bitmap = bitmap_bdescr->start;
+
+ debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
+ bitmap_size, bitmap);
+
+ // don't forget to fill it with zeros!
+ memset(bitmap, 0, bitmap_size);
+
+ // For each block in this step, point to its bitmap from the
+ // block descriptor.
+ for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
+ bd->u.bitmap = bitmap;
+ bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
+
+ // Also at this point we set the BF_COMPACTED flag
+ // for this block. The invariant is that
+ // BF_COMPACTED is always unset, except during GC
+ // when it is set on those blocks which will be
+ // compacted.
+ bd->flags |= BF_COMPACTED;
+ }
+ }
+ }
+ }
+
+ // For each GC thread, for each step, allocate a "todo" block to
+ // store evacuated objects to be scavenged, and a block to store
+ // evacuated objects that do not need to be scavenged.
+ for (t = 0; t < n_threads; t++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+
+ // we don't copy objects into g0s0, unless -G0
+ if (g==0 && s==0 && RtsFlags.GcFlags.generations > 1) continue;
+
+ ws = &gc_threads[t].steps[g][s];
+
+ ws->scan_bd = NULL;
+ ws->scan = NULL;
+
+ ws->todo_large_objects = NULL;
+
+ // allocate the first to-space block; extra blocks will be
+ // chained on as necessary.
+ ws->todo_bd = NULL;
+ ws->buffer_todo_bd = NULL;
+ gc_alloc_todo_block(ws);
+
+ // allocate a block for "already scavenged" objects. This goes
+ // on the front of the stp->blocks list, so it won't be
+ // traversed by the scavenging sweep.
+ ws->scavd_list = NULL;
+ ws->n_scavd_blocks = 0;
+ gc_alloc_scavd_block(ws);
+ }
+ }
+}
+
+
+/* ----------------------------------------------------------------------------
+ Initialise a generation that is *not* to be collected
+ ------------------------------------------------------------------------- */
+
+static void
+init_uncollected_gen (nat g, nat threads)
+{
+ nat s, t, i;
+ step_workspace *ws;
+ step *stp;
+ bdescr *bd;
+
+ for (s = 0; s < generations[g].n_steps; s++) {
+ stp = &generations[g].steps[s];
+ stp->scavenged_large_objects = NULL;
+ stp->n_scavenged_large_blocks = 0;
+ }
+
+ for (t = 0; t < threads; t++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+
+ ws = &gc_threads[t].steps[g][s];
+ stp = ws->stp;
+
+ ws->buffer_todo_bd = NULL;
+ ws->todo_large_objects = NULL;
+
+ // If the block at the head of the list in this generation
+ // is less than 3/4 full, then use it as a todo block.
+ if (isPartiallyFull(stp->blocks))
+ {
+ ws->todo_bd = stp->blocks;
+ stp->blocks = stp->blocks->link;
+ stp->n_blocks -= 1;
+ ws->todo_bd->link = NULL;
+
+ // this block is also the scan block; we must scan
+ // from the current end point.
+ ws->scan_bd = ws->todo_bd;
+ ws->scan = ws->scan_bd->free;
+
+ // subtract the contents of this block from the stats,
+ // because we'll count the whole block later.
+ copied -= ws->scan_bd->free - ws->scan_bd->start;
+ }
+ else
+ {
+ ws->scan_bd = NULL;
+ ws->scan = NULL;
+ ws->todo_bd = NULL;
+ gc_alloc_todo_block(ws);
+ }
+
+ // Do the same trick for the scavd block
+ if (isPartiallyFull(stp->blocks))
+ {
+ ws->scavd_list = stp->blocks;
+ stp->blocks = stp->blocks->link;
+ stp->n_blocks -= 1;
+ ws->scavd_list->link = NULL;
+ ws->n_scavd_blocks = 1;
+ // subtract the contents of this block from the stats,
+ // because we'll count the whole block later.
+ scavd_copied -= ws->scavd_list->free - ws->scavd_list->start;
+ }
+ else
+ {
+ ws->scavd_list = NULL;
+ ws->n_scavd_blocks = 0;
+ gc_alloc_scavd_block(ws);
+ }
+ }
+ }
+
+ // Move the private mutable lists from each capability onto the
+ // main mutable list for the generation.
+ for (i = 0; i < n_capabilities; i++) {
+ for (bd = capabilities[i].mut_lists[g];
+ bd->link != NULL; bd = bd->link) {
+ /* nothing */
+ }
+ bd->link = generations[g].mut_list;
+ generations[g].mut_list = capabilities[i].mut_lists[g];
+ capabilities[i].mut_lists[g] = allocBlock();
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ Initialise a gc_thread before GC
+ -------------------------------------------------------------------------- */
+
+static void
+init_gc_thread (gc_thread *t)
+{
+ t->evac_gen = 0;
+ t->failed_to_evac = rtsFalse;
+ t->eager_promotion = rtsTrue;
+ t->thunk_selector_depth = 0;
+}
+
+/* -----------------------------------------------------------------------------
+ Function we pass to GetRoots to evacuate roots.
+ -------------------------------------------------------------------------- */
+
static void
mark_root(StgClosure **root)
{
@@ -1225,6 +1201,236 @@ markCAFs( evac_fn evac )
}
}
+/* ----------------------------------------------------------------------------
+ Update the pointers from the task list
+
+ These are treated as weak pointers because we want to allow a main
+ thread to get a BlockedOnDeadMVar exception in the same way as any
+ other thread. Note that the threads should all have been retained
+ by GC by virtue of being on the all_threads list, we're just
+ updating pointers here.
+ ------------------------------------------------------------------------- */
+
+static void
+update_task_list (void)
+{
+ Task *task;
+ StgTSO *tso;
+ for (task = all_tasks; task != NULL; task = task->all_link) {
+ if (!task->stopped && task->tso) {
+ ASSERT(task->tso->bound == task);
+ tso = (StgTSO *) isAlive((StgClosure *)task->tso);
+ if (tso == NULL) {
+ barf("task %p: main thread %d has been GC'd",
+#ifdef THREADED_RTS
+ (void *)task->id,
+#else
+ (void *)task,
+#endif
+ task->tso->id);
+ }
+ task->tso = tso;
+ }
+ }
+}
+
+/* ----------------------------------------------------------------------------
+ Reset the sizes of the older generations when we do a major
+ collection.
+
+ CURRENT STRATEGY: make all generations except zero the same size.
+ We have to stay within the maximum heap size, and leave a certain
+ percentage of the maximum heap size available to allocate into.
+ ------------------------------------------------------------------------- */
+
+static void
+resize_generations (void)
+{
+ nat g;
+
+ if (major_gc && RtsFlags.GcFlags.generations > 1) {
+ nat live, size, min_alloc;
+ nat max = RtsFlags.GcFlags.maxHeapSize;
+ nat gens = RtsFlags.GcFlags.generations;
+
+ // live in the oldest generations
+ live = oldest_gen->steps[0].n_blocks +
+ oldest_gen->steps[0].n_large_blocks;
+
+ // default max size for all generations except zero
+ size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
+ RtsFlags.GcFlags.minOldGenSize);
+
+ // minimum size for generation zero
+ min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
+ RtsFlags.GcFlags.minAllocAreaSize);
+
+ // Auto-enable compaction when the residency reaches a
+ // certain percentage of the maximum heap size (default: 30%).
+ if (RtsFlags.GcFlags.generations > 1 &&
+ (RtsFlags.GcFlags.compact ||
+ (max > 0 &&
+ oldest_gen->steps[0].n_blocks >
+ (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
+ oldest_gen->steps[0].is_compacted = 1;
+// debugBelch("compaction: on\n", live);
+ } else {
+ oldest_gen->steps[0].is_compacted = 0;
+// debugBelch("compaction: off\n", live);
+ }
+
+ // if we're going to go over the maximum heap size, reduce the
+ // size of the generations accordingly. The calculation is
+ // different if compaction is turned on, because we don't need
+ // to double the space required to collect the old generation.
+ if (max != 0) {
+
+ // this test is necessary to ensure that the calculations
+ // below don't have any negative results - we're working
+ // with unsigned values here.
+ if (max < min_alloc) {
+ heapOverflow();
+ }
+
+ if (oldest_gen->steps[0].is_compacted) {
+ if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
+ size = (max - min_alloc) / ((gens - 1) * 2 - 1);
+ }
+ } else {
+ if ( (size * (gens - 1) * 2) + min_alloc > max ) {
+ size = (max - min_alloc) / ((gens - 1) * 2);
+ }
+ }
+
+ if (size < live) {
+ heapOverflow();
+ }
+ }
+
+#if 0
+ debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
+ min_alloc, size, max);
+#endif
+
+ for (g = 0; g < gens; g++) {
+ generations[g].max_blocks = size;
+ }
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ Calculate the new size of the nursery, and resize it.
+ -------------------------------------------------------------------------- */
+
+static void
+resize_nursery (void)
+{
+ if (RtsFlags.GcFlags.generations == 1)
+ { // Two-space collector:
+ nat blocks;
+
+ /* set up a new nursery. Allocate a nursery size based on a
+ * function of the amount of live data (by default a factor of 2)
+ * Use the blocks from the old nursery if possible, freeing up any
+ * left over blocks.
+ *
+ * If we get near the maximum heap size, then adjust our nursery
+ * size accordingly. If the nursery is the same size as the live
+ * data (L), then we need 3L bytes. We can reduce the size of the
+ * nursery to bring the required memory down near 2L bytes.
+ *
+ * A normal 2-space collector would need 4L bytes to give the same
+ * performance we get from 3L bytes, reducing to the same
+ * performance at 2L bytes.
+ */
+ blocks = g0s0->n_old_blocks;
+
+ if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
+ blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
+ RtsFlags.GcFlags.maxHeapSize )
+ {
+ long adjusted_blocks; // signed on purpose
+ int pc_free;
+
+ adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
+
+ debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
+ RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);
+
+ pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
+ if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */
+ {
+ heapOverflow();
+ }
+ blocks = adjusted_blocks;
+ }
+ else
+ {
+ blocks *= RtsFlags.GcFlags.oldGenFactor;
+ if (blocks < RtsFlags.GcFlags.minAllocAreaSize)
+ {
+ blocks = RtsFlags.GcFlags.minAllocAreaSize;
+ }
+ }
+ resizeNurseries(blocks);
+ }
+ else // Generational collector
+ {
+ /*
+ * If the user has given us a suggested heap size, adjust our
+ * allocation area to make best use of the memory available.
+ */
+ if (RtsFlags.GcFlags.heapSizeSuggestion)
+ {
+ long blocks;
+ nat needed = calcNeeded(); // approx blocks needed at next GC
+
+ /* Guess how much will be live in generation 0 step 0 next time.
+ * A good approximation is obtained by finding the
+ * percentage of g0s0 that was live at the last minor GC.
+ *
+ * We have an accurate figure for the amount of copied data in
+ * 'copied', but we must convert this to a number of blocks, with
+ * a small adjustment for estimated slop at the end of a block
+ * (- 10 words).
+ */
+ if (N == 0)
+ {
+ g0s0_pcnt_kept = ((copied / (BLOCK_SIZE_W - 10)) * 100)
+ / countNurseryBlocks();
+ }
+
+ /* Estimate a size for the allocation area based on the
+ * information available. We might end up going slightly under
+ * or over the suggested heap size, but we should be pretty
+ * close on average.
+ *
+ * Formula: suggested - needed
+ * ----------------------------
+ * 1 + g0s0_pcnt_kept/100
+ *
+ * where 'needed' is the amount of memory needed at the next
+ * collection for collecting all steps except g0s0.
+ */
+ blocks =
+ (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
+ (100 + (long)g0s0_pcnt_kept);
+
+ if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
+ blocks = RtsFlags.GcFlags.minAllocAreaSize;
+ }
+
+ resizeNurseries((nat)blocks);
+ }
+ else
+ {
+ // we might have added extra large blocks to the nursery, so
+ // resize back to minAllocAreaSize again.
+ resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
+ }
+ }
+}
+
/* -----------------------------------------------------------------------------
Sanity code for CAF garbage collection.
@@ -1275,25 +1481,3 @@ gcCAFs(void)
debugTrace(DEBUG_gccafs, "%d CAFs live", i);
}
#endif
-
-/* -----------------------------------------------------------------------------
- * Debugging
- * -------------------------------------------------------------------------- */
-
-#if DEBUG
-void
-printMutableList(generation *gen)
-{
- bdescr *bd;
- StgPtr p;
-
- debugBelch("mutable list %p: ", gen->mut_list);
-
- for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
- for (p = bd->start; p < bd->free; p++) {
- debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));
- }
- }
- debugBelch("\n");
-}
-#endif /* DEBUG */
diff --git a/rts/sm/GC.h b/rts/sm/GC.h
index d3ce8cf92d..69fdc10447 100644
--- a/rts/sm/GC.h
+++ b/rts/sm/GC.h
@@ -14,11 +14,141 @@
#ifndef GC_H
#define GC_H
+#include "OSThreads.h"
+
+/* -----------------------------------------------------------------------------
+ General scheme
+
+ ToDo: move this to the wiki when the implementation is done.
+
+ We're only going to try to parallelise the copying GC for now. The
+ Plan is as follows.
+
+ Each thread has a gc_thread structure (see below) which holds its
+ thread-local data. We'll keep a pointer to this in a thread-local
+ variable, or possibly in a register.
+
+ In the gc_thread structure is a step_workspace for each step. The
+ primary purpose of the step_workspace is to hold evacuated objects;
+ when an object is evacuated, it is copied to the "todo" block in
+ the thread's workspace for the appropriate step. When the todo
+ block is full, it is pushed to the global step->todos list, which
+   is protected by a lock.  (In fact we interpose a one-place buffer
+   here to reduce contention.)
+
+ A thread repeatedly grabs a block of work from one of the
+ step->todos lists, scavenges it, and keeps the scavenged block on
+ its own ws->scavd_list (this is to avoid unnecessary contention
+ returning the completed buffers back to the step: we can just
+ collect them all later).
+
+ When there is no global work to do, we start scavenging the todo
+ blocks in the workspaces. This is where the scan_bd field comes
+   in: we scan the contents of the todo block, but once we have
+   scavenged it up to todo_bd->free we don't want to move the block
+   immediately to the scavd_list, because it is probably only
+   partially full.  So we remember that we
+ have scanned up to this point by saving the block in ws->scan_bd,
+ with the current scan pointer in ws->scan. Later, when more
+ objects have been copied to this block, we can come back and scan
+ the rest. When we visit this workspace again in the future,
+ scan_bd may still be the same as todo_bd, or it might be different:
+ if enough objects were copied into this block that it filled up,
+ then we will have allocated a new todo block, but *not* pushed the
+ old one to the step, because it is partially scanned.
+
+ The reason to leave scanning the todo blocks until last is that we
+   want to deal with full blocks as much as possible.
+ ------------------------------------------------------------------------- */
+
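To make the data flow concrete, here is a minimal sketch of how an evacuated
object of 'size' words might be allocated in the todo block of the current
thread's workspace for a step.  This is not the patch's actual Evac.c code;
copy_to_workspace is a hypothetical name used only for illustration:

    static StgPtr
    copy_to_workspace (step_workspace *ws, nat size)
    {
        bdescr *bd = ws->todo_bd;
        StgPtr to;

        // Start a new todo block if there is none, or if the current
        // one has fewer than 'size' words of room left.
        // gc_alloc_todo_block() also retires the full block: first
        // into buffer_todo_bd, and from there to the step's global
        // todos list.
        if (bd == NULL || bd->free + size > bd->start + BLOCK_SIZE_W) {
            bd = gc_alloc_todo_block(ws);
        }
        to = bd->free;
        bd->free += size;
        return to;   // the caller copies the object to 'to'
    }

The real evacuation code also deals with tagging, large objects and the
evac_gen check; only the workspace interaction is sketched here.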
+
+/* -----------------------------------------------------------------------------
+ Step Workspace
+
+   A step workspace exists for each step, for each GC thread.  The GC
+   thread takes a block from the step's todos list into its scan_bd
+   and then scans it.  Objects referred to from the scan block are
+   copied into the todo or scavd blocks of the relevant step.
+
+ ------------------------------------------------------------------------- */
+
+typedef struct step_workspace_ {
+ step * stp; // the step for this workspace
+ struct gc_thread_ * gct; // the gc_thread that contains this workspace
+
+ // block that is currently being scanned
+ bdescr * scan_bd;
+ StgPtr scan; // the scan pointer
+
+ // where objects to be scavenged go
+ bdescr * todo_bd;
+ bdescr * buffer_todo_bd; // buffer to reduce contention
+ // on the step's todos list
+
+ // where large objects to be scavenged go
+ bdescr * todo_large_objects;
+
+ // Objects that need not be, or have already been, scavenged. The
+ // block at the front of the list is special: objects that don't
+ // need to be scavenged are copied directly to this block.
+ // Completed scan blocks also go on this list; but we put them
+ // after the head block.
+ bdescr * scavd_list;
+ lnat n_scavd_blocks; // count of blocks in this list
+
+} step_workspace;
+
+/* ----------------------------------------------------------------------------
+ GC thread object
+
+   Every GC thread has one of these.  It contains all the
+   step-specific workspaces and other GC-thread-local information.
+   At some later point it may be useful to move this data into the
+   TLS store of the GC threads.
+ ------------------------------------------------------------------------- */
+
+typedef struct gc_thread_ {
+#ifdef THREADED_RTS
+ OSThreadId id; // The OS thread that this struct belongs to
+#endif
+ nat thread_index; // a zero based index identifying the thread
+
+ step_workspace ** steps; // 2-d array (gen,step) of workspaces
+
+    bdescr * free_blocks;          // a buffer of free blocks for this thread
+                                   //  to allocate from during GC without
+                                   //   taking the block allocator's spin lock.
+
+ lnat gc_count; // number of gc's this thread has done
+
+ // --------------------
+ // evacuate flags
+
+ nat evac_gen; // Youngest generation that objects
+ // should be evacuated to in
+ // evacuate(). (Logically an
+ // argument to evacuate, but it's
+ // static a lot of the time so we
+ // optimise it into a per-thread
+ // variable).
+
+    rtsBool        failed_to_evac;  // failure to evacuate an object typically
+ // causes it to be recorded in the mutable
+ // object list
+
+ rtsBool eager_promotion; // forces promotion to the evac gen
+ // instead of the to-space
+ // corresponding to the object
+
+    lnat           thunk_selector_depth; // not currently used
+
+} gc_thread;
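The evac_gen, failed_to_evac and eager_promotion fields replace what used to
be global variables; the Scav.c changes further down all follow the same
save-and-restore pattern when scavenging a mutable object, roughly (an
illustrative fragment, not a complete function):

        nat saved_evac_gen = gct->evac_gen;

        gct->evac_gen = 0;                  // this object may point anywhere
        // ... evacuate the object's fields ...
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue;      // keep it on the mutable list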
+
extern nat N;
extern rtsBool major_gc;
-extern nat evac_gen;
-extern rtsBool eager_promotion;
-extern rtsBool failed_to_evac;
+
+extern gc_thread *gc_threads;
+extern gc_thread *gct; // this thread's gct TODO: make thread-local
extern StgClosure* static_objects;
extern StgClosure* scavenged_static_objects;
@@ -32,8 +162,8 @@ extern rtsBool mark_stack_overflowed;
extern bdescr *oldgen_scan_bd;
extern StgPtr oldgen_scan;
-extern lnat new_blocks; // blocks allocated during this GC
-extern lnat new_scavd_blocks; // ditto, but depth-first blocks
+extern long copied;
+extern long scavd_copied;
#ifdef DEBUG
extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS;
diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c
index 61b72b6c0a..cee17c439a 100644
--- a/rts/sm/GCUtils.c
+++ b/rts/sm/GCUtils.c
@@ -12,73 +12,172 @@
* ---------------------------------------------------------------------------*/
#include "Rts.h"
+#include "RtsFlags.h"
#include "Storage.h"
#include "GC.h"
#include "GCUtils.h"
+#include "Printer.h"
+
+#ifdef THREADED_RTS
+SpinLock gc_alloc_block_sync;
+#endif
+
+bdescr *
+allocBlock_sync(void)
+{
+ bdescr *bd;
+ ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ bd = allocBlock();
+ RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+ return bd;
+}
/* -----------------------------------------------------------------------------
- Allocate a new to-space block in the given step.
+ Workspace utilities
-------------------------------------------------------------------------- */
bdescr *
-gc_alloc_block(step *stp)
+grab_todo_block (step_workspace *ws)
{
- bdescr *bd = allocBlock();
- bd->gen_no = stp->gen_no;
- bd->step = stp;
+ bdescr *bd;
+ step *stp;
+
+ stp = ws->stp;
+ bd = NULL;
+
+ if (ws->buffer_todo_bd)
+ {
+ bd = ws->buffer_todo_bd;
+ ASSERT(bd->link == NULL);
+ ws->buffer_todo_bd = NULL;
+ return bd;
+ }
+
+ ACQUIRE_SPIN_LOCK(&stp->sync_todo);
+ if (stp->todos) {
+ bd = stp->todos;
+ stp->todos = bd->link;
+ bd->link = NULL;
+ }
+ RELEASE_SPIN_LOCK(&stp->sync_todo);
+ return bd;
+}
+
+static void
+push_todo_block (bdescr *bd, step *stp)
+{
+ ASSERT(bd->link == NULL);
+ ACQUIRE_SPIN_LOCK(&stp->sync_todo);
+ bd->link = stp->todos;
+ stp->todos = bd;
+ RELEASE_SPIN_LOCK(&stp->sync_todo);
+}
+
+void
+push_scan_block (bdescr *bd, step_workspace *ws)
+{
+ ASSERT(bd != NULL);
+ ASSERT(bd->link == NULL);
+
+ // update stats: this is a block that has been copied & scavenged
+ copied += bd->free - bd->start;
+
+ // put the scan block *second* in ws->scavd_list. The first block
+ // in this list is for evacuating objects that don't need to be
+ // scavenged.
+ bd->link = ws->scavd_list->link;
+ ws->scavd_list->link = bd;
+ ws->n_scavd_blocks ++;
+
+ IF_DEBUG(sanity,
+ ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
+}
+
+bdescr *
+gc_alloc_todo_block (step_workspace *ws)
+{
+ bdescr *bd;
+
+ // If we already have a todo block, it must be full, so we push it
+ // out: first to the buffer_todo_bd, then to the step. BUT, don't
+    // push the block out if it is already the scan block.
+ if (ws->todo_bd != NULL && ws->scan_bd != ws->todo_bd) {
+ ASSERT(ws->todo_bd->link == NULL);
+ if (ws->buffer_todo_bd != NULL) {
+ ASSERT(ws->buffer_todo_bd->link == NULL);
+ push_todo_block(ws->buffer_todo_bd, ws->stp);
+ }
+ ws->buffer_todo_bd = ws->todo_bd;
+ ws->todo_bd = NULL;
+ }
+
+ bd = allocBlock_sync();
+
+ bd->gen_no = ws->stp->gen_no;
+ bd->step = ws->stp;
bd->link = NULL;
// blocks in to-space in generations up to and including N
// get the BF_EVACUATED flag.
- if (stp->gen_no <= N) {
+ if (ws->stp->gen_no <= N) {
bd->flags = BF_EVACUATED;
} else {
bd->flags = 0;
}
-
- // Start a new to-space block, chain it on after the previous one.
- if (stp->hp_bd != NULL) {
- stp->hp_bd->free = stp->hp;
- stp->hp_bd->link = bd;
- }
-
- stp->hp_bd = bd;
- stp->hp = bd->start;
- stp->hpLim = stp->hp + BLOCK_SIZE_W;
-
- stp->n_blocks++;
- new_blocks++;
+
+ ws->todo_bd = bd;
return bd;
}
bdescr *
-gc_alloc_scavd_block(step *stp)
+gc_alloc_scavd_block (step_workspace *ws)
{
- bdescr *bd = allocBlock();
- bd->gen_no = stp->gen_no;
- bd->step = stp;
+ bdescr *bd;
+
+ bd = allocBlock_sync();
+
+ bd->gen_no = ws->stp->gen_no;
+ bd->step = ws->stp;
// blocks in to-space in generations up to and including N
// get the BF_EVACUATED flag.
- if (stp->gen_no <= N) {
+ if (ws->stp->gen_no <= N) {
bd->flags = BF_EVACUATED;
} else {
bd->flags = 0;
}
- bd->link = stp->blocks;
- stp->blocks = bd;
-
- if (stp->scavd_hp != NULL) {
- Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
+ // update stats: this is a block that has been copied only
+ if (ws->scavd_list != NULL) {
+ scavd_copied += ws->scavd_list->free - ws->scavd_list->start;
}
- stp->scavd_hp = bd->start;
- stp->scavd_hpLim = stp->scavd_hp + BLOCK_SIZE_W;
- stp->n_blocks++;
- new_scavd_blocks++;
+ bd->link = ws->scavd_list;
+ ws->scavd_list = bd;
+ ws->n_scavd_blocks++;
return bd;
}
+/* -----------------------------------------------------------------------------
+ * Debugging
+ * -------------------------------------------------------------------------- */
+
+#if DEBUG
+void
+printMutableList(generation *gen)
+{
+ bdescr *bd;
+ StgPtr p;
+
+ debugBelch("mutable list %p: ", gen->mut_list);
+
+ for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
+ for (p = bd->start; p < bd->free; p++) {
+ debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));
+ }
+ }
+ debugBelch("\n");
+}
+#endif /* DEBUG */
diff --git a/rts/sm/GCUtils.h b/rts/sm/GCUtils.h
index 70dd7a882f..2b22407d31 100644
--- a/rts/sm/GCUtils.h
+++ b/rts/sm/GCUtils.h
@@ -11,5 +11,29 @@
*
* --------------------------------------------------------------------------*/
-bdescr *gc_alloc_block(step *stp);
-bdescr *gc_alloc_scavd_block(step *stp);
+#include "SMP.h"
+
+#ifdef THREADED_RTS
+extern SpinLock gc_alloc_block_sync;
+#endif
+
+bdescr *allocBlock_sync(void);
+
+void push_scan_block (bdescr *bd, step_workspace *ws);
+bdescr *grab_todo_block (step_workspace *ws);
+bdescr *gc_alloc_todo_block (step_workspace *ws);
+bdescr *gc_alloc_scavd_block (step_workspace *ws);
+
+// Returns true if a block is no more than 3/4 full (i.e. at least a
+// quarter of it is still free).  Used to try to re-use partial blocks
+// wherever possible and reduce wastage; we might tweak the threshold.
+INLINE_HEADER rtsBool
+isPartiallyFull(bdescr *bd)
+{
+ return (bd->free + BLOCK_SIZE_W/4 < bd->start + BLOCK_SIZE_W);
+}
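To illustrate the threshold with hypothetical numbers: on a build where
BLOCK_SIZE_W is 1024 words, a block whose free pointer sits 700 words past
bd->start satisfies 700 + 256 < 1024, so isPartiallyFull() returns rtsTrue
(about 68% full, worth re-using); at 800 words it returns rtsFalse (about
78% full).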
+
+
+#if DEBUG
+void printMutableList (generation *gen);
+#endif
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index 455b586289..bfa78e5836 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -109,7 +109,7 @@ traverseWeakPtrList(void)
/* doesn't matter where we evacuate values/finalizers to, since
* these pointers are treated as roots (iff the keys are alive).
*/
- evac_gen = 0;
+ gct->evac_gen = 0;
last_w = &old_weak_ptr_list;
for (w = old_weak_ptr_list; w != NULL; w = next_w) {
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 0fe7a7fa83..2a6ea389e6 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -12,15 +12,18 @@
* ---------------------------------------------------------------------------*/
#include "Rts.h"
+#include "RtsFlags.h"
#include "Storage.h"
#include "MBlock.h"
#include "GC.h"
+#include "GCUtils.h"
#include "Compact.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
#include "LdvProfile.h"
+#include "Sanity.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);
@@ -28,6 +31,9 @@ static void scavenge_large_bitmap (StgPtr p,
StgLargeBitmap *large_bitmap,
nat size );
+static void scavenge_block (bdescr *bd, StgPtr scan);
+
+
/* Similar to scavenge_large_bitmap(), but we don't write back the
* pointers we get back from evacuate().
*/
@@ -247,48 +253,42 @@ scavenge_AP (StgAP *ap)
}
/* -----------------------------------------------------------------------------
- Scavenge a given step until there are no more objects in this step
- to scavenge.
+ Scavenge a block from the given scan pointer up to bd->free.
evac_gen is set by the caller to be either zero (for a step in a
generation < N) or G where G is the generation of the step being
scavenged.
We sometimes temporarily change evac_gen back to zero if we're
- scavenging a mutable object where early promotion isn't such a good
+ scavenging a mutable object where eager promotion isn't such a good
idea.
-------------------------------------------------------------------------- */
-void
-scavenge(step *stp)
+static void
+scavenge_block (bdescr *bd, StgPtr scan)
{
StgPtr p, q;
StgInfoTable *info;
- bdescr *bd;
- nat saved_evac_gen = evac_gen;
+ nat saved_evac_gen;
- p = stp->scan;
- bd = stp->scan_bd;
-
- failed_to_evac = rtsFalse;
-
- /* scavenge phase - standard breadth-first scavenging of the
- * evacuated objects
- */
+ p = scan;
+
+ debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
+ bd->start, bd->gen_no, bd->step->no, scan);
- while (bd != stp->hp_bd || p < stp->hp) {
+ gct->evac_gen = bd->gen_no;
+ saved_evac_gen = gct->evac_gen;
+ gct->failed_to_evac = rtsFalse;
- // If we're at the end of this block, move on to the next block
- if (bd != stp->hp_bd && p == bd->free) {
- bd = bd->link;
- p = bd->start;
- continue;
- }
+ // we might be evacuating into the very object that we're
+ // scavenging, so we have to check the real bd->free pointer each
+ // time around the loop.
+ while (p < bd->free) {
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
- ASSERT(thunk_selector_depth == 0);
+ ASSERT(gct->thunk_selector_depth == 0);
q = p;
switch (info->type) {
@@ -296,16 +296,16 @@ scavenge(step *stp)
case MVAR_CLEAN:
case MVAR_DIRTY:
{
- rtsBool saved_eager_promotion = eager_promotion;
+ rtsBool saved_eager_promotion = gct->eager_promotion;
StgMVar *mvar = ((StgMVar *)p);
- eager_promotion = rtsFalse;
+ gct->eager_promotion = rtsFalse;
mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
- eager_promotion = saved_eager_promotion;
+ gct->eager_promotion = saved_eager_promotion;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
mvar->header.info = &stg_MVAR_DIRTY_info;
} else {
mvar->header.info = &stg_MVAR_CLEAN_info;
@@ -424,7 +424,7 @@ scavenge(step *stp)
}
case IND_PERM:
- if (stp->gen->no != 0) {
+ if (bd->gen_no != 0) {
#ifdef PROFILING
// @LDV profiling
// No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
@@ -447,13 +447,13 @@ scavenge(step *stp)
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY: {
- rtsBool saved_eager_promotion = eager_promotion;
+ rtsBool saved_eager_promotion = gct->eager_promotion;
- eager_promotion = rtsFalse;
+ gct->eager_promotion = rtsFalse;
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- eager_promotion = saved_eager_promotion;
+ gct->eager_promotion = saved_eager_promotion;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
@@ -512,21 +512,21 @@ scavenge(step *stp)
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- saved_eager = eager_promotion;
- eager_promotion = rtsFalse;
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
}
- eager_promotion = saved_eager;
+ gct->eager_promotion = saved_eager;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
}
- failed_to_evac = rtsTrue; // always put it on the mutable list.
+ gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
break;
}
@@ -543,7 +543,7 @@ scavenge(step *stp)
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
@@ -554,19 +554,19 @@ scavenge(step *stp)
case TSO:
{
StgTSO *tso = (StgTSO *)p;
- rtsBool saved_eager = eager_promotion;
+ rtsBool saved_eager = gct->eager_promotion;
- eager_promotion = rtsFalse;
+ gct->eager_promotion = rtsFalse;
scavengeTSO(tso);
- eager_promotion = saved_eager;
+ gct->eager_promotion = saved_eager;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
tso->flags |= TSO_DIRTY;
} else {
tso->flags &= ~TSO_DIRTY;
}
- failed_to_evac = rtsTrue; // always on the mutable list
+ gct->failed_to_evac = rtsTrue; // always on the mutable list
p += tso_sizeW(tso);
break;
}
@@ -574,12 +574,12 @@ scavenge(step *stp)
case TVAR_WATCH_QUEUE:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTVarWatchQueue);
break;
}
@@ -587,11 +587,11 @@ scavenge(step *stp)
case TVAR:
{
StgTVar *tvar = ((StgTVar *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
tvar->current_value = evacuate((StgClosure*)tvar->current_value);
tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTVar);
break;
}
@@ -599,12 +599,12 @@ scavenge(step *stp)
case TREC_HEADER:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTRecHeader);
break;
}
@@ -614,15 +614,15 @@ scavenge(step *stp)
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- evac_gen = 0;
+ gct->evac_gen = 0;
tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
e->expected_value = evacuate((StgClosure*)e->expected_value);
e->new_value = evacuate((StgClosure*)e->new_value);
}
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTRecChunk);
break;
}
@@ -630,11 +630,11 @@ scavenge(step *stp)
case ATOMIC_INVARIANT:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
invariant->code = (StgClosure *)evacuate(invariant->code);
invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgAtomicInvariant);
break;
}
@@ -642,12 +642,12 @@ scavenge(step *stp)
case INVARIANT_CHECK_QUEUE:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgInvariantCheckQueue);
break;
}
@@ -664,16 +664,15 @@ scavenge(step *stp)
* Case (b) arises if we didn't manage to promote everything that
* the current object points to into the current generation.
*/
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- if (stp->gen_no > 0) {
- recordMutableGen((StgClosure *)q, stp->gen);
+ if (gct->failed_to_evac) {
+ gct->failed_to_evac = rtsFalse;
+ if (bd->gen_no > 0) {
+ recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
}
}
}
- stp->scan_bd = bd;
- stp->scan = p;
+ debugTrace(DEBUG_gc, " scavenged %ld bytes", (bd->free - scan) * sizeof(W_));
}
/* -----------------------------------------------------------------------------
@@ -684,15 +683,15 @@ scavenge(step *stp)
doesn't need to advance the pointer on to the next object.
-------------------------------------------------------------------------- */
-void
+static void
scavenge_mark_stack(void)
{
StgPtr p, q;
StgInfoTable *info;
nat saved_evac_gen;
- evac_gen = oldest_gen->no;
- saved_evac_gen = evac_gen;
+ gct->evac_gen = oldest_gen->no;
+ saved_evac_gen = gct->evac_gen;
linear_scan:
while (!mark_stack_empty()) {
@@ -707,16 +706,16 @@ linear_scan:
case MVAR_CLEAN:
case MVAR_DIRTY:
{
- rtsBool saved_eager_promotion = eager_promotion;
+ rtsBool saved_eager_promotion = gct->eager_promotion;
StgMVar *mvar = ((StgMVar *)p);
- eager_promotion = rtsFalse;
+ gct->eager_promotion = rtsFalse;
mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
- eager_promotion = saved_eager_promotion;
+ gct->eager_promotion = saved_eager_promotion;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
mvar->header.info = &stg_MVAR_DIRTY_info;
} else {
mvar->header.info = &stg_MVAR_CLEAN_info;
@@ -824,13 +823,13 @@ linear_scan:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY: {
- rtsBool saved_eager_promotion = eager_promotion;
+ rtsBool saved_eager_promotion = gct->eager_promotion;
- eager_promotion = rtsFalse;
+ gct->eager_promotion = rtsFalse;
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- eager_promotion = saved_eager_promotion;
+ gct->eager_promotion = saved_eager_promotion;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
@@ -881,21 +880,21 @@ linear_scan:
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- saved_eager = eager_promotion;
- eager_promotion = rtsFalse;
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
}
- eager_promotion = saved_eager;
+ gct->eager_promotion = saved_eager;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
}
- failed_to_evac = rtsTrue; // mutable anyhow.
+ gct->failed_to_evac = rtsTrue; // mutable anyhow.
break;
}
@@ -912,7 +911,7 @@ linear_scan:
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
@@ -923,42 +922,42 @@ linear_scan:
case TSO:
{
StgTSO *tso = (StgTSO *)p;
- rtsBool saved_eager = eager_promotion;
+ rtsBool saved_eager = gct->eager_promotion;
- eager_promotion = rtsFalse;
+ gct->eager_promotion = rtsFalse;
scavengeTSO(tso);
- eager_promotion = saved_eager;
+ gct->eager_promotion = saved_eager;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
tso->flags |= TSO_DIRTY;
} else {
tso->flags &= ~TSO_DIRTY;
}
- failed_to_evac = rtsTrue; // always on the mutable list
+ gct->failed_to_evac = rtsTrue; // always on the mutable list
break;
}
case TVAR_WATCH_QUEUE:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
tvar->current_value = evacuate((StgClosure*)tvar->current_value);
tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
@@ -967,50 +966,50 @@ linear_scan:
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- evac_gen = 0;
+ gct->evac_gen = 0;
tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
e->expected_value = evacuate((StgClosure*)e->expected_value);
e->new_value = evacuate((StgClosure*)e->new_value);
}
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TREC_HEADER:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case ATOMIC_INVARIANT:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
invariant->code = (StgClosure *)evacuate(invariant->code);
invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case INVARIANT_CHECK_QUEUE:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
@@ -1019,10 +1018,10 @@ linear_scan:
info->type, p);
}
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- if (evac_gen > 0) {
- recordMutableGen((StgClosure *)q, &generations[evac_gen]);
+ if (gct->failed_to_evac) {
+ gct->failed_to_evac = rtsFalse;
+ if (gct->evac_gen > 0) {
+ recordMutableGen_GC((StgClosure *)q, &generations[gct->evac_gen]);
}
}
@@ -1082,7 +1081,7 @@ static rtsBool
scavenge_one(StgPtr p)
{
const StgInfoTable *info;
- nat saved_evac_gen = evac_gen;
+ nat saved_evac_gen = gct->evac_gen;
rtsBool no_luck;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
@@ -1093,16 +1092,16 @@ scavenge_one(StgPtr p)
case MVAR_CLEAN:
case MVAR_DIRTY:
{
- rtsBool saved_eager_promotion = eager_promotion;
+ rtsBool saved_eager_promotion = gct->eager_promotion;
StgMVar *mvar = ((StgMVar *)p);
- eager_promotion = rtsFalse;
+ gct->eager_promotion = rtsFalse;
mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
- eager_promotion = saved_eager_promotion;
+ gct->eager_promotion = saved_eager_promotion;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
mvar->header.info = &stg_MVAR_DIRTY_info;
} else {
mvar->header.info = &stg_MVAR_CLEAN_info;
@@ -1153,13 +1152,13 @@ scavenge_one(StgPtr p)
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY: {
StgPtr q = p;
- rtsBool saved_eager_promotion = eager_promotion;
+ rtsBool saved_eager_promotion = gct->eager_promotion;
- eager_promotion = rtsFalse;
+ gct->eager_promotion = rtsFalse;
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- eager_promotion = saved_eager_promotion;
+ gct->eager_promotion = saved_eager_promotion;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
@@ -1212,22 +1211,22 @@ scavenge_one(StgPtr p)
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- saved_eager = eager_promotion;
- eager_promotion = rtsFalse;
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
q = p;
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
}
- eager_promotion = saved_eager;
+ gct->eager_promotion = saved_eager;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
}
- failed_to_evac = rtsTrue;
+ gct->failed_to_evac = rtsTrue;
break;
}
@@ -1244,7 +1243,7 @@ scavenge_one(StgPtr p)
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
@@ -1255,54 +1254,54 @@ scavenge_one(StgPtr p)
case TSO:
{
StgTSO *tso = (StgTSO *)p;
- rtsBool saved_eager = eager_promotion;
+ rtsBool saved_eager = gct->eager_promotion;
- eager_promotion = rtsFalse;
+ gct->eager_promotion = rtsFalse;
scavengeTSO(tso);
- eager_promotion = saved_eager;
+ gct->eager_promotion = saved_eager;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
tso->flags |= TSO_DIRTY;
} else {
tso->flags &= ~TSO_DIRTY;
}
- failed_to_evac = rtsTrue; // always on the mutable list
+ gct->failed_to_evac = rtsTrue; // always on the mutable list
break;
}
case TVAR_WATCH_QUEUE:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
tvar->current_value = evacuate((StgClosure*)tvar->current_value);
tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TREC_HEADER:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
@@ -1311,38 +1310,38 @@ scavenge_one(StgPtr p)
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- evac_gen = 0;
+ gct->evac_gen = 0;
tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
e->expected_value = evacuate((StgClosure*)e->expected_value);
e->new_value = evacuate((StgClosure*)e->new_value);
}
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case ATOMIC_INVARIANT:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
invariant->code = (StgClosure *)evacuate(invariant->code);
invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case INVARIANT_CHECK_QUEUE:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- evac_gen = 0;
+ gct->evac_gen = 0;
queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_gen = saved_evac_gen;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
@@ -1395,8 +1394,8 @@ scavenge_one(StgPtr p)
barf("scavenge_one: strange object %d", (int)(info->type));
}
- no_luck = failed_to_evac;
- failed_to_evac = rtsFalse;
+ no_luck = gct->failed_to_evac;
+ gct->failed_to_evac = rtsFalse;
return (no_luck);
}
@@ -1416,7 +1415,7 @@ scavenge_mutable_list(generation *gen)
bd = gen->saved_mut_list;
- evac_gen = gen->no;
+ gct->evac_gen = gen->no;
for (; bd != NULL; bd = bd->link) {
for (q = bd->start; q < bd->free; q++) {
p = (StgPtr)*q;
@@ -1451,7 +1450,7 @@ scavenge_mutable_list(generation *gen)
//
switch (get_itbl((StgClosure *)p)->type) {
case MUT_ARR_PTRS_CLEAN:
- recordMutableGen((StgClosure *)p,gen);
+ recordMutableGen_GC((StgClosure *)p,gen);
continue;
case TSO: {
StgTSO *tso = (StgTSO *)p;
@@ -1463,7 +1462,7 @@ scavenge_mutable_list(generation *gen)
if (tso->why_blocked != BlockedOnBlackHole) {
tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
}
- recordMutableGen((StgClosure *)p,gen);
+ recordMutableGen_GC((StgClosure *)p,gen);
continue;
}
}
@@ -1474,7 +1473,7 @@ scavenge_mutable_list(generation *gen)
if (scavenge_one(p)) {
// didn't manage to promote everything, so put the
// object back on the list.
- recordMutableGen((StgClosure *)p,gen);
+ recordMutableGen_GC((StgClosure *)p,gen);
}
}
}
@@ -1492,7 +1491,7 @@ scavenge_mutable_list(generation *gen)
remove non-mutable objects from the mutable list at this point.
-------------------------------------------------------------------------- */
-void
+static void
scavenge_static(void)
{
StgClosure* p = static_objects;
@@ -1500,7 +1499,7 @@ scavenge_static(void)
/* Always evacuate straight to the oldest generation for static
* objects */
- evac_gen = oldest_gen->no;
+ gct->evac_gen = oldest_gen->no;
/* keep going until we've scavenged all the objects on the linked
list... */
@@ -1533,9 +1532,9 @@ scavenge_static(void)
* leave it *on* the scavenged_static_objects list, though,
* in case we visit this object again.
*/
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutableGen((StgClosure *)p,oldest_gen);
+ if (gct->failed_to_evac) {
+ gct->failed_to_evac = rtsFalse;
+ recordMutableGen_GC((StgClosure *)p,oldest_gen);
}
break;
}
@@ -1564,7 +1563,7 @@ scavenge_static(void)
barf("scavenge_static: strange closure %d", (int)(info->type));
}
- ASSERT(failed_to_evac == rtsFalse);
+ ASSERT(gct->failed_to_evac == rtsFalse);
/* get the next static object from the list. Remember, there might
* be more stuff on this list now that we've done some evacuating!
@@ -1775,33 +1774,176 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
be zero.
--------------------------------------------------------------------------- */
-void
-scavenge_large(step *stp)
+static void
+scavenge_large (step_workspace *ws)
{
- bdescr *bd;
- StgPtr p;
+ bdescr *bd;
+ StgPtr p;
+
+ gct->evac_gen = ws->stp->gen_no;
- bd = stp->new_large_objects;
+ bd = ws->todo_large_objects;
+
+ for (; bd != NULL; bd = ws->todo_large_objects) {
+
+ // take this object *off* the large objects list and put it on
+ // the scavenged large objects list. This is so that we can
+	// treat todo_large_objects as a stack and push new objects on
+ // the front when evacuating.
+ ws->todo_large_objects = bd->link;
+
+ ACQUIRE_SPIN_LOCK(&ws->stp->sync_large_objects);
+ dbl_link_onto(bd, &ws->stp->scavenged_large_objects);
+ ws->stp->n_scavenged_large_blocks += bd->blocks;
+ RELEASE_SPIN_LOCK(&ws->stp->sync_large_objects);
+
+ p = bd->start;
+ if (scavenge_one(p)) {
+ if (ws->stp->gen_no > 0) {
+ recordMutableGen_GC((StgClosure *)p, ws->stp->gen);
+ }
+ }
+ }
+}
- for (; bd != NULL; bd = stp->new_large_objects) {
+/* ----------------------------------------------------------------------------
+ Find the oldest full block to scavenge, and scavenge it.
+ ------------------------------------------------------------------------- */
- /* take this object *off* the large objects list and put it on
- * the scavenged large objects list. This is so that we can
- * treat new_large_objects as a stack and push new objects on
- * the front when evacuating.
- */
- stp->new_large_objects = bd->link;
- dbl_link_onto(bd, &stp->scavenged_large_objects);
+static rtsBool
+scavenge_find_global_work (void)
+{
+ bdescr *bd;
+ int g, s;
+ rtsBool flag;
+ step_workspace *ws;
+
+ flag = rtsFalse;
+ for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
+ for (s = generations[g].n_steps; --s >= 0; ) {
+ if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
+ continue;
+ }
+ ws = &gct->steps[g][s];
- // update the block count in this step.
- stp->n_scavenged_large_blocks += bd->blocks;
+ // If we have any large objects to scavenge, do them now.
+ if (ws->todo_large_objects) {
+ scavenge_large(ws);
+ flag = rtsTrue;
+ }
- p = bd->start;
- if (scavenge_one(p)) {
- if (stp->gen_no > 0) {
- recordMutableGen((StgClosure *)p, stp->gen);
+ if ((bd = grab_todo_block(ws)) != NULL) {
+ // no need to assign this to ws->scan_bd, we're going
+ // to scavenge the whole thing and then push it on
+ // our scavd list. This saves pushing out the
+ // scan_bd block, which might be partial.
+ scavenge_block(bd, bd->start);
+ push_scan_block(bd, ws);
+ return rtsTrue;
+ }
+
+ if (flag) return rtsTrue;
}
}
- }
+ return rtsFalse;
}
+/* ----------------------------------------------------------------------------
+ Look for local work to do.
+
+ We can have outstanding scavenging to do if, for any of the workspaces,
+
+ - the scan block is the same as the todo block, and new objects
+ have been evacuated to the todo block.
+
+ - the scan block *was* the same as the todo block, but the todo
+ block filled up and a new one has been allocated.
+ ------------------------------------------------------------------------- */
+
+static rtsBool
+scavenge_find_local_work (void)
+{
+ int g, s;
+ step_workspace *ws;
+ rtsBool flag;
+
+ flag = rtsFalse;
+ for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
+ for (s = generations[g].n_steps; --s >= 0; ) {
+ if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
+ continue;
+ }
+ ws = &gct->steps[g][s];
+
+ // If we have a todo block and no scan block, start
+ // scanning the todo block.
+ if (ws->scan_bd == NULL && ws->todo_bd != NULL)
+ {
+ ws->scan_bd = ws->todo_bd;
+ ws->scan = ws->scan_bd->start;
+ }
+
+ // If we have a scan block with some work to do,
+ // scavenge everything up to the free pointer.
+ if (ws->scan != NULL && ws->scan < ws->scan_bd->free)
+ {
+ scavenge_block(ws->scan_bd, ws->scan);
+ ws->scan = ws->scan_bd->free;
+ flag = rtsTrue;
+ }
+
+ if (ws->scan_bd != NULL && ws->scan == ws->scan_bd->free
+ && ws->scan_bd != ws->todo_bd)
+ {
+ // we're not going to evac any more objects into
+ // this block, so push it now.
+ push_scan_block(ws->scan_bd, ws);
+ ws->scan_bd = NULL;
+ ws->scan = NULL;
+ // we might be able to scan the todo block now. But
+ // don't do it right away: there might be full blocks
+ // waiting to be scanned as a result of scavenge_block above.
+ flag = rtsTrue;
+ }
+
+ if (flag) return rtsTrue;
+ }
+ }
+ return rtsFalse;
+}
+
+/* ----------------------------------------------------------------------------
+ Scavenge until we can't find anything more to scavenge.
+ ------------------------------------------------------------------------- */
+
+void
+scavenge_loop(void)
+{
+ rtsBool work_to_do;
+
+loop:
+ work_to_do = rtsFalse;
+
+ // scavenge static objects
+ if (major_gc && static_objects != END_OF_STATIC_LIST) {
+ IF_DEBUG(sanity, checkStaticObjects(static_objects));
+ scavenge_static();
+ }
+
+ // scavenge objects in compacted generation
+ if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
+ (mark_stack_bdescr != NULL && !mark_stack_empty())) {
+ scavenge_mark_stack();
+ work_to_do = rtsTrue;
+ }
+
+ // Order is important here: we want to deal in full blocks as
+ // much as possible, so go for global work in preference to
+ // local work. Only if all the global work has been exhausted
+ // do we start scavenging the fragments of blocks in the local
+ // workspaces.
+ if (scavenge_find_global_work()) goto loop;
+ if (scavenge_find_local_work()) goto loop;
+
+ if (work_to_do) goto loop;
+}
diff --git a/rts/sm/Scav.h b/rts/sm/Scav.h
index d9caca2c37..ab66775641 100644
--- a/rts/sm/Scav.h
+++ b/rts/sm/Scav.h
@@ -11,8 +11,5 @@
*
* ---------------------------------------------------------------------------*/
-void scavenge ( step * );
-void scavenge_mark_stack ( void );
-void scavenge_large ( step * );
-void scavenge_static ( void );
-void scavenge_mutable_list ( generation *g );
+void scavenge_loop (void);
+void scavenge_mutable_list (generation *g);
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 3ede82d141..9b86b432e1 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -29,6 +29,8 @@
#include "RetainerProfile.h" // for counting memory blocks (memInventory)
#include "OSMem.h"
#include "Trace.h"
+#include "GC.h"
+#include "GCUtils.h"
#include <stdlib.h>
#include <string.h>
@@ -84,20 +86,16 @@ initStep (step *stp, int g, int s)
stp->n_old_blocks = 0;
stp->gen = &generations[g];
stp->gen_no = g;
- stp->hp = NULL;
- stp->hpLim = NULL;
- stp->hp_bd = NULL;
- stp->scavd_hp = NULL;
- stp->scavd_hpLim = NULL;
- stp->scan = NULL;
- stp->scan_bd = NULL;
stp->large_objects = NULL;
stp->n_large_blocks = 0;
- stp->new_large_objects = NULL;
stp->scavenged_large_objects = NULL;
stp->n_scavenged_large_blocks = 0;
stp->is_compacted = 0;
stp->bitmap = NULL;
+#ifdef THREADED_RTS
+ initSpinLock(&stp->sync_todo);
+ initSpinLock(&stp->sync_large_objects);
+#endif
}
void
@@ -248,6 +246,10 @@ initStorage( void )
/* Tell GNU multi-precision pkg about our custom alloc functions */
mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
+#ifdef THREADED_RTS
+ initSpinLock(&gc_alloc_block_sync);
+#endif
+
IF_DEBUG(gc, statDescribeGens());
RELEASE_SM_LOCK;