author     simonmar <unknown>    2001-07-23 17:23:20 +0000
committer  simonmar <unknown>    2001-07-23 17:23:20 +0000
commit     dfd7d6d02a597949b08161ae3d49dc6dfc9e812d (patch)
tree       4afa5a75fa30ebbe08247543c3863a49ed54a792 /ghc/rts
parent     9528fa3e6229f36e424c5e327255694066017e10 (diff)
download   haskell-dfd7d6d02a597949b08161ae3d49dc6dfc9e812d.tar.gz
[project @ 2001-07-23 17:23:19 by simonmar]
Add a compacting garbage collector.
It isn't enabled by default, as there are still a couple of problems:
there's a fallback case I haven't implemented yet, which means it will
occasionally bomb out, and speed-wise it's quite a bit slower than the
copying collector (about 1.8x slower).
Until I can make it go faster, it'll only be useful when you're
actually running low on real memory.
Use '+RTS -c' to enable it.
Oh, and I cleaned up a few things in the RTS while I was there, and
fixed one or two possibly real bugs in the existing GC.
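(The flag follows the usual GHC RTS convention of bracketing runtime options, e.g. ./prog +RTS -c -RTS, where ./prog is a hypothetical program linked against this RTS.)

The patch drives the compactor with an explicit mark stack (the mark_stack/mark_sp/mark_splim helpers added to GC.c below). Here is a minimal standalone sketch of that structure, assuming a fixed capacity in place of the real allocGroup(MARK_STACK_BLOCKS) allocation; it is illustrative, not the RTS code itself:

/* Minimal sketch of the explicit mark stack this patch adds to GC.c.
 * The fixed array is an illustrative stand-in for the real block
 * allocation; the push/pop shapes mirror the patch. */
#define MARK_STACK_WORDS 4096            /* stand-in capacity */

static void  *mark_stack[MARK_STACK_WORDS];
static void **mark_sp    = mark_stack;                    /* next free slot */
static void **mark_splim = mark_stack + MARK_STACK_WORDS; /* one past the end */

static int  mark_stack_empty(void) { return mark_sp == mark_stack; }
static int  mark_stack_full(void)  { return mark_sp >= mark_splim; }
static void push_mark_stack(void *p) { *mark_sp++ = p; }
static void *pop_mark_stack(void)    { return *--mark_sp; }

The real helpers check mark_stack_full() before pushing and currently barf("ToDo: mark stack full") if it overflows; presumably this is the unimplemented fallback case mentioned above.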
Diffstat (limited to 'ghc/rts')
-rw-r--r--  ghc/rts/BlockAlloc.c        |    4
-rw-r--r--  ghc/rts/ClosureFlags.c      |   11
-rw-r--r--  ghc/rts/GC.c                | 1703
-rw-r--r--  ghc/rts/GC.h                |   12
-rw-r--r--  ghc/rts/GCCompact.c         |  907
-rw-r--r--  ghc/rts/GCCompact.h         |   30
-rw-r--r--  ghc/rts/PrimOps.hc          |   19
-rw-r--r--  ghc/rts/Printer.c           |    4
-rw-r--r--  ghc/rts/ProfHeap.c          |    4
-rw-r--r--  ghc/rts/RtsFlags.c          |    8
-rw-r--r--  ghc/rts/RtsFlags.h          |    7
-rw-r--r--  ghc/rts/Sanity.c            |  297
-rw-r--r--  ghc/rts/Sanity.h            |   28
-rw-r--r--  ghc/rts/Schedule.c          |   60
-rw-r--r--  ghc/rts/Stable.c            |  224
-rw-r--r--  ghc/rts/StablePriv.h        |   15
-rw-r--r--  ghc/rts/Stats.c             |    8
-rw-r--r--  ghc/rts/Stats.h             |    4
-rw-r--r--  ghc/rts/StgMiscClosures.hc  |   43
-rw-r--r--  ghc/rts/Storage.c           |  109
-rw-r--r--  ghc/rts/Storage.h           |    7
-rw-r--r--  ghc/rts/StoragePriv.h       |   20
-rw-r--r--  ghc/rts/parallel/GranSim.c  |    3
23 files changed, 2380 insertions, 1147 deletions
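The compaction machinery in the diff sizes one mark bit per word of heap (bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE)) and points each block descriptor at its slice of the bitmap (bd->u.bitmap). Below is a minimal sketch of how such mark/test operations typically work; the struct and helper names are illustrative stand-ins, not the RTS's own code (the real mark()/is_marked() live in GCCompact.h):

/* Hypothetical sketch of a one-bit-per-word mark bitmap attached to
 * a block descriptor.  "blk" stands in for the RTS's bdescr. */
#include <stdint.h>
#include <limits.h>
#include <stddef.h>

#define WORD_BITS (sizeof(uintptr_t) * CHAR_BIT)

typedef struct {
    uintptr_t *start;   /* first word covered by the block */
    uintptr_t *bitmap;  /* one bit per word; zeroed before marking */
} blk;

static void mark(uintptr_t *p, blk *bd)
{
    size_t i = (size_t)(p - bd->start);   /* word offset within the block */
    bd->bitmap[i / WORD_BITS] |= (uintptr_t)1 << (i % WORD_BITS);
}

static int is_marked(uintptr_t *p, blk *bd)
{
    size_t i = (size_t)(p - bd->start);
    return (int)((bd->bitmap[i / WORD_BITS] >> (i % WORD_BITS)) & 1);
}

Zeroing matters: the patch memset()s each bitmap to zero when it is allocated at the start of a collection, since a stale bit would make a dead object look live to is_marked().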
diff --git a/ghc/rts/BlockAlloc.c b/ghc/rts/BlockAlloc.c index e6a176b658..6186671059 100644 --- a/ghc/rts/BlockAlloc.c +++ b/ghc/rts/BlockAlloc.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: BlockAlloc.c,v 1.8 2001/07/23 10:47:16 simonmar Exp $ + * $Id: BlockAlloc.c,v 1.9 2001/07/23 17:23:19 simonmar Exp $ * * (c) The GHC Team 1998-2000 * @@ -63,6 +63,8 @@ allocGroup(nat n) void *mblock; bdescr *bd, **last; + ASSERT(n != 0); + if (n > BLOCKS_PER_MBLOCK) { return allocMegaGroup(BLOCKS_TO_MBLOCKS(n)); } diff --git a/ghc/rts/ClosureFlags.c b/ghc/rts/ClosureFlags.c index c4129df6ad..b94670be3d 100644 --- a/ghc/rts/ClosureFlags.c +++ b/ghc/rts/ClosureFlags.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: ClosureFlags.c,v 1.9 2001/03/22 03:51:10 hwloidl Exp $ + * $Id: ClosureFlags.c,v 1.10 2001/07/23 17:23:19 simonmar Exp $ * * (c) The GHC Team 1998-1999 * @@ -25,7 +25,7 @@ StgWord16 closure_flags[] = { /* 0 1 2 3 4 5 6 7 */ /* HNF BTM NS STA THU MUT UPT SRT */ - + [INVALID_OBJECT ] = ( 0 ), [CONSTR ] = (_HNF| _NS ), [CONSTR_1_0 ] = (_HNF| _NS ), @@ -60,7 +60,7 @@ StgWord16 closure_flags[] = { [IND_PERM ] = ( _NS |_IND ), [IND_OLDGEN_PERM ] = ( _NS |_IND ), [IND_STATIC ] = ( _NS|_STA |_IND ), -[CAF_BLACKHOLE ] = ( _BTM|_NS| _MUT|_UPT ), +[CAF_BLACKHOLE ] = ( _BTM|_NS| _UPT ), [RET_BCO ] = ( _BTM ), [RET_SMALL ] = ( _BTM| _SRT ), [RET_VEC_SMALL ] = ( _BTM| _SRT ), @@ -71,15 +71,16 @@ StgWord16 closure_flags[] = { [CATCH_FRAME ] = ( _BTM ), [STOP_FRAME ] = ( _BTM ), [SEQ_FRAME ] = ( _BTM ), -[BLACKHOLE ] = ( _NS| _MUT|_UPT ), +[BLACKHOLE ] = ( _NS| _UPT ), [BLACKHOLE_BQ ] = ( _NS| _MUT|_UPT ), [SE_BLACKHOLE ] = ( _NS| _UPT ), [SE_CAF_BLACKHOLE ] = ( _NS| _UPT ), [MVAR ] = (_HNF| _NS| _MUT|_UPT ), [ARR_WORDS ] = (_HNF| _NS| _UPT ), [MUT_ARR_PTRS ] = (_HNF| _NS| _MUT|_UPT ), -[MUT_ARR_PTRS_FROZEN ] = (_HNF| _NS| _MUT|_UPT ), +[MUT_ARR_PTRS_FROZEN ] = (_HNF| _NS| _UPT ), [MUT_VAR ] = (_HNF| _NS| _MUT|_UPT ), +[MUT_CONS ] = (_HNF| _NS| _UPT ), [WEAK ] = (_HNF| _NS| _UPT ), [FOREIGN ] = (_HNF| _NS| _UPT ), [STABLE_NAME ] = (_HNF| _NS| _UPT ), diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c index 3f7e5ec25b..79c8ef57e3 100644 --- a/ghc/rts/GC.c +++ b/ghc/rts/GC.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: GC.c,v 1.103 2001/07/23 10:47:16 simonmar Exp $ + * $Id: GC.c,v 1.104 2001/07/23 17:23:19 simonmar Exp $ * * (c) The GHC Team 1998-1999 * @@ -7,25 +7,6 @@ * * ---------------------------------------------------------------------------*/ -//@menu -//* Includes:: -//* STATIC OBJECT LIST:: -//* Static function declarations:: -//* Garbage Collect:: -//* Weak Pointers:: -//* Evacuation:: -//* Scavenging:: -//* Reverting CAFs:: -//* Sanity code for CAF garbage collection:: -//* Lazy black holing:: -//* Stack squeezing:: -//* Pausing a thread:: -//* Index:: -//@end menu - -//@node Includes, STATIC OBJECT LIST -//@subsection Includes - #include "Rts.h" #include "RtsFlags.h" #include "RtsUtils.h" @@ -33,9 +14,8 @@ #include "StoragePriv.h" #include "Stats.h" #include "Schedule.h" -#include "SchedAPI.h" /* for ReverCAFs prototype */ +#include "SchedAPI.h" // for ReverCAFs prototype #include "Sanity.h" -#include "GC.h" #include "BlockAlloc.h" #include "MBlock.h" #include "Main.h" @@ -44,7 +24,8 @@ #include "Weak.h" #include "StablePriv.h" #include "Prelude.h" -#include "ParTicky.h" // ToDo: move into Rts.h +#include "ParTicky.h" // ToDo: 
move into Rts.h +#include "GCCompact.h" #if defined(GRAN) || defined(PAR) # include "GranSimRts.h" # include "ParallelRts.h" @@ -60,9 +41,6 @@ #include "FrontPanel.h" #endif -//@node STATIC OBJECT LIST, Static function declarations, Includes -//@subsection STATIC OBJECT LIST - /* STATIC OBJECT LIST. * * During GC: @@ -97,8 +75,8 @@ * We build up a static object list while collecting generations 0..N, * which is then appended to the static object list of generation N+1. */ -StgClosure* static_objects; /* live static objects */ -StgClosure* scavenged_static_objects; /* static objects scavenged so far */ +StgClosure* static_objects; // live static objects +StgClosure* scavenged_static_objects; // static objects scavenged so far /* N is the oldest generation being collected, where the generations * are numbered starting at 0. A major GC (indicated by the major_gc @@ -116,8 +94,8 @@ static nat evac_gen; /* Weak pointers */ -static StgWeak *old_weak_ptr_list; /* also pending finaliser list */ -static rtsBool weak_done; /* all done for this pass */ +StgWeak *old_weak_ptr_list; // also pending finaliser list +static rtsBool weak_done; // all done for this pass /* List of all threads during GC */ @@ -131,25 +109,23 @@ static rtsBool failed_to_evac; /* Old to-space (used for two-space collector only) */ -bdescr *old_to_space; +bdescr *old_to_blocks; /* Data used for allocation area sizing. */ -lnat new_blocks; /* blocks allocated during this GC */ -lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */ +lnat new_blocks; // blocks allocated during this GC +lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC /* Used to avoid long recursion due to selector thunks */ lnat thunk_selector_depth = 0; #define MAX_THUNK_SELECTOR_DEPTH 256 -//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST -//@subsection Static function declarations - /* ----------------------------------------------------------------------------- Static function declarations -------------------------------------------------------------------------- */ +static void mark_root ( StgClosure **root ); static StgClosure * evacuate ( StgClosure *q ); static void zero_static_object_list ( StgClosure* first_static ); static void zero_mutable_list ( StgMutClosure *first ); @@ -157,22 +133,54 @@ static void zero_mutable_list ( StgMutClosure *first ); static rtsBool traverse_weak_ptr_list ( void ); static void cleanup_weak_ptr_list ( StgWeak **list ); +static void scavenge ( step * ); +static void scavenge_mark_stack ( void ); static void scavenge_stack ( StgPtr p, StgPtr stack_end ); +static rtsBool scavenge_one ( StgClosure *p ); static void scavenge_large ( step * ); -static void scavenge ( step * ); static void scavenge_static ( void ); static void scavenge_mutable_list ( generation *g ); static void scavenge_mut_once_list ( generation *g ); +static void scavengeCAFs ( void ); -#ifdef DEBUG +#if 0 && defined(DEBUG) static void gcCAFs ( void ); #endif -void revertCAFs ( void ); -void scavengeCAFs ( void ); +/* ----------------------------------------------------------------------------- + inline functions etc. for dealing with the mark bitmap & stack. 
+ -------------------------------------------------------------------------- */ + +#define MARK_STACK_BLOCKS 4 + +static bdescr *mark_stack_bdescr; +static StgPtr *mark_stack; +static StgPtr *mark_sp; +static StgPtr *mark_splim; + +static inline rtsBool +mark_stack_empty(void) +{ + return mark_sp == mark_stack; +} + +static inline rtsBool +mark_stack_full(void) +{ + return mark_sp >= mark_splim; +} + +static inline void +push_mark_stack(StgPtr p) +{ + *mark_sp++ = p; +} -//@node Garbage Collect, Weak Pointers, Static function declarations -//@subsection Garbage Collect +static inline StgPtr +pop_mark_stack(void) +{ + return *--mark_sp; +} /* ----------------------------------------------------------------------------- GarbageCollect @@ -196,9 +204,9 @@ void scavengeCAFs ( void ); - free from-space in each step, and set from-space = to-space. -------------------------------------------------------------------------- */ -//@cindex GarbageCollect -void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) +void +GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) { bdescr *bd; step *stp; @@ -214,13 +222,13 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) Now, Now)); #endif - /* tell the stats department that we've started a GC */ + // tell the stats department that we've started a GC stat_startGC(); - /* Init stats and print par specific (timing) info */ + // Init stats and print par specific (timing) info PAR_TICKY_PAR_START(); - /* attribute any costs to CCS_GC */ + // attribute any costs to CCS_GC #ifdef PROFILING prev_CCS = CCCS; CCCS = CCS_GC; @@ -252,7 +260,7 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) } #endif - /* check stack sanity *before* GC (ToDo: check all threads) */ + // check stack sanity *before* GC (ToDo: check all threads) #if defined(GRAN) // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity()); #endif @@ -273,8 +281,8 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) /* Save the old to-space if we're doing a two-space collection */ if (RtsFlags.GcFlags.generations == 1) { - old_to_space = g0s0->to_space; - g0s0->to_space = NULL; + old_to_blocks = g0s0->to_blocks; + g0s0->to_blocks = NULL; } /* Keep a count of how many new blocks we allocated during this GC @@ -291,7 +299,7 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) for (s = 0; s < generations[g].n_steps; s++) { - /* generation 0, step 0 doesn't need to-space */ + // generation 0, step 0 doesn't need to-space if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) { continue; } @@ -306,20 +314,49 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) bd->gen_no = g; bd->step = stp; bd->link = NULL; - bd->evacuated = 1; /* it's a to-space block */ - stp->hp = bd->start; - stp->hpLim = stp->hp + BLOCK_SIZE_W; - stp->hp_bd = bd; - stp->to_space = bd; - stp->to_blocks = 1; - stp->scan = bd->start; - stp->scan_bd = bd; + bd->flags = BF_EVACUATED; // it's a to-space block + stp->hp = bd->start; + stp->hpLim = stp->hp + BLOCK_SIZE_W; + stp->hp_bd = bd; + stp->to_blocks = bd; + stp->n_to_blocks = 1; + stp->scan = bd->start; + stp->scan_bd = bd; stp->new_large_objects = NULL; stp->scavenged_large_objects = NULL; new_blocks++; - /* mark the large objects as not evacuated yet */ + // mark the large objects as not evacuated yet for (bd = stp->large_objects; bd; bd = bd->link) { - bd->evacuated = 0; + bd->flags = BF_LARGE; + } + + // for a compacted step, we need to 
allocate the bitmap + if (stp->is_compacted) { + nat bitmap_size; // in bytes + bdescr *bitmap_bdescr; + StgWord *bitmap; + + bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE); + + if (bitmap_size > 0) { + bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size) + / BLOCK_SIZE); + stp->bitmap = bitmap_bdescr; + bitmap = bitmap_bdescr->start; + + IF_DEBUG(gc, fprintf(stderr, "bitmap_size: %d, bitmap: %p\n", + bitmap_size, bitmap);); + + // don't forget to fill it with zeros! + memset(bitmap, 0, bitmap_size); + + // for each block in this step, point to its bitmap from the + // block descriptor. + for (bd=stp->blocks; bd != NULL; bd = bd->link) { + bd->u.bitmap = bitmap; + bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE); + } + } } } } @@ -331,29 +368,41 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) for (s = 0; s < generations[g].n_steps; s++) { stp = &generations[g].steps[s]; if (stp->hp_bd == NULL) { - bd = allocBlock(); - bd->gen_no = g; - bd->step = stp; - bd->link = NULL; - bd->evacuated = 0; /* *not* a to-space block */ - stp->hp = bd->start; - stp->hpLim = stp->hp + BLOCK_SIZE_W; - stp->hp_bd = bd; - stp->blocks = bd; - stp->n_blocks = 1; - new_blocks++; + ASSERT(stp->blocks == NULL); + bd = allocBlock(); + bd->gen_no = g; + bd->step = stp; + bd->link = NULL; + bd->flags = 0; // *not* a to-space block or a large object + stp->hp = bd->start; + stp->hpLim = stp->hp + BLOCK_SIZE_W; + stp->hp_bd = bd; + stp->blocks = bd; + stp->n_blocks = 1; + new_blocks++; } /* Set the scan pointer for older generations: remember we * still have to scavenge objects that have been promoted. */ stp->scan = stp->hp; stp->scan_bd = stp->hp_bd; - stp->to_space = NULL; - stp->to_blocks = 0; + stp->to_blocks = NULL; + stp->n_to_blocks = 0; stp->new_large_objects = NULL; stp->scavenged_large_objects = NULL; } } + /* Allocate a mark stack if we're doing a major collection. + */ + if (major_gc) { + mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS); + mark_stack = (StgPtr *)mark_stack_bdescr->start; + mark_sp = mark_stack; + mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W); + } else { + mark_stack_bdescr = NULL; + } + /* ----------------------------------------------------------------------- * follow all the roots that we know about: * - mutable lists from each generation > N @@ -373,7 +422,7 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) generations[g].mut_list = END_MUT_LIST; } - /* Do the mut-once lists first */ + // Do the mut-once lists first for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { IF_PAR_DEBUG(verbose, printMutOnceList(&generations[g])); @@ -400,7 +449,7 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) /* follow all the roots that the application knows about. */ evac_gen = 0; - get_roots(); + get_roots(mark_root); #if defined(PAR) /* And don't forget to mark the TSO if we got here direct from @@ -411,9 +460,9 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) } */ - /* Mark the entries in the GALA table of the parallel system */ + // Mark the entries in the GALA table of the parallel system markLocalGAs(major_gc); - /* Mark all entries on the list of pending fetches */ + // Mark all entries on the list of pending fetches markPendingFetches(major_gc); #endif @@ -433,7 +482,7 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) /* Mark the stable pointer table. 
*/ - markStablePtrTable(major_gc); + markStablePtrTable(mark_root); #ifdef INTERPRETER { @@ -455,11 +504,16 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) loop: flag = rtsFalse; - /* scavenge static objects */ + // scavenge static objects if (major_gc && static_objects != END_OF_STATIC_LIST) { - IF_DEBUG(sanity, - checkStaticObjects()); - scavenge_static(); + IF_DEBUG(sanity, checkStaticObjects(static_objects)); + scavenge_static(); + } + + // scavenge objects in compacted generation + if (mark_stack_bdescr != NULL && !mark_stack_empty()) { + scavenge_mark_stack(); + flag = rtsTrue; } /* When scavenging the older generations: Objects may have been @@ -471,7 +525,7 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) * generation. */ - /* scavenge each step in generations 0..maxgen */ + // scavenge each step in generations 0..maxgen { int gen, st; loop2: @@ -495,10 +549,11 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) } } } + if (flag) { goto loop; } - /* must be last... */ - if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */ + // must be last... + if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something goto loop; } } @@ -508,49 +563,41 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) */ cleanup_weak_ptr_list(&weak_ptr_list); - /* Now see which stable names are still alive. - */ - gcStablePtrTable(major_gc); - #if defined(PAR) - /* Reconstruct the Global Address tables used in GUM */ + // Reconstruct the Global Address tables used in GUM rebuildGAtables(major_gc); - IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/)); IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/)); #endif - /* Set the maximum blocks for the oldest generation, based on twice - * the amount of live data now, adjusted to fit the maximum heap - * size if necessary. - * - * This is an approximation, since in the worst case we'll need - * twice the amount of live data plus whatever space the other - * generations need. - */ - if (RtsFlags.GcFlags.generations > 1) { - if (major_gc) { - oldest_gen->max_blocks = - stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor, - RtsFlags.GcFlags.minOldGenSize); - if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) { - oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2; - if (((int)oldest_gen->max_blocks - - (int)oldest_gen->steps[0].to_blocks) < - (RtsFlags.GcFlags.pcFreeHeap * - RtsFlags.GcFlags.maxHeapSize / 200)) { - heapOverflow(); - } + // Now see which stable names are still alive. + gcStablePtrTable(); + + // Tidy the end of the to-space chains + for (g = 0; g < RtsFlags.GcFlags.generations; g++) { + for (s = 0; s < generations[g].n_steps; s++) { + stp = &generations[g].steps[s]; + if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) { + stp->hp_bd->free = stp->hp; + stp->hp_bd->link = NULL; + } } - } } + // NO MORE EVACUATION AFTER THIS POINT! + // Finally: compaction of the oldest generation. 
+ if (major_gc && RtsFlags.GcFlags.compact) { + compact(get_roots); + } + + IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse)); + /* run through all the generations/steps and tidy up */ copied = new_blocks * BLOCK_SIZE_W; for (g = 0; g < RtsFlags.GcFlags.generations; g++) { if (g <= N) { - generations[g].collections++; /* for stats */ + generations[g].collections++; // for stats } for (s = 0; s < generations[g].n_steps; s++) { @@ -558,34 +605,52 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) stp = &generations[g].steps[s]; if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) { - /* Tidy the end of the to-space chains */ - stp->hp_bd->free = stp->hp; - stp->hp_bd->link = NULL; - /* stats information: how much we copied */ + // stats information: how much we copied if (g <= N) { copied -= stp->hp_bd->start + BLOCK_SIZE_W - stp->hp_bd->free; } } - /* for generations we collected... */ + // for generations we collected... if (g <= N) { - collected += stp->n_blocks * BLOCK_SIZE_W; /* for stats */ + collected += stp->n_blocks * BLOCK_SIZE_W; // for stats /* free old memory and shift to-space into from-space for all * the collected steps (except the allocation area). These * freed blocks will probaby be quickly recycled. */ if (!(g == 0 && s == 0)) { - freeChain(stp->blocks); - stp->blocks = stp->to_space; - stp->n_blocks = stp->to_blocks; - stp->to_space = NULL; - stp->to_blocks = 0; - for (bd = stp->blocks; bd != NULL; bd = bd->link) { - bd->evacuated = 0; /* now from-space */ - } + if (stp->is_compacted) { + // for a compacted step, just shift the new to-space + // onto the front of the now-compacted existing blocks. + for (bd = stp->to_blocks; bd != NULL; bd = bd->link) { + bd->flags &= ~BF_EVACUATED; // now from-space + } + // tack the new blocks on the end of the existing blocks + if (stp->blocks == NULL) { + stp->blocks = stp->to_blocks; + } else { + for (bd = stp->blocks; bd != NULL; bd = next) { + next = bd->link; + if (next == NULL) { + bd->link = stp->to_blocks; + } + } + } + // add the new blocks to the block tally + stp->n_blocks += stp->n_to_blocks; + } else { + freeChain(stp->blocks); + stp->blocks = stp->to_blocks; + stp->n_blocks = stp->n_to_blocks; + for (bd = stp->blocks; bd != NULL; bd = bd->link) { + bd->flags &= ~BF_EVACUATED; // now from-space + } + } + stp->to_blocks = NULL; + stp->n_to_blocks = 0; } /* LARGE OBJECTS. The current live large objects are chained on @@ -599,7 +664,7 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) bd = next; } for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) { - bd->evacuated = 0; + bd->flags &= ~BF_EVACUATED; } stp->large_objects = stp->scavenged_large_objects; @@ -619,7 +684,7 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) generations[g].max_blocks = oldest_gen->max_blocks; } - /* for older generations... */ + // for older generations... } else { /* For older generations, we need to append the @@ -628,17 +693,40 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) */ for (bd = stp->scavenged_large_objects; bd; bd = next) { next = bd->link; - bd->evacuated = 0; + bd->flags &= ~BF_EVACUATED; dbl_link_onto(bd, &stp->large_objects); } - /* add the new blocks we promoted during this GC */ - stp->n_blocks += stp->to_blocks; + // add the new blocks we promoted during this GC + stp->n_blocks += stp->n_to_blocks; } } } - /* Guess the amount of live data for stats. 
*/ + /* Set the maximum blocks for the oldest generation, based on twice + * the amount of live data now, adjusted to fit the maximum heap + * size if necessary. + * + * This is an approximation, since in the worst case we'll need + * twice the amount of live data plus whatever space the other + * generations need. + */ + if (major_gc && RtsFlags.GcFlags.generations > 1) { + oldest_gen->max_blocks = + stg_max(oldest_gen->steps[0].n_blocks * RtsFlags.GcFlags.oldGenFactor, + RtsFlags.GcFlags.minOldGenSize); + if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) { + oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2; + if (((int)oldest_gen->max_blocks - + (int)oldest_gen->steps[0].n_blocks) < + (RtsFlags.GcFlags.pcFreeHeap * + RtsFlags.GcFlags.maxHeapSize / 200)) { + heapOverflow(); + } + } + } + + // Guess the amount of live data for stats. live = calcLive(); /* Free the small objects allocated via allocate(), since this will @@ -653,17 +741,34 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) alloc_HpLim = NULL; alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize; + /* Free the mark stack. + */ + if (mark_stack_bdescr != NULL) { + freeGroup(mark_stack_bdescr); + } + + /* Free any bitmaps. + */ + for (g = 0; g <= N; g++) { + for (s = 0; s < generations[g].n_steps; s++) { + stp = &generations[g].steps[s]; + if (stp->is_compacted && stp->bitmap != NULL) { + freeGroup(stp->bitmap); + } + } + } + /* Two-space collector: * Free the old to-space, and estimate the amount of live data. */ if (RtsFlags.GcFlags.generations == 1) { nat blocks; - if (old_to_space != NULL) { - freeChain(old_to_space); + if (old_to_blocks != NULL) { + freeChain(old_to_blocks); } - for (bd = g0s0->to_space; bd != NULL; bd = bd->link) { - bd->evacuated = 0; /* now from-space */ + for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) { + bd->flags = 0; // now from-space } /* For a two-space collector, we need to resize the nursery. */ @@ -682,11 +787,11 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) * performance we get from 3L bytes, reducing to the same * performance at 2L bytes. */ - blocks = g0s0->to_blocks; + blocks = g0s0->n_to_blocks; if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 > RtsFlags.GcFlags.maxHeapSize ) { - int adjusted_blocks; /* signed on purpose */ + int adjusted_blocks; // signed on purpose int pc_free; adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks); @@ -713,7 +818,7 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) if (RtsFlags.GcFlags.heapSizeSuggestion) { int blocks; - nat needed = calcNeeded(); /* approx blocks needed at next GC */ + nat needed = calcNeeded(); // approx blocks needed at next GC /* Guess how much will be live in generation 0 step 0 next time. 
* A good approximation is the obtained by finding the @@ -747,14 +852,12 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) } } - /* mark the garbage collected CAFs as dead */ -#if 0 /* doesn't work at the moment */ -#if defined(DEBUG) + // mark the garbage collected CAFs as dead +#if 0 && defined(DEBUG) // doesn't work at the moment if (major_gc) { gcCAFs(); } #endif -#endif - /* zero the scavenged static object list */ + // zero the scavenged static object list if (major_gc) { zero_static_object_list(scavenged_static_objects); } @@ -763,30 +866,33 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) */ resetNurseries(); - /* start any pending finalizers */ + // start any pending finalizers scheduleFinalizers(old_weak_ptr_list); - /* send exceptions to any threads which were about to die */ + // send exceptions to any threads which were about to die resurrectThreads(resurrected_threads); - /* check sanity after GC */ - IF_DEBUG(sanity, checkSanity(N)); + // Update the stable pointer hash table. + updateStablePtrTable(major_gc); - /* extra GC trace info */ - IF_DEBUG(gc, stat_describe_gens()); + // check sanity after GC + IF_DEBUG(sanity, checkSanity()); + + // extra GC trace info + IF_DEBUG(gc, statDescribeGens()); #ifdef DEBUG - /* symbol-table based profiling */ - /* heapCensus(to_space); */ /* ToDo */ + // symbol-table based profiling + /* heapCensus(to_blocks); */ /* ToDo */ #endif - /* restore enclosing cost centre */ + // restore enclosing cost centre #ifdef PROFILING heapCensus(); CCCS = prev_CCS; #endif - /* check for memory leaks if sanity checking is on */ + // check for memory leaks if sanity checking is on IF_DEBUG(sanity, memInventory()); #ifdef RTS_GTK_FRONTPANEL @@ -795,14 +901,12 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) } #endif - /* ok, GC over: tell the stats department what happened. */ + // ok, GC over: tell the stats department what happened. stat_endGC(allocated, collected, live, copied, N); //PAR_TICKY_TP(); } -//@node Weak Pointers, Evacuation, Garbage Collect -//@subsection Weak Pointers /* ----------------------------------------------------------------------------- Weak Pointers @@ -823,7 +927,6 @@ void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) probably be optimised by keeping per-generation lists of weak pointers, but for a few weak pointers this scheme will work. -------------------------------------------------------------------------- */ -//@cindex traverse_weak_ptr_list static rtsBool traverse_weak_ptr_list(void) @@ -840,7 +943,7 @@ traverse_weak_ptr_list(void) evac_gen = 0; last_w = &old_weak_ptr_list; - for (w = old_weak_ptr_list; w; w = next_w) { + for (w = old_weak_ptr_list; w != NULL; w = next_w) { /* First, this weak pointer might have been evacuated. If so, * remove the forwarding pointer from the weak_ptr_list. 
@@ -865,12 +968,12 @@ traverse_weak_ptr_list(void) */ if ((new = isAlive(w->key))) { w->key = new; - /* evacuate the value and finalizer */ + // evacuate the value and finalizer w->value = evacuate(w->value); w->finalizer = evacuate(w->finalizer); - /* remove this weak ptr from the old_weak_ptr list */ + // remove this weak ptr from the old_weak_ptr list *last_w = w->link; - /* and put it on the new weak ptr list */ + // and put it on the new weak ptr list next_w = w->link; w->link = weak_ptr_list; weak_ptr_list = w; @@ -895,9 +998,13 @@ traverse_weak_ptr_list(void) prev = &old_all_threads; for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { - /* Threads which have finished or died get dropped from - * the list. - */ + (StgClosure *)tmp = isAlive((StgClosure *)t); + + if (tmp != NULL) { + t = tmp; + } + + ASSERT(get_itbl(t)->type == TSO); switch (t->what_next) { case ThreadRelocated: next = t->link; @@ -905,24 +1012,30 @@ traverse_weak_ptr_list(void) continue; case ThreadKilled: case ThreadComplete: + // finshed or died. The thread might still be alive, but we + // don't keep it on the all_threads list. Don't forget to + // stub out its global_link field. next = t->global_link; + t->global_link = END_TSO_QUEUE; *prev = next; continue; - default: ; + default: + ; } - /* Threads which have already been determined to be alive are - * moved onto the all_threads list. - */ - (StgClosure *)tmp = isAlive((StgClosure *)t); - if (tmp != NULL) { - next = tmp->global_link; - tmp->global_link = all_threads; - all_threads = tmp; - *prev = next; - } else { - prev = &(t->global_link); - next = t->global_link; + if (tmp == NULL) { + // not alive (yet): leave this thread on the old_all_threads list. + prev = &(t->global_link); + next = t->global_link; + continue; + } + else { + // alive: move this thread onto the all_threads list. + next = t->global_link; + t->global_link = all_threads; + all_threads = t; + *prev = next; + break; } } } @@ -967,7 +1080,6 @@ traverse_weak_ptr_list(void) evacuated need to be evacuated now. -------------------------------------------------------------------------- */ -//@cindex cleanup_weak_ptr_list static void cleanup_weak_ptr_list ( StgWeak **list ) @@ -982,7 +1094,7 @@ cleanup_weak_ptr_list ( StgWeak **list ) *last_w = w; } - if (Bdescr((P_)w)->evacuated == 0) { + if ((Bdescr((P_)w)->flags & BF_EVACUATED) == 0) { (StgClosure *)w = evacuate((StgClosure *)w); *last_w = w; } @@ -994,15 +1106,16 @@ cleanup_weak_ptr_list ( StgWeak **list ) isAlive determines whether the given closure is still alive (after a garbage collection) or not. It returns the new address of the closure if it is alive, or NULL otherwise. + + NOTE: Use it before compaction only! -------------------------------------------------------------------------- */ -//@cindex isAlive StgClosure * isAlive(StgClosure *p) { const StgInfoTable *info; - nat size; + bdescr *bd; while (1) { @@ -1013,81 +1126,66 @@ isAlive(StgClosure *p) * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs. */ - /* ignore closures in generations that we're not collecting. */ - if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen_no > N) { - return p; + loop: + bd = Bdescr((P_)p); + // ignore closures in generations that we're not collecting. 
+ if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) { + return p; } - + // large objects have an evacuated flag + if ((bd->flags & BF_LARGE) && (bd->flags & BF_EVACUATED)) { + return p; + } + // check the mark bit for compacted steps + if (bd->step->is_compacted && is_marked((P_)p,bd)) { + return p; + } + switch (info->type) { - + case IND: case IND_STATIC: case IND_PERM: - case IND_OLDGEN: /* rely on compatible layout with StgInd */ + case IND_OLDGEN: // rely on compatible layout with StgInd case IND_OLDGEN_PERM: - /* follow indirections */ + // follow indirections p = ((StgInd *)p)->indirectee; continue; - + case EVACUATED: - /* alive! */ + // alive! return ((StgEvacuated *)p)->evacuee; - case ARR_WORDS: - size = arr_words_sizeW((StgArrWords *)p); - goto large; - - case MUT_ARR_PTRS: - case MUT_ARR_PTRS_FROZEN: - size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p); - goto large; - case TSO: if (((StgTSO *)p)->what_next == ThreadRelocated) { p = (StgClosure *)((StgTSO *)p)->link; - continue; + goto loop; } - - size = tso_sizeW((StgTSO *)p); - large: - if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_) - && Bdescr((P_)p)->evacuated) - return p; - else - return NULL; default: - /* dead. */ + // dead. return NULL; } } } -//@cindex MarkRoot -StgClosure * -MarkRoot(StgClosure *root) +static void +mark_root(StgClosure **root) { -# if 0 && defined(PAR) && defined(DEBUG) - StgClosure *foo = evacuate(root); - // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated); - ASSERT(isAlive(foo)); // must be in to-space - return foo; -# else - return evacuate(root); -# endif + *root = evacuate(*root); } -//@cindex addBlock -static void addBlock(step *stp) +static void +addBlock(step *stp) { bdescr *bd = allocBlock(); bd->gen_no = stp->gen_no; bd->step = stp; if (stp->gen_no <= N) { - bd->evacuated = 1; + bd->flags = BF_EVACUATED; } else { - bd->evacuated = 0; + bd->flags = 0; } stp->hp_bd->free = stp->hp; @@ -1095,11 +1193,10 @@ static void addBlock(step *stp) stp->hp = bd->start; stp->hpLim = stp->hp + BLOCK_SIZE_W; stp->hp_bd = bd; - stp->to_blocks++; + stp->n_to_blocks++; new_blocks++; } -//@cindex upd_evacuee static __inline__ void upd_evacuee(StgClosure *p, StgClosure *dest) @@ -1108,7 +1205,6 @@ upd_evacuee(StgClosure *p, StgClosure *dest) ((StgEvacuated *)p)->evacuee = dest; } -//@cindex copy static __inline__ StgClosure * copy(StgClosure *src, nat size, step *stp) @@ -1151,7 +1247,6 @@ copy(StgClosure *src, nat size, step *stp) * used to optimise evacuation of BLACKHOLEs. */ -//@cindex copyPart static __inline__ StgClosure * copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) @@ -1181,8 +1276,6 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) return (StgClosure *)dest; } -//@node Evacuation, Scavenging, Weak Pointers -//@subsection Evacuation /* ----------------------------------------------------------------------------- Evacuate a large object @@ -1191,23 +1284,22 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) large_alloc_list, and linking it on to the (singly-linked) new_large_objects list, from where it will be scavenged later. - Convention: bd->evacuated is /= 0 for a large object that has been - evacuated, or 0 otherwise. + Convention: bd->flags has BF_EVACUATED set for a large object + that has been evacuated, or unset otherwise. 
-------------------------------------------------------------------------- */ -//@cindex evacuate_large static inline void -evacuate_large(StgPtr p, rtsBool mutable) +evacuate_large(StgPtr p) { bdescr *bd = Bdescr(p); step *stp; - /* should point to the beginning of the block */ + // should point to the beginning of the block ASSERT(((W_)p & BLOCK_MASK) == 0); - /* already evacuated? */ - if (bd->evacuated) { + // already evacuated? + if (bd->flags & BF_EVACUATED) { /* Don't forget to set the failed_to_evac flag if we didn't get * the desired destination (see comments in evacuate()). */ @@ -1219,14 +1311,14 @@ evacuate_large(StgPtr p, rtsBool mutable) } stp = bd->step; - /* remove from large_object list */ - if (bd->back) { - bd->back->link = bd->link; - } else { /* first object in the list */ + // remove from large_object list + if (bd->u.back) { + bd->u.back->link = bd->link; + } else { // first object in the list stp->large_objects = bd->link; } if (bd->link) { - bd->link->back = bd->back; + bd->link->u.back = bd->u.back; } /* link it on to the evacuated large object list of the destination step @@ -1244,11 +1336,7 @@ evacuate_large(StgPtr p, rtsBool mutable) bd->gen_no = stp->gen_no; bd->link = stp->new_large_objects; stp->new_large_objects = bd; - bd->evacuated = 1; - - if (mutable) { - recordMutable((StgMutClosure *)p); - } + bd->flags |= BF_EVACUATED; } /* ----------------------------------------------------------------------------- @@ -1259,7 +1347,6 @@ evacuate_large(StgPtr p, rtsBool mutable) the promotion until the next GC. -------------------------------------------------------------------------- */ -//@cindex mkMutCons static StgClosure * mkMutCons(StgClosure *ptr, generation *gen) @@ -1310,7 +1397,6 @@ mkMutCons(StgClosure *ptr, generation *gen) didn't manage to evacuate this object into evac_gen. -------------------------------------------------------------------------- */ -//@cindex evacuate static StgClosure * evacuate(StgClosure *q) @@ -1323,51 +1409,71 @@ evacuate(StgClosure *q) loop: if (HEAP_ALLOCED(q)) { bd = Bdescr((P_)q); + if (bd->gen_no > N) { - /* Can't evacuate this object, because it's in a generation - * older than the ones we're collecting. Let's hope that it's - * in evac_gen or older, or we will have to make an IND_OLDGEN object. - */ - if (bd->gen_no < evac_gen) { - /* nope */ - failed_to_evac = rtsTrue; - TICK_GC_FAILED_PROMOTION(); - } - return q; + /* Can't evacuate this object, because it's in a generation + * older than the ones we're collecting. Let's hope that it's + * in evac_gen or older, or we will have to arrange to track + * this pointer using the mutable list. + */ + if (bd->gen_no < evac_gen) { + // nope + failed_to_evac = rtsTrue; + TICK_GC_FAILED_PROMOTION(); + } + return q; } + + /* evacuate large objects by re-linking them onto a different list. + */ + if (bd->flags & BF_LARGE) { + info = get_itbl(q); + if (info->type == TSO && + ((StgTSO *)q)->what_next == ThreadRelocated) { + q = (StgClosure *)((StgTSO *)q)->link; + goto loop; + } + evacuate_large((P_)q); + return q; + } + + /* If the object is in a step that we're compacting, then we + * need to use an alternative evacuate procedure. 
+ */ + if (bd->step->is_compacted) { + if (!is_marked((P_)q,bd)) { + mark((P_)q,bd); + if (mark_stack_full()) { + barf("ToDo: mark stack full"); + } + push_mark_stack((P_)q); + } + return q; + } + stp = bd->step->to; } #ifdef DEBUG - else stp = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */ + else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong #endif - /* make sure the info pointer is into text space */ + // make sure the info pointer is into text space ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q)) || IS_HUGS_CONSTR_INFO(GET_INFO(q)))); info = get_itbl(q); - /* - if (info->type==RBH) { - info = REVERT_INFOPTR(info); - IF_DEBUG(gc, - belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)", - q, info_type(q), info, info_type_by_ip(info))); - } - */ switch (info -> type) { case MUT_VAR: - ASSERT(q->header.info != &stg_MUT_CONS_info); case MVAR: - to = copy(q,sizeW_fromITBL(info),stp); - recordMutable((StgMutClosure *)to); - return to; + to = copy(q,sizeW_fromITBL(info),stp); + return to; case CONSTR_0_1: { StgWord w = (StgWord)q->payload[0]; if (q->header.info == Czh_con_info && - /* unsigned, so always true: (StgChar)w >= MIN_CHARLIKE && */ + // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE && (StgChar)w <= MAX_CHARLIKE) { return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w); } @@ -1375,7 +1481,7 @@ loop: (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) { return (StgClosure *)INTLIKE_CLOSURE((StgInt)w); } - /* else, fall through ... */ + // else, fall through ... } case FUN_1_0: @@ -1383,7 +1489,7 @@ loop: case CONSTR_1_0: return copy(q,sizeofW(StgHeader)+1,stp); - case THUNK_1_0: /* here because of MIN_UPD_SIZE */ + case THUNK_1_0: // here because of MIN_UPD_SIZE case THUNK_0_1: case THUNK_1_1: case THUNK_0_2: @@ -1424,7 +1530,6 @@ loop: case BLACKHOLE_BQ: to = copy(q,BLACKHOLE_sizeW(),stp); - recordMutable((StgMutClosure *)to); return to; case THUNK_SELECTOR: @@ -1445,12 +1550,12 @@ loop: { StgWord32 offset = info->layout.selector_offset; - /* check that the size is in range */ + // check that the size is in range ASSERT(offset < (StgWord32)(selectee_info->layout.payload.ptrs + selectee_info->layout.payload.nptrs)); - /* perform the selection! */ + // perform the selection! q = selectee->payload[offset]; /* if we're already in to-space, there's no need to continue @@ -1459,7 +1564,7 @@ loop: */ if (HEAP_ALLOCED(q)) { bdescr *bd = Bdescr((P_)q); - if (bd->evacuated) { + if (bd->flags & BF_EVACUATED) { if (bd->gen_no < evac_gen) { failed_to_evac = rtsTrue; TICK_GC_FAILED_PROMOTION(); @@ -1499,14 +1604,14 @@ loop: if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) { bdescr *bd; bd = Bdescr((P_)selectee); - if (!bd->evacuated) { + if (!bd->flags & BF_EVACUATED) { thunk_selector_depth++; selectee = evacuate(selectee); thunk_selector_depth--; goto selector_loop; } } - /* otherwise, fall through... */ + // otherwise, fall through... # endif case AP_UPD: @@ -1522,11 +1627,11 @@ loop: case SE_BLACKHOLE: case BLACKHOLE: case BLACKHOLE_BQ: - /* not evaluated yet */ + // not evaluated yet break; #if defined(PAR) - /* a copy of the top-level cases below */ + // a copy of the top-level cases below case RBH: // cf. 
BLACKHOLE_BQ { //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str); @@ -1565,7 +1670,7 @@ loop: case IND: case IND_OLDGEN: - /* follow chains of indirections, don't evacuate them */ + // follow chains of indirections, don't evacuate them q = ((StgInd*)q)->indirectee; goto loop; @@ -1623,27 +1728,15 @@ loop: case STOP_FRAME: case CATCH_FRAME: case SEQ_FRAME: - /* shouldn't see these */ + // shouldn't see these barf("evacuate: stack frame at %p\n", q); case AP_UPD: case PAP: /* PAPs and AP_UPDs are special - the payload is a copy of a chunk * of stack, tagging and all. - * - * They can be larger than a block in size. Both are only - * allocated via allocate(), so they should be chained on to the - * large_object list. */ - { - nat size = pap_sizeW((StgPAP*)q); - if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { - evacuate_large((P_)q, rtsFalse); - return q; - } else { - return copy(q,size,stp); - } - } + return copy(q,pap_sizeW((StgPAP*)q),stp); case EVACUATED: /* Already evacuated, just return the forwarding address. @@ -1653,7 +1746,7 @@ loop: * set the failed_to_evac flag to indicate that we couldn't * manage to promote the object to the desired generation. */ - if (evac_gen > 0) { /* optimisation */ + if (evac_gen > 0) { // optimisation StgClosure *p = ((StgEvacuated*)q)->evacuee; if (Bdescr((P_)p)->gen_no < evac_gen) { IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p)); @@ -1664,41 +1757,17 @@ loop: return ((StgEvacuated*)q)->evacuee; case ARR_WORDS: - { - nat size = arr_words_sizeW((StgArrWords *)q); - - if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { - evacuate_large((P_)q, rtsFalse); - return q; - } else { - /* just copy the block */ - return copy(q,size,stp); - } - } + // just copy the block + return copy(q,arr_words_sizeW((StgArrWords *)q),stp); case MUT_ARR_PTRS: case MUT_ARR_PTRS_FROZEN: - { - nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q); - - if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { - evacuate_large((P_)q, info->type == MUT_ARR_PTRS); - to = q; - } else { - /* just copy the block */ - to = copy(q,size,stp); - if (info->type == MUT_ARR_PTRS) { - recordMutable((StgMutClosure *)to); - } - } - return to; - } + // just copy the block + return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp); case TSO: { StgTSO *tso = (StgTSO *)q; - nat size = tso_sizeW(tso); - int diff; /* Deal with redirected TSOs (a TSO that's had its stack enlarged). */ @@ -1707,28 +1776,13 @@ loop: goto loop; } - /* Large TSOs don't get moved, so no relocation is required. - */ - if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { - evacuate_large((P_)q, rtsTrue); - return q; - /* To evacuate a small TSO, we need to relocate the update frame * list it contains. */ - } else { - StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp); - - diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */ - - /* relocate the stack pointers... 
*/ - new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff); - new_tso->sp = (StgPtr)new_tso->sp + diff; - - relocate_TSO(tso, new_tso); - - recordMutable((StgMutClosure *)new_tso); - return (StgClosure *)new_tso; + { + StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp); + move_TSO(tso, new_tso); + return (StgClosure *)new_tso; } } @@ -1739,7 +1793,6 @@ loop: to = copy(q,BLACKHOLE_sizeW(),stp); //ToDo: derive size etc from reverted IP //to = copy(q,size,stp); - recordMutable((StgMutClosure *)to); IF_DEBUG(gc, belch("@@ evacuate: RBH %p (%s) to %p (%s)", q, info_type(q), to, info_type(to))); @@ -1782,27 +1835,42 @@ loop: } /* ----------------------------------------------------------------------------- - relocate_TSO is called just after a TSO has been copied from src to - dest. It adjusts the update frame list for the new location. + move_TSO is called to update the TSO structure after it has been + moved from one place to another. + -------------------------------------------------------------------------- */ + +void +move_TSO(StgTSO *src, StgTSO *dest) +{ + int diff; + + // relocate the stack pointers... + diff = (StgPtr)dest - (StgPtr)src; // In *words* + dest->sp = (StgPtr)dest->sp + diff; + dest->su = (StgUpdateFrame *) ((P_)dest->su + diff); + + relocate_stack(dest, diff); +} + +/* ----------------------------------------------------------------------------- + relocate_stack is called to update the linkage between + UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one + place to another. -------------------------------------------------------------------------- */ -//@cindex relocate_TSO StgTSO * -relocate_TSO(StgTSO *src, StgTSO *dest) +relocate_stack(StgTSO *dest, int diff) { StgUpdateFrame *su; StgCatchFrame *cf; StgSeqFrame *sf; - int diff; - - diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */ su = dest->su; while ((P_)su < dest->stack + dest->stack_size) { switch (get_itbl(su)->type) { - /* GCC actually manages to common up these three cases! */ + // GCC actually manages to common up these three cases! case UPDATE_FRAME: su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff); @@ -1822,11 +1890,11 @@ relocate_TSO(StgTSO *src, StgTSO *dest) continue; case STOP_FRAME: - /* all done! */ + // all done! break; default: - barf("relocate_TSO %d", (int)(get_itbl(su)->type)); + barf("relocate_stack %d", (int)(get_itbl(su)->type)); } break; } @@ -1834,10 +1902,7 @@ relocate_TSO(StgTSO *src, StgTSO *dest) return dest; } -//@node Scavenging, Reverting CAFs, Evacuation -//@subsection Scavenging -//@cindex scavenge_srt static inline void scavenge_srt(const StgInfoTable *info) @@ -1879,7 +1944,7 @@ scavenge_srt(const StgInfoTable *info) static void scavengeTSO (StgTSO *tso) { - /* chase the link field for any TSOs on the same queue */ + // chase the link field for any TSOs on the same queue (StgClosure *)tso->link = evacuate((StgClosure *)tso->link); if ( tso->why_blocked == BlockedOnMVar || tso->why_blocked == BlockedOnBlackHole @@ -1895,7 +1960,7 @@ scavengeTSO (StgTSO *tso) tso->blocked_exceptions = (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions); } - /* scavenge this thread's stack */ + // scavenge this thread's stack scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); } @@ -1911,15 +1976,14 @@ scavengeTSO (StgTSO *tso) scavenging a mutable object where early promotion isn't such a good idea. 
-------------------------------------------------------------------------- */ -//@cindex scavenge static void scavenge(step *stp) { StgPtr p, q; - const StgInfoTable *info; + StgInfoTable *info; bdescr *bd; - nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */ + nat saved_evac_gen = evac_gen; p = stp->scan; bd = stp->scan_bd; @@ -1932,134 +1996,134 @@ scavenge(step *stp) while (bd != stp->hp_bd || p < stp->hp) { - /* If we're at the end of this block, move on to the next block */ + // If we're at the end of this block, move on to the next block if (bd != stp->hp_bd && p == bd->free) { bd = bd->link; p = bd->start; continue; } - q = p; /* save ptr to object */ - - ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p)) - || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p)))); - info = get_itbl((StgClosure *)p); - /* - if (info->type==RBH) - info = REVERT_INFOPTR(info); - */ - - switch (info -> type) { - + ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info))); + + q = p; + switch (info->type) { + case MVAR: - /* treat MVars specially, because we don't want to evacuate the - * mut_link field in the middle of the closure. - */ - { + /* treat MVars specially, because we don't want to evacuate the + * mut_link field in the middle of the closure. + */ + { StgMVar *mvar = ((StgMVar *)p); evac_gen = 0; (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); - p += sizeofW(StgMVar); evac_gen = saved_evac_gen; + recordMutable((StgMutClosure *)mvar); + failed_to_evac = rtsFalse; // mutable. + p += sizeofW(StgMVar); break; - } + } case THUNK_2_0: case FUN_2_0: - scavenge_srt(info); + scavenge_srt(info); case CONSTR_2_0: - ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]); - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); - p += sizeofW(StgHeader) + 2; - break; - + ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]); + ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; + case THUNK_1_0: - scavenge_srt(info); - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); - p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */ - break; - + scavenge_srt(info); + ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE + break; + case FUN_1_0: - scavenge_srt(info); + scavenge_srt(info); case CONSTR_1_0: - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); - p += sizeofW(StgHeader) + 1; - break; - + ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 1; + break; + case THUNK_0_1: - scavenge_srt(info); - p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */ - break; - + scavenge_srt(info); + p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE + break; + case FUN_0_1: - scavenge_srt(info); + scavenge_srt(info); case CONSTR_0_1: - p += sizeofW(StgHeader) + 1; - break; - + p += sizeofW(StgHeader) + 1; + break; + case THUNK_0_2: case FUN_0_2: - scavenge_srt(info); + scavenge_srt(info); case CONSTR_0_2: - p += sizeofW(StgHeader) + 2; - break; - + p += sizeofW(StgHeader) + 2; + break; + case THUNK_1_1: case FUN_1_1: - scavenge_srt(info); + scavenge_srt(info); case CONSTR_1_1: - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); - p += sizeofW(StgHeader) + 2; - break; - 
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; + case FUN: case THUNK: - scavenge_srt(info); - /* fall through */ - + scavenge_srt(info); + // fall through + case CONSTR: case WEAK: case FOREIGN: case STABLE_NAME: case BCO: - { + { StgPtr end; end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + (StgClosure *)*p = evacuate((StgClosure *)*p); } p += info->layout.payload.nptrs; break; - } + } case IND_PERM: - if (stp->gen_no != 0) { - SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); - } - /* fall through */ + if (stp->gen_no != 0) { + SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); + } + // fall through case IND_OLDGEN_PERM: - ((StgIndOldGen *)p)->indirectee = - evacuate(((StgIndOldGen *)p)->indirectee); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordOldToNewPtrs((StgMutClosure *)p); - } - p += sizeofW(StgIndOldGen); - break; + ((StgIndOldGen *)p)->indirectee = + evacuate(((StgIndOldGen *)p)->indirectee); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordOldToNewPtrs((StgMutClosure *)p); + } + p += sizeofW(StgIndOldGen); + break; case MUT_VAR: - /* ignore MUT_CONSs */ - if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) { evac_gen = 0; ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); evac_gen = saved_evac_gen; - } - p += sizeofW(StgMutVar); - break; + recordMutable((StgMutClosure *)p); + failed_to_evac = rtsFalse; // mutable anyhow + p += sizeofW(StgMutVar); + break; + + case MUT_CONS: + // ignore these + failed_to_evac = rtsFalse; // mutable anyhow + p += sizeofW(StgMutVar); + break; case CAF_BLACKHOLE: case SE_CAF_BLACKHOLE: @@ -2069,186 +2133,156 @@ scavenge(step *stp) break; case BLACKHOLE_BQ: - { + { StgBlockingQueue *bh = (StgBlockingQueue *)p; (StgClosure *)bh->blocking_queue = - evacuate((StgClosure *)bh->blocking_queue); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)bh); - } + evacuate((StgClosure *)bh->blocking_queue); + recordMutable((StgMutClosure *)bh); + failed_to_evac = rtsFalse; p += BLACKHOLE_sizeW(); break; - } + } case THUNK_SELECTOR: - { + { StgSelector *s = (StgSelector *)p; s->selectee = evacuate(s->selectee); p += THUNK_SELECTOR_sizeW(); break; - } - - case IND: - case IND_OLDGEN: - barf("scavenge:IND???\n"); - - case CONSTR_INTLIKE: - case CONSTR_CHARLIKE: - case CONSTR_STATIC: - case CONSTR_NOCAF_STATIC: - case THUNK_STATIC: - case FUN_STATIC: - case IND_STATIC: - /* Shouldn't see a static object here. */ - barf("scavenge: STATIC object\n"); - - case RET_BCO: - case RET_SMALL: - case RET_VEC_SMALL: - case RET_BIG: - case RET_VEC_BIG: - case RET_DYN: - case UPDATE_FRAME: - case STOP_FRAME: - case CATCH_FRAME: - case SEQ_FRAME: - /* Shouldn't see stack frames here. */ - barf("scavenge: stack frame\n"); + } - case AP_UPD: /* same as PAPs */ + case AP_UPD: // same as PAPs case PAP: - /* Treat a PAP just like a section of stack, not forgetting to - * evacuate the function pointer too... - */ - { + /* Treat a PAP just like a section of stack, not forgetting to + * evacuate the function pointer too... 
+ */ + { StgPAP* pap = (StgPAP *)p; pap->fun = evacuate(pap->fun); scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); p += pap_sizeW(pap); break; - } + } case ARR_WORDS: - /* nothing to follow */ - p += arr_words_sizeW((StgArrWords *)p); - break; + // nothing to follow + p += arr_words_sizeW((StgArrWords *)p); + break; case MUT_ARR_PTRS: - /* follow everything */ - { + // follow everything + { StgPtr next; - evac_gen = 0; /* repeatedly mutable */ + evac_gen = 0; // repeatedly mutable next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + (StgClosure *)*p = evacuate((StgClosure *)*p); } evac_gen = saved_evac_gen; + recordMutable((StgMutClosure *)q); + failed_to_evac = rtsFalse; // mutable anyhow. break; - } + } case MUT_ARR_PTRS_FROZEN: - /* follow everything */ - { - StgPtr start = p, next; + // follow everything + { + StgPtr next; next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); - } - if (failed_to_evac) { - /* we can do this easier... */ - recordMutable((StgMutClosure *)start); - failed_to_evac = rtsFalse; + (StgClosure *)*p = evacuate((StgClosure *)*p); } + // it's tempting to recordMutable() if failed_to_evac is + // false, but that breaks some assumptions (eg. every + // closure on the mutable list is supposed to have the MUT + // flag set, and MUT_ARR_PTRS_FROZEN doesn't). break; - } + } case TSO: - { + { StgTSO *tso = (StgTSO *)p; evac_gen = 0; scavengeTSO(tso); evac_gen = saved_evac_gen; + recordMutable((StgMutClosure *)tso); + failed_to_evac = rtsFalse; // mutable anyhow. p += tso_sizeW(tso); break; - } + } #if defined(PAR) case RBH: // cf. BLACKHOLE_BQ - { - // nat size, ptrs, nonptrs, vhs; - // char str[80]; - // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str); + { +#if 0 + nat size, ptrs, nonptrs, vhs; + char str[80]; + StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str); +#endif StgRBH *rbh = (StgRBH *)p; (StgClosure *)rbh->blocking_queue = - evacuate((StgClosure *)rbh->blocking_queue); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)rbh); - } + evacuate((StgClosure *)rbh->blocking_queue); + recordMutable((StgMutClosure *)to); + failed_to_evac = rtsFalse; // mutable anyhow. IF_DEBUG(gc, belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", p, info_type(p), (StgClosure *)rbh->blocking_queue)); // ToDo: use size of reverted closure here! 
p += BLACKHOLE_sizeW(); break; - } + } case BLOCKED_FETCH: - { + { StgBlockedFetch *bf = (StgBlockedFetch *)p; - /* follow the pointer to the node which is being demanded */ + // follow the pointer to the node which is being demanded (StgClosure *)bf->node = - evacuate((StgClosure *)bf->node); - /* follow the link to the rest of the blocking queue */ + evacuate((StgClosure *)bf->node); + // follow the link to the rest of the blocking queue (StgClosure *)bf->link = - evacuate((StgClosure *)bf->link); + evacuate((StgClosure *)bf->link); if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)bf); + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)bf); } IF_DEBUG(gc, belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", - bf, info_type((StgClosure *)bf), - bf->node, info_type(bf->node))); + bf, info_type((StgClosure *)bf), + bf->node, info_type(bf->node))); p += sizeofW(StgBlockedFetch); break; - } + } #ifdef DIST case REMOTE_REF: #endif case FETCH_ME: - p += sizeofW(StgFetchMe); - break; // nothing to do in this case + p += sizeofW(StgFetchMe); + break; // nothing to do in this case case FETCH_ME_BQ: // cf. BLACKHOLE_BQ - { + { StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; (StgClosure *)fmbq->blocking_queue = - evacuate((StgClosure *)fmbq->blocking_queue); + evacuate((StgClosure *)fmbq->blocking_queue); if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)fmbq); + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)fmbq); } IF_DEBUG(gc, belch("@@ scavenge: %p (%s) exciting, isn't it", - p, info_type((StgClosure *)p))); + p, info_type((StgClosure *)p))); p += sizeofW(StgFetchMeBlockingQueue); break; - } + } #endif - case EVACUATED: - barf("scavenge: unimplemented/strange closure type %d @ %p", - info->type, p); - default: - barf("scavenge: unimplemented/strange closure type %d @ %p", - info->type, p); + barf("scavenge: unimplemented/strange closure type %d @ %p", + info->type, p); } /* If we didn't manage to promote all the objects pointed to by @@ -2256,8 +2290,8 @@ scavenge(step *stp) * mutable (because it contains old-to-new generation pointers). */ if (failed_to_evac) { - mkMutCons((StgClosure *)q, &generations[evac_gen]); - failed_to_evac = rtsFalse; + failed_to_evac = rtsFalse; + mkMutCons((StgClosure *)q, &generations[evac_gen]); } } @@ -2266,13 +2300,275 @@ scavenge(step *stp) } /* ----------------------------------------------------------------------------- + Scavenge everything on the mark stack. + + This is slightly different from scavenge(): + - we don't walk linearly through the objects, so the scavenger + doesn't need to advance the pointer on to the next object. + -------------------------------------------------------------------------- */ + +static void +scavenge_mark_stack(void) +{ + StgPtr p; + StgInfoTable *info; + nat saved_evac_gen; + + evac_gen = oldest_gen->no; + saved_evac_gen = evac_gen; + + while (!mark_stack_empty()) { + p = pop_mark_stack(); + + info = get_itbl((StgClosure *)p); + ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info))); + + switch (info->type) { + + case MVAR: + /* treat MVars specially, because we don't want to evacuate the + * mut_link field in the middle of the closure. 
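+ * (Editor's note, not part of the original patch: as the comment says,
+ * StgMVar keeps its mut_link field between its pointer fields
+ * (head/tail/value), so the generic payload loop would evacuate it by
+ * mistake; the same reasoning motivates the field-by-field MVAR case
+ * in update_fwd() in GCCompact.c below.)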
+ */ + { + StgMVar *mvar = ((StgMVar *)p); + evac_gen = 0; + (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); + (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); + (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); + evac_gen = saved_evac_gen; + failed_to_evac = rtsFalse; // mutable. + break; + } + + case FUN_2_0: + case THUNK_2_0: + scavenge_srt(info); + case CONSTR_2_0: + ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]); + ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + break; + + case FUN_1_0: + case FUN_1_1: + case THUNK_1_0: + case THUNK_1_1: + scavenge_srt(info); + case CONSTR_1_0: + case CONSTR_1_1: + ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + break; + + case FUN_0_1: + case FUN_0_2: + case THUNK_0_1: + case THUNK_0_2: + scavenge_srt(info); + case CONSTR_0_1: + case CONSTR_0_2: + break; + + case FUN: + case THUNK: + scavenge_srt(info); + // fall through + + case CONSTR: + case WEAK: + case FOREIGN: + case STABLE_NAME: + case BCO: + { + StgPtr end; + + end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; + for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { + (StgClosure *)*p = evacuate((StgClosure *)*p); + } + break; + } + + case IND_PERM: + // don't need to do anything here: the only possible case + // is that we're in a 1-space compacting collector, with + // no "old" generation. + break; + + case IND_OLDGEN: + case IND_OLDGEN_PERM: + ((StgIndOldGen *)p)->indirectee = + evacuate(((StgIndOldGen *)p)->indirectee); + if (failed_to_evac) { + recordOldToNewPtrs((StgMutClosure *)p); + } + failed_to_evac = rtsFalse; + break; + + case MUT_VAR: + evac_gen = 0; + ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); + evac_gen = saved_evac_gen; + failed_to_evac = rtsFalse; + break; + + case MUT_CONS: + // ignore these + failed_to_evac = rtsFalse; + break; + + case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: + case BLACKHOLE: + case ARR_WORDS: + break; + + case BLACKHOLE_BQ: + { + StgBlockingQueue *bh = (StgBlockingQueue *)p; + (StgClosure *)bh->blocking_queue = + evacuate((StgClosure *)bh->blocking_queue); + failed_to_evac = rtsFalse; + break; + } + + case THUNK_SELECTOR: + { + StgSelector *s = (StgSelector *)p; + s->selectee = evacuate(s->selectee); + break; + } + + case AP_UPD: // same as PAPs + case PAP: + /* Treat a PAP just like a section of stack, not forgetting to + * evacuate the function pointer too... + */ + { + StgPAP* pap = (StgPAP *)p; + + pap->fun = evacuate(pap->fun); + scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); + break; + } + + case MUT_ARR_PTRS: + // follow everything + { + StgPtr next; + + evac_gen = 0; // repeatedly mutable + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + (StgClosure *)*p = evacuate((StgClosure *)*p); + } + evac_gen = saved_evac_gen; + failed_to_evac = rtsFalse; // mutable anyhow. + break; + } + + case MUT_ARR_PTRS_FROZEN: + // follow everything + { + StgPtr next; + + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + (StgClosure *)*p = evacuate((StgClosure *)*p); + } + break; + } + + case TSO: + { + StgTSO *tso = (StgTSO *)p; + evac_gen = 0; + scavengeTSO(tso); + evac_gen = saved_evac_gen; + failed_to_evac = rtsFalse; + break; + } + +#if defined(PAR) + case RBH: // cf. 
BLACKHOLE_BQ + { +#if 0 + nat size, ptrs, nonptrs, vhs; + char str[80]; + StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str); +#endif + StgRBH *rbh = (StgRBH *)p; + (StgClosure *)rbh->blocking_queue = + evacuate((StgClosure *)rbh->blocking_queue); + recordMutable((StgMutClosure *)rbh); + failed_to_evac = rtsFalse; // mutable anyhow. + IF_DEBUG(gc, + belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", + p, info_type(p), (StgClosure *)rbh->blocking_queue)); + break; + } + + case BLOCKED_FETCH: + { + StgBlockedFetch *bf = (StgBlockedFetch *)p; + // follow the pointer to the node which is being demanded + (StgClosure *)bf->node = + evacuate((StgClosure *)bf->node); + // follow the link to the rest of the blocking queue + (StgClosure *)bf->link = + evacuate((StgClosure *)bf->link); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)bf); + } + IF_DEBUG(gc, + belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", + bf, info_type((StgClosure *)bf), + bf->node, info_type(bf->node))); + break; + } + +#ifdef DIST + case REMOTE_REF: +#endif + case FETCH_ME: + break; // nothing to do in this case + + case FETCH_ME_BQ: // cf. BLACKHOLE_BQ + { + StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; + (StgClosure *)fmbq->blocking_queue = + evacuate((StgClosure *)fmbq->blocking_queue); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)fmbq); + } + IF_DEBUG(gc, + belch("@@ scavenge: %p (%s) exciting, isn't it", + p, info_type((StgClosure *)p))); + break; + } +#endif + + default: + barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p", + info->type, p); + } + + if (failed_to_evac) { + failed_to_evac = rtsFalse; + mkMutCons((StgClosure *)p, &generations[evac_gen]); + } + + } // while (!mark_stack_empty()) +} + +/* ----------------------------------------------------------------------------- Scavenge one object. This is used for objects that are temporarily marked as mutable because they contain old-to-new generation pointers. Only certain objects can have this property. -------------------------------------------------------------------------- */ -//@cindex scavenge_one static rtsBool scavenge_one(StgClosure *p) @@ -2285,15 +2581,10 @@ scavenge_one(StgClosure *p) info = get_itbl(p); - /* ngoq moHqu'! - if (info->type==RBH) - info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure - */ - switch (info -> type) { case FUN: - case FUN_1_0: /* hardly worth specialising these guys */ + case FUN_1_0: // hardly worth specialising these guys case FUN_0_1: case FUN_1_1: case FUN_0_2: @@ -2351,12 +2642,25 @@ scavenge_one(StgClosure *p) } case IND_OLDGEN: - /* This might happen if for instance a MUT_CONS was pointing to a - * THUNK which has since been updated. The IND_OLDGEN will - * be on the mutable list anyway, so we don't need to do anything - * here. - */ - break; + /* This might happen if for instance a MUT_CONS was pointing to a + * THUNK which has since been updated. The IND_OLDGEN will + * be on the mutable list anyway, so we don't need to do anything + * here. 
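+ * (Editor's note, not part of the original patch: a MUT_CONS is a
+ * MUT_VAR-shaped cell that the collector itself allocates, via the
+ * mkMutCons() calls in the failed_to_evac epilogues, purely to keep
+ * an object that may still hold old-to-new pointers on the
+ * mut_once_list until those pointers can be promoted.)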
+ */ + break; + + case MUT_ARR_PTRS_FROZEN: + { + // follow everything + StgPtr q, next; + + q = (StgPtr)p; + next = q + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (q = (P_)((StgMutArrPtrs *)p)->payload; q < next; q++) { + (StgClosure *)*q = evacuate((StgClosure *)*q); + } + break; + } default: barf("scavenge_one: strange object %d", (int)(info->type)); @@ -2367,7 +2671,6 @@ scavenge_one(StgClosure *p) return (no_luck); } - /* ----------------------------------------------------------------------------- Scavenging mutable lists. @@ -2375,7 +2678,6 @@ scavenge_one(StgClosure *p) generations older than the one being collected) as roots. We also remove non-mutable objects from the mutable list at this point. -------------------------------------------------------------------------- */ -//@cindex scavenge_mut_once_list static void scavenge_mut_once_list(generation *gen) @@ -2392,7 +2694,7 @@ scavenge_mut_once_list(generation *gen) for (; p != END_MUT_LIST; p = next, next = p->mut_link) { - /* make sure the info pointer is into text space */ + // make sure the info pointer is into text space ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p)) || IS_HUGS_CONSTR_INFO(GET_INFO(p)))); @@ -2412,7 +2714,7 @@ scavenge_mut_once_list(generation *gen) ((StgIndOldGen *)p)->indirectee = evacuate(((StgIndOldGen *)p)->indirectee); -#ifdef DEBUG +#if 0 && defined(DEBUG) if (RtsFlags.DebugFlags.gc) /* Debugging code to print out the size of the thing we just * promoted */ @@ -2459,23 +2761,24 @@ scavenge_mut_once_list(generation *gen) } continue; - case MUT_VAR: - /* MUT_CONS is a kind of MUT_VAR, except it that we try to remove - * it from the mutable list if possible by promoting whatever it - * points to. - */ - ASSERT(p->header.info == &stg_MUT_CONS_info); - if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) { - /* didn't manage to promote everything, so put the - * MUT_CONS back on the list. + case MUT_CONS: + /* MUT_CONS is a kind of MUT_VAR, except that we try to remove + * it from the mutable list if possible by promoting whatever it + * points to. */ - p->mut_link = new_list; - new_list = p; - } - continue; + scavenge_one((StgClosure *)((StgMutVar *)p)->var); + if (failed_to_evac == rtsTrue) { + /* didn't manage to promote everything, so put the + * MUT_CONS back on the list. + */ + failed_to_evac = rtsFalse; + p->mut_link = new_list; + new_list = p; + } + continue; default: - /* shouldn't have anything else on the mutables list */ + // shouldn't have anything else on the mutables list barf("scavenge_mut_once_list: strange object? %d", (int)(info->type)); } } @@ -2483,7 +2786,6 @@ scavenge_mut_once_list(generation *gen) gen->mut_once_list = new_list; } -//@cindex scavenge_mutable_list static void scavenge_mutable_list(generation *gen) @@ -2499,7 +2801,7 @@ scavenge_mutable_list(generation *gen) for (; p != END_MUT_LIST; p = next, next = p->mut_link) { - /* make sure the info pointer is into text space */ + // make sure the info pointer is into text space ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p)) || IS_HUGS_CONSTR_INFO(GET_INFO(p)))); @@ -2510,30 +2812,8 @@ scavenge_mutable_list(generation *gen) */ switch(info->type) { - case MUT_ARR_PTRS_FROZEN: - /* remove this guy from the mutable list, but follow the ptrs - * anyway (and make sure they get promoted to this gen).
- */ - { - StgPtr end, q; - - end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - evac_gen = gen->no; - for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { - (StgClosure *)*q = evacuate((StgClosure *)*q); - } - evac_gen = 0; - - if (failed_to_evac) { - failed_to_evac = rtsFalse; - p->mut_link = gen->mut_list; - gen->mut_list = p; - } - continue; - } - case MUT_ARR_PTRS: - /* follow everything */ + // follow everything p->mut_link = gen->mut_list; gen->mut_list = p; { @@ -2547,15 +2827,10 @@ scavenge_mutable_list(generation *gen) } case MUT_VAR: - /* MUT_CONS is a kind of MUT_VAR, except that we try to remove - * it from the mutable list if possible by promoting whatever it - * points to. - */ - ASSERT(p->header.info != &stg_MUT_CONS_info); - ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); - p->mut_link = gen->mut_list; - gen->mut_list = p; - continue; + ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); + p->mut_link = gen->mut_list; + gen->mut_list = p; + continue; case MVAR: { @@ -2637,10 +2912,10 @@ scavenge_mutable_list(generation *gen) case BLOCKED_FETCH: { StgBlockedFetch *bf = (StgBlockedFetch *)p; - /* follow the pointer to the node which is being demanded */ + // follow the pointer to the node which is being demanded (StgClosure *)bf->node = evacuate((StgClosure *)bf->node); - /* follow the link to the rest of the blocking queue */ + // follow the link to the rest of the blocking queue (StgClosure *)bf->link = evacuate((StgClosure *)bf->link); if (failed_to_evac) { @@ -2674,13 +2949,12 @@ scavenge_mutable_list(generation *gen) #endif default: - /* shouldn't have anything else on the mutables list */ + // shouldn't have anything else on the mutables list barf("scavenge_mutable_list: strange object? %d", (int)(info->type)); } } } -//@cindex scavenge_static static void scavenge_static(void) @@ -2701,7 +2975,7 @@ scavenge_static(void) if (info->type==RBH) info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure */ - /* make sure the info pointer is into text space */ + // make sure the info pointer is into text space ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p)) || IS_HUGS_CONSTR_INFO(GET_INFO(p)))); @@ -2743,7 +3017,7 @@ scavenge_static(void) StgPtr q, next; next = (P_)p->payload + info->layout.payload.ptrs; - /* evacuate the pointers */ + // evacuate the pointers for (q = (P_)p->payload; q < next; q++) { (StgClosure *)*q = evacuate((StgClosure *)*q); } @@ -2769,7 +3043,6 @@ scavenge_static(void) objects pointed to by it. We can use the same code for walking PAPs, since these are just sections of copied stack. -------------------------------------------------------------------------- */ -//@cindex scavenge_stack static void scavenge_stack(StgPtr p, StgPtr stack_end) @@ -2789,7 +3062,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) while (p < stack_end) { q = *(P_ *)p; - /* If we've got a tag, skip over that many words on the stack */ + // If we've got a tag, skip over that many words on the stack if (IS_ARG_TAG((W_)q)) { p += ARG_SIZE(q); p++; continue; @@ -2799,10 +3072,10 @@ scavenge_stack(StgPtr p, StgPtr stack_end) */ if (! LOOKS_LIKE_GHC_INFO(q) ) { #ifdef DEBUG - if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */ + if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure? ASSERT(closure_STATIC((StgClosure *)q)); } - /* otherwise, must be a pointer into the allocation space. */ + // otherwise, must be a pointer into the allocation space. 
#endif (StgClosure *)*p = evacuate((StgClosure *)q); @@ -2819,13 +3092,13 @@ scavenge_stack(StgPtr p, StgPtr stack_end) switch (info->type) { - /* Dynamic bitmap: the mask is stored on the stack */ + // Dynamic bitmap: the mask is stored on the stack case RET_DYN: bitmap = ((StgRetDyn *)p)->liveness; p = (P_)&((StgRetDyn *)p)->payload[0]; goto small_bitmap; - /* probably a slow-entry point return address: */ + // probably a slow-entry point return address: case FUN: case FUN_STATIC: { @@ -2836,7 +3109,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)", old_p, p, old_p+1)); #else - p++; /* what if FHS!=1 !? -- HWL */ + p++; // what if FHS!=1 !? -- HWL #endif goto follow_srt; } @@ -2848,10 +3121,16 @@ scavenge_stack(StgPtr p, StgPtr stack_end) case UPDATE_FRAME: { StgUpdateFrame *frame = (StgUpdateFrame *)p; + + p += sizeofW(StgUpdateFrame); + +#ifndef not_yet + frame->updatee = evacuate(frame->updatee); + continue; +#else // specialised code for update frames, not sure if it's worth it. StgClosure *to; nat type = get_itbl(frame->updatee)->type; - p += sizeofW(StgUpdateFrame); if (type == EVACUATED) { frame->updatee = evacuate(frame->updatee); continue; @@ -2865,7 +3144,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) continue; } - /* Don't promote blackholes */ + // Don't promote blackholes stp = bd->step; if (!(stp->gen_no == 0 && stp->no != 0 && @@ -2891,9 +3170,10 @@ scavenge_stack(StgPtr p, StgPtr stack_end) barf("scavenge_stack: UPDATE_FRAME updatee"); } } +#endif } - /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */ + // small bitmap (< 32 entries, or 64 on a 64-bit machine) case STOP_FRAME: case CATCH_FRAME: case SEQ_FRAME: @@ -2902,7 +3182,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) case RET_VEC_SMALL: bitmap = info->layout.bitmap; p++; - /* this assumes that the payload starts immediately after the info-ptr */ + // this assumes that the payload starts immediately after the info-ptr small_bitmap: while (bitmap != 0) { if ((bitmap & 1) == 0) { @@ -2916,7 +3196,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) scavenge_srt(info); continue; - /* large bitmap (> 32 entries) */ + // large bitmap (> 32 entries) case RET_BIG: case RET_VEC_BIG: { @@ -2945,7 +3225,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) } } - /* and don't forget to follow the SRT */ + // and don't forget to follow the SRT goto follow_srt; } @@ -2963,17 +3243,15 @@ scavenge_stack(StgPtr p, StgPtr stack_end) objects are (repeatedly) mutable, so most of the time evac_gen will be zero. --------------------------------------------------------------------------- */ -//@cindex scavenge_large static void scavenge_large(step *stp) { bdescr *bd; - StgPtr p; + StgPtr p, q; const StgInfoTable* info; - nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */ + nat saved_evac_gen = evac_gen; // used for temporarily changing evac_gen - evac_gen = 0; /* most objects are mutable */ bd = stp->new_large_objects; for (; bd != NULL; bd = stp->new_large_objects) { @@ -2989,66 +3267,77 @@ scavenge_large(step *stp) p = bd->start; info = get_itbl((StgClosure *)p); + // only certain objects can be "large"... + q = p; switch (info->type) { - /* only certain objects can be "large"... 
*/ - case ARR_WORDS: - /* nothing to follow */ - continue; + // nothing to follow + break; case MUT_ARR_PTRS: - /* follow everything */ - { + { + // follow everything StgPtr next; - + + evac_gen = 0; // repeatedly mutable + recordMutable((StgMutClosure *)p); next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + (StgClosure *)*p = evacuate((StgClosure *)*p); } - continue; - } + evac_gen = saved_evac_gen; + failed_to_evac = rtsFalse; + break; + } case MUT_ARR_PTRS_FROZEN: - /* follow everything */ { - StgPtr start = p, next; - - evac_gen = saved_evac_gen; /* not really mutable */ - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); - } - evac_gen = 0; - if (failed_to_evac) { - recordMutable((StgMutClosure *)start); - } - continue; + // follow everything + StgPtr next; + + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + (StgClosure *)*p = evacuate((StgClosure *)*p); + } + break; } case TSO: - scavengeTSO((StgTSO *)p); - continue; + { + StgTSO *tso = (StgTSO *)p; + + evac_gen = 0; // repeatedly mutable + scavengeTSO(tso); + recordMutable((StgMutClosure *)tso); + evac_gen = saved_evac_gen; + failed_to_evac = rtsFalse; + break; + } case AP_UPD: case PAP: { StgPAP* pap = (StgPAP *)p; - - evac_gen = saved_evac_gen; /* not really mutable */ pap->fun = evacuate(pap->fun); scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); - evac_gen = 0; - continue; + break; } default: barf("scavenge_large: unknown/strange object %d", (int)(info->type)); } + + if (failed_to_evac) { + failed_to_evac = rtsFalse; + mkMutCons((StgClosure *)q, &generations[evac_gen]); + } } } -//@cindex zero_static_object_list +/* ----------------------------------------------------------------------------- + Initialising the static object & mutable lists + -------------------------------------------------------------------------- */ static void zero_static_object_list(StgClosure* first_static) @@ -3098,7 +3387,7 @@ revertCAFs( void ) { c->header.info = c->saved_info; c->saved_info = NULL; - /* could, but not necessary: c->static_link = NULL; */ + // could, but not necessary: c->static_link = NULL; } caf_list = NULL; } @@ -3128,8 +3417,7 @@ scavengeCAFs( void ) time. -------------------------------------------------------------------------- */ -#ifdef DEBUG -//@cindex gcCAFs +#if 0 && defined(DEBUG) static void gcCAFs(void) @@ -3151,7 +3439,7 @@ gcCAFs(void) if (STATIC_LINK(info,p) == NULL) { IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p)); - /* black hole it */ + // black hole it SET_INFO(p,&stg_BLACKHOLE_info); p = STATIC_LINK2(info,p); *pp = p; @@ -3164,12 +3452,10 @@ gcCAFs(void) } - /* fprintf(stderr, "%d CAFs live\n", i); */ + // fprintf(stderr, "%d CAFs live\n", i); } #endif -//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection -//@subsection Lazy black holing /* ----------------------------------------------------------------------------- Lazy black holing. @@ -3178,7 +3464,6 @@ gcCAFs(void) some work, we have to run down the stack and black-hole all the closures referred to by update frames. 
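
   (Editor's note, not part of the original patch: "black-holing" the
   target of an update frame amounts to a single store, roughly

       SET_INFO(frame->updatee, &stg_BLACKHOLE_info);

   after which the GC can drop the thunk's free variables, and any
   other thread entering the closure blocks instead of repeating the
   evaluation.)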
-------------------------------------------------------------------------- */ -//@cindex threadLazyBlackHole static void threadLazyBlackHole(StgTSO *tso) @@ -3234,8 +3519,6 @@ threadLazyBlackHole(StgTSO *tso) } } -//@node Stack squeezing, Pausing a thread, Lazy black holing -//@subsection Stack squeezing /* ----------------------------------------------------------------------------- * Stack squeezing @@ -3244,15 +3527,14 @@ threadLazyBlackHole(StgTSO *tso) * lazy black holing here. * * -------------------------------------------------------------------------- */ -//@cindex threadSqueezeStack static void threadSqueezeStack(StgTSO *tso) { lnat displacement = 0; StgUpdateFrame *frame; - StgUpdateFrame *next_frame; /* Temporally next */ - StgUpdateFrame *prev_frame; /* Temporally previous */ + StgUpdateFrame *next_frame; // Temporally next + StgUpdateFrame *prev_frame; // Temporally previous StgPtr bottom; rtsBool prev_was_update_frame; #if DEBUG @@ -3282,7 +3564,7 @@ threadSqueezeStack(StgTSO *tso) */ next_frame = NULL; - /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */ + // bottom - sizeof(StgStopFrame) is the STOP_FRAME while ((P_)frame < bottom - sizeofW(StgStopFrame)) { prev_frame = frame->link; frame->link = next_frame; @@ -3361,7 +3643,7 @@ threadSqueezeStack(StgTSO *tso) if (prev_was_update_frame && is_update_frame && (P_)prev_frame == frame_bottom + displacement) { - /* Now squeeze out the current frame */ + // Now squeeze out the current frame StgClosure *updatee_keep = prev_frame->updatee; StgClosure *updatee_bypass = frame->updatee; @@ -3379,16 +3661,16 @@ threadSqueezeStack(StgTSO *tso) * and probably less bug prone, although it's probably much * slower --SDM */ -#if 0 /* do it properly... */ +#if 0 // do it properly... # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) # error Unimplemented lazy BH warning. (KSW 1999-01) # endif if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info ) { - /* Sigh. It has one. Don't lose those threads! */ + // Sigh. It has one. Don't lose those threads! if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) { - /* Urgh. Two queues. Merge them. */ + // Urgh. Two queues. Merge them. P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue; while (keep_tso->link != END_TSO_QUEUE) { @@ -3397,13 +3679,13 @@ threadSqueezeStack(StgTSO *tso) keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue; } else { - /* For simplicity, just swap the BQ for the BH */ + // For simplicity, just swap the BQ for the BH P_ temp = updatee_keep; updatee_keep = updatee_bypass; updatee_bypass = temp; - /* Record the swap in the kept frame (below) */ + // Record the swap in the kept frame (below) prev_frame->updatee = updatee_keep; } } @@ -3422,16 +3704,16 @@ threadSqueezeStack(StgTSO *tso) * screw us up if we don't check. */ if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) { - /* this wakes the threads up */ + // this wakes the threads up UPD_IND_NOLOCK(updatee_bypass, updatee_keep); } - sp = (P_)frame - 1; /* sp = stuff to slide */ + sp = (P_)frame - 1; // sp = stuff to slide displacement += sizeofW(StgUpdateFrame); } else { - /* No squeeze for this frame */ - sp = frame_bottom - 1; /* Keep the current frame */ + // No squeeze for this frame + sp = frame_bottom - 1; // Keep the current frame /* Do lazy black-holing. 
*/ @@ -3465,12 +3747,12 @@ threadSqueezeStack(StgTSO *tso) } } - /* Fix the link in the current frame (should point to the frame below) */ + // Fix the link in the current frame (should point to the frame below) frame->link = prev_frame; prev_was_update_frame = is_update_frame; } - /* Now slide all words from sp up to the next frame */ + // Now slide all words from sp up to the next frame if (displacement > 0) { P_ next_frame_bottom; @@ -3504,8 +3786,6 @@ threadSqueezeStack(StgTSO *tso) #endif } -//@node Pausing a thread, Index, Stack squeezing -//@subsection Pausing a thread /* ----------------------------------------------------------------------------- * Pausing a thread @@ -3514,12 +3794,11 @@ threadSqueezeStack(StgTSO *tso) * here. We also take the opportunity to do stack squeezing if it's * turned on. * -------------------------------------------------------------------------- */ -//@cindex threadPaused void threadPaused(StgTSO *tso) { if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue ) - threadSqueezeStack(tso); /* does black holing too */ + threadSqueezeStack(tso); // does black holing too else threadLazyBlackHole(tso); } @@ -3529,7 +3808,6 @@ threadPaused(StgTSO *tso) * -------------------------------------------------------------------------- */ #if DEBUG -//@cindex printMutOnceList void printMutOnceList(generation *gen) { @@ -3546,7 +3824,6 @@ printMutOnceList(generation *gen) fputc('\n', stderr); } -//@cindex printMutableList void printMutableList(generation *gen) { @@ -3563,7 +3840,6 @@ printMutableList(generation *gen) fputc('\n', stderr); } -//@cindex maybeLarge static inline rtsBool maybeLarge(StgClosure *closure) { @@ -3578,41 +3854,4 @@ maybeLarge(StgClosure *closure) } -#endif /* DEBUG */ - -//@node Index, , Pausing a thread -//@subsection Index - -//@index -//* GarbageCollect:: @cindex\s-+GarbageCollect -//* MarkRoot:: @cindex\s-+MarkRoot -//* RevertCAFs:: @cindex\s-+RevertCAFs -//* addBlock:: @cindex\s-+addBlock -//* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list -//* copy:: @cindex\s-+copy -//* copyPart:: @cindex\s-+copyPart -//* evacuate:: @cindex\s-+evacuate -//* evacuate_large:: @cindex\s-+evacuate_large -//* gcCAFs:: @cindex\s-+gcCAFs -//* isAlive:: @cindex\s-+isAlive -//* maybeLarge:: @cindex\s-+maybeLarge -//* mkMutCons:: @cindex\s-+mkMutCons -//* printMutOnceList:: @cindex\s-+printMutOnceList -//* printMutableList:: @cindex\s-+printMutableList -//* relocate_TSO:: @cindex\s-+relocate_TSO -//* scavenge:: @cindex\s-+scavenge -//* scavenge_large:: @cindex\s-+scavenge_large -//* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list -//* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list -//* scavenge_one:: @cindex\s-+scavenge_one -//* scavenge_srt:: @cindex\s-+scavenge_srt -//* scavenge_stack:: @cindex\s-+scavenge_stack -//* scavenge_static:: @cindex\s-+scavenge_static -//* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole -//* threadPaused:: @cindex\s-+threadPaused -//* threadSqueezeStack:: @cindex\s-+threadSqueezeStack -//* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list -//* upd_evacuee:: @cindex\s-+upd_evacuee -//* zero_mutable_list:: @cindex\s-+zero_mutable_list -//* zero_static_object_list:: @cindex\s-+zero_static_object_list -//@end index +#endif // DEBUG diff --git a/ghc/rts/GC.h b/ghc/rts/GC.h deleted file mode 100644 index 9b0e9622cb..0000000000 --- a/ghc/rts/GC.h +++ /dev/null @@ -1,12 +0,0 @@ -/* ----------------------------------------------------------------------------- - * $Id: GC.h,v 1.6 2000/04/11 16:36:53 sewardj 
Exp $ - * - * (c) The GHC Team, 1998-1999 - * - * Prototypes for functions in GC.c - * - * ---------------------------------------------------------------------------*/ - -void threadPaused(StgTSO *); -StgClosure *isAlive(StgClosure *p); -void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ); diff --git a/ghc/rts/GCCompact.c b/ghc/rts/GCCompact.c new file mode 100644 index 0000000000..3aba9f55d0 --- /dev/null +++ b/ghc/rts/GCCompact.c @@ -0,0 +1,907 @@ +/* ----------------------------------------------------------------------------- + * $Id: GCCompact.c,v 1.1 2001/07/23 17:23:19 simonmar Exp $ + * + * (c) The GHC Team 2001 + * + * Compacting garbage collector + * + * ---------------------------------------------------------------------------*/ + +#include "Rts.h" +#include "RtsUtils.h" +#include "RtsFlags.h" +#include "Storage.h" +#include "BlockAlloc.h" +#include "MBlock.h" +#include "GCCompact.h" +#include "Schedule.h" +#include "StablePriv.h" + +static inline void +thread( StgPtr p ) +{ + StgPtr q = (StgPtr)*p; + ASSERT(!LOOKS_LIKE_GHC_INFO(q)); + if (HEAP_ALLOCED(q)) { + *p = (StgWord)*q; + *q = (StgWord)p; + } +} + +static inline void +unthread( StgPtr p, StgPtr free ) +{ + StgPtr q = (StgPtr)*p, r; + + while (!LOOKS_LIKE_GHC_INFO(q)) { + r = (StgPtr)*q; + *q = (StgWord)free; + q = r; + } + *p = (StgWord)q; +} + +static inline StgInfoTable * +get_threaded_info( StgPtr p ) +{ + StgPtr q = (P_)GET_INFO((StgClosure *)p); + + while (!LOOKS_LIKE_GHC_INFO(q)) { + q = (P_)*q; + } + return INFO_PTR_TO_STRUCT((StgInfoTable *)q); +} + +// A word-aligned memmove will be faster for small objects than libc's or gcc's. +// Remember, the two regions *might* overlap, but: to <= from. +static inline void +move(StgPtr to, StgPtr from, nat size) +{ + for(; size > 0; --size) { + *to++ = *from++; + } +} + +static inline nat +obj_sizeW( StgClosure *p, StgInfoTable *info ) +{ + switch (info->type) { + case FUN_0_1: + case CONSTR_0_1: + case FUN_1_0: + case CONSTR_1_0: + return sizeofW(StgHeader) + 1; + case THUNK_0_1: + case THUNK_0_2: + case FUN_0_2: + case CONSTR_0_2: + case THUNK_1_0: + case THUNK_1_1: + case FUN_1_1: + case CONSTR_1_1: + case THUNK_2_0: + case FUN_2_0: + case CONSTR_2_0: + return sizeofW(StgHeader) + 2; // MIN_UPD_SIZE + case THUNK_SELECTOR: + return THUNK_SELECTOR_sizeW(); + case AP_UPD: + case PAP: + return pap_sizeW((StgPAP *)p); + case ARR_WORDS: + return arr_words_sizeW((StgArrWords *)p); + case MUT_ARR_PTRS: + case MUT_ARR_PTRS_FROZEN: + return mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + case TSO: + return tso_sizeW((StgTSO *)p); + default: + return sizeW_fromITBL(info); + } +} + +static void +thread_static( StgClosure* p ) +{ + const StgInfoTable *info; + + // keep going until we've threaded all the objects on the linked + // list... + while (p != END_OF_STATIC_LIST) { + + info = get_itbl(p); + switch (info->type) { + + case IND_STATIC: + thread((StgPtr)&((StgInd *)p)->indirectee); + break; + + case THUNK_STATIC: + case FUN_STATIC: + case CONSTR_STATIC: + break; + + default: + barf("thread_static: strange closure %d", (int)(info->type)); + } + + p = STATIC_LINK(info,p); + } +} + +static void +thread_stack(StgPtr p, StgPtr stack_end) +{ + StgPtr q; + const StgInfoTable* info; + StgWord32 bitmap; + + // highly similar to scavenge_stack, but we do pointer threading here. 
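+
+    // Editor's sketch, not part of the original patch: how thread()
+    // and unthread() above behave on two fields that both point at
+    // the same closure q (a, b, q and new_addr are illustrative
+    // locals, not RTS names):
+#if 0
+    thread(a);              // *a = q's info word;  q's header = a
+    thread(b);              // *b = old q header (== a);  q's header = b
+    // q's header now chains b -> a -> real info pointer, the only
+    // word in the chain that satisfies LOOKS_LIKE_GHC_INFO().
+    unthread(q, new_addr);  // writes new_addr into *b and *a,
+                            // then restores q's info pointer
+#endif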
+ + while (p < stack_end) { + q = (StgPtr)*p; + + // If we've got a tag, skip over that many words on the stack + if ( IS_ARG_TAG((W_)q) ) { + p += ARG_SIZE(q); + p++; continue; + } + + // Is q a pointer to a closure? + if ( !LOOKS_LIKE_GHC_INFO(q) ) { + thread(p); + p++; + continue; + } + + // Otherwise, q must be the info pointer of an activation + // record. All activation records have 'bitmap' style layout + // info. + // + info = get_itbl((StgClosure *)p); + + switch (info->type) { + + // Dynamic bitmap: the mask is stored on the stack + case RET_DYN: + bitmap = ((StgRetDyn *)p)->liveness; + p = (P_)&((StgRetDyn *)p)->payload[0]; + goto small_bitmap; + + // probably a slow-entry point return address: + case FUN: + case FUN_STATIC: + p++; + continue; + + // small bitmap (< 32 entries, or 64 on a 64-bit machine) + case UPDATE_FRAME: + case STOP_FRAME: + case CATCH_FRAME: + case SEQ_FRAME: + case RET_BCO: + case RET_SMALL: + case RET_VEC_SMALL: + bitmap = info->layout.bitmap; + p++; + // this assumes that the payload starts immediately after the info-ptr + small_bitmap: + while (bitmap != 0) { + if ((bitmap & 1) == 0) { + thread(p); + } + p++; + bitmap = bitmap >> 1; + } + continue; + + // large bitmap (> 32 entries) + case RET_BIG: + case RET_VEC_BIG: + { + StgPtr q; + StgLargeBitmap *large_bitmap; + nat i; + + large_bitmap = info->layout.large_bitmap; + p++; + + for (i=0; i<large_bitmap->size; i++) { + bitmap = large_bitmap->bitmap[i]; + q = p + sizeof(W_) * 8; + while (bitmap != 0) { + if ((bitmap & 1) == 0) { + thread(p); + } + p++; + bitmap = bitmap >> 1; + } + if (i+1 < large_bitmap->size) { + while (p < q) { + thread(p); + p++; + } + } + } + continue; + } + + default: + barf("thread_stack: weird activation record found on stack: %d", + (int)(info->type)); + } + } +} + +static void +update_fwd_large( bdescr *bd ) +{ + StgPtr p; + const StgInfoTable* info; + + for (; bd != NULL; bd = bd->link) { + + p = bd->start; + unthread(p,p); + info = get_itbl((StgClosure *)p); + + switch (info->type) { + + case ARR_WORDS: + // nothing to follow + continue; + + case MUT_ARR_PTRS: + case MUT_ARR_PTRS_FROZEN: + // follow everything + { + StgPtr next; + + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + thread(p); + } + continue; + } + + case TSO: + { + StgTSO *tso = (StgTSO *)p; + thread_stack(tso->sp, &(tso->stack[tso->stack_size])); + continue; + } + + case AP_UPD: + case PAP: + { + StgPAP* pap = (StgPAP *)p; + thread((StgPtr)&pap->fun); + thread_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); + continue; + } + + default: + barf("update_fwd_large: unknown/strange object %d", (int)(info->type)); + } + } +} + +static void +update_fwd( bdescr *blocks ) +{ + StgPtr p; + bdescr *bd; + StgInfoTable *info; + + bd = blocks; + +#if defined(PAR) + barf("update_fwd: ToDo"); +#endif + + // cycle through all the blocks in the step + for (; bd != NULL; bd = bd->link) { + p = bd->start; + + // linearly scan the objects in this block + while (p < bd->free) { + + /* unthread the info ptr */ + unthread(p,p); + info = get_itbl((StgClosure *)p); + + ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) + || IS_HUGS_CONSTR_INFO(info))); + + switch (info->type) { + case FUN_0_1: + case CONSTR_0_1: + p += sizeofW(StgHeader) + 1; + break; + + case FUN_1_0: + case CONSTR_1_0: + thread((StgPtr)&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 1; + break; + + case THUNK_1_0: + thread((StgPtr)&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 
2; // MIN_UPD_SIZE + break; + + case THUNK_0_1: // MIN_UPD_SIZE + case THUNK_0_2: + case FUN_0_2: + case CONSTR_0_2: + p += sizeofW(StgHeader) + 2; + break; + + case THUNK_1_1: + case FUN_1_1: + case CONSTR_1_1: + thread((StgPtr)&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; + + case THUNK_2_0: + case FUN_2_0: + case CONSTR_2_0: + thread((StgPtr)&((StgClosure *)p)->payload[0]); + thread((StgPtr)&((StgClosure *)p)->payload[1]); + p += sizeofW(StgHeader) + 2; + break; + + case FUN: + case THUNK: + case CONSTR: + case FOREIGN: + case STABLE_NAME: + case BCO: + case IND_PERM: + case MUT_VAR: + case MUT_CONS: + case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: + case BLACKHOLE: + case BLACKHOLE_BQ: + { + StgPtr end; + + end = (P_)((StgClosure *)p)->payload + + info->layout.payload.ptrs; + for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { + thread(p); + } + p += info->layout.payload.nptrs; + break; + } + + // the info table for a weak ptr lies about the number of ptrs + // (because we have special GC routines for them, but we + // want to use the standard evacuate code). So we have to + // special case here. + case WEAK: + { + StgWeak *w = (StgWeak *)p; + thread((StgPtr)&w->key); + thread((StgPtr)&w->value); + thread((StgPtr)&w->finalizer); + if (w->link != NULL) { + thread((StgPtr)&w->link); + } + p += sizeofW(StgWeak); + break; + } + + // again, the info table for MVar isn't suitable here (it includes + // the mut_link field as a pointer, and we don't want to + // thread it). + case MVAR: + { + StgMVar *mvar = (StgMVar *)p; + thread((StgPtr)&mvar->head); + thread((StgPtr)&mvar->tail); + thread((StgPtr)&mvar->value); + p += sizeofW(StgMVar); + break; + } + + // specialise this case, because we want to update the + // mut_link field too. 
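+    // (Editor's note, not part of the original patch: the layout
+    // sketched here is inferred from this file and may differ in
+    // detail from Closures.h -- roughly:
+    //
+    //     typedef struct {
+    //         StgHeader      header;
+    //         StgClosure    *indirectee;
+    //         StgMutClosure *mut_link;   // old-gen mutable-list link
+    //     } StgIndOldGen;
+    //
+    // The info table's payload layout describes only the indirectee,
+    // hence the hand-written threading of both fields below.)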
+ case IND_OLDGEN: + case IND_OLDGEN_PERM: + { + StgIndOldGen *ind = (StgIndOldGen *)p; + thread((StgPtr)&ind->indirectee); + if (ind->mut_link != NULL) { + thread((StgPtr)&ind->mut_link); + } + p += sizeofW(StgIndOldGen); + break; + } + + case THUNK_SELECTOR: + { + StgSelector *s = (StgSelector *)p; + thread((StgPtr)&s->selectee); + p += THUNK_SELECTOR_sizeW(); + break; + } + + case AP_UPD: // same as PAPs + case PAP: + { + StgPAP* pap = (StgPAP *)p; + + thread((P_)&pap->fun); + thread_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); + p += pap_sizeW(pap); + break; + } + + case ARR_WORDS: + p += arr_words_sizeW((StgArrWords *)p); + break; + + case MUT_ARR_PTRS: + case MUT_ARR_PTRS_FROZEN: + // follow everything + { + StgPtr next; + + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + thread(p); + } + break; + } + + case TSO: + { + StgTSO *tso = (StgTSO *)p; + thread_stack(tso->sp, &(tso->stack[tso->stack_size])); + thread((StgPtr)&tso->link); + thread((StgPtr)&tso->global_link); + p += tso_sizeW(tso); + break; + } + + default: + barf("update_fwd: unknown/strange object %d", (int)(info->type)); + } + } + } +} + +static void +update_fwd_compact( bdescr *blocks ) +{ + StgPtr p, q, free; + StgWord m; + bdescr *bd, *free_bd; + StgInfoTable *info; + nat size; + + bd = blocks; + free_bd = blocks; + free = free_bd->start; + +#if defined(PAR) + barf("update_fwd: ToDo"); +#endif + + // cycle through all the blocks in the step + for (; bd != NULL; bd = bd->link) { + p = bd->start; + + while (p < bd->free ) { + + while ( p < bd->free && !is_marked(p,bd) ) { + p++; + } + if (p >= bd->free) { + break; + } + +#if 0 + next: + m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord)))); + m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1)); + + while ( p < bd->free ) { + + if ((m & 1) == 0) { + m >>= 1; + p++; + if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) { + goto next; + } else { + continue; + } + } +#endif + + // Problem: we need to know the destination for this cell + // in order to unthread its info pointer. But we can't + // know the destination without the size, because we may + // spill into the next block. So we have to run down the + // threaded list and get the info ptr first.
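+            // Editor's sketch, not part of the original patch: what
+            // the get_threaded_info() call below amounts to, inline:
+#if 0
+            {
+                StgPtr w = (StgPtr)GET_INFO((StgClosure *)p);
+                while (!LOOKS_LIKE_GHC_INFO(w)) {
+                    w = (StgPtr)*w;    // follow one threaded field
+                }
+                // w is now the closure's real info pointer
+            }
+#endif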
+ info = get_threaded_info(p); + + q = p; + ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) + || IS_HUGS_CONSTR_INFO(info))); + + switch (info->type) { + case FUN_0_1: + case CONSTR_0_1: + p += sizeofW(StgHeader) + 1; + break; + + case FUN_1_0: + case CONSTR_1_0: + thread((StgPtr)&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 1; + break; + + case THUNK_1_0: + thread((StgPtr)&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE + break; + + case THUNK_0_1: // MIN_UPD_SIZE + case THUNK_0_2: + case FUN_0_2: + case CONSTR_0_2: + p += sizeofW(StgHeader) + 2; + break; + + case THUNK_1_1: + case FUN_1_1: + case CONSTR_1_1: + thread((StgPtr)&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; + + case THUNK_2_0: + case FUN_2_0: + case CONSTR_2_0: + thread((StgPtr)&((StgClosure *)p)->payload[0]); + thread((StgPtr)&((StgClosure *)p)->payload[1]); + p += sizeofW(StgHeader) + 2; + break; + + case FUN: + case THUNK: + case CONSTR: + case FOREIGN: + case STABLE_NAME: + case BCO: + case IND_PERM: + case MUT_VAR: + case MUT_CONS: + case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: + case BLACKHOLE: + case BLACKHOLE_BQ: + { + StgPtr end; + + end = (P_)((StgClosure *)p)->payload + + info->layout.payload.ptrs; + for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { + thread(p); + } + p += info->layout.payload.nptrs; + break; + } + + case WEAK: + { + StgWeak *w = (StgWeak *)p; + thread((StgPtr)&w->key); + thread((StgPtr)&w->value); + thread((StgPtr)&w->finalizer); + if (w->link != NULL) { + thread((StgPtr)&w->link); + } + p += sizeofW(StgWeak); + break; + } + + case MVAR: + { + StgMVar *mvar = (StgMVar *)p; + thread((StgPtr)&mvar->head); + thread((StgPtr)&mvar->tail); + thread((StgPtr)&mvar->value); + p += sizeofW(StgMVar); + break; + } + + case IND_OLDGEN: + case IND_OLDGEN_PERM: + // specialise this case, because we want to update the + // mut_link field too. 
+ { + StgIndOldGen *ind = (StgIndOldGen *)p; + thread((StgPtr)&ind->indirectee); + if (ind->mut_link != NULL) { + thread((StgPtr)&ind->mut_link); + } + p += sizeofW(StgIndOldGen); + break; + } + + case THUNK_SELECTOR: + { + StgSelector *s = (StgSelector *)p; + thread((StgPtr)&s->selectee); + p += THUNK_SELECTOR_sizeW(); + break; + } + + case AP_UPD: // same as PAPs + case PAP: + { + StgPAP* pap = (StgPAP *)p; + + thread((P_)&pap->fun); + thread_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); + p += pap_sizeW(pap); + break; + } + + case ARR_WORDS: + p += arr_words_sizeW((StgArrWords *)p); + break; + + case MUT_ARR_PTRS: + case MUT_ARR_PTRS_FROZEN: + // follow everything + { + StgPtr next; + + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + thread(p); + } + break; + } + + case TSO: + { + StgTSO *tso = (StgTSO *)p; + thread_stack(tso->sp, &(tso->stack[tso->stack_size])); + thread((StgPtr)&tso->link); + thread((StgPtr)&tso->global_link); + p += tso_sizeW(tso); + break; + } + + default: + barf("update_fwd: unknown/strange object %d", (int)(info->type)); + } + + size = p - q; + if (free + size > free_bd->start + BLOCK_SIZE_W) { + free_bd = free_bd->link; + free = free_bd->start; + } + + unthread(q,free); + free += size; +#if 0 + goto next; +#endif + } + } +} + +static void +update_bkwd( bdescr *blocks ) +{ + StgPtr p; + bdescr *bd; + StgInfoTable *info; + + bd = blocks; + +#if defined(PAR) + barf("update_bkwd: ToDo"); +#endif + + // cycle through all the blocks in the step + for (; bd != NULL; bd = bd->link) { + p = bd->start; + + // linearly scan the objects in this block + while (p < bd->free) { + + // must unthread before we look at the info ptr... + unthread(p,p); + + info = get_itbl((StgClosure *)p); + ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) + || IS_HUGS_CONSTR_INFO(info))); + + p += obj_sizeW((StgClosure *)p,info); + } + } +} + +static nat +update_bkwd_compact( step *stp ) +{ + StgPtr p, free; + StgWord m; + bdescr *bd, *free_bd; + StgInfoTable *info; + nat size, free_blocks; + + bd = free_bd = stp->blocks; + free = free_bd->start; + free_blocks = 1; + +#if defined(PAR) + barf("update_bkwd: ToDo"); +#endif + + // cycle through all the blocks in the step + for (; bd != NULL; bd = bd->link) { + p = bd->start; + + while (p < bd->free ) { + + while ( p < bd->free && !is_marked(p,bd) ) { + p++; + } + if (p >= bd->free) { + break; + } + +#if 0 + next: + m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord)))); + m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1)); + + while ( p < bd->free ) { + + if ((m & 1) == 0) { + m >>= 1; + p++; + if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) { + goto next; + } else { + continue; + } + } +#endif + + // must unthread before we look at the info ptr... + info = get_threaded_info(p); + + ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) + || IS_HUGS_CONSTR_INFO(info))); + + size = obj_sizeW((StgClosure *)p,info); + + if (free + size > free_bd->start + BLOCK_SIZE_W) { + // don't forget to update the free ptr in the block desc. + free_bd->free = free; + free_bd = free_bd->link; + free = free_bd->start; + free_blocks++; + } + + unthread(p,free); + move(free,p,size); + + // Rebuild the mutable list for the old generation. + // (the mut_once list is updated using threading, with + // special cases for IND_OLDGEN and MUT_CONS above). 
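+            // Editor's sketch, not part of the original patch:
+            // recordMutable() conceptually just links the relocated
+            // closure onto its generation's mut_list, something like
+#if 0
+            void recordMutable_sketch( StgMutClosure *c, generation *g )
+            {
+                c->mut_link = g->mut_list;
+                g->mut_list = c;
+            }
+            // (the real RTS derives the generation from Bdescr(c))
+#endif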
+ if (ip_MUTABLE(info)) { + recordMutable((StgMutClosure *)free); + } + + // relocate TSOs + if (info->type == TSO) { + move_TSO((StgTSO *)p, (StgTSO *)free); + } + + free += size; + p += size; +#if 0 + goto next; +#endif + } + } + + // free the remaining blocks and count what's left. + free_bd->free = free; + if (free_bd->link != NULL) { + freeChain(free_bd->link); + free_bd->link = NULL; + } + stp->n_blocks = free_blocks; + + return free_blocks; +} + +static void +update_bkwd_large( bdescr *blocks ) +{ + bdescr *bd; + + for (bd = blocks; bd != NULL; bd = bd->link ) { + unthread(bd->start, bd->start); + } +} + + +void +compact( void (*get_roots)(evac_fn) ) +{ + nat g, s, blocks; + step *stp; + extern StgWeak *old_weak_ptr_list; // tmp + + // 1. thread the roots + get_roots((evac_fn)thread); + + // the weak pointer lists... + if (weak_ptr_list != NULL) { + thread((StgPtr)&weak_ptr_list); + } + if (old_weak_ptr_list != NULL) { + thread((StgPtr)&old_weak_ptr_list); // tmp + } + + // mutable lists (ToDo: all gens) + thread((StgPtr)&oldest_gen->mut_list); + thread((StgPtr)&oldest_gen->mut_once_list); + + // the global thread list + thread((StgPtr)&all_threads); + + // the static objects + thread_static(scavenged_static_objects); + + // the stable pointer table + threadStablePtrTable((evac_fn)thread); + + // 2. update forward ptrs + for (g = 0; g < RtsFlags.GcFlags.generations; g++) { + for (s = 0; s < generations[g].n_steps; s++) { + stp = &generations[g].steps[s]; + IF_DEBUG(gc, fprintf(stderr,"update_fwd: %d.%d\n", stp->gen->no, stp->no);); + + update_fwd(stp->to_blocks); + update_fwd_large(stp->scavenged_large_objects); + if (g == RtsFlags.GcFlags.generations-1 && stp->blocks != NULL) { + IF_DEBUG(gc, fprintf(stderr,"update_fwd: %d.%d (compact)\n", stp->gen->no, stp->no);); + update_fwd_compact(stp->blocks); + } + } + } + + // 3. 
update backward ptrs + for (g = 0; g < RtsFlags.GcFlags.generations; g++) { + for (s = 0; s < generations[g].n_steps; s++) { + stp = &generations[g].steps[s]; + IF_DEBUG(gc, fprintf(stderr,"update_bkwd: %d.%d\n", stp->gen->no, stp->no);); + update_bkwd(stp->to_blocks); + update_bkwd_large(stp->scavenged_large_objects); + if (g == RtsFlags.GcFlags.generations-1 && stp->blocks != NULL) { + IF_DEBUG(gc, fprintf(stderr,"update_bkwd: %d.%d (compact)\n", stp->gen->no, stp->no);); + blocks = update_bkwd_compact(stp); + IF_DEBUG(gc, fprintf(stderr,"update_bkwd: %d.%d (compact, old: %d blocks, now %d blocks)\n", + stp->gen->no, stp->no, + stp->n_blocks, blocks);); + stp->n_blocks = blocks; + } + } + } +} diff --git a/ghc/rts/GCCompact.h b/ghc/rts/GCCompact.h new file mode 100644 index 0000000000..8244e87c45 --- /dev/null +++ b/ghc/rts/GCCompact.h @@ -0,0 +1,30 @@ +/* ----------------------------------------------------------------------------- + * $Id: GCCompact.h,v 1.1 2001/07/23 17:23:19 simonmar Exp $ + * + * (c) The GHC Team 1998-1999 + * + * Compacting garbage collector + * + * ---------------------------------------------------------------------------*/ + +static inline void +mark(StgPtr p, bdescr *bd) +{ + nat offset_within_block = p - bd->start; // in words + StgPtr bitmap_word = (StgPtr)bd->u.bitmap + + (offset_within_block / (sizeof(W_)*BITS_PER_BYTE)); + StgWord bit_mask = (StgWord)1 << (offset_within_block & (sizeof(W_)*BITS_PER_BYTE - 1)); + *bitmap_word |= bit_mask; +} + +static inline int +is_marked(StgPtr p, bdescr *bd) +{ + nat offset_within_block = p - bd->start; // in words + StgPtr bitmap_word = (StgPtr)bd->u.bitmap + + (offset_within_block / (sizeof(W_)*BITS_PER_BYTE)); + StgWord bit_mask = (StgWord)1 << (offset_within_block & (sizeof(W_)*BITS_PER_BYTE - 1)); + return ((*bitmap_word & bit_mask) != 0); +} + +void compact( void (*get_roots)(evac_fn) ); diff --git a/ghc/rts/PrimOps.hc b/ghc/rts/PrimOps.hc index 346705d472..9c591100f8 100644 --- a/ghc/rts/PrimOps.hc +++ b/ghc/rts/PrimOps.hc @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: PrimOps.hc,v 1.79 2001/07/06 14:11:38 simonmar Exp $ + * $Id: PrimOps.hc,v 1.80 2001/07/23 17:23:19 simonmar Exp $ * * (c) The GHC Team, 1998-2000 * @@ -594,8 +594,23 @@ FN_(word64ToIntegerzh_fast) FE_ } +#elif SIZEOF_VOID_P == 8 -#endif /* HAVE_LONG_LONG */ +FN_(word64ToIntegerzh_fast) +{ + FB_ + JMP_(wordToIntegerzh_fast); + FE_ +} + +FN_(int64ToIntegerzh_fast) +{ + FB_ + JMP_(intToIntegerzh_fast); + FE_ +} + +#endif /* SUPPORT_LONG_LONGS || SIZEOF_VOID_P == 8 */ /* ToDo: this is shockingly inefficient */ diff --git a/ghc/rts/Printer.c b/ghc/rts/Printer.c index ec4acdc0de..96c26774cd 100644 --- a/ghc/rts/Printer.c +++ b/ghc/rts/Printer.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: Printer.c,v 1.40 2001/04/03 10:09:23 rrt Exp $ + * $Id: Printer.c,v 1.41 2001/07/23 17:23:19 simonmar Exp $ * * (c) The GHC Team, 1994-2000.
* @@ -941,7 +941,7 @@ findPtr(P_ p, int follow) for (g = 0; g < RtsFlags.GcFlags.generations; g++) { for (s = 0; s < generations[g].n_steps; s++) { if (RtsFlags.GcFlags.generations == 1) { - bd = generations[g].steps[s].to_space; + bd = generations[g].steps[s].to_blocks; } else { bd = generations[g].steps[s].blocks; } diff --git a/ghc/rts/ProfHeap.c b/ghc/rts/ProfHeap.c index c0760dbfa7..40137b7234 100644 --- a/ghc/rts/ProfHeap.c +++ b/ghc/rts/ProfHeap.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: ProfHeap.c,v 1.22 2001/07/19 07:28:00 andy Exp $ + * $Id: ProfHeap.c,v 1.23 2001/07/23 17:23:19 simonmar Exp $ * * (c) The GHC Team, 1998-2000 * @@ -573,7 +573,7 @@ heapCensus(void) /* Only do heap profiling in a two-space heap */ ASSERT(RtsFlags.GcFlags.generations == 1); - bd = g0s0->to_space; + bd = g0s0->to_blocks; fprintf(hp_file, "BEGIN_SAMPLE %0.2f\n", time); diff --git a/ghc/rts/RtsFlags.c b/ghc/rts/RtsFlags.c index 1c840934d2..d9e51fc124 100644 --- a/ghc/rts/RtsFlags.c +++ b/ghc/rts/RtsFlags.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: RtsFlags.c,v 1.40 2001/07/23 10:42:37 simonmar Exp $ + * $Id: RtsFlags.c,v 1.41 2001/07/23 17:23:19 simonmar Exp $ * * (c) The AQUA Project, Glasgow University, 1994-1997 * (c) The GHC Team, 1998-1999 @@ -235,6 +235,7 @@ void initRtsFlagsDefaults(void) #else RtsFlags.GcFlags.generations = 2; RtsFlags.GcFlags.steps = 2; + RtsFlags.GcFlags.compact = rtsFalse; RtsFlags.GcFlags.squeezeUpdFrames = rtsTrue; #endif #ifdef RTS_GTK_FRONTPANEL @@ -387,6 +388,7 @@ usage_text[] = { " -m<n>% Minimum % of heap which must be available (default 3%)", " -G<n> Number of generations (default: 2)", " -T<n> Number of steps in younger generations (default: 2)", +" -c Enable compaction for the oldest generation", "", " -t<file> One-line GC statistics (default file: <program>.stat)", " -s<file> Summary GC statistics (with -Sstderr going to stderr)", @@ -617,6 +619,10 @@ error = rtsTrue; RtsFlags.GcFlags.ringBell = rtsTrue; break; + case 'c': + RtsFlags.GcFlags.compact = rtsTrue; + break; + case 'F': RtsFlags.GcFlags.oldGenFactor = atof(rts_argv[arg]+2); diff --git a/ghc/rts/RtsFlags.h b/ghc/rts/RtsFlags.h index a7e903fca5..26f9a9c689 100644 --- a/ghc/rts/RtsFlags.h +++ b/ghc/rts/RtsFlags.h @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: RtsFlags.h,v 1.33 2001/07/19 07:28:00 andy Exp $ + * $Id: RtsFlags.h,v 1.34 2001/07/23 17:23:19 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -33,11 +33,10 @@ struct GC_FLAGS { nat generations; nat steps; - - rtsBool ringBell; - + rtsBool compact; rtsBool squeezeUpdFrames; + rtsBool ringBell; rtsBool frontpanel; }; diff --git a/ghc/rts/Sanity.c b/ghc/rts/Sanity.c index d5e412471b..af0a38d33e 100644 --- a/ghc/rts/Sanity.c +++ b/ghc/rts/Sanity.c @@ -1,11 +1,11 @@ /* ----------------------------------------------------------------------------- - * $Id: Sanity.c,v 1.27 2001/03/22 03:51:10 hwloidl Exp $ + * $Id: Sanity.c,v 1.28 2001/07/23 17:23:19 simonmar Exp $ * - * (c) The GHC Team, 1998-1999 + * (c) The GHC Team, 1998-2001 * * Sanity checking code for the heap and stack. * - * Used when debugging: check that the stack looks reasonable. + * Used when debugging: check that everything looks reasonable. * * - All things that are supposed to be pointers look like pointers.
* @@ -14,19 +14,6 @@ * * ---------------------------------------------------------------------------*/ -//@menu -//* Includes:: -//* Macros:: -//* Stack sanity:: -//* Heap Sanity:: -//* TSO Sanity:: -//* Thread Queue Sanity:: -//* Blackhole Sanity:: -//@end menu - -//@node Includes, Macros -//@subsection Includes - #include "Rts.h" #ifdef DEBUG /* whole file */ @@ -40,34 +27,46 @@ #include "Schedule.h" #include "StoragePriv.h" // for END_OF_STATIC_LIST -//@node Macros, Stack sanity, Includes -//@subsection Macros - -#define LOOKS_LIKE_PTR(r) ((LOOKS_LIKE_STATIC_CLOSURE(r) || \ - ((HEAP_ALLOCED(r) && Bdescr((P_)r)->free != (void *)-1))) && \ - ((StgWord)(*(StgPtr)r)!=0xaaaaaaaa)) +/* ----------------------------------------------------------------------------- + A valid pointer is either: -//@node Stack sanity, Heap Sanity, Macros -//@subsection Stack sanity + - a pointer to a static closure, or + - a pointer into the heap, and + - the block is not free + - either: - the object is large, or + - it is not after the free pointer in the block + - the contents of the pointer is not 0xaaaaaaaa -/* ----------------------------------------------------------------------------- - Check stack sanity -------------------------------------------------------------------------- */ -StgOffset checkStackClosure( StgClosure* c ); +#define LOOKS_LIKE_PTR(r) \ + ({ bdescr *bd = Bdescr((P_)r); \ + LOOKS_LIKE_STATIC_CLOSURE(r) || \ + (HEAP_ALLOCED(r) \ + && bd != (void *)-1 \ + && ((StgWord)(*(StgPtr)r)!=0xaaaaaaaa) \ + ); \ + }) -StgOffset checkStackObject( StgPtr sp ); +// NOT always true, but can be useful for spotting bugs: (generally +// true after GC, but not for things just allocated using allocate(), +// for example): +// (bd->flags & BF_LARGE || bd->free > (P_)r) -void checkStackChunk( StgPtr sp, StgPtr stack_end ); - -static StgOffset checkSmallBitmap( StgPtr payload, StgWord32 bitmap ); +/* ----------------------------------------------------------------------------- + Forward decls. 
+ -------------------------------------------------------------------------- */ -static StgOffset checkLargeBitmap( StgPtr payload, - StgLargeBitmap* large_bitmap ); +static StgOffset checkStackClosure ( StgClosure* c ); +static StgOffset checkStackObject ( StgPtr sp ); +static StgOffset checkSmallBitmap ( StgPtr payload, StgWord32 bitmap ); +static StgOffset checkLargeBitmap ( StgPtr payload, StgLargeBitmap* ); +static void checkClosureShallow ( StgClosure* p ); -void checkClosureShallow( StgClosure* p ); +/* ----------------------------------------------------------------------------- + Check stack sanity + -------------------------------------------------------------------------- */ -//@cindex checkSmallBitmap static StgOffset checkSmallBitmap( StgPtr payload, StgWord32 bitmap ) { @@ -76,13 +75,12 @@ checkSmallBitmap( StgPtr payload, StgWord32 bitmap ) i = 0; for(; bitmap != 0; ++i, bitmap >>= 1 ) { if ((bitmap & 1) == 0) { - checkClosure(stgCast(StgClosure*,payload[i])); + checkClosure((StgClosure *)payload[i]); } } return i; } -//@cindex checkLargeBitmap static StgOffset checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap ) { @@ -94,15 +92,14 @@ checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap ) StgWord32 bitmap = large_bitmap->bitmap[bmp]; for(; bitmap != 0; ++i, bitmap >>= 1 ) { if ((bitmap & 1) == 0) { - checkClosure(stgCast(StgClosure*,payload[i])); + checkClosure((StgClosure *)payload[i]); } } } return i; } -//@cindex checkStackClosure -StgOffset +static StgOffset checkStackClosure( StgClosure* c ) { const StgInfoTable* info = get_itbl(c); @@ -163,12 +160,11 @@ checkStackClosure( StgClosure* c ) * chunks. */ -//@cindex checkClosureShallow void checkClosureShallow( StgClosure* p ) { ASSERT(p); - ASSERT(LOOKS_LIKE_GHC_INFO(p->header.info) + ASSERT(LOOKS_LIKE_GHC_INFO(GET_INFO(p)) || IS_HUGS_CONSTR_INFO(GET_INFO(p))); /* Is it a static closure (i.e. in the data segment)? */ @@ -180,27 +176,24 @@ checkClosureShallow( StgClosure* p ) } } -/* check an individual stack object */ -//@cindex checkStackObject +// check an individual stack object StgOffset checkStackObject( StgPtr sp ) { if (IS_ARG_TAG(*sp)) { - /* Tagged words might be "stubbed" pointers, so there's no - * point checking to see whether they look like pointers or - * not (some of them will). - */ + // Tagged words might be "stubbed" pointers, so there's no + // point checking to see whether they look like pointers or + // not (some of them will). 
return ARG_SIZE(*sp) + 1; - } else if (LOOKS_LIKE_GHC_INFO(*stgCast(StgPtr*,sp))) { - return checkStackClosure(stgCast(StgClosure*,sp)); - } else { /* must be an untagged closure pointer in the stack */ - checkClosureShallow(*stgCast(StgClosure**,sp)); + } else if (LOOKS_LIKE_GHC_INFO(*(StgPtr *)sp)) { + return checkStackClosure((StgClosure *)sp); + } else { // must be an untagged closure pointer in the stack + checkClosureShallow(*(StgClosure **)sp); return 1; } } -/* check sections of stack between update frames */ -//@cindex checkStackChunk +// check sections of stack between update frames void checkStackChunk( StgPtr sp, StgPtr stack_end ) { @@ -213,7 +206,6 @@ checkStackChunk( StgPtr sp, StgPtr stack_end ) // ASSERT( p == stack_end ); -- HWL } -//@cindex checkStackChunk StgOffset checkClosure( StgClosure* p ) { @@ -290,6 +282,7 @@ checkClosure( StgClosure* p ) case BCO: case STABLE_NAME: case MUT_VAR: + case MUT_CONS: case CONSTR_INTLIKE: case CONSTR_CHARLIKE: case CONSTR_STATIC: @@ -323,7 +316,7 @@ checkClosure( StgClosure* p ) } case THUNK_SELECTOR: - ASSERT(LOOKS_LIKE_PTR(stgCast(StgSelector*,p)->selectee)); + ASSERT(LOOKS_LIKE_PTR(((StgSelector *)p)->selectee)); return sizeofW(StgHeader) + MIN_UPD_SIZE; case IND: @@ -332,7 +325,7 @@ checkClosure( StgClosure* p ) * but they might appear during execution */ P_ q; - StgInd *ind = stgCast(StgInd*,p); + StgInd *ind = (StgInd *)p; ASSERT(LOOKS_LIKE_PTR(ind->indirectee)); q = (P_)p + sizeofW(StgInd); while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/ @@ -354,7 +347,7 @@ checkClosure( StgClosure* p ) case AP_UPD: /* we can treat this as being the same as a PAP */ case PAP: { - StgPAP *pap = stgCast(StgPAP*,p); + StgPAP *pap = (StgPAP *)p; ASSERT(LOOKS_LIKE_PTR(pap->fun)); checkStackChunk((StgPtr)pap->payload, (StgPtr)pap->payload + pap->n_args @@ -363,12 +356,12 @@ checkClosure( StgClosure* p ) } case ARR_WORDS: - return arr_words_sizeW(stgCast(StgArrWords*,p)); + return arr_words_sizeW((StgArrWords *)p); case MUT_ARR_PTRS: case MUT_ARR_PTRS_FROZEN: { - StgMutArrPtrs* a = stgCast(StgMutArrPtrs*,p); + StgMutArrPtrs* a = (StgMutArrPtrs *)p; nat i; for (i = 0; i < a->ptrs; i++) { ASSERT(LOOKS_LIKE_PTR(a->payload[i])); @@ -458,8 +451,6 @@ looks_like_ga(globalAddr *ga) #endif -//@node Heap Sanity, TSO Sanity, Stack sanity -//@subsection Heap Sanity /* ----------------------------------------------------------------------------- Check Heap Sanity @@ -470,46 +461,31 @@ looks_like_ga(globalAddr *ga) all the objects in the remainder of the chain. -------------------------------------------------------------------------- */ -//@cindex checkHeap -extern void -checkHeap(bdescr *bd, StgPtr start) +void +checkHeap(bdescr *bd) { StgPtr p; - nat xxx = 0; // tmp -- HWL - - if (start == NULL) { - if (bd != NULL) p = bd->start; - } else { - p = start; - } - while (bd != NULL) { - while (p < bd->free) { - nat size = checkClosure(stgCast(StgClosure*,p)); - /* This is the smallest size of closure that can live in the heap. 
*/ - ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) ); - if (get_itbl(stgCast(StgClosure*,p))->type == IND_STATIC) - xxx++; - p += size; - - /* skip over slop */ - while (p < bd->free && - (*p < 0x1000 || !LOOKS_LIKE_GHC_INFO((void*)*p))) { p++; } - } - bd = bd->link; - if (bd != NULL) { + for (; bd != NULL; bd = bd->link) { p = bd->start; - } + while (p < bd->free) { + nat size = checkClosure((StgClosure *)p); + /* This is the smallest size of closure that can live in the heap */ + ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) ); + p += size; + + /* skip over slop */ + while (p < bd->free && + (*p < 0x1000 || !LOOKS_LIKE_GHC_INFO((void*)*p))) { p++; } + } } - fprintf(stderr,"@@@@ checkHeap: Heap ok; %d IND_STATIC closures checked\n", - xxx); } #if defined(PAR) /* Check heap between start and end. Used after unpacking graphs. */ -extern void +void checkHeapChunk(StgPtr start, StgPtr end) { extern globalAddr *LAGAlookup(StgClosure *addr); @@ -527,14 +503,14 @@ checkHeapChunk(StgPtr start, StgPtr end) *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */ size = MIN_UPD_SIZE; } else { - size = checkClosure(stgCast(StgClosure*,p)); + size = checkClosure((StgClosure *)p); /* This is the smallest size of closure that can live in the heap. */ ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) ); } } } #else /* !PAR */ -extern void +void checkHeapChunk(StgPtr start, StgPtr end) { StgPtr p; @@ -542,15 +518,14 @@ checkHeapChunk(StgPtr start, StgPtr end) for (p=start; p<end; p+=size) { ASSERT(LOOKS_LIKE_GHC_INFO((void*)*p)); - size = checkClosure(stgCast(StgClosure*,p)); + size = checkClosure((StgClosure *)p); /* This is the smallest size of closure that can live in the heap. */ ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) ); } } #endif -//@cindex checkChain -extern void +void checkChain(bdescr *bd) { while (bd != NULL) { @@ -560,40 +535,36 @@ checkChain(bdescr *bd) } /* check stack - making sure that update frames are linked correctly */ -//@cindex checkStack void checkStack(StgPtr sp, StgPtr stack_end, StgUpdateFrame* su ) { /* check everything down to the first update frame */ - checkStackChunk( sp, stgCast(StgPtr,su) ); - while ( stgCast(StgPtr,su) < stack_end) { - sp = stgCast(StgPtr,su); + checkStackChunk( sp, (StgPtr)su ); + while ( (StgPtr)su < stack_end) { + sp = (StgPtr)su; switch (get_itbl(su)->type) { case UPDATE_FRAME: su = su->link; break; case SEQ_FRAME: - su = stgCast(StgSeqFrame*,su)->link; + su = ((StgSeqFrame *)su)->link; break; case CATCH_FRAME: - su = stgCast(StgCatchFrame*,su)->link; + su = ((StgCatchFrame *)su)->link; break; case STOP_FRAME: - /* not quite: ASSERT(stgCast(StgPtr,su) == stack_end); */ + /* not quite: ASSERT((StgPtr)su == stack_end); */ return; default: barf("checkStack: weird record found on update frame list."); } - checkStackChunk( sp, stgCast(StgPtr,su) ); + checkStackChunk( sp, (StgPtr)su ); } - ASSERT(stgCast(StgPtr,su) == stack_end); + ASSERT((StgPtr)su == stack_end); } -//@node TSO Sanity, Thread Queue Sanity, Heap Sanity -//@subsection TSO Sanity -//@cindex checkTSO -extern void +void checkTSO(StgTSO *tso) { StgPtr sp = tso->sp; @@ -615,7 +586,7 @@ checkTSO(StgTSO *tso) } ASSERT(stack <= sp && sp < stack_end); - ASSERT(sp <= stgCast(StgPtr,su)); + ASSERT(sp <= (StgPtr)su); #if defined(PAR) ASSERT(tso->par.magic==TSO_MAGIC); @@ -667,8 +638,7 @@ checkTSO(StgTSO *tso) } #if defined(GRAN) -//@cindex checkTSOsSanity -extern void +void checkTSOsSanity(void) { nat i, tsos; StgTSO *tso; @@ -687,13 +657,10 @@ checkTSOsSanity(void) { 
belch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc); } -//@node Thread Queue Sanity, Blackhole Sanity, TSO Sanity -//@subsection Thread Queue Sanity // still GRAN only -//@cindex checkThreadQSanity -extern rtsBool +rtsBool checkThreadQSanity (PEs proc, rtsBool check_TSO_too) { StgTSO *tso, *prev; @@ -715,8 +682,7 @@ checkThreadQSanity (PEs proc, rtsBool check_TSO_too) ASSERT(prev==run_queue_tls[proc]); } -//@cindex checkThreadQsSanity -extern rtsBool +rtsBool checkThreadQsSanity (rtsBool check_TSO_too) { PEs p; @@ -736,14 +702,56 @@ checkGlobalTSOList (rtsBool checkTSOs) extern StgTSO *all_threads; StgTSO *tso; for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) { - ASSERT(Bdescr((P_)tso)->evacuated == 1); - if (checkTSOs) - checkTSO(tso); + ASSERT(LOOKS_LIKE_PTR(tso)); + ASSERT(get_itbl(tso)->type == TSO); + if (checkTSOs) + checkTSO(tso); } } -//@node Blackhole Sanity, GALA table sanity, Thread Queue Sanity -//@subsection Blackhole Sanity +/* ----------------------------------------------------------------------------- + Check mutable list sanity. + -------------------------------------------------------------------------- */ + +void +checkMutableList( StgMutClosure *p, nat gen ) +{ + bdescr *bd; + + for (; p != END_MUT_LIST; p = p->mut_link) { + bd = Bdescr((P_)p); + ASSERT(closure_MUTABLE(p)); + ASSERT(bd->gen_no == gen); + ASSERT(LOOKS_LIKE_PTR(p->mut_link)); + } +} + +void +checkMutOnceList( StgMutClosure *p, nat gen ) +{ + bdescr *bd; + StgInfoTable *info; + + for (; p != END_MUT_LIST; p = p->mut_link) { + bd = Bdescr((P_)p); + info = get_itbl(p); + + ASSERT(!closure_MUTABLE(p)); + ASSERT(ip_STATIC(info) || bd->gen_no == gen); + ASSERT(LOOKS_LIKE_PTR(p->mut_link)); + + switch (info->type) { + case IND_STATIC: + case IND_OLDGEN: + case IND_OLDGEN_PERM: + case MUT_CONS: + break; + default: + barf("checkMutOnceList: strange closure %p (%s)", + p, info_type((StgClosure *)p)); + } + } +} /* ----------------------------------------------------------------------------- Check Blackhole Sanity @@ -756,7 +764,6 @@ checkGlobalTSOList (rtsBool checkTSOs) the update frame list. -------------------------------------------------------------------------- */ -//@cindex isBlackhole rtsBool isBlackhole( StgTSO* tso, StgClosure* p ) { @@ -771,10 +778,10 @@ isBlackhole( StgTSO* tso, StgClosure* p ) } break; case SEQ_FRAME: - su = stgCast(StgSeqFrame*,su)->link; + su = ((StgSeqFrame *)su)->link; break; case CATCH_FRAME: - su = stgCast(StgCatchFrame*,su)->link; + su = ((StgCatchFrame *)su)->link; break; case STOP_FRAME: return rtsFalse; @@ -787,9 +794,9 @@ isBlackhole( StgTSO* tso, StgClosure* p ) /* Check the static objects list. 
*/ -extern void -checkStaticObjects ( void ) { - extern StgClosure* static_objects; +void +checkStaticObjects ( StgClosure* static_objects ) +{ StgClosure *p = static_objects; StgInfoTable *info; @@ -799,7 +806,7 @@ checkStaticObjects ( void ) { switch (info->type) { case IND_STATIC: { - StgClosure *indirectee = stgCast(StgIndStatic*,p)->indirectee; + StgClosure *indirectee = ((StgIndStatic *)p)->indirectee; ASSERT(LOOKS_LIKE_PTR(indirectee)); ASSERT(LOOKS_LIKE_GHC_INFO(indirectee->header.info)); @@ -832,7 +839,6 @@ checkStaticObjects ( void ) { Note that in GUM we can have several different closure types in a blocking queue */ -//@cindex checkBQ #if defined(PAR) void checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure) @@ -914,8 +920,6 @@ checkBQ (StgTSO *bqe, StgClosure *closure) #endif -//@node GALA table sanity, Index, Blackhole Sanity -//@subsection GALA table sanity /* This routine checks the sanity of the LAGA and GALA tables. They are @@ -935,7 +939,6 @@ extern GALA *liveIndirections; extern GALA *liveRemoteGAs; extern HashTable *LAtoGALAtable; -//@cindex checkLAGAtable void checkLAGAtable(rtsBool check_closures) { @@ -949,7 +952,7 @@ checkLAGAtable(rtsBool check_closures) ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info)); ASSERT(gala->next!=gala); // detect direct loops if ( check_closures ) { - checkClosure(stgCast(StgClosure*,gala->la)); + checkClosure((StgClosure *)gala->la); } } @@ -961,33 +964,11 @@ checkLAGAtable(rtsBool check_closures) ASSERT(gala->next!=gala); // detect direct loops /* if ( check_closures ) { - checkClosure(stgCast(StgClosure*,gala->la)); + checkClosure((StgClosure *)gala->la); } */ } } #endif -//@node Index, , GALA table sanity -//@subsection Index - #endif /* DEBUG */ - -//@index -//* checkBQ:: @cindex\s-+checkBQ -//* checkChain:: @cindex\s-+checkChain -//* checkClosureShallow:: @cindex\s-+checkClosureShallow -//* checkHeap:: @cindex\s-+checkHeap -//* checkLargeBitmap:: @cindex\s-+checkLargeBitmap -//* checkSmallBitmap:: @cindex\s-+checkSmallBitmap -//* checkStack:: @cindex\s-+checkStack -//* checkStackChunk:: @cindex\s-+checkStackChunk -//* checkStackChunk:: @cindex\s-+checkStackChunk -//* checkStackClosure:: @cindex\s-+checkStackClosure -//* checkStackObject:: @cindex\s-+checkStackObject -//* checkTSO:: @cindex\s-+checkTSO -//* checkTSOsSanity:: @cindex\s-+checkTSOsSanity -//* checkThreadQSanity:: @cindex\s-+checkThreadQSanity -//* checkThreadQsSanity:: @cindex\s-+checkThreadQsSanity -//* isBlackhole:: @cindex\s-+isBlackhole -//@end index diff --git a/ghc/rts/Sanity.h b/ghc/rts/Sanity.h index 88568988bf..22889078cf 100644 --- a/ghc/rts/Sanity.h +++ b/ghc/rts/Sanity.h @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: Sanity.h,v 1.8 2001/03/22 03:51:10 hwloidl Exp $ + * $Id: Sanity.h,v 1.9 2001/07/23 17:23:19 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -17,27 +17,35 @@ # endif /* debugging routines */ -extern void checkHeap ( bdescr *bd, StgPtr start ); +extern void checkHeap ( bdescr *bd ); extern void checkHeapChunk ( StgPtr start, StgPtr end ); -extern void checkChain ( bdescr *bd ); -extern void checkStack ( StgPtr sp, StgPtr stack_end, StgUpdateFrame* su ); -extern void checkTSO ( StgTSO* tso ); -extern void checkGlobalTSOList (rtsBool checkTSOs); -extern void checkStaticObjects ( void ); +extern void checkChain ( bdescr *bd ); +extern void checkStack ( StgPtr sp, StgPtr stack_end, StgUpdateFrame* su ); +extern void checkTSO ( StgTSO* tso ); +extern void 
checkGlobalTSOList ( rtsBool checkTSOs ); +extern void checkStaticObjects ( StgClosure* static_objects ); +extern void checkStackChunk ( StgPtr sp, StgPtr stack_end ); +extern StgOffset checkClosure ( StgClosure* p ); + +extern void checkMutableList ( StgMutClosure *p, nat gen ); +extern void checkMutOnceList ( StgMutClosure *p, nat gen ); + #if defined(GRAN) extern void checkTSOsSanity(void); extern rtsBool checkThreadQSanity (PEs proc, rtsBool check_TSO_too); extern rtsBool checkThreadQsSanity (rtsBool check_TSO_too); #endif + #if defined(PAR) extern void checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure); -extern void checkLAGAtable(rtsBool check_closures); -extern void checkHeapChunk(StgPtr start, StgPtr end); #else extern void checkBQ (StgTSO *bqe, StgClosure *closure); #endif -extern StgOffset checkClosure( StgClosure* p ); +#if defined(PAR) +extern void checkLAGAtable(rtsBool check_closures); +extern void checkHeapChunk(StgPtr start, StgPtr end); +#endif /* test whether an object is already on update list */ extern rtsBool isBlackhole( StgTSO* tso, StgClosure* p ); diff --git a/ghc/rts/Schedule.c b/ghc/rts/Schedule.c index 369c8c603a..425551cb05 100644 --- a/ghc/rts/Schedule.c +++ b/ghc/rts/Schedule.c @@ -1,5 +1,5 @@ /* --------------------------------------------------------------------------- - * $Id: Schedule.c,v 1.96 2001/06/04 16:26:54 simonmar Exp $ + * $Id: Schedule.c,v 1.97 2001/07/23 17:23:19 simonmar Exp $ * * (c) The GHC Team, 1998-2000 * @@ -81,7 +81,6 @@ #include "Storage.h" #include "StgRun.h" #include "StgStartup.h" -#include "GC.h" #include "Hooks.h" #include "Schedule.h" #include "StgMiscClosures.h" @@ -181,7 +180,7 @@ StgTSO *all_threads; */ static StgTSO *suspended_ccalling_threads; -static void GetRoots(void); +static void GetRoots(evac_fn); static StgTSO *threadStackOverflow(StgTSO *tso); /* KH: The following two flags are shared memory locations. 
There is no need @@ -911,7 +910,7 @@ schedule( void ) #else cap = &MainRegTable; #endif - + cap->rCurrentTSO = t; /* context switches are now initiated by the timer signal, unless @@ -2093,7 +2092,7 @@ take_off_run_queue(StgTSO *tso) { KH @ 25/10/99 */ -static void GetRoots(void) +static void GetRoots(evac_fn evac) { StgMainThread *m; @@ -2102,16 +2101,16 @@ static void GetRoots(void) nat i; for (i=0; i<=RtsFlags.GranFlags.proc; i++) { if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL))) - run_queue_hds[i] = (StgTSO *)MarkRoot((StgClosure *)run_queue_hds[i]); + evac((StgClosure **)&run_queue_hds[i]); if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL))) - run_queue_tls[i] = (StgTSO *)MarkRoot((StgClosure *)run_queue_tls[i]); + evac((StgClosure **)&run_queue_tls[i]); if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL))) - blocked_queue_hds[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hds[i]); + evac((StgClosure **)&blocked_queue_hds[i]); if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL))) - blocked_queue_tls[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tls[i]); + evac((StgClosure **)&blocked_queue_tls[i]); if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL))) - ccalling_threadss[i] = (StgTSO *)MarkRoot((StgClosure *)ccalling_threadss[i]); + evac((StgClosure **)&ccalling_threads[i]); } } @@ -2119,31 +2118,31 @@ static void GetRoots(void) #else /* !GRAN */ if (run_queue_hd != END_TSO_QUEUE) { - ASSERT(run_queue_tl != END_TSO_QUEUE); - run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd); - run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl); + ASSERT(run_queue_tl != END_TSO_QUEUE); + evac((StgClosure **)&run_queue_hd); + evac((StgClosure **)&run_queue_tl); } - + if (blocked_queue_hd != END_TSO_QUEUE) { - ASSERT(blocked_queue_tl != END_TSO_QUEUE); - blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd); - blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl); + ASSERT(blocked_queue_tl != END_TSO_QUEUE); + evac((StgClosure **)&blocked_queue_hd); + evac((StgClosure **)&blocked_queue_tl); } - + if (sleeping_queue != END_TSO_QUEUE) { - sleeping_queue = (StgTSO *)MarkRoot((StgClosure *)sleeping_queue); + evac((StgClosure **)&sleeping_queue); } #endif for (m = main_threads; m != NULL; m = m->link) { - m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso); + evac((StgClosure **)&m->tso); + } + if (suspended_ccalling_threads != END_TSO_QUEUE) { + evac((StgClosure **)&suspended_ccalling_threads); } - if (suspended_ccalling_threads != END_TSO_QUEUE) - suspended_ccalling_threads = - (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads); #if defined(SMP) || defined(PAR) || defined(GRAN) - markSparkQueue(); + markSparkQueue(evac); #endif } @@ -2160,7 +2159,7 @@ static void GetRoots(void) This needs to be protected by the GC condition variable above. KH. 
-------------------------------------------------------------------------- */ -void (*extra_roots)(void); +void (*extra_roots)(evac_fn); void performGC(void) @@ -2175,17 +2174,16 @@ performMajorGC(void) } static void -AllRoots(void) +AllRoots(evac_fn evac) { - GetRoots(); /* the scheduler's roots */ - extra_roots(); /* the user's roots */ + GetRoots(evac); // the scheduler's roots + extra_roots(evac); // the user's roots } void -performGCWithRoots(void (*get_roots)(void)) +performGCWithRoots(void (*get_roots)(evac_fn)) { extra_roots = get_roots; - GarbageCollect(AllRoots,rtsFalse); } @@ -2248,7 +2246,7 @@ threadStackOverflow(StgTSO *tso) dest->stack_size = new_stack_size; /* and relocate the update frame list */ - relocate_TSO(tso, dest); + relocate_stack(dest, diff); /* Mark the old TSO as relocated. We have to check for relocated * TSOs in the garbage collector and any primops that deal with TSOs. diff --git a/ghc/rts/Stable.c b/ghc/rts/Stable.c index cc0e0b3ec9..f104075889 100644 --- a/ghc/rts/Stable.c +++ b/ghc/rts/Stable.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: Stable.c,v 1.14 2001/07/13 13:41:42 rrt Exp $ + * $Id: Stable.c,v 1.15 2001/07/23 17:23:19 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -10,7 +10,6 @@ #include "Rts.h" #include "Hash.h" #include "StablePriv.h" -#include "GC.h" #include "RtsUtils.h" #include "Storage.h" #include "RtsAPI.h" @@ -133,6 +132,7 @@ initFreeList(snEntry *table, nat n, snEntry *free) for (p = table + n - 1; p >= table; p--) { p->addr = (P_)free; + p->old = NULL; p->weight = 0; p->sn_obj = NULL; free = p; @@ -182,8 +182,7 @@ lookupStableName(StgPtr p) } /* removing indirections increases the likelihood - * of finding a match in the stable name - * hash table. + * of finding a match in the stable name hash table. */ p = (StgPtr)removeIndirections((StgClosure*)p); @@ -251,7 +250,7 @@ enlargeStablePtrTable(void) nat old_SPT_size = SPT_size; if (SPT_size == 0) { - /* 1st time */ + // 1st time SPT_size = INIT_SPT_SIZE; stable_ptr_table = stgMallocWords(SPT_size * sizeof(snEntry), "initStablePtrTable"); @@ -264,7 +263,7 @@ enlargeStablePtrTable(void) addrToStableHash = allocHashTable(); } else { - /* 2nd and subsequent times */ + // 2nd and subsequent times SPT_size *= 2; stable_ptr_table = stgReallocWords(stable_ptr_table, SPT_size * sizeof(snEntry), @@ -282,49 +281,63 @@ enlargeStablePtrTable(void) * -------------------------------------------------------------------------- */ void -markStablePtrTable(rtsBool full) +markStablePtrTable(evac_fn evac) { - snEntry *p, *end_stable_ptr_table; - StgPtr q; - StgClosure *new; - - if (SPT_size == 0) - return; - - if (full) { - freeHashTable(addrToStableHash,NULL); - addrToStableHash = allocHashTable(); - } + snEntry *p, *end_stable_ptr_table; + StgPtr q; + + end_stable_ptr_table = &stable_ptr_table[SPT_size]; + + // Mark all the stable *pointers* (not stable names). + // _starting_ at index 1; index 0 is unused. + for (p = stable_ptr_table+1; p < end_stable_ptr_table; p++) { + q = p->addr; + + // internal pointers or NULL are free slots + if (q && (q < (P_)stable_ptr_table || q >= (P_)end_stable_ptr_table)) { + + // save the current addr away: we need to be able to tell + // whether the objects moved in order to be able to update + // the hash table later. 
+ p->old = p->addr; + + // if the weight is non-zero, treat addr as a root + if (p->weight != 0) { + evac((StgClosure **)&p->addr); + } + } + } +} - end_stable_ptr_table = &stable_ptr_table[SPT_size]; +/* ----------------------------------------------------------------------------- + * Thread the stable pointer table for compacting GC. + * + * Here we must call the supplied evac function for each pointer into + * the heap from the stable pointer table, because the compacting + * collector may move the object it points to. + * -------------------------------------------------------------------------- */ - /* Mark all the stable *pointers* (not stable names). - * _starting_ at index 1; index 0 is unused. - */ - for (p = stable_ptr_table+1; p < end_stable_ptr_table; p++) { - q = p->addr; - /* internal pointers or NULL are free slots - */ - if (q && (q < (P_)stable_ptr_table || q >= (P_)end_stable_ptr_table)) { - if (p->weight != 0) { - new = MarkRoot((StgClosure *)q); - /* Update the hash table */ - if (full) { - insertHashTable(addrToStableHash, (W_)new, - (void *)(p - stable_ptr_table)); - (StgClosure *)p->addr = new; - } else if ((P_)new != q) { - removeHashTable(addrToStableHash, (W_)q, NULL); - if (!lookupHashTable(addrToStableHash, (W_)new)) { - insertHashTable(addrToStableHash, (W_)new, - (void *)(p - stable_ptr_table)); - } - (StgClosure *)p->addr = new; +void +threadStablePtrTable( evac_fn evac ) +{ + snEntry *p, *end_stable_ptr_table; + StgPtr q; + + end_stable_ptr_table = &stable_ptr_table[SPT_size]; + + for (p = stable_ptr_table+1; p < end_stable_ptr_table; p++) { + q = p->addr; + + // internal pointers or NULL are free slots + if (q && (q < (P_)stable_ptr_table || q >= (P_)end_stable_ptr_table)) { + if (p->weight != 0) { + evac((StgClosure **)&p->addr); + } + if (p->sn_obj != NULL) { + evac((StgClosure **)&p->sn_obj); + } } - IF_DEBUG(stable, fprintf(stderr,"Stable ptr %d still alive at %p, weight %u\n", p - stable_ptr_table, new, p->weight)); - } } - } } /* ----------------------------------------------------------------------------- @@ -339,6 +352,47 @@ markStablePtrTable(rtsBool full) * name table entry. We can re-use stable name table entries for live * heap objects, as long as the program has no StableName objects that * refer to the entry. + * -------------------------------------------------------------------------- */ + +void +gcStablePtrTable( void ) +{ + snEntry *p, *end_stable_ptr_table; + StgPtr q; + + end_stable_ptr_table = &stable_ptr_table[SPT_size]; + + // NOTE: _starting_ at index 1; index 0 is unused. 
+ for (p = stable_ptr_table + 1; p < end_stable_ptr_table; p++) { + + // Update the pointer to the StableName object, if there is one + if (p->sn_obj != NULL) { + p->sn_obj = isAlive(p->sn_obj); + } + + q = p->addr; + if (q && (q < (P_)stable_ptr_table || q >= (P_)end_stable_ptr_table)) { + + // StableNames only: + if (p->weight == 0) { + if (p->sn_obj == NULL) { + // StableName object is dead + freeStableName(p); + IF_DEBUG(stable, fprintf(stderr,"GC'd Stable name %d\n", + p - stable_ptr_table)); + continue; + + } else { + (StgClosure *)p->addr = isAlive((StgClosure *)p->addr); + IF_DEBUG(stable, fprintf(stderr,"Stable name %d still alive at %p, weight %d\n", p - stable_ptr_table, p->addr, p->weight)); + } + } + } + } +} + +/* ----------------------------------------------------------------------------- + * Update the StablePtr/StableName hash table * * The boolean argument 'full' indicates that a major collection is * being done, so we might as well throw away the hash table and build @@ -347,65 +401,39 @@ markStablePtrTable(rtsBool full) * -------------------------------------------------------------------------- */ void -gcStablePtrTable(rtsBool full) +updateStablePtrTable(rtsBool full) { - snEntry *p, *end_stable_ptr_table; - StgPtr q, new; - - if (SPT_size == 0) { - return; - } - - end_stable_ptr_table = &stable_ptr_table[SPT_size]; - - /* NOTE: _starting_ at index 1; index 0 is unused. */ - for (p = stable_ptr_table + 1; p < end_stable_ptr_table; p++) { - - /* Update the pointer to the StableName object, if there is one */ - if (p->sn_obj != NULL) { - p->sn_obj = isAlive(p->sn_obj); + snEntry *p, *end_stable_ptr_table; + + if (full && addrToStableHash != NULL) { + freeHashTable(addrToStableHash,NULL); + addrToStableHash = allocHashTable(); } - - q = p->addr; - if (q && (q < (P_)stable_ptr_table || q >= (P_)end_stable_ptr_table)) { - - /* We're only interested in Stable Names here. The weight != 0 - * case is handled in markStablePtrTable above. - */ - if (p->weight == 0) { + + end_stable_ptr_table = &stable_ptr_table[SPT_size]; + + // NOTE: _starting_ at index 1; index 0 is unused. + for (p = stable_ptr_table + 1; p < end_stable_ptr_table; p++) { - if (p->sn_obj == NULL) { - /* StableName object is dead */ - freeStableName(p); - IF_DEBUG(stable, fprintf(stderr,"GC'd Stable name %d\n", p - stable_ptr_table)); - } - else { - (StgClosure *)new = isAlive((StgClosure *)q); - IF_DEBUG(stable, fprintf(stderr,"Stable name %d still alive at %p, weight %d\n", p - stable_ptr_table, new, p->weight)); - - if (new == NULL) { - /* The target has been garbage collected. Remove its - * entry from the hash table. - */ - removeHashTable(addrToStableHash, (W_)q, NULL); - - } else { - /* Target still alive, Re-hash this stable name - */ + if (p->addr == NULL) { + if (p->old != NULL) { + // The target has been garbage collected. Remove its + // entry from the hash table. 
+ removeHashTable(addrToStableHash, (W_)p->old, NULL); + p->old = NULL; + } + } + else if (p->addr < (P_)stable_ptr_table + || p->addr >= (P_)end_stable_ptr_table) { + // Target still alive, Re-hash this stable name if (full) { - insertHashTable(addrToStableHash, (W_)new, (void *)(p - stable_ptr_table)); - } else if (new != q) { - removeHashTable(addrToStableHash, (W_)q, NULL); - insertHashTable(addrToStableHash, (W_)new, (void *)(p - stable_ptr_table)); + insertHashTable(addrToStableHash, (W_)p->addr, + (void *)(p - stable_ptr_table)); + } else if (p->addr != p->old) { + removeHashTable(addrToStableHash, (W_)p->old, NULL); + insertHashTable(addrToStableHash, (W_)p->addr, + (void *)(p - stable_ptr_table)); } - } - - /* finally update the address of the target to point to its - * new location. - */ - p->addr = new; } - } } - } } diff --git a/ghc/rts/StablePriv.h b/ghc/rts/StablePriv.h index f245216050..05a50bc881 100644 --- a/ghc/rts/StablePriv.h +++ b/ghc/rts/StablePriv.h @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: StablePriv.h,v 1.2 1999/02/05 16:02:56 simonm Exp $ + * $Id: StablePriv.h,v 1.3 2001/07/23 17:23:20 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -7,8 +7,11 @@ * * ---------------------------------------------------------------------------*/ -extern void initStablePtrTable(void); -extern void markStablePtrTable(rtsBool full); -extern void enlargeStablePtrTable(void); -extern void gcStablePtrTable(rtsBool full); -extern StgWord lookupStableName(StgPtr p); +extern void initStablePtrTable ( void ); +extern void enlargeStablePtrTable ( void ); +extern StgWord lookupStableName ( StgPtr p ); + +extern void markStablePtrTable ( evac_fn evac ); +extern void threadStablePtrTable ( evac_fn evac ); +extern void gcStablePtrTable ( void ); +extern void updateStablePtrTable ( rtsBool full ); diff --git a/ghc/rts/Stats.c b/ghc/rts/Stats.c index 3b1213554c..70dd866f3b 100644 --- a/ghc/rts/Stats.c +++ b/ghc/rts/Stats.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: Stats.c,v 1.30 2001/07/08 17:04:04 sof Exp $ + * $Id: Stats.c,v 1.31 2001/07/23 17:23:20 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -617,14 +617,14 @@ stat_exit(int alloc) Produce some detailed info on the state of the generational GC. 
-------------------------------------------------------------------------- */ void -stat_describe_gens(void) +statDescribeGens(void) { nat g, s, mut, mut_once, lge, live; StgMutClosure *m; bdescr *bd; step *step; - fprintf(stderr, " Gen Steps Max Mutable Mut-Once Step Blocks Live Large\n Blocks Closures Closures Objects\n"); + fprintf(stderr, " Gen Steps Max Mutable Mut-Once Step Blocks Live Large\n Blocks Closures Closures Objects\n"); for (g = 0; g < RtsFlags.GcFlags.generations; g++) { for (m = generations[g].mut_list, mut = 0; m != END_MUT_LIST; @@ -642,7 +642,7 @@ stat_describe_gens(void) lge++; live = 0; if (RtsFlags.GcFlags.generations == 1) { - bd = step->to_space; + bd = step->to_blocks; } else { bd = step->blocks; } diff --git a/ghc/rts/Stats.h b/ghc/rts/Stats.h index 297cc52ce9..b5c9826a8b 100644 --- a/ghc/rts/Stats.h +++ b/ghc/rts/Stats.h @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: Stats.h,v 1.10 2000/12/19 14:30:39 simonmar Exp $ + * $Id: Stats.h,v 1.11 2001/07/23 17:23:20 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -22,8 +22,8 @@ extern void stat_workerStop(void); extern void initStats(void); -extern void stat_describe_gens(void); extern double mut_user_time_during_GC(void); extern double mut_user_time(void); +extern void statDescribeGens( void ); extern HsInt getAllocations( void ); diff --git a/ghc/rts/StgMiscClosures.hc b/ghc/rts/StgMiscClosures.hc index 9fced45f2f..b0433a32a7 100644 --- a/ghc/rts/StgMiscClosures.hc +++ b/ghc/rts/StgMiscClosures.hc @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: StgMiscClosures.hc,v 1.66 2001/03/23 16:36:21 simonmar Exp $ + * $Id: StgMiscClosures.hc,v 1.67 2001/07/23 17:23:20 simonmar Exp $ * * (c) The GHC Team, 1998-2000 * @@ -434,28 +434,29 @@ STGFUN(stg_BLACKHOLE_entry) #endif TICK_ENT_BH(); - /* Put ourselves on the blocking queue for this black hole */ + // Put ourselves on the blocking queue for this black hole #if defined(GRAN) || defined(PAR) - /* in fact, only difference is the type of the end-of-queue marker! */ + // in fact, only difference is the type of the end-of-queue marker! 
CurrentTSO->link = END_BQ_QUEUE; ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO; #else CurrentTSO->link = END_TSO_QUEUE; ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO; #endif - /* jot down why and on what closure we are blocked */ + // jot down why and on what closure we are blocked CurrentTSO->why_blocked = BlockedOnBlackHole; CurrentTSO->block_info.closure = R1.cl; - /* closure is mutable since something has just been added to its BQ */ - recordMutable((StgMutClosure *)R1.cl); - /* Change the BLACKHOLE into a BLACKHOLE_BQ */ + + // Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info; - /* PAR: dumping of event now done in blockThread -- HWL */ + // closure is mutable since something has just been added to its BQ + recordMutable((StgMutClosure *)R1.cl); - /* stg_gen_block is too heavyweight, use a specialised one */ - BLOCK_NP(1); + // PAR: dumping of event now done in blockThread -- HWL + // stg_gen_block is too heavyweight, use a specialised one + BLOCK_NP(1); FE_ } @@ -563,7 +564,7 @@ STGFUN(stg_CAF_BLACKHOLE_entry) { bdescr *bd = Bdescr(R1.p); if (bd->back != (bdescr *)BaseReg) { - if (bd->gen->no >= 1 || bd->step->no >= 1) { + if (bd->gen_no >= 1 || bd->step->no >= 1) { CMPXCHG(R1.cl->header.info, &stg_CAF_BLACKHOLE_info, &stg_WHITEHOLE_info); } else { EXTFUN_RTS(stg_gc_enter_1_hponly); @@ -575,26 +576,28 @@ STGFUN(stg_CAF_BLACKHOLE_entry) TICK_ENT_BH(); - /* Put ourselves on the blocking queue for this black hole */ + // Put ourselves on the blocking queue for this black hole #if defined(GRAN) || defined(PAR) - /* in fact, only difference is the type of the end-of-queue marker! */ + // in fact, only difference is the type of the end-of-queue marker! 
CurrentTSO->link = END_BQ_QUEUE; ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO; #else CurrentTSO->link = END_TSO_QUEUE; ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO; #endif - /* jot down why and on what closure we are blocked */ + // jot down why and on what closure we are blocked CurrentTSO->why_blocked = BlockedOnBlackHole; CurrentTSO->block_info.closure = R1.cl; - /* closure is mutable since something has just been added to its BQ */ - recordMutable((StgMutClosure *)R1.cl); - /* Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC */ + + // Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info; - /* PAR: dumping of event now done in blockThread -- HWL */ + // closure is mutable since something has just been added to its BQ + recordMutable((StgMutClosure *)R1.cl); - /* stg_gen_block is too heavyweight, use a specialised one */ + // PAR: dumping of event now done in blockThread -- HWL + + // stg_gen_block is too heavyweight, use a specialised one BLOCK_NP(1); FE_ } @@ -727,7 +730,7 @@ NON_ENTERABLE_ENTRY_CODE(END_MUT_LIST); SET_STATIC_HDR(stg_END_MUT_LIST_closure,stg_END_MUT_LIST_info,0/*CC*/,,EI_) , /*payload*/{} }; -INFO_TABLE(stg_MUT_CONS_info, stg_MUT_CONS_entry, 1, 1, MUT_VAR, , EF_, 0, 0); +INFO_TABLE(stg_MUT_CONS_info, stg_MUT_CONS_entry, 2, 0, MUT_CONS, , EF_, 0, 0); NON_ENTERABLE_ENTRY_CODE(MUT_CONS); /* ----------------------------------------------------------------------------- diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c index aec6f7f21d..320a834857 100644 --- a/ghc/rts/Storage.c +++ b/ghc/rts/Storage.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: Storage.c,v 1.40 2001/07/23 10:47:16 simonmar Exp $ + * $Id: Storage.c,v 1.41 2001/07/23 17:23:20 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -148,6 +148,7 @@ initStorage (void) stp->large_objects = NULL; stp->new_large_objects = NULL; stp->scavenged_large_objects = NULL; + stp->is_compacted = 0; } } @@ -159,8 +160,10 @@ initStorage (void) generations[g].steps[s].to = &generations[g+1].steps[0]; } - /* The oldest generation has one step and its destination is the - * same step. */ + /* The oldest generation has one step and it is compacted. */ + if (RtsFlags.GcFlags.compact) { + oldest_gen->steps[0].is_compacted = 1; + } oldest_gen->steps[0].to = &oldest_gen->steps[0]; /* generation 0 is special: that's the nursery */ @@ -192,7 +195,7 @@ initStorage (void) pthread_mutex_init(&sm_mutex, NULL); #endif - IF_DEBUG(gc, stat_describe_gens()); + IF_DEBUG(gc, statDescribeGens()); } void @@ -294,7 +297,7 @@ allocNurseries( void ) cap->rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize); cap->rCurrentNursery = cap->rNursery; for (bd = cap->rNursery; bd != NULL; bd = bd->link) { - bd->back = (bdescr *)cap; + bd->u.back = (bdescr *)cap; } } /* Set the back links to be equal to the Capability, @@ -302,10 +305,11 @@ allocNurseries( void ) */ } #else /* SMP */ - nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize; - g0s0->blocks = allocNursery(NULL, nursery_blocks); - g0s0->n_blocks = nursery_blocks; - g0s0->to_space = NULL; + nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize; + g0s0->blocks = allocNursery(NULL, nursery_blocks); + g0s0->n_blocks = nursery_blocks; + g0s0->to_blocks = NULL; + g0s0->n_to_blocks = 0; MainRegTable.rNursery = g0s0->blocks; MainRegTable.rCurrentNursery = g0s0->blocks; /* hp, hpLim, hp_bd, to_space etc. 
aren't used in G0S0 */ @@ -355,7 +359,7 @@ allocNursery (bdescr *last_bd, nat blocks) bd->link = last_bd; bd->step = g0s0; bd->gen_no = 0; - bd->evacuated = 0; + bd->flags = 0; bd->free = bd->start; last_bd = bd; } @@ -425,7 +429,7 @@ allocate(nat n) dbl_link_onto(bd, &g0s0->large_objects); bd->gen_no = 0; bd->step = g0s0; - bd->evacuated = 0; + bd->flags = BF_LARGE; bd->free = bd->start; /* don't add these blocks to alloc_blocks, since we're assuming * that large objects are likely to remain live for quite a while @@ -446,12 +450,12 @@ allocate(nat n) small_alloc_list = bd; bd->gen_no = 0; bd->step = g0s0; - bd->evacuated = 0; + bd->flags = 0; alloc_Hp = bd->start; alloc_HpLim = bd->start + BLOCK_SIZE_W; alloc_blocks++; } - + p = alloc_Hp; alloc_Hp += n; RELEASE_LOCK(&sm_mutex); @@ -587,7 +591,7 @@ calcLive(void) step *stp; if (RtsFlags.GcFlags.generations == 1) { - live = (g0s0->to_blocks - 1) * BLOCK_SIZE_W + + live = (g0s0->n_to_blocks - 1) * BLOCK_SIZE_W + ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_); return live; } @@ -601,8 +605,11 @@ calcLive(void) continue; } stp = &generations[g].steps[s]; - live += (stp->n_blocks - 1) * BLOCK_SIZE_W + - ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start) / sizeof(W_); + live += (stp->n_blocks - 1) * BLOCK_SIZE_W; + if (stp->hp_bd != NULL) { + live += ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start) + / sizeof(W_); + } } } return live; @@ -626,7 +633,8 @@ calcNeeded(void) for (s = 0; s < generations[g].n_steps; s++) { if (g == 0 && s == 0) { continue; } stp = &generations[g].steps[s]; - if (generations[g].steps[0].n_blocks > generations[g].max_blocks) { + if (generations[g].steps[0].n_blocks > generations[g].max_blocks + && stp->is_compacted == 0) { needed += 2 * stp->n_blocks; } else { needed += stp->n_blocks; @@ -646,7 +654,7 @@ calcNeeded(void) #ifdef DEBUG -extern void +void memInventory(void) { nat g, s; @@ -662,7 +670,7 @@ memInventory(void) total_blocks += stp->n_blocks; if (RtsFlags.GcFlags.generations == 1) { /* two-space collector has a to-space too :-) */ - total_blocks += g0s0->to_blocks; + total_blocks += g0s0->n_to_blocks; } for (bd = stp->large_objects; bd; bd = bd->link) { total_blocks += bd->blocks; @@ -689,45 +697,52 @@ memInventory(void) /* count the blocks on the free list */ free_blocks = countFreeList(); - ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK); - -#if 0 if (total_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK) { fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n", total_blocks, free_blocks, total_blocks + free_blocks, mblocks_allocated * BLOCKS_PER_MBLOCK); } -#endif -} -/* Full heap sanity check. */ + ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK); +} -extern void -checkSanity(nat N) +static nat +countBlocks(bdescr *bd) { - nat g, s; - - if (RtsFlags.GcFlags.generations == 1) { - checkHeap(g0s0->to_space, NULL); - checkChain(g0s0->large_objects); - } else { - - for (g = 0; g <= N; g++) { - for (s = 0; s < generations[g].n_steps; s++) { - if (g == 0 && s == 0) { continue; } - checkHeap(generations[g].steps[s].blocks, NULL); - } + nat n; + for (n=0; bd != NULL; bd=bd->link) { + n++; } - for (g = N+1; g < RtsFlags.GcFlags.generations; g++) { - for (s = 0; s < generations[g].n_steps; s++) { - checkHeap(generations[g].steps[s].blocks, - generations[g].steps[s].blocks->start); - checkChain(generations[g].steps[s].large_objects); - } + return n; +} + +/* Full heap sanity check. 
*/ +void +checkSanity( void ) +{ + nat g, s; + + if (RtsFlags.GcFlags.generations == 1) { + checkHeap(g0s0->to_blocks); + checkChain(g0s0->large_objects); + } else { + + for (g = 0; g < RtsFlags.GcFlags.generations; g++) { + for (s = 0; s < generations[g].n_steps; s++) { + if (g == 0 && s == 0) { continue; } + checkHeap(generations[g].steps[s].blocks); + ASSERT(countBlocks(generations[g].steps[s].blocks) + == generations[g].steps[s].n_blocks); + checkChain(generations[g].steps[s].large_objects); + if (g > 0) { + checkMutableList(generations[g].mut_list, g); + checkMutOnceList(generations[g].mut_once_list, g); + } + } + } + checkFreeListSanity(); } - checkFreeListSanity(); - } } #endif diff --git a/ghc/rts/Storage.h b/ghc/rts/Storage.h index e32d207a44..2b44e8b8de 100644 --- a/ghc/rts/Storage.h +++ b/ghc/rts/Storage.h @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: Storage.h,v 1.33 2001/07/23 10:47:16 simonmar Exp $ + * $Id: Storage.h,v 1.34 2001/07/23 17:23:20 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -78,8 +78,7 @@ extern void PleaseStopAllocating(void); MarkRoot(StgClosure *p) Returns the new location of the root. -------------------------------------------------------------------------- */ -extern void GarbageCollect(void (*get_roots)(void),rtsBool force_major_gc); -extern StgClosure *MarkRoot(StgClosure *p); +extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc); /* ----------------------------------------------------------------------------- Generational garbage collection support @@ -251,6 +250,8 @@ updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure * The CAF table - used to let us revert CAFs -------------------------------------------------------------------------- */ +void revertCAFs( void ); + #if defined(DEBUG) void printMutOnceList(generation *gen); void printMutableList(generation *gen); diff --git a/ghc/rts/StoragePriv.h b/ghc/rts/StoragePriv.h index 687ba1c07c..f953613c65 100644 --- a/ghc/rts/StoragePriv.h +++ b/ghc/rts/StoragePriv.h @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: StoragePriv.h,v 1.14 2001/01/24 15:39:50 simonmar Exp $ + * $Id: StoragePriv.h,v 1.15 2001/07/23 17:23:20 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -19,7 +19,12 @@ extern step *g0s0; extern generation *oldest_gen; extern void newCAF(StgClosure*); -extern StgTSO *relocate_TSO(StgTSO *src, StgTSO *dest); + +extern void move_TSO(StgTSO *src, StgTSO *dest); +extern StgTSO *relocate_stack(StgTSO *dest, int diff); + +extern StgClosure *static_objects; +extern StgClosure *scavenged_static_objects; extern StgWeak *weak_ptr_list; extern StgClosure *caf_list; @@ -53,9 +58,9 @@ static inline void dbl_link_onto(bdescr *bd, bdescr **list) { bd->link = *list; - bd->back = NULL; + bd->u.back = NULL; if (*list) { - (*list)->back = bd; /* double-link the list */ + (*list)->u.back = bd; /* double-link the list */ } *list = bd; } @@ -68,7 +73,7 @@ dbl_link_onto(bdescr *bd, bdescr **list) #ifdef DEBUG extern void memInventory(void); -extern void checkSanity(nat N); +extern void checkSanity(void); #endif /* @@ -81,4 +86,9 @@ int is_dynamically_loaded_code_or_rodata_ptr ( void* p ); int is_dynamically_loaded_rwdata_ptr ( void* p ); int is_not_dynamically_loaded_ptr ( void* p ); +/* Functions from GC.c + */ +void threadPaused(StgTSO *); +StgClosure *isAlive(StgClosure *p); + #endif /* STORAGEPRIV_H */ diff --git 
a/ghc/rts/parallel/GranSim.c b/ghc/rts/parallel/GranSim.c index c05a24898c..3e799ad58e 100644 --- a/ghc/rts/parallel/GranSim.c +++ b/ghc/rts/parallel/GranSim.c @@ -1,6 +1,6 @@ /* Time-stamp: <Tue Mar 06 2001 00:17:42 Stardate: [-30]6285.06 hwloidl> - $Id: GranSim.c,v 1.4 2001/03/22 03:51:11 hwloidl Exp $ + $Id: GranSim.c,v 1.5 2001/07/23 17:23:20 simonmar Exp $ Variables and functions specific to GranSim the parallelism simulator for GPH. @@ -48,7 +48,6 @@ #include "StgTypes.h" #include "Schedule.h" #include "SchedAPI.h" // for pushClosure -#include "GC.h" #include "GranSimRts.h" #include "GranSim.h" #include "ParallelRts.h"
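/* -----------------------------------------------------------------------------
   Notes and sketches on the changes above (editorial; not part of the commit)
   -------------------------------------------------------------------------- */

The rewritten LOOKS_LIKE_PTR in Sanity.c binds a local block descriptor inside the macro by means of a GCC statement expression, ({ ... }), whose value is that of its final statement. A stand-alone illustration of the construct, with hypothetical names (GCC-specific, not from the patch):

    /* a ({ ... }) block is an expression that may declare locals,
       so the macro argument is evaluated exactly once */
    #define SUCC_SQUARED(x) ({ int tmp_ = (x) + 1; tmp_ * tmp_; })

    /* SUCC_SQUARED(2) evaluates to 9 */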
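checkStackObject() returns the size in words of the stack object at sp, whether that is a tagged word, an info-table-headed stack closure, or an untagged closure pointer, per the three cases above. Walking a section of stack is therefore just repeated pointer advancement; a sketch equivalent to the loop checkStackChunk() performs, assuming only the declarations shown above:

    void
    checkStackChunkSketch( StgPtr sp, StgPtr stack_end )
    {
        StgPtr p = sp;
        while (p < stack_end) {
            p += checkStackObject(p);   /* words consumed by this object */
        }
    }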
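The new checkMutableList() and checkMutOnceList() walk a generation's remembered sets through the mut_link field, checking that mutable-list entries live in the right generation and are still flagged mutable, and that the mut-once list carries only one-shot entries (old-generation indirections and MUT_CONS cells). For orientation, here is a sketch of the idiom by which a closure lands on such a list; it is an illustration only, not the RTS's actual recordMutable definition:

    static void
    recordMutableSketch( StgMutClosure *p )
    {
        generation *gen = &generations[Bdescr((P_)p)->gen_no];
        p->mut_link   = gen->mut_list;   /* cons p onto the list ...   */
        gen->mut_list = p;               /* ... that the checkers walk */
    }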
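Across Schedule.c, Stable.c and Storage.h the root-marking interface changes shape: instead of calling MarkRoot() and storing the returned new location, GetRoots() and friends now receive an evac_fn and hand it the address of each root, so the collector can update the root in place. A minimal sketch of a client of the new interface, assuming only the evac_fn callback type, void (*)(StgClosure **), implied by the hunks above (my_root and myExtraRoots are hypothetical):

    static StgClosure *my_root = NULL;   /* object to keep alive across GCs */

    static void
    myExtraRoots( evac_fn evac )
    {
        if (my_root != NULL) {
            evac(&my_root);   /* the collector may overwrite my_root
                                 with the object's new address */
        }
    }

    /* used for one collection via: performGCWithRoots(myExtraRoots); */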
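Stable.c now splits the old gcStablePtrTable(rtsBool full) into phases: markStablePtrTable() saves each entry's pre-GC address in the new old field and evacuates live targets, gcStablePtrTable() reclaims dead stable names, and updateStablePtrTable() repairs addrToStableHash afterwards. Leaving aside the full-rebuild path and the free-slot test, the per-entry fix-up above condenses to this restatement (no new logic):

    /* after GC: p->old is the pre-GC address, p->addr the post-GC one */
    if (p->addr == NULL) {
        if (p->old != NULL) {
            /* target died: drop its stale hash entry */
            removeHashTable(addrToStableHash, (W_)p->old, NULL);
        }
    } else if (p->addr != p->old) {
        /* target moved: re-key the entry at its new address */
        removeHashTable(addrToStableHash, (W_)p->old, NULL);
        insertHashTable(addrToStableHash, (W_)p->addr,
                        (void *)(p - stable_ptr_table));
    }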
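threadStablePtrTable() exists for the benefit of the new compacting collector: a sliding compactor cannot leave forwarding pointers behind, so every location that points into the heap, including each stable-pointer slot and its StableName object, must be "threaded" and rewritten once the target's final address is known. GCCompact.c itself is outside this section, so the following is only a schematic of the classic threading step, not the RTS code:

    /* link a referring slot into a chain hung off the object's header;
       a later unthreading pass walks the chain and writes the object's
       new address into every slot */
    static void
    threadSketch( StgClosure **p )
    {
        StgClosure *q = *p;
        *p = (StgClosure *)q->header.info;    /* stash the old info word */
        q->header.info = (StgInfoTable *)p;   /* header -> newest slot   */
    }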
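Both stg_BLACKHOLE_entry and stg_CAF_BLACKHOLE_entry now overwrite the info table before calling recordMutable(), where the order used to be the reverse; the retained comment "closure is mutable since something has just been added to its BQ" suggests why: by the time the closure sits on a mutable list it should already be a BLACKHOLE_BQ. (Note that the comment copied into stg_BLACKHOLE_entry still says "Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC", although the code installs stg_BLACKHOLE_BQ_info in both entry points.) Schematically, with the same code as above and only the order emphasised:

    ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
    /* only now is the closure a BLACKHOLE_BQ, i.e. mutable, so only
       now may it go onto the mutable list: */
    recordMutable((StgMutClosure *)R1.cl);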
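In Storage.c, calcNeeded() now budgets to-space only for steps that will actually be copied: a step with is_compacted set is squeezed in place, so it needs n_blocks rather than 2 * n_blocks of headroom. Under these definitions, an oldest generation of 1000 blocks that is due for collection costs roughly 2000 blocks when copied but only roughly 1000 when compacted. Per step, the hunk above amounts to:

    /* 'collecting' abbreviates the test on the generation's first step,
       steps[0].n_blocks > max_blocks, from the calcNeeded() hunk above */
    needed += (collecting && !stp->is_compacted) ? 2 * stp->n_blocks
                                                 :     stp->n_blocks;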