path: root/rts/sm/NonMovingScav.c
author      Ben Gamari <ben@smart-cactus.org>    2019-10-23 14:01:45 -0400
committer   Ben Gamari <ben@smart-cactus.org>    2019-10-23 14:56:46 -0400
commit      7f72b540288bbdb32a6750dd64b9d366501ed10c (patch)
tree        438203c9c0b052fb65210b5e89acfa7b1d44d5b8 /rts/sm/NonMovingScav.c
parent      8abddac870d4b49f77b5ce56bfeb68328dd0d651 (diff)
parent      984745b074c186f6058730087a4fc8156240ec76 (diff)
download    haskell-7f72b540288bbdb32a6750dd64b9d366501ed10c.tar.gz
Merge non-moving garbage collector
This introduces a concurrent mark & sweep garbage collector to manage the old
generation. The concurrent nature of this collector typically results in
significantly reduced maximum and mean pause times in applications with large
working sets.

Due to the large and intricate nature of the change I have opted to preserve
the fully-buildable history, including merge commits, which is described in
the "Branch overview" section below.

Collector design
================

The full design of the collector implemented here is described in detail in a
technical note

> B. Gamari. "A Concurrent Garbage Collector For the Glasgow Haskell
> Compiler" (2018)

This document can be requested from @bgamari.

The basic heap structure used in this design is heavily inspired by

> K. Ueno & A. Ohori. "A fully concurrent garbage collector for
> functional programs on multicore processors." /ACM SIGPLAN Notices/
> Vol. 51. No. 9 (presented at ICFP 2016)

This design is intended to allow both marking and sweeping concurrent with the
execution of a multi-core mutator. Unlike the Ueno design, which requires no
global synchronization pauses, the collector introduced here requires a
stop-the-world pause at the beginning and end of the mark phase.

To avoid heap fragmentation, the allocator consists of a number of fixed-size
/sub-allocators/. Each of these sub-allocators allocates into its own set of
/segments/, themselves allocated from the block allocator. Each segment is
broken into a set of fixed-size allocation blocks (which back allocations) in
addition to a bitmap (used to track the liveness of blocks) and some
additional metadata (also used to track liveness). A simplified sketch of this
segment layout follows the branch overview below.

This heap structure enables collection via mark-and-sweep, which can be
performed concurrently via a snapshot-at-the-beginning scheme (although
concurrent collection is not implemented in this patch).

Implementation structure
========================

The majority of the collector is implemented in a handful of files:

 * `rts/Nonmoving.c` is the heart of the beast. It implements the entry-point
   to the nonmoving collector (`nonmoving_collect`), as well as the allocator
   (`nonmoving_allocate`) and a number of utilities for manipulating the heap.

 * `rts/NonmovingMark.c` implements the mark queue functionality, update
   remembered set, and mark loop.

 * `rts/NonmovingSweep.c` implements the sweep loop.

 * `rts/NonmovingScav.c` implements the logic necessary to scavenge the
   nonmoving heap.

Branch overview
===============

```
 * wip/gc/opt-pause:
 |   A variety of small optimisations to further reduce pause times.
 |
 * wip/gc/compact-nfdata:
 |   Introduce support for compact regions into the non-moving
 |\  collector
 | \
 |  \
 | | * wip/gc/segment-header-to-bdescr:
 | | |   Another optimization that we are considering, pushing
 | | |   some segment metadata into the segment descriptor for
 | | |   the sake of locality during mark
 | | |
 | * | wip/gc/shortcutting:
 | | |   Support for indirection shortcutting and the selector optimization
 | | |   in the non-moving heap.
 | | |
 * | | wip/gc/docs:
 | |/    Work on implementation documentation.
 | /
 |/
 * wip/gc/everything:
 |   A roll-up of everything below.
 |\
 | \
 | |\
 | | \
 | | * wip/gc/optimize:
 | | |   A variety of optimizations, primarily to the mark loop.
 | | |   Some of these are microoptimizations but a few are quite
 | | |   significant. In particular, the prefetch patches have
 | | |   produced a nontrivial improvement in mark performance.
 | | |
 | | * wip/gc/aging:
 | | |   Enable support for aging in major collections.
 | | |
 | * | wip/gc/test:
 | | |   Fix up the testsuite to more or less pass.
 | | |
 * | | wip/gc/instrumentation:
 | | |   A variety of runtime instrumentation including statistics
 | | /   support, the nonmoving census, and eventlog support.
 | |/
 | /
 |/
 * wip/gc/nonmoving-concurrent:
 |   The concurrent write barriers.
 |
 * wip/gc/nonmoving-nonconcurrent:
 |   The nonmoving collector without the write barriers necessary
 |   for concurrent collection.
 |
 * wip/gc/preparation:
 |   A merge of the various preparatory patches that aren't directly
 |   implementing the GC.
 |
 | * GHC HEAD
 .  .
 .
```
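For orientation, here is a minimal sketch of the segment layout described in
the "Collector design" section above: one fixed allocation block size per
segment, a bump index into the blocks, and a per-block liveness bitmap. Every
name in it (`ExampleSegment`, `example_segment_alloc`, `SEGMENT_DATA_BYTES`)
is invented for illustration; the actual structures introduced by this patch
live in the RTS sources and differ in naming and detail.

```c
/* Illustrative only: hypothetical, simplified types loosely mirroring the
 * segment layout described in the commit message. */
#include <stdint.h>
#include <stdio.h>

#define SEGMENT_DATA_BYTES (32 * 1024)   /* hypothetical segment payload size */

/* A segment services a single fixed block size; each block is shadowed by a
 * bitmap entry recording whether it was live at the last collection. */
struct ExampleSegment {
    uint16_t block_size;                 /* bytes per allocation block       */
    uint16_t next_free;                  /* index of the next unused block   */
    uint8_t  bitmap[SEGMENT_DATA_BYTES]; /* liveness flags, one per block
                                            (over-sized for simplicity)      */
    uint8_t  data[SEGMENT_DATA_BYTES];   /* the allocation blocks themselves */
};

/* Bump-allocate one block from a segment; NULL means the segment is full and
 * the caller would move on to a fresh segment from the block allocator. */
static void *example_segment_alloc(struct ExampleSegment *seg)
{
    unsigned n_blocks = SEGMENT_DATA_BYTES / seg->block_size;
    if (seg->next_free >= n_blocks)
        return NULL;
    return &seg->data[seg->next_free++ * seg->block_size];
}

int main(void)
{
    static struct ExampleSegment seg = { .block_size = 64 };
    char *a = example_segment_alloc(&seg);
    char *b = example_segment_alloc(&seg);
    printf("blocks allocated at segment offsets %td and %td\n",
           a - (char *)seg.data, b - (char *)seg.data);
    return 0;
}
```

In the real design there are many such segments per sub-allocator and one
sub-allocator per block size, which is what keeps the heap free of
fragmentation.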
Diffstat (limited to 'rts/sm/NonMovingScav.c')
-rw-r--r--    rts/sm/NonMovingScav.c    389
1 file changed, 389 insertions, 0 deletions
diff --git a/rts/sm/NonMovingScav.c b/rts/sm/NonMovingScav.c
new file mode 100644
index 0000000000..9583c7baf9
--- /dev/null
+++ b/rts/sm/NonMovingScav.c
@@ -0,0 +1,389 @@
+#include "Rts.h"
+#include "RtsUtils.h"
+#include "NonMoving.h"
+#include "NonMovingScav.h"
+#include "Capability.h"
+#include "Scav.h"
+#include "Evac.h"
+#include "GCThread.h" // for GCUtils.h
+#include "GCUtils.h"
+#include "Printer.h"
+#include "MarkWeak.h" // scavengeLiveWeak
+
+void
+nonmovingScavengeOne (StgClosure *q)
+{
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
+ StgPtr p = (StgPtr)q;
+ const StgInfoTable *info = get_itbl(q);
+ const bool saved_eager_promotion = gct->eager_promotion;
+
+ switch (info->type) {
+
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
+ {
+ StgMVar *mvar = ((StgMVar *)p);
+ gct->eager_promotion = false;
+ evacuate((StgClosure **)&mvar->head);
+ evacuate((StgClosure **)&mvar->tail);
+ evacuate((StgClosure **)&mvar->value);
+ gct->eager_promotion = saved_eager_promotion;
+ if (gct->failed_to_evac) {
+ mvar->header.info = &stg_MVAR_DIRTY_info;
+ } else {
+ mvar->header.info = &stg_MVAR_CLEAN_info;
+ }
+ break;
+ }
+
+ case TVAR:
+ {
+ StgTVar *tvar = ((StgTVar *)p);
+ gct->eager_promotion = false;
+ evacuate((StgClosure **)&tvar->current_value);
+ evacuate((StgClosure **)&tvar->first_watch_queue_entry);
+ gct->eager_promotion = saved_eager_promotion;
+ if (gct->failed_to_evac) {
+ tvar->header.info = &stg_TVAR_DIRTY_info;
+ } else {
+ tvar->header.info = &stg_TVAR_CLEAN_info;
+ }
+ break;
+ }
+
+ case FUN_2_0:
+ scavenge_fun_srt(info);
+ evacuate(&((StgClosure *)p)->payload[1]);
+ evacuate(&((StgClosure *)p)->payload[0]);
+ break;
+
+ case THUNK_2_0:
+ scavenge_thunk_srt(info);
+ evacuate(&((StgThunk *)p)->payload[1]);
+ evacuate(&((StgThunk *)p)->payload[0]);
+ break;
+
+ case CONSTR_2_0:
+ evacuate(&((StgClosure *)p)->payload[1]);
+ evacuate(&((StgClosure *)p)->payload[0]);
+ break;
+
+ case THUNK_1_0:
+ scavenge_thunk_srt(info);
+ evacuate(&((StgThunk *)p)->payload[0]);
+ break;
+
+ case FUN_1_0:
+ scavenge_fun_srt(info);
+ FALLTHROUGH;
+ case CONSTR_1_0:
+ evacuate(&((StgClosure *)p)->payload[0]);
+ break;
+
+ case THUNK_0_1:
+ scavenge_thunk_srt(info);
+ break;
+
+ case FUN_0_1:
+ scavenge_fun_srt(info);
+ FALLTHROUGH;
+ case CONSTR_0_1:
+ break;
+
+ case THUNK_0_2:
+ scavenge_thunk_srt(info);
+ break;
+
+ case FUN_0_2:
+ scavenge_fun_srt(info);
+ FALLTHROUGH;
+ case CONSTR_0_2:
+ break;
+
+ case THUNK_1_1:
+ scavenge_thunk_srt(info);
+ evacuate(&((StgThunk *)p)->payload[0]);
+ break;
+
+ case FUN_1_1:
+ scavenge_fun_srt(info);
+ FALLTHROUGH;
+ case CONSTR_1_1:
+ evacuate(&q->payload[0]);
+ break;
+
+ case FUN:
+ scavenge_fun_srt(info);
+ goto gen_obj;
+
+ case THUNK:
+ {
+ scavenge_thunk_srt(info);
+ StgPtr end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
+ for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
+ evacuate((StgClosure **)p);
+ }
+ break;
+ }
+
+ case WEAK:
+ {
+ // We must evacuate the key since it may refer to an object in the
+ // moving heap which may be long gone by the time we call
+ // nonmovingTidyWeaks.
+ StgWeak *weak = (StgWeak *) p;
+ gct->eager_promotion = true;
+ evacuate(&weak->key);
+ gct->eager_promotion = saved_eager_promotion;
+ goto gen_obj;
+ }
+
+ gen_obj:
+ case CONSTR:
+ case CONSTR_NOCAF:
+ case PRIM:
+ {
+ StgPtr end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ evacuate((StgClosure **)p);
+ }
+ break;
+ }
+
+ case BCO: {
+ StgBCO *bco = (StgBCO *)p;
+ evacuate((StgClosure **)&bco->instrs);
+ evacuate((StgClosure **)&bco->literals);
+ evacuate((StgClosure **)&bco->ptrs);
+ break;
+ }
+
+ case MUT_VAR_CLEAN:
+ case MUT_VAR_DIRTY:
+ gct->eager_promotion = false;
+ evacuate(&((StgMutVar *)p)->var);
+ gct->eager_promotion = saved_eager_promotion;
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
+ }
+ break;
+
+ case BLOCKING_QUEUE:
+ {
+ StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+ gct->eager_promotion = false;
+ evacuate(&bq->bh);
+ evacuate((StgClosure**)&bq->owner);
+ evacuate((StgClosure**)&bq->queue);
+ evacuate((StgClosure**)&bq->link);
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
+ } else {
+ bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
+ }
+ break;
+ }
+
+ case THUNK_SELECTOR:
+ {
+ StgSelector *s = (StgSelector *)p;
+ evacuate(&s->selectee);
+ break;
+ }
+
+ // A chunk of stack saved in a heap object
+ case AP_STACK:
+ {
+ StgAP_STACK *ap = (StgAP_STACK *)p;
+
+ evacuate(&ap->fun);
+ scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
+ break;
+ }
+
+ case PAP:
+ p = scavenge_PAP((StgPAP *)p);
+ break;
+
+ case AP:
+ scavenge_AP((StgAP *)p);
+ break;
+
+ case ARR_WORDS:
+ // nothing to follow
+ break;
+
+ case MUT_ARR_PTRS_CLEAN:
+ case MUT_ARR_PTRS_DIRTY:
+ {
+ gct->eager_promotion = false;
+ scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
+ gct->eager_promotion = saved_eager_promotion;
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+ }
+ gct->failed_to_evac = true; // always put it on the mutable list.
+ break;
+ }
+
+ case MUT_ARR_PTRS_FROZEN_CLEAN:
+ case MUT_ARR_PTRS_FROZEN_DIRTY:
+ // follow everything
+ {
+ scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info;
+ }
+ break;
+ }
+
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ // follow everything
+ {
+ StgPtr next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ gct->eager_promotion = false;
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+ }
+ gct->failed_to_evac = true; // always put it on the mutable list.
+ break;
+ }
+
+ case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
+ case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
+ // follow everything
+ {
+ StgPtr next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info;
+ }
+ break;
+ }
+
+ case TSO:
+ {
+ scavengeTSO((StgTSO *)p);
+ break;
+ }
+
+ case STACK:
+ {
+ StgStack *stack = (StgStack*)p;
+
+ gct->eager_promotion = false;
+ scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+ gct->eager_promotion = saved_eager_promotion;
+ stack->dirty = gct->failed_to_evac;
+ break;
+ }
+
+ case MUT_PRIM:
+ {
+ StgPtr end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ gct->eager_promotion = false;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ evacuate((StgClosure **)p);
+ }
+ gct->eager_promotion = saved_eager_promotion;
+ gct->failed_to_evac = true; // mutable
+ break;
+ }
+
+ case TREC_CHUNK:
+ {
+ StgWord i;
+ StgTRecChunk *tc = ((StgTRecChunk *) p);
+ TRecEntry *e = &(tc -> entries[0]);
+ gct->eager_promotion = false;
+ evacuate((StgClosure **)&tc->prev_chunk);
+ for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
+ evacuate((StgClosure **)&e->tvar);
+ evacuate((StgClosure **)&e->expected_value);
+ evacuate((StgClosure **)&e->new_value);
+ }
+ gct->eager_promotion = saved_eager_promotion;
+ gct->failed_to_evac = true; // mutable
+ break;
+ }
+
+ case IND:
+ case BLACKHOLE:
+ case IND_STATIC:
+ evacuate(&((StgInd *)p)->indirectee);
+ break;
+
+ case COMPACT_NFDATA:
+ scavenge_compact((StgCompactNFData*)p);
+ break;
+
+ default:
+ barf("nonmoving scavenge: unimplemented/strange closure type %d @ %p",
+ info->type, p);
+ }
+
+ if (gct->failed_to_evac) {
+ // Mutable object or points to a younger object, add to the mut_list
+ gct->failed_to_evac = false;
+ if (oldest_gen->no > 0) {
+ recordMutableGen_GC(q, oldest_gen->no);
+ }
+ }
+}
+
+/* Scavenge objects evacuated into a nonmoving segment by a minor GC */
+void
+scavengeNonmovingSegment (struct NonmovingSegment *seg)
+{
+ const StgWord blk_size = nonmovingSegmentBlockSize(seg);
+ gct->evac_gen_no = oldest_gen->no;
+ gct->failed_to_evac = false;
+
+ // scavenge objects between scan and free_ptr whose bitmap bits are 0
+ bdescr *seg_block = Bdescr((P_)seg);
+
+ ASSERT(seg_block->u.scan >= (P_)nonmovingSegmentGetBlock(seg, 0));
+ ASSERT(seg_block->u.scan <= (P_)nonmovingSegmentGetBlock(seg, seg->next_free));
+
+ StgPtr scan_end = (P_)nonmovingSegmentGetBlock(seg, seg->next_free);
+ if (seg_block->u.scan == scan_end)
+ return;
+
+ nonmoving_block_idx p_idx = nonmovingGetBlockIdx(seg_block->u.scan);
+ while (seg_block->u.scan < scan_end) {
+ StgClosure *p = (StgClosure*)seg_block->u.scan;
+
+ // bit set = was allocated in a previous GC, no need to scavenge
+ // bit not set = new allocation, so scavenge
+ if (nonmovingGetMark(seg, p_idx) == 0) {
+ nonmovingScavengeOne(p);
+ }
+
+ p_idx++;
+ seg_block->u.scan = (P_)(((uint8_t*)seg_block->u.scan) + blk_size);
+ }
+}
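The bitmap test in `scavengeNonmovingSegment` above carries the key idea: a
set bit marks a block that survived a previous collection and needs no
further work, while a clear bit marks an object newly evacuated into the
segment by the minor GC that must still be scavenged. The sketch below
isolates that skip-or-scavenge pattern; it is hypothetical, with invented
names and a printf standing in for `nonmovingScavengeOne`.

```c
/* Illustrative only: a stripped-down, hypothetical rendering of the scan
 * loop above. Block sizes, the bitmap encoding, and the actual scavenging
 * work are all far more involved in the RTS; every name here is invented. */
#include <stdint.h>
#include <stdio.h>

#define N_BLOCKS 8   /* hypothetical number of blocks in a segment */

/* One liveness flag per block: non-zero means the block survived a previous
 * collection (already scanned), zero means it was freshly evacuated into the
 * segment by the minor GC and still needs scavenging. */
static void scan_new_blocks(const uint8_t bitmap[N_BLOCKS])
{
    for (unsigned i = 0; i < N_BLOCKS; i++) {
        if (bitmap[i] != 0)
            continue;                        /* old allocation: nothing to do */
        printf("scavenge block %u\n", i);    /* stand-in for nonmovingScavengeOne */
    }
}

int main(void)
{
    const uint8_t bitmap[N_BLOCKS] = { 1, 1, 0, 0, 1, 0, 0, 0 };
    scan_new_blocks(bitmap);
    return 0;
}
```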