author | Ben Gamari <ben@smart-cactus.org> | 2020-03-03 23:42:14 -0500
---|---|---
committer | Marge Bot <ben+marge-bot@smart-cactus.org> | 2020-03-14 05:28:43 -0400
commit | 20d4d676964382b313b9e44062e45a7c38621999 (patch) |
tree | d575a7cf7de04ad22bb309ac22baef311464a907 |
parent | 7c3e39a9a7ccb3b6c2953b0397a0d315dc0ec7d5 (diff) |
download | haskell-20d4d676964382b313b9e44062e45a7c38621999.tar.gz |
nonmoving: Don't traverse filled segment list in pause
The non-moving collector previously walked the entire filled segment
list during the preparatory pause. However, this is far more work than
is strictly necessary: we can get away with merely collecting the
allocators' filled segment list heads during the pause and processing
the lists themselves during the concurrent phase. This can
significantly reduce the maximum gen1 GC pause time in programs with
high rates of long-lived allocations.
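
A minimal standalone sketch of the idea, to make the trade-off concrete: the names `Segment`, `Allocator`, `grab_filled`, and `process_filled` are illustrative stand-ins rather than the RTS API, and the segment layout is reduced to the two fields the scheme touches. The pause performs a single O(1) pointer swap per allocator; the O(n) walk that snapshots each segment and splices the list onto the sweep list runs later, concurrently with the mutators.

```c
#include <stddef.h>
#include <stdint.h>

struct Segment {
    struct Segment *link;
    uint32_t next_free;       // allocation cursor
    uint32_t next_free_snap;  // snapshot of the cursor at GC start
};

struct Allocator {
    struct Segment *filled;        // list filled by mutators
    struct Segment *saved_filled;  // detached list awaiting processing
};

static struct Segment *sweep_list = NULL;

// Pause phase: O(1) per allocator -- just detach the filled list head.
static void grab_filled(struct Allocator *a)
{
    a->saved_filled = a->filled;
    a->filled = NULL;
}

// Concurrent phase: walk the saved list, snapshot each segment's
// allocation cursor, and splice the whole list onto sweep_list.
static void process_filled(struct Allocator *a)
{
    struct Segment *filled = a->saved_filled;
    if (filled == NULL)
        return;
    struct Segment *seg = filled;
    for (;;) {
        seg->next_free_snap = seg->next_free;
        if (seg->link == NULL)
            break;
        seg = seg->link;
    }
    seg->link = sweep_list;  // splice the list in one pointer write
    sweep_list = filled;
    a->saved_filled = NULL;
}

int main(void)
{
    struct Segment s1 = { NULL, 42, 0 };
    struct Segment s2 = { &s1, 7, 0 };
    struct Allocator a = { &s2, NULL };

    grab_filled(&a);     // during the pause
    process_filled(&a);  // during concurrent mark
    return (sweep_list == &s2 && s1.next_free_snap == 42) ? 0 : 1;
}
```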
-rw-r--r-- | rts/sm/NonMoving.c | 43
-rw-r--r-- | rts/sm/NonMoving.h | 1

2 files changed, 26 insertions, 18 deletions
```diff
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index fe9d22b479..bdf22dc37b 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -727,25 +727,10 @@ static void nonmovingPrepareMark(void)
             nonmovingSegmentInfo(seg)->next_free_snap = seg->next_free;
         }
 
-        // Update filled segments' snapshot pointers and move to sweep_list
-        uint32_t n_filled = 0;
-        struct NonmovingSegment *const filled = alloca->filled;
+        // Save the filled segments for later processing during the concurrent
+        // mark phase.
+        alloca->saved_filled = alloca->filled;
         alloca->filled = NULL;
-        if (filled) {
-            struct NonmovingSegment *seg = filled;
-            while (true) {
-                // Set snapshot
-                nonmovingSegmentInfo(seg)->next_free_snap = seg->next_free;
-                n_filled++;
-                if (seg->link)
-                    seg = seg->link;
-                else
-                    break;
-            }
-            // add filled segments to sweep_list
-            seg->link = nonmovingHeap.sweep_list;
-            nonmovingHeap.sweep_list = filled;
-        }
 
         // N.B. It's not necessary to update snapshot pointers of active segments;
         // they were set after they were swept and haven't seen any allocation
@@ -969,6 +954,28 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
     debugTrace(DEBUG_nonmoving_gc, "Starting mark...");
     stat_startNonmovingGc();
 
+    // Walk the list of filled segments that we collected during preparation,
+    // update their snapshot pointers, and move them to the sweep list.
+    for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
+        struct NonmovingSegment *filled = nonmovingHeap.allocators[alloca_idx]->saved_filled;
+        uint32_t n_filled = 0;
+        if (filled) {
+            struct NonmovingSegment *seg = filled;
+            while (true) {
+                // Set snapshot
+                nonmovingSegmentInfo(seg)->next_free_snap = seg->next_free;
+                n_filled++;
+                if (seg->link)
+                    seg = seg->link;
+                else
+                    break;
+            }
+            // add filled segments to sweep_list
+            seg->link = nonmovingHeap.sweep_list;
+            nonmovingHeap.sweep_list = filled;
+        }
+    }
+
     // Do concurrent marking; most of the heap will get marked here.
     nonmovingMarkThreadsWeaks(mark_queue);
diff --git a/rts/sm/NonMoving.h b/rts/sm/NonMoving.h
index 36ecd8b0af..6eabcb8493 100644
--- a/rts/sm/NonMoving.h
+++ b/rts/sm/NonMoving.h
@@ -62,6 +62,7 @@ struct NonmovingSegment {
 // A non-moving allocator for a particular block size
 struct NonmovingAllocator {
     struct NonmovingSegment *filled;
+    struct NonmovingSegment *saved_filled;
     struct NonmovingSegment *active;
     // indexed by capability number
     struct NonmovingSegment *current[];
```
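
A note on the design, as far as it can be read from the diff: the new `saved_filled` field presumably has to be separate from `filled` because mutators may begin pushing newly filled segments onto `filled` again as soon as the pause ends, while the saved list is still waiting to be consumed by the concurrent mark. The net effect is that the pause does only `NONMOVING_ALLOCA_CNT` pointer swaps, and the linear walk over filled segments is absorbed into the concurrent phase.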