summaryrefslogtreecommitdiff
path: root/rts/sm
diff options
context:
space:
mode:
authorGHC GitLab CI <ghc-ci@gitlab-haskell.org>2020-12-08 00:19:38 +0000
committerBen Gamari <ben@well-typed.com>2020-12-20 21:05:13 -0500
commita13bd3f147d63968bc50df6f43154bf2a05809df (patch)
tree047b6181c29370df65ce49ba2059c60125d5223a /rts/sm
parentcde7499476f66f1369bd55cc046e4bfdb5d4a2a6 (diff)
downloadhaskell-a13bd3f147d63968bc50df6f43154bf2a05809df.tar.gz
nonmoving: Refactor alloc_for_copy
Pull the cold non-moving allocation path out of alloc_for_copy.
Diffstat (limited to 'rts/sm')
-rw-r--r--rts/sm/Evac.c127
1 file changed, 79 insertions, 48 deletions
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index c37251d9b3..4355e96399 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -64,50 +64,93 @@ ATTR_NOINLINE static void evacuate_large(StgPtr p);
Allocate some space in which to copy an object.
-------------------------------------------------------------------------- */
-/* size is in words */
+static StgPtr
+alloc_in_nonmoving_heap (uint32_t size)
+{
+ gct->copied += size;
+ StgPtr to = nonmovingAllocate(gct->cap, size);
+
+ // Add segment to the todo list unless it's already there
+ // current->todo_link == NULL means not in todo list
+ struct NonmovingSegment *seg = nonmovingGetSegment(to);
+ if (!seg->todo_link) {
+ gen_workspace *ws = &gct->gens[oldest_gen->no];
+ seg->todo_link = ws->todo_seg;
+ ws->todo_seg = seg;
+ }
+
+ // The object which refers to this closure may have been aged (i.e.
+ // retained in a younger generation). Consequently, we must add the
+ // closure to the mark queue to ensure that it will be marked.
+ //
+ // However, if we are in a deadlock detection GC then we disable aging
+ // so there is no need.
+ //
+ // See Note [Non-moving GC: Marking evacuated objects].
+ if (major_gc && !deadlock_detect_gc) {
+ markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) to);
+ }
+ return to;
+}
+
+/* Inlined helper shared between alloc_for_copy_nonmoving and alloc_for_copy. */
STATIC_INLINE StgPtr
-alloc_for_copy (uint32_t size, uint32_t gen_no)
+alloc_in_moving_heap (uint32_t size, uint32_t gen_no)
{
- ASSERT(gen_no < RtsFlags.GcFlags.generations);
+ gen_workspace *ws = &gct->gens[gen_no]; // zero memory references here
- StgPtr to;
- gen_workspace *ws;
+ /* chain a new block onto the to-space for the destination gen if
+ * necessary.
+ */
+ StgPtr to = ws->todo_free;
+ ws->todo_free += size;
+ if (ws->todo_free > ws->todo_lim) {
+ to = todo_block_full(size, ws);
+ }
+ ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);
- if (RTS_UNLIKELY(RtsFlags.GcFlags.useNonmoving)) {
- /* See Note [Deadlock detection under nonmoving collector]. */
- const uint32_t oldest_gen_no = oldest_gen->no;
- if (deadlock_detect_gc) {
- gen_no = oldest_gen_no;
- }
+ return to;
+}
- if (gen_no == oldest_gen_no) {
- gct->copied += size;
- to = nonmovingAllocate(gct->cap, size);
-
- // Add segment to the todo list unless it's already there
- // current->todo_link == NULL means not in todo list
- struct NonmovingSegment *seg = nonmovingGetSegment(to);
- if (!seg->todo_link) {
- gen_workspace *ws = &gct->gens[oldest_gen_no];
- seg->todo_link = ws->todo_seg;
- ws->todo_seg = seg;
- }
+/*
+ * N.B. We duplicate much of alloc_for_copy here to minimize the number of
+ * branches introduced in the moving GC path of alloc_for_copy while minimizing
+ * repeated work.
+ */
+static StgPtr
+alloc_for_copy_nonmoving (uint32_t size, uint32_t gen_no)
+{
+ /* See Note [Deadlock detection under nonmoving collector]. */
+ if (deadlock_detect_gc) {
+ return alloc_in_nonmoving_heap(size);
+ }
- // The object which refers to this closure may have been aged (i.e.
- // retained in a younger generation). Consequently, we must add the
- // closure to the mark queue to ensure that it will be marked.
- //
- // However, if we are in a deadlock detection GC then we disable aging
- // so there is no need.
- //
- // See Note [Non-moving GC: Marking evacuated objects].
- if (major_gc && !deadlock_detect_gc) {
- markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) to);
- }
- return to;
+ /* Should match logic from alloc_for_copy */
+ if (gen_no < gct->evac_gen_no) {
+ if (gct->eager_promotion) {
+ gen_no = gct->evac_gen_no;
+ } else {
+ gct->failed_to_evac = true;
}
}
+ if (gen_no == oldest_gen->no) {
+ return alloc_in_nonmoving_heap(size);
+ } else {
+ return alloc_in_moving_heap(size, gen_no);
+ }
+}
+
+/* size is in words */
+STATIC_INLINE StgPtr
+alloc_for_copy (uint32_t size, uint32_t gen_no)
+{
+ ASSERT(gen_no < RtsFlags.GcFlags.generations);
+
+ if (RTS_UNLIKELY(RtsFlags.GcFlags.useNonmoving)) {
+ return alloc_for_copy_nonmoving(size, gen_no);
+ }
+
/* Find out where we're going, using the handy "to" pointer in
* the gen of the source object. If it turns out we need to
* evacuate to an older generation, adjust it here (see comment
@@ -121,19 +164,7 @@ alloc_for_copy (uint32_t size, uint32_t gen_no)
}
}
- ws = &gct->gens[gen_no]; // zero memory references here
-
- /* chain a new block onto the to-space for the destination gen if
- * necessary.
- */
- to = ws->todo_free;
- ws->todo_free += size;
- if (ws->todo_free > ws->todo_lim) {
- to = todo_block_full(size, ws);
- }
- ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);
-
- return to;
+ return alloc_in_moving_heap(size, gen_no);
}
/* -----------------------------------------------------------------------------