author     Ben Gamari <ben@smart-cactus.org>  2020-05-06 18:40:30 -0400
committer  Ben Gamari <ben@smart-cactus.org>  2020-05-06 18:40:30 -0400
commit     22d40db13f41354b9fc696c505fdc2eeca4d4d56 (patch)
tree       683596a7c2a1d6504f9085e4cdc23f1676fdd001
parent     dd406b5fdb870c57edbe05e287de9d07d2e06239 (diff)
download   haskell-wip/gc/mark-queue-refactor.tar.gz
branch     wip/gc/mark-queue-refactor

MarkContext refactoring
-rw-r--r--  rts/STM.c               4
-rw-r--r--  rts/Schedule.c          2
-rw-r--r--  rts/sm/NonMovingMark.c  572
-rw-r--r--  rts/sm/NonMovingMark.h  11
4 files changed, 326 insertions(+), 263 deletions(-)
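
This patch replaces the two parallel marking entry points (the concurrent collector's MarkQueue and the UpdRemSet used by the moving GC and by the mutator's write barrier) with a single tagged union, struct MarkContext, and retires the walk_closures_cb callback plumbing in the stack- and bitmap-walking code. A minimal standalone sketch of the dispatch pattern; the types below are simplified stand-ins, not the real RTS definitions:

    /* Standalone model of the MarkContext pattern; Queue/RSet are
     * simplified stand-ins for MarkQueue/UpdRemSet. */
    #include <stdio.h>

    struct Queue { int n; };
    struct RSet  { int n; };

    enum CtxKind { CTX_QUEUE, CTX_RSET };

    struct Ctx {
        enum CtxKind kind;
        union {
            struct Queue *queue;
            struct RSet  *rset;
        };
    };

    /* One push entry point; the context decides where entries land. */
    static void push(struct Ctx *c) {
        switch (c->kind) {
        case CTX_QUEUE: c->queue->n++; break;  /* concurrent-mark path */
        case CTX_RSET:  c->rset->n++;  break;  /* remembered-set path  */
        }
    }

    int main(void) {
        struct Queue q = { 0 };
        struct Ctx ctx = { .kind = CTX_QUEUE, .queue = &q };
        push(&ctx);
        printf("queued: %d\n", q.n);  /* queued: 1 */
        return 0;
    }
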
diff --git a/rts/STM.c b/rts/STM.c
index 3ba9dcf7a8..71de71399c 100644
--- a/rts/STM.c
+++ b/rts/STM.c
@@ -300,7 +300,7 @@ static StgClosure *lock_tvar(Capability *cap,
IF_NONMOVING_WRITE_BARRIER_ENABLED {
if (result)
- updateRemembSetPushClosure(cap->upd_rem_set, result);
+ updateRemembSetPushClosure(&cap->upd_rem_set, result);
}
return result;
}
@@ -327,7 +327,7 @@ static StgBool cond_lock_tvar(Capability *cap,
result = (StgClosure *)w;
IF_NONMOVING_WRITE_BARRIER_ENABLED {
if (result)
- updateRemembSetPushClosure(cap->upd_rem_set, expected);
+ updateRemembSetPushClosure(&cap->upd_rem_set, expected);
}
TRACE("%p : %s", trec, result ? "success" : "failure");
return (result == expected);
diff --git a/rts/Schedule.c b/rts/Schedule.c
index ce1a1fc060..415047bff2 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -2504,7 +2504,7 @@ resumeThread (void *task_)
incall->suspended_cap = NULL;
// we will modify tso->_link
IF_NONMOVING_WRITE_BARRIER_ENABLED {
- updateRemembSetPushClosure(cap, (StgClosure *)tso->_link);
+ updateRemembSetPushClosure(&cap->upd_rem_set, (StgClosure *)tso->_link);
}
tso->_link = END_TSO_QUEUE;
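
Both files now pass the capability's embedded update remembered set by address; notably, the Schedule.c site previously passed the Capability itself where an UpdRemSet was expected. A standalone sketch of the unified call shape, with Capability and UpdRemSet as simplified stand-ins:

    /* Standalone model of the unified signature; these are stand-in
     * types, not the RTS definitions. */
    #include <assert.h>

    typedef struct { int entries; } UpdRemSet;
    typedef struct { UpdRemSet upd_rem_set; /* embedded by value */ } Capability;

    static void updateRemembSetPushClosure_model(UpdRemSet *rs) {
        rs->entries++;  /* record one closure */
    }

    int main(void) {
        Capability cap = { .upd_rem_set = { 0 } };
        /* every caller passes the embedded set by address: */
        updateRemembSetPushClosure_model(&cap.upd_rem_set);
        assert(cap.upd_rem_set.entries == 1);
        return 0;
    }
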
diff --git a/rts/sm/NonMovingMark.c b/rts/sm/NonMovingMark.c
index 5798330b19..bb0c0d8b59 100644
--- a/rts/sm/NonMovingMark.c
+++ b/rts/sm/NonMovingMark.c
@@ -27,13 +27,15 @@
#include "sm/Storage.h"
#include "CNF.h"
-static void mark_closure (MarkQueue *queue, const StgClosure *p, StgClosure **origin);
-static void mark_tso (MarkQueue *queue, StgTSO *tso);
-static void mark_stack_upd_rem_set (UpdRemSet *rs, StgStack *stack);
-static void mark_PAP_payload (MarkQueue *queue,
- StgClosure *fun,
- StgClosure **payload,
- StgWord size);
+/* forward declarations */
+struct MarkContext;
+static void mark_tso (struct MarkContext *mctx, StgTSO *tso);
+static void mark_stack (struct MarkContext *mctx, StgStack *stack);
+static
+void mark_PAP_payload (struct MarkContext *mctx,
+ StgClosure *fun,
+ StgClosure **payload,
+ StgWord size);
// How many Array# entries to add to the mark queue at once?
#define MARK_ARRAY_CHUNK_LENGTH 128
@@ -248,6 +250,152 @@ StgWord nonmoving_write_barrier_enabled = false;
*/
MarkQueue *current_mark_queue = NULL;
+
+/* The context from which nonmovingPushClosure is being called */
+enum MarkContextKind {
+ MARK_CTX_IN_CONC_MARK,
+ MARK_CTX_IN_MOVING_GC,
+ MARK_CTX_IN_MUTATOR
+};
+
+/* The push target corresponding to each of the contexts above */
+struct MarkContext {
+ enum MarkContextKind kind;
+ union {
+ struct {
+ MarkQueue *queue;
+ } in_conc_mark;
+ struct {
+ UpdRemSet *rset;
+ } in_moving_gc;
+ struct {
+ UpdRemSet *rset;
+ } in_mutator;
+ };
+};
+
+static struct MarkContext markContextInConcMark(MarkQueue *queue) {
+ return (struct MarkContext) {
+ .kind = MARK_CTX_IN_CONC_MARK,
+ .in_conc_mark = { .queue = queue },
+ };
+}
+
+static struct MarkContext markContextInMovingGc(UpdRemSet *rset) {
+ return (struct MarkContext) {
+ .kind = MARK_CTX_IN_MOVING_GC,
+ .in_moving_gc = { .rset = rset },
+ };
+}
+
+static struct MarkContext markContextInMutator(UpdRemSet *rset) {
+ return (struct MarkContext) {
+ .kind = MARK_CTX_IN_MUTATOR,
+ .in_mutator = { .rset = rset },
+ };
+}
+
+/*********************************************************
+ * Pushing to either the mark queue or remembered set
+ *********************************************************/
+
+
+// Check if the object is traced by the non-moving collector. This holds
+// when any of the following is true:
+//
+// - Object is in the non-moving heap
+// - Object is large (BF_LARGE) and marked BF_NONMOVING
+// - Object is static (HEAP_ALLOCED_GC(obj) == false)
+//
+static
+bool check_in_nonmoving_heap(StgClosure *p) {
+ if (HEAP_ALLOCED_GC(p)) {
+ // This works for both large and small objects:
+ return Bdescr((P_)p)->flags & BF_NONMOVING;
+ } else {
+ return true; // a static object
+ }
+}
+
+STATIC_INLINE void
+markQueueBlockPush (MarkQueueBlock *b, const MarkQueueEnt *ent)
+{
+ ASSERT(!markQueueBlockIsFull(b));
+ b->entries[b->head] = *ent;
+ b->head++;
+}
+
+STATIC_INLINE void
+upd_rem_set_push(UpdRemSet *rs, MarkQueueEnt *ent)
+{
+ // Are we at the end of the block?
+ if (markQueueBlockIsFull(rs->block)) {
+ // Yes, this block is full.
+ // Allocate a fresh block.
+ ACQUIRE_SM_LOCK;
+ nonmovingAddUpdRemSetBlocks(rs);
+ bdescr *bd = allocGroup(MARK_QUEUE_BLOCKS);
+ bd->link = markQueueBlockBdescr(rs->block);
+ rs->block = (MarkQueueBlock *) bd->start;
+ rs->block->head = 0;
+ RELEASE_SM_LOCK;
+ }
+
+ markQueueBlockPush(rs->block, ent);
+}
+
+STATIC_INLINE void
+upd_rem_set_push_gc(UpdRemSet *rs, MarkQueueEnt *ent)
+{
+ // Are we at the end of the block?
+ if (markQueueBlockIsFull(rs->block)) {
+ // Yes, this block is full.
+ // Allocate a fresh block.
+ ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ nonmovingAddUpdRemSetBlocks(rs);
+ bdescr *bd = allocGroup(MARK_QUEUE_BLOCKS);
+ bd->link = markQueueBlockBdescr(rs->block);
+ rs->block = (MarkQueueBlock *) bd->start;
+ rs->block->head = 0;
+ RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+ }
+
+ markQueueBlockPush(rs->block, ent);
+}
+
+
+STATIC_INLINE void
+mark_queue_push (MarkQueue *q, const MarkQueueEnt *ent)
+{
+ // Are we at the end of the block?
+ if (markQueueBlockIsFull(q->top)) {
+ // Yes, this block is full.
+ // Allocate a fresh block.
+ ACQUIRE_SM_LOCK;
+ bdescr *bd = allocGroup(MARK_QUEUE_BLOCKS);
+ bd->link = markQueueBlockBdescr(q->top);
+ q->top = (MarkQueueBlock *) bd->start;
+ q->top->head = 0;
+ RELEASE_SM_LOCK;
+ }
+
+ markQueueBlockPush(q->top, ent);
+}
+
+STATIC_INLINE void push(struct MarkContext *mc, MarkQueueEnt *ent) {
+ switch (mc->kind) {
+ case MARK_CTX_IN_CONC_MARK:
+ mark_queue_push(mc->in_conc_mark.queue, ent);
+ break;
+ case MARK_CTX_IN_MOVING_GC:
+ upd_rem_set_push_gc(mc->in_moving_gc.rset, ent);
+ break;
+ case MARK_CTX_IN_MUTATOR:
+ upd_rem_set_push(mc->in_mutator.rset, ent);
+ break;
+ }
+}
+
/* Initialise update remembered set data structures */
void nonmovingMarkInitUpdRemSet() {
#if defined(THREADED_RTS)
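
Taken together, the constructors and push above route every entry to the right sink with the right locking: the concurrent mark thread appends to its MarkQueue, the moving GC appends to an UpdRemSet under the gc_alloc_block_sync spinlock, and the mutator's write barrier appends to its UpdRemSet under SM_LOCK. An illustrative sketch of the intended call shape; it leans on the declarations in the hunk above and is not code from the patch:

    /* Illustrative only: how each phase is expected to obtain its
     * context. Relies on the declarations in the hunk above. */
    static void example_push_each_context(MarkQueue *q, UpdRemSet *rs, StgClosure *p)
    {
        MarkQueueEnt ent = {
            .mark_closure = {
                .p = TAG_CLOSURE(MARK_CLOSURE, UNTAG_CLOSURE(p)),
                .origin = NULL,
            }
        };

        struct MarkContext conc = markContextInConcMark(q);  /* concurrent mark thread */
        push(&conc, &ent);  /* -> mark_queue_push: SM_LOCK when a block fills */

        struct MarkContext gc = markContextInMovingGc(rs);   /* inside the moving GC */
        push(&gc, &ent);    /* -> upd_rem_set_push_gc: gc_alloc_block_sync spinlock */

        struct MarkContext mut = markContextInMutator(rs);   /* mutator write barrier */
        push(&mut, &ent);   /* -> upd_rem_set_push: SM_LOCK */
    }
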
@@ -406,67 +554,30 @@ void nonmovingFinishFlush(Task *task)
}
#endif
-/*********************************************************
- * Pushing to either the mark queue or remembered set
- *********************************************************/
-
-STATIC_INLINE void
-push (MarkQueue *q, const MarkQueueEnt *ent)
-{
- // Are we at the end of the block?
- if (markQueueBlockIsFull(q->top)) {
- // Yes, this block is full.
- // Allocate a fresh block.
- ACQUIRE_SM_LOCK;
- bdescr *bd = allocGroup(MARK_QUEUE_BLOCKS);
- bd->link = markQueueBlockBdescr(q->top);
- q->top = (MarkQueueBlock *) bd->start;
- q->top->head = 0;
- RELEASE_SM_LOCK;
- }
-
- q->top->entries[q->top->head] = *ent;
- q->top->head++;
-}
-
/* A variant of push to be used by the minor GC when it encounters a reference
* to an object in the non-moving heap. In contrast to the other push
* operations this uses the gc_alloc_block_sync spinlock instead of the
* SM_LOCK to allocate new blocks in the event that the mark queue is full.
*/
void
-markQueuePushClosureGC (UpdRemSet *q, StgClosure *p)
+markQueuePushClosureGC (UpdRemSet *rset, StgClosure *p)
{
/* We should not make it here if we are doing a deadlock detect GC.
* See Note [Deadlock detection under nonmoving collector].
*/
ASSERT(!deadlock_detect_gc);
- // Are we at the end of the block?
- if (markQueueBlockIsFull(q->block)) {
- // Yes, this block is full.
- // Allocate a fresh block.
- ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
- nonmovingAddUpdRemSetBlocks(q);
- bdescr *bd = allocGroup(MARK_QUEUE_BLOCKS);
- bd->link = markQueueBlockBdescr(q->top);
- q->top = (MarkQueueBlock *) bd->start;
- q->top->head = 0;
- RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
- }
-
MarkQueueEnt ent = {
.mark_closure = {
.p = TAG_CLOSURE(MARK_CLOSURE, UNTAG_CLOSURE(p)),
.origin = NULL,
}
};
- q->top->entries[q->top->head] = ent;
- q->top->head++;
+ upd_rem_set_push_gc(rset, &ent);
}
static inline
-void push_closure (MarkQueue *q,
+void push_closure (struct MarkContext *mctx,
StgClosure *p,
StgClosure **origin)
{
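
Each push path shares the same block-structured queue discipline: write into the current MarkQueueBlock and, once it fills, take the appropriate lock, allocate a fresh group, and chain the full block behind the new one. A standalone model of that discipline, with calloc standing in for allocGroup and locking elided:

    #include <stdio.h>
    #include <stdlib.h>

    #define BLOCK_ENTRIES 4   /* stand-in for the real block capacity */

    struct Block {
        int head;                      /* next free slot, as in MarkQueueBlock */
        int entries[BLOCK_ENTRIES];
        struct Block *link;            /* chain of filled blocks (bd->link) */
    };

    static struct Block *fresh_block(struct Block *prev) {
        struct Block *b = calloc(1, sizeof *b);
        b->link = prev;                /* full block stays reachable behind us */
        return b;
    }

    static void push_ent(struct Block **top, int ent) {
        if ((*top)->head == BLOCK_ENTRIES)
            *top = fresh_block(*top);  /* the real code takes a lock here */
        (*top)->entries[(*top)->head++] = ent;
    }

    int main(void) {
        struct Block *top = fresh_block(NULL);
        for (int i = 0; i < 10; i++)
            push_ent(&top, i);
        int blocks = 0;
        for (struct Block *b = top; b; ) {
            struct Block *next = b->link;
            blocks++;
            free(b);
            b = next;
        }
        printf("10 pushes -> %d blocks\n", blocks);  /* 3 */
        return 0;
    }
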
@@ -492,11 +603,17 @@ void push_closure (MarkQueue *q,
.origin = origin,
}
};
- push(q, &ent);
+ push(mctx, &ent);
+}
+
+static inline
+void push_closure_ (struct MarkContext *mctx, StgClosure *p)
+{
+ push_closure(mctx, p, NULL);
}
static
-void push_array (MarkQueue *q,
+void push_array (struct MarkContext *mctx,
const StgMutArrPtrs *array,
StgWord start_index)
{
@@ -510,24 +627,24 @@ void push_array (MarkQueue *q,
.start_index = start_index,
}
};
- push(q, &ent);
+ push(mctx, &ent);
}
static
-void push_thunk_srt (MarkQueue *q, const StgInfoTable *info)
+void push_thunk_srt (struct MarkContext *mctx, const StgInfoTable *info)
{
const StgThunkInfoTable *thunk_info = itbl_to_thunk_itbl(info);
if (thunk_info->i.srt) {
- push_closure(q, (StgClosure*)GET_SRT(thunk_info), NULL);
+ push_closure(mctx, (StgClosure*)GET_SRT(thunk_info), NULL);
}
}
static
-void push_fun_srt (MarkQueue *q, const StgInfoTable *info)
+void push_fun_srt (struct MarkContext *mctx, const StgInfoTable *info)
{
const StgFunInfoTable *fun_info = itbl_to_fun_itbl(info);
if (fun_info->i.srt) {
- push_closure(q, (StgClosure*)GET_FUN_SRT(fun_info), NULL);
+ push_closure(mctx, (StgClosure*)GET_FUN_SRT(fun_info), NULL);
}
}
@@ -539,23 +656,6 @@ void push_fun_srt (MarkQueue *q, const StgInfoTable *info)
* non-moving heap.
*********************************************************/
-// Check if the object is traced by the non-moving collector. This holds in two
-// conditions:
-//
-// - Object is in non-moving heap
-// - Object is a large (BF_LARGE) and marked as BF_NONMOVING
-// - Object is static (HEAP_ALLOCED_GC(obj) == false)
-//
-static
-bool check_in_nonmoving_heap(StgClosure *p) {
- if (HEAP_ALLOCED_GC(p)) {
- // This works for both large and small objects:
- return Bdescr((P_)p)->flags & BF_NONMOVING;
- } else {
- return true; // a static object
- }
-}
-
/* Push the free variables of a (now-evaluated) thunk to the
* update remembered set.
*/
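
The function below acts as a snapshot write barrier: before the mutator overwrites a thunk with its value, every closure the thunk referenced is recorded, so the concurrent mark still sees the heap as it was when the marking cycle began. A standalone model of the idea, using stand-in types:

    /* Standalone model of a snapshot write barrier: record the old
     * referents before overwriting. All types are stand-ins. */
    #include <stdio.h>

    #define MAX 8
    static const void *rem_set[MAX];
    static int rem_len = 0;

    struct Thunk { const void *free_vars[2]; int n_free; const void *result; };

    static void barrier_push(const void *p) {
        if (rem_len < MAX) rem_set[rem_len++] = p;
    }

    /* Before updating the thunk, push everything it points to. */
    static void update_thunk(struct Thunk *t, const void *value) {
        for (int i = 0; i < t->n_free; i++)
            barrier_push(t->free_vars[i]);  /* snapshot the old referents */
        t->result = value;                  /* now safe to overwrite */
    }

    int main(void) {
        int a = 1, b = 2, v = 3;
        struct Thunk t = { { &a, &b }, 2, NULL };
        update_thunk(&t, &v);
        printf("remembered %d referents\n", rem_len);  /* 2 */
        return 0;
    }
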
@@ -582,7 +682,8 @@ void updateRemembSetPushThunkEager(Capability *cap,
StgThunk *thunk)
{
/* N.B. info->i.type mustn't be WHITEHOLE */
- MarkQueue *queue = &cap->upd_rem_set.queue;
+ UpdRemSet *rs = &cap->upd_rem_set;
+ struct MarkContext mctx = markContextInMutator(rs);
switch (info->i.type) {
case THUNK:
case THUNK_1_0:
@@ -591,24 +692,18 @@ void updateRemembSetPushThunkEager(Capability *cap,
case THUNK_1_1:
case THUNK_0_2:
{
- push_thunk_srt(queue, &info->i);
+ push_thunk_srt(&mctx, &info->i);
for (StgWord i = 0; i < info->i.layout.payload.ptrs; i++) {
- if (check_in_nonmoving_heap(thunk->payload[i])) {
- // Don't bother to push origin; it makes the barrier needlessly
- // expensive with little benefit.
- push_closure(queue, thunk->payload[i], NULL);
- }
+ push_closure_(&mctx, thunk->payload[i]);
}
break;
}
case AP:
{
StgAP *ap = (StgAP *) thunk;
- if (check_in_nonmoving_heap(ap->fun)) {
- push_closure(queue, ap->fun, NULL);
- }
- mark_PAP_payload(queue, ap->fun, ap->payload, ap->n_args);
+ push_closure_(&mctx, ap->fun);
+ mark_PAP_payload(&mctx, ap->fun, ap->payload, ap->n_args);
break;
}
case THUNK_SELECTOR:
@@ -620,9 +715,7 @@ void updateRemembSetPushThunkEager(Capability *cap,
case IND:
{
StgInd *ind = (StgInd *) thunk;
- if (check_in_nonmoving_heap(ind->indirectee)) {
- push_closure(queue, ind->indirectee, NULL);
- }
+ push_closure_(&mctx, ind->indirectee);
break;
}
default:
@@ -636,33 +729,16 @@ void updateRemembSetPushThunkRegs(StgRegTable *reg, StgThunk *p)
updateRemembSetPushThunk(regTableToCapability(reg), p);
}
-STATIC_INLINE void updateRemembSetPushClosure_(UpdRemSet *rs, StgClosure *p)
-{
- if (markQueueBlockIsFull(rs->block)) {
- // Yes, this block is full.
- // Allocate a fresh block.
- ACQUIRE_SM_LOCK;
- nonmovingAddUpdRemSetBlocks(rs);
- bdescr *bd = allocGroup(MARK_QUEUE_BLOCKS);
- bd->link = markQueueBlockBdescr(rs->block);
- rs->block = (MarkQueueBlock *) bd->start;
- rs->block->head = 0;
- RELEASE_SM_LOCK;
- }
-
- MarkQueueEnt ent = {
- .mark_closure = {
- .p = TAG_CLOSURE(MARK_CLOSURE, UNTAG_CLOSURE(p)),
- .origin = NULL,
- }
- };
- markQueueBlockPush(rs->block, &ent);
-}
-
void updateRemembSetPushClosure(UpdRemSet *rs, StgClosure *p)
{
if (check_in_nonmoving_heap(p)) {
- updateRemembSetPushClosure_(rs, p);
+ MarkQueueEnt ent = {
+ .mark_closure = {
+ .p = TAG_CLOSURE(MARK_CLOSURE, UNTAG_CLOSURE(p)),
+ .origin = NULL,
+ }
+ };
+ upd_rem_set_push(rs, &ent);
}
}
@@ -716,7 +792,8 @@ void updateRemembSetPushTSO(Capability *cap, StgTSO *tso)
{
if (needs_upd_rem_set_mark((StgClosure *) tso)) {
debugTrace(DEBUG_nonmoving_gc, "upd_rem_set: TSO %p", tso);
- mark_tso(&cap->upd_rem_set.queue, tso);
+ struct MarkContext mctx = markContextInMutator(&cap->upd_rem_set);
+ mark_tso(&mctx, tso);
finish_upd_rem_set_mark((StgClosure *) tso);
}
}
@@ -730,8 +807,9 @@ void updateRemembSetPushStack(Capability *cap, StgStack *stack)
if (cas_word8(&stack->marking, marking, nonmovingMarkEpoch)
!= nonmovingMarkEpoch) {
// We have claimed the right to mark the stack.
+ struct MarkContext mctx = markContextInMutator(&cap->upd_rem_set);
debugTrace(DEBUG_nonmoving_gc, "upd_rem_set: STACK %p", stack->sp);
- mark_stack_upd_rem_set(&cap->upd_rem_set, stack);
+ mark_stack(&mctx, stack);
finish_upd_rem_set_mark((StgClosure *) stack);
return;
} else {
@@ -751,10 +829,10 @@ void updateRemembSetPushStack(Capability *cap, StgStack *stack)
* Pushing to the mark queue
*********************************************************/
-void markQueuePush (MarkQueue *q, const MarkQueueEnt *ent)
-{
- push(q, ent);
-}
+//void markQueuePush (MarkQueue *q, const MarkQueueEnt *ent)
+//{
+// push(q, ent);
+//}
void markQueuePushClosure (MarkQueue *q,
StgClosure *p,
@@ -762,37 +840,24 @@ void markQueuePushClosure (MarkQueue *q,
{
// TODO: Push this into callers where they already have the Bdescr
if (check_in_nonmoving_heap(p)) {
- push_closure(q, p, origin);
+ struct MarkContext mctx = markContextInConcMark(q);
+ push_closure(&mctx, p, origin);
}
}
-/* TODO: Do we really never want to specify the origin here? */
-void markQueueAddRoot (MarkQueue* q, StgClosure** root)
-{
- markQueuePushClosure(q, *root, NULL);
-}
-
/* Push a closure to the mark queue without origin information */
void markQueuePushClosure_ (MarkQueue *q, StgClosure *p)
{
markQueuePushClosure(q, p, NULL);
}
-void markQueuePushFunSrt (MarkQueue *q, const StgInfoTable *info)
-{
- push_fun_srt(q, info);
-}
-
-void markQueuePushThunkSrt (MarkQueue *q, const StgInfoTable *info)
-{
- push_thunk_srt(q, info);
-}
-
-void markQueuePushArray (MarkQueue *q,
- const StgMutArrPtrs *array,
- StgWord start_index)
+/* Used to add roots during preparatory GC.
+ *
+ * TODO: Do we really never want to specify the origin here?
+ */
+void markQueueAddRoot (MarkQueue* q, StgClosure** root)
{
- push_array(q, array, start_index);
+ markQueuePushClosure_(q, *root);
}
/*********************************************************
@@ -877,7 +942,7 @@ static MarkQueueEnt markQueuePop (MarkQueue *q)
*********************************************************/
/* Must hold sm_mutex. */
-static void init_upd_rem_set (UpdRemSet *queue)
+void init_upd_rem_set (UpdRemSet *queue)
{
bdescr *bd = allocGroup(MARK_QUEUE_BLOCKS);
queue->block = markQueueBlockFromBdescr(bd);
@@ -907,8 +972,8 @@ void reset_upd_rem_set (UpdRemSet *rset)
{
// UpdRemSets always have one block for the mark queue. This assertion is to
// update this code if we change that.
- ASSERT(markQueueBlockBdescr(rset->queue.top)->link == NULL);
- rset->queue.top->head = 0;
+ ASSERT(markQueueBlockBdescr(rset->block)->link == NULL);
+ rset->block->head = 0;
}
void freeMarkQueue (MarkQueue *queue)
@@ -940,18 +1005,18 @@ markQueueLength (MarkQueue *q)
* any outstanding transactions.
*/
static void
-mark_trec_header (MarkQueue *queue, StgTRecHeader *trec)
+mark_trec_header (struct MarkContext *mctx, StgTRecHeader *trec)
{
while (trec != NO_TREC) {
StgTRecChunk *chunk = trec->current_chunk;
- markQueuePushClosure_(queue, (StgClosure *) trec);
- markQueuePushClosure_(queue, (StgClosure *) chunk);
+ push_closure_(mctx, (StgClosure *) trec);
+ push_closure_(mctx, (StgClosure *) chunk);
while (chunk != END_STM_CHUNK_LIST) {
for (StgWord i=0; i < chunk->next_entry_idx; i++) {
TRecEntry *ent = &chunk->entries[i];
- markQueuePushClosure_(queue, (StgClosure *) ent->tvar);
- markQueuePushClosure_(queue, ent->expected_value);
- markQueuePushClosure_(queue, ent->new_value);
+ push_closure_(mctx, (StgClosure *) ent->tvar);
+ push_closure_(mctx, ent->expected_value);
+ push_closure_(mctx, ent->new_value);
}
chunk = chunk->prev_chunk;
}
@@ -960,35 +1025,52 @@ mark_trec_header (MarkQueue *queue, StgTRecHeader *trec)
}
static void
-mark_tso (MarkQueue *queue, StgTSO *tso)
+mark_tso (struct MarkContext *mctx, StgTSO *tso)
{
// TODO: Clear dirty if contains only old gen objects
if (tso->bound != NULL) {
- markQueuePushClosure_(queue, (StgClosure *) tso->bound->tso);
+ push_closure_(mctx, (StgClosure *) tso->bound->tso);
}
- markQueuePushClosure_(queue, (StgClosure *) tso->blocked_exceptions);
- markQueuePushClosure_(queue, (StgClosure *) tso->bq);
- mark_trec_header(queue, tso->trec);
- markQueuePushClosure_(queue, (StgClosure *) tso->stackobj);
- markQueuePushClosure_(queue, (StgClosure *) tso->_link);
+ push_closure_(mctx, (StgClosure *) tso->blocked_exceptions);
+ push_closure_(mctx, (StgClosure *) tso->bq);
+ mark_trec_header(mctx, tso->trec);
+ push_closure_(mctx, (StgClosure *) tso->stackobj);
+ push_closure_(mctx, (StgClosure *) tso->_link);
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnMVarRead
|| tso->why_blocked == BlockedOnBlackHole
|| tso->why_blocked == BlockedOnMsgThrowTo
|| tso->why_blocked == NotBlocked
) {
- markQueuePushClosure_(queue, tso->block_info.closure);
+ push_closure_(mctx, tso->block_info.closure);
}
}
+/* Helper callback for the walk_large_bitmap calls below. */
+static void
+do_push_closure(StgClosure **p, void *user)
+{
+ struct MarkContext *mctx = (struct MarkContext *) user;
+ push_closure_(mctx, *p);
+}
+
static void
-mark_small_bitmap (StgClosure **p, StgWord size, StgWord bitmap, walk_closures_cb cb, void *user)
+mark_large_bitmap (struct MarkContext *mctx,
+ StgClosure **p,
+ StgLargeBitmap *large_bitmap,
+ StgWord size)
+{
+ walk_large_bitmap(do_push_closure, p, large_bitmap, size, mctx);
+}
+
+static void
+mark_small_bitmap (struct MarkContext *mctx, StgClosure **p, StgWord size, StgWord bitmap)
{
while (size > 0) {
if ((bitmap & 1) == 0) {
- cb(p, user);
+ push_closure_(mctx, *p);
}
p++;
bitmap = bitmap >> 1;
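
The loop above (continued in the next hunk) follows the STG bitmap convention: a clear bit marks a pointer slot that must be traced, a set bit marks a raw word to skip. A standalone model of the scan, with stand-in types:

    #include <stdio.h>

    static void scan(void **slots, unsigned long size, unsigned long bitmap) {
        while (size > 0) {
            if ((bitmap & 1) == 0)
                printf("trace slot %p\n", slots[0]);  /* pointer slot */
            slots++;
            bitmap >>= 1;
            size--;
        }
    }

    int main(void) {
        int x = 0;
        void *frame[3] = { &x, (void *)42, &x };  /* ptr, raw word, ptr */
        scan(frame, 3, 0x2);  /* bit 1 set: slot 1 holds a raw word */
        return 0;
    }
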
@@ -997,11 +1079,10 @@ mark_small_bitmap (StgClosure **p, StgWord size, StgWord bitmap, walk_closures_c
}
static GNUC_ATTR_HOT
-void mark_PAP_payload (StgClosure *fun,
+void mark_PAP_payload (struct MarkContext *mctx,
+ StgClosure *fun,
StgClosure **payload,
- StgWord size,
- walk_closures_cb cb,
- void *user)
+ StgWord size)
{
const StgFunInfoTable *fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(fun));
ASSERT(fun_info->i.type != PAP);
@@ -1013,26 +1094,24 @@ void mark_PAP_payload (StgClosure *fun,
bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
goto small_bitmap;
case ARG_GEN_BIG:
- walk_large_bitmap(cb, p, GET_FUN_LARGE_BITMAP(fun_info), size, user);
+ walk_large_bitmap(do_push_closure, payload, GET_FUN_LARGE_BITMAP(fun_info), size, mctx);
break;
case ARG_BCO:
- walk_large_bitmap(cb, p, BCO_BITMAP(fun), size, user);
+ walk_large_bitmap(do_push_closure, payload, BCO_BITMAP(fun), size, mctx);
break;
default:
bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
small_bitmap:
- mark_small_bitmap((StgClosure **) p, size, bitmap, cb, user);
+ mark_small_bitmap(mctx, (StgClosure **) p, size, bitmap);
break;
}
}
/* Helper for mark_stack; returns next stack frame. */
static StgPtr
-mark_arg_block (MarkQueue *queue,
+mark_arg_block (struct MarkContext *mctx,
const StgFunInfoTable *fun_info,
- StgClosure **args,
- walk_closures_cb cb,
- void *user)
+ StgClosure **args)
{
StgWord bitmap, size;
@@ -1044,14 +1123,14 @@ mark_arg_block (MarkQueue *queue,
goto small_bitmap;
case ARG_GEN_BIG:
size = GET_FUN_LARGE_BITMAP(fun_info)->size;
- walk_large_bitmap(cb, (StgClosure**) p, GET_FUN_LARGE_BITMAP(fun_info), size, user);
+ walk_large_bitmap(do_push_closure, (StgClosure**) p, GET_FUN_LARGE_BITMAP(fun_info), size, mctx);
p += size;
break;
default:
bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
small_bitmap:
- mark_small_bitmap((StgClosure**)p, size, bitmap, cb, user);
+ mark_small_bitmap(mctx, (StgClosure**)p, size, bitmap);
p += size;
break;
}
@@ -1059,7 +1138,7 @@ mark_arg_block (MarkQueue *queue,
}
static GNUC_ATTR_HOT void
-mark_stack__ (StgPtr sp, StgPtr spBottom, walk_closure_cb trace, void *user)
+mark_stack_ (struct MarkContext *mctx, StgPtr sp, StgPtr spBottom)
{
ASSERT(sp <= spBottom);
@@ -1070,7 +1149,7 @@ mark_stack__ (StgPtr sp, StgPtr spBottom, walk_closure_cb trace, void *user)
{
// See Note [upd-black-hole] in rts/Scav.c
StgUpdateFrame *frame = (StgUpdateFrame *) sp;
- cb(frame->updatee, user);
+ push_closure_(mctx, frame->updatee);
sp += sizeofW(StgUpdateFrame);
continue;
}
@@ -1089,22 +1168,22 @@ mark_stack__ (StgPtr sp, StgPtr spBottom, walk_closure_cb trace, void *user)
// NOTE: the payload starts immediately after the info-ptr, we
// don't have an StgHeader in the same sense as a heap closure.
sp++;
- mark_small_bitmap((StgClosure **) sp, size, bitmap, cb, user);
+ mark_small_bitmap(mctx, (StgClosure **) sp, size, bitmap);
sp += size;
}
follow_srt:
if (info->i.srt) {
- cb((StgClosure*)GET_SRT(info), user);
+ push_closure_(mctx, (StgClosure*) GET_SRT(info));
}
continue;
case RET_BCO: {
sp++;
- cb(*(StgClosure**)sp, user);
+ push_closure_(mctx, *(StgClosure**)sp);
StgBCO *bco = (StgBCO *)*sp;
sp++;
StgWord size = BCO_BITMAP_SIZE(bco);
- mark_large_bitmap((StgClosure **) sp, BCO_BITMAP(bco), size, cb, user);
+ mark_large_bitmap(mctx, (StgClosure **) sp, BCO_BITMAP(bco), size);
sp += size;
continue;
}
@@ -1116,7 +1195,7 @@ mark_stack__ (StgPtr sp, StgPtr spBottom, walk_closure_cb trace, void *user)
size = GET_LARGE_BITMAP(&info->i)->size;
sp++;
- mark_large_bitmap((StgClosure **) sp, GET_LARGE_BITMAP(&info->i), size, cb, user);
+ mark_large_bitmap(mctx, (StgClosure **) sp, GET_LARGE_BITMAP(&info->i), size);
sp += size;
// and don't forget to follow the SRT
goto follow_srt;
@@ -1127,9 +1206,9 @@ mark_stack__ (StgPtr sp, StgPtr spBottom, walk_closure_cb trace, void *user)
StgRetFun *ret_fun = (StgRetFun *)sp;
const StgFunInfoTable *fun_info;
- cb(ret_fun->fun, user);
+ push_closure_(mctx, ret_fun->fun);
fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
- sp = mark_arg_block(fun_info, ret_fun->payload, cb, user);
+ sp = mark_arg_block(mctx, fun_info, ret_fun->payload);
goto follow_srt;
}
@@ -1139,39 +1218,12 @@ mark_stack__ (StgPtr sp, StgPtr spBottom, walk_closure_cb trace, void *user)
}
}
-static void
-do_push_closure_upd_rem_set (StgClosure **p, void *user)
-{
- UpdRemSet *rs = (UpdRemSet *) user;
- updRemSetPushClosure(queue, *p);
-}
-
static GNUC_ATTR_HOT void
-mark_stack_upd_rem_set (UpdRemSet *rs, StgStack *stack)
-{
- mark_stack__(stack->sp, stack->stack + stack->stack_size, do_push_closure_upd_rem_set, rs);
-}
-
-static void
-do_push_closure (StgClosure **p, void *user)
-{
- MarkQueue *queue = (MarkQueue *) user;
- // TODO: Origin? need reference to containing closure
- markQueuePushClosure_(queue, *p);
-}
-
-static GNUC_ATTR_HOT void
-mark_stack_ (MarkQueue *queue, StgStack *stack)
-{
- mark_stack__(stack->sp, stack->stack + stack->stack_size, do_push_closure, queue);
-}
-
-static GNUC_ATTR_HOT void
-mark_stack (MarkQueue *queue, StgStack *stack)
+mark_stack (struct MarkContext *mctx, StgStack *stack)
{
// TODO: Clear dirty if contains only old gen objects
- mark_stack_(queue, stack->sp, stack->stack + stack->stack_size);
+ mark_stack_(mctx, stack->sp, stack->stack + stack->stack_size);
}
/* See Note [Static objects under the nonmoving collector].
@@ -1194,7 +1246,7 @@ bump_static_flag(StgClosure **link_field, StgClosure *q STG_UNUSED)
/* N.B. p0 may be tagged */
static GNUC_ATTR_HOT void
-mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
+mark_closure (struct MarkContext *mctx, const StgClosure *p0, StgClosure **origin)
{
StgClosure *p = (StgClosure*)p0;
@@ -1205,10 +1257,10 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
StgWord tag = GET_CLOSURE_TAG(p);
p = UNTAG_CLOSURE(p);
-# define PUSH_FIELD(obj, field) \
- markQueuePushClosure(queue, \
- (StgClosure *) (obj)->field, \
- (StgClosure **) &(obj)->field)
+# define PUSH_FIELD(obj, field) \
+ push_closure(mctx, \
+ (StgClosure *) (obj)->field, \
+ (StgClosure **) &(obj)->field)
if (!HEAP_ALLOCED_GC(p)) {
const StgInfoTable *info = get_itbl(p);
@@ -1225,7 +1277,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
case THUNK_STATIC:
if (info->srt != 0) {
if (bump_static_flag(THUNK_STATIC_LINK((StgClosure *)p), p)) {
- markQueuePushThunkSrt(queue, info); // TODO this function repeats the check above
+ push_thunk_srt(mctx, info); // TODO this function repeats the check above
}
}
goto done;
@@ -1233,7 +1285,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
case FUN_STATIC:
if (info->srt != 0 || info->layout.payload.ptrs != 0) {
if (bump_static_flag(STATIC_LINK(info, (StgClosure *)p), p)) {
- markQueuePushFunSrt(queue, info); // TODO this function repeats the check above
+ push_fun_srt(mctx, info); // TODO this function repeats the check above
// a FUN_STATIC can also be an SRT, so it may have pointer
// fields. See Note [SRTs] in CmmBuildInfoTables, specifically
@@ -1394,14 +1446,14 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
}
case FUN_2_0:
- markQueuePushFunSrt(queue, info);
+ push_fun_srt(mctx, info);
PUSH_FIELD(p, payload[1]);
PUSH_FIELD(p, payload[0]);
break;
case THUNK_2_0: {
StgThunk *thunk = (StgThunk *) p;
- markQueuePushThunkSrt(queue, info);
+ push_thunk_srt(mctx, info);
PUSH_FIELD(thunk, payload[1]);
PUSH_FIELD(thunk, payload[0]);
break;
@@ -1413,12 +1465,12 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
break;
case THUNK_1_0:
- markQueuePushThunkSrt(queue, info);
+ push_thunk_srt(mctx, info);
PUSH_FIELD((StgThunk *) p, payload[0]);
break;
case FUN_1_0:
- markQueuePushFunSrt(queue, info);
+ push_fun_srt(mctx, info);
PUSH_FIELD(p, payload[0]);
break;
@@ -1427,11 +1479,11 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
break;
case THUNK_0_1:
- markQueuePushThunkSrt(queue, info);
+ push_thunk_srt(mctx, info);
break;
case FUN_0_1:
- markQueuePushFunSrt(queue, info);
+ push_fun_srt(mctx, info);
break;
case CONSTR_0_1:
@@ -1439,20 +1491,20 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
break;
case THUNK_0_2:
- markQueuePushThunkSrt(queue, info);
+ push_thunk_srt(mctx, info);
break;
case FUN_0_2:
- markQueuePushFunSrt(queue, info);
+ push_fun_srt(mctx, info);
break;
case THUNK_1_1:
- markQueuePushThunkSrt(queue, info);
+ push_thunk_srt(mctx, info);
PUSH_FIELD((StgThunk *) p, payload[0]);
break;
case FUN_1_1:
- markQueuePushFunSrt(queue, info);
+ push_fun_srt(mctx, info);
PUSH_FIELD(p, payload[0]);
break;
@@ -1461,14 +1513,14 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
break;
case FUN:
- markQueuePushFunSrt(queue, info);
+ push_fun_srt(mctx, info);
goto gen_obj;
case THUNK: {
- markQueuePushThunkSrt(queue, info);
+ push_thunk_srt(mctx, info);
for (StgWord i = 0; i < info->layout.payload.ptrs; i++) {
StgClosure **field = &((StgThunk *) p)->payload[i];
- markQueuePushClosure(queue, *field, field);
+ push_closure(mctx, *field, field);
}
break;
}
@@ -1481,7 +1533,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
{
for (StgWord i = 0; i < info->layout.payload.ptrs; i++) {
StgClosure **field = &((StgClosure *) p)->payload[i];
- markQueuePushClosure(queue, *field, field);
+ push_closure(mctx, *field, field);
}
break;
}
@@ -1530,7 +1582,8 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
case THUNK_SELECTOR:
if (RtsFlags.GcFlags.nonmovingSelectorOpt) {
- nonmoving_eval_thunk_selector(queue, (StgSelector*)p, origin);
+ ASSERT(mctx->kind == MARK_CTX_IN_CONC_MARK);
+ nonmoving_eval_thunk_selector(mctx->in_conc_mark.queue, (StgSelector*)p, origin);
} else {
PUSH_FIELD((StgSelector *) p, selectee);
}
@@ -1539,21 +1592,21 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
case AP_STACK: {
StgAP_STACK *ap = (StgAP_STACK *)p;
PUSH_FIELD(ap, fun);
- mark_stack_(queue, (StgPtr) ap->payload, (StgPtr) ap->payload + ap->size);
+ mark_stack_(mctx, (StgPtr) ap->payload, (StgPtr) ap->payload + ap->size);
break;
}
case PAP: {
StgPAP *pap = (StgPAP *) p;
PUSH_FIELD(pap, fun);
- mark_PAP_payload(queue, pap->fun, pap->payload, pap->n_args);
+ mark_PAP_payload(mctx, pap->fun, pap->payload, pap->n_args);
break;
}
case AP: {
StgAP *ap = (StgAP *) p;
PUSH_FIELD(ap, fun);
- mark_PAP_payload(queue, ap->fun, ap->payload, ap->n_args);
+ mark_PAP_payload(mctx, ap->fun, ap->payload, ap->n_args);
break;
}
@@ -1566,7 +1619,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
case MUT_ARR_PTRS_FROZEN_CLEAN:
case MUT_ARR_PTRS_FROZEN_DIRTY:
// TODO: Check this against Scav.c
- markQueuePushArray(queue, (StgMutArrPtrs *) p, 0);
+ push_array(mctx, (StgMutArrPtrs *) p, 0);
break;
case SMALL_MUT_ARR_PTRS_CLEAN:
@@ -1576,13 +1629,13 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
StgSmallMutArrPtrs *arr = (StgSmallMutArrPtrs *) p;
for (StgWord i = 0; i < arr->ptrs; i++) {
StgClosure **field = &arr->payload[i];
- markQueuePushClosure(queue, *field, field);
+ push_closure(mctx, *field, field);
}
break;
}
case TSO:
- mark_tso(queue, (StgTSO *) p);
+ mark_tso(mctx, (StgTSO *) p);
break;
case STACK: {
@@ -1595,7 +1648,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
if (cas_word8(&stack->marking, marking, nonmovingMarkEpoch)
!= nonmovingMarkEpoch) {
// We have claimed the right to mark the stack.
- mark_stack(queue, stack);
+ mark_stack(mctx, stack);
} else {
// A mutator has already started marking the stack; we just let it
// do its thing and move on. There's no reason to wait; we know that
@@ -1610,7 +1663,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
case MUT_PRIM: {
for (StgHalfWord p_idx = 0; p_idx < info->layout.payload.ptrs; ++p_idx) {
StgClosure **field = &p->payload[p_idx];
- markQueuePushClosure(queue, *field, field);
+ push_closure(mctx, *field, field);
}
break;
}
@@ -1622,9 +1675,9 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
PUSH_FIELD(tc, prev_chunk);
TRecEntry *end = &tc->entries[tc->next_entry_idx];
for (TRecEntry *e = &tc->entries[0]; e < end; e++) {
- markQueuePushClosure_(queue, (StgClosure *) e->tvar);
- markQueuePushClosure_(queue, (StgClosure *) e->expected_value);
- markQueuePushClosure_(queue, (StgClosure *) e->new_value);
+ push_closure_(mctx, (StgClosure *) e->tvar);
+ push_closure_(mctx, (StgClosure *) e->expected_value);
+ push_closure_(mctx, (StgClosure *) e->new_value);
}
break;
}
@@ -1699,6 +1752,7 @@ done:
GNUC_ATTR_HOT void
nonmovingMark (MarkQueue *queue)
{
+ struct MarkContext mctx = markContextInConcMark(queue);
traceConcMarkBegin();
debugTrace(DEBUG_nonmoving_gc, "Starting mark pass");
unsigned int count = 0;
@@ -1708,7 +1762,7 @@ nonmovingMark (MarkQueue *queue)
switch (nonmovingMarkQueueEntryType(&ent)) {
case MARK_CLOSURE:
- mark_closure(queue, ent.mark_closure.p, ent.mark_closure.origin);
+ mark_closure(&mctx, ent.mark_closure.p, ent.mark_closure.origin);
break;
case MARK_ARRAY: {
const StgMutArrPtrs *arr = (const StgMutArrPtrs *)
@@ -1717,12 +1771,12 @@ nonmovingMark (MarkQueue *queue)
StgWord end = start + MARK_ARRAY_CHUNK_LENGTH;
if (end < arr->ptrs) {
// There is more to be marked after this chunk.
- markQueuePushArray(queue, arr, end);
+ push_array(&mctx, arr, end);
} else {
end = arr->ptrs;
}
for (StgWord i = start; i < end; i++) {
- markQueuePushClosure_(queue, arr->payload[i]);
+ push_closure_(&mctx, arr->payload[i]);
}
break;
}
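
The MARK_ARRAY case bounds the work done per queue entry: only MARK_ARRAY_CHUNK_LENGTH elements are marked before the remainder is re-pushed as a fresh entry, so a single large array cannot monopolise the mark loop. A standalone model of the chunking arithmetic, with CHUNK standing in for MARK_ARRAY_CHUNK_LENGTH:

    #include <stdio.h>

    #define CHUNK 128   /* stands in for MARK_ARRAY_CHUNK_LENGTH */

    /* Mark one chunk of an n-element array starting at `start`; return
     * the next start index, or n when the array is finished. */
    static int mark_array_chunk(int n, int start, int *marked) {
        int end = start + CHUNK;
        if (end < n) {
            /* more to do: the real code re-pushes a MARK_ARRAY entry here */
        } else {
            end = n;
        }
        for (int i = start; i < end; i++)
            (*marked)++;   /* push_closure_(mctx, arr->payload[i]) in the RTS */
        return end;
    }

    int main(void) {
        int n = 300, start = 0, passes = 0, marked = 0;
        while (start < n) {
            start = mark_array_chunk(n, start, &marked);
            passes++;
        }
        printf("%d entries marked in %d passes\n", marked, passes);  /* 300 in 3 */
        return 0;
    }
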
@@ -1845,10 +1899,19 @@ static bool nonmovingIsNowAlive (StgClosure *p)
}
}
+static void nonmovingMarkLiveWeak (struct MarkContext *mctx, StgWeak *w)
+{
+ ASSERT(nonmovingClosureMarkedThisCycle((P_)w));
+ push_closure_(mctx, w->value);
+ push_closure_(mctx, w->finalizer);
+ push_closure_(mctx, w->cfinalizers);
+}
+
// Non-moving heap variant of `tidyWeakList`
bool nonmovingTidyWeaks (struct MarkQueue_ *queue)
{
bool did_work = false;
+ struct MarkContext mctx = markContextInConcMark(queue);
StgWeak **last_w = &nonmoving_old_weak_ptr_list;
StgWeak *next_w;
@@ -1864,7 +1927,7 @@ bool nonmovingTidyWeaks (struct MarkQueue_ *queue)
ASSERT(w->header.info == &stg_WEAK_info);
if (nonmovingIsNowAlive(w->key)) {
- nonmovingMarkLiveWeak(queue, w);
+ nonmovingMarkLiveWeak(&mctx, w);
did_work = true;
// remove this weak ptr from old_weak_ptr list
@@ -1883,32 +1946,32 @@ bool nonmovingTidyWeaks (struct MarkQueue_ *queue)
return did_work;
}
-void nonmovingMarkDeadWeak (struct MarkQueue_ *queue, StgWeak *w)
+static void nonmovingMarkDeadWeak_ (struct MarkContext *mctx, StgWeak *w)
{
if (w->cfinalizers != &stg_NO_FINALIZER_closure) {
- markQueuePushClosure_(queue, w->value);
+ push_closure_(mctx, w->value);
}
- markQueuePushClosure_(queue, w->finalizer);
+ push_closure_(mctx, w->finalizer);
}
-void nonmovingMarkLiveWeak (struct MarkQueue_ *queue, StgWeak *w)
+void nonmovingMarkDeadWeak (MarkQueue *queue, StgWeak *w)
{
- ASSERT(nonmovingClosureMarkedThisCycle((P_)w));
- markQueuePushClosure_(queue, w->value);
- markQueuePushClosure_(queue, w->finalizer);
- markQueuePushClosure_(queue, w->cfinalizers);
+ // FIXME: Refactor call site in Nonmoving.c
+ struct MarkContext mctx = markContextInConcMark(queue);
+ nonmovingMarkDeadWeak_(&mctx, w);
}
// When we're done with marking, any weak pointers with non-marked keys will be
// considered "dead". We mark values and finalizers of such weaks, and then
// schedule them for finalization in `scheduleFinalizers` (which we run during
// synchronization).
-void nonmovingMarkDeadWeaks (struct MarkQueue_ *queue, StgWeak **dead_weaks)
+void nonmovingMarkDeadWeaks (MarkQueue *queue, StgWeak **dead_weaks)
{
+ struct MarkContext mctx = markContextInConcMark(queue);
StgWeak *next_w;
for (StgWeak *w = nonmoving_old_weak_ptr_list; w; w = next_w) {
ASSERT(!nonmovingClosureMarkedThisCycle((P_)(w->key)));
- nonmovingMarkDeadWeak(queue, w);
+ nonmovingMarkDeadWeak_(&mctx, w);
next_w = w->link;
w->link = *dead_weaks;
*dead_weaks = w;
@@ -1943,6 +2006,7 @@ void nonmovingTidyThreads ()
void nonmovingResurrectThreads (struct MarkQueue_ *queue, StgTSO **resurrected_threads)
{
+ struct MarkContext mctx = markContextInConcMark(queue);
StgTSO *next;
for (StgTSO *t = nonmoving_old_threads; t != END_TSO_QUEUE; t = next) {
next = t->global_link;
@@ -1952,7 +2016,7 @@ void nonmovingResurrectThreads (struct MarkQueue_ *queue, StgTSO **resurrected_t
case ThreadComplete:
continue;
default:
- markQueuePushClosure_(queue, (StgClosure*)t);
+ push_closure_(&mctx, (StgClosure*)t);
t->global_link = *resurrected_threads;
*resurrected_threads = t;
}
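
The weak-pointer changes above follow the usual two-pass scheme: nonmovingTidyWeaks keeps weaks whose keys are now provably alive and marks their values and finalizers, while anything still unmarked when marking settles is spliced onto dead_weaks for finalization. A standalone model of the list splice; key_marked stands in for the nonmovingIsNowAlive test:

    #include <stdio.h>
    #include <stdbool.h>

    struct Weak { bool key_marked; struct Weak *link; };

    /* Live keys keep their weak on the list; dead ones are spliced
     * onto dead_weaks for later finalization. */
    static void tidy(struct Weak **list, struct Weak **dead_weaks) {
        struct Weak **last = list, *next;
        for (struct Weak *w = *list; w; w = next) {
            next = w->link;
            if (w->key_marked) {
                last = &w->link;        /* keep on the live list */
            } else {
                *last = next;           /* unlink */
                w->link = *dead_weaks;  /* schedule for finalization */
                *dead_weaks = w;
            }
        }
    }

    int main(void) {
        struct Weak c = { false, NULL }, b = { true, &c }, a = { false, &b };
        struct Weak *list = &a, *dead = NULL;
        tidy(&list, &dead);
        printf("live head marked: %d\n", list->key_marked);  /* 1 */
        return 0;
    }
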
diff --git a/rts/sm/NonMovingMark.h b/rts/sm/NonMovingMark.h
index c6e5dbada1..f8b9e233a3 100644
--- a/rts/sm/NonMovingMark.h
+++ b/rts/sm/NonMovingMark.h
@@ -158,6 +158,8 @@ extern MarkQueue *current_mark_queue;
extern bdescr *upd_rem_set_block_list;
+struct MarkContext;
+
void nonmovingMarkInitUpdRemSet(void);
void init_upd_rem_set(UpdRemSet *rset);
@@ -186,17 +188,14 @@ void nonmovingMarkDeadWeaks(struct MarkQueue_ *queue, StgWeak **dead_weak_ptr_li
void nonmovingResurrectThreads(struct MarkQueue_ *queue, StgTSO **resurrected_threads);
bool nonmovingIsAlive(StgClosure *p);
void nonmovingMarkDeadWeak(struct MarkQueue_ *queue, StgWeak *w);
-void nonmovingMarkLiveWeak(struct MarkQueue_ *queue, StgWeak *w);
void nonmovingAddUpdRemSetBlocks(UpdRemSet *rset);
-void markQueuePush(MarkQueue *q, const MarkQueueEnt *ent);
+//void markQueuePush(MarkQueue *q, const MarkQueueEnt *ent);
void markQueuePushClosureGC(UpdRemSet *q, StgClosure *p);
void markQueuePushClosure(MarkQueue *q,
- StgClosure *p,
- StgClosure **origin);
+ StgClosure *p,
+ StgClosure **origin);
void markQueuePushClosure_(MarkQueue *q, StgClosure *p);
-void markQueuePushThunkSrt(MarkQueue *q, const StgInfoTable *info);
-void markQueuePushFunSrt(MarkQueue *q, const StgInfoTable *info);
void markQueuePushArray(MarkQueue *q, const StgMutArrPtrs *array, StgWord start_index);
void updateRemembSetPushThunkEager(Capability *cap,
const StgThunkInfoTable *orig_info,