author     Ben Gamari <ben@well-typed.com>      2019-02-05 11:51:14 -0500
committer  Ben Gamari <ben@smart-cactus.org>    2019-10-20 21:15:52 -0400
commit     bd8e3ff43b64a72ed1c820e89691d0a83a1c6e96 (patch)
tree       8b07778e3c09460edce24750ae6da4d487eb5774 /rts/STM.c
parent     f8f77a070f4a9a93944dff0b7270162a40931c58 (diff)
download   haskell-bd8e3ff43b64a72ed1c820e89691d0a83a1c6e96.tar.gz
rts: Implement concurrent collection in the nonmoving collector
This extends the non-moving collector to allow concurrent collection.

The full design of the collector implemented here is described in detail
in a technical note:

    B. Gamari. "A Concurrent Garbage Collector For the Glasgow Haskell
    Compiler" (2018)

This extension involves the introduction of a capability-local
remembered set, known as the /update remembered set/, which tracks
objects which may no longer be visible to the collector due to mutation.
To maintain this remembered set we introduce a write barrier on
mutations which is enabled while a concurrent mark is underway.

The update remembered set representation is similar to that of the
nonmoving mark queue, being a chunked array of `MarkEntry`s. Each
`Capability` maintains a single accumulator chunk, which it flushes when
(a) the chunk is filled, or (b) the nonmoving collector enters its
post-mark synchronization phase.

While the write barrier touches a significant amount of code it is
conceptually straightforward: the mutator must ensure that the referent
of any pointer it overwrites is added to the update remembered set.
However, there are a few details:

 * In the case of objects with a dirty flag (e.g. `MVar`s) we can
   exploit the fact that only the *first* mutation requires a write
   barrier.

 * Weak references, as usual, complicate things. In particular, we must
   ensure that the referent of a weak object is marked if it is
   dereferenced by the mutator. For this we (unfortunately) must
   introduce a read barrier, as described in
   Note [Concurrent read barrier on deRefWeak#] (in `NonMovingMark.c`).

 * Stable names are also a bit tricky, as described in
   Note [Sweeping stable names in the concurrent collector]
   (`NonMovingSweep.c`).

We take quite some pains to ensure that the high thread count often seen
in parallel Haskell applications doesn't affect pause times. To this end
we allow thread stacks to be marked either by the thread itself (when it
is executed or when its stack underflows) or by the concurrent mark
thread (if the thread owning the stack is never scheduled). There is a
non-trivial handshake to ensure that this happens without racing, which
is described in Note [StgStack dirtiness flags and concurrent marking].

Co-Authored-by: Ömer Sinan Ağacan <omer@well-typed.com>
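To make the barrier's contract concrete, below is a minimal C sketch of
the pattern the diff applies to `TVar`s. The types and names here
(`Closure`, `Capability`, `RemSetChunk`, `flush_chunk`, `rem_set_push`,
`write_field`) are simplified stand-ins, not the RTS definitions; the
real entry point in the tree is `updateRemembSetPushClosure`, guarded by
`nonmoving_write_barrier_enabled`.

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct Closure_ { struct Closure_ *payload; } Closure;

    /* Stand-in for a capability's accumulator chunk of the update
     * remembered set: a fixed-size array of overwritten referents. */
    #define CHUNK_ENTRIES 128
    typedef struct {
        Closure *entries[CHUNK_ENTRIES];
        size_t   used;
    } RemSetChunk;

    typedef struct { RemSetChunk upd_rem_set; } Capability;

    /* Set while a concurrent mark is underway; the barrier is a
     * no-op otherwise. */
    static volatile bool write_barrier_enabled = false;

    /* Hand the full chunk to the collector; here we merely reset it. */
    static void flush_chunk(Capability *cap) {
        cap->upd_rem_set.used = 0;
    }

    /* Record a closure whose last reference may be about to vanish. */
    static void rem_set_push(Capability *cap, Closure *c) {
        RemSetChunk *chunk = &cap->upd_rem_set;
        chunk->entries[chunk->used++] = c;
        if (chunk->used == CHUNK_ENTRIES)
            flush_chunk(cap);
    }

    /* The barrier proper: before overwriting a field, push its old
     * referent so the concurrent mark still sees the snapshot. */
    static void write_field(Capability *cap, Closure **field,
                            Closure *new_value) {
        if (write_barrier_enabled && *field != NULL)
            rem_set_push(cap, *field);
        *field = new_value;
    }

Because each capability owns its accumulator chunk, the fast path of a
push involves no synchronization; coordination is needed only when a
chunk is handed to the collector, or when the post-mark synchronization
phase forces every capability to flush.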
Diffstat (limited to 'rts/STM.c')
-rw-r--r--  rts/STM.c | 41
1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/rts/STM.c b/rts/STM.c
index dc0b0ebb78..c17f33aaa0 100644
--- a/rts/STM.c
+++ b/rts/STM.c
@@ -182,7 +182,8 @@ static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
TRACE("%p : unlock_stm()", trec);
}
-static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
+static StgClosure *lock_tvar(Capability *cap STG_UNUSED,
+ StgTRecHeader *trec STG_UNUSED,
StgTVar *s STG_UNUSED) {
StgClosure *result;
TRACE("%p : lock_tvar(%p)", trec, s);
@@ -197,12 +198,14 @@ static void unlock_tvar(Capability *cap,
StgBool force_update) {
TRACE("%p : unlock_tvar(%p)", trec, s);
if (force_update) {
+ StgClosure *old_value = s -> current_value;
s -> current_value = c;
- dirty_TVAR(cap,s);
+ dirty_TVAR(cap, s, old_value);
}
}
-static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
+static StgBool cond_lock_tvar(Capability *cap STG_UNUSED,
+ StgTRecHeader *trec STG_UNUSED,
StgTVar *s STG_UNUSED,
StgClosure *expected) {
StgClosure *result;
@@ -231,7 +234,8 @@ static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
smp_locked = 0;
}
-static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
+static StgClosure *lock_tvar(Capability *cap STG_UNUSED,
+ StgTRecHeader *trec STG_UNUSED,
StgTVar *s STG_UNUSED) {
StgClosure *result;
TRACE("%p : lock_tvar(%p)", trec, s);
@@ -248,12 +252,14 @@ static void unlock_tvar(Capability *cap,
TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
ASSERT(smp_locked == trec);
if (force_update) {
+ StgClosure *old_value = s -> current_value;
s -> current_value = c;
- dirty_TVAR(cap,s);
+ dirty_TVAR(cap, s, old_value);
}
}
-static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
+static StgBool cond_lock_tvar(Capability *cap STG_UNUSED,
+ StgTRecHeader *trec STG_UNUSED,
StgTVar *s STG_UNUSED,
StgClosure *expected) {
StgClosure *result;
@@ -279,7 +285,8 @@ static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
TRACE("%p : unlock_stm()", trec);
}
-static StgClosure *lock_tvar(StgTRecHeader *trec,
+static StgClosure *lock_tvar(Capability *cap,
+ StgTRecHeader *trec,
StgTVar *s STG_UNUSED) {
StgClosure *result;
TRACE("%p : lock_tvar(%p)", trec, s);
@@ -289,6 +296,10 @@ static StgClosure *lock_tvar(StgTRecHeader *trec,
} while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info);
} while (cas((void *)&(s -> current_value),
(StgWord)result, (StgWord)trec) != (StgWord)result);
+
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled && result)) {
+ updateRemembSetPushClosure(cap, result);
+ }
return result;
}
@@ -300,10 +311,11 @@ static void unlock_tvar(Capability *cap,
TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
ASSERT(s -> current_value == (StgClosure *)trec);
s -> current_value = c;
- dirty_TVAR(cap,s);
+ dirty_TVAR(cap, s, (StgClosure *) trec);
}
-static StgBool cond_lock_tvar(StgTRecHeader *trec,
+static StgBool cond_lock_tvar(Capability *cap,
+ StgTRecHeader *trec,
StgTVar *s,
StgClosure *expected) {
StgClosure *result;
@@ -311,6 +323,9 @@ static StgBool cond_lock_tvar(StgTRecHeader *trec,
TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
w = cas((void *)&(s -> current_value), (StgWord)expected, (StgWord)trec);
result = (StgClosure *)w;
+ if (RTS_UNLIKELY(nonmoving_write_barrier_enabled && result)) {
+ updateRemembSetPushClosure(cap, expected);
+ }
TRACE("%p : %s", trec, result ? "success" : "failure");
return (result == expected);
}
@@ -525,7 +540,7 @@ static void build_watch_queue_entries_for_trec(Capability *cap,
}
s -> first_watch_queue_entry = q;
e -> new_value = (StgClosure *) q;
- dirty_TVAR(cap,s); // we modified first_watch_queue_entry
+ dirty_TVAR(cap, s, (StgClosure *) fq); // we modified first_watch_queue_entry
});
}
@@ -545,7 +560,7 @@ static void remove_watch_queue_entries_for_trec(Capability *cap,
StgTVarWatchQueue *q;
StgClosure *saw;
s = e -> tvar;
- saw = lock_tvar(trec, s);
+ saw = lock_tvar(cap, trec, s);
q = (StgTVarWatchQueue *) (e -> new_value);
TRACE("%p : removing tso=%p from watch queue for tvar=%p",
trec,
@@ -562,7 +577,7 @@ static void remove_watch_queue_entries_for_trec(Capability *cap,
} else {
ASSERT(s -> first_watch_queue_entry == q);
s -> first_watch_queue_entry = nq;
- dirty_TVAR(cap,s); // we modified first_watch_queue_entry
+ dirty_TVAR(cap, s, (StgClosure *) q); // we modified first_watch_queue_entry
}
free_stg_tvar_watch_queue(cap, q);
unlock_tvar(cap, trec, s, saw, false);
@@ -773,7 +788,7 @@ static StgBool validate_and_acquire_ownership (Capability *cap,
s = e -> tvar;
if (acquire_all || entry_is_update(e)) {
TRACE("%p : trying to acquire %p", trec, s);
- if (!cond_lock_tvar(trec, s, e -> expected_value)) {
+ if (!cond_lock_tvar(cap, trec, s, e -> expected_value)) {
TRACE("%p : failed to acquire %p", trec, s);
result = false;
BREAK_FOR_EACH;
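The recurring shape of the changes above — capture `old_value` before
the store, then pass it to the widened `dirty_TVAR(cap, s, old_value)` —
is the dirty-flag optimization from the commit message: only the first
mutation of a clean object needs the write barrier, so the push of the
overwritten referent can be folded into the clean-to-dirty transition.
A sketch of that shape, again with illustrative stand-in names
(`TVarLike`, `mark_dirty`, `set_value`) rather than the real RTS code:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct { bool dirty; void *current_value; } TVarLike;

    /* Stub for the remembered set push from the earlier sketch; in the
     * RTS this is updateRemembSetPushClosure, and the call is further
     * guarded by nonmoving_write_barrier_enabled. */
    static void rem_set_push_ptr(void *old_value) { (void)old_value; }

    /* Fold the write barrier into the clean -> dirty transition: once
     * the object is dirty it is already known to the collector, so
     * later mutations need no further pushes. */
    static void mark_dirty(TVarLike *s, void *old_value) {
        if (!s->dirty) {
            s->dirty = true;
            rem_set_push_ptr(old_value);  /* first overwrite since clean */
        }
    }

    /* Caller pattern matching the diff: capture the old value, store
     * the new one, then report the mutation. */
    static void set_value(TVarLike *s, void *c) {
        void *old_value = s->current_value;
        s->current_value = c;
        mark_dirty(s, old_value);
    }

Once dirty, an object takes no more pushes until the collector cleans
it again, which keeps the common repeated-mutation case cheap.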