author     Ben Gamari <ben@smart-cactus.org>            2022-11-10 10:27:54 -0500
committer  Marge Bot <ben+marge-bot@smart-cactus.org>   2022-12-23 19:09:30 -0500
commit     11241efa3422fae97aed0abb1857baab2f9018fb (patch)
tree       e9498c82c9a1e7f62fd4676b5093876052a7619d
parent     18d2acd2791a751c0b1894fd72dd0317583619cd (diff)
download   haskell-11241efa3422fae97aed0abb1857baab2f9018fb.tar.gz
nonmoving: Fix segment list races
-rw-r--r--  rts/sm/NonMoving.c | 12
-rw-r--r--  rts/sm/NonMoving.h |  6
2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index 41510e7f8e..130da95c3e 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -85,6 +85,10 @@ Mutex concurrent_coll_finished_lock;
* - A set of *filled* segments, which contain no unallocated blocks and will
* be collected during the next major GC cycle
*
+ * These sets are maintained as atomic singly-linked lists. This is not
+ * susceptible to the ABA problem since we are guaranteed to push a given
+ * segment to a list only once per garbage collection cycle.
+ *
* Storage for segments is allocated using the block allocator using an aligned
* group of NONMOVING_SEGMENT_BLOCKS blocks. This makes the task of locating
* the segment header for a clone a simple matter of bit-masking (as
@@ -543,7 +547,7 @@ void nonmovingPushFreeSegment(struct NonmovingSegment *seg)
static struct NonmovingSegment *nonmovingPopFreeSegment(void)
{
while (true) {
while (true) {
- struct NonmovingSegment *seg = nonmovingHeap.free;
+ struct NonmovingSegment *seg = ACQUIRE_LOAD(&nonmovingHeap.free);
if (seg == NULL) {
return NULL;
}
@@ -641,13 +645,15 @@ static bool advance_next_free(struct NonmovingSegment *seg, const unsigned int b
static struct NonmovingSegment *pop_active_segment(struct NonmovingAllocator *alloca)
{
while (true) {
- struct NonmovingSegment *seg = alloca->active;
+ // Synchronizes with CAS in nonmovingPushActiveSegment
+ struct NonmovingSegment *seg = ACQUIRE_LOAD(&alloca->active);
if (seg == NULL) {
return NULL;
}
+ struct NonmovingSegment *next = RELAXED_LOAD(&seg->link);
if (cas((StgVolatilePtr) &alloca->active,
(StgWord) seg,
- (StgWord) seg->link) == (StgWord) seg) {
+ (StgWord) next) == (StgWord) seg) {
return seg;
}
}
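
The pop-side change above can be illustrated with a small standalone sketch. This is not the RTS code: it models the same lock-free segment-list pattern with C11 atomics, and the node/head names are hypothetical stand-ins for NonmovingSegment and an allocator's active or free list. The point is the pairing: the acquire load of the list head synchronizes with the release performed by the publishing CAS on the push side, so the popped node's link field (written by the pusher before publication) is guaranteed to be visible here. As the new comment in NonMoving.c notes, the usual ABA hazard of such CAS loops does not arise because a given segment is pushed to a list at most once per GC cycle.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for NonmovingSegment and an allocator's
   segment list; the names are illustrative only. */
struct node {
    _Atomic(struct node *) link;
};

static _Atomic(struct node *) head;

static struct node *pop(void)
{
    while (true) {
        /* Acquire: synchronizes with the release CAS in push(), so the
           pusher's store to n->link is visible before we read it. */
        struct node *n = atomic_load_explicit(&head, memory_order_acquire);
        if (n == NULL) {
            return NULL;
        }
        /* Relaxed suffices: the acquire above already ordered this read
           after the pusher's store (mirroring RELAXED_LOAD(&seg->link)). */
        struct node *next = atomic_load_explicit(&n->link, memory_order_relaxed);
        if (atomic_compare_exchange_strong_explicit(
                &head, &n, next,
                memory_order_acquire, memory_order_relaxed)) {
            return n;
        }
        /* CAS failed: another thread raced us; retry from the new head. */
    }
}

The RTS itself uses its own cas() primitive rather than explicit C11 orderings; that primitive is at least as strong as the release/acquire pairing shown here, so the sketch only makes the required ordering explicit.
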
diff --git a/rts/sm/NonMoving.h b/rts/sm/NonMoving.h
index 12fb9ddaab..1d40ef726e 100644
--- a/rts/sm/NonMoving.h
+++ b/rts/sm/NonMoving.h
@@ -169,7 +169,7 @@ INLINE_HEADER void nonmovingPushActiveSegment(struct NonmovingSegment *seg)
nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
SET_SEGMENT_STATE(seg, ACTIVE);
while (true) {
- struct NonmovingSegment *current_active = (struct NonmovingSegment*)VOLATILE_LOAD(&alloc->active);
+ struct NonmovingSegment *current_active = RELAXED_LOAD(&alloc->active);
seg->link = current_active;
if (cas((StgVolatilePtr) &alloc->active, (StgWord) current_active, (StgWord) seg) == (StgWord) current_active) {
break;
@@ -184,8 +184,8 @@ INLINE_HEADER void nonmovingPushFilledSegment(struct NonmovingSegment *seg)
nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
SET_SEGMENT_STATE(seg, FILLED);
while (true) {
- struct NonmovingSegment *current_filled = (struct NonmovingSegment*)VOLATILE_LOAD(&alloc->filled);
- seg->link = current_filled;
+ struct NonmovingSegment *current_filled = (struct NonmovingSegment*) RELAXED_LOAD(&alloc->filled);
+ RELAXED_STORE(&seg->link, current_filled);
if (cas((StgVolatilePtr) &alloc->filled, (StgWord) current_filled, (StgWord) seg) == (StgWord) current_filled) {
break;
}
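
The push side, in the same hedged C11 sketch (again, node and head are hypothetical stand-ins, not the RTS types): the link field can be written with a relaxed store because the node is still private to the pushing thread, and the CAS that publishes it provides release ordering, which is what the ACQUIRE_LOAD in the pop paths synchronizes with.

/* Continuing the sketch from the NonMoving.c hunk above. */
static void push(struct node *n)
{
    /* Relaxed is fine for the initial read of the head: the value is
       re-validated by the CAS below (mirroring the change from
       VOLATILE_LOAD to RELAXED_LOAD in nonmovingPushActiveSegment). */
    struct node *current = atomic_load_explicit(&head, memory_order_relaxed);
    do {
        /* n is not yet reachable by other threads, so a relaxed store of
           the link suffices (mirroring RELAXED_STORE(&seg->link, ...)). */
        atomic_store_explicit(&n->link, current, memory_order_relaxed);
        /* Release on success: publishes n, and with it the link written
           above, to any thread whose acquire load of head observes n. */
    } while (!atomic_compare_exchange_weak_explicit(
                 &head, &current, n,
                 memory_order_release, memory_order_relaxed));
}

On failure the weak CAS refreshes current with the head value it actually observed, so the loop rewrites n->link before retrying; this corresponds to the retry structure of the nonmovingPush* helpers, which reread the head and rebuild the link on every iteration.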