Diffstat (limited to 'rts/sm/NonMoving.c')
-rw-r--r--  rts/sm/NonMoving.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index 41510e7f8e..130da95c3e 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -85,6 +85,10 @@ Mutex concurrent_coll_finished_lock;
* - A set of *filled* segments, which contain no unallocated blocks and will
* be collected during the next major GC cycle
*
+ * These sets are maintained as atomic singly-linked lists. This is not
+ * susceptible to the ABA problem since we are guaranteed to push a given
+ * segment to a list only once per garbage collection cycle.
+ *
* Storage for segments is allocated using the block allocator using an aligned
* group of NONMOVING_SEGMENT_BLOCKS blocks. This makes the task of locating
 * the segment header for a closure a simple matter of bit-masking (as
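
The push side of such an atomic list is a plain compare-and-swap loop. A minimal sketch of the idea using C11 atomics (hypothetical names throughout; the RTS itself uses its own cas/ACQUIRE_LOAD primitives rather than <stdatomic.h>):

    #include <stdatomic.h>
    #include <stddef.h>

    struct segment {
        struct segment *link;    /* next segment in the intrusive list */
        /* ... segment payload ... */
    };

    /* Head of one of the segment lists (e.g. the filled list). */
    static _Atomic(struct segment *) list_head = NULL;

    /* Push a segment onto the list.  Since each segment is pushed at most
     * once per GC cycle, a popper that still sees the same head pointer can
     * trust that head->link has not been recycled, so no ABA tag is needed. */
    static void push_segment(struct segment *seg)
    {
        struct segment *old = atomic_load_explicit(&list_head, memory_order_relaxed);
        do {
            seg->link = old;
        } while (!atomic_compare_exchange_weak_explicit(
                     &list_head, &old, seg,
                     memory_order_release,   /* publish seg->link to poppers */
                     memory_order_relaxed)); /* on failure, old is reloaded  */
    }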
@@ -543,7 +547,7 @@ void nonmovingPushFreeSegment(struct NonmovingSegment *seg)
static struct NonmovingSegment *nonmovingPopFreeSegment(void)
{
while (true) {
- struct NonmovingSegment *seg = nonmovingHeap.free;
+ struct NonmovingSegment *seg = ACQUIRE_LOAD(&nonmovingHeap.free);
if (seg == NULL) {
return NULL;
}
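
The matching pop against the same hypothetical C11 sketch shows what the ACQUIRE_LOAD above buys: the acquire load of the head pairs with the release CAS in push_segment, so seg->link is guaranteed to be written before it is dereferenced. The sketch also assumes, as in GHC's nonmoving heap, that a segment popped by a racing thread is still valid memory to read.

    /* Pop one segment, or return NULL if the list is empty. */
    static struct segment *pop_segment(void)
    {
        struct segment *seg = atomic_load_explicit(&list_head, memory_order_acquire);
        while (seg != NULL) {
            /* The acquire load (above, or on CAS failure below) synchronizes
             * with the release CAS in push_segment, so seg->link is visible. */
            if (atomic_compare_exchange_weak_explicit(
                    &list_head, &seg, seg->link,
                    memory_order_acquire,    /* success: we now own seg      */
                    memory_order_acquire)) { /* failure: seg is the new head */
                return seg;
            }
        }
        return NULL;
    }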
@@ -641,13 +645,15 @@ static bool advance_next_free(struct NonmovingSegment *seg, const unsigned int b
static struct NonmovingSegment *pop_active_segment(struct NonmovingAllocator *alloca)
{
while (true) {
- struct NonmovingSegment *seg = alloca->active;
+ // Synchronizes with CAS in nonmovingPushActiveSegment
+ struct NonmovingSegment *seg = ACQUIRE_LOAD(&alloca->active);
if (seg == NULL) {
return NULL;
}
+ struct NonmovingSegment *next = RELAXED_LOAD(&seg->link);
if (cas((StgVolatilePtr) &alloca->active,
(StgWord) seg,
- (StgWord) seg->link) == (StgWord) seg) {
+ (StgWord) next) == (StgWord) seg) {
return seg;
}
}
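
The RELAXED_LOAD of seg->link needs no stronger ordering because the acquire load of alloca->active already synchronizes with the CAS in nonmovingPushActiveSegment that made seg reachable, so the write to seg->link is ordered before this read. The happens-before chain, traced as a small two-thread program in the same hypothetical C11 style (assuming a toolchain that ships the optional <threads.h>):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    struct segment { struct segment *link; int payload; };

    static _Atomic(struct segment *) head = NULL;
    static struct segment seg0 = { .link = NULL, .payload = 42 };

    static int pusher(void *arg)
    {
        (void) arg;
        seg0.link = NULL;                      /* (1) plain write to link      */
        atomic_store_explicit(&head, &seg0,    /* (2) release: (1) happens-    */
                              memory_order_release); /* before any acquirer    */
        return 0;
    }

    static int popper(void *arg)
    {
        (void) arg;
        struct segment *seg;
        /* (3) acquire pairs with (2); once &seg0 is observed, the write (1)
         * is visible, so a relaxed/plain read of seg->link below is safe. */
        while ((seg = atomic_load_explicit(&head, memory_order_acquire)) == NULL)
            thrd_yield();
        printf("popped payload %d, link %p\n", seg->payload, (void *) seg->link);
        return 0;
    }

    int main(void)
    {
        thrd_t a, b;
        thrd_create(&a, pusher, NULL);
        thrd_create(&b, popper, NULL);
        thrd_join(a, NULL);
        thrd_join(b, NULL);
        return 0;
    }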