author     Ben Gamari <ben@smart-cactus.org>           2022-12-06 13:54:09 -0500
committer  Marge Bot <ben+marge-bot@smart-cactus.org>  2023-03-08 15:02:31 -0500
commit     b500867a9eae6381e5c686aaa71ae069398eacb9 (patch)
tree       c9bef464f62bd05d213b1e227bb66ffab2b44359 /rts
parent     78746906d133765a9a4219eb34ed01e78f31344c (diff)
download   haskell-b500867a9eae6381e5c686aaa71ae069398eacb9.tar.gz
nonmoving: Move current segment array into Capability
The current segments are conceptually owned by the mutator, not the collector. Consequently, it was quite tricky to prove that the mutator would not race with the collector due to this shared state.

It turns out that such races are possible: when resizing the current segment array we may concurrently try to take a heap census. The census will attempt to walk the current segment array, causing a data race.

Fix this by moving the current segment array into `Capability`, where it belongs.

Fixes #22926.
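To make the shape of the change concrete, here is a simplified before/after sketch of the ownership move (stand-in C declarations; the field names follow the diff below, everything else is elided):

struct NonmovingSegment;   /* opaque for this sketch */

/* Before: the collector-shared allocator owned one current segment
 * per capability via a flexible array member, so the array had to be
 * reallocated whenever the capability count changed. */
struct NonmovingAllocator_before {
    struct NonmovingSegment *filled;
    struct NonmovingSegment *active;
    struct NonmovingSegment *current[];   /* indexed by capability number */
};

/* After: each Capability privately owns its current segments, one per
 * allocator size class (length NONMOVING_ALLOCA_CNT). Resizing the
 * capability count no longer rewrites collector-shared state, which
 * removes the census race described above. */
struct Capability_after {
    /* ... other per-capability state ... */
    struct NonmovingSegment **current_segments;
};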
Diffstat (limited to 'rts')
-rw-r--r--  rts/Capability.c            4
-rw-r--r--  rts/Capability.h            3
-rw-r--r--  rts/Schedule.c              2
-rw-r--r--  rts/sm/NonMoving.c        118
-rw-r--r--  rts/sm/NonMoving.h         11
-rw-r--r--  rts/sm/NonMovingCensus.c   24
-rw-r--r--  rts/sm/NonMovingCensus.h    4
-rw-r--r--  rts/sm/NonMovingMark.c      4
-rw-r--r--  rts/sm/NonMovingMark.h      2
-rw-r--r--  rts/sm/Sanity.c            31
-rw-r--r--  rts/sm/Storage.c           23
11 files changed, 89 insertions, 137 deletions
diff --git a/rts/Capability.c b/rts/Capability.c
index 7d5def832d..05f4794bef 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -293,6 +293,7 @@ initCapability (Capability *cap, uint32_t i)
cap->saved_mut_lists = stgMallocBytes(sizeof(bdescr *) *
RtsFlags.GcFlags.generations,
"initCapability");
+ cap->current_segments = NULL;
// At this point storage manager is not initialized yet, so this will be
@@ -1258,6 +1259,9 @@ freeCapability (Capability *cap)
{
stgFree(cap->mut_lists);
stgFree(cap->saved_mut_lists);
+ if (cap->current_segments) {
+ stgFree(cap->current_segments);
+ }
#if defined(THREADED_RTS)
freeSparkPool(cap->sparks);
#endif
diff --git a/rts/Capability.h b/rts/Capability.h
index 4f9593c8b2..a039aae235 100644
--- a/rts/Capability.h
+++ b/rts/Capability.h
@@ -97,6 +97,9 @@ struct Capability_ {
// The update remembered set for the non-moving collector
UpdRemSet upd_rem_set;
+ // Array of current segments for the non-moving collector.
+ // Of length NONMOVING_ALLOCA_CNT.
+ struct NonmovingSegment **current_segments;
// block for allocating pinned objects into
bdescr *pinned_object_block;
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 94f756c5e4..5b5e765231 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -2331,7 +2331,9 @@ setNumCapabilities (uint32_t new_n_capabilities USED_IF_THREADS)
moreCapabilities(n_capabilities, new_n_capabilities);
// Resize and update storage manager data structures
+ ACQUIRE_SM_LOCK;
storageAddCapabilities(n_capabilities, new_n_capabilities);
+ RELEASE_SM_LOCK;
}
}
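The locking change above pairs with the precondition added in Storage.c further down ("Caller must hold SM_LOCK"): the callee documents the requirement instead of taking the lock itself. A minimal, self-contained sketch of that caller-holds-lock convention, with pthread stand-ins for the RTS lock macros:

#include <pthread.h>

static pthread_mutex_t sm_lock = PTHREAD_MUTEX_INITIALIZER;

/* Precondition: sm_lock held by caller (cf. the "Caller must hold
 * SM_LOCK" comment this patch adds to storageAddCapabilities). */
static void storage_add_caps_sketch(void)
{
    /* ... allocate per-capability storage here ... */
}

static void set_num_caps_sketch(void)
{
    pthread_mutex_lock(&sm_lock);     /* cf. ACQUIRE_SM_LOCK above */
    storage_add_caps_sketch();
    pthread_mutex_unlock(&sm_lock);   /* cf. RELEASE_SM_LOCK */
}

Keeping the lock in the caller means nested callees (here, nonmovingInitCapability) can assume the lock without risking self-deadlock.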
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index 320898f0b3..798e09545c 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -715,10 +715,11 @@ void *nonmovingAllocate(Capability *cap, StgWord sz)
// object and not moved) which is covered by allocator 9.
ASSERT(log_block_size < NONMOVING_ALLOCA0 + NONMOVING_ALLOCA_CNT);
- struct NonmovingAllocator *alloca = nonmovingHeap.allocators[log_block_size - NONMOVING_ALLOCA0];
+ unsigned int alloca_idx = log_block_size - NONMOVING_ALLOCA0;
+ struct NonmovingAllocator *alloca = &nonmovingHeap.allocators[alloca_idx];
// Allocate into current segment
- struct NonmovingSegment *current = alloca->current[cap->no];
+ struct NonmovingSegment *current = cap->current_segments[alloca_idx];
ASSERT(current); // current is never NULL
void *ret = nonmovingSegmentGetBlock_(current, log_block_size, current->next_free);
ASSERT(GET_CLOSURE_TAG(ret) == 0); // check alignment
@@ -751,29 +752,12 @@ void *nonmovingAllocate(Capability *cap, StgWord sz)
// make it current
new_current->link = NULL;
SET_SEGMENT_STATE(new_current, CURRENT);
- alloca->current[cap->no] = new_current;
+ cap->current_segments[alloca_idx] = new_current;
}
return ret;
}
-/* Allocate a nonmovingAllocator */
-static struct NonmovingAllocator *alloc_nonmoving_allocator(uint32_t n_caps)
-{
- size_t allocator_sz =
- sizeof(struct NonmovingAllocator) +
- sizeof(void*) * n_caps; // current segment pointer for each capability
- struct NonmovingAllocator *alloc =
- stgMallocBytes(allocator_sz, "nonmovingInit");
- memset(alloc, 0, allocator_sz);
- return alloc;
-}
-
-static void free_nonmoving_allocator(struct NonmovingAllocator *alloc)
-{
- stgFree(alloc);
-}
-
void nonmovingInit(void)
{
if (! RtsFlags.GcFlags.useNonmoving) return;
@@ -782,10 +766,7 @@ void nonmovingInit(void)
initCondition(&concurrent_coll_finished);
initMutex(&concurrent_coll_finished_lock);
#endif
- for (unsigned int i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
- nonmovingHeap.allocators[i] = alloc_nonmoving_allocator(getNumCapabilities());
- }
- nonmovingMarkInitUpdRemSet();
+ nonmovingMarkInit();
}
// Stop any nonmoving collection in preparation for RTS shutdown.
@@ -818,44 +799,24 @@ void nonmovingExit(void)
closeCondition(&concurrent_coll_finished);
closeMutex(&nonmoving_collection_mutex);
#endif
-
- for (unsigned int i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
- free_nonmoving_allocator(nonmovingHeap.allocators[i]);
- }
}
-/*
- * Assumes that no garbage collector or mutator threads are running to safely
- * resize the nonmoving_allocators.
- *
- * Must hold sm_mutex.
- */
-void nonmovingAddCapabilities(uint32_t new_n_caps)
+/* Initialize a new capability. Caller must hold SM_LOCK */
+void nonmovingInitCapability(Capability *cap)
{
- unsigned int old_n_caps = nonmovingHeap.n_caps;
- struct NonmovingAllocator **allocs = nonmovingHeap.allocators;
-
+ // Initialize current segment array
+ struct NonmovingSegment **segs =
+ stgMallocBytes(sizeof(struct NonmovingSegment*) * NONMOVING_ALLOCA_CNT, "current segment array");
for (unsigned int i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
- struct NonmovingAllocator *old = allocs[i];
- allocs[i] = alloc_nonmoving_allocator(new_n_caps);
-
- // Copy the old state
- allocs[i]->filled = old->filled;
- allocs[i]->active = old->active;
- for (unsigned int j = 0; j < old_n_caps; j++) {
- allocs[i]->current[j] = old->current[j];
- }
- stgFree(old);
-
- // Initialize current segments for the new capabilities
- for (unsigned int j = old_n_caps; j < new_n_caps; j++) {
- allocs[i]->current[j] = nonmovingAllocSegment(getCapability(j)->node);
- nonmovingInitSegment(allocs[i]->current[j], NONMOVING_ALLOCA0 + i);
- SET_SEGMENT_STATE(allocs[i]->current[j], CURRENT);
- allocs[i]->current[j]->link = NULL;
- }
+ segs[i] = nonmovingAllocSegment(cap->node);
+ nonmovingInitSegment(segs[i], NONMOVING_ALLOCA0 + i);
+ SET_SEGMENT_STATE(segs[i], CURRENT);
}
- nonmovingHeap.n_caps = new_n_caps;
+ cap->current_segments = segs;
+
+ // Initialize update remembered set
+ cap->upd_rem_set.queue.blocks = NULL;
+ nonmovingInitUpdRemSet(&cap->upd_rem_set);
}
void nonmovingClearBitmap(struct NonmovingSegment *seg)
@@ -875,13 +836,15 @@ static void nonmovingPrepareMark(void)
// Should have been cleared by the last sweep
ASSERT(nonmovingHeap.sweep_list == NULL);
+ nonmovingHeap.n_caps = n_capabilities;
nonmovingBumpEpoch();
for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
- struct NonmovingAllocator *alloca = nonmovingHeap.allocators[alloca_idx];
+ struct NonmovingAllocator *alloca = &nonmovingHeap.allocators[alloca_idx];
// Update current segments' snapshot pointers
for (uint32_t cap_n = 0; cap_n < nonmovingHeap.n_caps; ++cap_n) {
- struct NonmovingSegment *seg = alloca->current[cap_n];
+ Capability *cap = getCapability(cap_n);
+ struct NonmovingSegment *seg = cap->current_segments[alloca_idx];
nonmovingSegmentInfo(seg)->next_free_snap = seg->next_free;
}
@@ -1114,7 +1077,7 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
// Walk the list of filled segments that we collected during preparation,
// update their snapshot pointers and move them to the sweep list.
for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
- struct NonmovingSegment *filled = nonmovingHeap.allocators[alloca_idx]->saved_filled;
+ struct NonmovingSegment *filled = nonmovingHeap.allocators[alloca_idx].saved_filled;
uint32_t n_filled = 0;
if (filled) {
struct NonmovingSegment *seg = filled;
@@ -1133,7 +1096,7 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
seg->link = nonmovingHeap.sweep_list;
nonmovingHeap.sweep_list = filled;
}
- nonmovingHeap.allocators[alloca_idx]->saved_filled = NULL;
+ nonmovingHeap.allocators[alloca_idx].saved_filled = NULL;
}
// Mark Weak#s
@@ -1350,10 +1313,12 @@ void assert_in_nonmoving_heap(StgPtr p)
}
for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
- struct NonmovingAllocator *alloca = nonmovingHeap.allocators[alloca_idx];
+ struct NonmovingAllocator *alloca = &nonmovingHeap.allocators[alloca_idx];
+
// Search current segments
for (uint32_t cap_idx = 0; cap_idx < nonmovingHeap.n_caps; ++cap_idx) {
- struct NonmovingSegment *seg = alloca->current[cap_idx];
+ Capability *cap = getCapability(cap_idx);
+ struct NonmovingSegment *seg = cap->current_segments[alloca_idx];
if (p >= (P_)seg && p < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
return;
}
@@ -1412,33 +1377,16 @@ void nonmovingPrintSegment(struct NonmovingSegment *seg)
debugBelch("End of segment\n\n");
}
-void nonmovingPrintAllocator(struct NonmovingAllocator *alloc)
-{
- debugBelch("Allocator at %p\n", (void*)alloc);
- debugBelch("Filled segments:\n");
- for (struct NonmovingSegment *seg = alloc->filled; seg != NULL; seg = seg->link) {
- debugBelch("%p ", (void*)seg);
- }
- debugBelch("\nActive segments:\n");
- for (struct NonmovingSegment *seg = alloc->active; seg != NULL; seg = seg->link) {
- debugBelch("%p ", (void*)seg);
- }
- debugBelch("\nCurrent segments:\n");
- for (uint32_t i = 0; i < nonmovingHeap.n_caps; ++i) {
- debugBelch("%p ", alloc->current[i]);
- }
- debugBelch("\n");
-}
-
void locate_object(P_ obj)
{
// Search allocators
for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
- struct NonmovingAllocator *alloca = nonmovingHeap.allocators[alloca_idx];
- for (uint32_t cap = 0; cap < nonmovingHeap.n_caps; ++cap) {
- struct NonmovingSegment *seg = alloca->current[cap];
+ struct NonmovingAllocator *alloca = &nonmovingHeap.allocators[alloca_idx];
+ for (uint32_t cap_n = 0; cap_n < getNumCapabilities(); ++cap_n) {
+ Capability *cap = getCapability(cap_n);
+ struct NonmovingSegment *seg = cap->current_segments[alloca_idx];
if (obj >= (P_)seg && obj < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
- debugBelch("%p is in current segment of capability %d of allocator %d at %p\n", obj, cap, alloca_idx, (void*)seg);
+ debugBelch("%p is in current segment of capability %d of allocator %d at %p\n", obj, cap_n, alloca_idx, (void*)seg);
return;
}
}
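For orientation: the allocation fast path above derives a single index from the block's log2 size and uses it for both `nonmovingHeap.allocators[]` and `cap->current_segments[]`. That shared indexing is what makes the move into `Capability` a drop-in change. A small sketch of the arithmetic (the constant values here are assumptions for illustration, not the real RTS values):

#include <assert.h>
#include <stdint.h>

#define NONMOVING_ALLOCA0    3    /* log2 of smallest block size (assumed) */
#define NONMOVING_ALLOCA_CNT 12   /* number of size classes (assumed) */

/* Map a log2 block size to an index valid for both the heap's
 * allocator array and a capability's current_segments array. */
static unsigned int alloca_index(uint8_t log_block_size)
{
    assert(log_block_size >= NONMOVING_ALLOCA0);
    assert(log_block_size <  NONMOVING_ALLOCA0 + NONMOVING_ALLOCA_CNT);
    return log_block_size - NONMOVING_ALLOCA0;
}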
diff --git a/rts/sm/NonMoving.h b/rts/sm/NonMoving.h
index 91144f6f9b..26031626a9 100644
--- a/rts/sm/NonMoving.h
+++ b/rts/sm/NonMoving.h
@@ -87,8 +87,7 @@ struct NonmovingAllocator {
struct NonmovingSegment *filled;
struct NonmovingSegment *saved_filled;
struct NonmovingSegment *active;
- // indexed by capability number
- struct NonmovingSegment *current[];
+ // N.B. Per-capability "current" segment lives in Capability
};
// first allocator is of size 2^NONMOVING_ALLOCA0 (in bytes)
@@ -102,7 +101,7 @@ struct NonmovingAllocator {
#define NONMOVING_MAX_FREE 16
struct NonmovingHeap {
- struct NonmovingAllocator *allocators[NONMOVING_ALLOCA_CNT];
+ struct NonmovingAllocator allocators[NONMOVING_ALLOCA_CNT];
// free segment list. This is a cache where we keep up to
// NONMOVING_MAX_FREE segments to avoid thrashing the block allocator.
// Note that segments in this list are still counted towards
@@ -153,7 +152,7 @@ void nonmovingCollect(StgWeak **dead_weaks,
StgTSO **resurrected_threads);
void *nonmovingAllocate(Capability *cap, StgWord sz);
-void nonmovingAddCapabilities(uint32_t new_n_caps);
+void nonmovingInitCapability(Capability *cap);
void nonmovingPushFreeSegment(struct NonmovingSegment *seg);
void nonmovingClearBitmap(struct NonmovingSegment *seg);
@@ -170,7 +169,7 @@ INLINE_HEADER uint8_t nonmovingSegmentLogBlockSize(struct NonmovingSegment *seg)
INLINE_HEADER void nonmovingPushActiveSegment(struct NonmovingSegment *seg)
{
struct NonmovingAllocator *alloc =
- nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
+ &nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
SET_SEGMENT_STATE(seg, ACTIVE);
while (true) {
struct NonmovingSegment *current_active = RELAXED_LOAD(&alloc->active);
@@ -185,7 +184,7 @@ INLINE_HEADER void nonmovingPushActiveSegment(struct NonmovingSegment *seg)
INLINE_HEADER void nonmovingPushFilledSegment(struct NonmovingSegment *seg)
{
struct NonmovingAllocator *alloc =
- nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
+ &nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
SET_SEGMENT_STATE(seg, FILLED);
while (true) {
struct NonmovingSegment *current_filled = (struct NonmovingSegment*) RELAXED_LOAD(&alloc->filled);
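The push helpers above retry on contention; the compare-and-swap itself falls outside the hunk's context lines. A self-contained C11 sketch of the presumed Treiber-style push, with standard atomics standing in for the RTS's RELAXED_LOAD and CAS wrappers:

#include <stdatomic.h>

struct seg { struct seg *link; };   /* stand-in for NonmovingSegment */

static void push_sketch(_Atomic(struct seg *) *head, struct seg *s)
{
    struct seg *old = atomic_load_explicit(head, memory_order_relaxed);
    do {
        s->link = old;                     /* link onto the current head */
    } while (!atomic_compare_exchange_weak_explicit(
                 head, &old, s,
                 memory_order_release,     /* publish s->link on success */
                 memory_order_relaxed));   /* on failure, old is reloaded */
}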
diff --git a/rts/sm/NonMovingCensus.c b/rts/sm/NonMovingCensus.c
index 426179928b..27494284fe 100644
--- a/rts/sm/NonMovingCensus.c
+++ b/rts/sm/NonMovingCensus.c
@@ -21,10 +21,12 @@
// stopped. In this case it is safe to look at active and current segments so we can
// also collect statistics on live words.
static struct NonmovingAllocCensus
-nonmovingAllocatorCensus_(struct NonmovingAllocator *alloc, bool collect_live_words)
+nonmovingAllocatorCensus_(uint32_t alloc_idx, bool collect_live_words)
{
struct NonmovingAllocCensus census = {collect_live_words, 0, 0, 0, 0};
+ struct NonmovingAllocator *alloc = &nonmovingHeap.allocators[alloc_idx];
+ // filled segments
for (struct NonmovingSegment *seg = alloc->filled;
seg != NULL;
seg = seg->link)
@@ -40,6 +42,7 @@ nonmovingAllocatorCensus_(struct NonmovingAllocator *alloc, bool collect_live_wo
}
}
+ // active segments
for (struct NonmovingSegment *seg = alloc->active;
seg != NULL;
seg = seg->link)
@@ -56,9 +59,11 @@ nonmovingAllocatorCensus_(struct NonmovingAllocator *alloc, bool collect_live_wo
}
}
- for (unsigned int cap=0; cap < getNumCapabilities(); cap++)
+ // current segments
+ for (unsigned int cap_n=0; cap_n < getNumCapabilities(); cap_n++)
{
- struct NonmovingSegment *seg = alloc->current[cap];
+ Capability *cap = getCapability(cap_n);
+ struct NonmovingSegment *seg = cap->current_segments[alloc_idx];
unsigned int n = nonmovingSegmentBlockCount(seg);
for (unsigned int i=0; i < n; i++) {
if (nonmovingGetMark(seg, i)) {
@@ -76,15 +81,15 @@ nonmovingAllocatorCensus_(struct NonmovingAllocator *alloc, bool collect_live_wo
* all blocks in nonmoving heap are valid closures.
*/
struct NonmovingAllocCensus
-nonmovingAllocatorCensusWithWords(struct NonmovingAllocator *alloc)
+nonmovingAllocatorCensusWithWords(uint32_t alloc_idx)
{
- return nonmovingAllocatorCensus_(alloc, true);
+ return nonmovingAllocatorCensus_(alloc_idx, true);
}
struct NonmovingAllocCensus
-nonmovingAllocatorCensus(struct NonmovingAllocator *alloc)
+nonmovingAllocatorCensus(uint32_t alloc_idx)
{
- return nonmovingAllocatorCensus_(alloc, false);
+ return nonmovingAllocatorCensus_(alloc_idx, false);
}
@@ -130,7 +135,7 @@ void nonmovingPrintAllocatorCensus(bool collect_live_words)
for (int i=0; i < NONMOVING_ALLOCA_CNT; i++) {
struct NonmovingAllocCensus census =
- nonmovingAllocatorCensus_(nonmovingHeap.allocators[i], collect_live_words);
+ nonmovingAllocatorCensus_(i, collect_live_words);
print_alloc_census(i, census);
}
@@ -143,8 +148,7 @@ void nonmovingTraceAllocatorCensus()
return;
for (int i=0; i < NONMOVING_ALLOCA_CNT; i++) {
- const struct NonmovingAllocCensus census =
- nonmovingAllocatorCensus(nonmovingHeap.allocators[i]);
+ const struct NonmovingAllocCensus census = nonmovingAllocatorCensus(i);
const uint32_t log_blk_size = i + NONMOVING_ALLOCA0;
traceNonmovingHeapCensus(log_blk_size, &census);
}
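The census walk has the same shape for each of the three segment lists (filled, active, and now the per-capability current segments): traverse the list and count marked blocks. A stand-in sketch of that inner loop (types and sizes invented for illustration; cf. nonmovingGetMark in the diff above):

#include <stddef.h>
#include <stdint.h>

struct seg {
    struct seg *link;
    unsigned int n_blocks;
    uint8_t mark[256];        /* one mark byte per block (stand-in) */
};

/* Count live (marked) blocks across one segment list. */
static unsigned int census_list(const struct seg *segs)
{
    unsigned int live = 0;
    for (const struct seg *s = segs; s != NULL; s = s->link)
        for (unsigned int i = 0; i < s->n_blocks; i++)
            if (s->mark[i])   /* cf. nonmovingGetMark(seg, i) */
                live++;
    return live;
}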
diff --git a/rts/sm/NonMovingCensus.h b/rts/sm/NonMovingCensus.h
index 988df290ea..164017eccc 100644
--- a/rts/sm/NonMovingCensus.h
+++ b/rts/sm/NonMovingCensus.h
@@ -20,10 +20,10 @@ struct NonmovingAllocCensus {
struct NonmovingAllocCensus
-nonmovingAllocatorCensusWithWords(struct NonmovingAllocator *alloc);
+nonmovingAllocatorCensusWithWords(uint32_t alloc_idx);
struct NonmovingAllocCensus
-nonmovingAllocatorCensus(struct NonmovingAllocator *alloc);
+nonmovingAllocatorCensus(uint32_t alloc_idx);
void nonmovingPrintAllocatorCensus(bool collect_live_words);
void nonmovingTraceAllocatorCensus(void);
diff --git a/rts/sm/NonMovingMark.c b/rts/sm/NonMovingMark.c
index d46ace2849..3ec1baee20 100644
--- a/rts/sm/NonMovingMark.c
+++ b/rts/sm/NonMovingMark.c
@@ -251,7 +251,7 @@ StgWord nonmoving_write_barrier_enabled = false;
MarkQueue *current_mark_queue = NULL;
/* Initialise update remembered set data structures */
-void nonmovingMarkInitUpdRemSet() {
+void nonmovingMarkInit() {
#if defined(THREADED_RTS)
initMutex(&upd_rem_set_lock);
initCondition(&upd_rem_set_flushed_cond);
@@ -295,8 +295,8 @@ static void nonmovingAddUpdRemSetBlocks_lock(MarkQueue *rset)
// Reset the state of the remembered set.
ACQUIRE_SM_LOCK;
init_mark_queue_(rset);
- rset->is_upd_rem_set = true;
RELEASE_SM_LOCK;
+ rset->is_upd_rem_set = true;
}
/*
diff --git a/rts/sm/NonMovingMark.h b/rts/sm/NonMovingMark.h
index 1b56083113..763192ff4b 100644
--- a/rts/sm/NonMovingMark.h
+++ b/rts/sm/NonMovingMark.h
@@ -140,7 +140,7 @@ extern MarkQueue *current_mark_queue;
extern bdescr *upd_rem_set_block_list;
-void nonmovingMarkInitUpdRemSet(void);
+void nonmovingMarkInit(void);
void nonmovingInitUpdRemSet(UpdRemSet *rset);
void updateRemembSetPushClosure(Capability *cap, StgClosure *p);
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index e360586222..74260955bc 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -637,12 +637,13 @@ void checkNonmovingHeap (const struct NonmovingHeap *heap)
checkLargeObjects(nonmoving_marked_large_objects);
checkCompactObjects(nonmoving_compact_objects);
for (unsigned int i=0; i < NONMOVING_ALLOCA_CNT; i++) {
- const struct NonmovingAllocator *alloc = heap->allocators[i];
+ const struct NonmovingAllocator *alloc = &heap->allocators[i];
checkNonmovingSegments(alloc->filled);
checkNonmovingSegments(alloc->saved_filled);
checkNonmovingSegments(alloc->active);
- for (unsigned int cap=0; cap < getNumCapabilities(); cap++) {
- checkNonmovingSegments(alloc->current[cap]);
+ for (unsigned int cap_n=0; cap_n < getNumCapabilities(); cap_n++) {
+ Capability *cap = getCapability(cap_n);
+ checkNonmovingSegments(cap->current_segments[i]);
}
}
}
@@ -1070,12 +1071,13 @@ findMemoryLeak (void)
markBlocks(nonmoving_compact_objects);
markBlocks(nonmoving_marked_compact_objects);
for (i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
- struct NonmovingAllocator *alloc = nonmovingHeap.allocators[i];
+ struct NonmovingAllocator *alloc = &nonmovingHeap.allocators[i];
markNonMovingSegments(alloc->filled);
markNonMovingSegments(alloc->saved_filled);
markNonMovingSegments(alloc->active);
for (j = 0; j < getNumCapabilities(); j++) {
- markNonMovingSegments(alloc->current[j]);
+ Capability *cap = getCapability(j);
+ markNonMovingSegments(cap->current_segments[i]);
}
}
markNonMovingSegments(nonmovingHeap.sweep_list);
@@ -1181,22 +1183,17 @@ countNonMovingSegments(struct NonmovingSegment *segs)
}
static W_
-countNonMovingAllocator(struct NonmovingAllocator *alloc)
-{
- W_ ret = countNonMovingSegments(alloc->filled)
- + countNonMovingSegments(alloc->active);
- for (uint32_t i = 0; i < getNumCapabilities(); ++i) {
- ret += countNonMovingSegments(alloc->current[i]);
- }
- return ret;
-}
-
-static W_
countNonMovingHeap(struct NonmovingHeap *heap)
{
W_ ret = 0;
for (int alloc_idx = 0; alloc_idx < NONMOVING_ALLOCA_CNT; alloc_idx++) {
- ret += countNonMovingAllocator(heap->allocators[alloc_idx]);
+ struct NonmovingAllocator *alloc = &heap->allocators[alloc_idx];
+ ret += countNonMovingSegments(alloc->filled);
+ ret += countNonMovingSegments(alloc->active);
+ for (uint32_t c = 0; c < getNumCapabilities(); ++c) {
+ Capability *cap = getCapability(c);
+ ret += countNonMovingSegments(cap->current_segments[alloc_idx]);
+ }
}
ret += countNonMovingSegments(heap->sweep_list);
ret += countNonMovingSegments(heap->free);
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 0309d3a565..9605d0a764 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -193,14 +193,13 @@ initStorage (void)
initMutex(&sm_mutex);
#endif
- ACQUIRE_SM_LOCK;
-
/* allocate generation info array */
generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
* sizeof(struct generation_),
"initStorage: gens");
/* Initialise all generations */
+ ACQUIRE_SM_LOCK;
for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
initGeneration(&generations[g], g);
}
@@ -215,17 +214,14 @@ initStorage (void)
}
oldest_gen->to = oldest_gen;
- // Nonmoving heap uses oldest_gen so initialize it after initializing oldest_gen
- nonmovingInit();
-
#if defined(THREADED_RTS)
// nonmovingAddCapabilities allocates segments, which requires taking the gc
// sync lock, so initialize it before nonmovingAddCapabilities
initSpinLock(&gc_alloc_block_sync);
#endif
- if (RtsFlags.GcFlags.useNonmoving)
- nonmovingAddCapabilities(getNumCapabilities());
+ // Nonmoving heap uses oldest_gen so initialize it after initializing oldest_gen
+ nonmovingInit();
/* The oldest generation has one step. */
if (RtsFlags.GcFlags.compact || RtsFlags.GcFlags.sweep) {
@@ -264,9 +260,9 @@ initStorage (void)
RELEASE_SM_LOCK;
traceInitEvent(traceHeapInfo);
-
}
+// Caller must hold SM_LOCK.
void storageAddCapabilities (uint32_t from, uint32_t to)
{
uint32_t n, g, i, new_n_nurseries;
@@ -321,12 +317,10 @@ void storageAddCapabilities (uint32_t from, uint32_t to)
}
}
- // Initialize NonmovingAllocators and UpdRemSets
+ // Initialize non-moving collector
if (RtsFlags.GcFlags.useNonmoving) {
- nonmovingAddCapabilities(to);
for (i = from; i < to; i++) {
- getCapability(i)->upd_rem_set.queue.blocks = NULL;
- nonmovingInitUpdRemSet(&getCapability(i)->upd_rem_set);
+ nonmovingInitCapability(getCapability(i));
}
}
@@ -1954,14 +1948,15 @@ void rts_clearMemory(void) {
}
for (int i = 0; i < NONMOVING_ALLOCA_CNT; ++i) {
- struct NonmovingAllocator *alloc = nonmovingHeap.allocators[i];
+ struct NonmovingAllocator *alloc = &nonmovingHeap.allocators[i];
for (struct NonmovingSegment *seg = alloc->active; seg; seg = seg->link) {
clear_segment_free_blocks(seg);
}
for (unsigned int j = 0; j < getNumCapabilities(); ++j) {
- clear_segment_free_blocks(alloc->current[j]);
+ Capability *cap = getCapability(j);
+ clear_segment_free_blocks(cap->current_segments[i]);
}
}
}
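Taken together, the Storage.c hunks reorder start-up: generations come first, then the gc_alloc_block_sync spinlock (segment allocation needs it), then nonmovingInit (which uses oldest_gen), with per-capability segments created later by nonmovingInitCapability under SM_LOCK. A condensed, self-contained sketch of that order, using stub functions named after the diff:

#include <stdint.h>

/* Stubs named after the diff; real bodies elided. */
static void initSpinLock_stub(void) {}
static void nonmovingInit_stub(void) {}
static void nonmovingInitCapability_stub(uint32_t cap_no) { (void)cap_no; }

static void initStorage_sketch(void)
{
    /* 1. generations and oldest_gen are set up first (elided) */
    initSpinLock_stub();     /* 2. gc_alloc_block_sync must exist before
                                   any nonmoving segment is allocated */
    nonmovingInit_stub();    /* 3. uses oldest_gen, so it runs last */
}

static void storageAddCapabilities_sketch(uint32_t from, uint32_t to)
{
    /* caller holds SM_LOCK (precondition added by this patch) */
    for (uint32_t i = from; i < to; i++)
        nonmovingInitCapability_stub(i);  /* segments + upd_rem_set */
}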