Diffstat (limited to 'rts/sm/Storage.c')
-rw-r--r--  rts/sm/Storage.c  22
1 file changed, 7 insertions, 15 deletions
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 648f8ae3d2..7da91c4e19 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -193,14 +193,13 @@ initStorage (void)
initMutex(&sm_mutex);
#endif
- ACQUIRE_SM_LOCK;
-
/* allocate generation info array */
generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
* sizeof(struct generation_),
"initStorage: gens");
/* Initialise all generations */
+ ACQUIRE_SM_LOCK;
for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
initGeneration(&generations[g], g);
}
@@ -214,16 +213,11 @@ initStorage (void)
generations[g].to = &generations[g+1];
}
oldest_gen->to = oldest_gen;
+ RELEASE_SM_LOCK;
// Nonmoving heap uses oldest_gen so initialize it after initializing oldest_gen
nonmovingInit();
-#if defined(THREADED_RTS)
- // nonmovingAddCapabilities allocates segments, which requires taking the gc
- // sync lock, so initialize it before nonmovingAddCapabilities
- initSpinLock(&gc_alloc_block_sync);
-#endif
-
if (RtsFlags.GcFlags.useNonmoving)
nonmovingAddCapabilities(getNumCapabilities());
@@ -261,8 +255,6 @@ initStorage (void)
IF_DEBUG(gc, statDescribeGens());
- RELEASE_SM_LOCK;
-
traceInitEvent(traceHeapInfo);
}
@@ -314,12 +306,14 @@ void storageAddCapabilities (uint32_t from, uint32_t to)
assignNurseriesToCapabilities(from,to);
// allocate a block for each mut list
+ ACQUIRE_SM_LOCK;
for (n = from; n < to; n++) {
for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
getCapability(n)->mut_lists[g] =
allocBlockOnNode(capNoToNumaNode(n));
}
}
+ RELEASE_SM_LOCK;
// Initialize NonmovingAllocators and UpdRemSets
if (RtsFlags.GcFlags.useNonmoving) {
@@ -565,9 +559,7 @@ lockCAF (StgRegTable *reg, StgIndStatic *caf)
// Allocate the blackhole indirection closure
if (RtsFlags.GcFlags.useNonmoving) {
// See Note [Static objects under the nonmoving collector].
- ACQUIRE_SM_LOCK;
bh = (StgInd *)nonmovingAllocate(cap, sizeofW(*bh));
- RELEASE_SM_LOCK;
recordMutableCap((StgClosure*)bh,
regTableToCapability(reg), oldest_gen->no);
} else {
@@ -725,6 +717,7 @@ allocNursery (uint32_t node, bdescr *tail, W_ blocks)
// automatic prefetching works across nursery blocks. This is a
// tiny optimisation (~0.5%), but it's free.
+ ACQUIRE_SM_LOCK;
while (blocks > 0) {
n = stg_min(BLOCKS_PER_MBLOCK, blocks);
// allocLargeChunk will prefer large chunks, but will pick up
@@ -760,6 +753,7 @@ allocNursery (uint32_t node, bdescr *tail, W_ blocks)
tail = &bd[0];
}
+ RELEASE_SM_LOCK;
return &bd[0];
}
@@ -879,7 +873,7 @@ resizeNurseriesEach (W_ blocks)
next_bd = bd->link;
next_bd->u.back = NULL;
nursery_blocks -= bd->blocks; // might be a large block
- freeGroup(bd);
+ freeGroup_lock(bd);
bd = next_bd;
}
nursery->blocks = bd;
@@ -1300,9 +1294,7 @@ allocatePinned (Capability *cap, W_ n /*words*/, W_ alignment /*bytes*/, W_ alig
if (bd == NULL) {
// The pinned block list is empty: allocate a fresh block (we can't fail
// here).
- ACQUIRE_SM_LOCK;
bd = allocNursery(cap->node, NULL, PINNED_EMPTY_SIZE);
- RELEASE_SM_LOCK;
}
// Bump up the nursery pointer to avoid the pathological situation