author     Simon Marlow <marlowsd@gmail.com>    2009-12-01 16:03:21 +0000
committer  Simon Marlow <marlowsd@gmail.com>    2009-12-01 16:03:21 +0000
commit     5270423a6afe69f1dc57e5e5a474812182718d40 (patch)
tree       254cc0d910b315c47723a7a7fdd393fffbdee5ea /rts/sm/Storage.c
parent     063b822bb68f84dd9729327bb1765637c25aceb4 (diff)
download   haskell-5270423a6afe69f1dc57e5e5a474812182718d40.tar.gz
Make allocatePinned use local storage, and other refactorings
This is a batch of refactoring to remove some of the GC's global
state, as we move towards CPU-local GC.

  - allocateLocal() now allocates large objects into the local
    nursery, rather than taking a global lock and allocating them
    in gen 0 step 0.

  - allocatePinned() was still allocating from global storage and
    taking a lock each time, now it uses local storage.
    (mallocForeignPtrBytes should be faster with -threaded).

  - We had a gen 0 step 0, distinct from the nurseries, which are
    stored in a separate nurseries[] array.  This is slightly
    strange.  I removed the g0s0 global that pointed to gen 0 step 0,
    and removed all uses of it.  I think now we don't use gen 0
    step 0 at all, except possibly when there is only one generation.
    Possibly more tidying up is needed here.

  - I removed the global allocate() function, and renamed
    allocateLocal() to allocate().

  - the alloc_blocks global is gone.  MAYBE_GC() and doYouWantToGC()
    now check the local nursery only.
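For illustration, the core pattern this commit moves toward is lock-free
bump allocation out of per-Capability storage. A minimal standalone
sketch of that idea follows (simplified types; Capability, nursery and
free here are illustrative stand-ins, not the real RTS structures):

    #include <stddef.h>
    #include <stdint.h>

    #define NURSERY_WORDS 4096

    /* Illustrative model only: each capability owns its own nursery,
     * so allocation needs no global lock. */
    typedef struct {
        uintptr_t nursery[NURSERY_WORDS];
        size_t    free;              /* next free word in this nursery */
    } Capability;

    static uintptr_t *alloc_local(Capability *cap, size_t n)
    {
        if (cap->free + n > NURSERY_WORDS) {
            return NULL;             /* real code would trigger a GC */
        }
        uintptr_t *p = &cap->nursery[cap->free];
        cap->free += n;
        return p;
    }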
Diffstat (limited to 'rts/sm/Storage.c')
-rw-r--r--   rts/sm/Storage.c   304
1 files changed, 107 insertions, 197 deletions
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 73ef53f036..5d371b9bf1 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -40,16 +40,14 @@ StgClosure *caf_list = NULL;
StgClosure *revertible_caf_list = NULL;
rtsBool keepCAFs;
-bdescr *pinned_object_block; /* allocate pinned objects into this block */
-nat alloc_blocks; /* number of allocate()d blocks since GC */
-nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
+nat alloc_blocks_lim; /* GC if n_large_blocks in any nursery
+ * reaches this. */
static bdescr *exec_block;
generation *generations = NULL; /* all the generations */
generation *g0 = NULL; /* generation 0, for convenience */
generation *oldest_gen = NULL; /* oldest generation, for convenience */
-step *g0s0 = NULL; /* generation 0, step 0, for convenience */
nat total_steps = 0;
step *all_steps = NULL; /* single array of steps */
@@ -143,14 +141,6 @@ initStorage( void )
* sizeof(struct generation_),
"initStorage: gens");
- /* allocate all the steps into an array. It is important that we do
- it this way, because we need the invariant that two step pointers
- can be directly compared to see which is the oldest.
- Remember that the last generation has only one step. */
- total_steps = 1 + (RtsFlags.GcFlags.generations - 1) * RtsFlags.GcFlags.steps;
- all_steps = stgMallocBytes(total_steps * sizeof(struct step_),
- "initStorage: steps");
-
/* Initialise all generations */
for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
gen = &generations[g];
@@ -166,6 +156,14 @@ initStorage( void )
g0 = &generations[0];
oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
+ /* allocate all the steps into an array. It is important that we do
+ it this way, because we need the invariant that two step pointers
+ can be directly compared to see which is the oldest.
+ Remember that the last generation has only one step. */
+ total_steps = 1 + (RtsFlags.GcFlags.generations - 1) * RtsFlags.GcFlags.steps;
+ all_steps = stgMallocBytes(total_steps * sizeof(struct step_),
+ "initStorage: steps");
+
/* Allocate step structures in each generation */
if (RtsFlags.GcFlags.generations > 1) {
/* Only for multiple-generations */
@@ -187,11 +185,7 @@ initStorage( void )
g0->steps = all_steps;
}
-#ifdef THREADED_RTS
n_nurseries = n_capabilities;
-#else
- n_nurseries = 1;
-#endif
nurseries = stgMallocBytes (n_nurseries * sizeof(struct step_),
"initStorage: nurseries");
@@ -231,7 +225,6 @@ initStorage( void )
}
generations[0].max_blocks = 0;
- g0s0 = &generations[0].steps[0];
/* The allocation area. Policy: keep the allocation area
* small to begin with, even if we have a large suggested heap
@@ -246,7 +239,6 @@ initStorage( void )
revertible_caf_list = NULL;
/* initialise the allocate() interface */
- alloc_blocks = 0;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
exec_block = NULL;
@@ -274,7 +266,7 @@ exitStorage (void)
void
freeStorage (void)
{
- stgFree(g0s0); // frees all the steps
+ stgFree(all_steps); // frees all the steps
stgFree(generations);
freeAllMBlocks();
#if defined(THREADED_RTS)
@@ -423,7 +415,6 @@ allocNursery (step *stp, bdescr *tail, nat blocks)
static void
assignNurseriesToCapabilities (void)
{
-#ifdef THREADED_RTS
nat i;
for (i = 0; i < n_nurseries; i++) {
@@ -431,11 +422,6 @@ assignNurseriesToCapabilities (void)
capabilities[i].r.rCurrentNursery = nurseries[i].blocks;
capabilities[i].r.rCurrentAlloc = NULL;
}
-#else /* THREADED_RTS */
- MainCapability.r.rNursery = &nurseries[0];
- MainCapability.r.rCurrentNursery = nurseries[0].blocks;
- MainCapability.r.rCurrentAlloc = NULL;
-#endif
}
static void
@@ -469,6 +455,10 @@ resetNurseries( void )
ASSERT(bd->step == stp);
IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
}
+ // these large objects are dead, since we have just GC'd
+ freeChain(stp->large_objects);
+ stp->large_objects = NULL;
+ stp->n_large_blocks = 0;
}
assignNurseriesToCapabilities();
}
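The new lines in resetNurseries free each nursery's large-object chain
wholesale after a GC, since nothing in a nursery survives a collection.
As a standalone model, walking and releasing a singly linked block
chain looks like this (Block and free() stand in for the RTS's bdescr
and block pool; illustrative only):

    #include <stdlib.h>

    typedef struct Block {
        struct Block *link;
    } Block;

    /* Free an entire chain of dead blocks; mirrors the shape of
     * freeChain(stp->large_objects) above (illustrative model). */
    static void free_chain(Block *bd)
    {
        while (bd != NULL) {
            Block *next = bd->link;  /* save the link before freeing */
            free(bd);                /* the RTS returns blocks to its pool */
            bd = next;
        }
    }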
@@ -481,6 +471,7 @@ countNurseryBlocks (void)
for (i = 0; i < n_nurseries; i++) {
blocks += nurseries[i].n_blocks;
+ blocks += nurseries[i].n_large_blocks;
}
return blocks;
}
@@ -565,129 +556,46 @@ move_TSO (StgTSO *src, StgTSO *dest)
}
/* -----------------------------------------------------------------------------
- The allocate() interface
-
- allocateInGen() function allocates memory directly into a specific
- generation. It always succeeds, and returns a chunk of memory n
- words long. n can be larger than the size of a block if necessary,
- in which case a contiguous block group will be allocated.
-
- allocate(n) is equivalent to allocateInGen(g0).
+ split N blocks off the front of the given bdescr, returning the
+ new block group. We add the remainder to the large_blocks list
+ in the same step as the original block.
-------------------------------------------------------------------------- */
-StgPtr
-allocateInGen (generation *g, lnat n)
-{
- step *stp;
- bdescr *bd;
- StgPtr ret;
-
- ACQUIRE_SM_LOCK;
-
- TICK_ALLOC_HEAP_NOCTR(n);
- CCS_ALLOC(CCCS,n);
-
- stp = &g->steps[0];
-
- if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_))
- {
- lnat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
-
- // Attempting to allocate an object larger than maxHeapSize
- // should definitely be disallowed. (bug #1791)
- if (RtsFlags.GcFlags.maxHeapSize > 0 &&
- req_blocks >= RtsFlags.GcFlags.maxHeapSize) {
- heapOverflow();
- // heapOverflow() doesn't exit (see #2592), but we aren't
- // in a position to do a clean shutdown here: we
- // either have to allocate the memory or exit now.
- // Allocating the memory would be bad, because the user
- // has requested that we not exceed maxHeapSize, so we
- // just exit.
- stg_exit(EXIT_HEAPOVERFLOW);
- }
-
- bd = allocGroup(req_blocks);
- dbl_link_onto(bd, &stp->large_objects);
- stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
- alloc_blocks += bd->blocks;
- initBdescr(bd, stp);
- bd->flags = BF_LARGE;
- bd->free = bd->start + n;
- ret = bd->start;
- }
- else
- {
- // small allocation (<LARGE_OBJECT_THRESHOLD) */
- bd = stp->blocks;
- if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
- bd = allocBlock();
- initBdescr(bd, stp);
- bd->flags = 0;
- bd->link = stp->blocks;
- stp->blocks = bd;
- stp->n_blocks++;
- alloc_blocks++;
- }
- ret = bd->free;
- bd->free += n;
- }
-
- RELEASE_SM_LOCK;
-
- return ret;
-}
-
-StgPtr
-allocate (lnat n)
-{
- return allocateInGen(g0,n);
-}
-
-lnat
-allocatedBytes( void )
-{
- lnat allocated;
-
- allocated = alloc_blocks * BLOCK_SIZE_W;
- if (pinned_object_block != NULL) {
- allocated -= (pinned_object_block->start + BLOCK_SIZE_W) -
- pinned_object_block->free;
- }
-
- return allocated;
-}
-
-// split N blocks off the front of the given bdescr, returning the
-// new block group. We treat the remainder as if it
-// had been freshly allocated in generation 0.
bdescr *
splitLargeBlock (bdescr *bd, nat blocks)
{
bdescr *new_bd;
+ ACQUIRE_SM_LOCK;
+
+ ASSERT(countBlocks(bd->step->large_objects) == bd->step->n_large_blocks);
+
// subtract the original number of blocks from the counter first
bd->step->n_large_blocks -= bd->blocks;
new_bd = splitBlockGroup (bd, blocks);
-
- dbl_link_onto(new_bd, &g0s0->large_objects);
- g0s0->n_large_blocks += new_bd->blocks;
- initBdescr(new_bd, g0s0);
- new_bd->flags = BF_LARGE;
+ initBdescr(new_bd, bd->step);
+ new_bd->flags = BF_LARGE | (bd->flags & BF_EVACUATED);
+ // if new_bd is in an old generation, we have to set BF_EVACUATED
new_bd->free = bd->free;
+ dbl_link_onto(new_bd, &bd->step->large_objects);
+
ASSERT(new_bd->free <= new_bd->start + new_bd->blocks * BLOCK_SIZE_W);
// add the new number of blocks to the counter. Due to the gaps
- // for block descriptor, new_bd->blocks + bd->blocks might not be
+ // for block descriptors, new_bd->blocks + bd->blocks might not be
// equal to the original bd->blocks, which is why we do it this way.
- bd->step->n_large_blocks += bd->blocks;
+ bd->step->n_large_blocks += bd->blocks + new_bd->blocks;
+
+ ASSERT(countBlocks(bd->step->large_objects) == bd->step->n_large_blocks);
+
+ RELEASE_SM_LOCK;
return new_bd;
}
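The subtract-then-re-add dance around the counter is deliberate: after
the split, bd->blocks + new_bd->blocks need not equal the original
bd->blocks, because block descriptors occupy slots at the start of each
megablock and a split can absorb some of them. A reduced model of the
bookkeeping pattern (field names illustrative, not the real bdescr API):

    /* Illustrative bookkeeping: keep n_large_blocks consistent across
     * a split whose two pieces may not sum to the original size. */
    typedef struct {
        unsigned blocks;
    } Group;

    static void recount_after_split(unsigned *n_large_blocks,
                                    Group *bd, Group *new_bd,
                                    unsigned keep)
    {
        *n_large_blocks -= bd->blocks;       /* remove the old total first */
        new_bd->blocks = bd->blocks - keep;  /* the real split may differ */
        bd->blocks = keep;
        *n_large_blocks += bd->blocks + new_bd->blocks;  /* re-add both */
    }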
/* -----------------------------------------------------------------------------
- allocateLocal()
+ allocate()
This allocates memory in the current thread - it is intended for
use primarily from STG-land where we have a Capability. It is
@@ -700,13 +608,38 @@ splitLargeBlock (bdescr *bd, nat blocks)
-------------------------------------------------------------------------- */
StgPtr
-allocateLocal (Capability *cap, lnat n)
+allocate (Capability *cap, lnat n)
{
bdescr *bd;
StgPtr p;
+ step *stp;
if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- return allocateInGen(g0,n);
+ lnat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
+
+ // Attempting to allocate an object larger than maxHeapSize
+ // should definitely be disallowed. (bug #1791)
+ if (RtsFlags.GcFlags.maxHeapSize > 0 &&
+ req_blocks >= RtsFlags.GcFlags.maxHeapSize) {
+ heapOverflow();
+ // heapOverflow() doesn't exit (see #2592), but we aren't
+ // in a position to do a clean shutdown here: we
+ // either have to allocate the memory or exit now.
+ // Allocating the memory would be bad, because the user
+ // has requested that we not exceed maxHeapSize, so we
+ // just exit.
+ stg_exit(EXIT_HEAPOVERFLOW);
+ }
+
+ stp = &nurseries[cap->no];
+
+ bd = allocGroup(req_blocks);
+ dbl_link_onto(bd, &stp->large_objects);
+ stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
+ initBdescr(bd, stp);
+ bd->flags = BF_LARGE;
+ bd->free = bd->start + n;
+ return bd->start;
}
/* small allocation (<LARGE_OBJECT_THRESHOLD) */
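The large-object path rounds the request up to whole blocks before
checking it against the heap limit; an object just over the threshold
still consumes an integral number of blocks. With illustrative
constants (the real block and word sizes come from the RTS build):

    #include <assert.h>

    #define BLOCK_SIZE 4096UL  /* illustrative; the RTS configures this */

    /* Round an allocation of n words up to whole blocks, as the
     * large-object path above does before the maxHeapSize check. */
    static unsigned long req_blocks(unsigned long n_words)
    {
        unsigned long bytes = n_words * sizeof(unsigned long);
        return (bytes + BLOCK_SIZE - 1) / BLOCK_SIZE;
    }

    /* e.g. on a 64-bit host: 1000 words = 8000 bytes -> 2 blocks */
    static void example(void)
    {
        assert(req_blocks(1000) == 2);
    }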
@@ -731,10 +664,8 @@ allocateLocal (Capability *cap, lnat n)
RELEASE_SM_LOCK;
initBdescr(bd, cap->r.rNursery);
bd->flags = 0;
- // NO: alloc_blocks++;
- // calcAllocated() uses the size of the nursery, and we've
- // already bumpted nursery->n_blocks above. We'll GC
- // pretty quickly now anyway, because MAYBE_GC() will
+ // If we had to allocate a new block, then we'll GC
+ // pretty quickly now, because MAYBE_GC() will
// notice that CurrentNursery->link is NULL.
} else {
// we have a block in the nursery: take it and put
@@ -778,39 +709,41 @@ allocateLocal (Capability *cap, lnat n)
------------------------------------------------------------------------- */
StgPtr
-allocatePinned( lnat n )
+allocatePinned (Capability *cap, lnat n)
{
StgPtr p;
- bdescr *bd = pinned_object_block;
+ bdescr *bd;
+ step *stp;
// If the request is for a large object, then allocate()
// will give us a pinned object anyway.
if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- p = allocate(n);
+ p = allocate(cap, n);
Bdescr(p)->flags |= BF_PINNED;
return p;
}
- ACQUIRE_SM_LOCK;
-
TICK_ALLOC_HEAP_NOCTR(n);
CCS_ALLOC(CCCS,n);
+ bd = cap->pinned_object_block;
+
// If we don't have a block of pinned objects yet, or the current
// one isn't large enough to hold the new object, allocate a new one.
if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
- pinned_object_block = bd = allocBlock();
- dbl_link_onto(bd, &g0s0->large_objects);
- g0s0->n_large_blocks++;
- initBdescr(bd, g0s0);
+ ACQUIRE_SM_LOCK
+ cap->pinned_object_block = bd = allocBlock();
+ RELEASE_SM_LOCK
+ stp = &nurseries[cap->no];
+ dbl_link_onto(bd, &stp->large_objects);
+ stp->n_large_blocks++;
+ initBdescr(bd, stp);
bd->flags = BF_PINNED | BF_LARGE;
bd->free = bd->start;
- alloc_blocks++;
}
p = bd->free;
bd->free += n;
- RELEASE_SM_LOCK;
return p;
}
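After this change allocatePinned has a lock-free fast path: it
bump-allocates out of the capability's current pinned block, and only
takes the storage-manager lock when a fresh block is needed. A
standalone sketch of that fast/slow split (a pthread mutex and malloc
stand in for the RTS lock and block allocator; illustrative only):

    #include <stdlib.h>
    #include <pthread.h>

    #define BLOCK_WORDS 512

    typedef struct {
        unsigned long *pinned_block;  /* current pinned block, or NULL */
        size_t         used;          /* words handed out from it so far */
    } Cap;

    static pthread_mutex_t sm_lock = PTHREAD_MUTEX_INITIALIZER;

    static unsigned long *alloc_pinned(Cap *cap, size_t n)
    {
        if (cap->pinned_block == NULL || cap->used + n > BLOCK_WORDS) {
            /* slow path: get a fresh block under the lock
             * (real code would also handle allocation failure) */
            pthread_mutex_lock(&sm_lock);
            cap->pinned_block = malloc(BLOCK_WORDS * sizeof(unsigned long));
            pthread_mutex_unlock(&sm_lock);
            cap->used = 0;
        }
        /* fast path: bump within the capability-private block */
        unsigned long *p = cap->pinned_block + cap->used;
        cap->used += n;
        return p;
    }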
@@ -900,14 +833,11 @@ calcAllocated( void )
{
nat allocated;
bdescr *bd;
+ nat i;
- allocated = allocatedBytes();
- allocated += countNurseryBlocks() * BLOCK_SIZE_W;
+ allocated = countNurseryBlocks() * BLOCK_SIZE_W;
- {
-#ifdef THREADED_RTS
- nat i;
- for (i = 0; i < n_nurseries; i++) {
+ for (i = 0; i < n_capabilities; i++) {
Capability *cap;
for ( bd = capabilities[i].r.rCurrentNursery->link;
bd != NULL; bd = bd->link ) {
@@ -919,18 +849,10 @@ calcAllocated( void )
allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
- cap->r.rCurrentNursery->free;
}
- }
-#else
- bdescr *current_nursery = MainCapability.r.rCurrentNursery;
-
- for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
- allocated -= BLOCK_SIZE_W;
- }
- if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
- allocated -= (current_nursery->start + BLOCK_SIZE_W)
- - current_nursery->free;
- }
-#endif
+ if (cap->pinned_object_block != NULL) {
+ allocated -= (cap->pinned_object_block->start + BLOCK_SIZE_W) -
+ cap->pinned_object_block->free;
+ }
}
total_allocated += allocated;
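The accounting starts from total nursery capacity and subtracts
everything not yet used: whole blocks beyond the current one, the free
tail of the current block, and now the free tail of each capability's
pinned block. A reduced model of the subtraction (types illustrative,
mirroring the loop above):

    #define BLOCK_WORDS 512  /* illustrative stand-in for BLOCK_SIZE_W */

    typedef struct Blk {
        struct Blk    *link;
        unsigned long *start;
        unsigned long *free;
    } Blk;

    /* Words never used in one nursery: whole blocks after the current
     * one, plus the untouched tail of the current block. */
    static unsigned long unused_words(Blk *current)
    {
        unsigned long unused = 0;
        for (Blk *bd = current->link; bd != NULL; bd = bd->link) {
            unused += BLOCK_WORDS;
        }
        unused += (current->start + BLOCK_WORDS) - current->free;
        return unused;
    }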
@@ -947,16 +869,12 @@ calcLiveBlocks(void)
lnat live = 0;
step *stp;
- if (RtsFlags.GcFlags.generations == 1) {
- return g0s0->n_large_blocks + g0s0->n_blocks;
- }
-
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
/* approximate amount of live data (doesn't take into account slop
* at end of each block).
*/
- if (g == 0 && s == 0) {
+ if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
continue;
}
stp = &generations[g].steps[s];
@@ -988,14 +906,10 @@ calcLiveWords(void)
lnat live;
step *stp;
- if (RtsFlags.GcFlags.generations == 1) {
- return g0s0->n_words + countOccupied(g0s0->large_objects);
- }
-
live = 0;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
- if (g == 0 && s == 0) continue;
+ if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) continue;
stp = &generations[g].steps[s];
live += stp->n_words + countOccupied(stp->large_objects);
}
@@ -1384,32 +1298,28 @@ checkSanity( void )
{
nat g, s;
- if (RtsFlags.GcFlags.generations == 1) {
- checkHeap(g0s0->blocks);
- checkLargeObjects(g0s0->large_objects);
- } else {
-
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- if (g == 0 && s == 0) { continue; }
- ASSERT(countBlocks(generations[g].steps[s].blocks)
- == generations[g].steps[s].n_blocks);
- ASSERT(countBlocks(generations[g].steps[s].large_objects)
- == generations[g].steps[s].n_large_blocks);
- checkHeap(generations[g].steps[s].blocks);
- checkLargeObjects(generations[g].steps[s].large_objects);
- }
- }
-
- for (s = 0; s < n_nurseries; s++) {
- ASSERT(countBlocks(nurseries[s].blocks)
- == nurseries[s].n_blocks);
- ASSERT(countBlocks(nurseries[s].large_objects)
- == nurseries[s].n_large_blocks);
- }
-
- checkFreeListSanity();
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
+ continue;
+ }
+ ASSERT(countBlocks(generations[g].steps[s].blocks)
+ == generations[g].steps[s].n_blocks);
+ ASSERT(countBlocks(generations[g].steps[s].large_objects)
+ == generations[g].steps[s].n_large_blocks);
+ checkHeap(generations[g].steps[s].blocks);
+ checkLargeObjects(generations[g].steps[s].large_objects);
+ }
+ }
+
+ for (s = 0; s < n_nurseries; s++) {
+ ASSERT(countBlocks(nurseries[s].blocks)
+ == nurseries[s].n_blocks);
+ ASSERT(countBlocks(nurseries[s].large_objects)
+ == nurseries[s].n_large_blocks);
}
+
+ checkFreeListSanity();
#if defined(THREADED_RTS)
// check the stacks too in threaded mode, because we don't do a