Diffstat (limited to 'rts/sm')
-rw-r--r--  rts/sm/BlockAlloc.c |  1 +
-rw-r--r--  rts/sm/Evac.c       |  7 +++----
-rw-r--r--  rts/sm/GCUtils.c    |  3 +--
-rw-r--r--  rts/sm/Storage.c    | 18 ++++++------------
4 files changed, 11 insertions(+), 18 deletions(-)
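
Judging from the hunks below, this commit factors the repeated initialisation of bd->step and bd->gen_no at block-allocation sites into a single helper, initBdescr(), and adds a bd->dest field that caches the evacuation destination (previously recomputed as bd->step->to on every lookup).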
diff --git a/rts/sm/BlockAlloc.c b/rts/sm/BlockAlloc.c
index bf7a55e7a9..d30d29b2f5 100644
--- a/rts/sm/BlockAlloc.c
+++ b/rts/sm/BlockAlloc.c
@@ -59,6 +59,7 @@ static void initMBlock(void *mblock);
bd->flags
bd->gen_no
bd->step
+ bd->dest
Exceptions: we don't maintain invariants for all the blocks within a
group on the free list, because it is expensive to modify every
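
The helper's definition is outside this hunk; the following is a minimal sketch of what it presumably does, reconstructed from the call sites in this diff and the new bd->dest invariant above (the real GHC definition, and whether it is a macro or an inline function, may differ):

    /* Hypothetical sketch, not the actual GHC definition: inferred from
     * the call sites in this commit, each of which replaces the two
     * field assignments with a single call. */
    STATIC_INLINE void
    initBdescr (bdescr *bd, step *stp)
    {
        bd->step   = stp;           // owning step
        bd->gen_no = stp->gen_no;   // generation number, cached from the step
        bd->dest   = stp->to;       // evacuation destination, cached from the step
    }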
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 3212ce5852..9836e51376 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -282,8 +282,7 @@ evacuate_large(StgPtr p)
ws = &gct->steps[new_stp->abs_no];
bd->flags |= BF_EVACUATED;
- bd->step = new_stp;
- bd->gen_no = new_stp->gen_no;
+ initBdescr(bd, new_stp);
// If this is a block of pinned objects, we don't have to scan
// these objects, because they aren't allowed to contain any
@@ -505,7 +504,7 @@ loop:
return;
}
- stp = bd->step->to;
+ stp = bd->dest;
info = q->header.info;
if (IS_FORWARDING_PTR(info))
@@ -1069,7 +1068,7 @@ bale_out:
// check whether it was updated in the meantime.
*q = (StgClosure *)p;
if (evac) {
- copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
+ copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest);
}
unchain_thunk_selectors(prev_thunk_selector, *q);
return;
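
The payoff is visible in the two Evac.c hunks above: the per-object fast path previously needed two dependent loads (bd->step, then ->to) to find the destination step, and now needs one. A toy model of that access-pattern change, with hypothetical type names (not GHC code):

    /* Toy model of the pointer chase this change removes. */
    typedef struct step_ step;
    struct step_ { step *to; unsigned gen_no; };

    typedef struct {
        step    *step;     /* owning step             */
        step    *dest;     /* cached copy of step->to */
        unsigned gen_no;
    } bdescr;

    /* Old: two dependent loads per evacuated object. */
    static step *dest_before (bdescr *bd) { return bd->step->to; }

    /* New: one load; ->to was resolved once, when the block was initialised. */
    static step *dest_after (bdescr *bd) { return bd->dest; }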
diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c
index 70c53cb8bf..7e99e29023 100644
--- a/rts/sm/GCUtils.c
+++ b/rts/sm/GCUtils.c
@@ -269,8 +269,7 @@ alloc_todo_block (step_workspace *ws, nat size)
} else {
bd = allocBlock_sync();
}
- bd->step = ws->step;
- bd->gen_no = ws->step->gen_no;
+ initBdescr(bd, ws->step);
bd->flags = BF_EVACUATED;
bd->u.scan = bd->free = bd->start;
}
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index f0506cd77c..73ef53f036 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -411,8 +411,7 @@ allocNursery (step *stp, bdescr *tail, nat blocks)
if (tail != NULL) {
tail->u.back = bd;
}
- bd->step = stp;
- bd->gen_no = 0;
+ initBdescr(bd, stp);
bd->flags = 0;
bd->free = bd->start;
tail = bd;
@@ -612,8 +611,7 @@ allocateInGen (generation *g, lnat n)
dbl_link_onto(bd, &stp->large_objects);
stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
alloc_blocks += bd->blocks;
- bd->gen_no = g->no;
- bd->step = stp;
+ initBdescr(bd, stp);
bd->flags = BF_LARGE;
bd->free = bd->start + n;
ret = bd->start;
@@ -624,8 +622,7 @@ allocateInGen (generation *g, lnat n)
bd = stp->blocks;
if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
bd = allocBlock();
- bd->gen_no = g->no;
- bd->step = stp;
+ initBdescr(bd, stp);
bd->flags = 0;
bd->link = stp->blocks;
stp->blocks = bd;
@@ -676,8 +673,7 @@ splitLargeBlock (bdescr *bd, nat blocks)
dbl_link_onto(new_bd, &g0s0->large_objects);
g0s0->n_large_blocks += new_bd->blocks;
- new_bd->gen_no = g0s0->no;
- new_bd->step = g0s0;
+ initBdescr(new_bd, g0s0);
new_bd->flags = BF_LARGE;
new_bd->free = bd->free;
ASSERT(new_bd->free <= new_bd->start + new_bd->blocks * BLOCK_SIZE_W);
@@ -733,8 +729,7 @@ allocateLocal (Capability *cap, lnat n)
bd = allocBlock();
cap->r.rNursery->n_blocks++;
RELEASE_SM_LOCK;
- bd->gen_no = 0;
- bd->step = cap->r.rNursery;
+ initBdescr(bd, cap->r.rNursery);
bd->flags = 0;
// NO: alloc_blocks++;
// calcAllocated() uses the size of the nursery, and we've
@@ -807,8 +802,7 @@ allocatePinned( lnat n )
pinned_object_block = bd = allocBlock();
dbl_link_onto(bd, &g0s0->large_objects);
g0s0->n_large_blocks++;
- bd->gen_no = 0;
- bd->step = g0s0;
+ initBdescr(bd, g0s0);
bd->flags = BF_PINNED | BF_LARGE;
bd->free = bd->start;
alloc_blocks++;