Diffstat (limited to 'rts')
-rw-r--r--   rts/Arena.c           1
-rw-r--r--   rts/Schedule.c        4
-rw-r--r--   rts/Updates.h         2
-rw-r--r--   rts/sm/BlockAlloc.c   1
-rw-r--r--   rts/sm/Evac.c         7
-rw-r--r--   rts/sm/GCUtils.c      3
-rw-r--r--   rts/sm/Storage.c     18
7 files changed, 15 insertions, 21 deletions
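This patch gives each block descriptor a 'dest' field caching the step that live
objects in the block should be evacuated to, and it replaces the scattered
"bd->step = ...; bd->gen_no = ...;" assignments with a single initBdescr() call.
The helper's definition is not part of this diff; the following is only a minimal
sketch of what it presumably does, inferred from the assignments it replaces and
from the bd->step->to lookup that bd->dest supersedes in Evac.c:

    /* Sketch only: initBdescr() is defined elsewhere in the tree, not in
     * this diff.  Inferred behaviour: record the owning step, cache its
     * generation number, and cache the evacuation destination. */
    STATIC_INLINE void
    initBdescr (bdescr *bd, step *stp)
    {
        bd->step   = stp;
        bd->gen_no = stp->gen_no;
        bd->dest   = stp->to;   /* where live objects from this block go */
    }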
diff --git a/rts/Arena.c b/rts/Arena.c
index 7fc49f44d7..e636de4b9a 100644
--- a/rts/Arena.c
+++ b/rts/Arena.c
@@ -86,6 +86,7 @@ arenaAlloc( Arena *arena, size_t size )
bd->gen_no = 0;
bd->step = NULL;
+ bd->dest = NULL;
bd->flags = 0;
bd->free = bd->start;
bd->link = arena->current;
diff --git a/rts/Schedule.c b/rts/Schedule.c
index dbee436a68..c377974dca 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -1118,8 +1118,8 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
{
bdescr *x;
for (x = bd; x < bd + blocks; x++) {
- x->step = cap->r.rNursery;
- x->gen_no = 0;
+ initBdescr(x,cap->r.rNursery);
+ x->free = x->start;
x->flags = 0;
}
}
diff --git a/rts/Updates.h b/rts/Updates.h
index 988fb6059f..bf4f89dd2a 100644
--- a/rts/Updates.h
+++ b/rts/Updates.h
@@ -191,7 +191,7 @@ no_slop:
StgInd_indirectee(p1) = p2; \
prim %write_barrier() []; \
bd = Bdescr(p1); \
- if (bdescr_gen_no(bd) != 0 :: CInt) { \
+ if (bdescr_gen_no(bd) != 0 :: bits16) { \
recordMutableCap(p1, TO_W_(bdescr_gen_no(bd)), R1); \
SET_INFO(p1, stg_IND_OLDGEN_info); \
LDV_RECORD_CREATE(p1); \
diff --git a/rts/sm/BlockAlloc.c b/rts/sm/BlockAlloc.c
index bf7a55e7a9..d30d29b2f5 100644
--- a/rts/sm/BlockAlloc.c
+++ b/rts/sm/BlockAlloc.c
@@ -59,6 +59,7 @@ static void initMBlock(void *mblock);
bd->flags
bd->gen_no
bd->step
+ bd->dest
Exceptions: we don't maintain invariants for all the blocks within a
group on the free list, because it is expensive to modify every
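The comment above enumerates the bdescr fields whose values the block allocator
keeps valid even for blocks sitting on the free list. For orientation, here is a
rough sketch of the descriptor fields this patch touches; the real declaration
lives in the block allocator's header and has more fields and padding than shown,
and the exact field types are assumptions:

    /* Sketch only, not the real declaration.  'dest' is the field added
     * by this patch; gen_no is 16 bits, matching the bits16 comparison
     * introduced in Updates.h above. */
    typedef struct bdescr_ {
        StgPtr          start;   /* first word of the block              */
        StgPtr          free;    /* first unallocated word               */
        struct bdescr_ *link;    /* chain in free / allocation lists     */
        struct step_   *step;    /* step this block belongs to           */
        struct step_   *dest;    /* NEW: step to evacuate objects to     */
        StgWord32       blocks;  /* number of blocks in the group        */
        StgWord16       gen_no;  /* generation number, cached from step  */
        StgWord16       flags;   /* BF_EVACUATED, BF_LARGE, BF_PINNED... */
    } bdescr;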
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 3212ce5852..9836e51376 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -282,8 +282,7 @@ evacuate_large(StgPtr p)
ws = &gct->steps[new_stp->abs_no];
bd->flags |= BF_EVACUATED;
- bd->step = new_stp;
- bd->gen_no = new_stp->gen_no;
+ initBdescr(bd, new_stp);
// If this is a block of pinned objects, we don't have to scan
// these objects, because they aren't allowed to contain any
@@ -505,7 +504,7 @@ loop:
return;
}
- stp = bd->step->to;
+ stp = bd->dest;
info = q->header.info;
if (IS_FORWARDING_PTR(info))
@@ -1069,7 +1068,7 @@ bale_out:
// check whether it was updated in the meantime.
*q = (StgClosure *)p;
if (evac) {
- copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
+ copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest);
}
unchain_thunk_selectors(prev_thunk_selector, *q);
return;
diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c
index 70c53cb8bf..7e99e29023 100644
--- a/rts/sm/GCUtils.c
+++ b/rts/sm/GCUtils.c
@@ -269,8 +269,7 @@ alloc_todo_block (step_workspace *ws, nat size)
} else {
bd = allocBlock_sync();
}
- bd->step = ws->step;
- bd->gen_no = ws->step->gen_no;
+ initBdescr(bd, ws->step);
bd->flags = BF_EVACUATED;
bd->u.scan = bd->free = bd->start;
}
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index f0506cd77c..73ef53f036 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -411,8 +411,7 @@ allocNursery (step *stp, bdescr *tail, nat blocks)
if (tail != NULL) {
tail->u.back = bd;
}
- bd->step = stp;
- bd->gen_no = 0;
+ initBdescr(bd, stp);
bd->flags = 0;
bd->free = bd->start;
tail = bd;
@@ -612,8 +611,7 @@ allocateInGen (generation *g, lnat n)
dbl_link_onto(bd, &stp->large_objects);
stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
alloc_blocks += bd->blocks;
- bd->gen_no = g->no;
- bd->step = stp;
+ initBdescr(bd, stp);
bd->flags = BF_LARGE;
bd->free = bd->start + n;
ret = bd->start;
@@ -624,8 +622,7 @@ allocateInGen (generation *g, lnat n)
bd = stp->blocks;
if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
bd = allocBlock();
- bd->gen_no = g->no;
- bd->step = stp;
+ initBdescr(bd, stp);
bd->flags = 0;
bd->link = stp->blocks;
stp->blocks = bd;
@@ -676,8 +673,7 @@ splitLargeBlock (bdescr *bd, nat blocks)
dbl_link_onto(new_bd, &g0s0->large_objects);
g0s0->n_large_blocks += new_bd->blocks;
- new_bd->gen_no = g0s0->no;
- new_bd->step = g0s0;
+ initBdescr(new_bd, g0s0);
new_bd->flags = BF_LARGE;
new_bd->free = bd->free;
ASSERT(new_bd->free <= new_bd->start + new_bd->blocks * BLOCK_SIZE_W);
@@ -733,8 +729,7 @@ allocateLocal (Capability *cap, lnat n)
bd = allocBlock();
cap->r.rNursery->n_blocks++;
RELEASE_SM_LOCK;
- bd->gen_no = 0;
- bd->step = cap->r.rNursery;
+ initBdescr(bd, cap->r.rNursery);
bd->flags = 0;
// NO: alloc_blocks++;
// calcAllocated() uses the size of the nursery, and we've
@@ -807,8 +802,7 @@ allocatePinned( lnat n )
pinned_object_block = bd = allocBlock();
dbl_link_onto(bd, &g0s0->large_objects);
g0s0->n_large_blocks++;
- bd->gen_no = 0;
- bd->step = g0s0;
+ initBdescr(bd, g0s0);
bd->flags = BF_PINNED | BF_LARGE;
bd->free = bd->start;
alloc_blocks++;
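After this patch, every allocation site in Storage.c follows the same shape:
obtain a block, stamp it with initBdescr(), then set only the site-specific
flags and free pointer. A hedged sketch of that idiom (the identifiers stp,
BF_LARGE and n are taken from the hunks above, not from any one call site):

    /* Sketch of the post-patch allocation idiom, not literal tree code. */
    bdescr *bd = allocBlock();
    initBdescr(bd, stp);          /* sets bd->step, bd->gen_no, bd->dest   */
    bd->flags = BF_LARGE;         /* site-specific: 0, BF_PINNED|BF_LARGE  */
    bd->free  = bd->start + n;    /* or bd->start for an empty block       */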