author     Simon Marlow <simonmar@microsoft.com>  2007-10-31 14:36:34 +0000
committer  Simon Marlow <simonmar@microsoft.com>  2007-10-31 14:36:34 +0000
commit     bf1197b67163d9f5b6509cf836e07ff83cc0a063
tree       e01bc2f69a3f3581667334db3c72032ff0eb7a7b /rts/sm
parent     698364afaf2f346227910c0cf8d4f1929cdc56ef
GC refactoring: make evacuate() take an StgClosure**
Change the type of evacuate() from

    StgClosure *evacuate(StgClosure *);

to

    void evacuate(StgClosure **);

So evacuate() itself writes the source pointer, rather than the caller.
This is slightly cleaner, and avoids a few memory writes: sometimes
evacuate() doesn't move the object, and in these cases the source
pointer doesn't need to be written.  It doesn't have a measurable
impact on performance, though.
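
To make the change concrete, here is a minimal, self-contained sketch of
the two calling conventions (the Obj type and the evac_old/evac_new
functions are invented for illustration; this is not the RTS code).
Old-style evacuation returns the new address, so the caller must store
it back unconditionally; new-style evacuation takes the address of the
slot and writes it only when the object actually moved:

    #include <stdio.h>

    /* stand-in for StgClosure: 'forwarded' marks an already-moved
       object, 'to' holds its forwarding address */
    typedef struct Obj { int forwarded; struct Obj *to; } Obj;

    /* old style: always return the (possibly unchanged) address;
       the caller then performs an unconditional store */
    static Obj *evac_old(Obj *q) { return q->forwarded ? q->to : q; }

    /* new style: update the slot in place, and only when the object
       moved -- otherwise no memory write happens at all */
    static void evac_new(Obj **slot)
    {
        Obj *q = *slot;
        if (q->forwarded) { *slot = q->to; }
    }

    int main(void)
    {
        Obj a = { 0, NULL };
        Obj *root = &a;

        root = evac_old(root);  /* stores to root although a didn't move */
        evac_new(&root);        /* no store: a was not forwarded */
        printf("root = %p\n", (void *)root);
        return 0;
    }

The same pattern is visible throughout the diff below, e.g.
"w->value = evacuate(w->value);" becoming "evacuate(&w->value);".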
Diffstat (limited to 'rts/sm')
-rw-r--r--  rts/sm/Evac.c     | 219
-rw-r--r--  rts/sm/Evac.h     |   2
-rw-r--r--  rts/sm/GC.c       |   9
-rw-r--r--  rts/sm/MarkWeak.c |  17
-rw-r--r--  rts/sm/Scav.c     | 263
5 files changed, 276 insertions(+), 234 deletions(-)
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index fb9f4c49b2..518b383a54 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -25,7 +25,7 @@
*/
#define MAX_THUNK_SELECTOR_DEPTH 16
-static StgClosure * eval_thunk_selector (StgSelector * p, rtsBool);
+static void eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool);
STATIC_INLINE StgPtr
alloc_for_copy (nat size, step *stp)
@@ -99,12 +99,12 @@ alloc_for_copy_noscav (nat size, step *stp)
return to;
}
-STATIC_INLINE StgClosure *
-copy_tag(StgClosure *src, nat size, step *stp,StgWord tag)
+STATIC_INLINE void
+copy_tag(StgClosure **p, StgClosure *src, nat size, step *stp,StgWord tag)
{
- StgPtr to, from;
- nat i;
- StgWord info;
+ StgPtr to, tagged_to, from;
+ nat i;
+ StgWord info;
#ifdef THREADED_RTS
do {
@@ -121,6 +121,8 @@ copy_tag(StgClosure *src, nat size, step *stp,StgWord tag)
#endif
to = alloc_for_copy(size,stp);
+ tagged_to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
+ *p = (StgClosure *)tagged_to;
TICK_GC_WORDS_COPIED(size);
@@ -130,14 +132,14 @@ copy_tag(StgClosure *src, nat size, step *stp,StgWord tag)
to[i] = from[i];
}
+ ((StgEvacuated*)from)->evacuee = (StgClosure *)tagged_to;
+
// retag pointer before updating EVACUATE closure and returning
- to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
// if (to+size+2 < bd->start + BLOCK_SIZE_W) {
// __builtin_prefetch(to + size + 2, 1);
// }
- ((StgEvacuated*)from)->evacuee = (StgClosure *)to;
#ifdef THREADED_RTS
write_barrier();
((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
@@ -148,17 +150,16 @@ copy_tag(StgClosure *src, nat size, step *stp,StgWord tag)
// the profiler can guess the position of the next object later.
SET_EVACUAEE_FOR_LDV(from, size);
#endif
- return (StgClosure *)to;
}
// Same as copy() above, except the object will be allocated in memory
// that will not be scavenged. Used for object that have no pointer
// fields.
-STATIC_INLINE StgClosure *
-copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
+STATIC_INLINE void
+copy_noscav_tag(StgClosure **p, StgClosure *src, nat size, step *stp, StgWord tag)
{
- StgPtr to, from;
+ StgPtr to, tagged_to, from;
nat i;
StgWord info;
@@ -176,6 +177,8 @@ copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
#endif
to = alloc_for_copy_noscav(size,stp);
+ tagged_to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
+ *p = (StgClosure *)tagged_to;
TICK_GC_WORDS_COPIED(size);
@@ -185,10 +188,8 @@ copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
to[i] = from[i];
}
- // retag pointer before updating EVACUATE closure and returning
- to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
+ ((StgEvacuated*)from)->evacuee = (StgClosure *)tagged_to;
- ((StgEvacuated*)from)->evacuee = (StgClosure *)to;
#ifdef THREADED_RTS
write_barrier();
((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
@@ -199,7 +200,6 @@ copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
// the profiler can guess the position of the next object later.
SET_EVACUAEE_FOR_LDV(from, size);
#endif
- return (StgClosure *)to;
}
@@ -207,8 +207,8 @@ copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
* pointer of an object, but reserve some padding after it. This is
* used to optimise evacuation of BLACKHOLEs.
*/
-static StgClosure *
-copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
+static void
+copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
StgPtr to, from;
nat i;
@@ -228,6 +228,7 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
#endif
to = alloc_for_copy(size_to_reserve, stp);
+ *p = (StgClosure *)to;
TICK_GC_WORDS_COPIED(size_to_copy);
@@ -251,21 +252,20 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
if (size_to_reserve - size_to_copy > 0)
LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
#endif
- return (StgClosure *)to;
}
/* Copy wrappers that don't tag the closure after copying */
-STATIC_INLINE StgClosure *
-copy(StgClosure *src, nat size, step *stp)
+STATIC_INLINE void
+copy(StgClosure **p, StgClosure *src, nat size, step *stp)
{
- return copy_tag(src,size,stp,0);
+ copy_tag(p,src,size,stp,0);
}
-STATIC_INLINE StgClosure *
-copy_noscav(StgClosure *src, nat size, step *stp)
+STATIC_INLINE void
+copy_noscav(StgClosure **p, StgClosure *src, nat size, step *stp)
{
- return copy_noscav_tag(src,size,stp,0);
+ copy_noscav_tag(p,src,size,stp,0);
}
/* -----------------------------------------------------------------------------
@@ -378,14 +378,17 @@ evacuate_large(StgPtr p)
extra reads/writes than we save.
-------------------------------------------------------------------------- */
-REGPARM1 StgClosure *
-evacuate(StgClosure *q)
+REGPARM1 void
+evacuate(StgClosure **p)
{
bdescr *bd = NULL;
step *stp;
+ StgClosure *q;
const StgInfoTable *info;
StgWord tag;
+ q = *p;
+
loop:
/* The tag and the pointer are split, to be merged after evacing */
tag = GET_CLOSURE_TAG(q);
@@ -395,7 +398,7 @@ loop:
if (!HEAP_ALLOCED(q)) {
- if (!major_gc) return TAG_CLOSURE(tag,q);
+ if (!major_gc) return;
info = get_itbl(q);
switch (info->type) {
@@ -410,7 +413,7 @@ loop:
}
RELEASE_SPIN_LOCK(&static_objects_sync);
}
- return q;
+ return;
case FUN_STATIC:
if (info->srt_bitmap != 0 &&
@@ -422,7 +425,7 @@ loop:
}
RELEASE_SPIN_LOCK(&static_objects_sync);
}
- return q;
+ return;
case IND_STATIC:
/* If q->saved_info != NULL, then it's a revertible CAF - it'll be
@@ -437,7 +440,7 @@ loop:
}
RELEASE_SPIN_LOCK(&static_objects_sync);
}
- return q;
+ return;
case CONSTR_STATIC:
if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
@@ -451,13 +454,13 @@ loop:
/* I am assuming that static_objects pointers are not
* written to other objects, and thus, no need to retag. */
}
- return TAG_CLOSURE(tag,q);
+ return;
case CONSTR_NOCAF_STATIC:
/* no need to put these on the static linked list, they don't need
* to be scavenged.
*/
- return TAG_CLOSURE(tag,q);
+ return;
default:
barf("evacuate(static): strange closure type %d", (int)(info->type));
@@ -477,7 +480,7 @@ loop:
gct->failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
- return TAG_CLOSURE(tag,q);
+ return;
}
if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {
@@ -492,7 +495,7 @@ loop:
gct->failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
- return TAG_CLOSURE(tag,q);
+ return;
}
/* evacuate large objects by re-linking them onto a different list.
@@ -502,10 +505,11 @@ loop:
if (info->type == TSO &&
((StgTSO *)q)->what_next == ThreadRelocated) {
q = (StgClosure *)((StgTSO *)q)->link;
+ *p = q;
goto loop;
}
evacuate_large((P_)q);
- return TAG_CLOSURE(tag,q);
+ return;
}
/* If the object is in a step that we're compacting, then we
@@ -520,7 +524,7 @@ loop:
}
push_mark_stack((P_)q);
}
- return TAG_CLOSURE(tag,q);
+ return;
}
}
@@ -537,7 +541,8 @@ loop:
case MUT_VAR_DIRTY:
case MVAR_CLEAN:
case MVAR_DIRTY:
- return copy(q,sizeW_fromITBL(info),stp);
+ copy(p,q,sizeW_fromITBL(info),stp);
+ return;
case CONSTR_0_1:
{
@@ -545,28 +550,32 @@ loop:
if (q->header.info == Czh_con_info &&
// unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
(StgChar)w <= MAX_CHARLIKE) {
- return TAG_CLOSURE(tag,
- (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
- );
+ *p = TAG_CLOSURE(tag,
+ (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
+ );
}
if (q->header.info == Izh_con_info &&
(StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
- return TAG_CLOSURE(tag,
+ *p = TAG_CLOSURE(tag,
(StgClosure *)INTLIKE_CLOSURE((StgInt)w)
);
}
- // else
- return copy_noscav_tag(q,sizeofW(StgHeader)+1,stp,tag);
+ else {
+ copy_noscav_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
+ }
+ return;
}
case FUN_0_1:
case FUN_1_0:
case CONSTR_1_0:
- return copy_tag(q,sizeofW(StgHeader)+1,stp,tag);
+ copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
+ return;
case THUNK_1_0:
case THUNK_0_1:
- return copy(q,sizeofW(StgThunk)+1,stp);
+ copy(p,q,sizeofW(StgThunk)+1,stp);
+ return;
case THUNK_1_1:
case THUNK_2_0:
@@ -578,20 +587,24 @@ loop:
stp = bd->step;
}
#endif
- return copy(q,sizeofW(StgThunk)+2,stp);
+ copy(p,q,sizeofW(StgThunk)+2,stp);
+ return;
case FUN_1_1:
case FUN_2_0:
case FUN_0_2:
case CONSTR_1_1:
case CONSTR_2_0:
- return copy_tag(q,sizeofW(StgHeader)+2,stp,tag);
+ copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
+ return;
case CONSTR_0_2:
- return copy_noscav_tag(q,sizeofW(StgHeader)+2,stp,tag);
+ copy_noscav_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
+ return;
case THUNK:
- return copy(q,thunk_sizeW_fromITBL(info),stp);
+ copy(p,q,thunk_sizeW_fromITBL(info),stp);
+ return;
case FUN:
case IND_PERM:
@@ -599,24 +612,29 @@ loop:
case WEAK:
case STABLE_NAME:
case CONSTR:
- return copy_tag(q,sizeW_fromITBL(info),stp,tag);
+ copy_tag(p,q,sizeW_fromITBL(info),stp,tag);
+ return;
case BCO:
- return copy(q,bco_sizeW((StgBCO *)q),stp);
+ copy(p,q,bco_sizeW((StgBCO *)q),stp);
+ return;
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
- return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
+ copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
+ return;
case THUNK_SELECTOR:
- return eval_thunk_selector((StgSelector *)q, rtsTrue);
+ eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
+ return;
case IND:
case IND_OLDGEN:
// follow chains of indirections, don't evacuate them
q = ((StgInd*)q)->indirectee;
+ *p = q;
goto loop;
case RET_BCO:
@@ -633,13 +651,16 @@ loop:
barf("evacuate: stack frame at %p\n", q);
case PAP:
- return copy(q,pap_sizeW((StgPAP*)q),stp);
+ copy(p,q,pap_sizeW((StgPAP*)q),stp);
+ return;
case AP:
- return copy(q,ap_sizeW((StgAP*)q),stp);
+ copy(p,q,ap_sizeW((StgAP*)q),stp);
+ return;
case AP_STACK:
- return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);
+ copy(p,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
+ return;
case EVACUATED:
/* Already evacuated, just return the forwarding address.
@@ -658,25 +679,30 @@ loop:
* current object would be evacuated to, so we only do the full
* check if stp is too low.
*/
- if (gct->evac_gen > 0 && stp->gen_no < gct->evac_gen) { // optimisation
- StgClosure *p = ((StgEvacuated*)q)->evacuee;
- if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < gct->evac_gen) {
- gct->failed_to_evac = rtsTrue;
- TICK_GC_FAILED_PROMOTION();
+ {
+ StgClosure *e = ((StgEvacuated*)q)->evacuee;
+ *p = e;
+ if (gct->evac_gen > 0 && stp->gen_no < gct->evac_gen) { // optimisation
+ if (HEAP_ALLOCED(e) && Bdescr((P_)e)->gen_no < gct->evac_gen) {
+ gct->failed_to_evac = rtsTrue;
+ TICK_GC_FAILED_PROMOTION();
+ }
}
- }
- return ((StgEvacuated*)q)->evacuee;
+ return;
+ }
case ARR_WORDS:
// just copy the block
- return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);
+ copy_noscav(p,q,arr_words_sizeW((StgArrWords *)q),stp);
+ return;
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
// just copy the block
- return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
+ copy(p,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
+ return;
case TSO:
{
@@ -686,6 +712,7 @@ loop:
*/
if (tso->what_next == ThreadRelocated) {
q = (StgClosure *)tso->link;
+ *p = q;
goto loop;
}
@@ -694,38 +721,42 @@ loop:
*/
{
StgTSO *new_tso;
- StgPtr p, q;
+ StgPtr r, s;
- new_tso = (StgTSO *)copyPart((StgClosure *)tso,
- tso_sizeW(tso),
- sizeofW(StgTSO), stp);
+ copyPart(p,(StgClosure *)tso, tso_sizeW(tso), sizeofW(StgTSO), stp);
+ new_tso = (StgTSO *)*p;
move_TSO(tso, new_tso);
- for (p = tso->sp, q = new_tso->sp;
- p < tso->stack+tso->stack_size;) {
- *q++ = *p++;
+ for (r = tso->sp, s = new_tso->sp;
+ r < tso->stack+tso->stack_size;) {
+ *s++ = *r++;
}
-
- return (StgClosure *)new_tso;
+ return;
}
}
case TREC_HEADER:
- return copy(q,sizeofW(StgTRecHeader),stp);
+ copy(p,q,sizeofW(StgTRecHeader),stp);
+ return;
case TVAR_WATCH_QUEUE:
- return copy(q,sizeofW(StgTVarWatchQueue),stp);
+ copy(p,q,sizeofW(StgTVarWatchQueue),stp);
+ return;
case TVAR:
- return copy(q,sizeofW(StgTVar),stp);
+ copy(p,q,sizeofW(StgTVar),stp);
+ return;
case TREC_CHUNK:
- return copy(q,sizeofW(StgTRecChunk),stp);
+ copy(p,q,sizeofW(StgTRecChunk),stp);
+ return;
case ATOMIC_INVARIANT:
- return copy(q,sizeofW(StgAtomicInvariant),stp);
+ copy(p,q,sizeofW(StgAtomicInvariant),stp);
+ return;
case INVARIANT_CHECK_QUEUE:
- return copy(q,sizeofW(StgInvariantCheckQueue),stp);
+ copy(p,q,sizeofW(StgInvariantCheckQueue),stp);
+ return;
default:
barf("evacuate: strange closure type %d", (int)(info->type));
@@ -774,8 +805,9 @@ unchain_thunk_selectors(StgSelector *p, StgClosure *val)
evacuated.
-------------------------------------------------------------------------- */
-static StgClosure *
-eval_thunk_selector (StgSelector * p, rtsBool evac)
+static void
+eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
+ // NB. for legacy reasons, p & q are swapped around :(
{
nat field;
StgInfoTable *info;
@@ -810,7 +842,8 @@ selector_chain:
// mutable list.
if ((bd->gen_no > N) || (bd->flags & BF_EVACUATED)) {
unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
- return (StgClosure *)p;
+ *q = (StgClosure *)p;
+ return;
}
// we don't update THUNK_SELECTORS in the compacted
// generation, because compaction does not remove the INDs
@@ -820,9 +853,10 @@ selector_chain:
// around here, test by compiling stage 3 with +RTS -c -RTS.
if (bd->flags & BF_COMPACTED) {
// must call evacuate() to mark this closure if evac==rtsTrue
- if (evac) p = (StgSelector *)evacuate((StgClosure *)p);
+ *q = (StgClosure *)p;
+ if (evac) evacuate(q);
unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
- return (StgClosure *)p;
+ return;
}
}
@@ -889,12 +923,14 @@ selector_loop:
((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
prev_thunk_selector = p;
- if (evac) val = evacuate(val);
+ *q = val;
+ if (evac) evacuate(q);
+ val = *q;
// evacuate() cannot recurse through
// eval_thunk_selector(), because we know val is not
// a THUNK_SELECTOR.
unchain_thunk_selectors(prev_thunk_selector, val);
- return val;
+ return;
}
}
@@ -927,7 +963,7 @@ selector_loop:
// rtsFalse says "don't evacuate the result". It will,
// however, update any THUNK_SELECTORs that are evaluated
// along the way.
- val = eval_thunk_selector((StgSelector *)selectee, rtsFalse);
+ eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
gct->thunk_selector_depth--;
// did we actually manage to evaluate it?
@@ -964,12 +1000,13 @@ bale_out:
// pointer. But don't forget: we still need to evacuate the thunk itself.
SET_INFO(p, info_ptr);
if (evac) {
- val = copy((StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
+ copy(&val,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
} else {
val = (StgClosure *)p;
}
+ *q = val;
unchain_thunk_selectors(prev_thunk_selector, val);
- return val;
+ return;
}
/* -----------------------------------------------------------------------------
diff --git a/rts/sm/Evac.h b/rts/sm/Evac.h
index beaba755c1..c73008fb1b 100644
--- a/rts/sm/Evac.h
+++ b/rts/sm/Evac.h
@@ -27,6 +27,6 @@
#define REGPARM1
#endif
-REGPARM1 StgClosure * evacuate (StgClosure *q);
+REGPARM1 void evacuate (StgClosure **p);
extern lnat thunk_selector_depth;
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index eed2da7020..d064f1d656 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -183,9 +183,12 @@ GarbageCollect ( rtsBool force_major_gc )
lnat live, allocated;
lnat oldgen_saved_blocks = 0;
nat n_threads; // number of threads participating in GC
-
+ gc_thread *saved_gct;
nat g, s, t;
+ // necessary if we stole a callee-saves register for gct:
+ saved_gct = gct;
+
#ifdef PROFILING
CostCentreStack *prev_CCS;
#endif
@@ -679,6 +682,8 @@ GarbageCollect ( rtsBool force_major_gc )
#endif
RELEASE_SM_LOCK;
+
+ gct = saved_gct;
}
/* ---------------------------------------------------------------------------
@@ -1295,7 +1300,7 @@ init_gc_thread (gc_thread *t)
static void
mark_root(StgClosure **root)
{
- *root = evacuate(*root);
+ evacuate(root);
}
/* -----------------------------------------------------------------------------
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index bfa78e5836..62ac8e0397 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -137,8 +137,8 @@ traverseWeakPtrList(void)
if (new != NULL) {
w->key = new;
// evacuate the value and finalizer
- w->value = evacuate(w->value);
- w->finalizer = evacuate(w->finalizer);
+ evacuate(&w->value);
+ evacuate(&w->finalizer);
// remove this weak ptr from the old_weak_ptr list
*last_w = w->link;
// and put it on the new weak ptr list
@@ -169,7 +169,7 @@ traverseWeakPtrList(void)
*/
if (flag == rtsFalse) {
for (w = old_weak_ptr_list; w; w = w->link) {
- w->finalizer = evacuate(w->finalizer);
+ evacuate(&w->finalizer);
}
// Next, move to the WeakThreads stage after fully
@@ -241,7 +241,8 @@ traverseWeakPtrList(void)
StgTSO *t, *tmp, *next;
for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
next = t->global_link;
- tmp = (StgTSO *)evacuate((StgClosure *)t);
+ tmp = t;
+ evacuate((StgClosure **)&tmp);
tmp->global_link = resurrected_threads;
resurrected_threads = tmp;
}
@@ -301,7 +302,8 @@ traverseBlackholeQueue (void)
continue;
}
}
- t = (StgTSO *)evacuate((StgClosure *)t);
+ tmp = t;
+ evacuate((StgClosure **)&tmp);
if (prev) prev->link = t;
flag = rtsTrue;
}
@@ -324,14 +326,15 @@ traverseBlackholeQueue (void)
void
markWeakPtrList ( void )
{
- StgWeak *w, **last_w;
+ StgWeak *w, **last_w, *tmp;
last_w = &weak_ptr_list;
for (w = weak_ptr_list; w; w = w->link) {
// w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
ASSERT(w->header.info == &stg_DEAD_WEAK_info
|| get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
- w = (StgWeak *)evacuate((StgClosure *)w);
+ tmp = w;
+ evacuate((StgClosure **)&tmp);
*last_w = w;
last_w = &(w->link);
}
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 8d7e5825ee..080c75014e 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -50,7 +50,7 @@ scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
p = (StgClosure **)large_srt->srt;
for (i = 0; i < size; ) {
if ((bitmap & 1) != 0) {
- evacuate(*p);
+ evacuate(p);
}
i++;
p++;
@@ -93,12 +93,12 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap)
// If the SRT entry hasn't got bit 0 set, the SRT entry points to a
// closure that's fixed at link-time, and no extra magic is required.
if ( (unsigned long)(*srt) & 0x1 ) {
- evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
+ evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
} else {
- evacuate(*p);
+ evacuate(p);
}
#else
- evacuate(*p);
+ evacuate(p);
#endif
}
p++;
@@ -140,20 +140,19 @@ scavengeTSO (StgTSO *tso)
|| tso->why_blocked == BlockedOnBlackHole
|| tso->why_blocked == BlockedOnException
) {
- tso->block_info.closure = evacuate(tso->block_info.closure);
+ evacuate(&tso->block_info.closure);
}
- tso->blocked_exceptions =
- (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
+ evacuate((StgClosure **)&tso->blocked_exceptions);
// We don't always chase the link field: TSOs on the blackhole
// queue are not automatically alive, so the link field is a
// "weak" pointer in that case.
if (tso->why_blocked != BlockedOnBlackHole) {
- tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
+ evacuate((StgClosure **)&tso->link);
}
// scavange current transaction record
- tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
+ evacuate((StgClosure **)&tso->trec);
// scavenge this thread's stack
scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
@@ -188,7 +187,7 @@ scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
small_bitmap:
while (size > 0) {
if ((bitmap & 1) == 0) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
p++;
bitmap = bitmap >> 1;
@@ -227,7 +226,7 @@ scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
small_bitmap:
while (size > 0) {
if ((bitmap & 1) == 0) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
p++;
bitmap = bitmap >> 1;
@@ -241,14 +240,14 @@ scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
STATIC_INLINE StgPtr
scavenge_PAP (StgPAP *pap)
{
- pap->fun = evacuate(pap->fun);
+ evacuate(&pap->fun);
return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}
STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
- ap->fun = evacuate(ap->fun);
+ evacuate(&ap->fun);
return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
@@ -300,9 +299,9 @@ scavenge_block (bdescr *bd, StgPtr scan)
StgMVar *mvar = ((StgMVar *)p);
gct->eager_promotion = rtsFalse;
- mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
- mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
- mvar->value = evacuate((StgClosure *)mvar->value);
+ evacuate((StgClosure **)&mvar->head);
+ evacuate((StgClosure **)&mvar->tail);
+ evacuate((StgClosure **)&mvar->value);
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -316,34 +315,34 @@ scavenge_block (bdescr *bd, StgPtr scan)
case FUN_2_0:
scavenge_fun_srt(info);
- ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[1]);
+ evacuate(&((StgClosure *)p)->payload[0]);
p += sizeofW(StgHeader) + 2;
break;
case THUNK_2_0:
scavenge_thunk_srt(info);
- ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
- ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
+ evacuate(&((StgThunk *)p)->payload[1]);
+ evacuate(&((StgThunk *)p)->payload[0]);
p += sizeofW(StgThunk) + 2;
break;
case CONSTR_2_0:
- ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[1]);
+ evacuate(&((StgClosure *)p)->payload[0]);
p += sizeofW(StgHeader) + 2;
break;
case THUNK_1_0:
scavenge_thunk_srt(info);
- ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
+ evacuate(&((StgThunk *)p)->payload[0]);
p += sizeofW(StgThunk) + 1;
break;
case FUN_1_0:
scavenge_fun_srt(info);
case CONSTR_1_0:
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[0]);
p += sizeofW(StgHeader) + 1;
break;
@@ -371,14 +370,14 @@ scavenge_block (bdescr *bd, StgPtr scan)
case THUNK_1_1:
scavenge_thunk_srt(info);
- ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
+ evacuate(&((StgThunk *)p)->payload[0]);
p += sizeofW(StgThunk) + 2;
break;
case FUN_1_1:
scavenge_fun_srt(info);
case CONSTR_1_1:
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[0]);
p += sizeofW(StgHeader) + 2;
break;
@@ -393,7 +392,7 @@ scavenge_block (bdescr *bd, StgPtr scan)
scavenge_thunk_srt(info);
end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
p += info->layout.payload.nptrs;
break;
@@ -408,7 +407,7 @@ scavenge_block (bdescr *bd, StgPtr scan)
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
p += info->layout.payload.nptrs;
break;
@@ -416,9 +415,9 @@ scavenge_block (bdescr *bd, StgPtr scan)
case BCO: {
StgBCO *bco = (StgBCO *)p;
- bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
- bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
- bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
+ evacuate((StgClosure **)&bco->instrs);
+ evacuate((StgClosure **)&bco->literals);
+ evacuate((StgClosure **)&bco->ptrs);
p += bco_sizeW(bco);
break;
}
@@ -441,7 +440,7 @@ scavenge_block (bdescr *bd, StgPtr scan)
}
// fall through
case IND_OLDGEN_PERM:
- ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
+ evacuate(&((StgInd *)p)->indirectee);
p += sizeofW(StgInd);
break;
@@ -450,7 +449,7 @@ scavenge_block (bdescr *bd, StgPtr scan)
rtsBool saved_eager_promotion = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
- ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
+ evacuate(&((StgMutVar *)p)->var);
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -472,7 +471,7 @@ scavenge_block (bdescr *bd, StgPtr scan)
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
- s->selectee = evacuate(s->selectee);
+ evacuate(&s->selectee);
p += THUNK_SELECTOR_sizeW();
break;
}
@@ -482,7 +481,7 @@ scavenge_block (bdescr *bd, StgPtr scan)
{
StgAP_STACK *ap = (StgAP_STACK *)p;
- ap->fun = evacuate(ap->fun);
+ evacuate(&ap->fun);
scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
p = (StgPtr)ap->payload + ap->size;
break;
@@ -516,7 +515,7 @@ scavenge_block (bdescr *bd, StgPtr scan)
gct->eager_promotion = rtsFalse;
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
gct->eager_promotion = saved_eager;
@@ -538,7 +537,7 @@ scavenge_block (bdescr *bd, StgPtr scan)
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
// If we're going to put this object on the mutable list, then
@@ -575,9 +574,9 @@ scavenge_block (bdescr *bd, StgPtr scan)
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
gct->evac_gen = 0;
- wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
- wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
- wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
+ evacuate((StgClosure **)&wq->closure);
+ evacuate((StgClosure **)&wq->next_queue_entry);
+ evacuate((StgClosure **)&wq->prev_queue_entry);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTVarWatchQueue);
@@ -588,8 +587,8 @@ scavenge_block (bdescr *bd, StgPtr scan)
{
StgTVar *tvar = ((StgTVar *) p);
gct->evac_gen = 0;
- tvar->current_value = evacuate((StgClosure*)tvar->current_value);
- tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
+ evacuate((StgClosure **)&tvar->current_value);
+ evacuate((StgClosure **)&tvar->first_watch_queue_entry);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTVar);
@@ -600,9 +599,9 @@ scavenge_block (bdescr *bd, StgPtr scan)
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
gct->evac_gen = 0;
- trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
- trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
- trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
+ evacuate((StgClosure **)&trec->enclosing_trec);
+ evacuate((StgClosure **)&trec->current_chunk);
+ evacuate((StgClosure **)&trec->invariants_to_check);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTRecHeader);
@@ -615,11 +614,11 @@ scavenge_block (bdescr *bd, StgPtr scan)
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
gct->evac_gen = 0;
- tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
+ evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
- e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
- e->expected_value = evacuate((StgClosure*)e->expected_value);
- e->new_value = evacuate((StgClosure*)e->new_value);
+ evacuate((StgClosure **)&e->tvar);
+ evacuate((StgClosure **)&e->expected_value);
+ evacuate((StgClosure **)&e->new_value);
}
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
@@ -631,8 +630,8 @@ scavenge_block (bdescr *bd, StgPtr scan)
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
gct->evac_gen = 0;
- invariant->code = (StgClosure *)evacuate(invariant->code);
- invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
+ evacuate(&invariant->code);
+ evacuate((StgClosure **)&invariant->last_execution);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgAtomicInvariant);
@@ -643,9 +642,9 @@ scavenge_block (bdescr *bd, StgPtr scan)
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
gct->evac_gen = 0;
- queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
- queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
- queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
+ evacuate((StgClosure **)&queue->invariant);
+ evacuate((StgClosure **)&queue->my_execution);
+ evacuate((StgClosure **)&queue->next_queue_entry);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgInvariantCheckQueue);
@@ -710,9 +709,9 @@ linear_scan:
StgMVar *mvar = ((StgMVar *)p);
gct->eager_promotion = rtsFalse;
- mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
- mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
- mvar->value = evacuate((StgClosure *)mvar->value);
+ evacuate((StgClosure **)&mvar->head);
+ evacuate((StgClosure **)&mvar->tail);
+ evacuate((StgClosure **)&mvar->value);
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -725,36 +724,36 @@ linear_scan:
case FUN_2_0:
scavenge_fun_srt(info);
- ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[1]);
+ evacuate(&((StgClosure *)p)->payload[0]);
break;
case THUNK_2_0:
scavenge_thunk_srt(info);
- ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
- ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
+ evacuate(&((StgThunk *)p)->payload[1]);
+ evacuate(&((StgThunk *)p)->payload[0]);
break;
case CONSTR_2_0:
- ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[1]);
+ evacuate(&((StgClosure *)p)->payload[0]);
break;
case FUN_1_0:
case FUN_1_1:
scavenge_fun_srt(info);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[0]);
break;
case THUNK_1_0:
case THUNK_1_1:
scavenge_thunk_srt(info);
- ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
+ evacuate(&((StgThunk *)p)->payload[0]);
break;
case CONSTR_1_0:
case CONSTR_1_1:
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[0]);
break;
case FUN_0_1:
@@ -782,7 +781,7 @@ linear_scan:
scavenge_thunk_srt(info);
end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
break;
}
@@ -796,16 +795,16 @@ linear_scan:
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
break;
}
case BCO: {
StgBCO *bco = (StgBCO *)p;
- bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
- bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
- bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
+ evacuate((StgClosure **)&bco->instrs);
+ evacuate((StgClosure **)&bco->literals);
+ evacuate((StgClosure **)&bco->ptrs);
break;
}
@@ -817,8 +816,7 @@ linear_scan:
case IND_OLDGEN:
case IND_OLDGEN_PERM:
- ((StgInd *)p)->indirectee =
- evacuate(((StgInd *)p)->indirectee);
+ evacuate(&((StgInd *)p)->indirectee);
break;
case MUT_VAR_CLEAN:
@@ -826,7 +824,7 @@ linear_scan:
rtsBool saved_eager_promotion = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
- ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
+ evacuate(&((StgMutVar *)p)->var);
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -847,7 +845,7 @@ linear_scan:
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
- s->selectee = evacuate(s->selectee);
+ evacuate(&s->selectee);
break;
}
@@ -856,7 +854,7 @@ linear_scan:
{
StgAP_STACK *ap = (StgAP_STACK *)p;
- ap->fun = evacuate(ap->fun);
+ evacuate(&ap->fun);
scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
break;
}
@@ -884,7 +882,7 @@ linear_scan:
gct->eager_promotion = rtsFalse;
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
gct->eager_promotion = saved_eager;
@@ -906,7 +904,7 @@ linear_scan:
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
// If we're going to put this object on the mutable list, then
@@ -942,9 +940,9 @@ linear_scan:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
gct->evac_gen = 0;
- wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
- wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
- wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
+ evacuate((StgClosure **)&wq->closure);
+ evacuate((StgClosure **)&wq->next_queue_entry);
+ evacuate((StgClosure **)&wq->prev_queue_entry);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
@@ -954,8 +952,8 @@ linear_scan:
{
StgTVar *tvar = ((StgTVar *) p);
gct->evac_gen = 0;
- tvar->current_value = evacuate((StgClosure*)tvar->current_value);
- tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
+ evacuate((StgClosure **)&tvar->current_value);
+ evacuate((StgClosure **)&tvar->first_watch_queue_entry);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
@@ -967,11 +965,11 @@ linear_scan:
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
gct->evac_gen = 0;
- tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
+ evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
- e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
- e->expected_value = evacuate((StgClosure*)e->expected_value);
- e->new_value = evacuate((StgClosure*)e->new_value);
+ evacuate((StgClosure **)&e->tvar);
+ evacuate((StgClosure **)&e->expected_value);
+ evacuate((StgClosure **)&e->new_value);
}
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
@@ -982,9 +980,9 @@ linear_scan:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
gct->evac_gen = 0;
- trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
- trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
- trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
+ evacuate((StgClosure **)&trec->enclosing_trec);
+ evacuate((StgClosure **)&trec->current_chunk);
+ evacuate((StgClosure **)&trec->invariants_to_check);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
@@ -994,8 +992,8 @@ linear_scan:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
gct->evac_gen = 0;
- invariant->code = (StgClosure *)evacuate(invariant->code);
- invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
+ evacuate(&invariant->code);
+ evacuate((StgClosure **)&invariant->last_execution);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
@@ -1005,9 +1003,9 @@ linear_scan:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
gct->evac_gen = 0;
- queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
- queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
- queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
+ evacuate((StgClosure **)&queue->invariant);
+ evacuate((StgClosure **)&queue->my_execution);
+ evacuate((StgClosure **)&queue->next_queue_entry);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
@@ -1096,9 +1094,9 @@ scavenge_one(StgPtr p)
StgMVar *mvar = ((StgMVar *)p);
gct->eager_promotion = rtsFalse;
- mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
- mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
- mvar->value = evacuate((StgClosure *)mvar->value);
+ evacuate((StgClosure **)&mvar->head);
+ evacuate((StgClosure **)&mvar->tail);
+ evacuate((StgClosure **)&mvar->value);
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -1120,7 +1118,7 @@ scavenge_one(StgPtr p)
end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
- *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
+ evacuate((StgClosure **)q);
}
break;
}
@@ -1144,7 +1142,7 @@ scavenge_one(StgPtr p)
end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
- *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
+ evacuate((StgClosure **)q);
}
break;
}
@@ -1155,7 +1153,7 @@ scavenge_one(StgPtr p)
rtsBool saved_eager_promotion = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
- ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
+ evacuate(&((StgMutVar *)p)->var);
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -1175,7 +1173,7 @@ scavenge_one(StgPtr p)
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
- s->selectee = evacuate(s->selectee);
+ evacuate(&s->selectee);
break;
}
@@ -1183,7 +1181,7 @@ scavenge_one(StgPtr p)
{
StgAP_STACK *ap = (StgAP_STACK *)p;
- ap->fun = evacuate(ap->fun);
+ evacuate(&ap->fun);
scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
p = (StgPtr)ap->payload + ap->size;
break;
@@ -1216,7 +1214,7 @@ scavenge_one(StgPtr p)
q = p;
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
gct->eager_promotion = saved_eager;
@@ -1238,7 +1236,7 @@ scavenge_one(StgPtr p)
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
// If we're going to put this object on the mutable list, then
@@ -1274,9 +1272,9 @@ scavenge_one(StgPtr p)
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
gct->evac_gen = 0;
- wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
- wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
- wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
+ evacuate((StgClosure **)&wq->closure);
+ evacuate((StgClosure **)&wq->next_queue_entry);
+ evacuate((StgClosure **)&wq->prev_queue_entry);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
@@ -1286,8 +1284,8 @@ scavenge_one(StgPtr p)
{
StgTVar *tvar = ((StgTVar *) p);
gct->evac_gen = 0;
- tvar->current_value = evacuate((StgClosure*)tvar->current_value);
- tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
+ evacuate((StgClosure **)&tvar->current_value);
+ evacuate((StgClosure **)&tvar->first_watch_queue_entry);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
@@ -1297,9 +1295,9 @@ scavenge_one(StgPtr p)
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
gct->evac_gen = 0;
- trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
- trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
- trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
+ evacuate((StgClosure **)&trec->enclosing_trec);
+ evacuate((StgClosure **)&trec->current_chunk);
+ evacuate((StgClosure **)&trec->invariants_to_check);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
@@ -1311,11 +1309,11 @@ scavenge_one(StgPtr p)
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
gct->evac_gen = 0;
- tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
+ evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
- e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
- e->expected_value = evacuate((StgClosure*)e->expected_value);
- e->new_value = evacuate((StgClosure*)e->new_value);
+ evacuate((StgClosure **)&e->tvar);
+ evacuate((StgClosure **)&e->expected_value);
+ evacuate((StgClosure **)&e->new_value);
}
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
@@ -1326,8 +1324,8 @@ scavenge_one(StgPtr p)
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
gct->evac_gen = 0;
- invariant->code = (StgClosure *)evacuate(invariant->code);
- invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
+ evacuate(&invariant->code);
+ evacuate((StgClosure **)&invariant->last_execution);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
@@ -1337,9 +1335,9 @@ scavenge_one(StgPtr p)
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
gct->evac_gen = 0;
- queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
- queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
- queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
+ evacuate((StgClosure **)&queue->invariant);
+ evacuate((StgClosure **)&queue->my_execution);
+ evacuate((StgClosure **)&queue->next_queue_entry);
gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
@@ -1360,7 +1358,7 @@ scavenge_one(StgPtr p)
if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
break;
}
- ((StgInd *)p)->indirectee = evacuate(q);
+ evacuate(&((StgInd *)p)->indirectee);
}
#if 0 && defined(DEBUG)
@@ -1460,7 +1458,7 @@ scavenge_mutable_list(generation *gen)
// we don't want to have to mark a TSO dirty just
// because we put it on a different queue.
if (tso->why_blocked != BlockedOnBlackHole) {
- tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
+ evacuate((StgClosure **)&tso->link);
}
recordMutableGen_GC((StgClosure *)p,gen);
continue;
@@ -1540,7 +1538,7 @@ scavenge_static(void)
case IND_STATIC:
{
StgInd *ind = (StgInd *)p;
- ind->indirectee = evacuate(ind->indirectee);
+ evacuate(&ind->indirectee);
/* might fail to evacuate it, in which case we have to pop it
* back on the mutable list of the oldest generation. We
@@ -1569,7 +1567,7 @@ scavenge_static(void)
next = (P_)p->payload + info->layout.payload.ptrs;
// evacuate the pointers
for (q = (P_)p->payload; q < next; q++) {
- *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
+ evacuate((StgClosure **)q);
}
break;
}
@@ -1596,7 +1594,7 @@ scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
bitmap = large_bitmap->bitmap[b];
for (i = 0; i < size; ) {
if ((bitmap & 1) == 0) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
i++;
p++;
@@ -1614,7 +1612,7 @@ scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
while (size > 0) {
if ((bitmap & 1) == 0) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
p++;
bitmap = bitmap >> 1;
@@ -1679,8 +1677,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
((StgUpdateFrame *)p)->updatee->header.info =
(StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
}
- ((StgUpdateFrame *)p)->updatee
- = evacuate(((StgUpdateFrame *)p)->updatee);
+ evacuate(&((StgUpdateFrame *)p)->updatee);
p += sizeofW(StgUpdateFrame);
continue;
}
@@ -1709,7 +1706,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
nat size;
p++;
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
bco = (StgBCO *)*p;
p++;
size = BCO_BITMAP_SIZE(bco);
@@ -1751,7 +1748,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
// follow the ptr words
for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
p++;
}
continue;
@@ -1762,7 +1759,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
StgRetFun *ret_fun = (StgRetFun *)p;
StgFunInfoTable *fun_info;
- ret_fun->fun = evacuate(ret_fun->fun);
+ evacuate(&ret_fun->fun);
fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
p = scavenge_arg_block(fun_info, ret_fun->payload);
goto follow_srt;