author     Simon Marlow <simonmarhaskell@gmail.com>   2008-04-17 21:27:07 +0000
committer  Simon Marlow <simonmarhaskell@gmail.com>   2008-04-17 21:27:07 +0000
commit     4e79709df545c16812b85f2c27ab3411f5a7b54f (patch)
tree       518c23ecad5b34b5ad99d5ecd7b2fb2c52949c4f /rts
parent     a4e09e8f27b81e915b128ef244c7b0d082bfb89a (diff)
remove EVACUATED: store the forwarding pointer in the info pointer
Diffstat (limited to 'rts')
 rts/LdvProfile.c        |  25
 rts/RetainerProfile.c   |   3
 rts/Sanity.c            |  12
 rts/StgMiscClosures.cmm |   8
 rts/sm/Evac.c           |  87
 rts/sm/Evac.c-inc       | 226
 rts/sm/GCAux.c          |  14
 rts/sm/MarkWeak.c       |  16
 rts/sm/Scav.c           |  34
 rts/sm/Scav.c-inc       |  18
10 files changed, 242 insertions, 201 deletions
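
The trick the commit message describes: info tables are word-aligned, so the low bit of a genuine info pointer is always zero. Setting that bit turns a to-space address into a value that fits in the header word but can never be mistaken for an info pointer, which is what lets the EVACUATED closure type (and its separate evacuee field) disappear. The macros themselves live in the includes/ headers rather than in this diff; presumably they look along these lines:

/* Assumed definitions, inferred from how the macros are used below:
 * a real info pointer is word-aligned (low bit 0), so the low bit
 * tags the header word as a forwarding pointer instead. */
#define IS_FORWARDING_PTR(p)  ((((StgWord)p) & 1) != 0)
#define MK_FORWARDING_PTR(p)  (((StgWord)p) | 1)
#define UN_FORWARDING_PTR(p)  (((StgWord)p) - 1)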
diff --git a/rts/LdvProfile.c b/rts/LdvProfile.c
index 1e2ffc8c02..0cd80dee65 100644
--- a/rts/LdvProfile.c
+++ b/rts/LdvProfile.c
@@ -68,26 +68,27 @@ STATIC_INLINE nat
processHeapClosureForDead( StgClosure *c )
{
nat size;
- StgInfoTable *info;
+ const StgInfoTable *info;
- info = get_itbl(c);
- if (info->type != EVACUATED) {
- ASSERT(((LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT) <= era &&
- ((LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT) > 0);
- ASSERT(((LDVW(c) & LDV_STATE_MASK) == LDV_STATE_CREATE) ||
- (
- (LDVW(c) & LDV_LAST_MASK) <= era &&
- (LDVW(c) & LDV_LAST_MASK) > 0
- ));
- }
-
- if (info->type == EVACUATED) {
+ info = c->header.info;
+ if (IS_FORWARDING_PTR(info)) {
// The size of the evacuated closure is currently stored in
// the LDV field. See SET_EVACUAEE_FOR_LDV() in
// includes/StgLdvProf.h.
return LDVW(c);
}
+ info = INFO_PTR_TO_STRUCT(info);
+
+ ASSERT(((LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT) <= era &&
+ ((LDVW(c) & LDV_CREATE_MASK) >> LDV_SHIFT) > 0);
+ ASSERT(((LDVW(c) & LDV_STATE_MASK) == LDV_STATE_CREATE) ||
+ (
+ (LDVW(c) & LDV_LAST_MASK) <= era &&
+ (LDVW(c) & LDV_LAST_MASK) > 0
+ ));
+
size = closure_sizeW(c);
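
Every heap walker touched by this patch converts to the same two-step read: fetch the raw header word, test it for a forwarding pointer, and only then treat it as an info pointer. A minimal sketch of that idiom (forwarding_of is a hypothetical helper, not part of the patch):

/* Hypothetical helper sketching the read idiom used above: returns
 * the to-space address if c has been evacuated, otherwise NULL. */
STATIC_INLINE StgClosure *
forwarding_of (StgClosure *c)
{
    const StgInfoTable *info = c->header.info;
    if (IS_FORWARDING_PTR(info)) {
        // evacuated: the header word holds the tagged to-space address
        return (StgClosure *)UN_FORWARDING_PTR(info);
    }
    // not forwarded: only now is INFO_PTR_TO_STRUCT(info) meaningful
    return NULL;
}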
diff --git a/rts/RetainerProfile.c b/rts/RetainerProfile.c
index b71b620d96..4850b94d21 100644
--- a/rts/RetainerProfile.c
+++ b/rts/RetainerProfile.c
@@ -626,7 +626,6 @@ push( StgClosure *c, retainer c_child_r, StgClosure **first_child )
case FETCH_ME_BQ:
case RBH:
case REMOTE_REF:
- case EVACUATED:
case INVALID_OBJECT:
default:
barf("Invalid object *c in push()");
@@ -992,7 +991,6 @@ pop( StgClosure **c, StgClosure **cp, retainer *r )
case FETCH_ME_BQ:
case RBH:
case REMOTE_REF:
- case EVACUATED:
case INVALID_OBJECT:
default:
barf("Invalid object *c in pop()");
@@ -1157,7 +1155,6 @@ isRetainer( StgClosure *c )
case FETCH_ME_BQ:
case RBH:
case REMOTE_REF:
- case EVACUATED:
case INVALID_OBJECT:
default:
barf("Invalid object in isRetainer(): %d", get_itbl(c)->type);
diff --git a/rts/Sanity.c b/rts/Sanity.c
index b8bf5d4183..3df5aef413 100644
--- a/rts/Sanity.c
+++ b/rts/Sanity.c
@@ -257,7 +257,13 @@ checkClosure( StgClosure* p )
ASSERT(!closure_STATIC(p));
}
- info = get_itbl(p);
+ info = p->header.info;
+
+ if (IS_FORWARDING_PTR(info)) {
+ barf("checkClosure: found EVACUATED closure %d", info->type);
+ }
+ info = INFO_PTR_TO_STRUCT(info);
+
switch (info->type) {
case MVAR_CLEAN:
@@ -506,10 +512,6 @@ checkClosure( StgClosure* p )
return sizeofW(StgTRecHeader);
}
-
- case EVACUATED:
- barf("checkClosure: found EVACUATED closure %d",
- info->type);
default:
barf("checkClosure (closure type %d)", info->type);
}
diff --git a/rts/StgMiscClosures.cmm b/rts/StgMiscClosures.cmm
index 6a8f773586..26c8093a5f 100644
--- a/rts/StgMiscClosures.cmm
+++ b/rts/StgMiscClosures.cmm
@@ -409,14 +409,6 @@ INFO_TABLE(stg_TSO, 0,0,TSO, "TSO", "TSO")
{ foreign "C" barf("TSO object entered!") never returns; }
/* ----------------------------------------------------------------------------
- Evacuees are left behind by the garbage collector. Any attempt to enter
- one is a real bug.
- ------------------------------------------------------------------------- */
-
-INFO_TABLE(stg_EVACUATED,1,0,EVACUATED,"EVACUATED","EVACUATED")
-{ foreign "C" barf("EVACUATED object entered!") never returns; }
-
-/* ----------------------------------------------------------------------------
Weak pointers
Live weak pointers have a special closure type. Dead ones are just
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 1ccc8e2da4..fd36cb03c8 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -76,11 +76,13 @@ alloc_for_copy (nat size, step *stp)
The evacuate() code
-------------------------------------------------------------------------- */
-#define PARALLEL_GC
+#undef PARALLEL_GC
#include "Evac.c-inc"
-#undef PARALLEL_GC
+#ifdef THREADED_RTS
+#define PARALLEL_GC
#include "Evac.c-inc"
+#endif
/* -----------------------------------------------------------------------------
Evacuate a large object
@@ -261,9 +263,10 @@ selector_chain:
} while (info_ptr == (W_)&stg_WHITEHOLE_info);
// make sure someone else didn't get here first...
- if (INFO_PTR_TO_STRUCT(info_ptr)->type != THUNK_SELECTOR) {
+ if (IS_FORWARDING_PTR(info_ptr) ||
+ INFO_PTR_TO_STRUCT(info_ptr)->type != THUNK_SELECTOR) {
// v. tricky now. The THUNK_SELECTOR has been evacuated
- // by another thread, and is now either EVACUATED or IND.
+ // by another thread, and is now either a forwarding ptr or IND.
// We need to extract ourselves from the current situation
// as cleanly as possible.
// - unlock the closure
@@ -298,7 +301,16 @@ selector_loop:
// from-space during marking, for example. We rely on the property
// that evacuate() doesn't mind if it gets passed a to-space pointer.
- info = get_itbl(selectee);
+ info = (StgInfoTable*)selectee->header.info;
+
+ if (IS_FORWARDING_PTR(info)) {
+ // We don't follow pointers into to-space; the constructor
+ // has already been evacuated, so we won't save any space
+ // leaks by evaluating this selector thunk anyhow.
+ goto bale_out;
+ }
+
+ info = INFO_PTR_TO_STRUCT(info);
switch (info->type) {
case WHITEHOLE:
goto bale_out; // about to be evacuated by another thread (or a loop).
@@ -333,33 +345,38 @@ selector_loop:
// evaluating until we find the real value, and then
// update the whole chain to point to the value.
val_loop:
- info = get_itbl(UNTAG_CLOSURE(val));
- switch (info->type) {
- case IND:
- case IND_PERM:
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
- case IND_STATIC:
- val = ((StgInd *)val)->indirectee;
- goto val_loop;
- case THUNK_SELECTOR:
- ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
- prev_thunk_selector = p;
- p = (StgSelector*)val;
- goto selector_chain;
- default:
- ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
- prev_thunk_selector = p;
-
- *q = val;
- if (evac) evacuate(q);
- val = *q;
- // evacuate() cannot recurse through
- // eval_thunk_selector(), because we know val is not
- // a THUNK_SELECTOR.
- unchain_thunk_selectors(prev_thunk_selector, val);
- return;
+ info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
+ if (!IS_FORWARDING_PTR(info_ptr))
+ {
+ info = INFO_PTR_TO_STRUCT(info_ptr);
+ switch (info->type) {
+ case IND:
+ case IND_PERM:
+ case IND_OLDGEN:
+ case IND_OLDGEN_PERM:
+ case IND_STATIC:
+ val = ((StgInd *)val)->indirectee;
+ goto val_loop;
+ case THUNK_SELECTOR:
+ ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
+ prev_thunk_selector = p;
+ p = (StgSelector*)val;
+ goto selector_chain;
+ default:
+ break;
+ }
}
+ ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
+ prev_thunk_selector = p;
+
+ *q = val;
+ if (evac) evacuate(q);
+ val = *q;
+ // evacuate() cannot recurse through
+ // eval_thunk_selector(), because we know val is not
+ // a THUNK_SELECTOR.
+ unchain_thunk_selectors(prev_thunk_selector, val);
+ return;
}
case IND:
@@ -371,12 +388,6 @@ selector_loop:
selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
goto selector_loop;
- case EVACUATED:
- // We don't follow pointers into to-space; the constructor
- // has already been evacuated, so we won't save any space
- // leaks by evaluating this selector thunk anyhow.
- goto bale_out;
-
case THUNK_SELECTOR:
{
StgClosure *val;
@@ -432,7 +443,7 @@ bale_out:
// check whether it was updated in the meantime.
*q = (StgClosure *)p;
if (evac) {
- copy(q,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
+ copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
}
unchain_thunk_selectors(prev_thunk_selector, *q);
return;
diff --git a/rts/sm/Evac.c-inc b/rts/sm/Evac.c-inc
index eabdcdcc1d..7a657ca226 100644
--- a/rts/sm/Evac.c-inc
+++ b/rts/sm/Evac.c-inc
@@ -10,55 +10,77 @@
// non-minor, parallel, GC. This file contains the code for both,
// controlled by the CPP symbol PARALLEL_GC.
-#ifndef PARALLEL_GC
-#define copy(a,b,c,d) copy1(a,b,c,d)
-#define copy_tag(a,b,c,d,e) copy_tag1(a,b,c,d,e)
-#define copyPart(a,b,c,d,e) copyPart1(a,b,c,d,e)
-#define evacuate(a) evacuate1(a)
+#if defined(THREADED_RTS)
+# if !defined(PARALLEL_GC)
+# define copy(a,b,c,d,e) copy1(a,b,c,d,e)
+# define copy_tag(a,b,c,d,e,f) copy_tag1(a,b,c,d,e,f)
+# define copy_tag_nolock(a,b,c,d,e,f) copy_tag1(a,b,c,d,e,f)
+# define copyPart(a,b,c,d,e) copyPart1(a,b,c,d,e)
+# define evacuate(a) evacuate1(a)
+# endif
#else
-#undef copy
-#undef copy_tag
-#undef copyPart
-#undef evacuate
+# define copy_tag_nolock(a,b,c,d,e,f) copy_tag(a,b,c,d,e,f)
#endif
STATIC_INLINE void
-copy_tag(StgClosure **p, StgClosure *src, nat size, step *stp, StgWord tag)
+copy_tag(StgClosure **p, const StgInfoTable *info,
+ StgClosure *src, nat size, step *stp, StgWord tag)
{
- StgPtr to, tagged_to, from;
+ StgPtr to, from;
nat i;
- StgWord info;
-#if defined(PARALLEL_GC) && defined(THREADED_RTS)
-spin:
- info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
- // so.. what is it?
- if (info == (W_)&stg_WHITEHOLE_info) {
-#ifdef PROF_SPIN
- whitehole_spin++;
-#endif
- goto spin;
+ to = alloc_for_copy(size,stp);
+
+ TICK_GC_WORDS_COPIED(size);
+
+ from = (StgPtr)src;
+ to[0] = (W_)info;
+ for (i = 1; i < size; i++) { // unroll for small i
+ to[i] = from[i];
}
- if (info == (W_)&stg_EVACUATED_info || info == (W_)&stg_IND_info) {
- // NB. a closure might be updated with an IND by
- // unchain_selector_thunks(), hence the test above.
- src->header.info = (const StgInfoTable *)info;
- return evacuate(p); // does the failed_to_evac stuff
+
+// if (to+size+2 < bd->start + BLOCK_SIZE_W) {
+// __builtin_prefetch(to + size + 2, 1);
+// }
+
+#if defined(PARALLEL_GC)
+ {
+ const StgInfoTable *new_info;
+ new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
+ if (new_info != info) {
+ return evacuate(p); // does the failed_to_evac stuff
+ } else {
+ *p = TAG_CLOSURE(tag,(StgClosure*)to);
+ }
}
#else
- ASSERT(n_gc_threads == 1);
- info = (W_)src->header.info;
- src->header.info = &stg_EVACUATED_info;
+ src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
+ *p = TAG_CLOSURE(tag,(StgClosure*)to);
+#endif
+
+#ifdef PROFILING
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ SET_EVACUAEE_FOR_LDV(from, size);
#endif
+}
+
+#if defined(PARALLEL_GC)
+STATIC_INLINE void
+copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
+ StgClosure *src, nat size, step *stp, StgWord tag)
+{
+ StgPtr to, from;
+ nat i;
to = alloc_for_copy(size,stp);
- tagged_to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
- *p = (StgClosure *)tagged_to;
+ *p = TAG_CLOSURE(tag,(StgClosure*)to);
+ src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
TICK_GC_WORDS_COPIED(size);
from = (StgPtr)src;
- to[0] = info;
+ to[0] = (W_)info;
for (i = 1; i < size; i++) { // unroll for small i
to[i] = from[i];
}
@@ -67,19 +89,13 @@ spin:
// __builtin_prefetch(to + size + 2, 1);
// }
- ((StgEvacuated*)from)->evacuee = (StgClosure *)tagged_to;
-#if defined(PARALLEL_GC) && defined(THREADED_RTS)
- write_barrier();
- ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
-#endif
-
#ifdef PROFILING
// We store the size of the just evacuated object in the LDV word so that
// the profiler can guess the position of the next object later.
SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
-
+#endif
/* Special version of copy() for when we only want to copy the info
* pointer of an object, but reserve some padding after it. This is
@@ -92,7 +108,7 @@ copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy,
nat i;
StgWord info;
-#if defined(PARALLEL_GC) && defined(THREADED_RTS)
+#if defined(PARALLEL_GC)
spin:
info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
if (info == (W_)&stg_WHITEHOLE_info) {
@@ -101,14 +117,13 @@ spin:
#endif
goto spin;
}
- if (info == (W_)&stg_EVACUATED_info) {
+ if (IS_FORWARDING_PTR(info)) {
src->header.info = (const StgInfoTable *)info;
evacuate(p); // does the failed_to_evac stuff
return ;
}
#else
info = (W_)src->header.info;
- src->header.info = &stg_EVACUATED_info;
#endif
to = alloc_for_copy(size_to_reserve, stp);
@@ -122,11 +137,10 @@ spin:
to[i] = from[i];
}
- ((StgEvacuated*)from)->evacuee = (StgClosure *)to;
-#if defined(PARALLEL_GC) && defined(THREADED_RTS)
+#if defined(PARALLEL_GC)
write_barrier();
- ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
#endif
+ src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
#ifdef PROFILING
// We store the size of the just evacuated object in the LDV word so that
@@ -141,9 +155,10 @@ spin:
/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE void
-copy(StgClosure **p, StgClosure *src, nat size, step *stp)
+copy(StgClosure **p, const StgInfoTable *info,
+ StgClosure *src, nat size, step *stp)
{
- copy_tag(p,src,size,stp,0);
+ copy_tag(p,info,src,size,stp,0);
}
/* ----------------------------------------------------------------------------
@@ -356,9 +371,37 @@ loop:
stp = bd->step->to;
- info = get_itbl(q);
-
- switch (info->type) {
+ info = q->header.info;
+ if (IS_FORWARDING_PTR(info))
+ {
+ /* Already evacuated, just return the forwarding address.
+ * HOWEVER: if the requested destination generation (gct->evac_step) is
+ * older than the actual generation (because the object was
+ * already evacuated to a younger generation) then we have to
+ * set the gct->failed_to_evac flag to indicate that we couldn't
+ * manage to promote the object to the desired generation.
+ */
+ /*
+ * Optimisation: the check is fairly expensive, but we can often
+ * shortcut it if either the required generation is 0, or the
+ * current object (the EVACUATED) is in a high enough generation.
+ * We know that an EVACUATED always points to an object in the
+ * same or an older generation. stp is the lowest step that the
+ * current object would be evacuated to, so we only do the full
+ * check if stp is too low.
+ */
+ StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
+ *p = TAG_CLOSURE(tag,e);
+ if (stp < gct->evac_step) { // optimisation
+ if (Bdescr((P_)e)->step < gct->evac_step) {
+ gct->failed_to_evac = rtsTrue;
+ TICK_GC_FAILED_PROMOTION();
+ }
+ }
+ return;
+ }
+
+ switch (INFO_PTR_TO_STRUCT(info)->type) {
case WHITEHOLE:
goto loop;
@@ -367,27 +410,27 @@ loop:
case MUT_VAR_DIRTY:
case MVAR_CLEAN:
case MVAR_DIRTY:
- copy(p,q,sizeW_fromITBL(info),stp);
+ copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp);
return;
case CONSTR_0_1:
{
StgWord w = (StgWord)q->payload[0];
- if (q->header.info == Czh_con_info &&
+ if (info == Czh_con_info &&
// unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
(StgChar)w <= MAX_CHARLIKE) {
*p = TAG_CLOSURE(tag,
(StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
);
}
- else if (q->header.info == Izh_con_info &&
+ else if (info == Izh_con_info &&
(StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
*p = TAG_CLOSURE(tag,
(StgClosure *)INTLIKE_CLOSURE((StgInt)w)
);
}
else {
- copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
+ copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag);
}
return;
}
@@ -395,12 +438,12 @@ loop:
case FUN_0_1:
case FUN_1_0:
case CONSTR_1_0:
- copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
+ copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag);
return;
case THUNK_1_0:
case THUNK_0_1:
- copy(p,q,sizeofW(StgThunk)+1,stp);
+ copy(p,info,q,sizeofW(StgThunk)+1,stp);
return;
case THUNK_1_1:
@@ -413,7 +456,7 @@ loop:
stp = bd->step;
}
#endif
- copy(p,q,sizeofW(StgThunk)+2,stp);
+ copy(p,info,q,sizeofW(StgThunk)+2,stp);
return;
case FUN_1_1:
@@ -421,28 +464,31 @@ loop:
case FUN_0_2:
case CONSTR_1_1:
case CONSTR_2_0:
- copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
+ copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag);
return;
case CONSTR_0_2:
- copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
+ copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag);
return;
case THUNK:
- copy(p,q,thunk_sizeW_fromITBL(info),stp);
+ copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp);
return;
case FUN:
case IND_PERM:
case IND_OLDGEN_PERM:
+ case CONSTR:
+ copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag);
+ return;
+
case WEAK:
case STABLE_NAME:
- case CONSTR:
- copy_tag(p,q,sizeW_fromITBL(info),stp,tag);
+ copy_tag(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag);
return;
case BCO:
- copy(p,q,bco_sizeW((StgBCO *)q),stp);
+ copy(p,info,q,bco_sizeW((StgBCO *)q),stp);
return;
case CAF_BLACKHOLE:
@@ -477,49 +523,20 @@ loop:
barf("evacuate: stack frame at %p\n", q);
case PAP:
- copy(p,q,pap_sizeW((StgPAP*)q),stp);
+ copy(p,info,q,pap_sizeW((StgPAP*)q),stp);
return;
case AP:
- copy(p,q,ap_sizeW((StgAP*)q),stp);
+ copy(p,info,q,ap_sizeW((StgAP*)q),stp);
return;
case AP_STACK:
- copy(p,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
- return;
-
- case EVACUATED:
- /* Already evacuated, just return the forwarding address.
- * HOWEVER: if the requested destination generation (gct->evac_step) is
- * older than the actual generation (because the object was
- * already evacuated to a younger generation) then we have to
- * set the gct->failed_to_evac flag to indicate that we couldn't
- * manage to promote the object to the desired generation.
- */
- /*
- * Optimisation: the check is fairly expensive, but we can often
- * shortcut it if either the required generation is 0, or the
- * current object (the EVACUATED) is in a high enough generation.
- * We know that an EVACUATED always points to an object in the
- * same or an older generation. stp is the lowest step that the
- * current object would be evacuated to, so we only do the full
- * check if stp is too low.
- */
- {
- StgClosure *e = ((StgEvacuated*)q)->evacuee;
- *p = e;
- if (stp < gct->evac_step) { // optimisation
- if (Bdescr((P_)e)->step < gct->evac_step) {
- gct->failed_to_evac = rtsTrue;
- TICK_GC_FAILED_PROMOTION();
- }
- }
+ copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
return;
- }
case ARR_WORDS:
// just copy the block
- copy(p,q,arr_words_sizeW((StgArrWords *)q),stp);
+ copy(p,info,q,arr_words_sizeW((StgArrWords *)q),stp);
return;
case MUT_ARR_PTRS_CLEAN:
@@ -527,7 +544,7 @@ loop:
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
// just copy the block
- copy(p,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
+ copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
return;
case TSO:
@@ -561,31 +578,31 @@ loop:
}
case TREC_HEADER:
- copy(p,q,sizeofW(StgTRecHeader),stp);
+ copy(p,info,q,sizeofW(StgTRecHeader),stp);
return;
case TVAR_WATCH_QUEUE:
- copy(p,q,sizeofW(StgTVarWatchQueue),stp);
+ copy(p,info,q,sizeofW(StgTVarWatchQueue),stp);
return;
case TVAR:
- copy(p,q,sizeofW(StgTVar),stp);
+ copy(p,info,q,sizeofW(StgTVar),stp);
return;
case TREC_CHUNK:
- copy(p,q,sizeofW(StgTRecChunk),stp);
+ copy(p,info,q,sizeofW(StgTRecChunk),stp);
return;
case ATOMIC_INVARIANT:
- copy(p,q,sizeofW(StgAtomicInvariant),stp);
+ copy(p,info,q,sizeofW(StgAtomicInvariant),stp);
return;
case INVARIANT_CHECK_QUEUE:
- copy(p,q,sizeofW(StgInvariantCheckQueue),stp);
+ copy(p,info,q,sizeofW(StgInvariantCheckQueue),stp);
return;
default:
- barf("evacuate: strange closure type %d", (int)(info->type));
+ barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
}
barf("evacuate");
@@ -593,5 +610,6 @@ loop:
#undef copy
#undef copy_tag
+#undef copy_tag_nolock
#undef copyPart
#undef evacuate
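
In the parallel GC the header word now doubles as the lock: copy_tag claims an object by publishing the forwarding pointer with a single compare-and-swap, and the loser of a race simply re-runs evacuate() through the winner's pointer. A condensed sketch of that protocol, assuming cas() returns the previous header word as the hunk above uses it (claim_by_cas is hypothetical; payload copy, tagging and LDV profiling omitted):

static void
claim_by_cas (StgClosure **p, const StgInfoTable *info,
              StgClosure *src, StgPtr to)
{
    const StgInfoTable *old;
    // install the forwarding pointer iff the header is still 'info'
    old = (const StgInfoTable *)
        cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
    if (old != info) {
        // lost the race: follow the winner's forwarding pointer
        evacuate(p);   // does the failed_to_evac stuff
    } else {
        // won the race: every reader now sees the forwarding pointer
        *p = (StgClosure *)to;
    }
}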
diff --git a/rts/sm/GCAux.c b/rts/sm/GCAux.c
index df47e18794..825d0f9a3e 100644
--- a/rts/sm/GCAux.c
+++ b/rts/sm/GCAux.c
@@ -70,7 +70,15 @@ isAlive(StgClosure *p)
return p;
}
- info = get_itbl(q);
+ info = q->header.info;
+
+ if (IS_FORWARDING_PTR(info)) {
+ // alive!
+ return (StgClosure*)UN_FORWARDING_PTR(info);
+ }
+
+ info = INFO_PTR_TO_STRUCT(info);
+
switch (info->type) {
case IND:
@@ -82,10 +90,6 @@ isAlive(StgClosure *p)
p = ((StgInd *)q)->indirectee;
continue;
- case EVACUATED:
- // alive!
- return ((StgEvacuated *)q)->evacuee;
-
case TSO:
if (((StgTSO *)q)->what_next == ThreadRelocated) {
p = (StgClosure *)((StgTSO *)q)->_link;
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index 5f71a30627..96b4f674f3 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -96,6 +96,7 @@ traverseWeakPtrList(void)
StgWeak *w, **last_w, *next_w;
StgClosure *new;
rtsBool flag = rtsFalse;
+ const StgInfoTable *info;
switch (weak_stage) {
@@ -120,12 +121,14 @@ traverseWeakPtrList(void)
continue;
}
- switch (get_itbl(w)->type) {
-
- case EVACUATED:
- next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
+ info = w->header.info;
+ if (IS_FORWARDING_PTR(info)) {
+ next_w = (StgWeak *)UN_FORWARDING_PTR(info);
*last_w = next_w;
continue;
+ }
+
+ switch (INFO_PTR_TO_STRUCT(info)->type) {
case WEAK:
/* Now, check whether the key is reachable.
@@ -367,8 +370,9 @@ markWeakPtrList ( void )
last_w = &weak_ptr_list;
for (w = weak_ptr_list; w; w = w->link) {
// w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
- ASSERT(w->header.info == &stg_DEAD_WEAK_info
- || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
+ ASSERT(IS_FORWARDING_PTR(w->header.info)
+ || w->header.info == &stg_DEAD_WEAK_info
+ || get_itbl(w)->type == WEAK);
tmp = w;
evacuate((StgClosure **)&tmp);
*last_w = w;
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index b969de3a74..5d156ed64c 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -1251,17 +1251,22 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
// discarding it.
{
nat type;
- type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
- if (type == IND) {
- ((StgUpdateFrame *)p)->updatee->header.info =
- (StgInfoTable *)&stg_IND_PERM_info;
- } else if (type == IND_OLDGEN) {
- ((StgUpdateFrame *)p)->updatee->header.info =
- (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
- }
- evacuate(&((StgUpdateFrame *)p)->updatee);
- p += sizeofW(StgUpdateFrame);
- continue;
+ const StgInfoTable *i;
+
+ i = ((StgUpdateFrame *)p)->updatee->header.info;
+ if (!IS_FORWARDING_PTR(i)) {
+ type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
+ if (type == IND) {
+ ((StgUpdateFrame *)p)->updatee->header.info =
+ (StgInfoTable *)&stg_IND_PERM_info;
+ } else if (type == IND_OLDGEN) {
+ ((StgUpdateFrame *)p)->updatee->header.info =
+ (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
+ }
+ evacuate(&((StgUpdateFrame *)p)->updatee);
+ p += sizeofW(StgUpdateFrame);
+ continue;
+ }
}
// small bitmap (< 32 entries, or 64 on a 64-bit machine)
@@ -1401,11 +1406,14 @@ scavenge_large (step_workspace *ws)
Scavenge a block
------------------------------------------------------------------------- */
-#define PARALLEL_GC
-#include "Scav.c-inc"
#undef PARALLEL_GC
#include "Scav.c-inc"
+#ifdef THREADED_RTS
+#define PARALLEL_GC
+#include "Scav.c-inc"
+#endif
+
/* ----------------------------------------------------------------------------
Look for work to do.
diff --git a/rts/sm/Scav.c-inc b/rts/sm/Scav.c-inc
index ae6a6bba59..a75f6ee04e 100644
--- a/rts/sm/Scav.c-inc
+++ b/rts/sm/Scav.c-inc
@@ -14,16 +14,20 @@
// This file is #included into Scav.c, twice: firstly without PARALLEL_GC
// defined, the second time with (in THREADED_RTS builds).
-#ifndef PARALLEL_GC
-#define scavenge_block(a) scavenge_block1(a)
-#define evacuate(a) evacuate1(a)
-#define recordMutableGen_GC(a,b) recordMutableGen(a,b)
+#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
+# define scavenge_block(a) scavenge_block1(a)
+# define evacuate(a) evacuate1(a)
+# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
#else
-#undef scavenge_block
-#undef evacuate
-#undef recordMutableGen_GC
+# undef scavenge_block
+# undef evacuate
+# undef recordMutableGen_GC
+# if !defined(THREADED_RTS)
+# define scavenge_block1(a) scavenge_block(a)
+# endif
#endif
+
static void scavenge_block (bdescr *bd);
/* -----------------------------------------------------------------------------
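
The PARALLEL_GC shuffle in Evac.c and Scav.c is the other half of the patch: each .c-inc file is a textual template, compiled once per GC flavour, with macros renaming the entry points so both flavours can coexist in a threaded build. A sketch of the shape this leaves Evac.c in (comments added; the mechanism for Scav.c is identical):

/* First pass: the lock-free code.  In a THREADED_RTS build the
 * .c-inc header renames its entry points to copy1(), evacuate1()
 * etc. for this pass. */
#undef PARALLEL_GC
#include "Evac.c-inc"

/* Second pass, threaded builds only: the CAS-based PARALLEL_GC
 * variants are compiled under the real names. */
#ifdef THREADED_RTS
#define PARALLEL_GC
#include "Evac.c-inc"
#endif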