author      Simon Marlow <marlowsd@gmail.com>   2009-08-19 14:11:06 +0000
committer   Simon Marlow <marlowsd@gmail.com>   2009-08-19 14:11:06 +0000
commit      68bc07fed38228a1f9fd1885333c7412f57c7e17 (patch)
tree        5f7233818c77a38f4d8603b1160951147401a49a
parent      0d70162da789d577579b1a34bd85737047223431 (diff)
download    haskell-68bc07fed38228a1f9fd1885333c7412f57c7e17.tar.gz
use cas() to claim the closure in copyPart(), to match copy_tag()
copyPart() was still using the old WHITEHOLE mechanism for locking the closure. I don't think this fixes any actual bugs, but it removes a gratuitous difference between two functions that should look similar.
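For context, here is a minimal, standalone sketch of the claim-by-CAS idiom that copy_tag() already uses and that this patch applies to copyPart(). It is illustrative only: C11 <stdatomic.h> stands in for the RTS's cas(), and the closure header and forwarding-pointer encoding below are simplified stand-ins, not GHC's StgClosure layout or MK_FORWARDING_PTR().

/*
 * Illustrative sketch only (not GHC's code): claim a closure by CAS'ing
 * its header word from the info pointer we read earlier to a forwarding
 * pointer, after the payload has already been copied to 'to'.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    _Atomic uintptr_t header;  /* info pointer, or forwarding pointer once evacuated */
} closure;

/* Tag the low bit to mark the word as a forwarding pointer (stand-in encoding). */
#define MK_FWD(to) ((uintptr_t)(to) | 1)

/*
 * Try to publish the copy at 'to'.  Returns 1 if this thread won the race;
 * 0 if another GC thread already evacuated src, in which case the caller
 * should follow src's forwarding pointer instead (the patched copyPart()
 * does this by calling evacuate(p) again and returning rtsFalse).
 */
static int claim_by_cas(closure *src, uintptr_t expected_info, void *to)
{
    uintptr_t expected = expected_info;
    return atomic_compare_exchange_strong(&src->header, &expected, MK_FWD(to));
}

int main(void)
{
    closure c;
    int copy1, copy2;
    atomic_init(&c.header, 0x1000);   /* pretend 0x1000 is an info pointer */
    printf("first claim:  %d\n", claim_by_cas(&c, 0x1000, &copy1));  /* 1: we won */
    printf("second claim: %d\n", claim_by_cas(&c, 0x1000, &copy2));  /* 0: already forwarded */
    return 0;
}

Compared with the old WHITEHOLE scheme, the CAS approach never spins: a losing thread simply observes the forwarding pointer installed by the winner and re-evacuates through it.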
-rw-r--r--   rts/sm/Evac.c   |  50
1 file changed, 22 insertions(+), 28 deletions(-)
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index ae843bd575..b7119148c8 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -112,7 +112,8 @@ copy_tag(StgClosure **p, const StgInfoTable *info,
#if defined(PARALLEL_GC)
{
const StgInfoTable *new_info;
- new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
+ new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info,
+ (W_)info, MK_FORWARDING_PTR(to));
if (new_info != info) {
return evacuate(p); // does the failed_to_evac stuff
} else {
@@ -168,46 +169,39 @@ copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
* used to optimise evacuation of BLACKHOLEs.
*/
static rtsBool
-copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
+copyPart(StgClosure **p, const StgInfoTable *info, StgClosure *src,
+ nat size_to_reserve, nat size_to_copy, step *stp)
{
StgPtr to, from;
nat i;
- StgWord info;
-#if defined(PARALLEL_GC)
-spin:
- info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
- if (info == (W_)&stg_WHITEHOLE_info) {
-#ifdef PROF_SPIN
- whitehole_spin++;
-#endif
- goto spin;
- }
- if (IS_FORWARDING_PTR(info)) {
- src->header.info = (const StgInfoTable *)info;
- evacuate(p); // does the failed_to_evac stuff
- return rtsFalse;
- }
-#else
- info = (W_)src->header.info;
-#endif
-
to = alloc_for_copy(size_to_reserve, stp);
- *p = (StgClosure *)to;
TICK_GC_WORDS_COPIED(size_to_copy);
from = (StgPtr)src;
- to[0] = info;
+ to[0] = (W_)info;
for (i = 1; i < size_to_copy; i++) { // unroll for small i
to[i] = from[i];
}
#if defined(PARALLEL_GC)
- write_barrier();
+ {
+ const StgInfoTable *new_info;
+ new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info,
+ (W_)info, MK_FORWARDING_PTR(to));
+ if (new_info != info) {
+ evacuate(p); // does the failed_to_evac stuff
+ return rtsFalse;
+ } else {
+ *p = (StgClosure*)to;
+ }
+ }
+#else
+ src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
+ *p = (StgClosure*)to;
#endif
- src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
-
+
#ifdef PROFILING
// We store the size of the just evacuated object in the LDV word so that
// the profiler can guess the position of the next object later.
@@ -639,7 +633,7 @@ loop:
case CAF_BLACKHOLE:
case BLACKHOLE:
- copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
+ copyPart(p,info,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
return;
case THUNK_SELECTOR:
@@ -711,7 +705,7 @@ loop:
StgPtr r, s;
rtsBool mine;
- mine = copyPart(p,(StgClosure *)tso, tso_sizeW(tso),
+ mine = copyPart(p,info,(StgClosure *)tso, tso_sizeW(tso),
sizeofW(StgTSO), stp);
if (mine) {
new_tso = (StgTSO *)*p;