summaryrefslogtreecommitdiff
path: root/rts/Sparks.c
diff options
context:
space:
mode:
Author:    Simon Marlow <marlowsd@gmail.com>  2011-02-14 12:38:58 +0000
Committer: Simon Marlow <marlowsd@gmail.com>  2011-02-14 12:38:58 +0000
Commit:    9ef55740aeffbc39b0cb65007d278cb3fd4be8f7 (patch)
Tree:      f8f6f593d0cd5f988200848dc13602eb182cee09 /rts/Sparks.c
Parent:    a96f61282b0c5981be5c5c2d442b9b16195fb746 (diff)
Download:  haskell-9ef55740aeffbc39b0cb65007d278cb3fd4be8f7.tar.gz
pruneSparkQueue: check for tagged pointers
This was a bug in 6.12.3. I think the problem no longer occurs, due to the way sparks are treated as weak pointers, but it doesn't hurt to test for tagged pointers anyway: better to do the test than rely on a subtle invariant.
Diffstat (limited to 'rts/Sparks.c')
-rw-r--r--  rts/Sparks.c  55
1 file changed, 33 insertions, 22 deletions
diff --git a/rts/Sparks.c b/rts/Sparks.c
index 857921260b..ad08f3b577 100644
--- a/rts/Sparks.c
+++ b/rts/Sparks.c
@@ -197,31 +197,42 @@ pruneSparkQueue (Capability *cap)
// We have to be careful here: in the parallel GC, another
// thread might evacuate this closure while we're looking at it,
// so grab the info pointer just once.
- info = spark->header.info;
- if (IS_FORWARDING_PTR(info)) {
- tmp = (StgClosure*)UN_FORWARDING_PTR(info);
- /* if valuable work: shift inside the pool */
- if (closure_SHOULD_SPARK(tmp)) {
- elements[botInd] = tmp; // keep entry (new address)
- botInd++;
- n++;
- } else {
- pruned_sparks++; // discard spark
- cap->sparks_fizzled++;
- }
- } else if (HEAP_ALLOCED(spark) &&
- (Bdescr((P_)spark)->flags & BF_EVACUATED)) {
- if (closure_SHOULD_SPARK(spark)) {
- elements[botInd] = spark; // keep entry (new address)
- botInd++;
- n++;
+ if (GET_CLOSURE_TAG(spark) != 0) {
+ // Tagged pointer is a value, so the spark has fizzled. It
+ // probably never happens that we get a tagged pointer in
+ // the spark pool, because we would have pruned the spark
+ // during the previous GC cycle if it turned out to be
+ // evaluated, but it doesn't hurt to have this check for
+ // robustness.
+ pruned_sparks++;
+ cap->sparks_fizzled++;
+ } else {
+ info = spark->header.info;
+ if (IS_FORWARDING_PTR(info)) {
+ tmp = (StgClosure*)UN_FORWARDING_PTR(info);
+ /* if valuable work: shift inside the pool */
+ if (closure_SHOULD_SPARK(tmp)) {
+ elements[botInd] = tmp; // keep entry (new address)
+ botInd++;
+ n++;
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_fizzled++;
+ }
+ } else if (HEAP_ALLOCED(spark) &&
+ (Bdescr((P_)spark)->flags & BF_EVACUATED)) {
+ if (closure_SHOULD_SPARK(spark)) {
+ elements[botInd] = spark; // keep entry (new address)
+ botInd++;
+ n++;
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_fizzled++;
+ }
} else {
pruned_sparks++; // discard spark
- cap->sparks_fizzled++;
+ cap->sparks_gcd++;
}
- } else {
- pruned_sparks++; // discard spark
- cap->sparks_gcd++;
}
currInd++;