Diffstat (limited to 'rts/sm/Scav.c')
 -rw-r--r--  rts/sm/Scav.c  148
 1 file changed, 148 insertions, 0 deletions
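All three hunks below add the same handling for small pointer arrays to scavenge_block, scavenge_mark_stack and scavenge_one: compute the closure's size in words with small_mut_arr_ptrs_sizeW, evacuate every pointer in the payload, and then rewrite the info pointer (CLEAN vs. DIRTY for mutable arrays, FROZEN vs. FROZEN0 for frozen ones) depending on whether anything failed to evacuate. As a rough, self-contained sketch of that pattern only (not the RTS code): the struct, the evacuate_ptr callback and the info-pointer arguments below are stand-ins invented for the example, where the real code uses StgSmallMutArrPtrs, evacuate(), gct->failed_to_evac and the stg_SMALL_MUT_ARR_PTRS_*_info tables.

/* Illustrative sketch only -- see the caveats above. */
#include <stddef.h>
#include <stdbool.h>

typedef struct {
    const void *info;       /* stand-in for the closure's info pointer */
    size_t      ptrs;       /* number of elements in the array         */
    void       *payload[];  /* the element pointers, stored inline     */
} SmallArr;

/* Size of the closure in words: header words plus one word per element.
 * This mirrors what small_mut_arr_ptrs_sizeW() computes. */
static size_t small_arr_size_words(const SmallArr *arr)
{
    return sizeof(SmallArr) / sizeof(void *) + arr->ptrs;
}

/* evacuate_ptr stands in for the RTS evacuate(); here it returns true when
 * the pointed-to object could not be evacuated (the gct->failed_to_evac
 * case in the real code). */
static void scavenge_small_arr(SmallArr *arr,
                               bool (*evacuate_ptr)(void **),
                               const void *clean_info,
                               const void *dirty_info)
{
    /* Walk the payload exactly as the hunks do: p starts at the payload and
     * runs up to "next", one word past the end of the closure. */
    void **p      = arr->payload;
    void **next   = (void **)arr + small_arr_size_words(arr);
    bool   failed = false;

    for (; p < next; p++) {
        if (evacuate_ptr(p)) {
            failed = true;
        }
    }
    /* Re-tag the closure: "dirty" if any element had to stay in a younger
     * generation, "clean" otherwise. */
    arr->info = failed ? dirty_info : clean_info;
}

In the hunks themselves, the mutable (CLEAN/DIRTY) cases additionally set gct->failed_to_evac so the array always stays on the mutable list, whereas frozen arrays are only re-tagged to FROZEN0 when an element could not be evacuated.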
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 5b1e5d0fc8..c35444bbaa 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -661,6 +661,54 @@ scavenge_block (bdescr *bd)
break;
}
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ // follow everything
+ {
+ StgPtr next;
+
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
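+ // (saved_eager_promotion, used below, is the copy of gct->eager_promotion
+ // taken at the top of scavenge_block.)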
+ gct->eager_promotion = rtsFalse;
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
+ break;
+ }
+
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ // follow everything
+ {
+ StgPtr next;
+
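+ // Frozen arrays are immutable, so unlike the mutable case above we leave
+ // eager promotion alone and do not force the array onto the mutable list.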
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ // If we're going to put this object on the mutable list, then
+ // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+ }
+ break;
+ }
+
case TSO:
{
scavengeTSO((StgTSO *)p);
@@ -1016,6 +1064,56 @@ scavenge_mark_stack(void)
break;
}
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ // follow everything
+ {
+ StgPtr next;
+ rtsBool saved_eager;
+
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+ gct->eager_promotion = saved_eager;
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->failed_to_evac = rtsTrue; // mutable anyhow.
+ break;
+ }
+
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ // follow everything
+ {
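+ // q records the start of the closure: the loop below advances p over
+ // the payload, and the info pointer is rewritten through q afterwards.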
+ StgPtr next, q = p;
+
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ // If we're going to put this object on the mutable list, then
+ // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+ }
+ break;
+ }
+
case TSO:
{
scavengeTSO((StgTSO*)p);
@@ -1281,6 +1379,56 @@ scavenge_one(StgPtr p)
break;
}
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ {
+ StgPtr next, q;
+ rtsBool saved_eager;
+
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
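+ // q records the start of the closure before p is advanced over the payload.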
+ q = p;
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+ gct->eager_promotion = saved_eager;
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->failed_to_evac = rtsTrue; // mutable anyhow.
+ break;
+ }
+
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ {
+ // follow everything
+ StgPtr next, q = p;
+
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ // If we're going to put this object on the mutable list, then
+ // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+ }
+ break;
+ }
+
case TSO:
{
scavengeTSO((StgTSO*)p);