author     Johan Tibell <johan.tibell@gmail.com>   2014-03-23 12:06:56 +0100
committer  Johan Tibell <johan.tibell@gmail.com>   2014-03-29 11:24:07 +0100
commit     90329b6cc183b3cd05956ae6bdeb6ac6951549c2
tree       ba7d31656fe75fad2555c8a66b7ebd13dd9ebeb1
parent     4c8edfd2c722504baaa6896d194fd3a8c3f9b652
Add SmallArray# and SmallMutableArray# types
These array types are smaller than Array# and MutableArray# and are faster when the array size is small, as they don't have the overhead of a card table. Having no card table reduces the closure size by two words in the typical small-array case and means less work when updating or garbage-collecting the array.

This reduces both the runtime and memory allocation by 8.8% on my insert benchmark for the HashMap type in the unordered-containers package, which makes heavy use of small arrays. With tuned GC settings (i.e. `+RTS -A6M`) the runtime reduction is 15%.

Fixes #8923.
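Where the two words go: MutArrPtrs carries a size field and an appended card table (which costs at least one extra word for any non-empty array), and SmallMutArrPtrs drops both. Below is a simplified sketch of the two layouts, paraphrased from GHC's includes/rts/storage/Closures.h and ClosureMacros.h as of this commit; the stand-in typedefs are illustrative only, not the real RTS definitions.

    #include <stdint.h>

    /* Illustrative stand-ins for the real RTS types. */
    typedef uintptr_t StgWord;
    typedef struct { int i; } StgInfoTable;
    typedef struct { const StgInfoTable *info; } StgHeader;
    typedef struct { StgHeader header; } StgClosure;

    /* Large mutable arrays: a size field plus a card table appended
     * after the elements, so the GC can rescan only the dirty parts
     * of a big array. */
    typedef struct {
        StgHeader   header;
        StgWord     ptrs;     /* number of element pointers */
        StgWord     size;     /* ptrs + card-table words */
        StgClosure *payload[];
    } StgMutArrPtrs;

    /* Small mutable arrays: no size field and no card table, saving
     * two words in the common case.  A dirty small array is simply
     * rescanned in full, which is cheap precisely because it is small. */
    typedef struct {
        StgHeader   header;
        StgWord     ptrs;
        StgClosure *payload[];
    } StgSmallMutArrPtrs;

    #define sizeofW(t) (sizeof(t) / sizeof(StgWord))

    /* Closure size in words, as used by the scavenging code below. */
    static StgWord small_mut_arr_ptrs_sizeW(StgSmallMutArrPtrs *a)
    {
        return sizeofW(StgSmallMutArrPtrs) + a->ptrs;
    }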
Diffstat (limited to 'rts/sm/Scav.c')
-rw-r--r--  rts/sm/Scav.c | 148
1 file changed, 148 insertions(+), 0 deletions(-)
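The same scavenging pattern is added in three places below (scavenge_block, scavenge_mark_stack, and scavenge_one). Condensed into a single hypothetical helper for readability; the helper name, the gc_thread stub, and the extern declarations are illustrative only and reuse the stand-in types from the sketch above:

    /* Illustrative externs standing in for the real RTS symbols. */
    typedef int rtsBool;
    #define rtsFalse 0
    #define rtsTrue  1

    extern const StgInfoTable stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
    extern const StgInfoTable stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
    extern void evacuate(StgClosure **p);
    extern struct gc_thread_stub {
        rtsBool eager_promotion;
        rtsBool failed_to_evac;
    } *gct;

    /* Hypothetical condensation of the SMALL_MUT_ARR_PTRS_CLEAN/DIRTY
     * cases repeated in the three hunks below. */
    static void scavenge_small_mut_arr_ptrs(StgSmallMutArrPtrs *a)
    {
        /* Don't eagerly promote the elements: if they all end up in
         * the same or an older generation, the array can be re-marked
         * CLEAN and skipped during subsequent minor GCs. */
        rtsBool saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        StgWord *p    = (StgWord *)a->payload;
        StgWord *next = (StgWord *)a + small_mut_arr_ptrs_sizeW(a);
        for (; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        /* Re-mark CLEAN only if every element was already in range. */
        ((StgClosure *)a)->header.info = gct->failed_to_evac
            ? &stg_SMALL_MUT_ARR_PTRS_DIRTY_info
            : &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;

        /* A mutable array always goes (back) on the mutable list. */
        gct->failed_to_evac = rtsTrue;
    }

The FROZEN/FROZEN0 cases in the hunks follow the same shape, except that eager promotion stays enabled: a frozen array that still refers into a younger generation is tagged FROZEN0 so that it, too, lands on the mutable list.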
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 5b1e5d0fc8..c35444bbaa 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -661,6 +661,54 @@ scavenge_block (bdescr *bd)
break;
}
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ // follow everything
+ {
+ StgPtr next;
+
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
+ gct->eager_promotion = rtsFalse;
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
+ break;
+ }
+
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ // follow everything
+ {
+ StgPtr next;
+
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ // If we're going to put this object on the mutable list, then
+ // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+ }
+ break;
+ }
+
case TSO:
{
scavengeTSO((StgTSO *)p);
@@ -1016,6 +1064,56 @@ scavenge_mark_stack(void)
break;
}
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ // follow everything
+ {
+ StgPtr next;
+ rtsBool saved_eager;
+
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+ gct->eager_promotion = saved_eager;
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->failed_to_evac = rtsTrue; // mutable anyhow.
+ break;
+ }
+
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ // follow everything
+ {
+ StgPtr next, q = p;
+
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ // If we're going to put this object on the mutable list, then
+ // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+ }
+ break;
+ }
+
case TSO:
{
scavengeTSO((StgTSO*)p);
@@ -1281,6 +1379,56 @@ scavenge_one(StgPtr p)
break;
}
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ {
+ StgPtr next, q;
+ rtsBool saved_eager;
+
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
+ q = p;
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+ gct->eager_promotion = saved_eager;
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->failed_to_evac = rtsTrue;
+ break;
+ }
+
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ {
+ // follow everything
+ StgPtr next, q=p;
+
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ // If we're going to put this object on the mutable list, then
+ // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+ }
+ break;
+ }
+
case TSO:
{
scavengeTSO((StgTSO*)p);