author     Johan Tibell <johan.tibell@gmail.com>  2014-03-23 12:06:56 +0100
committer  Johan Tibell <johan.tibell@gmail.com>  2014-03-29 11:24:07 +0100
commit     90329b6cc183b3cd05956ae6bdeb6ac6951549c2
tree       ba7d31656fe75fad2555c8a66b7ebd13dd9ebeb1 /rts/sm
parent     4c8edfd2c722504baaa6896d194fd3a8c3f9b652
Add SmallArray# and SmallMutableArray# types
These array types are smaller than Array# and MutableArray# and are
faster when the array size is small, as they don't have the overhead
of a card table. Having no card table reduces the closure size by 2
words in the typical small array case and means less work when
updating or garbage collecting the array.
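For context, the two heap layouts differ roughly as sketched below. This
is an approximation of the definitions in includes/rts/storage/Closures.h;
the size field, plus the card-table words it accounts for in the payload,
is exactly what the small variant drops, which is where the two-word
saving in the typical case comes from.

    /* Sketch of the two closure layouts; approximates
     * includes/rts/storage/Closures.h. */
    typedef struct {
        StgHeader   header;
        StgWord     ptrs;       // number of element pointers
        StgWord     size;       // ptrs + card-table words (extra word #1)
        StgClosure *payload[];  // elements, then the card table -- at
                                // least one more word (extra word #2)
                                // even for a small array
    } StgMutArrPtrs;

    typedef struct {
        StgHeader   header;
        StgWord     ptrs;       // number of element pointers
        StgClosure *payload[];  // just the elements; no card table
    } StgSmallMutArrPtrs;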
Reduces both the runtime and memory allocation by 8.8% on my insert
benchmark for the HashMap type in the unordered-containers package,
which makes use of lots of small arrays. With tuned GC settings
(i.e. `+RTS -A6M`) the runtime reduction is 15%.
Fixes #8923.
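The "less work when updating" point is the write barrier: a write to a
MutableArray# must also mark the card covering the written slot, so the
GC can later rescan only the dirty chunks of a large array, whereas a
SmallMutableArray# write is just the store. The sketch below is
illustrative C only, condensed from the Cmm write barrier in
rts/PrimOps.cmm; the exact cards[] layout and the omitted switch of the
header to the *_DIRTY info table are simplifications.

    #include "Rts.h"

    // Each card covers 2^MUT_ARR_PTRS_CARD_BITS = 128 elements.
    // Illustrative only: the real barrier is written in Cmm, and it
    // also flips the array's info pointer to *_DIRTY (omitted here).
    static void write_array_slot (StgMutArrPtrs *arr, StgWord i,
                                  StgClosure *v)
    {
        arr->payload[i] = v;
        // The card table is one byte per card, stored after the elements.
        StgWord8 *cards = (StgWord8 *)&arr->payload[arr->ptrs];
        cards[i >> MUT_ARR_PTRS_CARD_BITS] = 1;  // dirty the covering card
    }

    static void write_small_array_slot (StgSmallMutArrPtrs *arr, StgWord i,
                                        StgClosure *v)
    {
        arr->payload[i] = v;   // no card to mark
    }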
Diffstat (limited to 'rts/sm')
 rts/sm/Compact.c |  31
 rts/sm/Evac.c    |   8
 rts/sm/Scav.c    | 148
 3 files changed, 187 insertions(+), 0 deletions(-)
diff --git a/rts/sm/Compact.c b/rts/sm/Compact.c
index e9973d3f8a..8ae72a96e0 100644
--- a/rts/sm/Compact.c
+++ b/rts/sm/Compact.c
@@ -495,6 +495,21 @@ update_fwd_large( bdescr *bd )
             continue;
         }
 
+      case SMALL_MUT_ARR_PTRS_CLEAN:
+      case SMALL_MUT_ARR_PTRS_DIRTY:
+      case SMALL_MUT_ARR_PTRS_FROZEN:
+      case SMALL_MUT_ARR_PTRS_FROZEN0:
+          // follow everything
+      {
+          StgSmallMutArrPtrs *a;
+
+          a = (StgSmallMutArrPtrs*)p;
+          for (p = (P_)a->payload; p < (P_)&a->payload[a->ptrs]; p++) {
+              thread((StgClosure **)p);
+          }
+          continue;
+      }
+
       case STACK:
         {
             StgStack *stack = (StgStack*)p;
@@ -680,6 +695,22 @@ thread_obj (StgInfoTable *info, StgPtr p)
         return (StgPtr)a + mut_arr_ptrs_sizeW(a);
     }
+
+    case SMALL_MUT_ARR_PTRS_CLEAN:
+    case SMALL_MUT_ARR_PTRS_DIRTY:
+    case SMALL_MUT_ARR_PTRS_FROZEN:
+    case SMALL_MUT_ARR_PTRS_FROZEN0:
+        // follow everything
+    {
+        StgSmallMutArrPtrs *a;
+
+        a = (StgSmallMutArrPtrs *)p;
+        for (p = (P_)a->payload; p < (P_)&a->payload[a->ptrs]; p++) {
+            thread((StgClosure **)p);
+        }
+
+        return (StgPtr)a + small_mut_arr_ptrs_sizeW(a);
+    }
 
     case TSO:
         return thread_TSO((StgTSO *)p);
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 577edc38f5..4a550cdde5 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -716,6 +716,14 @@ loop:
       copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
       return;
 
+  case SMALL_MUT_ARR_PTRS_CLEAN:
+  case SMALL_MUT_ARR_PTRS_DIRTY:
+  case SMALL_MUT_ARR_PTRS_FROZEN:
+  case SMALL_MUT_ARR_PTRS_FROZEN0:
+      // just copy the block
+      copy(p,info,q,small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)q),gen_no);
+      return;
+
   case TSO:
       copy(p,info,q,sizeofW(StgTSO),gen_no);
       return;
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 5b1e5d0fc8..c35444bbaa 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -661,6 +661,54 @@ scavenge_block (bdescr *bd)
         break;
     }
 
+    case SMALL_MUT_ARR_PTRS_CLEAN:
+    case SMALL_MUT_ARR_PTRS_DIRTY:
+        // follow everything
+    {
+        StgPtr next;
+
+        // We don't eagerly promote objects pointed to by a mutable
+        // array, but if we find the array only points to objects in
+        // the same or an older generation, we mark it "clean" and
+        // avoid traversing it during minor GCs.
+        gct->eager_promotion = rtsFalse;
+        next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+        for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+            evacuate((StgClosure **)p);
+        }
+        gct->eager_promotion = saved_eager_promotion;
+
+        if (gct->failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+        }
+
+        gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
+        break;
+    }
+
+    case SMALL_MUT_ARR_PTRS_FROZEN:
+    case SMALL_MUT_ARR_PTRS_FROZEN0:
+        // follow everything
+    {
+        StgPtr next;
+
+        next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+        for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+            evacuate((StgClosure **)p);
+        }
+
+        // If we're going to put this object on the mutable list, then
+        // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+        if (gct->failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+        }
+        break;
+    }
+
     case TSO:
     {
         scavengeTSO((StgTSO *)p);
@@ -1016,6 +1064,56 @@ scavenge_mark_stack(void)
         break;
     }
 
+    case SMALL_MUT_ARR_PTRS_CLEAN:
+    case SMALL_MUT_ARR_PTRS_DIRTY:
+        // follow everything
+    {
+        StgPtr next;
+        rtsBool saved_eager;
+
+        // We don't eagerly promote objects pointed to by a mutable
+        // array, but if we find the array only points to objects in
+        // the same or an older generation, we mark it "clean" and
+        // avoid traversing it during minor GCs.
+        saved_eager = gct->eager_promotion;
+        gct->eager_promotion = rtsFalse;
+        next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+        for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+            evacuate((StgClosure **)p);
+        }
+        gct->eager_promotion = saved_eager;
+
+        if (gct->failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+        }
+
+        gct->failed_to_evac = rtsTrue; // mutable anyhow.
+        break;
+    }
+
+    case SMALL_MUT_ARR_PTRS_FROZEN:
+    case SMALL_MUT_ARR_PTRS_FROZEN0:
+        // follow everything
+    {
+        StgPtr next, q = p;
+
+        next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+        for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+            evacuate((StgClosure **)p);
+        }
+
+        // If we're going to put this object on the mutable list, then
+        // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+        if (gct->failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+        }
+        break;
+    }
+
     case TSO:
     {
         scavengeTSO((StgTSO*)p);
@@ -1281,6 +1379,56 @@ scavenge_one(StgPtr p)
         break;
     }
 
+    case SMALL_MUT_ARR_PTRS_CLEAN:
+    case SMALL_MUT_ARR_PTRS_DIRTY:
+    {
+        StgPtr next, q;
+        rtsBool saved_eager;
+
+        // We don't eagerly promote objects pointed to by a mutable
+        // array, but if we find the array only points to objects in
+        // the same or an older generation, we mark it "clean" and
+        // avoid traversing it during minor GCs.
+        saved_eager = gct->eager_promotion;
+        gct->eager_promotion = rtsFalse;
+        q = p;
+        next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+        for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+            evacuate((StgClosure **)p);
+        }
+        gct->eager_promotion = saved_eager;
+
+        if (gct->failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+        }
+
+        gct->failed_to_evac = rtsTrue;
+        break;
+    }
+
+    case SMALL_MUT_ARR_PTRS_FROZEN:
+    case SMALL_MUT_ARR_PTRS_FROZEN0:
+    {
+        // follow everything
+        StgPtr next, q = p;
+
+        next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+        for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+            evacuate((StgClosure **)p);
+        }
+
+        // If we're going to put this object on the mutable list, then
+        // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+        if (gct->failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+        }
+        break;
+    }
+
     case TSO:
     {
         scavengeTSO((StgTSO*)p);
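The small_mut_arr_ptrs_sizeW calls throughout the diff are the
small-array counterpart of mut_arr_ptrs_sizeW. Sketched below as plain C
(the real ones are EXTERN_INLINE functions in
includes/rts/storage/ClosureMacros.h), they show why the Evac.c case is
a single size computation plus block copy, with no card-table words to
account for:

    // Sketch, approximating includes/rts/storage/ClosureMacros.h.
    static inline StgWord mut_arr_ptrs_sizeW (StgMutArrPtrs *a)
    {
        // 'size' already counts the elements plus the card-table words
        return sizeofW(StgMutArrPtrs) + a->size;
    }

    static inline StgWord small_mut_arr_ptrs_sizeW (StgSmallMutArrPtrs *a)
    {
        // header, the ptrs field, and the elements themselves; nothing else
        return sizeofW(StgSmallMutArrPtrs) + a->ptrs;
    }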