author    Johan Tibell <johan.tibell@gmail.com>    2014-03-23 12:06:56 +0100
committer Johan Tibell <johan.tibell@gmail.com>    2014-03-29 11:24:07 +0100
commit    90329b6cc183b3cd05956ae6bdeb6ac6951549c2 (patch)
tree      ba7d31656fe75fad2555c8a66b7ebd13dd9ebeb1 /rts
parent    4c8edfd2c722504baaa6896d194fd3a8c3f9b652 (diff)
Add SmallArray# and SmallMutableArray# types
These array types are smaller than Array# and MutableArray# and are faster when the array size is small, as they don't have the overhead of a card table. Having no card table reduces the closure size by two words in the typical small-array case and means less work when updating or GC'ing the array.

Reduces both the runtime and memory allocation by 8.8% on my insert benchmark for the HashMap type in the unordered-containers package, which makes use of lots of small arrays. With tuned GC settings (i.e. `+RTS -A6M`) the runtime reduction is 15%.

Fixes #8923.
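As orientation for readers of the patch, here is a minimal usage sketch from the Haskell side. It is illustration only, not part of the diff: it assumes the newSmallArray#, writeSmallArray#, unsafeFreezeSmallArray# and indexSmallArray# primops that the compiler-side half of this change exposes via GHC.Exts, and it wraps the unlifted SmallArray# in a boxed constructor, as user-facing libraries typically do.

    {-# LANGUAGE MagicHash, UnboxedTuples #-}
    module SmallArrayDemo (SmallArray(..), smallArrayFromList, indexSmall) where

    import GHC.Exts
    import GHC.ST (ST(..), runST)

    -- Boxed wrapper so the unlifted SmallArray# can be returned from runST.
    data SmallArray a = SmallArray (SmallArray# a)

    -- Allocate a SmallMutableArray#, fill it from the list, then freeze it.
    smallArrayFromList :: [a] -> SmallArray a
    smallArrayFromList xs =
        runST (ST (\s0 ->
            case length xs of
              I# len ->
                case newSmallArray# len (error "smallArrayFromList: uninitialised") s0 of
                  (# s1, marr #) ->
                    let fill _ []     s = s
                        fill i (y:ys) s = fill (i +# 1#) ys (writeSmallArray# marr i y s)
                    in case unsafeFreezeSmallArray# marr (fill 0# xs s1) of
                         (# s2, arr #) -> (# s2, SmallArray arr #)))

    -- Read element i with no bounds check, like indexArray# for the large arrays.
    indexSmall :: SmallArray a -> Int -> a
    indexSmall (SmallArray arr) (I# i) =
        case indexSmallArray# arr i of
          (# x #) -> x

For example, indexSmall (smallArrayFromList "abc") 1 evaluates to 'b'.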
Diffstat (limited to 'rts')
-rw-r--r--  rts/CheckUnload.c         8
-rw-r--r--  rts/ClosureFlags.c        8
-rw-r--r--  rts/LdvProfile.c          4
-rw-r--r--  rts/Linker.c              9
-rw-r--r--  rts/PrimOps.cmm         118
-rw-r--r--  rts/Printer.c            15
-rw-r--r--  rts/ProfHeap.c            8
-rw-r--r--  rts/RetainerProfile.c    12
-rw-r--r--  rts/StgMiscClosures.cmm  12
-rw-r--r--  rts/sm/Compact.c         31
-rw-r--r--  rts/sm/Evac.c             8
-rw-r--r--  rts/sm/Scav.c           148
12 files changed, 379 insertions, 2 deletions
diff --git a/rts/CheckUnload.c b/rts/CheckUnload.c
index 8692dea8bf..f1f454ceaf 100644
--- a/rts/CheckUnload.c
+++ b/rts/CheckUnload.c
@@ -198,6 +198,14 @@ static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
prim = rtsTrue;
size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
break;
+
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ prim = rtsTrue;
+ size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
+ break;
case TSO:
prim = rtsTrue;
diff --git a/rts/ClosureFlags.c b/rts/ClosureFlags.c
index 020f28438a..c43437dc04 100644
--- a/rts/ClosureFlags.c
+++ b/rts/ClosureFlags.c
@@ -81,9 +81,13 @@ StgWord16 closure_flags[] = {
[ATOMICALLY_FRAME] = ( _BTM ),
[CATCH_RETRY_FRAME] = ( _BTM ),
[CATCH_STM_FRAME] = ( _BTM ),
- [WHITEHOLE] = ( 0 )
+ [WHITEHOLE] = ( 0 ),
+ [SMALL_MUT_ARR_PTRS_CLEAN] = (_HNF| _NS| _MUT|_UPT ),
+ [SMALL_MUT_ARR_PTRS_DIRTY] = (_HNF| _NS| _MUT|_UPT ),
+ [SMALL_MUT_ARR_PTRS_FROZEN0] = (_HNF| _NS| _MUT|_UPT ),
+ [SMALL_MUT_ARR_PTRS_FROZEN] = (_HNF| _NS| _UPT )
};
-#if N_CLOSURE_TYPES != 61
+#if N_CLOSURE_TYPES != 65
#error Closure types changed: update ClosureFlags.c!
#endif
diff --git a/rts/LdvProfile.c b/rts/LdvProfile.c
index d077f3caf7..4530969123 100644
--- a/rts/LdvProfile.c
+++ b/rts/LdvProfile.c
@@ -68,6 +68,10 @@ processHeapClosureForDead( StgClosure *c )
case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
case ARR_WORDS:
case WEAK:
case MUT_VAR_CLEAN:
diff --git a/rts/Linker.c b/rts/Linker.c
index fee6124965..9c73757a63 100644
--- a/rts/Linker.c
+++ b/rts/Linker.c
@@ -1168,6 +1168,15 @@ typedef struct _RtsSymbolVal {
SymI_HasProto(stg_thawArrayzh) \
SymI_HasProto(stg_newArrayArrayzh) \
SymI_HasProto(stg_casArrayzh) \
+ SymI_HasProto(stg_newSmallArrayzh) \
+ SymI_HasProto(stg_unsafeThawSmallArrayzh) \
+ SymI_HasProto(stg_cloneSmallArrayzh) \
+ SymI_HasProto(stg_cloneSmallMutableArrayzh) \
+ SymI_HasProto(stg_freezzeSmallArrayzh) \
+ SymI_HasProto(stg_thawSmallArrayzh) \
+ SymI_HasProto(stg_copySmallArrayzh) \
+ SymI_HasProto(stg_copySmallMutableArrayzh) \
+ SymI_HasProto(stg_casSmallArrayzh) \
SymI_HasProto(stg_newBCOzh) \
SymI_HasProto(stg_newByteArrayzh) \
SymI_HasProto(stg_casIntArrayzh) \
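The symbols registered above back the new Haskell-level primops. As a usage note (illustration only, not part of the patch), the copy-on-write update pattern that motivates them, and the unordered-containers benchmark mentioned in the commit message, looks roughly like the sketch below; it assumes the thawSmallArray#, writeSmallArray#, unsafeFreezeSmallArray# and sizeofSmallArray# exports that accompany this RTS change in GHC.Exts.

    {-# LANGUAGE MagicHash, UnboxedTuples #-}
    module UpdateSmallArrayDemo (SmallArray(..), updateSlot) where

    import GHC.Exts
    import GHC.ST (ST(..), runST)

    -- Boxed wrapper around the unlifted frozen array type.
    data SmallArray a = SmallArray (SmallArray# a)

    -- Copy-on-write update: thaw a fresh copy of the frozen array, overwrite
    -- one slot, and freeze the copy again.  The original array is untouched.
    updateSlot :: SmallArray a -> Int -> a -> SmallArray a
    updateSlot (SmallArray arr) (I# i) x =
        runST (ST (\s0 ->
            case thawSmallArray# arr 0# (sizeofSmallArray# arr) s0 of
              (# s1, marr #) ->
                case writeSmallArray# marr i x s1 of
                  s2 -> case unsafeFreezeSmallArray# marr s2 of
                          (# s3, arr' #) -> (# s3, SmallArray arr' #)))

The thaw step here corresponds to stg_thawSmallArrayzh, added to rts/PrimOps.cmm below.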
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index 2f697b43ce..df2119fc77 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -322,6 +322,124 @@ stg_newArrayArrayzh ( W_ n /* words */ )
/* -----------------------------------------------------------------------------
+ SmallArray primitives
+ -------------------------------------------------------------------------- */
+
+stg_newSmallArrayzh ( W_ n /* words */, gcptr init )
+{
+ W_ words, size, p;
+ gcptr arr;
+
+ again: MAYBE_GC(again);
+
+ words = BYTES_TO_WDS(SIZEOF_StgSmallMutArrPtrs) + n;
+ ("ptr" arr) = ccall allocate(MyCapability() "ptr",words);
+ TICK_ALLOC_PRIM(SIZEOF_StgSmallMutArrPtrs, WDS(n), 0);
+
+ SET_HDR(arr, stg_SMALL_MUT_ARR_PTRS_DIRTY_info, CCCS);
+ StgSmallMutArrPtrs_ptrs(arr) = n;
+
+ // Initialise all elements of the array with the value 'init'
+ p = arr + SIZEOF_StgSmallMutArrPtrs;
+ for:
+ if (p < arr + SIZEOF_StgSmallMutArrPtrs + WDS(n)) {
+ W_[p] = init;
+ p = p + WDS(1);
+ goto for;
+ }
+
+ return (arr);
+}
+
+stg_unsafeThawSmallArrayzh ( gcptr arr )
+{
+ // See stg_unsafeThawArrayzh
+ if (StgHeader_info(arr) != stg_SMALL_MUT_ARR_PTRS_FROZEN0_info) {
+ SET_INFO(arr, stg_SMALL_MUT_ARR_PTRS_DIRTY_info);
+ recordMutable(arr);
+ // must be done after SET_INFO, because it ASSERTs closure_MUTABLE()
+ return (arr);
+ } else {
+ SET_INFO(arr, stg_SMALL_MUT_ARR_PTRS_DIRTY_info);
+ return (arr);
+ }
+}
+
+stg_cloneSmallArrayzh ( gcptr src, W_ offset, W_ n )
+{
+ cloneSmallArray(stg_SMALL_MUT_ARR_PTRS_FROZEN_info, src, offset, n)
+}
+
+stg_cloneSmallMutableArrayzh ( gcptr src, W_ offset, W_ n )
+{
+ cloneSmallArray(stg_SMALL_MUT_ARR_PTRS_DIRTY_info, src, offset, n)
+}
+
+// We have to escape the "z" in the name.
+stg_freezzeSmallArrayzh ( gcptr src, W_ offset, W_ n )
+{
+ cloneSmallArray(stg_SMALL_MUT_ARR_PTRS_FROZEN_info, src, offset, n)
+}
+
+stg_thawSmallArrayzh ( gcptr src, W_ offset, W_ n )
+{
+ cloneSmallArray(stg_SMALL_MUT_ARR_PTRS_DIRTY_info, src, offset, n)
+}
+
+stg_copySmallArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n)
+{
+ W_ dst_p, src_p, bytes;
+
+ SET_INFO(dst, stg_SMALL_MUT_ARR_PTRS_DIRTY_info);
+
+ dst_p = dst + SIZEOF_StgSmallMutArrPtrs + WDS(dst_off);
+ src_p = src + SIZEOF_StgSmallMutArrPtrs + WDS(src_off);
+ bytes = WDS(n);
+ prim %memcpy(dst_p, src_p, bytes, WDS(1));
+
+ return ();
+}
+
+stg_copySmallMutableArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n)
+{
+ W_ dst_p, src_p, bytes;
+
+ SET_INFO(dst, stg_SMALL_MUT_ARR_PTRS_DIRTY_info);
+
+ dst_p = dst + SIZEOF_StgSmallMutArrPtrs + WDS(dst_off);
+ src_p = src + SIZEOF_StgSmallMutArrPtrs + WDS(src_off);
+ bytes = WDS(n);
+ if (src == dst) {
+ prim %memmove(dst_p, src_p, bytes, WDS(1));
+ } else {
+ prim %memcpy(dst_p, src_p, bytes, WDS(1));
+ }
+
+ return ();
+}
+
+// RRN: Uses the ticketed approach; see casMutVar
+stg_casSmallArrayzh ( gcptr arr, W_ ind, gcptr old, gcptr new )
+/* SmallMutableArray# s a -> Int# -> a -> a -> State# s -> (# State# s, Int#, Any a #) */
+{
+ gcptr h;
+ W_ p, len;
+
+ p = arr + SIZEOF_StgSmallMutArrPtrs + WDS(ind);
+ (h) = ccall cas(p, old, new);
+
+ if (h != old) {
+ // Failure, return what was there instead of 'old':
+ return (1,h);
+ } else {
+ // Compare and Swap Succeeded:
+ SET_HDR(arr, stg_SMALL_MUT_ARR_PTRS_DIRTY_info, CCCS);
+ return (0,new);
+ }
+}
+
+
+/* -----------------------------------------------------------------------------
MutVar primitives
-------------------------------------------------------------------------- */
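For completeness, a sketch of how the ticketed stg_casSmallArrayzh above is typically consumed from Haskell as a retry loop. Illustration only, not part of the patch; it assumes the casSmallArray# and readSmallArray# primops exposed via GHC.Exts by the compiler-side half of this change.

    {-# LANGUAGE MagicHash, UnboxedTuples #-}
    module CasSmallArrayDemo (atomicModifySlot) where

    import GHC.Exts
    import GHC.IO (IO(..))

    -- Apply f to the element at index i until the CAS succeeds, and return
    -- the value that was finally installed.  casSmallArray# follows the
    -- ticketed convention of stg_casSmallArrayzh: 0# plus the new value on
    -- success, 1# plus the element actually seen in the slot on failure.
    atomicModifySlot :: SmallMutableArray# RealWorld a -> Int -> (a -> a) -> IO a
    atomicModifySlot marr (I# i) f = IO (\s0 ->
        case readSmallArray# marr i s0 of
          (# s1, seen #) -> loop seen s1)
      where
        loop old s =
          let new = f old in
          case casSmallArray# marr i old new s of
            (# s', failed, current #)
              | isTrue# (failed ==# 0#) -> (# s', new #)   -- CAS succeeded
              | otherwise               -> loop current s' -- lost a race; retry

Returning the element actually seen lets the retry use it directly as the next expected value, which is the point of the ticketed approach referenced in the comment above.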
diff --git a/rts/Printer.c b/rts/Printer.c
index ca9ca496b5..b7125d9980 100644
--- a/rts/Printer.c
+++ b/rts/Printer.c
@@ -322,6 +322,21 @@ printClosure( StgClosure *obj )
debugBelch("MUT_ARR_PTRS_FROZEN(size=%" FMT_Word ")\n", (W_)((StgMutArrPtrs *)obj)->ptrs);
break;
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ debugBelch("SMALL_MUT_ARR_PTRS_CLEAN(size=%" FMT_Word ")\n",
+ (W_)((StgSmallMutArrPtrs *)obj)->ptrs);
+ break;
+
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ debugBelch("SMALL_MUT_ARR_PTRS_DIRTY(size=%" FMT_Word ")\n",
+ (W_)((StgSmallMutArrPtrs *)obj)->ptrs);
+ break;
+
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ debugBelch("SMALL_MUT_ARR_PTRS_FROZEN(size=%" FMT_Word ")\n",
+ (W_)((StgSmallMutArrPtrs *)obj)->ptrs);
+ break;
+
case MVAR_CLEAN:
case MVAR_DIRTY:
{
diff --git a/rts/ProfHeap.c b/rts/ProfHeap.c
index 6d78886e39..d21b14a26d 100644
--- a/rts/ProfHeap.c
+++ b/rts/ProfHeap.c
@@ -1025,6 +1025,14 @@ heapCensusChain( Census *census, bdescr *bd )
prim = rtsTrue;
size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
break;
+
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ prim = rtsTrue;
+ size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
+ break;
case TSO:
prim = rtsTrue;
diff --git a/rts/RetainerProfile.c b/rts/RetainerProfile.c
index 973e03bd1a..bdfc831b94 100644
--- a/rts/RetainerProfile.c
+++ b/rts/RetainerProfile.c
@@ -531,6 +531,18 @@ push( StgClosure *c, retainer c_child_r, StgClosure **first_child )
return;
break;
+ // StgSmallMutArrPtrs.ptrs, no SRT
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ init_ptrs(&se.info, ((StgSmallMutArrPtrs *)c)->ptrs,
+ (StgPtr)(((StgSmallMutArrPtrs *)c)->payload));
+ *first_child = find_ptrs(&se.info);
+ if (*first_child == NULL)
+ return;
+ break;
+
// layout.payload.ptrs, SRT
case FUN: // *c is a heap object.
case FUN_2_0:
diff --git a/rts/StgMiscClosures.cmm b/rts/StgMiscClosures.cmm
index 450b2d96c6..42ef39e134 100644
--- a/rts/StgMiscClosures.cmm
+++ b/rts/StgMiscClosures.cmm
@@ -604,6 +604,18 @@ INFO_TABLE(stg_MUT_ARR_PTRS_FROZEN, 0, 0, MUT_ARR_PTRS_FROZEN, "MUT_ARR_PTRS_FRO
INFO_TABLE(stg_MUT_ARR_PTRS_FROZEN0, 0, 0, MUT_ARR_PTRS_FROZEN0, "MUT_ARR_PTRS_FROZEN0", "MUT_ARR_PTRS_FROZEN0")
{ foreign "C" barf("MUT_ARR_PTRS_FROZEN0 object entered!") never returns; }
+INFO_TABLE(stg_SMALL_MUT_ARR_PTRS_CLEAN, 0, 0, SMALL_MUT_ARR_PTRS_CLEAN, "SMALL_MUT_ARR_PTRS_CLEAN", "SMALL_MUT_ARR_PTRS_CLEAN")
+{ foreign "C" barf("SMALL_MUT_ARR_PTRS_CLEAN object entered!") never returns; }
+
+INFO_TABLE(stg_SMALL_MUT_ARR_PTRS_DIRTY, 0, 0, SMALL_MUT_ARR_PTRS_DIRTY, "SMALL_MUT_ARR_PTRS_DIRTY", "SMALL_MUT_ARR_PTRS_DIRTY")
+{ foreign "C" barf("SMALL_MUT_ARR_PTRS_DIRTY object entered!") never returns; }
+
+INFO_TABLE(stg_SMALL_MUT_ARR_PTRS_FROZEN, 0, 0, SMALL_MUT_ARR_PTRS_FROZEN, "SMALL_MUT_ARR_PTRS_FROZEN", "SMALL_MUT_ARR_PTRS_FROZEN")
+{ foreign "C" barf("SMALL_MUT_ARR_PTRS_FROZEN object entered!") never returns; }
+
+INFO_TABLE(stg_SMALL_MUT_ARR_PTRS_FROZEN0, 0, 0, SMALL_MUT_ARR_PTRS_FROZEN0, "SMALL_MUT_ARR_PTRS_FROZEN0", "SMALL_MUT_ARR_PTRS_FROZEN0")
+{ foreign "C" barf("SMALL_MUT_ARR_PTRS_FROZEN0 object entered!") never returns; }
+
/* ----------------------------------------------------------------------------
Mutable Variables
------------------------------------------------------------------------- */
diff --git a/rts/sm/Compact.c b/rts/sm/Compact.c
index e9973d3f8a..8ae72a96e0 100644
--- a/rts/sm/Compact.c
+++ b/rts/sm/Compact.c
@@ -495,6 +495,21 @@ update_fwd_large( bdescr *bd )
continue;
}
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ // follow everything
+ {
+ StgSmallMutArrPtrs *a;
+
+ a = (StgSmallMutArrPtrs*)p;
+ for (p = (P_)a->payload; p < (P_)&a->payload[a->ptrs]; p++) {
+ thread((StgClosure **)p);
+ }
+ continue;
+ }
+
case STACK:
{
StgStack *stack = (StgStack*)p;
@@ -680,6 +695,22 @@ thread_obj (StgInfoTable *info, StgPtr p)
return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
+
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ // follow everything
+ {
+ StgSmallMutArrPtrs *a;
+
+ a = (StgSmallMutArrPtrs *)p;
+ for (p = (P_)a->payload; p < (P_)&a->payload[a->ptrs]; p++) {
+ thread((StgClosure **)p);
+ }
+
+ return (StgPtr)a + small_mut_arr_ptrs_sizeW(a);
+ }
case TSO:
return thread_TSO((StgTSO *)p);
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 577edc38f5..4a550cdde5 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -716,6 +716,14 @@ loop:
copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
return;
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ // just copy the block
+ copy(p,info,q,small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)q),gen_no);
+ return;
+
case TSO:
copy(p,info,q,sizeofW(StgTSO),gen_no);
return;
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 5b1e5d0fc8..c35444bbaa 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -661,6 +661,54 @@ scavenge_block (bdescr *bd)
break;
}
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ // follow everything
+ {
+ StgPtr next;
+
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
+ gct->eager_promotion = rtsFalse;
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
+ break;
+ }
+
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ // follow everything
+ {
+ StgPtr next;
+
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ // If we're going to put this object on the mutable list, then
+ // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+ }
+ break;
+ }
+
case TSO:
{
scavengeTSO((StgTSO *)p);
@@ -1016,6 +1064,56 @@ scavenge_mark_stack(void)
break;
}
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ // follow everything
+ {
+ StgPtr next;
+ rtsBool saved_eager;
+
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+ gct->eager_promotion = saved_eager;
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->failed_to_evac = rtsTrue; // mutable anyhow.
+ break;
+ }
+
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ // follow everything
+ {
+ StgPtr next, q = p;
+
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ // If we're going to put this object on the mutable list, then
+ // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+ }
+ break;
+ }
+
case TSO:
{
scavengeTSO((StgTSO*)p);
@@ -1281,6 +1379,56 @@ scavenge_one(StgPtr p)
break;
}
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ {
+ StgPtr next, q;
+ rtsBool saved_eager;
+
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
+ q = p;
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+ gct->eager_promotion = saved_eager;
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->failed_to_evac = rtsTrue;
+ break;
+ }
+
+ case SMALL_MUT_ARR_PTRS_FROZEN:
+ case SMALL_MUT_ARR_PTRS_FROZEN0:
+ {
+ // follow everything
+ StgPtr next, q=p;
+
+ next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
+ for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ // If we're going to put this object on the mutable list, then
+ // set its info ptr to SMALL_MUT_ARR_PTRS_FROZEN0 to indicate that.
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN0_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_info;
+ }
+ break;
+ }
+
case TSO:
{
scavengeTSO((StgTSO*)p);