diff options
author | Simon Marlow <marlowsd@gmail.com> | 2012-11-16 09:22:00 +0000 |
---|---|---|
committer | Simon Marlow <marlowsd@gmail.com> | 2012-11-16 11:26:20 +0000 |
commit | 6d784c43592290ec16db8b7f0f2a012dff3ed497 (patch) | |
tree | a317ef1fc1ee40883e92cde308c292f7125db2b7 /rts/sm/Scav.c | |
parent | 97dc57c6e2bdbddd0a0170a283149a570a07179c (diff) | |
download | haskell-6d784c43592290ec16db8b7f0f2a012dff3ed497.tar.gz |
Add a write barrier for TVAR closures
This improves GC performance when there are a lot of TVars in the
heap. For instance, a TChan with a lot of elements causes a massive
GC drag without this patch.
There's more to do - several other STM closure types don't have write
barriers, so GC performance when there are a lot of threads blocked on
STM isn't great. But fixing the problem for TVar is a good start.
Diffstat (limited to 'rts/sm/Scav.c')
-rw-r--r-- | rts/sm/Scav.c | 73 |
1 file changed, 69 insertions, 4 deletions
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 668b95da6b..1e0411a972 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -424,6 +424,23 @@ scavenge_block (bdescr *bd)
         break;
     }
 
+    case TVAR:
+    {
+        StgTVar *tvar = ((StgTVar *)p);
+        gct->eager_promotion = rtsFalse;
+        evacuate((StgClosure **)&tvar->current_value);
+        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
+        gct->eager_promotion = saved_eager_promotion;
+
+        if (gct->failed_to_evac) {
+            tvar->header.info = &stg_TVAR_DIRTY_info;
+        } else {
+            tvar->header.info = &stg_TVAR_CLEAN_info;
+        }
+        p += sizeofW(StgTVar);
+        break;
+    }
+
     case FUN_2_0:
         scavenge_fun_srt(info);
         evacuate(&((StgClosure *)p)->payload[1]);
@@ -783,6 +800,22 @@ scavenge_mark_stack(void)
         break;
     }
 
+    case TVAR:
+    {
+        StgTVar *tvar = ((StgTVar *)p);
+        gct->eager_promotion = rtsFalse;
+        evacuate((StgClosure **)&tvar->current_value);
+        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
+        gct->eager_promotion = saved_eager_promotion;
+
+        if (gct->failed_to_evac) {
+            tvar->header.info = &stg_TVAR_DIRTY_info;
+        } else {
+            tvar->header.info = &stg_TVAR_CLEAN_info;
+        }
+        break;
+    }
+
     case FUN_2_0:
         scavenge_fun_srt(info);
         evacuate(&((StgClosure *)p)->payload[1]);
@@ -1088,6 +1121,22 @@ scavenge_one(StgPtr p)
         break;
     }
 
+    case TVAR:
+    {
+        StgTVar *tvar = ((StgTVar *)p);
+        gct->eager_promotion = rtsFalse;
+        evacuate((StgClosure **)&tvar->current_value);
+        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
+        gct->eager_promotion = saved_eager_promotion;
+
+        if (gct->failed_to_evac) {
+            tvar->header.info = &stg_TVAR_DIRTY_info;
+        } else {
+            tvar->header.info = &stg_TVAR_CLEAN_info;
+        }
+        break;
+    }
+
     case THUNK:
     case THUNK_1_0:
     case THUNK_0_1:
@@ -1363,10 +1412,26 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
         case MVAR_CLEAN:
             barf("MVAR_CLEAN on mutable list");
         case MVAR_DIRTY:
-            mutlist_MVARS++; break;
-        default:
-            mutlist_OTHERS++; break;
-        }
+            mutlist_MVARS++; break;
+        case TVAR:
+            mutlist_TVAR++; break;
+        case TREC_CHUNK:
+            mutlist_TREC_CHUNK++; break;
+        case MUT_PRIM:
+            if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
+                mutlist_TVAR_WATCH_QUEUE++;
+            else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
+                mutlist_TREC_HEADER++;
+            else if (((StgClosure*)p)->header.info == &stg_ATOMIC_INVARIANT_info)
+                mutlist_ATOMIC_INVARIANT++;
+            else if (((StgClosure*)p)->header.info == &stg_INVARIANT_CHECK_QUEUE_info)
+                mutlist_INVARIANT_CHECK_QUEUE++;
+            else
+                mutlist_OTHERS++;
+            break;
+        default:
+            mutlist_OTHERS++; break;
+        }
 #endif
 
     // Check whether this object is "clean", that is it