diff options
Diffstat (limited to 'rts/sm')
-rw-r--r-- | rts/sm/CNF.c     |  7 |
-rw-r--r-- | rts/sm/Compact.c |  6 |
-rw-r--r-- | rts/sm/Evac.c    | 18 |
-rw-r--r-- | rts/sm/GCAux.c   |  2 |
-rw-r--r-- | rts/sm/Scav.c    | 28 |
-rw-r--r-- | rts/sm/Storage.c |  3 |
6 files changed, 60 insertions(+), 4 deletions(-)
diff --git a/rts/sm/CNF.c b/rts/sm/CNF.c index 2c701c2c29..9988fba986 100644 --- a/rts/sm/CNF.c +++ b/rts/sm/CNF.c @@ -376,7 +376,6 @@ compactNew (Capability *cap, StgWord size) ALLOCATE_NEW); self = firstBlockGetCompact(block); - SET_HDR((StgClosure*)self, &stg_COMPACT_NFDATA_CLEAN_info, CCS_SYSTEM); self->autoBlockW = aligned_size / sizeof(StgWord); self->nursery = block; self->last = block; @@ -394,6 +393,9 @@ compactNew (Capability *cap, StgWord size) debugTrace(DEBUG_compact, "compactNew: size %" FMT_Word, size); + write_barrier(); + SET_HDR((StgClosure*)self, &stg_COMPACT_NFDATA_CLEAN_info, CCS_SYSTEM); + return self; } @@ -546,6 +548,7 @@ insertCompactHash (Capability *cap, { insertHashTable(str->hash, (StgWord)p, (const void*)to); const StgInfoTable **strinfo = &str->header.info; + load_load_barrier(); if (*strinfo == &stg_COMPACT_NFDATA_CLEAN_info) { *strinfo = &stg_COMPACT_NFDATA_DIRTY_info; recordClosureMutated(cap, (StgClosure*)str); @@ -690,6 +693,7 @@ verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block) ASSERT(LOOKS_LIKE_CLOSURE_PTR(q)); info = get_itbl(q); + load_load_barrier(); switch (info->type) { case CONSTR_1_0: check_object_in_compact(str, UNTAG_CLOSURE(q->payload[0])); @@ -929,6 +933,7 @@ fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count) while (p < bd->free) { ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); info = get_itbl((StgClosure*)p); + load_load_barrier(); switch (info->type) { case CONSTR_1_0: diff --git a/rts/sm/Compact.c b/rts/sm/Compact.c index 5031c535a1..e2b5ca70e8 100644 --- a/rts/sm/Compact.c +++ b/rts/sm/Compact.c @@ -197,6 +197,7 @@ STATIC_INLINE StgInfoTable* get_threaded_info( P_ p ) { W_ q = (W_)GET_INFO(UNTAG_CLOSURE((StgClosure *)p)); + load_load_barrier(); loop: switch (GET_PTR_TAG(q)) @@ -382,6 +383,7 @@ thread_stack(P_ p, P_ stack_end) StgRetFun *ret_fun = (StgRetFun *)p; StgFunInfoTable *fun_info = FUN_INFO_PTR_TO_STRUCT(get_threaded_info((P_)ret_fun->fun)); + 
load_load_barrier(); // *before* threading it! thread(&ret_fun->fun); p = thread_arg_block(fun_info, ret_fun->payload); @@ -400,6 +402,7 @@ thread_PAP_payload (StgClosure *fun, StgClosure **payload, W_ size) { StgFunInfoTable *fun_info = FUN_INFO_PTR_TO_STRUCT(get_threaded_info((P_)fun)); + load_load_barrier(); ASSERT(fun_info->i.type != PAP); P_ p = (P_)payload; @@ -620,6 +623,8 @@ update_fwd_large( bdescr *bd ) static /* STATIC_INLINE */ P_ thread_obj (const StgInfoTable *info, P_ p) { + load_load_barrier(); + switch (info->type) { case THUNK_0_1: return p + sizeofW(StgThunk) + 1; @@ -851,6 +856,7 @@ update_fwd_compact( bdescr *blocks ) // definitely have enough room. Also see bug #1147. StgInfoTable *iptr = get_threaded_info(p); StgInfoTable *info = INFO_PTR_TO_STRUCT(iptr); + load_load_barrier(); P_ q = p; diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c index 0ece06016a..57d9e0c919 100644 --- a/rts/sm/Evac.c +++ b/rts/sm/Evac.c @@ -157,6 +157,7 @@ copy_tag(StgClosure **p, const StgInfoTable *info, { const StgInfoTable *new_info; new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to)); + load_load_barrier(); if (new_info != info) { #if defined(PROFILING) // We copied this object at the same time as another @@ -175,8 +176,11 @@ copy_tag(StgClosure **p, const StgInfoTable *info, } } #else - src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to); + // if somebody else reads the forwarding pointer, we better make + // sure there's a closure at the end of it. 
+ write_barrier(); *p = TAG_CLOSURE(tag,(StgClosure*)to); + src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to); #endif /* defined(PARALLEL_GC) */ #if defined(PROFILING) @@ -251,6 +255,7 @@ spin: } #else info = (W_)src->header.info; + load_load_barrier(); #endif /* PARALLEL_GC */ to = alloc_for_copy(size_to_reserve, gen_no); @@ -703,6 +708,7 @@ loop: gen_no = bd->dest_no; info = q->header.info; + load_load_barrier(); if (IS_FORWARDING_PTR(info)) { /* Already evacuated, just return the forwarding address. @@ -813,11 +819,14 @@ loop: StgClosure *r; const StgInfoTable *i; r = ((StgInd*)q)->indirectee; + load_load_barrier(); if (GET_CLOSURE_TAG(r) == 0) { i = r->header.info; + load_load_barrier(); if (IS_FORWARDING_PTR(i)) { r = (StgClosure *)UN_FORWARDING_PTR(i); i = r->header.info; + load_load_barrier(); } if (i == &stg_TSO_info || i == &stg_WHITEHOLE_info @@ -1016,6 +1025,7 @@ evacuate_BLACKHOLE(StgClosure **p) } gen_no = bd->dest_no; info = q->header.info; + load_load_barrier(); if (IS_FORWARDING_PTR(info)) { StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info); @@ -1208,6 +1218,7 @@ selector_chain: #else // Save the real info pointer (NOTE: not the same as get_itbl()). info_ptr = (StgWord)p->header.info; + load_load_barrier(); SET_INFO((StgClosure *)p,&stg_WHITEHOLE_info); #endif /* THREADED_RTS */ @@ -1226,6 +1237,7 @@ selector_loop: // that evacuate() doesn't mind if it gets passed a to-space pointer. info = (StgInfoTable*)selectee->header.info; + load_load_barrier(); if (IS_FORWARDING_PTR(info)) { // We don't follow pointers into to-space; the constructor @@ -1235,6 +1247,7 @@ selector_loop: } info = INFO_PTR_TO_STRUCT(info); + load_load_barrier(); switch (info->type) { case WHITEHOLE: goto bale_out; // about to be evacuated by another thread (or a loop). 
@@ -1282,6 +1295,7 @@ selector_loop: if (!IS_FORWARDING_PTR(info_ptr)) { info = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr); + load_load_barrier(); switch (info->type) { case IND: case IND_STATIC: @@ -1333,9 +1347,11 @@ selector_loop: // indirection, as in evacuate(). if (GET_CLOSURE_TAG(r) == 0) { i = r->header.info; + load_load_barrier(); if (IS_FORWARDING_PTR(i)) { r = (StgClosure *)UN_FORWARDING_PTR(i); i = r->header.info; + load_load_barrier(); } if (i == &stg_TSO_info || i == &stg_WHITEHOLE_info diff --git a/rts/sm/GCAux.c b/rts/sm/GCAux.c index 11080c1f22..210fbaa1b9 100644 --- a/rts/sm/GCAux.c +++ b/rts/sm/GCAux.c @@ -84,6 +84,7 @@ isAlive(StgClosure *p) } info = q->header.info; + load_load_barrier(); if (IS_FORWARDING_PTR(info)) { // alive! @@ -131,6 +132,7 @@ revertCAFs( void ) SET_INFO((StgClosure *)c, c->saved_info); c->saved_info = NULL; + write_barrier(); // We must reset static_link lest the major GC finds that // static_flag==3 and will consequently ignore references // into code that we are trying to unload. 
This would result diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c index 501d958aae..95bb99c304 100644 --- a/rts/sm/Scav.c +++ b/rts/sm/Scav.c @@ -386,6 +386,7 @@ scavenge_thunk_srt(const StgInfoTable *info) if (!major_gc) return; thunk_info = itbl_to_thunk_itbl(info); + load_load_barrier(); if (thunk_info->i.srt) { StgClosure *srt = (StgClosure*)GET_SRT(thunk_info); evacuate(&srt); @@ -400,6 +401,7 @@ scavenge_fun_srt(const StgInfoTable *info) if (!major_gc) return; fun_info = itbl_to_fun_itbl(info); + load_load_barrier(); if (fun_info->i.srt) { StgClosure *srt = (StgClosure*)GET_FUN_SRT(fun_info); evacuate(&srt); @@ -462,6 +464,7 @@ scavenge_block (bdescr *bd) evacuate((StgClosure **)&mvar->value); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { mvar->header.info = &stg_MVAR_DIRTY_info; } else { @@ -479,6 +482,7 @@ scavenge_block (bdescr *bd) evacuate((StgClosure **)&tvar->first_watch_queue_entry); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { tvar->header.info = &stg_TVAR_DIRTY_info; } else { @@ -613,6 +617,7 @@ scavenge_block (bdescr *bd) evacuate(&((StgMutVar *)p)->var); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info; } else { @@ -632,6 +637,7 @@ scavenge_block (bdescr *bd) evacuate((StgClosure**)&bq->link); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info; } else { @@ -684,6 +690,7 @@ scavenge_block (bdescr *bd) p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p); + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; } else { @@ -701,6 +708,7 @@ scavenge_block (bdescr *bd) { p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p); + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info; } 
else { @@ -726,6 +734,7 @@ scavenge_block (bdescr *bd) } gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info; } else { @@ -747,6 +756,7 @@ scavenge_block (bdescr *bd) evacuate((StgClosure **)p); } + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info; } else { @@ -887,6 +897,7 @@ scavenge_mark_stack(void) evacuate((StgClosure **)&mvar->value); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { mvar->header.info = &stg_MVAR_DIRTY_info; } else { @@ -903,6 +914,7 @@ scavenge_mark_stack(void) evacuate((StgClosure **)&tvar->first_watch_queue_entry); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { tvar->header.info = &stg_TVAR_DIRTY_info; } else { @@ -1009,6 +1021,7 @@ scavenge_mark_stack(void) evacuate(&((StgMutVar *)p)->var); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info; } else { @@ -1028,6 +1041,7 @@ scavenge_mark_stack(void) evacuate((StgClosure**)&bq->link); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info; } else { @@ -1076,6 +1090,7 @@ scavenge_mark_stack(void) scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; } else { @@ -1095,6 +1110,7 @@ scavenge_mark_stack(void) scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info; } else { @@ -1122,6 +1138,7 @@ scavenge_mark_stack(void) } gct->eager_promotion = saved_eager; + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = 
&stg_SMALL_MUT_ARR_PTRS_DIRTY_info; } else { @@ -1143,6 +1160,7 @@ scavenge_mark_stack(void) evacuate((StgClosure **)p); } + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info; } else { @@ -1249,6 +1267,7 @@ scavenge_one(StgPtr p) evacuate((StgClosure **)&mvar->value); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { mvar->header.info = &stg_MVAR_DIRTY_info; } else { @@ -1265,6 +1284,7 @@ scavenge_one(StgPtr p) evacuate((StgClosure **)&tvar->first_watch_queue_entry); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { tvar->header.info = &stg_TVAR_DIRTY_info; } else { @@ -1329,6 +1349,7 @@ scavenge_one(StgPtr p) evacuate(&((StgMutVar *)p)->var); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info; } else { @@ -1348,6 +1369,7 @@ scavenge_one(StgPtr p) evacuate((StgClosure**)&bq->link); gct->eager_promotion = saved_eager_promotion; + write_barrier(); if (gct->failed_to_evac) { bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info; } else { @@ -1396,6 +1418,7 @@ scavenge_one(StgPtr p) scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; } else { @@ -1413,6 +1436,7 @@ scavenge_one(StgPtr p) // follow everything scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info; } else { @@ -1440,6 +1464,7 @@ scavenge_one(StgPtr p) } gct->eager_promotion = saved_eager; + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_DIRTY_info; } else { @@ -1461,6 +1486,7 @@ scavenge_one(StgPtr p) evacuate((StgClosure **)p); } + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = 
&stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info; } else { @@ -1613,6 +1639,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen) mutlist_TREC_CHUNK++; break; case MUT_PRIM: pinfo = ((StgClosure*)p)->header.info; + load_load_barrier(); if (pinfo == &stg_TVAR_WATCH_QUEUE_info) mutlist_TVAR_WATCH_QUEUE++; else if (pinfo == &stg_TREC_HEADER_info) @@ -1645,6 +1672,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen) scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p); + write_barrier(); if (gct->failed_to_evac) { ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; } else { diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c index a73228dce6..ce67a19840 100644 --- a/rts/sm/Storage.c +++ b/rts/sm/Storage.c @@ -500,9 +500,8 @@ lockCAF (StgRegTable *reg, StgIndStatic *caf) bh = (StgInd *)allocate(cap, sizeofW(*bh)); } bh->indirectee = (StgClosure *)cap->r.rCurrentTSO; - SET_HDR(bh, &stg_CAF_BLACKHOLE_info, caf->header.prof.ccs); - // Ensure that above writes are visible before we introduce reference as CAF indirectee. write_barrier(); + SET_HDR(bh, &stg_CAF_BLACKHOLE_info, caf->header.prof.ccs); caf->indirectee = (StgClosure *)bh; write_barrier(); |