Diffstat (limited to 'rts/sm')
-rw-r--r--  rts/sm/CNF.c      |  5
-rw-r--r--  rts/sm/Evac.c     | 12
-rw-r--r--  rts/sm/GC.c       |  2
-rw-r--r--  rts/sm/GCAux.c    |  1
-rw-r--r--  rts/sm/MarkWeak.c | 10
-rw-r--r--  rts/sm/Sanity.c   |  3
-rw-r--r--  rts/sm/Scav.c     |  8
-rw-r--r--  rts/sm/Storage.c  |  8
8 files changed, 36 insertions(+), 13 deletions(-)
diff --git a/rts/sm/CNF.c b/rts/sm/CNF.c
index d0447f867c..0432505cd2 100644
--- a/rts/sm/CNF.c
+++ b/rts/sm/CNF.c
@@ -542,8 +542,9 @@ insertCompactHash (Capability *cap,
StgClosure *p, StgClosure *to)
{
insertHashTable(str->hash, (StgWord)p, (const void*)to);
- if (str->header.info == &stg_COMPACT_NFDATA_CLEAN_info) {
- str->header.info = &stg_COMPACT_NFDATA_DIRTY_info;
+ const StgInfoTable *strinfo = str->header.info;
+ if (strinfo == &stg_COMPACT_NFDATA_CLEAN_info) {
+ str->header.info = &stg_COMPACT_NFDATA_DIRTY_info;
recordClosureMutated(cap, (StgClosure*)str);
}
}
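
The CNF.c change reads str->header.info once into a local so that the
CLEAN check and any later use of the info pointer agree even if another
thread concurrently dirties the closure. A minimal sketch of that
read-once dirty-marking pattern in plain C; Closure, CLEAN_info,
DIRTY_info and record_mutated are illustrative names, not RTS API:

    typedef struct InfoTable { int type; } InfoTable;
    typedef struct Closure { const InfoTable *info; } Closure;

    static const InfoTable CLEAN_info = { 0 };
    static const InfoTable DIRTY_info = { 1 };

    static void record_mutated(Closure *c) { (void)c; /* push on mut_list */ }

    static void mark_dirty(Closure *c)
    {
        // Read the info pointer exactly once; a second read could
        // observe a different value under concurrent mutation.
        const InfoTable *info = c->info;
        if (info == &CLEAN_info) {
            c->info = &DIRTY_info;   // write back to the closure itself
            record_mutated(c);
        }
    }
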
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 2a2a26ec09..7c82caa185 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -131,7 +131,7 @@ copy_tag(StgClosure **p, const StgInfoTable *info,
#else
src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
*p = TAG_CLOSURE(tag,(StgClosure*)to);
-#endif
+#endif /* defined(PARALLEL_GC) */
#if defined(PROFILING)
// We store the size of the just evacuated object in the LDV word so that
@@ -194,7 +194,7 @@ spin:
if (info == (W_)&stg_WHITEHOLE_info) {
#if defined(PROF_SPIN)
whitehole_gc_spin++;
-#endif
+#endif /* defined(PROF_SPIN) */
busy_wait_nop();
goto spin;
}
@@ -205,7 +205,7 @@ spin:
}
#else
info = (W_)src->header.info;
-#endif
+#endif /* defined(PARALLEL_GC) */
to = alloc_for_copy(size_to_reserve, gen_no);
@@ -216,8 +216,8 @@ spin:
}
write_barrier();
- src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
*p = (StgClosure *)to;
+ src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
#if defined(PROFILING)
// We store the size of the just evacuated object in the LDV word so that
@@ -1099,6 +1099,7 @@ selector_chain:
// need the write-barrier stuff.
// - undo the chain we've built to point to p.
SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
+ write_barrier();
*q = (StgClosure *)p;
if (evac) evacuate(q);
unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
@@ -1109,7 +1110,7 @@ selector_chain:
// Save the real info pointer (NOTE: not the same as get_itbl()).
info_ptr = (StgWord)p->header.info;
SET_INFO((StgClosure *)p,&stg_WHITEHOLE_info);
-#endif
+#endif /* defined(THREADED_RTS) */
field = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->layout.selector_offset;
@@ -1165,6 +1166,7 @@ selector_loop:
SET_INFO((StgClosure*)p, (StgInfoTable *)info_ptr);
OVERWRITING_CLOSURE((StgClosure*)p);
SET_INFO((StgClosure*)p, &stg_WHITEHOLE_info);
+ write_barrier();
}
#endif
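
The Evac.c hunks above all enforce one ordering: finish writing the
object, issue write_barrier(), and only then publish the pointer that
makes it reachable (here, the forwarding pointer written over the
source header). A hedged sketch of the same publication order, using a
C11 release store in place of write_barrier(); Obj and evacuate_sketch
are invented names:

    #include <stdatomic.h>
    #include <string.h>

    typedef struct Obj {
        _Atomic(struct Obj *) forward;   // forwarding pointer, 0 = none
        char payload[64];
    } Obj;

    static void evacuate_sketch(Obj *src, Obj *to)
    {
        // Initialise the copy first ...
        memcpy(to->payload, src->payload, sizeof to->payload);
        // ... then publish it. The release store keeps the payload
        // writes ordered before the forwarding-pointer write, so a
        // reader that sees 'forward' cannot see a half-copied object.
        atomic_store_explicit(&src->forward, to, memory_order_release);
    }
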
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 4bf540a4a7..92a5e229a1 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -1104,6 +1104,8 @@ loop:
// scavenge_loop() only exits when there's no work to do
+ // This atomic decrement also serves as a full barrier to ensure that any
+ // writes we made during scavenging are visible to other threads.
#if defined(DEBUG)
r = dec_running();
#else
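
The new comment relies on dec_running() being an atomic
read-modify-write, which acts as a full barrier: every write the
scavenging loop made is visible to any thread that observes the
decremented count. Roughly, in C11 atomics (gc_running_threads and
dec_running_sketch are assumed names):

    #include <stdatomic.h>

    static atomic_uint gc_running_threads;

    static unsigned dec_running_sketch(void)
    {
        // A seq_cst fetch-sub is a full barrier: prior writes become
        // visible before any thread can read the decremented value.
        return atomic_fetch_sub_explicit(&gc_running_threads, 1,
                                         memory_order_seq_cst) - 1;
    }
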
diff --git a/rts/sm/GCAux.c b/rts/sm/GCAux.c
index e8ca0c4002..650dc2c1df 100644
--- a/rts/sm/GCAux.c
+++ b/rts/sm/GCAux.c
@@ -83,6 +83,7 @@ isAlive(StgClosure *p)
}
info = INFO_PTR_TO_STRUCT(info);
+ load_load_barrier();
switch (info->type) {
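
isAlive() is the reader side of the protocol: load the info pointer,
then fence with load_load_barrier() before dereferencing the struct it
points to, pairing with the write barrier that published the closure.
Sketched with a C11 acquire load standing in for the barrier (types
and names are illustrative):

    #include <stdatomic.h>

    typedef struct InfoTable { int type; } InfoTable;
    typedef struct Closure {
        _Atomic(const InfoTable *) info;
        int field;
    } Closure;

    static int closure_type_sketch(Closure *p)
    {
        // Acquire-load the info pointer before reading anything it
        // guards; pairs with the writer's release/write barrier.
        const InfoTable *info =
            atomic_load_explicit(&p->info, memory_order_acquire);
        return info->type;
    }
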
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index d5982e2f64..7475b5e625 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -234,16 +234,22 @@ static bool tidyWeakList(generation *gen)
last_w = &gen->old_weak_ptr_list;
for (w = gen->old_weak_ptr_list; w != NULL; w = next_w) {
+ info = w->header.info;
+ /* N.B. This function is executed only during the serial part of GC,
+ * so there is no potential for data races and hence no need for
+ * memory barriers.
+ */
+
/* There might be a DEAD_WEAK on the list if finalizeWeak# was
* called on a live weak pointer object. Just remove it.
*/
- if (w->header.info == &stg_DEAD_WEAK_info) {
+ if (info == &stg_DEAD_WEAK_info) {
next_w = w->link;
*last_w = next_w;
continue;
}
- info = get_itbl((StgClosure *)w);
+ info = INFO_PTR_TO_STRUCT(info);
switch (info->type) {
case WEAK:
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 8082b7e6d0..ff76f747c9 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -233,6 +233,7 @@ checkClosure( const StgClosure* p )
p = UNTAG_CONST_CLOSURE(p);
info = p->header.info;
+ load_load_barrier();
if (IS_FORWARDING_PTR(info)) {
barf("checkClosure: found EVACUATED closure %d", info->type);
@@ -243,6 +244,7 @@ checkClosure( const StgClosure* p )
#endif
info = INFO_PTR_TO_STRUCT(info);
+ load_load_barrier();
switch (info->type) {
@@ -564,6 +566,7 @@ checkTSO(StgTSO *tso)
next = tso->_link;
info = (const StgInfoTable*) tso->_link->header.info;
+ load_load_barrier();
ASSERT(next == END_TSO_QUEUE ||
info == &stg_MVAR_TSO_QUEUE_info ||
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 73a790e9ed..c486cd96c5 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -431,7 +431,7 @@ scavenge_block (bdescr *bd)
// time around the loop.
while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
- ASSERT(bd->link == NULL);
+ ASSERT(bd->link == NULL);
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
@@ -1580,6 +1580,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
#if defined(DEBUG)
+ const StgInfoTable *pinfo;
switch (get_itbl((StgClosure *)p)->type) {
case MUT_VAR_CLEAN:
// can happen due to concurrent writeMutVars
@@ -1599,9 +1600,10 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
case TREC_CHUNK:
mutlist_TREC_CHUNK++; break;
case MUT_PRIM:
- if (((StgClosure*)p)->header.info == &stg_TVAR_WATCH_QUEUE_info)
+ pinfo = ((StgClosure*)p)->header.info;
+ if (pinfo == &stg_TVAR_WATCH_QUEUE_info)
mutlist_TVAR_WATCH_QUEUE++;
- else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
+ else if (pinfo == &stg_TREC_HEADER_info)
mutlist_TREC_HEADER++;
else
mutlist_OTHERS++;
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 2e03b77695..3f91905f3c 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -407,8 +407,10 @@ lockCAF (StgRegTable *reg, StgIndStatic *caf)
// Allocate the blackhole indirection closure
bh = (StgInd *)allocate(cap, sizeofW(*bh));
- SET_HDR(bh, &stg_CAF_BLACKHOLE_info, caf->header.prof.ccs);
bh->indirectee = (StgClosure *)cap->r.rCurrentTSO;
+ SET_HDR(bh, &stg_CAF_BLACKHOLE_info, caf->header.prof.ccs);
+ // Ensure that the above writes are visible before we publish the blackhole as the CAF's indirectee.
+ write_barrier();
caf->indirectee = (StgClosure *)bh;
write_barrier();
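
lockCAF now follows the same shape as the evacuation code: fully
initialise the blackhole (indirectee first, header last), write
barrier, then publish it through caf->indirectee. A compact C11 sketch
of that initialise-then-publish step; Blackhole, Caf and
lock_caf_sketch are stand-in names for the RTS structures:

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct Blackhole { void *indirectee; const void *info; } Blackhole;
    typedef struct Caf { _Atomic(Blackhole *) indirectee; } Caf;

    static void lock_caf_sketch(Caf *caf, void *owner, const void *bh_info)
    {
        Blackhole *bh = malloc(sizeof *bh);
        if (bh == NULL) abort();
        bh->indirectee = owner;     // e.g. the TSO entering the CAF
        bh->info       = bh_info;   // header written after the fields
        // Release store stands in for write_barrier(): no thread can
        // follow caf->indirectee to a partially initialised blackhole.
        atomic_store_explicit(&caf->indirectee, bh, memory_order_release);
    }
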
@@ -1081,6 +1083,8 @@ void
dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
{
Capability *cap = regTableToCapability(reg);
+ // No barrier required here as no other heap object fields are read. See
+ // Note [Heap memory barriers] in SMP.h.
if (p->header.info == &stg_MUT_VAR_CLEAN_info) {
p->header.info = &stg_MUT_VAR_DIRTY_info;
recordClosureMutated(cap,p);
@@ -1090,6 +1094,8 @@ dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
void
dirty_TVAR(Capability *cap, StgTVar *p)
{
+ // No barrier required here as no other heap object fields are read. See
+ // Note [Heap memory barriers] in SMP.h.
if (p->header.info == &stg_TVAR_CLEAN_info) {
p->header.info = &stg_TVAR_DIRTY_info;
recordClosureMutated(cap,(StgClosure*)p);