summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBen Gamari <ben@smart-cactus.org>2019-10-01 17:36:56 +0000
committerMoritz Angermann <moritz.angermann@gmail.com>2020-09-18 07:40:21 +0000
commit2f275cf9723e11d3da3471d40541ece80de2dc5a (patch)
tree020332dcf1f54bbfa14a4a8e418cc05e7be975a2
parent03458c2b791cf5e746220c567c0490bee8ec3823 (diff)
downloadhaskell-2f275cf9723e11d3da3471d40541ece80de2dc5a.tar.gz
rts/Threads: Avoid data races
Replace barriers with appropriate ordering. Drop redundant barrier in tryWakeupThread (the RELEASE barrier will be provided by sendMessage's mutex release). We use relaxed operations on why_blocked and the stack although it's not clear to me why this is necessary.
-rw-r--r--rts/Threads.c28
1 file changed, 10 insertions, 18 deletions
diff --git a/rts/Threads.c b/rts/Threads.c
index 2609256a66..198792fa97 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -261,8 +261,6 @@ tryWakeupThread (Capability *cap, StgTSO *tso)
msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
msg->tso = tso;
SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
- // Ensure that writes constructing Message are committed before sending.
- write_barrier();
sendMessage(cap, tso->cap, (Message*)msg);
debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
(W_)tso->id, tso->cap->no);
@@ -368,8 +366,7 @@ wakeBlockingQueue(Capability *cap, StgBlockingQueue *bq)
for (msg = bq->queue; msg != (MessageBlackHole*)END_TSO_QUEUE;
msg = msg->link) {
- i = msg->header.info;
- load_load_barrier();
+ i = ACQUIRE_LOAD(&msg->header.info);
if (i != &stg_IND_info) {
ASSERT(i == &stg_MSG_BLACKHOLE_info);
tryWakeupThread(cap,msg->tso);
@@ -405,8 +402,7 @@ checkBlockingQueues (Capability *cap, StgTSO *tso)
for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
next = bq->link;
- const StgInfoTable *bqinfo = bq->header.info;
- load_load_barrier(); // XXX: Is this needed?
+ const StgInfoTable *bqinfo = ACQUIRE_LOAD(&bq->header.info);
if (bqinfo == &stg_IND_info) {
// ToDo: could short it out right here, to avoid
// traversing this IND multiple times.
@@ -414,8 +410,7 @@ checkBlockingQueues (Capability *cap, StgTSO *tso)
}
p = bq->bh;
- const StgInfoTable *pinfo = p->header.info;
- load_load_barrier();
+ const StgInfoTable *pinfo = ACQUIRE_LOAD(&p->header.info);
if (pinfo != &stg_BLACKHOLE_info ||
((StgInd *)p)->indirectee != (StgClosure*)bq)
{
@@ -439,8 +434,7 @@ updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
StgTSO *owner;
const StgInfoTable *i;
- i = thunk->header.info;
- load_load_barrier();
+ i = ACQUIRE_LOAD(&thunk->header.info);
if (i != &stg_BLACKHOLE_info &&
i != &stg_CAF_BLACKHOLE_info &&
i != &__stg_EAGER_BLACKHOLE_info &&
@@ -460,8 +454,7 @@ updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
return;
}
- i = v->header.info;
- load_load_barrier();
+ i = ACQUIRE_LOAD(&v->header.info);
if (i == &stg_TSO_info) {
checkBlockingQueues(cap, tso);
return;
@@ -784,8 +777,7 @@ loop:
return true;
}
- qinfo = q->header.info;
- load_load_barrier();
+ qinfo = ACQUIRE_LOAD(&q->header.info);
if (qinfo == &stg_IND_info ||
qinfo == &stg_MSG_NULL_info) {
q = (StgMVarTSOQueue*)((StgInd*)q)->indirectee;
@@ -802,15 +794,15 @@ loop:
ASSERT(tso->block_info.closure == (StgClosure*)mvar);
// save why_blocked here, because waking up the thread destroys
// this information
- StgWord why_blocked = tso->why_blocked;
+ StgWord why_blocked = RELAXED_LOAD(&tso->why_blocked);
// actually perform the takeMVar
StgStack* stack = tso->stackobj;
- stack->sp[1] = (W_)value;
- stack->sp[0] = (W_)&stg_ret_p_info;
+ RELAXED_STORE(&stack->sp[1], (W_)value);
+ RELAXED_STORE(&stack->sp[0], (W_)&stg_ret_p_info);
// indicate that the MVar operation has now completed.
- tso->_link = (StgTSO*)&stg_END_TSO_QUEUE_closure;
+ RELEASE_STORE(&tso->_link, (StgTSO*)&stg_END_TSO_QUEUE_closure);
if (stack->dirty == 0) {
dirty_STACK(cap, stack);