Diffstat (limited to 'rts/Threads.c')
-rw-r--r--  rts/Threads.c  28
1 file changed, 22 insertions, 6 deletions
diff --git a/rts/Threads.c b/rts/Threads.c
index 977635322d..2bdcea1c00 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -126,6 +126,8 @@ createThread(Capability *cap, W_ size)
ACQUIRE_LOCK(&sched_mutex);
tso->id = next_thread_id++; // while we have the mutex
tso->global_link = g0->threads;
+ /* Mutations above need no memory barrier since this lock will provide
+ * a release barrier */
g0->threads = tso;
RELEASE_LOCK(&sched_mutex);
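
The pattern this hunk relies on is that unlocking a mutex is itself a release operation: every store made while holding sched_mutex becomes visible to the next thread that acquires it, so no explicit write_barrier() is needed before RELEASE_LOCK. A minimal sketch of the same publish-under-lock idea, using POSIX threads rather than the RTS's ACQUIRE_LOCK/RELEASE_LOCK macros (the node type and names below are illustrative stand-ins, not RTS types):

    #include <pthread.h>
    #include <stdlib.h>

    /* Illustrative stand-ins for a TSO and for g0->threads; not RTS types. */
    struct node { long id; struct node *link; };

    static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct node *threads = NULL;
    static long next_id = 0;

    void publish_node(void)
    {
        struct node *n = malloc(sizeof *n);
        if (n == NULL) return;

        pthread_mutex_lock(&list_mutex);
        n->id   = next_id++;   /* plain stores; no explicit barrier needed ... */
        n->link = threads;
        threads = n;           /* ... because the unlock below releases them   */
        pthread_mutex_unlock(&list_mutex);
    }

In the sketch, any thread that later takes list_mutex and walks the list sees fully initialized nodes; the unlock/lock pair supplies the release/acquire ordering on both sides.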
@@ -257,8 +259,10 @@ tryWakeupThread (Capability *cap, StgTSO *tso)
{
MessageWakeup *msg;
msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
- SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
msg->tso = tso;
+ SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
+ // Ensure that writes constructing Message are committed before sending.
+ write_barrier();
sendMessage(cap, tso->cap, (Message*)msg);
debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
(W_)tso->id, tso->cap->no);
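
This hunk reorders the stores so the Message payload is written before its header and adds write_barrier() before sendMessage, so the receiving capability can never observe a tagged message whose fields are still uninitialized. A sketch of the same construct-then-publish pattern using C11 atomics, where a release fence stands in for write_barrier() and a relaxed store to a mailbox stands in for sendMessage (the struct and names are illustrative, not the RTS's Message or Capability types):

    #include <stdatomic.h>
    #include <stddef.h>

    /* Illustrative message and mailbox; not RTS types. */
    struct wakeup_msg { const void *header; void *tso; };

    static _Atomic(struct wakeup_msg *) inbox = NULL;

    void send_wakeup(struct wakeup_msg *msg, const void *info, void *tso)
    {
        msg->tso    = tso;     /* construct the payload first           */
        msg->header = info;    /* header written last, as in the patch  */

        /* Counterpart of write_barrier(): the stores above may not be
         * reordered after the publishing store below.                  */
        atomic_thread_fence(memory_order_release);

        /* Counterpart of sendMessage(): make the message reachable. */
        atomic_store_explicit(&inbox, msg, memory_order_relaxed);
    }

A consumer that loads inbox with acquire semantics (or with a relaxed load followed by an acquire fence) then sees header and tso fully written.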
@@ -363,6 +367,7 @@ wakeBlockingQueue(Capability *cap, StgBlockingQueue *bq)
for (msg = bq->queue; msg != (MessageBlackHole*)END_TSO_QUEUE;
msg = msg->link) {
i = msg->header.info;
+ load_load_barrier();
if (i != &stg_IND_info) {
ASSERT(i == &stg_MSG_BLACKHOLE_info);
tryWakeupThread(cap,msg->tso);
@@ -392,15 +397,18 @@ checkBlockingQueues (Capability *cap, StgTSO *tso)
for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
next = bq->link;
- if (bq->header.info == &stg_IND_info) {
+ const StgInfoTable *bqinfo = bq->header.info;
+ load_load_barrier(); // XXX: Is this needed?
+ if (bqinfo == &stg_IND_info) {
// ToDo: could short it out right here, to avoid
// traversing this IND multiple times.
continue;
}
p = bq->bh;
-
- if (p->header.info != &stg_BLACKHOLE_info ||
+ const StgInfoTable *pinfo = p->header.info;
+ load_load_barrier();
+ if (pinfo != &stg_BLACKHOLE_info ||
((StgInd *)p)->indirectee != (StgClosure*)bq)
{
wakeBlockingQueue(cap,bq);
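
The reader side is the mirror image: the info pointer is snapshotted into a local first, then load_load_barrier() keeps the later payload load from being reordered before it. A sketch of that snapshot-then-fence pattern with C11 atomics, where an acquire fence stands in for load_load_barrier() (the closure layout and names below are stand-ins, not StgInd or the real info tables):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative closure: a header word plus one payload field; not StgInd. */
    struct closure {
        _Atomic(const void *) info;        /* stands in for header.info */
        _Atomic(void *)       indirectee;
    };

    static const char BLACKHOLE_info = 0;  /* stand-in for stg_BLACKHOLE_info */

    bool still_blackhole_for(struct closure *p, void *bq)
    {
        /* Snapshot the header first, as the patch does with pinfo/bqinfo. */
        const void *pinfo = atomic_load_explicit(&p->info, memory_order_relaxed);

        /* Counterpart of load_load_barrier(): the payload load below cannot
         * be reordered before the header load above.                        */
        atomic_thread_fence(memory_order_acquire);

        void *ind = atomic_load_explicit(&p->indirectee, memory_order_relaxed);
        return pinfo == &BLACKHOLE_info && ind == bq;
    }

In the hunk above, checkBlockingQueues wakes the queue exactly when this check fails, i.e. when the black hole has been updated since the thread blocked on it.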
@@ -424,6 +432,7 @@ updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
const StgInfoTable *i;
i = thunk->header.info;
+ load_load_barrier();
if (i != &stg_BLACKHOLE_info &&
i != &stg_CAF_BLACKHOLE_info &&
i != &__stg_EAGER_BLACKHOLE_info &&
@@ -444,6 +453,7 @@ updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
}
i = v->header.info;
+ load_load_barrier();
if (i == &stg_TSO_info) {
checkBlockingQueues(cap, tso);
return;
@@ -667,6 +677,8 @@ threadStackOverflow (Capability *cap, StgTSO *tso)
new_stack->sp -= chunk_words;
}
+ // No write barriers needed; all of the writes above are to structures
+ // owned by our capability.
tso->stackobj = new_stack;
// we're about to run it, better mark it dirty
@@ -738,6 +750,7 @@ threadStackUnderflow (Capability *cap, StgTSO *tso)
bool performTryPutMVar(Capability *cap, StgMVar *mvar, StgClosure *value)
{
const StgInfoTable *info;
+ const StgInfoTable *qinfo;
StgMVarTSOQueue *q;
StgTSO *tso;
@@ -762,8 +775,11 @@ loop:
unlockClosure((StgClosure*)mvar, &stg_MVAR_DIRTY_info);
return true;
}
- if (q->header.info == &stg_IND_info ||
- q->header.info == &stg_MSG_NULL_info) {
+
+ qinfo = q->header.info;
+ load_load_barrier();
+ if (qinfo == &stg_IND_info ||
+ qinfo == &stg_MSG_NULL_info) {
q = (StgMVarTSOQueue*)((StgInd*)q)->indirectee;
goto loop;
}
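
The MVar hunk combines the same snapshot-and-fence read with chasing an indirection: if the queue entry's header says the cell has been overwritten (IND or MSG_NULL), the loop follows the indirectee and retries. A sketch of that loop under the same C11 stand-ins as above (the queue cell layout and names are illustrative, not StgMVarTSOQueue):

    #include <stdatomic.h>
    #include <stddef.h>

    /* Illustrative queue cell: a header, an indirection target, a payload. */
    struct qcell {
        _Atomic(const void *)   info;        /* stands in for header.info   */
        _Atomic(struct qcell *) indirectee;  /* followed when entry is dead */
        void                   *tso;
    };

    static const char IND_info      = 0;     /* stand-ins for stg_IND_info  */
    static const char MSG_NULL_info = 0;     /* and stg_MSG_NULL_info       */

    void *first_live_entry(struct qcell *q)
    {
        while (q != NULL) {
            const void *qinfo = atomic_load_explicit(&q->info, memory_order_relaxed);

            /* Counterpart of load_load_barrier(): order the header load
             * before the loads of the cell's other fields below.        */
            atomic_thread_fence(memory_order_acquire);

            if (qinfo == &IND_info || qinfo == &MSG_NULL_info) {
                /* Dead entry: follow the indirection and retry, like `goto loop`. */
                q = atomic_load_explicit(&q->indirectee, memory_order_relaxed);
                continue;
            }
            return q->tso;   /* live entry */
        }
        return NULL;
    }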