-rw-r--r--  rts/Capability.h        7
-rw-r--r--  rts/HeapStackCheck.cmm  9
-rw-r--r--  rts/PrimOps.cmm         4
-rw-r--r--  rts/Schedule.c          9
4 files changed, 17 insertions(+), 12 deletions(-)
diff --git a/rts/Capability.h b/rts/Capability.h
index 168f8e565e..9cbfbd4bdd 100644
--- a/rts/Capability.h
+++ b/rts/Capability.h
@@ -469,15 +469,14 @@ stopCapability (Capability *cap)
// It may not work - the thread might be updating HpLim itself
// at the same time - so we also have the context_switch/interrupted
// flags as a sticky way to tell the thread to stop.
- TSAN_ANNOTATE_BENIGN_RACE(&cap->r.rHpLim, "stopCapability");
- SEQ_CST_STORE(&cap->r.rHpLim, NULL);
+ RELAXED_STORE_ALWAYS(&cap->r.rHpLim, NULL);
}
INLINE_HEADER void
interruptCapability (Capability *cap)
{
stopCapability(cap);
- SEQ_CST_STORE(&cap->interrupt, true);
+ RELAXED_STORE_ALWAYS(&cap->interrupt, true);
}
INLINE_HEADER void
@@ -486,7 +485,7 @@ contextSwitchCapability (Capability *cap, bool immediately)
if(immediately) {
stopCapability(cap);
}
- SEQ_CST_STORE(&cap->context_switch, true);
+ RELAXED_STORE_ALWAYS(&cap->context_switch, true);
}
#if defined(THREADED_RTS)
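
The Capability.h hunks replace SEQ_CST_STORE with RELAXED_STORE_ALWAYS for HpLim and for the two flags. As a minimal sketch of the difference, assuming the GCC/Clang __atomic builtins that the RTS's atomic wrappers are built on (these are not the actual RTS definitions):

    /* Illustrative only; not the RTS's real macros. A plausible
     * reading of the _ALWAYS suffix: these variants stay atomic even
     * in builds where the plain variants compile to ordinary stores. */
    #define SEQ_CST_STORE(ptr, val) \
        __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST)  /* globally ordered */
    #define RELAXED_STORE_ALWAYS(ptr, val) \
        __atomic_store_n((ptr), (val), __ATOMIC_RELAXED)  /* atomic, unordered */
    #define RELAXED_LOAD_ALWAYS(ptr) \
        __atomic_load_n((ptr), __ATOMIC_RELAXED)

Relaxed ordering suffices here because the flags carry no payload: as the comment above says, they are a sticky way to tell the thread to stop, re-checked at every heap-check safe point, so nothing else in memory has to be ordered with the store.
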
diff --git a/rts/HeapStackCheck.cmm b/rts/HeapStackCheck.cmm
index 50b84a023c..18cd9eef6f 100644
--- a/rts/HeapStackCheck.cmm
+++ b/rts/HeapStackCheck.cmm
@@ -127,8 +127,13 @@ stg_gc_noregs
CurrentNursery = bdescr_link(CurrentNursery);
bdescr_free(CurrentNursery) = bdescr_start(CurrentNursery);
OPEN_NURSERY();
- if (Capability_context_switch(MyCapability()) != 0 :: CInt ||
- Capability_interrupt(MyCapability()) != 0 :: CInt ||
+
+ CInt context_switch, interrupt;
+ context_switch = %relaxed Capability_context_switch(MyCapability());
+ interrupt = %relaxed Capability_interrupt(MyCapability());
+
+ if (context_switch != 0 :: CInt ||
+ interrupt != 0 :: CInt ||
(StgTSO_alloc_limit(CurrentTSO) `lt` (0::I64) &&
(TO_W_(StgTSO_flags(CurrentTSO)) & TSO_ALLOC_LIMIT) != 0)) {
ret = ThreadYielding;
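
In Cmm, the %relaxed annotation marks the memory access as a relaxed atomic. A C analogue of the rewritten check, with illustrative stand-in types (the real ones live in the RTS headers):

    #include <stdbool.h>

    typedef int CInt;              /* stand-in for the RTS's CInt */
    typedef struct {               /* illustrative subset only    */
        CInt context_switch;
        CInt interrupt;
    } Capability;

    /* Each flag is loaded exactly once, with relaxed ordering, into a
     * local before the test, mirroring the hoisted Cmm loads above. */
    static bool yield_requested(Capability *cap)
    {
        CInt context_switch = __atomic_load_n(&cap->context_switch,
                                              __ATOMIC_RELAXED);
        CInt interrupt      = __atomic_load_n(&cap->interrupt,
                                              __ATOMIC_RELAXED);
        return context_switch != 0 || interrupt != 0;
    }

Hoisting the loads into locals also makes it explicit that each flag is read only once per check, rather than leaving that to the code generator.
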
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index 38845f1631..d764801c83 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -1097,7 +1097,7 @@ stg_forkzh ( gcptr closure )
// context switch soon, but not immediately: we don't want every
// forkIO to force a context-switch.
- Capability_context_switch(MyCapability()) = 1 :: CInt;
+ %relaxed Capability_context_switch(MyCapability()) = 1 :: CInt;
return (threadid);
}
@@ -1129,7 +1129,7 @@ stg_yieldzh ()
// current thread to the back of the queue by setting the
// context_switch flag. If we don't do this, it will run the same
// thread again.
- Capability_context_switch(MyCapability()) = 1 :: CInt;
+ %relaxed Capability_context_switch(MyCapability()) = 1 :: CInt;
jump stg_yield_noregs();
}
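
Both primops are the writer side of the same protocol. A hedged sketch of the store they now perform, reusing the illustrative Capability above:

    /* Writer side: ask for a context switch at the next safe point.
     * The flag itself is the entire message, so a relaxed store is
     * enough; the reader never needs to observe any other writes
     * made before it. */
    static void request_context_switch(Capability *cap)  /* illustrative */
    {
        __atomic_store_n(&cap->context_switch, 1, __ATOMIC_RELAXED);
    }
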
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 75d9cffef4..62bd6bfbbb 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -1134,12 +1134,13 @@ schedulePostRunThread (Capability *cap, StgTSO *t)
static bool
scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
{
- if (cap->r.rHpLim == NULL || RELAXED_LOAD(&cap->context_switch)) {
+ if (RELAXED_LOAD_ALWAYS(&cap->r.rHpLim) == NULL ||
+ RELAXED_LOAD_ALWAYS(&cap->context_switch)) {
// Sometimes we miss a context switch, e.g. when calling
// primitives in a tight loop, MAYBE_GC() doesn't check the
// context switch flag, and we end up waiting for a GC.
// See #1984, and concurrent/should_run/1984
- RELAXED_STORE(&cap->context_switch, 0);
+ RELAXED_STORE_ALWAYS(&cap->context_switch, 0);
appendToRunQueue(cap,t);
} else {
pushOnRunQueue(cap,t);
@@ -1240,8 +1241,8 @@ scheduleHandleYield( Capability *cap, StgTSO *t, uint32_t prev_what_next )
// the CPU because the tick always arrives during GC). This way
// penalises threads that do a lot of allocation, but that seems
// better than the alternative.
- if (RELAXED_LOAD(&cap->context_switch) != 0) {
- RELAXED_STORE(&cap->context_switch, 0);
+ if (RELAXED_LOAD_ALWAYS(&cap->context_switch) != 0) {
+ RELAXED_STORE_ALWAYS(&cap->context_switch, 0);
appendToRunQueue(cap,t);
} else {
pushOnRunQueue(cap,t);
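
Both Schedule.c sites follow the same test-then-clear pattern on the reader side. A condensed sketch, again using the illustrative Capability:

    /* Reader side: test the sticky flag with a relaxed load, then
     * clear it with a relaxed store.  A request arriving between the
     * two operations can be lost until it is re-asserted, which the
     * surrounding comments suggest the scheduler tolerates: the flag
     * is a hint, and missed switches are caught at a later check. */
    static bool consume_context_switch(Capability *cap)  /* illustrative */
    {
        if (__atomic_load_n(&cap->context_switch, __ATOMIC_RELAXED) != 0) {
            __atomic_store_n(&cap->context_switch, 0, __ATOMIC_RELAXED);
            return true;   /* yield: thread goes to the back of the run queue */
        }
        return false;      /* keep running the same thread */
    }
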