author     Ben Gamari <ben@smart-cactus.org>              2019-12-01 14:11:01 -0500
committer  Moritz Angermann <moritz.angermann@gmail.com>  2020-09-18 07:34:12 +0000
commit     06730ad846a337b05d508d6c5cc07b50f611ba19 (patch)
tree       9e746a24a30f5e2c5c17492c72ab8a1685603444
parent     1325b6ba25a3e5575e6795653c28279494229ff9 (diff)
download   haskell-06730ad846a337b05d508d6c5cc07b50f611ba19.tar.gz
rts/Schedule: Use relaxed operations for sched_state
-rw-r--r--  rts/Capability.c |  4
-rw-r--r--  rts/Schedule.c   | 38
2 files changed, 23 insertions, 19 deletions
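For context, RELAXED_LOAD and RELAXED_STORE are the RTS's thin wrappers over the GCC/Clang __atomic builtins with __ATOMIC_RELAXED ordering. A minimal sketch of such wrappers (not necessarily the verbatim definitions in the RTS headers):

    /* Sketch, assuming GCC/Clang __atomic builtins; the exact GHC
     * definitions may differ in detail. A relaxed access is a single
     * untorn atomic load/store with no ordering guarantees relative to
     * other memory operations: it rules out data races and torn values,
     * but does not publish any other data. */
    #define RELAXED_LOAD(ptr)       __atomic_load_n(ptr, __ATOMIC_RELAXED)
    #define RELAXED_STORE(ptr, val) __atomic_store_n(ptr, val, __ATOMIC_RELAXED)

Relaxed ordering is plausible for sched_state because it only advances monotonically (SCHED_RUNNING, then SCHED_INTERRUPTING, then SCHED_SHUTTING_DOWN) and readers generally treat it as an advisory hint rather than as a synchronization point.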
diff --git a/rts/Capability.c b/rts/Capability.c
index ef5128b419..2f466b0785 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -85,7 +85,7 @@ Capability * rts_unsafeGetMyCapability (void)
STATIC_INLINE bool
globalWorkToDo (void)
{
- return sched_state >= SCHED_INTERRUPTING
+ return RELAXED_LOAD(&sched_state) >= SCHED_INTERRUPTING
|| recent_activity == ACTIVITY_INACTIVE; // need to check for deadlock
}
#endif
@@ -562,7 +562,7 @@ releaseCapability_ (Capability* cap,
// is interrupted, we only create a worker task if there
// are threads that need to be completed. If the system is
// shutting down, we never create a new worker.
- if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
+ if (RELAXED_LOAD(&sched_state) < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
debugTrace(DEBUG_sched,
"starting new worker on capability %d", cap->no);
startWorkerTask(cap);
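Both Capability.c call sites read sched_state purely as such an advisory hint, so a relaxed load is all they need. A self-contained C11 analogue of the globalWorkToDo() check (hypothetical names, using <stdatomic.h> in place of the RTS macros):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical mirror of the scheduler states, in ascending order. */
    enum { SCHED_RUNNING, SCHED_INTERRUPTING, SCHED_SHUTTING_DOWN };

    static _Atomic unsigned int sched_state = SCHED_RUNNING;

    /* Advisory check: a relaxed load suffices because the caller only
     * uses the result as a hint, and the state could change immediately
     * after the read in any case. */
    static inline bool global_work_to_do(void)
    {
        return atomic_load_explicit(&sched_state, memory_order_relaxed)
                   >= SCHED_INTERRUPTING;
    }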
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 647acf0d53..149c42791e 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -245,7 +245,7 @@ schedule (Capability *initialCapability, Task *task)
// * We might be left with threads blocked in foreign calls,
// we should really attempt to kill these somehow (TODO).
- switch (sched_state) {
+ switch (RELAXED_LOAD(&sched_state)) {
case SCHED_RUNNING:
break;
case SCHED_INTERRUPTING:
@@ -257,7 +257,7 @@ schedule (Capability *initialCapability, Task *task)
// other Capability did the final GC, or we did it above,
// either way we can fall through to the SCHED_SHUTTING_DOWN
// case now.
- ASSERT(sched_state == SCHED_SHUTTING_DOWN);
+ ASSERT(RELAXED_LOAD(&sched_state) == SCHED_SHUTTING_DOWN);
// fall through
case SCHED_SHUTTING_DOWN:
@@ -347,7 +347,7 @@ schedule (Capability *initialCapability, Task *task)
// killed, kill it now. This sometimes happens when a finalizer
// thread is created by the final GC, or a thread previously
// in a foreign call returns.
- if (sched_state >= SCHED_INTERRUPTING &&
+ if (RELAXED_LOAD(&sched_state) >= SCHED_INTERRUPTING &&
!(t->what_next == ThreadComplete || t->what_next == ThreadKilled)) {
deleteThread(t);
}
@@ -665,7 +665,7 @@ scheduleYield (Capability **pcap, Task *task)
if (!shouldYieldCapability(cap,task,false) &&
(!emptyRunQueue(cap) ||
!emptyInbox(cap) ||
- sched_state >= SCHED_INTERRUPTING)) {
+ RELAXED_LOAD(&sched_state) >= SCHED_INTERRUPTING)) {
return;
}
@@ -806,7 +806,11 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
appendToRunQueue(free_caps[i],t);
traceEventMigrateThread (cap, t, free_caps[i]->no);
- if (t->bound) { t->bound->task->cap = free_caps[i]; }
+ if (t->bound) {
+ // N.B. we typically would need to hold 't->bound->task->lock' to change 'cap'
+ // but this is safe because TODO.
+ RELAXED_STORE(&t->bound->task->cap, free_caps[i]);
+ }
t->cap = free_caps[i];
n--; // we have one fewer threads now
i++; // move on to the next free_cap
@@ -1313,7 +1317,7 @@ scheduleHandleThreadFinished (Capability *cap, Task *task, StgTSO *t)
if (task->incall->ret) {
*(task->incall->ret) = NULL;
}
- if (sched_state >= SCHED_INTERRUPTING) {
+ if (RELAXED_LOAD(&sched_state) >= SCHED_INTERRUPTING) {
if (heap_overflow) {
task->incall->rstat = HeapExhausted;
} else {
@@ -1528,7 +1532,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
// cycle.
#endif
- if (sched_state == SCHED_SHUTTING_DOWN) {
+ if (RELAXED_LOAD(&sched_state) == SCHED_SHUTTING_DOWN) {
// The final GC has already been done, and the system is
// shutting down. We'll probably deadlock if we try to GC
// now.
@@ -1543,7 +1547,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
major_gc = (collect_gen == RtsFlags.GcFlags.generations-1);
#if defined(THREADED_RTS)
- if (sched_state < SCHED_INTERRUPTING
+ if (RELAXED_LOAD(&sched_state) < SCHED_INTERRUPTING
&& RtsFlags.ParFlags.parGcEnabled
&& collect_gen >= RtsFlags.ParFlags.parGcGen
&& ! oldest_gen->mark)
@@ -1636,7 +1640,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
}
if (was_syncing &&
(prev_sync == SYNC_GC_SEQ || prev_sync == SYNC_GC_PAR) &&
- !(sched_state == SCHED_INTERRUPTING && force_major)) {
+ !(RELAXED_LOAD(&sched_state) == SCHED_INTERRUPTING && force_major)) {
// someone else had a pending sync request for a GC, so
// let's assume GC has been done and we don't need to GC
// again.
@@ -1644,7 +1648,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
// need to do the final GC.
return;
}
- if (sched_state == SCHED_SHUTTING_DOWN) {
+ if (RELAXED_LOAD(&sched_state) == SCHED_SHUTTING_DOWN) {
// The scheduler might now be shutting down. We tested
// this above, but it might have become true since then as
// we yielded the capability in requestSync().
@@ -1749,7 +1753,7 @@ delete_threads_and_gc:
* threads in the system.
* Checking for major_gc ensures that the last GC is major.
*/
- if (sched_state == SCHED_INTERRUPTING && major_gc) {
+ if (RELAXED_LOAD(&sched_state) == SCHED_INTERRUPTING && major_gc) {
deleteAllThreads();
#if defined(THREADED_RTS)
// Discard all the sparks from every Capability. Why?
@@ -1763,7 +1767,7 @@ delete_threads_and_gc:
discardSparksCap(capabilities[i]);
}
#endif
- sched_state = SCHED_SHUTTING_DOWN;
+ RELAXED_STORE(&sched_state, SCHED_SHUTTING_DOWN);
}
/*
@@ -1807,7 +1811,7 @@ delete_threads_and_gc:
#endif
// If we're shutting down, don't leave any idle GC work to do.
- if (sched_state == SCHED_SHUTTING_DOWN) {
+ if (RELAXED_LOAD(&sched_state) == SCHED_SHUTTING_DOWN) {
doIdleGCWork(cap, true /* all of it */);
}
@@ -1882,7 +1886,7 @@ delete_threads_and_gc:
releaseGCThreads(cap, idle_cap);
}
#endif
- if (heap_overflow && sched_state == SCHED_RUNNING) {
+ if (heap_overflow && RELAXED_LOAD(&sched_state) == SCHED_RUNNING) {
// GC set the heap_overflow flag. We should throw an exception if we
// can, or shut down otherwise.
@@ -1894,7 +1898,7 @@ delete_threads_and_gc:
// shutdown now. Ultimately we want the main thread to return to
// its caller with HeapExhausted, at which point the caller should
// call hs_exit(). The first step is to delete all the threads.
- sched_state = SCHED_INTERRUPTING;
+ RELAXED_STORE(&sched_state, SCHED_INTERRUPTING);
goto delete_threads_and_gc;
}
@@ -2656,8 +2660,8 @@ exitScheduler (bool wait_foreign USED_IF_THREADS)
task = newBoundTask();
// If we haven't killed all the threads yet, do it now.
- if (sched_state < SCHED_SHUTTING_DOWN) {
- sched_state = SCHED_INTERRUPTING;
+ if (RELAXED_LOAD(&sched_state) < SCHED_SHUTTING_DOWN) {
+ RELAXED_STORE(&sched_state, SCHED_INTERRUPTING);
Capability *cap = task->cap;
waitForCapability(&cap,task);
scheduleDoGC(&cap,task,true);
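The same reasoning covers the Schedule.c sites above: sched_state is a word-sized flag that only moves forward, so a stale relaxed read is at worst a slightly outdated hint. A stand-alone sketch (hypothetical names, plain C11 atomics rather than the RTS macros) of a worker polling such a flag while a shutdown path advances it:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <pthread.h>

    enum { RUNNING, INTERRUPTING, SHUTTING_DOWN };
    static _Atomic int state = RUNNING;

    static void *worker(void *arg)
    {
        (void)arg;
        /* Polling with relaxed loads is fine here: the worker only needs
         * an untorn, eventually visible value; it does not rely on the
         * flag to order any other memory accesses. */
        while (atomic_load_explicit(&state, memory_order_relaxed)
                   < SHUTTING_DOWN) {
            /* ... scheduler-like work ... */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);
        /* Relaxed stores to the same variable are still seen in order:
         * each atomic object has a single total modification order, so
         * the flag can only be observed to advance. */
        atomic_store_explicit(&state, INTERRUPTING, memory_order_relaxed);
        atomic_store_explicit(&state, SHUTTING_DOWN, memory_order_relaxed);
        pthread_join(&t, NULL);
        puts("shut down cleanly");
        return 0;
    }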