author    Ben Gamari <ben@smart-cactus.org>           2022-11-11 17:59:12 -0500
committer Marge Bot <ben+marge-bot@smart-cactus.org>  2022-12-16 16:12:45 -0500
commit    7ca683e44f9f7a9a7984bbed4f49712838638fc8 (patch)
tree      8824b7a97d4ecdee78788398af1f742076d699f4
parent    e0affaa9fc3e6dc0e65808afa383426b7fe9420a (diff)
download  haskell-7ca683e44f9f7a9a7984bbed4f49712838638fc8.tar.gz
rts: Encapsulate sched_state
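In brief (summarizing the diff below): the global sched_state, previously a volatile StgWord read and written through a mix of plain accesses, RELAXED_LOAD, and RELAXED_STORE, is now accessed only through two inline helpers, getSchedState() and setSchedState(), which use sequentially consistent atomic operations; the three scheduler states also become a proper enum SchedState instead of #defines.

As a minimal sketch of the pattern in portable C11 (the RTS itself uses its own SEQ_CST_STORE_ALWAYS/SEQ_CST_LOAD_ALWAYS macros and INLINE_HEADER rather than <stdatomic.h>, and the StgWord typedef here is a stand-in), the encapsulation amounts to:

    #include <stdatomic.h>

    typedef unsigned long StgWord;          /* stand-in for the RTS type */

    enum SchedState {
        SCHED_RUNNING       = 0,            /* running as normal */
        SCHED_INTERRUPTING  = 1,            /* before threads are deleted */
        SCHED_SHUTTING_DOWN = 2,            /* final shutdown */
    };

    static _Atomic StgWord sched_state = SCHED_RUNNING;

    /* All writers funnel through one sequentially consistent store... */
    static inline void setSchedState(enum SchedState ss)
    {
        atomic_store_explicit(&sched_state, (StgWord) ss,
                              memory_order_seq_cst);
    }

    /* ...and all readers through one sequentially consistent load. */
    static inline enum SchedState getSchedState(void)
    {
        return (enum SchedState)
            atomic_load_explicit(&sched_state, memory_order_seq_cst);
    }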
-rw-r--r--  rts/Capability.c             6
-rw-r--r--  rts/Schedule.c              47
-rw-r--r--  rts/Schedule.h              20
-rw-r--r--  rts/eventlog/EventLog.c      2
-rw-r--r--  rts/posix/Select.c           4
-rw-r--r--  rts/posix/Signals.c          4
-rw-r--r--  rts/sm/NonMoving.c           6
-rw-r--r--  rts/win32/AsyncMIO.c         2
-rw-r--r--  rts/win32/AwaitEvent.c       2
-rw-r--r--  rts/win32/ConsoleHandler.c   2
10 files changed, 53 insertions, 42 deletions
diff --git a/rts/Capability.c b/rts/Capability.c
index 26afddc4df..7ca20a6fc3 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -82,7 +82,7 @@ Capability * rts_unsafeGetMyCapability (void)
STATIC_INLINE bool
globalWorkToDo (void)
{
- return RELAXED_LOAD(&sched_state) >= SCHED_INTERRUPTING
+ return getSchedState() >= SCHED_INTERRUPTING
|| getRecentActivity() == ACTIVITY_INACTIVE; // need to check for deadlock
}
#endif
@@ -581,7 +581,7 @@ releaseCapability_ (Capability* cap,
// is interrupted, we only create a worker task if there
// are threads that need to be completed. If the system is
// shutting down, we never create a new worker.
- if (RELAXED_LOAD(&sched_state) < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
+ if (getSchedState() < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
debugTrace(DEBUG_sched,
"starting new worker on capability %d", cap->no);
startWorkerTask(cap);
@@ -1153,7 +1153,7 @@ shutdownCapability (Capability *cap USED_IF_THREADS,
// isn't safe, for one thing).
for (i = 0; /* i < 50 */; i++) {
- ASSERT(sched_state == SCHED_SHUTTING_DOWN);
+ ASSERT(getSchedState() == SCHED_SHUTTING_DOWN);
debugTrace(DEBUG_sched,
"shutting down capability %d, attempt %d", cap->no, i);
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 9dfb08bdf5..54fc976a44 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -91,7 +91,7 @@ StgWord recent_activity = ACTIVITY_YES;
/* if this flag is set as well, give up execution
* LOCK: none (changes monotonically)
*/
-volatile StgWord sched_state = SCHED_RUNNING;
+StgWord sched_state = SCHED_RUNNING;
/*
* This mutex protects most of the global scheduler data in
@@ -166,7 +166,6 @@ static void deleteAllThreads (void);
static void deleteThread_(StgTSO *tso);
#endif
-
/* ---------------------------------------------------------------------------
Main scheduling loop.
@@ -254,7 +253,7 @@ schedule (Capability *initialCapability, Task *task)
// * We might be left with threads blocked in foreign calls,
// we should really attempt to kill these somehow (TODO).
- switch (RELAXED_LOAD(&sched_state)) {
+ switch (getSchedState()) {
case SCHED_RUNNING:
break;
case SCHED_INTERRUPTING:
@@ -266,7 +265,7 @@ schedule (Capability *initialCapability, Task *task)
// other Capability did the final GC, or we did it above,
// either way we can fall through to the SCHED_SHUTTING_DOWN
// case now.
- ASSERT(RELAXED_LOAD(&sched_state) == SCHED_SHUTTING_DOWN);
+ ASSERT(getSchedState() == SCHED_SHUTTING_DOWN);
// fall through
case SCHED_SHUTTING_DOWN:
@@ -321,7 +320,7 @@ schedule (Capability *initialCapability, Task *task)
*/
awaitEvent (cap, emptyRunQueue(cap));
#else
- ASSERT(sched_state >= SCHED_INTERRUPTING);
+ ASSERT(getSchedState() >= SCHED_INTERRUPTING);
#endif
}
#endif
@@ -371,7 +370,7 @@ schedule (Capability *initialCapability, Task *task)
// killed, kill it now. This sometimes happens when a finalizer
// thread is created by the final GC, or a thread previously
// in a foreign call returns.
- if (RELAXED_LOAD(&sched_state) >= SCHED_INTERRUPTING &&
+ if (getSchedState() >= SCHED_INTERRUPTING &&
!(t->what_next == ThreadComplete || t->what_next == ThreadKilled)) {
deleteThread(t);
}
@@ -688,7 +687,7 @@ scheduleYield (Capability **pcap, Task *task)
if (!shouldYieldCapability(cap,task,false) &&
(!emptyRunQueue(cap) ||
!emptyInbox(cap) ||
- RELAXED_LOAD(&sched_state) >= SCHED_INTERRUPTING)) {
+ getSchedState() >= SCHED_INTERRUPTING)) {
return;
}
@@ -991,7 +990,7 @@ scheduleDetectDeadlock (Capability **pcap, Task *task)
}
// either we have threads to run, or we were interrupted:
- ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING);
+ ASSERT(!emptyRunQueue(cap) || getSchedState() >= SCHED_INTERRUPTING);
return;
}
@@ -1343,7 +1342,7 @@ scheduleHandleThreadFinished (Capability *cap, Task *task, StgTSO *t)
if (task->incall->ret) {
*(task->incall->ret) = NULL;
}
- if (RELAXED_LOAD(&sched_state) >= SCHED_INTERRUPTING) {
+ if (getSchedState() >= SCHED_INTERRUPTING) {
if (heap_overflow) {
task->incall->rstat = HeapExhausted;
} else {
@@ -1603,7 +1602,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
// cycle.
#endif
- if (RELAXED_LOAD(&sched_state) == SCHED_SHUTTING_DOWN) {
+ if (getSchedState() == SCHED_SHUTTING_DOWN) {
// The final GC has already been done, and the system is
// shutting down. We'll probably deadlock if we try to GC
// now.
@@ -1622,7 +1621,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
major_gc = (collect_gen == RtsFlags.GcFlags.generations-1);
#if defined(THREADED_RTS)
- if (RELAXED_LOAD(&sched_state) < SCHED_INTERRUPTING
+ if (getSchedState() < SCHED_INTERRUPTING
&& RtsFlags.ParFlags.parGcEnabled
&& collect_gen >= RtsFlags.ParFlags.parGcGen
&& ! oldest_gen->mark)
@@ -1715,7 +1714,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
}
if (was_syncing &&
(prev_sync == SYNC_GC_SEQ || prev_sync == SYNC_GC_PAR) &&
- !(RELAXED_LOAD(&sched_state) == SCHED_INTERRUPTING && force_major)) {
+ !(getSchedState() == SCHED_INTERRUPTING && force_major)) {
// someone else had a pending sync request for a GC, so
// let's assume GC has been done and we don't need to GC
// again.
@@ -1723,7 +1722,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
// need to do the final GC.
return;
}
- if (RELAXED_LOAD(&sched_state) == SCHED_SHUTTING_DOWN) {
+ if (getSchedState() == SCHED_SHUTTING_DOWN) {
// The scheduler might now be shutting down. We tested
// this above, but it might have become true since then as
// we yielded the capability in requestSync().
@@ -1826,7 +1825,7 @@ delete_threads_and_gc:
* threads in the system.
* Checking for major_gc ensures that the last GC is major.
*/
- if (RELAXED_LOAD(&sched_state) == SCHED_INTERRUPTING && major_gc) {
+ if (getSchedState() == SCHED_INTERRUPTING && major_gc) {
deleteAllThreads();
#if defined(THREADED_RTS)
// Discard all the sparks from every Capability. Why?
@@ -1840,7 +1839,7 @@ delete_threads_and_gc:
discardSparksCap(getCapability(i));
}
#endif
- RELAXED_STORE(&sched_state, SCHED_SHUTTING_DOWN);
+ setSchedState(SCHED_SHUTTING_DOWN);
}
/*
@@ -1885,7 +1884,7 @@ delete_threads_and_gc:
#endif
// If we're shutting down, don't leave any idle GC work to do.
- if (RELAXED_LOAD(&sched_state) == SCHED_SHUTTING_DOWN) {
+ if (getSchedState() == SCHED_SHUTTING_DOWN) {
doIdleGCWork(cap, true /* all of it */);
}
@@ -1962,7 +1961,7 @@ delete_threads_and_gc:
releaseGCThreads(cap, idle_cap);
}
#endif
- if (heap_overflow && RELAXED_LOAD(&sched_state) == SCHED_RUNNING) {
+ if (heap_overflow && getSchedState() == SCHED_RUNNING) {
// GC set the heap_overflow flag. We should throw an exception if we
// can, or shut down otherwise.
@@ -1974,7 +1973,7 @@ delete_threads_and_gc:
// shutdown now. Ultimately we want the main thread to return to
// its caller with HeapExhausted, at which point the caller should
// call hs_exit(). The first step is to delete all the threads.
- RELAXED_STORE(&sched_state, SCHED_INTERRUPTING);
+ setSchedState(SCHED_INTERRUPTING);
goto delete_threads_and_gc;
}
@@ -2720,7 +2719,7 @@ startWorkerTasks (uint32_t from USED_IF_THREADS, uint32_t to USED_IF_THREADS)
void
initScheduler(void)
{
- sched_state = SCHED_RUNNING;
+ setSchedState(SCHED_RUNNING);
setRecentActivity(ACTIVITY_YES);
@@ -2763,8 +2762,8 @@ exitScheduler (bool wait_foreign USED_IF_THREADS)
Task *task = newBoundTask();
// If we haven't killed all the threads yet, do it now.
- if (RELAXED_LOAD(&sched_state) < SCHED_SHUTTING_DOWN) {
- RELAXED_STORE(&sched_state, SCHED_INTERRUPTING);
+ if (getSchedState() < SCHED_SHUTTING_DOWN) {
+ setSchedState(SCHED_INTERRUPTING);
nonmovingStop();
Capability *cap = task->cap;
waitForCapability(&cap,task);
@@ -2772,7 +2771,7 @@ exitScheduler (bool wait_foreign USED_IF_THREADS)
ASSERT(task->incall->tso == NULL);
releaseCapability(cap);
}
- ASSERT(sched_state == SCHED_SHUTTING_DOWN);
+ ASSERT(getSchedState() == SCHED_SHUTTING_DOWN);
shutdownCapabilities(task, wait_foreign);
@@ -2851,8 +2850,8 @@ performMajorGC(void)
void
interruptStgRts(void)
{
- ASSERT(sched_state != SCHED_SHUTTING_DOWN);
- sched_state = SCHED_INTERRUPTING;
+ ASSERT(getSchedState() != SCHED_SHUTTING_DOWN);
+ setSchedState(SCHED_INTERRUPTING);
interruptAllCapabilities();
#if defined(THREADED_RTS)
wakeUpRts();
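A property worth noting before the header changes below: the Schedule.c comment above describes sched_state as changing monotonically, i.e. within a run it only ever advances SCHED_RUNNING -> SCHED_INTERRUPTING -> SCHED_SHUTTING_DOWN (initScheduler resets it to SCHED_RUNNING only at startup). That is why call sites throughout this patch can use ordered comparisons instead of testing individual states, for example:

    /* Once true, this stays true for the rest of the run, because the
     * state only advances; no lock is needed around the check. */
    if (getSchedState() >= SCHED_INTERRUPTING) {
        /* shutdown has at least begun; stop taking on new work */
    }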
diff --git a/rts/Schedule.h b/rts/Schedule.h
index f6ce795c87..415d2a3291 100644
--- a/rts/Schedule.h
+++ b/rts/Schedule.h
@@ -64,11 +64,23 @@ void releaseAllCapabilities(uint32_t n, Capability *keep_cap, Task *task);
/* The state of the scheduler. This is used to control the sequence
* of events during shutdown. See Note [shutdown] in Schedule.c.
*/
-#define SCHED_RUNNING 0 /* running as normal */
-#define SCHED_INTERRUPTING 1 /* before threads are deleted */
-#define SCHED_SHUTTING_DOWN 2 /* final shutdown */
+enum SchedState {
+ SCHED_RUNNING = 0, /* running as normal */
+ SCHED_INTERRUPTING = 1, /* before threads are deleted */
+ SCHED_SHUTTING_DOWN = 2, /* final shutdown */
+};
+
+extern StgWord sched_state;
-extern volatile StgWord sched_state;
+INLINE_HEADER void setSchedState(enum SchedState ss)
+{
+ SEQ_CST_STORE_ALWAYS(&sched_state, (StgWord) ss);
+}
+
+INLINE_HEADER enum SchedState getSchedState(void)
+{
+ return (enum SchedState) SEQ_CST_LOAD_ALWAYS(&sched_state);
+}
/*
* flag that tracks whether we have done any execution in this time
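Taken together, the new Schedule.h API gives the shutdown path the following shape (an abridged sketch of the exitScheduler hunk above; the capability plumbing and THREADED_RTS conditionals are omitted):

    /* Abridged: how exitScheduler() drives the state machine. */
    if (getSchedState() < SCHED_SHUTTING_DOWN) {
        setSchedState(SCHED_INTERRUPTING);  /* ask threads to finish */
        /* ... acquire a capability and let the final GC run;
           scheduleDoGC() then moves the state to SCHED_SHUTTING_DOWN ... */
    }
    ASSERT(getSchedState() == SCHED_SHUTTING_DOWN);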
diff --git a/rts/eventlog/EventLog.c b/rts/eventlog/EventLog.c
index 1522645112..66ae98b019 100644
--- a/rts/eventlog/EventLog.c
+++ b/rts/eventlog/EventLog.c
@@ -478,7 +478,7 @@ endEventLogging(void)
//
// N.B. Don't flush if shutting down: this was done in
// finishCapEventLogging and the capabilities have already been freed.
- if (sched_state != SCHED_SHUTTING_DOWN) {
+ if (getSchedState() != SCHED_SHUTTING_DOWN) {
flushEventLog(NULL);
}
diff --git a/rts/posix/Select.c b/rts/posix/Select.c
index 4e698685fa..89a46fd763 100644
--- a/rts/posix/Select.c
+++ b/rts/posix/Select.c
@@ -362,7 +362,7 @@ awaitEvent(Capability *cap, bool wait)
/* we were interrupted, return to the scheduler immediately.
*/
- if (sched_state >= SCHED_INTERRUPTING) {
+ if (getSchedState() >= SCHED_INTERRUPTING) {
return; /* still hold the lock */
}
@@ -459,7 +459,7 @@ awaitEvent(Capability *cap, bool wait)
}
}
- } while (wait && sched_state == SCHED_RUNNING
+ } while (wait && getSchedState() == SCHED_RUNNING
&& emptyRunQueue(cap));
}
diff --git a/rts/posix/Signals.c b/rts/posix/Signals.c
index 0d252d56b2..1d96fcfdf5 100644
--- a/rts/posix/Signals.c
+++ b/rts/posix/Signals.c
@@ -350,7 +350,7 @@ anyUserHandlers(void)
void
awaitUserSignals(void)
{
- while (!signals_pending() && sched_state == SCHED_RUNNING) {
+ while (!signals_pending() && getSchedState() == SCHED_RUNNING) {
pause();
}
}
@@ -521,7 +521,7 @@ shutdown_handler(int sig STG_UNUSED)
// If we're already trying to interrupt the RTS, terminate with
// extreme prejudice. So the first ^C tries to exit the program
// cleanly, and the second one just kills it.
- if (sched_state >= SCHED_INTERRUPTING) {
+ if (getSchedState() >= SCHED_INTERRUPTING) {
stg_exit(EXIT_INTERRUPTED);
} else {
interruptStgRts();
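The handler above is one place the atomic accessors plausibly earn their keep: a POSIX signal handler can run concurrently with scheduler threads, and C guarantees atomicity across threads only for atomic objects (volatile sig_atomic_t covers only interruption of the same thread), whereas the seq-cst load is well defined. Annotated, the double-^C logic reads:

    /* First ^C: request a clean shutdown. Second ^C: the state is
     * already >= SCHED_INTERRUPTING, so terminate immediately. */
    static void shutdown_handler(int sig STG_UNUSED)
    {
        if (getSchedState() >= SCHED_INTERRUPTING) {
            stg_exit(EXIT_INTERRUPTED);
        } else {
            interruptStgRts();  /* sets SCHED_INTERRUPTING, wakes the RTS */
        }
    }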
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index 0f4af4ed69..41510e7f8e 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -921,7 +921,7 @@ void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
#if defined(THREADED_RTS)
// We can't start a new collection until the old one has finished
// We also don't run in final GC
- if (concurrent_coll_running || sched_state > SCHED_RUNNING) {
+ if (concurrent_coll_running || getSchedState() > SCHED_RUNNING) {
return;
}
#endif
@@ -994,7 +994,7 @@ void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
// again for the sync if we let it go, because it'll immediately start doing
// a major GC, because that's what we do when exiting scheduler (see
// exitScheduler()).
- if (sched_state == SCHED_RUNNING) {
+ if (getSchedState() == SCHED_RUNNING) {
concurrent_coll_running = true;
nonmoving_write_barrier_enabled = true;
debugTrace(DEBUG_nonmoving_gc, "Starting concurrent mark thread");
@@ -1086,7 +1086,7 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
Task *task = newBoundTask();
// If at this point if we've decided to exit then just return
- if (sched_state > SCHED_RUNNING) {
+ if (getSchedState() > SCHED_RUNNING) {
// Note that we break our invariants here and leave segments in
// nonmovingHeap.sweep_list, don't free nonmoving_large_objects etc.
// However because we won't be running mark-sweep in the final GC this
diff --git a/rts/win32/AsyncMIO.c b/rts/win32/AsyncMIO.c
index 65204bd416..00d1638d63 100644
--- a/rts/win32/AsyncMIO.c
+++ b/rts/win32/AsyncMIO.c
@@ -247,7 +247,7 @@ start:
if (completed_hw == 0) {
// empty table, drop lock and wait
OS_RELEASE_LOCK(&queue_lock);
- if ( wait && sched_state == SCHED_RUNNING ) {
+ if ( wait && getSchedState() == SCHED_RUNNING ) {
DWORD dwRes = WaitForMultipleObjects(2, wait_handles,
FALSE, INFINITE);
switch (dwRes) {
diff --git a/rts/win32/AwaitEvent.c b/rts/win32/AwaitEvent.c
index 6ddd2103c0..fbe037c2a2 100644
--- a/rts/win32/AwaitEvent.c
+++ b/rts/win32/AwaitEvent.c
@@ -56,7 +56,7 @@ awaitEvent(Capability *cap, bool wait)
// - the run-queue is now non- empty
} while (wait
- && sched_state == SCHED_RUNNING
+ && getSchedState() == SCHED_RUNNING
&& emptyRunQueue(cap)
);
}
diff --git a/rts/win32/ConsoleHandler.c b/rts/win32/ConsoleHandler.c
index 194a659a8d..a7940a60c0 100644
--- a/rts/win32/ConsoleHandler.c
+++ b/rts/win32/ConsoleHandler.c
@@ -91,7 +91,7 @@ static BOOL WINAPI shutdown_handler(DWORD dwCtrlType)
// If we're already trying to interrupt the RTS, terminate with
// extreme prejudice. So the first ^C tries to exit the program
// cleanly, and the second one just kills it.
- if (sched_state >= SCHED_INTERRUPTING) {
+ if (getSchedState() >= SCHED_INTERRUPTING) {
stg_exit(EXIT_INTERRUPTED);
} else {
interruptStgRts();