summaryrefslogtreecommitdiff
path: root/rts
diff options
context:
space:
mode:
authorBen Gamari <ben@smart-cactus.org>2022-11-10 19:08:55 -0500
committerMarge Bot <ben+marge-bot@smart-cactus.org>2022-12-16 16:12:45 -0500
commitf8e901dcc090ef81723fb9f3d8ea0a1baac4cbc3 (patch)
tree65844b02fb748364a34c3d4dded6149ed44f1861 /rts
parent86f20258ab7dbfb56e323ee811e9eaef80b077d3 (diff)
downloadhaskell-f8e901dcc090ef81723fb9f3d8ea0a1baac4cbc3.tar.gz
rts: Encapsulate recent_activity access
This makes it easier to ensure that it is accessed using the necessary atomic operations.
Diffstat (limited to 'rts')
-rw-r--r--rts/Capability.c2
-rw-r--r--rts/Schedule.c20
-rw-r--r--rts/Schedule.h38
-rw-r--r--rts/Timer.c8
-rw-r--r--rts/include/stg/SMP.h2
5 files changed, 45 insertions, 25 deletions
diff --git a/rts/Capability.c b/rts/Capability.c
index bf20d991e8..35214a6470 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -83,7 +83,7 @@ STATIC_INLINE bool
globalWorkToDo (void)
{
return RELAXED_LOAD(&sched_state) >= SCHED_INTERRUPTING
- || RELAXED_LOAD(&recent_activity) == ACTIVITY_INACTIVE; // need to check for deadlock
+ || getRecentActivity() == ACTIVITY_INACTIVE; // need to check for deadlock
}
#endif
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 62bd6bfbbb..80d573d975 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -86,7 +86,7 @@ bool heap_overflow = false;
*
* NB. must be StgWord, we do xchg() on it.
*/
-volatile StgWord recent_activity = ACTIVITY_YES;
+StgWord recent_activity = ACTIVITY_YES;
/* if this flag is set as well, give up execution
* LOCK: none (changes monotonically)
@@ -438,13 +438,13 @@ run_thread:
dirty_TSO(cap,t);
dirty_STACK(cap,t->stackobj);
- switch (SEQ_CST_LOAD(&recent_activity))
+ switch (getRecentActivity())
{
case ACTIVITY_DONE_GC: {
// ACTIVITY_DONE_GC means we turned off the timer signal to
// conserve power (see #1623). Re-enable it here.
uint32_t prev;
- prev = xchg((P_)&recent_activity, ACTIVITY_YES);
+ prev = setRecentActivity(ACTIVITY_YES);
if (prev == ACTIVITY_DONE_GC) {
#if !defined(PROFILING)
startTimer();
@@ -459,7 +459,7 @@ run_thread:
// wakeUpRts().
break;
default:
- SEQ_CST_STORE(&recent_activity, ACTIVITY_YES);
+ setRecentActivity(ACTIVITY_YES);
}
traceEventRunThread(cap, t);
@@ -957,7 +957,7 @@ scheduleDetectDeadlock (Capability **pcap, Task *task)
* we won't eagerly start a full GC just because we don't have
* any threads to run currently.
*/
- if (SEQ_CST_LOAD(&recent_activity) != ACTIVITY_INACTIVE) return;
+ if (getRecentActivity() != ACTIVITY_INACTIVE) return;
#endif
debugTrace(DEBUG_sched, "deadlocked, forcing major GC...");
@@ -1891,14 +1891,14 @@ delete_threads_and_gc:
traceSparkCounters(cap);
- switch (SEQ_CST_LOAD(&recent_activity)) {
+ switch (getRecentActivity()) {
case ACTIVITY_INACTIVE:
if (force_major) {
// We are doing a GC because the system has been idle for a
// timeslice and we need to check for deadlock. Record the
// fact that we've done a GC and turn off the timer signal;
// it will get re-enabled if we run any threads after the GC.
- SEQ_CST_STORE(&recent_activity, ACTIVITY_DONE_GC);
+ setRecentActivity(ACTIVITY_DONE_GC);
#if !defined(PROFILING)
stopTimer();
#endif
@@ -1910,13 +1910,15 @@ delete_threads_and_gc:
// the GC might have taken long enough for the timer to set
// recent_activity = ACTIVITY_MAYBE_NO or ACTIVITY_INACTIVE,
// but we aren't necessarily deadlocked:
- SEQ_CST_STORE(&recent_activity, ACTIVITY_YES);
+ setRecentActivity(ACTIVITY_YES);
break;
case ACTIVITY_DONE_GC:
// If we are actually active, the scheduler will reset the
// recent_activity flag and re-enable the timer.
break;
+ case ACTIVITY_YES:
+ break;
}
#if defined(THREADED_RTS)
@@ -2719,7 +2721,7 @@ void
initScheduler(void)
{
sched_state = SCHED_RUNNING;
- SEQ_CST_STORE(&recent_activity, ACTIVITY_YES);
+ setRecentActivity(ACTIVITY_YES);
/* Initialise the mutex and condition variables used by
diff --git a/rts/Schedule.h b/rts/Schedule.h
index b62e5a9762..f6ce795c87 100644
--- a/rts/Schedule.h
+++ b/rts/Schedule.h
@@ -86,23 +86,41 @@ extern volatile StgWord sched_state;
* If the scheduler finds ACTIVITY_DONE_GC and it has a thread to run,
* it enables the timer again with startTimer().
*/
-#define ACTIVITY_YES 0
- // the RTS is active
-#define ACTIVITY_MAYBE_NO 1
- // no activity since the last timer signal
-#define ACTIVITY_INACTIVE 2
- // RtsFlags.GcFlags.idleGCDelayTime has passed with no activity
-#define ACTIVITY_DONE_GC 3
- // like ACTIVITY_INACTIVE, but we've done a GC too (if idle GC is
- // enabled) and the interval timer is now turned off.
+enum RecentActivity {
+ // the RTS is active
+ ACTIVITY_YES = 0,
+ // no activity since the last timer signal
+ ACTIVITY_MAYBE_NO = 1,
+ // RtsFlags.GcFlags.idleGCDelayTime has passed with no activity
+ ACTIVITY_INACTIVE = 2,
+ // like ACTIVITY_INACTIVE, but we've done a GC too (if idle GC is
+ // enabled) and the interval timer is now turned off.
+ ACTIVITY_DONE_GC = 3,
+};
/* Recent activity flag.
* Locks required : Transition from MAYBE_NO to INACTIVE
* happens in the timer signal, so it is atomic. Transition from
* INACTIVE to DONE_GC happens under sched_mutex. No lock required
* to set it to ACTIVITY_YES.
+ *
+ * N.B. we must always use atomics here since even in the non-threaded runtime
+ * the timer may be provided via a signal.
*/
-extern volatile StgWord recent_activity;
+extern StgWord recent_activity;
+
+INLINE_HEADER enum RecentActivity
+setRecentActivity(enum RecentActivity new_value)
+{
+ StgWord old = SEQ_CST_XCHG_ALWAYS((StgPtr) &recent_activity, (StgWord) new_value);
+ return (enum RecentActivity) old;
+}
+
+INLINE_HEADER enum RecentActivity
+getRecentActivity(void)
+{
+ return RELAXED_LOAD_ALWAYS(&recent_activity);
+}
extern bool heap_overflow;
diff --git a/rts/Timer.c b/rts/Timer.c
index 5961e5a27b..7685a7f262 100644
--- a/rts/Timer.c
+++ b/rts/Timer.c
@@ -134,16 +134,16 @@ handle_tick(int unused STG_UNUSED)
* for threads that are deadlocked. However, ensure we wait
* at least interIdleGCWait (+RTS -Iw) between idle GCs.
*/
- switch (SEQ_CST_LOAD_ALWAYS(&recent_activity)) {
+ switch (getRecentActivity()) {
case ACTIVITY_YES:
- SEQ_CST_STORE_ALWAYS(&recent_activity, ACTIVITY_MAYBE_NO);
+ setRecentActivity(ACTIVITY_MAYBE_NO);
idle_ticks_to_gc = RtsFlags.GcFlags.idleGCDelayTime /
RtsFlags.MiscFlags.tickInterval;
break;
case ACTIVITY_MAYBE_NO:
if (idle_ticks_to_gc == 0 && inter_gc_ticks_to_gc == 0) {
if (RtsFlags.GcFlags.doIdleGC) {
- SEQ_CST_STORE_ALWAYS(&recent_activity, ACTIVITY_INACTIVE);
+ setRecentActivity(ACTIVITY_INACTIVE);
inter_gc_ticks_to_gc = RtsFlags.GcFlags.interIdleGCWait /
RtsFlags.MiscFlags.tickInterval;
#if defined(THREADED_RTS)
@@ -152,7 +152,7 @@ handle_tick(int unused STG_UNUSED)
// the GC.
#endif
} else {
- SEQ_CST_STORE_ALWAYS(&recent_activity, ACTIVITY_DONE_GC);
+ setRecentActivity(ACTIVITY_DONE_GC);
// disable timer signals (see #1623, #5991, #9105)
// but only if we're not profiling (e.g. passed -h or -p RTS
// flags). If we are profiling we need to keep the timer active
diff --git a/rts/include/stg/SMP.h b/rts/include/stg/SMP.h
index c9ff736032..2c14c17081 100644
--- a/rts/include/stg/SMP.h
+++ b/rts/include/stg/SMP.h
@@ -35,7 +35,7 @@ void arm_atomic_spin_unlock(void);
#define SEQ_CST_STORE_ALWAYS(ptr,val) __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST)
#define SEQ_CST_ADD_ALWAYS(ptr,val) __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST)
#define SEQ_CST_SUB_ALWAYS(ptr,val) __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST)
-
+#define SEQ_CST_XCHG_ALWAYS(ptr,val) __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST)
#if defined(THREADED_RTS)