-rw-r--r--   includes/mkDerivedConstants.c |  1
-rw-r--r--   rts/Capability.c              | 10
-rw-r--r--   rts/Capability.h              | 46
-rw-r--r--   rts/HeapStackCheck.cmm        |  3
-rw-r--r--   rts/Messages.c                |  4
-rw-r--r--   rts/Schedule.c                | 20
-rw-r--r--   rts/Timer.c                   |  2
-rw-r--r--   rts/sm/GC.c                   |  2
8 files changed, 66 insertions(+), 22 deletions(-)
diff --git a/includes/mkDerivedConstants.c b/includes/mkDerivedConstants.c
index a2c9160e95..6688330a8c 100644
--- a/includes/mkDerivedConstants.c
+++ b/includes/mkDerivedConstants.c
@@ -238,6 +238,7 @@ main(int argc, char *argv[])
struct_field(Capability, no);
struct_field(Capability, mut_lists);
struct_field(Capability, context_switch);
+ struct_field(Capability, interrupt);
struct_field(Capability, sparks);
struct_field(bdescr, start);
diff --git a/rts/Capability.c b/rts/Capability.c
index 6c84d1ec35..26e420970b 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -342,7 +342,7 @@ initCapabilities( void )
* soon as possible.
* ------------------------------------------------------------------------- */
-void setContextSwitches(void)
+void contextSwitchAllCapabilities(void)
{
nat i;
for (i=0; i < n_capabilities; i++) {
@@ -350,6 +350,14 @@ void setContextSwitches(void)
}
}
+void interruptAllCapabilities(void)
+{
+ nat i;
+ for (i=0; i < n_capabilities; i++) {
+ interruptCapability(&capabilities[i]);
+ }
+}
+
/* ----------------------------------------------------------------------------
* Give a Capability to a Task. The task must currently be sleeping
* on its condition variable.
diff --git a/rts/Capability.h b/rts/Capability.h
index 0bc2985d9e..1957487329 100644
--- a/rts/Capability.h
+++ b/rts/Capability.h
@@ -72,10 +72,20 @@ struct Capability_ {
// block for allocating pinned objects into
bdescr *pinned_object_block;
- // Context switch flag. We used to have one global flag, now one
- // per capability. Locks required : none (conflicts are harmless)
+ // Context switch flag. When non-zero, this means: stop running
+ // Haskell code, and switch threads.
int context_switch;
+ // Interrupt flag. Like the context_switch flag, this also
+ // indicates that we should stop running Haskell code, but we do
+ // *not* switch threads. This is used to stop a Capability in
+ // order to do GC, for example.
+ //
+ // The interrupt flag is always reset before we start running
+ // Haskell code, unlike the context_switch flag which is only
+ // reset after we have executed the context switch.
+ int interrupt;
+
#if defined(THREADED_RTS)
// Worker Tasks waiting in the wings. Singly-linked.
Task *spare_workers;
@@ -275,9 +285,14 @@ void shutdownCapability (Capability *cap, Task *task, rtsBool wait_foreign);
void shutdownCapabilities(Task *task, rtsBool wait_foreign);
// cause all capabilities to context switch as soon as possible.
-void setContextSwitches(void);
+void contextSwitchAllCapabilities(void);
INLINE_HEADER void contextSwitchCapability(Capability *cap);
+// cause all capabilities to stop running Haskell code and return to
+// the scheduler as soon as possible.
+void interruptAllCapabilities(void);
+INLINE_HEADER void interruptCapability(Capability *cap);
+
// Free all capabilities
void freeCapabilities (void);
@@ -346,14 +361,27 @@ discardSparksCap (Capability *cap)
#endif
INLINE_HEADER void
-contextSwitchCapability (Capability *cap)
+stopCapability (Capability *cap)
{
- // setting HpLim to NULL ensures that the next heap check will
- // fail, and the thread will return to the scheduler.
+ // setting HpLim to NULL tries to make the next heap check fail,
+ // which will cause the thread to return to the scheduler. It may
+ // not work - the thread might be updating HpLim itself at the
+ // same time - so we also have the context_switch/interrupt flags
+ // as a sticky way to tell the thread to stop.
cap->r.rHpLim = NULL;
- // But just in case it didn't work (the target thread might be
- // modifying HpLim at the same time), we set the end-of-block
- // context-switch flag too:
+}
+
+INLINE_HEADER void
+interruptCapability (Capability *cap)
+{
+ stopCapability(cap);
+ cap->interrupt = 1;
+}
+
+INLINE_HEADER void
+contextSwitchCapability (Capability *cap)
+{
+ stopCapability(cap);
cap->context_switch = 1;
}
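
The new Capability.h interface separates a one-shot "switch threads" request
from a sticky "just return to the scheduler" request, both delivered through
the same HpLim trick. Below is a minimal standalone C model of that protocol;
the field and function names mirror the patch, but the struct is cut down and
the whole thing is illustrative rather than the real RTS code:

    #include <stddef.h>

    typedef struct Capability_ {
        void *rHpLim;        // heap-check limit; NULL forces the next check to fail
        int context_switch;  // sticky: yield, then switch to another thread
        int interrupt;       // sticky: yield, but resume the same thread
    } Capability;

    // Clearing rHpLim asks the mutator to fail its next heap check.  The
    // mutator may be overwriting rHpLim concurrently, so this alone is not
    // reliable; the sticky flags set by the two callers below are what
    // guarantee the thread eventually returns to the scheduler.
    static void stopCapability (Capability *cap)
    {
        cap->rHpLim = NULL;
    }

    static void interruptCapability (Capability *cap)
    {
        stopCapability(cap);
        cap->interrupt = 1;
    }

    static void contextSwitchCapability (Capability *cap)
    {
        stopCapability(cap);
        cap->context_switch = 1;
    }
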
diff --git a/rts/HeapStackCheck.cmm b/rts/HeapStackCheck.cmm
index d80e101c59..cb75af01d8 100644
--- a/rts/HeapStackCheck.cmm
+++ b/rts/HeapStackCheck.cmm
@@ -95,7 +95,8 @@ import LeaveCriticalSection;
CLOSE_NURSERY(); \
CurrentNursery = bdescr_link(CurrentNursery); \
OPEN_NURSERY(); \
- if (Capability_context_switch(MyCapability()) != 0 :: CInt) { \
+ if (Capability_context_switch(MyCapability()) != 0 :: CInt || \
+ Capability_interrupt(MyCapability()) != 0 :: CInt) { \
R1 = ThreadYielding; \
goto sched; \
} else { \
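
In Cmm terms the change is just a second flag test on the nursery-exhausted
path: the thread now yields if either flag is raised. Roughly the same check
rendered in C, with a cut-down Capability and an invented return convention
standing in for the Cmm macros:

    typedef struct {
        int context_switch;
        int interrupt;
    } Capability;

    enum { KeepRunning = 0, ThreadYielding = 1 };

    // Sketch of the heap-check slow path above: when the current nursery
    // block is exhausted, return to the scheduler if either flag is set,
    // otherwise continue running in the next nursery block.
    static int heapCheckSlowPath (Capability *cap)
    {
        if (cap->context_switch != 0 || cap->interrupt != 0) {
            return ThreadYielding;   // R1 = ThreadYielding; goto sched
        }
        return KeepRunning;          // OPEN_NURSERY() and carry on
    }
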
diff --git a/rts/Messages.c b/rts/Messages.c
index 5dec6c6927..6cb66479ee 100644
--- a/rts/Messages.c
+++ b/rts/Messages.c
@@ -46,9 +46,9 @@ void sendMessage(Capability *from_cap, Capability *to_cap, Message *msg)
if (to_cap->running_task == NULL) {
to_cap->running_task = myTask();
// precond for releaseCapability_()
- releaseCapability_(to_cap,rtsFalse);
+ releaseCapability_(to_cap,rtsFalse);
} else {
- contextSwitchCapability(to_cap);
+ interruptCapability(to_cap);
}
RELEASE_LOCK(&to_cap->lock);
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 04a66e31df..cd704d2871 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -415,6 +415,9 @@ run_thread:
SetLastError(t->saved_winerror);
#endif
+ // reset the interrupt flag before running Haskell code
+ cap->interrupt = 0;
+
cap->in_haskell = rtsTrue;
dirty_TSO(cap,t);
@@ -521,7 +524,7 @@ run_thread:
break;
case ThreadYielding:
- if (scheduleHandleYield(cap, t, prev_what_next)) {
+ if (scheduleHandleYield(cap, t, prev_what_next)) {
// shortcut for switching between compiler/interpreter:
goto run_thread;
}
@@ -1167,14 +1170,17 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
// the CPU because the tick always arrives during GC). This way
// penalises threads that do a lot of allocation, but that seems
// better than the alternative.
- cap->context_switch = 0;
-
+ if (cap->context_switch != 0) {
+ cap->context_switch = 0;
+ appendToRunQueue(cap,t);
+ } else {
+ pushOnRunQueue(cap,t);
+ }
+
IF_DEBUG(sanity,
//debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
checkTSO(t));
- appendToRunQueue(cap,t);
-
return rtsFalse;
}
@@ -1371,7 +1377,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
return cap; // NOTE: task->cap might have changed here
}
- setContextSwitches();
+ interruptAllCapabilities();
// The final shutdown GC is always single-threaded, because it's
// possible that some of the Capabilities have no worker threads.
@@ -2145,7 +2151,7 @@ void
interruptStgRts(void)
{
sched_state = SCHED_INTERRUPTING;
- setContextSwitches();
+ interruptAllCapabilities();
#if defined(THREADED_RTS)
wakeUpRts();
#endif
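
On the scheduler side the two flags now have different lifetimes: interrupt is
cleared every time a thread enters Haskell code, while context_switch survives
until a switch actually happens, and only a genuine context switch sends the
yielding thread to the back of the run queue. A condensed C model of that
policy; the TSO type and the queue helpers are simplified stand-ins for the
RTS versions:

    #include <stddef.h>

    typedef struct TSO_ { struct TSO_ *link; } TSO;

    typedef struct {
        int context_switch;
        int interrupt;
        TSO *run_queue_hd;   // front of the run queue
        TSO *run_queue_tl;   // back of the run queue
    } Capability;

    // Put a thread at the front of the queue: it runs again next.
    static void pushOnRunQueue (Capability *cap, TSO *t)
    {
        t->link = cap->run_queue_hd;
        cap->run_queue_hd = t;
        if (cap->run_queue_tl == NULL) { cap->run_queue_tl = t; }
    }

    // Put a thread at the back of the queue: round-robin behaviour.
    static void appendToRunQueue (Capability *cap, TSO *t)
    {
        t->link = NULL;
        if (cap->run_queue_tl != NULL) { cap->run_queue_tl->link = t; }
        else { cap->run_queue_hd = t; }
        cap->run_queue_tl = t;
    }

    // What the patched scheduleHandleYield does with a yielding thread.
    static void handleYield (Capability *cap, TSO *t)
    {
        if (cap->context_switch != 0) {
            cap->context_switch = 0;    // consume the request...
            appendToRunQueue(cap, t);   // ...and genuinely switch threads
        } else {
            pushOnRunQueue(cap, t);     // interrupted only: resume this thread
        }
    }

    // The interrupt flag, by contrast, is reset unconditionally just
    // before the scheduler drops back into Haskell code.
    static void beforeRunningHaskell (Capability *cap)
    {
        cap->interrupt = 0;
    }
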
diff --git a/rts/Timer.c b/rts/Timer.c
index 02d106fde9..3f9bc8ab0c 100644
--- a/rts/Timer.c
+++ b/rts/Timer.c
@@ -48,7 +48,7 @@ handle_tick(int unused STG_UNUSED)
ticks_to_ctxt_switch--;
if (ticks_to_ctxt_switch <= 0) {
ticks_to_ctxt_switch = RtsFlags.ConcFlags.ctxtSwitchTicks;
- setContextSwitches(); /* schedule a context switch */
+ contextSwitchAllCapabilities(); /* schedule a context switch */
}
}
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 733c2d67c2..88d5a02f89 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -1114,7 +1114,7 @@ waitForGcThreads (Capability *cap USED_IF_THREADS)
for (i=0; i < n_threads; i++) {
if (i == me) continue;
write_barrier();
- setContextSwitches();
+ interruptAllCapabilities();
if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY) {
retry = rtsTrue;
}
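
The GC.c hunk shows why the distinction matters in practice: waitForGcThreads
needs every mutator back in its scheduler, but a forced GC should not
reshuffle thread scheduling order. A schematic version of that retry loop;
the thread count and gc_threads array here are hypothetical stand-ins for the
real RTS state:

    #include <stdbool.h>

    enum { GC_THREAD_STANDING_BY = 1 };

    typedef struct { volatile int wakeup; } gc_thread;

    // Hypothetical stand-ins for the RTS globals used by waitForGcThreads.
    #define N_THREADS 4
    static gc_thread gc_thread_storage[N_THREADS];
    static gc_thread *gc_threads[N_THREADS] = {
        &gc_thread_storage[0], &gc_thread_storage[1],
        &gc_thread_storage[2], &gc_thread_storage[3],
    };
    static unsigned int n_threads = N_THREADS;
    static unsigned int me = 0;

    static void interruptAllCapabilities (void) { /* as in rts/Capability.c */ }

    // One pass of the wait loop: interrupt every capability, then check
    // whether every other GC thread has reached standing-by; the caller
    // retries while this returns true.  Because an interrupted thread goes
    // back on the front of its run queue, the stopped mutators resume
    // exactly where they left off once GC finishes.
    static bool pokeGcThreads (void)
    {
        bool retry = false;
        for (unsigned int i = 0; i < n_threads; i++) {
            if (i == me) { continue; }
            interruptAllCapabilities();
            if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY) {
                retry = true;
            }
        }
        return retry;
    }
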