Diffstat (limited to 'rts')

 -rw-r--r--  rts/Capability.c       | 19
 -rw-r--r--  rts/Capability.h       | 13
 -rw-r--r--  rts/HeapStackCheck.cmm | 11
 -rw-r--r--  rts/Interpreter.c      |  5
 -rw-r--r--  rts/PrimOps.cmm        |  6
 -rw-r--r--  rts/Schedule.c         |  2
 -rw-r--r--  rts/StgStartup.cmm     |  4
 -rw-r--r--  rts/Threads.c          | 10
 -rw-r--r--  rts/posix/Signals.c    |  2

 9 files changed, 52 insertions(+), 20 deletions(-)
diff --git a/rts/Capability.c b/rts/Capability.c
index a81d71073a..bd6d56ff00 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -294,10 +294,10 @@ initCapabilities( void )
 void
 setContextSwitches(void)
 {
-    nat i;
-    for (i=0; i < n_capabilities; i++) {
-        capabilities[i].context_switch = 1;
-    }
+    nat i;
+    for (i=0; i < n_capabilities; i++) {
+        contextSwitchCapability(&capabilities[i]);
+    }
 }
 
 /* ----------------------------------------------------------------------------
@@ -482,14 +482,17 @@ waitForReturnCapability (Capability **pCap, Task *task)
     if (!cap->running_task) {
         nat i;
         // otherwise, search for a free capability
+        cap = NULL;
         for (i = 0; i < n_capabilities; i++) {
-            cap = &capabilities[i];
-            if (!cap->running_task) {
+            if (!capabilities[i].running_task) {
+                cap = &capabilities[i];
                 break;
             }
         }
-        // Can't find a free one, use last_free_capability.
-        cap = last_free_capability;
+        if (cap == NULL) {
+            // Can't find a free one, use last_free_capability.
+            cap = last_free_capability;
+        }
     }
 
     // record the Capability as the one this Task is now assocated with.
diff --git a/rts/Capability.h b/rts/Capability.h
index 77132e318e..0f61fad913 100644
--- a/rts/Capability.h
+++ b/rts/Capability.h
@@ -276,6 +276,7 @@ extern void grabCapability (Capability **pCap);
 // cause all capabilities to context switch as soon as possible.
 void setContextSwitches(void);
+INLINE_HEADER void contextSwitchCapability(Capability *cap);
 
 // Free all capabilities
 void freeCapabilities (void);
@@ -322,4 +323,16 @@ discardSparksCap (Capability *cap)
 { return discardSparks(cap->sparks); }
 #endif
 
+INLINE_HEADER void
+contextSwitchCapability (Capability *cap)
+{
+    // setting HpLim to NULL ensures that the next heap check will
+    // fail, and the thread will return to the scheduler.
+    cap->r.rHpLim = NULL;
+    // But just in case it didn't work (the target thread might be
+    // modifying HpLim at the same time), we set the end-of-block
+    // context-switch flag too:
+    cap->context_switch = 1;
+}
+
 #endif /* CAPABILITY_H */
diff --git a/rts/HeapStackCheck.cmm b/rts/HeapStackCheck.cmm
index 94cec387cc..10baca23c6 100644
--- a/rts/HeapStackCheck.cmm
+++ b/rts/HeapStackCheck.cmm
@@ -23,8 +23,11 @@ import LeaveCriticalSection;
  *
  * On discovering that a stack or heap check has failed, we do the following:
  *
- *    - If the context_switch flag is set, indicating that there are more
- *      threads waiting to run, we yield to the scheduler
+ *    - If HpLim==0, indicating that we should context-switch, we yield
+ *      to the scheduler (return ThreadYielding).
+ *
+ *    - If the context_switch flag is set (the backup plan if setting HpLim
+ *      to 0 didn't trigger a context switch), we yield to the scheduler
  *      (return ThreadYielding).
  *
  *    - If Hp > HpLim, we've had a heap check failure.  This means we've
@@ -60,6 +63,10 @@ import LeaveCriticalSection;
 #define GC_GENERIC                                              \
     DEBUG_ONLY(foreign "C" heapCheckFail());                    \
     if (Hp > HpLim) {                                           \
+        if (HpLim == 0) {                                       \
+                R1 = ThreadYielding;                            \
+                goto sched;                                     \
+        }                                                       \
         Hp = Hp - HpAlloc/*in bytes*/;                          \
         if (HpAlloc <= BLOCK_SIZE                               \
             && bdescr_link(CurrentNursery) != NULL) {           \
diff --git a/rts/Interpreter.c b/rts/Interpreter.c
index 1b2d7303ed..1a6e9273e0 100644
--- a/rts/Interpreter.c
+++ b/rts/Interpreter.c
@@ -196,6 +196,9 @@ interpretBCO (Capability* cap)
 
     LOAD_STACK_POINTERS;
 
+    cap->r.rHpLim = (P_)1; // HpLim is the context-switch flag; when it
+                           // goes to zero we must return to the scheduler.
+
     // ------------------------------------------------------------------------
     // Case 1:
     //
@@ -1281,7 +1284,7 @@ run_BCO:
         // context switching: sometimes the scheduler can invoke
         // the interpreter with context_switch == 1, particularly
        // if the -C0 flag has been given on the cmd line.
-        if (cap->context_switch) {
+        if (cap->r.rHpLim == NULL) {
            Sp--; Sp[0] = (W_)&stg_enter_info;
            RETURN_TO_SCHEDULER(ThreadInterpret, ThreadYielding);
        }
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index adb2a643a9..121102c8e1 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -1073,7 +1073,8 @@ forkzh_fast
   foreign "C" scheduleThread(MyCapability() "ptr", threadid "ptr") [];
 
-  // switch at the earliest opportunity
+  // context switch soon, but not immediately: we don't want every
+  // forkIO to force a context-switch.
   Capability_context_switch(MyCapability()) = 1 :: CInt;
 
   RET_P(threadid);
 
@@ -1102,7 +1103,8 @@ forkOnzh_fast
   foreign "C" scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr") [];
 
-  // switch at the earliest opportunity
+  // context switch soon, but not immediately: we don't want every
+  // forkIO to force a context-switch.
   Capability_context_switch(MyCapability()) = 1 :: CInt;
 
   RET_P(threadid);
 
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 47636a3ff4..040d16f25a 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -1268,7 +1268,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
                "--<< thread %ld (%s) stopped: HeapOverflow",
                (long)t->id, whatNext_strs[t->what_next]);
 
-    if (cap->context_switch) {
+    if (cap->r.rHpLim == NULL || cap->context_switch) {
         // Sometimes we miss a context switch, e.g. when calling
         // primitives in a tight loop, MAYBE_GC() doesn't check the
         // context switch flag, and we end up waiting for a GC.
diff --git a/rts/StgStartup.cmm b/rts/StgStartup.cmm
index 16e5c62801..c3c0bc3dd7 100644
--- a/rts/StgStartup.cmm
+++ b/rts/StgStartup.cmm
@@ -28,9 +28,7 @@
     ASSERT(Hp != 0);                                            \
     ASSERT(Sp != 0);                                            \
     ASSERT(SpLim != 0);                                         \
-    ASSERT(HpLim != 0);                                         \
-    ASSERT(SpLim - WDS(RESERVED_STACK_WORDS) <= Sp);            \
-    ASSERT(HpLim >= Hp);
+    ASSERT(SpLim - WDS(RESERVED_STACK_WORDS) <= Sp);
 
 /* -----------------------------------------------------------------------------
    Returning from the STG world.
diff --git a/rts/Threads.c b/rts/Threads.c
index 2c7b2beb86..936b90e884 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -505,8 +505,10 @@ unblockOne_ (Capability *cap, StgTSO *tso,
     }
     tso->cap = cap;
     appendToRunQueue(cap,tso);
-    // we're holding a newly woken thread, make sure we context switch
-    // quickly so we can migrate it if necessary.
+
+    // context-switch soonish so we can migrate the new thread if
+    // necessary.  NB. not contextSwitchCapability(cap), which would
+    // force a context switch immediately.
     cap->context_switch = 1;
     } else {
     // we'll try to wake it up on the Capability it was last on.
@@ -514,6 +516,10 @@ unblockOne_ (Capability *cap, StgTSO *tso,
     }
 #else
     appendToRunQueue(cap,tso);
+
+    // context-switch soonish so we can migrate the new thread if
+    // necessary.  NB. not contextSwitchCapability(cap), which would
+    // force a context switch immediately.
     cap->context_switch = 1;
 #endif
 
diff --git a/rts/posix/Signals.c b/rts/posix/Signals.c
index 8268e6fe74..6d5ef43278 100644
--- a/rts/posix/Signals.c
+++ b/rts/posix/Signals.c
@@ -214,7 +214,7 @@ generic_handler(int sig USED_IF_THREADS,
         stg_exit(EXIT_FAILURE);
     }
 
-    MainCapability.context_switch = 1;
+    contextSwitchCapability(&MainCapability);
 
 #endif /* THREADED_RTS */
 }
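
The core trick above -- zero HpLim so the very next heap check fails, and keep the per-capability context_switch flag as a slower backup that is only honoured at block boundaries -- can be modelled outside the RTS. The following is a minimal stand-alone C sketch of that two-level interrupt, not GHC code: worker, request_yield and alloc_step are invented stand-ins for Capability, contextSwitchCapability() and the GC_GENERIC heap check.

    /* Hypothetical model of the two-level interrupt.  "hp_lim" plays the
     * role of rHpLim: clearing it makes the very next allocation check
     * fail, so the worker drops back to its scheduler.  "context_switch"
     * is the softer request, honoured only once the current block of
     * work is finished.  None of these names are RTS API. */

    #include <stdio.h>
    #include <stddef.h>

    struct worker {
        char  *hp;              /* next free byte in the current block   */
        char  *hp_lim;          /* end of the block, or NULL = yield now */
        int    context_switch;  /* yield when the current block is done  */
        char   block[256];
    };

    /* Role of contextSwitchCapability(): zap the limit so the next check
     * fails, and set the flag too in case the worker is concurrently
     * reloading hp_lim. */
    static void request_yield(struct worker *w)
    {
        w->hp_lim = NULL;
        w->context_switch = 1;
    }

    /* One allocation of n bytes; returns 0 when the worker should yield,
     * either because the limit was zapped or the block is genuinely full. */
    static int alloc_step(struct worker *w, size_t n)
    {
        if (w->hp_lim == NULL || w->hp + n > w->hp_lim)
            return 0;               /* heap-check failure path */
        w->hp += n;
        return 1;
    }

    int main(void)
    {
        struct worker w;
        w.hp = w.block;
        w.hp_lim = w.block + sizeof w.block;
        w.context_switch = 0;

        alloc_step(&w, 16);         /* succeeds: limit still in place     */
        request_yield(&w);          /* like contextSwitchCapability(&cap) */
        if (!alloc_step(&w, 16))    /* the very next check fails          */
            printf("worker yields to the scheduler\n");
        return 0;
    }

The same shape explains why forkzh_fast and unblockOne_ set only context_switch: they want a switch soon, but zapping HpLim on every forkIO or wakeup would force a yield at almost the next allocation, which is exactly what the new comments in the diff warn against.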