author | Simon Marlow <marlowsd@gmail.com> | 2009-03-17 16:42:14 +0000 |
---|---|---|
committer | Simon Marlow <marlowsd@gmail.com> | 2009-03-17 16:42:14 +0000 |
commit | 8b18faef8aeaf40150c208272a2fc117611e8ae8 (patch) | |
tree | 2aabc6115dccd0a3e303320515564b5628c8771c /rts/Schedule.c | |
parent | f8f4cb3f3a46e0495917a927cefe906531b7b38e (diff) | |
download | haskell-8b18faef8aeaf40150c208272a2fc117611e8ae8.tar.gz | |
Add fast event logging
Generate binary log files from the RTS containing a log of runtime
events with timestamps. The log file can be visualised in various
ways, for investigating runtime behaviour and debugging performance
problems. See for example the forthcoming ThreadScope viewer.
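As a rough illustration only (an assumption for exposition, not the eventlog file format GHC actually writes), each logged event can be thought of as a timestamp plus an event tag and the thread and capability it concerns:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical record layout, for illustration only; the real eventlog
 * has its own header block and encoding. */
typedef struct {
    uint64_t timestamp_ns;   /* when the event happened */
    uint16_t tag;            /* e.g. run thread, stop thread, GC start */
    uint32_t thread_id;      /* which Haskell thread, 0 if none */
    uint16_t capability;     /* which capability posted the event */
} ExampleEvent;

/* Append one record to an already-open binary log file. */
static void writeExampleEvent(FILE *log, const ExampleEvent *ev)
{
    fwrite(ev, sizeof *ev, 1, log);
}
```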
New options:
-eventlog (link-time option): enables event logging.
+RTS -l (runtime option): generates <prog>.eventlog containing the
binary event information.
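A minimal sketch of the runtime side of +RTS -l, using hypothetical names (this is not the RTS's actual flag handling); the point is only that the log file is named after the program, i.e. <prog>.eventlog:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static FILE *event_log_file = NULL;   /* hypothetical handle for the log */

/* Open "<prog>.eventlog" for binary writing when logging was requested. */
static void openEventLog(const char *prog, int logging_requested)
{
    if (!logging_requested) return;

    size_t len = strlen(prog) + sizeof(".eventlog");
    char *path = malloc(len);
    if (path == NULL) return;

    snprintf(path, len, "%s.eventlog", prog);
    event_log_file = fopen(path, "wb");
    free(path);
}
```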
This replaces some of the tracing machinery we already had in the RTS:
e.g. +RTS -vg for GC tracing (we should do this using the new event
logging instead).
Event logging has almost no runtime cost when it isn't enabled, though
in the future we might add more fine-grained events and that could
change; hence the link-time option and a separately compiled version of
the RTS for event logging. Enabling event logging has a small runtime
cost, but for most programs it shouldn't make much difference.
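One common way to get the "almost no cost when disabled" property is to gate each call site on a single flag, roughly as sketched below with hypothetical names (in GHC the logging machinery additionally lives in a separately linked RTS variant selected by -eventlog):

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical flag, set while parsing the runtime options. */
static bool eventlog_enabled = false;

/* The function that formats and buffers an event; declared only. */
void postEventSlow(uint32_t cap_no, uint16_t tag,
                   uint32_t thread_id, uint64_t other);

/* When logging is off, the only per-event cost is one flag test. */
#define POST_EVENT(cap_no, tag, thread_id, other)                 \
    do {                                                          \
        if (eventlog_enabled)                                     \
            postEventSlow((cap_no), (tag), (thread_id), (other)); \
    } while (0)
```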
(Todo: docs)
Diffstat (limited to 'rts/Schedule.c')
-rw-r--r-- | rts/Schedule.c | 18 |
1 file changed, 16 insertions, 2 deletions
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 040d16f25a..666b59e3e3 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -33,6 +33,7 @@
 #include "ProfHeap.h"
 #include "GC.h"
 #include "Weak.h"
+#include "EventLog.h"

 /* PARALLEL_HASKELL includes go here */
@@ -539,6 +540,8 @@ run_thread:
     }
 #endif

+    postEvent(cap, EVENT_RUN_THREAD, t->id, 0);
+
     switch (prev_what_next) {

     case ThreadKilled:
@@ -587,6 +590,8 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif

+    postEvent (cap, EVENT_STOP_THREAD, t->id, ret);
+
 #if defined(THREADED_RTS)
     // If ret is ThreadBlocked, and this Task is bound to the TSO that
     // blocked, we are in limbo - the TSO is now owned by whatever it
@@ -852,6 +857,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
         } else {
             debugTrace(DEBUG_sched, "pushing thread %lu to capability %d",
                        (unsigned long)t->id, free_caps[i]->no);
             appendToRunQueue(free_caps[i],t);
+
+            postEvent (cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
+
             if (t->bound) { t->bound->cap = free_caps[i]; }
             t->cap = free_caps[i];
             i++;
@@ -1560,6 +1568,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)

     if (gc_type == PENDING_GC_SEQ)
     {
+        postEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
         // single-threaded GC: grab all the capabilities
         for (i=0; i < n_capabilities; i++) {
             debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
@@ -1582,6 +1591,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     {
         // multi-threaded GC: make sure all the Capabilities donate one
         // GC thread each.
+        postEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
         debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");

         waitForGcThreads(cap);
@@ -1607,6 +1617,7 @@ delete_threads_and_gc:
     heap_census = scheduleNeedHeapProfile(rtsTrue);

 #if defined(THREADED_RTS)
+    postEvent(cap, EVENT_GC_START, 0, 0);
     debugTrace(DEBUG_sched, "doing GC");
     // reset waiting_for_gc *before* GC, so that when the GC threads
     // emerge they don't immediately re-enter the GC.
@@ -1615,6 +1626,7 @@ delete_threads_and_gc:
 #else
     GarbageCollect(force_major || heap_census, 0, cap);
 #endif
+    postEvent(cap, EVENT_GC_END, 0, 0);

     if (recent_activity == ACTIVITY_INACTIVE && force_major)
     {
@@ -1930,6 +1942,7 @@ suspendThread (StgRegTable *reg)
     task = cap->running_task;
     tso = cap->r.rCurrentTSO;

+    postEvent(cap, EVENT_STOP_THREAD, tso->id, THREAD_SUSPENDED_FOREIGN_CALL);
     debugTrace(DEBUG_sched,
                "thread %lu did a safe foreign call",
                (unsigned long)cap->r.rCurrentTSO->id);
@@ -2001,6 +2014,8 @@ resumeThread (void *task_)
     tso = task->suspended_tso;
     task->suspended_tso = NULL;
     tso->_link = END_TSO_QUEUE; // no write barrier reqd
+
+    postEvent(cap, EVENT_RUN_THREAD, tso->id, 0);
     debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);

     if (tso->why_blocked == BlockedOnCCall) {
@@ -2057,6 +2072,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
         appendToRunQueue(cap,tso);
     } else {
+        postEvent (cap, EVENT_MIGRATE_THREAD, tso->id, capabilities[cpu].no);
         wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
     }
 #else
@@ -2196,8 +2212,6 @@ initScheduler(void)
     }
 #endif

-    trace(TRACE_sched, "start: %d capabilities", n_capabilities);
-
     RELEASE_LOCK(&sched_mutex);
 }
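For reference, the call sites added above are consistent with an interface along the following lines. This is inferred from the diff rather than copied from EventLog.h, and the stand-in types are assumptions:

```c
/* Stand-ins for RTS types, for illustration only. */
typedef struct Capability_ Capability;     /* the capability posting the event */
typedef unsigned short     EventTypeNum;   /* EVENT_RUN_THREAD, EVENT_GC_START, ... */
typedef unsigned int       EventThreadID;  /* thread ids such as t->id */

/* Every post names the posting capability, an event tag, the thread
 * concerned (0 if none), and one extra word: a stop reason for
 * EVENT_STOP_THREAD, or a target capability for EVENT_MIGRATE_THREAD. */
void postEvent(Capability *cap, EventTypeNum tag,
               EventThreadID thread, unsigned int other);
```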