Diffstat (limited to 'rts')
-rw-r--r--  rts/Capability.c  |  46
-rw-r--r--  rts/GC.c          | 131
-rw-r--r--  rts/GCCompact.c   |  14
-rw-r--r--  rts/MBlock.c      |   5
-rw-r--r--  rts/Profiling.c   |  11
-rw-r--r--  rts/RtsStartup.c  |   4
-rw-r--r--  rts/STM.c         |  12
-rw-r--r--  rts/Schedule.c    | 276
-rw-r--r--  rts/Schedule.h    |   5
-rw-r--r--  rts/Sparks.c      |  23
-rw-r--r--  rts/Stable.c      |  13
-rw-r--r--  rts/Stats.c       |   5
-rw-r--r--  rts/Stats.h       |   1
-rw-r--r--  rts/Storage.c     |  13
-rw-r--r--  rts/Task.c        |  13
-rw-r--r--  rts/Trace.c       | 155
-rw-r--r--  rts/Trace.h       | 123
-rw-r--r--  rts/Weak.c        |   3
18 files changed, 569 insertions, 284 deletions
diff --git a/rts/Capability.c b/rts/Capability.c
index 51a42ef468..0415092a03 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -25,6 +25,7 @@
#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
+#include "Trace.h"
// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
@@ -196,8 +197,7 @@ initCapabilities( void )
initCapability(&capabilities[i], i);
}
- IF_DEBUG(scheduler, sched_belch("allocated %d capabilities",
- n_capabilities));
+ debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);
#else /* !THREADED_RTS */
@@ -233,10 +233,10 @@ giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
ASSERT_LOCK_HELD(&cap->lock);
ASSERT(task->cap == cap);
- IF_DEBUG(scheduler,
- sched_belch("passing capability %d to %s %p",
- cap->no, task->tso ? "bound task" : "worker",
- (void *)task->id));
+ trace(TRACE_sched | DEBUG_sched,
+ "passing capability %d to %s %p",
+ cap->no, task->tso ? "bound task" : "worker",
+ (void *)task->id);
ACQUIRE_LOCK(&task->lock);
task->wakeup = rtsTrue;
// the wakeup flag is needed because signalCondition() doesn't
@@ -291,8 +291,8 @@ releaseCapability_ (Capability* cap)
// are threads that need to be completed. If the system is
// shutting down, we never create a new worker.
if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
- IF_DEBUG(scheduler,
- sched_belch("starting new worker on capability %d", cap->no));
+ debugTrace(DEBUG_sched,
+ "starting new worker on capability %d", cap->no);
startWorkerTask(cap, workerStart);
return;
}
@@ -310,7 +310,7 @@ releaseCapability_ (Capability* cap)
}
last_free_capability = cap;
- IF_DEBUG(scheduler, sched_belch("freeing capability %d", cap->no));
+ trace(TRACE_sched | DEBUG_sched, "freeing capability %d", cap->no);
}
void
@@ -396,8 +396,7 @@ waitForReturnCapability (Capability **pCap, Task *task)
ACQUIRE_LOCK(&cap->lock);
- IF_DEBUG(scheduler,
- sched_belch("returning; I want capability %d", cap->no));
+ debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);
if (!cap->running_task) {
// It's free; just grab it
@@ -435,8 +434,7 @@ waitForReturnCapability (Capability **pCap, Task *task)
ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
- IF_DEBUG(scheduler,
- sched_belch("returning; got capability %d", cap->no));
+ trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);
*pCap = cap;
#endif
@@ -455,7 +453,7 @@ yieldCapability (Capability** pCap, Task *task)
// The fast path has no locking, if we don't enter this while loop
while ( cap->returning_tasks_hd != NULL || !anyWorkForMe(cap,task) ) {
- IF_DEBUG(scheduler, sched_belch("giving up capability %d", cap->no));
+ debugTrace(DEBUG_sched, "giving up capability %d", cap->no);
// We must now release the capability and wait to be woken up
// again.
@@ -470,10 +468,12 @@ yieldCapability (Capability** pCap, Task *task)
task->wakeup = rtsFalse;
RELEASE_LOCK(&task->lock);
- IF_DEBUG(scheduler, sched_belch("woken up on capability %d", cap->no));
+ debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);
+
ACQUIRE_LOCK(&cap->lock);
if (cap->running_task != NULL) {
- IF_DEBUG(scheduler, sched_belch("capability %d is owned by another task", cap->no));
+ debugTrace(DEBUG_sched,
+ "capability %d is owned by another task", cap->no);
RELEASE_LOCK(&cap->lock);
continue;
}
@@ -495,7 +495,7 @@ yieldCapability (Capability** pCap, Task *task)
break;
}
- IF_DEBUG(scheduler, sched_belch("got capability %d", cap->no));
+ trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);
ASSERT(cap->running_task == task);
}
@@ -527,6 +527,7 @@ wakeupThreadOnCapability (Capability *cap, StgTSO *tso)
// start it up
cap->running_task = myTask(); // precond for releaseCapability_()
+ trace(TRACE_sched, "resuming capability %d", cap->no);
releaseCapability_(cap);
} else {
appendToWakeupQueue(cap,tso);
@@ -557,6 +558,7 @@ prodCapabilities(rtsBool all)
ACQUIRE_LOCK(&cap->lock);
if (!cap->running_task) {
if (cap->spare_workers) {
+ trace(TRACE_sched, "resuming capability %d", cap->no);
task = cap->spare_workers;
ASSERT(!task->stopped);
giveCapabilityToTask(cap,task);
@@ -616,23 +618,25 @@ shutdownCapability (Capability *cap, Task *task)
task->cap = cap;
for (i = 0; i < 50; i++) {
- IF_DEBUG(scheduler, sched_belch("shutting down capability %d, attempt %d", cap->no, i));
+ debugTrace(DEBUG_sched,
+ "shutting down capability %d, attempt %d", cap->no, i);
ACQUIRE_LOCK(&cap->lock);
if (cap->running_task) {
RELEASE_LOCK(&cap->lock);
- IF_DEBUG(scheduler, sched_belch("not owner, yielding"));
+ debugTrace(DEBUG_sched, "not owner, yielding");
yieldThread();
continue;
}
cap->running_task = task;
if (!emptyRunQueue(cap) || cap->spare_workers) {
- IF_DEBUG(scheduler, sched_belch("runnable threads or workers still alive, yielding"));
+ debugTrace(DEBUG_sched,
+ "runnable threads or workers still alive, yielding");
releaseCapability_(cap); // this will wake up a worker
RELEASE_LOCK(&cap->lock);
yieldThread();
continue;
}
- IF_DEBUG(scheduler, sched_belch("capability %d is stopped.", cap->no));
+ debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
RELEASE_LOCK(&cap->lock);
break;
}
diff --git a/rts/GC.c b/rts/GC.c
index b75c549480..727027dd93 100644
--- a/rts/GC.c
+++ b/rts/GC.c
@@ -42,7 +42,7 @@
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
-
+#include "Trace.h"
#include "RetainerProfile.h"
#include <string.h>
@@ -355,10 +355,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
CostCentreStack *prev_CCS;
#endif
-#if defined(DEBUG) && defined(GRAN)
- IF_DEBUG(gc, debugBelch("@@ Starting garbage collection at %ld (%lx)\n",
- Now, Now));
-#endif
+ debugTrace(DEBUG_gc, "starting GC");
#if defined(RTS_USER_SIGNALS)
// block signals
@@ -516,8 +513,8 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
stp->bitmap = bitmap_bdescr;
bitmap = bitmap_bdescr->start;
- IF_DEBUG(gc, debugBelch("bitmap_size: %d, bitmap: %p",
- bitmap_size, bitmap););
+ debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
+ bitmap_size, bitmap);
// don't forget to fill it with zeros!
memset(bitmap, 0, bitmap_size);
@@ -828,7 +825,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
}
copied += mut_list_size;
- IF_DEBUG(gc, debugBelch("mut_list_size: %ld (%d vars, %d arrays, %d others)\n", mut_list_size * sizeof(W_), mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS));
+ debugTrace(DEBUG_gc,
+ "mut_list_size: %ld (%d vars, %d arrays, %d others)",
+ mut_list_size * sizeof(W_),
+ mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
}
for (s = 0; s < generations[g].n_steps; s++) {
@@ -1077,7 +1077,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
int pc_free;
adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
- IF_DEBUG(gc, debugBelch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
+
+ debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
+ RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);
+
pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
heapOverflow();
@@ -1309,8 +1312,10 @@ traverse_weak_ptr_list(void)
w->link = weak_ptr_list;
weak_ptr_list = w;
flag = rtsTrue;
- IF_DEBUG(weak, debugBelch("Weak pointer still alive at %p -> %p",
- w, w->key));
+
+ debugTrace(DEBUG_weak,
+ "weak pointer still alive at %p -> %p",
+ w, w->key);
continue;
}
else {
@@ -2196,18 +2201,16 @@ loop:
to = copy(q,BLACKHOLE_sizeW(),stp);
//ToDo: derive size etc from reverted IP
//to = copy(q,size,stp);
- IF_DEBUG(gc,
- debugBelch("@@ evacuate: RBH %p (%s) to %p (%s)",
- q, info_type(q), to, info_type(to)));
+ debugTrace(DEBUG_gc, "evacuate: RBH %p (%s) to %p (%s)",
+ q, info_type(q), to, info_type(to));
return to;
}
-
+
case BLOCKED_FETCH:
ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
to = copy(q,sizeofW(StgBlockedFetch),stp);
- IF_DEBUG(gc,
- debugBelch("@@ evacuate: %p (%s) to %p (%s)",
- q, info_type(q), to, info_type(to)));
+ debugTrace(DEBUG_gc, "evacuate: %p (%s) to %p (%s)",
+ q, info_type(q), to, info_type(to));
return to;
# ifdef DIST
@@ -2216,17 +2219,15 @@ loop:
case FETCH_ME:
ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
to = copy(q,sizeofW(StgFetchMe),stp);
- IF_DEBUG(gc,
- debugBelch("@@ evacuate: %p (%s) to %p (%s)",
- q, info_type(q), to, info_type(to)));
+ debugTrace(DEBUG_gc, "evacuate: %p (%s) to %p (%s)",
+ q, info_type(q), to, info_type(to));
return to;
case FETCH_ME_BQ:
ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
- IF_DEBUG(gc,
- debugBelch("@@ evacuate: %p (%s) to %p (%s)",
- q, info_type(q), to, info_type(to)));
+ debugTrace(DEBUG_gc, "evacuate: %p (%s) to %p (%s)",
+ q, info_type(q), to, info_type(to));
return to;
#endif
@@ -3072,9 +3073,8 @@ scavenge(step *stp)
(StgClosure *)rbh->blocking_queue =
evacuate((StgClosure *)rbh->blocking_queue);
failed_to_evac = rtsTrue; // mutable anyhow.
- IF_DEBUG(gc,
- debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
- p, info_type(p), (StgClosure *)rbh->blocking_queue));
+ debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
+ p, info_type(p), (StgClosure *)rbh->blocking_queue);
// ToDo: use size of reverted closure here!
p += BLACKHOLE_sizeW();
break;
@@ -3089,10 +3089,9 @@ scavenge(step *stp)
// follow the link to the rest of the blocking queue
(StgClosure *)bf->link =
evacuate((StgClosure *)bf->link);
- IF_DEBUG(gc,
- debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
- bf, info_type((StgClosure *)bf),
- bf->node, info_type(bf->node)));
+ debugTrace(DEBUG_gc, "scavenge: %p (%s); node is now %p; exciting, isn't it",
+ bf, info_type((StgClosure *)bf),
+ bf->node, info_type(bf->node));
p += sizeofW(StgBlockedFetch);
break;
}
@@ -3109,9 +3108,8 @@ scavenge(step *stp)
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
evacuate((StgClosure *)fmbq->blocking_queue);
- IF_DEBUG(gc,
- debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
- p, info_type((StgClosure *)p)));
+ debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
+ p, info_type((StgClosure *)p));
p += sizeofW(StgFetchMeBlockingQueue);
break;
}
@@ -3464,9 +3462,8 @@ linear_scan:
bh->blocking_queue =
(StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
failed_to_evac = rtsTrue; // mutable anyhow.
- IF_DEBUG(gc,
- debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
- p, info_type(p), (StgClosure *)rbh->blocking_queue));
+ debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
+ p, info_type(p), (StgClosure *)rbh->blocking_queue);
break;
}
@@ -3479,10 +3476,9 @@ linear_scan:
// follow the link to the rest of the blocking queue
(StgClosure *)bf->link =
evacuate((StgClosure *)bf->link);
- IF_DEBUG(gc,
- debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
- bf, info_type((StgClosure *)bf),
- bf->node, info_type(bf->node)));
+ debugTrace(DEBUG_gc, "scavenge: %p (%s); node is now %p; exciting, isn't it",
+ bf, info_type((StgClosure *)bf),
+ bf->node, info_type(bf->node));
break;
}
@@ -3497,9 +3493,8 @@ linear_scan:
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
evacuate((StgClosure *)fmbq->blocking_queue);
- IF_DEBUG(gc,
- debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
- p, info_type((StgClosure *)p)));
+ debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
+ p, info_type((StgClosure *)p));
break;
}
#endif /* PAR */
@@ -3574,7 +3569,7 @@ linear_scan:
// start a new linear scan if the mark stack overflowed at some point
if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
- IF_DEBUG(gc, debugBelch("scavenge_mark_stack: starting linear scan"));
+ debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
mark_stack_overflowed = rtsFalse;
oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
oldgen_scan = oldgen_scan_bd->start;
@@ -3816,9 +3811,8 @@ scavenge_one(StgPtr p)
(StgClosure *)rbh->blocking_queue =
evacuate((StgClosure *)rbh->blocking_queue);
failed_to_evac = rtsTrue; // mutable anyhow.
- IF_DEBUG(gc,
- debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
- p, info_type(p), (StgClosure *)rbh->blocking_queue));
+ debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
+ p, info_type(p), (StgClosure *)rbh->blocking_queue);
// ToDo: use size of reverted closure here!
break;
}
@@ -3832,10 +3826,10 @@ scavenge_one(StgPtr p)
// follow the link to the rest of the blocking queue
(StgClosure *)bf->link =
evacuate((StgClosure *)bf->link);
- IF_DEBUG(gc,
- debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
- bf, info_type((StgClosure *)bf),
- bf->node, info_type(bf->node)));
+ debugTrace(DEBUG_gc,
+ "scavenge: %p (%s); node is now %p; exciting, isn't it",
+ bf, info_type((StgClosure *)bf),
+ bf->node, info_type(bf->node));
break;
}
@@ -3850,9 +3844,8 @@ scavenge_one(StgPtr p)
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
evacuate((StgClosure *)fmbq->blocking_queue);
- IF_DEBUG(gc,
- debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
- p, info_type((StgClosure *)p)));
+ debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
+ p, info_type((StgClosure *)p));
break;
}
#endif
@@ -4180,8 +4173,6 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
StgWord bitmap;
nat size;
- //IF_DEBUG(sanity, debugBelch(" scavenging stack between %p and %p", p, stack_end));
-
/*
* Each time around this loop, we are looking at a chunk of stack
* that starts with an activation record.
@@ -4441,11 +4432,11 @@ gcCAFs(void)
ASSERT(info->type == IND_STATIC);
if (STATIC_LINK(info,p) == NULL) {
- IF_DEBUG(gccafs, debugBelch("CAF gc'd at 0x%04lx", (long)p));
- // black hole it
- SET_INFO(p,&stg_BLACKHOLE_info);
- p = STATIC_LINK2(info,p);
- *pp = p;
+ debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%04lx", (long)p);
+ // black hole it
+ SET_INFO(p,&stg_BLACKHOLE_info);
+ p = STATIC_LINK2(info,p);
+ *pp = p;
}
else {
pp = &STATIC_LINK2(info,p);
@@ -4455,7 +4446,7 @@ gcCAFs(void)
}
- // debugBelch("%d CAFs live", i);
+ debugTrace(DEBUG_gccafs, "%d CAFs live", i);
}
#endif
@@ -4650,7 +4641,9 @@ threadPaused(Capability *cap, StgTSO *tso)
bh = ((StgUpdateFrame *)frame)->updatee;
if (closure_IND(bh) || bh->header.info == &stg_BLACKHOLE_info) {
- IF_DEBUG(squeeze, debugBelch("suspending duplicate work: %ld words of stack\n", (StgPtr)frame - tso->sp));
+ debugTrace(DEBUG_squeeze,
+ "suspending duplicate work: %ld words of stack",
+ (StgPtr)frame - tso->sp);
// If this closure is already an indirection, then
// suspend the computation up to this point:
@@ -4710,10 +4703,10 @@ threadPaused(Capability *cap, StgTSO *tso)
}
end:
- IF_DEBUG(squeeze,
- debugBelch("words_to_squeeze: %d, weight: %d, squeeze: %s\n",
- words_to_squeeze, weight,
- weight < words_to_squeeze ? "YES" : "NO"));
+ debugTrace(DEBUG_squeeze,
+ "words_to_squeeze: %d, weight: %d, squeeze: %s",
+ words_to_squeeze, weight,
+ weight < words_to_squeeze ? "YES" : "NO");
// Should we squeeze or not? Arbitrary heuristic: we squeeze if
// the number of words we have to shift down is less than the
@@ -4735,7 +4728,7 @@ printMutableList(generation *gen)
bdescr *bd;
StgPtr p;
- debugBelch("@@ Mutable list %p: ", gen->mut_list);
+ debugBelch("mutable list %p: ", gen->mut_list);
for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
for (p = bd->start; p < bd->free; p++) {
diff --git a/rts/GCCompact.c b/rts/GCCompact.c
index 682a09a303..45222c3b9b 100644
--- a/rts/GCCompact.c
+++ b/rts/GCCompact.c
@@ -17,6 +17,7 @@
#include "GCCompact.h"
#include "Schedule.h"
#include "Apply.h"
+#include "Trace.h"
// Turn off inlining when debugging - it obfuscates things
#ifdef DEBUG
@@ -931,12 +932,14 @@ compact( void (*get_roots)(evac_fn) )
for (s = 0; s < generations[g].n_steps; s++) {
if (g==0 && s ==0) continue;
stp = &generations[g].steps[s];
- IF_DEBUG(gc, debugBelch("update_fwd: %d.%d\n", stp->gen->no, stp->no););
+ debugTrace(DEBUG_gc, "update_fwd: %d.%d",
+ stp->gen->no, stp->no);
update_fwd(stp->blocks);
update_fwd_large(stp->scavenged_large_objects);
if (g == RtsFlags.GcFlags.generations-1 && stp->old_blocks != NULL) {
- IF_DEBUG(gc, debugBelch("update_fwd: %d.%d (compact)\n", stp->gen->no, stp->no););
+ debugTrace(DEBUG_gc, "update_fwd: %d.%d (compact)",
+ stp->gen->no, stp->no);
update_fwd_compact(stp->old_blocks);
}
}
@@ -946,9 +949,10 @@ compact( void (*get_roots)(evac_fn) )
stp = &oldest_gen->steps[0];
if (stp->old_blocks != NULL) {
blocks = update_bkwd_compact(stp);
- IF_DEBUG(gc, debugBelch("update_bkwd: %d.%d (compact, old: %d blocks, now %d blocks)\n",
- stp->gen->no, stp->no,
- stp->n_old_blocks, blocks););
+ debugTrace(DEBUG_gc,
+ "update_bkwd: %d.%d (compact, old: %d blocks, now %d blocks)",
+ stp->gen->no, stp->no,
+ stp->n_old_blocks, blocks);
stp->n_old_blocks = blocks;
}
}
diff --git a/rts/MBlock.c b/rts/MBlock.c
index fa8fd49d88..6d05940be5 100644
--- a/rts/MBlock.c
+++ b/rts/MBlock.c
@@ -16,6 +16,7 @@
#include "RtsFlags.h"
#include "MBlock.h"
#include "BlockAlloc.h"
+#include "Trace.h"
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
@@ -287,7 +288,7 @@ getMBlocks(nat n)
// ToDo: check that we haven't already grabbed the memory at next_request
next_request = ret + size;
- IF_DEBUG(gc,debugBelch("Allocated %d megablock(s) at %p\n",n,ret));
+ debugTrace(DEBUG_gc, "allocated %d megablock(s) at %p",n,ret);
// fill in the table
for (i = 0; i < n; i++) {
@@ -402,7 +403,7 @@ getMBlocks(nat n)
barf("getMBlocks: unknown memory allocation failure on Win32.");
}
- IF_DEBUG(gc,debugBelch("Allocated %d megablock(s) at 0x%x\n",n,(nat)ret));
+ debugTrace(DEBUG_gc, "allocated %d megablock(s) at 0x%x",n,(nat)ret);
next_request = (char*)next_request + size;
mblocks_allocated += n;
diff --git a/rts/Profiling.c b/rts/Profiling.c
index 0bb975cafc..33301a91c1 100644
--- a/rts/Profiling.c
+++ b/rts/Profiling.c
@@ -353,11 +353,12 @@ CostCentreStack *
PushCostCentre ( CostCentreStack *ccs, CostCentre *cc )
#define PushCostCentre _PushCostCentre
{
- IF_DEBUG(prof,
- debugBelch("Pushing %s on ", cc->label);
- debugCCS(ccs);
- debugBelch("\n"));
- return PushCostCentre(ccs,cc);
+ IF_DEBUG(prof,
+ traceBegin("pushing %s on ", cc->label);
+ debugCCS(ccs);
+ traceEnd(););
+
+ return PushCostCentre(ccs,cc);
}
#endif
diff --git a/rts/RtsStartup.c b/rts/RtsStartup.c
index 71978007f3..0406ae6f09 100644
--- a/rts/RtsStartup.c
+++ b/rts/RtsStartup.c
@@ -27,6 +27,7 @@
#include "Linker.h"
#include "ThreadLabels.h"
#include "BlockAlloc.h"
+#include "Trace.h"
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
@@ -161,6 +162,9 @@ hs_init(int *argc, char **argv[])
setProgArgv(*argc,*argv);
}
+ /* initTracing must be after setupRtsFlags() */
+ initTracing();
+
#if defined(PAR)
/* NB: this really must be done after processing the RTS flags */
IF_PAR_DEBUG(verbose,
diff --git a/rts/STM.c b/rts/STM.c
index 424796265a..5c3b4341e2 100644
--- a/rts/STM.c
+++ b/rts/STM.c
@@ -90,6 +90,7 @@
#include "SMP.h"
#include "STM.h"
#include "Storage.h"
+#include "Trace.h"
#include <stdlib.h>
#include <stdio.h>
@@ -113,16 +114,7 @@
// If SHAKE is defined then validation will sometimes spuriously fail. This helps test
// unusual code paths if genuine contention is rare
-#if defined(DEBUG)
-#define SHAKE
-#if defined(THREADED_RTS)
-#define TRACE(_x...) IF_DEBUG(stm, debugBelch("STM (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId()); debugBelch ( _x ))
-#else
-#define TRACE(_x...) IF_DEBUG(stm, debugBelch ( _x ))
-#endif
-#else
-#define TRACE(_x...) /*Nothing*/
-#endif
+#define TRACE(_x...) debugTrace(DEBUG_stm, "STM: " _x)
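+// Illustrative expansion (not part of this patch): a call such as
+//   TRACE("stmCommitTransaction, trec=%p", trec);
+// now becomes
+//   debugTrace(DEBUG_stm, "STM: " "stmCommitTransaction, trec=%p", trec);
+// relying on C string-literal concatenation to add the "STM: " prefix.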
#ifdef SHAKE
static const int do_shake = TRUE;
diff --git a/rts/Schedule.c b/rts/Schedule.c
index bd8ba743de..270a7d8715 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -50,6 +50,7 @@
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif
+#include "Trace.h"
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
@@ -344,10 +345,9 @@ schedule (Capability *initialCapability, Task *task)
// The sched_mutex is *NOT* held
// NB. on return, we still hold a capability.
- IF_DEBUG(scheduler,
- sched_belch("### NEW SCHEDULER LOOP (task: %p, cap: %p)",
- task, initialCapability);
- );
+ debugTrace (DEBUG_sched,
+ "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
+ task, initialCapability);
schedulePreLoop();
@@ -434,7 +434,7 @@ schedule (Capability *initialCapability, Task *task)
case SCHED_RUNNING:
break;
case SCHED_INTERRUPTING:
- IF_DEBUG(scheduler, sched_belch("SCHED_INTERRUPTING"));
+ debugTrace(DEBUG_sched, "SCHED_INTERRUPTING");
#if defined(THREADED_RTS)
discardSparksCap(cap);
#endif
@@ -442,7 +442,7 @@ schedule (Capability *initialCapability, Task *task)
cap = scheduleDoGC(cap,task,rtsFalse,GetRoots);
break;
case SCHED_SHUTTING_DOWN:
- IF_DEBUG(scheduler, sched_belch("SCHED_SHUTTING_DOWN"));
+ debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN");
// If we are a worker, just exit. If we're a bound thread
// then we will exit below when we've removed our TSO from
// the run queue.
@@ -461,9 +461,9 @@ schedule (Capability *initialCapability, Task *task)
StgClosure *spark;
spark = findSpark(cap);
if (spark != NULL) {
- IF_DEBUG(scheduler,
- sched_belch("turning spark of closure %p into a thread",
- (StgClosure *)spark));
+ debugTrace(DEBUG_sched,
+ "turning spark of closure %p into a thread",
+ (StgClosure *)spark);
createSparkThread(cap,spark);
}
}
@@ -552,14 +552,12 @@ schedule (Capability *initialCapability, Task *task)
if (bound) {
if (bound == task) {
- IF_DEBUG(scheduler,
- sched_belch("### Running thread %d in bound thread",
- t->id));
+ debugTrace(DEBUG_sched,
+ "### Running thread %d in bound thread", t->id);
// yes, the Haskell thread is bound to the current native thread
} else {
- IF_DEBUG(scheduler,
- sched_belch("### thread %d bound to another OS thread",
- t->id));
+ debugTrace(DEBUG_sched,
+ "### thread %d bound to another OS thread", t->id);
// no, bound to a different Haskell thread: pass to that thread
pushOnRunQueue(cap,t);
continue;
@@ -567,8 +565,8 @@ schedule (Capability *initialCapability, Task *task)
} else {
// The thread we want to run is unbound.
if (task->tso) {
- IF_DEBUG(scheduler,
- sched_belch("### this OS thread cannot run thread %d", t->id));
+ debugTrace(DEBUG_sched,
+ "### this OS thread cannot run thread %d", t->id);
// no, the current native thread is bound to a different
// Haskell thread, so pass it to any worker thread
pushOnRunQueue(cap,t);
@@ -591,8 +589,8 @@ schedule (Capability *initialCapability, Task *task)
run_thread:
- IF_DEBUG(scheduler, sched_belch("-->> running thread %ld %s ...",
- (long)t->id, whatNext_strs[t->what_next]));
+ debugTrace(DEBUG_sched, "-->> running thread %ld %s ...",
+ (long)t->id, whatNext_strs[t->what_next]);
#if defined(PROFILING)
startHeapProfTimer();
@@ -665,9 +663,9 @@ run_thread:
// that task->cap != cap. We better yield this Capability
// immediately and return to normality.
if (ret == ThreadBlocked) {
- IF_DEBUG(scheduler,
- sched_belch("--<< thread %d (%s) stopped: blocked\n",
- t->id, whatNext_strs[t->what_next]));
+ debugTrace(DEBUG_sched,
+ "--<< thread %d (%s) stopped: blocked",
+ t->id, whatNext_strs[t->what_next]);
continue;
}
#endif
@@ -683,12 +681,6 @@ run_thread:
CCCS = CCS_SYSTEM;
#endif
-#if defined(THREADED_RTS)
- IF_DEBUG(scheduler,debugBelch("sched (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId()););
-#elif !defined(GRAN) && !defined(PARALLEL_HASKELL)
- IF_DEBUG(scheduler,debugBelch("sched: "););
-#endif
-
schedulePostRunThread();
ready_to_gc = rtsFalse;
@@ -728,8 +720,8 @@ run_thread:
}
} /* end of while() */
- IF_PAR_DEBUG(verbose,
- debugBelch("== Leaving schedule() after having received Finish\n"));
+ debugTrace(PAR_DEBUG_verbose,
+ "== Leaving schedule() after having received Finish");
}
/* ----------------------------------------------------------------------------
@@ -746,10 +738,10 @@ schedulePreLoop(void)
ContinueThread,
CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
- IF_DEBUG(gran,
- debugBelch("GRAN: Init CurrentTSO (in schedule) = %p\n",
- CurrentTSO);
- G_TSO(CurrentTSO, 5));
+ debugTrace (DEBUG_gran,
+ "GRAN: Init CurrentTSO (in schedule) = %p",
+ CurrentTSO);
+ IF_DEBUG(gran, G_TSO(CurrentTSO, 5));
if (RtsFlags.GranFlags.Light) {
/* Save current time; GranSim Light only */
@@ -811,7 +803,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
StgTSO *prev, *t, *next;
rtsBool pushed_to_all;
- IF_DEBUG(scheduler, sched_belch("excess threads on run queue and %d free capabilities, sharing...", n_free_caps));
+ debugTrace(DEBUG_sched, "excess threads on run queue and %d free capabilities, sharing...", n_free_caps);
i = 0;
pushed_to_all = rtsFalse;
@@ -835,7 +827,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
prev->link = t;
prev = t;
} else {
- IF_DEBUG(scheduler, sched_belch("pushing thread %d to capability %d", t->id, free_caps[i]->no));
+ debugTrace(DEBUG_sched, "pushing thread %d to capability %d", t->id, free_caps[i]->no);
appendToRunQueue(free_caps[i],t);
if (t->bound) { t->bound->cap = free_caps[i]; }
t->cap = free_caps[i];
@@ -854,7 +846,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
if (emptySparkPoolCap(free_caps[i])) {
spark = findSpark(cap);
if (spark != NULL) {
- IF_DEBUG(scheduler, sched_belch("pushing spark %p to capability %d", spark, free_caps[i]->no));
+ debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
newSpark(&(free_caps[i]->r), spark);
}
}
@@ -984,7 +976,7 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
if (recent_activity != ACTIVITY_INACTIVE) return;
#endif
- IF_DEBUG(scheduler, sched_belch("deadlocked, forcing major GC..."));
+ debugTrace(DEBUG_sched, "deadlocked, forcing major GC...");
// Garbage collection can release some new threads due to
// either (a) finalizers or (b) threads resurrected because
@@ -1003,8 +995,8 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
* deadlock.
*/
if ( anyUserHandlers() ) {
- IF_DEBUG(scheduler,
- sched_belch("still deadlocked, waiting for signals..."));
+ debugTrace(DEBUG_sched,
+ "still deadlocked, waiting for signals...");
awaitUserSignals();
@@ -1510,10 +1502,10 @@ schedulePostRunThread(void)
case ThreadBlocked:
# if defined(GRAN)
- IF_DEBUG(scheduler,
- debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
- t->id, t, whatNext_strs[t->what_next], t->block_info.closure,
- (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
+ debugTrace(DEBUG_sched,
+ "--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
+ t->id, t, whatNext_strs[t->what_next], t->block_info.closure,
+ (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
if (t->block_info.closure!=(StgClosure*)NULL)
print_bq(t->block_info.closure);
debugBelch("\n"));
@@ -1562,10 +1554,10 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
- IF_DEBUG(scheduler,
- debugBelch("--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
- (long)t->id, whatNext_strs[t->what_next], blocks));
-
+ debugTrace(DEBUG_sched,
+ "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
+ (long)t->id, whatNext_strs[t->what_next], blocks);
+
// don't do this if the nursery is (nearly) full, we'll GC first.
if (cap->r.rCurrentNursery->link != NULL ||
cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop
@@ -1622,9 +1614,10 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
}
}
- IF_DEBUG(scheduler,
- debugBelch("--<< thread %ld (%s) stopped: HeapOverflow\n",
- (long)t->id, whatNext_strs[t->what_next]));
+ debugTrace(DEBUG_sched,
+ "--<< thread %ld (%s) stopped: HeapOverflow\n",
+ (long)t->id, whatNext_strs[t->what_next]);
+
#if defined(GRAN)
ASSERT(!is_on_queue(t,CurrentProc));
#elif defined(PARALLEL_HASKELL)
@@ -1650,8 +1643,10 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
static void
scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
{
- IF_DEBUG(scheduler,debugBelch("--<< thread %ld (%s) stopped, StackOverflow\n",
- (long)t->id, whatNext_strs[t->what_next]));
+ debugTrace (DEBUG_sched,
+ "--<< thread %ld (%s) stopped, StackOverflow\n",
+ (long)t->id, whatNext_strs[t->what_next]);
+
/* just adjust the stack for this thread, then pop it back
* on the run queue.
*/
@@ -1689,15 +1684,17 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
* up the GC thread. getThread will block during a GC until the
* GC is finished.
*/
- IF_DEBUG(scheduler,
- if (t->what_next != prev_what_next) {
- debugBelch("--<< thread %ld (%s) stopped to switch evaluators\n",
- (long)t->id, whatNext_strs[t->what_next]);
- } else {
- debugBelch("--<< thread %ld (%s) stopped, yielding\n",
- (long)t->id, whatNext_strs[t->what_next]);
- }
- );
+#ifdef DEBUG
+ if (t->what_next != prev_what_next) {
+ debugTrace(DEBUG_sched,
+ "--<< thread %ld (%s) stopped to switch evaluators\n",
+ (long)t->id, whatNext_strs[t->what_next]);
+ } else {
+ debugTrace(DEBUG_sched,
+ "--<< thread %ld (%s) stopped, yielding\n",
+ (long)t->id, whatNext_strs[t->what_next]);
+ }
+#endif
IF_DEBUG(sanity,
//debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
@@ -1795,11 +1792,14 @@ scheduleHandleThreadBlocked( StgTSO *t
// conc023 +RTS -N2.
#endif
- IF_DEBUG(scheduler,
- debugBelch("--<< thread %d (%s) stopped: ",
- t->id, whatNext_strs[t->what_next]);
- printThreadBlockage(t);
- debugBelch("\n"));
+#ifdef DEBUG
+ if (traceClass(DEBUG_sched)) {
+ debugTraceBegin("--<< thread %d (%s) stopped: ",
+ t->id, whatNext_strs[t->what_next]);
+ printThreadBlockage(t);
+ debugTraceEnd();
+ }
+#endif
/* Only for dumping event to log file
ToDo: do I need this in GranSim, too?
@@ -1821,8 +1821,8 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
* We also end up here if the thread kills itself with an
* uncaught exception, see Exception.cmm.
*/
- IF_DEBUG(scheduler,debugBelch("--++ thread %d (%s) finished\n",
- t->id, whatNext_strs[t->what_next]));
+ debugTrace(DEBUG_sched, "--++ thread %d (%s) finished",
+ t->id, whatNext_strs[t->what_next]);
#if defined(GRAN)
endThread(t, CurrentProc); // clean-up the thread
@@ -1942,10 +1942,10 @@ scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED )
// deadlocked.
scheduleCheckBlackHoles(&MainCapability);
- IF_DEBUG(scheduler, sched_belch("garbage collecting before heap census"));
+ debugTrace(DEBUG_sched, "garbage collecting before heap census");
GarbageCollect(GetRoots, rtsTrue);
- IF_DEBUG(scheduler, sched_belch("performing heap census"));
+ debugTrace(DEBUG_sched, "performing heap census");
heapCensus();
performHeapProfile = rtsFalse;
@@ -1985,14 +1985,14 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS,
was_waiting = cas(&waiting_for_gc, 0, 1);
if (was_waiting) {
do {
- IF_DEBUG(scheduler, sched_belch("someone else is trying to GC..."));
+ debugTrace(DEBUG_sched, "someone else is trying to GC...");
if (cap) yieldCapability(&cap,task);
} while (waiting_for_gc);
return cap; // NOTE: task->cap might have changed here
}
for (i=0; i < n_capabilities; i++) {
- IF_DEBUG(scheduler, sched_belch("ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities));
+ debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
if (cap != &capabilities[i]) {
Capability *pcap = &capabilities[i];
// we better hope this task doesn't get migrated to
@@ -2026,7 +2026,8 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS,
next = t->global_link;
if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
if (!stmValidateNestOfTransactions (t -> trec)) {
- IF_DEBUG(stm, sched_belch("trec %p found wasting its time", t));
+ debugTrace(DEBUG_sched | DEBUG_stm,
+ "trec %p found wasting its time", t);
// strip the stack back to the
// ATOMICALLY_FRAME, aborting the (nested)
@@ -2064,7 +2065,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS,
* broadcast on gc_pending_cond afterward.
*/
#if defined(THREADED_RTS)
- IF_DEBUG(scheduler,sched_belch("doing GC"));
+ debugTrace(DEBUG_sched, "doing GC");
#endif
GarbageCollect(get_roots, force_major);
@@ -2157,7 +2158,7 @@ forkProcess(HsStablePtr *entry
}
#endif
- IF_DEBUG(scheduler,sched_belch("forking!"));
+ debugTrace(DEBUG_sched, "forking!");
// ToDo: for SMP, we should probably acquire *all* the capabilities
cap = rts_lock();
@@ -2243,7 +2244,7 @@ static void
deleteAllThreads ( Capability *cap )
{
StgTSO* t, *next;
- IF_DEBUG(scheduler,sched_belch("deleting all threads"));
+ debugTrace(DEBUG_sched,"deleting all threads");
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
next = t->link;
@@ -2327,8 +2328,9 @@ suspendThread (StgRegTable *reg)
task = cap->running_task;
tso = cap->r.rCurrentTSO;
- IF_DEBUG(scheduler,
- sched_belch("thread %d did a safe foreign call", cap->r.rCurrentTSO->id));
+ debugTrace(DEBUG_sched,
+ "thread %d did a safe foreign call",
+ cap->r.rCurrentTSO->id);
// XXX this might not be necessary --SDM
tso->what_next = ThreadRunGHC;
@@ -2357,7 +2359,7 @@ suspendThread (StgRegTable *reg)
/* Preparing to leave the RTS, so ensure there's a native thread/task
waiting to take over.
*/
- IF_DEBUG(scheduler, sched_belch("thread %d: leaving RTS", tso->id));
+ debugTrace(DEBUG_sched, "thread %d: leaving RTS", tso->id);
#endif
errno = saved_errno;
@@ -2385,7 +2387,7 @@ resumeThread (void *task_)
tso = task->suspended_tso;
task->suspended_tso = NULL;
tso->link = END_TSO_QUEUE;
- IF_DEBUG(scheduler, sched_belch("thread %d: re-entering RTS", tso->id));
+ debugTrace(DEBUG_sched, "thread %d: re-entering RTS", tso->id);
if (tso->why_blocked == BlockedOnCCall) {
awakenBlockedQueue(cap,tso->blocked_exceptions);
@@ -2629,16 +2631,17 @@ createThread(Capability *cap, nat size)
#endif
#if defined(GRAN)
- IF_GRAN_DEBUG(pri,
- sched_belch("==__ schedule: Created TSO %d (%p);",
- CurrentProc, tso, tso->id));
+ debugTrace(GRAN_DEBUG_pri,
+ "==__ schedule: Created TSO %d (%p);",
+ CurrentProc, tso, tso->id);
#elif defined(PARALLEL_HASKELL)
- IF_PAR_DEBUG(verbose,
- sched_belch("==__ schedule: Created TSO %d (%p); %d threads active",
- (long)tso->id, tso, advisory_thread_count));
+ debugTrace(PAR_DEBUG_verbose,
+ "==__ schedule: Created TSO %d (%p); %d threads active",
+ (long)tso->id, tso, advisory_thread_count);
#else
- IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words",
- (long)tso->id, (long)tso->stack_size));
+ debugTrace(DEBUG_sched,
+ "created thread %ld, stack size = %lx words",
+ (long)tso->id, (long)tso->stack_size);
#endif
return tso;
}
@@ -2759,7 +2762,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
appendToRunQueue(cap,tso);
- IF_DEBUG(scheduler, sched_belch("new bound thread (%d)", tso->id));
+ debugTrace(DEBUG_sched, "new bound thread (%d)", tso->id);
#if defined(GRAN)
/* GranSim specific init */
@@ -2773,7 +2776,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
ASSERT(task->stat != NoStatus);
ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
- IF_DEBUG(scheduler, sched_belch("bound thread (%d) finished", task->tso->id));
+ debugTrace(DEBUG_sched, "bound thread (%d) finished", task->tso->id);
return cap;
}
@@ -2881,6 +2884,8 @@ initScheduler(void)
}
#endif
+ trace(TRACE_sched, "start: %d capabilities", n_capabilities);
+
RELEASE_LOCK(&sched_mutex);
}
@@ -2967,7 +2972,8 @@ GetRoots( evac_fn evac )
#endif
for (task = cap->suspended_ccalling_tasks; task != NULL;
task=task->next) {
- IF_DEBUG(scheduler,sched_belch("evac'ing suspended TSO %d", task->suspended_tso->id));
+ debugTrace(DEBUG_sched,
+ "evac'ing suspended TSO %d", task->suspended_tso->id);
evac((StgClosure **)(void *)&task->suspended_tso);
}
@@ -3068,12 +3074,13 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
IF_DEBUG(sanity,checkTSO(tso));
if (tso->stack_size >= tso->max_stack_size) {
- IF_DEBUG(gc,
- debugBelch("@@ threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)\n",
- (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
- /* If we're debugging, just print out the top of the stack */
- printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
- tso->sp+64)));
+ debugTrace(DEBUG_gc,
+ "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)\n",
+ (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
+ IF_DEBUG(gc,
+ /* If we're debugging, just print out the top of the stack */
+ printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
+ tso->sp+64)));
/* Send this thread the StackOverflow exception */
raiseAsync(cap, tso, (StgClosure *)stackOverflow_closure);
@@ -3090,7 +3097,9 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
- IF_DEBUG(scheduler, sched_belch("increasing stack size from %ld words to %d.\n", (long)tso->stack_size, new_stack_size));
+ debugTrace(DEBUG_sched,
+ "increasing stack size from %ld words to %d.\n",
+ (long)tso->stack_size, new_stack_size);
dest = (StgTSO *)allocate(new_tso_size);
TICK_ALLOC_TSO(new_stack_size,0);
@@ -3211,8 +3220,8 @@ unblockOne(StgBlockingQueueElement *bqe, StgClosure *node)
(node_loc==tso_loc ? "Local" : "Global"),
tso->id, tso, CurrentProc, tso->block_info.closure, tso->link));
tso->block_info.closure = NULL;
- IF_DEBUG(scheduler,debugBelch("-- Waking up thread %ld (%p)\n",
- tso->id, tso));
+ debugTrace(DEBUG_sched, "-- waking up thread %ld (%p)\n",
+ tso->id, tso));
}
#elif defined(PARALLEL_HASKELL)
StgBlockingQueueElement *
@@ -3295,7 +3304,10 @@ unblockOne(Capability *cap, StgTSO *tso)
context_switch = 1;
#endif
- IF_DEBUG(scheduler,sched_belch("waking up thread %ld on cap %d", (long)tso->id, tso->cap->no));
+ debugTrace(DEBUG_sched,
+ "waking up thread %ld on cap %d",
+ (long)tso->id, tso->cap->no);
+
return next;
}
@@ -3774,7 +3786,7 @@ checkBlackHoles (Capability *cap)
// blackhole_queue is global:
ASSERT_LOCK_HELD(&sched_mutex);
- IF_DEBUG(scheduler, sched_belch("checking threads blocked on black holes"));
+ debugTrace(DEBUG_sched, "checking threads blocked on black holes");
// ASSUMES: sched_mutex
prev = &blackhole_queue;
@@ -3860,8 +3872,8 @@ raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception,
return;
}
- IF_DEBUG(scheduler,
- sched_belch("raising exception in thread %ld.", (long)tso->id));
+ debugTrace(DEBUG_sched,
+ "raising exception in thread %ld.", (long)tso->id);
// Remove it from any blocking queues
unblockThread(cap,tso);
@@ -3929,12 +3941,12 @@ raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception,
((StgClosure *)frame)->header.prof.ccs /* ToDo */);
TICK_ALLOC_UP_THK(words+1,0);
- IF_DEBUG(scheduler,
- debugBelch("sched: Updating ");
- printPtr((P_)((StgUpdateFrame *)frame)->updatee);
- debugBelch(" with ");
- printObj((StgClosure *)ap);
- );
+ //IF_DEBUG(scheduler,
+ // debugBelch("sched: Updating ");
+ // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
+ // debugBelch(" with ");
+ // printObj((StgClosure *)ap);
+ // );
// Replace the updatee with an indirection
//
@@ -4035,8 +4047,9 @@ raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception,
// whether the transaction is valid or not because its
// possible validity cannot have caused the exception
// and will not be visible after the abort.
- IF_DEBUG(stm,
- debugBelch("Found atomically block delivering async exception\n"));
+ debugTrace(DEBUG_stm,
+ "found atomically block delivering async exception");
+
StgTRecHeader *trec = tso -> trec;
StgTRecHeader *outer = stmGetEnclosingTRec(trec);
stmAbortTransaction(cap, trec);
@@ -4146,7 +4159,7 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
continue;
case ATOMICALLY_FRAME:
- IF_DEBUG(stm, debugBelch("Found ATOMICALLY_FRAME at %p\n", p));
+ debugTrace(DEBUG_stm, "found ATOMICALLY_FRAME at %p", p);
tso->sp = p;
return ATOMICALLY_FRAME;
@@ -4155,7 +4168,7 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
return CATCH_FRAME;
case CATCH_STM_FRAME:
- IF_DEBUG(stm, debugBelch("Found CATCH_STM_FRAME at %p\n", p));
+ debugTrace(DEBUG_stm, "found CATCH_STM_FRAME at %p", p);
tso->sp = p;
return CATCH_STM_FRAME;
@@ -4201,14 +4214,16 @@ findRetryFrameHelper (StgTSO *tso)
switch (info->i.type) {
case ATOMICALLY_FRAME:
- IF_DEBUG(stm, debugBelch("Found ATOMICALLY_FRAME at %p during retrry\n", p));
- tso->sp = p;
- return ATOMICALLY_FRAME;
+ debugTrace(DEBUG_stm,
+ "found ATOMICALLY_FRAME at %p during retrry", p);
+ tso->sp = p;
+ return ATOMICALLY_FRAME;
case CATCH_RETRY_FRAME:
- IF_DEBUG(stm, debugBelch("Found CATCH_RETRY_FRAME at %p during retrry\n", p));
- tso->sp = p;
- return CATCH_RETRY_FRAME;
+ debugTrace(DEBUG_stm,
+ "found CATCH_RETRY_FRAME at %p during retrry", p);
+ tso->sp = p;
+ return CATCH_RETRY_FRAME;
case CATCH_STM_FRAME:
default:
@@ -4240,7 +4255,7 @@ resurrectThreads (StgTSO *threads)
next = tso->global_link;
tso->global_link = all_threads;
all_threads = tso;
- IF_DEBUG(scheduler, sched_belch("resurrecting thread %d", tso->id));
+ debugTrace(DEBUG_sched, "resurrecting thread %d", tso->id);
// Wake up the thread on the Capability it was last on
cap = tso->cap;
@@ -4562,21 +4577,4 @@ run_queue_len(void)
}
#endif
-void
-sched_belch(char *s, ...)
-{
- va_list ap;
- va_start(ap,s);
-#ifdef THREADED_RTS
- debugBelch("sched (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId());
-#elif defined(PARALLEL_HASKELL)
- debugBelch("== ");
-#else
- debugBelch("sched: ");
-#endif
- vdebugBelch(s, ap);
- debugBelch("\n");
- va_end(ap);
-}
-
#endif /* DEBUG */
diff --git a/rts/Schedule.h b/rts/Schedule.h
index edbe246ed3..3adb70f4a8 100644
--- a/rts/Schedule.h
+++ b/rts/Schedule.h
@@ -314,11 +314,6 @@ emptyThreadQueues(Capability *cap)
;
}
-#ifdef DEBUG
-void sched_belch(char *s, ...)
- GNU_ATTRIBUTE(format (printf, 1, 2));
-#endif
-
#endif /* !IN_STG_CODE */
STATIC_INLINE void
diff --git a/rts/Sparks.c b/rts/Sparks.c
index 615d832e33..68ad19ddd3 100644
--- a/rts/Sparks.c
+++ b/rts/Sparks.c
@@ -21,6 +21,7 @@
# include "GranSimRts.h"
# endif
#include "Sparks.h"
+#include "Trace.h"
#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
@@ -149,19 +150,18 @@ markSparkQueue (evac_fn evac)
PAR_TICKY_MARK_SPARK_QUEUE_END(n);
#if defined(PARALLEL_HASKELL)
- IF_DEBUG(scheduler,
- debugBelch("markSparkQueue: marked %d sparks and pruned %d sparks on [%x]",
- n, pruned_sparks, mytid));
+ debugTrace(DEBUG_sched,
+ "marked %d sparks and pruned %d sparks on [%x]",
+ n, pruned_sparks, mytid);
#else
- IF_DEBUG(scheduler,
- debugBelch("markSparkQueue: marked %d sparks and pruned %d sparks\n",
- n, pruned_sparks));
+ debugTrace(DEBUG_sched,
+ "marked %d sparks and pruned %d sparks",
+ n, pruned_sparks);
#endif
- IF_DEBUG(scheduler,
- debugBelch("markSparkQueue: new spark queue len=%d; (hd=%p; tl=%p)\n",
- sparkPoolSize(pool), pool->hd, pool->tl));
-
+ debugTrace(DEBUG_sched,
+ "new spark queue len=%d; (hd=%p; tl=%p)\n",
+ sparkPoolSize(pool), pool->hd, pool->tl);
}
}
@@ -825,8 +825,9 @@ markSparkQueue(void)
// ToDo?: statistics gathering here (also for GUM!)
sp->node = (StgClosure *)MarkRoot(sp->node);
}
+
IF_DEBUG(gc,
- debugBelch("@@ markSparkQueue: spark statistics at start of GC:");
+ debugBelch("markSparkQueue: spark statistics at start of GC:");
print_sparkq_stats());
}
diff --git a/rts/Stable.c b/rts/Stable.c
index a4db5cd749..2391cb127f 100644
--- a/rts/Stable.c
+++ b/rts/Stable.c
@@ -18,6 +18,7 @@
#include "RtsAPI.h"
#include "RtsFlags.h"
#include "OSThreads.h"
+#include "Trace.h"
/* Comment from ADR's implementation in old RTS:
@@ -199,7 +200,7 @@ lookupStableName_(StgPtr p)
if (sn != 0) {
ASSERT(stable_ptr_table[sn].addr == p);
- IF_DEBUG(stable,debugBelch("cached stable name %ld at %p\n",sn,p));
+ debugTrace(DEBUG_stable, "cached stable name %ld at %p",sn,p);
return sn;
} else {
sn = stable_ptr_free - stable_ptr_table;
@@ -207,7 +208,7 @@ lookupStableName_(StgPtr p)
stable_ptr_table[sn].ref = 0;
stable_ptr_table[sn].addr = p;
stable_ptr_table[sn].sn_obj = NULL;
- /* IF_DEBUG(stable,debugBelch("new stable name %d at %p\n",sn,p)); */
+ /* debugTrace(DEBUG_stable, "new stable name %d at %p\n",sn,p); */
/* add the new stable name to the hash table */
insertHashTable(addrToStableHash, (W_)p, (void *)sn);
@@ -399,13 +400,15 @@ gcStablePtrTable( void )
if (p->sn_obj == NULL) {
// StableName object is dead
freeStableName(p);
- IF_DEBUG(stable, debugBelch("GC'd Stable name %ld\n",
- p - stable_ptr_table));
+ debugTrace(DEBUG_stable, "GC'd Stable name %ld",
+ p - stable_ptr_table);
continue;
} else {
p->addr = (StgPtr)isAlive((StgClosure *)p->addr);
- IF_DEBUG(stable, debugBelch("Stable name %ld still alive at %p, ref %ld\n", p - stable_ptr_table, p->addr, p->ref));
+ debugTrace(DEBUG_stable,
+ "stable name %ld still alive at %p, ref %ld\n",
+ p - stable_ptr_table, p->addr, p->ref);
}
}
}
diff --git a/rts/Stats.c b/rts/Stats.c
index f0f61b25b4..ec8d5838fb 100644
--- a/rts/Stats.c
+++ b/rts/Stats.c
@@ -75,6 +75,11 @@ Ticks stat_getElapsedGCTime(void)
return GCe_tot_time;
}
+Ticks stat_getElapsedTime(void)
+{
+ return getProcessElapsedTime() - ElapsedTimeStart;
+}
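+// Used by tracePreface() in the new rts/Trace.c to timestamp trace
+// messages when RtsFlags.TraceFlags.timestamp is set.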
+
/* mut_user_time_during_GC() and mut_user_time()
*
* The former function can be used to get the current mutator time
diff --git a/rts/Stats.h b/rts/Stats.h
index 20bc0155ad..9de6b718bb 100644
--- a/rts/Stats.h
+++ b/rts/Stats.h
@@ -52,5 +52,6 @@ void statDescribeGens( void );
HsInt64 getAllocations( void );
Ticks stat_getElapsedGCTime(void);
+Ticks stat_getElapsedTime(void);
#endif /* STATS_H */
diff --git a/rts/Storage.c b/rts/Storage.c
index ee860e27a2..46db1eefc9 100644
--- a/rts/Storage.c
+++ b/rts/Storage.c
@@ -23,6 +23,7 @@
#include "Schedule.h"
#include "RetainerProfile.h" // for counting memory blocks (memInventory)
#include "OSMem.h"
+#include "Trace.h"
#include <stdlib.h>
#include <string.h>
@@ -495,15 +496,15 @@ resizeNursery ( step *stp, nat blocks )
if (nursery_blocks == blocks) return;
if (nursery_blocks < blocks) {
- IF_DEBUG(gc, debugBelch("Increasing size of nursery to %d blocks\n",
- blocks));
+ debugTrace(DEBUG_gc, "increasing size of nursery to %d blocks",
+ blocks);
stp->blocks = allocNursery(stp, stp->blocks, blocks-nursery_blocks);
}
else {
bdescr *next_bd;
- IF_DEBUG(gc, debugBelch("Decreasing size of nursery to %d blocks\n",
- blocks));
+ debugTrace(DEBUG_gc, "decreasing size of nursery to %d blocks",
+ blocks);
bd = stp->blocks;
while (nursery_blocks > blocks) {
@@ -1005,7 +1006,7 @@ void *allocateExec (nat bytes)
bdescr *bd;
lnat pagesize = getPageSize();
bd = allocGroup(stg_max(1, pagesize / BLOCK_SIZE));
- IF_DEBUG(gc, debugBelch("allocate exec block %p\n", bd->start));
+ debugTrace(DEBUG_gc, "allocate exec block %p", bd->start);
bd->gen_no = 0;
bd->flags = BF_EXEC;
bd->link = exec_block;
@@ -1046,7 +1047,7 @@ void freeExec (void *addr)
// Free the block if it is empty, but not if it is the block at
// the head of the queue.
if (bd->gen_no == 0 && bd != exec_block) {
- IF_DEBUG(gc, debugBelch("free exec block %p\n", bd->start));
+ debugTrace(DEBUG_gc, "free exec block %p", bd->start);
if (bd->u.back) {
bd->u.back->link = bd->link;
} else {
diff --git a/rts/Task.c b/rts/Task.c
index 9923609884..918dc559b8 100644
--- a/rts/Task.c
+++ b/rts/Task.c
@@ -17,6 +17,7 @@
#include "RtsFlags.h"
#include "Schedule.h"
#include "Hash.h"
+#include "Trace.h"
#if HAVE_SIGNAL_H
#include <signal.h>
@@ -69,7 +70,9 @@ initTaskManager (void)
void
stopTaskManager (void)
{
- IF_DEBUG(scheduler, sched_belch("stopping task manager, %d tasks still running", tasksRunning));
+ debugTrace(DEBUG_sched,
+ "stopping task manager, %d tasks still running",
+ tasksRunning);
}
@@ -144,7 +147,7 @@ newBoundTask (void)
taskEnter(task);
- IF_DEBUG(scheduler,sched_belch("new task (taskCount: %d)", taskCount););
+ debugTrace(DEBUG_sched, "new task (taskCount: %d)", taskCount);
return task;
}
@@ -168,7 +171,7 @@ boundTaskExiting (Task *task)
task_free_list = task;
RELEASE_LOCK(&sched_mutex);
- IF_DEBUG(scheduler,sched_belch("task exiting"));
+ debugTrace(DEBUG_sched, "task exiting");
}
#ifdef THREADED_RTS
@@ -182,7 +185,7 @@ discardTask (Task *task)
{
ASSERT_LOCK_HELD(&sched_mutex);
if (!task->stopped) {
- IF_DEBUG(scheduler,sched_belch("discarding task %p", TASK_ID(task)));
+ debugTrace(DEBUG_sched, "discarding task %p", TASK_ID(task));
task->cap = NULL;
task->tso = NULL;
task->stopped = rtsTrue;
@@ -275,7 +278,7 @@ startWorkerTask (Capability *cap,
barf("startTask: Can't create new task");
}
- IF_DEBUG(scheduler,sched_belch("new worker task (taskCount: %d)", taskCount););
+ debugTrace(DEBUG_sched, "new worker task (taskCount: %d)", taskCount);
task->id = tid;
diff --git a/rts/Trace.c b/rts/Trace.c
new file mode 100644
index 0000000000..042de6d8d4
--- /dev/null
+++ b/rts/Trace.c
@@ -0,0 +1,155 @@
+/* -----------------------------------------------------------------------------
+ *
+ * (c) The GHC Team 2006
+ *
+ * Debug and performance tracing
+ *
+ * ---------------------------------------------------------------------------*/
+
+#include "Rts.h"
+#include "OSThreads.h"
+#include "Trace.h"
+#include "RtsFlags.h"
+#include "GetTime.h"
+#include "Stats.h"
+
+/*
+ Features we want:
+ - multiple log message classes
+ - output thread ID & time on each message
+ - thread-safe
+ - trace source locations?
+ - break into the debugger?
+*/
+
+StgWord32 classes_enabled; // not static due to inline funcs
+
+#ifdef THREADED_RTS
+static Mutex trace_utx;
+#endif
+
+#ifdef DEBUG
+#define DEBUG_FLAG(name, class) \
+ if (RtsFlags.DebugFlags.name) classes_enabled |= class;
+#else
+#define DEBUG_FLAG(name, class) \
+ /* nothing */
+#endif
+
+#ifdef PAR
+#define PAR_FLAG(name, class) \
+ if (RtsFlags.ParFlags.Debug.name) classes_enabled |= class;
+#else
+#define PAR_FLAG(name, class) \
+ /* nothing */
+#endif
+
+#ifdef GRAN
+#define GRAN_FLAG(name, class) \
+ if (RtsFlags.GranFlags.Debug.name) classes_enabled |= class;
+#else
+#define GRAN_FLAG(name, class) \
+ /* nothing */
+#endif
+
+#define TRACE_FLAG(name, class) \
+ if (RtsFlags.TraceFlags.name) classes_enabled |= class;
+
+
+void initTracing (void)
+{
+#ifdef THREADED_RTS
+ initMutex(&trace_utx);
+#endif
+
+ DEBUG_FLAG(scheduler, DEBUG_sched);
+ DEBUG_FLAG(interpreter, DEBUG_interp);
+ DEBUG_FLAG(weak, DEBUG_weak);
+ DEBUG_FLAG(gccafs, DEBUG_gccafs);
+ DEBUG_FLAG(gc, DEBUG_gc);
+ DEBUG_FLAG(block_alloc, DEBUG_block_alloc);
+ DEBUG_FLAG(sanity, DEBUG_sanity);
+ DEBUG_FLAG(stable, DEBUG_stable);
+ DEBUG_FLAG(stm, DEBUG_stm);
+ DEBUG_FLAG(prof, DEBUG_prof);
+ DEBUG_FLAG(gran, DEBUG_gran);
+ DEBUG_FLAG(par, DEBUG_par);
+ DEBUG_FLAG(linker, DEBUG_linker);
+ DEBUG_FLAG(squeeze, DEBUG_squeeze);
+
+ PAR_FLAG(verbose, PAR_DEBUG_verbose);
+ PAR_FLAG(bq, PAR_DEBUG_bq);
+ PAR_FLAG(schedule, PAR_DEBUG_schedule);
+ PAR_FLAG(free, PAR_DEBUG_free);
+ PAR_FLAG(resume, PAR_DEBUG_resume);
+ PAR_FLAG(weight, PAR_DEBUG_weight);
+ PAR_FLAG(fetch, PAR_DEBUG_fetch);
+ PAR_FLAG(fish, PAR_DEBUG_fish);
+ PAR_FLAG(tables, PAR_DEBUG_tables);
+ PAR_FLAG(packet, PAR_DEBUG_packet);
+ PAR_FLAG(pack, PAR_DEBUG_pack);
+ PAR_FLAG(paranoia, PAR_DEBUG_paranoia);
+
+ GRAN_FLAG(event_trace, GRAN_DEBUG_event_trace);
+ GRAN_FLAG(event_stats, GRAN_DEBUG_event_stats);
+ GRAN_FLAG(bq, GRAN_DEBUG_bq);
+ GRAN_FLAG(pack, GRAN_DEBUG_pack);
+ GRAN_FLAG(checkSparkQ, GRAN_DEBUG_checkSparkQ);
+ GRAN_FLAG(thunkStealing, GRAN_DEBUG_thunkStealing);
+ GRAN_FLAG(randomSteal, GRAN_DEBUG_randomSteal);
+ GRAN_FLAG(findWork, GRAN_DEBUG_findWork);
+ GRAN_FLAG(unused, GRAN_DEBUG_unused);
+ GRAN_FLAG(pri, GRAN_DEBUG_pri);
+ GRAN_FLAG(checkLight, GRAN_DEBUG_checkLight);
+ GRAN_FLAG(sortedQ, GRAN_DEBUG_sortedQ);
+ GRAN_FLAG(blockOnFetch, GRAN_DEBUG_blockOnFetch);
+ GRAN_FLAG(packBuffer, GRAN_DEBUG_packBuffer);
+ GRAN_FLAG(blockedOnFetch_sanity, GRAN_DEBUG_BOF_sanity);
+
+ TRACE_FLAG(sched, TRACE_sched);
+}
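+
+/* Illustrative expansion (not part of this patch): in a DEBUG build,
+ *   DEBUG_FLAG(scheduler, DEBUG_sched);
+ * expands to
+ *   if (RtsFlags.DebugFlags.scheduler) classes_enabled |= DEBUG_sched;
+ * so running with +RTS -Ds enables every debugTrace(DEBUG_sched, ...)
+ * call site converted by this patch.
+ */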
+
+static void tracePreface (void)
+{
+#ifdef THREADED_RTS
+ debugBelch("%12lx: ", (unsigned long)osThreadId());
+#endif
+ if (RtsFlags.TraceFlags.timestamp) {
+ debugBelch("%9" FMT_Word64 ": ", stat_getElapsedTime());
+ }
+}
+
+void trace (StgWord32 class, const char *str, ...)
+{
+ va_list ap;
+ va_start(ap,str);
+
+ ACQUIRE_LOCK(&trace_utx);
+
+ if ((classes_enabled & class) != 0) {
+ tracePreface();
+ vdebugBelch(str,ap);
+ debugBelch("\n");
+ }
+
+ RELEASE_LOCK(&trace_utx);
+
+ va_end(ap);
+}
+
+void traceBegin (const char *str, ...)
+{
+ va_list ap;
+ va_start(ap,str);
+
+ ACQUIRE_LOCK(&trace_utx);
+
+ tracePreface();
+ vdebugBelch(str,ap);
+}
+
+void traceEnd (void)
+{
+ debugBelch("\n");
+ RELEASE_LOCK(&trace_utx);
+}
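+
+/* A usage sketch for the begin/end API (illustrative, not part of this
+ * file): several printers can contribute to one locked, prefaced message,
+ * as scheduleHandleThreadBlocked() in Schedule.c now does:
+ *
+ *   if (traceClass(DEBUG_sched)) {
+ *       debugTraceBegin("--<< thread %d stopped: ", tso->id);
+ *       printThreadBlockage(tso);  // prints the reason for blocking
+ *       debugTraceEnd();           // emits the newline, releases the lock
+ *   }
+ */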
diff --git a/rts/Trace.h b/rts/Trace.h
new file mode 100644
index 0000000000..19e492c26e
--- /dev/null
+++ b/rts/Trace.h
@@ -0,0 +1,123 @@
+/* -----------------------------------------------------------------------------
+ *
+ * (c) The GHC Team 2006
+ *
+ * Debug and performance tracing.
+ *
+ * This is a layer over RtsMessages, which provides for generating
+ * trace messages with timestamps and thread Ids attached
+ * automatically. Also, multiple classes of messages are supported,
+ * which can be enabled separately via RTS flags.
+ *
+ * All debug trace messages go through here. Additionally, we
+ * generate timestamped trace messages for consumption by profiling
+ * tools using this API.
+ *
+ * ---------------------------------------------------------------------------*/
+
+#ifndef TRACE_H
+#define TRACE_H
+
+// -----------------------------------------------------------------------------
+// Tracing functions
+// -----------------------------------------------------------------------------
+
+void initTracing (void);
+
+// The simple way:
+void trace (StgWord32 class, const char *str, ...)
+ GNUC3_ATTRIBUTE(format (printf, 2, 3));
+
+// The harder way: sometimes we want to generate a trace message that
+// consists of multiple components generated by different functions.
+// So we provide the functionality of trace() split into 3 parts:
+// - traceClass(): a check that the required class is enabled
+// - traceBegin(): print the beginning of the trace message
+// - traceEnd(): complete the trace message (release the lock too).
+//
+INLINE_HEADER rtsBool traceClass (StgWord32 class);
+
+void traceBegin (const char *str, ...)
+ GNUC3_ATTRIBUTE(format (printf, 1, 2));
+
+void traceEnd (void);
+
+#ifdef DEBUG
+#define debugTrace(class, str, ...) trace(class,str, ## __VA_ARGS__)
+// variable arg macros are C99, and supported by gcc.
+#define debugTraceBegin(str, ...) traceBegin(str, ## __VA_ARGS__)
+#define debugTraceEnd() traceEnd()
+#else
+#define debugTrace(class, str, ...) /* nothing */
+#define debugTraceBegin(str, ...) /* nothing */
+#define debugTraceEnd() /* nothing */
+#endif
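+
+// Illustrative examples (not part of this patch): debugTrace() compiles
+// to nothing in non-DEBUG builds, while trace() is always present and a
+// message may carry several classes at once:
+//   debugTrace(DEBUG_sched, "resurrecting thread %d", tso->id);
+//   trace(TRACE_sched | DEBUG_sched, "freeing capability %d", cap->no);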
+
+
+// -----------------------------------------------------------------------------
+// Message classes, these may be OR-ed together
+// -----------------------------------------------------------------------------
+
+// debugging flags, set with +RTS -D<something>
+#define DEBUG_sched (1<<0)
+#define DEBUG_interp (1<<1)
+#define DEBUG_weak (1<<2)
+#define DEBUG_gccafs (1<<3)
+#define DEBUG_gc (1<<4)
+#define DEBUG_block_alloc (1<<5)
+#define DEBUG_sanity (1<<6)
+#define DEBUG_stable (1<<7)
+#define DEBUG_stm (1<<8)
+#define DEBUG_prof (1<<9)
+#define DEBUG_gran (1<<10)
+#define DEBUG_par (1<<11)
+#define DEBUG_linker (1<<12)
+#define DEBUG_squeeze (1<<13)
+
+// PAR debugging flags, set with +RTS -qD<something>
+#define PAR_DEBUG_verbose (1<<14)
+#define PAR_DEBUG_bq (1<<15)
+#define PAR_DEBUG_schedule (1<<16)
+#define PAR_DEBUG_free (1<<17)
+#define PAR_DEBUG_resume (1<<18)
+#define PAR_DEBUG_weight (1<<19)
+#define PAR_DEBUG_fetch (1<<20)
+#define PAR_DEBUG_fish (1<<21)
+#define PAR_DEBUG_tables (1<<22)
+#define PAR_DEBUG_packet (1<<23)
+#define PAR_DEBUG_pack (1<<24)
+#define PAR_DEBUG_paranoia (1<<25)
+
+// GRAN and PAR don't coexist, so we re-use the PAR values for GRAN.
+#define GRAN_DEBUG_event_trace (1<<14)
+#define GRAN_DEBUG_event_stats (1<<15)
+#define GRAN_DEBUG_bq (1<<16)
+#define GRAN_DEBUG_pack (1<<17)
+#define GRAN_DEBUG_checkSparkQ (1<<18)
+#define GRAN_DEBUG_thunkStealing (1<<19)
+#define GRAN_DEBUG_randomSteal (1<<20)
+#define GRAN_DEBUG_findWork (1<<21)
+#define GRAN_DEBUG_unused (1<<22)
+#define GRAN_DEBUG_pri (1<<23)
+#define GRAN_DEBUG_checkLight (1<<24)
+#define GRAN_DEBUG_sortedQ (1<<25)
+#define GRAN_DEBUG_blockOnFetch (1<<26)
+#define GRAN_DEBUG_packBuffer (1<<27)
+#define GRAN_DEBUG_BOF_sanity (1<<28)
+
+// Profiling flags
+#define TRACE_sched (1<<29)
+
+
+// -----------------------------------------------------------------------------
+// PRIVATE below here
+// -----------------------------------------------------------------------------
+
+extern StgWord32 classes_enabled;
+
+INLINE_HEADER rtsBool
+traceClass (StgWord32 class) { return (classes_enabled & class); }
+
+// -----------------------------------------------------------------------------
+
+#endif /* TRACE_H */
diff --git a/rts/Weak.c b/rts/Weak.c
index f010395221..a83cef995f 100644
--- a/rts/Weak.c
+++ b/rts/Weak.c
@@ -17,6 +17,7 @@
#include "Schedule.h"
#include "Prelude.h"
#include "RtsAPI.h"
+#include "Trace.h"
StgWeak *weak_ptr_list;
@@ -70,7 +71,7 @@ scheduleFinalizers(Capability *cap, StgWeak *list)
// No finalizers to run?
if (n == 0) return;
- IF_DEBUG(weak,debugBelch("weak: batching %d finalizers\n", n));
+ debugTrace(DEBUG_weak, "weak: batching %d finalizers", n);
arr = (StgMutArrPtrs *)allocateLocal(cap, sizeofW(StgMutArrPtrs) + n);
TICK_ALLOC_PRIM(sizeofW(StgMutArrPtrs), n, 0);