path: root/rts/Trace.h
author     Simon Marlow <marlowsd@gmail.com>    2012-09-07 13:55:11 +0100
committer  Simon Marlow <marlowsd@gmail.com>    2012-09-07 15:32:14 +0100
commit     41737f12f99c9ea776f7658b93e5b03ffc8f120b (patch)
tree       76dd200a6f0e3fd8af87270ae1010985038c26a9 /rts/Trace.h
parent     a8179622f84bbd52e127a9596d2d4a918ca64e0c (diff)
download   haskell-41737f12f99c9ea776f7658b93e5b03ffc8f120b.tar.gz
Deprecate lnat, and use StgWord instead
lnat was originally "long unsigned int" but we were using it when we wanted a 64-bit type on a 64-bit machine. This broke on Windows x64, where long == int == 32 bits. Using types of unspecified size is bad, but what we really wanted was a type with N bits on an N-bit machine. StgWord is exactly that.

lnat was mentioned in some APIs that clients might be using (e.g. StackOverflowHook()), so we leave it defined but with a comment to say that it's deprecated.
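As background, a minimal C sketch of the width problem described above: on LLP64 Windows x64, "unsigned long" (the old lnat) stays at 32 bits, whereas a pointer-sized type has N bits on an N-bit machine, which is the property StgWord provides. The Word typedef, the lnat alias, and main() below are illustrative stand-ins, not the actual definitions from the GHC headers.

/* Illustrative sketch only; not part of the GHC sources. */
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t Word;   /* hypothetical stand-in for StgWord / W_:
                             pointer-sized on every platform */
typedef Word      lnat;   /* deprecated alias kept so existing client
                             code still compiles */

int main(void)
{
    /* On LP64 Unix both print 8; on Windows x64 (LLP64) unsigned long
       prints 4 while the pointer-sized type still prints 8. */
    printf("unsigned long:    %zu bytes\n", sizeof(unsigned long));
    printf("Word (uintptr_t): %zu bytes\n", sizeof(Word));
    return 0;
}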
Diffstat (limited to 'rts/Trace.h')
-rw-r--r--   rts/Trace.h   44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/rts/Trace.h b/rts/Trace.h
index b3710d32c9..4f1ac3bf0a 100644
--- a/rts/Trace.h
+++ b/rts/Trace.h
@@ -133,24 +133,24 @@ void traceGcEventAtT_ (Capability *cap, StgWord64 ts, EventTypeNum tag);
void traceHeapEvent_ (Capability *cap,
EventTypeNum tag,
CapsetID heap_capset,
- lnat info1);
+ W_ info1);
void traceEventHeapInfo_ (CapsetID heap_capset,
nat gens,
- lnat maxHeapSize,
- lnat allocAreaSize,
- lnat mblockSize,
- lnat blockSize);
+ W_ maxHeapSize,
+ W_ allocAreaSize,
+ W_ mblockSize,
+ W_ blockSize);
void traceEventGcStats_ (Capability *cap,
CapsetID heap_capset,
nat gen,
- lnat copied,
- lnat slop,
- lnat fragmentation,
+ W_ copied,
+ W_ slop,
+ W_ fragmentation,
nat par_n_threads,
- lnat par_max_copied,
- lnat par_tot_copied);
+ W_ par_max_copied,
+ W_ par_tot_copied);
/*
* Record a spark event
@@ -642,12 +642,12 @@ INLINE_HEADER void traceEventGcGlobalSync(Capability *cap STG_UNUSED)
INLINE_HEADER void traceEventGcStats(Capability *cap STG_UNUSED,
CapsetID heap_capset STG_UNUSED,
nat gen STG_UNUSED,
- lnat copied STG_UNUSED,
- lnat slop STG_UNUSED,
- lnat fragmentation STG_UNUSED,
+ W_ copied STG_UNUSED,
+ W_ slop STG_UNUSED,
+ W_ fragmentation STG_UNUSED,
nat par_n_threads STG_UNUSED,
- lnat par_max_copied STG_UNUSED,
- lnat par_tot_copied STG_UNUSED)
+ W_ par_max_copied STG_UNUSED,
+ W_ par_tot_copied STG_UNUSED)
{
if (RTS_UNLIKELY(TRACE_gc)) {
traceEventGcStats_(cap, heap_capset, gen,
@@ -661,10 +661,10 @@ INLINE_HEADER void traceEventGcStats(Capability *cap STG_UNUSED,
INLINE_HEADER void traceEventHeapInfo(CapsetID heap_capset STG_UNUSED,
nat gens STG_UNUSED,
- lnat maxHeapSize STG_UNUSED,
- lnat allocAreaSize STG_UNUSED,
- lnat mblockSize STG_UNUSED,
- lnat blockSize STG_UNUSED)
+ W_ maxHeapSize STG_UNUSED,
+ W_ allocAreaSize STG_UNUSED,
+ W_ mblockSize STG_UNUSED,
+ W_ blockSize STG_UNUSED)
{
if (RTS_UNLIKELY(TRACE_gc)) {
traceEventHeapInfo_(heap_capset, gens,
@@ -678,7 +678,7 @@ INLINE_HEADER void traceEventHeapInfo(CapsetID heap_capset STG_UNUSED,
INLINE_HEADER void traceEventHeapAllocated(Capability *cap STG_UNUSED,
CapsetID heap_capset STG_UNUSED,
- lnat allocated STG_UNUSED)
+ W_ allocated STG_UNUSED)
{
traceHeapEvent(cap, EVENT_HEAP_ALLOCATED, heap_capset, allocated);
dtraceEventHeapAllocated((EventCapNo)cap->no, heap_capset, allocated);
@@ -686,7 +686,7 @@ INLINE_HEADER void traceEventHeapAllocated(Capability *cap STG_UNUSED,
INLINE_HEADER void traceEventHeapSize(Capability *cap STG_UNUSED,
CapsetID heap_capset STG_UNUSED,
- lnat heap_size STG_UNUSED)
+ W_ heap_size STG_UNUSED)
{
traceHeapEvent(cap, EVENT_HEAP_SIZE, heap_capset, heap_size);
dtraceEventHeapSize(heap_capset, heap_size);
@@ -694,7 +694,7 @@ INLINE_HEADER void traceEventHeapSize(Capability *cap STG_UNUSED,
INLINE_HEADER void traceEventHeapLive(Capability *cap STG_UNUSED,
CapsetID heap_capset STG_UNUSED,
- lnat heap_live STG_UNUSED)
+ W_ heap_live STG_UNUSED)
{
traceHeapEvent(cap, EVENT_HEAP_LIVE, heap_capset, heap_live);
dtraceEventHeapLive(heap_capset, heap_live);