path: root/rts/Stats.c
author     Simon Marlow <marlowsd@gmail.com>    2012-09-07 13:55:11 +0100
committer  Simon Marlow <marlowsd@gmail.com>    2012-09-07 15:32:14 +0100
commit     41737f12f99c9ea776f7658b93e5b03ffc8f120b (patch)
tree       76dd200a6f0e3fd8af87270ae1010985038c26a9 /rts/Stats.c
parent     a8179622f84bbd52e127a9596d2d4a918ca64e0c (diff)
download   haskell-41737f12f99c9ea776f7658b93e5b03ffc8f120b.tar.gz
Deprecate lnat, and use StgWord instead
lnat was originally "long unsigned int" but we were using it when we wanted a 64-bit type on a 64-bit machine. This broke on Windows x64, where long == int == 32 bits. Using types of unspecified size is bad, but what we really wanted was a type with N bits on an N-bit machine. StgWord is exactly that.

lnat was mentioned in some APIs that clients might be using (e.g. StackOverflowHook()), so we leave it defined but with a comment to say that it's deprecated.
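
To make the width problem concrete, here is a minimal standalone sketch; the typedefs below are illustrative approximations (StgWord is stood in for by uintptr_t, and old_lnat is a hypothetical name for the old definition), not the actual GHC headers, which pick the real types at configure time.

    /* Sketch only: uintptr_t is likewise N bits on an N-bit machine. */
    #include <stdint.h>
    #include <stdio.h>

    typedef unsigned long old_lnat;  /* the old "long unsigned int" lnat        */
    typedef uintptr_t     StgWord;   /* word-sized on every platform (sketch)   */
    typedef StgWord       W_;        /* RTS shorthand used throughout the diff  */
    typedef StgWord       lnat;      /* left defined for clients, but deprecated */

    int main(void)
    {
        /* LP64 Unix prints 8 and 8; Win64 (LLP64) prints 4 and 8, which is
         * exactly the breakage described above. */
        printf("old lnat: %zu bytes\n", sizeof(old_lnat));
        printf("StgWord:  %zu bytes\n", sizeof(StgWord));
        return 0;
    }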
Diffstat (limited to 'rts/Stats.c')
-rw-r--r--   rts/Stats.c   34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/rts/Stats.c b/rts/Stats.c
index b12cb769f7..36ca0e11b3 100644
--- a/rts/Stats.c
+++ b/rts/Stats.c
@@ -57,14 +57,14 @@ static Time HCe_start_time, HCe_tot_time = 0; // heap census prof elap time
#endif
// current = current as of last GC
-static lnat current_residency = 0; // in words; for stats only
-static lnat max_residency = 0;
-static lnat cumulative_residency = 0;
-static lnat residency_samples = 0; // for stats only
-static lnat current_slop = 0;
-static lnat max_slop = 0;
+static W_ current_residency = 0; // in words; for stats only
+static W_ max_residency = 0;
+static W_ cumulative_residency = 0;
+static W_ residency_samples = 0; // for stats only
+static W_ current_slop = 0;
+static W_ max_slop = 0;
-static lnat GC_end_faults = 0;
+static W_ GC_end_faults = 0;
static Time *GC_coll_cpu = NULL;
static Time *GC_coll_elapsed = NULL;
@@ -340,8 +340,8 @@ stat_gcWorkerThreadDone (gc_thread *gct STG_UNUSED)
void
stat_endGC (Capability *cap, gc_thread *gct,
- lnat alloc, lnat live, lnat copied, lnat slop, nat gen,
- nat par_n_threads, lnat par_max_copied, lnat par_tot_copied)
+ W_ alloc, W_ live, W_ copied, W_ slop, nat gen,
+ nat par_n_threads, W_ par_max_copied, W_ par_tot_copied)
{
if (RtsFlags.GcFlags.giveStats != NO_GC_STATS ||
RtsFlags.ProfFlags.doHeapProfile)
@@ -419,8 +419,8 @@ stat_endGC (Capability *cap, gc_thread *gct,
* to calculate the total
*/
{
- lnat tot_alloc = 0;
- lnat n;
+ W_ tot_alloc = 0;
+ W_ n;
for (n = 0; n < n_capabilities; n++) {
tot_alloc += capabilities[n].total_allocated;
traceEventHeapAllocated(&capabilities[n],
@@ -627,7 +627,7 @@ stat_exit(int alloc)
if (tot_elapsed == 0.0) tot_elapsed = 1;
if (RtsFlags.GcFlags.giveStats >= VERBOSE_GC_STATS) {
- statsPrintf("%9" FMT_SizeT " %9.9s %9.9s", (lnat)alloc*sizeof(W_), "", "");
+ statsPrintf("%9" FMT_SizeT " %9.9s %9.9s", (W_)alloc*sizeof(W_), "", "");
statsPrintf(" %5.2f %5.2f\n\n", 0.0, 0.0);
}
@@ -675,7 +675,7 @@ stat_exit(int alloc)
statsPrintf("%16" FMT_SizeT " MB total memory in use (%" FMT_SizeT " MB lost due to fragmentation)\n\n",
peak_mblocks_allocated * MBLOCK_SIZE_W / (1024 * 1024 / sizeof(W_)),
- (lnat)(peak_mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W - hw_alloc_blocks * BLOCK_SIZE_W) / (1024 * 1024 / sizeof(W_)));
+ (W_)(peak_mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W - hw_alloc_blocks * BLOCK_SIZE_W) / (1024 * 1024 / sizeof(W_)));
/* Print garbage collections in each gen */
statsPrintf(" Tot time (elapsed) Avg pause Max pause\n");
@@ -856,9 +856,9 @@ void
statDescribeGens(void)
{
nat g, mut, lge, i;
- lnat gen_slop;
- lnat tot_live, tot_slop;
- lnat gen_live, gen_blocks;
+ W_ gen_slop;
+ W_ tot_live, tot_slop;
+ W_ gen_live, gen_blocks;
bdescr *bd;
generation *gen;
@@ -896,7 +896,7 @@ statDescribeGens(void)
gen_blocks += gcThreadLiveBlocks(i,g);
}
- debugBelch("%5d %7" FMT_SizeT " %9d", g, (lnat)gen->max_blocks, mut);
+ debugBelch("%5d %7" FMT_SizeT " %9d", g, (W_)gen->max_blocks, mut);
gen_slop = gen_blocks * BLOCK_SIZE_W - gen_live;