author    Erik de Castro Lopo <erikd@mega-nerd.com>  2016-05-02 06:37:14 +1000
committer Erik de Castro Lopo <erikd@mega-nerd.com>  2016-05-05 08:29:27 +1000
commit    db9de7eb3e91820024f673bfdb6fb8064cfed20d (patch)
tree      5e1c3ef0b6dee7f40fedbc118ba36cfe6ffdd1ee /rts/sm
parent    ad4392c142696d5092533480a82ed65322e9d413 (diff)
download  haskell-db9de7eb3e91820024f673bfdb6fb8064cfed20d.tar.gz
rts: Replace `nat` with `uint32_t`
The `nat` type was an alias for `unsigned int` with a comment saying
it was at least 32 bits. We keep the typedef in case client code is
using it but mark it as deprecated.

Test Plan: Validated on Linux, OS X and Windows

Reviewers: simonmar, austin, thomie, hvr, bgamari, hsyl20

Differential Revision: https://phabricator.haskell.org/D2166
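For context, a minimal sketch of what such a deprecated compatibility alias
can look like; the exact header change is not part of this diff, so the
attribute below is an assumption:

    /* Hypothetical sketch: keep the old `nat` name for client code, but
       make new uses warn under GCC/Clang. Not the verbatim header change. */
    #include <stdint.h>

    typedef uint32_t nat __attribute__((deprecated)); /* was: unsigned int */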
Diffstat (limited to 'rts/sm')
-rw-r--r--  rts/sm/BlockAlloc.c  | 18
-rw-r--r--  rts/sm/BlockAlloc.h  |  2
-rw-r--r--  rts/sm/Compact.h     |  6
-rw-r--r--  rts/sm/Evac.c        | 24
-rw-r--r--  rts/sm/GC.c          | 74
-rw-r--r--  rts/sm/GC.h          |  8
-rw-r--r--  rts/sm/GCThread.h    |  8
-rw-r--r--  rts/sm/GCUtils.c     | 16
-rw-r--r--  rts/sm/GCUtils.h     |  8
-rw-r--r--  rts/sm/HeapAlloc.h   |  4
-rw-r--r--  rts/sm/MBlock.c      | 46
-rw-r--r--  rts/sm/MarkWeak.c    | 10
-rw-r--r--  rts/sm/OSMem.h       |  4
-rw-r--r--  rts/sm/Sanity.c      | 52
-rw-r--r--  rts/sm/Scav.c        | 18
-rw-r--r--  rts/sm/Storage.c     | 38
-rw-r--r--  rts/sm/Storage.h     | 10
-rw-r--r--  rts/sm/Sweep.c       |  2
18 files changed, 174 insertions(+), 174 deletions(-)
diff --git a/rts/sm/BlockAlloc.c b/rts/sm/BlockAlloc.c
index a07dedb137..ff1a6460a4 100644
--- a/rts/sm/BlockAlloc.c
+++ b/rts/sm/BlockAlloc.c
@@ -7,7 +7,7 @@
* This is the architecture independent part of the block allocator.
* It requires only the following support from the operating system:
*
- * void *getMBlock(nat n);
+ * void *getMBlock(uint32_t n);
*
* returns the address of an n*MBLOCK_SIZE region of memory, aligned on
* an MBLOCK_SIZE boundary. There are no other restrictions on the
@@ -140,7 +140,7 @@ static void initMBlock(void *mblock);
Be very careful with integer overflow here. If you have an
expression like (n_blocks * BLOCK_SIZE), and n_blocks is an int or
- a nat, then it will very likely overflow on a 64-bit platform.
+ a uint32_t, then it will very likely overflow on a 64-bit platform.
Always cast to StgWord (or W_ for short) first: ((W_)n_blocks * BLOCK_SIZE).
--------------------------------------------------------------------------- */
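The overflow hazard described in that comment is easy to demonstrate in
isolation. The following standalone sketch assumes a 64-bit platform, an
illustrative 4096-byte BLOCK_SIZE, and uses uintptr_t as a stand-in for
StgWord (W_):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t W_;        /* stand-in for StgWord on a 64-bit platform */
    #define BLOCK_SIZE 4096      /* assumed block size for illustration */

    int main(void)
    {
        uint32_t n_blocks = 1u << 22;          /* 4M blocks */
        W_ wrong = n_blocks * BLOCK_SIZE;      /* 32-bit multiply wraps to 0 */
        W_ right = (W_)n_blocks * BLOCK_SIZE;  /* widen first: 2^34 bytes */
        printf("wrapped: %llu  correct: %llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
    }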
@@ -170,7 +170,7 @@ W_ hw_alloc_blocks; // high-water allocated blocks
void initBlockAllocator(void)
{
- nat i;
+ uint32_t i;
for (i=0; i < NUM_FREE_LISTS; i++) {
free_list[i] = NULL;
}
@@ -209,7 +209,7 @@ initGroup(bdescr *head)
#endif
// log base 2 (floor), needs to support up to (2^NUM_FREE_LISTS)-1
-STATIC_INLINE nat
+STATIC_INLINE uint32_t
log_2(W_ n)
{
ASSERT(n > 0 && n < (1<<NUM_FREE_LISTS));
@@ -229,12 +229,12 @@ log_2(W_ n)
}
// log base 2 (ceiling), needs to support up to (2^NUM_FREE_LISTS)-1
-STATIC_INLINE nat
+STATIC_INLINE uint32_t
log_2_ceil(W_ n)
{
ASSERT(n > 0 && n < (1<<NUM_FREE_LISTS));
#if defined(__GNUC__)
- nat r = log_2(n);
+ uint32_t r = log_2(n);
return (n & (n-1)) ? r+1 : r;
#else
W_ i, x;
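For reference, the floor/ceiling pair can be sketched in portable C as below.
This is a loop-based illustration of the `(n & (n-1))` power-of-two test
visible in the hunk, not the RTS's exact code, which may use compiler
builtins under GCC:

    #include <stdint.h>

    /* Floor of log2: position of the highest set bit (n > 0 assumed). */
    static inline uint32_t log2_floor(uintptr_t n)
    {
        uint32_t r = 0;
        while (n >>= 1) r++;
        return r;
    }

    /* Ceiling of log2: bump the floor unless n is an exact power of two. */
    static inline uint32_t log2_ceil(uintptr_t n)
    {
        uint32_t r = log2_floor(n);
        return (n & (n - 1)) ? r + 1 : r;
    }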
@@ -250,7 +250,7 @@ log_2_ceil(W_ n)
STATIC_INLINE void
free_list_insert (bdescr *bd)
{
- nat ln;
+ uint32_t ln;
ASSERT(bd->blocks < BLOCKS_PER_MBLOCK);
ln = log_2(bd->blocks);
@@ -284,7 +284,7 @@ setup_tail (bdescr *bd)
// Take a free block group bd, and split off a group of size n from
// it. Adjust the free list as necessary, and return the new group.
static bdescr *
-split_free_block (bdescr *bd, W_ n, nat ln)
+split_free_block (bdescr *bd, W_ n, uint32_t ln)
{
bdescr *fg; // free group
@@ -732,7 +732,7 @@ countAllocdBlocks(bdescr *bd)
return n;
}
-void returnMemoryToOS(nat n /* megablocks */)
+void returnMemoryToOS(uint32_t n /* megablocks */)
{
static bdescr *bd;
StgWord size;
diff --git a/rts/sm/BlockAlloc.h b/rts/sm/BlockAlloc.h
index aebb71a913..2ba7c02c08 100644
--- a/rts/sm/BlockAlloc.h
+++ b/rts/sm/BlockAlloc.h
@@ -17,7 +17,7 @@ bdescr *allocLargeChunk (W_ min, W_ max);
extern W_ countBlocks (bdescr *bd);
extern W_ countAllocdBlocks (bdescr *bd);
-extern void returnMemoryToOS(nat n);
+extern void returnMemoryToOS(uint32_t n);
#ifdef DEBUG
void checkFreeListSanity(void);
diff --git a/rts/sm/Compact.h b/rts/sm/Compact.h
index db7a9694d2..7ff6e12d14 100644
--- a/rts/sm/Compact.h
+++ b/rts/sm/Compact.h
@@ -19,7 +19,7 @@
INLINE_HEADER void
mark(StgPtr p, bdescr *bd)
{
- nat offset_within_block = p - bd->start; // in words
+ uint32_t offset_within_block = p - bd->start; // in words
StgPtr bitmap_word = (StgPtr)bd->u.bitmap +
(offset_within_block / (sizeof(W_)*BITS_PER_BYTE));
StgWord bit_mask = (StgWord)1 << (offset_within_block & (sizeof(W_)*BITS_PER_BYTE - 1));
@@ -29,7 +29,7 @@ mark(StgPtr p, bdescr *bd)
INLINE_HEADER void
unmark(StgPtr p, bdescr *bd)
{
- nat offset_within_block = p - bd->start; // in words
+ uint32_t offset_within_block = p - bd->start; // in words
StgPtr bitmap_word = (StgPtr)bd->u.bitmap +
(offset_within_block / (sizeof(W_)*BITS_PER_BYTE));
StgWord bit_mask = (StgWord)1 << (offset_within_block & (sizeof(W_)*BITS_PER_BYTE - 1));
@@ -39,7 +39,7 @@ unmark(StgPtr p, bdescr *bd)
INLINE_HEADER StgWord
is_marked(StgPtr p, bdescr *bd)
{
- nat offset_within_block = p - bd->start; // in words
+ uint32_t offset_within_block = p - bd->start; // in words
StgPtr bitmap_word = (StgPtr)bd->u.bitmap +
(offset_within_block / (sizeof(W_)*BITS_PER_BYTE));
StgWord bit_mask = (StgWord)1 << (offset_within_block & (sizeof(W_)*BITS_PER_BYTE - 1));
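All three helpers above share the same index arithmetic: a word offset within
the block selects one bitmap word and one bit inside it. A self-contained
sketch of the marking case, with invented parameter names standing in for the
block-descriptor fields:

    #include <stdint.h>

    #define BITS_PER_BYTE 8
    typedef uintptr_t W_;

    /* One mark bit per heap word: offset / bits-per-word picks the bitmap
       word, offset % bits-per-word picks the bit. */
    static inline void mark_bit(W_ *bitmap, const W_ *block_start, const W_ *p)
    {
        uint32_t offset_within_block = (uint32_t)(p - block_start); /* words */
        W_ *bitmap_word =
            bitmap + offset_within_block / (sizeof(W_) * BITS_PER_BYTE);
        W_ bit_mask =
            (W_)1 << (offset_within_block & (sizeof(W_) * BITS_PER_BYTE - 1));
        *bitmap_word |= bit_mask;  /* unmark would clear; is_marked would test */
    }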
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index a9f112702f..e53461de63 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -52,7 +52,7 @@ STATIC_INLINE void evacuate_large(StgPtr p);
-------------------------------------------------------------------------- */
STATIC_INLINE StgPtr
-alloc_for_copy (nat size, nat gen_no)
+alloc_for_copy (uint32_t size, uint32_t gen_no)
{
StgPtr to;
gen_workspace *ws;
@@ -91,10 +91,10 @@ alloc_for_copy (nat size, nat gen_no)
STATIC_INLINE GNUC_ATTR_HOT void
copy_tag(StgClosure **p, const StgInfoTable *info,
- StgClosure *src, nat size, nat gen_no, StgWord tag)
+ StgClosure *src, uint32_t size, uint32_t gen_no, StgWord tag)
{
StgPtr to, from;
- nat i;
+ uint32_t i;
to = alloc_for_copy(size,gen_no);
@@ -146,10 +146,10 @@ copy_tag(StgClosure **p, const StgInfoTable *info,
#if defined(PARALLEL_GC) && !defined(PROFILING)
STATIC_INLINE void
copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
- StgClosure *src, nat size, nat gen_no, StgWord tag)
+ StgClosure *src, uint32_t size, uint32_t gen_no, StgWord tag)
{
StgPtr to, from;
- nat i;
+ uint32_t i;
to = alloc_for_copy(size,gen_no);
@@ -182,11 +182,11 @@ copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
* used to optimise evacuation of TSOs.
*/
static rtsBool
-copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve,
- nat size_to_copy, nat gen_no)
+copyPart(StgClosure **p, StgClosure *src, uint32_t size_to_reserve,
+ uint32_t size_to_copy, uint32_t gen_no)
{
StgPtr to, from;
- nat i;
+ uint32_t i;
StgWord info;
#if defined(PARALLEL_GC)
@@ -235,7 +235,7 @@ spin:
/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE GNUC_ATTR_HOT void
copy(StgClosure **p, const StgInfoTable *info,
- StgClosure *src, nat size, nat gen_no)
+ StgClosure *src, uint32_t size, uint32_t gen_no)
{
copy_tag(p,info,src,size,gen_no,0);
}
@@ -256,7 +256,7 @@ evacuate_large(StgPtr p)
{
bdescr *bd;
generation *gen, *new_gen;
- nat gen_no, new_gen_no;
+ uint32_t gen_no, new_gen_no;
gen_workspace *ws;
bd = Bdescr(p);
@@ -401,7 +401,7 @@ REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
bdescr *bd = NULL;
- nat gen_no;
+ uint32_t gen_no;
StgClosure *q;
const StgInfoTable *info;
StgWord tag;
@@ -809,7 +809,7 @@ static void
eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
// NB. for legacy reasons, p & q are swapped around :(
{
- nat field;
+ uint32_t field;
StgInfoTable *info;
StgWord info_ptr;
StgClosure *selectee;
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index d3f3ab0166..996ce8cbce 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -95,7 +95,7 @@
* flag) is when we're collecting all generations. We only attempt to
* deal with static objects and GC CAFs when doing a major GC.
*/
-nat N;
+uint32_t N;
rtsBool major_gc;
/* Data used for allocation area sizing.
@@ -104,7 +104,7 @@ static W_ g0_pcnt_kept = 30; // percentage of g0 live at last minor GC
/* Mut-list stats */
#ifdef DEBUG
-nat mutlist_MUTVARS,
+uint32_t mutlist_MUTVARS,
mutlist_MUTARRS,
mutlist_MVARS,
mutlist_TVAR,
@@ -126,15 +126,15 @@ StgWord8 the_gc_thread[sizeof(gc_thread) + 64 * sizeof(gen_workspace)];
// Number of threads running in *this* GC. Affects how many
// step->todos[] lists we have to look in to find work.
-nat n_gc_threads;
+uint32_t n_gc_threads;
// For stats:
static long copied; // *words* copied & scavenged during this GC
rtsBool work_stealing;
-nat static_flag = STATIC_FLAG_B;
-nat prev_static_flag = STATIC_FLAG_A;
+uint32_t static_flag = STATIC_FLAG_B;
+uint32_t prev_static_flag = STATIC_FLAG_A;
DECLARE_GCT
@@ -152,8 +152,8 @@ static void start_gc_threads (void);
static void scavenge_until_all_done (void);
static StgWord inc_running (void);
static StgWord dec_running (void);
-static void wakeup_gc_threads (nat me);
-static void shutdown_gc_threads (nat me);
+static void wakeup_gc_threads (uint32_t me);
+static void shutdown_gc_threads (uint32_t me);
static void collect_gct_blocks (void);
static void collect_pinned_object_blocks (void);
@@ -178,9 +178,9 @@ StgPtr mark_sp; // pointer to the next unallocated mark stack entry
-------------------------------------------------------------------------- */
void
-GarbageCollect (nat collect_gen,
+GarbageCollect (uint32_t collect_gen,
rtsBool do_heap_census,
- nat gc_type USED_IF_THREADS,
+ uint32_t gc_type USED_IF_THREADS,
Capability *cap)
{
bdescr *bd;
@@ -189,7 +189,7 @@ GarbageCollect (nat collect_gen,
#if defined(THREADED_RTS)
gc_thread *saved_gct;
#endif
- nat g, n;
+ uint32_t g, n;
// necessary if we stole a callee-saves register for gct:
#if defined(THREADED_RTS)
@@ -459,7 +459,7 @@ GarbageCollect (nat collect_gen,
par_max_copied = 0;
par_tot_copied = 0;
{
- nat i;
+ uint32_t i;
for (i=0; i < n_gc_threads; i++) {
if (n_gc_threads > 1) {
debugTrace(DEBUG_gc,"thread %d:", i);
@@ -621,7 +621,7 @@ GarbageCollect (nat collect_gen,
// add in the partial blocks in the gen_workspaces
{
- nat i;
+ uint32_t i;
for (i = 0; i < n_capabilities; i++) {
live_words += gcThreadLiveWords(i, gen->no);
live_blocks += gcThreadLiveBlocks(i, gen->no);
@@ -766,9 +766,9 @@ GarbageCollect (nat collect_gen,
#define GC_THREAD_WAITING_TO_CONTINUE 3
static void
-new_gc_thread (nat n, gc_thread *t)
+new_gc_thread (uint32_t n, gc_thread *t)
{
- nat g;
+ uint32_t g;
gen_workspace *ws;
t->cap = capabilities[n];
@@ -829,10 +829,10 @@ new_gc_thread (nat n, gc_thread *t)
void
-initGcThreads (nat from USED_IF_THREADS, nat to USED_IF_THREADS)
+initGcThreads (uint32_t from USED_IF_THREADS, uint32_t to USED_IF_THREADS)
{
#if defined(THREADED_RTS)
- nat i;
+ uint32_t i;
if (from > 0) {
gc_threads = stgReallocBytes (gc_threads, to * sizeof(gc_thread*),
@@ -861,10 +861,10 @@ initGcThreads (nat from USED_IF_THREADS, nat to USED_IF_THREADS)
void
freeGcThreads (void)
{
- nat g;
+ uint32_t g;
if (gc_threads != NULL) {
#if defined(THREADED_RTS)
- nat i;
+ uint32_t i;
for (i = 0; i < n_capabilities; i++) {
for (g = 0; g < RtsFlags.GcFlags.generations; g++)
{
@@ -933,7 +933,7 @@ any_work (void)
#if defined(THREADED_RTS)
if (work_stealing) {
- nat n;
+ uint32_t n;
// look for work to steal
for (n = 0; n < n_gc_threads; n++) {
if (n == gct->thread_index) continue;
@@ -956,7 +956,7 @@ any_work (void)
static void
scavenge_until_all_done (void)
{
- DEBUG_ONLY( nat r );
+ DEBUG_ONLY( uint32_t r );
loop:
@@ -1063,9 +1063,9 @@ gcWorkerThread (Capability *cap)
void
waitForGcThreads (Capability *cap USED_IF_THREADS)
{
- const nat n_threads = n_capabilities;
- const nat me = cap->no;
- nat i, j;
+ const uint32_t n_threads = n_capabilities;
+ const uint32_t me = cap->no;
+ uint32_t i, j;
rtsBool retry = rtsTrue;
stat_startGCSync(gc_threads[cap->no]);
@@ -1104,10 +1104,10 @@ start_gc_threads (void)
}
static void
-wakeup_gc_threads (nat me USED_IF_THREADS)
+wakeup_gc_threads (uint32_t me USED_IF_THREADS)
{
#if defined(THREADED_RTS)
- nat i;
+ uint32_t i;
if (n_gc_threads == 1) return;
@@ -1128,10 +1128,10 @@ wakeup_gc_threads (nat me USED_IF_THREADS)
// standby state, otherwise they may still be executing inside
// any_work(), and may even remain awake until the next GC starts.
static void
-shutdown_gc_threads (nat me USED_IF_THREADS)
+shutdown_gc_threads (uint32_t me USED_IF_THREADS)
{
#if defined(THREADED_RTS)
- nat i;
+ uint32_t i;
if (n_gc_threads == 1) return;
@@ -1149,9 +1149,9 @@ shutdown_gc_threads (nat me USED_IF_THREADS)
void
releaseGCThreads (Capability *cap USED_IF_THREADS)
{
- const nat n_threads = n_capabilities;
- const nat me = cap->no;
- nat i;
+ const uint32_t n_threads = n_capabilities;
+ const uint32_t me = cap->no;
+ uint32_t i;
for (i=0; i < n_threads; i++) {
if (i == me || gc_threads[i]->idle) continue;
if (gc_threads[i]->wakeup != GC_THREAD_WAITING_TO_CONTINUE)
@@ -1171,7 +1171,7 @@ releaseGCThreads (Capability *cap USED_IF_THREADS)
static void
prepare_collected_gen (generation *gen)
{
- nat i, g, n;
+ uint32_t i, g, n;
gen_workspace *ws;
bdescr *bd, *next;
@@ -1293,7 +1293,7 @@ prepare_collected_gen (generation *gen)
------------------------------------------------------------------------- */
static void
-stash_mut_list (Capability *cap, nat gen_no)
+stash_mut_list (Capability *cap, uint32_t gen_no)
{
cap->saved_mut_lists[gen_no] = cap->mut_lists[gen_no];
cap->mut_lists[gen_no] = allocBlock_sync();
@@ -1306,7 +1306,7 @@ stash_mut_list (Capability *cap, nat gen_no)
static void
prepare_uncollected_gen (generation *gen)
{
- nat i;
+ uint32_t i;
ASSERT(gen->no > 0);
@@ -1330,7 +1330,7 @@ prepare_uncollected_gen (generation *gen)
static void
collect_gct_blocks (void)
{
- nat g;
+ uint32_t g;
gen_workspace *ws;
bdescr *bd, *prev;
@@ -1380,7 +1380,7 @@ collect_gct_blocks (void)
static void
collect_pinned_object_blocks (void)
{
- nat n;
+ uint32_t n;
bdescr *bd, *prev;
for (n = 0; n < n_capabilities; n++) {
@@ -1456,7 +1456,7 @@ mark_root(void *user USED_IF_THREADS, StgClosure **root)
static void
resize_generations (void)
{
- nat g;
+ uint32_t g;
if (major_gc && RtsFlags.GcFlags.generations > 1) {
W_ live, size, min_alloc, words;
@@ -1683,7 +1683,7 @@ static void gcCAFs(void)
StgIndStatic *p, *prev;
const StgInfoTable *info;
- nat i;
+ uint32_t i;
i = 0;
p = debug_caf_list;
diff --git a/rts/sm/GC.h b/rts/sm/GC.h
index 2953d9eb0c..c32064a4dd 100644
--- a/rts/sm/GC.h
+++ b/rts/sm/GC.h
@@ -20,14 +20,14 @@
void GarbageCollect (rtsBool force_major_gc,
rtsBool do_heap_census,
- nat gc_type, Capability *cap);
+ uint32_t gc_type, Capability *cap);
typedef void (*evac_fn)(void *user, StgClosure **root);
StgClosure * isAlive ( StgClosure *p );
void markCAFs ( evac_fn evac, void *user );
-extern nat N;
+extern uint32_t N;
extern rtsBool major_gc;
extern bdescr *mark_stack_bd;
@@ -37,7 +37,7 @@ extern StgPtr mark_sp;
extern rtsBool work_stealing;
#ifdef DEBUG
-extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS,
+extern uint32_t mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS,
mutlist_TVAR,
mutlist_TVAR_WATCH_QUEUE,
mutlist_TREC_CHUNK,
@@ -51,7 +51,7 @@ extern StgWord64 whitehole_spin;
#endif
void gcWorkerThread (Capability *cap);
-void initGcThreads (nat from, nat to);
+void initGcThreads (uint32_t from, uint32_t to);
void freeGcThreads (void);
#if defined(THREADED_RTS)
diff --git a/rts/sm/GCThread.h b/rts/sm/GCThread.h
index 1fee7a68e9..b728315554 100644
--- a/rts/sm/GCThread.h
+++ b/rts/sm/GCThread.h
@@ -87,7 +87,7 @@ typedef struct gen_workspace_ {
WSDeque * todo_q;
bdescr * todo_overflow;
- nat n_todo_overflow;
+ uint32_t n_todo_overflow;
// where large objects to be scavenged go
bdescr * todo_large_objects;
@@ -126,7 +126,7 @@ typedef struct gc_thread_ {
SpinLock mut_spin;
volatile StgWord wakeup; // NB not StgWord8; only StgWord is guaranteed atomic
#endif
- nat thread_index; // a zero based index identifying the thread
+ uint32_t thread_index; // a zero based index identifying the thread
rtsBool idle; // sitting out of this GC cycle
bdescr * free_blocks; // a buffer of free blocks for this thread
@@ -155,7 +155,7 @@ typedef struct gc_thread_ {
// --------------------
// evacuate flags
- nat evac_gen_no; // Youngest generation that objects
+ uint32_t evac_gen_no; // Youngest generation that objects
// should be evacuated to in
// evacuate(). (Logically an
// argument to evacuate, but it's
@@ -200,7 +200,7 @@ typedef struct gc_thread_ {
} gc_thread;
-extern nat n_gc_threads;
+extern uint32_t n_gc_threads;
extern gc_thread **gc_threads;
diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c
index 9ecb674bb3..5edf9dedbc 100644
--- a/rts/sm/GCUtils.c
+++ b/rts/sm/GCUtils.c
@@ -41,7 +41,7 @@ allocBlock_sync(void)
}
static bdescr *
-allocGroup_sync(nat n)
+allocGroup_sync(uint32_t n)
{
bdescr *bd;
ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
@@ -51,11 +51,11 @@ allocGroup_sync(nat n)
}
-static nat
-allocBlocks_sync(nat n, bdescr **hd)
+static uint32_t
+allocBlocks_sync(uint32_t n, bdescr **hd)
{
bdescr *bd;
- nat i;
+ uint32_t i;
ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
bd = allocLargeChunk(1,n);
// NB. allocLargeChunk, rather than allocGroup(n), to allocate in a
@@ -112,9 +112,9 @@ grab_local_todo_block (gen_workspace *ws)
#if defined(THREADED_RTS)
bdescr *
-steal_todo_block (nat g)
+steal_todo_block (uint32_t g)
{
- nat n;
+ uint32_t n;
bdescr *bd;
// look for work to steal
@@ -191,7 +191,7 @@ push_scanned_block (bdescr *bd, gen_workspace *ws)
*/
StgPtr
-todo_block_full (nat size, gen_workspace *ws)
+todo_block_full (uint32_t size, gen_workspace *ws)
{
rtsBool urgent_to_push, can_extend;
StgPtr p;
@@ -296,7 +296,7 @@ todo_block_full (nat size, gen_workspace *ws)
}
StgPtr
-alloc_todo_block (gen_workspace *ws, nat size)
+alloc_todo_block (gen_workspace *ws, uint32_t size)
{
bdescr *bd/*, *hd, *tl */;
diff --git a/rts/sm/GCUtils.h b/rts/sm/GCUtils.h
index e77c98aa22..0f87eee3f1 100644
--- a/rts/sm/GCUtils.h
+++ b/rts/sm/GCUtils.h
@@ -22,12 +22,12 @@ bdescr *allocBlock_sync(void);
void freeChain_sync(bdescr *bd);
void push_scanned_block (bdescr *bd, gen_workspace *ws);
-StgPtr todo_block_full (nat size, gen_workspace *ws);
-StgPtr alloc_todo_block (gen_workspace *ws, nat size);
+StgPtr todo_block_full (uint32_t size, gen_workspace *ws);
+StgPtr alloc_todo_block (gen_workspace *ws, uint32_t size);
bdescr *grab_local_todo_block (gen_workspace *ws);
#if defined(THREADED_RTS)
-bdescr *steal_todo_block (nat s);
+bdescr *steal_todo_block (uint32_t s);
#endif
// Returns true if a block is partially full. This predicate is used to try
@@ -48,7 +48,7 @@ void printMutableList (bdescr *bd);
// mutable lists attached to the current gc_thread structure, which
// are the same as the mutable lists on the Capability.
INLINE_HEADER void
-recordMutableGen_GC (StgClosure *p, nat gen_no)
+recordMutableGen_GC (StgClosure *p, uint32_t gen_no)
{
bdescr *bd;
diff --git a/rts/sm/HeapAlloc.h b/rts/sm/HeapAlloc.h
index 99a62f9023..a867a484ae 100644
--- a/rts/sm/HeapAlloc.h
+++ b/rts/sm/HeapAlloc.h
@@ -168,7 +168,7 @@ INLINE_HEADER
StgBool HEAP_ALLOCED(void *p)
{
StgWord mblock;
- nat entry_no;
+ uint32_t entry_no;
MbcCacheLine entry, value;
mblock = (StgWord)p >> MBLOCK_SHIFT;
@@ -196,7 +196,7 @@ INLINE_HEADER
StgBool HEAP_ALLOCED_GC(void *p)
{
StgWord mblock;
- nat entry_no;
+ uint32_t entry_no;
MbcCacheLine entry, value;
StgBool b;
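Both predicates probe a small direct-mapped cache keyed by megablock number
before falling back to the full map (HEAP_ALLOCED_miss). A simplified sketch
of that probe follows; the real RTS packs the tag and the allocated flag into
a single MbcCacheLine word, and the constants and struct here are
illustrative assumptions:

    #include <stdint.h>

    #define MBLOCK_SHIFT 20              /* assumed: 1 MB megablocks */
    #define MBC_ENTRIES  4096            /* assumed cache size */

    typedef struct {
        uintptr_t tag;                   /* which megablock this line caches */
        uint8_t   alloced;               /* is it part of the GHC heap? */
        uint8_t   valid;
    } CacheLine;

    static CacheLine mblock_cache_sketch[MBC_ENTRIES];

    /* Returns 1/0 on a hit, -1 on a miss (the RTS then consults the map). */
    static int heap_alloced_cached(const void *p)
    {
        uintptr_t mblock  = (uintptr_t)p >> MBLOCK_SHIFT; /* megablock no. */
        uint32_t entry_no = (uint32_t)(mblock & (MBC_ENTRIES - 1));
        CacheLine e = mblock_cache_sketch[entry_no];
        if (e.valid && e.tag == mblock) return e.alloced;
        return -1;
    }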
diff --git a/rts/sm/MBlock.c b/rts/sm/MBlock.c
index 2131ae6e13..11b12d1c16 100644
--- a/rts/sm/MBlock.c
+++ b/rts/sm/MBlock.c
@@ -50,10 +50,10 @@ W_ mpc_misses = 0;
be used in an iterative fashion. Pass NULL if @state is not
interesting, or pass a pointer to NULL if you don't have a state.
- void *getCommittedMBlocks(nat n)
+ void *getCommittedMBlocks(uint32_t n)
return @n new mblocks, ready to be used (reserved and committed)
- void *decommitMBlocks(char *addr, nat n)
+ void *decommitMBlocks(char *addr, uint32_t n)
release memory for @n mblocks, starting at the given address
void releaseFreeMemory()
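A hedged POSIX sketch of the two commit/decommit hooks named above; the RTS's
real implementations live in the OS layer and additionally guarantee
MBLOCK_SIZE alignment, which plain mmap does not:

    #include <stdint.h>
    #include <sys/mman.h>

    #define MBLOCK_SIZE (1UL << 20)      /* assumed: 1 MB megablocks */

    /* Return n fresh, committed mblocks (alignment handling omitted). */
    static void *get_committed_mblocks(uint32_t n)
    {
        void *p = mmap(NULL, (size_t)n * MBLOCK_SIZE, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
    }

    /* Give the pages back to the OS but keep the address range usable. */
    static void decommit_mblocks(char *addr, uint32_t n)
    {
        madvise(addr, (size_t)n * MBLOCK_SIZE, MADV_DONTNEED);
    }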
@@ -152,7 +152,7 @@ void * getNextMBlock(void **state STG_UNUSED, void *mblock)
return getAllocatedMBlock(casted_state, (W_)mblock + MBLOCK_SIZE);
}
-static void *getReusableMBlocks(nat n)
+static void *getReusableMBlocks(uint32_t n)
{
struct free_list *iter;
W_ size = MBLOCK_SIZE * (W_)n;
@@ -190,7 +190,7 @@ static void *getReusableMBlocks(nat n)
return NULL;
}
-static void *getFreshMBlocks(nat n)
+static void *getFreshMBlocks(uint32_t n)
{
W_ size = MBLOCK_SIZE * (W_)n;
void *addr = (void*)mblock_high_watermark;
@@ -207,7 +207,7 @@ static void *getFreshMBlocks(nat n)
return addr;
}
-static void *getCommittedMBlocks(nat n)
+static void *getCommittedMBlocks(uint32_t n)
{
void *p;
@@ -220,7 +220,7 @@ static void *getCommittedMBlocks(nat n)
return p;
}
-static void decommitMBlocks(char *addr, nat n)
+static void decommitMBlocks(char *addr, uint32_t n)
{
struct free_list *iter, *prev;
W_ size = MBLOCK_SIZE * (W_)n;
@@ -354,14 +354,14 @@ setHeapAlloced(void *p, StgWord8 i)
MBlockMap **mblock_maps = NULL;
-nat mblock_map_count = 0;
+uint32_t mblock_map_count = 0;
MbcCacheLine mblock_cache[MBC_ENTRIES];
static MBlockMap *
findMBlockMap(void *p)
{
- nat i;
+ uint32_t i;
StgWord32 hi = (StgWord32) (((StgWord)p) >> 32);
for( i = 0; i < mblock_map_count; i++ )
{
@@ -377,7 +377,7 @@ StgBool HEAP_ALLOCED_miss(StgWord mblock, void *p)
{
MBlockMap *map;
MBlockMapLine value;
- nat entry_no;
+ uint32_t entry_no;
entry_no = mblock & (MBC_ENTRIES-1);
@@ -416,7 +416,7 @@ setHeapAlloced(void *p, StgWord8 i)
{
StgWord mblock;
- nat entry_no;
+ uint32_t entry_no;
mblock = (StgWord)p >> MBLOCK_SHIFT;
entry_no = mblock & (MBC_ENTRIES-1);
@@ -441,14 +441,14 @@ markHeapUnalloced(void *p)
#if SIZEOF_VOID_P == 4
STATIC_INLINE
-void * mapEntryToMBlock(nat i)
+void * mapEntryToMBlock(uint32_t i)
{
return (void *)((StgWord)i << MBLOCK_SHIFT);
}
void * getFirstMBlock(void **state STG_UNUSED)
{
- nat i;
+ uint32_t i;
for (i = 0; i < MBLOCK_MAP_SIZE; i++) {
if (mblock_map[i]) return mapEntryToMBlock(i);
@@ -458,7 +458,7 @@ void * getFirstMBlock(void **state STG_UNUSED)
void * getNextMBlock(void **state STG_UNUSED, void *mblock)
{
- nat i;
+ uint32_t i;
for (i = MBLOCK_MAP_ENTRY(mblock) + 1; i < MBLOCK_MAP_SIZE; i++) {
if (mblock_map[i]) return mapEntryToMBlock(i);
@@ -471,8 +471,8 @@ void * getNextMBlock(void **state STG_UNUSED, void *mblock)
void * getNextMBlock(void **state STG_UNUSED, void *p)
{
MBlockMap *map;
- nat off, j;
- nat line_no;
+ uint32_t off, j;
+ uint32_t line_no;
MBlockMapLine line;
for (j = 0; j < mblock_map_count; j++) {
@@ -508,7 +508,7 @@ void * getNextMBlock(void **state STG_UNUSED, void *p)
void * getFirstMBlock(void **state STG_UNUSED)
{
MBlockMap *map = mblock_maps[0];
- nat line_no, off;
+ uint32_t line_no, off;
MbcCacheLine line;
for (line_no = 0; line_no < MBLOCK_MAP_ENTRIES; line_no++) {
@@ -528,11 +528,11 @@ void * getFirstMBlock(void **state STG_UNUSED)
#endif // SIZEOF_VOID_P == 8
-static void *getCommittedMBlocks(nat n)
+static void *getCommittedMBlocks(uint32_t n)
{
// The OS layer returns committed memory directly
void *ret = osGetMBlocks(n);
- nat i;
+ uint32_t i;
// fill in the table
for (i = 0; i < n; i++) {
@@ -542,10 +542,10 @@ static void *getCommittedMBlocks(nat n)
return ret;
}
-static void decommitMBlocks(void *p, nat n)
+static void decommitMBlocks(void *p, uint32_t n)
{
osFreeMBlocks(p, n);
- nat i;
+ uint32_t i;
for (i = 0; i < n; i++) {
markHeapUnalloced( (StgWord8*)p + i * MBLOCK_SIZE );
@@ -573,7 +573,7 @@ getMBlock(void)
// address.
void *
-getMBlocks(nat n)
+getMBlocks(uint32_t n)
{
void *ret;
@@ -588,7 +588,7 @@ getMBlocks(nat n)
}
void
-freeMBlocks(void *addr, nat n)
+freeMBlocks(void *addr, uint32_t n)
{
debugTrace(DEBUG_gc, "freeing %d megablock(s) at %p",n,addr);
@@ -622,7 +622,7 @@ freeAllMBlocks(void)
osFreeAllMBlocks();
#if SIZEOF_VOID_P == 8
- nat n;
+ uint32_t n;
for (n = 0; n < mblock_map_count; n++) {
stgFree(mblock_maps[n]);
}
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index d9f17337d1..c6ab5b161c 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -89,7 +89,7 @@ static void tidyThreadList (generation *gen);
void
initWeakForGC(void)
{
- nat g;
+ uint32_t g;
for (g = 0; g <= N; g++) {
generation *gen = &generations[g];
@@ -118,7 +118,7 @@ traverseWeakPtrList(void)
* become garbage, we wake them up and administer an exception.
*/
{
- nat g;
+ uint32_t g;
for (g = 0; g <= N; g++) {
tidyThreadList(&generations[g]);
@@ -156,7 +156,7 @@ traverseWeakPtrList(void)
case WeakPtrs:
{
- nat g;
+ uint32_t g;
// resurrecting threads might have made more weak pointers
// alive, so traverse those lists again:
@@ -365,7 +365,7 @@ static void checkWeakPtrSanity(StgWeak *hd, StgWeak *tl)
void collectFreshWeakPtrs()
{
- nat i;
+ uint32_t i;
generation *gen = &generations[0];
// move recently allocated weak_ptr_list to the old list as well
for (i = 0; i < n_capabilities; i++) {
@@ -390,7 +390,7 @@ void collectFreshWeakPtrs()
void
markWeakPtrList ( void )
{
- nat g;
+ uint32_t g;
for (g = 0; g <= N; g++) {
generation *gen = &generations[g];
diff --git a/rts/sm/OSMem.h b/rts/sm/OSMem.h
index 533f6f7fe6..defa0d408c 100644
--- a/rts/sm/OSMem.h
+++ b/rts/sm/OSMem.h
@@ -12,8 +12,8 @@
#include "BeginPrivate.h"
void osMemInit(void);
-void *osGetMBlocks(nat n);
-void osFreeMBlocks(char *addr, nat n);
+void *osGetMBlocks(uint32_t n);
+void osFreeMBlocks(char *addr, uint32_t n);
void osReleaseFreeMemory(void);
void osFreeAllMBlocks(void);
W_ getPageSize (void);
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 1f4c4923c4..d1ed7db977 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -33,19 +33,19 @@
Forward decls.
-------------------------------------------------------------------------- */
-static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
-static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
-static void checkClosureShallow ( StgClosure * );
-static void checkSTACK (StgStack *stack);
+static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, uint32_t );
+static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, uint32_t );
+static void checkClosureShallow ( StgClosure * );
+static void checkSTACK (StgStack *stack);
/* -----------------------------------------------------------------------------
Check stack sanity
-------------------------------------------------------------------------- */
static void
-checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
+checkSmallBitmap( StgPtr payload, StgWord bitmap, uint32_t size )
{
- nat i;
+ uint32_t i;
for(i = 0; i < size; i++, bitmap >>= 1 ) {
if ((bitmap & 1) == 0) {
@@ -55,10 +55,10 @@ checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
}
static void
-checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
+checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, uint32_t size )
{
StgWord bmp;
- nat i, j;
+ uint32_t i, j;
i = 0;
for (bmp=0; i < size; bmp++) {
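Large bitmaps are traversed one StgWord of bits at a time, as the loop heads
above suggest. A self-contained sketch of the pattern, with a callback
standing in for the per-entry check:

    #include <stdint.h>

    typedef uintptr_t StgWord;
    #define BITS_IN_WORD (sizeof(StgWord) * 8)

    /* Walk `size` bitmap entries packed into words, invoking `visit` on the
       index of every set bit (the sanity checker instead inspects clear
       bits, which mark pointer slots). */
    static void walk_large_bitmap(const StgWord *words, uint32_t size,
                                  void (*visit)(uint32_t idx))
    {
        uint32_t i = 0;
        for (uint32_t w = 0; i < size; w++) {
            StgWord bits = words[w];
            for (uint32_t j = 0;
                 j < BITS_IN_WORD && i < size;
                 j++, i++, bits >>= 1) {
                if (bits & 1) visit(i);
            }
        }
    }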
@@ -98,7 +98,7 @@ checkClosureShallow( StgClosure* p )
StgOffset
checkStackFrame( StgPtr c )
{
- nat size;
+ uint32_t size;
const StgRetInfoTable* info;
info = get_ret_itbl((StgClosure *)c);
@@ -123,7 +123,7 @@ checkStackFrame( StgPtr c )
case RET_BCO: {
StgBCO *bco;
- nat size;
+ uint32_t size;
bco = (StgBCO *)*(c+1);
size = BCO_BITMAP_SIZE(bco);
checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
@@ -257,7 +257,7 @@ checkClosure( StgClosure* p )
case THUNK_0_2:
case THUNK_2_0:
{
- nat i;
+ uint32_t i;
for (i = 0; i < info->layout.payload.ptrs; i++) {
ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
}
@@ -287,7 +287,7 @@ checkClosure( StgClosure* p )
case THUNK_STATIC:
case FUN_STATIC:
{
- nat i;
+ uint32_t i;
for (i = 0; i < info->layout.payload.ptrs; i++) {
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
}
@@ -395,7 +395,7 @@ checkClosure( StgClosure* p )
case MUT_ARR_PTRS_FROZEN0:
{
StgMutArrPtrs* a = (StgMutArrPtrs *)p;
- nat i;
+ uint32_t i;
for (i = 0; i < a->ptrs; i++) {
ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
}
@@ -412,7 +412,7 @@ checkClosure( StgClosure* p )
case TREC_CHUNK:
{
- nat i;
+ uint32_t i;
StgTRecChunk *tc = (StgTRecChunk *)p;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
for (i = 0; i < tc -> next_entry_idx; i ++) {
@@ -446,7 +446,7 @@ void checkHeapChain (bdescr *bd)
if(!(bd->flags & BF_SWEPT)) {
p = bd->start;
while (p < bd->free) {
- nat size = checkClosure((StgClosure *)p);
+ uint32_t size = checkClosure((StgClosure *)p);
/* This is the smallest size of closure that can live in the heap */
ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
p += size;
@@ -463,7 +463,7 @@ void
checkHeapChunk(StgPtr start, StgPtr end)
{
StgPtr p;
- nat size;
+ uint32_t size;
for (p=start; p<end; p+=size) {
ASSERT(LOOKS_LIKE_INFO_PTR(*p));
@@ -542,7 +542,7 @@ void
checkGlobalTSOList (rtsBool checkTSOs)
{
StgTSO *tso;
- nat g;
+ uint32_t g;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (tso=generations[g].threads; tso != END_TSO_QUEUE;
@@ -585,7 +585,7 @@ checkGlobalTSOList (rtsBool checkTSOs)
-------------------------------------------------------------------------- */
static void
-checkMutableList( bdescr *mut_bd, nat gen )
+checkMutableList( bdescr *mut_bd, uint32_t gen )
{
bdescr *bd;
StgPtr q;
@@ -610,9 +610,9 @@ checkMutableList( bdescr *mut_bd, nat gen )
}
static void
-checkLocalMutableLists (nat cap_no)
+checkLocalMutableLists (uint32_t cap_no)
{
- nat g;
+ uint32_t g;
for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
checkMutableList(capabilities[cap_no]->mut_lists[g], g);
}
@@ -621,7 +621,7 @@ checkLocalMutableLists (nat cap_no)
static void
checkMutableLists (void)
{
- nat i;
+ uint32_t i;
for (i = 0; i < n_capabilities; i++) {
checkLocalMutableLists(i);
}
@@ -675,7 +675,7 @@ void
checkNurserySanity (nursery *nursery)
{
bdescr *bd, *prev;
- nat blocks = 0;
+ uint32_t blocks = 0;
prev = NULL;
for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
@@ -691,7 +691,7 @@ checkNurserySanity (nursery *nursery)
static void checkGeneration (generation *gen,
rtsBool after_major_gc USED_IF_THREADS)
{
- nat n;
+ uint32_t n;
gen_workspace *ws;
ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
@@ -719,7 +719,7 @@ static void checkGeneration (generation *gen,
/* Full heap sanity check. */
static void checkFullHeap (rtsBool after_major_gc)
{
- nat g, n;
+ uint32_t g, n;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
checkGeneration(&generations[g], after_major_gc);
@@ -753,7 +753,7 @@ void checkSanity (rtsBool after_gc, rtsBool major_gc)
static void
findMemoryLeak (void)
{
- nat g, i;
+ uint32_t g, i;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (i = 0; i < n_capabilities; i++) {
markBlocks(capabilities[i]->mut_lists[g]);
@@ -839,7 +839,7 @@ genBlocks (generation *gen)
void
memInventory (rtsBool show)
{
- nat g, i;
+ uint32_t g, i;
W_ gen_blocks[RtsFlags.GcFlags.generations];
W_ nursery_blocks, retainer_blocks,
arena_blocks, exec_blocks, gc_free_blocks = 0;
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 7f64ea6b33..2fbb8f06cc 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -276,11 +276,11 @@ scavenge_AP (StgAP *ap)
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
- nat i, j, size;
+ uint32_t i, j, size;
StgWord bitmap;
StgClosure **p;
- size = (nat)large_srt->l.size;
+ size = (uint32_t)large_srt->l.size;
p = (StgClosure **)large_srt->srt;
for (i = 0; i < size / BITS_IN(W_); i++) {
@@ -316,9 +316,9 @@ scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
* never dereference it.
*/
STATIC_INLINE GNUC_ATTR_HOT void
-scavenge_srt (StgClosure **srt, nat srt_bitmap)
+scavenge_srt (StgClosure **srt, uint32_t srt_bitmap)
{
- nat bitmap;
+ uint32_t bitmap;
StgClosure **p;
bitmap = srt_bitmap;
@@ -359,7 +359,7 @@ STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
StgThunkInfoTable *thunk_info;
- nat bitmap;
+ uint32_t bitmap;
if (!major_gc) return;
@@ -376,7 +376,7 @@ STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
StgFunInfoTable *fun_info;
- nat bitmap;
+ uint32_t bitmap;
if (!major_gc) return;
@@ -1559,7 +1559,7 @@ static void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
StgPtr p, q;
- nat gen_no;
+ uint32_t gen_no;
gen_no = gen->no;
gct->evac_gen_no = gen_no;
@@ -1650,7 +1650,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
void
scavenge_capability_mut_lists (Capability *cap)
{
- nat g;
+ uint32_t g;
/* Mutable lists from each generation > N
* we want to *scavenge* these roots, not evacuate them: they're not
@@ -1765,7 +1765,7 @@ scavenge_static(void)
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
{
- nat i, j, b;
+ uint32_t i, j, b;
StgWord bitmap;
b = 0;
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 3e421a6e9b..b50594193c 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -55,7 +55,7 @@ generation *g0 = NULL; /* generation 0, for convenience */
generation *oldest_gen = NULL; /* oldest generation, for convenience */
nursery *nurseries = NULL; /* array of nurseries, size == n_capabilities */
-nat n_nurseries;
+uint32_t n_nurseries;
volatile StgWord next_nursery = 0;
#ifdef THREADED_RTS
@@ -66,8 +66,8 @@ volatile StgWord next_nursery = 0;
Mutex sm_mutex;
#endif
-static void allocNurseries (nat from, nat to);
-static void assignNurseriesToCapabilities (nat from, nat to);
+static void allocNurseries (uint32_t from, uint32_t to);
+static void assignNurseriesToCapabilities (uint32_t from, uint32_t to);
static void
initGeneration (generation *gen, int g)
@@ -104,7 +104,7 @@ initGeneration (generation *gen, int g)
void
initStorage (void)
{
- nat g;
+ uint32_t g;
if (generations != NULL) {
// multi-init protection
@@ -211,9 +211,9 @@ initStorage (void)
BLOCK_SIZE_W * sizeof(W_));
}
-void storageAddCapabilities (nat from, nat to)
+void storageAddCapabilities (uint32_t from, uint32_t to)
{
- nat n, g, i, new_n_nurseries;
+ uint32_t n, g, i, new_n_nurseries;
if (RtsFlags.GcFlags.nurseryChunkSize == 0) {
new_n_nurseries = to;
@@ -577,7 +577,7 @@ allocNursery (bdescr *tail, W_ blocks)
}
STATIC_INLINE void
-assignNurseryToCapability (Capability *cap, nat n)
+assignNurseryToCapability (Capability *cap, uint32_t n)
{
ASSERT(n < n_nurseries);
cap->r.rNursery = &nurseries[n];
@@ -591,9 +591,9 @@ assignNurseryToCapability (Capability *cap, nat n)
* here, everything must be stopped to call this function.
*/
static void
-assignNurseriesToCapabilities (nat from, nat to)
+assignNurseriesToCapabilities (uint32_t from, uint32_t to)
{
- nat i;
+ uint32_t i;
for (i = from; i < to; i++) {
assignNurseryToCapability(capabilities[i], next_nursery++);
@@ -601,9 +601,9 @@ assignNurseriesToCapabilities (nat from, nat to)
}
static void
-allocNurseries (nat from, nat to)
+allocNurseries (uint32_t from, uint32_t to)
{
- nat i;
+ uint32_t i;
memcount n_blocks;
if (RtsFlags.GcFlags.nurseryChunkSize) {
@@ -626,7 +626,7 @@ resetNurseries (void)
#ifdef DEBUG
bdescr *bd;
- nat n;
+ uint32_t n;
for (n = 0; n < n_nurseries; n++) {
for (bd = nurseries[n].blocks; bd; bd = bd->link) {
ASSERT(bd->gen_no == 0);
@@ -640,7 +640,7 @@ resetNurseries (void)
W_
countNurseryBlocks (void)
{
- nat i;
+ uint32_t i;
W_ blocks = 0;
for (i = 0; i < n_nurseries; i++) {
@@ -695,7 +695,7 @@ resizeNursery (nursery *nursery, W_ blocks)
static void
resizeNurseriesEach (W_ blocks)
{
- nat i;
+ uint32_t i;
for (i = 0; i < n_nurseries; i++) {
resizeNursery(&nurseries[i], blocks);
@@ -705,7 +705,7 @@ resizeNurseriesEach (W_ blocks)
void
resizeNurseriesFixed (void)
{
- nat blocks;
+ uint32_t blocks;
if (RtsFlags.GcFlags.nurseryChunkSize) {
blocks = RtsFlags.GcFlags.nurseryChunkSize;
@@ -1149,7 +1149,7 @@ calcTotalAllocated (void)
void
updateNurseriesStats (void)
{
- nat i;
+ uint32_t i;
bdescr *bd;
for (i = 0; i < n_capabilities; i++) {
@@ -1184,7 +1184,7 @@ W_ genLiveBlocks (generation *gen)
return gen->n_blocks + gen->n_large_blocks;
}
-W_ gcThreadLiveWords (nat i, nat g)
+W_ gcThreadLiveWords (uint32_t i, uint32_t g)
{
W_ a, b, c;
@@ -1196,7 +1196,7 @@ W_ gcThreadLiveWords (nat i, nat g)
return a + b + c;
}
-W_ gcThreadLiveBlocks (nat i, nat g)
+W_ gcThreadLiveBlocks (uint32_t i, uint32_t g)
{
W_ blocks;
@@ -1221,7 +1221,7 @@ extern W_
calcNeeded (rtsBool force_major, memcount *blocks_needed)
{
W_ needed = 0, blocks;
- nat g, N;
+ uint32_t g, N;
generation *gen;
if (force_major) {
diff --git a/rts/sm/Storage.h b/rts/sm/Storage.h
index 6c6daab44d..2bd1a35176 100644
--- a/rts/sm/Storage.h
+++ b/rts/sm/Storage.h
@@ -23,7 +23,7 @@ void freeStorage(rtsBool free_heap);
// Adding more Capabilities later: this function allocates nurseries
// and initialises other storage-related things.
-void storageAddCapabilities (nat from, nat to);
+void storageAddCapabilities (uint32_t from, uint32_t to);
/* -----------------------------------------------------------------------------
Should we GC?
@@ -66,7 +66,7 @@ void dirty_TVAR(Capability *cap, StgTVar *p);
-------------------------------------------------------------------------- */
extern nursery *nurseries;
-extern nat n_nurseries;
+extern uint32_t n_nurseries;
void resetNurseries (void);
void clearNursery (Capability *cap);
@@ -104,8 +104,8 @@ StgWord countLargeAllocated (void);
StgWord countOccupied (bdescr *bd);
StgWord calcNeeded (rtsBool force_major, StgWord *blocks_needed);
-StgWord gcThreadLiveWords (nat i, nat g);
-StgWord gcThreadLiveBlocks (nat i, nat g);
+StgWord gcThreadLiveWords (uint32_t i, uint32_t g);
+StgWord gcThreadLiveBlocks (uint32_t i, uint32_t g);
StgWord genLiveWords (generation *gen);
StgWord genLiveBlocks (generation *gen);
@@ -153,7 +153,7 @@ void move_STACK (StgStack *src, StgStack *dest);
// The previous and current values of the static flag. These flip
// between STATIC_FLAG_A and STATIC_FLAG_B at each major GC.
-extern nat prev_static_flag, static_flag;
+extern uint32_t prev_static_flag, static_flag;
// In the chain of static objects built up during GC, all the link
// fields are tagged with the current static_flag value. How to mark
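The flip mentioned above amounts to swapping the two values at the start of
each major GC; a hedged sketch with illustrative flag values:

    #include <stdint.h>

    #define STATIC_FLAG_A 1              /* illustrative values */
    #define STATIC_FLAG_B 2

    static uint32_t prev_static_flag = STATIC_FLAG_A;
    static uint32_t static_flag      = STATIC_FLAG_B;

    /* Called (conceptually) when a major GC begins: what was current
       becomes previous, and vice versa. */
    static void flip_static_flag(void)
    {
        uint32_t tmp     = prev_static_flag;
        prev_static_flag = static_flag;
        static_flag      = tmp;
    }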
diff --git a/rts/sm/Sweep.c b/rts/sm/Sweep.c
index c927f300d7..2cf8399a3b 100644
--- a/rts/sm/Sweep.c
+++ b/rts/sm/Sweep.c
@@ -22,7 +22,7 @@ void
sweep(generation *gen)
{
bdescr *bd, *prev, *next;
- nat i;
+ uint32_t i;
W_ freed, resid, fragd, blocks, live;
ASSERT(countBlocks(gen->old_blocks) == gen->n_old_blocks);