| field | value | date |
|---|---|---|
| author | Ben Gamari <bgamari.foss@gmail.com> | 2016-11-29 16:51:30 -0500 |
| committer | Ben Gamari <ben@smart-cactus.org> | 2016-11-29 16:51:30 -0500 |
| commit | 428e152be6bb0fd3867e41cee82a6d5968a11a26 (patch) | |
| tree | e43d217c10c052704f872cd7e1df4d335c12d376 /rts/sm | |
| parent | 56d74515396c8b6360ba7898cbc4b68f0f1fb2ea (diff) | |
| download | haskell-428e152be6bb0fd3867e41cee82a6d5968a11a26.tar.gz | |
Use C99's bool
Test Plan: Validate on lots of platforms
Reviewers: erikd, simonmar, austin
Reviewed By: erikd, simonmar
Subscribers: michalt, thomie
Differential Revision: https://phabricator.haskell.org/D2699
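For readers unfamiliar with the RTS internals: before this patch the RTS used a home-grown boolean type, and the patch replaces it with C99's `bool` from `<stdbool.h>` throughout `rts/sm`. A minimal before/after sketch of the pattern — the `rtsBool` definition below is reproduced from memory of the old RTS headers and is illustrative, not quoted from this commit, and both helpers are hypothetical:

```c
#include <stdbool.h>

/* Before: the RTS defined its own boolean, roughly along these lines. */
typedef enum { rtsFalse = 0, rtsTrue } rtsBool;         /* illustrative */

static rtsBool block_is_full_old(unsigned free_words)   /* hypothetical helper */
{
    if (free_words == 0)
        return rtsTrue;
    return rtsFalse;
}

/* After: C99's bool/true/false are used directly. */
static bool block_is_full_new(unsigned free_words)      /* hypothetical helper */
{
    if (free_words == 0)
        return true;
    return false;
}
```

Nearly every hunk in the diff below is an instance of this substitution; the few exceptions are cosmetic (comment realignment in GCThread.h and a parameter rename in the GC.h prototype).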
Diffstat (limited to 'rts/sm')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | rts/sm/CNF.c | 102 |
| -rw-r--r-- | rts/sm/Evac.c | 38 |
| -rw-r--r-- | rts/sm/GC.c | 60 |
| -rw-r--r-- | rts/sm/GC.h | 14 |
| -rw-r--r-- | rts/sm/GCThread.h | 8 |
| -rw-r--r-- | rts/sm/GCUtils.c | 2 |
| -rw-r--r-- | rts/sm/GCUtils.h | 2 |
| -rw-r--r-- | rts/sm/MarkStack.h | 2 |
| -rw-r--r-- | rts/sm/MarkWeak.c | 42 |
| -rw-r--r-- | rts/sm/MarkWeak.h | 2 |
| -rw-r--r-- | rts/sm/OSMem.h | 4 |
| -rw-r--r-- | rts/sm/Sanity.c | 14 |
| -rw-r--r-- | rts/sm/Sanity.h | 6 |
| -rw-r--r-- | rts/sm/Scav.c | 160 |
| -rw-r--r-- | rts/sm/Storage.c | 28 |
| -rw-r--r-- | rts/sm/Storage.h | 8 |
16 files changed, 246 insertions(+), 246 deletions(-)
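The balanced count (246 insertions against 246 deletions) reflects that this is a line-for-line substitution. The recurring shape, paraphrased rather than quoted from any single hunk below, looks like this (`gc_thread_sketch` is a simplified stand-in, not the real `gc_thread` definition):

```c
#include <stdbool.h>

/* Paraphrased recurring pattern: per-GC-thread flags move from rtsBool
 * to bool.  This struct is a simplified stand-in for the real gc_thread
 * struct in GCThread.h, not its actual definition. */
struct gc_thread_sketch {
    bool failed_to_evac;     /* was: rtsBool failed_to_evac;  */
    bool eager_promotion;    /* was: rtsBool eager_promotion; */
};

static void note_failed_promotion(struct gc_thread_sketch *gct)
{
    gct->failed_to_evac = true;   /* was: gct->failed_to_evac = rtsTrue; */
}
```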
```diff
diff --git a/rts/sm/CNF.c b/rts/sm/CNF.c
index 5fa148d426..2eb7cd208a 100644
--- a/rts/sm/CNF.c
+++ b/rts/sm/CNF.c
@@ -391,7 +391,7 @@ unroll_memcpy(StgPtr to, StgPtr from, StgWord size)
         *(to++) = *(from++);
 }
 
-static rtsBool
+static bool
 allocate_in_compact (StgCompactNFDataBlock *block, StgWord sizeW, StgPtr *at)
 {
     bdescr *bd;
@@ -401,16 +401,16 @@ allocate_in_compact (StgCompactNFDataBlock *block, StgWord sizeW, StgPtr *at)
     bd = Bdescr((StgPtr)block);
     top = bd->start + BLOCK_SIZE_W * bd->blocks;
     if (bd->free + sizeW > top)
-        return rtsFalse;
+        return false;
 
     free = bd->free;
     bd->free += sizeW;
     *at = free;
 
-    return rtsTrue;
+    return true;
 }
 
-static rtsBool
+static bool
 block_is_full (StgCompactNFDataBlock *block)
 {
     bdescr *bd;
@@ -431,7 +431,7 @@ block_is_full (StgCompactNFDataBlock *block)
     return (bd->free + sizeW > top);
 }
 
-static rtsBool
+static bool
 allocate_loop (Capability *cap,
                StgCompactNFData *str,
                StgWord sizeW,
@@ -444,7 +444,7 @@ allocate_loop (Capability *cap,
  retry:
     if (str->nursery != NULL) {
         if (allocate_in_compact(str->nursery, sizeW, at))
-            return rtsTrue;
+            return true;
 
         if (block_is_full (str->nursery)) {
             str->nursery = str->nursery->next;
@@ -455,7 +455,7 @@ allocate_loop (Capability *cap,
         block = str->nursery->next;
         while (block != NULL) {
             if (allocate_in_compact(block, sizeW, at))
-                return rtsTrue;
+                return true;
 
             block = block->next;
         }
@@ -466,7 +466,7 @@ allocate_loop (Capability *cap,
     if (next_size >= BLOCKS_PER_MBLOCK * BLOCK_SIZE)
         next_size = BLOCKS_PER_MBLOCK * BLOCK_SIZE;
     if (next_size < sizeW * sizeof(StgWord) + sizeof(StgCompactNFDataBlock))
-        return rtsFalse;
+        return false;
 
     block = compactAppendBlock(cap, str, next_size);
     ASSERT (str->nursery != NULL);
@@ -505,13 +505,13 @@ copy_tag (Capability *cap,
     *p = TAG_CLOSURE(tag, (StgClosure*)to);
 }
 
-STATIC_INLINE rtsBool
+STATIC_INLINE bool
 object_in_compact (StgCompactNFData *str, StgClosure *p)
 {
     bdescr *bd;
 
     if (!HEAP_ALLOCED(p))
-        return rtsFalse;
+        return false;
 
     bd = Bdescr((P_)p);
     return (bd->flags & BF_COMPACT) != 0 &&
@@ -694,7 +694,7 @@ scavenge_loop (Capability *cap,
 }
 
 #ifdef DEBUG
-static rtsBool
+static bool
 objectIsWHNFData (StgClosure *what)
 {
     switch (get_itbl(what)->type) {
@@ -710,18 +710,18 @@ objectIsWHNFData (StgClosure *what)
     case MUT_ARR_PTRS_FROZEN0:
     case SMALL_MUT_ARR_PTRS_FROZEN:
     case SMALL_MUT_ARR_PTRS_FROZEN0:
-        return rtsTrue;
+        return true;
 
     case IND:
     case BLACKHOLE:
         return objectIsWHNFData(UNTAG_CLOSURE(((StgInd*)what)->indirectee));
 
     default:
-        return rtsFalse;
+        return false;
     }
 }
 
-static rtsBool
+static bool
 verify_mut_arr_ptrs (StgCompactNFData *str,
                      StgMutArrPtrs *a)
 {
@@ -731,13 +731,13 @@ verify_mut_arr_ptrs (StgCompactNFData *str,
     q = (StgPtr)&a->payload[a->ptrs];
     for (; p < q; p++) {
         if (!object_in_compact(str, UNTAG_CLOSURE(*(StgClosure**)p)))
-            return rtsFalse;
+            return false;
     }
 
-    return rtsTrue;
+    return true;
 }
 
-static rtsBool
+static bool
 verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
 {
     bdescr *bd;
@@ -751,23 +751,23 @@ verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
         q = (StgClosure*)p;
 
         if (!LOOKS_LIKE_CLOSURE_PTR(q))
-            return rtsFalse;
+            return false;
 
         info = get_itbl(q);
         switch (info->type) {
         case CONSTR_1_0:
             if (!object_in_compact(str, UNTAG_CLOSURE(q->payload[0])))
-                return rtsFalse;
+                return false;
         case CONSTR_0_1:
             p += sizeofW(StgClosure) + 1;
             break;
 
         case CONSTR_2_0:
             if (!object_in_compact(str, UNTAG_CLOSURE(q->payload[1])))
-                return rtsFalse;
+                return false;
         case CONSTR_1_1:
             if (!object_in_compact(str, UNTAG_CLOSURE(q->payload[0])))
-                return rtsFalse;
+                return false;
         case CONSTR_0_2:
             p += sizeofW(StgClosure) + 2;
             break;
@@ -780,7 +780,7 @@ verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
 
             for (i = 0; i < info->layout.payload.ptrs; i++)
                 if (!object_in_compact(str, UNTAG_CLOSURE(q->payload[i])))
-                    return rtsFalse;
+                    return false;
 
             p += sizeofW(StgClosure) + info->layout.payload.ptrs +
                 info->layout.payload.nptrs;
@@ -794,7 +794,7 @@ verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
         case MUT_ARR_PTRS_FROZEN:
         case MUT_ARR_PTRS_FROZEN0:
             if (!verify_mut_arr_ptrs(str, (StgMutArrPtrs*)p))
-                return rtsFalse;
+                return false;
             p += mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
             break;
@@ -806,7 +806,7 @@ verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
 
             for (i = 0; i < arr->ptrs; i++)
                 if (!object_in_compact(str, UNTAG_CLOSURE(arr->payload[i])))
-                    return rtsFalse;
+                    return false;
 
             p += sizeofW(StgSmallMutArrPtrs) + arr->ptrs;
             break;
@@ -817,14 +817,14 @@ verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
             break;
 
         default:
-            return rtsFalse;
+            return false;
         }
     }
 
-    return rtsTrue;
+    return true;
 }
 
-static rtsBool
+static bool
 verify_consistency_loop (StgCompactNFData *str)
 {
     StgCompactNFDataBlock *block;
@@ -832,11 +832,11 @@ verify_consistency_loop (StgCompactNFData *str)
     block = compactGetFirstBlock(str);
     do {
         if (!verify_consistency_block(str, block))
-            return rtsFalse;
+            return false;
 
         block = block->next;
     } while (block && block->owner);
 
-    return rtsTrue;
+    return true;
 }
 #endif
@@ -938,7 +938,7 @@ compactAllocateBlock(Capability *cap,
     return block;
 }
 
-STATIC_INLINE rtsBool
+STATIC_INLINE bool
 any_needs_fixup(StgCompactNFDataBlock *block)
 {
     // ->next pointers are always valid, even if some blocks were
@@ -947,11 +947,11 @@ any_needs_fixup(StgCompactNFDataBlock *block)
 
     do {
         if (block->self != block)
-            return rtsTrue;
+            return true;
         block = block->next;
     } while (block && block->owner);
 
-    return rtsFalse;
+    return false;
 }
 
 #ifdef DEBUG
@@ -1029,7 +1029,7 @@ find_pointer(StgWord *fixup_table, uint32_t count, StgClosure *q)
     return NULL;
 }
 
-static rtsBool
+static bool
 fixup_one_pointer(StgWord *fixup_table, uint32_t count, StgClosure **p)
 {
     StgWord tag;
@@ -1042,17 +1042,17 @@ fixup_one_pointer(StgWord *fixup_table, uint32_t count, StgClosure **p)
 
     block = find_pointer(fixup_table, count, q);
     if (block == NULL)
-        return rtsFalse;
+        return false;
     if (block == block->self)
-        return rtsTrue;
+        return true;
 
     q = (StgClosure*)((W_)q - (W_)block->self + (W_)block);
     *p = TAG_CLOSURE(tag, q);
-    return rtsTrue;
+    return true;
 }
 
-static rtsBool
+static bool
 fixup_mut_arr_ptrs (StgWord *fixup_table,
                     uint32_t count,
                     StgMutArrPtrs *a)
@@ -1063,13 +1063,13 @@ fixup_mut_arr_ptrs (StgWord *fixup_table,
     q = (StgPtr)&a->payload[a->ptrs];
     for (; p < q; p++) {
         if (!fixup_one_pointer(fixup_table, count, (StgClosure**)p))
-            return rtsFalse;
+            return false;
     }
 
-    return rtsTrue;
+    return true;
 }
 
-static rtsBool
+static bool
 fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
 {
     const StgInfoTable *info;
@@ -1086,7 +1086,7 @@ fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
         case CONSTR_1_0:
             if (!fixup_one_pointer(fixup_table, count,
                                    &((StgClosure*)p)->payload[0]))
-                return rtsFalse;
+                return false;
         case CONSTR_0_1:
             p += sizeofW(StgClosure) + 1;
             break;
@@ -1094,11 +1094,11 @@ fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
         case CONSTR_2_0:
             if (!fixup_one_pointer(fixup_table, count,
                                    &((StgClosure*)p)->payload[1]))
-                return rtsFalse;
+                return false;
         case CONSTR_1_1:
             if (!fixup_one_pointer(fixup_table, count,
                                    &((StgClosure*)p)->payload[0]))
-                return rtsFalse;
+                return false;
         case CONSTR_0_2:
             p += sizeofW(StgClosure) + 2;
             break;
@@ -1112,7 +1112,7 @@ fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
             end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
             for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                 if (!fixup_one_pointer(fixup_table, count, (StgClosure **)p))
-                    return rtsFalse;
+                    return false;
             }
             p += info->layout.payload.nptrs;
             break;
@@ -1137,7 +1137,7 @@ fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
 
             for (i = 0; i < arr->ptrs; i++) {
                 if (!fixup_one_pointer(fixup_table, count, &arr->payload[i]))
-                    return rtsFalse;
+                    return false;
             }
 
             p += sizeofW(StgSmallMutArrPtrs) + arr->ptrs;
@@ -1157,11 +1157,11 @@ fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
         default:
             debugBelch("Invalid non-NFData closure (type %d) in Compact\n",
                        info->type);
-            return rtsFalse;
+            return false;
         }
     }
 
-    return rtsTrue;
+    return true;
 }
 
 static int
@@ -1203,18 +1203,18 @@ build_fixup_table (StgCompactNFDataBlock *block, uint32_t *pcount)
     return table;
 }
 
-static rtsBool
+static bool
 fixup_loop(StgCompactNFDataBlock *block, StgClosure **proot)
 {
     StgWord *table;
-    rtsBool ok;
+    bool ok;
     uint32_t count;
 
     table = build_fixup_table (block, &count);
 
     do {
         if (!fixup_block(block, table, count)) {
-            ok = rtsFalse;
+            ok = false;
             goto out;
         }
@@ -1277,7 +1277,7 @@ static StgClosure *
 maybe_fixup_internal_pointers (StgCompactNFDataBlock *block,
                                StgClosure *root)
 {
-    rtsBool ok;
+    bool ok;
     StgClosure **proot;
 
     // Check for fast path
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 1323cbea6a..0581321205 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -45,7 +45,7 @@ StgWord64 whitehole_spin = 0;
  */
 #define MAX_THUNK_SELECTOR_DEPTH 16
 
-static void eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool);
+static void eval_thunk_selector (StgClosure **q, StgSelector * p, bool);
 STATIC_INLINE void evacuate_large(StgPtr p);
 
 /* -----------------------------------------------------------------------------
@@ -67,7 +67,7 @@ alloc_for_copy (uint32_t size, uint32_t gen_no)
         if (gct->eager_promotion) {
             gen_no = gct->evac_gen_no;
         } else {
-            gct->failed_to_evac = rtsTrue;
+            gct->failed_to_evac = true;
         }
     }
@@ -182,7 +182,7 @@ copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
  * pointer of an object, but reserve some padding after it. This is
  * used to optimise evacuation of TSOs.
  */
-static rtsBool
+static bool
 copyPart(StgClosure **p, StgClosure *src, uint32_t size_to_reserve,
          uint32_t size_to_copy, uint32_t gen_no)
 {
@@ -202,7 +202,7 @@ spin:
         if (IS_FORWARDING_PTR(info)) {
             src->header.info = (const StgInfoTable *)info;
             evacuate(p); // does the failed_to_evac stuff
-            return rtsFalse;
+            return false;
         }
 #else
     info = (W_)src->header.info;
@@ -229,7 +229,7 @@ spin:
     LDV_FILL_SLOP(to + size_to_copy, (int)(size_to_reserve - size_to_copy));
 #endif
 
-    return rtsTrue;
+    return true;
 }
@@ -271,7 +271,7 @@ evacuate_large(StgPtr p)
          * the desired destination (see comments in evacuate()).
          */
         if (gen_no < gct->evac_gen_no) {
-            gct->failed_to_evac = rtsTrue;
+            gct->failed_to_evac = true;
             TICK_GC_FAILED_PROMOTION();
         }
         RELEASE_SPIN_LOCK(&gen->sync);
@@ -296,7 +296,7 @@ evacuate_large(StgPtr p)
         if (gct->eager_promotion) {
             new_gen_no = gct->evac_gen_no;
         } else {
-            gct->failed_to_evac = rtsTrue;
+            gct->failed_to_evac = true;
         }
     }
@@ -388,7 +388,7 @@ evacuate_compact (StgPtr p)
          * the desired destination (see comments in evacuate()).
          */
         if (gen_no < gct->evac_gen_no) {
-            gct->failed_to_evac = rtsTrue;
+            gct->failed_to_evac = true;
             TICK_GC_FAILED_PROMOTION();
         }
         return;
@@ -404,7 +404,7 @@ evacuate_compact (StgPtr p)
          * the desired destination (see comments in evacuate()).
         */
         if (gen_no < gct->evac_gen_no) {
-            gct->failed_to_evac = rtsTrue;
+            gct->failed_to_evac = true;
             TICK_GC_FAILED_PROMOTION();
         }
         RELEASE_SPIN_LOCK(&gen->sync);
@@ -429,7 +429,7 @@ evacuate_compact (StgPtr p)
         if (gct->eager_promotion) {
             new_gen_no = gct->evac_gen_no;
         } else {
-            gct->failed_to_evac = rtsTrue;
+            gct->failed_to_evac = true;
         }
     }
@@ -582,7 +582,7 @@ loop:
       // whether it is already in the target generation.  (this is
       // the write barrier).
       if (bd->gen_no < gct->evac_gen_no) {
-          gct->failed_to_evac = rtsTrue;
+          gct->failed_to_evac = true;
           TICK_GC_FAILED_PROMOTION();
       }
       return;
@@ -639,7 +639,7 @@ loop:
             *p = TAG_CLOSURE(tag,e);
             if (gen_no < gct->evac_gen_no) {  // optimisation
                 if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
-                    gct->failed_to_evac = rtsTrue;
+                    gct->failed_to_evac = true;
                     TICK_GC_FAILED_PROMOTION();
                 }
             }
@@ -767,7 +767,7 @@ loop:
       return;
 
   case THUNK_SELECTOR:
-      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
+      eval_thunk_selector(p, (StgSelector *)q, true);
       return;
 
   case IND:
@@ -835,7 +835,7 @@ loop:
       {
           StgStack *new_stack;
           StgPtr r, s;
-          rtsBool mine;
+          bool mine;
 
           mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
                           sizeofW(StgStack), gen_no);
@@ -932,7 +932,7 @@ unchain_thunk_selectors(StgSelector *p, StgClosure *val)
 }
 
 static void
-eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
+eval_thunk_selector (StgClosure **q, StgSelector * p, bool evac)
                  // NB. for legacy reasons, p & q are swapped around :(
 {
     uint32_t field;
@@ -963,7 +963,7 @@ selector_chain:
         *q = (StgClosure *)p;
         // shortcut, behave as for:  if (evac) evacuate(q);
         if (evac && bd->gen_no < gct->evac_gen_no) {
-            gct->failed_to_evac = rtsTrue;
+            gct->failed_to_evac = true;
             TICK_GC_FAILED_PROMOTION();
         }
         return;
@@ -975,7 +975,7 @@ selector_chain:
     // bit is very tricky to get right. If you make changes
     // around here, test by compiling stage 3 with +RTS -c -RTS.
     if (bd->flags & BF_MARKED) {
-        // must call evacuate() to mark this closure if evac==rtsTrue
+        // must call evacuate() to mark this closure if evac==true
         *q = (StgClosure *)p;
         if (evac) evacuate(q);
         unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
@@ -1164,10 +1164,10 @@ selector_loop:
               }
 
               gct->thunk_selector_depth++;
-              // rtsFalse says "don't evacuate the result". It will,
+              // false says "don't evacuate the result". It will,
               // however, update any THUNK_SELECTORs that are evaluated
               // along the way.
-              eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
+              eval_thunk_selector(&val, (StgSelector*)selectee, false);
              gct->thunk_selector_depth--;
 
               // did we actually manage to evaluate it?
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 46530b2cd9..ea80d6dec1 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -97,7 +97,7 @@
  * deal with static objects and GC CAFs when doing a major GC.
  */
 uint32_t N;
-rtsBool major_gc;
+bool major_gc;
 
 /* Data used for allocation area sizing.
 */
@@ -132,7 +132,7 @@ uint32_t n_gc_threads;
 // For stats:
 static long copied;        // *words* copied & scavenged during this GC
 
-rtsBool work_stealing;
+bool work_stealing;
 
 uint32_t static_flag = STATIC_FLAG_B;
 uint32_t prev_static_flag = STATIC_FLAG_A;
@@ -153,8 +153,8 @@ static void start_gc_threads        (void);
 static void scavenge_until_all_done (void);
 static StgWord inc_running          (void);
 static StgWord dec_running          (void);
-static void wakeup_gc_threads       (uint32_t me, rtsBool idle_cap[]);
-static void shutdown_gc_threads     (uint32_t me, rtsBool idle_cap[]);
+static void wakeup_gc_threads       (uint32_t me, bool idle_cap[]);
+static void shutdown_gc_threads     (uint32_t me, bool idle_cap[]);
 static void collect_gct_blocks      (void);
 static void collect_pinned_object_blocks (void);
@@ -180,10 +180,10 @@ StgPtr mark_sp;            // pointer to the next unallocated mark stack entry
 
 void
 GarbageCollect (uint32_t collect_gen,
-                rtsBool do_heap_census,
+                bool do_heap_census,
                 uint32_t gc_type USED_IF_THREADS,
                 Capability *cap,
-                rtsBool idle_cap[])
+                bool idle_cap[])
 {
   bdescr *bd;
   generation *gen;
@@ -299,7 +299,7 @@ GarbageCollect (uint32_t collect_gen,
   collectFreshWeakPtrs();
 
   // check sanity *before* GC
-  IF_DEBUG(sanity, checkSanity(rtsFalse /* before GC */, major_gc));
+  IF_DEBUG(sanity, checkSanity(false /* before GC */, major_gc));
 
   // gather blocks allocated using allocatePinned() from each capability
   // and put them on the g0->large_object list.
@@ -361,7 +361,7 @@ GarbageCollect (uint32_t collect_gen,
       for (n = 0; n < n_capabilities; n++) {
          if (idle_cap[n]) {
               markCapability(mark_root, gct, capabilities[n],
-                             rtsTrue/*don't mark sparks*/);
+                             true/*don't mark sparks*/);
               scavenge_capability_mut_lists(capabilities[n]);
           }
       }
@@ -376,10 +376,10 @@ GarbageCollect (uint32_t collect_gen,
   if (n_gc_threads == 1) {
       for (n = 0; n < n_capabilities; n++) {
           markCapability(mark_root, gct, capabilities[n],
-                         rtsTrue/*don't mark sparks*/);
+                         true/*don't mark sparks*/);
       }
   } else {
-      markCapability(mark_root, gct, cap, rtsTrue/*don't mark sparks*/);
+      markCapability(mark_root, gct, cap, true/*don't mark sparks*/);
   }
 
   markScheduler(mark_root, gct);
@@ -408,7 +408,7 @@ GarbageCollect (uint32_t collect_gen,
 
       // must be last...  invariant is that everything is fully
       // scavenged at this point.
-      if (traverseWeakPtrList()) { // returns rtsTrue if evaced something
+      if (traverseWeakPtrList()) { // returns true if evaced something
           inc_running();
           continue;
       }
@@ -719,7 +719,7 @@ GarbageCollect (uint32_t collect_gen,
   // before resurrectThreads(), because that might overwrite some
   // closures, which will cause problems with THREADED where we don't
   // fill slop.
-  IF_DEBUG(sanity, checkSanity(rtsTrue /* after GC */, major_gc));
+  IF_DEBUG(sanity, checkSanity(true /* after GC */, major_gc));
 
   // If a heap census is due, we need to do it before
   // resurrectThreads(), for the same reason as checkSanity above:
@@ -937,7 +937,7 @@ dec_running (void)
     return atomic_dec(&gc_running_threads);
 }
 
-static rtsBool
+static bool
 any_work (void)
 {
     int g;
@@ -949,7 +949,7 @@ any_work (void)
 
     // scavenge objects in compacted generation
     if (mark_stack_bd != NULL && !mark_stack_empty()) {
-        return rtsTrue;
+        return true;
     }
 
     // Check for global work in any gen.  We don't need to check for
     // local work, because we have already exited scavenge_loop(),
     // which means there is no local work for this thread.
     for (g = 0; g < (int)RtsFlags.GcFlags.generations; g++) {
         ws = &gct->gens[g];
-        if (ws->todo_large_objects) return rtsTrue;
-        if (!looksEmptyWSDeque(ws->todo_q)) return rtsTrue;
-        if (ws->todo_overflow) return rtsTrue;
+        if (ws->todo_large_objects) return true;
+        if (!looksEmptyWSDeque(ws->todo_q)) return true;
+        if (ws->todo_overflow) return true;
     }
 
 #if defined(THREADED_RTS)
@@ -970,7 +970,7 @@ any_work (void)
             if (n == gct->thread_index) continue;
             for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
                 ws = &gc_threads[n]->gens[g];
-                if (!looksEmptyWSDeque(ws->todo_q)) return rtsTrue;
+                if (!looksEmptyWSDeque(ws->todo_q)) return true;
             }
         }
     }
@@ -981,7 +981,7 @@ any_work (void)
     yieldThread();
 #endif
 
-    return rtsFalse;
+    return false;
 }
 
 static void
@@ -1061,7 +1061,7 @@ gcWorkerThread (Capability *cap)
 
     // Every thread evacuates some roots.
     gct->evac_gen_no = 0;
-    markCapability(mark_root, gct, cap, rtsTrue/*prune sparks*/);
+    markCapability(mark_root, gct, cap, true/*prune sparks*/);
     scavenge_capability_mut_lists(cap);
 
     scavenge_until_all_done();
@@ -1092,12 +1092,12 @@ gcWorkerThread (Capability *cap)
 #if defined(THREADED_RTS)
 
 void
-waitForGcThreads (Capability *cap USED_IF_THREADS, rtsBool idle_cap[])
+waitForGcThreads (Capability *cap USED_IF_THREADS, bool idle_cap[])
 {
     const uint32_t n_threads = n_capabilities;
     const uint32_t me = cap->no;
     uint32_t i, j;
-    rtsBool retry = rtsTrue;
+    bool retry = true;
 
     while(retry) {
         for (i=0; i < n_threads; i++) {
@@ -1107,13 +1107,13 @@ waitForGcThreads (Capability *cap USED_IF_THREADS, rtsBool idle_cap[])
             }
         }
         for (j=0; j < 10; j++) {
-            retry = rtsFalse;
+            retry = false;
             for (i=0; i < n_threads; i++) {
                 if (i == me || idle_cap[i]) continue;
                 write_barrier();
                 interruptCapability(capabilities[i]);
                 if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY) {
-                    retry = rtsTrue;
+                    retry = true;
                 }
             }
             if (!retry) break;
@@ -1134,7 +1134,7 @@ start_gc_threads (void)
 
 static void
 wakeup_gc_threads (uint32_t me USED_IF_THREADS,
-                   rtsBool idle_cap[] USED_IF_THREADS)
+                   bool idle_cap[] USED_IF_THREADS)
 {
 #if defined(THREADED_RTS)
     uint32_t i;
@@ -1160,7 +1160,7 @@ wakeup_gc_threads (uint32_t me USED_IF_THREADS,
 // any_work(), and may even remain awake until the next GC starts.
 static void
 shutdown_gc_threads (uint32_t me USED_IF_THREADS,
-                     rtsBool idle_cap[] USED_IF_THREADS)
+                     bool idle_cap[] USED_IF_THREADS)
 {
 #if defined(THREADED_RTS)
     uint32_t i;
@@ -1179,7 +1179,7 @@ shutdown_gc_threads (uint32_t me USED_IF_THREADS,
 
 #if defined(THREADED_RTS)
 void
-releaseGCThreads (Capability *cap USED_IF_THREADS, rtsBool idle_cap[])
+releaseGCThreads (Capability *cap USED_IF_THREADS, bool idle_cap[])
 {
     const uint32_t n_threads = n_capabilities;
     const uint32_t me = cap->no;
@@ -1451,8 +1451,8 @@ init_gc_thread (gc_thread *t)
     t->scan_bd = NULL;
     t->mut_lists = t->cap->mut_lists;
     t->evac_gen_no = 0;
-    t->failed_to_evac = rtsFalse;
-    t->eager_promotion = rtsTrue;
+    t->failed_to_evac = false;
+    t->eager_promotion = true;
     t->thunk_selector_depth = 0;
     t->copied = 0;
     t->scanned = 0;
@@ -1657,7 +1657,7 @@ resize_nursery (void)
     long blocks;
     StgWord needed;
 
-    calcNeeded(rtsFalse, &needed); // approx blocks needed at next GC
+    calcNeeded(false, &needed); // approx blocks needed at next GC
 
     /* Guess how much will be live in generation 0 step 0 next time.
      * A good approximation is obtained by finding the
diff --git a/rts/sm/GC.h b/rts/sm/GC.h
index 44ae7e35b2..a2bf6123d6 100644
--- a/rts/sm/GC.h
+++ b/rts/sm/GC.h
@@ -18,9 +18,9 @@
 
 #include "HeapAlloc.h"
 
-void GarbageCollect (uint32_t collect_gen,
-                     rtsBool do_heap_census,
-                     uint32_t gc_type, Capability *cap, rtsBool idle_cap[]);
+void GarbageCollect (uint32_t force_major_gc,
+                     bool do_heap_census,
+                     uint32_t gc_type, Capability *cap, bool idle_cap[]);
 
 typedef void (*evac_fn)(void *user, StgClosure **root);
@@ -28,13 +28,13 @@ StgClosure * isAlive ( StgClosure *p );
 void markCAFs ( evac_fn evac, void *user );
 
 extern uint32_t N;
-extern rtsBool major_gc;
+extern bool major_gc;
 
 extern bdescr *mark_stack_bd;
 extern bdescr *mark_stack_top_bd;
 extern StgPtr mark_sp;
 
-extern rtsBool work_stealing;
+extern bool work_stealing;
 
 #ifdef DEBUG
 extern uint32_t mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS,
@@ -55,8 +55,8 @@ void initGcThreads (uint32_t from, uint32_t to);
 void freeGcThreads (void);
 
 #if defined(THREADED_RTS)
-void waitForGcThreads (Capability *cap, rtsBool idle_cap[]);
-void releaseGCThreads (Capability *cap, rtsBool idle_cap[]);
+void waitForGcThreads (Capability *cap, bool idle_cap[]);
+void releaseGCThreads (Capability *cap, bool idle_cap[]);
 #endif
 
 #define WORK_UNIT_WORDS 128
diff --git a/rts/sm/GCThread.h b/rts/sm/GCThread.h
index f940263665..89457e6467 100644
--- a/rts/sm/GCThread.h
+++ b/rts/sm/GCThread.h
@@ -138,7 +138,7 @@ typedef struct gc_thread_ {
     StgClosure* static_objects;      // live static objects
     StgClosure* scavenged_static_objects;   // static objects scavenged so far
 
-    W_ gc_count;                // number of GCs this thread has done
+    W_ gc_count;                     // number of GCs this thread has done
 
     // block that is currently being scanned
     bdescr *     scan_bd;
@@ -154,7 +154,7 @@ typedef struct gc_thread_ {
     // --------------------
     // evacuate flags
 
-    uint32_t evac_gen_no;       // Youngest generation that objects
+    uint32_t evac_gen_no;            // Youngest generation that objects
                                      // should be evacuated to in
                                      // evacuate().  (Logically an
                                      // argument to evacuate, but it's
                                      // optimise it into a per-thread
                                      // variable).
 
-    rtsBool failed_to_evac;     // failure to evacuate an object typically
+    bool failed_to_evac;             // failure to evacuate an object typically
                                      // Causes it to be recorded in the mutable
                                      // object list
 
-    rtsBool eager_promotion;    // forces promotion to the evac gen
+    bool eager_promotion;            // forces promotion to the evac gen
                                      // instead of the to-space
                                      // corresponding to the object
diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c
index a515665d07..9fda2fe070 100644
--- a/rts/sm/GCUtils.c
+++ b/rts/sm/GCUtils.c
@@ -192,7 +192,7 @@ push_scanned_block (bdescr *bd, gen_workspace *ws)
 StgPtr
 todo_block_full (uint32_t size, gen_workspace *ws)
 {
-    rtsBool urgent_to_push, can_extend;
+    bool urgent_to_push, can_extend;
     StgPtr p;
     bdescr *bd;
diff --git a/rts/sm/GCUtils.h b/rts/sm/GCUtils.h
index 7e5a827ce0..3092262af6 100644
--- a/rts/sm/GCUtils.h
+++ b/rts/sm/GCUtils.h
@@ -45,7 +45,7 @@ bdescr *steal_todo_block (uint32_t s);
 // Returns true if a block is partially full.  This predicate is used to try
 // to re-use partial blocks wherever possible, and to reduce wastage.
 // We might need to tweak the actual value.
-INLINE_HEADER rtsBool
+INLINE_HEADER bool
 isPartiallyFull(bdescr *bd)
 {
     return (bd->free + WORK_UNIT_WORDS < bd->start + BLOCK_SIZE_W);
diff --git a/rts/sm/MarkStack.h b/rts/sm/MarkStack.h
index d90b5e47b4..881e2b0b17 100644
--- a/rts/sm/MarkStack.h
+++ b/rts/sm/MarkStack.h
@@ -61,7 +61,7 @@ pop_mark_stack(void)
     return (StgPtr)*--mark_sp;
 }
 
-INLINE_HEADER rtsBool
+INLINE_HEADER bool
 mark_stack_empty(void)
 {
     return (((W_)mark_sp & BLOCK_MASK) == 0 && mark_stack_bd->link == NULL);
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index 7e3e1d5818..e7dfd6e57c 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -84,8 +84,8 @@ StgWeak *dead_weak_ptr_list;
 StgTSO *resurrected_threads;
 
 static void    collectDeadWeakPtrs (generation *gen);
-static rtsBool tidyWeakList (generation *gen);
-static rtsBool resurrectUnreachableThreads (generation *gen);
+static bool tidyWeakList (generation *gen);
+static bool resurrectUnreachableThreads (generation *gen);
 static void    tidyThreadList (generation *gen);
 
 void
@@ -104,15 +104,15 @@ initWeakForGC(void)
     resurrected_threads = END_TSO_QUEUE;
 }
 
-rtsBool
+bool
 traverseWeakPtrList(void)
 {
-    rtsBool flag = rtsFalse;
+    bool flag = false;
 
     switch (weak_stage) {
 
     case WeakDone:
-        return rtsFalse;
+        return false;
 
     case WeakThreads:
         /* Now deal with the gen->threads lists, which behave somewhat like
@@ -130,18 +130,18 @@ traverseWeakPtrList(void)
         // key is reachable):
         for (g = 0; g <= N; g++) {
             if (tidyWeakList(&generations[g])) {
-                flag = rtsTrue;
+                flag = true;
             }
         }
 
         // if we evacuated anything new, we must scavenge thoroughly
         // before we can determine which threads are unreachable.
-        if (flag) return rtsTrue;
+        if (flag) return true;
 
         // Resurrect any threads which were unreachable
         for (g = 0; g <= N; g++) {
             if (resurrectUnreachableThreads(&generations[g])) {
-                flag = rtsTrue;
+                flag = true;
             }
         }
@@ -151,7 +151,7 @@ traverseWeakPtrList(void)
 
         // if we evacuated anything new, we must scavenge thoroughly
         // before entering the WeakPtrs stage.
-        if (flag) return rtsTrue;
+        if (flag) return true;
 
         // otherwise, fall through...
     }
@@ -164,7 +164,7 @@ traverseWeakPtrList(void)
         // alive, so traverse those lists again:
         for (g = 0; g <= N; g++) {
             if (tidyWeakList(&generations[g])) {
-                flag = rtsTrue;
+                flag = true;
             }
         }
@@ -172,7 +172,7 @@ traverseWeakPtrList(void)
          * the dead weak pointers.  The dead_weak_ptr list is used as a list
         * of pending finalizers later on.
          */
-        if (flag == rtsFalse) {
+        if (flag == false) {
             for (g = 0; g <= N; g++) {
                 collectDeadWeakPtrs(&generations[g]);
             }
@@ -180,12 +180,12 @@ traverseWeakPtrList(void)
             weak_stage = WeakDone; // *now* we're done,
         }
 
-        return rtsTrue;         // but one more round of scavenging, please
+        return true;            // but one more round of scavenging, please
     }
 
     default:
         barf("traverse_weak_ptr_list");
-        return rtsTrue;
+        return true;
     }
 }
@@ -205,10 +205,10 @@ static void collectDeadWeakPtrs (generation *gen)
     }
 }
 
-static rtsBool resurrectUnreachableThreads (generation *gen)
+static bool resurrectUnreachableThreads (generation *gen)
 {
     StgTSO *t, *tmp, *next;
-    rtsBool flag = rtsFalse;
+    bool flag = false;
 
     for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
         next = t->global_link;
@@ -226,18 +226,18 @@ static rtsBool resurrectUnreachableThreads (generation *gen)
             evacuate((StgClosure **)&tmp);
             tmp->global_link = resurrected_threads;
             resurrected_threads = tmp;
-            flag = rtsTrue;
+            flag = true;
         }
     }
     return flag;
 }
 
-static rtsBool tidyWeakList(generation *gen)
+static bool tidyWeakList(generation *gen)
 {
     StgWeak *w, **last_w, *next_w;
     const StgInfoTable *info;
     StgClosure *new;
-    rtsBool flag = rtsFalse;
+    bool flag = false;
 
     last_w = &gen->old_weak_ptr_list;
     for (w = gen->old_weak_ptr_list; w != NULL; w = next_w) {
@@ -267,7 +267,7 @@ static rtsBool tidyWeakList(generation *gen)
             new_gen = Bdescr((P_)w)->gen;
             gct->evac_gen_no = new_gen->no;
-            gct->failed_to_evac = rtsFalse;
+            gct->failed_to_evac = false;
 
             // evacuate the fields of the weak ptr
             scavengeLiveWeak(w);
@@ -276,7 +276,7 @@ static rtsBool tidyWeakList(generation *gen)
                 debugTrace(DEBUG_weak,
                            "putting weak pointer %p into mutable list",
                           w);
-                gct->failed_to_evac = rtsFalse;
+                gct->failed_to_evac = false;
                 recordMutableGen_GC((StgClosure *)w, new_gen->no);
             }
@@ -287,7 +287,7 @@ static rtsBool tidyWeakList(generation *gen)
             // and put it on the correct weak ptr list.
             w->link = new_gen->weak_ptr_list;
             new_gen->weak_ptr_list = w;
-            flag = rtsTrue;
+            flag = true;
 
             if (gen->no != new_gen->no) {
                 debugTrace(DEBUG_weak,
diff --git a/rts/sm/MarkWeak.h b/rts/sm/MarkWeak.h
index aabb954496..bd27bf551a 100644
--- a/rts/sm/MarkWeak.h
+++ b/rts/sm/MarkWeak.h
@@ -22,7 +22,7 @@ extern StgTSO *exception_threads;
 
 void    collectFreshWeakPtrs   ( void );
 void    initWeakForGC          ( void );
-rtsBool traverseWeakPtrList    ( void );
+bool    traverseWeakPtrList    ( void );
 void    markWeakPtrList        ( void );
 void    scavengeLiveWeak       ( StgWeak * );
diff --git a/rts/sm/OSMem.h b/rts/sm/OSMem.h
index 69d87c201e..f6f9559c2e 100644
--- a/rts/sm/OSMem.h
+++ b/rts/sm/OSMem.h
@@ -18,8 +18,8 @@ void osReleaseFreeMemory(void);
 void osFreeAllMBlocks(void);
 size_t getPageSize (void);
 StgWord64 getPhysicalMemorySize (void);
-void setExecutable (void *p, W_ len, rtsBool exec);
-rtsBool osNumaAvailable(void);
+void setExecutable (void *p, W_ len, bool exec);
+bool osNumaAvailable(void);
 uint32_t osNumaNodes(void);
 StgWord osNumaMask(void);
 void osBindMBlocksToNode(void *addr, StgWord size, uint32_t node);
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 413aee945b..5a2923820c 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -557,7 +557,7 @@ checkTSO(StgTSO *tso)
    Optionally also check the sanity of the TSOs.
 */
 void
-checkGlobalTSOList (rtsBool checkTSOs)
+checkGlobalTSOList (bool checkTSOs)
 {
   StgTSO *tso;
   uint32_t g;
@@ -712,7 +712,7 @@ checkNurserySanity (nursery *nursery)
 }
 
 static void checkGeneration (generation *gen,
-                             rtsBool after_major_gc USED_IF_THREADS)
+                             bool after_major_gc USED_IF_THREADS)
 {
     uint32_t n;
     gen_workspace *ws;
@@ -741,7 +741,7 @@ static void checkGeneration (generation *gen,
 }
 
 /* Full heap sanity check. */
-static void checkFullHeap (rtsBool after_major_gc)
+static void checkFullHeap (bool after_major_gc)
 {
     uint32_t g, n;
@@ -753,7 +753,7 @@ static void checkFullHeap (rtsBool after_major_gc)
     }
 }
 
-void checkSanity (rtsBool after_gc, rtsBool major_gc)
+void checkSanity (bool after_gc, bool major_gc)
 {
     checkFullHeap(after_gc && major_gc);
@@ -763,7 +763,7 @@ void checkSanity (rtsBool after_gc, rtsBool major_gc)
     // does nothing in this case.
     if (after_gc) {
         checkMutableLists();
-        checkGlobalTSOList(rtsTrue);
+        checkGlobalTSOList(true);
     }
 }
@@ -875,14 +875,14 @@ genBlocks (generation *gen)
 }
 
 void
-memInventory (rtsBool show)
+memInventory (bool show)
 {
   uint32_t g, i;
   W_ gen_blocks[RtsFlags.GcFlags.generations];
   W_ nursery_blocks, retainer_blocks,
       arena_blocks, exec_blocks, gc_free_blocks = 0;
   W_ live_blocks = 0, free_blocks = 0;
-  rtsBool leak;
+  bool leak;
 
   // count the blocks we current have
diff --git a/rts/sm/Sanity.h b/rts/sm/Sanity.h
index 273efe2dc9..63ae05d2a2 100644
--- a/rts/sm/Sanity.h
+++ b/rts/sm/Sanity.h
@@ -21,13 +21,13 @@
 # endif
 
 /* debugging routines */
-void checkSanity        ( rtsBool after_gc, rtsBool major_gc );
+void checkSanity        ( bool after_gc, bool major_gc );
 void checkNurserySanity ( nursery *nursery );
 void checkHeapChain     ( bdescr *bd );
 void checkHeapChunk     ( StgPtr start, StgPtr end );
 void checkLargeObjects  ( bdescr *bd );
 void checkTSO           ( StgTSO* tso );
-void checkGlobalTSOList ( rtsBool checkTSOs );
+void checkGlobalTSOList ( bool checkTSOs );
 void checkStaticObjects ( StgClosure* static_objects );
 void checkStackChunk    ( StgPtr sp, StgPtr stack_end );
 StgOffset checkStackFrame ( StgPtr sp );
@@ -35,7 +35,7 @@ StgOffset checkClosure ( const StgClosure* p );
 
 void checkRunQueue (Capability *cap);
 
-void memInventory (rtsBool show);
+void memInventory (bool show);
 
 void checkBQ (StgTSO *bqe, StgClosure *closure);
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 595d8275cf..940f11fea4 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -51,7 +51,7 @@ static void scavenge_large_bitmap (StgPtr p,
 static void
 scavengeTSO (StgTSO *tso)
 {
-    rtsBool saved_eager;
+    bool saved_eager;
 
     debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
@@ -66,7 +66,7 @@ scavengeTSO (StgTSO *tso)
     }
 
     saved_eager = gct->eager_promotion;
-    gct->eager_promotion = rtsFalse;
+    gct->eager_promotion = false;
 
     evacuate((StgClosure **)&tso->blocked_exceptions);
     evacuate((StgClosure **)&tso->bq);
@@ -107,10 +107,10 @@
 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
 {
     W_ m;
-    rtsBool any_failed;
+    bool any_failed;
     StgPtr p, q;
 
-    any_failed = rtsFalse;
+    any_failed = false;
     p = (StgPtr)&a->payload[0];
     for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++) {
@@ -119,9 +119,9 @@ static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
             evacuate((StgClosure**)p);
         }
         if (gct->failed_to_evac) {
-            any_failed = rtsTrue;
+            any_failed = true;
             *mutArrPtrsCard(a,m) = 1;
-            gct->failed_to_evac = rtsFalse;
+            gct->failed_to_evac = false;
         } else {
             *mutArrPtrsCard(a,m) = 0;
         }
@@ -133,9 +133,9 @@ static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
             evacuate((StgClosure**)p);
         }
         if (gct->failed_to_evac) {
-            any_failed = rtsTrue;
+            any_failed = true;
             *mutArrPtrsCard(a,m) = 1;
-            gct->failed_to_evac = rtsFalse;
+            gct->failed_to_evac = false;
         } else {
             *mutArrPtrsCard(a,m) = 0;
         }
@@ -150,9 +150,9 @@ static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
 {
     W_ m;
     StgPtr p, q;
-    rtsBool any_failed;
+    bool any_failed;
 
-    any_failed = rtsFalse;
+    any_failed = false;
     for (m = 0; m < mutArrPtrsCards(a->ptrs); m++) {
         if (*mutArrPtrsCard(a,m) != 0) {
@@ -163,8 +163,8 @@ static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
                 evacuate((StgClosure**)p);
             }
             if (gct->failed_to_evac) {
-                any_failed = rtsTrue;
-                gct->failed_to_evac = rtsFalse;
+                any_failed = true;
+                gct->failed_to_evac = false;
             } else {
                 *mutArrPtrsCard(a,m) = 0;
             }
@@ -408,7 +408,7 @@ scavenge_block (bdescr *bd)
 {
   StgPtr p, q;
   const StgInfoTable *info;
-  rtsBool saved_eager_promotion;
+  bool saved_eager_promotion;
   gen_workspace *ws;
 
   debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
@@ -417,7 +417,7 @@ scavenge_block (bdescr *bd)
   gct->scan_bd = bd;
   gct->evac_gen_no = bd->gen_no;
   saved_eager_promotion = gct->eager_promotion;
-  gct->failed_to_evac = rtsFalse;
+  gct->failed_to_evac = false;
 
   ws = &gct->gens[bd->gen->no];
@@ -441,7 +441,7 @@ scavenge_block (bdescr *bd)
     case MVAR_DIRTY:
     {
         StgMVar *mvar = ((StgMVar *)p);
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate((StgClosure **)&mvar->head);
         evacuate((StgClosure **)&mvar->tail);
         evacuate((StgClosure **)&mvar->value);
@@ -459,7 +459,7 @@ scavenge_block (bdescr *bd)
     case TVAR:
     {
         StgTVar *tvar = ((StgTVar *)p);
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate((StgClosure **)&tvar->current_value);
         evacuate((StgClosure **)&tvar->first_watch_queue_entry);
         gct->eager_promotion = saved_eager_promotion;
@@ -590,7 +590,7 @@ scavenge_block (bdescr *bd)
 
     case MUT_VAR_CLEAN:
     case MUT_VAR_DIRTY:
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate(&((StgMutVar *)p)->var);
         gct->eager_promotion = saved_eager_promotion;
@@ -606,7 +606,7 @@ scavenge_block (bdescr *bd)
     {
         StgBlockingQueue *bq = (StgBlockingQueue *)p;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate(&bq->bh);
         evacuate((StgClosure**)&bq->owner);
         evacuate((StgClosure**)&bq->queue);
@@ -661,7 +661,7 @@ scavenge_block (bdescr *bd)
         // array, but if we find the array only points to objects in
         // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
@@ -672,7 +672,7 @@ scavenge_block (bdescr *bd)
         }
 
         gct->eager_promotion = saved_eager_promotion;
-        gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
+        gct->failed_to_evac = true; // always put it on the mutable list.
         break;
     }
@@ -702,7 +702,7 @@ scavenge_block (bdescr *bd)
         // array, but if we find the array only points to objects in
         // the same or an older generation, we mark it "clean" and
         // avoid traversing it during minor GCs.
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
         for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
             evacuate((StgClosure **)p);
@@ -715,7 +715,7 @@ scavenge_block (bdescr *bd)
             ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
         }
 
-        gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
+        gct->failed_to_evac = true; // always put it on the mutable list.
         break;
     }
@@ -751,7 +751,7 @@ scavenge_block (bdescr *bd)
     {
         StgStack *stack = (StgStack*)p;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         scavenge_stack(stack->sp, stack->stack + stack->stack_size);
         stack->dirty = gct->failed_to_evac;
@@ -765,7 +765,7 @@ scavenge_block (bdescr *bd)
     {
         StgPtr end;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
         for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
@@ -774,7 +774,7 @@ scavenge_block (bdescr *bd)
         p += info->layout.payload.nptrs;
 
         gct->eager_promotion = saved_eager_promotion;
-        gct->failed_to_evac = rtsTrue; // mutable
+        gct->failed_to_evac = true; // mutable
         break;
     }
@@ -783,7 +783,7 @@ scavenge_block (bdescr *bd)
       StgWord i;
      StgTRecChunk *tc = ((StgTRecChunk *) p);
       TRecEntry *e = &(tc -> entries[0]);
-      gct->eager_promotion = rtsFalse;
+      gct->eager_promotion = false;
       evacuate((StgClosure **)&tc->prev_chunk);
       for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
         evacuate((StgClosure **)&e->tvar);
@@ -791,7 +791,7 @@ scavenge_block (bdescr *bd)
         evacuate((StgClosure **)&e->new_value);
       }
       gct->eager_promotion = saved_eager_promotion;
-      gct->failed_to_evac = rtsTrue; // mutable
+      gct->failed_to_evac = true; // mutable
       p += sizeofW(StgTRecChunk);
       break;
     }
@@ -816,7 +816,7 @@ scavenge_block (bdescr *bd)
      * the current object points to into the current generation.
      */
     if (gct->failed_to_evac) {
-        gct->failed_to_evac = rtsFalse;
+        gct->failed_to_evac = false;
         if (bd->gen_no > 0) {
             recordMutableGen_GC((StgClosure *)q, bd->gen_no);
         }
@@ -856,7 +856,7 @@ scavenge_mark_stack(void)
 {
     StgPtr p, q;
     const StgInfoTable *info;
-    rtsBool saved_eager_promotion;
+    bool saved_eager_promotion;
 
     gct->evac_gen_no = oldest_gen->no;
     saved_eager_promotion = gct->eager_promotion;
@@ -873,7 +873,7 @@ scavenge_mark_stack(void)
         case MVAR_DIRTY:
         {
             StgMVar *mvar = ((StgMVar *)p);
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             evacuate((StgClosure **)&mvar->head);
             evacuate((StgClosure **)&mvar->tail);
             evacuate((StgClosure **)&mvar->value);
@@ -890,7 +890,7 @@ scavenge_mark_stack(void)
         case TVAR:
         {
             StgTVar *tvar = ((StgTVar *)p);
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             evacuate((StgClosure **)&tvar->current_value);
             evacuate((StgClosure **)&tvar->first_watch_queue_entry);
             gct->eager_promotion = saved_eager_promotion;
@@ -997,7 +997,7 @@ scavenge_mark_stack(void)
         case MUT_VAR_CLEAN:
         case MUT_VAR_DIRTY: {
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             evacuate(&((StgMutVar *)p)->var);
             gct->eager_promotion = saved_eager_promotion;
@@ -1013,7 +1013,7 @@ scavenge_mark_stack(void)
         {
             StgBlockingQueue *bq = (StgBlockingQueue *)p;
 
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             evacuate(&bq->bh);
             evacuate((StgClosure**)&bq->owner);
             evacuate((StgClosure**)&bq->queue);
@@ -1064,7 +1064,7 @@ scavenge_mark_stack(void)
             // array, but if we find the array only points to objects in
             // the same or an older generation, we mark it "clean" and
             // avoid traversing it during minor GCs.
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
 
             scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
@@ -1075,7 +1075,7 @@ scavenge_mark_stack(void)
             }
 
             gct->eager_promotion = saved_eager_promotion;
-            gct->failed_to_evac = rtsTrue; // mutable anyhow.
+            gct->failed_to_evac = true; // mutable anyhow.
             break;
         }
@@ -1102,14 +1102,14 @@ scavenge_mark_stack(void)
             // follow everything
         {
             StgPtr next;
-            rtsBool saved_eager;
+            bool saved_eager;
 
             // We don't eagerly promote objects pointed to by a mutable
             // array, but if we find the array only points to objects in
             // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
             saved_eager = gct->eager_promotion;
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
             for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
                 evacuate((StgClosure **)p);
@@ -1122,7 +1122,7 @@ scavenge_mark_stack(void)
                 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
             }
 
-            gct->failed_to_evac = rtsTrue; // mutable anyhow.
+            gct->failed_to_evac = true; // mutable anyhow.
             break;
         }
@@ -1157,7 +1157,7 @@ scavenge_mark_stack(void)
         {
             StgStack *stack = (StgStack*)p;
 
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
 
             scavenge_stack(stack->sp, stack->stack + stack->stack_size);
             stack->dirty = gct->failed_to_evac;
@@ -1170,7 +1170,7 @@ scavenge_mark_stack(void)
        {
             StgPtr end;
 
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
 
             end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
             for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
@@ -1178,7 +1178,7 @@ scavenge_mark_stack(void)
             }
 
             gct->eager_promotion = saved_eager_promotion;
-            gct->failed_to_evac = rtsTrue; // mutable
+            gct->failed_to_evac = true; // mutable
             break;
         }
@@ -1187,7 +1187,7 @@ scavenge_mark_stack(void)
             StgWord i;
             StgTRecChunk *tc = ((StgTRecChunk *) p);
             TRecEntry *e = &(tc -> entries[0]);
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             evacuate((StgClosure **)&tc->prev_chunk);
             for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
               evacuate((StgClosure **)&e->tvar);
@@ -1195,7 +1195,7 @@ scavenge_mark_stack(void)
               evacuate((StgClosure **)&e->new_value);
             }
             gct->eager_promotion = saved_eager_promotion;
-            gct->failed_to_evac = rtsTrue; // mutable
+            gct->failed_to_evac = true; // mutable
             break;
         }
@@ -1205,7 +1205,7 @@ scavenge_mark_stack(void)
         }
 
         if (gct->failed_to_evac) {
-            gct->failed_to_evac = rtsFalse;
+            gct->failed_to_evac = false;
             if (gct->evac_gen_no) {
                 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
             }
@@ -1221,12 +1221,12 @@ scavenge_mark_stack(void)
    objects can have this property.
    -------------------------------------------------------------------------- */
 
-static rtsBool
+static bool
 scavenge_one(StgPtr p)
 {
     const StgInfoTable *info;
-    rtsBool no_luck;
-    rtsBool saved_eager_promotion;
+    bool no_luck;
+    bool saved_eager_promotion;
 
     saved_eager_promotion = gct->eager_promotion;
@@ -1239,7 +1239,7 @@ scavenge_one(StgPtr p)
     case MVAR_DIRTY:
     {
         StgMVar *mvar = ((StgMVar *)p);
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate((StgClosure **)&mvar->head);
         evacuate((StgClosure **)&mvar->tail);
         evacuate((StgClosure **)&mvar->value);
@@ -1256,7 +1256,7 @@ scavenge_one(StgPtr p)
     case TVAR:
     {
         StgTVar *tvar = ((StgTVar *)p);
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate((StgClosure **)&tvar->current_value);
         evacuate((StgClosure **)&tvar->first_watch_queue_entry);
         gct->eager_promotion = saved_eager_promotion;
@@ -1321,7 +1321,7 @@ scavenge_one(StgPtr p)
     case MUT_VAR_DIRTY:
     {
         StgPtr q = p;
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate(&((StgMutVar *)p)->var);
         gct->eager_promotion = saved_eager_promotion;
@@ -1337,7 +1337,7 @@ scavenge_one(StgPtr p)
     {
         StgBlockingQueue *bq = (StgBlockingQueue *)p;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate(&bq->bh);
         evacuate((StgClosure**)&bq->owner);
         evacuate((StgClosure**)&bq->queue);
@@ -1388,7 +1388,7 @@ scavenge_one(StgPtr p)
         // array, but if we find the array only points to objects in
         // the same or an older generation, we mark it "clean" and
         // avoid traversing it during minor GCs.
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
@@ -1399,7 +1399,7 @@ scavenge_one(StgPtr p)
         }
 
         gct->eager_promotion = saved_eager_promotion;
-        gct->failed_to_evac = rtsTrue;
+        gct->failed_to_evac = true;
         break;
     }
@@ -1423,14 +1423,14 @@ scavenge_one(StgPtr p)
     case SMALL_MUT_ARR_PTRS_DIRTY:
     {
         StgPtr next, q;
-        rtsBool saved_eager;
+        bool saved_eager;
 
         // We don't eagerly promote objects pointed to by a mutable
         // array, but if we find the array only points to objects in
         // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
         saved_eager = gct->eager_promotion;
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         q = p;
         next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
         for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
@@ -1444,7 +1444,7 @@ scavenge_one(StgPtr p)
             ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
         }
 
-        gct->failed_to_evac = rtsTrue;
+        gct->failed_to_evac = true;
         break;
     }
@@ -1479,7 +1479,7 @@ scavenge_one(StgPtr p)
     {
         StgStack *stack = (StgStack*)p;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         scavenge_stack(stack->sp, stack->stack + stack->stack_size);
         stack->dirty = gct->failed_to_evac;
@@ -1492,7 +1492,7 @@ scavenge_one(StgPtr p)
     {
         StgPtr end;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
         for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
@@ -1500,7 +1500,7 @@ scavenge_one(StgPtr p)
         }
 
         gct->eager_promotion = saved_eager_promotion;
-        gct->failed_to_evac = rtsTrue; // mutable
+        gct->failed_to_evac = true; // mutable
         break;
     }
@@ -1510,7 +1510,7 @@ scavenge_one(StgPtr p)
       StgWord i;
       StgTRecChunk *tc = ((StgTRecChunk *) p);
       TRecEntry *e = &(tc -> entries[0]);
-      gct->eager_promotion = rtsFalse;
+      gct->eager_promotion = false;
      evacuate((StgClosure **)&tc->prev_chunk);
       for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
         evacuate((StgClosure **)&e->tvar);
@@ -1518,7 +1518,7 @@ scavenge_one(StgPtr p)
         evacuate((StgClosure **)&e->new_value);
       }
       gct->eager_promotion = saved_eager_promotion;
-      gct->failed_to_evac = rtsTrue; // mutable
+      gct->failed_to_evac = true; // mutable
       break;
     }
@@ -1562,7 +1562,7 @@ scavenge_one(StgPtr p)
     }
 
     no_luck = gct->failed_to_evac;
-    gct->failed_to_evac = rtsFalse;
+    gct->failed_to_evac = false;
     return (no_luck);
 }
@@ -1636,9 +1636,9 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
                 continue;
             case MUT_ARR_PTRS_DIRTY:
             {
-                rtsBool saved_eager_promotion;
+                bool saved_eager_promotion;
                 saved_eager_promotion = gct->eager_promotion;
-                gct->eager_promotion = rtsFalse;
+                gct->eager_promotion = false;
 
                 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
@@ -1649,7 +1649,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
                 }
 
                 gct->eager_promotion = saved_eager_promotion;
-                gct->failed_to_evac = rtsFalse;
+                gct->failed_to_evac = false;
                 recordMutableGen_GC((StgClosure *)p,gen_no);
                 continue;
             }
@@ -1743,7 +1743,7 @@ scavenge_static(void)
        * in case we visit this object again.
       */
       if (gct->failed_to_evac) {
-          gct->failed_to_evac = rtsFalse;
+          gct->failed_to_evac = false;
           recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
       }
       break;
@@ -1779,7 +1779,7 @@ scavenge_static(void)
       barf("scavenge_static: strange closure %d", (int)(info->type));
     }
 
-    ASSERT(gct->failed_to_evac == rtsFalse);
+    ASSERT(gct->failed_to_evac == false);
   }
 }
@@ -2009,20 +2009,20 @@ scavenge_large (gen_workspace *ws)
    is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
 
-static rtsBool
+static bool
 scavenge_find_work (void)
 {
     int g;
     gen_workspace *ws;
-    rtsBool did_something, did_anything;
+    bool did_something, did_anything;
     bdescr *bd;
 
     gct->scav_find_work++;
 
-    did_anything = rtsFalse;
+    did_anything = false;
 
 loop:
-    did_something = rtsFalse;
+    did_something = false;
 
     for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
         ws = &gct->gens[g];
@@ -2033,26 +2033,26 @@ loop:
 
         if (ws->todo_bd->u.scan < ws->todo_free) {
            scavenge_block(ws->todo_bd);
-            did_something = rtsTrue;
+            did_something = true;
             break;
         }
 
         // If we have any large objects to scavenge, do them now.
         if (ws->todo_large_objects) {
             scavenge_large(ws);
-            did_something = rtsTrue;
+            did_something = true;
             break;
         }
 
         if ((bd = grab_local_todo_block(ws)) != NULL) {
             scavenge_block(bd);
-            did_something = rtsTrue;
+            did_something = true;
             break;
         }
     }
 
     if (did_something) {
-        did_anything = rtsTrue;
+        did_anything = true;
         goto loop;
     }
@@ -2062,13 +2062,13 @@ loop:
         for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
             if ((bd = steal_todo_block(g)) != NULL) {
                 scavenge_block(bd);
-                did_something = rtsTrue;
+                did_something = true;
                 break;
             }
         }
 
         if (did_something) {
-            did_anything = rtsTrue;
+            did_anything = true;
             goto loop;
         }
     }
@@ -2086,10 +2086,10 @@ loop:
 void
 scavenge_loop(void)
 {
-    rtsBool work_to_do;
+    bool work_to_do;
 
 loop:
-    work_to_do = rtsFalse;
+    work_to_do = false;
 
     // scavenge static objects
     if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
@@ -2100,7 +2100,7 @@ loop:
     // scavenge objects in compacted generation
     if (mark_stack_bd != NULL && !mark_stack_empty()) {
         scavenge_mark_stack();
-        work_to_do = rtsTrue;
+        work_to_do = true;
     }
 
     // Order is important here: we want to deal in full blocks as
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index da1acbcf5b..ad2519588b 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -43,7 +43,7 @@ StgIndStatic *dyn_caf_list = NULL;
 StgIndStatic *debug_caf_list = NULL;
 StgIndStatic *revertible_caf_list = NULL;
 
-rtsBool keepCAFs;
+bool keepCAFs;
 
 W_ large_alloc_lim;    /* GC if n_large_blocks in any nursery
                         * reaches this. */
@@ -286,7 +286,7 @@ exitStorage (void)
 }
 
 void
-freeStorage (rtsBool free_heap)
+freeStorage (bool free_heap)
 {
     stgFree(generations);
     if (free_heap) freeAllMBlocks();
@@ -505,13 +505,13 @@ StgInd* newRetainedCAF (StgRegTable *reg, StgIndStatic *caf)
 
 // If we are using loadObj/unloadObj in the linker, then we want to
 //
-//  - retain all CAFs in statically linked code (keepCAFs == rtsTrue),
+//  - retain all CAFs in statically linked code (keepCAFs == true),
 //    because we might link a new object that uses any of these CAFs.
 //
 //  - GC CAFs in dynamically-linked code, so that we can detect when
 //    a dynamically-linked object is unloadable.
 //
-// So for this case, we set keepCAFs to rtsTrue, and link newCAF to newGCdCAF
+// So for this case, we set keepCAFs to true, and link newCAF to newGCdCAF
 // for dynamically-linked code.
 //
 StgInd* newGCdCAF (StgRegTable *reg, StgIndStatic *caf)
@@ -741,7 +741,7 @@ resizeNurseries (W_ blocks)
     resizeNurseriesEach(blocks / n_nurseries);
 }
 
-rtsBool
+bool
 getNewNursery (Capability *cap)
 {
     StgWord i;
@@ -753,28 +753,28 @@ getNewNursery (Capability *cap)
         if (i < n_nurseries) {
            if (cas(&next_nursery[node], i, i+n_numa_nodes) == i) {
                 assignNurseryToCapability(cap, i);
-                return rtsTrue;
+                return true;
             }
         }
         else if (n_numa_nodes > 1) {
            // Try to find an unused nursery chunk on other nodes.  We'll get
            // remote memory, but the rationale is that avoiding GC is better
            // than avoiding remote memory access.
-            rtsBool lost = rtsFalse;
+            bool lost = false;
            for (n = 0; n < n_numa_nodes; n++) {
                 if (n == node) continue;
                 i = next_nursery[n];
                 if (i < n_nurseries) {
                     if (cas(&next_nursery[n], i, i+n_numa_nodes) == i) {
                         assignNurseryToCapability(cap, i);
-                        return rtsTrue;
+                        return true;
                     } else {
-                        lost = rtsTrue; /* lost a race */
+                        lost = true; /* lost a race */
                     }
                 }
            }
-            if (!lost) return rtsFalse;
+            if (!lost) return false;
         } else {
-            return rtsFalse;
+            return false;
         }
     }
 }
@@ -1244,7 +1244,7 @@ W_ gcThreadLiveBlocks (uint32_t i, uint32_t g)
 * blocks since all the data will be copied.
 */
 extern W_
-calcNeeded (rtsBool force_major, memcount *blocks_needed)
+calcNeeded (bool force_major, memcount *blocks_needed)
 {
     W_ needed = 0, blocks;
     uint32_t g, N;
@@ -1442,7 +1442,7 @@ AdjustorWritable allocateExec (W_ bytes, AdjustorExecutable *exec_ret)
             exec_block->u.back = bd;
         }
         bd->u.back = NULL;
-        setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsTrue);
+        setExecutable(bd->start, bd->blocks * BLOCK_SIZE, true);
         exec_block = bd;
     }
     *(exec_block->free) = n;  // store the size of this chunk
@@ -1479,7 +1479,7 @@ void freeExec (void *addr)
     if (bd != exec_block) {
         debugTrace(DEBUG_gc, "free exec block %p", bd->start);
         dbl_link_remove(bd, &exec_block);
-        setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsFalse);
+        setExecutable(bd->start, bd->blocks * BLOCK_SIZE, false);
         freeGroup(bd);
     } else {
         bd->free = bd->start;
diff --git a/rts/sm/Storage.h b/rts/sm/Storage.h
index 2bd1a35176..a4e928a3eb 100644
--- a/rts/sm/Storage.h
+++ b/rts/sm/Storage.h
@@ -19,7 +19,7 @@
 
 void initStorage(void);
 void exitStorage(void);
-void freeStorage(rtsBool free_heap);
+void freeStorage(bool free_heap);
 
 // Adding more Capabilities later: this function allocates nurseries
 // and initialises other storage-related things.
@@ -30,7 +30,7 @@ void storageAddCapabilities (uint32_t from, uint32_t to);
    -------------------------------------------------------------------------- */
 
 INLINE_HEADER
-rtsBool doYouWantToGC(Capability *cap)
+bool doYouWantToGC(Capability *cap)
 {
     return (cap->r.rCurrentNursery->link == NULL ||
             g0->n_new_large_words >= large_alloc_lim);
@@ -73,7 +73,7 @@ void     clearNursery       (Capability *cap);
 void     resizeNurseries    (StgWord blocks);
 void     resizeNurseriesFixed (void);
 StgWord  countNurseryBlocks (void);
-rtsBool  getNewNursery      (Capability *cap);
+bool     getNewNursery      (Capability *cap);
@@ -102,7 +102,7 @@ StgWord calcTotalAllocated (void);
 StgWord countLargeAllocated (void);
 StgWord countOccupied       (bdescr *bd);
 
-StgWord calcNeeded          (rtsBool force_major, StgWord *blocks_needed);
+StgWord calcNeeded          (bool force_major, StgWord *blocks_needed);
 
 StgWord gcThreadLiveWords  (uint32_t i, uint32_t g);
 StgWord gcThreadLiveBlocks (uint32_t i, uint32_t g);
```
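One subtlety worth knowing when auditing a migration like this: assignment to a C99 `bool` normalizes any nonzero value to exactly 1, which an `int`- or enum-backed boolean does not do. The toy program below is not part of the commit; `fakeBool` is a hypothetical stand-in for an integer-backed boolean:

```c
#include <stdbool.h>
#include <stdio.h>

typedef int fakeBool;   /* hypothetical stand-in for an int-backed boolean */

int main(void)
{
    int flags = 0x30;
    fakeBool f = flags & 0x10;   /* keeps the raw value 0x10 (16) */
    bool     b = flags & 0x10;   /* C99 conversion to _Bool yields exactly 1 */
    printf("%d %d\n", (int)f, (int)b);   /* prints: 16 1 */
    return 0;
}
```

So code that stores a masked flag word in a boolean and later compares it against a specific constant behaves more predictably after the switch to `bool`.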