author    | Ben Gamari <bgamari.foss@gmail.com> | 2016-11-29 16:51:30 -0500
committer | Ben Gamari <ben@smart-cactus.org>   | 2016-11-29 16:51:30 -0500
commit    | 428e152be6bb0fd3867e41cee82a6d5968a11a26 (patch)
tree      | e43d217c10c052704f872cd7e1df4d335c12d376 /rts/sm/Storage.c
parent    | 56d74515396c8b6360ba7898cbc4b68f0f1fb2ea (diff)
download  | haskell-428e152be6bb0fd3867e41cee82a6d5968a11a26.tar.gz
Use C99's bool
Test Plan: Validate on lots of platforms
Reviewers: erikd, simonmar, austin
Reviewed By: erikd, simonmar
Subscribers: michalt, thomie
Differential Revision: https://phabricator.haskell.org/D2699
Diffstat (limited to 'rts/sm/Storage.c')
-rw-r--r-- | rts/sm/Storage.c | 28
1 file changed, 14 insertions, 14 deletions
```diff
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index da1acbcf5b..ad2519588b 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -43,7 +43,7 @@
 StgIndStatic *dyn_caf_list = NULL;
 StgIndStatic *debug_caf_list = NULL;
 StgIndStatic *revertible_caf_list = NULL;
-rtsBool keepCAFs;
+bool keepCAFs;
 
 W_ large_alloc_lim;    /* GC if n_large_blocks in any nursery
                         * reaches this. */
@@ -286,7 +286,7 @@ exitStorage (void)
 }
 
 void
-freeStorage (rtsBool free_heap)
+freeStorage (bool free_heap)
 {
     stgFree(generations);
     if (free_heap) freeAllMBlocks();
@@ -505,13 +505,13 @@ StgInd* newRetainedCAF (StgRegTable *reg, StgIndStatic *caf)
 
 // If we are using loadObj/unloadObj in the linker, then we want to
 //
-//  - retain all CAFs in statically linked code (keepCAFs == rtsTrue),
+//  - retain all CAFs in statically linked code (keepCAFs == true),
 //    because we might link a new object that uses any of these CAFs.
 //
 //  - GC CAFs in dynamically-linked code, so that we can detect when
 //    a dynamically-linked object is unloadable.
 //
-// So for this case, we set keepCAFs to rtsTrue, and link newCAF to newGCdCAF
+// So for this case, we set keepCAFs to true, and link newCAF to newGCdCAF
 // for dynamically-linked code.
 //
 StgInd* newGCdCAF (StgRegTable *reg, StgIndStatic *caf)
@@ -741,7 +741,7 @@ resizeNurseries (W_ blocks)
     resizeNurseriesEach(blocks / n_nurseries);
 }
 
-rtsBool
+bool
 getNewNursery (Capability *cap)
 {
     StgWord i;
@@ -753,28 +753,28 @@ getNewNursery (Capability *cap)
         if (i < n_nurseries) {
             if (cas(&next_nursery[node], i, i+n_numa_nodes) == i) {
                 assignNurseryToCapability(cap, i);
-                return rtsTrue;
+                return true;
             }
         } else if (n_numa_nodes > 1) {
             // Try to find an unused nursery chunk on other nodes.  We'll get
             // remote memory, but the rationale is that avoiding GC is better
             // than avoiding remote memory access.
-            rtsBool lost = rtsFalse;
+            bool lost = false;
             for (n = 0; n < n_numa_nodes; n++) {
                 if (n == node) continue;
                 i = next_nursery[n];
                 if (i < n_nurseries) {
                     if (cas(&next_nursery[n], i, i+n_numa_nodes) == i) {
                         assignNurseryToCapability(cap, i);
-                        return rtsTrue;
+                        return true;
                     } else {
-                        lost = rtsTrue; /* lost a race */
+                        lost = true; /* lost a race */
                     }
                 }
             }
-            if (!lost) return rtsFalse;
+            if (!lost) return false;
         } else {
-            return rtsFalse;
+            return false;
         }
     }
 }
@@ -1244,7 +1244,7 @@ W_ gcThreadLiveBlocks (uint32_t i, uint32_t g)
  *  blocks since all the data will be copied.
  */
 extern W_
-calcNeeded (rtsBool force_major, memcount *blocks_needed)
+calcNeeded (bool force_major, memcount *blocks_needed)
 {
     W_ needed = 0, blocks;
     uint32_t g, N;
@@ -1442,7 +1442,7 @@ AdjustorWritable allocateExec (W_ bytes, AdjustorExecutable *exec_ret)
             exec_block->u.back = bd;
         }
         bd->u.back = NULL;
-        setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsTrue);
+        setExecutable(bd->start, bd->blocks * BLOCK_SIZE, true);
         exec_block = bd;
     }
     *(exec_block->free) = n;  // store the size of this chunk
@@ -1479,7 +1479,7 @@ void freeExec (void *addr)
         if (bd != exec_block) {
             debugTrace(DEBUG_gc, "free exec block %p", bd->start);
             dbl_link_remove(bd, &exec_block);
-            setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsFalse);
+            setExecutable(bd->start, bd->blocks * BLOCK_SIZE, false);
             freeGroup(bd);
         } else {
             bd->free = bd->start;
```
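For context on the mechanical pattern the diff applies, here is a minimal, self-contained sketch of the old and new idioms. The `rtsBool`/`rtsTrue`/`rtsFalse` definitions below are an assumption made for illustration only, not the RTS's actual historical header, and both helper functions are hypothetical.

```c
#include <stdbool.h>   /* C99: bool, true, false */
#include <stdio.h>

/* Assumed shape of the legacy type (illustration only): an int-backed
 * boolean with rtsTrue/rtsFalse constants. */
typedef int rtsBool;
#define rtsTrue  1
#define rtsFalse 0

/* Old style: a predicate returns rtsTrue/rtsFalse. */
static rtsBool old_style_is_even (int n)
{
    return (n % 2 == 0) ? rtsTrue : rtsFalse;
}

/* New style, as in the diff above: C99's bool and true/false. */
static bool new_style_is_even (int n)
{
    return n % 2 == 0;
}

int main (void)
{
    /* Both calls print 1 for an even argument. */
    printf("%d %d\n", old_style_is_even(4), new_style_is_even(4));
    return 0;
}
```

One practical benefit of the switch is that `bool` (C99's `_Bool`) normalizes any nonzero assignment to 1, so comparisons such as `flag == true` behave predictably, whereas an int-backed typedef can hold arbitrary nonzero values.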