author    Ben Gamari <bgamari.foss@gmail.com>    2016-11-29 16:51:30 -0500
committer Ben Gamari <ben@smart-cactus.org>      2016-11-29 16:51:30 -0500
commit    428e152be6bb0fd3867e41cee82a6d5968a11a26 (patch)
tree      e43d217c10c052704f872cd7e1df4d335c12d376
parent    56d74515396c8b6360ba7898cbc4b68f0f1fb2ea (diff)
download  haskell-428e152be6bb0fd3867e41cee82a6d5968a11a26.tar.gz
Use C99's bool
Test Plan: Validate on lots of platforms

Reviewers: erikd, simonmar, austin

Reviewed By: erikd, simonmar

Subscribers: michalt, thomie

Differential Revision: https://phabricator.haskell.org/D2699
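In miniature, the patch is the deletion of the hand-rolled boolean enum from includes/rts/Types.h (visible below) in favour of C99's <stdbool.h>, plus the mechanical renaming rtsBool/rtsTrue/rtsFalse -> bool/true/false everywhere. A sketch of the before/after shape (the toy predicate is illustrative, not RTS code):

    #include <stdbool.h>   /* C99: bool, true, false */

    /* Before: the RTS carried its own boolean enum (deleted from
     * includes/rts/Types.h below). */
    typedef enum { rtsFalse = 0, rtsTrue } rtsBool;

    static rtsBool isEmptyOld (int n) { return (n == 0) ? rtsTrue : rtsFalse; }

    /* After: a real bool; a comparison already has the right type, so
     * the ?: coercion disappears. */
    static bool isEmpty (int n) { return n == 0; }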
-rw-r--r-- compiler/main/DriverPipeline.hs | 6
-rw-r--r-- docs/storage-mgt/ldv.tex | 4
-rw-r--r-- docs/storage-mgt/rp.tex | 2
-rw-r--r-- docs/storage-mgt/sm.tex | 16
-rw-r--r-- ghc/hschooks.c | 2
-rw-r--r-- includes/Cmm.h | 5
-rw-r--r-- includes/rts/Flags.h | 76
-rw-r--r-- includes/rts/Hpc.h | 2
-rw-r--r-- includes/rts/OSThreads.h | 10
-rw-r--r-- includes/rts/Threads.h | 2
-rw-r--r-- includes/rts/Types.h | 8
-rw-r--r-- includes/rts/storage/ClosureMacros.h | 10
-rw-r--r-- includes/rts/storage/GC.h | 4
-rw-r--r-- rts/AwaitEvent.h | 6
-rw-r--r-- rts/Capability.c | 74
-rw-r--r-- rts/Capability.h | 30
-rw-r--r-- rts/CheckUnload.c | 32
-rw-r--r-- rts/Hpc.c | 6
-rw-r--r-- rts/Interpreter.c | 8
-rw-r--r-- rts/Linker.c | 10
-rw-r--r-- rts/LinkerInternals.h | 2
-rw-r--r-- rts/Messages.c | 2
-rw-r--r-- rts/Printer.c | 8
-rw-r--r-- rts/ProfHeap.c | 76
-rw-r--r-- rts/ProfHeap.h | 4
-rw-r--r-- rts/Profiling.c | 28
-rw-r--r-- rts/Proftimer.c | 18
-rw-r--r-- rts/Proftimer.h | 2
-rw-r--r-- rts/RaiseAsync.c | 22
-rw-r--r-- rts/RaiseAsync.h | 4
-rw-r--r-- rts/RetainerProfile.c | 28
-rw-r--r-- rts/RetainerSet.c | 2
-rw-r--r-- rts/RetainerSet.h | 16
-rw-r--r-- rts/RtsAPI.c | 2
-rw-r--r-- rts/RtsDllMain.c | 2
-rw-r--r-- rts/RtsFlags.c | 259
-rw-r--r-- rts/RtsSignals.h | 2
-rw-r--r-- rts/RtsStartup.c | 12
-rw-r--r-- rts/RtsUtils.c | 4
-rw-r--r-- rts/RtsUtils.h | 2
-rw-r--r-- rts/STM.c | 119
-rw-r--r-- rts/STM.h | 2
-rw-r--r-- rts/Schedule.c | 154
-rw-r--r-- rts/Schedule.h | 10
-rw-r--r-- rts/Sparks.h | 8
-rw-r--r-- rts/Stable.c | 2
-rw-r--r-- rts/Stable.h | 2
-rw-r--r-- rts/Stats.c | 34
-rw-r--r-- rts/Task.c | 20
-rw-r--r-- rts/Task.h | 12
-rw-r--r-- rts/ThreadPaused.c | 10
-rw-r--r-- rts/Threads.c | 26
-rw-r--r-- rts/Threads.h | 8
-rw-r--r-- rts/Ticker.h | 2
-rw-r--r-- rts/Ticky.c | 6
-rw-r--r-- rts/Timer.c | 2
-rw-r--r-- rts/Timer.h | 2
-rw-r--r-- rts/Trace.c | 2
-rw-r--r-- rts/WSDeque.c | 6
-rw-r--r-- rts/WSDeque.h | 6
-rw-r--r-- rts/Weak.c | 8
-rw-r--r-- rts/Weak.h | 2
-rw-r--r-- rts/hooks/OutOfHeap.c | 2
-rw-r--r-- rts/hooks/StackOverflow.c | 2
-rw-r--r-- rts/linker/Elf.c | 21
-rw-r--r-- rts/linker/LoadArchive.c | 2
-rw-r--r-- rts/linker/SymbolExtras.c | 2
-rw-r--r-- rts/posix/OSMem.c | 12
-rw-r--r-- rts/posix/OSThreads.c | 10
-rw-r--r-- rts/posix/Select.c | 14
-rw-r--r-- rts/posix/Signals.c | 10
-rw-r--r-- rts/posix/Signals.h | 2
-rw-r--r-- rts/posix/itimer/Pthread.c | 2
-rw-r--r-- rts/posix/itimer/Setitimer.c | 2
-rw-r--r-- rts/posix/itimer/TimerCreate.c | 2
-rw-r--r-- rts/sm/CNF.c | 102
-rw-r--r-- rts/sm/Evac.c | 38
-rw-r--r-- rts/sm/GC.c | 60
-rw-r--r-- rts/sm/GC.h | 14
-rw-r--r-- rts/sm/GCThread.h | 8
-rw-r--r-- rts/sm/GCUtils.c | 2
-rw-r--r-- rts/sm/GCUtils.h | 2
-rw-r--r-- rts/sm/MarkStack.h | 2
-rw-r--r-- rts/sm/MarkWeak.c | 42
-rw-r--r-- rts/sm/MarkWeak.h | 2
-rw-r--r-- rts/sm/OSMem.h | 4
-rw-r--r-- rts/sm/Sanity.c | 14
-rw-r--r-- rts/sm/Sanity.h | 6
-rw-r--r-- rts/sm/Scav.c | 160
-rw-r--r-- rts/sm/Storage.c | 28
-rw-r--r-- rts/sm/Storage.h | 8
-rw-r--r-- rts/win32/AsyncIO.c | 6
-rw-r--r-- rts/win32/AsyncIO.h | 4
-rw-r--r-- rts/win32/AwaitEvent.c | 2
-rw-r--r-- rts/win32/ConsoleHandler.c | 28
-rw-r--r-- rts/win32/ConsoleHandler.h | 2
-rw-r--r-- rts/win32/IOManager.c | 22
-rw-r--r-- rts/win32/IOManager.h | 2
-rw-r--r-- rts/win32/OSMem.c | 4
-rw-r--r-- rts/win32/OSThreads.c | 16
-rw-r--r-- rts/win32/ThrIOManager.c | 4
-rw-r--r-- rts/win32/Ticker.c | 2
-rw-r--r-- rts/win32/WorkQueue.c | 30
103 files changed, 954 insertions(+), 994 deletions(-)
diff --git a/compiler/main/DriverPipeline.hs b/compiler/main/DriverPipeline.hs
index 167f78e7f9..ea0c6eded1 100644
--- a/compiler/main/DriverPipeline.hs
+++ b/compiler/main/DriverPipeline.hs
@@ -1641,13 +1641,13 @@ mkExtraObjToLinkIntoBinary dflags = do
<> text (show (rtsOptsEnabled dflags)) <> semi,
text " __conf.rts_opts_suggestions = "
<> text (if rtsOptsSuggestions dflags
- then "rtsTrue"
- else "rtsFalse") <> semi,
+ then "true"
+ else "false") <> semi,
case rtsOpts dflags of
Nothing -> Outputable.empty
Just opts -> text " __conf.rts_opts= " <>
text (show opts) <> semi,
- text " __conf.rts_hs_main = rtsTrue;",
+ text " __conf.rts_hs_main = true;",
text " return hs_main(argc,argv,&ZCMain_main_closure,__conf);",
char '}',
char '\n' -- final newline, to keep gcc happy
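For context, the Haskell above emits a small C stub that boots the RTS. Reconstructed from the fragments in this hunk (a sketch; configuration fields not shown in the hunk are elided):

    #include "Rts.h"

    extern StgClosure ZCMain_main_closure;

    int main (int argc, char *argv[])
    {
        RtsConfig __conf = defaultRtsConfig;
        __conf.rts_opts_suggestions = true;   /* formerly "rtsTrue" */
        __conf.rts_hs_main = true;            /* formerly "rtsTrue" */
        return hs_main(argc, argv, &ZCMain_main_closure, __conf);
    }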
diff --git a/docs/storage-mgt/ldv.tex b/docs/storage-mgt/ldv.tex
index 897b621aa1..ab2c8b9532 100644
--- a/docs/storage-mgt/ldv.tex
+++ b/docs/storage-mgt/ldv.tex
@@ -244,7 +244,7 @@ If retainer profiling is being performed, @ldvTime@ is equal to $0$,
and @LDV_recordUse()@ causes no side effect.\footnote{Due to this
interference with LDVU profiling, retainer profiling slows down a bit;
for instance, checking @ldvTime@ against $0$ in the above example
-would always evaluate to @rtsFalse@ during retainer profiling.
+would always evaluate to @false@ during retainer profiling.
However, this is the price to be paid for our decision not to employ a
separate field for LDVU profiling.}
@@ -646,7 +646,7 @@ with LDVU profiling.
\begin{description}
\item[GC.c] invokes @LdvCensusForDead()@ before tidying up, sets @hasBeenAnyGC@ to
- @rtsTrue@, and changes @copy()@ and @copyPart()@.
+ @true@, and changes @copy()@ and @copyPart()@.
Invokes @LDV_recordDead()@ and @LDV_recordDead_FILL_SLOP_DYNAMIC()@.
\item[Itimer.c] changes @handle_tick()@.
\item[LdvProfile.c] implements the LDVU profiling engine.
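The footnote's check, in miniature (a sketch following the document's names, which describe an older LDV implementation; c is some closure being used):

    #include "Rts.h"

    static void use (StgClosure *c)
    {
        if (ldvTime > 0) {       /* always false during retainer profiling */
            LDV_recordUse(c);    /* LDVU bookkeeping only when LDV profiling */
        }
    }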
diff --git a/docs/storage-mgt/rp.tex b/docs/storage-mgt/rp.tex
index 96b0875120..2f83532893 100644
--- a/docs/storage-mgt/rp.tex
+++ b/docs/storage-mgt/rp.tex
@@ -508,7 +508,7 @@ set is created. Otherwise, a new retainer set is created.
\item[@retainerSet *addElement(retainer r, retainerSet *rs)@] returns a retainer set
@rs@ augmented with @r@. If such a retainer set already exists, no new retainer set
is created. Otherwise, a new retainer set is created.
-\item[@rtsBool isMember(retainer r, retainerSet *rs)@] returns a boolean value
+\item[@bool isMember(retainer r, retainerSet *rs)@] returns a boolean value
indicating whether @r@ is a member of @rs@.
\item[@void printRetainerSetShort(FILE *, retainerSet *)@] prints a single retainer
set.
diff --git a/docs/storage-mgt/sm.tex b/docs/storage-mgt/sm.tex
index 9dee565c7d..679a4c1915 100644
--- a/docs/storage-mgt/sm.tex
+++ b/docs/storage-mgt/sm.tex
@@ -287,8 +287,8 @@ and returns it to the storage manager.
A macro in @include/StgStorage.h@.
\item[@ExtendNursery(hp, hplim)@] closes the current allocation area and
tries to find a new allocation area in the nursery.
-If it succeeds, it sets @hp@ and @hplim@ appropriately and returns @rtsTrue@;
-otherwise, it returns @rtsFalse@,
+If it succeeds, it sets @hp@ and @hplim@ appropriately and returns @true@;
+otherwise, it returns @false@,
which means that the nursery has been exhausted.
The new allocation area is not necessarily contiguous with the old one.
A macro in @Storage.h@.
@@ -477,7 +477,7 @@ collector makes an efficient use of heap memory.
\item[@void *mark\_root(StgClosure **root)@] informs the garbage collector
that @*root@ is an object in the root set. It replaces @*root@ by
the new location of the object. @GC.c@.
-\item[@void GarbageCollect(void (*get\_roots)(evac\_fn), rtsBool force\_major\_gc)@]
+\item[@void GarbageCollect(void (*get\_roots)(evac\_fn), bool force\_major\_gc)@]
performs a garbage collection.
@get_roots()@ is a function which is called by the garbage collector when
it wishes to find all the objects in the root set (other than those
@@ -487,9 +487,9 @@ Therefore it is incumbent on the caller to find the root set.
or not. If a major garbage collection is not required, the garbage collector
decides an oldest generation $g$ to garbage collect on its own.
@GC.c@.
-\item[@rtsBool doYouWantToGC(void)@] returns @rtsTrue@ if the garbage
+\item[@bool doYouWantToGC(void)@] returns @true@ if the garbage
collector is ready to perform a garbage collection. Specifically, it returns
-@rtsTrue@ if the number of allocated blocks since the last garbage collection
+@true@ if the number of allocated blocks since the last garbage collection
(@alloc_blocks@ in @Storage.c@) exceeds an approximate limit
(@alloc_blocks_lim@ in @Storage.c@).
@Storage.h@.
@@ -700,11 +700,11 @@ The overall structure of a garbage collection is as follows:
During initialization, the garbage collector first decides which generation
to garbage collect.
Specifically,
-if the argument @force_major_gc@ to @GarbageCollect()@ is @rtsFalse@,
+if the argument @force_major_gc@ to @GarbageCollect()@ is @false@,
it decides the greatest generation number $N$ such
that the number of blocks allocated in step $0$ of generation $N$ exceeds
@generations[@$N$@].max_blocks@.
-If the argument @force_major_gc@ to @GarbageCollect()@ is @rtsTrue@,
+If the argument @force_major_gc@ to @GarbageCollect()@ is @true@,
$N$ is set to the greatest generation number, namely,
$@RtsFlags.GcFlags.generations@ - 1$.
The garbage collector considers up to generation $N$ for garbage collection.
@@ -805,7 +805,7 @@ The rationale is that the contents of @r@ cannot be updated any more,
and thus @r@ is always survived by @o@; @o@ is live as long as @r@ is.
Therefore, we wish @r@ to be evacuated to the same generation $M$ as @r@
currently resides (not to its next step).
-If the evacuation succeeds (indicated by a @rtsFalse@ value of a variable
+If the evacuation succeeds (indicated by a @false@ value of a variable
@failed_to_evac@, declared in @GC.c@) for every object @o@, @r@ is removed
from the list @mut_once_list@ because it does not hold any backward
inter-generational pointers.\footnote{It turns out that @r@ can have only
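The @doYouWantToGC()@ contract quoted above is small enough to sketch with the names the text gives (both counters are described as living in @Storage.c@; the types here are an assumption):

    #include <stdbool.h>
    #include <stdint.h>

    extern uint32_t alloc_blocks, alloc_blocks_lim;   /* per the text above */

    bool doYouWantToGC (void)
    {
        /* "exceeds an approximate limit" */
        return alloc_blocks >= alloc_blocks_lim;
    }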
diff --git a/ghc/hschooks.c b/ghc/hschooks.c
index 9b1be56db2..c74830dec3 100644
--- a/ghc/hschooks.c
+++ b/ghc/hschooks.c
@@ -40,7 +40,7 @@ defaultsHook (void)
// This helps particularly with large compiles, but didn't work
// very well with earlier GHCs because it caused large amounts of
// fragmentation. See rts/sm/BlockAlloc.c:allocLargeChunk().
- RtsFlags.GcFlags.heapSizeSuggestionAuto = rtsTrue;
+ RtsFlags.GcFlags.heapSizeSuggestionAuto = true;
RtsFlags.GcFlags.maxStkSize = 512*1024*1024 / sizeof(W_);
diff --git a/includes/Cmm.h b/includes/Cmm.h
index 9b7a4e4608..0623c3eeb6 100644
--- a/includes/Cmm.h
+++ b/includes/Cmm.h
@@ -50,6 +50,7 @@
CInt has the same size as an int in C on this platform
CLong has the same size as a long in C on this platform
+ CBool has the same size as a bool in C on this platform
--------------------------------------------------------------------------- */
@@ -95,6 +96,8 @@
#error Unknown long size
#endif
+#define CBool bits8
+
#define F_ float32
#define D_ float64
#define L_ bits64
@@ -229,7 +232,7 @@
* Note the syntax is slightly different to the C version of this macro.
*/
#ifdef DEBUG
-#define IF_DEBUG(c,s) if (RtsFlags_DebugFlags_##c(RtsFlags) != 0::I32) { s; }
+#define IF_DEBUG(c,s) if (RtsFlags_DebugFlags_##c(RtsFlags) != 0::CBool) { s; }
#else
#define IF_DEBUG(c,s) /* nothing */
#endif
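The new @CBool@ is bits8, which is sound only if the C bool occupies one byte. That holds on the ABIs GHC targets, though C99 itself does not promise it; a C99-clean compile-time check of the assumption (not part of the patch) costs nothing:

    #include <stdbool.h>

    /* Fails to compile (negative array size) if bool is ever wider
     * than the bits8 chosen for CBool. */
    typedef char cbool_is_one_byte[sizeof(bool) == 1 ? 1 : -1];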
diff --git a/includes/rts/Flags.h b/includes/rts/Flags.h
index c66aed90a3..21ff2ab1c5 100644
--- a/includes/rts/Flags.h
+++ b/includes/rts/Flags.h
@@ -45,22 +45,22 @@ typedef struct _GC_FLAGS {
uint32_t nurseryChunkSize; /* in *blocks* */
uint32_t minOldGenSize; /* in *blocks* */
uint32_t heapSizeSuggestion; /* in *blocks* */
- rtsBool heapSizeSuggestionAuto;
+ bool heapSizeSuggestionAuto;
double oldGenFactor;
double pcFreeHeap;
uint32_t generations;
- rtsBool squeezeUpdFrames;
+ bool squeezeUpdFrames;
- rtsBool compact; /* True <=> "compact all the time" */
+ bool compact; /* True <=> "compact all the time" */
double compactThreshold;
- rtsBool sweep; /* use "mostly mark-sweep" instead of copying
+ bool sweep; /* use "mostly mark-sweep" instead of copying
* for the oldest generation */
- rtsBool ringBell;
+ bool ringBell;
Time idleGCDelayTime; /* units: TIME_RESOLUTION */
- rtsBool doIdleGC;
+ bool doIdleGC;
StgWord heapBase; /* address to ask the OS for memory */
@@ -72,29 +72,29 @@ typedef struct _GC_FLAGS {
* raise it again.
*/
- rtsBool numa; /* Use NUMA */
+ bool numa; /* Use NUMA */
StgWord numaMask;
} GC_FLAGS;
/* See Note [Synchronization of flags and base APIs] */
typedef struct _DEBUG_FLAGS {
/* flags to control debugging output & extra checking in various subsystems */
- rtsBool scheduler; /* 's' */
- rtsBool interpreter; /* 'i' */
- rtsBool weak; /* 'w' */
- rtsBool gccafs; /* 'G' */
- rtsBool gc; /* 'g' */
- rtsBool block_alloc; /* 'b' */
- rtsBool sanity; /* 'S' warning: might be expensive! */
- rtsBool stable; /* 't' */
- rtsBool prof; /* 'p' */
- rtsBool linker; /* 'l' the object linker */
- rtsBool apply; /* 'a' */
- rtsBool stm; /* 'm' */
- rtsBool squeeze; /* 'z' stack squeezing & lazy blackholing */
- rtsBool hpc; /* 'c' coverage */
- rtsBool sparks; /* 'r' */
- rtsBool numa; /* '--debug-numa' */
+ bool scheduler; /* 's' */
+ bool interpreter; /* 'i' */
+ bool weak; /* 'w' */
+ bool gccafs; /* 'G' */
+ bool gc; /* 'g' */
+ bool block_alloc; /* 'b' */
+ bool sanity; /* 'S' warning: might be expensive! */
+ bool stable; /* 't' */
+ bool prof; /* 'p' */
+ bool linker; /* 'l' the object linker */
+ bool apply; /* 'a' */
+ bool stm; /* 'm' */
+ bool squeeze; /* 'z' stack squeezing & lazy blackholing */
+ bool hpc; /* 'c' coverage */
+ bool sparks; /* 'r' */
+ bool numa; /* '--debug-numa' */
} DEBUG_FLAGS;
/* See Note [Synchronization of flags and base APIs] */
@@ -125,10 +125,10 @@ typedef struct _PROFILING_FLAGS {
Time heapProfileInterval; /* time between samples */
uint32_t heapProfileIntervalTicks; /* ticks between samples (derived) */
- rtsBool includeTSOs;
+ bool includeTSOs;
- rtsBool showCCSOnException;
+ bool showCCSOnException;
uint32_t maxRetainerSetSize;
@@ -151,12 +151,12 @@ typedef struct _PROFILING_FLAGS {
/* See Note [Synchronization of flags and base APIs] */
typedef struct _TRACE_FLAGS {
int tracing;
- rtsBool timestamp; /* show timestamp in stderr output */
- rtsBool scheduler; /* trace scheduler events */
- rtsBool gc; /* trace GC events */
- rtsBool sparks_sampled; /* trace spark events by a sampled method */
- rtsBool sparks_full; /* trace spark events 100% accurately */
- rtsBool user; /* trace user events (emitted from Haskell code) */
+ bool timestamp; /* show timestamp in stderr output */
+ bool scheduler; /* trace scheduler events */
+ bool gc; /* trace GC events */
+ bool sparks_sampled; /* trace spark events by a sampled method */
+ bool sparks_full; /* trace spark events 100% accurately */
+ bool user; /* trace user events (emitted from Haskell code) */
} TRACE_FLAGS;
/* See Note [Synchronization of flags and base APIs] */
@@ -177,8 +177,8 @@ typedef struct _CONCURRENT_FLAGS {
/* See Note [Synchronization of flags and base APIs] */
typedef struct _MISC_FLAGS {
Time tickInterval; /* units: TIME_RESOLUTION */
- rtsBool install_signal_handlers;
- rtsBool machineReadable;
+ bool install_signal_handlers;
+ bool machineReadable;
StgWord linkerMemBase; /* address to ask the OS for memory
* for the linker, NULL ==> off */
} MISC_FLAGS;
@@ -186,12 +186,12 @@ typedef struct _MISC_FLAGS {
/* See Note [Synchronization of flags and base APIs] */
typedef struct _PAR_FLAGS {
uint32_t nCapabilities; /* number of threads to run simultaneously */
- rtsBool migrate; /* migrate threads between capabilities */
+ bool migrate; /* migrate threads between capabilities */
uint32_t maxLocalSparks;
- rtsBool parGcEnabled; /* enable parallel GC */
+ bool parGcEnabled; /* enable parallel GC */
uint32_t parGcGen; /* do parallel GC in this generation
* and higher only */
- rtsBool parGcLoadBalancingEnabled;
+ bool parGcLoadBalancingEnabled;
/* enable load-balancing in the
* parallel GC */
uint32_t parGcLoadBalancingGen;
@@ -209,12 +209,12 @@ typedef struct _PAR_FLAGS {
/* Use this many threads for parallel
* GC (default: use all nNodes). */
- rtsBool setAffinity; /* force thread affinity with CPUs */
+ bool setAffinity; /* force thread affinity with CPUs */
} PAR_FLAGS;
/* See Note [Synchronization of flags and base APIs] */
typedef struct _TICKY_FLAGS {
- rtsBool showTickyStats;
+ bool showTickyStats;
FILE *tickyFile;
} TICKY_FLAGS;
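With the flag fields now plain bool, C-side tests read directly off the struct, mirroring the Cmm IF_DEBUG macro earlier in the patch. A sketch:

    #include "Rts.h"

    static void traceIfScheduler (void)
    {
        if (RtsFlags.DebugFlags.scheduler) {   /* 's' */
            debugBelch("scheduler: tick\n");
        }
    }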
diff --git a/includes/rts/Hpc.h b/includes/rts/Hpc.h
index 80ad47eadf..d70d1db829 100644
--- a/includes/rts/Hpc.h
+++ b/includes/rts/Hpc.h
@@ -20,7 +20,7 @@ typedef struct _HpcModuleInfo {
StgWord32 tickCount; // number of ticks
StgWord32 hashNo; // Hash number for this module's mix info
StgWord64 *tixArr; // tix Array; local for this module
- rtsBool from_file; // data was read from the .tix file
+ bool from_file; // data was read from the .tix file
struct _HpcModuleInfo *next;
} HpcModuleInfo;
diff --git a/includes/rts/OSThreads.h b/includes/rts/OSThreads.h
index efd24066b6..2ebbd1e0f1 100644
--- a/includes/rts/OSThreads.h
+++ b/includes/rts/OSThreads.h
@@ -171,17 +171,17 @@ typedef void OSThreadProcAttr OSThreadProc(void *);
extern int createOSThread ( OSThreadId* tid, char *name,
OSThreadProc *startProc, void *param);
-extern rtsBool osThreadIsAlive ( OSThreadId id );
-extern void interruptOSThread (OSThreadId id);
+extern bool osThreadIsAlive ( OSThreadId id );
+extern void interruptOSThread (OSThreadId id);
//
// Condition Variables
//
extern void initCondition ( Condition* pCond );
extern void closeCondition ( Condition* pCond );
-extern rtsBool broadcastCondition ( Condition* pCond );
-extern rtsBool signalCondition ( Condition* pCond );
-extern rtsBool waitCondition ( Condition* pCond, Mutex* pMut );
+extern bool broadcastCondition ( Condition* pCond );
+extern bool signalCondition ( Condition* pCond );
+extern bool waitCondition ( Condition* pCond, Mutex* pMut );
//
// Mutexes
diff --git a/includes/rts/Threads.h b/includes/rts/Threads.h
index 866c4692bd..f45d351c2b 100644
--- a/includes/rts/Threads.h
+++ b/includes/rts/Threads.h
@@ -36,7 +36,7 @@ StgTSO *createStrictIOThread (Capability *cap, W_ stack_size,
StgClosure *closure);
// Suspending/resuming threads around foreign calls
-void * suspendThread (StgRegTable *, rtsBool interruptible);
+void * suspendThread (StgRegTable *, bool interruptible);
StgRegTable * resumeThread (void *);
//
diff --git a/includes/rts/Types.h b/includes/rts/Types.h
index 08baceb1cf..4b0e680ae5 100644
--- a/includes/rts/Types.h
+++ b/includes/rts/Types.h
@@ -15,21 +15,17 @@
#define RTS_TYPES_H
#include <stddef.h>
+#include <stdbool.h>
// Deprecated, use uint32_t instead.
typedef unsigned int nat __attribute__((deprecated)); /* uint32_t */
/* ullong (64|128-bit) type: only include if needed (not ANSI) */
-#if defined(__GNUC__)
+#if defined(__GNUC__)
#define LL(x) (x##LL)
#else
#define LL(x) (x##L)
#endif
-
-typedef enum {
- rtsFalse = 0,
- rtsTrue
-} rtsBool;
typedef struct StgClosure_ StgClosure;
typedef struct StgInfoTable_ StgInfoTable;
diff --git a/includes/rts/storage/ClosureMacros.h b/includes/rts/storage/ClosureMacros.h
index e485246206..f5ca5cd850 100644
--- a/includes/rts/storage/ClosureMacros.h
+++ b/includes/rts/storage/ClosureMacros.h
@@ -258,18 +258,18 @@ TAG_CLOSURE(StgWord tag,StgClosure * p)
make sense...
-------------------------------------------------------------------------- */
-INLINE_HEADER rtsBool LOOKS_LIKE_INFO_PTR_NOT_NULL (StgWord p)
+INLINE_HEADER bool LOOKS_LIKE_INFO_PTR_NOT_NULL (StgWord p)
{
StgInfoTable *info = INFO_PTR_TO_STRUCT((StgInfoTable *)p);
- return (info->type != INVALID_OBJECT && info->type < N_CLOSURE_TYPES) ? rtsTrue : rtsFalse;
+ return info->type != INVALID_OBJECT && info->type < N_CLOSURE_TYPES;
}
-INLINE_HEADER rtsBool LOOKS_LIKE_INFO_PTR (StgWord p)
+INLINE_HEADER bool LOOKS_LIKE_INFO_PTR (StgWord p)
{
- return (p && (IS_FORWARDING_PTR(p) || LOOKS_LIKE_INFO_PTR_NOT_NULL(p))) ? rtsTrue : rtsFalse;
+ return p && (IS_FORWARDING_PTR(p) || LOOKS_LIKE_INFO_PTR_NOT_NULL(p));
}
-INLINE_HEADER rtsBool LOOKS_LIKE_CLOSURE_PTR (const void *p)
+INLINE_HEADER bool LOOKS_LIKE_CLOSURE_PTR (const void *p)
{
return LOOKS_LIKE_INFO_PTR((StgWord)
(UNTAG_CONST_CLOSURE((const StgClosure *)(p)))->header.info);
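The simplification in this hunk recurs throughout the patch: once the return type is a genuine bool, (expr) ? rtsTrue : rtsFalse is just expr. In miniature (a toy stand-in for the macro's test, not RTS code):

    #include <stdbool.h>

    /* Before: return (type != 0 && type < n_types) ? rtsTrue : rtsFalse;
     * After: the comparison already has the right type. */
    static bool looks_valid (int type, int n_types)
    {
        return type != 0 && type < n_types;
    }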
diff --git a/includes/rts/storage/GC.h b/includes/rts/storage/GC.h
index 6dc483f1ff..f15fd2a7cf 100644
--- a/includes/rts/storage/GC.h
+++ b/includes/rts/storage/GC.h
@@ -248,7 +248,7 @@ typedef struct _GCStats {
StgDouble wall_seconds;
} GCStats;
void getGCStats (GCStats *s);
-rtsBool getGCStatsEnabled (void);
+bool getGCStatsEnabled (void);
// These don't change over execution, so do them elsewhere
// StgDouble init_cpu_seconds;
@@ -288,7 +288,7 @@ void dirty_MUT_VAR(StgRegTable *reg, StgClosure *p);
/* set to disable CAF garbage collection in GHCi. */
/* (needed when dynamic libraries are used). */
-extern rtsBool keepCAFs;
+extern bool keepCAFs;
INLINE_HEADER void initBdescr(bdescr *bd, generation *gen, generation *dest)
{
diff --git a/rts/AwaitEvent.h b/rts/AwaitEvent.h
index ecc13b8ff2..f7c504b16b 100644
--- a/rts/AwaitEvent.h
+++ b/rts/AwaitEvent.h
@@ -10,15 +10,15 @@
#define AWAITEVENT_H
#if !defined(THREADED_RTS)
-/* awaitEvent(rtsBool wait)
+/* awaitEvent(bool wait)
*
* Checks for blocked threads that need to be woken.
*
* Called from STG : NO
* Locks assumed : sched_mutex
*/
-RTS_PRIVATE void awaitEvent(rtsBool wait); /* In posix/Select.c or
- * win32/AwaitEvent.c */
+RTS_PRIVATE void awaitEvent(bool wait); /* In posix/Select.c or
+ * win32/AwaitEvent.c */
#endif
#endif /* AWAITEVENT_H */
diff --git a/rts/Capability.c b/rts/Capability.c
index 6ca34f8660..e7907ef203 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -82,7 +82,7 @@ Capability * rts_unsafeGetMyCapability (void)
}
#if defined(THREADED_RTS)
-STATIC_INLINE rtsBool
+STATIC_INLINE bool
globalWorkToDo (void)
{
return sched_state >= SCHED_INTERRUPTING
@@ -96,7 +96,7 @@ findSpark (Capability *cap)
{
Capability *robbed;
StgClosurePtr spark;
- rtsBool retry;
+ bool retry;
uint32_t i = 0;
if (!emptyRunQueue(cap) || cap->n_returning_tasks != 0) {
@@ -107,7 +107,7 @@ findSpark (Capability *cap)
}
do {
- retry = rtsFalse;
+ retry = false;
// first try to get a spark from our own pool.
// We should be using reclaimSpark(), because it works without
@@ -130,7 +130,7 @@ findSpark (Capability *cap)
return spark;
}
if (!emptySparkPoolCap(cap)) {
- retry = rtsTrue;
+ retry = true;
}
if (n_capabilities == 1) { return NULL; } // makes no sense...
@@ -158,7 +158,7 @@ findSpark (Capability *cap)
if (spark == NULL && !emptySparkPoolCap(robbed)) {
// we conflicted with another thread while trying to steal;
// try again later.
- retry = rtsTrue;
+ retry = true;
}
if (spark != NULL) {
@@ -179,17 +179,17 @@ findSpark (Capability *cap)
// The result is only valid for an instant, of course, so in a sense
// is immediately invalid, and should not be relied upon for
// correctness.
-rtsBool
+bool
anySparks (void)
{
uint32_t i;
for (i=0; i < n_capabilities; i++) {
if (!emptySparkPoolCap(capabilities[i])) {
- return rtsTrue;
+ return true;
}
}
- return rtsFalse;
+ return false;
}
#endif
@@ -247,9 +247,9 @@ initCapability (Capability *cap, uint32_t i)
cap->no = i;
cap->node = capNoToNumaNode(i);
- cap->in_haskell = rtsFalse;
+ cap->in_haskell = false;
cap->idle = 0;
- cap->disabled = rtsFalse;
+ cap->disabled = false;
cap->run_queue_hd = END_TSO_QUEUE;
cap->run_queue_tl = END_TSO_QUEUE;
@@ -482,8 +482,8 @@ giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
cap->no, task->incall->tso ? "bound task" : "worker",
serialisableTaskId(task));
ACQUIRE_LOCK(&task->lock);
- if (task->wakeup == rtsFalse) {
- task->wakeup = rtsTrue;
+ if (task->wakeup == false) {
+ task->wakeup = true;
// the wakeup flag is needed because signalCondition() doesn't
// flag the condition if the thread is already running, but we want
// it to be sticky.
@@ -503,7 +503,7 @@ giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap,
- rtsBool always_wakeup)
+ bool always_wakeup)
{
Task *task;
@@ -586,7 +586,7 @@ void
releaseCapability (Capability* cap USED_IF_THREADS)
{
ACQUIRE_LOCK(&cap->lock);
- releaseCapability_(cap, rtsFalse);
+ releaseCapability_(cap, false);
RELEASE_LOCK(&cap->lock);
}
@@ -594,7 +594,7 @@ void
releaseAndWakeupCapability (Capability* cap USED_IF_THREADS)
{
ACQUIRE_LOCK(&cap->lock);
- releaseCapability_(cap, rtsTrue);
+ releaseCapability_(cap, true);
RELEASE_LOCK(&cap->lock);
}
@@ -620,7 +620,7 @@ enqueueWorker (Capability* cap USED_IF_THREADS)
{
debugTrace(DEBUG_sched, "%d spare workers already, exiting",
cap->n_spare_workers);
- releaseCapability_(cap,rtsFalse);
+ releaseCapability_(cap,false);
// hold the lock until after workerTaskStop; c.f. scheduleWorker()
workerTaskStop(task);
RELEASE_LOCK(&cap->lock);
@@ -648,7 +648,7 @@ static Capability * waitForWorkerCapability (Task *task)
// task->lock held, cap->lock not held
if (!task->wakeup) waitCondition(&task->cond, &task->lock);
cap = task->cap;
- task->wakeup = rtsFalse;
+ task->wakeup = false;
RELEASE_LOCK(&task->lock);
debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);
@@ -713,7 +713,7 @@ static Capability * waitForReturnCapability (Task *task)
// task->lock held, cap->lock not held
if (!task->wakeup) waitCondition(&task->cond, &task->lock);
cap = task->cap;
- task->wakeup = rtsFalse;
+ task->wakeup = false;
RELEASE_LOCK(&task->lock);
// now check whether we should wake up...
@@ -843,9 +843,9 @@ void waitForCapability (Capability **pCap, Task *task)
#if defined (THREADED_RTS)
/* See Note [GC livelock] in Schedule.c for why we have gcAllowed
- and return the rtsBool */
-rtsBool /* Did we GC? */
-yieldCapability (Capability** pCap, Task *task, rtsBool gcAllowed)
+ and return the bool */
+bool /* Did we GC? */
+yieldCapability (Capability** pCap, Task *task, bool gcAllowed)
{
Capability *cap = *pCap;
@@ -861,7 +861,7 @@ yieldCapability (Capability** pCap, Task *task, rtsBool gcAllowed)
traceSparkCounters(cap);
// See Note [migrated bound threads 2]
if (task->cap == cap) {
- return rtsTrue;
+ return true;
}
}
}
@@ -870,7 +870,7 @@ yieldCapability (Capability** pCap, Task *task, rtsBool gcAllowed)
debugTrace(DEBUG_sched, "giving up capability %d", cap->no);
// We must now release the capability and wait to be woken up again.
- task->wakeup = rtsFalse;
+ task->wakeup = false;
ACQUIRE_LOCK(&cap->lock);
@@ -879,7 +879,7 @@ yieldCapability (Capability** pCap, Task *task, rtsBool gcAllowed)
enqueueWorker(cap);
}
- releaseCapability_(cap, rtsFalse);
+ releaseCapability_(cap, false);
if (isWorker(task) || isBoundTask(task)) {
RELEASE_LOCK(&cap->lock);
@@ -906,7 +906,7 @@ yieldCapability (Capability** pCap, Task *task, rtsBool gcAllowed)
ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
- return rtsFalse;
+ return false;
}
#endif /* THREADED_RTS */
@@ -954,7 +954,7 @@ prodCapability (Capability *cap, Task *task)
ACQUIRE_LOCK(&cap->lock);
if (!cap->running_task) {
cap->running_task = task;
- releaseCapability_(cap,rtsTrue);
+ releaseCapability_(cap,true);
}
RELEASE_LOCK(&cap->lock);
}
@@ -970,21 +970,21 @@ prodCapability (Capability *cap, Task *task)
#if defined (THREADED_RTS)
-rtsBool
+bool
tryGrabCapability (Capability *cap, Task *task)
{
int r;
- if (cap->running_task != NULL) return rtsFalse;
+ if (cap->running_task != NULL) return false;
r = TRY_ACQUIRE_LOCK(&cap->lock);
- if (r != 0) return rtsFalse;
+ if (r != 0) return false;
if (cap->running_task != NULL) {
RELEASE_LOCK(&cap->lock);
- return rtsFalse;
+ return false;
}
task->cap = cap;
cap->running_task = task;
RELEASE_LOCK(&cap->lock);
- return rtsTrue;
+ return true;
}
@@ -1008,7 +1008,7 @@ tryGrabCapability (Capability *cap, Task *task)
static void
shutdownCapability (Capability *cap USED_IF_THREADS,
Task *task USED_IF_THREADS,
- rtsBool safe USED_IF_THREADS)
+ bool safe USED_IF_THREADS)
{
#if defined(THREADED_RTS)
uint32_t i;
@@ -1062,7 +1062,7 @@ shutdownCapability (Capability *cap USED_IF_THREADS,
if (!emptyRunQueue(cap) || cap->spare_workers) {
debugTrace(DEBUG_sched,
"runnable threads or workers still alive, yielding");
- releaseCapability_(cap,rtsFalse); // this will wake up a worker
+ releaseCapability_(cap,false); // this will wake up a worker
RELEASE_LOCK(&cap->lock);
yieldThread();
continue;
@@ -1106,7 +1106,7 @@ shutdownCapability (Capability *cap USED_IF_THREADS,
}
void
-shutdownCapabilities(Task *task, rtsBool safe)
+shutdownCapabilities(Task *task, bool safe)
{
uint32_t i;
for (i=0; i < n_capabilities; i++) {
@@ -1157,7 +1157,7 @@ freeCapabilities (void)
void
markCapability (evac_fn evac, void *user, Capability *cap,
- rtsBool no_mark_sparks USED_IF_THREADS)
+ bool no_mark_sparks USED_IF_THREADS)
{
InCall *incall;
@@ -1191,12 +1191,12 @@ markCapabilities (evac_fn evac, void *user)
{
uint32_t n;
for (n = 0; n < n_capabilities; n++) {
- markCapability(evac, user, capabilities[n], rtsFalse);
+ markCapability(evac, user, capabilities[n], false);
}
}
#if defined(THREADED_RTS)
-rtsBool checkSparkCountInvariant (void)
+bool checkSparkCountInvariant (void)
{
SparkCounters sparks = { 0, 0, 0, 0, 0, 0 };
StgWord64 remaining = 0;
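The task->wakeup handling visible above implements a sticky wakeup: signalCondition() is lost if the target task is not yet waiting, so the flag records it. The same pattern reduced to plain pthreads (a sketch, not RTS code):

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        bool            wakeup;   /* sticky: survives a missed signal */
    } Waiter;

    void wake (Waiter *w)
    {
        pthread_mutex_lock(&w->lock);
        if (w->wakeup == false) {
            w->wakeup = true;               /* record the signal... */
            pthread_cond_signal(&w->cond);  /* ...and poke any sleeper */
        }
        pthread_mutex_unlock(&w->lock);
    }

    void wait_for_wake (Waiter *w)
    {
        pthread_mutex_lock(&w->lock);
        while (!w->wakeup)                  /* no wakeup is ever lost */
            pthread_cond_wait(&w->cond, &w->lock);
        w->wakeup = false;                  /* consume it, as the RTS does */
        pthread_mutex_unlock(&w->lock);
    }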
diff --git a/rts/Capability.h b/rts/Capability.h
index bbf026279f..bcc168dcbd 100644
--- a/rts/Capability.h
+++ b/rts/Capability.h
@@ -53,12 +53,12 @@ struct Capability_ {
// true if this Capability is running Haskell code, used for
// catching unsafe call-ins.
- rtsBool in_haskell;
+ bool in_haskell;
// Has there been any activity on this Capability since the last GC?
uint32_t idle;
- rtsBool disabled;
+ bool disabled;
// The run queue. The Task owning this Capability has exclusive
// access to its run queue, so can wake up threads without
@@ -204,7 +204,7 @@ struct Capability_ {
ASSERT_TASK_ID(task);
#if defined(THREADED_RTS)
-rtsBool checkSparkCountInvariant (void);
+bool checkSparkCountInvariant (void);
#endif
// Converts a *StgRegTable into a *Capability.
@@ -232,14 +232,14 @@ void moreCapabilities (uint32_t from, uint32_t to);
#if defined(THREADED_RTS)
void releaseCapability (Capability* cap);
void releaseAndWakeupCapability (Capability* cap);
-void releaseCapability_ (Capability* cap, rtsBool always_wakeup);
+void releaseCapability_ (Capability* cap, bool always_wakeup);
// assumes cap->lock is held
#else
// releaseCapability() is empty in non-threaded RTS
INLINE_HEADER void releaseCapability (Capability* cap STG_UNUSED) {};
INLINE_HEADER void releaseAndWakeupCapability (Capability* cap STG_UNUSED) {};
INLINE_HEADER void releaseCapability_ (Capability* cap STG_UNUSED,
- rtsBool always_wakeup STG_UNUSED) {};
+ bool always_wakeup STG_UNUSED) {};
#endif
// declared in includes/rts/Threads.h:
@@ -266,7 +266,7 @@ typedef enum {
//
typedef struct {
SyncType type; // The kind of synchronisation
- rtsBool *idle;
+ bool *idle;
Task *task; // The Task performing the sync
} PendingSync;
@@ -306,7 +306,7 @@ EXTERN_INLINE void recordClosureMutated (Capability *cap, StgClosure *p);
// On return: *pCap is NULL if the capability was released. The
// current task should then re-acquire it using waitForCapability().
//
-rtsBool yieldCapability (Capability** pCap, Task *task, rtsBool gcAllowed);
+bool yieldCapability (Capability** pCap, Task *task, bool gcAllowed);
// Wakes up a worker thread on just one Capability, used when we
// need to service some global event.
@@ -320,7 +320,7 @@ void prodAllCapabilities (void);
// Attempt to gain control of a Capability if it is free.
//
-rtsBool tryGrabCapability (Capability *cap, Task *task);
+bool tryGrabCapability (Capability *cap, Task *task);
// Try to find a spark to run
//
@@ -328,9 +328,9 @@ StgClosure *findSpark (Capability *cap);
// True if any capabilities have sparks
//
-rtsBool anySparks (void);
+bool anySparks (void);
-INLINE_HEADER rtsBool emptySparkPoolCap (Capability *cap);
+INLINE_HEADER bool emptySparkPoolCap (Capability *cap);
INLINE_HEADER uint32_t sparkPoolSizeCap (Capability *cap);
INLINE_HEADER void discardSparksCap (Capability *cap);
@@ -345,7 +345,7 @@ extern void grabCapability (Capability **pCap);
// Shut down all capabilities.
//
-void shutdownCapabilities(Task *task, rtsBool wait_foreign);
+void shutdownCapabilities(Task *task, bool wait_foreign);
// cause all capabilities to context switch as soon as possible.
void contextSwitchAllCapabilities(void);
@@ -361,7 +361,7 @@ void freeCapabilities (void);
// For the GC:
void markCapability (evac_fn evac, void *user, Capability *cap,
- rtsBool no_mark_sparks USED_IF_THREADS);
+ bool no_mark_sparks USED_IF_THREADS);
void markCapabilities (evac_fn evac, void *user);
@@ -390,7 +390,7 @@ typedef struct PutMVar_ {
#ifdef THREADED_RTS
-INLINE_HEADER rtsBool emptyInbox(Capability *cap);
+INLINE_HEADER bool emptyInbox(Capability *cap);
#endif // THREADED_RTS
@@ -427,7 +427,7 @@ recordClosureMutated (Capability *cap, StgClosure *p)
#if defined(THREADED_RTS)
-INLINE_HEADER rtsBool
+INLINE_HEADER bool
emptySparkPoolCap (Capability *cap)
{ return looksEmpty(cap->sparks); }
@@ -467,7 +467,7 @@ contextSwitchCapability (Capability *cap)
#ifdef THREADED_RTS
-INLINE_HEADER rtsBool emptyInbox(Capability *cap)
+INLINE_HEADER bool emptyInbox(Capability *cap)
{
return (cap->inbox == (Message*)END_TSO_QUEUE &&
cap->putMVars == NULL);
diff --git a/rts/CheckUnload.c b/rts/CheckUnload.c
index 03e4e22a47..ac7f78b7f4 100644
--- a/rts/CheckUnload.c
+++ b/rts/CheckUnload.c
@@ -90,7 +90,7 @@ static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
StgPtr p;
const StgInfoTable *info;
uint32_t size;
- rtsBool prim;
+ bool prim;
for (; bd != NULL; bd = bd->link) {
@@ -102,7 +102,7 @@ static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
p = bd->start;
while (p < bd->free) {
info = get_itbl((StgClosure *)p);
- prim = rtsFalse;
+ prim = false;
switch (info->type) {
@@ -140,7 +140,7 @@ static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
case BLACKHOLE:
case BLOCKING_QUEUE:
- prim = rtsTrue;
+ prim = true;
size = sizeW_fromITBL(info);
break;
@@ -152,12 +152,12 @@ static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
// blackholes when it calls raiseAsync() on the
// resurrected threads. So we know that any IND will
// be the size of a BLACKHOLE.
- prim = rtsTrue;
+ prim = true;
size = BLACKHOLE_sizeW();
break;
case BCO:
- prim = rtsTrue;
+ prim = true;
size = bco_sizeW((StgBCO *)p);
break;
@@ -169,24 +169,24 @@ static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
case MUT_PRIM:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
- prim = rtsTrue;
+ prim = true;
size = sizeW_fromITBL(info);
break;
case AP:
- prim = rtsTrue;
+ prim = true;
size = ap_sizeW((StgAP *)p);
break;
case PAP:
- prim = rtsTrue;
+ prim = true;
size = pap_sizeW((StgPAP *)p);
break;
case AP_STACK:
{
StgAP_STACK *ap = (StgAP_STACK *)p;
- prim = rtsTrue;
+ prim = true;
size = ap_stack_sizeW(ap);
searchStackChunk(addrs, (StgPtr)ap->payload,
(StgPtr)ap->payload + ap->size);
@@ -194,7 +194,7 @@ static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
}
case ARR_WORDS:
- prim = rtsTrue;
+ prim = true;
size = arr_words_sizeW((StgArrBytes*)p);
break;
@@ -202,7 +202,7 @@ static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
- prim = rtsTrue;
+ prim = true;
size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
break;
@@ -210,18 +210,18 @@ static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
case SMALL_MUT_ARR_PTRS_DIRTY:
case SMALL_MUT_ARR_PTRS_FROZEN:
case SMALL_MUT_ARR_PTRS_FROZEN0:
- prim = rtsTrue;
+ prim = true;
size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
break;
case TSO:
- prim = rtsTrue;
+ prim = true;
size = sizeofW(StgTSO);
break;
case STACK: {
StgStack *stack = (StgStack*)p;
- prim = rtsTrue;
+ prim = true;
searchStackChunk(addrs, stack->sp,
stack->stack + stack->stack_size);
size = stack_sizeW(stack);
@@ -229,7 +229,7 @@ static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
}
case TREC_CHUNK:
- prim = rtsTrue;
+ prim = true;
size = sizeofW(StgTRecChunk);
break;
@@ -292,7 +292,7 @@ void checkUnload (StgClosure *static_objects)
for (oc = unloaded_objects; oc; oc = oc->next) {
IF_DEBUG(linker, debugBelch("Checking whether to unload %" PATH_FMT "\n",
oc->fileName));
- oc->referenced = rtsFalse;
+ oc->referenced = false;
}
addrs = allocHashTable();
diff --git a/rts/Hpc.c b/rts/Hpc.c
index 70bf57b396..f2cd2007dc 100644
--- a/rts/Hpc.c
+++ b/rts/Hpc.c
@@ -120,7 +120,7 @@ readTix(void) {
while(tix_ch != ']') {
tmpModule = (HpcModuleInfo *)stgMallocBytes(sizeof(HpcModuleInfo),
"Hpc.readTix");
- tmpModule->from_file = rtsTrue;
+ tmpModule->from_file = true;
expect('T');
expect('i');
expect('x');
@@ -279,7 +279,7 @@ hs_hpc_module(char *modName,
tixArr[i] = 0;
}
tmpModule->next = modules;
- tmpModule->from_file = rtsFalse;
+ tmpModule->from_file = false;
modules = tmpModule;
insertHashTable(moduleHash, (StgWord)modName, tmpModule);
}
@@ -308,7 +308,7 @@ hs_hpc_module(char *modName,
stgFree(tmpModule->modName);
stgFree(tmpModule->tixArr);
}
- tmpModule->from_file = rtsFalse;
+ tmpModule->from_file = false;
}
}
diff --git a/rts/Interpreter.c b/rts/Interpreter.c
index 5e796cd842..7221ff64f9 100644
--- a/rts/Interpreter.c
+++ b/rts/Interpreter.c
@@ -972,13 +972,13 @@ run_BCO:
// "rts_stop_next_breakpoint" flag is true OR if the
// breakpoint flag for this particular expression is
// true
- if (rts_stop_next_breakpoint == rtsTrue ||
+ if (rts_stop_next_breakpoint == true ||
((StgWord8*)breakPoints->payload)[arg2_array_index]
- == rtsTrue)
+ == true)
{
// make sure we don't automatically stop at the
// next breakpoint
- rts_stop_next_breakpoint = rtsFalse;
+ rts_stop_next_breakpoint = false;
// allocate memory for a new AP_STACK, enough to
// store the top stack frame plus an
@@ -1617,7 +1617,7 @@ run_BCO:
Sp[0] = (W_)&stg_ret_p_info;
SAVE_THREAD_STATE();
- tok = suspendThread(&cap->r, interruptible ? rtsTrue : rtsFalse);
+ tok = suspendThread(&cap->r, interruptible);
// We already made a copy of the arguments above.
ffi_call(cif, fn, ret, argptrs);
diff --git a/rts/Linker.c b/rts/Linker.c
index eba9821a3d..07d9e6c346 100644
--- a/rts/Linker.c
+++ b/rts/Linker.c
@@ -1192,7 +1192,7 @@ static void setOcInitialStatus(ObjectCode* oc) {
ObjectCode*
mkOc( pathchar *path, char *image, int imageSize,
- rtsBool mapped, char *archiveMemberName, int misalignment ) {
+ bool mapped, char *archiveMemberName, int misalignment ) {
ObjectCode* oc;
IF_DEBUG(linker, debugBelch("mkOc: start\n"));
@@ -1357,7 +1357,7 @@ preloadObjectFile (pathchar *path)
#endif /* RTS_LINKER_USE_MMAP */
- oc = mkOc(path, image, fileSize, rtsTrue, NULL, misalignment);
+ oc = mkOc(path, image, fileSize, true, NULL, misalignment);
return oc;
}
@@ -1573,7 +1573,7 @@ HsInt resolveObjs (void)
/* -----------------------------------------------------------------------------
* delete an object from the pool
*/
-static HsInt unloadObj_ (pathchar *path, rtsBool just_purge)
+static HsInt unloadObj_ (pathchar *path, bool just_purge)
{
ObjectCode *oc, *prev, *next;
HsBool unloadedAnyObj = HS_BOOL_FALSE;
@@ -1631,7 +1631,7 @@ static HsInt unloadObj_ (pathchar *path, rtsBool just_purge)
HsInt unloadObj (pathchar *path)
{
ACQUIRE_LOCK(&linker_mutex);
- HsInt r = unloadObj_(path, rtsFalse);
+ HsInt r = unloadObj_(path, false);
RELEASE_LOCK(&linker_mutex);
return r;
}
@@ -1639,7 +1639,7 @@ HsInt unloadObj (pathchar *path)
HsInt purgeObj (pathchar *path)
{
ACQUIRE_LOCK(&linker_mutex);
- HsInt r = unloadObj_(path, rtsTrue);
+ HsInt r = unloadObj_(path, true);
RELEASE_LOCK(&linker_mutex);
return r;
}
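Both entry points funnel into unloadObj_, with just_purge selecting the behaviour: purgeObj drops the object's symbols but keeps the code mapped (existing references stay valid), while unloadObj releases the object entirely. A sketch of a call sequence (POSIX pathchar == char assumed; the path is illustrative):

    #include "Rts.h"   /* loadObj / resolveObjs / purgeObj / unloadObj */

    static void reload_example (void)
    {
        loadObj("Foo.o");
        resolveObjs();
        purgeObj("Foo.o");    /* unloadObj_(path, true): symbols only */
        unloadObj("Foo.o");   /* unloadObj_(path, false): release object */
    }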
diff --git a/rts/LinkerInternals.h b/rts/LinkerInternals.h
index befd17bfcd..da20e3b278 100644
--- a/rts/LinkerInternals.h
+++ b/rts/LinkerInternals.h
@@ -264,7 +264,7 @@ extern /*Str*/HashTable *symhash;
HsInt isAlreadyLoaded( pathchar *path );
HsInt loadOc( ObjectCode* oc );
ObjectCode* mkOc( pathchar *path, char *image, int imageSize,
- rtsBool mapped, char *archiveMemberName,
+ bool mapped, char *archiveMemberName,
int misalignment
);
diff --git a/rts/Messages.c b/rts/Messages.c
index 2177dd11c3..0bea626173 100644
--- a/rts/Messages.c
+++ b/rts/Messages.c
@@ -46,7 +46,7 @@ void sendMessage(Capability *from_cap, Capability *to_cap, Message *msg)
if (to_cap->running_task == NULL) {
to_cap->running_task = myTask();
// precond for releaseCapability_()
- releaseCapability_(to_cap,rtsFalse);
+ releaseCapability_(to_cap,false);
} else {
interruptCapability(to_cap);
}
diff --git a/rts/Printer.c b/rts/Printer.c
index cbbddd6641..5d6e585eff 100644
--- a/rts/Printer.c
+++ b/rts/Printer.c
@@ -635,7 +635,7 @@ const char *lookupGHCName( void *addr )
* rubbish like the obj-splitting symbols
*/
-static rtsBool isReal( flagword flags STG_UNUSED, const char *name )
+static bool isReal( flagword flags STG_UNUSED, const char *name )
{
#if 0
/* ToDo: make this work on BFD */
@@ -643,15 +643,15 @@ static rtsBool isReal( flagword flags STG_UNUSED, const char *name )
if (tp == N_TEXT || tp == N_DATA) {
return (name[0] == '_' && name[1] != '_');
} else {
- return rtsFalse;
+ return false;
}
#else
if (*name == '\0' ||
(name[0] == 'g' && name[1] == 'c' && name[2] == 'c') ||
(name[0] == 'c' && name[1] == 'c' && name[2] == '.')) {
- return rtsFalse;
+ return false;
}
- return rtsTrue;
+ return true;
#endif
}
diff --git a/rts/ProfHeap.c b/rts/ProfHeap.c
index a494a1b5a2..d492b89b03 100644
--- a/rts/ProfHeap.c
+++ b/rts/ProfHeap.c
@@ -98,7 +98,7 @@ static void aggregateCensusInfo( void );
static void dumpCensus( Census *census );
-static rtsBool closureSatisfiesConstraints( const StgClosure* p );
+static bool closureSatisfiesConstraints( const StgClosure* p );
/* ----------------------------------------------------------------------------
* Find the "closure identity", which is a unique pointer representing
@@ -156,14 +156,14 @@ closureIdentity( const StgClosure *p )
* Profiling type predicates
* ----------------------------------------------------------------------- */
#ifdef PROFILING
-STATIC_INLINE rtsBool
+STATIC_INLINE bool
doingLDVProfiling( void )
{
return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
|| RtsFlags.ProfFlags.bioSelector != NULL);
}
-rtsBool
+bool
doingRetainerProfiling( void )
{
return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
@@ -283,7 +283,7 @@ nextEra( void )
if (era == max_era) {
errorBelch("Maximum number of censuses reached.");
- if (rtsConfig.rts_opts_suggestions == rtsTrue) {
+ if (rtsConfig.rts_opts_suggestions == true) {
if (rtsConfig.rts_opts_enabled == RtsOptsAll) {
errorBelch("Use `+RTS -i' to reduce censuses.");
} else {
@@ -361,7 +361,7 @@ void endProfiling( void )
#endif /* !PROFILING */
static void
-printSample(rtsBool beginSample, StgDouble sampleValue)
+printSample(bool beginSample, StgDouble sampleValue)
{
fprintf(hp_file, "%s %f\n",
(beginSample ? "BEGIN_SAMPLE" : "END_SAMPLE"),
@@ -448,8 +448,8 @@ initHeapProfiling(void)
fprintf(hp_file, "SAMPLE_UNIT \"seconds\"\n");
fprintf(hp_file, "VALUE_UNIT \"bytes\"\n");
- printSample(rtsTrue, 0);
- printSample(rtsFalse, 0);
+ printSample(true, 0);
+ printSample(false, 0);
#ifdef PROFILING
if (doingRetainerProfiling()) {
@@ -509,8 +509,8 @@ endHeapProfiling(void)
stgFree(censuses);
seconds = mut_user_time();
- printSample(rtsTrue, seconds);
- printSample(rtsFalse, seconds);
+ printSample(true, seconds);
+ printSample(false, seconds);
fclose(hp_file);
}
@@ -569,7 +569,7 @@ fprint_ccs(FILE *fp, CostCentreStack *ccs, uint32_t max_length)
fprintf(fp, "%s", buf);
}
-rtsBool
+bool
strMatchesSelector( const char* str, const char* sel )
{
const char* p;
@@ -582,14 +582,14 @@ strMatchesSelector( const char* str, const char* sel )
}
// Match if all of str used and have reached the end of a sel fragment.
if (*p == '\0' && (*sel == ',' || *sel == '\0'))
- return rtsTrue;
+ return true;
// No match. Advance sel to the start of the next elem.
while (*sel != ',' && *sel != '\0') sel++;
if (*sel == ',') sel++;
/* Run out of sel ?? */
- if (*sel == '\0') return rtsFalse;
+ if (*sel == '\0') return false;
}
}
@@ -599,31 +599,31 @@ strMatchesSelector( const char* str, const char* sel )
* Figure out whether a closure should be counted in this census, by
* testing against all the specified constraints.
* -------------------------------------------------------------------------- */
-static rtsBool
+static bool
closureSatisfiesConstraints( const StgClosure* p )
{
#if !defined(PROFILING)
(void)p; /* keep gcc -Wall happy */
- return rtsTrue;
+ return true;
#else
- rtsBool b;
+ bool b;
// The CCS has a selected field to indicate whether this closure is
// deselected by not being mentioned in the module, CC, or CCS
// selectors.
if (!p->header.prof.ccs->selected) {
- return rtsFalse;
+ return false;
}
if (RtsFlags.ProfFlags.descrSelector) {
b = strMatchesSelector( (GET_PROF_DESC(get_itbl((StgClosure *)p))),
RtsFlags.ProfFlags.descrSelector );
- if (!b) return rtsFalse;
+ if (!b) return false;
}
if (RtsFlags.ProfFlags.typeSelector) {
b = strMatchesSelector( (GET_PROF_TYPE(get_itbl((StgClosure *)p))),
RtsFlags.ProfFlags.typeSelector );
- if (!b) return rtsFalse;
+ if (!b) return false;
}
if (RtsFlags.ProfFlags.retainerSelector) {
RetainerSet *rs;
@@ -638,13 +638,13 @@ closureSatisfiesConstraints( const StgClosure* p )
for (i = 0; i < rs->num; i++) {
b = strMatchesSelector( rs->element[i]->cc->label,
RtsFlags.ProfFlags.retainerSelector );
- if (b) return rtsTrue;
+ if (b) return true;
}
}
}
- return rtsFalse;
+ return false;
}
- return rtsTrue;
+ return true;
#endif /* PROFILING */
}
@@ -766,7 +766,7 @@ dumpCensus( Census *census )
counter *ctr;
ssize_t count;
- printSample(rtsTrue, census->time);
+ printSample(true, census->time);
traceHeapProfSampleBegin(era);
#ifdef PROFILING
@@ -781,7 +781,7 @@ dumpCensus( Census *census )
(unsigned long)(census->prim) * sizeof(W_));
fprintf(hp_file, "DRAG\t%lu\n",
(unsigned long)(census->drag_total) * sizeof(W_));
- printSample(rtsFalse, census->time);
+ printSample(false, census->time);
return;
}
#endif
@@ -865,12 +865,12 @@ dumpCensus( Census *census )
fprintf(hp_file, "\t%" FMT_Word "\n", (W_)count * sizeof(W_));
}
- printSample(rtsFalse, census->time);
+ printSample(false, census->time);
}
static void heapProfObject(Census *census, StgClosure *p, size_t size,
- rtsBool prim
+ bool prim
#ifndef PROFILING
STG_UNUSED
#endif
@@ -960,7 +960,7 @@ heapCensusCompactList(Census *census, bdescr *bd)
StgCompactNFDataBlock *block = (StgCompactNFDataBlock*)bd->start;
StgCompactNFData *str = block->owner;
heapProfObject(census, (StgClosure*)str,
- compact_nfdata_full_sizeW(str), rtsTrue);
+ compact_nfdata_full_sizeW(str), true);
}
}
@@ -973,7 +973,7 @@ heapCensusChain( Census *census, bdescr *bd )
StgPtr p;
const StgInfoTable *info;
size_t size;
- rtsBool prim;
+ bool prim;
for (; bd != NULL; bd = bd->link) {
@@ -984,7 +984,7 @@ heapCensusChain( Census *census, bdescr *bd )
if (bd->flags & BF_PINNED) {
StgClosure arr;
SET_HDR(&arr, &stg_ARR_WORDS_info, CCS_PINNED);
- heapProfObject(census, &arr, bd->blocks * BLOCK_SIZE_W, rtsTrue);
+ heapProfObject(census, &arr, bd->blocks * BLOCK_SIZE_W, true);
continue;
}
@@ -998,14 +998,14 @@ heapCensusChain( Census *census, bdescr *bd )
if (bd->flags & BF_LARGE
&& get_itbl((StgClosure *)p)->type == ARR_WORDS) {
size = arr_words_sizeW((StgArrBytes *)p);
- prim = rtsTrue;
+ prim = true;
heapProfObject(census, (StgClosure *)p, size, prim);
continue;
}
while (p < bd->free) {
info = get_itbl((const StgClosure *)p);
- prim = rtsFalse;
+ prim = false;
switch (info->type) {
@@ -1055,7 +1055,7 @@ heapCensusChain( Census *census, bdescr *bd )
break;
case BCO:
- prim = rtsTrue;
+ prim = true;
size = bco_sizeW((StgBCO *)p);
break;
@@ -1067,7 +1067,7 @@ heapCensusChain( Census *census, bdescr *bd )
case MUT_PRIM:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
- prim = rtsTrue;
+ prim = true;
size = sizeW_fromITBL(info);
break;
@@ -1084,7 +1084,7 @@ heapCensusChain( Census *census, bdescr *bd )
break;
case ARR_WORDS:
- prim = rtsTrue;
+ prim = true;
size = arr_words_sizeW((StgArrBytes*)p);
break;
@@ -1092,7 +1092,7 @@ heapCensusChain( Census *census, bdescr *bd )
case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
- prim = rtsTrue;
+ prim = true;
size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
break;
@@ -1100,12 +1100,12 @@ heapCensusChain( Census *census, bdescr *bd )
case SMALL_MUT_ARR_PTRS_DIRTY:
case SMALL_MUT_ARR_PTRS_FROZEN:
case SMALL_MUT_ARR_PTRS_FROZEN0:
- prim = rtsTrue;
+ prim = true;
size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
break;
case TSO:
- prim = rtsTrue;
+ prim = true;
#ifdef PROFILING
if (RtsFlags.ProfFlags.includeTSOs) {
size = sizeofW(StgTSO);
@@ -1121,7 +1121,7 @@ heapCensusChain( Census *census, bdescr *bd )
#endif
case STACK:
- prim = rtsTrue;
+ prim = true;
#ifdef PROFILING
if (RtsFlags.ProfFlags.includeTSOs) {
size = stack_sizeW((StgStack*)p);
@@ -1137,7 +1137,7 @@ heapCensusChain( Census *census, bdescr *bd )
#endif
case TREC_CHUNK:
- prim = rtsTrue;
+ prim = true;
size = sizeofW(StgTRecChunk);
break;
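strMatchesSelector above matches a string against a comma-separated selector list (as handed to the -h<x> heap-profile selectors). An independent restatement of the contract (a sketch, not the RTS code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Is str exactly one element of the comma-separated list sel? */
    static bool matches (const char *str, const char *sel)
    {
        size_t n = strlen(str);
        while (*sel) {
            if (strncmp(sel, str, n) == 0 && (sel[n] == ',' || sel[n] == '\0'))
                return true;
            sel = strchr(sel, ',');   /* skip to the next element */
            if (sel == NULL) return false;
            sel++;
        }
        return false;
    }

    int main (void)
    {
        printf("%d\n", matches("Main", "Main,GHC.Base"));   /* 1 */
        printf("%d\n", matches("Base", "Main,GHC.Base"));   /* 0 */
        return 0;
    }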
diff --git a/rts/ProfHeap.h b/rts/ProfHeap.h
index 56b2529f1b..7b12578175 100644
--- a/rts/ProfHeap.h
+++ b/rts/ProfHeap.h
@@ -14,11 +14,11 @@
void heapCensus (Time t);
uint32_t initHeapProfiling (void);
void endHeapProfiling (void);
-rtsBool strMatchesSelector (const char* str, const char* sel);
+bool strMatchesSelector (const char* str, const char* sel);
#ifdef PROFILING
// doingRetainerProfiling: `-hr` or `-hr<cc> -h<x>`
-rtsBool doingRetainerProfiling(void);
+bool doingRetainerProfiling(void);
#endif
#include "EndPrivate.h"
diff --git a/rts/Profiling.c b/rts/Profiling.c
index fb2dff5f3a..952785be18 100644
--- a/rts/Profiling.c
+++ b/rts/Profiling.c
@@ -111,7 +111,7 @@ static CostCentreStack * appendCCS ( CostCentreStack *ccs1,
CostCentreStack *ccs2 );
static CostCentreStack * actualPush_ ( CostCentreStack *ccs, CostCentre *cc,
CostCentreStack *new_ccs );
-static rtsBool ignoreCCS ( CostCentreStack *ccs );
+static bool ignoreCCS ( CostCentreStack *ccs );
static void countTickss ( CostCentreStack *ccs );
static void inheritCosts ( CostCentreStack *ccs );
static uint32_t numDigits ( StgInt i );
@@ -680,34 +680,26 @@ addToIndexTable (IndexTable *it, CostCentreStack *new_ccs,
/* We omit certain system-related CCs and CCSs from the default
* reports, so as not to cause confusion.
*/
-static rtsBool
+static bool
ignoreCC (CostCentre *cc)
{
- if (RtsFlags.CcFlags.doCostCentres < COST_CENTRES_ALL &&
+ return RtsFlags.CcFlags.doCostCentres < COST_CENTRES_ALL &&
( cc == CC_OVERHEAD
|| cc == CC_DONT_CARE
|| cc == CC_GC
|| cc == CC_SYSTEM
- || cc == CC_IDLE)) {
- return rtsTrue;
- } else {
- return rtsFalse;
- }
+ || cc == CC_IDLE);
}
-static rtsBool
+static bool
ignoreCCS (CostCentreStack *ccs)
{
- if (RtsFlags.CcFlags.doCostCentres < COST_CENTRES_ALL &&
+ return RtsFlags.CcFlags.doCostCentres < COST_CENTRES_ALL &&
( ccs == CCS_OVERHEAD
|| ccs == CCS_DONT_CARE
|| ccs == CCS_GC
|| ccs == CCS_SYSTEM
- || ccs == CCS_IDLE)) {
- return rtsTrue;
- } else {
- return rtsFalse;
- }
+ || ccs == CCS_IDLE);
}
/* -----------------------------------------------------------------------------
@@ -892,7 +884,7 @@ reportCCSProfiling( void )
fprintf(prof_file, "\ttotal alloc = %11s bytes",
showStgWord64(total_alloc * sizeof(W_),
- temp, rtsTrue/*commas*/));
+ temp, true/*commas*/));
fprintf(prof_file, " (excludes profiling overheads)\n\n");
@@ -1154,7 +1146,7 @@ fprintCCS( FILE *f, CostCentreStack *ccs )
}
// Returns: True if the call stack ended with CAF
-static rtsBool fprintCallStack (CostCentreStack *ccs)
+static bool fprintCallStack (CostCentreStack *ccs)
{
CostCentreStack *prev;
@@ -1175,7 +1167,7 @@ static rtsBool fprintCallStack (CostCentreStack *ccs)
void
fprintCCS_stderr (CostCentreStack *ccs, StgClosure *exception, StgTSO *tso)
{
- rtsBool is_caf;
+ bool is_caf;
StgPtr frame;
StgStack *stack;
CostCentreStack *prev_ccs;
diff --git a/rts/Proftimer.c b/rts/Proftimer.c
index 4f4002a0e6..0c07194ed5 100644
--- a/rts/Proftimer.c
+++ b/rts/Proftimer.c
@@ -14,22 +14,22 @@
#include "Capability.h"
#ifdef PROFILING
-static rtsBool do_prof_ticks = rtsFalse; // enable profiling ticks
+static bool do_prof_ticks = false; // enable profiling ticks
#endif
-static rtsBool do_heap_prof_ticks = rtsFalse; // enable heap profiling ticks
+static bool do_heap_prof_ticks = false; // enable heap profiling ticks
// Number of ticks until next heap census
static int ticks_to_heap_profile;
// Time for a heap profile on the next context switch
-rtsBool performHeapProfile;
+bool performHeapProfile;
void
stopProfTimer( void )
{
#ifdef PROFILING
- do_prof_ticks = rtsFalse;
+ do_prof_ticks = false;
#endif
}
@@ -37,14 +37,14 @@ void
startProfTimer( void )
{
#ifdef PROFILING
- do_prof_ticks = rtsTrue;
+ do_prof_ticks = true;
#endif
}
void
stopHeapProfTimer( void )
{
- do_heap_prof_ticks = rtsFalse;
+ do_heap_prof_ticks = false;
}
void
@@ -52,14 +52,14 @@ startHeapProfTimer( void )
{
if (RtsFlags.ProfFlags.doHeapProfile &&
RtsFlags.ProfFlags.heapProfileIntervalTicks > 0) {
- do_heap_prof_ticks = rtsTrue;
+ do_heap_prof_ticks = true;
}
}
void
initProfTimer( void )
{
- performHeapProfile = rtsFalse;
+ performHeapProfile = false;
ticks_to_heap_profile = RtsFlags.ProfFlags.heapProfileIntervalTicks;
@@ -85,7 +85,7 @@ handleProfTick(void)
ticks_to_heap_profile--;
if (ticks_to_heap_profile <= 0) {
ticks_to_heap_profile = RtsFlags.ProfFlags.heapProfileIntervalTicks;
- performHeapProfile = rtsTrue;
+ performHeapProfile = true;
}
}
}
diff --git a/rts/Proftimer.h b/rts/Proftimer.h
index 4bb063fbbc..e323b939c6 100644
--- a/rts/Proftimer.h
+++ b/rts/Proftimer.h
@@ -17,7 +17,7 @@ void handleProfTick ( void );
void stopHeapProfTimer ( void );
void startHeapProfTimer ( void );
-extern rtsBool performHeapProfile;
+extern bool performHeapProfile;
#include "EndPrivate.h"
diff --git a/rts/RaiseAsync.c b/rts/RaiseAsync.c
index c67aa4ce54..256b7c565b 100644
--- a/rts/RaiseAsync.c
+++ b/rts/RaiseAsync.c
@@ -53,7 +53,7 @@ static void throwToSendMsg (Capability *cap USED_IF_THREADS,
static void
throwToSingleThreaded__ (Capability *cap, StgTSO *tso, StgClosure *exception,
- rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
+ bool stop_at_atomically, StgUpdateFrame *stop_here)
{
// Thread already dead?
if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
@@ -69,12 +69,12 @@ throwToSingleThreaded__ (Capability *cap, StgTSO *tso, StgClosure *exception,
void
throwToSingleThreaded (Capability *cap, StgTSO *tso, StgClosure *exception)
{
- throwToSingleThreaded__(cap, tso, exception, rtsFalse, NULL);
+ throwToSingleThreaded__(cap, tso, exception, false, NULL);
}
void
throwToSingleThreaded_ (Capability *cap, StgTSO *tso, StgClosure *exception,
- rtsBool stop_at_atomically)
+ bool stop_at_atomically)
{
throwToSingleThreaded__ (cap, tso, exception, stop_at_atomically, NULL);
}
@@ -82,7 +82,7 @@ throwToSingleThreaded_ (Capability *cap, StgTSO *tso, StgClosure *exception,
void // cannot return a different TSO
suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
- throwToSingleThreaded__ (cap, tso, NULL, rtsFalse, stop_here);
+ throwToSingleThreaded__ (cap, tso, NULL, false, stop_here);
}
/* -----------------------------------------------------------------------------
@@ -272,7 +272,7 @@ check_target:
{
if ((target->flags & TSO_BLOCKEX) == 0) {
// It's on our run queue and not blocking exceptions
- raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
+ raiseAsync(cap, target, msg->exception, false, NULL);
return THROWTO_SUCCESS;
} else {
blockedThrowTo(cap,target,msg);
@@ -337,7 +337,7 @@ check_target:
// nobody else can wake up this TSO after we claim the message
doneWithMsgThrowTo(m);
- raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
+ raiseAsync(cap, target, msg->exception, false, NULL);
return THROWTO_SUCCESS;
}
@@ -391,7 +391,7 @@ check_target:
} else {
// revoke the MVar operation
removeFromMVarBlockedQueue(target);
- raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
+ raiseAsync(cap, target, msg->exception, false, NULL);
unlockClosure((StgClosure *)mvar, info);
return THROWTO_SUCCESS;
}
@@ -410,7 +410,7 @@ check_target:
// future, but that doesn't matter.
ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
- raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
+ raiseAsync(cap, target, msg->exception, false, NULL);
return THROWTO_SUCCESS;
}
}
@@ -429,7 +429,7 @@ check_target:
unlockTSO(target);
return THROWTO_BLOCKED;
} else {
- raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
+ raiseAsync(cap, target, msg->exception, false, NULL);
unlockTSO(target);
return THROWTO_SUCCESS;
}
@@ -476,7 +476,7 @@ check_target:
return THROWTO_BLOCKED;
} else {
removeFromQueues(cap,target);
- raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
+ raiseAsync(cap, target, msg->exception, false, NULL);
return THROWTO_SUCCESS;
}
#endif
@@ -776,7 +776,7 @@ removeFromQueues(Capability *cap, StgTSO *tso)
StgTSO *
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
- rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
+ bool stop_at_atomically, StgUpdateFrame *stop_here)
{
const StgRetInfoTable *info;
StgPtr sp, frame;
diff --git a/rts/RaiseAsync.h b/rts/RaiseAsync.h
index 243d3e0598..3f65e25252 100644
--- a/rts/RaiseAsync.h
+++ b/rts/RaiseAsync.h
@@ -19,7 +19,7 @@
StgTSO* raiseAsync (Capability *cap,
StgTSO *tso,
StgClosure *exception,
- rtsBool stop_at_atomically,
+ bool stop_at_atomically,
StgUpdateFrame *stop_here);
void throwToSingleThreaded (Capability *cap,
@@ -29,7 +29,7 @@ void throwToSingleThreaded (Capability *cap,
void throwToSingleThreaded_ (Capability *cap,
StgTSO *tso,
StgClosure *exception,
- rtsBool stop_at_atomically);
+ bool stop_at_atomically);
void throwToSelf (Capability *cap,
StgTSO *tso,
diff --git a/rts/RetainerProfile.c b/rts/RetainerProfile.c
index b9545387f5..3cbb8acc8c 100644
--- a/rts/RetainerProfile.c
+++ b/rts/RetainerProfile.c
@@ -256,9 +256,9 @@ closeTraverseStack( void )
}
/* -----------------------------------------------------------------------------
- * Returns rtsTrue if the whole stack is empty.
+ * Returns true if the whole stack is empty.
* -------------------------------------------------------------------------- */
-static INLINE rtsBool
+static INLINE bool
isEmptyRetainerStack( void )
{
return (firstStack == currentStack) && stackTop == stackLimit;
@@ -282,10 +282,10 @@ retainerStackBlocks( void )
#endif
/* -----------------------------------------------------------------------------
- * Returns rtsTrue if stackTop is at the stack boundary of the current stack,
+ * Returns true if stackTop is at the stack boundary of the current stack,
* i.e., if the current stack chunk is empty.
* -------------------------------------------------------------------------- */
-static INLINE rtsBool
+static INLINE bool
isOnBoundary( void )
{
return stackTop == currentStackBoundary;
@@ -762,7 +762,7 @@ popOff(void) {
* the next object.
* If the topmost stack element indicates no more objects are left, pop
* off the stack element until either an object can be retrieved or
- * the current stack chunk becomes empty, indicated by rtsTrue returned by
+ * the current stack chunk becomes empty, indicated by true returned by
* isOnBoundary(), in which case *c is set to NULL.
* Note:
* It is okay to call this function even when the current stack chunk
@@ -952,7 +952,7 @@ pop( StgClosure **c, StgClosure **cp, retainer *r )
barf("Invalid object *c in pop()");
return;
}
- } while (rtsTrue);
+ } while (true);
}
/* -----------------------------------------------------------------------------
@@ -1000,9 +1000,9 @@ maybeInitRetainerSet( StgClosure *c )
}
/* -----------------------------------------------------------------------------
- * Returns rtsTrue if *c is a retainer.
+ * Returns true if *c is a retainer.
* -------------------------------------------------------------------------- */
-static INLINE rtsBool
+static INLINE bool
isRetainer( StgClosure *c )
{
switch (get_itbl(c)->type) {
@@ -1040,7 +1040,7 @@ isRetainer( StgClosure *c )
// WEAK objects are roots; there is separate code in which traversing
// begins from WEAK objects.
case WEAK:
- return rtsTrue;
+ return true;
//
// False case
@@ -1080,7 +1080,7 @@ isRetainer( StgClosure *c )
// immutable arrays
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
- return rtsFalse;
+ return false;
//
// Error case
@@ -1099,7 +1099,7 @@ isRetainer( StgClosure *c )
case INVALID_OBJECT:
default:
barf("Invalid object in isRetainer(): %d", get_itbl(c)->type);
- return rtsFalse;
+ return false;
}
}
@@ -1600,8 +1600,8 @@ inner_loop:
retainerSetOfc = retainerSetOf(c);
// Now compute s:
- // isRetainer(cp) == rtsTrue => s == NULL
- // isRetainer(cp) == rtsFalse => s == cp.retainer
+ // isRetainer(cp) == true => s == NULL
+ // isRetainer(cp) == false => s == cp.retainer
if (isRetainer(cp))
s = NULL;
else
@@ -1790,7 +1790,7 @@ computeRetainerSet( void )
// object (computing sumOfNewCostExtra and updating costArray[] when
// debugging retainer profiler).
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- // NOT TRUE: even G0 has a block on its mutable list
+ // NOT true: even G0 has a block on its mutable list
// ASSERT(g != 0 || (generations[g].mut_list == NULL));
// Traversing through mut_list is necessary
diff --git a/rts/RetainerSet.c b/rts/RetainerSet.c
index 9c4cbb30f6..9c3043b84a 100644
--- a/rts/RetainerSet.c
+++ b/rts/RetainerSet.c
@@ -130,7 +130,7 @@ singleton(retainer r)
/* -----------------------------------------------------------------------------
* Finds or creates a retainer set *rs augmented with r.
* Invariants:
- * r is not a member of rs, i.e., isMember(r, rs) returns rtsFalse.
+ * r is not a member of rs, i.e., isMember(r, rs) returns false.
* rs is not NULL.
* Note:
* We could check if rs is NULL, in which case this function call
diff --git a/rts/RetainerSet.h b/rts/RetainerSet.h
index c581293e8b..55a9989926 100644
--- a/rts/RetainerSet.h
+++ b/rts/RetainerSet.h
@@ -116,9 +116,9 @@ extern RetainerSet rs_MANY;
// it is not easy either to write it as a macro (due to my lack of C
// programming experience). Sungwoo
//
-// rtsBool isMember(retainer, retainerSet *);
+// bool isMember(retainer, retainerSet *);
/*
- Returns rtsTrue if r is a member of *rs.
+ Returns true if r is a member of *rs.
Invariants:
rs is not NULL.
Note:
@@ -129,19 +129,19 @@ extern RetainerSet rs_MANY;
*/
#define BINARY_SEARCH_THRESHOLD 8
-INLINE_HEADER rtsBool
+INLINE_HEADER bool
isMember(retainer r, RetainerSet *rs)
{
int i, left, right; // must be int, not uint32_t (because -1 can appear)
retainer ri;
- if (rs == &rs_MANY) { return rtsTrue; }
+ if (rs == &rs_MANY) { return true; }
if (rs->num < BINARY_SEARCH_THRESHOLD) {
for (i = 0; i < (int)rs->num; i++) {
ri = rs->element[i];
- if (r == ri) return rtsTrue;
- else if (r < ri) return rtsFalse;
+ if (r == ri) return true;
+ else if (r < ri) return false;
}
} else {
left = 0;
@@ -149,12 +149,12 @@ isMember(retainer r, RetainerSet *rs)
while (left <= right) {
i = (left + right) / 2;
ri = rs->element[i];
- if (r == ri) return rtsTrue;
+ if (r == ri) return true;
else if (r < ri) right = i - 1;
else left = i + 1;
}
}
- return rtsFalse;
+ return false;
}
// Finds or creates a retainer set augmented with a new retainer.
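
Aside from the type rename, isMember itself is a compact example of a membership test over a sorted array that switches from an early-exit linear scan to binary search at BINARY_SEARCH_THRESHOLD. A standalone sketch of the same strategy (names invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define BINARY_SEARCH_THRESHOLD 8

    /* Same shape as isMember() in rts/RetainerSet.h: the array is
     * sorted ascending, so the linear scan can stop as soon as it
     * passes the sought value. */
    static bool is_member(int r, const int *xs, int n) {
        if (n < BINARY_SEARCH_THRESHOLD) {
            for (int i = 0; i < n; i++) {
                if (r == xs[i]) return true;
                else if (r < xs[i]) return false;   /* sorted: early exit */
            }
        } else {
            int left = 0, right = n - 1;   /* int, not unsigned: right can reach -1 */
            while (left <= right) {
                int i = (left + right) / 2;
                if (r == xs[i]) return true;
                else if (r < xs[i]) right = i - 1;
                else left = i + 1;
            }
        }
        return false;
    }

    int main(void) {
        int xs[] = {1, 3, 5, 8, 13, 21, 34, 55, 89};
        printf("%d %d\n", is_member(13, xs, 9), is_member(4, xs, 9));  /* 1 0 */
        return 0;
    }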
diff --git a/rts/RtsAPI.c b/rts/RtsAPI.c
index e72430743e..f009de7b64 100644
--- a/rts/RtsAPI.c
+++ b/rts/RtsAPI.c
@@ -602,7 +602,7 @@ rts_unlock (Capability *cap)
// random point in the future, which causes problems for
// freeTaskManager().
ACQUIRE_LOCK(&cap->lock);
- releaseCapability_(cap,rtsFalse);
+ releaseCapability_(cap,false);
// Finally, we can release the Task to the free list.
boundTaskExiting(task);
diff --git a/rts/RtsDllMain.c b/rts/RtsDllMain.c
index b3b10f0295..80241cd380 100644
--- a/rts/RtsDllMain.c
+++ b/rts/RtsDllMain.c
@@ -37,7 +37,7 @@ DllMain ( HINSTANCE hInstance STG_UNUSED
//case DLL_PROCESS_DETACH: shutdownHaskell();
}
- return TRUE;
+ return true;
}
#endif
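
One subtlety in the DllMain hunk: the Win32 API declares DllMain as returning BOOL, a typedef for int rather than C99 bool. Returning true still does the right thing because C99's <stdbool.h> defines true as the integer constant 1. A header-free sketch of why this is safe:

    #include <stdbool.h>

    typedef int BOOL;              /* effectively what <windows.h> declares */

    static BOOL like_dllmain(void) {
        return true;               /* true expands to the int constant 1 */
    }

    int main(void) { return like_dllmain() == 1 ? 0 : 1; }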
diff --git a/rts/RtsFlags.c b/rts/RtsFlags.c
index aeb2fe532f..b8f0b212f0 100644
--- a/rts/RtsFlags.c
+++ b/rts/RtsFlags.c
@@ -60,10 +60,10 @@ RtsConfig rtsConfig;
const RtsConfig defaultRtsConfig = {
.rts_opts_enabled = RtsOptsSafeOnly,
- .rts_opts_suggestions = rtsTrue,
+ .rts_opts_suggestions = true,
.rts_opts = NULL,
- .rts_hs_main = rtsFalse,
- .keep_cafs = rtsFalse,
+ .rts_hs_main = false,
+ .keep_cafs = false,
.defaultsHook = FlagDefaultsHook,
.onExitHook = OnExitHook,
.stackOverflowHook = StackOverflowHook,
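
The defaultRtsConfig hunk shows C99 designated initializers pairing naturally with bool fields; unnamed members of an object with static storage duration are zero-initialized, i.e. false or NULL. A minimal sketch of the idiom (struct and fields invented for illustration, echoing the names above):

    #include <stdbool.h>

    typedef struct {
        bool rts_opts_suggestions;
        bool rts_hs_main;
        bool keep_cafs;
        const char *rts_opts;
    } Config;

    /* Fields not named here are zero-initialized: false / NULL. */
    static const Config defaultConfig = {
        .rts_opts_suggestions = true,
        .rts_hs_main          = false,
        .keep_cafs            = false,
    };

    int main(void) { return defaultConfig.rts_opts_suggestions ? 0 : 1; }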
@@ -101,7 +101,7 @@ static void read_debug_flags(const char *arg);
#endif
#ifdef PROFILING
-static rtsBool read_heap_profiling_flag(const char *arg);
+static bool read_heap_profiling_flag(const char *arg);
#endif
#ifdef TRACING
@@ -142,52 +142,53 @@ void initRtsFlagsDefaults(void)
RtsFlags.GcFlags.minOldGenSize = (1024 * 1024) / BLOCK_SIZE;
RtsFlags.GcFlags.maxHeapSize = 0; /* off by default */
RtsFlags.GcFlags.heapSizeSuggestion = 0; /* none */
- RtsFlags.GcFlags.heapSizeSuggestionAuto = rtsFalse;
+ RtsFlags.GcFlags.heapSizeSuggestionAuto = false;
RtsFlags.GcFlags.pcFreeHeap = 3; /* 3% */
RtsFlags.GcFlags.oldGenFactor = 2;
RtsFlags.GcFlags.generations = 2;
- RtsFlags.GcFlags.squeezeUpdFrames = rtsTrue;
- RtsFlags.GcFlags.compact = rtsFalse;
+ RtsFlags.GcFlags.squeezeUpdFrames = true;
+ RtsFlags.GcFlags.compact = false;
RtsFlags.GcFlags.compactThreshold = 30.0;
- RtsFlags.GcFlags.sweep = rtsFalse;
+ RtsFlags.GcFlags.sweep = false;
RtsFlags.GcFlags.idleGCDelayTime = USToTime(300000); // 300ms
#ifdef THREADED_RTS
- RtsFlags.GcFlags.doIdleGC = rtsTrue;
+ RtsFlags.GcFlags.doIdleGC = true;
#else
- RtsFlags.GcFlags.doIdleGC = rtsFalse;
+ RtsFlags.GcFlags.doIdleGC = false;
#endif
RtsFlags.GcFlags.heapBase = 0; /* means don't care */
RtsFlags.GcFlags.allocLimitGrace = (100*1024) / BLOCK_SIZE;
- RtsFlags.GcFlags.numa = rtsFalse;
+ RtsFlags.GcFlags.numa = false;
RtsFlags.GcFlags.numaMask = 1;
-
- RtsFlags.DebugFlags.scheduler = rtsFalse;
- RtsFlags.DebugFlags.interpreter = rtsFalse;
- RtsFlags.DebugFlags.weak = rtsFalse;
- RtsFlags.DebugFlags.gccafs = rtsFalse;
- RtsFlags.DebugFlags.gc = rtsFalse;
- RtsFlags.DebugFlags.block_alloc = rtsFalse;
- RtsFlags.DebugFlags.sanity = rtsFalse;
- RtsFlags.DebugFlags.stable = rtsFalse;
- RtsFlags.DebugFlags.stm = rtsFalse;
- RtsFlags.DebugFlags.prof = rtsFalse;
- RtsFlags.DebugFlags.apply = rtsFalse;
- RtsFlags.DebugFlags.linker = rtsFalse;
- RtsFlags.DebugFlags.squeeze = rtsFalse;
- RtsFlags.DebugFlags.hpc = rtsFalse;
- RtsFlags.DebugFlags.sparks = rtsFalse;
- RtsFlags.DebugFlags.numa = rtsFalse;
+ RtsFlags.GcFlags.ringBell = false;
+
+ RtsFlags.DebugFlags.scheduler = false;
+ RtsFlags.DebugFlags.interpreter = false;
+ RtsFlags.DebugFlags.weak = false;
+ RtsFlags.DebugFlags.gccafs = false;
+ RtsFlags.DebugFlags.gc = false;
+ RtsFlags.DebugFlags.block_alloc = false;
+ RtsFlags.DebugFlags.sanity = false;
+ RtsFlags.DebugFlags.stable = false;
+ RtsFlags.DebugFlags.stm = false;
+ RtsFlags.DebugFlags.prof = false;
+ RtsFlags.DebugFlags.apply = false;
+ RtsFlags.DebugFlags.linker = false;
+ RtsFlags.DebugFlags.squeeze = false;
+ RtsFlags.DebugFlags.hpc = false;
+ RtsFlags.DebugFlags.sparks = false;
+ RtsFlags.DebugFlags.numa = false;
#if defined(PROFILING)
RtsFlags.CcFlags.doCostCentres = 0;
#endif /* PROFILING */
- RtsFlags.ProfFlags.doHeapProfile = rtsFalse;
+ RtsFlags.ProfFlags.doHeapProfile = false;
RtsFlags.ProfFlags.heapProfileInterval = USToTime(100000); // 100ms
#ifdef PROFILING
- RtsFlags.ProfFlags.includeTSOs = rtsFalse;
- RtsFlags.ProfFlags.showCCSOnException = rtsFalse;
+ RtsFlags.ProfFlags.includeTSOs = false;
+ RtsFlags.ProfFlags.showCCSOnException = false;
RtsFlags.ProfFlags.maxRetainerSetSize = 8;
RtsFlags.ProfFlags.ccsLength = 25;
RtsFlags.ProfFlags.modSelector = NULL;
@@ -201,12 +202,12 @@ void initRtsFlagsDefaults(void)
#ifdef TRACING
RtsFlags.TraceFlags.tracing = TRACE_NONE;
- RtsFlags.TraceFlags.timestamp = rtsFalse;
- RtsFlags.TraceFlags.scheduler = rtsFalse;
- RtsFlags.TraceFlags.gc = rtsFalse;
- RtsFlags.TraceFlags.sparks_sampled= rtsFalse;
- RtsFlags.TraceFlags.sparks_full = rtsFalse;
- RtsFlags.TraceFlags.user = rtsFalse;
+ RtsFlags.TraceFlags.timestamp = false;
+ RtsFlags.TraceFlags.scheduler = false;
+ RtsFlags.TraceFlags.gc = false;
+ RtsFlags.TraceFlags.sparks_sampled= false;
+ RtsFlags.TraceFlags.sparks_full = false;
+ RtsFlags.TraceFlags.user = false;
#endif
#ifdef PROFILING
@@ -217,16 +218,16 @@ void initRtsFlagsDefaults(void)
#endif
RtsFlags.ConcFlags.ctxtSwitchTime = USToTime(20000); // 20ms
- RtsFlags.MiscFlags.install_signal_handlers = rtsTrue;
- RtsFlags.MiscFlags.machineReadable = rtsFalse;
+ RtsFlags.MiscFlags.install_signal_handlers = true;
+ RtsFlags.MiscFlags.machineReadable = false;
RtsFlags.MiscFlags.linkerMemBase = 0;
#ifdef THREADED_RTS
RtsFlags.ParFlags.nCapabilities = 1;
- RtsFlags.ParFlags.migrate = rtsTrue;
+ RtsFlags.ParFlags.migrate = true;
RtsFlags.ParFlags.parGcEnabled = 1;
RtsFlags.ParFlags.parGcGen = 0;
- RtsFlags.ParFlags.parGcLoadBalancingEnabled = rtsTrue;
+ RtsFlags.ParFlags.parGcLoadBalancingEnabled = true;
RtsFlags.ParFlags.parGcLoadBalancingGen = ~0u; /* auto, based on -A */
RtsFlags.ParFlags.parGcNoSyncWithIdle = 0;
RtsFlags.ParFlags.parGcThreads = 0; /* defaults to -N */
@@ -238,7 +239,7 @@ void initRtsFlagsDefaults(void)
#endif /* THREADED_RTS */
#ifdef TICKY_TICKY
- RtsFlags.TickyFlags.showTickyStats = rtsFalse;
+ RtsFlags.TickyFlags.showTickyStats = false;
RtsFlags.TickyFlags.tickyFile = NULL;
#endif
}
@@ -433,7 +434,7 @@ usage_text[] = {
0
};
-STATIC_INLINE rtsBool strequal(const char *a, const char * b)
+STATIC_INLINE bool strequal(const char *a, const char * b)
{
return(strcmp(a, b) == 0);
}
@@ -639,7 +640,7 @@ static void checkUnsafe(RtsOptsEnabledEnum enabled)
static void procRtsOpts (int rts_argc0,
RtsOptsEnabledEnum rtsOptsEnabled)
{
- rtsBool error = rtsFalse;
+ bool error = false;
int arg;
int unchecked_arg_start;
@@ -659,16 +660,16 @@ static void procRtsOpts (int rts_argc0,
either OPTION_SAFE or OPTION_UNSAFE. To make sure we cover
every branch we use an option_checked flag which is reset
at the start of each iteration and checked at the end. */
- rtsBool option_checked = rtsFalse;
+ bool option_checked = false;
// See Note [OPTION_SAFE vs OPTION_UNSAFE].
-#define OPTION_SAFE option_checked = rtsTrue;
-#define OPTION_UNSAFE checkUnsafe(rtsOptsEnabled); option_checked = rtsTrue;
+#define OPTION_SAFE option_checked = true;
+#define OPTION_UNSAFE checkUnsafe(rtsOptsEnabled); option_checked = true;
if (rts_argv[arg][0] != '-') {
fflush(stdout);
errorBelch("unexpected RTS argument: %s", rts_argv[arg]);
- error = rtsTrue;
+ error = true;
} else {
/* 0 is dash, 1 is first letter */
@@ -689,7 +690,7 @@ static void procRtsOpts (int rts_argc0,
# define TICKY_BUILD_ONLY(x) \
errorBelch("the flag %s requires the program to be built with -ticky", \
rts_argv[arg]); \
-error = rtsTrue;
+error = true;
#endif
#ifdef PROFILING
@@ -698,7 +699,7 @@ error = rtsTrue;
# define PROFILING_BUILD_ONLY(x) \
errorBelch("the flag %s requires the program to be built with -prof", \
rts_argv[arg]); \
-error = rtsTrue;
+error = true;
#endif
#ifdef TRACING
@@ -707,7 +708,7 @@ error = rtsTrue;
# define TRACING_BUILD_ONLY(x) \
errorBelch("the flag %s requires the program to be built with -eventlog or -debug", \
rts_argv[arg]); \
-error = rtsTrue;
+error = true;
#endif
#ifdef THREADED_RTS
@@ -716,7 +717,7 @@ error = rtsTrue;
# define THREADED_BUILD_ONLY(x) \
errorBelch("the flag %s requires the program to be built with -threaded", \
rts_argv[arg]); \
-error = rtsTrue;
+error = true;
#endif
#ifdef DEBUG
@@ -725,13 +726,13 @@ error = rtsTrue;
# define DEBUG_BUILD_ONLY(x) \
errorBelch("the flag %s requires the program to be built with -debug", \
rts_argv[arg]); \
-error = rtsTrue;
+error = true;
#endif
/* =========== GENERAL ========================== */
case '?':
OPTION_SAFE;
- error = rtsTrue;
+ error = true;
break;
/* This isn't going to allow us to keep related options
@@ -741,17 +742,17 @@ error = rtsTrue;
if (strequal("install-signal-handlers=yes",
&rts_argv[arg][2])) {
OPTION_UNSAFE;
- RtsFlags.MiscFlags.install_signal_handlers = rtsTrue;
+ RtsFlags.MiscFlags.install_signal_handlers = true;
}
else if (strequal("install-signal-handlers=no",
&rts_argv[arg][2])) {
OPTION_UNSAFE;
- RtsFlags.MiscFlags.install_signal_handlers = rtsFalse;
+ RtsFlags.MiscFlags.install_signal_handlers = false;
}
else if (strequal("machine-readable",
&rts_argv[arg][2])) {
OPTION_UNSAFE;
- RtsFlags.MiscFlags.machineReadable = rtsTrue;
+ RtsFlags.MiscFlags.machineReadable = true;
}
else if (strequal("info",
&rts_argv[arg][2])) {
@@ -772,11 +773,11 @@ error = rtsTrue;
if (!osNumaAvailable()) {
errorBelch("%s: OS reports NUMA is not available",
rts_argv[arg]);
- error = rtsTrue;
+ error = true;
break;
}
- RtsFlags.GcFlags.numa = rtsTrue;
+ RtsFlags.GcFlags.numa = true;
RtsFlags.GcFlags.numaMask = mask;
}
#endif
@@ -791,16 +792,16 @@ error = rtsTrue;
} else {
errorBelch("%s: missing number of nodes",
rts_argv[arg]);
- error = rtsTrue;
+ error = true;
break;
}
if (nNodes > MAX_NUMA_NODES) {
errorBelch("%s: Too many NUMA nodes (max %d)",
rts_argv[arg], MAX_NUMA_NODES);
- error = rtsTrue;
+ error = true;
} else {
- RtsFlags.GcFlags.numa = rtsTrue;
- RtsFlags.DebugFlags.numa = rtsTrue;
+ RtsFlags.GcFlags.numa = true;
+ RtsFlags.DebugFlags.numa = true;
RtsFlags.GcFlags.numaMask = (1<<nNodes) - 1;
}
}
@@ -808,7 +809,7 @@ error = rtsTrue;
else {
OPTION_SAFE;
errorBelch("unknown RTS option: %s",rts_argv[arg]);
- error = rtsTrue;
+ error = true;
}
break;
case 'A':
@@ -834,7 +835,7 @@ error = rtsTrue;
case 'B':
OPTION_UNSAFE;
- RtsFlags.GcFlags.ringBell = rtsTrue;
+ RtsFlags.GcFlags.ringBell = true;
unchecked_arg_start++;
goto check_rest;
@@ -844,13 +845,13 @@ error = rtsTrue;
RtsFlags.GcFlags.compactThreshold =
atof(rts_argv[arg]+2);
} else {
- RtsFlags.GcFlags.compact = rtsTrue;
+ RtsFlags.GcFlags.compact = true;
}
break;
case 'w':
OPTION_UNSAFE;
- RtsFlags.GcFlags.sweep = rtsTrue;
+ RtsFlags.GcFlags.sweep = true;
unchecked_arg_start++;
goto check_rest;
@@ -924,7 +925,7 @@ error = rtsTrue;
if (nCapabilities <= 0) {
errorBelch("bad value for -maxN");
- error = rtsTrue;
+ error = true;
}
#if defined(PROFILING)
RtsFlags.ParFlags.nCapabilities = 1;
@@ -958,7 +959,7 @@ error = rtsTrue;
case 'H':
OPTION_UNSAFE;
if (rts_argv[arg][2] == '\0') {
- RtsFlags.GcFlags.heapSizeSuggestionAuto = rtsTrue;
+ RtsFlags.GcFlags.heapSizeSuggestionAuto = true;
} else {
RtsFlags.GcFlags.heapSizeSuggestion = (uint32_t)
(decodeSize(rts_argv[arg], 2, BLOCK_SIZE, HS_WORD_MAX)
@@ -981,9 +982,9 @@ error = rtsTrue;
} else {
Time t = fsecondsToTime(atof(rts_argv[arg]+2));
if (t == 0) {
- RtsFlags.GcFlags.doIdleGC = rtsFalse;
+ RtsFlags.GcFlags.doIdleGC = false;
} else {
- RtsFlags.GcFlags.doIdleGC = rtsTrue;
+ RtsFlags.GcFlags.doIdleGC = true;
RtsFlags.GcFlags.idleGCDelayTime = t;
}
}
@@ -1018,13 +1019,13 @@ error = rtsTrue;
}
r = openStatsFile(rts_argv[arg]+2, NULL,
&RtsFlags.GcFlags.statsFile);
- if (r == -1) { error = rtsTrue; }
+ if (r == -1) { error = true; }
}
break;
case 'Z':
OPTION_UNSAFE;
- RtsFlags.GcFlags.squeezeUpdFrames = rtsFalse;
+ RtsFlags.GcFlags.squeezeUpdFrames = false;
unchecked_arg_start++;
goto check_rest;
@@ -1041,7 +1042,7 @@ error = rtsTrue;
errorBelch("flag -Pa given an argument"
" when none was expected: %s"
,rts_argv[arg]);
- error = rtsTrue;
+ error = true;
}
break;
case '\0':
@@ -1143,7 +1144,7 @@ error = rtsTrue;
if (nCapabilities <= 0) {
errorBelch("bad value for -N");
- error = rtsTrue;
+ error = true;
}
if (rtsOptsEnabled == RtsOptsSafeOnly &&
nCapabilities > (int)getNumberOfProcessors()) {
@@ -1160,11 +1161,11 @@ error = rtsTrue;
switch (rts_argv[arg][2]) {
case '1':
// backwards compat only
- RtsFlags.ParFlags.parGcEnabled = rtsFalse;
+ RtsFlags.ParFlags.parGcEnabled = false;
break;
default:
errorBelch("unknown RTS option: %s",rts_argv[arg]);
- error = rtsTrue;
+ error = true;
break;
}
) break;
@@ -1175,13 +1176,13 @@ error = rtsTrue;
switch (rts_argv[arg][2]) {
case '\0':
errorBelch("incomplete RTS option: %s",rts_argv[arg]);
- error = rtsTrue;
+ error = true;
break;
case 'g':
if (rts_argv[arg][3] == '\0') {
- RtsFlags.ParFlags.parGcEnabled = rtsFalse;
+ RtsFlags.ParFlags.parGcEnabled = false;
} else {
- RtsFlags.ParFlags.parGcEnabled = rtsTrue;
+ RtsFlags.ParFlags.parGcEnabled = true;
RtsFlags.ParFlags.parGcGen
= strtol(rts_argv[arg]+3, (char **) NULL, 10);
}
@@ -1189,11 +1190,11 @@ error = rtsTrue;
case 'b':
if (rts_argv[arg][3] == '\0') {
RtsFlags.ParFlags.parGcLoadBalancingEnabled =
- rtsFalse;
+ false;
}
else {
RtsFlags.ParFlags.parGcLoadBalancingEnabled =
- rtsTrue;
+ true;
RtsFlags.ParFlags.parGcLoadBalancingGen
= strtol(rts_argv[arg]+3, (char **) NULL, 10);
}
@@ -1207,24 +1208,24 @@ error = rtsTrue;
threads = strtol(rts_argv[arg]+3, (char **) NULL, 10);
if (threads <= 0) {
errorBelch("-qn must be 1 or greater");
- error = rtsTrue;
+ error = true;
} else {
RtsFlags.ParFlags.parGcThreads = threads;
}
break;
}
case 'a':
- RtsFlags.ParFlags.setAffinity = rtsTrue;
+ RtsFlags.ParFlags.setAffinity = true;
break;
case 'm':
- RtsFlags.ParFlags.migrate = rtsFalse;
+ RtsFlags.ParFlags.migrate = false;
break;
case 'w':
// -qw was removed; accepted for backwards compat
break;
default:
errorBelch("unknown RTS option: %s",rts_argv[arg]);
- error = rtsTrue;
+ error = true;
break;
}
) break;
@@ -1238,7 +1239,7 @@ error = rtsTrue;
= strtol(rts_argv[arg]+2, (char **) NULL, 10);
if (RtsFlags.ParFlags.maxLocalSparks <= 0) {
errorBelch("bad value for -e");
- error = rtsTrue;
+ error = true;
}
}
) break;
@@ -1249,7 +1250,7 @@ error = rtsTrue;
OPTION_SAFE;
TICKY_BUILD_ONLY(
- RtsFlags.TickyFlags.showTickyStats = rtsTrue;
+ RtsFlags.TickyFlags.showTickyStats = true;
{
int r;
@@ -1259,7 +1260,7 @@ error = rtsTrue;
r = openStatsFile(rts_argv[arg]+2,
TICKY_FILENAME_FMT,
&RtsFlags.TickyFlags.tickyFile);
- if (r == -1) { error = rtsTrue; }
+ if (r == -1) { error = true; }
}
) break;
@@ -1289,7 +1290,7 @@ error = rtsTrue;
case '\0':
OPTION_SAFE;
errorBelch("incomplete RTS option: %s",rts_argv[arg]);
- error = rtsTrue;
+ error = true;
break;
case 'b': /* heapBase in hex; undocumented */
@@ -1299,7 +1300,7 @@ error = rtsTrue;
= strToStgWord(rts_argv[arg]+3, (char **) NULL, 0);
} else {
errorBelch("-xb: requires argument");
- error = rtsTrue;
+ error = true;
}
break;
@@ -1311,7 +1312,7 @@ error = rtsTrue;
= strtol(rts_argv[arg]+3, (char **) NULL, 16);
if (RtsFlags.MiscFlags.linkerMemBase > 0x80000000) {
errorBelch("-xm: value must be <80000000");
- error = rtsTrue;
+ error = true;
}
} else {
RtsFlags.MiscFlags.linkerMemBase = 0;
@@ -1323,7 +1324,7 @@ error = rtsTrue;
an exception */
OPTION_SAFE;
PROFILING_BUILD_ONLY(
- RtsFlags.ProfFlags.showCCSOnException = rtsTrue;
+ RtsFlags.ProfFlags.showCCSOnException = true;
);
unchecked_arg_start++;
goto check_rest;
@@ -1331,7 +1332,7 @@ error = rtsTrue;
case 't': /* Include memory used by TSOs in a heap profile */
OPTION_SAFE;
PROFILING_BUILD_ONLY(
- RtsFlags.ProfFlags.includeTSOs = rtsTrue;
+ RtsFlags.ProfFlags.includeTSOs = true;
);
unchecked_arg_start++;
goto check_rest;
@@ -1351,7 +1352,7 @@ error = rtsTrue;
default:
OPTION_SAFE;
errorBelch("unknown RTS option: %s",rts_argv[arg]);
- error = rtsTrue;
+ error = true;
break;
}
break; /* defensive programming */
@@ -1367,7 +1368,7 @@ error = rtsTrue;
errorBelch("flag -%c given an argument"
" when none was expected: %s",
rts_argv[arg][1],rts_argv[arg]);
- error = rtsTrue;
+ error = true;
}
break;
}
@@ -1376,7 +1377,7 @@ error = rtsTrue;
default:
OPTION_SAFE;
errorBelch("unknown RTS option: %s",rts_argv[arg]);
- error = rtsTrue;
+ error = true;
break;
}
@@ -1619,49 +1620,49 @@ static void read_debug_flags(const char* arg)
for (c = arg + 2; *c != '\0'; c++) {
switch (*c) {
case 's':
- RtsFlags.DebugFlags.scheduler = rtsTrue;
+ RtsFlags.DebugFlags.scheduler = true;
break;
case 'i':
- RtsFlags.DebugFlags.interpreter = rtsTrue;
+ RtsFlags.DebugFlags.interpreter = true;
break;
case 'w':
- RtsFlags.DebugFlags.weak = rtsTrue;
+ RtsFlags.DebugFlags.weak = true;
break;
case 'G':
- RtsFlags.DebugFlags.gccafs = rtsTrue;
+ RtsFlags.DebugFlags.gccafs = true;
break;
case 'g':
- RtsFlags.DebugFlags.gc = rtsTrue;
+ RtsFlags.DebugFlags.gc = true;
break;
case 'b':
- RtsFlags.DebugFlags.block_alloc = rtsTrue;
+ RtsFlags.DebugFlags.block_alloc = true;
break;
case 'S':
- RtsFlags.DebugFlags.sanity = rtsTrue;
+ RtsFlags.DebugFlags.sanity = true;
break;
case 't':
- RtsFlags.DebugFlags.stable = rtsTrue;
+ RtsFlags.DebugFlags.stable = true;
break;
case 'p':
- RtsFlags.DebugFlags.prof = rtsTrue;
+ RtsFlags.DebugFlags.prof = true;
break;
case 'l':
- RtsFlags.DebugFlags.linker = rtsTrue;
+ RtsFlags.DebugFlags.linker = true;
break;
case 'a':
- RtsFlags.DebugFlags.apply = rtsTrue;
+ RtsFlags.DebugFlags.apply = true;
break;
case 'm':
- RtsFlags.DebugFlags.stm = rtsTrue;
+ RtsFlags.DebugFlags.stm = true;
break;
case 'z':
- RtsFlags.DebugFlags.squeeze = rtsTrue;
+ RtsFlags.DebugFlags.squeeze = true;
break;
case 'c':
- RtsFlags.DebugFlags.hpc = rtsTrue;
+ RtsFlags.DebugFlags.hpc = true;
break;
case 'r':
- RtsFlags.DebugFlags.sparks = rtsTrue;
+ RtsFlags.DebugFlags.sparks = true;
break;
default:
bad_option( arg );
@@ -1675,11 +1676,11 @@ static void read_debug_flags(const char* arg)
#ifdef PROFILING
// Parse a "-h" flag, returning whether the parse resulted in an error.
-static rtsBool read_heap_profiling_flag(const char *arg)
+static bool read_heap_profiling_flag(const char *arg)
{
// Already parsed "-h"
- rtsBool error = rtsFalse;
+ bool error = false;
switch (arg[2]) {
case '\0':
case 'C':
@@ -1747,7 +1748,7 @@ static rtsBool read_heap_profiling_flag(const char *arg)
if (RtsFlags.ProfFlags.doHeapProfile != 0) {
errorBelch("multiple heap profile options");
- error = rtsTrue;
+ error = true;
break;
}
@@ -1782,7 +1783,7 @@ static rtsBool read_heap_profiling_flag(const char *arg)
default:
errorBelch("invalid heap profile option: %s", arg);
- error = rtsTrue;
+ error = true;
}
return error;
@@ -1793,7 +1794,7 @@ static rtsBool read_heap_profiling_flag(const char *arg)
static void read_trace_flags(const char *arg)
{
const char *c;
- rtsBool enabled = rtsTrue;
+ bool enabled = true;
/* Syntax for tracing flags currently looks like:
*
* -l To turn on eventlog tracing with default trace classes
@@ -1813,17 +1814,17 @@ static void read_trace_flags(const char *arg)
* Similarly, in future we might default to slightly less verbose
* scheduler or GC tracing.
*/
- RtsFlags.TraceFlags.scheduler = rtsTrue;
- RtsFlags.TraceFlags.gc = rtsTrue;
- RtsFlags.TraceFlags.sparks_sampled = rtsTrue;
- RtsFlags.TraceFlags.user = rtsTrue;
+ RtsFlags.TraceFlags.scheduler = true;
+ RtsFlags.TraceFlags.gc = true;
+ RtsFlags.TraceFlags.sparks_sampled = true;
+ RtsFlags.TraceFlags.user = true;
for (c = arg; *c != '\0'; c++) {
switch(*c) {
case '\0':
break;
case '-':
- enabled = rtsFalse;
+ enabled = false;
break;
case 'a':
RtsFlags.TraceFlags.scheduler = enabled;
@@ -1831,32 +1832,32 @@ static void read_trace_flags(const char *arg)
RtsFlags.TraceFlags.sparks_sampled = enabled;
RtsFlags.TraceFlags.sparks_full = enabled;
RtsFlags.TraceFlags.user = enabled;
- enabled = rtsTrue;
+ enabled = true;
break;
case 's':
RtsFlags.TraceFlags.scheduler = enabled;
- enabled = rtsTrue;
+ enabled = true;
break;
case 'p':
RtsFlags.TraceFlags.sparks_sampled = enabled;
- enabled = rtsTrue;
+ enabled = true;
break;
case 'f':
RtsFlags.TraceFlags.sparks_full = enabled;
- enabled = rtsTrue;
+ enabled = true;
break;
case 't':
RtsFlags.TraceFlags.timestamp = enabled;
- enabled = rtsTrue;
+ enabled = true;
break;
case 'g':
RtsFlags.TraceFlags.gc = enabled;
- enabled = rtsTrue;
+ enabled = true;
break;
case 'u':
RtsFlags.TraceFlags.user = enabled;
- enabled = rtsTrue;
+ enabled = true;
break;
default:
errorBelch("unknown trace option: %c",*c);
diff --git a/rts/RtsSignals.h b/rts/RtsSignals.h
index be21765dd6..de346b836d 100644
--- a/rts/RtsSignals.h
+++ b/rts/RtsSignals.h
@@ -19,7 +19,7 @@
#else
-#define signals_pending() (rtsFalse)
+#define signals_pending() (false)
#endif
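
The signals_pending() stub above is the usual trick of defining a predicate away as a constant macro so call sites compile unchanged in builds without signal support, and the dead branch folds away. A sketch of the pattern (the RTS_USER_SIGNALS guard name follows the real header, but treat this as illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #ifdef RTS_USER_SIGNALS
    bool signals_pending(void);        /* real implementation elsewhere */
    #else
    #define signals_pending() (false)  /* constant: whole branch folds away */
    #endif

    int main(void) {
        if (signals_pending()) {
            printf("handling signals\n");
        }
        return 0;
    }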
diff --git a/rts/RtsStartup.c b/rts/RtsStartup.c
index 86a3228898..e4e8857989 100644
--- a/rts/RtsStartup.c
+++ b/rts/RtsStartup.c
@@ -292,7 +292,7 @@ hs_add_root(void (*init_root)(void) STG_UNUSED)
------------------------------------------------------------------------- */
static void
-hs_exit_(rtsBool wait_foreign)
+hs_exit_(bool wait_foreign)
{
uint32_t g, i;
@@ -346,7 +346,7 @@ hs_exit_(rtsBool wait_foreign)
* (e.g. pthread) may fire even after we exit, which may segfault as we've
* already freed the capabilities.
*/
- exitTimer(rtsTrue);
+ exitTimer(true);
// set the terminal settings back to what they were
#if !defined(mingw32_HOST_OS)
@@ -454,14 +454,14 @@ static void flushStdHandles(void)
void
hs_exit(void)
{
- hs_exit_(rtsTrue);
+ hs_exit_(true);
// be safe; this might be a DLL
}
void
hs_exit_nowait(void)
{
- hs_exit_(rtsFalse);
+ hs_exit_(false);
// do not wait for outstanding foreign calls to return; if they return in
// the future, they will block indefinitely.
}
@@ -478,7 +478,7 @@ shutdownHaskellAndExit(int n, int fastExit)
{
if (!fastExit) {
// we're about to exit(), no need to wait for foreign calls to return.
- hs_exit_(rtsFalse);
+ hs_exit_(false);
}
stg_exit(n);
@@ -491,7 +491,7 @@ void
shutdownHaskellAndSignal(int sig, int fastExit)
{
if (!fastExit) {
- hs_exit_(rtsFalse);
+ hs_exit_(false);
}
exitBySignal(sig);
diff --git a/rts/RtsUtils.c b/rts/RtsUtils.c
index 3a9742744f..31dc060244 100644
--- a/rts/RtsUtils.c
+++ b/rts/RtsUtils.c
@@ -155,7 +155,7 @@ heapOverflow(void)
rtsConfig.outOfHeapHook(0/*unknown request size*/,
(W_)RtsFlags.GcFlags.maxHeapSize * BLOCK_SIZE);
- heap_overflow = rtsTrue;
+ heap_overflow = true;
}
}
@@ -187,7 +187,7 @@ time_str(void)
-------------------------------------------------------------------------- */
char *
-showStgWord64(StgWord64 x, char *s, rtsBool with_commas)
+showStgWord64(StgWord64 x, char *s, bool with_commas)
{
if (with_commas) {
if (x < (StgWord64)1e3)
diff --git a/rts/RtsUtils.h b/rts/RtsUtils.h
index 2d5e5de02c..8f4e8066f2 100644
--- a/rts/RtsUtils.h
+++ b/rts/RtsUtils.h
@@ -37,7 +37,7 @@ void stgFree(void* p);
void heapOverflow(void);
char *time_str(void);
-char *showStgWord64(StgWord64, char *, rtsBool);
+char *showStgWord64(StgWord64, char *, bool);
#ifdef DEBUG
void heapCheckFail( void );
diff --git a/rts/STM.c b/rts/STM.c
index d5c2713a6f..0e09d7da19 100644
--- a/rts/STM.c
+++ b/rts/STM.c
@@ -63,8 +63,8 @@
* it contained.
*
* cond_lock_tvar - lock a specified TVar (STM_FG_LOCKS only) if it
- * contains a specified value. Return TRUE if this succeeds,
- * FALSE otherwise.
+ * contains a specified value. Return true if this succeeds,
+ * false otherwise.
*
* unlock_tvar - release the lock on a specified TVar (STM_FG_LOCKS only),
* storing a specified value in place of the lock entry.
@@ -96,9 +96,6 @@
#include <stdio.h>
-#define TRUE 1
-#define FALSE 0
-
// ACQ_ASSERT is used for assertions which are only required for
// THREADED_RTS builds with fine-grained locking.
@@ -112,31 +109,27 @@
/*......................................................................*/
-// If SHAKE is defined then validation will sometimes spuriously fail. They help test
-// unusual code paths if genuine contention is rare
-
#define TRACE(_x...) debugTrace(DEBUG_stm, "STM: " _x)
+// If SHAKE is defined then validation will sometimes spuriously fail. These
+// spurious failures help test unusual code paths when genuine contention is rare.
#ifdef SHAKE
-static const int do_shake = TRUE;
-#else
-static const int do_shake = FALSE;
-#endif
static int shake_ctr = 0;
static int shake_lim = 1;
static int shake(void) {
- if (do_shake) {
if (((shake_ctr++) % shake_lim) == 0) {
shake_ctr = 1;
shake_lim ++;
- return TRUE;
+ return true;
}
- return FALSE;
- } else {
- return FALSE;
- }
+ return false;
}
+#else
+static int shake(void) {
+ return false;
+}
+#endif
/*......................................................................*/
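
Beyond the TRUE/FALSE cleanup, this hunk restructures shake(): rather than one function branching on the compile-time constant do_shake, there are now two definitions selected by #ifdef SHAKE, so non-SHAKE builds compile a bare `return false` and the counters disappear. A reduced sketch of the before/after shapes:

    #include <stdbool.h>

    /* Before: one definition, guarded by a constant flag. */
    #ifdef SHAKE
    static const bool do_shake = true;
    #else
    static const bool do_shake = false;
    #endif
    static int shake_old(void) {
        static int ctr = 0, lim = 1;
        if (do_shake) {
            if ((ctr++ % lim) == 0) { ctr = 1; lim++; return true; }
            return false;
        }
        return false;
    }

    /* After: two definitions, selected at compile time. */
    #ifdef SHAKE
    static int shake_new(void) {
        static int ctr = 0, lim = 1;
        if ((ctr++ % lim) == 0) { ctr = 1; lim++; return true; }
        return false;
    }
    #else
    static int shake_new(void) { return false; }
    #endif

    int main(void) { return shake_old() == shake_new() ? 0 : 1; }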
@@ -158,7 +151,7 @@ static int shake(void) {
__limit = TREC_CHUNK_NUM_ENTRIES; \
} \
exit_for_each: \
- if (FALSE) goto exit_for_each; \
+ if (false) goto exit_for_each; \
} while (0)
#define BREAK_FOR_EACH goto exit_for_each
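
The `if (false) goto exit_for_each;` in the FOR_EACH machinery looks odd but is presumably there to keep the exit_for_each label referenced even when a loop body never uses BREAK_FOR_EACH, silencing unused-label warnings. The idiom reduced to a skeleton (macro names echo the real ones; the rest is invented):

    #include <stdbool.h>
    #include <stdio.h>

    /* The label after the loop gives BREAK_FOR_EACH a jump target;
     * the never-taken goto keeps the label "used" in bodies that
     * never break. One use per function, as in the real macro. */
    #define FOR_EACH(i, n, CODE)                       \
        do {                                           \
            for (int i = 0; i < (n); i++) { CODE; }    \
        exit_for_each:                                 \
            if (false) goto exit_for_each;             \
        } while (0)

    #define BREAK_FOR_EACH goto exit_for_each

    int main(void) {
        int last = -1;
        FOR_EACH(i, 10, {
            last = i;
            if (i == 3) BREAK_FOR_EACH;
        });
        printf("%d\n", last);  /* 3 */
        return 0;
    }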
@@ -179,7 +172,7 @@ static int shake(void) {
#if defined(STM_UNIPROC)
#undef IF_STM_UNIPROC
#define IF_STM_UNIPROC(__X) do { __X } while (0)
-static const StgBool config_use_read_phase = FALSE;
+static const StgBool config_use_read_phase = false;
static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
TRACE("%p : lock_stm()", trec);
@@ -221,7 +214,7 @@ static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
// Nothing -- uniproc
- return TRUE;
+ return true;
}
static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
@@ -233,7 +226,7 @@ static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
#undef IF_STM_CG_LOCK
#define IF_STM_CG_LOCK(__X) do { __X } while (0)
-static const StgBool config_use_read_phase = FALSE;
+static const StgBool config_use_read_phase = false;
static volatile StgTRecHeader *smp_locked = NULL;
static void lock_stm(StgTRecHeader *trec) {
@@ -282,7 +275,7 @@ static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
// Nothing -- protected by STM lock
- return TRUE;
+ return true;
}
static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
@@ -294,7 +287,7 @@ static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
#undef IF_STM_FG_LOCKS
#define IF_STM_FG_LOCKS(__X) do { __X } while (0)
-static const StgBool config_use_read_phase = TRUE;
+static const StgBool config_use_read_phase = true;
static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
TRACE("%p : lock_stm()", trec);
@@ -640,7 +633,7 @@ static void remove_watch_queue_entries_for_trec(Capability *cap,
dirty_TVAR(cap,s); // we modified first_watch_queue_entry
}
free_stg_tvar_watch_queue(cap, q);
- unlock_tvar(cap, trec, s, saw, FALSE);
+ unlock_tvar(cap, trec, s, saw, false);
});
}
@@ -679,16 +672,15 @@ static void merge_update_into(Capability *cap,
StgTRecHeader *t,
StgTVar *tvar,
StgClosure *expected_value,
- StgClosure *new_value) {
- int found;
-
+ StgClosure *new_value)
+{
// Look for an entry in this trec
- found = FALSE;
+ bool found = false;
FOR_EACH_ENTRY(t, e, {
StgTVar *s;
s = e -> tvar;
if (s == tvar) {
- found = TRUE;
+ found = true;
if (e -> expected_value != expected_value) {
// Must abort if the two entries start from different values
TRACE("%p : update entries inconsistent at %p (%p vs %p)",
@@ -717,10 +709,8 @@ static void merge_read_into(Capability *cap,
StgTVar *tvar,
StgClosure *expected_value)
{
- int found;
StgTRecHeader *t;
-
- found = FALSE;
+ bool found = false;
//
// See #7493
@@ -747,7 +737,7 @@ static void merge_read_into(Capability *cap,
{
FOR_EACH_ENTRY(t, e, {
if (e -> tvar == tvar) {
- found = TRUE;
+ found = true;
if (e -> expected_value != expected_value) {
// Must abort if the two entries start from different values
TRACE("%p : read entries inconsistent at %p (%p vs %p)",
@@ -808,7 +798,7 @@ static void revert_ownership(Capability *cap STG_UNUSED,
StgTVar *s;
s = e -> tvar;
if (tvar_is_locked(s, trec)) {
- unlock_tvar(cap, trec, s, e -> expected_value, TRUE);
+ unlock_tvar(cap, trec, s, e -> expected_value, true);
}
}
});
@@ -838,7 +828,7 @@ static StgBool validate_and_acquire_ownership (Capability *cap,
if (shake()) {
TRACE("%p : shake, pretending trec is invalid when it may not be", trec);
- return FALSE;
+ return false;
}
ASSERT((trec -> state == TREC_ACTIVE) ||
@@ -853,7 +843,7 @@ static StgBool validate_and_acquire_ownership (Capability *cap,
TRACE("%p : trying to acquire %p", trec, s);
if (!cond_lock_tvar(trec, s, e -> expected_value)) {
TRACE("%p : failed to acquire %p", trec, s);
- result = FALSE;
+ result = false;
BREAK_FOR_EACH;
}
} else {
@@ -862,13 +852,13 @@ static StgBool validate_and_acquire_ownership (Capability *cap,
TRACE("%p : will need to check %p", trec, s);
if (s -> current_value != e -> expected_value) {
TRACE("%p : doesn't match", trec);
- result = FALSE;
+ result = false;
BREAK_FOR_EACH;
}
e -> num_updates = s -> num_updates;
if (s -> current_value != e -> expected_value) {
TRACE("%p : doesn't match (race)", trec);
- result = FALSE;
+ result = false;
BREAK_FOR_EACH;
} else {
TRACE("%p : need to check version %ld", trec, e -> num_updates);
@@ -897,7 +887,7 @@ static StgBool validate_and_acquire_ownership (Capability *cap,
// this kind of algorithm.
static StgBool check_read_only(StgTRecHeader *trec STG_UNUSED) {
- StgBool result = TRUE;
+ StgBool result = true;
ASSERT(config_use_read_phase);
IF_STM_FG_LOCKS({
@@ -913,7 +903,7 @@ static StgBool check_read_only(StgTRecHeader *trec STG_UNUSED) {
if (s -> current_value != e -> expected_value ||
s -> num_updates != e -> num_updates) {
TRACE("%p : mismatch", trec);
- result = FALSE;
+ result = false;
BREAK_FOR_EACH;
}
}
@@ -950,14 +940,14 @@ void stmPreGCHook (Capability *cap) {
static volatile StgInt64 max_commits = 0;
#if defined(THREADED_RTS)
-static volatile StgWord token_locked = FALSE;
+static volatile StgWord token_locked = false;
static void getTokenBatch(Capability *cap) {
- while (cas((void *)&token_locked, FALSE, TRUE) == TRUE) { /* nothing */ }
+ while (cas((void *)&token_locked, false, true) == true) { /* nothing */ }
max_commits += TOKEN_BATCH_SIZE;
TRACE("%p : cap got token batch, max_commits=%" FMT_Int64, cap, max_commits);
cap -> transaction_tokens = TOKEN_BATCH_SIZE;
- token_locked = FALSE;
+ token_locked = false;
}
static void getToken(Capability *cap) {
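
getTokenBatch above guards max_commits with a tiny spinlock: the RTS's cas returns the word's old value, so the loop spins while the lock word was already true. For illustration only, here is the same pattern in portable C11 atomics (this is not the RTS's cas API):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool token_locked;   /* zero-initialized: false */
    static long long max_commits = 0;

    static void get_token_batch(int batch) {
        /* atomic_exchange returns the old value: spin while someone
         * else already held the lock; stop once we flipped it ourselves. */
        while (atomic_exchange(&token_locked, true)) { /* nothing */ }
        max_commits += batch;                  /* critical section */
        atomic_store(&token_locked, false);    /* release */
    }

    int main(void) { get_token_batch(1024); return max_commits == 1024 ? 0 : 1; }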
@@ -1069,7 +1059,6 @@ void stmCondemnTransaction(Capability *cap,
StgBool stmValidateNestOfTransactions(Capability *cap, StgTRecHeader *trec) {
StgTRecHeader *t;
- StgBool result;
TRACE("%p : stmValidateNestOfTransactions", trec);
ASSERT(trec != NO_TREC);
@@ -1080,9 +1069,9 @@ StgBool stmValidateNestOfTransactions(Capability *cap, StgTRecHeader *trec) {
lock_stm(trec);
t = trec;
- result = TRUE;
+ StgBool result = true;
while (t != NO_TREC) {
- result &= validate_and_acquire_ownership(cap, t, TRUE, FALSE);
+ result &= validate_and_acquire_ownership(cap, t, true, false);
t = t -> enclosing_trec;
}
@@ -1136,7 +1125,7 @@ static void disconnect_invariant(Capability *cap,
FOR_EACH_ENTRY(last_execution, e, {
StgTVar *s = e -> tvar;
StgTVarWatchQueue *q = s -> first_watch_queue_entry;
- DEBUG_ONLY( StgBool found = FALSE );
+ DEBUG_ONLY( StgBool found = false );
TRACE(" looking for trec on tvar=%p", s);
for (q = s -> first_watch_queue_entry;
q != END_STM_WATCH_QUEUE;
@@ -1158,7 +1147,7 @@ static void disconnect_invariant(Capability *cap,
}
TRACE(" found it in watch queue entry %p", q);
free_stg_tvar_watch_queue(cap, q);
- DEBUG_ONLY( found = TRUE );
+ DEBUG_ONLY( found = true );
break;
}
}
@@ -1273,7 +1262,7 @@ StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *
q != END_STM_WATCH_QUEUE;
q = q -> next_queue_entry) {
if (watcher_is_invariant(q)) {
- StgBool found = FALSE;
+ StgBool found = false;
StgInvariantCheckQueue *q2;
TRACE("%p : Touching invariant %p", trec, q -> closure);
for (q2 = trec -> invariants_to_check;
@@ -1281,7 +1270,7 @@ StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *
q2 = q2 -> next_queue_entry) {
if (q2 -> invariant == (StgAtomicInvariant*)(q -> closure)) {
TRACE("%p : Already found %p", trec, q -> closure);
- found = TRUE;
+ found = true;
break;
}
}
@@ -1297,7 +1286,7 @@ StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *
}
}
- unlock_tvar(cap, trec, s, old, FALSE);
+ unlock_tvar(cap, trec, s, old, false);
}
}
c = c -> prev_chunk;
@@ -1315,7 +1304,6 @@ StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *
/*......................................................................*/
StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
- int result;
StgInt64 max_commits_at_start = max_commits;
StgBool touched_invariants;
StgBool use_read_phase;
@@ -1386,7 +1374,7 @@ StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
use_read_phase = ((config_use_read_phase) && (!touched_invariants));
- result = validate_and_acquire_ownership(cap, trec, (!use_read_phase), TRUE);
+ bool result = validate_and_acquire_ownership(cap, trec, (!use_read_phase), true);
if (result) {
// We now know that all the updated locations hold their expected values.
ASSERT(trec -> state == TREC_ACTIVE);
@@ -1402,7 +1390,7 @@ StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
max_concurrent_commits = ((max_commits_at_end - max_commits_at_start) +
(n_capabilities * TOKEN_BATCH_SIZE));
if (((max_concurrent_commits >> 32) > 0) || shake()) {
- result = FALSE;
+ result = false;
}
}
@@ -1446,12 +1434,12 @@ StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
IF_STM_FG_LOCKS({
s -> num_updates ++;
});
- unlock_tvar(cap, trec, s, e -> new_value, TRUE);
+ unlock_tvar(cap, trec, s, e -> new_value, true);
}
ACQ_ASSERT(!tvar_is_locked(s, trec));
});
} else {
- revert_ownership(cap, trec, FALSE);
+ revert_ownership(cap, trec, false);
}
}
@@ -1468,7 +1456,6 @@ StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
StgTRecHeader *et;
- int result;
ASSERT(trec != NO_TREC && trec -> enclosing_trec != NO_TREC);
TRACE("%p : stmCommitNestedTransaction() into %p", trec, trec -> enclosing_trec);
ASSERT((trec -> state == TREC_ACTIVE) || (trec -> state == TREC_CONDEMNED));
@@ -1476,7 +1463,7 @@ StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
lock_stm(trec);
et = trec -> enclosing_trec;
- result = validate_and_acquire_ownership(cap, trec, (!config_use_read_phase), TRUE);
+ bool result = validate_and_acquire_ownership(cap, trec, (!config_use_read_phase), true);
if (result) {
// We now know that all the updated locations hold their expected values.
@@ -1497,13 +1484,13 @@ StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
StgTVar *s;
s = e -> tvar;
if (entry_is_update(e)) {
- unlock_tvar(cap, trec, s, e -> expected_value, FALSE);
+ unlock_tvar(cap, trec, s, e -> expected_value, false);
}
merge_update_into(cap, et, s, e -> expected_value, e -> new_value);
ACQ_ASSERT(s -> current_value != (StgClosure *)trec);
});
} else {
- revert_ownership(cap, trec, FALSE);
+ revert_ownership(cap, trec, false);
}
}
@@ -1519,7 +1506,6 @@ StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
/*......................................................................*/
StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
- int result;
TRACE("%p : stmWait(%p)", trec, tso);
ASSERT(trec != NO_TREC);
ASSERT(trec -> enclosing_trec == NO_TREC);
@@ -1527,7 +1513,7 @@ StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
(trec -> state == TREC_CONDEMNED));
lock_stm(trec);
- result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
+ bool result = validate_and_acquire_ownership(cap, trec, true, true);
if (result) {
// The transaction is valid so far so we can actually start waiting.
// (Otherwise the transaction was not valid and the thread will have to
@@ -1560,14 +1546,13 @@ StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
void
stmWaitUnlock(Capability *cap, StgTRecHeader *trec) {
- revert_ownership(cap, trec, TRUE);
+ revert_ownership(cap, trec, true);
unlock_stm(trec);
}
/*......................................................................*/
StgBool stmReWait(Capability *cap, StgTSO *tso) {
- int result;
StgTRecHeader *trec = tso->trec;
TRACE("%p : stmReWait", trec);
@@ -1577,14 +1562,14 @@ StgBool stmReWait(Capability *cap, StgTSO *tso) {
(trec -> state == TREC_CONDEMNED));
lock_stm(trec);
- result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
+ bool result = validate_and_acquire_ownership(cap, trec, true, true);
TRACE("%p : validation %s", trec, result ? "succeeded" : "failed");
if (result) {
// The transaction remains valid -- do nothing because it is already on
// the wait queues
ASSERT(trec -> state == TREC_WAITING);
park_tso(tso);
- revert_ownership(cap, trec, TRUE);
+ revert_ownership(cap, trec, true);
} else {
// The transaction has become invalid. We can now remove it from the wait
// queues.
diff --git a/rts/STM.h b/rts/STM.h
index ffec009577..6dfa20d0ee 100644
--- a/rts/STM.h
+++ b/rts/STM.h
@@ -92,7 +92,7 @@ void stmCondemnTransaction(Capability *cap, StgTRecHeader *trec);
it is nested, are still valid.
Note: the caller can assume that once stmValidateTransaction has
- returned FALSE for a given trec then that transaction will never
+ returned false for a given trec then that transaction will never
again be valid -- we rely on this in Schedule.c when kicking invalid
threads at GC (in case they are stuck looping)
*/
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 41c68bec43..2c862af848 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -76,7 +76,7 @@ StgTSO *sleeping_queue = NULL; // perhaps replace with a hash table?
* enough space, and the runtime should proceed to shut itself down in
* an orderly fashion (emitting profiling info etc.)
*/
-rtsBool heap_overflow = rtsFalse;
+bool heap_overflow = false;
/* flag that tracks whether we have done any execution in this time slice.
* LOCK: currently none, perhaps we should lock (but needs to be
@@ -120,12 +120,12 @@ static void scheduleFindWork (Capability **pcap);
static void scheduleYield (Capability **pcap, Task *task);
#endif
#if defined(THREADED_RTS)
-static rtsBool requestSync (Capability **pcap, Task *task,
- PendingSync *sync_type, SyncType *prev_sync_type);
+static bool requestSync (Capability **pcap, Task *task,
+ PendingSync *sync_type, SyncType *prev_sync_type);
static void acquireAllCapabilities(Capability *cap, Task *task);
static void releaseAllCapabilities(uint32_t n, Capability *cap, Task *task);
static void startWorkerTasks (uint32_t from USED_IF_THREADS,
- uint32_t to USED_IF_THREADS);
+ uint32_t to USED_IF_THREADS);
#endif
static void scheduleStartSignalHandlers (Capability *cap);
static void scheduleCheckBlockedThreads (Capability *cap);
@@ -136,14 +136,14 @@ static void schedulePushWork(Capability *cap, Task *task);
static void scheduleActivateSpark(Capability *cap);
#endif
static void schedulePostRunThread(Capability *cap, StgTSO *t);
-static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
-static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t,
- uint32_t prev_what_next );
+static bool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
+static bool scheduleHandleYield( Capability *cap, StgTSO *t,
+ uint32_t prev_what_next );
static void scheduleHandleThreadBlocked( StgTSO *t );
-static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
- StgTSO *t );
-static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc);
-static void scheduleDoGC(Capability **pcap, Task *task, rtsBool force_major);
+static bool scheduleHandleThreadFinished( Capability *cap, Task *task,
+ StgTSO *t );
+static bool scheduleNeedHeapProfile(bool ready_to_gc);
+static void scheduleDoGC(Capability **pcap, Task *task, bool force_major);
static void deleteThread (Capability *cap, StgTSO *tso);
static void deleteAllThreads (Capability *cap);
@@ -173,9 +173,9 @@ schedule (Capability *initialCapability, Task *task)
Capability *cap;
StgThreadReturnCode ret;
uint32_t prev_what_next;
- rtsBool ready_to_gc;
+ bool ready_to_gc;
#if defined(THREADED_RTS)
- rtsBool first = rtsTrue;
+ bool first = true;
#endif
cap = initialCapability;
@@ -246,7 +246,7 @@ schedule (Capability *initialCapability, Task *task)
case SCHED_INTERRUPTING:
debugTrace(DEBUG_sched, "SCHED_INTERRUPTING");
/* scheduleDoGC() deletes all the threads */
- scheduleDoGC(&cap,task,rtsTrue);
+ scheduleDoGC(&cap,task,true);
// after scheduleDoGC(), we must be shutting down. Either some
// other Capability did the final GC, or we did it above,
@@ -292,7 +292,7 @@ schedule (Capability *initialCapability, Task *task)
// // don't yield the first time, we want a chance to run this
// // thread for a bit, even if there are others banging at the
// // door.
- // first = rtsFalse;
+ // first = false;
// ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
}
@@ -412,7 +412,7 @@ run_thread:
// reset the interrupt flag before running Haskell code
cap->interrupt = 0;
- cap->in_haskell = rtsTrue;
+ cap->in_haskell = true;
cap->idle = 0;
dirty_TSO(cap,t);
@@ -470,7 +470,7 @@ run_thread:
barf("schedule: invalid what_next field");
}
- cap->in_haskell = rtsFalse;
+ cap->in_haskell = false;
// The TSO might have moved, eg. if it re-entered the RTS and a GC
// happened. So find the new location:
@@ -514,7 +514,7 @@ run_thread:
schedulePostRunThread(cap,t);
- ready_to_gc = rtsFalse;
+ ready_to_gc = false;
switch (ret) {
case HeapOverflow:
@@ -549,7 +549,7 @@ run_thread:
}
if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
- scheduleDoGC(&cap,task,rtsFalse);
+ scheduleDoGC(&cap,task,false);
}
} /* end of while() */
}
@@ -621,8 +621,8 @@ scheduleFindWork (Capability **pcap)
}
#if defined(THREADED_RTS)
-STATIC_INLINE rtsBool
-shouldYieldCapability (Capability *cap, Task *task, rtsBool didGcLast)
+STATIC_INLINE bool
+shouldYieldCapability (Capability *cap, Task *task, bool didGcLast)
{
// we need to yield this capability to someone else if..
// - another thread is initiating a GC, and we didn't just do a GC
@@ -660,11 +660,11 @@ static void
scheduleYield (Capability **pcap, Task *task)
{
Capability *cap = *pcap;
- int didGcLast = rtsFalse;
+ bool didGcLast = false;
// if we have work, and we don't need to give up the Capability, continue.
//
- if (!shouldYieldCapability(cap,task,rtsFalse) &&
+ if (!shouldYieldCapability(cap,task,false) &&
(!emptyRunQueue(cap) ||
!emptyInbox(cap) ||
sched_state >= SCHED_INTERRUPTING)) {
@@ -919,9 +919,9 @@ scheduleDetectDeadlock (Capability **pcap, Task *task)
// they are unreachable and will therefore be sent an
// exception. Any threads thus released will be immediately
// runnable.
- scheduleDoGC (pcap, task, rtsTrue/*force major GC*/);
+ scheduleDoGC (pcap, task, true/*force major GC*/);
cap = *pcap;
- // when force_major == rtsTrue. scheduleDoGC sets
+ // when force_major == true, scheduleDoGC sets
// recent_activity to ACTIVITY_DONE_GC and turns off the timer
// signal.
@@ -989,7 +989,7 @@ scheduleProcessInbox (Capability **pcap USED_IF_THREADS)
while (!emptyInbox(cap)) {
if (cap->r.rCurrentNursery->link == NULL ||
g0->n_new_large_words >= large_alloc_lim) {
- scheduleDoGC(pcap, cap->running_task, rtsFalse);
+ scheduleDoGC(pcap, cap->running_task, false);
cap = *pcap;
}
@@ -1074,7 +1074,7 @@ schedulePostRunThread (Capability *cap, StgTSO *t)
// ATOMICALLY_FRAME, aborting the (nested)
// transaction, and saving the stack of any
// partially-evaluated thunks on the heap.
- throwToSingleThreaded_(cap, t, NULL, rtsTrue);
+ throwToSingleThreaded_(cap, t, NULL, true);
// ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
}
@@ -1102,7 +1102,7 @@ schedulePostRunThread (Capability *cap, StgTSO *t)
* Handle a thread that returned to the scheduler with ThreadHeapOverflow
* -------------------------------------------------------------------------- */
-static rtsBool
+static bool
scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
{
if (cap->r.rHpLim == NULL || cap->context_switch) {
@@ -1173,24 +1173,24 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
// run queue before us and steal the large block, but in that
// case the thread will just end up requesting another large
// block.
- return rtsFalse; /* not actually GC'ing */
+ return false; /* not actually GC'ing */
}
}
// if we got here because we exceeded large_alloc_lim, then
// proceed straight to GC.
if (g0->n_new_large_words >= large_alloc_lim) {
- return rtsTrue;
+ return true;
}
// Otherwise, we just ran out of space in the current nursery.
// Grab another nursery if we can.
if (getNewNursery(cap)) {
debugTrace(DEBUG_sched, "thread %ld got a new nursery", t->id);
- return rtsFalse;
+ return false;
}
- return rtsTrue;
+ return true;
/* actual GC is done at the end of the while loop in schedule() */
}
@@ -1198,7 +1198,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
* Handle a thread that returned to the scheduler with ThreadYielding
* -------------------------------------------------------------------------- */
-static rtsBool
+static bool
scheduleHandleYield( Capability *cap, StgTSO *t, uint32_t prev_what_next )
{
/* put the thread back on the run queue. Then, if we're ready to
@@ -1216,7 +1216,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, uint32_t prev_what_next )
debugTrace(DEBUG_sched,
"--<< thread %ld (%s) stopped to switch evaluators",
(long)t->id, what_next_strs[t->what_next]);
- return rtsTrue;
+ return true;
}
// Reset the context switch flag. We don't do this just before
@@ -1236,7 +1236,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, uint32_t prev_what_next )
//debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
checkTSO(t));
- return rtsFalse;
+ return false;
}
/* -----------------------------------------------------------------------------
@@ -1270,7 +1270,7 @@ scheduleHandleThreadBlocked( StgTSO *t
* Handle a thread that returned to the scheduler with ThreadFinished
* -------------------------------------------------------------------------- */
-static rtsBool
+static bool
scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
{
/* Need to check whether this was a main thread, and if so,
@@ -1305,7 +1305,7 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
// this thread and its return value (it gets dropped from the
// step->threads list so there's no other way to find it).
appendToRunQueue(cap,t);
- return rtsFalse;
+ return false;
#else
// this cannot happen in the threaded RTS, because a
// bound thread can only be run by the appropriate Task.
@@ -1349,27 +1349,27 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
t->bound = NULL;
task->incall->tso = NULL;
- return rtsTrue; // tells schedule() to return
+ return true; // tells schedule() to return
}
- return rtsFalse;
+ return false;
}
/* -----------------------------------------------------------------------------
* Perform a heap census
* -------------------------------------------------------------------------- */
-static rtsBool
-scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
+static bool
+scheduleNeedHeapProfile( bool ready_to_gc STG_UNUSED )
{
// When we have +RTS -i0 and we're heap profiling, do a census at
// every GC. This lets us get repeatable runs for debugging.
if (performHeapProfile ||
(RtsFlags.ProfFlags.heapProfileInterval==0 &&
RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
- return rtsTrue;
+ return true;
} else {
- return rtsFalse;
+ return false;
}
}
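
A small readability dividend of a real bool type: a predicate like scheduleNeedHeapProfile could now return its condition directly instead of branching to true/false. The commit keeps the explicit branch; the sketch below (with invented stand-ins for performHeapProfile and the ProfFlags fields) shows the equivalent one-liner:

    #include <stdbool.h>

    static bool perform_heap_profile = false;
    static long heap_profile_interval = 0;
    static bool do_heap_profile = true;

    static bool schedule_need_heap_profile(bool ready_to_gc) {
        /* Same truth table as the if/else in the diff, as one expression. */
        return perform_heap_profile ||
               (heap_profile_interval == 0 && do_heap_profile && ready_to_gc);
    }

    int main(void) { return schedule_need_heap_profile(true) ? 0 : 1; }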
@@ -1386,7 +1386,7 @@ scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
#if defined(THREADED_RTS)
static void stopAllCapabilities (Capability **pCap, Task *task)
{
- rtsBool was_syncing;
+ bool was_syncing;
SyncType prev_sync_type;
PendingSync sync = {
@@ -1413,14 +1413,14 @@ static void stopAllCapabilities (Capability **pCap, Task *task)
* has some special synchronisation requirements.
*
* Returns:
- * rtsFalse if we successfully got a sync
- * rtsTrue if there was another sync request in progress,
+ * false if we successfully got a sync
+ * true if there was another sync request in progress,
* and we yielded to it. The value returned is the
* type of the other sync request.
* -------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
-static rtsBool requestSync (
+static bool requestSync (
Capability **pcap, Task *task, PendingSync *new_sync,
SyncType *prev_sync_type)
{
@@ -1440,16 +1440,16 @@ static rtsBool requestSync (
debugTrace(DEBUG_sched, "someone else is trying to sync (%d)...",
sync->type);
ASSERT(*pcap);
- yieldCapability(pcap,task,rtsTrue);
+ yieldCapability(pcap,task,true);
sync = pending_sync;
} while (sync != NULL);
// NOTE: task->cap might have changed now
- return rtsTrue;
+ return true;
}
else
{
- return rtsFalse;
+ return false;
}
}
#endif
@@ -1521,12 +1521,12 @@ static void releaseAllCapabilities(uint32_t n, Capability *cap, Task *task)
static void
scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
- rtsBool force_major)
+ bool force_major)
{
Capability *cap = *pcap;
- rtsBool heap_census;
+ bool heap_census;
uint32_t collect_gen;
- rtsBool major_gc;
+ bool major_gc;
#ifdef THREADED_RTS
uint32_t gc_type;
uint32_t i;
@@ -1534,7 +1534,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
uint32_t n_gc_threads;
uint32_t n_idle_caps = 0, n_failed_trygrab_idles = 0;
StgTSO *tso;
- rtsBool *idle_cap;
+ bool *idle_cap;
// idle_cap is an array (allocated later) of size n_capabilities, where
// idle_cap[i] is true if capability i will be idle during this GC
// cycle.
@@ -1547,7 +1547,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
return;
}
- heap_census = scheduleNeedHeapProfile(rtsTrue);
+ heap_census = scheduleNeedHeapProfile(true);
// Figure out which generation we are collecting, so that we can
// decide whether this is a parallel GC or not.
@@ -1582,7 +1582,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
{
SyncType prev_sync = 0;
- rtsBool was_syncing;
+ bool was_syncing;
do {
// If -qn is not set and we have more capabilities than cores, set
// the number of GC threads to #cores. We do this here rather than
@@ -1610,9 +1610,9 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
// We need an array of size n_capabilities, but since this may
// change each time around the loop we must allocate it afresh.
- idle_cap = (rtsBool *)stgMallocBytes(n_capabilities *
- sizeof(rtsBool),
- "scheduleDoGC");
+ idle_cap = (bool *)stgMallocBytes(n_capabilities *
+ sizeof(bool),
+ "scheduleDoGC");
sync.idle = idle_cap;
// When using +RTS -qn, we need some capabilities to be idle during
@@ -1621,21 +1621,21 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
uint32_t n_idle = need_idle;
for (i=0; i < n_capabilities; i++) {
if (capabilities[i]->disabled) {
- idle_cap[i] = rtsTrue;
+ idle_cap[i] = true;
} else if (n_idle > 0 &&
capabilities[i]->running_task == NULL) {
debugTrace(DEBUG_sched, "asking for cap %d to be idle", i);
n_idle--;
- idle_cap[i] = rtsTrue;
+ idle_cap[i] = true;
} else {
- idle_cap[i] = rtsFalse;
+ idle_cap[i] = false;
}
}
// If we didn't find enough inactive capabilities, just pick some
// more to be idle.
for (i=0; n_idle > 0 && i < n_capabilities; i++) {
if (!idle_cap[i] && i != cap->no) {
- idle_cap[i] = rtsTrue;
+ idle_cap[i] = true;
n_idle--;
}
}
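
One subtlety in the hunk above: the stgMallocBytes() change is not purely textual. sizeof(bool) is typically 1 whereas the old enum-typed rtsBool occupied sizeof(int), so the idle_cap array genuinely shrinks. A hedged sketch of the allocation, with plain malloc standing in for stgMallocBytes:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    /* Allocate and clear a per-capability idle-flag array. */
    static bool *alloc_idle_caps_sketch(unsigned n_capabilities)
    {
        bool *idle_cap = malloc(n_capabilities * sizeof(bool));
        if (idle_cap != NULL)
            memset(idle_cap, 0, n_capabilities * sizeof(bool)); /* all false */
        return idle_cap;
    }
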
@@ -1854,7 +1854,7 @@ delete_threads_and_gc:
// The heap census itself is done during GarbageCollect().
if (heap_census) {
- performHeapProfile = rtsFalse;
+ performHeapProfile = false;
}
#if defined(THREADED_RTS)
@@ -1991,7 +1991,7 @@ forkProcess(HsStablePtr *entry
RELEASE_LOCK(&task->lock);
for (i=0; i < n_capabilities; i++) {
- releaseCapability_(capabilities[i],rtsFalse);
+ releaseCapability_(capabilities[i],false);
RELEASE_LOCK(&capabilities[i]->lock);
}
@@ -2185,7 +2185,7 @@ setNumCapabilities (uint32_t new_n_capabilities USED_IF_THREADS)
// structures, the nursery, etc.
//
for (n = new_n_capabilities; n < enabled_capabilities; n++) {
- capabilities[n]->disabled = rtsTrue;
+ capabilities[n]->disabled = true;
traceCapDisable(capabilities[n]);
}
enabled_capabilities = new_n_capabilities;
@@ -2197,7 +2197,7 @@ setNumCapabilities (uint32_t new_n_capabilities USED_IF_THREADS)
// enable any disabled capabilities, up to the required number
for (n = enabled_capabilities;
n < new_n_capabilities && n < n_capabilities; n++) {
- capabilities[n]->disabled = rtsFalse;
+ capabilities[n]->disabled = false;
traceCapEnable(capabilities[n]);
}
enabled_capabilities = n;
@@ -2337,7 +2337,7 @@ recoverSuspendedTask (Capability *cap, Task *task)
* ------------------------------------------------------------------------- */
void *
-suspendThread (StgRegTable *reg, rtsBool interruptible)
+suspendThread (StgRegTable *reg, bool interruptible)
{
Capability *cap;
int saved_errno;
@@ -2382,8 +2382,8 @@ suspendThread (StgRegTable *reg, rtsBool interruptible)
ACQUIRE_LOCK(&cap->lock);
suspendTask(cap,task);
- cap->in_haskell = rtsFalse;
- releaseCapability_(cap,rtsFalse);
+ cap->in_haskell = false;
+ releaseCapability_(cap,false);
RELEASE_LOCK(&cap->lock);
@@ -2442,7 +2442,7 @@ resumeThread (void *task_)
}
cap->r.rCurrentTSO = tso;
- cap->in_haskell = rtsTrue;
+ cap->in_haskell = true;
errno = saved_errno;
#if mingw32_HOST_OS
SetLastError(saved_winerror);
@@ -2550,7 +2550,7 @@ void scheduleWorker (Capability *cap, Task *task)
// Capability has been shut down.
//
ACQUIRE_LOCK(&cap->lock);
- releaseCapability_(cap,rtsFalse);
+ releaseCapability_(cap,false);
workerTaskStop(task);
RELEASE_LOCK(&cap->lock);
}
@@ -2626,7 +2626,7 @@ initScheduler(void)
}
void
-exitScheduler (rtsBool wait_foreign USED_IF_THREADS)
+exitScheduler (bool wait_foreign USED_IF_THREADS)
/* see Capability.c, shutdownCapability() */
{
Task *task = NULL;
@@ -2638,7 +2638,7 @@ exitScheduler (rtsBool wait_foreign USED_IF_THREADS)
sched_state = SCHED_INTERRUPTING;
Capability *cap = task->cap;
waitForCapability(&cap,task);
- scheduleDoGC(&cap,task,rtsTrue);
+ scheduleDoGC(&cap,task,true);
ASSERT(task->incall->tso == NULL);
releaseCapability(cap);
}
@@ -2693,7 +2693,7 @@ void markScheduler (evac_fn evac USED_IF_NOT_THREADS,
-------------------------------------------------------------------------- */
static void
-performGC_(rtsBool force_major)
+performGC_(bool force_major)
{
Task *task;
Capability *cap = NULL;
@@ -2714,13 +2714,13 @@ performGC_(rtsBool force_major)
void
performGC(void)
{
- performGC_(rtsFalse);
+ performGC_(false);
}
void
performMajorGC(void)
{
- performGC_(rtsTrue);
+ performGC_(true);
}
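
The two exported entry points differ only in the force_major flag they hand to performGC_(). A hedged usage sketch from foreign code linked against the RTS (the extern declarations mirror the exports above):

    #include <stdbool.h>

    extern void performGC(void);        /* let the RTS pick the generation */
    extern void performMajorGC(void);   /* force collection of the oldest one */

    static void flush_before_snapshot_sketch(void)
    {
        /* e.g. before measuring live heap from foreign code */
        performMajorGC();
    }
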
/* ---------------------------------------------------------------------------
diff --git a/rts/Schedule.h b/rts/Schedule.h
index a8d1fb8b76..1453af41ca 100644
--- a/rts/Schedule.h
+++ b/rts/Schedule.h
@@ -21,7 +21,7 @@
* Locks assumed : none
*/
void initScheduler (void);
-void exitScheduler (rtsBool wait_foreign);
+void exitScheduler (bool wait_foreign);
void freeScheduler (void);
void markScheduler (evac_fn evac, void *user);
@@ -101,7 +101,7 @@ extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
extern StgTSO *sleeping_queue;
#endif
-extern rtsBool heap_overflow;
+extern bool heap_overflow;
#if defined(THREADED_RTS)
extern Mutex sched_mutex;
@@ -208,13 +208,13 @@ appendToBlockedQueue(StgTSO *tso)
/* Check whether various thread queues are empty
*/
-INLINE_HEADER rtsBool
+INLINE_HEADER bool
emptyQueue (StgTSO *q)
{
return (q == END_TSO_QUEUE);
}
-INLINE_HEADER rtsBool
+INLINE_HEADER bool
emptyRunQueue(Capability *cap)
{
return cap->n_run_queue == 0;
@@ -233,7 +233,7 @@ truncateRunQueue(Capability *cap)
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif
-INLINE_HEADER rtsBool
+INLINE_HEADER bool
emptyThreadQueues(Capability *cap)
{
return emptyRunQueue(cap)
diff --git a/rts/Sparks.h b/rts/Sparks.h
index 98ee3c872d..fbbac08432 100644
--- a/rts/Sparks.h
+++ b/rts/Sparks.h
@@ -38,10 +38,10 @@ INLINE_HEADER StgClosure* reclaimSpark(SparkPool *pool);
// Returns True if the spark pool is empty (can give a false positive
// if the pool is almost empty).
-INLINE_HEADER rtsBool looksEmpty(SparkPool* deque);
+INLINE_HEADER bool looksEmpty(SparkPool* deque);
INLINE_HEADER StgClosure * tryStealSpark (SparkPool *pool);
-INLINE_HEADER rtsBool fizzledSpark (StgClosure *);
+INLINE_HEADER bool fizzledSpark (StgClosure *);
void freeSparkPool (SparkPool *pool);
void createSparkThread (Capability *cap);
@@ -60,7 +60,7 @@ INLINE_HEADER StgClosure* reclaimSpark(SparkPool *pool)
return popWSDeque(pool);
}
-INLINE_HEADER rtsBool looksEmpty(SparkPool* deque)
+INLINE_HEADER bool looksEmpty(SparkPool* deque)
{
return looksEmptyWSDeque(deque);
}
@@ -96,7 +96,7 @@ INLINE_HEADER StgClosure * tryStealSpark (SparkPool *pool)
// other pools before trying again.
}
-INLINE_HEADER rtsBool fizzledSpark (StgClosure *spark)
+INLINE_HEADER bool fizzledSpark (StgClosure *spark)
{
return (GET_CLOSURE_TAG(spark) != 0 || !closure_SHOULD_SPARK(spark));
}
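
fizzledSpark() above leans on pointer tagging: a nonzero tag in the low bits of a closure pointer means the closure is already evaluated, so running the spark would be wasted work. A hedged sketch of that test (the macro names are stand-ins, and the 3-bit tag assumes a 64-bit target):

    #include <stdbool.h>
    #include <stdint.h>

    #define TAG_BITS_SKETCH 3
    #define TAG_MASK_SKETCH ((1UL << TAG_BITS_SKETCH) - 1)

    /* True when the pointer's tag bits say "already evaluated". */
    static bool looks_evaluated_sketch(const void *closure)
    {
        return ((uintptr_t)closure & TAG_MASK_SKETCH) != 0;
    }
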
diff --git a/rts/Stable.c b/rts/Stable.c
index 9f34072e61..85970c809a 100644
--- a/rts/Stable.c
+++ b/rts/Stable.c
@@ -588,7 +588,7 @@ gcStableTables( void )
* -------------------------------------------------------------------------- */
void
-updateStableTables(rtsBool full)
+updateStableTables(bool full)
{
if (full && addrToStableHash != NULL && 0 != keyCountHashTable(addrToStableHash)) {
freeHashTable(addrToStableHash,NULL);
diff --git a/rts/Stable.h b/rts/Stable.h
index 4786d477f3..679b4e8e41 100644
--- a/rts/Stable.h
+++ b/rts/Stable.h
@@ -39,7 +39,7 @@ void markStableTables ( evac_fn evac, void *user );
void threadStableTables ( evac_fn evac, void *user );
void gcStableTables ( void );
-void updateStableTables ( rtsBool full );
+void updateStableTables ( bool full );
void stableLock ( void );
void stableUnlock ( void );
diff --git a/rts/Stats.c b/rts/Stats.c
index d10738aadc..8fe9adf304 100644
--- a/rts/Stats.c
+++ b/rts/Stats.c
@@ -232,20 +232,11 @@ stat_startGCSync (gc_thread *gct)
Called at the beginning of each GC
-------------------------------------------------------------------------- */
-static uint32_t rub_bell = 0;
-
void
stat_startGC (Capability *cap, gc_thread *gct)
{
- uint32_t bell = RtsFlags.GcFlags.ringBell;
-
- if (bell) {
- if (bell > 1) {
- debugBelch(" GC ");
- rub_bell = 1;
- } else {
- debugBelch("\007");
- }
+ if (RtsFlags.GcFlags.ringBell) {
+ debugBelch("\007");
}
getProcessTimes(&gct->gc_start_cpu, &gct->gc_start_elapsed);
@@ -391,11 +382,6 @@ stat_endGC (Capability *cap, gc_thread *gct,
if (slop > max_slop) max_slop = slop;
}
-
- if (rub_bell) {
- debugBelch("\b\b\b \b\b\b");
- rub_bell = 0;
- }
}
/* -----------------------------------------------------------------------------
@@ -496,7 +482,7 @@ StgInt TOTAL_CALLS=1;
/* Report the value of a counter */
#define REPORT(counter) \
{ \
- showStgWord64(counter,temp,rtsTrue/*commas*/); \
+ showStgWord64(counter,temp,true/*commas*/); \
statsPrintf(" (" #counter ") : %s\n",temp); \
}
@@ -589,21 +575,21 @@ stat_exit (void)
if (RtsFlags.GcFlags.giveStats >= SUMMARY_GC_STATS) {
showStgWord64(GC_tot_alloc*sizeof(W_),
- temp, rtsTrue/*commas*/);
+ temp, true/*commas*/);
statsPrintf("%16s bytes allocated in the heap\n", temp);
showStgWord64(GC_tot_copied*sizeof(W_),
- temp, rtsTrue/*commas*/);
+ temp, true/*commas*/);
statsPrintf("%16s bytes copied during GC\n", temp);
if ( residency_samples > 0 ) {
showStgWord64(max_residency*sizeof(W_),
- temp, rtsTrue/*commas*/);
+ temp, true/*commas*/);
statsPrintf("%16s bytes maximum residency (%" FMT_Word " sample(s))\n",
temp, residency_samples);
}
- showStgWord64(max_slop*sizeof(W_), temp, rtsTrue/*commas*/);
+ showStgWord64(max_slop*sizeof(W_), temp, true/*commas*/);
statsPrintf("%16s bytes maximum slop\n", temp);
statsPrintf("%16" FMT_SizeT " MB total memory in use (%" FMT_SizeT " MB lost due to fragmentation)\n\n",
@@ -686,11 +672,11 @@ stat_exit (void)
#endif
if (mut_cpu == 0) {
- showStgWord64(0, temp, rtsTrue/*commas*/);
+ showStgWord64(0, temp, true/*commas*/);
} else {
showStgWord64(
(StgWord64)((GC_tot_alloc*sizeof(W_)) / TimeToSecondsDbl(mut_cpu)),
- temp, rtsTrue/*commas*/);
+ temp, true/*commas*/);
}
statsPrintf(" Alloc rate %s bytes per MUT second\n\n", temp);
@@ -852,7 +838,7 @@ extern HsInt64 getAllocations( void )
/* EZY: I'm not convinced I got all the casting right. */
-extern rtsBool getGCStatsEnabled( void )
+extern bool getGCStatsEnabled( void )
{
return RtsFlags.GcFlags.giveStats != NO_GC_STATS;
}
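
The true/*commas*/ flag threaded through the statistics code above asks showStgWord64() for thousands-separated output. A hedged sketch of such a formatter (the real RTS routine differs in detail):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Render x into buf, optionally grouping digits by thousands. */
    static void show_word64_sketch(uint64_t x, char *buf, bool commas)
    {
        char raw[32];
        int n = snprintf(raw, sizeof raw, "%llu", (unsigned long long)x);
        if (!commas) { memcpy(buf, raw, (size_t)n + 1); return; }
        int lead = n % 3 ? n % 3 : 3;    /* digits before the first comma */
        int out = 0;
        for (int i = 0; i < n; i++) {
            if (i == lead || (i > lead && (i - lead) % 3 == 0))
                buf[out++] = ',';
            buf[out++] = raw[i];
        }
        buf[out] = '\0';
    }

    /* show_word64_sketch(1234567, buf, true) yields "1,234,567". */
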
diff --git a/rts/Task.c b/rts/Task.c
index 8ce4eccdb1..b8df4d2215 100644
--- a/rts/Task.c
+++ b/rts/Task.c
@@ -36,7 +36,7 @@ uint32_t peakWorkerCount;
static int tasksInitialized = 0;
static void freeTask (Task *task);
-static Task * newTask (rtsBool);
+static Task * newTask (bool);
#if defined(THREADED_RTS)
Mutex all_tasks_mutex;
@@ -124,7 +124,7 @@ Task* getTask (void)
if (task != NULL) {
return task;
} else {
- task = newTask(rtsFalse);
+ task = newTask(false);
#if defined(THREADED_RTS)
task->id = osThreadId();
#endif
@@ -198,7 +198,7 @@ freeTask (Task *task)
}
static Task*
-newTask (rtsBool worker)
+newTask (bool worker)
{
Task *task;
@@ -207,8 +207,8 @@ newTask (rtsBool worker)
task->cap = NULL;
task->worker = worker;
- task->stopped = rtsTrue;
- task->running_finalizers = rtsFalse;
+ task->stopped = true;
+ task->running_finalizers = false;
task->n_spare_incalls = 0;
task->spare_incalls = NULL;
task->incall = NULL;
@@ -217,7 +217,7 @@ newTask (rtsBool worker)
#if defined(THREADED_RTS)
initCondition(&task->cond);
initMutex(&task->lock);
- task->wakeup = rtsFalse;
+ task->wakeup = false;
task->node = 0;
#endif
@@ -304,7 +304,7 @@ newBoundTask (void)
task = getTask();
- task->stopped = rtsFalse;
+ task->stopped = false;
newInCall(task);
@@ -327,7 +327,7 @@ boundTaskExiting (Task *task)
// call and then a callback, so it can transform into a bound
// Task for the duration of the callback.
if (task->incall == NULL) {
- task->stopped = rtsTrue;
+ task->stopped = true;
}
debugTrace(DEBUG_sched, "task exiting");
@@ -449,8 +449,8 @@ startWorkerTask (Capability *cap)
Task *task;
// A worker always gets a fresh Task structure.
- task = newTask(rtsTrue);
- task->stopped = rtsFalse;
+ task = newTask(true);
+ task->stopped = false;
// The lock here is to synchronise with taskStart(), to make sure
// that we have finished setting up the Task structure before the
diff --git a/rts/Task.h b/rts/Task.h
index 93234591ba..e5c21931a1 100644
--- a/rts/Task.h
+++ b/rts/Task.h
@@ -128,7 +128,7 @@ typedef struct Task_ {
// or just continue immediately. It's a workaround for the fact
// that signalling a condition variable doesn't do anything if the
// thread is already running, but we want it to be sticky.
- rtsBool wakeup;
+ bool wakeup;
#endif
// If the task owns a Capability, task->cap points to it. (occasionally a
@@ -149,12 +149,12 @@ typedef struct Task_ {
uint32_t n_spare_incalls;
struct InCall_ *spare_incalls;
- rtsBool worker; // == rtsTrue if this is a worker Task
- rtsBool stopped; // == rtsTrue between newBoundTask and
+ bool worker; // == true if this is a worker Task
+ bool stopped; // == true between newBoundTask and
// boundTaskExiting, or in a worker Task.
// So that we can detect when a finalizer illegally calls back into Haskell
- rtsBool running_finalizers;
+ bool running_finalizers;
// if >= 0, this Capability will be used for in-calls
int preferred_capability;
@@ -169,7 +169,7 @@ typedef struct Task_ {
} Task;
-INLINE_HEADER rtsBool
+INLINE_HEADER bool
isBoundTask (Task *task)
{
return (task->incall->tso != NULL);
@@ -180,7 +180,7 @@ isBoundTask (Task *task)
// (b) it has not left and re-entered Haskell, in which case
// task->incall->prev_stack would be non-NULL.
//
-INLINE_HEADER rtsBool
+INLINE_HEADER bool
isWorker (Task *task)
{
return (task->worker && task->incall->prev_stack == NULL);
diff --git a/rts/ThreadPaused.c b/rts/ThreadPaused.c
index e9b297bfbb..c270e69a51 100644
--- a/rts/ThreadPaused.c
+++ b/rts/ThreadPaused.c
@@ -200,7 +200,7 @@ threadPaused(Capability *cap, StgTSO *tso)
uint32_t words_to_squeeze = 0;
uint32_t weight = 0;
uint32_t weight_pending = 0;
- rtsBool prev_was_update_frame = rtsFalse;
+ bool prev_was_update_frame = false;
StgWord heuristic_says_squeeze;
// Check to see whether we have threads waiting to raise
@@ -300,7 +300,7 @@ threadPaused(Capability *cap, StgTSO *tso)
// And continue with threadPaused; there might be
// yet more computation to suspend.
frame = (StgClosure *)(tso->stackobj->sp + 2);
- prev_was_update_frame = rtsFalse;
+ prev_was_update_frame = false;
continue;
}
@@ -342,7 +342,7 @@ threadPaused(Capability *cap, StgTSO *tso)
weight += weight_pending;
weight_pending = 0;
}
- prev_was_update_frame = rtsTrue;
+ prev_was_update_frame = true;
break;
case UNDERFLOW_FRAME:
@@ -355,7 +355,7 @@ threadPaused(Capability *cap, StgTSO *tso)
uint32_t frame_size = stack_frame_sizeW(frame);
weight_pending += frame_size;
frame = (StgClosure *)((StgPtr)frame + frame_size);
- prev_was_update_frame = rtsFalse;
+ prev_was_update_frame = false;
}
}
}
@@ -373,7 +373,7 @@ end:
words_to_squeeze, weight,
heuristic_says_squeeze ? "YES" : "NO");
- if (RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue &&
+    if (RtsFlags.GcFlags.squeezeUpdFrames &&
heuristic_says_squeeze) {
stackSqueeze(cap, tso, (StgPtr)frame);
tso->flags |= TSO_SQUEEZED;
diff --git a/rts/Threads.c b/rts/Threads.c
index 1782da6114..f5eb9d360c 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -194,7 +194,7 @@ void rts_disableThreadAllocationLimit(StgPtr tso)
Fails fatally if the TSO is not on the queue.
-------------------------------------------------------------------------- */
-rtsBool // returns True if we modified queue
+bool // returns true if we modified queue
removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
{
StgTSO *t, *prev;
@@ -205,33 +205,33 @@ removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
if (prev) {
setTSOLink(cap,prev,t->_link);
t->_link = END_TSO_QUEUE;
- return rtsFalse;
+ return false;
} else {
*queue = t->_link;
t->_link = END_TSO_QUEUE;
- return rtsTrue;
+ return true;
}
}
}
barf("removeThreadFromQueue: not found");
}
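
The convention above, true only when the head pointer itself changed, lets callers know whether a cached head is stale. A hedged generic sketch of the same unlink logic on a plain singly linked list:

    #include <stdbool.h>
    #include <stddef.h>

    struct node_sketch { struct node_sketch *link; };

    /* Unlink x; report whether the head pointer had to change. */
    static bool remove_from_queue_sketch(struct node_sketch **queue,
                                         struct node_sketch *x)
    {
        struct node_sketch *prev = NULL;
        for (struct node_sketch *t = *queue; t != NULL; prev = t, t = t->link) {
            if (t != x) continue;
            if (prev != NULL) { prev->link = t->link; t->link = NULL; return false; }
            *queue = t->link; t->link = NULL; return true;
        }
        return false;   /* not found; the RTS version barf()s instead */
    }
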
-rtsBool // returns True if we modified head or tail
+bool // returns true if we modified head or tail
removeThreadFromDeQueue (Capability *cap,
StgTSO **head, StgTSO **tail, StgTSO *tso)
{
StgTSO *t, *prev;
- rtsBool flag = rtsFalse;
+ bool flag = false;
prev = NULL;
for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
if (t == tso) {
if (prev) {
setTSOLink(cap,prev,t->_link);
- flag = rtsFalse;
+ flag = false;
} else {
*head = t->_link;
- flag = rtsTrue;
+ flag = true;
}
t->_link = END_TSO_QUEUE;
if (*tail == tso) {
@@ -240,7 +240,7 @@ removeThreadFromDeQueue (Capability *cap,
} else {
*tail = END_TSO_QUEUE;
}
- return rtsTrue;
+ return true;
} else {
return flag;
}
@@ -503,7 +503,7 @@ isThreadBound(StgTSO* tso USED_IF_THREADS)
#if defined(THREADED_RTS)
return (tso->bound != NULL);
#endif
- return rtsFalse;
+ return false;
}
/* -----------------------------------------------------------------------------
@@ -749,7 +749,7 @@ threadStackUnderflow (Capability *cap, StgTSO *tso)
NOTE: this should be kept in sync with stg_tryPutMVarzh in PrimOps.cmm
------------------------------------------------------------------------- */
-rtsBool performTryPutMVar(Capability *cap, StgMVar *mvar, StgClosure *value)
+bool performTryPutMVar(Capability *cap, StgMVar *mvar, StgClosure *value)
{
const StgInfoTable *info;
StgMVarTSOQueue *q;
@@ -761,7 +761,7 @@ rtsBool performTryPutMVar(Capability *cap, StgMVar *mvar, StgClosure *value)
#if defined(THREADED_RTS)
unlockClosure((StgClosure*)mvar, info);
#endif
- return rtsFalse;
+ return false;
}
q = mvar->head;
@@ -774,7 +774,7 @@ loop:
mvar->value = value;
unlockClosure((StgClosure*)mvar, &stg_MVAR_DIRTY_info);
- return rtsTrue;
+ return true;
}
if (q->header.info == &stg_IND_info ||
q->header.info == &stg_MSG_NULL_info) {
@@ -819,7 +819,7 @@ loop:
unlockClosure((StgClosure*)mvar, info);
- return rtsTrue;
+ return true;
}
/* ----------------------------------------------------------------------------
diff --git a/rts/Threads.h b/rts/Threads.h
index 4588008e28..98792f4b71 100644
--- a/rts/Threads.h
+++ b/rts/Threads.h
@@ -14,7 +14,7 @@
#define END_BLOCKED_EXCEPTIONS_QUEUE ((MessageThrowTo*)END_TSO_QUEUE)
StgTSO * unblockOne (Capability *cap, StgTSO *tso);
-StgTSO * unblockOne_ (Capability *cap, StgTSO *tso, rtsBool allow_migrate);
+StgTSO * unblockOne_ (Capability *cap, StgTSO *tso, bool allow_migrate);
void checkBlockingQueues (Capability *cap, StgTSO *tso);
void tryWakeupThread (Capability *cap, StgTSO *tso);
@@ -32,8 +32,8 @@ void wakeupThreadOnCapability (Capability *cap,
void updateThunk (Capability *cap, StgTSO *tso,
StgClosure *thunk, StgClosure *val);
-rtsBool removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso);
-rtsBool removeThreadFromDeQueue (Capability *cap, StgTSO **head, StgTSO **tail, StgTSO *tso);
+bool removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso);
+bool removeThreadFromDeQueue (Capability *cap, StgTSO **head, StgTSO **tail, StgTSO *tso);
StgBool isThreadBound (StgTSO* tso);
@@ -41,7 +41,7 @@ StgBool isThreadBound (StgTSO* tso);
void threadStackOverflow (Capability *cap, StgTSO *tso);
W_ threadStackUnderflow (Capability *cap, StgTSO *tso);
-rtsBool performTryPutMVar(Capability *cap, StgMVar *mvar, StgClosure *value);
+bool performTryPutMVar(Capability *cap, StgMVar *mvar, StgClosure *value);
#ifdef DEBUG
void printThreadBlockage (StgTSO *tso);
diff --git a/rts/Ticker.h b/rts/Ticker.h
index 685a79e5d2..c276ec6017 100644
--- a/rts/Ticker.h
+++ b/rts/Ticker.h
@@ -16,7 +16,7 @@ typedef void (*TickProc)(int);
void initTicker (Time interval, TickProc handle_tick);
void startTicker (void);
void stopTicker (void);
-void exitTicker (rtsBool wait);
+void exitTicker (bool wait);
#include "EndPrivate.h"
diff --git a/rts/Ticky.c b/rts/Ticky.c
index 44d49b6551..4587097980 100644
--- a/rts/Ticky.c
+++ b/rts/Ticky.c
@@ -261,7 +261,7 @@ PrintTickyInfo(void)
*
* This of course refers to the -ticky version that uses PERM_INDs to
* determine the number of closures entered 0/1/>1. KSW 1999-04. */
- COND_PR_CTR(ENT_PERM_IND_ctr,RtsFlags.GcFlags.squeezeUpdFrames == rtsFalse,"E!NT_PERM_IND_ctr requires +RTS -Z");
+   COND_PR_CTR(ENT_PERM_IND_ctr,!RtsFlags.GcFlags.squeezeUpdFrames,"E!NT_PERM_IND_ctr requires +RTS -Z");
PR_CTR(ENT_AP_ctr);
PR_CTR(ENT_PAP_ctr);
@@ -334,10 +334,10 @@ PrintTickyInfo(void)
PR_CTR(UPD_NEW_IND_ctr);
/* see comment on ENT_PERM_IND_ctr */
- COND_PR_CTR(UPD_NEW_PERM_IND_ctr,RtsFlags.GcFlags.squeezeUpdFrames == rtsFalse,"U!PD_NEW_PERM_IND_ctr requires +RTS -Z");
+   COND_PR_CTR(UPD_NEW_PERM_IND_ctr,!RtsFlags.GcFlags.squeezeUpdFrames,"U!PD_NEW_PERM_IND_ctr requires +RTS -Z");
PR_CTR(UPD_OLD_IND_ctr);
/* see comment on ENT_PERM_IND_ctr */
- COND_PR_CTR(UPD_OLD_PERM_IND_ctr,RtsFlags.GcFlags.squeezeUpdFrames == rtsFalse,"U!PD_OLD_PERM_IND_ctr requires +RTS -Z");
+   COND_PR_CTR(UPD_OLD_PERM_IND_ctr,!RtsFlags.GcFlags.squeezeUpdFrames,"U!PD_OLD_PERM_IND_ctr requires +RTS -Z");
PR_CTR(GC_SEL_ABANDONED_ctr);
PR_CTR(GC_SEL_MINOR_ctr);
diff --git a/rts/Timer.c b/rts/Timer.c
index 9136c60f20..a8b0e20a5f 100644
--- a/rts/Timer.c
+++ b/rts/Timer.c
@@ -134,7 +134,7 @@ stopTimer(void)
}
void
-exitTimer (rtsBool wait)
+exitTimer (bool wait)
{
if (RtsFlags.MiscFlags.tickInterval != 0) {
exitTicker(wait);
diff --git a/rts/Timer.h b/rts/Timer.h
index b03ef0680f..daf0cc3d7f 100644
--- a/rts/Timer.h
+++ b/rts/Timer.h
@@ -10,6 +10,6 @@
#define TIMER_H
RTS_PRIVATE void initTimer (void);
-RTS_PRIVATE void exitTimer (rtsBool wait);
+RTS_PRIVATE void exitTimer (bool wait);
#endif /* TIMER_H */
diff --git a/rts/Trace.c b/rts/Trace.c
index 4eee0263b3..91680b7b8a 100644
--- a/rts/Trace.c
+++ b/rts/Trace.c
@@ -37,7 +37,7 @@ int TRACE_cap;
static Mutex trace_utx;
#endif
-static rtsBool eventlog_enabled;
+static bool eventlog_enabled;
/* ---------------------------------------------------------------------------
   Starting up / shutting down the tracing facilities
diff --git a/rts/WSDeque.c b/rts/WSDeque.c
index 45a0b10984..b9393b1839 100644
--- a/rts/WSDeque.c
+++ b/rts/WSDeque.c
@@ -233,7 +233,7 @@ stealWSDeque (WSDeque *q)
/* Enqueue an element. Should always succeed by resizing the array
   (resizing is not implemented yet; we silently fail in that case). */
-rtsBool
+bool
pushWSDeque (WSDeque* q, void * elem)
{
StgWord t;
@@ -267,7 +267,7 @@ pushWSDeque (WSDeque* q, void * elem)
*/
#if defined(DISCARD_NEW)
ASSERT_WSDEQUE_INVARIANTS(q);
- return rtsFalse; // we didn't push anything
+ return false; // we didn't push anything
#else
/* could make room by incrementing the top position here. In
* this case, should use CASTOP. If this fails, someone else has
@@ -291,5 +291,5 @@ pushWSDeque (WSDeque* q, void * elem)
q->bottom = b + 1;
ASSERT_WSDEQUE_INVARIANTS(q);
- return rtsTrue;
+ return true;
}
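
pushWSDeque() above opts to fail rather than grow under the DISCARD_NEW policy, so every caller must handle a false return. A hedged single-producer sketch of that contract on a fixed-size ring buffer (the RTS version is lock-free and tolerates concurrent stealers, which this does not):

    #include <stdbool.h>
    #include <stddef.h>

    #define DEQUE_CAP_SKETCH 64

    struct deque_sketch {
        void  *elems[DEQUE_CAP_SKETCH];
        size_t top, bottom;              /* bottom - top == element count */
    };

    /* Push onto the bottom; false when full instead of resizing. */
    static bool push_sketch(struct deque_sketch *q, void *elem)
    {
        if (q->bottom - q->top >= DEQUE_CAP_SKETCH)
            return false;                /* full: discard the new element */
        q->elems[q->bottom % DEQUE_CAP_SKETCH] = elem;
        q->bottom++;
        return true;
    }
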
diff --git a/rts/WSDeque.h b/rts/WSDeque.h
index 5a23746562..58c7a58cb4 100644
--- a/rts/WSDeque.h
+++ b/rts/WSDeque.h
@@ -76,7 +76,7 @@ void* popWSDeque (WSDeque *q);
// Push onto the "write" end of the pool. Return true if the push
// succeeded, or false if the deque is full.
-rtsBool pushWSDeque (WSDeque *q, void *elem);
+bool pushWSDeque (WSDeque *q, void *elem);
// Removes all elements from the deque
EXTERN_INLINE void discardElements (WSDeque *q);
@@ -93,7 +93,7 @@ void * stealWSDeque (WSDeque *q);
// "guesses" whether a deque is empty. Can return false negatives in
// presence of concurrent steal() calls, and false positives in
// presence of a concurrent pushBottom().
-EXTERN_INLINE rtsBool looksEmptyWSDeque (WSDeque* q);
+EXTERN_INLINE bool looksEmptyWSDeque (WSDeque* q);
EXTERN_INLINE long dequeElements (WSDeque *q);
@@ -110,7 +110,7 @@ dequeElements (WSDeque *q)
return ((long)b - (long)t);
}
-EXTERN_INLINE rtsBool
+EXTERN_INLINE bool
looksEmptyWSDeque (WSDeque *q)
{
return (dequeElements(q) <= 0);
diff --git a/rts/Weak.c b/rts/Weak.c
index 27854c91be..1974bb972e 100644
--- a/rts/Weak.c
+++ b/rts/Weak.c
@@ -39,7 +39,7 @@ runAllCFinalizers(StgWeak *list)
task = myTask();
if (task != NULL) {
- task->running_finalizers = rtsTrue;
+ task->running_finalizers = true;
}
for (w = list; w; w = w->link) {
@@ -56,7 +56,7 @@ runAllCFinalizers(StgWeak *list)
}
if (task != NULL) {
- task->running_finalizers = rtsFalse;
+ task->running_finalizers = false;
}
}
@@ -87,7 +87,7 @@ scheduleFinalizers(Capability *cap, StgWeak *list)
task = myTask();
if (task != NULL) {
- task->running_finalizers = rtsTrue;
+ task->running_finalizers = true;
}
// count number of finalizers, and kill all the weak pointers first...
@@ -116,7 +116,7 @@ scheduleFinalizers(Capability *cap, StgWeak *list)
}
if (task != NULL) {
- task->running_finalizers = rtsFalse;
+ task->running_finalizers = false;
}
// No finalizers to run?
diff --git a/rts/Weak.h b/rts/Weak.h
index fbdf18a861..d938979802 100644
--- a/rts/Weak.h
+++ b/rts/Weak.h
@@ -13,7 +13,7 @@
#include "BeginPrivate.h"
-extern rtsBool running_finalizers;
+extern bool running_finalizers;
extern StgWeak * dead_weak_ptr_list;
void runCFinalizers(StgCFinalizerList *list);
diff --git a/rts/hooks/OutOfHeap.c b/rts/hooks/OutOfHeap.c
index 5e68750d71..3058cdd856 100644
--- a/rts/hooks/OutOfHeap.c
+++ b/rts/hooks/OutOfHeap.c
@@ -20,7 +20,7 @@ OutOfHeapHook (W_ request_size, W_ heap_size) /* both sizes in bytes */
" bytes (%" FMT_Word " MB).",
heap_size, heap_size / (1024*1024));
- if (rtsConfig.rts_opts_suggestions == rtsTrue) {
+    if (rtsConfig.rts_opts_suggestions) {
if (rtsConfig.rts_opts_enabled == RtsOptsAll) {
errorBelch("Use `+RTS -M<size>' to increase it.");
diff --git a/rts/hooks/StackOverflow.c b/rts/hooks/StackOverflow.c
index 602700ad77..e1a90097e9 100644
--- a/rts/hooks/StackOverflow.c
+++ b/rts/hooks/StackOverflow.c
@@ -17,7 +17,7 @@ StackOverflowHook (W_ stack_size) /* in bytes */
errorBelch("Stack space overflow: current size %" FMT_Word " bytes.",
stack_size);
- if (rtsConfig.rts_opts_suggestions == rtsTrue) {
+    if (rtsConfig.rts_opts_suggestions) {
if (rtsConfig.rts_opts_enabled == RtsOptsAll) {
errorBelch("Use `+RTS -Ksize -RTS' to increase it.");
} else {
diff --git a/rts/linker/Elf.c b/rts/linker/Elf.c
index 6b3e3ac3b3..4672a5bc5e 100644
--- a/rts/linker/Elf.c
+++ b/rts/linker/Elf.c
@@ -60,9 +60,6 @@
*/
#define X86_64_ELF_NONPIC_HACK 1
-#define FALSE 0
-#define TRUE 1
-
#if defined(sparc_HOST_ARCH)
# define ELF_TARGET_SPARC /* Used inside <elf.h> */
#elif defined(i386_HOST_ARCH)
@@ -591,7 +588,7 @@ ocVerifyImage_ELF ( ObjectCode* oc )
("Portable Formats Specification, Version 1.1"). */
static int getSectionKind_ELF( Elf_Shdr *hdr, int *is_bss )
{
- *is_bss = FALSE;
+ *is_bss = false;
if (hdr->sh_type == SHT_PROGBITS
&& (hdr->sh_flags & SHF_ALLOC) && (hdr->sh_flags & SHF_EXECINSTR)) {
@@ -620,7 +617,7 @@ static int getSectionKind_ELF( Elf_Shdr *hdr, int *is_bss )
if (hdr->sh_type == SHT_NOBITS
&& (hdr->sh_flags & SHF_ALLOC) && (hdr->sh_flags & SHF_WRITE)) {
/* .bss-style section */
- *is_bss = TRUE;
+ *is_bss = true;
return SECTIONKIND_RWDATA;
}
@@ -683,7 +680,7 @@ ocGetNames_ELF ( ObjectCode* oc )
}
for (i = 0; i < shnum; i++) {
- int is_bss = FALSE;
+ int is_bss = false;
SectionKind kind = getSectionKind_ELF(&shdr[i], &is_bss);
SectionAlloc alloc = SECTION_NOMEM;
void *start = NULL, *mapped_start = NULL;
@@ -750,7 +747,7 @@ ocGetNames_ELF ( ObjectCode* oc )
// ie we should use j = shdr[i].sh_info
for (j = 0; j < nent; j++) {
- char isLocal = FALSE; /* avoids uninit-var warning */
+ char isLocal = false; /* avoids uninit-var warning */
HsBool isWeak = HS_BOOL_FALSE;
SymbolAddr* ad = NULL;
SymbolName* nm = strtab + stab[j].st_name;
@@ -773,7 +770,7 @@ ocGetNames_ELF ( ObjectCode* oc )
address. Otherwise leave ad == NULL. */
if (shndx == SHN_COMMON) {
- isLocal = FALSE;
+ isLocal = false;
ad = stgCallocBytes(1, stab[j].st_size, "ocGetNames_ELF(COMMON)");
/*
debugBelch("COMMON symbol, size %d name %s\n",
@@ -813,8 +810,8 @@ ocGetNames_ELF ( ObjectCode* oc )
ad = (SymbolAddr*)((intptr_t)sections[secno].start +
(intptr_t)stab[j].st_value);
if (ELF_ST_BIND(stab[j].st_info)==STB_LOCAL) {
- isLocal = TRUE;
- isWeak = FALSE;
+ isLocal = true;
+ isWeak = false;
} else { /* STB_GLOBAL or STB_WEAK */
#ifdef ELF_FUNCTION_DESC
/* dlsym() and the initialisation table both give us function
@@ -825,7 +822,7 @@ ocGetNames_ELF ( ObjectCode* oc )
#endif
IF_DEBUG(linker,debugBelch( "addOTabName(GLOB): %10p %s %s\n",
ad, oc->fileName, nm ));
- isLocal = FALSE;
+ isLocal = false;
isWeak = (ELF_ST_BIND(stab[j].st_info)==STB_WEAK);
}
}
@@ -1627,7 +1624,7 @@ int ocRunInit_ELF( ObjectCode *oc )
// it here, please file a bug report if it affects you.
for (i = 0; i < elf_shnum(ehdr); i++) {
init_t *init_start, *init_end, *init;
- int is_bss = FALSE;
+ int is_bss = false;
SectionKind kind = getSectionKind_ELF(&shdr[i], &is_bss);
if (kind == SECTIONKIND_CODE_OR_RODATA
&& 0 == memcmp(".init", sh_strtab + shdr[i].sh_name, 5)) {
diff --git a/rts/linker/LoadArchive.c b/rts/linker/LoadArchive.c
index 04bd455e43..f07eff7955 100644
--- a/rts/linker/LoadArchive.c
+++ b/rts/linker/LoadArchive.c
@@ -429,7 +429,7 @@ static HsInt loadArchive_ (pathchar *path)
sprintf(archiveMemberName, "%" PATH_FMT "(%.*s)",
path, (int)thisFileNameSize, fileName);
- oc = mkOc(path, image, memberSize, rtsFalse, archiveMemberName
+ oc = mkOc(path, image, memberSize, false, archiveMemberName
, misalignment);
stgFree(archiveMemberName);
diff --git a/rts/linker/SymbolExtras.c b/rts/linker/SymbolExtras.c
index 1973f7b117..468b3a9081 100644
--- a/rts/linker/SymbolExtras.c
+++ b/rts/linker/SymbolExtras.c
@@ -59,7 +59,7 @@ int ocAllocateSymbolExtras( ObjectCode* oc, int count, int first )
munmap(oc->image, n);
}
oc->image = new;
- oc->imageMapped = rtsTrue;
+ oc->imageMapped = true;
oc->fileSize = n + (sizeof(SymbolExtra) * count);
oc->symbol_extras = (SymbolExtra *) (oc->image + n);
}
diff --git a/rts/posix/OSMem.c b/rts/posix/OSMem.c
index febeffbd98..5291745d53 100644
--- a/rts/posix/OSMem.c
+++ b/rts/posix/OSMem.c
@@ -130,10 +130,10 @@ my_mmap (void *addr, W_ size, int operation)
{
if(addr) // try to allocate at address
err = vm_allocate(mach_task_self(),(vm_address_t*) &ret,
- size, FALSE);
+ size, false);
if(!addr || err) // try to allocate anywhere
err = vm_allocate(mach_task_self(),(vm_address_t*) &ret,
- size, TRUE);
+ size, true);
}
if(err) {
@@ -145,7 +145,7 @@ my_mmap (void *addr, W_ size, int operation)
}
if(operation & MEM_COMMIT) {
- vm_protect(mach_task_self(), (vm_address_t)ret, size, FALSE,
+ vm_protect(mach_task_self(), (vm_address_t)ret, size, false,
VM_PROT_READ|VM_PROT_WRITE);
}
@@ -399,7 +399,7 @@ StgWord64 getPhysicalMemorySize (void)
return physMemSize;
}
-void setExecutable (void *p, W_ len, rtsBool exec)
+void setExecutable (void *p, W_ len, bool exec)
{
StgWord pageSize = getPageSize();
@@ -562,12 +562,12 @@ void osReleaseHeapMemory(void)
#endif
-rtsBool osNumaAvailable(void)
+bool osNumaAvailable(void)
{
#if HAVE_LIBNUMA
return (numa_available() != -1);
#else
- return rtsFalse;
+ return false;
#endif
}
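
osNumaAvailable() above is the usual configure-time feature probe collapsed to a bool. A hedged sketch of the pattern, with HAVE_LIBNUMA_SKETCH standing in for the real configure macro:

    #include <stdbool.h>

    #ifdef HAVE_LIBNUMA_SKETCH
    #include <numa.h>
    /* libnuma reports unavailability as -1 from numa_available(). */
    static bool numa_available_sketch(void) { return numa_available() != -1; }
    #else
    static bool numa_available_sketch(void) { return false; }
    #endif
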
diff --git a/rts/posix/OSThreads.c b/rts/posix/OSThreads.c
index a52fbe5d37..45f394208f 100644
--- a/rts/posix/OSThreads.c
+++ b/rts/posix/OSThreads.c
@@ -99,19 +99,19 @@ closeCondition( Condition* pCond )
return;
}
-rtsBool
+bool
broadcastCondition ( Condition* pCond )
{
return (pthread_cond_broadcast(pCond) == 0);
}
-rtsBool
+bool
signalCondition ( Condition* pCond )
{
return (pthread_cond_signal(pCond) == 0);
}
-rtsBool
+bool
waitCondition ( Condition* pCond, Mutex* pMut )
{
return (pthread_cond_wait(pCond,pMut) == 0);
@@ -150,12 +150,12 @@ osThreadId(void)
return pthread_self();
}
-rtsBool
+bool
osThreadIsAlive(OSThreadId id STG_UNUSED)
{
// no good way to implement this on POSIX, AFAICT. Returning true
// is safe.
- return rtsTrue;
+ return true;
}
void
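
The Condition wrappers above fold pthreads' 0-on-success convention into a bool. A hedged standalone sketch of the same wrappers:

    #include <pthread.h>
    #include <stdbool.h>

    static bool signal_condition_sketch(pthread_cond_t *c)
    {
        return pthread_cond_signal(c) == 0;
    }

    /* Caller must hold m, as with any pthread_cond_wait(). */
    static bool wait_condition_sketch(pthread_cond_t *c, pthread_mutex_t *m)
    {
        return pthread_cond_wait(c, m) == 0;
    }
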
diff --git a/rts/posix/Select.c b/rts/posix/Select.c
index bd9ddfa3ed..3d3b70b565 100644
--- a/rts/posix/Select.c
+++ b/rts/posix/Select.c
@@ -93,10 +93,10 @@ LowResTime getDelayTarget (HsInt us)
* if this is true, then our time has expired.
* (idea due to Andy Gill).
*/
-static rtsBool wakeUpSleepingThreads (LowResTime now)
+static bool wakeUpSleepingThreads (LowResTime now)
{
StgTSO *tso;
- rtsBool flag = rtsFalse;
+ bool flag = false;
while (sleeping_queue != END_TSO_QUEUE) {
tso = sleeping_queue;
@@ -110,7 +110,7 @@ static rtsBool wakeUpSleepingThreads (LowResTime now)
(unsigned long)tso->id));
// MainCapability: this code is !THREADED_RTS
pushOnRunQueue(&MainCapability,tso);
- flag = rtsTrue;
+ flag = true;
}
return flag;
}
@@ -217,13 +217,13 @@ static enum FdState fdPollWriteState (int fd)
*
*/
void
-awaitEvent(rtsBool wait)
+awaitEvent(bool wait)
{
StgTSO *tso, *prev, *next;
fd_set rfd,wfd;
int numFound;
int maxfd = -1;
- rtsBool seen_bad_fd = rtsFalse;
+ bool seen_bad_fd = false;
struct timeval tv, *ptv;
LowResTime now;
@@ -330,7 +330,7 @@ awaitEvent(rtsBool wait)
while ((numFound = select(maxfd+1, &rfd, &wfd, NULL, ptv)) < 0) {
if (errno != EINTR) {
if ( errno == EBADF ) {
- seen_bad_fd = rtsTrue;
+ seen_bad_fd = true;
break;
} else {
sysErrorBelch("select");
@@ -418,7 +418,7 @@ awaitEvent(rtsBool wait)
debugBelch("Killing blocked thread %lu on bad fd=%i\n",
(unsigned long)tso->id, fd));
raiseAsync(&MainCapability, tso,
- (StgClosure *)blockedOnBadFD_closure, rtsFalse, NULL);
+ (StgClosure *)blockedOnBadFD_closure, false, NULL);
break;
case RTS_FD_IS_READY:
IF_DEBUG(scheduler,
diff --git a/rts/posix/Signals.c b/rts/posix/Signals.c
index 97439c5120..80909f113f 100644
--- a/rts/posix/Signals.c
+++ b/rts/posix/Signals.c
@@ -331,7 +331,7 @@ unblockUserSignals(void)
sigprocmask(SIG_SETMASK, &savedSignals, NULL);
}
-rtsBool
+bool
anyUserHandlers(void)
{
return n_haskell_handlers != 0;
@@ -585,7 +585,7 @@ empty_handler (int sig STG_UNUSED)
-------------------------------------------------------------------------- */
static void sigtstp_handler(int sig);
-static void set_sigtstp_action (rtsBool handle);
+static void set_sigtstp_action (bool handle);
static void
sigtstp_handler (int sig STG_UNUSED)
@@ -612,7 +612,7 @@ sigtstp_handler (int sig STG_UNUSED)
}
static void
-set_sigtstp_action (rtsBool handle)
+set_sigtstp_action (bool handle)
{
struct sigaction sa;
if (handle) {
@@ -728,7 +728,7 @@ initDefaultHandlers(void)
sysErrorBelch("warning: failed to install SIGUSR2 handler");
}
- set_sigtstp_action(rtsTrue);
+ set_sigtstp_action(true);
}
void
@@ -749,7 +749,7 @@ resetDefaultHandlers(void)
sysErrorBelch("warning: failed to uninstall SIGPIPE handler");
}
- set_sigtstp_action(rtsFalse);
+ set_sigtstp_action(false);
}
#endif /* RTS_USER_SIGNALS */
diff --git a/rts/posix/Signals.h b/rts/posix/Signals.h
index bb9a7b58df..d281821400 100644
--- a/rts/posix/Signals.h
+++ b/rts/posix/Signals.h
@@ -17,7 +17,7 @@
#include "BeginPrivate.h"
-rtsBool anyUserHandlers(void);
+bool anyUserHandlers(void);
#if !defined(THREADED_RTS)
extern siginfo_t pending_handler_buf[];
diff --git a/rts/posix/itimer/Pthread.c b/rts/posix/itimer/Pthread.c
index 4f0d7509ec..3b31fe4103 100644
--- a/rts/posix/itimer/Pthread.c
+++ b/rts/posix/itimer/Pthread.c
@@ -198,7 +198,7 @@ stopTicker(void)
/* There may be at most one additional tick fired after a call to this */
void
-exitTicker (rtsBool wait)
+exitTicker (bool wait)
{
ASSERT(!exited);
exited = 1;
diff --git a/rts/posix/itimer/Setitimer.c b/rts/posix/itimer/Setitimer.c
index 30dfa211a7..bdf537d478 100644
--- a/rts/posix/itimer/Setitimer.c
+++ b/rts/posix/itimer/Setitimer.c
@@ -73,7 +73,7 @@ stopTicker(void)
}
void
-exitTicker (rtsBool wait STG_UNUSED)
+exitTicker (bool wait STG_UNUSED)
{
return;
}
diff --git a/rts/posix/itimer/TimerCreate.c b/rts/posix/itimer/TimerCreate.c
index a5fe8ce5f1..bee3108d38 100644
--- a/rts/posix/itimer/TimerCreate.c
+++ b/rts/posix/itimer/TimerCreate.c
@@ -76,7 +76,7 @@ stopTicker(void)
}
void
-exitTicker (rtsBool wait STG_UNUSED)
+exitTicker (bool wait STG_UNUSED)
{
    // Before deleting the timer, set the signal to ignore to avoid the
// possibility of the signal being delivered after the timer is deleted.
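
The ordering described in that comment, ignore the signal first and only then delete the timer, closes the window in which an already-queued tick could fire after deletion. A hedged sketch (SIGVTALRM is a stand-in for whichever signal the ticker was created with):

    #include <signal.h>
    #include <time.h>

    static void exit_ticker_sketch(timer_t id)
    {
        signal(SIGVTALRM, SIG_IGN);   /* late ticks are now harmless */
        timer_delete(id);
    }
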
diff --git a/rts/sm/CNF.c b/rts/sm/CNF.c
index 5fa148d426..2eb7cd208a 100644
--- a/rts/sm/CNF.c
+++ b/rts/sm/CNF.c
@@ -391,7 +391,7 @@ unroll_memcpy(StgPtr to, StgPtr from, StgWord size)
*(to++) = *(from++);
}
-static rtsBool
+static bool
allocate_in_compact (StgCompactNFDataBlock *block, StgWord sizeW, StgPtr *at)
{
bdescr *bd;
@@ -401,16 +401,16 @@ allocate_in_compact (StgCompactNFDataBlock *block, StgWord sizeW, StgPtr *at)
bd = Bdescr((StgPtr)block);
top = bd->start + BLOCK_SIZE_W * bd->blocks;
if (bd->free + sizeW > top)
- return rtsFalse;
+ return false;
free = bd->free;
bd->free += sizeW;
*at = free;
- return rtsTrue;
+ return true;
}
-static rtsBool
+static bool
block_is_full (StgCompactNFDataBlock *block)
{
bdescr *bd;
@@ -431,7 +431,7 @@ block_is_full (StgCompactNFDataBlock *block)
return (bd->free + sizeW > top);
}
-static rtsBool
+static bool
allocate_loop (Capability *cap,
StgCompactNFData *str,
StgWord sizeW,
@@ -444,7 +444,7 @@ allocate_loop (Capability *cap,
retry:
if (str->nursery != NULL) {
if (allocate_in_compact(str->nursery, sizeW, at))
- return rtsTrue;
+ return true;
if (block_is_full (str->nursery)) {
str->nursery = str->nursery->next;
@@ -455,7 +455,7 @@ allocate_loop (Capability *cap,
block = str->nursery->next;
while (block != NULL) {
if (allocate_in_compact(block, sizeW, at))
- return rtsTrue;
+ return true;
block = block->next;
}
@@ -466,7 +466,7 @@ allocate_loop (Capability *cap,
if (next_size >= BLOCKS_PER_MBLOCK * BLOCK_SIZE)
next_size = BLOCKS_PER_MBLOCK * BLOCK_SIZE;
if (next_size < sizeW * sizeof(StgWord) + sizeof(StgCompactNFDataBlock))
- return rtsFalse;
+ return false;
block = compactAppendBlock(cap, str, next_size);
ASSERT (str->nursery != NULL);
@@ -505,13 +505,13 @@ copy_tag (Capability *cap,
*p = TAG_CLOSURE(tag, (StgClosure*)to);
}
-STATIC_INLINE rtsBool
+STATIC_INLINE bool
object_in_compact (StgCompactNFData *str, StgClosure *p)
{
bdescr *bd;
if (!HEAP_ALLOCED(p))
- return rtsFalse;
+ return false;
bd = Bdescr((P_)p);
return (bd->flags & BF_COMPACT) != 0 &&
@@ -694,7 +694,7 @@ scavenge_loop (Capability *cap,
}
#ifdef DEBUG
-static rtsBool
+static bool
objectIsWHNFData (StgClosure *what)
{
switch (get_itbl(what)->type) {
@@ -710,18 +710,18 @@ objectIsWHNFData (StgClosure *what)
case MUT_ARR_PTRS_FROZEN0:
case SMALL_MUT_ARR_PTRS_FROZEN:
case SMALL_MUT_ARR_PTRS_FROZEN0:
- return rtsTrue;
+ return true;
case IND:
case BLACKHOLE:
return objectIsWHNFData(UNTAG_CLOSURE(((StgInd*)what)->indirectee));
default:
- return rtsFalse;
+ return false;
}
}
-static rtsBool
+static bool
verify_mut_arr_ptrs (StgCompactNFData *str,
StgMutArrPtrs *a)
{
@@ -731,13 +731,13 @@ verify_mut_arr_ptrs (StgCompactNFData *str,
q = (StgPtr)&a->payload[a->ptrs];
for (; p < q; p++) {
if (!object_in_compact(str, UNTAG_CLOSURE(*(StgClosure**)p)))
- return rtsFalse;
+ return false;
}
- return rtsTrue;
+ return true;
}
-static rtsBool
+static bool
verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
{
bdescr *bd;
@@ -751,23 +751,23 @@ verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
q = (StgClosure*)p;
if (!LOOKS_LIKE_CLOSURE_PTR(q))
- return rtsFalse;
+ return false;
info = get_itbl(q);
switch (info->type) {
case CONSTR_1_0:
if (!object_in_compact(str, UNTAG_CLOSURE(q->payload[0])))
- return rtsFalse;
+ return false;
case CONSTR_0_1:
p += sizeofW(StgClosure) + 1;
break;
case CONSTR_2_0:
if (!object_in_compact(str, UNTAG_CLOSURE(q->payload[1])))
- return rtsFalse;
+ return false;
case CONSTR_1_1:
if (!object_in_compact(str, UNTAG_CLOSURE(q->payload[0])))
- return rtsFalse;
+ return false;
case CONSTR_0_2:
p += sizeofW(StgClosure) + 2;
break;
@@ -780,7 +780,7 @@ verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
for (i = 0; i < info->layout.payload.ptrs; i++)
if (!object_in_compact(str, UNTAG_CLOSURE(q->payload[i])))
- return rtsFalse;
+ return false;
p += sizeofW(StgClosure) + info->layout.payload.ptrs +
info->layout.payload.nptrs;
@@ -794,7 +794,7 @@ verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
if (!verify_mut_arr_ptrs(str, (StgMutArrPtrs*)p))
- return rtsFalse;
+ return false;
p += mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
break;
@@ -806,7 +806,7 @@ verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
for (i = 0; i < arr->ptrs; i++)
if (!object_in_compact(str, UNTAG_CLOSURE(arr->payload[i])))
- return rtsFalse;
+ return false;
p += sizeofW(StgSmallMutArrPtrs) + arr->ptrs;
break;
@@ -817,14 +817,14 @@ verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
break;
default:
- return rtsFalse;
+ return false;
}
}
- return rtsTrue;
+ return true;
}
-static rtsBool
+static bool
verify_consistency_loop (StgCompactNFData *str)
{
StgCompactNFDataBlock *block;
@@ -832,11 +832,11 @@ verify_consistency_loop (StgCompactNFData *str)
block = compactGetFirstBlock(str);
do {
if (!verify_consistency_block(str, block))
- return rtsFalse;
+ return false;
block = block->next;
} while (block && block->owner);
- return rtsTrue;
+ return true;
}
#endif
@@ -938,7 +938,7 @@ compactAllocateBlock(Capability *cap,
return block;
}
-STATIC_INLINE rtsBool
+STATIC_INLINE bool
any_needs_fixup(StgCompactNFDataBlock *block)
{
// ->next pointers are always valid, even if some blocks were
@@ -947,11 +947,11 @@ any_needs_fixup(StgCompactNFDataBlock *block)
do {
if (block->self != block)
- return rtsTrue;
+ return true;
block = block->next;
} while (block && block->owner);
- return rtsFalse;
+ return false;
}
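
any_needs_fixup() above exploits a self-pointer: each compact block records the address it lived at when serialised, so after the region is mapped back in, self != block exactly when the block moved and its interior pointers need adjusting. A hedged sketch of the invariant:

    #include <stdbool.h>
    #include <stddef.h>

    struct cblock_sketch {
        struct cblock_sketch *self;   /* address at serialisation time */
        struct cblock_sketch *next;
    };

    static bool needs_fixup_sketch(struct cblock_sketch *b)
    {
        for (; b != NULL; b = b->next)
            if (b->self != b)
                return true;          /* block moved: pointers are stale */
        return false;
    }
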
#ifdef DEBUG
@@ -1029,7 +1029,7 @@ find_pointer(StgWord *fixup_table, uint32_t count, StgClosure *q)
return NULL;
}
-static rtsBool
+static bool
fixup_one_pointer(StgWord *fixup_table, uint32_t count, StgClosure **p)
{
StgWord tag;
@@ -1042,17 +1042,17 @@ fixup_one_pointer(StgWord *fixup_table, uint32_t count, StgClosure **p)
block = find_pointer(fixup_table, count, q);
if (block == NULL)
- return rtsFalse;
+ return false;
if (block == block->self)
- return rtsTrue;
+ return true;
q = (StgClosure*)((W_)q - (W_)block->self + (W_)block);
*p = TAG_CLOSURE(tag, q);
- return rtsTrue;
+ return true;
}
-static rtsBool
+static bool
fixup_mut_arr_ptrs (StgWord *fixup_table,
uint32_t count,
StgMutArrPtrs *a)
@@ -1063,13 +1063,13 @@ fixup_mut_arr_ptrs (StgWord *fixup_table,
q = (StgPtr)&a->payload[a->ptrs];
for (; p < q; p++) {
if (!fixup_one_pointer(fixup_table, count, (StgClosure**)p))
- return rtsFalse;
+ return false;
}
- return rtsTrue;
+ return true;
}
-static rtsBool
+static bool
fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
{
const StgInfoTable *info;
@@ -1086,7 +1086,7 @@ fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
case CONSTR_1_0:
if (!fixup_one_pointer(fixup_table, count,
&((StgClosure*)p)->payload[0]))
- return rtsFalse;
+ return false;
case CONSTR_0_1:
p += sizeofW(StgClosure) + 1;
break;
@@ -1094,11 +1094,11 @@ fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
case CONSTR_2_0:
if (!fixup_one_pointer(fixup_table, count,
&((StgClosure*)p)->payload[1]))
- return rtsFalse;
+ return false;
case CONSTR_1_1:
if (!fixup_one_pointer(fixup_table, count,
&((StgClosure*)p)->payload[0]))
- return rtsFalse;
+ return false;
case CONSTR_0_2:
p += sizeofW(StgClosure) + 2;
break;
@@ -1112,7 +1112,7 @@ fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
if (!fixup_one_pointer(fixup_table, count, (StgClosure **)p))
- return rtsFalse;
+ return false;
}
p += info->layout.payload.nptrs;
break;
@@ -1137,7 +1137,7 @@ fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
for (i = 0; i < arr->ptrs; i++) {
if (!fixup_one_pointer(fixup_table, count,
&arr->payload[i]))
- return rtsFalse;
+ return false;
}
p += sizeofW(StgSmallMutArrPtrs) + arr->ptrs;
@@ -1157,11 +1157,11 @@ fixup_block(StgCompactNFDataBlock *block, StgWord *fixup_table, uint32_t count)
default:
debugBelch("Invalid non-NFData closure (type %d) in Compact\n",
info->type);
- return rtsFalse;
+ return false;
}
}
- return rtsTrue;
+ return true;
}
static int
@@ -1203,18 +1203,18 @@ build_fixup_table (StgCompactNFDataBlock *block, uint32_t *pcount)
return table;
}
-static rtsBool
+static bool
fixup_loop(StgCompactNFDataBlock *block, StgClosure **proot)
{
StgWord *table;
- rtsBool ok;
+ bool ok;
uint32_t count;
table = build_fixup_table (block, &count);
do {
if (!fixup_block(block, table, count)) {
- ok = rtsFalse;
+ ok = false;
goto out;
}
@@ -1277,7 +1277,7 @@ static StgClosure *
maybe_fixup_internal_pointers (StgCompactNFDataBlock *block,
StgClosure *root)
{
- rtsBool ok;
+ bool ok;
StgClosure **proot;
// Check for fast path
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 1323cbea6a..0581321205 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -45,7 +45,7 @@ StgWord64 whitehole_spin = 0;
*/
#define MAX_THUNK_SELECTOR_DEPTH 16
-static void eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool);
+static void eval_thunk_selector (StgClosure **q, StgSelector * p, bool);
STATIC_INLINE void evacuate_large(StgPtr p);
/* -----------------------------------------------------------------------------
@@ -67,7 +67,7 @@ alloc_for_copy (uint32_t size, uint32_t gen_no)
if (gct->eager_promotion) {
gen_no = gct->evac_gen_no;
} else {
- gct->failed_to_evac = rtsTrue;
+ gct->failed_to_evac = true;
}
}
@@ -182,7 +182,7 @@ copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
* pointer of an object, but reserve some padding after it. This is
* used to optimise evacuation of TSOs.
*/
-static rtsBool
+static bool
copyPart(StgClosure **p, StgClosure *src, uint32_t size_to_reserve,
uint32_t size_to_copy, uint32_t gen_no)
{
@@ -202,7 +202,7 @@ spin:
if (IS_FORWARDING_PTR(info)) {
src->header.info = (const StgInfoTable *)info;
evacuate(p); // does the failed_to_evac stuff
- return rtsFalse;
+ return false;
}
#else
info = (W_)src->header.info;
@@ -229,7 +229,7 @@ spin:
LDV_FILL_SLOP(to + size_to_copy, (int)(size_to_reserve - size_to_copy));
#endif
- return rtsTrue;
+ return true;
}
@@ -271,7 +271,7 @@ evacuate_large(StgPtr p)
* the desired destination (see comments in evacuate()).
*/
if (gen_no < gct->evac_gen_no) {
- gct->failed_to_evac = rtsTrue;
+ gct->failed_to_evac = true;
TICK_GC_FAILED_PROMOTION();
}
RELEASE_SPIN_LOCK(&gen->sync);
@@ -296,7 +296,7 @@ evacuate_large(StgPtr p)
if (gct->eager_promotion) {
new_gen_no = gct->evac_gen_no;
} else {
- gct->failed_to_evac = rtsTrue;
+ gct->failed_to_evac = true;
}
}
@@ -388,7 +388,7 @@ evacuate_compact (StgPtr p)
* the desired destination (see comments in evacuate()).
*/
if (gen_no < gct->evac_gen_no) {
- gct->failed_to_evac = rtsTrue;
+ gct->failed_to_evac = true;
TICK_GC_FAILED_PROMOTION();
}
return;
@@ -404,7 +404,7 @@ evacuate_compact (StgPtr p)
* the desired destination (see comments in evacuate()).
*/
if (gen_no < gct->evac_gen_no) {
- gct->failed_to_evac = rtsTrue;
+ gct->failed_to_evac = true;
TICK_GC_FAILED_PROMOTION();
}
RELEASE_SPIN_LOCK(&gen->sync);
@@ -429,7 +429,7 @@ evacuate_compact (StgPtr p)
if (gct->eager_promotion) {
new_gen_no = gct->evac_gen_no;
} else {
- gct->failed_to_evac = rtsTrue;
+ gct->failed_to_evac = true;
}
}
@@ -582,7 +582,7 @@ loop:
// whether it is already in the target generation. (this is
// the write barrier).
if (bd->gen_no < gct->evac_gen_no) {
- gct->failed_to_evac = rtsTrue;
+ gct->failed_to_evac = true;
TICK_GC_FAILED_PROMOTION();
}
return;
@@ -639,7 +639,7 @@ loop:
*p = TAG_CLOSURE(tag,e);
if (gen_no < gct->evac_gen_no) { // optimisation
if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
- gct->failed_to_evac = rtsTrue;
+ gct->failed_to_evac = true;
TICK_GC_FAILED_PROMOTION();
}
}
@@ -767,7 +767,7 @@ loop:
return;
case THUNK_SELECTOR:
- eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
+ eval_thunk_selector(p, (StgSelector *)q, true);
return;
case IND:
@@ -835,7 +835,7 @@ loop:
{
StgStack *new_stack;
StgPtr r, s;
- rtsBool mine;
+ bool mine;
mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
sizeofW(StgStack), gen_no);
@@ -932,7 +932,7 @@ unchain_thunk_selectors(StgSelector *p, StgClosure *val)
}
static void
-eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
+eval_thunk_selector (StgClosure **q, StgSelector * p, bool evac)
// NB. for legacy reasons, p & q are swapped around :(
{
uint32_t field;
@@ -963,7 +963,7 @@ selector_chain:
*q = (StgClosure *)p;
// shortcut, behave as for: if (evac) evacuate(q);
if (evac && bd->gen_no < gct->evac_gen_no) {
- gct->failed_to_evac = rtsTrue;
+ gct->failed_to_evac = true;
TICK_GC_FAILED_PROMOTION();
}
return;
@@ -975,7 +975,7 @@ selector_chain:
// bit is very tricky to get right. If you make changes
// around here, test by compiling stage 3 with +RTS -c -RTS.
if (bd->flags & BF_MARKED) {
- // must call evacuate() to mark this closure if evac==rtsTrue
+ // must call evacuate() to mark this closure if evac==true
*q = (StgClosure *)p;
if (evac) evacuate(q);
unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
@@ -1164,10 +1164,10 @@ selector_loop:
}
gct->thunk_selector_depth++;
- // rtsFalse says "don't evacuate the result". It will,
+ // false says "don't evacuate the result". It will,
// however, update any THUNK_SELECTORs that are evaluated
// along the way.
- eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
+ eval_thunk_selector(&val, (StgSelector*)selectee, false);
gct->thunk_selector_depth--;
// did we actually manage to evaluate it?
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 46530b2cd9..ea80d6dec1 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -97,7 +97,7 @@
* deal with static objects and GC CAFs when doing a major GC.
*/
uint32_t N;
-rtsBool major_gc;
+bool major_gc;
/* Data used for allocation area sizing.
*/
@@ -132,7 +132,7 @@ uint32_t n_gc_threads;
// For stats:
static long copied; // *words* copied & scavenged during this GC
-rtsBool work_stealing;
+bool work_stealing;
uint32_t static_flag = STATIC_FLAG_B;
uint32_t prev_static_flag = STATIC_FLAG_A;
@@ -153,8 +153,8 @@ static void start_gc_threads (void);
static void scavenge_until_all_done (void);
static StgWord inc_running (void);
static StgWord dec_running (void);
-static void wakeup_gc_threads (uint32_t me, rtsBool idle_cap[]);
-static void shutdown_gc_threads (uint32_t me, rtsBool idle_cap[]);
+static void wakeup_gc_threads (uint32_t me, bool idle_cap[]);
+static void shutdown_gc_threads (uint32_t me, bool idle_cap[]);
static void collect_gct_blocks (void);
static void collect_pinned_object_blocks (void);
@@ -180,10 +180,10 @@ StgPtr mark_sp; // pointer to the next unallocated mark stack entry
void
GarbageCollect (uint32_t collect_gen,
- rtsBool do_heap_census,
+ bool do_heap_census,
uint32_t gc_type USED_IF_THREADS,
Capability *cap,
- rtsBool idle_cap[])
+ bool idle_cap[])
{
bdescr *bd;
generation *gen;
@@ -299,7 +299,7 @@ GarbageCollect (uint32_t collect_gen,
collectFreshWeakPtrs();
// check sanity *before* GC
- IF_DEBUG(sanity, checkSanity(rtsFalse /* before GC */, major_gc));
+ IF_DEBUG(sanity, checkSanity(false /* before GC */, major_gc));
// gather blocks allocated using allocatePinned() from each capability
// and put them on the g0->large_object list.
@@ -361,7 +361,7 @@ GarbageCollect (uint32_t collect_gen,
for (n = 0; n < n_capabilities; n++) {
if (idle_cap[n]) {
markCapability(mark_root, gct, capabilities[n],
- rtsTrue/*don't mark sparks*/);
+ true/*don't mark sparks*/);
scavenge_capability_mut_lists(capabilities[n]);
}
}
@@ -376,10 +376,10 @@ GarbageCollect (uint32_t collect_gen,
if (n_gc_threads == 1) {
for (n = 0; n < n_capabilities; n++) {
markCapability(mark_root, gct, capabilities[n],
- rtsTrue/*don't mark sparks*/);
+ true/*don't mark sparks*/);
}
} else {
- markCapability(mark_root, gct, cap, rtsTrue/*don't mark sparks*/);
+ markCapability(mark_root, gct, cap, true/*don't mark sparks*/);
}
markScheduler(mark_root, gct);
@@ -408,7 +408,7 @@ GarbageCollect (uint32_t collect_gen,
// must be last... invariant is that everything is fully
// scavenged at this point.
- if (traverseWeakPtrList()) { // returns rtsTrue if evaced something
+ if (traverseWeakPtrList()) { // returns true if evaced something
inc_running();
continue;
}
@@ -719,7 +719,7 @@ GarbageCollect (uint32_t collect_gen,
// before resurrectThreads(), because that might overwrite some
// closures, which will cause problems with THREADED where we don't
// fill slop.
- IF_DEBUG(sanity, checkSanity(rtsTrue /* after GC */, major_gc));
+ IF_DEBUG(sanity, checkSanity(true /* after GC */, major_gc));
// If a heap census is due, we need to do it before
// resurrectThreads(), for the same reason as checkSanity above:
@@ -937,7 +937,7 @@ dec_running (void)
return atomic_dec(&gc_running_threads);
}
-static rtsBool
+static bool
any_work (void)
{
int g;
@@ -949,7 +949,7 @@ any_work (void)
// scavenge objects in compacted generation
if (mark_stack_bd != NULL && !mark_stack_empty()) {
- return rtsTrue;
+ return true;
}
// Check for global work in any gen. We don't need to check for
@@ -957,9 +957,9 @@ any_work (void)
// which means there is no local work for this thread.
for (g = 0; g < (int)RtsFlags.GcFlags.generations; g++) {
ws = &gct->gens[g];
- if (ws->todo_large_objects) return rtsTrue;
- if (!looksEmptyWSDeque(ws->todo_q)) return rtsTrue;
- if (ws->todo_overflow) return rtsTrue;
+ if (ws->todo_large_objects) return true;
+ if (!looksEmptyWSDeque(ws->todo_q)) return true;
+ if (ws->todo_overflow) return true;
}
#if defined(THREADED_RTS)
@@ -970,7 +970,7 @@ any_work (void)
if (n == gct->thread_index) continue;
for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
ws = &gc_threads[n]->gens[g];
- if (!looksEmptyWSDeque(ws->todo_q)) return rtsTrue;
+ if (!looksEmptyWSDeque(ws->todo_q)) return true;
}
}
}
@@ -981,7 +981,7 @@ any_work (void)
yieldThread();
#endif
- return rtsFalse;
+ return false;
}
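
any_work() above checks the global mark stack, then this thread's own per-generation queues, and only then (when work stealing is on) peeks at other threads' queues. A hedged sketch of that scan order:

    #include <stdbool.h>

    #define N_GENS_SKETCH    3
    #define N_THREADS_SKETCH 4

    /* Nonzero todo means the workspace has scavenging work queued. */
    static int todo_sketch[N_THREADS_SKETCH][N_GENS_SKETCH];

    static bool any_work_sketch(int me, bool work_stealing)
    {
        for (int g = 0; g < N_GENS_SKETCH; g++)
            if (todo_sketch[me][g])
                return true;                     /* local work first */
        if (work_stealing) {
            for (int n = 0; n < N_THREADS_SKETCH; n++) {
                if (n == me) continue;
                for (int g = N_GENS_SKETCH - 1; g >= 0; g--)
                    if (todo_sketch[n][g])
                        return true;             /* something to steal */
            }
        }
        return false;
    }
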
static void
@@ -1061,7 +1061,7 @@ gcWorkerThread (Capability *cap)
// Every thread evacuates some roots.
gct->evac_gen_no = 0;
- markCapability(mark_root, gct, cap, rtsTrue/*prune sparks*/);
+ markCapability(mark_root, gct, cap, true/*prune sparks*/);
scavenge_capability_mut_lists(cap);
scavenge_until_all_done();
@@ -1092,12 +1092,12 @@ gcWorkerThread (Capability *cap)
#if defined(THREADED_RTS)
void
-waitForGcThreads (Capability *cap USED_IF_THREADS, rtsBool idle_cap[])
+waitForGcThreads (Capability *cap USED_IF_THREADS, bool idle_cap[])
{
const uint32_t n_threads = n_capabilities;
const uint32_t me = cap->no;
uint32_t i, j;
- rtsBool retry = rtsTrue;
+ bool retry = true;
while(retry) {
for (i=0; i < n_threads; i++) {
@@ -1107,13 +1107,13 @@ waitForGcThreads (Capability *cap USED_IF_THREADS, rtsBool idle_cap[])
}
}
for (j=0; j < 10; j++) {
- retry = rtsFalse;
+ retry = false;
for (i=0; i < n_threads; i++) {
if (i == me || idle_cap[i]) continue;
write_barrier();
interruptCapability(capabilities[i]);
if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY) {
- retry = rtsTrue;
+ retry = true;
}
}
if (!retry) break;
@@ -1134,7 +1134,7 @@ start_gc_threads (void)
static void
wakeup_gc_threads (uint32_t me USED_IF_THREADS,
- rtsBool idle_cap[] USED_IF_THREADS)
+ bool idle_cap[] USED_IF_THREADS)
{
#if defined(THREADED_RTS)
uint32_t i;
@@ -1160,7 +1160,7 @@ wakeup_gc_threads (uint32_t me USED_IF_THREADS,
// any_work(), and may even remain awake until the next GC starts.
static void
shutdown_gc_threads (uint32_t me USED_IF_THREADS,
- rtsBool idle_cap[] USED_IF_THREADS)
+ bool idle_cap[] USED_IF_THREADS)
{
#if defined(THREADED_RTS)
uint32_t i;
@@ -1179,7 +1179,7 @@ shutdown_gc_threads (uint32_t me USED_IF_THREADS,
#if defined(THREADED_RTS)
void
-releaseGCThreads (Capability *cap USED_IF_THREADS, rtsBool idle_cap[])
+releaseGCThreads (Capability *cap USED_IF_THREADS, bool idle_cap[])
{
const uint32_t n_threads = n_capabilities;
const uint32_t me = cap->no;
@@ -1451,8 +1451,8 @@ init_gc_thread (gc_thread *t)
t->scan_bd = NULL;
t->mut_lists = t->cap->mut_lists;
t->evac_gen_no = 0;
- t->failed_to_evac = rtsFalse;
- t->eager_promotion = rtsTrue;
+ t->failed_to_evac = false;
+ t->eager_promotion = true;
t->thunk_selector_depth = 0;
t->copied = 0;
t->scanned = 0;
@@ -1657,7 +1657,7 @@ resize_nursery (void)
long blocks;
StgWord needed;
- calcNeeded(rtsFalse, &needed); // approx blocks needed at next GC
+ calcNeeded(false, &needed); // approx blocks needed at next GC
/* Guess how much will be live in generation 0 step 0 next time.
* A good approximation is obtained by finding the
diff --git a/rts/sm/GC.h b/rts/sm/GC.h
index 44ae7e35b2..a2bf6123d6 100644
--- a/rts/sm/GC.h
+++ b/rts/sm/GC.h
@@ -18,9 +18,9 @@
#include "HeapAlloc.h"
-void GarbageCollect (uint32_t collect_gen,
- rtsBool do_heap_census,
- uint32_t gc_type, Capability *cap, rtsBool idle_cap[]);
+void GarbageCollect (uint32_t collect_gen,
+ bool do_heap_census,
+ uint32_t gc_type, Capability *cap, bool idle_cap[]);
typedef void (*evac_fn)(void *user, StgClosure **root);
@@ -28,13 +28,13 @@ StgClosure * isAlive ( StgClosure *p );
void markCAFs ( evac_fn evac, void *user );
extern uint32_t N;
-extern rtsBool major_gc;
+extern bool major_gc;
extern bdescr *mark_stack_bd;
extern bdescr *mark_stack_top_bd;
extern StgPtr mark_sp;
-extern rtsBool work_stealing;
+extern bool work_stealing;
#ifdef DEBUG
extern uint32_t mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS,
@@ -55,8 +55,8 @@ void initGcThreads (uint32_t from, uint32_t to);
void freeGcThreads (void);
#if defined(THREADED_RTS)
-void waitForGcThreads (Capability *cap, rtsBool idle_cap[]);
-void releaseGCThreads (Capability *cap, rtsBool idle_cap[]);
+void waitForGcThreads (Capability *cap, bool idle_cap[]);
+void releaseGCThreads (Capability *cap, bool idle_cap[]);
#endif
#define WORK_UNIT_WORDS 128
diff --git a/rts/sm/GCThread.h b/rts/sm/GCThread.h
index f940263665..89457e6467 100644
--- a/rts/sm/GCThread.h
+++ b/rts/sm/GCThread.h
@@ -138,7 +138,7 @@ typedef struct gc_thread_ {
StgClosure* static_objects; // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
- W_ gc_count; // number of GCs this thread has done
+ W_ gc_count; // number of GCs this thread has done
// block that is currently being scanned
bdescr * scan_bd;
@@ -154,7 +154,7 @@ typedef struct gc_thread_ {
// --------------------
// evacuate flags
- uint32_t evac_gen_no; // Youngest generation that objects
+ uint32_t evac_gen_no; // Youngest generation that objects
// should be evacuated to in
// evacuate(). (Logically an
// argument to evacuate, but it's
@@ -162,11 +162,11 @@ typedef struct gc_thread_ {
// optimise it into a per-thread
// variable).
- rtsBool failed_to_evac; // failure to evacuate an object typically
+ bool failed_to_evac; // failure to evacuate an object typically
// Causes it to be recorded in the mutable
// object list
- rtsBool eager_promotion; // forces promotion to the evac gen
+ bool eager_promotion; // forces promotion to the evac gen
// instead of the to-space
// corresponding to the object
diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c
index a515665d07..9fda2fe070 100644
--- a/rts/sm/GCUtils.c
+++ b/rts/sm/GCUtils.c
@@ -192,7 +192,7 @@ push_scanned_block (bdescr *bd, gen_workspace *ws)
StgPtr
todo_block_full (uint32_t size, gen_workspace *ws)
{
- rtsBool urgent_to_push, can_extend;
+ bool urgent_to_push, can_extend;
StgPtr p;
bdescr *bd;
diff --git a/rts/sm/GCUtils.h b/rts/sm/GCUtils.h
index 7e5a827ce0..3092262af6 100644
--- a/rts/sm/GCUtils.h
+++ b/rts/sm/GCUtils.h
@@ -45,7 +45,7 @@ bdescr *steal_todo_block (uint32_t s);
// Returns true if a block is partially full. This predicate is used to try
// to re-use partial blocks wherever possible, and to reduce wastage.
// We might need to tweak the actual value.
-INLINE_HEADER rtsBool
+INLINE_HEADER bool
isPartiallyFull(bdescr *bd)
{
return (bd->free + WORK_UNIT_WORDS < bd->start + BLOCK_SIZE_W);
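The predicate above is pure pointer arithmetic: a block is re-used only when at least one whole work unit (WORK_UNIT_WORDS words) still fits before the block's end. A self-contained sketch with stand-in types (bdescr_sketch and the BLOCK_SIZE_W value are illustrative, not the RTS's real definitions):

    #include <stdbool.h>
    #include <stdint.h>

    #define WORK_UNIT_WORDS 128   /* matches rts/sm/GC.h */
    #define BLOCK_SIZE_W    512   /* hypothetical: 4K block, 8-byte words */

    typedef struct { uintptr_t *start; uintptr_t *free; } bdescr_sketch;

    /* Partially full <=> a full work unit still fits in the block. */
    static bool isPartiallyFull_sketch(const bdescr_sketch *bd)
    {
        return bd->free + WORK_UNIT_WORDS < bd->start + BLOCK_SIZE_W;
    }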
diff --git a/rts/sm/MarkStack.h b/rts/sm/MarkStack.h
index d90b5e47b4..881e2b0b17 100644
--- a/rts/sm/MarkStack.h
+++ b/rts/sm/MarkStack.h
@@ -61,7 +61,7 @@ pop_mark_stack(void)
return (StgPtr)*--mark_sp;
}
-INLINE_HEADER rtsBool
+INLINE_HEADER bool
mark_stack_empty(void)
{
return (((W_)mark_sp & BLOCK_MASK) == 0 && mark_stack_bd->link == NULL);
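mark_stack_empty relies on block alignment: when the low bits of mark_sp are zero, the pointer sits at the very start of its block, so the stack is empty provided no older block is chained behind it. A sketch under an assumed 4K block size:

    #include <stdbool.h>
    #include <stdint.h>

    #define BLOCK_SIZE 4096u          /* hypothetical block size */
    #define BLOCK_MASK (BLOCK_SIZE - 1)

    /* Empty <=> sp is at its block's base and no previous block exists. */
    static bool mark_stack_empty_sketch(uintptr_t mark_sp, const void *prev_block)
    {
        return ((mark_sp & BLOCK_MASK) == 0) && (prev_block == NULL);
    }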
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index 7e3e1d5818..e7dfd6e57c 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -84,8 +84,8 @@ StgWeak *dead_weak_ptr_list;
StgTSO *resurrected_threads;
static void collectDeadWeakPtrs (generation *gen);
-static rtsBool tidyWeakList (generation *gen);
-static rtsBool resurrectUnreachableThreads (generation *gen);
+static bool tidyWeakList (generation *gen);
+static bool resurrectUnreachableThreads (generation *gen);
static void tidyThreadList (generation *gen);
void
@@ -104,15 +104,15 @@ initWeakForGC(void)
resurrected_threads = END_TSO_QUEUE;
}
-rtsBool
+bool
traverseWeakPtrList(void)
{
- rtsBool flag = rtsFalse;
+ bool flag = false;
switch (weak_stage) {
case WeakDone:
- return rtsFalse;
+ return false;
case WeakThreads:
/* Now deal with the gen->threads lists, which behave somewhat like
@@ -130,18 +130,18 @@ traverseWeakPtrList(void)
// key is reachable):
for (g = 0; g <= N; g++) {
if (tidyWeakList(&generations[g])) {
- flag = rtsTrue;
+ flag = true;
}
}
// if we evacuated anything new, we must scavenge thoroughly
// before we can determine which threads are unreachable.
- if (flag) return rtsTrue;
+ if (flag) return true;
// Resurrect any threads which were unreachable
for (g = 0; g <= N; g++) {
if (resurrectUnreachableThreads(&generations[g])) {
- flag = rtsTrue;
+ flag = true;
}
}
@@ -151,7 +151,7 @@ traverseWeakPtrList(void)
// if we evacuated anything new, we must scavenge thoroughly
// before entering the WeakPtrs stage.
- if (flag) return rtsTrue;
+ if (flag) return true;
// otherwise, fall through...
}
@@ -164,7 +164,7 @@ traverseWeakPtrList(void)
// alive, so traverse those lists again:
for (g = 0; g <= N; g++) {
if (tidyWeakList(&generations[g])) {
- flag = rtsTrue;
+ flag = true;
}
}
@@ -172,7 +172,7 @@ traverseWeakPtrList(void)
* the dead weak pointers. The dead_weak_ptr list is used as a list
* of pending finalizers later on.
*/
- if (flag == rtsFalse) {
+ if (flag == false) {
for (g = 0; g <= N; g++) {
collectDeadWeakPtrs(&generations[g]);
}
@@ -180,12 +180,12 @@ traverseWeakPtrList(void)
weak_stage = WeakDone; // *now* we're done,
}
- return rtsTrue; // but one more round of scavenging, please
+ return true; // but one more round of scavenging, please
}
default:
barf("traverse_weak_ptr_list");
- return rtsTrue;
+ return true;
}
}
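traverseWeakPtrList's return value drives a fixpoint: true means new objects were evacuated and one more round of scavenging is required. The driver in rts/sm/GC.c interleaves the two roughly like this (a sketch; the real loop also synchronises the GC worker threads):

    #include <stdbool.h>

    extern bool traverseWeakPtrList(void);   /* rts/sm/MarkWeak.h */
    extern void scavenge_loop(void);         /* rts/sm/Scav.c */

    static void weak_ptr_fixpoint_sketch(void)
    {
        do {
            scavenge_loop();                 /* drain everything evacuated so far */
        } while (traverseWeakPtrList());     /* true => scavenge again */
    }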
@@ -205,10 +205,10 @@ static void collectDeadWeakPtrs (generation *gen)
}
}
-static rtsBool resurrectUnreachableThreads (generation *gen)
+static bool resurrectUnreachableThreads (generation *gen)
{
StgTSO *t, *tmp, *next;
- rtsBool flag = rtsFalse;
+ bool flag = false;
for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
next = t->global_link;
@@ -226,18 +226,18 @@ static rtsBool resurrectUnreachableThreads (generation *gen)
evacuate((StgClosure **)&tmp);
tmp->global_link = resurrected_threads;
resurrected_threads = tmp;
- flag = rtsTrue;
+ flag = true;
}
}
return flag;
}
-static rtsBool tidyWeakList(generation *gen)
+static bool tidyWeakList(generation *gen)
{
StgWeak *w, **last_w, *next_w;
const StgInfoTable *info;
StgClosure *new;
- rtsBool flag = rtsFalse;
+ bool flag = false;
last_w = &gen->old_weak_ptr_list;
for (w = gen->old_weak_ptr_list; w != NULL; w = next_w) {
@@ -267,7 +267,7 @@ static rtsBool tidyWeakList(generation *gen)
new_gen = Bdescr((P_)w)->gen;
gct->evac_gen_no = new_gen->no;
- gct->failed_to_evac = rtsFalse;
+ gct->failed_to_evac = false;
// evacuate the fields of the weak ptr
scavengeLiveWeak(w);
@@ -276,7 +276,7 @@ static rtsBool tidyWeakList(generation *gen)
debugTrace(DEBUG_weak,
"putting weak pointer %p into mutable list",
w);
- gct->failed_to_evac = rtsFalse;
+ gct->failed_to_evac = false;
recordMutableGen_GC((StgClosure *)w, new_gen->no);
}
@@ -287,7 +287,7 @@ static rtsBool tidyWeakList(generation *gen)
// and put it on the correct weak ptr list.
w->link = new_gen->weak_ptr_list;
new_gen->weak_ptr_list = w;
- flag = rtsTrue;
+ flag = true;
if (gen->no != new_gen->no) {
debugTrace(DEBUG_weak,
diff --git a/rts/sm/MarkWeak.h b/rts/sm/MarkWeak.h
index aabb954496..bd27bf551a 100644
--- a/rts/sm/MarkWeak.h
+++ b/rts/sm/MarkWeak.h
@@ -22,7 +22,7 @@ extern StgTSO *exception_threads;
void collectFreshWeakPtrs ( void );
void initWeakForGC ( void );
-rtsBool traverseWeakPtrList ( void );
+bool traverseWeakPtrList ( void );
void markWeakPtrList ( void );
void scavengeLiveWeak ( StgWeak * );
diff --git a/rts/sm/OSMem.h b/rts/sm/OSMem.h
index 69d87c201e..f6f9559c2e 100644
--- a/rts/sm/OSMem.h
+++ b/rts/sm/OSMem.h
@@ -18,8 +18,8 @@ void osReleaseFreeMemory(void);
void osFreeAllMBlocks(void);
size_t getPageSize (void);
StgWord64 getPhysicalMemorySize (void);
-void setExecutable (void *p, W_ len, rtsBool exec);
-rtsBool osNumaAvailable(void);
+void setExecutable (void *p, W_ len, bool exec);
+bool osNumaAvailable(void);
uint32_t osNumaNodes(void);
StgWord osNumaMask(void);
void osBindMBlocksToNode(void *addr, StgWord size, uint32_t node);
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 413aee945b..5a2923820c 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -557,7 +557,7 @@ checkTSO(StgTSO *tso)
Optionally also check the sanity of the TSOs.
*/
void
-checkGlobalTSOList (rtsBool checkTSOs)
+checkGlobalTSOList (bool checkTSOs)
{
StgTSO *tso;
uint32_t g;
@@ -712,7 +712,7 @@ checkNurserySanity (nursery *nursery)
}
static void checkGeneration (generation *gen,
- rtsBool after_major_gc USED_IF_THREADS)
+ bool after_major_gc USED_IF_THREADS)
{
uint32_t n;
gen_workspace *ws;
@@ -741,7 +741,7 @@ static void checkGeneration (generation *gen,
}
/* Full heap sanity check. */
-static void checkFullHeap (rtsBool after_major_gc)
+static void checkFullHeap (bool after_major_gc)
{
uint32_t g, n;
@@ -753,7 +753,7 @@ static void checkFullHeap (rtsBool after_major_gc)
}
}
-void checkSanity (rtsBool after_gc, rtsBool major_gc)
+void checkSanity (bool after_gc, bool major_gc)
{
checkFullHeap(after_gc && major_gc);
@@ -763,7 +763,7 @@ void checkSanity (rtsBool after_gc, rtsBool major_gc)
// does nothing in this case.
if (after_gc) {
checkMutableLists();
- checkGlobalTSOList(rtsTrue);
+ checkGlobalTSOList(true);
}
}
@@ -875,14 +875,14 @@ genBlocks (generation *gen)
}
void
-memInventory (rtsBool show)
+memInventory (bool show)
{
uint32_t g, i;
W_ gen_blocks[RtsFlags.GcFlags.generations];
W_ nursery_blocks, retainer_blocks,
arena_blocks, exec_blocks, gc_free_blocks = 0;
W_ live_blocks = 0, free_blocks = 0;
- rtsBool leak;
+ bool leak;
// count the blocks we current have
diff --git a/rts/sm/Sanity.h b/rts/sm/Sanity.h
index 273efe2dc9..63ae05d2a2 100644
--- a/rts/sm/Sanity.h
+++ b/rts/sm/Sanity.h
@@ -21,13 +21,13 @@
# endif
/* debugging routines */
-void checkSanity ( rtsBool after_gc, rtsBool major_gc );
+void checkSanity ( bool after_gc, bool major_gc );
void checkNurserySanity ( nursery *nursery );
void checkHeapChain ( bdescr *bd );
void checkHeapChunk ( StgPtr start, StgPtr end );
void checkLargeObjects ( bdescr *bd );
void checkTSO ( StgTSO* tso );
-void checkGlobalTSOList ( rtsBool checkTSOs );
+void checkGlobalTSOList ( bool checkTSOs );
void checkStaticObjects ( StgClosure* static_objects );
void checkStackChunk ( StgPtr sp, StgPtr stack_end );
StgOffset checkStackFrame ( StgPtr sp );
@@ -35,7 +35,7 @@ StgOffset checkClosure ( const StgClosure* p );
void checkRunQueue (Capability *cap);
-void memInventory (rtsBool show);
+void memInventory (bool show);
void checkBQ (StgTSO *bqe, StgClosure *closure);
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 595d8275cf..940f11fea4 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -51,7 +51,7 @@ static void scavenge_large_bitmap (StgPtr p,
static void
scavengeTSO (StgTSO *tso)
{
- rtsBool saved_eager;
+ bool saved_eager;
debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
@@ -66,7 +66,7 @@ scavengeTSO (StgTSO *tso)
}
saved_eager = gct->eager_promotion;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate((StgClosure **)&tso->blocked_exceptions);
evacuate((StgClosure **)&tso->bq);
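The saved_eager dance above is an idiom that recurs in every scavenge routine touched by this commit: eager promotion is switched off while the fields of a mutable object are evacuated, then restored. With bool the idiom reads as follows (stand-in struct; in the RTS these flags live in the per-thread gct):

    #include <stdbool.h>

    struct gct_sketch { bool eager_promotion; bool failed_to_evac; };

    static void scavenge_mutable_sketch(struct gct_sketch *gct)
    {
        bool saved_eager = gct->eager_promotion;
        gct->eager_promotion = false;  /* mutable fields: no eager promotion */
        /* ... evacuate the object's pointer fields here ... */
        gct->eager_promotion = saved_eager;
    }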
@@ -107,10 +107,10 @@ scavengeTSO (StgTSO *tso)
static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
{
W_ m;
- rtsBool any_failed;
+ bool any_failed;
StgPtr p, q;
- any_failed = rtsFalse;
+ any_failed = false;
p = (StgPtr)&a->payload[0];
for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
{
@@ -119,9 +119,9 @@ static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
evacuate((StgClosure**)p);
}
if (gct->failed_to_evac) {
- any_failed = rtsTrue;
+ any_failed = true;
*mutArrPtrsCard(a,m) = 1;
- gct->failed_to_evac = rtsFalse;
+ gct->failed_to_evac = false;
} else {
*mutArrPtrsCard(a,m) = 0;
}
@@ -133,9 +133,9 @@ static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
evacuate((StgClosure**)p);
}
if (gct->failed_to_evac) {
- any_failed = rtsTrue;
+ any_failed = true;
*mutArrPtrsCard(a,m) = 1;
- gct->failed_to_evac = rtsFalse;
+ gct->failed_to_evac = false;
} else {
*mutArrPtrsCard(a,m) = 0;
}
@@ -150,9 +150,9 @@ static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
{
W_ m;
StgPtr p, q;
- rtsBool any_failed;
+ bool any_failed;
- any_failed = rtsFalse;
+ any_failed = false;
for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
{
if (*mutArrPtrsCard(a,m) != 0) {
@@ -163,8 +163,8 @@ static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
evacuate((StgClosure**)p);
}
if (gct->failed_to_evac) {
- any_failed = rtsTrue;
- gct->failed_to_evac = rtsFalse;
+ any_failed = true;
+ gct->failed_to_evac = false;
} else {
*mutArrPtrsCard(a,m) = 0;
}
@@ -408,7 +408,7 @@ scavenge_block (bdescr *bd)
{
StgPtr p, q;
const StgInfoTable *info;
- rtsBool saved_eager_promotion;
+ bool saved_eager_promotion;
gen_workspace *ws;
debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
@@ -417,7 +417,7 @@ scavenge_block (bdescr *bd)
gct->scan_bd = bd;
gct->evac_gen_no = bd->gen_no;
saved_eager_promotion = gct->eager_promotion;
- gct->failed_to_evac = rtsFalse;
+ gct->failed_to_evac = false;
ws = &gct->gens[bd->gen->no];
@@ -441,7 +441,7 @@ scavenge_block (bdescr *bd)
case MVAR_DIRTY:
{
StgMVar *mvar = ((StgMVar *)p);
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate((StgClosure **)&mvar->head);
evacuate((StgClosure **)&mvar->tail);
evacuate((StgClosure **)&mvar->value);
@@ -459,7 +459,7 @@ scavenge_block (bdescr *bd)
case TVAR:
{
StgTVar *tvar = ((StgTVar *)p);
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate((StgClosure **)&tvar->current_value);
evacuate((StgClosure **)&tvar->first_watch_queue_entry);
gct->eager_promotion = saved_eager_promotion;
@@ -590,7 +590,7 @@ scavenge_block (bdescr *bd)
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate(&((StgMutVar *)p)->var);
gct->eager_promotion = saved_eager_promotion;
@@ -606,7 +606,7 @@ scavenge_block (bdescr *bd)
{
StgBlockingQueue *bq = (StgBlockingQueue *)p;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate(&bq->bh);
evacuate((StgClosure**)&bq->owner);
evacuate((StgClosure**)&bq->queue);
@@ -661,7 +661,7 @@ scavenge_block (bdescr *bd)
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
@@ -672,7 +672,7 @@ scavenge_block (bdescr *bd)
}
gct->eager_promotion = saved_eager_promotion;
- gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
+ gct->failed_to_evac = true; // always put it on the mutable list.
break;
}
@@ -702,7 +702,7 @@ scavenge_block (bdescr *bd)
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
evacuate((StgClosure **)p);
@@ -715,7 +715,7 @@ scavenge_block (bdescr *bd)
((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
}
- gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
+ gct->failed_to_evac = true; // always put it on the mutable list.
break;
}
@@ -751,7 +751,7 @@ scavenge_block (bdescr *bd)
{
StgStack *stack = (StgStack*)p;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
scavenge_stack(stack->sp, stack->stack + stack->stack_size);
stack->dirty = gct->failed_to_evac;
@@ -765,7 +765,7 @@ scavenge_block (bdescr *bd)
{
StgPtr end;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
@@ -774,7 +774,7 @@ scavenge_block (bdescr *bd)
p += info->layout.payload.nptrs;
gct->eager_promotion = saved_eager_promotion;
- gct->failed_to_evac = rtsTrue; // mutable
+ gct->failed_to_evac = true; // mutable
break;
}
@@ -783,7 +783,7 @@ scavenge_block (bdescr *bd)
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
@@ -791,7 +791,7 @@ scavenge_block (bdescr *bd)
evacuate((StgClosure **)&e->new_value);
}
gct->eager_promotion = saved_eager_promotion;
- gct->failed_to_evac = rtsTrue; // mutable
+ gct->failed_to_evac = true; // mutable
p += sizeofW(StgTRecChunk);
break;
}
@@ -816,7 +816,7 @@ scavenge_block (bdescr *bd)
* the current object points to into the current generation.
*/
if (gct->failed_to_evac) {
- gct->failed_to_evac = rtsFalse;
+ gct->failed_to_evac = false;
if (bd->gen_no > 0) {
recordMutableGen_GC((StgClosure *)q, bd->gen_no);
}
@@ -856,7 +856,7 @@ scavenge_mark_stack(void)
{
StgPtr p, q;
const StgInfoTable *info;
- rtsBool saved_eager_promotion;
+ bool saved_eager_promotion;
gct->evac_gen_no = oldest_gen->no;
saved_eager_promotion = gct->eager_promotion;
@@ -873,7 +873,7 @@ scavenge_mark_stack(void)
case MVAR_DIRTY:
{
StgMVar *mvar = ((StgMVar *)p);
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate((StgClosure **)&mvar->head);
evacuate((StgClosure **)&mvar->tail);
evacuate((StgClosure **)&mvar->value);
@@ -890,7 +890,7 @@ scavenge_mark_stack(void)
case TVAR:
{
StgTVar *tvar = ((StgTVar *)p);
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate((StgClosure **)&tvar->current_value);
evacuate((StgClosure **)&tvar->first_watch_queue_entry);
gct->eager_promotion = saved_eager_promotion;
@@ -997,7 +997,7 @@ scavenge_mark_stack(void)
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY: {
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate(&((StgMutVar *)p)->var);
gct->eager_promotion = saved_eager_promotion;
@@ -1013,7 +1013,7 @@ scavenge_mark_stack(void)
{
StgBlockingQueue *bq = (StgBlockingQueue *)p;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate(&bq->bh);
evacuate((StgClosure**)&bq->owner);
evacuate((StgClosure**)&bq->queue);
@@ -1064,7 +1064,7 @@ scavenge_mark_stack(void)
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
@@ -1075,7 +1075,7 @@ scavenge_mark_stack(void)
}
gct->eager_promotion = saved_eager_promotion;
- gct->failed_to_evac = rtsTrue; // mutable anyhow.
+ gct->failed_to_evac = true; // mutable anyhow.
break;
}
@@ -1102,14 +1102,14 @@ scavenge_mark_stack(void)
// follow everything
{
StgPtr next;
- rtsBool saved_eager;
+ bool saved_eager;
// We don't eagerly promote objects pointed to by a mutable
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
saved_eager = gct->eager_promotion;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
evacuate((StgClosure **)p);
@@ -1122,7 +1122,7 @@ scavenge_mark_stack(void)
((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
}
- gct->failed_to_evac = rtsTrue; // mutable anyhow.
+ gct->failed_to_evac = true; // mutable anyhow.
break;
}
@@ -1157,7 +1157,7 @@ scavenge_mark_stack(void)
{
StgStack *stack = (StgStack*)p;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
scavenge_stack(stack->sp, stack->stack + stack->stack_size);
stack->dirty = gct->failed_to_evac;
@@ -1170,7 +1170,7 @@ scavenge_mark_stack(void)
{
StgPtr end;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
@@ -1178,7 +1178,7 @@ scavenge_mark_stack(void)
}
gct->eager_promotion = saved_eager_promotion;
- gct->failed_to_evac = rtsTrue; // mutable
+ gct->failed_to_evac = true; // mutable
break;
}
@@ -1187,7 +1187,7 @@ scavenge_mark_stack(void)
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
@@ -1195,7 +1195,7 @@ scavenge_mark_stack(void)
evacuate((StgClosure **)&e->new_value);
}
gct->eager_promotion = saved_eager_promotion;
- gct->failed_to_evac = rtsTrue; // mutable
+ gct->failed_to_evac = true; // mutable
break;
}
@@ -1205,7 +1205,7 @@ scavenge_mark_stack(void)
}
if (gct->failed_to_evac) {
- gct->failed_to_evac = rtsFalse;
+ gct->failed_to_evac = false;
if (gct->evac_gen_no) {
recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
}
@@ -1221,12 +1221,12 @@ scavenge_mark_stack(void)
objects can have this property.
-------------------------------------------------------------------------- */
-static rtsBool
+static bool
scavenge_one(StgPtr p)
{
const StgInfoTable *info;
- rtsBool no_luck;
- rtsBool saved_eager_promotion;
+ bool no_luck;
+ bool saved_eager_promotion;
saved_eager_promotion = gct->eager_promotion;
@@ -1239,7 +1239,7 @@ scavenge_one(StgPtr p)
case MVAR_DIRTY:
{
StgMVar *mvar = ((StgMVar *)p);
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate((StgClosure **)&mvar->head);
evacuate((StgClosure **)&mvar->tail);
evacuate((StgClosure **)&mvar->value);
@@ -1256,7 +1256,7 @@ scavenge_one(StgPtr p)
case TVAR:
{
StgTVar *tvar = ((StgTVar *)p);
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate((StgClosure **)&tvar->current_value);
evacuate((StgClosure **)&tvar->first_watch_queue_entry);
gct->eager_promotion = saved_eager_promotion;
@@ -1321,7 +1321,7 @@ scavenge_one(StgPtr p)
case MUT_VAR_DIRTY: {
StgPtr q = p;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate(&((StgMutVar *)p)->var);
gct->eager_promotion = saved_eager_promotion;
@@ -1337,7 +1337,7 @@ scavenge_one(StgPtr p)
{
StgBlockingQueue *bq = (StgBlockingQueue *)p;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate(&bq->bh);
evacuate((StgClosure**)&bq->owner);
evacuate((StgClosure**)&bq->queue);
@@ -1388,7 +1388,7 @@ scavenge_one(StgPtr p)
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
@@ -1399,7 +1399,7 @@ scavenge_one(StgPtr p)
}
gct->eager_promotion = saved_eager_promotion;
- gct->failed_to_evac = rtsTrue;
+ gct->failed_to_evac = true;
break;
}
@@ -1423,14 +1423,14 @@ scavenge_one(StgPtr p)
case SMALL_MUT_ARR_PTRS_DIRTY:
{
StgPtr next, q;
- rtsBool saved_eager;
+ bool saved_eager;
// We don't eagerly promote objects pointed to by a mutable
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
saved_eager = gct->eager_promotion;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
q = p;
next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
@@ -1444,7 +1444,7 @@ scavenge_one(StgPtr p)
((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
}
- gct->failed_to_evac = rtsTrue;
+ gct->failed_to_evac = true;
break;
}
@@ -1479,7 +1479,7 @@ scavenge_one(StgPtr p)
{
StgStack *stack = (StgStack*)p;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
scavenge_stack(stack->sp, stack->stack + stack->stack_size);
stack->dirty = gct->failed_to_evac;
@@ -1492,7 +1492,7 @@ scavenge_one(StgPtr p)
{
StgPtr end;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
@@ -1500,7 +1500,7 @@ scavenge_one(StgPtr p)
}
gct->eager_promotion = saved_eager_promotion;
- gct->failed_to_evac = rtsTrue; // mutable
+ gct->failed_to_evac = true; // mutable
break;
}
@@ -1510,7 +1510,7 @@ scavenge_one(StgPtr p)
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
@@ -1518,7 +1518,7 @@ scavenge_one(StgPtr p)
evacuate((StgClosure **)&e->new_value);
}
gct->eager_promotion = saved_eager_promotion;
- gct->failed_to_evac = rtsTrue; // mutable
+ gct->failed_to_evac = true; // mutable
break;
}
@@ -1562,7 +1562,7 @@ scavenge_one(StgPtr p)
}
no_luck = gct->failed_to_evac;
- gct->failed_to_evac = rtsFalse;
+ gct->failed_to_evac = false;
return (no_luck);
}
@@ -1636,9 +1636,9 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
continue;
case MUT_ARR_PTRS_DIRTY:
{
- rtsBool saved_eager_promotion;
+ bool saved_eager_promotion;
saved_eager_promotion = gct->eager_promotion;
- gct->eager_promotion = rtsFalse;
+ gct->eager_promotion = false;
scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
@@ -1649,7 +1649,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
}
gct->eager_promotion = saved_eager_promotion;
- gct->failed_to_evac = rtsFalse;
+ gct->failed_to_evac = false;
recordMutableGen_GC((StgClosure *)p,gen_no);
continue;
}
@@ -1743,7 +1743,7 @@ scavenge_static(void)
* in case we visit this object again.
*/
if (gct->failed_to_evac) {
- gct->failed_to_evac = rtsFalse;
+ gct->failed_to_evac = false;
recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
}
break;
@@ -1779,7 +1779,7 @@ scavenge_static(void)
barf("scavenge_static: strange closure %d", (int)(info->type));
}
- ASSERT(gct->failed_to_evac == rtsFalse);
+ ASSERT(gct->failed_to_evac == false);
}
}
@@ -2009,20 +2009,20 @@ scavenge_large (gen_workspace *ws)
is other work we can usefully be doing.
------------------------------------------------------------------------- */
-static rtsBool
+static bool
scavenge_find_work (void)
{
int g;
gen_workspace *ws;
- rtsBool did_something, did_anything;
+ bool did_something, did_anything;
bdescr *bd;
gct->scav_find_work++;
- did_anything = rtsFalse;
+ did_anything = false;
loop:
- did_something = rtsFalse;
+ did_something = false;
for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
ws = &gct->gens[g];
@@ -2033,26 +2033,26 @@ loop:
if (ws->todo_bd->u.scan < ws->todo_free)
{
scavenge_block(ws->todo_bd);
- did_something = rtsTrue;
+ did_something = true;
break;
}
// If we have any large objects to scavenge, do them now.
if (ws->todo_large_objects) {
scavenge_large(ws);
- did_something = rtsTrue;
+ did_something = true;
break;
}
if ((bd = grab_local_todo_block(ws)) != NULL) {
scavenge_block(bd);
- did_something = rtsTrue;
+ did_something = true;
break;
}
}
if (did_something) {
- did_anything = rtsTrue;
+ did_anything = true;
goto loop;
}
@@ -2062,13 +2062,13 @@ loop:
for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
if ((bd = steal_todo_block(g)) != NULL) {
scavenge_block(bd);
- did_something = rtsTrue;
+ did_something = true;
break;
}
}
if (did_something) {
- did_anything = rtsTrue;
+ did_anything = true;
goto loop;
}
}
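scavenge_find_work's control flow is: drain all local per-generation work, and only when that is exhausted try to steal a todo block from another thread, restarting from the local queues after any success. The skeleton, with hypothetical stand-ins for the two work sources:

    #include <stdbool.h>

    extern bool do_local_work(void);   /* hypothetical stand-in */
    extern bool steal_work(void);      /* hypothetical stand-in */

    static bool find_work_sketch(bool work_stealing)
    {
        bool did_anything = false;
        for (;;) {
            if (do_local_work()) { did_anything = true; continue; }
            if (work_stealing && steal_work()) { did_anything = true; continue; }
            return did_anything;   /* nothing local, nothing to steal */
        }
    }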
@@ -2086,10 +2086,10 @@ loop:
void
scavenge_loop(void)
{
- rtsBool work_to_do;
+ bool work_to_do;
loop:
- work_to_do = rtsFalse;
+ work_to_do = false;
// scavenge static objects
if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
@@ -2100,7 +2100,7 @@ loop:
// scavenge objects in compacted generation
if (mark_stack_bd != NULL && !mark_stack_empty()) {
scavenge_mark_stack();
- work_to_do = rtsTrue;
+ work_to_do = true;
}
// Order is important here: we want to deal in full blocks as
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index da1acbcf5b..ad2519588b 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -43,7 +43,7 @@
StgIndStatic *dyn_caf_list = NULL;
StgIndStatic *debug_caf_list = NULL;
StgIndStatic *revertible_caf_list = NULL;
-rtsBool keepCAFs;
+bool keepCAFs;
W_ large_alloc_lim; /* GC if n_large_blocks in any nursery
* reaches this. */
@@ -286,7 +286,7 @@ exitStorage (void)
}
void
-freeStorage (rtsBool free_heap)
+freeStorage (bool free_heap)
{
stgFree(generations);
if (free_heap) freeAllMBlocks();
@@ -505,13 +505,13 @@ StgInd* newRetainedCAF (StgRegTable *reg, StgIndStatic *caf)
// If we are using loadObj/unloadObj in the linker, then we want to
//
-// - retain all CAFs in statically linked code (keepCAFs == rtsTrue),
+// - retain all CAFs in statically linked code (keepCAFs == true),
// because we might link a new object that uses any of these CAFs.
//
// - GC CAFs in dynamically-linked code, so that we can detect when
// a dynamically-linked object is unloadable.
//
-// So for this case, we set keepCAFs to rtsTrue, and link newCAF to newGCdCAF
+// So for this case, we set keepCAFs to true, and link newCAF to newGCdCAF
// for dynamically-linked code.
//
StgInd* newGCdCAF (StgRegTable *reg, StgIndStatic *caf)
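A compilable sketch of the policy the comment describes, with a local stand-in for the global keepCAFs flag (the real wiring routes dynamically loaded code through newGCdCAF):

    #include <stdbool.h>

    static bool keepCAFs_sketch = false;

    static void setLinkerCAFPolicy_sketch(bool using_loadObj)
    {
        /* loadObj/unloadObj in use: retain CAFs in statically linked code
         * (a newly loaded object may reference them), while CAFs in
         * dynamically loaded code stay collectable so the object can be
         * detected as unloadable. */
        keepCAFs_sketch = using_loadObj;
    }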
@@ -741,7 +741,7 @@ resizeNurseries (W_ blocks)
resizeNurseriesEach(blocks / n_nurseries);
}
-rtsBool
+bool
getNewNursery (Capability *cap)
{
StgWord i;
@@ -753,28 +753,28 @@ getNewNursery (Capability *cap)
if (i < n_nurseries) {
if (cas(&next_nursery[node], i, i+n_numa_nodes) == i) {
assignNurseryToCapability(cap, i);
- return rtsTrue;
+ return true;
}
} else if (n_numa_nodes > 1) {
// Try to find an unused nursery chunk on other nodes. We'll get
// remote memory, but the rationale is that avoiding GC is better
// than avoiding remote memory access.
- rtsBool lost = rtsFalse;
+ bool lost = false;
for (n = 0; n < n_numa_nodes; n++) {
if (n == node) continue;
i = next_nursery[n];
if (i < n_nurseries) {
if (cas(&next_nursery[n], i, i+n_numa_nodes) == i) {
assignNurseryToCapability(cap, i);
- return rtsTrue;
+ return true;
} else {
- lost = rtsTrue; /* lost a race */
+ lost = true; /* lost a race */
}
}
}
- if (!lost) return rtsFalse;
+ if (!lost) return false;
} else {
- return rtsFalse;
+ return false;
}
}
}
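getNewNursery claims nursery chunks lock-free: each capability CASes next_nursery forward by the NUMA stride and retries when it loses a race. A simplified single-node sketch using C11 atomics in place of the RTS's cas():

    #include <stdbool.h>
    #include <stdatomic.h>

    static _Atomic unsigned next_nursery_sketch;   /* illustrative global */

    static bool claim_nursery_sketch(unsigned n_nurseries, unsigned stride)
    {
        for (;;) {
            unsigned i = atomic_load(&next_nursery_sketch);
            if (i >= n_nurseries)
                return false;                      /* every chunk is taken */
            if (atomic_compare_exchange_weak(&next_nursery_sketch, &i, i + stride))
                return true;                       /* chunk i is now ours  */
            /* CAS lost a race (or failed spuriously): retry. */
        }
    }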
@@ -1244,7 +1244,7 @@ W_ gcThreadLiveBlocks (uint32_t i, uint32_t g)
* blocks since all the data will be copied.
*/
extern W_
-calcNeeded (rtsBool force_major, memcount *blocks_needed)
+calcNeeded (bool force_major, memcount *blocks_needed)
{
W_ needed = 0, blocks;
uint32_t g, N;
@@ -1442,7 +1442,7 @@ AdjustorWritable allocateExec (W_ bytes, AdjustorExecutable *exec_ret)
exec_block->u.back = bd;
}
bd->u.back = NULL;
- setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsTrue);
+ setExecutable(bd->start, bd->blocks * BLOCK_SIZE, true);
exec_block = bd;
}
*(exec_block->free) = n; // store the size of this chunk
@@ -1479,7 +1479,7 @@ void freeExec (void *addr)
if (bd != exec_block) {
debugTrace(DEBUG_gc, "free exec block %p", bd->start);
dbl_link_remove(bd, &exec_block);
- setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsFalse);
+ setExecutable(bd->start, bd->blocks * BLOCK_SIZE, false);
freeGroup(bd);
} else {
bd->free = bd->start;
diff --git a/rts/sm/Storage.h b/rts/sm/Storage.h
index 2bd1a35176..a4e928a3eb 100644
--- a/rts/sm/Storage.h
+++ b/rts/sm/Storage.h
@@ -19,7 +19,7 @@
void initStorage(void);
void exitStorage(void);
-void freeStorage(rtsBool free_heap);
+void freeStorage(bool free_heap);
// Adding more Capabilities later: this function allocates nurseries
// and initialises other storage-related things.
@@ -30,7 +30,7 @@ void storageAddCapabilities (uint32_t from, uint32_t to);
-------------------------------------------------------------------------- */
INLINE_HEADER
-rtsBool doYouWantToGC(Capability *cap)
+bool doYouWantToGC(Capability *cap)
{
return (cap->r.rCurrentNursery->link == NULL ||
g0->n_new_large_words >= large_alloc_lim);
@@ -73,7 +73,7 @@ void clearNursery (Capability *cap);
void resizeNurseries (StgWord blocks);
void resizeNurseriesFixed (void);
StgWord countNurseryBlocks (void);
-rtsBool getNewNursery (Capability *cap);
+bool getNewNursery (Capability *cap);
/* -----------------------------------------------------------------------------
Allocation accounting
@@ -102,7 +102,7 @@ StgWord calcTotalAllocated (void);
StgWord countLargeAllocated (void);
StgWord countOccupied (bdescr *bd);
-StgWord calcNeeded (rtsBool force_major, StgWord *blocks_needed);
+StgWord calcNeeded (bool force_major, StgWord *blocks_needed);
StgWord gcThreadLiveWords (uint32_t i, uint32_t g);
StgWord gcThreadLiveBlocks (uint32_t i, uint32_t g);
diff --git a/rts/win32/AsyncIO.c b/rts/win32/AsyncIO.c
index 887f642fdd..6a05f680db 100644
--- a/rts/win32/AsyncIO.c
+++ b/rts/win32/AsyncIO.c
@@ -181,7 +181,7 @@ startupAsyncIO()
}
void
-shutdownAsyncIO(rtsBool wait_threads)
+shutdownAsyncIO(bool wait_threads)
{
ShutdownIOManager(wait_threads);
if (completed_req_event != INVALID_HANDLE_VALUE) {
@@ -216,7 +216,7 @@ shutdownAsyncIO(rtsBool wait_threads)
* to complete if the 'completedTable' is empty.
*/
int
-awaitRequests(rtsBool wait)
+awaitRequests(bool wait)
{
#ifndef THREADED_RTS
// none of this is actually used in the threaded RTS
@@ -231,7 +231,7 @@ start:
// Nothing immediately available & we won't wait
if ((!wait && completed_hw == 0)
#if 0
- // If we just return when wait==rtsFalse, we'll go into a busy
+ // If we just return when wait==false, we'll go into a busy
// wait loop, so I disabled this condition --SDM 18/12/2003
(issued_reqs == 0 && completed_hw == 0)
#endif
diff --git a/rts/win32/AsyncIO.h b/rts/win32/AsyncIO.h
index 67d5110a47..bedbf5b67b 100644
--- a/rts/win32/AsyncIO.h
+++ b/rts/win32/AsyncIO.h
@@ -17,9 +17,9 @@ addIORequest(int fd,
extern unsigned int addDelayRequest(int usecs);
extern unsigned int addDoProcRequest(void* proc, void* param);
extern int startupAsyncIO(void);
-extern void shutdownAsyncIO(rtsBool wait_threads);
+extern void shutdownAsyncIO(bool wait_threads);
-extern int awaitRequests(rtsBool wait);
+extern int awaitRequests(bool wait);
extern void abandonRequestWait(void);
extern void resetAbandonRequestWait(void);
diff --git a/rts/win32/AwaitEvent.c b/rts/win32/AwaitEvent.c
index 51581025e1..b639121c87 100644
--- a/rts/win32/AwaitEvent.c
+++ b/rts/win32/AwaitEvent.c
@@ -25,7 +25,7 @@
static uint32_t workerWaitingForRequests = 0;
void
-awaitEvent(rtsBool wait)
+awaitEvent(bool wait)
{
do {
/* Try to de-queue completed IO requests
diff --git a/rts/win32/ConsoleHandler.c b/rts/win32/ConsoleHandler.c
index 4bcbe1201b..d30fd81e67 100644
--- a/rts/win32/ConsoleHandler.c
+++ b/rts/win32/ConsoleHandler.c
@@ -15,7 +15,7 @@ extern int stg_InstallConsoleEvent(int action, StgStablePtr *handler);
static BOOL WINAPI shutdown_handler(DWORD dwCtrlType);
static BOOL WINAPI generic_handler(DWORD dwCtrlType);
-static rtsBool deliver_event = rtsTrue;
+static bool deliver_event = true;
StgInt console_handler = STG_SIG_DFL;
#if !defined(THREADED_RTS)
@@ -83,7 +83,7 @@ static BOOL WINAPI shutdown_handler(DWORD dwCtrlType)
case CTRL_CLOSE_EVENT:
/* see generic_handler() comment re: this event */
- return FALSE;
+ return false;
case CTRL_C_EVENT:
case CTRL_BREAK_EVENT:
@@ -95,11 +95,11 @@ static BOOL WINAPI shutdown_handler(DWORD dwCtrlType)
} else {
interruptStgRts();
}
- return TRUE;
+ return true;
/* shutdown + logoff events are not handled here. */
default:
- return FALSE;
+ return false;
}
}
@@ -112,14 +112,14 @@ static BOOL WINAPI shutdown_handler(DWORD dwCtrlType)
*/
void initDefaultHandlers(void)
{
- if ( !SetConsoleCtrlHandler(shutdown_handler, TRUE) ) {
+ if ( !SetConsoleCtrlHandler(shutdown_handler, true) ) {
errorBelch("warning: failed to install default console handler");
}
}
void resetDefaultHandlers(void)
{
- if ( !SetConsoleCtrlHandler(shutdown_handler, FALSE) ) {
+ if ( !SetConsoleCtrlHandler(shutdown_handler, false) ) {
errorBelch("warning: failed to uninstall default console handler");
}
}
@@ -135,7 +135,7 @@ void resetDefaultHandlers(void)
void
blockUserSignals(void)
{
- deliver_event = rtsFalse;
+ deliver_event = false;
}
@@ -147,7 +147,7 @@ blockUserSignals(void)
void
unblockUserSignals(void)
{
- deliver_event = rtsTrue;
+ deliver_event = true;
}
@@ -227,9 +227,9 @@ static BOOL WINAPI generic_handler(DWORD dwCtrlType)
* the user of the app will be unable to kill/close it. Not
* good, so disable the delivery for now.
*/
- return FALSE;
+ return false;
default:
- if (!deliver_event) return TRUE;
+ if (!deliver_event) return true;
#if defined(THREADED_RTS)
sendIOManagerEvent((StgWord8) ((dwCtrlType<<1) | 1));
@@ -242,7 +242,7 @@ static BOOL WINAPI generic_handler(DWORD dwCtrlType)
// we need to wake up awaitEvent()
abandonRequestWait();
#endif
- return TRUE;
+ return true;
}
}
@@ -260,13 +260,13 @@ rts_InstallConsoleEvent(int action, StgStablePtr *handler)
switch (action) {
case STG_SIG_IGN:
console_handler = STG_SIG_IGN;
- if ( !SetConsoleCtrlHandler(NULL, TRUE) ) {
+ if ( !SetConsoleCtrlHandler(NULL, true) ) {
errorBelch("warning: unable to ignore console events");
}
break;
case STG_SIG_DFL:
console_handler = STG_SIG_IGN;
- if ( !SetConsoleCtrlHandler(NULL, FALSE) ) {
+ if ( !SetConsoleCtrlHandler(NULL, false) ) {
errorBelch("warning: unable to restore default console event "
"handling");
}
@@ -280,7 +280,7 @@ rts_InstallConsoleEvent(int action, StgStablePtr *handler)
#endif
if (previous_hdlr < 0 || previous_hdlr == STG_SIG_HAN) {
/* Only install generic_handler() once */
- if ( !SetConsoleCtrlHandler(generic_handler, TRUE) ) {
+ if ( !SetConsoleCtrlHandler(generic_handler, true) ) {
errorBelch("warning: unable to install console event handler");
}
}
diff --git a/rts/win32/ConsoleHandler.h b/rts/win32/ConsoleHandler.h
index 0d09a67b94..cd4a5447da 100644
--- a/rts/win32/ConsoleHandler.h
+++ b/rts/win32/ConsoleHandler.h
@@ -41,7 +41,7 @@ extern StgInt stg_pending_events;
* runnable. A console handler is used to handle termination events (Ctrl+C)
* and isn't considered a 'user handler'.
*/
-#define anyUserHandlers() (rtsFalse)
+#define anyUserHandlers() (false)
/*
* Function: startSignalHandlers()
diff --git a/rts/win32/IOManager.c b/rts/win32/IOManager.c
index e4f575c394..f25b006c4d 100644
--- a/rts/win32/IOManager.c
+++ b/rts/win32/IOManager.c
@@ -81,7 +81,7 @@ IOWorkerProc(PVOID param)
* be above some threshold.
*
*/
- rc = WaitForMultipleObjects( 2, hWaits, FALSE, INFINITE );
+ rc = WaitForMultipleObjects( 2, hWaits, false, INFINITE );
if (rc == WAIT_OBJECT_0) {
// we received the exit event
@@ -307,30 +307,30 @@ StartIOManager(void)
mmresult = timeGetDevCaps(&timecaps, sizeof(timecaps));
if (mmresult != MMSYSERR_NOERROR) {
- return FALSE;
+ return false;
}
sleepResolution = timecaps.wPeriodMin;
mmresult = timeBeginPeriod(sleepResolution);
if (mmresult != MMSYSERR_NOERROR) {
- return FALSE;
+ return false;
}
wq = NewWorkQueue();
- if ( !wq ) return FALSE;
+ if ( !wq ) return false;
ioMan = (IOManagerState*)malloc(sizeof(IOManagerState));
if (!ioMan) {
FreeWorkQueue(wq);
- return FALSE;
+ return false;
}
/* A manual-reset event */
- hExit = CreateEvent ( NULL, TRUE, FALSE, NULL );
+ hExit = CreateEvent ( NULL, true, false, NULL );
if ( !hExit ) {
FreeWorkQueue(wq);
free(ioMan);
- return FALSE;
+ return false;
}
ioMan->hExitEvent = hExit;
@@ -344,7 +344,7 @@ StartIOManager(void)
ioMan->active_work_items = NULL;
ioMan->sleepResolution = sleepResolution;
- return TRUE;
+ return true;
}
/*
@@ -466,7 +466,7 @@ AddDelayRequest ( unsigned int usecs,
{
WorkItem* wItem = (WorkItem*)malloc(sizeof(WorkItem));
unsigned int reqID;
- if (!ioMan || !wItem) return FALSE;
+ if (!ioMan || !wItem) return false;
reqID = ioMan->requestID++;
/* Fill in the blanks */
@@ -491,7 +491,7 @@ AddProcRequest ( void* proc,
{
WorkItem* wItem = (WorkItem*)malloc(sizeof(WorkItem));
unsigned int reqID;
- if (!ioMan || !wItem) return FALSE;
+ if (!ioMan || !wItem) return false;
reqID = ioMan->requestID++;
/* Fill in the blanks */
@@ -506,7 +506,7 @@ AddProcRequest ( void* proc,
return depositWorkItem(reqID, wItem);
}
-void ShutdownIOManager ( rtsBool wait_threads )
+void ShutdownIOManager ( bool wait_threads )
{
int num;
MMRESULT mmresult;
diff --git a/rts/win32/IOManager.h b/rts/win32/IOManager.h
index 30fad49c94..94821a8475 100644
--- a/rts/win32/IOManager.h
+++ b/rts/win32/IOManager.h
@@ -81,7 +81,7 @@ extern CompletionProc onComplete;
* Starting up and shutting down.
*/
extern BOOL StartIOManager ( void );
-extern void ShutdownIOManager ( rtsBool wait_threads );
+extern void ShutdownIOManager ( bool wait_threads );
/*
* Adding I/O and delay requests. With each request a
diff --git a/rts/win32/OSMem.c b/rts/win32/OSMem.c
index b43636c198..b6b97a7199 100644
--- a/rts/win32/OSMem.c
+++ b/rts/win32/OSMem.c
@@ -428,7 +428,7 @@ StgWord64 getPhysicalMemorySize (void)
return physMemSize;
}
-void setExecutable (void *p, W_ len, rtsBool exec)
+void setExecutable (void *p, W_ len, bool exec)
{
DWORD dwOldProtect = 0;
if (VirtualProtect (p, len,
@@ -499,7 +499,7 @@ void osReleaseHeapMemory (void)
#endif
-rtsBool osNumaAvailable(void)
+bool osNumaAvailable(void)
{
return osNumaNodes() > 1;
}
diff --git a/rts/win32/OSThreads.c b/rts/win32/OSThreads.c
index b36c3e53da..652ba13b4f 100644
--- a/rts/win32/OSThreads.c
+++ b/rts/win32/OSThreads.c
@@ -63,31 +63,31 @@ closeCondition( Condition* pCond )
return;
}
-rtsBool
+bool
broadcastCondition ( Condition* pCond )
{
PulseEvent(*pCond);
- return rtsTrue;
+ return true;
}
-rtsBool
+bool
signalCondition ( Condition* pCond )
{
if (SetEvent(*pCond) == 0) {
sysErrorBelch("SetEvent");
stg_exit(EXIT_FAILURE);
}
- return rtsTrue;
+ return true;
}
-rtsBool
+bool
waitCondition ( Condition* pCond, Mutex* pMut )
{
RELEASE_LOCK(pMut);
WaitForSingleObject(*pCond, INFINITE);
/* Hmm..use WaitForMultipleObjects() ? */
ACQUIRE_LOCK(pMut);
- return rtsTrue;
+ return true;
}
void
@@ -133,7 +133,7 @@ osThreadId()
return GetCurrentThreadId();
}
-rtsBool
+bool
osThreadIsAlive(OSThreadId id)
{
DWORD exit_code;
@@ -166,7 +166,7 @@ void
initMutex (Mutex* pMut)
{
HANDLE h = CreateMutex ( NULL, /* default sec. attributes */
- FALSE, /* not owned => initially signalled */
+                             false, /* not owned => initially signalled */
NULL
);
*pMut = h;
diff --git a/rts/win32/ThrIOManager.c b/rts/win32/ThrIOManager.c
index 0fb5912024..56042ddc77 100644
--- a/rts/win32/ThrIOManager.c
+++ b/rts/win32/ThrIOManager.c
@@ -43,8 +43,8 @@ getIOManagerEvent (void)
if (io_manager_event == INVALID_HANDLE_VALUE) {
hRes = CreateEvent ( NULL, // no security attrs
- TRUE, // manual reset
- FALSE, // initial state,
+ true, // manual reset
+ false, // initial state,
NULL ); // event name: NULL for private events
if (hRes == NULL) {
sysErrorBelch("getIOManagerEvent");
diff --git a/rts/win32/Ticker.c b/rts/win32/Ticker.c
index dd04d84118..7bc5ed55e3 100644
--- a/rts/win32/Ticker.c
+++ b/rts/win32/Ticker.c
@@ -71,7 +71,7 @@ stopTicker(void)
}
void
-exitTicker (rtsBool wait)
+exitTicker (bool wait)
{
if (timer_queue != NULL) {
DeleteTimerQueueEx(timer_queue, wait ? INVALID_HANDLE_VALUE : NULL);
diff --git a/rts/win32/WorkQueue.c b/rts/win32/WorkQueue.c
index a995f45f6d..562d04859c 100644
--- a/rts/win32/WorkQueue.c
+++ b/rts/win32/WorkQueue.c
@@ -99,7 +99,7 @@ GetWorkQueueHandle ( WorkQueue* pq )
* Function: GetWork
*
* Fetch a work item from the queue, blocking if none available.
- * Return value indicates of FALSE indicates error/fatal condition.
+ * A return value of false indicates an error/fatal condition.
*/
BOOL
GetWork ( WorkQueue* pq, void** ppw )
@@ -108,11 +108,11 @@ GetWork ( WorkQueue* pq, void** ppw )
if (!pq) {
queue_error("GetWork", "NULL WorkQueue object");
- return FALSE;
+ return false;
}
if (!ppw) {
queue_error("GetWork", "NULL WorkItem object");
- return FALSE;
+ return false;
}
/* Block waiting for work item to become available */
@@ -120,7 +120,7 @@ GetWork ( WorkQueue* pq, void** ppw )
!= WAIT_OBJECT_0 ) {
queue_error_rc("GetWork.WaitForSingleObject(workAvailable)",
( (WAIT_FAILED == rc) ? GetLastError() : rc));
- return FALSE;
+ return false;
}
return FetchWork(pq,ppw);
@@ -130,7 +130,7 @@ GetWork ( WorkQueue* pq, void** ppw )
* Function: FetchWork
*
* Fetch a work item from the queue, blocking if none available.
+ * A return value of false indicates an error/fatal condition.
+ * Return value indicates of false indicates error/fatal condition.
*/
BOOL
FetchWork ( WorkQueue* pq, void** ppw )
@@ -139,11 +139,11 @@ FetchWork ( WorkQueue* pq, void** ppw )
if (!pq) {
queue_error("FetchWork", "NULL WorkQueue object");
- return FALSE;
+ return false;
}
if (!ppw) {
queue_error("FetchWork", "NULL WorkItem object");
- return FALSE;
+ return false;
}
EnterCriticalSection(&pq->queueLock);
@@ -155,17 +155,17 @@ FetchWork ( WorkQueue* pq, void** ppw )
LeaveCriticalSection(&pq->queueLock);
if ( 0 == rc ) {
queue_error_rc("FetchWork.ReleaseSemaphore()", GetLastError());
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
/*
* Function: SubmitWork
*
* Add work item to the queue, blocking if no room available.
- * Return value indicates of FALSE indicates error/fatal condition.
+ * A return value of false indicates an error/fatal condition.
*/
BOOL
SubmitWork ( WorkQueue* pq, void* pw )
@@ -174,11 +174,11 @@ SubmitWork ( WorkQueue* pq, void* pw )
if (!pq) {
queue_error("SubmitWork", "NULL WorkQueue object");
- return FALSE;
+ return false;
}
if (!pw) {
queue_error("SubmitWork", "NULL WorkItem object");
- return FALSE;
+ return false;
}
/* Block waiting for work item to become available */
@@ -187,7 +187,7 @@ SubmitWork ( WorkQueue* pq, void* pw )
queue_error_rc("SubmitWork.WaitForSingleObject(workAvailable)",
( (WAIT_FAILED == rc) ? GetLastError() : rc));
- return FALSE;
+ return false;
}
EnterCriticalSection(&pq->queueLock);
@@ -197,10 +197,10 @@ SubmitWork ( WorkQueue* pq, void* pw )
LeaveCriticalSection(&pq->queueLock);
if ( 0 == rc ) {
queue_error_rc("SubmitWork.ReleaseSemaphore()", GetLastError());
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
/* Error handling */
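The three queue operations above share one shape: a bounded buffer guarded by two counting semaphores (items available / slots free) plus a lock. A POSIX rendering of the same pattern, purely for illustration (the RTS code uses the Win32 equivalents: CreateSemaphore, WaitForSingleObject, ReleaseSemaphore, CRITICAL_SECTION):

    #include <stdbool.h>
    #include <pthread.h>
    #include <semaphore.h>

    #define QUEUE_LEN 16

    typedef struct {
        void *items[QUEUE_LEN];
        int head, tail;
        sem_t work_available;        /* counts queued items */
        sem_t room_available;        /* counts free slots   */
        pthread_mutex_t lock;
    } work_queue_sketch;

    static bool get_work_sketch(work_queue_sketch *q, void **out)
    {
        if (sem_wait(&q->work_available) != 0)    /* block until an item exists */
            return false;
        pthread_mutex_lock(&q->lock);
        *out = q->items[q->head];
        q->head = (q->head + 1) % QUEUE_LEN;
        pthread_mutex_unlock(&q->lock);
        return sem_post(&q->room_available) == 0; /* one more free slot */
    }

    static bool submit_work_sketch(work_queue_sketch *q, void *item)
    {
        if (sem_wait(&q->room_available) != 0)    /* block until a slot frees */
            return false;
        pthread_mutex_lock(&q->lock);
        q->items[q->tail] = item;
        q->tail = (q->tail + 1) % QUEUE_LEN;
        pthread_mutex_unlock(&q->lock);
        return sem_post(&q->work_available) == 0; /* publish the item */
    }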