Diffstat (limited to 'rts/sm')
-rw-r--r--  rts/sm/BlockAlloc.c       2
-rw-r--r--  rts/sm/BlockAlloc.h       2
-rw-r--r--  rts/sm/CNF.c             16
-rw-r--r--  rts/sm/CNF.h              2
-rw-r--r--  rts/sm/Compact.c          2
-rw-r--r--  rts/sm/Evac.c            16
-rw-r--r--  rts/sm/Evac_thr.c         2
-rw-r--r--  rts/sm/GC.c              28
-rw-r--r--  rts/sm/GC.h               2
-rw-r--r--  rts/sm/GCThread.h         2
-rw-r--r--  rts/sm/GCUtils.c          6
-rw-r--r--  rts/sm/GCUtils.h          2
-rw-r--r--  rts/sm/HeapAlloc.h        2
-rw-r--r--  rts/sm/MBlock.c          10
-rw-r--r--  rts/sm/MarkWeak.c         4
-rw-r--r--  rts/sm/OSMem.h            2
-rw-r--r--  rts/sm/Sanity.c           6
-rw-r--r--  rts/sm/Sanity.h           2
-rw-r--r--  rts/sm/Scav.c             6
-rw-r--r--  rts/sm/Scav.h             2
-rw-r--r--  rts/sm/Scav_thr.c         2
-rw-r--r--  rts/sm/ShouldCompact.h    2
-rw-r--r--  rts/sm/Storage.c         16
23 files changed, 68 insertions(+), 68 deletions(-)
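
The change applied throughout is a mechanical rewrite of #ifdef X / #ifndef X
to #if defined(X) / #if !defined(X). The two forms behave identically for a
single-flag test; the defined() form has the advantage that it composes with
boolean operators, so a condition can later be extended without switching
directive styles. A minimal illustrative sketch, not taken from the patch,
using two flags (THREADED_RTS, DEBUG) that do appear in it:

/* Old style: tests exactly one flag and cannot be combined. */
#ifdef THREADED_RTS
/* threaded-only code */
#endif

/* New style: same meaning for a single flag ... */
#if defined(THREADED_RTS)
/* threaded-only code */
#endif

/* ... and it composes directly into larger conditions. */
#if defined(THREADED_RTS) && !defined(DEBUG)
/* threaded, non-debug code */
#endif
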
diff --git a/rts/sm/BlockAlloc.c b/rts/sm/BlockAlloc.c
index c729c1874f..2a02ecc9c5 100644
--- a/rts/sm/BlockAlloc.c
+++ b/rts/sm/BlockAlloc.c
@@ -855,7 +855,7 @@ void returnMemoryToOS(uint32_t n /* megablocks */)
Debugging
-------------------------------------------------------------------------- */
-#ifdef DEBUG
+#if defined(DEBUG)
static void
check_tail (bdescr *bd)
{
diff --git a/rts/sm/BlockAlloc.h b/rts/sm/BlockAlloc.h
index 9b561df5db..217d669a13 100644
--- a/rts/sm/BlockAlloc.h
+++ b/rts/sm/BlockAlloc.h
@@ -19,7 +19,7 @@ extern W_ countBlocks (bdescr *bd);
extern W_ countAllocdBlocks (bdescr *bd);
extern void returnMemoryToOS(uint32_t n);
-#ifdef DEBUG
+#if defined(DEBUG)
void checkFreeListSanity(void);
W_ countFreeList(void);
void markBlocks (bdescr *bd);
diff --git a/rts/sm/CNF.c b/rts/sm/CNF.c
index 4864e601f8..72ab6f24cf 100644
--- a/rts/sm/CNF.c
+++ b/rts/sm/CNF.c
@@ -26,10 +26,10 @@
#include <string.h>
-#ifdef HAVE_UNISTD_H
+#if defined(HAVE_UNISTD_H)
#include <unistd.h>
#endif
-#ifdef HAVE_LIMITS_H
+#if defined(HAVE_LIMITS_H)
#include <limits.h>
#endif
@@ -227,7 +227,7 @@ compactAllocateBlockInternal(Capability *cap,
break;
default:
-#ifdef DEBUG
+#if defined(DEBUG)
ASSERT(!"code should not be reached");
#else
RTS_UNREACHABLE;
@@ -319,7 +319,7 @@ countCompactBlocks(bdescr *outer)
return count;
}
-#ifdef DEBUG
+#if defined(DEBUG)
// Like countCompactBlocks, but adjusts the size so each mblock is assumed to
// only contain BLOCKS_PER_MBLOCK blocks. Used in memInventory().
StgWord
@@ -639,7 +639,7 @@ StgWord shouldCompact (StgCompactNFData *str, StgClosure *p)
Sanity-checking a compact
-------------------------------------------------------------------------- */
-#ifdef DEBUG
+#if defined(DEBUG)
STATIC_INLINE void
check_object_in_compact (StgCompactNFData *str, StgClosure *p)
{
@@ -788,7 +788,7 @@ any_needs_fixup(StgCompactNFDataBlock *block)
return false;
}
-#ifdef DEBUG
+#if defined(DEBUG)
static void
spew_failing_pointer(StgWord *fixup_table, uint32_t count, StgWord address)
{
@@ -857,7 +857,7 @@ find_pointer(StgWord *fixup_table, uint32_t count, StgClosure *q)
fail:
// We should never get here
-#ifdef DEBUG
+#if defined(DEBUG)
spew_failing_pointer(fixup_table, count, address);
#endif
return NULL;
@@ -1171,7 +1171,7 @@ compactFixupPointers(StgCompactNFData *str,
dbl_link_onto(bd, &g0->compact_objects);
RELEASE_SM_LOCK;
-#ifdef DEBUG
+#if defined(DEBUG)
if (root)
verify_consistency_loop(str);
#endif
diff --git a/rts/sm/CNF.h b/rts/sm/CNF.h
index c4655dc92d..6482130d2b 100644
--- a/rts/sm/CNF.h
+++ b/rts/sm/CNF.h
@@ -29,7 +29,7 @@ StgWord compactContains(StgCompactNFData *str,
StgPtr what);
StgWord countCompactBlocks(bdescr *outer);
-#ifdef DEBUG
+#if defined(DEBUG)
StgWord countAllocdCompactBlocks(bdescr *outer);
#endif
diff --git a/rts/sm/Compact.c b/rts/sm/Compact.c
index 1f7f08748a..0e2fea8990 100644
--- a/rts/sm/Compact.c
+++ b/rts/sm/Compact.c
@@ -28,7 +28,7 @@
#include "Stable.h"
// Turn off inlining when debugging - it obfuscates things
-#ifdef DEBUG
+#if defined(DEBUG)
# undef STATIC_INLINE
# define STATIC_INLINE static
#endif
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index e515c7b440..78adf62f6c 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -115,7 +115,7 @@ copy_tag(StgClosure **p, const StgInfoTable *info,
const StgInfoTable *new_info;
new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
if (new_info != info) {
-#ifdef PROFILING
+#if defined(PROFILING)
// We copied this object at the same time as another
// thread. We'll evacuate the object again and the copy
// we just made will be discarded at the next GC, but we
@@ -136,7 +136,7 @@ copy_tag(StgClosure **p, const StgInfoTable *info,
*p = TAG_CLOSURE(tag,(StgClosure*)to);
#endif
-#ifdef PROFILING
+#if defined(PROFILING)
// We store the size of the just evacuated object in the LDV word so that
// the profiler can guess the position of the next object later.
// This is safe only if we are sure that no other thread evacuates
@@ -171,7 +171,7 @@ copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
// __builtin_prefetch(to + size + 2, 1);
// }
-#ifdef PROFILING
+#if defined(PROFILING)
// We store the size of the just evacuated object in the LDV word so that
// the profiler can guess the position of the next object later.
SET_EVACUAEE_FOR_LDV(from, size);
@@ -195,7 +195,7 @@ copyPart(StgClosure **p, StgClosure *src, uint32_t size_to_reserve,
spin:
info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
if (info == (W_)&stg_WHITEHOLE_info) {
-#ifdef PROF_SPIN
+#if defined(PROF_SPIN)
whitehole_spin++;
#endif
goto spin;
@@ -221,7 +221,7 @@ spin:
src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
*p = (StgClosure *)to;
-#ifdef PROFILING
+#if defined(PROFILING)
// We store the size of the just evacuated object in the LDV word so that
// the profiler can guess the position of the next object later.
SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
@@ -344,7 +344,7 @@ evacuate_static_object (StgClosure **link_field, StgClosure *q)
// See Note [STATIC_LINK fields] for how the link field bits work
if ((((StgWord)(link)&STATIC_BITS) | prev_static_flag) != 3) {
StgWord new_list_head = (StgWord)q | static_flag;
-#ifndef THREADED_RTS
+#if !defined(THREADED_RTS)
*link_field = gct->static_objects;
gct->static_objects = (StgClosure *)new_list_head;
#else
@@ -707,7 +707,7 @@ loop:
case THUNK_1_1:
case THUNK_2_0:
case THUNK_0_2:
-#ifdef NO_PROMOTE_THUNKS
+#if defined(NO_PROMOTE_THUNKS)
#error bitrotted
#endif
copy(p,info,q,sizeofW(StgThunk)+2,gen_no);
@@ -1067,7 +1067,7 @@ selector_loop:
// Select the right field from the constructor
val = selectee->payload[field];
-#ifdef PROFILING
+#if defined(PROFILING)
// For the purposes of LDV profiling, we have destroyed
// the original selector thunk, p.
if (era > 0) {
diff --git a/rts/sm/Evac_thr.c b/rts/sm/Evac_thr.c
index 4fff4ec8ac..94703a609c 100644
--- a/rts/sm/Evac_thr.c
+++ b/rts/sm/Evac_thr.c
@@ -1,4 +1,4 @@
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
#define PARALLEL_GC
#include "Evac.c"
#endif
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 0dafb8c3ea..515a7fe6d8 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -104,7 +104,7 @@ bool major_gc;
static W_ g0_pcnt_kept = 30; // percentage of g0 live at last minor GC
/* Mut-list stats */
-#ifdef DEBUG
+#if defined(DEBUG)
uint32_t mutlist_MUTVARS,
mutlist_MUTARRS,
mutlist_MVARS,
@@ -199,7 +199,7 @@ GarbageCollect (uint32_t collect_gen,
saved_gct = gct;
#endif
-#ifdef PROFILING
+#if defined(PROFILING)
CostCentreStack *save_CCS[n_capabilities];
#endif
@@ -224,7 +224,7 @@ GarbageCollect (uint32_t collect_gen,
// lock the StablePtr table
stableLock();
-#ifdef DEBUG
+#if defined(DEBUG)
mutlist_MUTVARS = 0;
mutlist_MUTARRS = 0;
mutlist_MVARS = 0;
@@ -238,7 +238,7 @@ GarbageCollect (uint32_t collect_gen,
#endif
// attribute any costs to CCS_GC
-#ifdef PROFILING
+#if defined(PROFILING)
for (n = 0; n < n_capabilities; n++) {
save_CCS[n] = capabilities[n]->r.rCCCS;
capabilities[n]->r.rCCCS = CCS_GC;
@@ -291,7 +291,7 @@ GarbageCollect (uint32_t collect_gen,
debugTrace(DEBUG_gc, "GC (gen %d, using %d thread(s))",
N, n_gc_threads);
-#ifdef DEBUG
+#if defined(DEBUG)
// check for memory leaks if DEBUG is on
memInventory(DEBUG_gc);
#endif
@@ -423,7 +423,7 @@ GarbageCollect (uint32_t collect_gen,
// Now see which stable names are still alive.
gcStableTables();
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
if (n_gc_threads == 1) {
for (n = 0; n < n_capabilities; n++) {
pruneSparkQueue(capabilities[n]);
@@ -437,7 +437,7 @@ GarbageCollect (uint32_t collect_gen,
}
#endif
-#ifdef PROFILING
+#if defined(PROFILING)
// We call processHeapClosureForDead() on every closure destroyed during
// the current garbage collection, so we invoke LdvCensusForDead().
if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
@@ -699,7 +699,7 @@ GarbageCollect (uint32_t collect_gen,
checkUnload (gct->scavenged_static_objects);
}
-#ifdef PROFILING
+#if defined(PROFILING)
// resetStaticObjectForRetainerProfiling() must be called before
// zeroing below.
@@ -762,19 +762,19 @@ GarbageCollect (uint32_t collect_gen,
// extra GC trace info
IF_DEBUG(gc, statDescribeGens());
-#ifdef DEBUG
+#if defined(DEBUG)
// symbol-table based profiling
/* heapCensus(to_blocks); */ /* ToDo */
#endif
// restore enclosing cost centre
-#ifdef PROFILING
+#if defined(PROFILING)
for (n = 0; n < n_capabilities; n++) {
capabilities[n]->r.rCCCS = save_CCS[n];
}
#endif
-#ifdef DEBUG
+#if defined(DEBUG)
// check for memory leaks if DEBUG is on
memInventory(DEBUG_gc);
#endif
@@ -823,7 +823,7 @@ new_gc_thread (uint32_t n, gc_thread *t)
t->cap = capabilities[n];
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
t->id = 0;
initSpinLock(&t->gc_spin);
initSpinLock(&t->mut_spin);
@@ -1024,7 +1024,7 @@ loop:
// scavenge_loop() only exits when there's no work to do
-#ifdef DEBUG
+#if defined(DEBUG)
r = dec_running();
#else
dec_running();
@@ -1085,7 +1085,7 @@ gcWorkerThread (Capability *cap)
scavenge_until_all_done();
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
// Now that the whole heap is marked, we discard any sparks that
// were found to be unreachable. The main GC thread is currently
// marking heap reachable via weak pointers, so it is
diff --git a/rts/sm/GC.h b/rts/sm/GC.h
index a176fde3f6..c6b0c13a46 100644
--- a/rts/sm/GC.h
+++ b/rts/sm/GC.h
@@ -35,7 +35,7 @@ extern StgPtr mark_sp;
extern bool work_stealing;
-#ifdef DEBUG
+#if defined(DEBUG)
extern uint32_t mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS,
mutlist_TVAR,
mutlist_TVAR_WATCH_QUEUE,
diff --git a/rts/sm/GCThread.h b/rts/sm/GCThread.h
index 15f1f13f20..bb206db64c 100644
--- a/rts/sm/GCThread.h
+++ b/rts/sm/GCThread.h
@@ -119,7 +119,7 @@ typedef struct gen_workspace_ {
typedef struct gc_thread_ {
Capability *cap;
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
OSThreadId id; // The OS thread that this struct belongs to
SpinLock gc_spin;
SpinLock mut_spin;
diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c
index 3717faebed..0373c2b925 100644
--- a/rts/sm/GCUtils.c
+++ b/rts/sm/GCUtils.c
@@ -22,11 +22,11 @@
#include "GCUtils.h"
#include "Printer.h"
#include "Trace.h"
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
#include "WSDeque.h"
#endif
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
SpinLock gc_alloc_block_sync;
#endif
@@ -346,7 +346,7 @@ alloc_todo_block (gen_workspace *ws, uint32_t size)
* Debugging
* -------------------------------------------------------------------------- */
-#ifdef DEBUG
+#if defined(DEBUG)
void
printMutableList(bdescr *bd)
{
diff --git a/rts/sm/GCUtils.h b/rts/sm/GCUtils.h
index 657fb783d1..2e2d4b199d 100644
--- a/rts/sm/GCUtils.h
+++ b/rts/sm/GCUtils.h
@@ -51,7 +51,7 @@ isPartiallyFull(bdescr *bd)
}
-#ifdef DEBUG
+#if defined(DEBUG)
void printMutableList (bdescr *bd);
#endif
diff --git a/rts/sm/HeapAlloc.h b/rts/sm/HeapAlloc.h
index a74e79a86d..9a36d106bb 100644
--- a/rts/sm/HeapAlloc.h
+++ b/rts/sm/HeapAlloc.h
@@ -48,7 +48,7 @@
the 4GB block in question.
-------------------------------------------------------------------------- */
-#ifdef USE_LARGE_ADDRESS_SPACE
+#if defined(USE_LARGE_ADDRESS_SPACE)
struct mblock_address_range {
W_ begin, end;
diff --git a/rts/sm/MBlock.c b/rts/sm/MBlock.c
index 833dd8c7c2..344bd3f0e9 100644
--- a/rts/sm/MBlock.c
+++ b/rts/sm/MBlock.c
@@ -61,7 +61,7 @@ W_ mpc_misses = 0;
with recently decommitted blocks
*/
-#ifdef USE_LARGE_ADDRESS_SPACE
+#if defined(USE_LARGE_ADDRESS_SPACE)
// Large address space means we use two-step allocation: reserve
// something large upfront, and then commit as needed
@@ -591,7 +591,7 @@ void *
getMBlocksOnNode(uint32_t node, uint32_t n)
{
void *addr = getMBlocks(n);
-#ifdef DEBUG
+#if defined(DEBUG)
if (RtsFlags.DebugFlags.numa) return addr; // faking NUMA
#endif
osBindMBlocksToNode(addr, n * MBLOCK_SIZE, numa_map[node]);
@@ -619,7 +619,7 @@ freeAllMBlocks(void)
{
debugTrace(DEBUG_gc, "freeing all megablocks");
-#ifdef USE_LARGE_ADDRESS_SPACE
+#if defined(USE_LARGE_ADDRESS_SPACE)
{
struct free_list *iter, *next;
@@ -654,10 +654,10 @@ initMBlocks(void)
{
osMemInit();
-#ifdef USE_LARGE_ADDRESS_SPACE
+#if defined(USE_LARGE_ADDRESS_SPACE)
{
W_ size;
-#ifdef aarch64_HOST_ARCH
+#if defined(aarch64_HOST_ARCH)
size = (W_)1 << 38; // 1/4 TByte
#else
size = (W_)1 << 40; // 1 TByte
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index e7dfd6e57c..c7a87a2d76 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -359,7 +359,7 @@ static void tidyThreadList (generation *gen)
}
}
-#ifdef DEBUG
+#if defined(DEBUG)
static void checkWeakPtrSanity(StgWeak *hd, StgWeak *tl)
{
StgWeak *w, *prev;
@@ -411,7 +411,7 @@ markWeakPtrList ( void )
for (w = gen->weak_ptr_list; w != NULL; w = w->link) {
// w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
-#ifdef DEBUG
+#if defined(DEBUG)
{ // careful to do this assertion only reading the info ptr
// once, because during parallel GC it might change under our feet.
const StgInfoTable *info;
diff --git a/rts/sm/OSMem.h b/rts/sm/OSMem.h
index 4a19b348e5..3b0cee9630 100644
--- a/rts/sm/OSMem.h
+++ b/rts/sm/OSMem.h
@@ -38,7 +38,7 @@ roundUpToPage (size_t x)
}
-#ifdef USE_LARGE_ADDRESS_SPACE
+#if defined(USE_LARGE_ADDRESS_SPACE)
/*
If "large address space" is enabled, we allocate memory in two
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 2b91540ec5..6bfa1cb494 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -16,7 +16,7 @@
#include "PosixSource.h"
#include "Rts.h"
-#ifdef DEBUG /* whole file */
+#if defined(DEBUG) /* whole file */
#include "RtsUtils.h"
#include "sm/Storage.h"
@@ -808,7 +808,7 @@ findMemoryLeak (void)
markBlocks(capabilities[i]->pinned_object_block);
}
-#ifdef PROFILING
+#if defined(PROFILING)
// TODO:
// if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
// markRetainerBlocks();
@@ -913,7 +913,7 @@ memInventory (bool show)
}
retainer_blocks = 0;
-#ifdef PROFILING
+#if defined(PROFILING)
if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
retainer_blocks = retainerStackBlocks();
}
diff --git a/rts/sm/Sanity.h b/rts/sm/Sanity.h
index f9f1b3b787..9227e6fd18 100644
--- a/rts/sm/Sanity.h
+++ b/rts/sm/Sanity.h
@@ -8,7 +8,7 @@
#pragma once
-#ifdef DEBUG
+#if defined(DEBUG)
#include "BeginPrivate.h"
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index bbe049cc2a..ab7b69f6a8 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -86,7 +86,7 @@ scavengeTSO (StgTSO *tso)
) {
evacuate(&tso->block_info.closure);
}
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
// in the THREADED_RTS, block_info.closure must always point to a
// valid closure, because we assume this in throwTo(). In the
// non-threaded RTS it might be a FD (for
@@ -117,7 +117,7 @@ static void
evacuate_hash_entry(MapHashData *dat, StgWord key, const void *value)
{
StgClosure *p = (StgClosure*)key;
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
gc_thread *old_gct = gct;
#endif
@@ -1640,7 +1640,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
p = (StgPtr)*q;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
-#ifdef DEBUG
+#if defined(DEBUG)
switch (get_itbl((StgClosure *)p)->type) {
case MUT_VAR_CLEAN:
// can happen due to concurrent writeMutVars
diff --git a/rts/sm/Scav.h b/rts/sm/Scav.h
index 865a78a242..6d265a7f40 100644
--- a/rts/sm/Scav.h
+++ b/rts/sm/Scav.h
@@ -18,7 +18,7 @@
void scavenge_loop (void);
void scavenge_capability_mut_lists (Capability *cap);
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
void scavenge_loop1 (void);
void scavenge_capability_mut_Lists1 (Capability *cap);
#endif
diff --git a/rts/sm/Scav_thr.c b/rts/sm/Scav_thr.c
index 372e779b96..af3dc7a64f 100644
--- a/rts/sm/Scav_thr.c
+++ b/rts/sm/Scav_thr.c
@@ -1,4 +1,4 @@
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
#define PARALLEL_GC
#include "Scav.c"
#endif
diff --git a/rts/sm/ShouldCompact.h b/rts/sm/ShouldCompact.h
index bce8ced288..d80076460f 100644
--- a/rts/sm/ShouldCompact.h
+++ b/rts/sm/ShouldCompact.h
@@ -18,6 +18,6 @@
#define SHOULDCOMPACT_NOTIN_CNF 2
#define SHOULDCOMPACT_PINNED 3
-#ifndef CMINUSMINUS
+#if !defined(CMINUSMINUS)
extern StgWord shouldCompact (StgCompactNFData *str, StgClosure *p);
#endif
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 43f67f4899..4aa4b12868 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -71,7 +71,7 @@ uint32_t n_nurseries;
*/
volatile StgWord next_nursery[MAX_NUMA_NODES];
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
/*
* Storage manager mutex: protects all the above state from
* simultaneous access by two STG threads.
@@ -113,7 +113,7 @@ initGeneration (generation *gen, int g)
gen->mark = 0;
gen->compact = 0;
gen->bitmap = NULL;
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
initSpinLock(&gen->sync);
#endif
gen->threads = END_TSO_QUEUE;
@@ -195,9 +195,9 @@ initStorage (void)
exec_block = NULL;
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
initSpinLock(&gc_alloc_block_sync);
-#ifdef PROF_SPIN
+#if defined(PROF_SPIN)
whitehole_spin = 0;
#endif
#endif
@@ -381,7 +381,7 @@ lockCAF (StgRegTable *reg, StgIndStatic *caf)
orig_info = caf->header.info;
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
const StgInfoTable *cur_info;
if (orig_info == &stg_IND_STATIC_info ||
@@ -452,7 +452,7 @@ newCAF(StgRegTable *reg, StgIndStatic *caf)
regTableToCapability(reg), oldest_gen->no);
}
-#ifdef DEBUG
+#if defined(DEBUG)
// In the DEBUG rts, we keep track of live CAFs by chaining them
// onto a list debug_caf_list. This is so that we can tell if we
// ever enter a GC'd CAF, and emit a suitable barf().
@@ -642,7 +642,7 @@ resetNurseries (void)
}
assignNurseriesToCapabilities(0, n_capabilities);
-#ifdef DEBUG
+#if defined(DEBUG)
bdescr *bd;
for (n = 0; n < n_nurseries; n++) {
for (bd = nurseries[n].blocks; bd; bd = bd->link) {
@@ -1515,7 +1515,7 @@ void freeExec (void *addr)
#endif /* switch(HOST_OS) */
-#ifdef DEBUG
+#if defined(DEBUG)
// handy function for use in gdb, because Bdescr() is inlined.
extern bdescr *_bdescr (StgPtr p);