Diffstat (limited to 'rts/sm/GC.c')
-rw-r--r--  rts/sm/GC.c  28
1 file changed, 14 insertions(+), 14 deletions(-)
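
Every hunk below makes the same mechanical substitution: #ifdef FOO becomes #if defined(FOO). The two forms behave identically when testing a single macro; the defined() operator additionally composes with && and ||, so compound guards fit in one directive, and a single uniform style is easier to check mechanically. A minimal standalone sketch of the idiom follows (the file name and build line are illustrative, not part of this commit):

/* defined_example.c -- hypothetical sketch, not part of GC.c.
 * Build with, e.g.: cc -DDEBUG -DTHREADED_RTS defined_example.c */
#include <stdio.h>

int main(void) {
#ifdef DEBUG
    /* old style: tests exactly one macro, cannot combine tests */
    puts("old style: #ifdef DEBUG");
#endif
#if defined(DEBUG)
    /* new style: equivalent to the above for a single macro */
    puts("new style: #if defined(DEBUG)");
#endif
#if defined(DEBUG) && defined(THREADED_RTS)
    /* the defined() form composes with && and ||, which #ifdef cannot */
    puts("compound guard in a single directive");
#endif
    return 0;
}
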
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 0dafb8c3ea..515a7fe6d8 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -104,7 +104,7 @@ bool major_gc;
static W_ g0_pcnt_kept = 30; // percentage of g0 live at last minor GC
/* Mut-list stats */
-#ifdef DEBUG
+#if defined(DEBUG)
uint32_t mutlist_MUTVARS,
mutlist_MUTARRS,
mutlist_MVARS,
@@ -199,7 +199,7 @@ GarbageCollect (uint32_t collect_gen,
saved_gct = gct;
#endif
-#ifdef PROFILING
+#if defined(PROFILING)
CostCentreStack *save_CCS[n_capabilities];
#endif
@@ -224,7 +224,7 @@ GarbageCollect (uint32_t collect_gen,
// lock the StablePtr table
stableLock();
-#ifdef DEBUG
+#if defined(DEBUG)
mutlist_MUTVARS = 0;
mutlist_MUTARRS = 0;
mutlist_MVARS = 0;
@@ -238,7 +238,7 @@ GarbageCollect (uint32_t collect_gen,
#endif
// attribute any costs to CCS_GC
-#ifdef PROFILING
+#if defined(PROFILING)
for (n = 0; n < n_capabilities; n++) {
save_CCS[n] = capabilities[n]->r.rCCCS;
capabilities[n]->r.rCCCS = CCS_GC;
@@ -291,7 +291,7 @@ GarbageCollect (uint32_t collect_gen,
debugTrace(DEBUG_gc, "GC (gen %d, using %d thread(s))",
N, n_gc_threads);
-#ifdef DEBUG
+#if defined(DEBUG)
// check for memory leaks if DEBUG is on
memInventory(DEBUG_gc);
#endif
@@ -423,7 +423,7 @@ GarbageCollect (uint32_t collect_gen,
// Now see which stable names are still alive.
gcStableTables();
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
if (n_gc_threads == 1) {
for (n = 0; n < n_capabilities; n++) {
pruneSparkQueue(capabilities[n]);
@@ -437,7 +437,7 @@ GarbageCollect (uint32_t collect_gen,
}
#endif
-#ifdef PROFILING
+#if defined(PROFILING)
// We call processHeapClosureForDead() on every closure destroyed during
// the current garbage collection, so we invoke LdvCensusForDead().
if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
@@ -699,7 +699,7 @@ GarbageCollect (uint32_t collect_gen,
checkUnload (gct->scavenged_static_objects);
}
-#ifdef PROFILING
+#if defined(PROFILING)
// resetStaticObjectForRetainerProfiling() must be called before
// zeroing below.
@@ -762,19 +762,19 @@ GarbageCollect (uint32_t collect_gen,
// extra GC trace info
IF_DEBUG(gc, statDescribeGens());
-#ifdef DEBUG
+#if defined(DEBUG)
// symbol-table based profiling
/* heapCensus(to_blocks); */ /* ToDo */
#endif
// restore enclosing cost centre
-#ifdef PROFILING
+#if defined(PROFILING)
for (n = 0; n < n_capabilities; n++) {
capabilities[n]->r.rCCCS = save_CCS[n];
}
#endif
-#ifdef DEBUG
+#if defined(DEBUG)
// check for memory leaks if DEBUG is on
memInventory(DEBUG_gc);
#endif
@@ -823,7 +823,7 @@ new_gc_thread (uint32_t n, gc_thread *t)
t->cap = capabilities[n];
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
t->id = 0;
initSpinLock(&t->gc_spin);
initSpinLock(&t->mut_spin);
@@ -1024,7 +1024,7 @@ loop:
// scavenge_loop() only exits when there's no work to do
-#ifdef DEBUG
+#if defined(DEBUG)
r = dec_running();
#else
dec_running();
@@ -1085,7 +1085,7 @@ gcWorkerThread (Capability *cap)
scavenge_until_all_done();
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
// Now that the whole heap is marked, we discard any sparks that
// were found to be unreachable. The main GC thread is currently
// marking heap reachable via weak pointers, so it is