Diffstat (limited to 'rts/sm')
-rw-r--r--  rts/sm/GC.c  22
-rw-r--r--  rts/sm/GC.h  19
2 files changed, 24 insertions, 17 deletions
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 0817b80758..6857c9e6c1 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -278,11 +278,7 @@ addMutListScavStats(const MutListScavStats *src,
-------------------------------------------------------------------------- */
void
-GarbageCollect (uint32_t collect_gen,
- const bool do_heap_census,
- const bool is_overflow_gc,
- const bool deadlock_detect,
- uint32_t gc_type USED_IF_THREADS,
+GarbageCollect (struct GcConfig config,
Capability *cap,
bool idle_cap[])
{
@@ -298,7 +294,7 @@ GarbageCollect (uint32_t collect_gen,
// The time we should report our heap census as occurring at, if necessary.
Time mut_time = 0;
- if (do_heap_census) {
+ if (config.do_heap_census) {
RTSStats stats;
getRTSStats(&stats);
mut_time = stats.mutator_cpu_ns;
@@ -307,6 +303,8 @@ GarbageCollect (uint32_t collect_gen,
// necessary if we stole a callee-saves register for gct:
#if defined(THREADED_RTS)
saved_gct = gct;
+#else
+ ASSERT(!config.parallel);
#endif
#if defined(PROFILING)
@@ -349,11 +347,11 @@ GarbageCollect (uint32_t collect_gen,
/* Figure out which generation to collect
*/
- N = collect_gen;
+ N = config.collect_gen;
major_gc = (N == RtsFlags.GcFlags.generations-1);
/* See Note [Deadlock detection under the nonmoving collector]. */
- deadlock_detect_gc = deadlock_detect;
+ deadlock_detect_gc = config.deadlock_detect;
#if defined(THREADED_RTS)
if (major_gc && RtsFlags.GcFlags.useNonmoving && RELAXED_LOAD(&concurrent_coll_running)) {
@@ -362,7 +360,7 @@ GarbageCollect (uint32_t collect_gen,
* TODO: Catch heap-size runaway.
*/
N--;
- collect_gen--;
+ config.collect_gen--;
major_gc = false;
}
#endif
@@ -397,7 +395,7 @@ GarbageCollect (uint32_t collect_gen,
* we set n_gc_threads, work_stealing, n_gc_idle_threads, gc_running_threads
* here
*/
- if (gc_type == SYNC_GC_PAR) {
+ if (config.parallel) {
n_gc_threads = getNumCapabilities();
n_gc_idle_threads = 0;
for (uint32_t i = 0; i < getNumCapabilities(); ++i) {
@@ -963,7 +961,7 @@ GarbageCollect (uint32_t collect_gen,
// resurrectThreads(), for the same reason as checkSanity above:
// resurrectThreads() will overwrite some closures and leave slop
// behind.
- if (do_heap_census) {
+ if (config.do_heap_census) {
debugTrace(DEBUG_sched, "performing heap census");
RELEASE_SM_LOCK;
heapCensus(mut_time);
@@ -1014,7 +1012,7 @@ GarbageCollect (uint32_t collect_gen,
#endif
// Reset the counter if the major GC was caused by a heap overflow
- consec_idle_gcs = is_overflow_gc ? 0 : consec_idle_gcs + 1;
+ consec_idle_gcs = config.overflow_gc ? 0 : consec_idle_gcs + 1;
// See Note [Scaling retained memory]
double scaled_factor =
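
The one change in GC.c above that is not a straight field rename is the scheduling mode: the old gc_type argument, which GarbageCollect compared against SYNC_GC_PAR, becomes the plain config.parallel flag, and the non-threaded build now asserts that this flag is false. A minimal sketch of the mapping, assuming the caller still has the old gc_type value in scope (the scheduler side is not shown in this diff):

    /* Sketch only, not taken from this diff: how the former gc_type
     * argument translates into the new flag at a hypothetical call site. */
    bool parallel = (gc_type == SYNC_GC_PAR);   /* parallel scavenging? */
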
diff --git a/rts/sm/GC.h b/rts/sm/GC.h
index 4132160c91..1f3034c1ec 100644
--- a/rts/sm/GC.h
+++ b/rts/sm/GC.h
@@ -17,11 +17,20 @@
#include "BeginPrivate.h"
-void GarbageCollect (uint32_t collect_gen,
- bool do_heap_census,
- bool is_overflow_gc,
- bool deadlock_detect,
- uint32_t gc_type,
+struct GcConfig {
+ // which generation are we requesting be collected?
+ uint32_t collect_gen;
+ // is a heap census requested?
+ bool do_heap_census;
+ // is this GC triggered by a heap overflow?
+ bool overflow_gc;
+ // is this GC triggered by a deadlock?
+ bool deadlock_detect;
+ // should we use parallel scavenging?
+ bool parallel;
+};
+
+void GarbageCollect (struct GcConfig config,
Capability *cap,
bool idle_cap[]);
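
For reference, a call against the new header might look like the sketch below. The surrounding names (N, heap_census, is_overflow_gc, deadlock_detect, gc_type, cap, idle_cap) are assumptions standing in for whatever the scheduler has in scope; the call-site updates themselves are not part of this diff.

    /* Hypothetical call-site sketch, not taken from this diff. */
    struct GcConfig config = {
        .collect_gen     = N,                        /* generation to collect */
        .do_heap_census  = heap_census,              /* heap census requested? */
        .overflow_gc     = is_overflow_gc,           /* triggered by heap overflow? */
        .deadlock_detect = deadlock_detect,          /* triggered by a deadlock? */
        .parallel        = (gc_type == SYNC_GC_PAR), /* use parallel scavenging? */
    };
    GarbageCollect(config, cap, idle_cap);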