summaryrefslogtreecommitdiff
path: root/rts/Schedule.c
diff options
context:
space:
mode:
author	Simon Marlow <marlowsd@gmail.com>	2012-08-21 12:26:16 +0100
committer	Simon Marlow <marlowsd@gmail.com>	2012-08-21 12:26:16 +0100
commit	397606d6a4d1d180adcd6ef6ee7a9f864e4c37fd (patch)
tree	6faae202183a23f62ca3f49a47394b11df6e5606 /rts/Schedule.c
parent	4752385d4beb2d07a79643b87c7d27ea1f81aeb5 (diff)
download	haskell-397606d6a4d1d180adcd6ef6ee7a9f864e4c37fd.tar.gz
tidy up
Diffstat (limited to 'rts/Schedule.c')
-rw-r--r--	rts/Schedule.c	17
1 file changed, 11 insertions, 6 deletions
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 310e68e629..a8de843ea6 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -1451,6 +1451,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
{
Capability *cap = *pcap;
rtsBool heap_census;
+ nat collect_gen;
#ifdef THREADED_RTS
rtsBool idle_cap[n_capabilities];
rtsBool gc_type;
@@ -1465,10 +1466,16 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
return;
}
+ heap_census = scheduleNeedHeapProfile(rtsTrue);
+
+ // Figure out which generation we are collecting, so that we can
+ // decide whether this is a parallel GC or not.
+ collect_gen = calcNeeded(force_major || heap_census, NULL);
+
#ifdef THREADED_RTS
if (sched_state < SCHED_INTERRUPTING
&& RtsFlags.ParFlags.parGcEnabled
- && N >= RtsFlags.ParFlags.parGcGen
+ && collect_gen >= RtsFlags.ParFlags.parGcGen
&& ! oldest_gen->mark)
{
gc_type = SYNC_GC_PAR;
@@ -1540,7 +1547,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
if (RtsFlags.ParFlags.parGcNoSyncWithIdle == 0
|| (RtsFlags.ParFlags.parGcLoadBalancingEnabled &&
- N >= RtsFlags.ParFlags.parGcLoadBalancingGen)) {
+ collect_gen >= RtsFlags.ParFlags.parGcLoadBalancingGen)) {
for (i=0; i < n_capabilities; i++) {
if (capabilities[i].disabled) {
idle_cap[i] = tryGrabCapability(&capabilities[i], task);
@@ -1645,15 +1652,13 @@ delete_threads_and_gc:
}
#endif
- heap_census = scheduleNeedHeapProfile(rtsTrue);
-
#if defined(THREADED_RTS)
// reset pending_sync *before* GC, so that when the GC threads
// emerge they don't immediately re-enter the GC.
pending_sync = 0;
- GarbageCollect(force_major || heap_census, heap_census, gc_type, cap);
+ GarbageCollect(collect_gen, heap_census, gc_type, cap);
#else
- GarbageCollect(force_major || heap_census, heap_census, 0, cap);
+ GarbageCollect(collect_gen, heap_census, 0, cap);
#endif
traceSparkCounters(cap);