summaryrefslogtreecommitdiff
path: root/rts/Schedule.c
diff options
context:
space:
mode:
author: Simon Marlow <marlowsd@gmail.com> 2018-03-25 14:04:02 -0400
committer: Ben Gamari <ben@smart-cactus.org> 2018-03-25 14:33:27 -0400
commit: f7bbc343a624710ecf8f8f5eda620c4f35c90fc8 (patch)
tree: 0c409ece9e40aa9e612064ac306d75068ee03b0c /rts/Schedule.c
parent: cf809950efb744ca884e0e0833a80ffd50527ca1 (diff)
download: haskell-f7bbc343a624710ecf8f8f5eda620c4f35c90fc8.tar.gz
Run C finalizers incrementally during mutation
With a large heap it's possible to build up a lot of finalizers
between GCs. We've observed GC spending up to 50% of its time running
finalizers.

But there's no reason we have to run finalizers during GC, and
especially no reason we have to block *all* the mutator threads while
*one* GC thread runs finalizers one by one.

I thought about a bunch of alternative ways to handle this, which are
documented along with runSomeFinalizers() in Weak.c. The approach I
settled on is to have a capability run finalizers if it is idle. So
running finalizers is like a low-priority background thread. This
requires some minor scheduler changes, but not much. In the future we
might be able to move more GC work into here (I have my eye on freeing
large blocks, for example).

Test Plan:
* validate
* tested on our system and saw reductions in GC pauses of 40-50%.

Reviewers: bgamari, niteria, osa1, erikd

Reviewed By: bgamari, osa1

Subscribers: rwbarton, thomie, carter

Differential Revision: https://phabricator.haskell.org/D4521
Diffstat (limited to 'rts/Schedule.c')
-rw-r--r--  rts/Schedule.c  14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 5160cb495b..2dc850c50d 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -679,7 +679,11 @@ scheduleYield (Capability **pcap, Task *task)
// otherwise yield (sleep), and keep yielding if necessary.
do {
- didGcLast = yieldCapability(&cap,task, !didGcLast);
+ if (doIdleGCWork(cap, false)) {
+ didGcLast = false;
+ } else {
+ didGcLast = yieldCapability(&cap,task, !didGcLast);
+ }
}
while (shouldYieldCapability(cap,task,didGcLast));
@@ -1798,6 +1802,9 @@ delete_threads_and_gc:
}
#endif
+ // Do any remaining idle GC work from the previous GC
+ doIdleGCWork(cap, true /* all of it */);
+
#if defined(THREADED_RTS)
// reset pending_sync *before* GC, so that when the GC threads
// emerge they don't immediately re-enter the GC.
@@ -1807,6 +1814,11 @@ delete_threads_and_gc:
GarbageCollect(collect_gen, heap_census, 0, cap, NULL);
#endif
+ // If we're shutting down, don't leave any idle GC work to do.
+ if (sched_state == SCHED_SHUTTING_DOWN) {
+ doIdleGCWork(cap, true /* all of it */);
+ }
+
traceSparkCounters(cap);
switch (recent_activity) {