summaryrefslogtreecommitdiff
path: root/internal/gc.h
diff options
context:
space:
mode:
authorPeter Zhu <peter@peterzhu.ca>2022-03-29 13:57:09 -0400
committerPeter Zhu <peter@peterzhu.ca>2022-03-30 09:33:17 -0400
commitdde164e968e382d50b07ad4559468885cbff33ef (patch)
treeb0de24693ab8d2e15bbc008cfe5ebd068feecfd3 /internal/gc.h
parentad808506b300435df24f12ae03338e57a056cdc6 (diff)
downloadruby-dde164e968e382d50b07ad4559468885cbff33ef.tar.gz
Decouple incremental marking step from page sizes
Currently, the number of incremental marking steps is calculated based on the number of pooled pages available. This means that if we make Ruby heap pages larger, it would run fewer incremental marking steps (which would mean each incremental marking step takes longer). This commit changes incremental marking to run after every INCREMENTAL_MARK_STEP_ALLOCATIONS number of allocations. This means that the behaviour of incremental marking remains the same regardless of the Ruby heap page size. I've benchmarked against Discourse benchmarks and did not get a significant change in response times beyond the margin of error. This is expected, as this new incremental marking algorithm behaves very similarly to the previous one.
Diffstat (limited to 'internal/gc.h')
-rw-r--r--internal/gc.h1
1 file changed, 1 insertion, 0 deletions
diff --git a/internal/gc.h b/internal/gc.h
index 2a7a34799c..3346089754 100644
--- a/internal/gc.h
+++ b/internal/gc.h
@@ -79,6 +79,7 @@ typedef struct ractor_newobj_size_pool_cache {
} rb_ractor_newobj_size_pool_cache_t;
typedef struct ractor_newobj_cache {
+ size_t incremental_mark_step_allocated_slots;
rb_ractor_newobj_size_pool_cache_t size_pool_caches[SIZE_POOL_COUNT];
} rb_ractor_newobj_cache_t;