From 490b57783d80f0c5f7882c66d9fb6aa02713c9a5 Mon Sep 17 00:00:00 2001
From: Aaron Patterson
Date: Tue, 24 Nov 2020 10:45:12 -0800
Subject: Disable read barrier on explicit compaction request

We don't need a read barrier when the user calls `GC.compact` because
we don't allow allocations during GC, and all references should be
"live"
---
 gc.c  | 31 ++++++++++++++++++++++++++++---
 gc.rb |  3 +--
 2 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/gc.c b/gc.c
index 3dbeeece72..ed98792d11 100644
--- a/gc.c
+++ b/gc.c
@@ -682,7 +682,7 @@ typedef struct rb_objspace {
     unsigned int dont_gc : 1;
     unsigned int dont_incremental : 1;
     unsigned int during_gc : 1;
-    unsigned int during_compacting : 1;
+    unsigned int during_compacting : 2;
     unsigned int gc_stressful: 1;
     unsigned int has_hook: 1;
     unsigned int during_minor_gc : 1;
@@ -4389,6 +4389,11 @@ static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free);
 static void
 lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 {
+    /* If this is an explicit compaction (GC.compact), we don't need a read
+     * barrier, so just return early. */
+    if (objspace->flags.during_compacting >> 1) {
+        return;
+    }
 #if defined(_WIN32)
     DWORD old_protect;
 
@@ -4405,6 +4410,11 @@ lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 static void
 unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 {
+    /* If this is an explicit compaction (GC.compact), we don't need a read
+     * barrier, so just return early. */
+    if (objspace->flags.during_compacting >> 1) {
+        return;
+    }
 #if defined(_WIN32)
     DWORD old_protect;
 
@@ -7030,7 +7040,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
 #endif
         objspace->flags.during_minor_gc = FALSE;
         if (ruby_enable_autocompact) {
-            objspace->flags.during_compacting = TRUE;
+            objspace->flags.during_compacting |= TRUE;
         }
         objspace->profile.major_gc_count++;
         objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
@@ -8057,7 +8067,9 @@ gc_start(rb_objspace_t *objspace, int reason)
 
     /* reason may be clobbered, later, so keep set immediate_sweep here */
     objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
-    objspace->flags.during_compacting = !!((unsigned)reason & GPR_FLAG_COMPACT);
+
+    /* Explicitly enable compaction (GC.compact) */
+    objspace->flags.during_compacting = (!!((unsigned)reason & GPR_FLAG_COMPACT) << 1);
 
     if (!heap_allocated_pages) return FALSE; /* heap is not ready */
     if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
@@ -9247,6 +9259,19 @@ heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
     return 0;
 }
 
+static VALUE
+gc_compact(rb_execution_context_t *ec, VALUE self)
+{
+    /* Clear the heap. */
+    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);
+
+    /* At this point, all references are live and the mutator is not allowed
+     * to run, so we don't need a read barrier. */
+    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
+
+    return gc_compact_stats(ec, self);
+}
+
 static VALUE
 gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE toward_empty)
 {
diff --git a/gc.rb b/gc.rb
index d2b0d8e86e..4e0faaf00d 100644
--- a/gc.rb
+++ b/gc.rb
@@ -199,8 +199,7 @@ module GC
   end
 
   def self.compact
-    Primitive.gc_start_internal true, true, true, true
-    Primitive.gc_compact_stats
+    Primitive.gc_compact
   end
 
   # call-seq:
-- 
cgit v1.2.1
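
The core trick in this patch is widening `during_compacting` from one bit to
two: bit 0 still means "compaction is running" (set with `|= TRUE` for
autocompact), while bit 1 records that compaction was requested explicitly via
GC.compact (set with `<< 1` in gc_start). The read barrier in lock_page_body
and unlock_page_body is skipped whenever bit 1 is set, which is what the
`flags.during_compacting >> 1` test checks. Below is a minimal standalone
sketch of that encoding; the struct and helper are illustrative stand-ins, not
the actual rb_objspace definitions:

#include <stdio.h>

/* Illustrative two-bit flag, mirroring the widened bitfield in the patch:
 *   bit 0 -- compaction is running (autocompact path)
 *   bit 1 -- compaction was requested explicitly (GC.compact path) */
struct flags {
    unsigned int during_compacting : 2;
};

static int
needs_read_barrier(const struct flags *f)
{
    /* Same test as lock_page_body/unlock_page_body: an explicit request
     * (bit 1 set) means the barrier can be skipped. */
    return !(f->during_compacting >> 1);
}

int
main(void)
{
    struct flags autocompact  = { 0 };
    struct flags explicit_req = { 0 };

    autocompact.during_compacting |= 1;       /* autocompact: bit 0 only */
    explicit_req.during_compacting = 1 << 1;  /* GC.compact: bit 1 set   */

    printf("autocompact needs barrier: %d\n", needs_read_barrier(&autocompact));  /* prints 1 */
    printf("GC.compact needs barrier:  %d\n", needs_read_barrier(&explicit_req)); /* prints 0 */
    return 0;
}

Packing both states into one bitfield lets gc_start overwrite the whole field
in one assignment while gc_marks_start can still OR in the autocompact bit
without clobbering an explicit request already in flight.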