author     Aaron Patterson <tenderlove@ruby-lang.org>  2020-11-02 14:40:29 -0800
committer  Aaron Patterson <tenderlove@ruby-lang.org>  2020-11-02 14:42:48 -0800
commit     67b2c21c327c96d80b8a0fe02a96d417e85293e8 (patch)
tree       c4ea1236016fc2b7e40582a69966d35c5f8c7289 /transient_heap.c
parent     79b242260bc0530503dde85eda8e79b1c2aa9a6e (diff)
download   ruby-67b2c21c327c96d80b8a0fe02a96d417e85293e8.tar.gz
Add `GC.auto_compact= true/false` and `GC.auto_compact`
* `GC.auto_compact=` and `GC.auto_compact` can be used to control when compaction runs. Setting `GC.auto_compact = true` causes compaction to occur during major collections. At the moment, compaction adds significant overhead to major collections, so please test first! [Feature #17176]
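A minimal usage sketch of the interface described above; the return values shown in the `# =>` comments are assumptions for illustration, not taken from this commit:

    # Enable automatic compaction; subsequent major GCs are expected to also
    # compact the heap, at the cost of extra time per major collection.
    GC.auto_compact = true
    GC.auto_compact          # => true (assumed: the reader returns the current setting)

    # Force a major collection to observe the effect.
    GC.start

    # Disable it again if the overhead is unacceptable for the workload.
    GC.auto_compact = false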
Diffstat (limited to 'transient_heap.c')
-rw-r--r--  transient_heap.c  17
1 file changed, 4 insertions, 13 deletions
diff --git a/transient_heap.c b/transient_heap.c
index 444f4fdcc4..51886d8dc1 100644
--- a/transient_heap.c
+++ b/transient_heap.c
@@ -864,26 +864,17 @@ blocks_clear_marked_index(struct transient_heap_block* block)
static void
transient_heap_block_update_refs(struct transient_heap* theap, struct transient_heap_block* block)
{
- int i=0, n=0;
+ int marked_index = block->info.last_marked_index;
- while (i<block->info.index) {
- void *ptr = &block->buff[i];
- struct transient_alloc_header *header = ptr;
+ while (marked_index >= 0) {
+ struct transient_alloc_header *header = alloc_header(block, marked_index);
asan_unpoison_memory_region(header, sizeof *header, false);
- void *poisoned = __asan_region_is_poisoned((void *)header->obj, SIZEOF_VALUE);
- asan_unpoison_object(header->obj, false);
-
header->obj = rb_gc_location(header->obj);
- if (poisoned) {
- asan_poison_object(header->obj);
- }
-
- i += header->size;
+ marked_index = header->next_marked_index;
asan_poison_memory_region(header, sizeof *header);
- n++;
}
}