author     Matt Valentine-House <matt@eightbitraptor.com>   2022-12-13 15:11:57 +0000
committer  Peter Zhu <peter@peterzhu.ca>                    2022-12-15 09:04:30 -0500
commit     9c54466e299aa91af225bc2d92a3d7755730948f (patch)
tree       13d941effd31465b31b99d3ea0263a1dbee2e7be
parent     723cca6d82ecaeb41d7922571a88f15439048098 (diff)
Fix Object Movement allocation in GC
When moving Objects between size pools we have to assign a new shape. This
happened during reference updating: we tried to create a new shape tree that
mirrored the existing tree, but based on the root shape of the new size pool.

This causes allocations to happen if the new tree doesn't already exist,
potentially triggering a GC, during GC.

This commit changes object movement to look for a pre-existing new tree
during object movement, and if that tree does not exist, we don't move the
object to the new pool.

This allows us to remove the shape allocation from update references.

Co-Authored-By: Peter Zhu <peter@peterzhu.ca>
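The heart of the fix is that finding the mirrored shape is a lookup-only walk
of the shape tree. As a rough sketch of the idea (simplified stand-in types,
not Ruby's: "struct shape", "find_edge" and "traverse_from_new_root" below
approximate rb_shape_t, the rb_id_table edge lookup, and the real
rb_shape_traverse_from_new_root in the diff, which additionally skips
non-ivar transitions such as SHAPE_FROZEN):

    #include <stddef.h>
    #include <string.h>

    /* Simplified shape-tree node: each node adds one ivar (edge_name)
     * on top of its parent shape. */
    struct shape {
        const char   *edge_name;   /* ivar introduced by this node */
        struct shape *parent;      /* shape we transitioned from */
        struct shape *edges;       /* first child */
        struct shape *next;        /* next sibling */
    };

    /* Look up an existing child edge by name. Never allocates. */
    static struct shape *
    find_edge(struct shape *shape, const char *name)
    {
        for (struct shape *c = shape->edges; c; c = c->next) {
            if (strcmp(c->edge_name, name) == 0) return c;
        }
        return NULL;
    }

    /* Replay dest's chain of ivar transitions on top of new_root.
     * Returns NULL as soon as an edge is missing, which the caller
     * treats as "leave the object in its current size pool" instead
     * of allocating the missing shape in the middle of GC. */
    static struct shape *
    traverse_from_new_root(struct shape *new_root, struct shape *dest)
    {
        if (dest->parent == NULL) {
            return new_root;  /* reached the old root shape */
        }
        struct shape *prefix = traverse_from_new_root(new_root, dest->parent);
        if (prefix == NULL) {
            return NULL;
        }
        return find_edge(prefix, dest->edge_name);
    }

Because the walk only follows existing edges, it is safe to run inside
compaction; the trade-off is that an object whose mirrored tree has not
been built yet simply stays in its current pool.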
-rw-r--r--  gc.c                           41
-rw-r--r--  shape.c                        33
-rw-r--r--  shape.h                         2
-rw-r--r--  test/ruby/test_gc_compact.rb    3
4 files changed, 74 insertions(+), 5 deletions(-)
diff --git a/gc.c b/gc.c
index a10a2193e4..174da8195e 100644
--- a/gc.c
+++ b/gc.c
@@ -574,6 +574,7 @@ struct RMoved {
VALUE flags;
VALUE dummy;
VALUE destination;
+ shape_id_t original_shape_id;
};

#define RMOVED(obj) ((struct RMoved *)(obj))
@@ -6081,10 +6082,19 @@ invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_
object = rb_gc_location(forwarding_object);
+ shape_id_t old_shape_id = 0;
+ if (RB_TYPE_P(object, T_OBJECT)) {
+ old_shape_id = RMOVED(forwarding_object)->original_shape_id;
+ }
+
gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
/* forwarding_object is now our actual object, and "object"
* is the free slot for the original page */
+ if (old_shape_id) {
+ ROBJECT_SET_SHAPE_ID(forwarding_object, old_shape_id);
+ }
+
struct heap_page *orig_page = GET_HEAP_PAGE(object);
orig_page->free_slots++;
heap_page_add_freeobj(objspace, orig_page, object);
@@ -8448,12 +8458,28 @@ gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, rb_size_pool_t *size_p
{
GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED);
- rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(gc_compact_destination_pool(objspace, size_pool, src));
+ rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src);
+ rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(dest_pool);
+ rb_shape_t *new_shape = NULL;
+ rb_shape_t *orig_shape = NULL;
if (gc_compact_heap_cursors_met_p(dheap)) {
return dheap != heap;
}
+ if (RB_TYPE_P(src, T_OBJECT)) {
+ orig_shape = rb_shape_get_shape(src);
+ if (dheap != heap) {
+ rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)((dest_pool - size_pools) + SIZE_POOL_COUNT));
+ new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
+
+ if (!new_shape) {
+ dest_pool = size_pool;
+ dheap = heap;
+ }
+ }
+ }
+
while (!try_move(objspace, dheap, dheap->free_pages, src)) {
struct gc_sweep_context ctx = {
.page = dheap->sweeping_page,
@@ -8478,6 +8504,15 @@ gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, rb_size_pool_t *size_p
return false;
}
}
+
+ if (orig_shape) {
+ if (new_shape) {
+ VALUE dest = rb_gc_location(src);
+ rb_shape_set_shape(dest, new_shape);
+ }
+ RMOVED(src)->original_shape_id = rb_shape_id(orig_shape);
+ }
+
return true;
}
@@ -10059,10 +10094,6 @@ gc_ref_update_object(rb_objspace_t *objspace, VALUE v)
xfree(ptr);
}
ptr = ROBJECT(v)->as.ary;
- size_t size_pool_shape_id = size_pool_idx_for_size(embed_size);
- rb_shape_t * initial_shape = rb_shape_get_shape_by_id((shape_id_t)size_pool_shape_id + SIZE_POOL_COUNT);
- rb_shape_t * new_shape = rb_shape_rebuild_shape(initial_shape, rb_shape_get_shape(v));
- rb_shape_set_shape(v, new_shape);
}
#endif
diff --git a/shape.c b/shape.c
index 86572f9fde..6227958d2c 100644
--- a/shape.c
+++ b/shape.c
@@ -404,6 +404,39 @@ rb_shape_id_offset(void)
}

rb_shape_t *
+rb_shape_traverse_from_new_root(rb_shape_t *initial_shape, rb_shape_t *dest_shape)
+{
+ RUBY_ASSERT(initial_shape->type == SHAPE_T_OBJECT);
+ rb_shape_t *next_shape = initial_shape;
+
+ if (dest_shape->type != initial_shape->type) {
+ next_shape = rb_shape_traverse_from_new_root(initial_shape, rb_shape_get_parent(dest_shape));
+ if (!next_shape) {
+ return NULL;
+ }
+ }
+
+ switch ((enum shape_type)dest_shape->type) {
+ case SHAPE_IVAR:
+ if (!next_shape->edges) {
+ return NULL;
+ }
+ if (!rb_id_table_lookup(next_shape->edges, dest_shape->edge_name, (VALUE *)&next_shape)) {
+ return NULL;
+ }
+ break;
+ case SHAPE_ROOT:
+ case SHAPE_FROZEN:
+ case SHAPE_CAPACITY_CHANGE:
+ case SHAPE_INITIAL_CAPACITY:
+ case SHAPE_T_OBJECT:
+ break;
+ }
+
+ return next_shape;
+}
+
+rb_shape_t *
rb_shape_rebuild_shape(rb_shape_t * initial_shape, rb_shape_t * dest_shape)
{
rb_shape_t * midway_shape;
diff --git a/shape.h b/shape.h
index ddb870f5e7..9a7d31c369 100644
--- a/shape.h
+++ b/shape.h
@@ -178,6 +178,8 @@ rb_shape_t * rb_shape_alloc(ID edge_name, rb_shape_t * parent);
rb_shape_t * rb_shape_alloc_with_size_pool_index(ID edge_name, rb_shape_t * parent, uint8_t size_pool_index);
rb_shape_t * rb_shape_alloc_with_parent_id(ID edge_name, shape_id_t parent_id);

+rb_shape_t *rb_shape_traverse_from_new_root(rb_shape_t *initial_shape, rb_shape_t *orig_shape);
+
bool rb_shape_set_shape_id(VALUE obj, shape_id_t shape_id);
VALUE rb_obj_debug_shape(VALUE self, VALUE obj);
diff --git a/test/ruby/test_gc_compact.rb b/test/ruby/test_gc_compact.rb
index bae29a3162..c5fac64ca4 100644
--- a/test/ruby/test_gc_compact.rb
+++ b/test/ruby/test_gc_compact.rb
@@ -270,6 +270,9 @@ class TestGCCompact < Test::Unit::TestCase
ary = OBJ_COUNT.times.map { Foo.new }
ary.each(&:add_ivars)
+ GC.start
+ Foo.new.add_ivars
+
stats = GC.verify_compaction_references(expand_heap: true, toward: :empty)
assert_operator(stats[:moved_up][:T_OBJECT], :>=, OBJ_COUNT)
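For reference, a standalone version of roughly what this test exercises
(hedged: Foo#add_ivars is defined elsewhere in the test file, so the ivar
count below is a stand-in; the GC.verify_compaction_references keywords and
the :moved_up stats key are taken from the test above):

    class Foo
      def add_ivars
        # Grow the ivar count so the object outgrows its current slot size.
        @a = @b = @c = @d = @e = 1
      end
    end

    ary = 1_000.times.map { Foo.new }
    ary.each(&:add_ivars)

    # The test performs a GC and creates one more ivar-bearing object
    # before compacting; plausibly this ensures the mirrored shape tree
    # already exists in the destination pool, since with this fix shapes
    # are only looked up, never allocated, during compaction.
    GC.start
    Foo.new.add_ivars

    stats = GC.verify_compaction_references(expand_heap: true, toward: :empty)
    p stats[:moved_up][:T_OBJECT]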