Diffstat (limited to 'yjit/src/codegen.rs')
-rw-r--r--  yjit/src/codegen.rs  230
1 file changed, 210 insertions(+), 20 deletions(-)
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index 7c2c0dbe87..6686a41a8c 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -1886,6 +1886,9 @@ fn jit_chain_guard(
// up to 5 different classes, and embedded or not for each
pub const GET_IVAR_MAX_DEPTH: i32 = 10;
+// up to 5 different classes, and embedded or not for each
+pub const SET_IVAR_MAX_DEPTH: i32 = 10;
+
// hashes and arrays
pub const OPT_AREF_MAX_CHAIN_DEPTH: i32 = 2;
@@ -2010,7 +2013,7 @@ fn gen_get_ivar(
}
let ivar_index = unsafe {
- let shape_id = comptime_receiver.shape_of();
+ let shape_id = comptime_receiver.shape_id_of();
let shape = rb_shape_get_shape_by_id(shape_id);
let mut ivar_index: u32 = 0;
if rb_shape_get_iv_index(shape, ivar_name, &mut ivar_index) {
@@ -2134,33 +2137,220 @@ fn gen_getinstancevariable(
)
}
+// Generate an IV write.
+// This function doesn't deal with writing the shape, or expanding an object
+// to use an IV buffer if necessary. That is the caller's responsibility.
+fn gen_write_iv(
+ ctx: &mut Context,
+ asm: &mut Assembler,
+ comptime_receiver: VALUE,
+ recv: Opnd,
+ ivar_index: usize,
+ extension_needed: bool)
+{
+    // The compile-time receiver is embedded and the ivar index lands within the object
+ let embed_test_result = comptime_receiver.embedded_p() && !extension_needed;
+
+ if embed_test_result {
+ // Find the IV offset
+ let offs = ROBJECT_OFFSET_AS_ARY + (ivar_index * SIZEOF_VALUE) as i32;
+ let ivar_opnd = Opnd::mem(64, recv, offs);
+
+ // Write the IV
+ asm.comment("write IV");
+ asm.mov(ivar_opnd, ctx.stack_pop(0));
+ } else {
+        // The compile-time receiver is *not* embedded.
+
+ // Get a pointer to the extended table
+ let tbl_opnd = asm.load(Opnd::mem(64, recv, ROBJECT_OFFSET_AS_HEAP_IVPTR));
+
+        // Write the ivar into the extended table
+ let ivar_opnd = Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * ivar_index) as i32);
+
+ asm.comment("write IV");
+ asm.mov(ivar_opnd, ctx.stack_pop(0));
+ }
+}
+
fn gen_setinstancevariable(
jit: &mut JITState,
ctx: &mut Context,
asm: &mut Assembler,
- _ocb: &mut OutlinedCb,
+ ocb: &mut OutlinedCb,
) -> CodegenStatus {
- let id = jit_get_arg(jit, 0).as_usize();
- let ic = jit_get_arg(jit, 1).as_u64(); // type IVC
+ let starting_context = *ctx; // make a copy for use with jit_chain_guard
- // Save the PC and SP because the callee may allocate
- // Note that this modifies REG_SP, which is why we do it first
- jit_prepare_routine_call(jit, ctx, asm);
+ // Defer compilation so we can specialize on a runtime `self`
+ if !jit_at_current_insn(jit) {
+ defer_compilation(jit, ctx, asm, ocb);
+ return EndBlock;
+ }
- // Get the operands from the stack
- let val_opnd = ctx.stack_pop(1);
+ let ivar_name = jit_get_arg(jit, 0).as_u64();
+ let comptime_receiver = jit_peek_at_self(jit);
+ let comptime_val_klass = comptime_receiver.class_of();
- // Call rb_vm_setinstancevariable(iseq, obj, id, val, ic);
- asm.ccall(
- rb_vm_setinstancevariable as *const u8,
- vec![
- Opnd::const_ptr(jit.iseq as *const u8),
- Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF),
- id.into(),
- val_opnd,
- Opnd::const_ptr(ic as *const u8),
- ]
- );
+ // Check if the comptime class uses a custom allocator
+ let custom_allocator = unsafe { rb_get_alloc_func(comptime_val_klass) };
+ let uses_custom_allocator = match custom_allocator {
+ Some(alloc_fun) => {
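+            // rb_class_allocate_instance is the default allocator for
+            // T_OBJECT; any other allocator counts as custom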
+ let allocate_instance = rb_class_allocate_instance as *const u8;
+ alloc_fun as *const u8 != allocate_instance
+ }
+ None => false,
+ };
+
+ // Check if the comptime receiver is a T_OBJECT
+ let receiver_t_object = unsafe { RB_TYPE_P(comptime_receiver, RUBY_T_OBJECT) };
+
+ // If the receiver isn't a T_OBJECT, or uses a custom allocator,
+ // then just write out the IV write as a function call
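+    // (the specialized path below assumes the standard ROBJECT layout)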
+ if !receiver_t_object || uses_custom_allocator {
+ asm.comment("call rb_vm_setinstancevariable()");
+
+ let ic = jit_get_arg(jit, 1).as_u64(); // type IVC
+
+        // Save the PC and SP because the callee may allocate or raise
+        // exceptions. Note that this modifies REG_SP, which is why we do it
+        // first.
+        jit_prepare_routine_call(jit, ctx, asm);
+
+ // Get the operands from the stack
+ let val_opnd = ctx.stack_pop(1);
+
+ // Call rb_vm_setinstancevariable(iseq, obj, id, val, ic);
+ asm.ccall(
+ rb_vm_setinstancevariable as *const u8,
+ vec![
+ Opnd::const_ptr(jit.iseq as *const u8),
+ Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF),
+ ivar_name.into(),
+ val_opnd,
+ Opnd::const_ptr(ic as *const u8),
+ ]
+ );
+ } else {
+ // Get the iv index
+ let ivar_index = unsafe {
+ let shape_id = comptime_receiver.shape_id_of();
+ let shape = rb_shape_get_shape_by_id(shape_id);
+ let mut ivar_index: u32 = 0;
+ if rb_shape_get_iv_index(shape, ivar_name, &mut ivar_index) {
+ Some(ivar_index as usize)
+ } else {
+ None
+ }
+ };
+
+ // Get the receiver
+ let mut recv = asm.load(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF));
+
+ let recv_opnd = SelfOpnd;
+ let recv_type = ctx.get_opnd_type(recv_opnd);
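+        // The context tracks a static type for `self`; if it is already
+        // known to be heap-allocated, the guard below can be skipped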
+
+ // Generate a side exit
+ let side_exit = get_side_exit(jit, ocb, ctx);
+
+ // Upgrade type
+ if !recv_type.is_heap() { // Must be a heap type
+ ctx.upgrade_opnd_type(recv_opnd, Type::UnknownHeap);
+ guard_object_is_heap(asm, recv, side_exit);
+ }
+
+ let expected_shape = unsafe { rb_shape_get_shape_id(comptime_receiver) };
+ let shape_bit_size = unsafe { rb_shape_id_num_bits() }; // either 16 or 32 depending on RUBY_DEBUG
+ let shape_byte_size = shape_bit_size / 8;
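+        // The shape ID is stored in the most significant bits of the flags
+        // word; on the little-endian targets YJIT supports, those are the
+        // highest-addressed bytes, hence the (8 - shape_byte_size) offset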
+ let shape_opnd = Opnd::mem(shape_bit_size, recv, RUBY_OFFSET_RBASIC_FLAGS + (8 - shape_byte_size as i32));
+
+ asm.comment("guard shape");
+ asm.cmp(shape_opnd, Opnd::UImm(expected_shape as u64));
+ let megamorphic_side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic).into();
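+        // Allow up to SET_IVAR_MAX_DEPTH shape-specialized versions of this
+        // block before giving up and taking the side exit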
+ jit_chain_guard(
+ JCC_JNE,
+ jit,
+ &starting_context,
+ asm,
+ ocb,
+ SET_IVAR_MAX_DEPTH,
+ megamorphic_side_exit,
+ );
+
+ match ivar_index {
+ // If we don't have an instance variable index, then we need to
+ // transition out of the current shape.
+ None => {
+ let mut shape = comptime_receiver.shape_of();
+
+ // If the object doesn't have the capacity to store the IV,
+ // then we'll need to allocate it.
+ let needs_extension = unsafe { (*shape).next_iv_index >= (*shape).capacity };
+
+ // We can write to the object, but we need to transition the shape
+ let ivar_index = unsafe { (*shape).next_iv_index } as usize;
+
+ if needs_extension {
+ let current_capacity = unsafe { (*shape).capacity };
+ let newsize = current_capacity * 2;
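+                    // Double the capacity so repeated ivar additions grow
+                    // the buffer geometrically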
+
+ // We need to add an extended table to the object
+ // First, create an outgoing transition that increases the
+ // capacity
+ shape = unsafe {
+ rb_shape_transition_shape_capa(shape, newsize)
+ };
+
+ // Generate the C call so that runtime code will increase
+ // the capacity and set the buffer.
+ asm.ccall(rb_ensure_iv_list_size as *const u8,
+ vec![
+ recv,
+ Opnd::UImm(current_capacity.into()),
+ Opnd::UImm(newsize.into())
+ ]
+ );
+
+ // Load the receiver again after the function call
+ recv = asm.load(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF))
+ }
+
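+                // Look up the child shape for this ivar transition and take
+                // its ID so we can write it into the object's flags below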
+ let new_shape_id = unsafe {
+ rb_shape_id(rb_shape_get_next(shape, comptime_receiver, ivar_name))
+ };
+
+ gen_write_iv(ctx, asm, comptime_receiver, recv, ivar_index, needs_extension);
+
+ asm.comment("write shape");
+ let cleared_flags = asm.and(
+ Opnd::mem(64, recv, RUBY_OFFSET_RBASIC_FLAGS),
+ Opnd::UImm(unsafe { rb_shape_flag_mask() }.into()));
+
+ let new_flags = asm.or(cleared_flags, Opnd::UImm((new_shape_id as u64) << unsafe { rb_shape_flag_shift() }));
+
+ asm.store(Opnd::mem(64, recv, RUBY_OFFSET_RBASIC_FLAGS), new_flags);
+ },
+
+ Some(ivar_index) => {
+ // If the iv index already exists, then we don't need to
+            // transition to a new shape. We find the iv index by searching
+            // up the shape tree; if the transition has already been made,
+            // there's no reason to update the shape on the object. Just set
+            // the IV.
+ gen_write_iv(ctx, asm, comptime_receiver, recv, ivar_index, false);
+ },
+ }
+
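+        // Tell the generational GC that recv may now reference a younger
+        // object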
+ asm.comment("write barrier");
+ asm.ccall(
+ rb_gc_writebarrier as *const u8,
+ vec![
+ recv,
+ ctx.stack_pop(1),
+ ]
+ );
+ }
KeepCompiling
}