author     Takashi Kokubun <takashikkbn@gmail.com>     2023-03-30 14:15:59 -0700
committer  GitHub <noreply@github.com>                 2023-03-30 14:15:59 -0700
commit     9a617c067f3cffbf0bfd4e2f4b0634adb9d50bc4 (patch)
tree       c5ad0114d65a3b3874eb93e9487cbe5a1e95be9b /yjit/src
parent     339295dd7cd6f1dea25a9f2c94081e12a5d50ff0 (diff)
download   ruby-9a617c067f3cffbf0bfd4e2f4b0634adb9d50bc4.tar.gz
YJIT: Generate side exits as late as possible (#7612)
* YJIT: Generate side exits as late as possible
* YJIT: s/for_stack_size/with_stack_size/
* YJIT: s/get_counter/exit_counter/
Diffstat (limited to 'yjit/src')
-rw-r--r--  yjit/src/codegen.rs | 443
-rw-r--r--  yjit/src/core.rs    |  10
2 files changed, 215 insertions, 238 deletions
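The heart of the change: side exits are no longer generated eagerly at the top of each codegen function; they are produced on demand at the jump site and cached per sp_offset for the current PC. A minimal, self-contained sketch of that caching shape (the type and function names below are illustrative stand-ins, not the actual YJIT types):

use std::collections::HashMap;

// Illustrative stand-in for YJIT's CodePtr.
type CodePtr = usize;

struct JitStateSketch {
    // One cached exit per sp_offset seen at the current PC,
    // mirroring the new side_exit_for_pc: HashMap<i8, CodePtr> field.
    side_exit_for_pc: HashMap<i8, CodePtr>,
    next_ptr: CodePtr,
}

impl JitStateSketch {
    // Return the cached exit for this sp_offset, generating it only on first use.
    fn side_exit(&mut self, sp_offset: i8) -> CodePtr {
        if let Some(&ptr) = self.side_exit_for_pc.get(&sp_offset) {
            return ptr;
        }
        let ptr = self.gen_outlined_exit();
        self.side_exit_for_pc.insert(sp_offset, ptr);
        ptr
    }

    // Stand-in for gen_outlined_exit(): pretend to emit code and return its address.
    fn gen_outlined_exit(&mut self) -> CodePtr {
        self.next_ptr += 1;
        self.next_ptr
    }
}

fn main() {
    let mut jit = JitStateSketch { side_exit_for_pc: HashMap::new(), next_ptr: 0 };
    let a = jit.side_exit(2);
    let b = jit.side_exit(2); // same sp_offset: reuses the cached exit
    let c = jit.side_exit(3); // new sp_offset: generates a fresh exit
    assert_eq!(a, b);
    assert_ne!(a, c);
}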
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index 2ea64871b3..85520e5ae1 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -64,8 +64,12 @@ pub struct JITState {
/// PC of the instruction being compiled
pc: *mut VALUE,
+ /// stack_size when it started to compile the current instruction.
+ stack_size_for_pc: u8,
+
/// Side exit to the instruction being compiled. See :side-exit:.
- side_exit_for_pc: Option<CodePtr>,
+ /// For the current PC, CodePtr is cached for each sp_offset key.
+ side_exit_for_pc: HashMap<i8, CodePtr>,
/// Execution context when compilation started
/// This allows us to peek at run-time values
@@ -105,12 +109,13 @@ impl JITState {
JITState {
iseq: blockid.iseq,
starting_insn_idx: blockid.idx,
- starting_ctx,
+ starting_ctx: starting_ctx.clone(),
output_ptr,
insn_idx: 0,
opcode: 0,
pc: ptr::null_mut::<VALUE>(),
- side_exit_for_pc: None,
+ stack_size_for_pc: starting_ctx.get_stack_size(),
+ side_exit_for_pc: HashMap::new(),
pending_outgoing: vec![],
ec,
record_boundary_patch_point: false,
@@ -246,6 +251,28 @@ pub enum JCCKinds {
JCC_JNA,
}
+/// Counter used by get_counted_exit to count the number of exits with --yjit-stats.
+#[derive(Clone)]
+struct ExitCounter {
+ ptr: *const u8,
+ name: String,
+}
+
+/// Macro to get an ExitCounter for helpers with side exits.
+macro_rules! exit_counter {
+ ($counter_name:ident) => {
+ if (get_option!(gen_stats)) {
+ let counter = ExitCounter {
+ ptr: ptr_to_counter!($counter_name) as *const u8,
+ name: stringify!($counter_name).to_string(),
+ };
+ Some(counter)
+ } else {
+ None
+ }
+ };
+}
+
macro_rules! gen_counter_incr {
($asm:tt, $counter_name:ident) => {
if (get_option!(gen_stats)) {
@@ -264,26 +291,8 @@ macro_rules! gen_counter_incr {
}
macro_rules! counted_exit {
- ($ocb:tt, $existing_side_exit:tt, $counter_name:ident) => {
- // The counter is only incremented when stats are enabled
- if (!get_option!(gen_stats)) {
- $existing_side_exit
- } else {
- let ocb = $ocb.unwrap();
- let code_ptr = ocb.get_write_ptr();
-
- let mut ocb_asm = Assembler::new();
-
- // Increment the counter
- gen_counter_incr!(ocb_asm, $counter_name);
-
- // Jump to the existing side exit
- ocb_asm.jmp($existing_side_exit);
- ocb_asm.compile(ocb);
-
- // Pointer to the side-exit code
- code_ptr.as_side_exit()
- }
+ ($jit:tt, $ctx:tt, $ocb:tt, $counter_name:ident) => {
+ counted_exit($jit, $ctx, $ocb, exit_counter!($counter_name))
};
}
@@ -528,17 +537,57 @@ fn gen_outlined_exit(exit_pc: *mut VALUE, ctx: &Context, ocb: &mut OutlinedCb) -
// moment, so there is one unique side exit for each context. Note that
// it's incorrect to jump to the side exit after any ctx stack push operations
// since they change the logic required for reconstructing interpreter state.
-fn get_side_exit(jit: &mut JITState, ocb: &mut OutlinedCb, ctx: &Context) -> Target {
- match jit.side_exit_for_pc {
+fn side_exit(jit: &mut JITState, ctx: &Context, ocb: &mut OutlinedCb) -> Target {
+ // We use the latest ctx.sp_offset to generate a side exit to tolerate sp_offset changes by gen_save_sp.
+ // We also need to use the latest ctx when we implement stack temp register allocation in the future.
+ // However, we want to simulate an old stack_size when we take a side exit. We do that by adjusting the
+ // sp_offset because gen_outlined_exit uses ctx.sp_offset to move SP.
+ let ctx = ctx.with_stack_size(jit.stack_size_for_pc);
+
+ match jit.side_exit_for_pc.get(&ctx.get_sp_offset()) {
None => {
- let exit_code = gen_outlined_exit(jit.pc, ctx, ocb);
- jit.side_exit_for_pc = Some(exit_code);
+ let exit_code = gen_outlined_exit(jit.pc, &ctx, ocb);
+ jit.side_exit_for_pc.insert(ctx.get_sp_offset(), exit_code);
exit_code.as_side_exit()
}
Some(code_ptr) => code_ptr.as_side_exit()
}
}
+/// Get a side exit. Increment a counter in it if --yjit-stats is enabled.
+fn counted_exit(jit: &mut JITState, ctx: &Context, ocb: &mut OutlinedCb, counter: Option<ExitCounter>) -> Target {
+ let side_exit = side_exit(jit, ctx, ocb);
+
+ // The counter is only incremented when stats are enabled
+ if !get_option!(gen_stats) {
+ return side_exit;
+ }
+ let counter = match counter {
+ Some(counter) => counter,
+ None => return side_exit,
+ };
+
+ let ocb = ocb.unwrap();
+ let code_ptr = ocb.get_write_ptr();
+
+ let mut asm = Assembler::new();
+
+ // Load the pointer into a register
+ asm.comment(&format!("increment counter {}", counter.name));
+ let ptr_reg = asm.load(Opnd::const_ptr(counter.ptr));
+ let counter_opnd = Opnd::mem(64, ptr_reg, 0);
+
+ // Increment and store the updated value
+ asm.incr_counter(counter_opnd, Opnd::UImm(1));
+
+ // Jump to the existing side exit
+ asm.jmp(side_exit);
+ asm.compile(ocb);
+
+ // Pointer to the side-exit code
+ code_ptr.as_side_exit()
+}
+
// Ensure that there is an exit for the start of the block being compiled.
// Block invalidation uses this exit.
pub fn jit_ensure_block_entry_exit(jit: &mut JITState, ocb: &mut OutlinedCb) {
@@ -551,7 +600,7 @@ pub fn jit_ensure_block_entry_exit(jit: &mut JITState, ocb: &mut OutlinedCb) {
// If we're compiling the first instruction in the block.
if jit.insn_idx == jit.starting_insn_idx {
// Generate the exit with the cache in jitstate.
- let entry_exit = get_side_exit(jit, ocb, block_starting_context).unwrap_code_ptr();
+ let entry_exit = side_exit(jit, block_starting_context, ocb).unwrap_code_ptr();
jit.block_entry_exit = Some(entry_exit);
} else {
let block_entry_pc = unsafe { rb_iseq_pc_at_idx(jit.iseq, jit.starting_insn_idx.into()) };
@@ -714,7 +763,13 @@ pub fn gen_entry_prologue(cb: &mut CodeBlock, ocb: &mut OutlinedCb, iseq: IseqPt
// Generate code to check for interrupts and take a side-exit.
// Warning: this function clobbers REG0
-fn gen_check_ints(asm: &mut Assembler, side_exit: Target) {
+fn gen_check_ints(
+ jit: &mut JITState,
+ ctx: &Context,
+ asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
+ counter: Option<ExitCounter>,
+) {
// Check for interrupts
// see RUBY_VM_CHECK_INTS(ec) macro
asm.comment("RUBY_VM_CHECK_INTS(ec)");
@@ -724,7 +779,7 @@ fn gen_check_ints(asm: &mut Assembler, side_exit: Target) {
let interrupt_flag = asm.load(Opnd::mem(32, EC, RUBY_OFFSET_EC_INTERRUPT_FLAG));
asm.test(interrupt_flag, interrupt_flag);
- asm.jnz(side_exit);
+ asm.jnz(counted_exit(jit, ctx, ocb, counter));
}
// Generate a stubbed unconditional jump to the next bytecode instruction.
@@ -806,9 +861,6 @@ pub fn gen_single_block(
// For each instruction to compile
// NOTE: could rewrite this loop with a std::iter::Iterator
while insn_idx < iseq_size {
- // Set the initial stack size to check if it's safe to exit on CantCompile
- let starting_stack_size = ctx.get_stack_size();
-
// Get the current pc and opcode
let pc = unsafe { rb_iseq_pc_at_idx(iseq, insn_idx.into()) };
// try_into() call below is unfortunate. Maybe pick i32 instead of usize for opcodes.
@@ -828,7 +880,8 @@ pub fn gen_single_block(
jit.insn_idx = insn_idx;
jit.opcode = opcode;
jit.pc = pc;
- jit.side_exit_for_pc = None;
+ jit.stack_size_for_pc = ctx.get_stack_size();
+ jit.side_exit_for_pc.clear();
// If previous instruction requested to record the boundary
if jit.record_boundary_patch_point {
@@ -871,10 +924,9 @@ pub fn gen_single_block(
println!("can't compile {}", insn_name(opcode));
}
- // Using the latest ctx instead of a starting ctx to allow spilling stack temps in the future.
- // When you return CantCompile, you should not modify stack_size.
- assert_eq!(ctx.get_stack_size(), starting_stack_size, "stack_size was modified despite CantCompile");
- gen_exit(jit.pc, &ctx, &mut asm);
+ // Rewind stack_size using ctx.with_stack_size to allow stack_size changes
+ // before you return CantCompile.
+ gen_exit(jit.pc, &ctx.with_stack_size(jit.stack_size_for_pc), &mut asm);
// If this is the first instruction in the block, then
// the entry address is the address for block_entry_exit
@@ -1167,16 +1219,12 @@ fn gen_opt_plus(
};
if two_fixnums {
- // Create a side-exit to fall back to the interpreter
- // Note: we generate the side-exit before popping operands from the stack
- let side_exit = get_side_exit(jit, ocb, ctx);
-
if !assume_bop_not_redefined(jit, ocb, INTEGER_REDEFINED_OP_FLAG, BOP_PLUS) {
return CantCompile;
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb, side_exit);
+ guard_two_fixnums(jit, ctx, asm, ocb);
// Get the operands from the stack
let arg1 = ctx.stack_pop(1);
@@ -1185,7 +1233,7 @@ fn gen_opt_plus(
// Add arg0 + arg1 and test for overflow
let arg0_untag = asm.sub(arg0, Opnd::Imm(1));
let out_val = asm.add(arg0_untag, arg1);
- asm.jo(side_exit);
+ asm.jo(side_exit(jit, ctx, ocb));
// Push the output on the stack
let dst = ctx.stack_push(Type::Fixnum);
@@ -1360,11 +1408,13 @@ fn gen_newrange(
}
fn guard_object_is_heap(
+ jit: &mut JITState,
ctx: &mut Context,
asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
object: Opnd,
object_opnd: YARVOpnd,
- side_exit: Target,
+ counter: Option<ExitCounter>,
) {
let object_type = ctx.get_opnd_type(object_opnd);
if object_type.is_heap() {
@@ -1375,11 +1425,11 @@ fn guard_object_is_heap(
// Test that the object is not an immediate
asm.test(object, (RUBY_IMMEDIATE_MASK as u64).into());
- asm.jnz(side_exit);
+ asm.jnz(counted_exit(jit, ctx, ocb, counter.clone()));
// Test that the object is not false
asm.cmp(object, Qfalse.into());
- asm.je(side_exit);
+ asm.je(counted_exit(jit, ctx, ocb, counter));
if object_type.diff(Type::UnknownHeap) != TypeDiff::Incompatible {
ctx.upgrade_opnd_type(object_opnd, Type::UnknownHeap);
@@ -1387,11 +1437,13 @@ fn guard_object_is_heap(
}
fn guard_object_is_array(
+ jit: &mut JITState,
ctx: &mut Context,
asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
object: Opnd,
object_opnd: YARVOpnd,
- side_exit: Target,
+ counter: Option<ExitCounter>,
) {
let object_type = ctx.get_opnd_type(object_opnd);
if object_type.is_array() {
@@ -1402,7 +1454,7 @@ fn guard_object_is_array(
Opnd::Reg(_) => object,
_ => asm.load(object),
};
- guard_object_is_heap(ctx, asm, object_reg, object_opnd, side_exit);
+ guard_object_is_heap(jit, ctx, asm, ocb, object_reg, object_opnd, counter.clone());
asm.comment("guard object is array");
@@ -1412,7 +1464,7 @@ fn guard_object_is_array(
// Compare the result with T_ARRAY
asm.cmp(flags_opnd, (RUBY_T_ARRAY as u64).into());
- asm.jne(side_exit);
+ asm.jne(counted_exit(jit, ctx, ocb, counter));
if object_type.diff(Type::TArray) != TypeDiff::Incompatible {
ctx.upgrade_opnd_type(object_opnd, Type::TArray);
@@ -1420,11 +1472,13 @@ fn guard_object_is_array(
}
fn guard_object_is_string(
+ jit: &mut JITState,
ctx: &mut Context,
asm: &mut Assembler,
+ ocb: &mut OutlinedCb,
object: Opnd,
object_opnd: YARVOpnd,
- side_exit: Target,
+ counter: Option<ExitCounter>,
) {
let object_type = ctx.get_opnd_type(object_opnd);
if object_type.is_string() {
@@ -1435,7 +1489,7 @@ fn guard_object_is_string(
Opnd::Reg(_) => object,
_ => asm.load(object),
};
- guard_object_is_heap(ctx, asm, object_reg, object_opnd, side_exit);
+ guard_object_is_heap(jit, ctx, asm, ocb, object_reg, object_opnd, counter.clone());
asm.comment("guard object is string");
@@ -1445,7 +1499,7 @@ fn guard_object_is_string(
// Compare the result with T_STRING
asm.cmp(flags_reg, Opnd::UImm(RUBY_T_STRING as u64));
- asm.jne(side_exit);
+ asm.jne(counted_exit(jit, ctx, ocb, counter));
if object_type.diff(Type::TString) != TypeDiff::Incompatible {
ctx.upgrade_opnd_type(object_opnd, Type::TString);
@@ -1509,8 +1563,6 @@ fn gen_expandarray(
return CantCompile;
}
- let side_exit = get_side_exit(jit, ocb, ctx);
-
let array_opnd = ctx.stack_opnd(0);
// num is the number of requested values. If there aren't enough in the
@@ -1528,11 +1580,13 @@ fn gen_expandarray(
// Move the array from the stack and check that it's an array.
guard_object_is_array(
+ jit,
ctx,
asm,
+ ocb,
array_opnd,
array_opnd.into(),
- counted_exit!(ocb, side_exit, expandarray_not_array),
+ exit_counter!(expandarray_not_array),
);
let array_opnd = ctx.stack_pop(1); // pop after using the type info
@@ -1547,7 +1601,7 @@ fn gen_expandarray(
// Only handle the case where the number of values in the array is greater
// than or equal to the number of values requested.
asm.cmp(array_len_opnd, num.into());
- asm.jl(counted_exit!(ocb, side_exit, expandarray_rhs_too_small));
+ asm.jl(counted_exit!(jit, ctx, ocb, expandarray_rhs_too_small));
// Load the address of the embedded array into REG1.
// (struct RArray *)(obj)->as.ary
@@ -1719,11 +1773,8 @@ fn gen_setlocal_generic(
);
asm.test(flags_opnd, VM_ENV_FLAG_WB_REQUIRED.into());
- // Create a side-exit to fall back to the interpreter
- let side_exit = get_side_exit(jit, ocb, ctx);
-
// if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
- asm.jnz(side_exit);
+ asm.jnz(side_exit(jit, ctx, ocb));
}
if level == 0 {
@@ -1895,7 +1946,7 @@ fn jit_chain_guard(
asm: &mut Assembler,
ocb: &mut OutlinedCb,
depth_limit: i32,
- side_exit: Target,
+ counter: Option<ExitCounter>,
) {
let target0_gen_fn = match jcc {
JCC_JNE | JCC_JNZ => BranchGenFn::JNZToTarget0,
@@ -1913,7 +1964,7 @@ fn jit_chain_guard(
gen_branch(jit, asm, ocb, bid, &deeper, None, None, target0_gen_fn);
} else {
- target0_gen_fn.call(asm, side_exit.unwrap_code_ptr(), None);
+ target0_gen_fn.call(asm, counted_exit(jit, ctx, ocb, counter).unwrap_code_ptr(), None);
}
}
@@ -1992,7 +2043,6 @@ fn gen_get_ivar(
ivar_name: ID,
recv: Opnd,
recv_opnd: YARVOpnd,
- side_exit: Target,
) -> CodegenStatus {
let comptime_val_klass = comptime_receiver.class_of();
@@ -2060,7 +2110,7 @@ fn gen_get_ivar(
};
// Guard heap object (recv_opnd must be used before stack_pop)
- guard_object_is_heap(ctx, asm, recv, recv_opnd, side_exit);
+ guard_object_is_heap(jit, ctx, asm, ocb, recv, recv_opnd, None);
// Compile time self is embedded and the ivar index lands within the object
let embed_test_result = unsafe { FL_TEST_RAW(comptime_receiver, VALUE(ROBJECT_EMBED.as_usize())) != VALUE(0) };
@@ -2071,7 +2121,6 @@ fn gen_get_ivar(
asm.comment("guard shape");
asm.cmp(shape_opnd, Opnd::UImm(expected_shape as u64));
- let megamorphic_side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic);
jit_chain_guard(
JCC_JNE,
jit,
@@ -2079,7 +2128,7 @@ fn gen_get_ivar(
asm,
ocb,
max_chain_depth,
- megamorphic_side_exit,
+ exit_counter!(getivar_megamorphic),
);
// Pop receiver if it's on the temp stack
@@ -2142,9 +2191,6 @@ fn gen_getinstancevariable(
let comptime_val = jit.peek_at_self();
- // Generate a side exit
- let side_exit = get_side_exit(jit, ocb, ctx);
-
// Guard that the receiver has the same class as the one from compile time.
let self_asm_opnd = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF);
@@ -2158,7 +2204,6 @@ fn gen_getinstancevariable(
ivar_name,
self_asm_opnd,
SelfOpnd,
- side_exit,
)
}
@@ -2280,11 +2325,8 @@ fn gen_setinstancevariable(
let recv_opnd = SelfOpnd;
- // Generate a side exit
- let side_exit = get_side_exit(jit, ocb, ctx);
-
// Upgrade type
- guard_object_is_heap(ctx, asm, recv, recv_opnd, side_exit);
+ guard_object_is_heap(jit, ctx, asm, ocb, recv, recv_opnd, None);
let expected_shape = unsafe { rb_shape_get_shape_id(comptime_receiver) };
let shape_id_offset = unsafe { rb_shape_id_offset() };
@@ -2292,7 +2334,6 @@ fn gen_setinstancevariable(
asm.comment("guard shape");
asm.cmp(shape_opnd, Opnd::UImm(expected_shape as u64));
- let megamorphic_side_exit = counted_exit!(ocb, side_exit, setivar_megamorphic);
jit_chain_guard(
JCC_JNE,
jit,
@@ -2300,7 +2341,7 @@ fn gen_setinstancevariable(
asm,
ocb,
SET_IVAR_MAX_DEPTH,
- megamorphic_side_exit,
+ exit_counter!(setivar_megamorphic),
);
let write_val;
@@ -2500,17 +2541,14 @@ fn gen_definedivar(
rb_shape_get_iv_index(shape, ivar_name, &mut ivar_index)
};
- let side_exit = get_side_exit(jit, ocb, ctx);
-
// Guard heap object (recv_opnd must be used before stack_pop)
- guard_object_is_heap(ctx, asm, recv, SelfOpnd, side_exit);
+ guard_object_is_heap(jit, ctx, asm, ocb, recv, SelfOpnd, None);
let shape_id_offset = unsafe { rb_shape_id_offset() };
let shape_opnd = Opnd::mem(SHAPE_ID_NUM_BITS as u8, recv, shape_id_offset);
asm.comment("guard shape");
asm.cmp(shape_opnd, Opnd::UImm(shape_id as u64));
- let megamorphic_side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic);
jit_chain_guard(
JCC_JNE,
jit,
@@ -2518,7 +2556,7 @@ fn gen_definedivar(
asm,
ocb,
GET_IVAR_MAX_DEPTH,
- megamorphic_side_exit,
+ None,
);
let result = if ivar_exists { pushval } else { Qnil };
@@ -2616,7 +2654,6 @@ fn guard_two_fixnums(
ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
- side_exit: Target
) {
// Get stack operands without popping them
let arg1 = ctx.stack_opnd(0);
@@ -2628,19 +2665,19 @@ fn guard_two_fixnums(
if arg0_type.is_heap() || arg1_type.is_heap() {
asm.comment("arg is heap object");
- asm.jmp(side_exit);
+ asm.jmp(side_exit(jit, ctx, ocb));
return;
}
if arg0_type != Type::Fixnum && arg0_type.is_specific() {
asm.comment("arg0 not fixnum");
- asm.jmp(side_exit);
+ asm.jmp(side_exit(jit, ctx, ocb));
return;
}
if arg1_type != Type::Fixnum && arg1_type.is_specific() {
asm.comment("arg1 not fixnum");
- asm.jmp(side_exit);
+ asm.jmp(side_exit(jit, ctx, ocb));
return;
}
@@ -2661,7 +2698,7 @@ fn guard_two_fixnums(
asm,
ocb,
SEND_MAX_DEPTH,
- side_exit,
+ None,
);
}
if arg1_type != Type::Fixnum {
@@ -2675,7 +2712,7 @@ fn guard_two_fixnums(
asm,
ocb,
SEND_MAX_DEPTH,
- side_exit,
+ None,
);
}
@@ -2705,16 +2742,12 @@ fn gen_fixnum_cmp(
};
if two_fixnums {
- // Create a side-exit to fall back to the interpreter
- // Note: we generate the side-exit before popping operands from the stack
- let side_exit = get_side_exit(jit, ocb, ctx);
-
if !assume_bop_not_redefined(jit, ocb, INTEGER_REDEFINED_OP_FLAG, bop) {
return CantCompile;
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb, side_exit);
+ guard_two_fixnums(jit, ctx, asm, ocb);
// Get the operands from the stack
let arg1 = ctx.stack_pop(1);
@@ -2780,9 +2813,6 @@ fn gen_equality_specialized(
ocb: &mut OutlinedCb,
gen_eq: bool,
) -> Option<bool> {
- // Create a side-exit to fall back to the interpreter
- let side_exit = get_side_exit(jit, ocb, ctx);
-
let a_opnd = ctx.stack_opnd(1);
let b_opnd = ctx.stack_opnd(0);
@@ -2797,7 +2827,7 @@ fn gen_equality_specialized(
return Some(false);
}
- guard_two_fixnums(jit, ctx, asm, ocb, side_exit);
+ guard_two_fixnums(jit, ctx, asm, ocb);
asm.cmp(a_opnd, b_opnd);
let val = if gen_eq {
@@ -2837,7 +2867,7 @@ fn gen_equality_specialized(
a_opnd.into(),
comptime_a,
SEND_MAX_DEPTH,
- side_exit,
+ None,
);
let equal = asm.new_label("equal");
@@ -2862,7 +2892,7 @@ fn gen_equality_specialized(
b_opnd.into(),
comptime_b,
SEND_MAX_DEPTH,
- side_exit,
+ None,
);
}
@@ -2949,9 +2979,6 @@ fn gen_opt_aref(
let comptime_idx = jit.peek_at_stack(ctx, 0);
let comptime_recv = jit.peek_at_stack(ctx, 1);
- // Create a side-exit to fall back to the interpreter
- let side_exit = get_side_exit(jit, ocb, ctx);
-
if comptime_recv.class_of() == unsafe { rb_cArray } && comptime_idx.fixnum_p() {
if !assume_bop_not_redefined(jit, ocb, ARRAY_REDEFINED_OP_FLAG, BOP_AREF) {
return CantCompile;
@@ -2973,13 +3000,13 @@ fn gen_opt_aref(
recv_opnd.into(),
comptime_recv,
OPT_AREF_MAX_CHAIN_DEPTH,
- side_exit,
+ None,
);
// Bail if idx is not a FIXNUM
let idx_reg = asm.load(idx_opnd);
asm.test(idx_reg, (RUBY_FIXNUM_FLAG as u64).into());
- asm.jz(counted_exit!(ocb, side_exit, oaref_arg_not_fixnum));
+ asm.jz(counted_exit!(jit, ctx, ocb, oaref_arg_not_fixnum));
// Call VALUE rb_ary_entry_internal(VALUE ary, long offset).
// It never raises or allocates, so we don't need to write to cfp->pc.
@@ -3016,7 +3043,7 @@ fn gen_opt_aref(
recv_opnd.into(),
comptime_recv,
OPT_AREF_MAX_CHAIN_DEPTH,
- side_exit,
+ None,
);
// Prepare to call rb_hash_aref(). It might call #hash on the key.
@@ -3064,8 +3091,6 @@ fn gen_opt_aset(
let _val = ctx.stack_opnd(0);
if comptime_recv.class_of() == unsafe { rb_cArray } && comptime_key.fixnum_p() {
- let side_exit = get_side_exit(jit, ocb, ctx);
-
// Guard receiver is an Array
jit_guard_known_klass(
jit,
@@ -3077,7 +3102,7 @@ fn gen_opt_aset(
recv.into(),
comptime_recv,
SEND_MAX_DEPTH,
- side_exit,
+ None,
);
// Guard key is a fixnum
@@ -3091,7 +3116,7 @@ fn gen_opt_aset(
key.into(),
comptime_key,
SEND_MAX_DEPTH,
- side_exit,
+ None,
);
// We might allocate or raise
@@ -3116,8 +3141,6 @@ fn gen_opt_aset(
jump_to_next_insn(jit, ctx, asm, ocb);
return EndBlock;
} else if comptime_recv.class_of() == unsafe { rb_cHash } {
- let side_exit = get_side_exit(jit, ocb, ctx);
-
// Guard receiver is a Hash
jit_guard_known_klass(
jit,
@@ -3129,7 +3152,7 @@ fn gen_opt_aset(
recv.into(),
comptime_recv,
SEND_MAX_DEPTH,
- side_exit,
+ None,
);
// We might allocate or raise
@@ -3169,16 +3192,12 @@ fn gen_opt_and(
};
if two_fixnums {
- // Create a side-exit to fall back to the interpreter
- // Note: we generate the side-exit before popping operands from the stack
- let side_exit = get_side_exit(jit, ocb, ctx);
-
if !assume_bop_not_redefined(jit, ocb, INTEGER_REDEFINED_OP_FLAG, BOP_AND) {
return CantCompile;
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb, side_exit);
+ guard_two_fixnums(jit, ctx, asm, ocb);
// Get the operands and destination from the stack
let arg1 = ctx.stack_pop(1);
@@ -3214,16 +3233,12 @@ fn gen_opt_or(
};
if two_fixnums {
- // Create a side-exit to fall back to the interpreter
- // Note: we generate the side-exit before popping operands from the stack
- let side_exit = get_side_exit(jit, ocb, ctx);
-
if !assume_bop_not_redefined(jit, ocb, INTEGER_REDEFINED_OP_FLAG, BOP_OR) {
return CantCompile;
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb, side_exit);
+ guard_two_fixnums(jit, ctx, asm, ocb);
// Get the operands and destination from the stack
let arg1 = ctx.stack_pop(1);
@@ -3259,16 +3274,12 @@ fn gen_opt_minus(
};
if two_fixnums {
- // Create a side-exit to fall back to the interpreter
- // Note: we generate the side-exit before popping operands from the stack
- let side_exit = get_side_exit(jit, ocb, ctx);
-
if !assume_bop_not_redefined(jit, ocb, INTEGER_REDEFINED_OP_FLAG, BOP_MINUS) {
return CantCompile;
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb, side_exit);
+ guard_two_fixnums(jit, ctx, asm, ocb);
// Get the operands and destination from the stack
let arg1 = ctx.stack_pop(1);
@@ -3276,7 +3287,7 @@ fn gen_opt_minus(
// Subtract arg0 - arg1 and test for overflow
let val_untag = asm.sub(arg0, arg1);
- asm.jo(side_exit);
+ asm.jo(side_exit(jit, ctx, ocb));
let val = asm.add(val_untag, Opnd::Imm(1));
// Push the output on the stack
@@ -3326,16 +3337,12 @@ fn gen_opt_mod(
};
if two_fixnums {
- // Create a side-exit to fall back to the interpreter
- // Note: we generate the side-exit before popping operands from the stack
- let side_exit = get_side_exit(jit, ocb, ctx);
-
if !assume_bop_not_redefined(jit, ocb, INTEGER_REDEFINED_OP_FLAG, BOP_MOD) {
return CantCompile;
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb, side_exit);
+ guard_two_fixnums(jit, ctx, asm, ocb);
// Get the operands and destination from the stack
let arg1 = ctx.stack_pop(1);
@@ -3343,7 +3350,7 @@ fn gen_opt_mod(
// Check for arg0 % 0
asm.cmp(arg1, Opnd::Imm(VALUE::fixnum_from_usize(0).as_i64()));
- asm.je(side_exit);
+ asm.je(side_exit(jit, ctx, ocb));
// Call rb_fix_mod_fix(VALUE recv, VALUE obj)
let ret = asm.ccall(rb_fix_mod_fix as *const u8, vec![arg0, arg1]);
@@ -3597,7 +3604,6 @@ fn gen_opt_case_dispatch(
// Check if the key is the same value
asm.cmp(key_opnd, comptime_key.into());
- let side_exit = get_side_exit(jit, ocb, &ctx);
jit_chain_guard(
JCC_JNE,
jit,
@@ -3605,7 +3611,7 @@ fn gen_opt_case_dispatch(
asm,
ocb,
CASE_WHEN_MAX_DEPTH,
- side_exit,
+ None,
);
ctx.stack_pop(1); // Pop key_opnd
@@ -3640,8 +3646,7 @@ fn gen_branchif(
// Check for interrupts, but only on backward branches that may create loops
if jump_offset < 0 {
- let side_exit = get_side_exit(jit, ocb, ctx);
- gen_check_ints(asm, side_exit);
+ gen_check_ints(jit, ctx, asm, ocb, None);
}
// Get the branch target instruction offsets
@@ -3696,8 +3701,7 @@ fn gen_branchunless(
// Check for interrupts, but only on backward branches that may create loops
if jump_offset < 0 {
- let side_exit = get_side_exit(jit, ocb, ctx);
- gen_check_ints(asm, side_exit);
+ gen_check_ints(jit, ctx, asm, ocb, None);
}
// Get the branch target instruction offsets
@@ -3753,8 +3757,7 @@ fn gen_branchnil(
// Check for interrupts, but only on backward branches that may create loops
if jump_offset < 0 {
- let side_exit = get_side_exit(jit, ocb, ctx);
- gen_check_ints(asm, side_exit);
+ gen_check_ints(jit, ctx, asm, ocb, None);
}
// Get the branch target instruction offsets
@@ -3841,8 +3844,7 @@ fn gen_jump(
// Check for interrupts, but only on backward branches that may create loops
if jump_offset < 0 {
- let side_exit = get_side_exit(jit, ocb, ctx);
- gen_check_ints(asm, side_exit);
+ gen_check_ints(jit, ctx, asm, ocb, None);
}
// Get the branch target instruction offsets
@@ -3874,7 +3876,7 @@ fn jit_guard_known_klass(
insn_opnd: YARVOpnd,
sample_instance: VALUE,
max_chain_depth: i32,
- side_exit: Target,
+ counter: Option<ExitCounter>,
) {
let val_type = ctx.get_opnd_type(insn_opnd);
@@ -3889,7 +3891,7 @@ fn jit_guard_known_klass(
asm.comment("guard object is nil");
asm.cmp(obj_opnd, Qnil.into());
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, side_exit);
+ jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
ctx.upgrade_opnd_type(insn_opnd, Type::Nil);
} else if unsafe { known_klass == rb_cTrueClass } {
@@ -3898,7 +3900,7 @@ fn jit_guard_known_klass(
asm.comment("guard object is true");
asm.cmp(obj_opnd, Qtrue.into());
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, side_exit);
+ jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
ctx.upgrade_opnd_type(insn_opnd, Type::True);
} else if unsafe { known_klass == rb_cFalseClass } {
@@ -3908,7 +3910,7 @@ fn jit_guard_known_klass(
asm.comment("guard object is false");
assert!(Qfalse.as_i32() == 0);
asm.test(obj_opnd, obj_opnd);
- jit_chain_guard(JCC_JNZ, jit, ctx, asm, ocb, max_chain_depth, side_exit);
+ jit_chain_guard(JCC_JNZ, jit, ctx, asm, ocb, max_chain_depth, counter);
ctx.upgrade_opnd_type(insn_opnd, Type::False);
} else if unsafe { known_klass == rb_cInteger } && sample_instance.fixnum_p() {
@@ -3918,7 +3920,7 @@ fn jit_guard_known_klass(
asm.comment("guard object is fixnum");
asm.test(obj_opnd, Opnd::Imm(RUBY_FIXNUM_FLAG as i64));
- jit_chain_guard(JCC_JZ, jit, ctx, asm, ocb, max_chain_depth, side_exit);
+ jit_chain_guard(JCC_JZ, jit, ctx, asm, ocb, max_chain_depth, counter);
ctx.upgrade_opnd_type(insn_opnd, Type::Fixnum);
} else if unsafe { known_klass == rb_cSymbol } && sample_instance.static_sym_p() {
assert!(!val_type.is_heap());
@@ -3930,7 +3932,7 @@ fn jit_guard_known_klass(
asm.comment("guard object is static symbol");
assert!(RUBY_SPECIAL_SHIFT == 8);
asm.cmp(obj_opnd.with_num_bits(8).unwrap(), Opnd::UImm(RUBY_SYMBOL_FLAG as u64));
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, side_exit);
+ jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
ctx.upgrade_opnd_type(insn_opnd, Type::ImmSymbol);
}
} else if unsafe { known_klass == rb_cFloat } && sample_instance.flonum_p() {
@@ -3942,7 +3944,7 @@ fn jit_guard_known_klass(
asm.comment("guard object is flonum");
let flag_bits = asm.and(obj_opnd, Opnd::UImm(RUBY_FLONUM_MASK as u64));
asm.cmp(flag_bits, Opnd::UImm(RUBY_FLONUM_FLAG as u64));
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, side_exit);
+ jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
ctx.upgrade_opnd_type(insn_opnd, Type::Flonum);
}
} else if unsafe {
@@ -3961,7 +3963,7 @@ fn jit_guard_known_klass(
// this situation.
asm.comment("guard known object with singleton class");
asm.cmp(obj_opnd, sample_instance.into());
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, side_exit);
+ jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
} else if val_type == Type::CString && unsafe { known_klass == rb_cString } {
// guard elided because the context says we've already checked
unsafe {
@@ -3975,9 +3977,9 @@ fn jit_guard_known_klass(
if !val_type.is_heap() {
asm.comment("guard not immediate");
asm.test(obj_opnd, (RUBY_IMMEDIATE_MASK as u64).into());
- jit_chain_guard(JCC_JNZ, jit, ctx, asm, ocb, max_chain_depth, side_exit);
+ jit_chain_guard(JCC_JNZ, jit, ctx, asm, ocb, max_chain_depth, counter.clone());
asm.cmp(obj_opnd, Qfalse.into());
- jit_chain_guard(JCC_JE, jit, ctx, asm, ocb, max_chain_depth, side_exit);
+ jit_chain_guard(JCC_JE, jit, ctx, asm, ocb, max_chain_depth, counter.clone());
ctx.upgrade_opnd_type(insn_opnd, Type::UnknownHeap);
}
@@ -3993,7 +3995,7 @@ fn jit_guard_known_klass(
// TODO: jit_mov_gc_ptr keeps a strong reference, which leaks the class.
asm.comment("guard known class");
asm.cmp(klass_opnd, known_klass.into());
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, side_exit);
+ jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
if known_klass == unsafe { rb_cString } {
ctx.upgrade_opnd_type(insn_opnd, Type::CString);
@@ -4006,10 +4008,11 @@ fn jit_guard_known_klass(
// Generate ancestry guard for protected callee.
// Calls to protected callees only go through when self.is_a?(klass_that_defines_the_callee).
fn jit_protected_callee_ancestry_guard(
+ jit: &mut JITState,
+ ctx: &Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
cme: *const rb_callable_method_entry_t,
- side_exit: Target,
) {
// See vm_call_method().
let def_class = unsafe { (*cme).defined_class };
@@ -4024,7 +4027,7 @@ fn jit_protected_callee_ancestry_guard(
],
);
asm.test(val, val);
- asm.jz(counted_exit!(ocb, side_exit, send_se_protected_check_failed))
+ asm.jz(counted_exit!(jit, ctx, ocb, send_se_protected_check_failed))
}
// Codegen for rb_obj_not().
@@ -4141,10 +4144,9 @@ fn jit_rb_kernel_is_a(
}
let sample_is_a = unsafe { rb_obj_is_kind_of(sample_lhs, sample_rhs) == Qtrue };
- let side_exit = get_side_exit(jit, ocb, ctx);
asm.comment("Kernel#is_a?");
asm.cmp(ctx.stack_opnd(0), sample_rhs.into());
- asm.jne(counted_exit!(ocb, side_exit, send_is_a_class_mismatch));
+ asm.jne(counted_exit!(jit, ctx, ocb, send_is_a_class_mismatch));
ctx.stack_pop(2);
@@ -4202,10 +4204,9 @@ fn jit_rb_kernel_instance_of(
let sample_instance_of = sample_lhs_real_class == sample_rhs;
- let side_exit = get_side_exit(jit, ocb, ctx);
asm.comment("Kernel#instance_of?");
asm.cmp(ctx.stack_opnd(0), sample_rhs.into());
- asm.jne(counted_exit!(ocb, side_exit, send_instance_of_class_mismatch));
+ asm.jne(counted_exit!(jit, ctx, ocb, send_instance_of_class_mismatch));
ctx.stack_pop(2);
@@ -4306,10 +4307,8 @@ fn jit_rb_int_equal(
_argc: i32,
_known_recv_class: *const VALUE,
) -> bool {
- let side_exit = get_side_exit(jit, ocb, ctx);
-
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb, side_exit);
+ guard_two_fixnums(jit, ctx, asm, ocb);
// Compare the arguments
asm.comment("rb_int_equal");
@@ -4469,11 +4468,8 @@ fn jit_rb_str_concat(
return false;
}
- // Generate a side exit
- let side_exit = get_side_exit(jit, ocb, ctx);
-
// Guard that the concat argument is a string
- guard_object_is_string(ctx, asm, ctx.stack_opnd(0), StackOpnd(0), side_exit);
+ guard_object_is_string(jit, ctx, asm, ocb, ctx.stack_opnd(0), StackOpnd(0), None);
// Guard buffers from GC since rb_str_buf_append may allocate. During the VM lock on GC,
// other Ractors may trigger global invalidation, so we need ctx.clear_local_types().
@@ -4621,9 +4617,6 @@ fn jit_obj_respond_to(
// "negative CME" earlier.
jit.assume_method_lookup_stable(ocb, target_cme);
- // Generate a side exit
- let side_exit = get_side_exit(jit, ocb, ctx);
-
if argc == 2 {
// pop include_all argument (we only use its type info)
ctx.stack_pop(1);
@@ -4635,7 +4628,7 @@ fn jit_obj_respond_to(
// This is necessary because we have no guarantee that sym_opnd is a constant
asm.comment("guard known mid");
asm.cmp(sym_opnd, mid_sym.into());
- asm.jne(side_exit);
+ asm.jne(side_exit(jit, ctx, ocb));
jit_putobject(jit, ctx, asm, result);
@@ -4905,9 +4898,6 @@ fn gen_send_cfunc(
let cfunc_argc = unsafe { get_mct_argc(cfunc) };
let mut argc = argc;
- // Create a side-exit to fall back to the interpreter
- let side_exit = get_side_exit(jit, ocb, ctx);
-
// If the function expects a Ruby array of arguments
if cfunc_argc < 0 && cfunc_argc != -1 {
gen_counter_incr!(asm, send_cfunc_ruby_array_varg);
@@ -4973,7 +4963,7 @@ fn gen_send_cfunc(
}
// Check for interrupts
- gen_check_ints(asm, side_exit);
+ gen_check_ints(jit, ctx, asm, ocb, None);
// Stack overflow check
// #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin)
@@ -4981,7 +4971,7 @@ fn gen_send_cfunc(
asm.comment("stack overflow check");
let stack_limit = asm.lea(ctx.sp_opnd((SIZEOF_VALUE * 4 + 2 * RUBY_SIZEOF_CONTROL_FRAME) as isize));
asm.cmp(CFP, stack_limit);
- asm.jbe(counted_exit!(ocb, side_exit, send_se_cf_overflow));
+ asm.jbe(counted_exit!(jit, ctx, ocb, send_se_cf_overflow));
// Number of args which will be passed through to the callee
// This is adjusted by the kwargs being combined into a hash.
@@ -5058,7 +5048,7 @@ fn gen_send_cfunc(
// and if not side exit.
argc = cfunc_argc;
passed_argc = argc;
- push_splat_args(required_args, ctx, asm, ocb, side_exit)
+ push_splat_args(required_args, jit, ctx, asm, ocb)
}
// This is a .send call and we need to adjust the stack
@@ -5235,14 +5225,14 @@ fn get_array_ptr(asm: &mut Assembler, array_reg: Opnd) -> Opnd {
/// Pushes arguments from an array to the stack. Differs from push splat because
/// the array can have items left over.
-fn move_rest_args_to_stack(array: Opnd, num_args: u32, ctx: &mut Context, asm: &mut Assembler, ocb: &mut OutlinedCb, side_exit: Target) {
+fn move_rest_args_to_stack(array: Opnd, num_args: u32, jit: &mut JITState, ctx: &mut Context, asm: &mut Assembler, ocb: &mut OutlinedCb) {
asm.comment("move_rest_args_to_stack");
let array_len_opnd = get_array_len(asm, array);
asm.comment("Side exit if length is less than required");
asm.cmp(array_len_opnd, num_args.into());
- asm.jl(counted_exit!(ocb, side_exit, send_iseq_has_rest_and_splat_not_equal));
+ asm.jl(counted_exit!(jit, ctx, ocb, send_iseq_has_rest_and_splat_not_equal));
asm.comment("Push arguments from array");
@@ -5273,18 +5263,20 @@ fn move_rest_args_to_stack(array: Opnd, num_args: u32, ctx: &mut Context, asm: &
/// Pushes arguments from an array to the stack that are passed with a splat (i.e. *args)
/// It optimistically compiles to a static size that is the exact number of arguments
/// needed for the function.
-fn push_splat_args(required_args: u32, ctx: &mut Context, asm: &mut Assembler, ocb: &mut OutlinedCb, side_exit: Target) {
+fn push_splat_args(required_args: u32, jit: &mut JITState, ctx: &mut Context, asm: &mut Assembler, ocb: &mut OutlinedCb) {
asm.comment("push_splat_args");
let array_opnd = ctx.stack_opnd(0);
let array_reg = asm.load(array_opnd);
guard_object_is_array(
+ jit,
ctx,
asm,
+ ocb,
array_reg,
array_opnd.into(),
- counted_exit!(ocb, side_exit, send_splat_not_array),
+ exit_counter!(send_splat_not_array),
);
asm.comment("Get array length for embedded or heap");
@@ -5313,7 +5305,7 @@ fn push_splat_args(required_args: u32, ctx: &mut Context, asm: &mut Assembler, o
asm.comment("Side exit if length doesn't not equal remaining args");
asm.cmp(array_len_opnd, required_args.into());
- asm.jne(counted_exit!(ocb, side_exit, send_splatarray_length_not_equal));
+ asm.jne(counted_exit!(jit, ctx, ocb, send_splatarray_length_not_equal));
asm.comment("Check last argument is not ruby2keyword hash");
@@ -5327,7 +5319,8 @@ fn push_splat_args(required_args: u32, ctx: &mut Context, asm: &mut Assembler, o
guard_object_is_not_ruby2_keyword_hash(
asm,
last_array_value,
- counted_exit!(ocb, side_exit, send_splatarray_last_ruby_2_keywords));
+ counted_exit!(jit, ctx, ocb, send_splatarray_last_ruby_2_keywords),
+ );
asm.comment("Push arguments from array");
let array_opnd = ctx.stack_pop(1);
@@ -5418,9 +5411,6 @@ fn gen_send_iseq(
) -> CodegenStatus {
let mut argc = argc;
- // Create a side-exit to fall back to the interpreter
- let side_exit = get_side_exit(jit, ocb, ctx);
-
// When you have keyword arguments, there is an extra object that gets
// placed on the stack that represents a bitmap of the keywords that were not
// specified at the call site. We need to keep track of the fact that this
@@ -5771,7 +5761,7 @@ fn gen_send_iseq(
SIZEOF_VALUE_I32 * (num_locals + stack_max) + 2 * (RUBY_SIZEOF_CONTROL_FRAME as i32);
let stack_limit = asm.lea(ctx.sp_opnd(locals_offs as isize));
asm.cmp(CFP, stack_limit);
- asm.jbe(counted_exit!(ocb, side_exit, send_se_cf_overflow));
+ asm.jbe(counted_exit!(jit, ctx, ocb, send_se_cf_overflow));
// push_splat_args does stack manipulation so we can no longer side exit
if let Some(array_length) = splat_array_length {
@@ -5790,7 +5780,7 @@ fn gen_send_iseq(
// all the remaining arguments. In the generated code
// we test if this is true and if not side exit.
argc = argc - 1 + array_length as i32 + remaining_opt as i32;
- push_splat_args(array_length, ctx, asm, ocb, side_exit);
+ push_splat_args(array_length, jit, ctx, asm, ocb);
for _ in 0..remaining_opt {
// We need to push nil for the optional arguments
@@ -5846,7 +5836,7 @@ fn gen_send_iseq(
// from the array and move them to the stack.
let diff = (required_num - non_rest_arg_count) as u32;
// This moves the arguments onto the stack. But it doesn't modify the array.
- move_rest_args_to_stack(array, diff, ctx, asm, ocb, side_exit);
+ move_rest_args_to_stack(array, diff, jit, ctx, asm, ocb);
// We will now slice the array to give us a new array of the correct size
let ret = asm.ccall(rb_yjit_rb_ary_subseq_length as *const u8, vec![array, Opnd::UImm(diff as u64)]);
@@ -6036,14 +6026,14 @@ fn gen_send_iseq(
let arg0_opnd = ctx.stack_opnd(0);
// Only handle the case that you don't need to_ary conversion
- let not_array_exit = counted_exit!(ocb, side_exit, invokeblock_iseq_arg0_not_array);
- guard_object_is_array(ctx, asm, arg0_opnd, arg0_opnd.into(), not_array_exit);
+ let not_array_counter = exit_counter!(invokeblock_iseq_arg0_not_array);
+ guard_object_is_array(jit, ctx, asm, ocb, arg0_opnd, arg0_opnd.into(), not_array_counter);
// Only handle the case where the array length == ISEQ's lead_num (most common)
let arg0_len_opnd = get_array_len(asm, arg0_opnd);
let lead_num = unsafe { rb_get_iseq_body_param_lead_num(iseq) };
asm.cmp(arg0_len_opnd, lead_num.into());
- asm.jne(counted_exit!(ocb, side_exit, invokeblock_iseq_arg0_wrong_len));
+ asm.jne(counted_exit!(jit, ctx, ocb, invokeblock_iseq_arg0_wrong_len));
let arg0_reg = asm.load(arg0_opnd);
let array_opnd = get_array_ptr(asm, arg0_reg);
@@ -6315,9 +6305,6 @@ fn gen_send_general(
let comptime_recv = jit.peek_at_stack(ctx, recv_idx as isize);
let comptime_recv_klass = comptime_recv.class_of();
- // Guard that the receiver has the same class as the one from compile time
- let side_exit = get_side_exit(jit, ocb, ctx);
-
// Points to the receiver operand on the stack
let recv = ctx.stack_opnd(recv_idx);
let recv_opnd: YARVOpnd = recv.into();
@@ -6344,7 +6331,6 @@ fn gen_send_general(
gen_counter_incr!(asm, num_send_polymorphic);
}
- let megamorphic_exit = counted_exit!(ocb, side_exit, send_klass_megamorphic);
jit_guard_known_klass(
jit,
ctx,
@@ -6355,7 +6341,7 @@ fn gen_send_general(
recv_opnd,
comptime_recv,
SEND_MAX_DEPTH,
- megamorphic_exit,
+ exit_counter!(send_klass_megamorphic),
);
// Do method lookup
@@ -6382,7 +6368,7 @@ fn gen_send_general(
if flags & VM_CALL_FCALL == 0 {
// otherwise we need an ancestry check to ensure the receiver is valid to be called
// as protected
- jit_protected_callee_ancestry_guard(asm, ocb, cme, side_exit);
+ jit_protected_callee_ancestry_guard(jit, ctx, asm, ocb, cme);
}
}
_ => {
@@ -6467,7 +6453,6 @@ fn gen_send_general(
ivar_name,
recv,
recv_opnd,
- side_exit,
);
}
VM_METHOD_TYPE_ATTRSET => {
@@ -6562,17 +6547,17 @@ fn gen_send_general(
jit.assume_method_lookup_stable(ocb, cme);
- let (known_class, type_mismatch_exit) = {
+ let (known_class, type_mismatch_counter) = {
if compile_time_name.string_p() {
(
unsafe { rb_cString },
- counted_exit!(ocb, side_exit, send_send_chain_not_string),
+ exit_counter!(send_send_chain_not_string),
)
} else {
(
unsafe { rb_cSymbol },
- counted_exit!(ocb, side_exit, send_send_chain_not_sym),
+ exit_counter!(send_send_chain_not_sym),
)
}
};
@@ -6588,7 +6573,7 @@ fn gen_send_general(
name_opnd.into(),
compile_time_name,
2, // We have string or symbol, so max depth is 2
- type_mismatch_exit
+ type_mismatch_counter
);
// Need to do this here so we don't have too many live
@@ -6598,7 +6583,6 @@ fn gen_send_general(
let symbol_id_opnd = asm.ccall(rb_get_symbol_id as *const u8, vec![name_opnd]);
asm.comment("chain_guard_send");
- let chain_exit = counted_exit!(ocb, side_exit, send_send_chain);
asm.cmp(symbol_id_opnd, mid.into());
jit_chain_guard(
JCC_JNE,
@@ -6607,7 +6591,7 @@ fn gen_send_general(
asm,
ocb,
SEND_MAX_CHAIN_DEPTH,
- chain_exit,
+ exit_counter!(send_send_chain),
);
// We have changed the argc, flags, mid, and cme, so we need to re-enter the match
@@ -6823,10 +6807,8 @@ fn gen_invokeblock(
);
asm.comment("guard block_handler type");
- let side_exit = get_side_exit(jit, ocb, ctx);
let tag_opnd = asm.and(block_handler_opnd, 0x3.into()); // block_handler is a tagged pointer
asm.cmp(tag_opnd, 0x1.into()); // VM_BH_ISEQ_BLOCK_P
- let tag_changed_exit = counted_exit!(ocb, side_exit, invokeblock_tag_changed);
jit_chain_guard(
JCC_JNE,
jit,
@@ -6834,7 +6816,7 @@ fn gen_invokeblock(
asm,
ocb,
SEND_MAX_CHAIN_DEPTH,
- tag_changed_exit,
+ exit_counter!(invokeblock_tag_changed),
);
let comptime_captured = unsafe { ((comptime_handler.0 & !0x3) as *const rb_captured_block).as_ref().unwrap() };
@@ -6844,7 +6826,6 @@ fn gen_invokeblock(
let captured_opnd = asm.and(block_handler_opnd, Opnd::Imm(!0x3));
let iseq_opnd = asm.load(Opnd::mem(64, captured_opnd, SIZEOF_VALUE_I32 * 2));
asm.cmp(iseq_opnd, (comptime_iseq as usize).into());
- let block_changed_exit = counted_exit!(ocb, side_exit, invokeblock_iseq_block_changed);
jit_chain_guard(
JCC_JNE,
jit,
@@ -6852,7 +6833,7 @@ fn gen_invokeblock(
asm,
ocb,
SEND_MAX_CHAIN_DEPTH,
- block_changed_exit,
+ exit_counter!(invokeblock_iseq_block_changed),
);
gen_send_iseq(
@@ -6888,10 +6869,8 @@ fn gen_invokeblock(
);
asm.comment("guard block_handler type");
- let side_exit = get_side_exit(jit, ocb, ctx);
let tag_opnd = asm.and(block_handler_opnd, 0x3.into()); // block_handler is a tagged pointer
asm.cmp(tag_opnd, 0x3.into()); // VM_BH_IFUNC_P
- let tag_changed_exit = counted_exit!(ocb, side_exit, invokeblock_tag_changed);
jit_chain_guard(
JCC_JNE,
jit,
@@ -6899,7 +6878,7 @@ fn gen_invokeblock(
asm,
ocb,
SEND_MAX_CHAIN_DEPTH,
- tag_changed_exit,
+ exit_counter!(invokeblock_tag_changed),
);
// The cfunc may not be leaf
@@ -7021,9 +7000,6 @@ fn gen_invokesuper(
return CantCompile;
}
- // Guard that the receiver has the same class as the one from compile time
- let side_exit = get_side_exit(jit, ocb, ctx);
-
asm.comment("guard known me");
let lep_opnd = gen_get_lep(jit, asm);
let ep_me_opnd = Opnd::mem(
@@ -7034,7 +7010,7 @@ fn gen_invokesuper(
let me_as_value = VALUE(me as usize);
asm.cmp(ep_me_opnd, me_as_value.into());
- asm.jne(counted_exit!(ocb, side_exit, invokesuper_me_changed));
+ asm.jne(counted_exit!(jit, ctx, ocb, invokesuper_me_changed));
if block.is_none() {
// Guard no block passed
@@ -7050,7 +7026,7 @@ fn gen_invokesuper(
SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL,
);
asm.cmp(ep_specval_opnd, VM_BLOCK_HANDLER_NONE.into());
- asm.jne(counted_exit!(ocb, side_exit, invokesuper_block));
+ asm.jne(counted_exit!(jit, ctx, ocb, invokesuper_block));
}
// We need to assume that both our current method entry and the super
@@ -7083,12 +7059,10 @@ fn gen_leave(
// Only the return value should be on the stack
assert_eq!(1, ctx.get_stack_size());
- // Create a side-exit to fall back to the interpreter
- let side_exit = get_side_exit(jit, ocb, ctx);
let ocb_asm = Assembler::new();
// Check for interrupts
- gen_check_ints(asm, counted_exit!(ocb, side_exit, leave_se_interrupt));
+ gen_check_ints(jit, ctx, asm, ocb, exit_counter!(leave_se_interrupt));
ocb_asm.compile(ocb.unwrap());
// Pop the current frame (ec->cfp++)
@@ -7198,8 +7172,6 @@ fn gen_objtostring(
let comptime_recv = jit.peek_at_stack(ctx, 0);
if unsafe { RB_TYPE_P(comptime_recv, RUBY_T_STRING) } {
- let side_exit = get_side_exit(jit, ocb, ctx);
-
jit_guard_known_klass(
jit,
ctx,
@@ -7210,7 +7182,7 @@ fn gen_objtostring(
recv.into(),
comptime_recv,
SEND_MAX_DEPTH,
- side_exit,
+ None,
);
// No work needed. The string value is already on the top of the stack.
@@ -7476,8 +7448,6 @@ fn gen_opt_getconstant_path(
if !unsafe { (*ice).ic_cref }.is_null() {
// Cache is keyed on a certain lexical scope. Use the interpreter's cache.
- let side_exit = get_side_exit(jit, ocb, ctx);
-
let inline_cache = asm.load(Opnd::const_ptr(ic as *const u8));
// Call function to verify the cache. It doesn't allocate or call methods.
@@ -7489,7 +7459,7 @@ fn gen_opt_getconstant_path(
// Check the result. SysV only specifies one byte for _Bool return values,
// so it's important we only check one bit to ignore the higher bits in the register.
asm.test(ret_val, 1.into());
- asm.jz(counted_exit!(ocb, side_exit, opt_getinlinecache_miss));
+ asm.jz(counted_exit!(jit, ctx, ocb, opt_getinlinecache_miss));
let inline_cache = asm.load(Opnd::const_ptr(ic as *const u8));
@@ -7541,7 +7511,6 @@ fn gen_getblockparamproxy(
// A mirror of the interpreter code. Checking for the case
// where it's pushing rb_block_param_proxy.
- let side_exit = get_side_exit(jit, ocb, ctx);
// EP level
let level = jit.get_arg(1).as_u32();
@@ -7565,7 +7534,7 @@ fn gen_getblockparamproxy(
SIZEOF_VALUE_I32 * (VM_ENV_DATA_INDEX_FLAGS as i32),
);
asm.test(flag_check, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM.into());
- asm.jnz(counted_exit!(ocb, side_exit, gbpp_block_param_modified));
+ asm.jnz(counted_exit!(jit, ctx, ocb, gbpp_block_param_modified));
// Load the block handler for the current frame
// note, VM_ASSERT(VM_ENV_LOCAL_P(ep))
@@ -7585,7 +7554,7 @@ fn gen_getblockparamproxy(
asm,
ocb,
SEND_MAX_DEPTH,
- side_exit,
+ None,
);
jit_putobject(jit, ctx, asm, Qnil);
@@ -7603,7 +7572,7 @@ fn gen_getblockparamproxy(
asm,
ocb,
SEND_MAX_DEPTH,
- side_exit,
+ None,
);
// Push rb_block_param_proxy. It's a root, so no need to use jit_mov_gc_ptr.
@@ -7632,7 +7601,6 @@ fn gen_getblockparam(
// A mirror of the interpreter code. Checking for the case
// where it's pushing rb_block_param_proxy.
- let side_exit = get_side_exit(jit, ocb, ctx);
// Load environment pointer EP from CFP
let ep_opnd = gen_get_ep(asm, level);
@@ -7660,7 +7628,7 @@ fn gen_getblockparam(
asm.test(flags_opnd, VM_ENV_FLAG_WB_REQUIRED.into());
// if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
- asm.jnz(side_exit);
+ asm.jnz(side_exit(jit, ctx, ocb));
// Convert the block handler in to a proc
// call rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
@@ -8233,15 +8201,14 @@ mod tests {
#[test]
fn test_get_side_exit() {
let (mut jit, ctx, _, _, mut ocb) = setup_codegen();
- get_side_exit(&mut jit, &mut ocb, &ctx);
+ side_exit(&mut jit, &ctx, &mut ocb);
assert!(ocb.unwrap().get_write_pos() > 0);
}
#[test]
fn test_gen_check_ints() {
- let (_, _ctx, mut asm, _cb, mut ocb) = setup_codegen();
- let side_exit = ocb.unwrap().get_write_ptr().as_side_exit();
- gen_check_ints(&mut asm, side_exit);
+ let (mut jit, ctx, mut asm, _cb, mut ocb) = setup_codegen();
+ gen_check_ints(&mut jit, &ctx, &mut asm, &mut ocb, None);
}
#[test]
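Across the codegen.rs hunks the recurring call-site pattern is that guards now receive an Option<ExitCounter> and only pay for counter-increment code when --yjit-stats is enabled. A small stand-alone sketch of that pattern, using made-up stand-in types rather than YJIT's Assembler:

// Stand-in for YJIT's ExitCounter (a name plus a pointer in the real code).
#[derive(Clone)]
struct ExitCounterSketch { name: &'static str }

// Stand-in for get_option!(gen_stats).
fn stats_enabled() -> bool { true }

// Stand-in for side_exit(): returns a label/target for the outlined exit.
fn plain_exit() -> String { "side_exit".to_string() }

// Stand-in for counted_exit(): wraps the plain exit with a counter bump
// only when stats are enabled and a counter was actually supplied.
fn counted_exit_sketch(counter: Option<ExitCounterSketch>) -> String {
    let exit = plain_exit();
    match (stats_enabled(), counter) {
        (true, Some(c)) => format!("incr {}; jmp {}", c.name, exit),
        _ => exit,
    }
}

fn main() {
    // A guard that wants stats passes a counter; one that doesn't passes None.
    println!("{}", counted_exit_sketch(Some(ExitCounterSketch { name: "expandarray_not_array" })));
    println!("{}", counted_exit_sketch(None));
}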
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index f1b6217dd7..a8e704ca57 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -1516,6 +1516,16 @@ impl Context {
self.stack_size
}
+ /// Create a new Context instance with a given stack_size and sp_offset adjusted
+ /// accordingly. This is useful when you want to virtually rewind a stack_size for
+ /// generating a side exit while considering past sp_offset changes on gen_save_sp.
+ pub fn with_stack_size(&self, stack_size: u8) -> Context {
+ let mut ctx = self.clone();
+ ctx.sp_offset -= (ctx.get_stack_size() as isize - stack_size as isize) as i8;
+ ctx.stack_size = stack_size;
+ ctx
+ }
+
pub fn get_sp_offset(&self) -> i8 {
self.sp_offset
}
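To make the sp_offset arithmetic in with_stack_size concrete, here is a tiny self-contained version of the same computation (CtxSketch is a hypothetical stand-in for Context, used only for illustration):

// Rewinding stack_size by N also rewinds sp_offset by N, so an outlined exit
// built from the rewound context reconstructs SP as it was before the current
// instruction pushed its stack temps.
#[derive(Clone, Debug, PartialEq)]
struct CtxSketch { stack_size: u8, sp_offset: i8 }

impl CtxSketch {
    fn with_stack_size(&self, stack_size: u8) -> CtxSketch {
        let mut ctx = self.clone();
        ctx.sp_offset -= (ctx.stack_size as isize - stack_size as isize) as i8;
        ctx.stack_size = stack_size;
        ctx
    }
}

fn main() {
    // E.g. an instruction started at stack_size 3 and has pushed two temps since.
    let ctx = CtxSketch { stack_size: 5, sp_offset: 3 };
    // Rewinding to the pre-instruction stack_size drops sp_offset by the same amount.
    assert_eq!(ctx.with_stack_size(3), CtxSketch { stack_size: 3, sp_offset: 1 });
}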