path: root/yjit
author    Takashi Kokubun <takashikkbn@gmail.com>  2023-04-12 12:11:44 -0700
committer GitHub <noreply@github.com>              2023-04-12 12:11:44 -0700
commit    00bbd31edb068f6ee4670df53ae66c4ba0ecb9bf (patch)
tree      58f791094c1230b26bb62ee63ec3b0a7f000f240 /yjit
parent    0ac3f2c20e50c22d298238f602f25f84248ac7a5 (diff)
download  ruby-00bbd31edb068f6ee4670df53ae66c4ba0ecb9bf.tar.gz
YJIT: Let Assembler own Context (#7691)
* YJIT: Let Assembler own Context

* Update a comment

Co-authored-by: Maxime Chevalier-Boisvert <maximechevalierb@gmail.com>
Diffstat (limited to 'yjit')
-rw-r--r--  yjit/src/backend/arm64/mod.rs     2
-rw-r--r--  yjit/src/backend/ir.rs           30
-rw-r--r--  yjit/src/codegen.rs            1579
-rw-r--r--  yjit/src/core.rs                128
-rw-r--r--  yjit/src/utils.rs                21
5 files changed, 809 insertions, 951 deletions
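
Before the per-file hunks, a minimal sketch of the ownership change this commit makes. The types below are simplified stand-ins, not YJIT's real Context/Assembler definitions: previously, codegen helpers threaded a separate `&mut Context` alongside the `Assembler`; after this change the `Assembler` owns its `Context` (the new `ctx` field), so helpers take a single `&mut Assembler` and read or update `asm.ctx` directly.

// Sketch only; field names and logic are simplified for illustration.
#[derive(Default, Clone)]
struct Context {
    sp_offset: i32,
}

#[derive(Default)]
struct Assembler {
    ctx: Context, // field added by this commit (simplified)
}

// Old shape: the caller had to keep both values in sync and pass both around.
fn gen_save_sp_old(asm: &mut Assembler, ctx: &mut Context) {
    if ctx.sp_offset != 0 {
        // ... emit SP-save instructions through `asm` ...
        ctx.sp_offset = 0;
    }
    let _ = asm;
}

// New shape: one mutable borrow carries both the instruction stream and the context.
fn gen_save_sp_new(asm: &mut Assembler) {
    if asm.ctx.sp_offset != 0 {
        // ... emit SP-save instructions ...
        asm.ctx.sp_offset = 0;
    }
}

fn main() {
    let mut asm = Assembler::default();
    asm.ctx.sp_offset = 3;
    gen_save_sp_new(&mut asm);
    assert_eq!(asm.ctx.sp_offset, 0);
}
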
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index a1823c478e..4a052bd0ef 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -1210,7 +1210,7 @@ mod tests {
fn test_emit_cpop_all() {
let (mut asm, mut cb) = setup_asm();
- asm.cpop_all(&Context::default());
+ asm.cpop_all();
asm.compile_with_num_regs(&mut cb, 0);
}
diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
index a32750eae2..b5442a063b 100644
--- a/yjit/src/backend/ir.rs
+++ b/yjit/src/backend/ir.rs
@@ -896,6 +896,9 @@ pub struct Assembler
/// Names of labels
pub(super) label_names: Vec<String>,
+
+ /// Context for generating the current insn
+ pub ctx: Context,
}
impl Assembler
@@ -909,7 +912,8 @@ impl Assembler
insns: Vec::default(),
live_ranges: Vec::default(),
reg_temps: Vec::default(),
- label_names
+ label_names,
+ ctx: Context::default(),
}
}
@@ -1054,12 +1058,12 @@ impl Assembler
}
/// Allocate a register to a stack temp if available.
- pub fn alloc_temp_reg(&mut self, ctx: &mut Context, stack_idx: u8) {
+ pub fn alloc_temp_reg(&mut self, stack_idx: u8) {
if get_option!(num_temp_regs) == 0 {
return;
}
- assert_eq!(self.get_reg_temps(), ctx.get_reg_temps());
+ assert_eq!(self.get_reg_temps(), self.ctx.get_reg_temps());
let mut reg_temps = self.get_reg_temps();
// Allocate a register if there's no conflict.
@@ -1068,17 +1072,17 @@ impl Assembler
} else {
reg_temps.set(stack_idx, true);
self.set_reg_temps(reg_temps);
- ctx.set_reg_temps(reg_temps);
+ self.ctx.set_reg_temps(reg_temps);
}
}
/// Spill all live stack temps from registers to the stack
- pub fn spill_temps(&mut self, ctx: &mut Context) {
- assert_eq!(self.get_reg_temps(), ctx.get_reg_temps());
+ pub fn spill_temps(&mut self) {
+ assert_eq!(self.get_reg_temps(), self.ctx.get_reg_temps());
// Forget registers above the stack top
let mut reg_temps = self.get_reg_temps();
- for stack_idx in ctx.get_stack_size()..MAX_REG_TEMPS {
+ for stack_idx in self.ctx.get_stack_size()..MAX_REG_TEMPS {
reg_temps.set(stack_idx, false);
}
self.set_reg_temps(reg_temps);
@@ -1086,17 +1090,17 @@ impl Assembler
// Spill live stack temps
if self.get_reg_temps() != RegTemps::default() {
self.comment(&format!("spill_temps: {:08b} -> {:08b}", self.get_reg_temps().as_u8(), RegTemps::default().as_u8()));
- for stack_idx in 0..u8::min(MAX_REG_TEMPS, ctx.get_stack_size()) {
+ for stack_idx in 0..u8::min(MAX_REG_TEMPS, self.ctx.get_stack_size()) {
if self.get_reg_temps().get(stack_idx) {
- let idx = ctx.get_stack_size() - 1 - stack_idx;
- self.spill_temp(ctx.stack_opnd(idx.into()));
+ let idx = self.ctx.get_stack_size() - 1 - stack_idx;
+ self.spill_temp(self.ctx.stack_opnd(idx.into()));
}
}
}
// Every stack temp should have been spilled
assert_eq!(self.get_reg_temps(), RegTemps::default());
- ctx.set_reg_temps(self.get_reg_temps());
+ self.ctx.set_reg_temps(self.get_reg_temps());
}
/// Sets the out field on the various instructions that require allocated
@@ -1500,12 +1504,12 @@ impl Assembler {
out
}
- pub fn cpop_all(&mut self, ctx: &Context) {
+ pub fn cpop_all(&mut self) {
self.push_insn(Insn::CPopAll);
// Re-enable ccall's RegTemps assertion disabled by cpush_all.
// cpush_all + cpop_all preserve all stack temp registers, so it's safe.
- self.set_reg_temps(ctx.get_reg_temps());
+ self.set_reg_temps(self.ctx.get_reg_temps());
}
pub fn cpop_into(&mut self, opnd: Opnd) {
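
The codegen.rs hunks that follow apply the same idea to every instruction handler: the `ctx: &mut Context` parameter disappears from `InsnGenFn`, and stack bookkeeping goes through Assembler methods (`asm.stack_pop`, `asm.stack_push`, ...) that mutate the owned `asm.ctx`. A minimal sketch of that shape, again with simplified stand-in types rather than the real JITState/Assembler:

// Sketch only; the real stack_pop/stack_push also return operands and track types.
#[derive(Default)]
struct Context {
    stack_size: u8,
}

#[derive(Default)]
struct Assembler {
    ctx: Context,
}

impl Assembler {
    // Pops n values by updating the owned context instead of a caller-supplied one.
    fn stack_pop(&mut self, n: u8) {
        self.ctx.stack_size -= n;
    }
}

enum CodegenStatus {
    KeepCompiling,
}

// New-style handler: no Context parameter, only the Assembler.
fn gen_pop(asm: &mut Assembler) -> CodegenStatus {
    asm.stack_pop(1);
    CodegenStatus::KeepCompiling
}

fn main() {
    let mut asm = Assembler::default();
    asm.ctx.stack_size = 2;
    let _ = gen_pop(&mut asm);
    assert_eq!(asm.ctx.stack_size, 1);
}
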
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index e5e6cca563..7c302707db 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -35,7 +35,6 @@ enum CodegenStatus {
/// Code generation function signature
type InsnGenFn = fn(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus;
@@ -298,7 +297,7 @@ macro_rules! gen_counter_incr {
}
macro_rules! counted_exit {
- ($jit:tt, $ctx:tt, $ocb:tt, $counter_name:ident) => {
+ ($jit:tt, $ctx:expr, $ocb:tt, $counter_name:ident) => {
counted_exit($jit, $ctx, $ocb, exit_counter!($counter_name))
};
}
@@ -320,16 +319,16 @@ fn jit_save_pc(jit: &JITState, asm: &mut Assembler) {
/// This realigns the interpreter SP with the JIT SP
/// Note: this will change the current value of REG_SP,
/// which could invalidate memory operands
-fn gen_save_sp(asm: &mut Assembler, ctx: &mut Context) {
- asm.spill_temps(ctx);
- if ctx.get_sp_offset() != 0 {
+fn gen_save_sp(asm: &mut Assembler) {
+ asm.spill_temps();
+ if asm.ctx.get_sp_offset() != 0 {
asm.comment("save SP to CFP");
- let stack_pointer = ctx.sp_opnd(0);
+ let stack_pointer = asm.ctx.sp_opnd(0);
let sp_addr = asm.lea(stack_pointer);
asm.mov(SP, sp_addr);
let cfp_sp_opnd = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP);
asm.mov(cfp_sp_opnd, SP);
- ctx.set_sp_offset(0);
+ asm.ctx.set_sp_offset(0);
}
}
@@ -340,16 +339,15 @@ fn gen_save_sp(asm: &mut Assembler, ctx: &mut Context) {
/// - Perform Ruby method call
fn jit_prepare_routine_call(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler
) {
jit.record_boundary_patch_point = true;
jit_save_pc(jit, asm);
- gen_save_sp(asm, ctx);
+ gen_save_sp(asm);
// In case the routine calls Ruby methods, it can set local variables
// through Kernel#binding and other means.
- ctx.clear_local_types();
+ asm.ctx.clear_local_types();
}
/// Record the current codeblock write position for rewriting into a jump into
@@ -471,7 +469,7 @@ fn gen_code_for_exit_from_stub(ocb: &mut OutlinedCb) -> CodePtr {
}
/// Generate an exit to return to the interpreter
-fn gen_exit(exit_pc: *mut VALUE, ctx: &Context, asm: &mut Assembler) {
+fn gen_exit(exit_pc: *mut VALUE, asm: &mut Assembler) {
#[cfg(all(feature = "disasm", not(test)))]
{
let opcode = unsafe { rb_vm_insn_addr2opcode((*exit_pc).as_ptr()) };
@@ -479,12 +477,12 @@ fn gen_exit(exit_pc: *mut VALUE, ctx: &Context, asm: &mut Assembler) {
}
// Spill stack temps before returning to the interpreter
- asm.spill_temps(&mut ctx.clone());
+ asm.spill_temps();
// Generate the code to exit to the interpreters
// Write the adjusted SP back into the CFP
- if ctx.get_sp_offset() != 0 {
- let sp_opnd = asm.lea(ctx.sp_opnd(0));
+ if asm.ctx.get_sp_offset() != 0 {
+ let sp_opnd = asm.lea(asm.ctx.sp_opnd(0));
asm.mov(
Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP),
sp_opnd
@@ -528,9 +526,10 @@ fn gen_outlined_exit(exit_pc: *mut VALUE, ctx: &Context, ocb: &mut OutlinedCb) -
let mut cb = ocb.unwrap();
let exit_code = cb.get_write_ptr();
let mut asm = Assembler::new();
+ asm.ctx = ctx.clone();
asm.set_reg_temps(ctx.get_reg_temps());
- gen_exit(exit_pc, ctx, &mut asm);
+ gen_exit(exit_pc, &mut asm);
asm.compile(&mut cb);
@@ -779,7 +778,6 @@ pub fn gen_entry_prologue(cb: &mut CodeBlock, ocb: &mut OutlinedCb, iseq: IseqPt
// Warning: this function clobbers REG0
fn gen_check_ints(
jit: &mut JITState,
- ctx: &Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
counter: Option<ExitCounter>,
@@ -793,20 +791,19 @@ fn gen_check_ints(
let interrupt_flag = asm.load(Opnd::mem(32, EC, RUBY_OFFSET_EC_INTERRUPT_FLAG));
asm.test(interrupt_flag, interrupt_flag);
- asm.jnz(counted_exit(jit, ctx, ocb, counter));
+ asm.jnz(counted_exit(jit, &asm.ctx, ocb, counter));
}
// Generate a stubbed unconditional jump to the next bytecode instruction.
// Blocks that are part of a guard chain can use this to share the same successor.
fn jump_to_next_insn(
jit: &mut JITState,
- current_context: &Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) {
// Reset the depth since in current usages we only ever jump to to
// chain_depth > 0 from the same instruction.
- let mut reset_depth = current_context.clone();
+ let mut reset_depth = asm.ctx.clone();
reset_depth.reset_chain_depth();
let jump_block = BlockId {
@@ -838,7 +835,7 @@ pub fn gen_single_block(
ocb: &mut OutlinedCb,
) -> Result<BlockRef, ()> {
// Limit the number of specialized versions for this block
- let mut ctx = limit_block_versions(blockid, start_ctx);
+ let ctx = limit_block_versions(blockid, start_ctx);
verify_blockid(blockid);
assert!(!(blockid.idx == 0 && ctx.get_stack_size() > 0));
@@ -864,14 +861,15 @@ pub fn gen_single_block(
// Create a backend assembler instance
let mut asm = Assembler::new();
+ asm.ctx = ctx;
#[cfg(feature = "disasm")]
if get_option_ref!(dump_disasm).is_some() {
let blockid_idx = blockid.idx;
- let chain_depth = if ctx.get_chain_depth() > 0 { format!(", chain_depth: {}", ctx.get_chain_depth()) } else { "".to_string() };
+ let chain_depth = if asm.ctx.get_chain_depth() > 0 { format!(", chain_depth: {}", asm.ctx.get_chain_depth()) } else { "".to_string() };
asm.comment(&format!("Block: {} (ISEQ offset: {}{})", iseq_get_location(blockid.iseq, blockid_idx), blockid_idx, chain_depth));
}
- asm.set_reg_temps(ctx.get_reg_temps());
+ asm.set_reg_temps(asm.ctx.get_reg_temps());
// For each instruction to compile
// NOTE: could rewrite this loop with a std::iter::Iterator
@@ -887,7 +885,7 @@ pub fn gen_single_block(
// if we run into it. This is necessary because we want to invalidate based on the
// instruction's index.
if opcode == YARVINSN_opt_getconstant_path.as_usize() && insn_idx > jit.starting_insn_idx {
- jump_to_next_insn(&mut jit, &ctx, &mut asm, ocb);
+ jump_to_next_insn(&mut jit, &mut asm, ocb);
break;
}
@@ -895,28 +893,28 @@ pub fn gen_single_block(
jit.insn_idx = insn_idx;
jit.opcode = opcode;
jit.pc = pc;
- jit.stack_size_for_pc = ctx.get_stack_size();
+ jit.stack_size_for_pc = asm.ctx.get_stack_size();
jit.side_exit_for_pc.clear();
// stack_pop doesn't immediately deallocate a register for stack temps,
// but it's safe to do so at this instruction boundary.
- assert_eq!(asm.get_reg_temps(), ctx.get_reg_temps());
- for stack_idx in ctx.get_stack_size()..MAX_REG_TEMPS {
- ctx.dealloc_temp_reg(stack_idx);
+ assert_eq!(asm.get_reg_temps(), asm.ctx.get_reg_temps());
+ for stack_idx in asm.ctx.get_stack_size()..MAX_REG_TEMPS {
+ asm.ctx.dealloc_temp_reg(stack_idx);
}
- asm.set_reg_temps(ctx.get_reg_temps());
+ asm.set_reg_temps(asm.ctx.get_reg_temps());
// If previous instruction requested to record the boundary
if jit.record_boundary_patch_point {
// Generate an exit to this instruction and record it
- let exit_pos = gen_outlined_exit(jit.pc, &ctx, ocb);
+ let exit_pos = gen_outlined_exit(jit.pc, &asm.ctx, ocb);
record_global_inval_patch(&mut asm, exit_pos);
jit.record_boundary_patch_point = false;
}
// In debug mode, verify our existing assumption
if cfg!(debug_assertions) && get_option!(verify_ctx) && jit.at_current_insn() {
- verify_ctx(&jit, &ctx);
+ verify_ctx(&jit, &asm.ctx);
}
// Lookup the codegen function for this instruction
@@ -928,16 +926,16 @@ pub fn gen_single_block(
gen_counter_incr!(asm, exec_instruction);
// Add a comment for the name of the YARV instruction
- asm.comment(&format!("Insn: {} (stack_size: {})", insn_name(opcode), ctx.get_stack_size()));
+ asm.comment(&format!("Insn: {} (stack_size: {})", insn_name(opcode), asm.ctx.get_stack_size()));
// If requested, dump instructions for debugging
if get_option!(dump_insns) {
println!("compiling {}", insn_name(opcode));
- print_str(&mut asm, &ctx, &format!("executing {}", insn_name(opcode)));
+ print_str(&mut asm, &format!("executing {}", insn_name(opcode)));
}
// Call the code generation function
- status = gen_fn(&mut jit, &mut ctx, &mut asm, ocb);
+ status = gen_fn(&mut jit, &mut asm, ocb);
}
// If we can't compile this instruction
@@ -949,7 +947,8 @@ pub fn gen_single_block(
// Rewind stack_size using ctx.with_stack_size to allow stack_size changes
// before you return CantCompile.
- gen_exit(jit.pc, &ctx.with_stack_size(jit.stack_size_for_pc), &mut asm);
+ asm.ctx = asm.ctx.with_stack_size(jit.stack_size_for_pc);
+ gen_exit(jit.pc, &mut asm);
// If this is the first instruction in the block, then
// the entry address is the address for block_entry_exit
@@ -962,7 +961,7 @@ pub fn gen_single_block(
// For now, reset the chain depth after each instruction as only the
// first instruction in the block can concern itself with the depth.
- ctx.reset_chain_depth();
+ asm.ctx.reset_chain_depth();
// Move to the next instruction to compile
insn_idx += insn_len(opcode) as u16;
@@ -998,7 +997,6 @@ pub fn gen_single_block(
fn gen_nop(
_jit: &mut JITState,
- _ctx: &mut Context,
_asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -1008,25 +1006,23 @@ fn gen_nop(
fn gen_pop(
_jit: &mut JITState,
- ctx: &mut Context,
- _asm: &mut Assembler,
+ asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Decrement SP
- ctx.stack_pop(1);
+ asm.stack_pop(1);
KeepCompiling
}
fn gen_dup(
_jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
- let dup_val = ctx.stack_opnd(0);
- let (mapping, tmp_type) = ctx.get_opnd_mapping(dup_val.into());
+ let dup_val = asm.ctx.stack_opnd(0);
+ let (mapping, tmp_type) = asm.ctx.get_opnd_mapping(dup_val.into());
- let loc0 = ctx.stack_push_mapping(asm, (mapping, tmp_type));
+ let loc0 = asm.stack_push_mapping((mapping, tmp_type));
asm.mov(loc0, dup_val);
KeepCompiling
@@ -1035,7 +1031,6 @@ fn gen_dup(
// duplicate stack top n elements
fn gen_dupn(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -1046,16 +1041,16 @@ fn gen_dupn(
return CantCompile;
}
- let opnd1: Opnd = ctx.stack_opnd(1);
- let opnd0: Opnd = ctx.stack_opnd(0);
+ let opnd1: Opnd = asm.ctx.stack_opnd(1);
+ let opnd0: Opnd = asm.ctx.stack_opnd(0);
- let mapping1 = ctx.get_opnd_mapping(opnd1.into());
- let mapping0 = ctx.get_opnd_mapping(opnd0.into());
+ let mapping1 = asm.ctx.get_opnd_mapping(opnd1.into());
+ let mapping0 = asm.ctx.get_opnd_mapping(opnd0.into());
- let dst1: Opnd = ctx.stack_push_mapping(asm, mapping1);
+ let dst1: Opnd = asm.stack_push_mapping(mapping1);
asm.mov(dst1, opnd1);
- let dst0: Opnd = ctx.stack_push_mapping(asm, mapping0);
+ let dst0: Opnd = asm.stack_push_mapping(mapping0);
asm.mov(dst0, opnd0);
KeepCompiling
@@ -1063,56 +1058,51 @@ fn gen_dupn(
// Swap top 2 stack entries
fn gen_swap(
- jit: &mut JITState,
- ctx: &mut Context,
+ _jit: &mut JITState,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
- stack_swap(jit, ctx, asm, 0, 1);
+ stack_swap(asm, 0, 1);
KeepCompiling
}
fn stack_swap(
- _jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
offset0: u16,
offset1: u16,
) {
- let stack0_mem = ctx.stack_opnd(offset0 as i32);
- let stack1_mem = ctx.stack_opnd(offset1 as i32);
+ let stack0_mem = asm.ctx.stack_opnd(offset0 as i32);
+ let stack1_mem = asm.ctx.stack_opnd(offset1 as i32);
- let mapping0 = ctx.get_opnd_mapping(stack0_mem.into());
- let mapping1 = ctx.get_opnd_mapping(stack1_mem.into());
+ let mapping0 = asm.ctx.get_opnd_mapping(stack0_mem.into());
+ let mapping1 = asm.ctx.get_opnd_mapping(stack1_mem.into());
let stack0_reg = asm.load(stack0_mem);
let stack1_reg = asm.load(stack1_mem);
asm.mov(stack0_mem, stack1_reg);
asm.mov(stack1_mem, stack0_reg);
- ctx.set_opnd_mapping(stack0_mem.into(), mapping1);
- ctx.set_opnd_mapping(stack1_mem.into(), mapping0);
+ asm.ctx.set_opnd_mapping(stack0_mem.into(), mapping1);
+ asm.ctx.set_opnd_mapping(stack1_mem.into(), mapping0);
}
fn gen_putnil(
- jit: &mut JITState,
- ctx: &mut Context,
+ _jit: &mut JITState,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
- jit_putobject(jit, ctx, asm, Qnil);
+ jit_putobject(asm, Qnil);
KeepCompiling
}
-fn jit_putobject(_jit: &mut JITState, ctx: &mut Context, asm: &mut Assembler, arg: VALUE) {
+fn jit_putobject(asm: &mut Assembler, arg: VALUE) {
let val_type: Type = Type::from(arg);
- let stack_top = ctx.stack_push(asm, val_type);
+ let stack_top = asm.stack_push(val_type);
asm.mov(stack_top, arg.into());
}
fn gen_putobject_int2fix(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -1123,31 +1113,29 @@ fn gen_putobject_int2fix(
1
};
- jit_putobject(jit, ctx, asm, VALUE::fixnum_from_usize(cst_val));
+ jit_putobject(asm, VALUE::fixnum_from_usize(cst_val));
KeepCompiling
}
fn gen_putobject(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let arg: VALUE = jit.get_arg(0);
- jit_putobject(jit, ctx, asm, arg);
+ jit_putobject(asm, arg);
KeepCompiling
}
fn gen_putself(
_jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Write it on the stack
- let stack_top = ctx.stack_push_self(asm);
+ let stack_top = asm.stack_push_self();
asm.mov(
stack_top,
Opnd::mem(VALUE_BITS, CFP, RUBY_OFFSET_CFP_SELF)
@@ -1158,14 +1146,13 @@ fn gen_putself(
fn gen_putspecialobject(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let object_type = jit.get_arg(0).as_usize();
if object_type == VM_SPECIAL_OBJECT_VMCORE.as_usize() {
- let stack_top = ctx.stack_push(asm, Type::UnknownHeap);
+ let stack_top = asm.stack_push(Type::UnknownHeap);
let frozen_core = unsafe { rb_mRubyVMFrozenCore };
asm.mov(stack_top, frozen_core.into());
KeepCompiling
@@ -1179,21 +1166,20 @@ fn gen_putspecialobject(
// set Nth stack entry to stack top
fn gen_setn(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let n = jit.get_arg(0).as_usize();
- let top_val = ctx.stack_opnd(0);
- let dst_opnd = ctx.stack_opnd(n.try_into().unwrap());
+ let top_val = asm.ctx.stack_opnd(0);
+ let dst_opnd = asm.ctx.stack_opnd(n.try_into().unwrap());
asm.mov(
dst_opnd,
top_val
);
- let mapping = ctx.get_opnd_mapping(top_val.into());
- ctx.set_opnd_mapping(dst_opnd.into(), mapping);
+ let mapping = asm.ctx.get_opnd_mapping(top_val.into());
+ asm.ctx.set_opnd_mapping(dst_opnd.into(), mapping);
KeepCompiling
}
@@ -1201,15 +1187,14 @@ fn gen_setn(
// get nth stack value, then push it
fn gen_topn(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let n = jit.get_arg(0).as_usize();
- let top_n_val = ctx.stack_opnd(n.try_into().unwrap());
- let mapping = ctx.get_opnd_mapping(top_n_val.into());
- let loc0 = ctx.stack_push_mapping(asm, mapping);
+ let top_n_val = asm.ctx.stack_opnd(n.try_into().unwrap());
+ let mapping = asm.ctx.get_opnd_mapping(top_n_val.into());
+ let loc0 = asm.stack_push_mapping(mapping);
asm.mov(loc0, top_n_val);
KeepCompiling
@@ -1218,25 +1203,23 @@ fn gen_topn(
// Pop n values off the stack
fn gen_adjuststack(
jit: &mut JITState,
- ctx: &mut Context,
- _cb: &mut Assembler,
+ asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let n = jit.get_arg(0).as_usize();
- ctx.stack_pop(n);
+ asm.stack_pop(n);
KeepCompiling
}
fn gen_opt_plus(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- let two_fixnums = match ctx.two_fixnums_on_stack(jit) {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
Some(two_fixnums) => two_fixnums,
None => {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
};
@@ -1247,38 +1230,37 @@ fn gen_opt_plus(
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb);
+ guard_two_fixnums(jit, asm, ocb);
// Get the operands from the stack
- let arg1 = ctx.stack_pop(1);
- let arg0 = ctx.stack_pop(1);
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
// Add arg0 + arg1 and test for overflow
let arg0_untag = asm.sub(arg0, Opnd::Imm(1));
let out_val = asm.add(arg0_untag, arg1);
- asm.jo(side_exit(jit, ctx, ocb));
+ asm.jo(side_exit(jit, &asm.ctx, ocb));
// Push the output on the stack
- let dst = ctx.stack_push(asm, Type::Fixnum);
+ let dst = asm.stack_push(Type::Fixnum);
asm.mov(dst, out_val);
KeepCompiling
} else {
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
}
// new array initialized from top N values
fn gen_newarray(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let n = jit.get_arg(0).as_u32();
// Save the PC and SP because we are allocating
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// If n is 0, then elts is never going to be read, so we can just pass null
let values_ptr = if n == 0 {
@@ -1286,7 +1268,7 @@ fn gen_newarray(
} else {
asm.comment("load pointer to array elements");
let offset_magnitude = (SIZEOF_VALUE as u32) * n;
- let values_opnd = ctx.sp_opnd(-(offset_magnitude as isize));
+ let values_opnd = asm.ctx.sp_opnd(-(offset_magnitude as isize));
asm.lea(values_opnd)
};
@@ -1300,8 +1282,8 @@ fn gen_newarray(
]
);
- ctx.stack_pop(n.as_usize());
- let stack_ret = ctx.stack_push(asm, Type::CArray);
+ asm.stack_pop(n.as_usize());
+ let stack_ret = asm.stack_push(Type::CArray);
asm.mov(stack_ret, new_ary);
KeepCompiling
@@ -1310,14 +1292,13 @@ fn gen_newarray(
// dup array
fn gen_duparray(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let ary = jit.get_arg(0);
// Save the PC and SP because we are allocating
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// call rb_ary_resurrect(VALUE ary);
let new_ary = asm.ccall(
@@ -1325,7 +1306,7 @@ fn gen_duparray(
vec![ary.into()],
);
- let stack_ret = ctx.stack_push(asm, Type::CArray);
+ let stack_ret = asm.stack_push(Type::CArray);
asm.mov(stack_ret, new_ary);
KeepCompiling
@@ -1334,19 +1315,18 @@ fn gen_duparray(
// dup hash
fn gen_duphash(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let hash = jit.get_arg(0);
// Save the PC and SP because we are allocating
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// call rb_hash_resurrect(VALUE hash);
let hash = asm.ccall(rb_hash_resurrect as *const u8, vec![hash.into()]);
- let stack_ret = ctx.stack_push(asm, Type::Hash);
+ let stack_ret = asm.stack_push(Type::Hash);
asm.mov(stack_ret, hash);
KeepCompiling
@@ -1355,7 +1335,6 @@ fn gen_duphash(
// call to_a on the array on the stack
fn gen_splatarray(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -1363,15 +1342,15 @@ fn gen_splatarray(
// Save the PC and SP because the callee may allocate
// Note that this modifies REG_SP, which is why we do it first
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// Get the operands from the stack
- let ary_opnd = ctx.stack_pop(1);
+ let ary_opnd = asm.stack_pop(1);
// Call rb_vm_splat_array(flag, ary)
let ary = asm.ccall(rb_vm_splat_array as *const u8, vec![flag.into(), ary_opnd]);
- let stack_ret = ctx.stack_push(asm, Type::TArray);
+ let stack_ret = asm.stack_push(Type::TArray);
asm.mov(stack_ret, ary);
KeepCompiling
@@ -1380,22 +1359,21 @@ fn gen_splatarray(
// concat two arrays
fn gen_concatarray(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Save the PC and SP because the callee may allocate
// Note that this modifies REG_SP, which is why we do it first
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// Get the operands from the stack
- let ary2st_opnd = ctx.stack_pop(1);
- let ary1_opnd = ctx.stack_pop(1);
+ let ary2st_opnd = asm.stack_pop(1);
+ let ary1_opnd = asm.stack_pop(1);
// Call rb_vm_concat_array(ary1, ary2st)
let ary = asm.ccall(rb_vm_concat_array as *const u8, vec![ary1_opnd, ary2st_opnd]);
- let stack_ret = ctx.stack_push(asm, Type::TArray);
+ let stack_ret = asm.stack_push(Type::TArray);
asm.mov(stack_ret, ary);
KeepCompiling
@@ -1404,27 +1382,26 @@ fn gen_concatarray(
// new range initialized from top 2 values
fn gen_newrange(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let flag = jit.get_arg(0).as_usize();
// rb_range_new() allocates and can raise
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// val = rb_range_new(low, high, (int)flag);
let range_opnd = asm.ccall(
rb_range_new as *const u8,
vec![
- ctx.stack_opnd(1),
- ctx.stack_opnd(0),
+ asm.ctx.stack_opnd(1),
+ asm.ctx.stack_opnd(0),
flag.into()
]
);
- ctx.stack_pop(2);
- let stack_ret = ctx.stack_push(asm, Type::UnknownHeap);
+ asm.stack_pop(2);
+ let stack_ret = asm.stack_push(Type::UnknownHeap);
asm.mov(stack_ret, range_opnd);
KeepCompiling
@@ -1432,14 +1409,13 @@ fn gen_newrange(
fn guard_object_is_heap(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
object: Opnd,
object_opnd: YARVOpnd,
counter: Option<ExitCounter>,
) {
- let object_type = ctx.get_opnd_type(object_opnd);
+ let object_type = asm.ctx.get_opnd_type(object_opnd);
if object_type.is_heap() {
return;
}
@@ -1448,27 +1424,26 @@ fn guard_object_is_heap(
// Test that the object is not an immediate
asm.test(object, (RUBY_IMMEDIATE_MASK as u64).into());
- asm.jnz(counted_exit(jit, ctx, ocb, counter.clone()));
+ asm.jnz(counted_exit(jit, &asm.ctx, ocb, counter.clone()));
// Test that the object is not false
asm.cmp(object, Qfalse.into());
- asm.je(counted_exit(jit, ctx, ocb, counter));
+ asm.je(counted_exit(jit, &asm.ctx, ocb, counter));
if object_type.diff(Type::UnknownHeap) != TypeDiff::Incompatible {
- ctx.upgrade_opnd_type(object_opnd, Type::UnknownHeap);
+ asm.ctx.upgrade_opnd_type(object_opnd, Type::UnknownHeap);
}
}
fn guard_object_is_array(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
object: Opnd,
object_opnd: YARVOpnd,
counter: Option<ExitCounter>,
) {
- let object_type = ctx.get_opnd_type(object_opnd);
+ let object_type = asm.ctx.get_opnd_type(object_opnd);
if object_type.is_array() {
return;
}
@@ -1477,7 +1452,7 @@ fn guard_object_is_array(
Opnd::Reg(_) => object,
_ => asm.load(object),
};
- guard_object_is_heap(jit, ctx, asm, ocb, object_reg, object_opnd, counter.clone());
+ guard_object_is_heap(jit, asm, ocb, object_reg, object_opnd, counter.clone());
asm.comment("guard object is array");
@@ -1487,23 +1462,22 @@ fn guard_object_is_array(
// Compare the result with T_ARRAY
asm.cmp(flags_opnd, (RUBY_T_ARRAY as u64).into());
- asm.jne(counted_exit(jit, ctx, ocb, counter));
+ asm.jne(counted_exit(jit, &asm.ctx, ocb, counter));
if object_type.diff(Type::TArray) != TypeDiff::Incompatible {
- ctx.upgrade_opnd_type(object_opnd, Type::TArray);
+ asm.ctx.upgrade_opnd_type(object_opnd, Type::TArray);
}
}
fn guard_object_is_string(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
object: Opnd,
object_opnd: YARVOpnd,
counter: Option<ExitCounter>,
) {
- let object_type = ctx.get_opnd_type(object_opnd);
+ let object_type = asm.ctx.get_opnd_type(object_opnd);
if object_type.is_string() {
return;
}
@@ -1512,7 +1486,7 @@ fn guard_object_is_string(
Opnd::Reg(_) => object,
_ => asm.load(object),
};
- guard_object_is_heap(jit, ctx, asm, ocb, object_reg, object_opnd, counter.clone());
+ guard_object_is_heap(jit, asm, ocb, object_reg, object_opnd, counter.clone());
asm.comment("guard object is string");
@@ -1522,10 +1496,10 @@ fn guard_object_is_string(
// Compare the result with T_STRING
asm.cmp(flags_reg, Opnd::UImm(RUBY_T_STRING as u64));
- asm.jne(counted_exit(jit, ctx, ocb, counter));
+ asm.jne(counted_exit(jit, &asm.ctx, ocb, counter));
if object_type.diff(Type::TString) != TypeDiff::Incompatible {
- ctx.upgrade_opnd_type(object_opnd, Type::TString);
+ asm.ctx.upgrade_opnd_type(object_opnd, Type::TString);
}
}
@@ -1566,7 +1540,6 @@ fn guard_object_is_not_ruby2_keyword_hash(
// push enough nils onto the stack to fill out an array
fn gen_expandarray(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -1586,16 +1559,16 @@ fn gen_expandarray(
return CantCompile;
}
- let array_opnd = ctx.stack_opnd(0);
+ let array_opnd = asm.ctx.stack_opnd(0);
// num is the number of requested values. If there aren't enough in the
// array then we're going to push on nils.
- if ctx.get_opnd_type(array_opnd.into()) == Type::Nil {
- ctx.stack_pop(1); // pop after using the type info
+ if asm.ctx.get_opnd_type(array_opnd.into()) == Type::Nil {
+ asm.stack_pop(1); // pop after using the type info
// special case for a, b = nil pattern
// push N nils onto the stack
for _ in 0..num {
- let push_opnd = ctx.stack_push(asm, Type::Nil);
+ let push_opnd = asm.stack_push(Type::Nil);
asm.mov(push_opnd, Qnil.into());
}
return KeepCompiling;
@@ -1604,14 +1577,13 @@ fn gen_expandarray(
// Move the array from the stack and check that it's an array.
guard_object_is_array(
jit,
- ctx,
asm,
ocb,
array_opnd,
array_opnd.into(),
exit_counter!(expandarray_not_array),
);
- let array_opnd = ctx.stack_pop(1); // pop after using the type info
+ let array_opnd = asm.stack_pop(1); // pop after using the type info
// If we don't actually want any values, then just return.
if num == 0 {
@@ -1624,7 +1596,7 @@ fn gen_expandarray(
// Only handle the case where the number of values in the array is greater
// than or equal to the number of values requested.
asm.cmp(array_len_opnd, num.into());
- asm.jl(counted_exit!(jit, ctx, ocb, expandarray_rhs_too_small));
+ asm.jl(counted_exit!(jit, &asm.ctx, ocb, expandarray_rhs_too_small));
// Load the address of the embedded array into REG1.
// (struct RArray *)(obj)->as.ary
@@ -1644,7 +1616,7 @@ fn gen_expandarray(
// Loop backward through the array and push each element onto the stack.
for i in (0..num).rev() {
- let top = ctx.stack_push(asm, Type::Unknown);
+ let top = asm.stack_push(Type::Unknown);
let offset = i32::try_from(i * SIZEOF_VALUE).unwrap();
asm.mov(top, Opnd::mem(64, ary_opnd, offset));
}
@@ -1714,7 +1686,6 @@ fn gen_get_lep(jit: &mut JITState, asm: &mut Assembler) -> Opnd {
fn gen_getlocal_generic(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ep_offset: u32,
level: u32,
@@ -1730,9 +1701,9 @@ fn gen_getlocal_generic(
// Write the local at SP
let stack_top = if level == 0 {
let local_idx = ep_offset_to_local_idx(jit.get_iseq(), ep_offset);
- ctx.stack_push_local(asm, local_idx.as_usize())
+ asm.stack_push_local(local_idx.as_usize())
} else {
- ctx.stack_push(asm, Type::Unknown)
+ asm.stack_push(Type::Unknown)
};
asm.mov(stack_top, local_opnd);
@@ -1742,44 +1713,40 @@ fn gen_getlocal_generic(
fn gen_getlocal(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let idx = jit.get_arg(0).as_u32();
let level = jit.get_arg(1).as_u32();
- gen_getlocal_generic(jit, ctx, asm, idx, level)
+ gen_getlocal_generic(jit, asm, idx, level)
}
fn gen_getlocal_wc0(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let idx = jit.get_arg(0).as_u32();
- gen_getlocal_generic(jit, ctx, asm, idx, 0)
+ gen_getlocal_generic(jit, asm, idx, 0)
}
fn gen_getlocal_wc1(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let idx = jit.get_arg(0).as_u32();
- gen_getlocal_generic(jit, ctx, asm, idx, 1)
+ gen_getlocal_generic(jit, asm, idx, 1)
}
fn gen_setlocal_generic(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
ep_offset: u32,
level: u32,
) -> CodegenStatus {
- let value_type = ctx.get_opnd_type(StackOpnd(0));
+ let value_type = asm.ctx.get_opnd_type(StackOpnd(0));
// Load environment pointer EP at level
let ep_opnd = gen_get_ep(asm, level);
@@ -1797,16 +1764,16 @@ fn gen_setlocal_generic(
asm.test(flags_opnd, VM_ENV_FLAG_WB_REQUIRED.into());
// if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
- asm.jnz(side_exit(jit, ctx, ocb));
+ asm.jnz(side_exit(jit, &asm.ctx, ocb));
}
if level == 0 {
let local_idx = ep_offset_to_local_idx(jit.get_iseq(), ep_offset).as_usize();
- ctx.set_local_type(local_idx, value_type);
+ asm.ctx.set_local_type(local_idx, value_type);
}
// Pop the value to write from the stack
- let stack_top = ctx.stack_pop(1);
+ let stack_top = asm.stack_pop(1);
// Write the value at the environment pointer
let offs = -(SIZEOF_VALUE_I32 * ep_offset as i32);
@@ -1817,46 +1784,42 @@ fn gen_setlocal_generic(
fn gen_setlocal(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
let idx = jit.get_arg(0).as_u32();
let level = jit.get_arg(1).as_u32();
- gen_setlocal_generic(jit, ctx, asm, ocb, idx, level)
+ gen_setlocal_generic(jit, asm, ocb, idx, level)
}
fn gen_setlocal_wc0(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
let idx = jit.get_arg(0).as_u32();
- gen_setlocal_generic(jit, ctx, asm, ocb, idx, 0)
+ gen_setlocal_generic(jit, asm, ocb, idx, 0)
}
fn gen_setlocal_wc1(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
let idx = jit.get_arg(0).as_u32();
- gen_setlocal_generic(jit, ctx, asm, ocb, idx, 1)
+ gen_setlocal_generic(jit, asm, ocb, idx, 1)
}
// new hash initialized from top N values
fn gen_newhash(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let num: u64 = jit.get_arg(0).as_u64();
// Save the PC and SP because we are allocating
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
if num != 0 {
// val = rb_hash_new_with_size(num / 2);
@@ -1870,7 +1833,7 @@ fn gen_newhash(
asm.cpush(new_hash); // x86 alignment
// Get a pointer to the values to insert into the hash
- let stack_addr_from_top = asm.lea(ctx.stack_opnd((num - 1) as i32));
+ let stack_addr_from_top = asm.lea(asm.ctx.stack_opnd((num - 1) as i32));
// rb_hash_bulk_insert(num, STACK_ADDR_FROM_TOP(num), val);
asm.ccall(
@@ -1885,13 +1848,13 @@ fn gen_newhash(
let new_hash = asm.cpop();
asm.cpop_into(new_hash); // x86 alignment
- ctx.stack_pop(num.try_into().unwrap());
- let stack_ret = ctx.stack_push(asm, Type::Hash);
+ asm.stack_pop(num.try_into().unwrap());
+ let stack_ret = asm.stack_push(Type::Hash);
asm.mov(stack_ret, new_hash);
} else {
// val = rb_hash_new();
let new_hash = asm.ccall(rb_hash_new as *const u8, vec![]);
- let stack_ret = ctx.stack_push(asm, Type::Hash);
+ let stack_ret = asm.stack_push(Type::Hash);
asm.mov(stack_ret, new_hash);
}
@@ -1900,21 +1863,20 @@ fn gen_newhash(
fn gen_putstring(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let put_val = jit.get_arg(0);
// Save the PC and SP because the callee will allocate
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
let str_opnd = asm.ccall(
rb_ec_str_resurrect as *const u8,
vec![EC, put_val.into()]
);
- let stack_top = ctx.stack_push(asm, Type::CString);
+ let stack_top = asm.stack_push(Type::CString);
asm.mov(stack_top, str_opnd);
KeepCompiling
@@ -1924,7 +1886,6 @@ fn gen_putstring(
// the caller
fn gen_checkkeyword(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -1954,7 +1915,7 @@ fn gen_checkkeyword(
asm.test(bits_opnd, Opnd::Imm(bit_test));
let ret_opnd = asm.csel_z(Qtrue.into(), Qfalse.into());
- let stack_ret = ctx.stack_push(asm, Type::UnknownImm);
+ let stack_ret = asm.stack_push(Type::UnknownImm);
asm.mov(stack_ret, ret_opnd);
KeepCompiling
@@ -1965,7 +1926,6 @@ fn gen_checkkeyword(
fn jit_chain_guard(
jcc: JCCKinds,
jit: &mut JITState,
- ctx: &Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
depth_limit: i32,
@@ -1977,8 +1937,8 @@ fn jit_chain_guard(
JCC_JBE | JCC_JNA => BranchGenFn::JBEToTarget0,
};
- if (ctx.get_chain_depth() as i32) < depth_limit {
- let mut deeper = ctx.clone();
+ if (asm.ctx.get_chain_depth() as i32) < depth_limit {
+ let mut deeper = asm.ctx.clone();
deeper.increment_chain_depth();
let bid = BlockId {
iseq: jit.iseq,
@@ -1987,7 +1947,7 @@ fn jit_chain_guard(
gen_branch(jit, asm, ocb, bid, &deeper, None, None, target0_gen_fn);
} else {
- target0_gen_fn.call(asm, counted_exit(jit, ctx, ocb, counter).unwrap_code_ptr(), None);
+ target0_gen_fn.call(asm, counted_exit(jit, &asm.ctx, ocb, counter).unwrap_code_ptr(), None);
}
}
@@ -2016,7 +1976,6 @@ pub const CASE_WHEN_MAX_DEPTH: i32 = 20;
// - no stack push or pops to ctx since the entry to the codegen of the instruction being compiled
fn gen_set_ivar(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ivar_name: ID,
flags: u32,
@@ -2025,16 +1984,16 @@ fn gen_set_ivar(
// This is a .send call and we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
- handle_opt_send_shift_stack(asm, argc, ctx);
+ handle_opt_send_shift_stack(asm, argc);
}
// Save the PC and SP because the callee may allocate
// Note that this modifies REG_SP, which is why we do it first
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// Get the operands from the stack
- let val_opnd = ctx.stack_pop(1);
- let recv_opnd = ctx.stack_pop(1);
+ let val_opnd = asm.stack_pop(1);
+ let recv_opnd = asm.stack_pop(1);
// Call rb_vm_set_ivar_id with the receiver, the ivar name, and the value
let val = asm.ccall(
@@ -2046,7 +2005,7 @@ fn gen_set_ivar(
],
);
- let out_opnd = ctx.stack_push(asm, Type::Unknown);
+ let out_opnd = asm.stack_push(Type::Unknown);
asm.mov(out_opnd, val);
KeepCompiling
@@ -2058,7 +2017,6 @@ fn gen_set_ivar(
// - no stack push or pops to ctx since the entry to the codegen of the instruction being compiled
fn gen_get_ivar(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
max_chain_depth: i32,
@@ -2088,7 +2046,7 @@ fn gen_get_ivar(
// Check if the comptime receiver is a T_OBJECT
let receiver_t_object = unsafe { RB_TYPE_P(comptime_receiver, RUBY_T_OBJECT) };
// Use a general C call at the last chain to avoid exits on megamorphic shapes
- let last_chain = ctx.get_chain_depth() as i32 == max_chain_depth - 1;
+ let last_chain = asm.ctx.get_chain_depth() as i32 == max_chain_depth - 1;
if last_chain {
gen_counter_incr!(asm, get_ivar_max_depth);
}
@@ -2104,20 +2062,20 @@ fn gen_get_ivar(
asm.comment("call rb_ivar_get()");
// The function could raise exceptions.
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
let ivar_val = asm.ccall(rb_ivar_get as *const u8, vec![recv, Opnd::UImm(ivar_name)]);
if recv_opnd != SelfOpnd {
- ctx.stack_pop(1);
+ asm.stack_pop(1);
}
// Push the ivar on the stack
- let out_opnd = ctx.stack_push(asm, Type::Unknown);
+ let out_opnd = asm.stack_push(Type::Unknown);
asm.mov(out_opnd, ivar_val);
// Jump to next instruction. This allows guard chains to share the same successor.
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
return EndBlock;
}
@@ -2133,7 +2091,7 @@ fn gen_get_ivar(
};
// Guard heap object (recv_opnd must be used before stack_pop)
- guard_object_is_heap(jit, ctx, asm, ocb, recv, recv_opnd, None);
+ guard_object_is_heap(jit, asm, ocb, recv, recv_opnd, None);
// Compile time self is embedded and the ivar index lands within the object
let embed_test_result = unsafe { FL_TEST_RAW(comptime_receiver, VALUE(ROBJECT_EMBED.as_usize())) != VALUE(0) };
@@ -2147,7 +2105,6 @@ fn gen_get_ivar(
jit_chain_guard(
JCC_JNE,
jit,
- &ctx,
asm,
ocb,
max_chain_depth,
@@ -2156,7 +2113,7 @@ fn gen_get_ivar(
// Pop receiver if it's on the temp stack
if recv_opnd != SelfOpnd {
- ctx.stack_pop(1);
+ asm.stack_pop(1);
}
match ivar_index {
@@ -2164,7 +2121,7 @@ fn gen_get_ivar(
// when we entered the compiler. That means we can just return
// nil for this shape + iv name
None => {
- let out_opnd = ctx.stack_push(asm, Type::Nil);
+ let out_opnd = asm.stack_push(Type::Nil);
asm.mov(out_opnd, Qnil.into());
}
Some(ivar_index) => {
@@ -2176,7 +2133,7 @@ fn gen_get_ivar(
let ivar_opnd = Opnd::mem(64, recv, offs);
// Push the ivar on the stack
- let out_opnd = ctx.stack_push(asm, Type::Unknown);
+ let out_opnd = asm.stack_push(Type::Unknown);
asm.mov(out_opnd, ivar_opnd);
} else {
// Compile time value is *not* embedded.
@@ -2187,26 +2144,25 @@ fn gen_get_ivar(
// Read the ivar from the extended table
let ivar_opnd = Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * ivar_index) as i32);
- let out_opnd = ctx.stack_push(asm, Type::Unknown);
+ let out_opnd = asm.stack_push(Type::Unknown);
asm.mov(out_opnd, ivar_opnd);
}
}
}
// Jump to next instruction. This allows guard chains to share the same successor.
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
EndBlock
}
fn gen_getinstancevariable(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Defer compilation so we can specialize on a runtime `self`
if !jit.at_current_insn() {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
@@ -2219,7 +2175,6 @@ fn gen_getinstancevariable(
gen_get_ivar(
jit,
- ctx,
asm,
ocb,
GET_IVAR_MAX_DEPTH,
@@ -2268,13 +2223,12 @@ fn gen_write_iv(
fn gen_setinstancevariable(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Defer compilation so we can specialize on a runtime `self`
if !jit.at_current_insn() {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
@@ -2289,7 +2243,7 @@ fn gen_setinstancevariable(
return CantCompile;
}
- let (_, stack_type) = ctx.get_opnd_mapping(StackOpnd(0));
+ let (_, stack_type) = asm.ctx.get_opnd_mapping(StackOpnd(0));
// Check if the comptime class uses a custom allocator
let custom_allocator = unsafe { rb_get_alloc_func(comptime_val_klass) };
@@ -2307,17 +2261,17 @@ fn gen_setinstancevariable(
// If the receiver isn't a T_OBJECT, or uses a custom allocator,
// then just write out the IV write as a function call.
// too-complex shapes can't use index access, so we use rb_ivar_get for them too.
- if !receiver_t_object || uses_custom_allocator || comptime_receiver.shape_too_complex() || (ctx.get_chain_depth() as i32) >= SET_IVAR_MAX_DEPTH {
+ if !receiver_t_object || uses_custom_allocator || comptime_receiver.shape_too_complex() || (asm.ctx.get_chain_depth() as i32) >= SET_IVAR_MAX_DEPTH {
asm.comment("call rb_vm_setinstancevariable()");
let ic = jit.get_arg(1).as_u64(); // type IVC
// The function could raise exceptions.
// Note that this modifies REG_SP, which is why we do it first
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// Get the operands from the stack
- let val_opnd = ctx.stack_pop(1);
+ let val_opnd = asm.stack_pop(1);
// Call rb_vm_setinstancevariable(iseq, obj, id, val, ic);
asm.ccall(
@@ -2349,7 +2303,7 @@ fn gen_setinstancevariable(
let recv_opnd = SelfOpnd;
// Upgrade type
- guard_object_is_heap(jit, ctx, asm, ocb, recv, recv_opnd, None);
+ guard_object_is_heap(jit, asm, ocb, recv, recv_opnd, None);
let expected_shape = unsafe { rb_shape_get_shape_id(comptime_receiver) };
let shape_id_offset = unsafe { rb_shape_id_offset() };
@@ -2360,14 +2314,13 @@ fn gen_setinstancevariable(
jit_chain_guard(
JCC_JNE,
jit,
- &ctx,
asm,
ocb,
SET_IVAR_MAX_DEPTH,
exit_counter!(setivar_megamorphic),
);
- asm.spill_temps(ctx); // for ccall (must be done before write_val is popped)
+ asm.spill_temps(); // for ccall (must be done before write_val is popped)
let write_val;
match ivar_index {
@@ -2410,7 +2363,7 @@ fn gen_setinstancevariable(
if needs_extension {
// Generate the C call so that runtime code will increase
// the capacity and set the buffer.
- asm.spill_temps(ctx); // for ccall
+ asm.spill_temps(); // for ccall
asm.ccall(rb_ensure_iv_list_size as *const u8,
vec![
recv,
@@ -2423,7 +2376,7 @@ fn gen_setinstancevariable(
recv = asm.load(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF))
}
- write_val = ctx.stack_pop(1);
+ write_val = asm.stack_pop(1);
gen_write_iv(asm, comptime_receiver, recv, ivar_index, write_val, needs_extension);
asm.comment("write shape");
@@ -2441,7 +2394,7 @@ fn gen_setinstancevariable(
// the iv index by searching up the shape tree. If we've
// made the transition already, then there's no reason to
// update the shape on the object. Just set the IV.
- write_val = ctx.stack_pop(1);
+ write_val = asm.stack_pop(1);
gen_write_iv(asm, comptime_receiver, recv, ivar_index, write_val, false);
},
}
@@ -2476,7 +2429,6 @@ fn gen_setinstancevariable(
fn gen_defined(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -2486,10 +2438,10 @@ fn gen_defined(
// Save the PC and SP because the callee may allocate
// Note that this modifies REG_SP, which is why we do it first
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// Get the operands from the stack
- let v_opnd = ctx.stack_pop(1);
+ let v_opnd = asm.stack_pop(1);
// Call vm_defined(ec, reg_cfp, op_type, obj, v)
let def_result = asm.ccall(rb_vm_defined as *const u8, vec![EC, CFP, op_type.into(), obj.into(), v_opnd]);
@@ -2506,7 +2458,7 @@ fn gen_defined(
} else {
Type::Unknown
};
- let stack_ret = ctx.stack_push(asm, out_type);
+ let stack_ret = asm.stack_push(out_type);
asm.mov(stack_ret, out_value);
KeepCompiling
@@ -2514,13 +2466,12 @@ fn gen_defined(
fn gen_definedivar(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Defer compilation so we can specialize base on a runtime receiver
if !jit.at_current_insn() {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
@@ -2540,7 +2491,7 @@ fn gen_definedivar(
// Save the PC and SP because the callee may allocate
// Note that this modifies REG_SP, which is why we do it first
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// Call rb_ivar_defined(recv, ivar_name)
let def_result = asm.ccall(rb_ivar_defined as *const u8, vec![recv.into(), ivar_name.into()]);
@@ -2553,7 +2504,7 @@ fn gen_definedivar(
// Push the return value onto the stack
let out_type = if pushval.special_const_p() { Type::UnknownImm } else { Type::Unknown };
- let stack_ret = ctx.stack_push(asm, out_type);
+ let stack_ret = asm.stack_push(out_type);
asm.mov(stack_ret, out_value);
return KeepCompiling
@@ -2567,7 +2518,7 @@ fn gen_definedivar(
};
// Guard heap object (recv_opnd must be used before stack_pop)
- guard_object_is_heap(jit, ctx, asm, ocb, recv, SelfOpnd, None);
+ guard_object_is_heap(jit, asm, ocb, recv, SelfOpnd, None);
let shape_id_offset = unsafe { rb_shape_id_offset() };
let shape_opnd = Opnd::mem(SHAPE_ID_NUM_BITS as u8, recv, shape_id_offset);
@@ -2577,7 +2528,6 @@ fn gen_definedivar(
jit_chain_guard(
JCC_JNE,
jit,
- ctx,
asm,
ocb,
GET_IVAR_MAX_DEPTH,
@@ -2585,17 +2535,16 @@ fn gen_definedivar(
);
let result = if ivar_exists { pushval } else { Qnil };
- jit_putobject(jit, ctx, asm, result);
+ jit_putobject(asm, result);
// Jump to next instruction. This allows guard chains to share the same successor.
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
return EndBlock;
}
fn gen_checktype(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -2603,17 +2552,18 @@ fn gen_checktype(
// Only three types are emitted by compile.c at the moment
if let RUBY_T_STRING | RUBY_T_ARRAY | RUBY_T_HASH = type_val {
- let val_type = ctx.get_opnd_type(StackOpnd(0));
- let val = asm.load(ctx.stack_pop(1));
+ let val_type = asm.ctx.get_opnd_type(StackOpnd(0));
+ let val = asm.stack_pop(1);
+ let val = asm.load(val);
// Check if we know from type information
match val_type.known_value_type() {
Some(value_type) => {
if value_type == type_val {
- jit_putobject(jit, ctx, asm, Qtrue);
+ jit_putobject(asm, Qtrue);
return KeepCompiling;
} else {
- jit_putobject(jit, ctx, asm, Qfalse);
+ jit_putobject(asm, Qfalse);
return KeepCompiling;
}
},
@@ -2639,7 +2589,7 @@ fn gen_checktype(
let ret_opnd = asm.csel_e(Qtrue.into(), Qfalse.into());
asm.write_label(ret);
- let stack_ret = ctx.stack_push(asm, Type::UnknownImm);
+ let stack_ret = asm.stack_push(Type::UnknownImm);
asm.mov(stack_ret, ret_opnd);
KeepCompiling
@@ -2650,16 +2600,15 @@ fn gen_checktype(
fn gen_concatstrings(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let n = jit.get_arg(0).as_usize();
// Save the PC and SP because we are allocating
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
- let values_ptr = asm.lea(ctx.sp_opnd(-((SIZEOF_VALUE as isize) * n as isize)));
+ let values_ptr = asm.lea(asm.ctx.sp_opnd(-((SIZEOF_VALUE as isize) * n as isize)));
// call rb_str_concat_literals(size_t n, const VALUE *strings);
let return_value = asm.ccall(
@@ -2667,8 +2616,8 @@ fn gen_concatstrings(
vec![n.into(), values_ptr]
);
- ctx.stack_pop(n);
- let stack_ret = ctx.stack_push(asm, Type::CString);
+ asm.stack_pop(n);
+ let stack_ret = asm.stack_push(Type::CString);
asm.mov(stack_ret, return_value);
KeepCompiling
@@ -2676,33 +2625,32 @@ fn gen_concatstrings(
fn guard_two_fixnums(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) {
// Get stack operands without popping them
- let arg1 = ctx.stack_opnd(0);
- let arg0 = ctx.stack_opnd(1);
+ let arg1 = asm.ctx.stack_opnd(0);
+ let arg0 = asm.ctx.stack_opnd(1);
// Get the stack operand types
- let arg1_type = ctx.get_opnd_type(arg1.into());
- let arg0_type = ctx.get_opnd_type(arg0.into());
+ let arg1_type = asm.ctx.get_opnd_type(arg1.into());
+ let arg0_type = asm.ctx.get_opnd_type(arg0.into());
if arg0_type.is_heap() || arg1_type.is_heap() {
asm.comment("arg is heap object");
- asm.jmp(side_exit(jit, ctx, ocb));
+ asm.jmp(side_exit(jit, &asm.ctx, ocb));
return;
}
if arg0_type != Type::Fixnum && arg0_type.is_specific() {
asm.comment("arg0 not fixnum");
- asm.jmp(side_exit(jit, ctx, ocb));
+ asm.jmp(side_exit(jit, &asm.ctx, ocb));
return;
}
if arg1_type != Type::Fixnum && arg1_type.is_specific() {
asm.comment("arg1 not fixnum");
- asm.jmp(side_exit(jit, ctx, ocb));
+ asm.jmp(side_exit(jit, &asm.ctx, ocb));
return;
}
@@ -2719,7 +2667,6 @@ fn guard_two_fixnums(
jit_chain_guard(
JCC_JZ,
jit,
- &ctx,
asm,
ocb,
SEND_MAX_DEPTH,
@@ -2733,7 +2680,6 @@ fn guard_two_fixnums(
jit_chain_guard(
JCC_JZ,
jit,
- &ctx,
asm,
ocb,
SEND_MAX_DEPTH,
@@ -2742,8 +2688,8 @@ fn guard_two_fixnums(
}
// Set stack types in context
- ctx.upgrade_opnd_type(arg1.into(), Type::Fixnum);
- ctx.upgrade_opnd_type(arg0.into(), Type::Fixnum);
+ asm.ctx.upgrade_opnd_type(arg1.into(), Type::Fixnum);
+ asm.ctx.upgrade_opnd_type(arg0.into(), Type::Fixnum);
}
// Conditional move operation used by comparison operators
@@ -2751,17 +2697,16 @@ type CmovFn = fn(cb: &mut Assembler, opnd0: Opnd, opnd1: Opnd) -> Opnd;
fn gen_fixnum_cmp(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
cmov_op: CmovFn,
bop: ruby_basic_operators,
) -> CodegenStatus {
- let two_fixnums = match ctx.two_fixnums_on_stack(jit) {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
Some(two_fixnums) => two_fixnums,
None => {
// Defer compilation so we can specialize based on a runtime receiver
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
};
@@ -2772,60 +2717,56 @@ fn gen_fixnum_cmp(
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb);
+ guard_two_fixnums(jit, asm, ocb);
// Get the operands from the stack
- let arg1 = ctx.stack_pop(1);
- let arg0 = ctx.stack_pop(1);
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
// Compare the arguments
asm.cmp(arg0, arg1);
let bool_opnd = cmov_op(asm, Qtrue.into(), Qfalse.into());
// Push the output on the stack
- let dst = ctx.stack_push(asm, Type::UnknownImm);
+ let dst = asm.stack_push(Type::UnknownImm);
asm.mov(dst, bool_opnd);
KeepCompiling
} else {
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
}
fn gen_opt_lt(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- gen_fixnum_cmp(jit, ctx, asm, ocb, Assembler::csel_l, BOP_LT)
+ gen_fixnum_cmp(jit, asm, ocb, Assembler::csel_l, BOP_LT)
}
fn gen_opt_le(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- gen_fixnum_cmp(jit, ctx, asm, ocb, Assembler::csel_le, BOP_LE)
+ gen_fixnum_cmp(jit, asm, ocb, Assembler::csel_le, BOP_LE)
}
fn gen_opt_ge(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- gen_fixnum_cmp(jit, ctx, asm, ocb, Assembler::csel_ge, BOP_GE)
+ gen_fixnum_cmp(jit, asm, ocb, Assembler::csel_ge, BOP_GE)
}
fn gen_opt_gt(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- gen_fixnum_cmp(jit, ctx, asm, ocb, Assembler::csel_g, BOP_GT)
+ gen_fixnum_cmp(jit, asm, ocb, Assembler::csel_g, BOP_GT)
}
// Implements specialized equality for either two fixnum or two strings
@@ -2833,15 +2774,14 @@ fn gen_opt_gt(
// if code was generated, otherwise Some(false).
fn gen_equality_specialized(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
gen_eq: bool,
) -> Option<bool> {
- let a_opnd = ctx.stack_opnd(1);
- let b_opnd = ctx.stack_opnd(0);
+ let a_opnd = asm.ctx.stack_opnd(1);
+ let b_opnd = asm.ctx.stack_opnd(0);
- let two_fixnums = match ctx.two_fixnums_on_stack(jit) {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
Some(two_fixnums) => two_fixnums,
None => return None,
};
@@ -2852,7 +2792,7 @@ fn gen_equality_specialized(
return Some(false);
}
- guard_two_fixnums(jit, ctx, asm, ocb);
+ guard_two_fixnums(jit, asm, ocb);
asm.cmp(a_opnd, b_opnd);
let val = if gen_eq {
@@ -2862,8 +2802,8 @@ fn gen_equality_specialized(
};
// Push the output on the stack
- ctx.stack_pop(2);
- let dst = ctx.stack_push(asm, Type::UnknownImm);
+ asm.stack_pop(2);
+ let dst = asm.stack_push(Type::UnknownImm);
asm.mov(dst, val);
return Some(true);
@@ -2872,8 +2812,8 @@ fn gen_equality_specialized(
if !jit.at_current_insn() {
return None;
}
- let comptime_a = jit.peek_at_stack(ctx, 1);
- let comptime_b = jit.peek_at_stack(ctx, 0);
+ let comptime_a = jit.peek_at_stack(&asm.ctx, 1);
+ let comptime_b = jit.peek_at_stack(&asm.ctx, 0);
if unsafe { comptime_a.class_of() == rb_cString && comptime_b.class_of() == rb_cString } {
if !assume_bop_not_redefined(jit, ocb, STRING_REDEFINED_OP_FLAG, BOP_EQ) {
@@ -2884,7 +2824,6 @@ fn gen_equality_specialized(
// Guard that a is a String
jit_guard_known_klass(
jit,
- ctx,
asm,
ocb,
unsafe { rb_cString },
@@ -2899,20 +2838,19 @@ fn gen_equality_specialized(
let ret = asm.new_label("ret");
// Spill for ccall. For safety, unconditionally spill temps before branching.
- asm.spill_temps(ctx);
+ asm.spill_temps();
// If they are equal by identity, return true
asm.cmp(a_opnd, b_opnd);
asm.je(equal);
// Otherwise guard that b is a T_STRING (from type info) or String (from runtime guard)
- let btype = ctx.get_opnd_type(b_opnd.into());
+ let btype = asm.ctx.get_opnd_type(b_opnd.into());
if btype.known_value_type() != Some(RUBY_T_STRING) {
// Note: any T_STRING is valid here, but we check for a ::String for simplicity
// To pass a mutable static variable (rb_cString) requires an unsafe block
jit_guard_known_klass(
jit,
- ctx,
asm,
ocb,
unsafe { rb_cString },
@@ -2931,8 +2869,8 @@ fn gen_equality_specialized(
);
// Push the output on the stack
- ctx.stack_pop(2);
- let dst = ctx.stack_push(asm, Type::UnknownImm);
+ asm.stack_pop(2);
+ let dst = asm.stack_push(Type::UnknownImm);
asm.mov(dst, val);
asm.jmp(ret);
@@ -2949,42 +2887,39 @@ fn gen_equality_specialized(
fn gen_opt_eq(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- let specialized = match gen_equality_specialized(jit, ctx, asm, ocb, true) {
+ let specialized = match gen_equality_specialized(jit, asm, ocb, true) {
Some(specialized) => specialized,
None => {
// Defer compilation so we can specialize base on a runtime receiver
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
};
if specialized {
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
EndBlock
} else {
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
}
fn gen_opt_neq(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// opt_neq is passed two rb_call_data as arguments:
// first for ==, second for !=
let cd = jit.get_arg(1).as_ptr();
- return gen_send_general(jit, ctx, asm, ocb, cd, None);
+ return gen_send_general(jit, asm, ocb, cd, None);
}
fn gen_opt_aref(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -2999,13 +2934,13 @@ fn gen_opt_aref(
    // Defer compilation so we can specialize based on a runtime receiver
if !jit.at_current_insn() {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
    // Specialize based on compile-time values
- let comptime_idx = jit.peek_at_stack(ctx, 0);
- let comptime_recv = jit.peek_at_stack(ctx, 1);
+ let comptime_idx = jit.peek_at_stack(&asm.ctx, 0);
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, 1);
if comptime_recv.class_of() == unsafe { rb_cArray } && comptime_idx.fixnum_p() {
if !assume_bop_not_redefined(jit, ocb, ARRAY_REDEFINED_OP_FLAG, BOP_AREF) {
@@ -3013,14 +2948,13 @@ fn gen_opt_aref(
}
// Get the stack operands
- let idx_opnd = ctx.stack_opnd(0);
- let recv_opnd = ctx.stack_opnd(1);
+ let idx_opnd = asm.ctx.stack_opnd(0);
+ let recv_opnd = asm.ctx.stack_opnd(1);
// Guard that the receiver is an ::Array
// BOP_AREF check above is only good for ::Array.
jit_guard_known_klass(
jit,
- ctx,
asm,
ocb,
unsafe { rb_cArray },
@@ -3034,37 +2968,36 @@ fn gen_opt_aref(
// Bail if idx is not a FIXNUM
let idx_reg = asm.load(idx_opnd);
asm.test(idx_reg, (RUBY_FIXNUM_FLAG as u64).into());
- asm.jz(counted_exit!(jit, ctx, ocb, oaref_arg_not_fixnum));
+ asm.jz(counted_exit!(jit, &asm.ctx, ocb, oaref_arg_not_fixnum));
// Call VALUE rb_ary_entry_internal(VALUE ary, long offset).
// It never raises or allocates, so we don't need to write to cfp->pc.
{
- asm.spill_temps(ctx); // for ccall
+ asm.spill_temps(); // for ccall
let idx_reg = asm.rshift(idx_reg, Opnd::UImm(1)); // Convert fixnum to int
let val = asm.ccall(rb_ary_entry_internal as *const u8, vec![recv_opnd, idx_reg]);
// Pop the argument and the receiver
- ctx.stack_pop(2);
+ asm.stack_pop(2);
// Push the return value onto the stack
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, val);
}
// Jump to next instruction. This allows guard chains to share the same successor.
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
return EndBlock;
} else if comptime_recv.class_of() == unsafe { rb_cHash } {
if !assume_bop_not_redefined(jit, ocb, HASH_REDEFINED_OP_FLAG, BOP_AREF) {
return CantCompile;
}
- let recv_opnd = ctx.stack_opnd(1);
+ let recv_opnd = asm.ctx.stack_opnd(1);
// Guard that the receiver is a hash
jit_guard_known_klass(
jit,
- ctx,
asm,
ocb,
unsafe { rb_cHash },
@@ -3076,54 +3009,52 @@ fn gen_opt_aref(
);
// Prepare to call rb_hash_aref(). It might call #hash on the key.
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// Call rb_hash_aref
- let key_opnd = ctx.stack_opnd(0);
- let recv_opnd = ctx.stack_opnd(1);
+ let key_opnd = asm.ctx.stack_opnd(0);
+ let recv_opnd = asm.ctx.stack_opnd(1);
let val = asm.ccall(rb_hash_aref as *const u8, vec![recv_opnd, key_opnd]);
// Pop the key and the receiver
- ctx.stack_pop(2);
+ asm.stack_pop(2);
// Push the return value onto the stack
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, val);
// Jump to next instruction. This allows guard chains to share the same successor.
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
EndBlock
} else {
// General case. Call the [] method.
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
}
fn gen_opt_aset(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Defer compilation so we can specialize on a runtime `self`
if !jit.at_current_insn() {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
- let comptime_recv = jit.peek_at_stack(ctx, 2);
- let comptime_key = jit.peek_at_stack(ctx, 1);
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, 2);
+ let comptime_key = jit.peek_at_stack(&asm.ctx, 1);
// Get the operands from the stack
- let recv = ctx.stack_opnd(2);
- let key = ctx.stack_opnd(1);
- let _val = ctx.stack_opnd(0);
+ let recv = asm.ctx.stack_opnd(2);
+ let key = asm.ctx.stack_opnd(1);
+ let _val = asm.ctx.stack_opnd(0);
if comptime_recv.class_of() == unsafe { rb_cArray } && comptime_key.fixnum_p() {
// Guard receiver is an Array
jit_guard_known_klass(
jit,
- ctx,
asm,
ocb,
unsafe { rb_cArray },
@@ -3137,7 +3068,6 @@ fn gen_opt_aset(
// Guard key is a fixnum
jit_guard_known_klass(
jit,
- ctx,
asm,
ocb,
unsafe { rb_cInteger },
@@ -3149,31 +3079,30 @@ fn gen_opt_aset(
);
// We might allocate or raise
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// Call rb_ary_store
- let recv = ctx.stack_opnd(2);
- let key = asm.load(ctx.stack_opnd(1));
+ let recv = asm.ctx.stack_opnd(2);
+ let key = asm.load(asm.ctx.stack_opnd(1));
let key = asm.rshift(key, Opnd::UImm(1)); // FIX2LONG(key)
- let val = ctx.stack_opnd(0);
+ let val = asm.ctx.stack_opnd(0);
asm.ccall(rb_ary_store as *const u8, vec![recv, key, val]);
// rb_ary_store returns void
// stored value should still be on stack
- let val = asm.load(ctx.stack_opnd(0));
+ let val = asm.load(asm.ctx.stack_opnd(0));
// Push the return value onto the stack
- ctx.stack_pop(3);
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ asm.stack_pop(3);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, val);
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
return EndBlock;
} else if comptime_recv.class_of() == unsafe { rb_cHash } {
// Guard receiver is a Hash
jit_guard_known_klass(
jit,
- ctx,
asm,
ocb,
unsafe { rb_cHash },
@@ -3185,37 +3114,36 @@ fn gen_opt_aset(
);
// We might allocate or raise
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// Call rb_hash_aset
- let recv = ctx.stack_opnd(2);
- let key = ctx.stack_opnd(1);
- let val = ctx.stack_opnd(0);
+ let recv = asm.ctx.stack_opnd(2);
+ let key = asm.ctx.stack_opnd(1);
+ let val = asm.ctx.stack_opnd(0);
let ret = asm.ccall(rb_hash_aset as *const u8, vec![recv, key, val]);
// Push the return value onto the stack
- ctx.stack_pop(3);
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ asm.stack_pop(3);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, ret);
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
EndBlock
} else {
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
}
fn gen_opt_and(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- let two_fixnums = match ctx.two_fixnums_on_stack(jit) {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
Some(two_fixnums) => two_fixnums,
None => {
// Defer compilation so we can specialize on a runtime `self`
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
};
@@ -3226,37 +3154,36 @@ fn gen_opt_and(
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb);
+ guard_two_fixnums(jit, asm, ocb);
// Get the operands and destination from the stack
- let arg1 = ctx.stack_pop(1);
- let arg0 = ctx.stack_pop(1);
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
// Do the bitwise and arg0 & arg1
let val = asm.and(arg0, arg1);
// Push the output on the stack
- let dst = ctx.stack_push(asm, Type::Fixnum);
+ let dst = asm.stack_push(Type::Fixnum);
asm.store(dst, val);
KeepCompiling
} else {
// Delegate to send, call the method on the recv
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
}
fn gen_opt_or(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- let two_fixnums = match ctx.two_fixnums_on_stack(jit) {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
Some(two_fixnums) => two_fixnums,
None => {
// Defer compilation so we can specialize on a runtime `self`
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
};
@@ -3267,37 +3194,36 @@ fn gen_opt_or(
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb);
+ guard_two_fixnums(jit, asm, ocb);
// Get the operands and destination from the stack
- let arg1 = ctx.stack_pop(1);
- let arg0 = ctx.stack_pop(1);
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
// Do the bitwise or arg0 | arg1
let val = asm.or(arg0, arg1);
// Push the output on the stack
- let dst = ctx.stack_push(asm, Type::Fixnum);
+ let dst = asm.stack_push(Type::Fixnum);
asm.store(dst, val);
KeepCompiling
} else {
// Delegate to send, call the method on the recv
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
}
fn gen_opt_minus(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- let two_fixnums = match ctx.two_fixnums_on_stack(jit) {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
Some(two_fixnums) => two_fixnums,
None => {
// Defer compilation so we can specialize on a runtime `self`
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
};
@@ -3308,59 +3234,56 @@ fn gen_opt_minus(
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb);
+ guard_two_fixnums(jit, asm, ocb);
// Get the operands and destination from the stack
- let arg1 = ctx.stack_pop(1);
- let arg0 = ctx.stack_pop(1);
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
// Subtract arg0 - arg1 and test for overflow
let val_untag = asm.sub(arg0, arg1);
- asm.jo(side_exit(jit, ctx, ocb));
+ asm.jo(side_exit(jit, &asm.ctx, ocb));
let val = asm.add(val_untag, Opnd::Imm(1));
// Push the output on the stack
- let dst = ctx.stack_push(asm, Type::Fixnum);
+ let dst = asm.stack_push(Type::Fixnum);
asm.store(dst, val);
KeepCompiling
} else {
// Delegate to send, call the method on the recv
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
}
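One Rust detail worth noting in rewritten call sites such as asm.jo(side_exit(jit, &asm.ctx, ocb)) above: the method call takes a mutable borrow of asm while the argument list reads asm.ctx. Two-phase borrows accept this, because only shared reads of the receiver happen while the mutable borrow is still reserved. A stand-alone reduction of the same shape (names are made up, not YJIT's API):

#[derive(Default)]
struct Ctx { stack_size: u8 }

#[derive(Default)]
struct Asm { ctx: Ctx }

struct Target(u8); // stand-in for a side-exit target

// Like side_exit()/counted_exit! in the diff, this only needs shared access
// to the context to snapshot it for the exit.
fn side_exit(ctx: &Ctx) -> Target {
    Target(ctx.stack_size)
}

impl Asm {
    // Mutable method that consumes a target computed from our own context.
    fn jo(&mut self, _target: Target) { /* emit jump-on-overflow */ }
}

fn main() {
    let mut asm = Asm::default();
    // Shared read of asm.ctx inside the argument of a &mut self call:
    // accepted thanks to two-phase borrows, same shape as the diff's
    // asm.jo(side_exit(jit, &asm.ctx, ocb)).
    asm.jo(side_exit(&asm.ctx));
}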
fn gen_opt_mult(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Delegate to send, call the method on the recv
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
fn gen_opt_div(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Delegate to send, call the method on the recv
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
fn gen_opt_mod(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- let two_fixnums = match ctx.two_fixnums_on_stack(jit) {
+ let two_fixnums = match asm.ctx.two_fixnums_on_stack(jit) {
Some(two_fixnums) => two_fixnums,
None => {
// Defer compilation so we can specialize on a runtime `self`
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
};
@@ -3371,75 +3294,70 @@ fn gen_opt_mod(
}
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb);
+ guard_two_fixnums(jit, asm, ocb);
// Get the operands and destination from the stack
- asm.spill_temps(ctx); // for ccall (must be done before stack_pop)
- let arg1 = ctx.stack_pop(1);
- let arg0 = ctx.stack_pop(1);
+ asm.spill_temps(); // for ccall (must be done before stack_pop)
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
// Check for arg0 % 0
asm.cmp(arg1, Opnd::Imm(VALUE::fixnum_from_usize(0).as_i64()));
- asm.je(side_exit(jit, ctx, ocb));
+ asm.je(side_exit(jit, &asm.ctx, ocb));
// Call rb_fix_mod_fix(VALUE recv, VALUE obj)
let ret = asm.ccall(rb_fix_mod_fix as *const u8, vec![arg0, arg1]);
// Push the return value onto the stack
// When the two arguments are fixnums, the modulo output is always a fixnum
- let stack_ret = ctx.stack_push(asm, Type::Fixnum);
+ let stack_ret = asm.stack_push(Type::Fixnum);
asm.mov(stack_ret, ret);
KeepCompiling
} else {
// Delegate to send, call the method on the recv
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
}
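The "// for ccall (must be done before stack_pop)" comments encode an ordering constraint: spill_temps() only writes back register-backed temps for slots still within the context's stack size, so popping first would leave a live register value unspilled when the C call clobbers registers. A simplified model of that bookkeeping (a sketch, not YJIT's actual data structures):

// Model: some top-of-stack values live in registers; a C call clobbers
// registers, so anything register-backed must be written to memory first.
struct Asm {
    stack_size: usize,
    in_reg: Vec<bool>,        // which stack slots are currently register-backed
    memory: Vec<Option<i64>>, // the canonical stack slots in memory
    regs: Vec<i64>,           // register contents, indexed by slot
}

impl Asm {
    fn spill_temps(&mut self) {
        // Only slots below the current stack size get spilled.
        for idx in 0..self.stack_size {
            if self.in_reg[idx] {
                self.memory[idx] = Some(self.regs[idx]);
                self.in_reg[idx] = false;
            }
        }
    }

    fn stack_pop(&mut self, n: usize) -> usize {
        self.stack_size -= n;
        self.stack_size // index of the first popped operand
    }
}

fn main() {
    let mut asm = Asm {
        stack_size: 2,
        in_reg: vec![false, true],
        memory: vec![Some(7), None],
        regs: vec![0, 42],
    };

    // Correct order (as in the diff): spill, then pop, then make the C call.
    asm.spill_temps();
    let base = asm.stack_pop(2);
    assert_eq!(asm.memory[base + 1], Some(42)); // value survived in memory

    // If we popped first, slot 1 would be outside stack_size and never
    // spilled, so the ccall would read a stale memory slot.
}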
fn gen_opt_ltlt(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Delegate to send, call the method on the recv
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
fn gen_opt_nil_p(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Delegate to send, call the method on the recv
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
fn gen_opt_empty_p(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Delegate to send, call the method on the recv
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
fn gen_opt_succ(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Delegate to send, call the method on the recv
- gen_opt_send_without_block(jit, ctx, asm, ocb)
+ gen_opt_send_without_block(jit, asm, ocb)
}
fn gen_opt_str_freeze(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -3450,7 +3368,7 @@ fn gen_opt_str_freeze(
let str = jit.get_arg(0);
// Push the return value onto the stack
- let stack_ret = ctx.stack_push(asm, Type::CString);
+ let stack_ret = asm.stack_push(Type::CString);
asm.mov(stack_ret, str.into());
KeepCompiling
@@ -3458,7 +3376,6 @@ fn gen_opt_str_freeze(
fn gen_opt_str_uminus(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -3469,7 +3386,7 @@ fn gen_opt_str_uminus(
let str = jit.get_arg(0);
// Push the return value onto the stack
- let stack_ret = ctx.stack_push(asm, Type::CString);
+ let stack_ret = asm.stack_push(Type::CString);
asm.mov(stack_ret, str.into());
KeepCompiling
@@ -3477,21 +3394,20 @@ fn gen_opt_str_uminus(
fn gen_opt_newarray_max(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let num = jit.get_arg(0).as_u32();
// Save the PC and SP because we may allocate
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
extern "C" {
fn rb_vm_opt_newarray_max(ec: EcPtr, num: u32, elts: *const VALUE) -> VALUE;
}
let offset_magnitude = (SIZEOF_VALUE as u32) * num;
- let values_opnd = ctx.sp_opnd(-(offset_magnitude as isize));
+ let values_opnd = asm.ctx.sp_opnd(-(offset_magnitude as isize));
let values_ptr = asm.lea(values_opnd);
let val_opnd = asm.ccall(
@@ -3503,8 +3419,8 @@ fn gen_opt_newarray_max(
],
);
- ctx.stack_pop(num.as_usize());
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ asm.stack_pop(num.as_usize());
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, val_opnd);
KeepCompiling
@@ -3512,7 +3428,6 @@ fn gen_opt_newarray_max(
fn gen_opt_newarray_min(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -3520,14 +3435,14 @@ fn gen_opt_newarray_min(
let num = jit.get_arg(0).as_u32();
// Save the PC and SP because we may allocate
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
extern "C" {
fn rb_vm_opt_newarray_min(ec: EcPtr, num: u32, elts: *const VALUE) -> VALUE;
}
let offset_magnitude = (SIZEOF_VALUE as u32) * num;
- let values_opnd = ctx.sp_opnd(-(offset_magnitude as isize));
+ let values_opnd = asm.ctx.sp_opnd(-(offset_magnitude as isize));
let values_ptr = asm.lea(values_opnd);
let val_opnd = asm.ccall(
@@ -3539,8 +3454,8 @@ fn gen_opt_newarray_min(
],
);
- ctx.stack_pop(num.as_usize());
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ asm.stack_pop(num.as_usize());
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, val_opnd);
KeepCompiling
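In both newarray_min/max paths, sp_opnd(-(SIZEOF_VALUE * num)) names the address num slots below the interpreter stack pointer, i.e. the first of the num values already pushed, and lea turns it into the pointer argument for the C helper. A tiny model of that address computation (assuming 8-byte VALUE slots; Opnd/lea details omitted):

// Model of sp_opnd(-(SIZEOF_VALUE * num)) + lea: compute the address of the
// first of `num` values sitting just below the stack pointer. Pointer
// arithmetic on *const u64 already scales by 8 bytes per element.
fn values_ptr(sp: *const u64, num: isize) -> *const u64 {
    unsafe { sp.offset(-num) }
}

fn main() {
    let stack = [10u64, 20, 30, 40];
    // Pretend sp points one past the last pushed value.
    let sp = unsafe { stack.as_ptr().add(stack.len()) };
    let base = values_ptr(sp, 3); // address of the three topmost values: 20, 30, 40
    assert_eq!(unsafe { *base }, 20);
    println!("first element passed to the helper: {}", unsafe { *base });
}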
@@ -3548,43 +3463,38 @@ fn gen_opt_newarray_min(
fn gen_opt_not(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- return gen_opt_send_without_block(jit, ctx, asm, ocb);
+ return gen_opt_send_without_block(jit, asm, ocb);
}
fn gen_opt_size(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- return gen_opt_send_without_block(jit, ctx, asm, ocb);
+ return gen_opt_send_without_block(jit, asm, ocb);
}
fn gen_opt_length(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- return gen_opt_send_without_block(jit, ctx, asm, ocb);
+ return gen_opt_send_without_block(jit, asm, ocb);
}
fn gen_opt_regexpmatch2(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
- return gen_opt_send_without_block(jit, ctx, asm, ocb);
+ return gen_opt_send_without_block(jit, asm, ocb);
}
fn gen_opt_case_dispatch(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -3596,7 +3506,7 @@ fn gen_opt_case_dispatch(
// hash lookup, at least for small hashes, but it's worth revisiting this
// assumption in the future.
if !jit.at_current_insn() {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
@@ -3605,8 +3515,8 @@ fn gen_opt_case_dispatch(
// Try to reorder case/else branches so that ones that are actually used come first.
// Supporting only Fixnum for now so that the implementation can be an equality check.
- let key_opnd = ctx.stack_opnd(0);
- let comptime_key = jit.peek_at_stack(ctx, 0);
+ let key_opnd = asm.ctx.stack_opnd(0);
+ let comptime_key = jit.peek_at_stack(&asm.ctx, 0);
// Check that all cases are fixnums to avoid having to register BOP assumptions on
// all the types that case hashes support. This spends compile time to save memory.
@@ -3637,13 +3547,12 @@ fn gen_opt_case_dispatch(
jit_chain_guard(
JCC_JNE,
jit,
- &ctx,
asm,
ocb,
CASE_WHEN_MAX_DEPTH,
None,
);
- ctx.stack_pop(1); // Pop key_opnd
+ asm.stack_pop(1); // Pop key_opnd
// Get the offset for the compile-time key
let mut offset = 0;
@@ -3658,17 +3567,16 @@ fn gen_opt_case_dispatch(
// Jump to the offset of case or else
let jump_idx = jit.next_insn_idx() as u32 + jump_offset;
let jump_block = BlockId { iseq: jit.iseq, idx: jump_idx.try_into().unwrap() };
- gen_direct_jump(jit, &ctx, jump_block, asm);
+ gen_direct_jump(jit, &asm.ctx.clone(), jump_block, asm);
EndBlock
} else {
- ctx.stack_pop(1); // Pop key_opnd
+ asm.stack_pop(1); // Pop key_opnd
KeepCompiling // continue with === branches
}
}
fn gen_branchif(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -3676,7 +3584,7 @@ fn gen_branchif(
// Check for interrupts, but only on backward branches that may create loops
if jump_offset < 0 {
- gen_check_ints(jit, ctx, asm, ocb, None);
+ gen_check_ints(jit, asm, ocb, None);
}
// Get the branch target instruction offsets
@@ -3693,27 +3601,28 @@ fn gen_branchif(
// Test if any bit (outside of the Qnil bit) is on
// See RB_TEST()
- let val_type = ctx.get_opnd_type(StackOpnd(0));
- let val_opnd = ctx.stack_pop(1);
+ let val_type = asm.ctx.get_opnd_type(StackOpnd(0));
+ let val_opnd = asm.stack_pop(1);
incr_counter!(branch_insn_count);
if let Some(result) = val_type.known_truthy() {
let target = if result { jump_block } else { next_block };
- gen_direct_jump(jit, ctx, target, asm);
+ gen_direct_jump(jit, &asm.ctx.clone(), target, asm);
incr_counter!(branch_known_count);
} else {
asm.test(val_opnd, Opnd::Imm(!Qnil.as_i64()));
// Generate the branch instructions
+ let ctx = asm.ctx.clone();
gen_branch(
jit,
asm,
ocb,
jump_block,
- ctx,
+ &ctx,
Some(next_block),
- Some(ctx),
+ Some(&ctx),
BranchGenFn::BranchIf(Cell::new(BranchShape::Default)),
);
}
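gen_direct_jump and gen_branch still take a &Context alongside &mut Assembler, so the context is cloned out of the assembler first (the local ctx above, or the inline asm.ctx.clone()); passing &asm.ctx directly would hold a shared borrow of asm across the mutable one. A reduced illustration with made-up names:

#[derive(Clone, Default)]
struct Ctx { stack_size: u8 }

#[derive(Default)]
struct Asm { ctx: Ctx }

// Like gen_branch/gen_direct_jump in the diff: needs the assembler mutably and
// a context snapshot by shared reference at the same time.
fn gen_branch(asm: &mut Asm, ctx: &Ctx) {
    asm.ctx.stack_size = ctx.stack_size; // placeholder body
}

fn main() {
    let mut asm = Asm::default();

    // error[E0502]: cannot borrow `asm.ctx` as immutable because `asm`
    // is also borrowed as mutable
    // gen_branch(&mut asm, &asm.ctx);

    // Cloning a snapshot first, as the diff does, keeps both borrows disjoint:
    let ctx = asm.ctx.clone();
    gen_branch(&mut asm, &ctx);
}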
@@ -3723,7 +3632,6 @@ fn gen_branchif(
fn gen_branchunless(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -3731,7 +3639,7 @@ fn gen_branchunless(
// Check for interrupts, but only on backward branches that may create loops
if jump_offset < 0 {
- gen_check_ints(jit, ctx, asm, ocb, None);
+ gen_check_ints(jit, asm, ocb, None);
}
// Get the branch target instruction offsets
@@ -3746,14 +3654,14 @@ fn gen_branchunless(
idx: jump_idx.try_into().unwrap(),
};
- let val_type = ctx.get_opnd_type(StackOpnd(0));
- let val_opnd = ctx.stack_pop(1);
+ let val_type = asm.ctx.get_opnd_type(StackOpnd(0));
+ let val_opnd = asm.stack_pop(1);
incr_counter!(branch_insn_count);
if let Some(result) = val_type.known_truthy() {
let target = if result { next_block } else { jump_block };
- gen_direct_jump(jit, ctx, target, asm);
+ gen_direct_jump(jit, &asm.ctx.clone(), target, asm);
incr_counter!(branch_known_count);
} else {
// Test if any bit (outside of the Qnil bit) is on
@@ -3762,14 +3670,15 @@ fn gen_branchunless(
asm.test(val_opnd, not_qnil.into());
// Generate the branch instructions
+ let ctx = asm.ctx.clone();
gen_branch(
jit,
asm,
ocb,
jump_block,
- ctx,
+ &ctx,
Some(next_block),
- Some(ctx),
+ Some(&ctx),
BranchGenFn::BranchUnless(Cell::new(BranchShape::Default)),
);
}
@@ -3779,7 +3688,6 @@ fn gen_branchunless(
fn gen_branchnil(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -3787,7 +3695,7 @@ fn gen_branchnil(
// Check for interrupts, but only on backward branches that may create loops
if jump_offset < 0 {
- gen_check_ints(jit, ctx, asm, ocb, None);
+ gen_check_ints(jit, asm, ocb, None);
}
// Get the branch target instruction offsets
@@ -3802,27 +3710,28 @@ fn gen_branchnil(
idx: jump_idx.try_into().unwrap(),
};
- let val_type = ctx.get_opnd_type(StackOpnd(0));
- let val_opnd = ctx.stack_pop(1);
+ let val_type = asm.ctx.get_opnd_type(StackOpnd(0));
+ let val_opnd = asm.stack_pop(1);
incr_counter!(branch_insn_count);
if let Some(result) = val_type.known_nil() {
let target = if result { jump_block } else { next_block };
- gen_direct_jump(jit, ctx, target, asm);
+ gen_direct_jump(jit, &asm.ctx.clone(), target, asm);
incr_counter!(branch_known_count);
} else {
// Test if the value is Qnil
asm.cmp(val_opnd, Opnd::UImm(Qnil.into()));
// Generate the branch instructions
+ let ctx = asm.ctx.clone();
gen_branch(
jit,
asm,
ocb,
jump_block,
- ctx,
+ &ctx,
Some(next_block),
- Some(ctx),
+ Some(&ctx),
BranchGenFn::BranchNil(Cell::new(BranchShape::Default)),
);
}
@@ -3832,18 +3741,18 @@ fn gen_branchnil(
fn gen_throw(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let throw_state = jit.get_arg(0).as_u64();
- let throwobj = asm.load(ctx.stack_pop(1));
+ let throwobj = asm.stack_pop(1);
+ let throwobj = asm.load(throwobj);
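The pop/load split above is forced by the borrow checker rather than style: asm.load(asm.stack_pop(1)) would need a second mutable borrow of asm inside the argument of a &mut self call, which two-phase borrows do not allow (only shared reads like &asm.ctx are). The old asm.load(ctx.stack_pop(1)) was fine because ctx was a separate value. A minimal reproduction (made-up method bodies):

struct Asm { slots: Vec<i64> }

impl Asm {
    fn stack_pop(&mut self, _n: usize) -> i64 {
        self.slots.pop().unwrap_or(0)
    }
    fn load(&mut self, opnd: i64) -> i64 {
        opnd // pretend to emit a load and return a register operand
    }
}

fn main() {
    let mut asm = Asm { slots: vec![1, 2, 3] };

    // error[E0499]: cannot borrow `asm` as mutable more than once at a time
    // let v = asm.load(asm.stack_pop(1));

    // The two-statement form used in the diff compiles:
    let popped = asm.stack_pop(1);
    let v = asm.load(popped);
    println!("{v}");
}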
// THROW_DATA_NEW allocates. Save SP for GC and PC for allocation tracing as
// well as handling the catch table. However, not using jit_prepare_routine_call
// since we don't need a patch point for this implementation.
jit_save_pc(jit, asm);
- gen_save_sp(asm, ctx);
+ gen_save_sp(asm);
// rb_vm_throw verifies it's a valid throw, sets ec->tag->state, and returns throw
// data, which is throwobj or a vm_throw_data wrapping it. When ec->tag->state is
@@ -3866,7 +3775,6 @@ fn gen_throw(
fn gen_jump(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -3874,7 +3782,7 @@ fn gen_jump(
// Check for interrupts, but only on backward branches that may create loops
if jump_offset < 0 {
- gen_check_ints(jit, ctx, asm, ocb, None);
+ gen_check_ints(jit, asm, ocb, None);
}
// Get the branch target instruction offsets
@@ -3885,7 +3793,7 @@ fn gen_jump(
};
// Generate the jump instruction
- gen_direct_jump(jit, ctx, jump_block, asm);
+ gen_direct_jump(jit, &asm.ctx.clone(), jump_block, asm);
EndBlock
}
@@ -3898,7 +3806,6 @@ fn gen_jump(
/// Recompile as contingency if possible, or take side exit a last resort.
fn jit_guard_known_klass(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
known_klass: VALUE,
@@ -3908,7 +3815,7 @@ fn jit_guard_known_klass(
max_chain_depth: i32,
counter: Option<ExitCounter>,
) {
- let val_type = ctx.get_opnd_type(insn_opnd);
+ let val_type = asm.ctx.get_opnd_type(insn_opnd);
if val_type.known_class() == Some(known_klass) {
// We already know from type information that this is a match
@@ -3921,18 +3828,18 @@ fn jit_guard_known_klass(
asm.comment("guard object is nil");
asm.cmp(obj_opnd, Qnil.into());
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
+ jit_chain_guard(JCC_JNE, jit, asm, ocb, max_chain_depth, counter);
- ctx.upgrade_opnd_type(insn_opnd, Type::Nil);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::Nil);
} else if unsafe { known_klass == rb_cTrueClass } {
assert!(!val_type.is_heap());
assert!(val_type.is_unknown());
asm.comment("guard object is true");
asm.cmp(obj_opnd, Qtrue.into());
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
+ jit_chain_guard(JCC_JNE, jit, asm, ocb, max_chain_depth, counter);
- ctx.upgrade_opnd_type(insn_opnd, Type::True);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::True);
} else if unsafe { known_klass == rb_cFalseClass } {
assert!(!val_type.is_heap());
assert!(val_type.is_unknown());
@@ -3940,9 +3847,9 @@ fn jit_guard_known_klass(
asm.comment("guard object is false");
assert!(Qfalse.as_i32() == 0);
asm.test(obj_opnd, obj_opnd);
- jit_chain_guard(JCC_JNZ, jit, ctx, asm, ocb, max_chain_depth, counter);
+ jit_chain_guard(JCC_JNZ, jit, asm, ocb, max_chain_depth, counter);
- ctx.upgrade_opnd_type(insn_opnd, Type::False);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::False);
} else if unsafe { known_klass == rb_cInteger } && sample_instance.fixnum_p() {
// We will guard fixnum and bignum as though they were separate classes
// BIGNUM can be handled by the general else case below
@@ -3950,8 +3857,8 @@ fn jit_guard_known_klass(
asm.comment("guard object is fixnum");
asm.test(obj_opnd, Opnd::Imm(RUBY_FIXNUM_FLAG as i64));
- jit_chain_guard(JCC_JZ, jit, ctx, asm, ocb, max_chain_depth, counter);
- ctx.upgrade_opnd_type(insn_opnd, Type::Fixnum);
+ jit_chain_guard(JCC_JZ, jit, asm, ocb, max_chain_depth, counter);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::Fixnum);
} else if unsafe { known_klass == rb_cSymbol } && sample_instance.static_sym_p() {
assert!(!val_type.is_heap());
// We will guard STATIC vs DYNAMIC as though they were separate classes
@@ -3962,8 +3869,8 @@ fn jit_guard_known_klass(
asm.comment("guard object is static symbol");
assert!(RUBY_SPECIAL_SHIFT == 8);
asm.cmp(obj_opnd.with_num_bits(8).unwrap(), Opnd::UImm(RUBY_SYMBOL_FLAG as u64));
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
- ctx.upgrade_opnd_type(insn_opnd, Type::ImmSymbol);
+ jit_chain_guard(JCC_JNE, jit, asm, ocb, max_chain_depth, counter);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::ImmSymbol);
}
} else if unsafe { known_klass == rb_cFloat } && sample_instance.flonum_p() {
assert!(!val_type.is_heap());
@@ -3974,8 +3881,8 @@ fn jit_guard_known_klass(
asm.comment("guard object is flonum");
let flag_bits = asm.and(obj_opnd, Opnd::UImm(RUBY_FLONUM_MASK as u64));
asm.cmp(flag_bits, Opnd::UImm(RUBY_FLONUM_FLAG as u64));
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
- ctx.upgrade_opnd_type(insn_opnd, Type::Flonum);
+ jit_chain_guard(JCC_JNE, jit, asm, ocb, max_chain_depth, counter);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::Flonum);
}
} else if unsafe {
FL_TEST(known_klass, VALUE(RUBY_FL_SINGLETON as usize)) != VALUE(0)
@@ -3993,7 +3900,7 @@ fn jit_guard_known_klass(
// this situation.
asm.comment("guard known object with singleton class");
asm.cmp(obj_opnd, sample_instance.into());
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
+ jit_chain_guard(JCC_JNE, jit, asm, ocb, max_chain_depth, counter);
} else if val_type == Type::CString && unsafe { known_klass == rb_cString } {
// guard elided because the context says we've already checked
unsafe {
@@ -4007,11 +3914,11 @@ fn jit_guard_known_klass(
if !val_type.is_heap() {
asm.comment("guard not immediate");
asm.test(obj_opnd, (RUBY_IMMEDIATE_MASK as u64).into());
- jit_chain_guard(JCC_JNZ, jit, ctx, asm, ocb, max_chain_depth, counter.clone());
+ jit_chain_guard(JCC_JNZ, jit, asm, ocb, max_chain_depth, counter.clone());
asm.cmp(obj_opnd, Qfalse.into());
- jit_chain_guard(JCC_JE, jit, ctx, asm, ocb, max_chain_depth, counter.clone());
+ jit_chain_guard(JCC_JE, jit, asm, ocb, max_chain_depth, counter.clone());
- ctx.upgrade_opnd_type(insn_opnd, Type::UnknownHeap);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::UnknownHeap);
}
// If obj_opnd isn't already a register, load it.
@@ -4025,12 +3932,12 @@ fn jit_guard_known_klass(
// TODO: jit_mov_gc_ptr keeps a strong reference, which leaks the class.
asm.comment("guard known class");
asm.cmp(klass_opnd, known_klass.into());
- jit_chain_guard(JCC_JNE, jit, ctx, asm, ocb, max_chain_depth, counter);
+ jit_chain_guard(JCC_JNE, jit, asm, ocb, max_chain_depth, counter);
if known_klass == unsafe { rb_cString } {
- ctx.upgrade_opnd_type(insn_opnd, Type::CString);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::CString);
} else if known_klass == unsafe { rb_cArray } {
- ctx.upgrade_opnd_type(insn_opnd, Type::CArray);
+ asm.ctx.upgrade_opnd_type(insn_opnd, Type::CArray);
}
}
}
@@ -4039,7 +3946,6 @@ fn jit_guard_known_klass(
// Calls to protected callees only go through when self.is_a?(klass_that_defines_the_callee).
fn jit_protected_callee_ancestry_guard(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
cme: *const rb_callable_method_entry_t,
@@ -4049,7 +3955,7 @@ fn jit_protected_callee_ancestry_guard(
// Note: PC isn't written to current control frame as rb_is_kind_of() shouldn't raise.
// VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass);
- asm.spill_temps(ctx); // for ccall
+ asm.spill_temps(); // for ccall
let val = asm.ccall(
rb_obj_is_kind_of as *mut u8,
vec![
@@ -4058,7 +3964,7 @@ fn jit_protected_callee_ancestry_guard(
],
);
asm.test(val, val);
- asm.jz(counted_exit!(jit, ctx, ocb, send_se_protected_check_failed))
+ asm.jz(counted_exit!(jit, &asm.ctx, ocb, send_se_protected_check_failed))
}
// Codegen for rb_obj_not().
@@ -4066,7 +3972,6 @@ fn jit_protected_callee_ancestry_guard(
// arity guards.
fn jit_rb_obj_not(
_jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4075,20 +3980,20 @@ fn jit_rb_obj_not(
_argc: i32,
_known_recv_class: *const VALUE,
) -> bool {
- let recv_opnd = ctx.get_opnd_type(StackOpnd(0));
+ let recv_opnd = asm.ctx.get_opnd_type(StackOpnd(0));
match recv_opnd.known_truthy() {
Some(false) => {
asm.comment("rb_obj_not(nil_or_false)");
- ctx.stack_pop(1);
- let out_opnd = ctx.stack_push(asm, Type::True);
+ asm.stack_pop(1);
+ let out_opnd = asm.stack_push(Type::True);
asm.mov(out_opnd, Qtrue.into());
},
Some(true) => {
// Note: recv_opnd != Type::Nil && recv_opnd != Type::False.
asm.comment("rb_obj_not(truthy)");
- ctx.stack_pop(1);
- let out_opnd = ctx.stack_push(asm, Type::False);
+ asm.stack_pop(1);
+ let out_opnd = asm.stack_push(Type::False);
asm.mov(out_opnd, Qfalse.into());
},
_ => {
@@ -4102,7 +4007,6 @@ fn jit_rb_obj_not(
// Codegen for rb_true()
fn jit_rb_true(
_jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4112,8 +4016,8 @@ fn jit_rb_true(
_known_recv_class: *const VALUE,
) -> bool {
asm.comment("nil? == true");
- ctx.stack_pop(1);
- let stack_ret = ctx.stack_push(asm, Type::True);
+ asm.stack_pop(1);
+ let stack_ret = asm.stack_push(Type::True);
asm.mov(stack_ret, Qtrue.into());
true
}
@@ -4121,7 +4025,6 @@ fn jit_rb_true(
// Codegen for rb_false()
fn jit_rb_false(
_jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4131,8 +4034,8 @@ fn jit_rb_false(
_known_recv_class: *const VALUE,
) -> bool {
asm.comment("nil? == false");
- ctx.stack_pop(1);
- let stack_ret = ctx.stack_push(asm, Type::False);
+ asm.stack_pop(1);
+ let stack_ret = asm.stack_push(Type::False);
asm.mov(stack_ret, Qfalse.into());
true
}
@@ -4140,7 +4043,6 @@ fn jit_rb_false(
/// Codegen for Kernel#is_a?
fn jit_rb_kernel_is_a(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4166,8 +4068,8 @@ fn jit_rb_kernel_is_a(
// - In general, for any two Class instances A, B, `A < B` does not change at runtime.
// Class#superclass is stable.
- let sample_rhs = jit.peek_at_stack(ctx, 0);
- let sample_lhs = jit.peek_at_stack(ctx, 1);
+ let sample_rhs = jit.peek_at_stack(&asm.ctx, 0);
+ let sample_lhs = jit.peek_at_stack(&asm.ctx, 1);
    // We are not allowing modules here because the module hierarchy can change at runtime.
if !unsafe { RB_TYPE_P(sample_rhs, RUBY_T_CLASS) } {
@@ -4176,16 +4078,16 @@ fn jit_rb_kernel_is_a(
let sample_is_a = unsafe { rb_obj_is_kind_of(sample_lhs, sample_rhs) == Qtrue };
asm.comment("Kernel#is_a?");
- asm.cmp(ctx.stack_opnd(0), sample_rhs.into());
- asm.jne(counted_exit!(jit, ctx, ocb, send_is_a_class_mismatch));
+ asm.cmp(asm.ctx.stack_opnd(0), sample_rhs.into());
+ asm.jne(counted_exit!(jit, &asm.ctx, ocb, send_is_a_class_mismatch));
- ctx.stack_pop(2);
+ asm.stack_pop(2);
if sample_is_a {
- let stack_ret = ctx.stack_push(asm, Type::True);
+ let stack_ret = asm.stack_push(Type::True);
asm.mov(stack_ret, Qtrue.into());
} else {
- let stack_ret = ctx.stack_push(asm, Type::False);
+ let stack_ret = asm.stack_push(Type::False);
asm.mov(stack_ret, Qfalse.into());
}
return true;
@@ -4194,7 +4096,6 @@ fn jit_rb_kernel_is_a(
/// Codegen for Kernel#instance_of?
fn jit_rb_kernel_instance_of(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4220,8 +4121,8 @@ fn jit_rb_kernel_instance_of(
// - For a particular `CLASS_OF(lhs)`, `rb_obj_class(lhs)` does not change.
// (because for any singleton class `s`, `s.superclass.equal?(s.attached_object.class)`)
- let sample_rhs = jit.peek_at_stack(ctx, 0);
- let sample_lhs = jit.peek_at_stack(ctx, 1);
+ let sample_rhs = jit.peek_at_stack(&asm.ctx, 0);
+ let sample_lhs = jit.peek_at_stack(&asm.ctx, 1);
// Filters out cases where the C implementation raises
if unsafe { !(RB_TYPE_P(sample_rhs, RUBY_T_CLASS) || RB_TYPE_P(sample_rhs, RUBY_T_MODULE)) } {
@@ -4236,16 +4137,16 @@ fn jit_rb_kernel_instance_of(
let sample_instance_of = sample_lhs_real_class == sample_rhs;
asm.comment("Kernel#instance_of?");
- asm.cmp(ctx.stack_opnd(0), sample_rhs.into());
- asm.jne(counted_exit!(jit, ctx, ocb, send_instance_of_class_mismatch));
+ asm.cmp(asm.ctx.stack_opnd(0), sample_rhs.into());
+ asm.jne(counted_exit!(jit, &asm.ctx, ocb, send_instance_of_class_mismatch));
- ctx.stack_pop(2);
+ asm.stack_pop(2);
if sample_instance_of {
- let stack_ret = ctx.stack_push(asm, Type::True);
+ let stack_ret = asm.stack_push(Type::True);
asm.mov(stack_ret, Qtrue.into());
} else {
- let stack_ret = ctx.stack_push(asm, Type::False);
+ let stack_ret = asm.stack_push(Type::False);
asm.mov(stack_ret, Qfalse.into());
}
return true;
@@ -4253,7 +4154,6 @@ fn jit_rb_kernel_instance_of(
fn jit_rb_mod_eqq(
_jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4267,20 +4167,20 @@ fn jit_rb_mod_eqq(
}
asm.comment("Module#===");
- asm.spill_temps(ctx); // for ccall
+ asm.spill_temps(); // for ccall
// By being here, we know that the receiver is a T_MODULE or a T_CLASS, because Module#=== can
// only live on these objects. With that, we can call rb_obj_is_kind_of() without
// jit_prepare_routine_call() or a control frame push because it can't raise, allocate, or call
// Ruby methods with these inputs.
// Note the difference in approach from Kernel#is_a? because we don't get a free guard for the
// right hand side.
- let lhs = ctx.stack_opnd(1); // the module
- let rhs = ctx.stack_opnd(0);
+ let lhs = asm.ctx.stack_opnd(1); // the module
+ let rhs = asm.ctx.stack_opnd(0);
let ret = asm.ccall(rb_obj_is_kind_of as *const u8, vec![rhs, lhs]);
// Return the result
- ctx.stack_pop(2);
- let stack_ret = ctx.stack_push(asm, Type::UnknownImm);
+ asm.stack_pop(2);
+ let stack_ret = asm.stack_push(Type::UnknownImm);
asm.mov(stack_ret, ret);
return true;
@@ -4290,7 +4190,6 @@ fn jit_rb_mod_eqq(
// object identity comparison
fn jit_rb_obj_equal(
_jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4300,13 +4199,13 @@ fn jit_rb_obj_equal(
_known_recv_class: *const VALUE,
) -> bool {
asm.comment("equal?");
- let obj1 = ctx.stack_pop(1);
- let obj2 = ctx.stack_pop(1);
+ let obj1 = asm.stack_pop(1);
+ let obj2 = asm.stack_pop(1);
asm.cmp(obj1, obj2);
let ret_opnd = asm.csel_e(Qtrue.into(), Qfalse.into());
- let stack_ret = ctx.stack_push(asm, Type::UnknownImm);
+ let stack_ret = asm.stack_push(Type::UnknownImm);
asm.mov(stack_ret, ret_opnd);
true
}
@@ -4315,7 +4214,6 @@ fn jit_rb_obj_equal(
// object identity comparison
fn jit_rb_obj_not_equal(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4324,13 +4222,12 @@ fn jit_rb_obj_not_equal(
_argc: i32,
_known_recv_class: *const VALUE,
) -> bool {
- gen_equality_specialized(jit, ctx, asm, ocb, false) == Some(true)
+ gen_equality_specialized(jit, asm, ocb, false) == Some(true)
}
// Codegen for rb_int_equal()
fn jit_rb_int_equal(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4340,23 +4237,22 @@ fn jit_rb_int_equal(
_known_recv_class: *const VALUE,
) -> bool {
// Check that both operands are fixnums
- guard_two_fixnums(jit, ctx, asm, ocb);
+ guard_two_fixnums(jit, asm, ocb);
// Compare the arguments
asm.comment("rb_int_equal");
- let arg1 = ctx.stack_pop(1);
- let arg0 = ctx.stack_pop(1);
+ let arg1 = asm.stack_pop(1);
+ let arg0 = asm.stack_pop(1);
asm.cmp(arg0, arg1);
let ret_opnd = asm.csel_e(Qtrue.into(), Qfalse.into());
- let stack_ret = ctx.stack_push(asm, Type::UnknownImm);
+ let stack_ret = asm.stack_push(Type::UnknownImm);
asm.mov(stack_ret, ret_opnd);
true
}
fn jit_rb_int_mul(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4365,27 +4261,26 @@ fn jit_rb_int_mul(
_argc: i32,
_known_recv_class: *const VALUE,
) -> bool {
- if ctx.two_fixnums_on_stack(jit) != Some(true) {
+ if asm.ctx.two_fixnums_on_stack(jit) != Some(true) {
return false;
}
- guard_two_fixnums(jit, ctx, asm, ocb);
+ guard_two_fixnums(jit, asm, ocb);
// rb_fix_mul_fix may allocate memory for Bignum
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
asm.comment("Integer#*");
- let obj = ctx.stack_pop(1);
- let recv = ctx.stack_pop(1);
+ let obj = asm.stack_pop(1);
+ let recv = asm.stack_pop(1);
let ret = asm.ccall(rb_fix_mul_fix as *const u8, vec![recv, obj]);
- let ret_opnd = ctx.stack_push(asm, Type::Unknown);
+ let ret_opnd = asm.stack_push(Type::Unknown);
asm.mov(ret_opnd, ret);
true
}
fn jit_rb_int_div(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4394,30 +4289,29 @@ fn jit_rb_int_div(
_argc: i32,
_known_recv_class: *const VALUE,
) -> bool {
- if ctx.two_fixnums_on_stack(jit) != Some(true) {
+ if asm.ctx.two_fixnums_on_stack(jit) != Some(true) {
return false;
}
- guard_two_fixnums(jit, ctx, asm, ocb);
+ guard_two_fixnums(jit, asm, ocb);
asm.comment("Integer#/");
- asm.spill_temps(ctx); // for ccall (must be done before stack_pop)
- let obj = ctx.stack_pop(1);
- let recv = ctx.stack_pop(1);
+ asm.spill_temps(); // for ccall (must be done before stack_pop)
+ let obj = asm.stack_pop(1);
+ let recv = asm.stack_pop(1);
// Check for arg0 % 0
asm.cmp(obj, VALUE::fixnum_from_usize(0).as_i64().into());
- asm.je(side_exit(jit, ctx, ocb));
+ asm.je(side_exit(jit, &asm.ctx, ocb));
let ret = asm.ccall(rb_fix_div_fix as *const u8, vec![recv, obj]);
- let ret_opnd = ctx.stack_push(asm, Type::Fixnum);
+ let ret_opnd = asm.stack_push(Type::Fixnum);
asm.mov(ret_opnd, ret);
true
}
fn jit_rb_int_aref(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4429,19 +4323,19 @@ fn jit_rb_int_aref(
if argc != 1 {
return false;
}
- if ctx.two_fixnums_on_stack(jit) != Some(true) {
+ if asm.ctx.two_fixnums_on_stack(jit) != Some(true) {
return false;
}
- guard_two_fixnums(jit, ctx, asm, ocb);
+ guard_two_fixnums(jit, asm, ocb);
asm.comment("Integer#[]");
- asm.spill_temps(ctx); // for ccall (must be done before stack_pop)
- let obj = ctx.stack_pop(1);
- let recv = ctx.stack_pop(1);
+ asm.spill_temps(); // for ccall (must be done before stack_pop)
+ let obj = asm.stack_pop(1);
+ let recv = asm.stack_pop(1);
let ret = asm.ccall(rb_fix_aref as *const u8, vec![recv, obj]);
- let ret_opnd = ctx.stack_push(asm, Type::Fixnum);
+ let ret_opnd = asm.stack_push(Type::Fixnum);
asm.mov(ret_opnd, ret);
true
}
@@ -4449,7 +4343,6 @@ fn jit_rb_int_aref(
/// If string is frozen, duplicate it to get a non-frozen string. Otherwise, return it.
fn jit_rb_str_uplus(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4464,24 +4357,25 @@ fn jit_rb_str_uplus(
}
// We allocate when we dup the string
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
asm.comment("Unary plus on string");
- let recv_opnd = asm.load(ctx.stack_pop(1));
+ let recv_opnd = asm.stack_pop(1);
+ let recv_opnd = asm.load(recv_opnd);
let flags_opnd = asm.load(Opnd::mem(64, recv_opnd, RUBY_OFFSET_RBASIC_FLAGS));
asm.test(flags_opnd, Opnd::Imm(RUBY_FL_FREEZE as i64));
let ret_label = asm.new_label("stack_ret");
// String#+@ can only exist on T_STRING
- let stack_ret = ctx.stack_push(asm, Type::TString);
+ let stack_ret = asm.stack_push(Type::TString);
// If the string isn't frozen, we just return it.
asm.mov(stack_ret, recv_opnd);
asm.jz(ret_label);
// Str is frozen - duplicate it
- asm.spill_temps(ctx); // for ccall
+ asm.spill_temps(); // for ccall
let ret_opnd = asm.ccall(rb_str_dup as *const u8, vec![recv_opnd]);
asm.mov(stack_ret, ret_opnd);
@@ -4492,7 +4386,6 @@ fn jit_rb_str_uplus(
fn jit_rb_str_bytesize(
_jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4503,11 +4396,11 @@ fn jit_rb_str_bytesize(
) -> bool {
asm.comment("String#bytesize");
- asm.spill_temps(ctx); // for ccall (must be done before stack_pop)
- let recv = ctx.stack_pop(1);
+ asm.spill_temps(); // for ccall (must be done before stack_pop)
+ let recv = asm.stack_pop(1);
let ret_opnd = asm.ccall(rb_str_bytesize as *const u8, vec![recv]);
- let out_opnd = ctx.stack_push(asm, Type::Fixnum);
+ let out_opnd = asm.stack_push(Type::Fixnum);
asm.mov(out_opnd, ret_opnd);
true
@@ -4519,7 +4412,6 @@ fn jit_rb_str_bytesize(
// this situation happens a lot in some workloads.
fn jit_rb_str_to_s(
_jit: &mut JITState,
- _ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4540,7 +4432,6 @@ fn jit_rb_str_to_s(
// Codegen for rb_str_empty_p()
fn jit_rb_str_empty_p(
_jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4554,7 +4445,7 @@ fn jit_rb_str_empty_p(
"same offset to len embedded or not so we can use one code path to read the length",
);
- let recv_opnd = ctx.stack_pop(1);
+ let recv_opnd = asm.stack_pop(1);
asm.comment("get string length");
let str_len_opnd = Opnd::mem(
@@ -4565,7 +4456,7 @@ fn jit_rb_str_empty_p(
asm.cmp(str_len_opnd, Opnd::UImm(0));
let string_empty = asm.csel_e(Qtrue.into(), Qfalse.into());
- let out_opnd = ctx.stack_push(asm, Type::UnknownImm);
+ let out_opnd = asm.stack_push(Type::UnknownImm);
asm.mov(out_opnd, string_empty);
return true;
@@ -4576,7 +4467,6 @@ fn jit_rb_str_empty_p(
// This is common in Erb and similar templating languages.
fn jit_rb_str_concat(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4589,21 +4479,21 @@ fn jit_rb_str_concat(
// as the argument. We only specially optimise string arguments.
// If the peeked-at compile time argument is something other than
// a string, assume it won't be a string later either.
- let comptime_arg = jit.peek_at_stack(ctx, 0);
+ let comptime_arg = jit.peek_at_stack(&asm.ctx, 0);
if ! unsafe { RB_TYPE_P(comptime_arg, RUBY_T_STRING) } {
return false;
}
// Guard that the concat argument is a string
- guard_object_is_string(jit, ctx, asm, ocb, ctx.stack_opnd(0), StackOpnd(0), None);
+ guard_object_is_string(jit, asm, ocb, asm.ctx.stack_opnd(0), StackOpnd(0), None);
// Guard buffers from GC since rb_str_buf_append may allocate. During the VM lock on GC,
// other Ractors may trigger global invalidation, so we need ctx.clear_local_types().
// PC is used on errors like Encoding::CompatibilityError raised by rb_str_buf_append.
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
- let concat_arg = ctx.stack_pop(1);
- let recv = ctx.stack_pop(1);
+ let concat_arg = asm.stack_pop(1);
+ let recv = asm.stack_pop(1);
// Test if string encodings differ. If different, use rb_str_append. If the same,
// use rb_yjit_str_simple_append, which calls rb_str_cat.
@@ -4624,19 +4514,19 @@ fn jit_rb_str_concat(
asm.jnz(enc_mismatch);
// If encodings match, call the simple append function and jump to return
- asm.spill_temps(ctx); // for ccall
+ asm.spill_temps(); // for ccall
let ret_opnd = asm.ccall(rb_yjit_str_simple_append as *const u8, vec![recv, concat_arg]);
let ret_label = asm.new_label("func_return");
- let stack_ret = ctx.stack_push(asm, Type::CString);
+ let stack_ret = asm.stack_push(Type::CString);
asm.mov(stack_ret, ret_opnd);
- ctx.stack_pop(1); // forget stack_ret to re-push after ccall
+ asm.stack_pop(1); // forget stack_ret to re-push after ccall
asm.jmp(ret_label);
// If encodings are different, use a slower encoding-aware concatenate
asm.write_label(enc_mismatch);
- asm.spill_temps(ctx); // for ccall
+ asm.spill_temps(); // for ccall
let ret_opnd = asm.ccall(rb_str_buf_append as *const u8, vec![recv, concat_arg]);
- let stack_ret = ctx.stack_push(asm, Type::CString);
+ let stack_ret = asm.stack_push(Type::CString);
asm.mov(stack_ret, ret_opnd);
// Drop through to return
@@ -4648,7 +4538,6 @@ fn jit_rb_str_concat(
// Codegen for rb_ary_empty_p()
fn jit_rb_ary_empty_p(
_jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4657,14 +4546,14 @@ fn jit_rb_ary_empty_p(
_argc: i32,
_known_recv_class: *const VALUE,
) -> bool {
- let array_opnd = ctx.stack_pop(1);
+ let array_opnd = asm.stack_pop(1);
let array_reg = asm.load(array_opnd);
let len_opnd = get_array_len(asm, array_reg);
asm.test(len_opnd, len_opnd);
let bool_val = asm.csel_z(Qtrue.into(), Qfalse.into());
- let out_opnd = ctx.stack_push(asm, Type::UnknownImm);
+ let out_opnd = asm.stack_push(Type::UnknownImm);
asm.store(out_opnd, bool_val);
return true;
@@ -4672,7 +4561,6 @@ fn jit_rb_ary_empty_p(
fn jit_rb_ary_push(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4684,20 +4572,19 @@ fn jit_rb_ary_push(
asm.comment("Array#<<");
// rb_ary_push allocates memory for buffer extension
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
- let item_opnd = ctx.stack_pop(1);
- let ary_opnd = ctx.stack_pop(1);
+ let item_opnd = asm.stack_pop(1);
+ let ary_opnd = asm.stack_pop(1);
let ret = asm.ccall(rb_ary_push as *const u8, vec![ary_opnd, item_opnd]);
- let ret_opnd = ctx.stack_push(asm, Type::TArray);
+ let ret_opnd = asm.stack_push(Type::TArray);
asm.mov(ret_opnd, ret);
true
}
fn jit_obj_respond_to(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4718,7 +4605,7 @@ fn jit_obj_respond_to(
let recv_class = unsafe { *known_recv_class };
// Get the method_id from compile time. We will later add a guard against it.
- let mid_sym = jit.peek_at_stack(ctx, (argc - 1) as isize);
+ let mid_sym = jit.peek_at_stack(&asm.ctx, (argc - 1) as isize);
if !mid_sym.static_sym_p() {
return false
}
@@ -4730,7 +4617,7 @@ fn jit_obj_respond_to(
Some(false)
} else {
// Get value from type information (may or may not be known)
- ctx.get_opnd_type(StackOpnd(0)).known_truthy()
+ asm.ctx.get_opnd_type(StackOpnd(0)).known_truthy()
};
let target_cme = unsafe { rb_callable_method_entry_or_negative(recv_class, mid) };
@@ -4772,25 +4659,24 @@ fn jit_obj_respond_to(
if argc == 2 {
// pop include_all argument (we only use its type info)
- ctx.stack_pop(1);
+ asm.stack_pop(1);
}
- let sym_opnd = ctx.stack_pop(1);
- let _recv_opnd = ctx.stack_pop(1);
+ let sym_opnd = asm.stack_pop(1);
+ let _recv_opnd = asm.stack_pop(1);
// This is necessary because we have no guarantee that sym_opnd is a constant
asm.comment("guard known mid");
asm.cmp(sym_opnd, mid_sym.into());
- asm.jne(side_exit(jit, ctx, ocb));
+ asm.jne(side_exit(jit, &asm.ctx, ocb));
- jit_putobject(jit, ctx, asm, result);
+ jit_putobject(asm, result);
true
}
fn jit_rb_f_block_given_p(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4807,8 +4693,8 @@ fn jit_rb_f_block_given_p(
Opnd::mem(64, ep_opnd, SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL)
);
- ctx.stack_pop(1);
- let out_opnd = ctx.stack_push(asm, Type::UnknownImm);
+ asm.stack_pop(1);
+ let out_opnd = asm.stack_push(Type::UnknownImm);
// Return `block_handler != VM_BLOCK_HANDLER_NONE`
asm.cmp(block_handler, VM_BLOCK_HANDLER_NONE.into());
@@ -4820,7 +4706,6 @@ fn jit_rb_f_block_given_p(
fn jit_thread_s_current(
_jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
_ci: *const rb_callinfo,
@@ -4830,7 +4715,7 @@ fn jit_thread_s_current(
_known_recv_class: *const VALUE,
) -> bool {
asm.comment("Thread.current");
- ctx.stack_pop(1);
+ asm.stack_pop(1);
// ec->thread_ptr
let ec_thread_opnd = asm.load(Opnd::mem(64, EC, RUBY_OFFSET_EC_THREAD_PTR));
@@ -4838,7 +4723,7 @@ fn jit_thread_s_current(
// thread->self
let thread_self = Opnd::mem(64, ec_thread_opnd, RUBY_OFFSET_THREAD_SELF);
- let stack_ret = ctx.stack_push(asm, Type::UnknownHeap);
+ let stack_ret = asm.stack_push(Type::UnknownHeap);
asm.mov(stack_ret, thread_self);
true
}
@@ -5037,7 +4922,6 @@ fn gen_push_frame(
fn gen_send_cfunc(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
ci: *const rb_callinfo,
@@ -5106,25 +4990,25 @@ fn gen_send_cfunc(
if kw_arg.is_null() && flags & VM_CALL_OPT_SEND == 0 {
let codegen_p = lookup_cfunc_codegen(unsafe { (*cme).def });
if let Some(known_cfunc_codegen) = codegen_p {
- if known_cfunc_codegen(jit, ctx, asm, ocb, ci, cme, block, argc, recv_known_klass) {
+ if known_cfunc_codegen(jit, asm, ocb, ci, cme, block, argc, recv_known_klass) {
// cfunc codegen generated code. Terminate the block so
// there isn't multiple calls in the same block.
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
return EndBlock;
}
}
}
// Check for interrupts
- gen_check_ints(jit, ctx, asm, ocb, None);
+ gen_check_ints(jit, asm, ocb, None);
// Stack overflow check
// #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin)
// REG_CFP <= REG_SP + 4 * SIZEOF_VALUE + sizeof(rb_control_frame_t)
asm.comment("stack overflow check");
- let stack_limit = asm.lea(ctx.sp_opnd((SIZEOF_VALUE * 4 + 2 * RUBY_SIZEOF_CONTROL_FRAME) as isize));
+ let stack_limit = asm.lea(asm.ctx.sp_opnd((SIZEOF_VALUE * 4 + 2 * RUBY_SIZEOF_CONTROL_FRAME) as isize));
asm.cmp(CFP, stack_limit);
- asm.jbe(counted_exit!(jit, ctx, ocb, send_se_cf_overflow));
+ asm.jbe(counted_exit!(jit, &asm.ctx, ocb, send_se_cf_overflow));
// Number of args which will be passed through to the callee
// This is adjusted by the kwargs being combined into a hash.
@@ -5149,7 +5033,7 @@ fn gen_send_cfunc(
let block_arg = flags & VM_CALL_ARGS_BLOCKARG != 0;
let block_arg_type = if block_arg {
- Some(ctx.get_opnd_type(StackOpnd(0)))
+ Some(asm.ctx.get_opnd_type(StackOpnd(0)))
} else {
None
};
@@ -5170,11 +5054,11 @@ fn gen_send_cfunc(
match block_arg_type {
Some(Type::Nil) => {
// We have a nil block arg, so let's pop it off the args
- ctx.stack_pop(1);
+ asm.stack_pop(1);
}
Some(Type::BlockParamProxy) => {
// We don't need the actual stack value
- ctx.stack_pop(1);
+ asm.stack_pop(1);
}
None => {
// Nothing to do
@@ -5201,23 +5085,23 @@ fn gen_send_cfunc(
// and if not side exit.
argc = cfunc_argc;
passed_argc = argc;
- push_splat_args(required_args, jit, ctx, asm, ocb)
+ push_splat_args(required_args, jit, asm, ocb)
}
// This is a .send call and we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
- handle_opt_send_shift_stack(asm, argc, ctx);
+ handle_opt_send_shift_stack(asm, argc);
}
// Points to the receiver operand on the stack
- let recv = ctx.stack_opnd(argc);
+ let recv = asm.ctx.stack_opnd(argc);
// Store incremented PC into current control frame in case callee raises.
jit_save_pc(jit, asm);
// Increment the stack pointer by 3 (in the callee)
// sp += 3
- let sp = asm.lea(ctx.sp_opnd((SIZEOF_VALUE as isize) * 3));
+ let sp = asm.lea(asm.ctx.sp_opnd((SIZEOF_VALUE as isize) * 3));
let specval = if block_arg_type == Some(Type::BlockParamProxy) {
SpecVal::BlockParamProxy
@@ -5253,27 +5137,27 @@ fn gen_send_cfunc(
let imemo_ci = VALUE(ci as usize);
assert_ne!(0, unsafe { rb_IMEMO_TYPE_P(imemo_ci, imemo_callinfo) },
"we assume all callinfos with kwargs are on the GC heap");
- let sp = asm.lea(ctx.sp_opnd(0));
- asm.spill_temps(ctx); // for ccall
+ let sp = asm.lea(asm.ctx.sp_opnd(0));
+ asm.spill_temps(); // for ccall
let kwargs = asm.ccall(build_kwhash as *const u8, vec![imemo_ci.into(), sp]);
// Replace the stack location at the start of kwargs with the new hash
- let stack_opnd = ctx.stack_opnd(argc - passed_argc);
+ let stack_opnd = asm.ctx.stack_opnd(argc - passed_argc);
asm.mov(stack_opnd, kwargs);
}
// Copy SP because REG_SP will get overwritten
- let sp = asm.lea(ctx.sp_opnd(0));
+ let sp = asm.lea(asm.ctx.sp_opnd(0));
    // Arguments must be spilled before being popped from ctx
- asm.spill_temps(ctx);
+ asm.spill_temps();
// Pop the C function arguments from the stack (in the caller)
- ctx.stack_pop((argc + 1).try_into().unwrap());
+ asm.stack_pop((argc + 1).try_into().unwrap());
// Write interpreter SP into CFP.
// Needed in case the callee yields to the block.
- gen_save_sp(asm, ctx);
+ gen_save_sp(asm);
// Non-variadic method
let args = if cfunc_argc >= 0 {
@@ -5308,7 +5192,7 @@ fn gen_send_cfunc(
record_global_inval_patch(asm, CodegenGlobals::get_outline_full_cfunc_return_pos());
// Push the return value on the Ruby stack
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, ret);
// Pop the stack frame (ec->cfp++)
@@ -5318,14 +5202,14 @@ fn gen_send_cfunc(
asm.store(ec_cfp_opnd, CFP);
// cfunc calls may corrupt types
- ctx.clear_local_types();
+ asm.ctx.clear_local_types();
// Note: the return block of gen_send_iseq() has ctx->sp_offset == 1
// which allows for sharing the same successor.
// Jump (fall through) to the call continuation block
// We do this to end the current block after the call
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
EndBlock
}
@@ -5382,14 +5266,14 @@ fn get_array_ptr(asm: &mut Assembler, array_reg: Opnd) -> Opnd {
/// Pushes arguments from an array to the stack. Differs from push splat because
/// the array can have items left over.
-fn move_rest_args_to_stack(array: Opnd, num_args: u32, jit: &mut JITState, ctx: &mut Context, asm: &mut Assembler, ocb: &mut OutlinedCb) {
+fn move_rest_args_to_stack(array: Opnd, num_args: u32, jit: &mut JITState, asm: &mut Assembler, ocb: &mut OutlinedCb) {
asm.comment("move_rest_args_to_stack");
let array_len_opnd = get_array_len(asm, array);
asm.comment("Side exit if length is less than required");
asm.cmp(array_len_opnd, num_args.into());
- asm.jl(counted_exit!(jit, ctx, ocb, send_iseq_has_rest_and_splat_not_equal));
+ asm.jl(counted_exit!(jit, &asm.ctx, ocb, send_iseq_has_rest_and_splat_not_equal));
asm.comment("Push arguments from array");
@@ -5412,7 +5296,7 @@ fn move_rest_args_to_stack(array: Opnd, num_args: u32, jit: &mut JITState, ctx:
let ary_opnd = asm.csel_nz(ary_opnd, heap_ptr_opnd);
for i in 0..num_args {
- let top = ctx.stack_push(asm, Type::Unknown);
+ let top = asm.stack_push(Type::Unknown);
asm.mov(top, Opnd::mem(64, ary_opnd, i as i32 * SIZEOF_VALUE_I32));
}
}
@@ -5420,15 +5304,14 @@ fn move_rest_args_to_stack(array: Opnd, num_args: u32, jit: &mut JITState, ctx:
/// Pushes arguments from an array to the stack that are passed with a splat (i.e. *args)
/// It optimistically compiles to a static size that is the exact number of arguments
/// needed for the function.
-fn push_splat_args(required_args: u32, jit: &mut JITState, ctx: &mut Context, asm: &mut Assembler, ocb: &mut OutlinedCb) {
+fn push_splat_args(required_args: u32, jit: &mut JITState, asm: &mut Assembler, ocb: &mut OutlinedCb) {
asm.comment("push_splat_args");
- let array_opnd = ctx.stack_opnd(0);
+ let array_opnd = asm.ctx.stack_opnd(0);
let array_reg = asm.load(array_opnd);
guard_object_is_array(
jit,
- ctx,
asm,
ocb,
array_reg,
@@ -5450,7 +5333,7 @@ fn push_splat_args(required_args: u32, jit: &mut JITState, ctx: &mut Context, as
asm.test(flags_opnd, (RARRAY_EMBED_FLAG as u64).into());
// Need to repeat this here to deal with register allocation
- let array_opnd = ctx.stack_opnd(0);
+ let array_opnd = asm.ctx.stack_opnd(0);
let array_reg = asm.load(array_opnd);
let array_len_opnd = Opnd::mem(
@@ -5462,12 +5345,12 @@ fn push_splat_args(required_args: u32, jit: &mut JITState, ctx: &mut Context, as
asm.comment("Side exit if length doesn't not equal remaining args");
asm.cmp(array_len_opnd, required_args.into());
- asm.jne(counted_exit!(jit, ctx, ocb, send_splatarray_length_not_equal));
+ asm.jne(counted_exit!(jit, &asm.ctx, ocb, send_splatarray_length_not_equal));
asm.comment("Check last argument is not ruby2keyword hash");
// Need to repeat this here to deal with register allocation
- let array_reg = asm.load(ctx.stack_opnd(0));
+ let array_reg = asm.load(asm.ctx.stack_opnd(0));
let ary_opnd = get_array_ptr(asm, array_reg);
@@ -5476,11 +5359,11 @@ fn push_splat_args(required_args: u32, jit: &mut JITState, ctx: &mut Context, as
guard_object_is_not_ruby2_keyword_hash(
asm,
last_array_value,
- counted_exit!(jit, ctx, ocb, send_splatarray_last_ruby_2_keywords),
+ counted_exit!(jit, &asm.ctx, ocb, send_splatarray_last_ruby_2_keywords),
);
asm.comment("Push arguments from array");
- let array_opnd = ctx.stack_pop(1);
+ let array_opnd = asm.stack_pop(1);
if required_args > 0 {
// Load the address of the embedded array
@@ -5502,7 +5385,7 @@ fn push_splat_args(required_args: u32, jit: &mut JITState, ctx: &mut Context, as
let ary_opnd = asm.csel_nz(ary_opnd, heap_ptr_opnd);
for i in 0..required_args {
- let top = ctx.stack_push(asm, Type::Unknown);
+ let top = asm.stack_push(Type::Unknown);
asm.mov(top, Opnd::mem(64, ary_opnd, i as i32 * SIZEOF_VALUE_I32));
}
@@ -5512,7 +5395,6 @@ fn push_splat_args(required_args: u32, jit: &mut JITState, ctx: &mut Context, as
fn gen_send_bmethod(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
ci: *const rb_callinfo,
@@ -5548,12 +5430,11 @@ fn gen_send_bmethod(
}
let frame_type = VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA;
- gen_send_iseq(jit, ctx, asm, ocb, iseq, ci, frame_type, Some(capture.ep), cme, block, flags, argc, None)
+ gen_send_iseq(jit, asm, ocb, iseq, ci, frame_type, Some(capture.ep), cme, block, flags, argc, None)
}
fn gen_send_iseq(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
iseq: *const rb_iseq_t,
@@ -5705,7 +5586,7 @@ fn gen_send_iseq(
let block_arg = flags & VM_CALL_ARGS_BLOCKARG != 0;
let block_arg_type = if block_arg {
- Some(ctx.get_opnd_type(StackOpnd(0)))
+ Some(asm.ctx.get_opnd_type(StackOpnd(0)))
} else {
None
};
@@ -5832,7 +5713,7 @@ fn gen_send_iseq(
}
}
let splat_array_length = if flags & VM_CALL_ARGS_SPLAT != 0 && !iseq_has_rest {
- let array = jit.peek_at_stack(ctx, if block_arg { 1 } else { 0 }) ;
+ let array = jit.peek_at_stack(&asm.ctx, if block_arg { 1 } else { 0 }) ;
let array_length = if array == Qnil {
0
} else {
@@ -5853,11 +5734,11 @@ fn gen_send_iseq(
match block_arg_type {
Some(Type::Nil) => {
// We have a nil block arg, so let's pop it off the args
- ctx.stack_pop(1);
+ asm.stack_pop(1);
}
Some(Type::BlockParamProxy) => {
// We don't need the actual stack value
- ctx.stack_pop(1);
+ asm.stack_pop(1);
}
None => {
// Nothing to do
@@ -5881,7 +5762,7 @@ fn gen_send_iseq(
// The callee may allocate, e.g. Integer#abs on a Bignum.
// Save SP for GC, save PC for allocation tracing, and prepare
// for global invalidation after GC's VM lock contention.
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
}
// Call the builtin func (ec, recv, arg1, arg2, ...)
@@ -5889,14 +5770,14 @@ fn gen_send_iseq(
// Copy self and arguments
for i in 0..=builtin_argc {
- let stack_opnd = ctx.stack_opnd(builtin_argc - i);
+ let stack_opnd = asm.ctx.stack_opnd(builtin_argc - i);
args.push(stack_opnd);
}
- ctx.stack_pop((builtin_argc + 1).try_into().unwrap());
+ asm.stack_pop((builtin_argc + 1).try_into().unwrap());
let val = asm.ccall(unsafe { (*builtin_info).func_ptr as *const u8 }, args);
// Push the return value
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, val);
// Note: assuming that the leaf builtin doesn't change local variables here.
@@ -5916,9 +5797,9 @@ fn gen_send_iseq(
let stack_max: i32 = unsafe { get_iseq_body_stack_max(iseq) }.try_into().unwrap();
let locals_offs =
SIZEOF_VALUE_I32 * (num_locals + stack_max) + 2 * (RUBY_SIZEOF_CONTROL_FRAME as i32);
- let stack_limit = asm.lea(ctx.sp_opnd(locals_offs as isize));
+ let stack_limit = asm.lea(asm.ctx.sp_opnd(locals_offs as isize));
asm.cmp(CFP, stack_limit);
- asm.jbe(counted_exit!(jit, ctx, ocb, send_se_cf_overflow));
+ asm.jbe(counted_exit!(jit, &asm.ctx, ocb, send_se_cf_overflow));
// push_splat_args does stack manipulation so we can no longer side exit
if let Some(array_length) = splat_array_length {
@@ -5937,30 +5818,30 @@ fn gen_send_iseq(
// all the remaining arguments. In the generated code
// we test if this is true and if not side exit.
argc = argc - 1 + array_length as i32 + remaining_opt as i32;
- push_splat_args(array_length, jit, ctx, asm, ocb);
+ push_splat_args(array_length, jit, asm, ocb);
for _ in 0..remaining_opt {
// We need to push nil for the optional arguments
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, Qnil.into());
}
}
// This is a .send call and we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
- handle_opt_send_shift_stack(asm, argc, ctx);
+ handle_opt_send_shift_stack(asm, argc);
}
if iseq_has_rest {
// We are going to allocate so setting pc and sp.
jit_save_pc(jit, asm);
- gen_save_sp(asm, ctx);
+ gen_save_sp(asm);
if flags & VM_CALL_ARGS_SPLAT != 0 {
let non_rest_arg_count = argc - 1;
// We start by dupping the array because someone else might have
// a reference to it.
- let array = ctx.stack_pop(1);
+ let array = asm.stack_pop(1);
let array = asm.ccall(
rb_ary_dup as *const u8,
vec![array],
@@ -5973,7 +5854,7 @@ fn gen_send_iseq(
// diff is >0 so no need to worry about null pointer
asm.comment("load pointer to array elements");
let offset_magnitude = SIZEOF_VALUE as u32 * diff;
- let values_opnd = ctx.sp_opnd(-(offset_magnitude as isize));
+ let values_opnd = asm.ctx.sp_opnd(-(offset_magnitude as isize));
let values_ptr = asm.lea(values_opnd);
asm.comment("prepend stack values to rest array");
@@ -5981,9 +5862,9 @@ fn gen_send_iseq(
rb_ary_unshift_m as *const u8,
vec![Opnd::UImm(diff as u64), values_ptr, array],
);
- ctx.stack_pop(diff as usize);
+ asm.stack_pop(diff as usize);
- let stack_ret = ctx.stack_push(asm, Type::TArray);
+ let stack_ret = asm.stack_push(Type::TArray);
asm.mov(stack_ret, array);
// We now should have the required arguments
// and an array of all the rest arguments
@@ -5993,12 +5874,12 @@ fn gen_send_iseq(
// from the array and move them to the stack.
let diff = (required_num - non_rest_arg_count) as u32;
// This moves the arguments onto the stack. But it doesn't modify the array.
- move_rest_args_to_stack(array, diff, jit, ctx, asm, ocb);
+ move_rest_args_to_stack(array, diff, jit, asm, ocb);
// We will now slice the array to give us a new array of the correct size
- asm.spill_temps(ctx); // for ccall
+ asm.spill_temps(); // for ccall
let ret = asm.ccall(rb_yjit_rb_ary_subseq_length as *const u8, vec![array, Opnd::UImm(diff as u64)]);
- let stack_ret = ctx.stack_push(asm, Type::TArray);
+ let stack_ret = asm.stack_push(Type::TArray);
asm.mov(stack_ret, ret);
// We now should have the required arguments
@@ -6007,7 +5888,7 @@ fn gen_send_iseq(
} else {
// The arguments are equal so we can just push to the stack
assert!(non_rest_arg_count == required_num);
- let stack_ret = ctx.stack_push(asm, Type::TArray);
+ let stack_ret = asm.stack_push(Type::TArray);
asm.mov(stack_ret, array);
}
} else {
@@ -6020,7 +5901,7 @@ fn gen_send_iseq(
} else {
asm.comment("load pointer to array elements");
let offset_magnitude = SIZEOF_VALUE as u32 * n;
- let values_opnd = ctx.sp_opnd(-(offset_magnitude as isize));
+ let values_opnd = asm.ctx.sp_opnd(-(offset_magnitude as isize));
asm.lea(values_opnd)
};
@@ -6032,8 +5913,8 @@ fn gen_send_iseq(
values_ptr
]
);
- ctx.stack_pop(n.as_usize());
- let stack_ret = ctx.stack_push(asm, Type::CArray);
+ asm.stack_pop(n.as_usize());
+ let stack_ret = asm.stack_push(Type::CArray);
asm.mov(stack_ret, new_ary);
}
}
@@ -6100,7 +5981,7 @@ fn gen_send_iseq(
// filling in (which is done in the next loop). Also increments
// argc so that the callee's SP is recorded correctly.
argc += 1;
- let default_arg = ctx.stack_push(asm, Type::Unknown);
+ let default_arg = asm.stack_push(Type::Unknown);
// callee_idx - keyword->required_num is used in a couple of places below.
let req_num: isize = unsafe { (*keyword).required_num }.try_into().unwrap();
@@ -6156,7 +6037,7 @@ fn gen_send_iseq(
let offset1: u16 = (argc - 1 - kwarg_idx_i32 - args_before_kw)
.try_into()
.unwrap();
- stack_swap(jit, ctx, asm, offset0, offset1);
+ stack_swap(asm, offset0, offset1);
// Next we're going to do some bookkeeping on our end so
// that we know the order that the arguments are
@@ -6172,8 +6053,8 @@ fn gen_send_iseq(
// pushed onto the stack that represents the parameters that weren't
// explicitly given a value and have a non-constant default.
let unspec_opnd = VALUE::fixnum_from_usize(unspecified_bits).as_u64();
- asm.spill_temps(ctx); // avoid using a register for unspecified_bits
- asm.mov(ctx.stack_opnd(-1), unspec_opnd.into());
+ asm.spill_temps(); // avoid using a register for unspecified_bits
+ asm.mov(asm.ctx.stack_opnd(-1), unspec_opnd.into());
}
// Same as vm_callee_setup_block_arg_arg0_check and vm_callee_setup_block_arg_arg0_splat
@@ -6182,43 +6063,43 @@ fn gen_send_iseq(
// side exits, so you still need to allow side exits here if block_arg0_splat is true.
// Note that you can't have side exits after this arg0 splat.
if block_arg0_splat {
- let arg0_opnd = ctx.stack_opnd(0);
+ let arg0_opnd = asm.ctx.stack_opnd(0);
// Only handle the case that you don't need to_ary conversion
let not_array_counter = exit_counter!(invokeblock_iseq_arg0_not_array);
- guard_object_is_array(jit, ctx, asm, ocb, arg0_opnd, arg0_opnd.into(), not_array_counter);
+ guard_object_is_array(jit, asm, ocb, arg0_opnd, arg0_opnd.into(), not_array_counter);
        // Only handle the case that the array length == ISEQ's lead_num (most common)
let arg0_len_opnd = get_array_len(asm, arg0_opnd);
let lead_num = unsafe { rb_get_iseq_body_param_lead_num(iseq) };
asm.cmp(arg0_len_opnd, lead_num.into());
- asm.jne(counted_exit!(jit, ctx, ocb, invokeblock_iseq_arg0_wrong_len));
+ asm.jne(counted_exit!(jit, &asm.ctx, ocb, invokeblock_iseq_arg0_wrong_len));
let arg0_reg = asm.load(arg0_opnd);
let array_opnd = get_array_ptr(asm, arg0_reg);
asm.comment("push splat arg0 onto the stack");
- ctx.stack_pop(argc.try_into().unwrap());
+ asm.stack_pop(argc.try_into().unwrap());
for i in 0..lead_num {
- let stack_opnd = ctx.stack_push(asm, Type::Unknown);
+ let stack_opnd = asm.stack_push(Type::Unknown);
asm.mov(stack_opnd, Opnd::mem(64, array_opnd, SIZEOF_VALUE_I32 * i));
}
argc = lead_num;
}
// Spill stack temps to let the callee use them
- asm.spill_temps(ctx);
+ asm.spill_temps();
// Points to the receiver operand on the stack unless a captured environment is used
let recv = match captured_opnd {
Some(captured_opnd) => asm.load(Opnd::mem(64, captured_opnd, 0)), // captured->self
- _ => ctx.stack_opnd(argc),
+ _ => asm.ctx.stack_opnd(argc),
};
let captured_self = captured_opnd.is_some();
let sp_offset = (argc as isize) + if captured_self { 0 } else { 1 };
// Store the updated SP on the current frame (pop arguments and receiver)
asm.comment("store caller sp");
- let caller_sp = asm.lea(ctx.sp_opnd((SIZEOF_VALUE as isize) * -sp_offset));
+ let caller_sp = asm.lea(asm.ctx.sp_opnd((SIZEOF_VALUE as isize) * -sp_offset));
asm.store(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP), caller_sp);
// Store the next PC in the current frame
@@ -6227,7 +6108,7 @@ fn gen_send_iseq(
// Adjust the callee's stack pointer
let offs =
(SIZEOF_VALUE as isize) * (3 + (num_locals as isize) + if doing_kw_call { 1 } else { 0 });
- let callee_sp = asm.lea(ctx.sp_opnd(offs));
+ let callee_sp = asm.lea(asm.ctx.sp_opnd(offs));
let specval = if let Some(prev_ep) = prev_ep {
// We've already side-exited if the callee expects a block, so we
@@ -6273,32 +6154,33 @@ fn gen_send_iseq(
// Set the argument types in the callee's context
for arg_idx in 0..argc {
let stack_offs: u8 = (argc - arg_idx - 1).try_into().unwrap();
- let arg_type = ctx.get_opnd_type(StackOpnd(stack_offs));
+ let arg_type = asm.ctx.get_opnd_type(StackOpnd(stack_offs));
callee_ctx.set_local_type(arg_idx.try_into().unwrap(), arg_type);
}
let recv_type = if captured_self {
Type::Unknown // we don't track the type information of captured->self for now
} else {
- ctx.get_opnd_type(StackOpnd(argc.try_into().unwrap()))
+ asm.ctx.get_opnd_type(StackOpnd(argc.try_into().unwrap()))
};
callee_ctx.upgrade_opnd_type(SelfOpnd, recv_type);
// The callee might change locals through Kernel#binding and other means.
- ctx.clear_local_types();
+ asm.ctx.clear_local_types();
// Pop arguments and receiver in return context, push the return value
// After the return, sp_offset will be 1. The codegen for leave writes
// the return value in case of JIT-to-JIT return.
- let mut return_ctx = ctx.clone();
- return_ctx.stack_pop(sp_offset.try_into().unwrap());
- let return_val = return_ctx.stack_push(asm, Type::Unknown);
+ let mut return_asm = Assembler::new();
+ return_asm.ctx = asm.ctx.clone();
+ return_asm.stack_pop(sp_offset.try_into().unwrap());
+ let return_val = return_asm.stack_push(Type::Unknown);
if return_val.stack_idx() < MAX_REG_TEMPS {
// The callee writes a return value on stack. Update reg_temps accordingly.
- return_ctx.dealloc_temp_reg(return_val.stack_idx());
+ return_asm.ctx.dealloc_temp_reg(return_val.stack_idx());
}
- return_ctx.set_sp_offset(1);
- return_ctx.reset_chain_depth();
+ return_asm.ctx.set_sp_offset(1);
+ return_asm.ctx.reset_chain_depth();
// Write the JIT return address on the callee frame
gen_branch(
@@ -6306,7 +6188,7 @@ fn gen_send_iseq(
asm,
ocb,
return_block,
- &return_ctx,
+ &return_asm.ctx,
None,
None,
BranchGenFn::JITReturn,
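
Since the push/pop helpers now live on Assembler, the context of the JIT-to-JIT return target above is derived by cloning the current asm.ctx into a throwaway Assembler (return_asm), mutating that, and handing &return_asm.ctx to gen_branch, so the caller's own context stays untouched. A minimal standalone model of the pattern follows; Ctx and Asm here are simplified stand-ins, not YJIT's real Context and Assembler.

    #[derive(Clone, Default)]
    struct Ctx {
        stack_size: u8,
        sp_offset: i8,
        chain_depth: u8,
    }

    #[derive(Default)]
    struct Asm {
        ctx: Ctx, // instructions and register state would also live here
    }

    impl Asm {
        fn stack_push(&mut self) {
            // the real stack_push may also allocate a register for the new slot
            self.ctx.stack_size += 1;
            self.ctx.sp_offset += 1;
        }
        fn stack_pop(&mut self, n: u8) {
            self.ctx.stack_size -= n;
            self.ctx.sp_offset -= n as i8;
        }
    }

    fn main() {
        let mut asm = Asm::default();
        for _ in 0..3 {
            asm.stack_push(); // receiver plus two arguments
        }

        // Clone the current context into a throwaway assembler for the return target.
        let mut return_asm = Asm::default();
        return_asm.ctx = asm.ctx.clone();
        return_asm.stack_pop(3);      // pop arguments and receiver
        return_asm.stack_push();      // the callee's return value
        return_asm.ctx.sp_offset = 1; // the return block expects sp_offset == 1
        return_asm.ctx.chain_depth = 0;

        assert_eq!(asm.ctx.stack_size, 3);        // caller's context is untouched
        assert_eq!(return_asm.ctx.stack_size, 1); // only the return value remains
        assert_eq!(return_asm.ctx.sp_offset, 1);
        assert_eq!(return_asm.ctx.chain_depth, 0);
    }
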
@@ -6328,7 +6210,6 @@ fn gen_send_iseq(
fn gen_struct_aref(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
ci: *const rb_callinfo,
@@ -6361,7 +6242,7 @@ fn gen_struct_aref(
// This is a .send call and we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
- handle_opt_send_shift_stack(asm, argc, ctx);
+ handle_opt_send_shift_stack(asm, argc);
}
// All structs from the same Struct class should have the same
@@ -6372,7 +6253,8 @@ fn gen_struct_aref(
asm.comment("struct aref");
- let recv = asm.load(ctx.stack_pop(1));
+ let recv = asm.stack_pop(1);
+ let recv = asm.load(recv);
let val = if embedded != VALUE(0) {
Opnd::mem(64, recv, RUBY_OFFSET_RSTRUCT_AS_ARY + (SIZEOF_VALUE_I32 * off))
@@ -6381,16 +6263,15 @@ fn gen_struct_aref(
Opnd::mem(64, rstruct_ptr, SIZEOF_VALUE_I32 * off)
};
- let ret = ctx.stack_push(asm, Type::Unknown);
+ let ret = asm.stack_push(Type::Unknown);
asm.mov(ret, val);
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
EndBlock
}
fn gen_struct_aset(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
ci: *const rb_callinfo,
@@ -6405,7 +6286,7 @@ fn gen_struct_aset(
// This is a .send call and we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
- handle_opt_send_shift_stack(asm, argc, ctx);
+ handle_opt_send_shift_stack(asm, argc);
}
let off: i32 = unsafe { get_cme_def_body_optimized_index(cme) }
@@ -6418,22 +6299,21 @@ fn gen_struct_aset(
asm.comment("struct aset");
- asm.spill_temps(ctx); // for ccall (must be done before stack_pop)
- let val = ctx.stack_pop(1);
- let recv = ctx.stack_pop(1);
+ asm.spill_temps(); // for ccall (must be done before stack_pop)
+ let val = asm.stack_pop(1);
+ let recv = asm.stack_pop(1);
let val = asm.ccall(RSTRUCT_SET as *const u8, vec![recv, (off as i64).into(), val]);
- let ret = ctx.stack_push(asm, Type::Unknown);
+ let ret = asm.stack_push(Type::Unknown);
asm.mov(ret, val);
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
EndBlock
}
fn gen_send_general(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
cd: *const rb_call_data,
@@ -6462,16 +6342,16 @@ fn gen_send_general(
// Defer compilation so we can specialize on class of receiver
if !jit.at_current_insn() {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
let recv_idx = argc + if flags & VM_CALL_ARGS_BLOCKARG != 0 { 1 } else { 0 };
- let comptime_recv = jit.peek_at_stack(ctx, recv_idx as isize);
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, recv_idx as isize);
let comptime_recv_klass = comptime_recv.class_of();
// Points to the receiver operand on the stack
- let recv = ctx.stack_opnd(recv_idx);
+ let recv = asm.ctx.stack_opnd(recv_idx);
let recv_opnd: YARVOpnd = recv.into();
// Log the name of the method we're calling to
@@ -6489,16 +6369,15 @@ fn gen_send_general(
// Gather some statistics about sends
gen_counter_incr!(asm, num_send);
- if let Some(_known_klass) = ctx.get_opnd_type(recv_opnd).known_class() {
+ if let Some(_known_klass) = asm.ctx.get_opnd_type(recv_opnd).known_class() {
gen_counter_incr!(asm, num_send_known_class);
}
- if ctx.get_chain_depth() > 1 {
+ if asm.ctx.get_chain_depth() > 1 {
gen_counter_incr!(asm, num_send_polymorphic);
}
jit_guard_known_klass(
jit,
- ctx,
asm,
ocb,
comptime_recv_klass,
@@ -6533,7 +6412,7 @@ fn gen_send_general(
if flags & VM_CALL_FCALL == 0 {
// otherwise we need an ancestry check to ensure the receiver is valid to be called
// as protected
- jit_protected_callee_ancestry_guard(jit, ctx, asm, ocb, cme);
+ jit_protected_callee_ancestry_guard(jit, asm, ocb, cme);
}
}
_ => {
@@ -6553,12 +6432,11 @@ fn gen_send_general(
VM_METHOD_TYPE_ISEQ => {
let iseq = unsafe { get_def_iseq_ptr((*cme).def) };
let frame_type = VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL;
- return gen_send_iseq(jit, ctx, asm, ocb, iseq, ci, frame_type, None, cme, block, flags, argc, None);
+ return gen_send_iseq(jit, asm, ocb, iseq, ci, frame_type, None, cme, block, flags, argc, None);
}
VM_METHOD_TYPE_CFUNC => {
return gen_send_cfunc(
jit,
- ctx,
asm,
ocb,
ci,
@@ -6610,7 +6488,6 @@ fn gen_send_general(
return gen_get_ivar(
jit,
- ctx,
asm,
ocb,
SEND_MAX_DEPTH,
@@ -6641,7 +6518,7 @@ fn gen_send_general(
return CantCompile;
} else {
let ivar_name = unsafe { get_cme_def_body_attr_id(cme) };
- return gen_set_ivar(jit, ctx, asm, ivar_name, flags, argc);
+ return gen_set_ivar(jit, asm, ivar_name, flags, argc);
}
}
// Block method, e.g. define_method(:foo) { :my_block }
@@ -6650,7 +6527,7 @@ fn gen_send_general(
gen_counter_incr!(asm, send_args_splat_bmethod);
return CantCompile;
}
- return gen_send_bmethod(jit, ctx, asm, ocb, ci, cme, block, flags, argc);
+ return gen_send_bmethod(jit, asm, ocb, ci, cme, block, flags, argc);
}
VM_METHOD_TYPE_ALIAS => {
// Retrieve the aliased method and re-enter the switch
@@ -6689,7 +6566,7 @@ fn gen_send_general(
argc -= 1;
- let compile_time_name = jit.peek_at_stack(ctx, argc as isize);
+ let compile_time_name = jit.peek_at_stack(&asm.ctx, argc as isize);
if !compile_time_name.string_p() && !compile_time_name.static_sym_p() {
gen_counter_incr!(asm, send_send_chain_not_string_or_sym);
@@ -6727,10 +6604,9 @@ fn gen_send_general(
}
};
- let name_opnd = ctx.stack_opnd(argc);
+ let name_opnd = asm.ctx.stack_opnd(argc);
jit_guard_known_klass(
jit,
- ctx,
asm,
ocb,
known_class,
@@ -6745,7 +6621,7 @@ fn gen_send_general(
// values for the register allocator.
let name_opnd = asm.load(name_opnd);
- asm.spill_temps(ctx); // for ccall
+ asm.spill_temps(); // for ccall
let symbol_id_opnd = asm.ccall(rb_get_symbol_id as *const u8, vec![name_opnd]);
asm.comment("chain_guard_send");
@@ -6753,7 +6629,6 @@ fn gen_send_general(
jit_chain_guard(
JCC_JNE,
jit,
- &ctx,
asm,
ocb,
SEND_MAX_CHAIN_DEPTH,
@@ -6791,16 +6666,16 @@ fn gen_send_general(
// If this is a .send call we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
- handle_opt_send_shift_stack(asm, argc, ctx);
+ handle_opt_send_shift_stack(asm, argc);
}
// About to reset the SP, need to load this here
let recv_load = asm.load(recv);
- let sp = asm.lea(ctx.sp_opnd(0));
+ let sp = asm.lea(asm.ctx.sp_opnd(0));
// Save the PC and SP because the callee can make Ruby calls
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
let kw_splat = flags & VM_CALL_KW_SPLAT;
let stack_argument_pointer = asm.lea(Opnd::mem(64, sp, -(argc) * SIZEOF_VALUE_I32));
@@ -6814,9 +6689,9 @@ fn gen_send_general(
VM_BLOCK_HANDLER_NONE.into(),
]);
- ctx.stack_pop(argc as usize + 1);
+ asm.stack_pop(argc as usize + 1);
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, ret);
return KeepCompiling;
@@ -6832,7 +6707,6 @@ fn gen_send_general(
}
return gen_struct_aref(
jit,
- ctx,
asm,
ocb,
ci,
@@ -6849,7 +6723,6 @@ fn gen_send_general(
}
return gen_struct_aset(
jit,
- ctx,
asm,
ocb,
ci,
@@ -6907,46 +6780,43 @@ fn gen_send_general(
///--+------+--------+------+------
///
/// We do this for our compiletime context and the actual stack
-fn handle_opt_send_shift_stack(asm: &mut Assembler, argc: i32, ctx: &mut Context) {
+fn handle_opt_send_shift_stack(asm: &mut Assembler, argc: i32) {
asm.comment("shift_stack");
for j in (0..argc).rev() {
- let opnd = ctx.stack_opnd(j);
- let opnd2 = ctx.stack_opnd(j + 1);
+ let opnd = asm.ctx.stack_opnd(j);
+ let opnd2 = asm.ctx.stack_opnd(j + 1);
asm.mov(opnd2, opnd);
}
- ctx.shift_stack(argc as usize);
+ asm.ctx.shift_stack(argc as usize);
}
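
handle_opt_send_shift_stack removes the method-name slot that a .send call leaves below its arguments: each argument is copied one slot deeper (overwriting the name), and the compile-time stack is then shrunk by one via ctx.shift_stack. Below is a small standalone model using a plain Vec in place of the VM stack; it is illustrative only, not YJIT code.

    // Shift the arguments down over the name slot, then drop the stale top slot.
    fn shift_stack_model(stack: &mut Vec<&str>, argc: usize) {
        assert!(stack.len() > argc); // the name slot must sit below the args
        let top = stack.len() - 1;
        let idx = |j: usize| top - j; // stack_opnd(j) is the j-th slot from the top
        for j in (0..argc).rev() {
            let val = stack[idx(j)];
            stack[idx(j + 1)] = val;
        }
        // ctx.shift_stack(argc) then shrinks the compile-time stack by one;
        // in this model that is just a pop.
        stack.pop();
    }

    fn main() {
        // obj.send(:foo, 1, 2) leaves [recv, :foo, 1, 2] on the stack, argc == 2
        let mut stack = vec!["recv", ":foo", "1", "2"];
        shift_stack_model(&mut stack, 2);
        assert_eq!(stack, ["recv", "1", "2"]);
    }
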
fn gen_opt_send_without_block(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
let cd = jit.get_arg(0).as_ptr();
- gen_send_general(jit, ctx, asm, ocb, cd, None)
+ gen_send_general(jit, asm, ocb, cd, None)
}
fn gen_send(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
let cd = jit.get_arg(0).as_ptr();
let block = jit.get_arg(1).as_optional_ptr();
- return gen_send_general(jit, ctx, asm, ocb, cd, block);
+ return gen_send_general(jit, asm, ocb, cd, block);
}
fn gen_invokeblock(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
if !jit.at_current_insn() {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
@@ -6978,7 +6848,6 @@ fn gen_invokeblock(
jit_chain_guard(
JCC_JNE,
jit,
- ctx,
asm,
ocb,
SEND_MAX_CHAIN_DEPTH,
@@ -6995,7 +6864,6 @@ fn gen_invokeblock(
jit_chain_guard(
JCC_JNE,
jit,
- ctx,
asm,
ocb,
SEND_MAX_CHAIN_DEPTH,
@@ -7004,7 +6872,6 @@ fn gen_invokeblock(
gen_send_iseq(
jit,
- ctx,
asm,
ocb,
comptime_iseq,
@@ -7040,7 +6907,6 @@ fn gen_invokeblock(
jit_chain_guard(
JCC_JNE,
jit,
- ctx,
asm,
ocb,
SEND_MAX_CHAIN_DEPTH,
@@ -7048,28 +6914,28 @@ fn gen_invokeblock(
);
// The cfunc may not be leaf
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
extern "C" {
fn rb_vm_yield_with_cfunc(ec: EcPtr, captured: *const rb_captured_block, argc: c_int, argv: *const VALUE) -> VALUE;
}
asm.comment("call ifunc");
let captured_opnd = asm.and(block_handler_opnd, Opnd::Imm(!0x3));
- let argv = asm.lea(ctx.sp_opnd((-argc * SIZEOF_VALUE_I32) as isize));
+ let argv = asm.lea(asm.ctx.sp_opnd((-argc * SIZEOF_VALUE_I32) as isize));
let ret = asm.ccall(
rb_vm_yield_with_cfunc as *const u8,
vec![EC, captured_opnd, argc.into(), argv],
);
- ctx.stack_pop(argc.try_into().unwrap());
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ asm.stack_pop(argc.try_into().unwrap());
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, ret);
// cfunc calls may corrupt types
- ctx.clear_local_types();
+ asm.ctx.clear_local_types();
// Share the successor with other chains
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
EndBlock
} else if comptime_handler.symbol_p() {
gen_counter_incr!(asm, invokeblock_symbol);
@@ -7082,7 +6948,6 @@ fn gen_invokeblock(
fn gen_invokesuper(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -7091,7 +6956,7 @@ fn gen_invokesuper(
// Defer compilation so we can specialize on class of receiver
if !jit.at_current_insn() {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
@@ -7147,7 +7012,7 @@ fn gen_invokesuper(
// cheaper calculations first, but since we specialize on the method entry
// and so only have to do this once at compile time this is fine to always
// check and side exit.
- let comptime_recv = jit.peek_at_stack(ctx, argc as isize);
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, argc as isize);
if unsafe { rb_obj_is_kind_of(comptime_recv, current_defined_class) } == VALUE(0) {
return CantCompile;
}
@@ -7176,7 +7041,7 @@ fn gen_invokesuper(
let me_as_value = VALUE(me as usize);
asm.cmp(ep_me_opnd, me_as_value.into());
- asm.jne(counted_exit!(jit, ctx, ocb, invokesuper_me_changed));
+ asm.jne(counted_exit!(jit, &asm.ctx, ocb, invokesuper_me_changed));
if block.is_none() {
// Guard no block passed
@@ -7192,7 +7057,7 @@ fn gen_invokesuper(
SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL,
);
asm.cmp(ep_specval_opnd, VM_BLOCK_HANDLER_NONE.into());
- asm.jne(counted_exit!(jit, ctx, ocb, invokesuper_block));
+ asm.jne(counted_exit!(jit, &asm.ctx, ocb, invokesuper_block));
}
// We need to assume that both our current method entry and the super
@@ -7201,16 +7066,16 @@ fn gen_invokesuper(
jit.assume_method_lookup_stable(ocb, cme);
// Method calls may corrupt types
- ctx.clear_local_types();
+ asm.ctx.clear_local_types();
match cme_def_type {
VM_METHOD_TYPE_ISEQ => {
let iseq = unsafe { get_def_iseq_ptr((*cme).def) };
let frame_type = VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL;
- gen_send_iseq(jit, ctx, asm, ocb, iseq, ci, frame_type, None, cme, block, ci_flags, argc, None)
+ gen_send_iseq(jit, asm, ocb, iseq, ci, frame_type, None, cme, block, ci_flags, argc, None)
}
VM_METHOD_TYPE_CFUNC => {
- gen_send_cfunc(jit, ctx, asm, ocb, ci, cme, block, ptr::null(), ci_flags, argc)
+ gen_send_cfunc(jit, asm, ocb, ci, cme, block, ptr::null(), ci_flags, argc)
}
_ => unreachable!(),
}
@@ -7218,17 +7083,16 @@ fn gen_invokesuper(
fn gen_leave(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Only the return value should be on the stack
- assert_eq!(1, ctx.get_stack_size());
+ assert_eq!(1, asm.ctx.get_stack_size());
let ocb_asm = Assembler::new();
// Check for interrupts
- gen_check_ints(jit, ctx, asm, ocb, exit_counter!(leave_se_interrupt));
+ gen_check_ints(jit, asm, ocb, exit_counter!(leave_se_interrupt));
ocb_asm.compile(ocb.unwrap());
// Pop the current frame (ec->cfp++)
@@ -7239,7 +7103,7 @@ fn gen_leave(
asm.mov(Opnd::mem(64, EC, RUBY_OFFSET_EC_CFP), CFP);
// Load the return value
- let retval_opnd = ctx.stack_pop(1);
+ let retval_opnd = asm.stack_pop(1);
// Move the return value into the C return register for gen_leave_exit()
asm.mov(C_RET_OPND, retval_opnd);
@@ -7259,21 +7123,20 @@ fn gen_leave(
fn gen_getglobal(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
let gid = jit.get_arg(0).as_usize();
// Save the PC and SP because we might make a Ruby call for warning
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
let val_opnd = asm.ccall(
rb_gvar_get as *const u8,
vec![ gid.into() ]
);
- let top = ctx.stack_push(asm, Type::Unknown);
+ let top = asm.stack_push(Type::Unknown);
asm.mov(top, val_opnd);
KeepCompiling
@@ -7281,7 +7144,6 @@ fn gen_getglobal(
fn gen_setglobal(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -7289,13 +7151,14 @@ fn gen_setglobal(
// Save the PC and SP because we might make a Ruby call for
// Kernel#set_trace_var
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
+ let val = asm.stack_pop(1);
asm.ccall(
rb_gvar_set as *const u8,
vec![
gid.into(),
- ctx.stack_pop(1),
+ val,
],
);
@@ -7304,20 +7167,19 @@ fn gen_setglobal(
fn gen_anytostring(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Save the PC and SP since we might call #to_s
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
- let str = ctx.stack_pop(1);
- let val = ctx.stack_pop(1);
+ let str = asm.stack_pop(1);
+ let val = asm.stack_pop(1);
let val = asm.ccall(rb_obj_as_string_result as *const u8, vec![str, val]);
// Push the return value
- let stack_ret = ctx.stack_push(asm, Type::TString);
+ let stack_ret = asm.stack_push(Type::TString);
asm.mov(stack_ret, val);
KeepCompiling
@@ -7325,22 +7187,20 @@ fn gen_anytostring(
fn gen_objtostring(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
if !jit.at_current_insn() {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
- let recv = ctx.stack_opnd(0);
- let comptime_recv = jit.peek_at_stack(ctx, 0);
+ let recv = asm.ctx.stack_opnd(0);
+ let comptime_recv = jit.peek_at_stack(&asm.ctx, 0);
if unsafe { RB_TYPE_P(comptime_recv, RUBY_T_STRING) } {
jit_guard_known_klass(
jit,
- ctx,
asm,
ocb,
comptime_recv.class_of(),
@@ -7355,24 +7215,23 @@ fn gen_objtostring(
KeepCompiling
} else {
let cd = jit.get_arg(0).as_ptr();
- gen_send_general(jit, ctx, asm, ocb, cd, None)
+ gen_send_general(jit, asm, ocb, cd, None)
}
}
fn gen_intern(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
// Save the PC and SP because we might allocate
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
- let str = ctx.stack_pop(1);
+ let str = asm.stack_pop(1);
let sym = asm.ccall(rb_str_intern as *const u8, vec![str]);
// Push the return value
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, sym);
KeepCompiling
@@ -7380,7 +7239,6 @@ fn gen_intern(
fn gen_toregexp(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -7389,10 +7247,10 @@ fn gen_toregexp(
// Save the PC and SP because this allocates an object and could
// raise an exception.
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
- let values_ptr = asm.lea(ctx.sp_opnd(-((SIZEOF_VALUE as isize) * (cnt as isize))));
- ctx.stack_pop(cnt);
+ let values_ptr = asm.lea(asm.ctx.sp_opnd(-((SIZEOF_VALUE as isize) * (cnt as isize))));
+ asm.stack_pop(cnt);
let ary = asm.ccall(
rb_ary_tmp_new_from_values as *const u8,
@@ -7421,11 +7279,11 @@ fn gen_toregexp(
asm.cpop_into(ary);
// The value we want to push on the stack is in RAX right now
- let stack_ret = ctx.stack_push(asm, Type::UnknownHeap);
+ let stack_ret = asm.stack_push(Type::UnknownHeap);
asm.mov(stack_ret, val);
// Clear the temp array.
- asm.spill_temps(ctx); // for ccall
+ asm.spill_temps(); // for ccall
asm.ccall(rb_ary_clear as *const u8, vec![ary]);
KeepCompiling
@@ -7433,7 +7291,6 @@ fn gen_toregexp(
fn gen_getspecial(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -7450,7 +7307,7 @@ fn gen_getspecial(
// Fetch a "special" backref based on a char encoded by shifting by 1
// Can raise if matchdata uninitialized
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// call rb_backref_get()
asm.comment("rb_backref_get");
@@ -7477,7 +7334,7 @@ fn gen_getspecial(
_ => panic!("invalid back-ref"),
};
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, val);
KeepCompiling
@@ -7485,7 +7342,7 @@ fn gen_getspecial(
// Fetch the N-th match from the last backref based on type shifted by 1
// Can raise if matchdata uninitialized
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// call rb_backref_get()
asm.comment("rb_backref_get");
@@ -7501,7 +7358,7 @@ fn gen_getspecial(
]
);
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, val);
KeepCompiling
@@ -7510,12 +7367,11 @@ fn gen_getspecial(
fn gen_getclassvariable(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
// rb_vm_getclassvariable can raise exceptions.
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
let val_opnd = asm.ccall(
rb_vm_getclassvariable as *const u8,
@@ -7527,7 +7383,7 @@ fn gen_getclassvariable(
],
);
- let top = ctx.stack_push(asm, Type::Unknown);
+ let top = asm.stack_push(Type::Unknown);
asm.mov(top, val_opnd);
KeepCompiling
@@ -7535,20 +7391,20 @@ fn gen_getclassvariable(
fn gen_setclassvariable(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
// rb_vm_setclassvariable can raise exceptions.
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
+ let val = asm.stack_pop(1);
asm.ccall(
rb_vm_setclassvariable as *const u8,
vec![
Opnd::mem(64, CFP, RUBY_OFFSET_CFP_ISEQ),
CFP,
Opnd::UImm(jit.get_arg(0).as_u64()),
- ctx.stack_pop(1),
+ val,
Opnd::UImm(jit.get_arg(1).as_u64()),
],
);
@@ -7558,7 +7414,6 @@ fn gen_setclassvariable(
fn gen_getconstant(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -7566,10 +7421,10 @@ fn gen_getconstant(
let id = jit.get_arg(0).as_usize();
// vm_get_ev_const can raise exceptions.
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
- let allow_nil_opnd = ctx.stack_pop(1);
- let klass_opnd = ctx.stack_pop(1);
+ let allow_nil_opnd = asm.stack_pop(1);
+ let klass_opnd = asm.stack_pop(1);
extern "C" {
fn rb_vm_get_ev_const(ec: EcPtr, klass: VALUE, id: ID, allow_nil: VALUE) -> VALUE;
@@ -7585,7 +7440,7 @@ fn gen_getconstant(
],
);
- let top = ctx.stack_push(asm, Type::Unknown);
+ let top = asm.stack_push(Type::Unknown);
asm.mov(top, val_opnd);
KeepCompiling
@@ -7593,7 +7448,6 @@ fn gen_getconstant(
fn gen_opt_getconstant_path(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -7618,7 +7472,7 @@ fn gen_opt_getconstant_path(
let inline_cache = asm.load(Opnd::const_ptr(ic as *const u8));
// Call function to verify the cache. It doesn't allocate or call methods.
- asm.spill_temps(ctx); // for ccall
+ asm.spill_temps(); // for ccall
let ret_val = asm.ccall(
rb_vm_ic_hit_p as *const u8,
vec![inline_cache, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_EP)]
@@ -7627,7 +7481,7 @@ fn gen_opt_getconstant_path(
// Check the result. SysV only specifies one byte for _Bool return values,
// so it's important we only check one bit to ignore the higher bits in the register.
asm.test(ret_val, 1.into());
- asm.jz(counted_exit!(jit, ctx, ocb, opt_getinlinecache_miss));
+ asm.jz(counted_exit!(jit, &asm.ctx, ocb, opt_getinlinecache_miss));
let inline_cache = asm.load(Opnd::const_ptr(ic as *const u8));
@@ -7644,7 +7498,7 @@ fn gen_opt_getconstant_path(
));
// Push ic->entry->value
- let stack_top = ctx.stack_push(asm, Type::Unknown);
+ let stack_top = asm.stack_push(Type::Unknown);
asm.store(stack_top, ic_entry_val);
} else {
// Optimize for single ractor mode.
@@ -7656,10 +7510,10 @@ fn gen_opt_getconstant_path(
// constants referenced within the current block.
jit.assume_stable_constant_names(ocb, idlist);
- jit_putobject(jit, ctx, asm, unsafe { (*ice).value });
+ jit_putobject(asm, unsafe { (*ice).value });
}
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
EndBlock
}
@@ -7668,12 +7522,11 @@ fn gen_opt_getconstant_path(
// explicit block parameters.
fn gen_getblockparamproxy(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
if !jit.at_current_insn() {
- defer_compilation(jit, ctx, asm, ocb);
+ defer_compilation(jit, asm, ocb);
return EndBlock;
}
@@ -7702,7 +7555,7 @@ fn gen_getblockparamproxy(
SIZEOF_VALUE_I32 * (VM_ENV_DATA_INDEX_FLAGS as i32),
);
asm.test(flag_check, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM.into());
- asm.jnz(counted_exit!(jit, ctx, ocb, gbpp_block_param_modified));
+ asm.jnz(counted_exit!(jit, &asm.ctx, ocb, gbpp_block_param_modified));
// Load the block handler for the current frame
// note, VM_ASSERT(VM_ENV_LOCAL_P(ep))
@@ -7718,14 +7571,13 @@ fn gen_getblockparamproxy(
jit_chain_guard(
JCC_JNZ,
jit,
- &ctx,
asm,
ocb,
SEND_MAX_DEPTH,
None,
);
- jit_putobject(jit, ctx, asm, Qnil);
+ jit_putobject(asm, Qnil);
} else {
// Block handler is a tagged pointer. Look at the tag. 0x03 is from VM_BH_ISEQ_BLOCK_P().
let block_handler = asm.and(block_handler, 0x3.into());
@@ -7736,7 +7588,6 @@ fn gen_getblockparamproxy(
jit_chain_guard(
JCC_JNZ,
jit,
- &ctx,
asm,
ocb,
SEND_MAX_DEPTH,
@@ -7746,18 +7597,17 @@ fn gen_getblockparamproxy(
// Push rb_block_param_proxy. It's a root, so no need to use jit_mov_gc_ptr.
assert!(!unsafe { rb_block_param_proxy }.special_const_p());
- let top = ctx.stack_push(asm, Type::BlockParamProxy);
+ let top = asm.stack_push(Type::BlockParamProxy);
asm.mov(top, Opnd::const_ptr(unsafe { rb_block_param_proxy }.as_ptr()));
}
- jump_to_next_insn(jit, ctx, asm, ocb);
+ jump_to_next_insn(jit, asm, ocb);
EndBlock
}
fn gen_getblockparam(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -7765,7 +7615,7 @@ fn gen_getblockparam(
let level = jit.get_arg(1).as_u32();
// Save the PC and SP because we might allocate
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// A mirror of the interpreter code. Checking for the case
// where it's pushing rb_block_param_proxy.
@@ -7796,7 +7646,7 @@ fn gen_getblockparam(
asm.test(flags_opnd, VM_ENV_FLAG_WB_REQUIRED.into());
// if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
- asm.jnz(side_exit(jit, ctx, ocb));
+ asm.jnz(side_exit(jit, &asm.ctx, ocb));
// Convert the block handler in to a proc
// call rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
@@ -7830,7 +7680,7 @@ fn gen_getblockparam(
asm.write_label(frame_flag_modified);
// Push the proc on the stack
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ let stack_ret = asm.stack_push(Type::Unknown);
let ep_opnd = gen_get_ep(asm, level);
asm.mov(stack_ret, Opnd::mem(64, ep_opnd, offs));
@@ -7839,7 +7689,6 @@ fn gen_getblockparam(
fn gen_invokebuiltin(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -7852,22 +7701,22 @@ fn gen_invokebuiltin(
}
// If the calls don't allocate, do they need up to date PC, SP?
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// Call the builtin func (ec, recv, arg1, arg2, ...)
let mut args = vec![EC, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF)];
// Copy arguments from locals
for i in 0..bf_argc {
- let stack_opnd = ctx.stack_opnd((bf_argc - i - 1) as i32);
+ let stack_opnd = asm.ctx.stack_opnd((bf_argc - i - 1) as i32);
args.push(stack_opnd);
}
let val = asm.ccall(unsafe { (*bf).func_ptr } as *const u8, args);
// Push the return value
- ctx.stack_pop(bf_argc);
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ asm.stack_pop(bf_argc);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, val);
KeepCompiling
@@ -7878,7 +7727,6 @@ fn gen_invokebuiltin(
// stack uses the argument locals (and self) from the current method.
fn gen_opt_invokebuiltin_delegate(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
_ocb: &mut OutlinedCb,
) -> CodegenStatus {
@@ -7892,7 +7740,7 @@ fn gen_opt_invokebuiltin_delegate(
}
// If the calls don't allocate, do they need up to date PC, SP?
- jit_prepare_routine_call(jit, ctx, asm);
+ jit_prepare_routine_call(jit, asm);
// Call the builtin func (ec, recv, arg1, arg2, ...)
let mut args = vec![EC, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF)];
@@ -7912,7 +7760,7 @@ fn gen_opt_invokebuiltin_delegate(
let val = asm.ccall(unsafe { (*bf).func_ptr } as *const u8, args);
// Push the return value
- let stack_ret = ctx.stack_push(asm, Type::Unknown);
+ let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, val);
KeepCompiling
@@ -8030,7 +7878,6 @@ fn get_gen_fn(opcode: VALUE) -> Option<InsnGenFn> {
// See yjit_reg_method().
type MethodGenFn = fn(
jit: &mut JITState,
- ctx: &mut Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
ci: *const rb_callinfo,
@@ -8365,8 +8212,8 @@ mod tests {
#[test]
fn test_gen_exit() {
- let (_, ctx, mut asm, mut cb, _) = setup_codegen();
- gen_exit(0 as *mut VALUE, &ctx, &mut asm);
+ let (_, _ctx, mut asm, mut cb, _) = setup_codegen();
+ gen_exit(0 as *mut VALUE, &mut asm);
asm.compile(&mut cb);
assert!(cb.get_write_pos() > 0);
}
@@ -8380,14 +8227,14 @@ mod tests {
#[test]
fn test_gen_check_ints() {
- let (mut jit, ctx, mut asm, _cb, mut ocb) = setup_codegen();
- gen_check_ints(&mut jit, &ctx, &mut asm, &mut ocb, None);
+ let (mut jit, _ctx, mut asm, _cb, mut ocb) = setup_codegen();
+ gen_check_ints(&mut jit, &mut asm, &mut ocb, None);
}
#[test]
fn test_gen_nop() {
- let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
- let status = gen_nop(&mut jit, &mut context, &mut asm, &mut ocb);
+ let (mut jit, context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let status = gen_nop(&mut jit, &mut asm, &mut ocb);
asm.compile(&mut cb);
assert_eq!(status, KeepCompiling);
@@ -8398,9 +8245,9 @@ mod tests {
#[test]
fn test_gen_pop() {
let (mut jit, _, mut asm, _cb, mut ocb) = setup_codegen();
- let mut context = Context::default();
- context.stack_push(&mut asm, Type::Fixnum);
- let status = gen_pop(&mut jit, &mut context, &mut asm, &mut ocb);
+ let context = Context::default();
+ asm.stack_push(Type::Fixnum);
+ let status = gen_pop(&mut jit, &mut asm, &mut ocb);
assert_eq!(status, KeepCompiling);
let mut default = Context::default();
@@ -8410,15 +8257,15 @@ mod tests {
#[test]
fn test_gen_dup() {
- let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
- context.stack_push(&mut asm, Type::Fixnum);
- let status = gen_dup(&mut jit, &mut context, &mut asm, &mut ocb);
+ let (mut jit, _context, mut asm, mut cb, mut ocb) = setup_codegen();
+ asm.stack_push(Type::Fixnum);
+ let status = gen_dup(&mut jit, &mut asm, &mut ocb);
assert_eq!(status, KeepCompiling);
// Did we duplicate the type information for the Fixnum type?
- assert_eq!(Type::Fixnum, context.get_opnd_type(StackOpnd(0)));
- assert_eq!(Type::Fixnum, context.get_opnd_type(StackOpnd(1)));
+ assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(0)));
+ assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(1)));
asm.compile(&mut cb);
assert!(cb.get_write_pos() > 0); // Write some movs
@@ -8426,22 +8273,22 @@ mod tests {
#[test]
fn test_gen_dupn() {
- let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
- context.stack_push(&mut asm, Type::Fixnum);
- context.stack_push(&mut asm, Type::Flonum);
+ let (mut jit, _context, mut asm, mut cb, mut ocb) = setup_codegen();
+ asm.stack_push(Type::Fixnum);
+ asm.stack_push(Type::Flonum);
let mut value_array: [u64; 2] = [0, 2]; // We only compile for n == 2
let pc: *mut VALUE = &mut value_array as *mut u64 as *mut VALUE;
jit.pc = pc;
- let status = gen_dupn(&mut jit, &mut context, &mut asm, &mut ocb);
+ let status = gen_dupn(&mut jit, &mut asm, &mut ocb);
assert_eq!(status, KeepCompiling);
- assert_eq!(Type::Fixnum, context.get_opnd_type(StackOpnd(3)));
- assert_eq!(Type::Flonum, context.get_opnd_type(StackOpnd(2)));
- assert_eq!(Type::Fixnum, context.get_opnd_type(StackOpnd(1)));
- assert_eq!(Type::Flonum, context.get_opnd_type(StackOpnd(0)));
+ assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(3)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(2)));
+ assert_eq!(Type::Fixnum, asm.ctx.get_opnd_type(StackOpnd(1)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(0)));
// TODO: this is writing zero bytes on x86. Why?
asm.compile(&mut cb);
@@ -8450,14 +8297,14 @@ mod tests {
#[test]
fn test_gen_swap() {
- let (mut jit, mut context, mut asm, _cb, mut ocb) = setup_codegen();
- context.stack_push(&mut asm, Type::Fixnum);
- context.stack_push(&mut asm, Type::Flonum);
+ let (mut jit, _context, mut asm, _cb, mut ocb) = setup_codegen();
+ asm.stack_push(Type::Fixnum);
+ asm.stack_push(Type::Flonum);
- let status = gen_swap(&mut jit, &mut context, &mut asm, &mut ocb);
+ let status = gen_swap(&mut jit, &mut asm, &mut ocb);
- let (_, tmp_type_top) = context.get_opnd_mapping(StackOpnd(0));
- let (_, tmp_type_next) = context.get_opnd_mapping(StackOpnd(1));
+ let (_, tmp_type_top) = asm.ctx.get_opnd_mapping(StackOpnd(0));
+ let (_, tmp_type_next) = asm.ctx.get_opnd_mapping(StackOpnd(1));
assert_eq!(status, KeepCompiling);
assert_eq!(tmp_type_top, Type::Fixnum);
@@ -8466,10 +8313,10 @@ mod tests {
#[test]
fn test_putnil() {
- let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
- let status = gen_putnil(&mut jit, &mut context, &mut asm, &mut ocb);
+ let (mut jit, _context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let status = gen_putnil(&mut jit, &mut asm, &mut ocb);
- let (_, tmp_type_top) = context.get_opnd_mapping(StackOpnd(0));
+ let (_, tmp_type_top) = asm.ctx.get_opnd_mapping(StackOpnd(0));
assert_eq!(status, KeepCompiling);
assert_eq!(tmp_type_top, Type::Nil);
@@ -8480,15 +8327,15 @@ mod tests {
#[test]
fn test_putobject_qtrue() {
// Test gen_putobject with Qtrue
- let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let (mut jit, _context, mut asm, mut cb, mut ocb) = setup_codegen();
let mut value_array: [u64; 2] = [0, Qtrue.into()];
let pc: *mut VALUE = &mut value_array as *mut u64 as *mut VALUE;
jit.pc = pc;
- let status = gen_putobject(&mut jit, &mut context, &mut asm, &mut ocb);
+ let status = gen_putobject(&mut jit, &mut asm, &mut ocb);
- let (_, tmp_type_top) = context.get_opnd_mapping(StackOpnd(0));
+ let (_, tmp_type_top) = asm.ctx.get_opnd_mapping(StackOpnd(0));
assert_eq!(status, KeepCompiling);
assert_eq!(tmp_type_top, Type::True);
@@ -8499,16 +8346,16 @@ mod tests {
#[test]
fn test_putobject_fixnum() {
// Test gen_putobject with a Fixnum to test another conditional branch
- let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let (mut jit, _context, mut asm, mut cb, mut ocb) = setup_codegen();
// The Fixnum 7 is encoded as 7 * 2 + 1, or 15
let mut value_array: [u64; 2] = [0, 15];
let pc: *mut VALUE = &mut value_array as *mut u64 as *mut VALUE;
jit.pc = pc;
- let status = gen_putobject(&mut jit, &mut context, &mut asm, &mut ocb);
+ let status = gen_putobject(&mut jit, &mut asm, &mut ocb);
- let (_, tmp_type_top) = context.get_opnd_mapping(StackOpnd(0));
+ let (_, tmp_type_top) = asm.ctx.get_opnd_mapping(StackOpnd(0));
assert_eq!(status, KeepCompiling);
assert_eq!(tmp_type_top, Type::Fixnum);
@@ -8518,11 +8365,11 @@ mod tests {
#[test]
fn test_int2fix() {
- let (mut jit, mut context, mut asm, _cb, mut ocb) = setup_codegen();
+ let (mut jit, _context, mut asm, _cb, mut ocb) = setup_codegen();
jit.opcode = YARVINSN_putobject_INT2FIX_0_.as_usize();
- let status = gen_putobject_int2fix(&mut jit, &mut context, &mut asm, &mut ocb);
+ let status = gen_putobject_int2fix(&mut jit, &mut asm, &mut ocb);
- let (_, tmp_type_top) = context.get_opnd_mapping(StackOpnd(0));
+ let (_, tmp_type_top) = asm.ctx.get_opnd_mapping(StackOpnd(0));
// Right now we're not testing the generated machine code to make sure a literal 1 or 0 was pushed. I've checked locally.
assert_eq!(status, KeepCompiling);
@@ -8531,8 +8378,8 @@ mod tests {
#[test]
fn test_putself() {
- let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
- let status = gen_putself(&mut jit, &mut context, &mut asm, &mut ocb);
+ let (mut jit, _context, mut asm, mut cb, mut ocb) = setup_codegen();
+ let status = gen_putself(&mut jit, &mut asm, &mut ocb);
assert_eq!(status, KeepCompiling);
asm.compile(&mut cb);
@@ -8541,22 +8388,22 @@ mod tests {
#[test]
fn test_gen_setn() {
- let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
- context.stack_push(&mut asm, Type::Fixnum);
- context.stack_push(&mut asm, Type::Flonum);
- context.stack_push(&mut asm, Type::CString);
+ let (mut jit, _context, mut asm, mut cb, mut ocb) = setup_codegen();
+ asm.stack_push(Type::Fixnum);
+ asm.stack_push(Type::Flonum);
+ asm.stack_push(Type::CString);
let mut value_array: [u64; 2] = [0, 2];
let pc: *mut VALUE = &mut value_array as *mut u64 as *mut VALUE;
jit.pc = pc;
- let status = gen_setn(&mut jit, &mut context, &mut asm, &mut ocb);
+ let status = gen_setn(&mut jit, &mut asm, &mut ocb);
assert_eq!(status, KeepCompiling);
- assert_eq!(Type::CString, context.get_opnd_type(StackOpnd(2)));
- assert_eq!(Type::Flonum, context.get_opnd_type(StackOpnd(1)));
- assert_eq!(Type::CString, context.get_opnd_type(StackOpnd(0)));
+ assert_eq!(Type::CString, asm.ctx.get_opnd_type(StackOpnd(2)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(1)));
+ assert_eq!(Type::CString, asm.ctx.get_opnd_type(StackOpnd(0)));
asm.compile(&mut cb);
assert!(cb.get_write_pos() > 0);
@@ -8564,21 +8411,21 @@ mod tests {
#[test]
fn test_gen_topn() {
- let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
- context.stack_push(&mut asm, Type::Flonum);
- context.stack_push(&mut asm, Type::CString);
+ let (mut jit, _context, mut asm, mut cb, mut ocb) = setup_codegen();
+ asm.stack_push(Type::Flonum);
+ asm.stack_push(Type::CString);
let mut value_array: [u64; 2] = [0, 1];
let pc: *mut VALUE = &mut value_array as *mut u64 as *mut VALUE;
jit.pc = pc;
- let status = gen_topn(&mut jit, &mut context, &mut asm, &mut ocb);
+ let status = gen_topn(&mut jit, &mut asm, &mut ocb);
assert_eq!(status, KeepCompiling);
- assert_eq!(Type::Flonum, context.get_opnd_type(StackOpnd(2)));
- assert_eq!(Type::CString, context.get_opnd_type(StackOpnd(1)));
- assert_eq!(Type::Flonum, context.get_opnd_type(StackOpnd(0)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(2)));
+ assert_eq!(Type::CString, asm.ctx.get_opnd_type(StackOpnd(1)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(0)));
asm.compile(&mut cb);
assert!(cb.get_write_pos() > 0); // Write some movs
@@ -8586,20 +8433,20 @@ mod tests {
#[test]
fn test_gen_adjuststack() {
- let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
- context.stack_push(&mut asm, Type::Flonum);
- context.stack_push(&mut asm, Type::CString);
- context.stack_push(&mut asm, Type::Fixnum);
+ let (mut jit, _context, mut asm, mut cb, mut ocb) = setup_codegen();
+ asm.stack_push(Type::Flonum);
+ asm.stack_push(Type::CString);
+ asm.stack_push(Type::Fixnum);
let mut value_array: [u64; 3] = [0, 2, 0];
let pc: *mut VALUE = &mut value_array as *mut u64 as *mut VALUE;
jit.pc = pc;
- let status = gen_adjuststack(&mut jit, &mut context, &mut asm, &mut ocb);
+ let status = gen_adjuststack(&mut jit, &mut asm, &mut ocb);
assert_eq!(status, KeepCompiling);
- assert_eq!(Type::Flonum, context.get_opnd_type(StackOpnd(0)));
+ assert_eq!(Type::Flonum, asm.ctx.get_opnd_type(StackOpnd(0)));
asm.compile(&mut cb);
assert!(cb.get_write_pos() == 0); // No instructions written
@@ -8607,9 +8454,9 @@ mod tests {
#[test]
fn test_gen_leave() {
- let (mut jit, mut context, mut asm, _cb, mut ocb) = setup_codegen();
+ let (mut jit, _context, mut asm, _cb, mut ocb) = setup_codegen();
// Push return value
- context.stack_push(&mut asm, Type::Fixnum);
- gen_leave(&mut jit, &mut context, &mut asm, &mut ocb);
+ asm.stack_push(Type::Fixnum);
+ gen_leave(&mut jit, &mut asm, &mut ocb);
}
}
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index d257860fbf..c2dc4733e3 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -1622,61 +1622,9 @@ impl Context {
self.set_reg_temps(reg_temps);
}
- /// Push one new value on the temp stack with an explicit mapping
- /// Return a pointer to the new stack top
- pub fn stack_push_mapping(&mut self, asm: &mut Assembler, (mapping, temp_type): (TempMapping, Type)) -> Opnd {
- // If type propagation is disabled, store no types
- if get_option!(no_type_prop) {
- return self.stack_push_mapping(asm, (mapping, Type::Unknown));
- }
-
- let stack_size: usize = self.stack_size.into();
-
- // Keep track of the type and mapping of the value
- if stack_size < MAX_TEMP_TYPES {
- self.temp_mapping[stack_size] = mapping;
- self.temp_types[stack_size] = temp_type;
-
- if let MapToLocal(idx) = mapping {
- assert!((idx as usize) < MAX_LOCAL_TYPES);
- }
- }
-
- // Allocate a register to the stack operand
- assert_eq!(self.reg_temps, asm.get_reg_temps());
- if self.stack_size < MAX_REG_TEMPS {
- asm.alloc_temp_reg(self, self.stack_size);
- }
-
- self.stack_size += 1;
- self.sp_offset += 1;
-
- return self.stack_opnd(0);
- }
-
- /// Push one new value on the temp stack
- /// Return a pointer to the new stack top
- pub fn stack_push(&mut self, asm: &mut Assembler, val_type: Type) -> Opnd {
- return self.stack_push_mapping(asm, (MapToStack, val_type));
- }
-
- /// Push the self value on the stack
- pub fn stack_push_self(&mut self, asm: &mut Assembler) -> Opnd {
- return self.stack_push_mapping(asm, (MapToSelf, Type::Unknown));
- }
-
- /// Push a local variable on the stack
- pub fn stack_push_local(&mut self, asm: &mut Assembler, local_idx: usize) -> Opnd {
- if local_idx >= MAX_LOCAL_TYPES {
- return self.stack_push(asm, Type::Unknown);
- }
-
- return self.stack_push_mapping(asm, (MapToLocal((local_idx as u8).into()), Type::Unknown));
- }
-
// Pop N values off the stack
// Return a pointer to the stack top before the pop operation
- pub fn stack_pop(&mut self, n: usize) -> Opnd {
+ fn stack_pop(&mut self, n: usize) -> Opnd {
assert!(n <= self.stack_size.into());
let top = self.stack_opnd(0);
@@ -1978,6 +1926,66 @@ impl Context {
}
}
+impl Assembler {
+ /// Push one new value on the temp stack with an explicit mapping
+ /// Return a pointer to the new stack top
+ pub fn stack_push_mapping(&mut self, (mapping, temp_type): (TempMapping, Type)) -> Opnd {
+ // If type propagation is disabled, store no types
+ if get_option!(no_type_prop) {
+ return self.stack_push_mapping((mapping, Type::Unknown));
+ }
+
+ let stack_size: usize = self.ctx.stack_size.into();
+
+ // Keep track of the type and mapping of the value
+ if stack_size < MAX_TEMP_TYPES {
+ self.ctx.temp_mapping[stack_size] = mapping;
+ self.ctx.temp_types[stack_size] = temp_type;
+
+ if let MapToLocal(idx) = mapping {
+ assert!((idx as usize) < MAX_LOCAL_TYPES);
+ }
+ }
+
+ // Allocate a register to the stack operand
+ assert_eq!(self.ctx.reg_temps, self.get_reg_temps());
+ if self.ctx.stack_size < MAX_REG_TEMPS {
+ self.alloc_temp_reg(self.ctx.stack_size);
+ }
+
+ self.ctx.stack_size += 1;
+ self.ctx.sp_offset += 1;
+
+ return self.ctx.stack_opnd(0);
+ }
+
+ /// Push one new value on the temp stack
+ /// Return a pointer to the new stack top
+ pub fn stack_push(&mut self, val_type: Type) -> Opnd {
+ return self.stack_push_mapping((MapToStack, val_type));
+ }
+
+ /// Push the self value on the stack
+ pub fn stack_push_self(&mut self) -> Opnd {
+ return self.stack_push_mapping((MapToSelf, Type::Unknown));
+ }
+
+ /// Push a local variable on the stack
+ pub fn stack_push_local(&mut self, local_idx: usize) -> Opnd {
+ if local_idx >= MAX_LOCAL_TYPES {
+ return self.stack_push(Type::Unknown);
+ }
+
+ return self.stack_push_mapping((MapToLocal((local_idx as u8).into()), Type::Unknown));
+ }
+
+ // Pop N values off the stack
+ // Return a pointer to the stack top before the pop operation
+ pub fn stack_pop(&mut self, n: usize) -> Opnd {
+ self.ctx.stack_pop(n)
+ }
+}
+
impl BlockId {
/// Print Ruby source location for debugging
#[cfg(debug_assertions)]
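
The new Assembler methods above are thin wrappers: every push routes through stack_push_mapping, and stack_pop forwards to the Context's own implementation. A hedged usage sketch, assuming the yjit crate internals shown in this diff are in scope:

    let mut asm = Assembler::new();
    asm.stack_push(Type::Fixnum);   // MapToStack with a known type
    asm.stack_push_self();          // MapToSelf, type unknown
    asm.stack_push_local(0);        // MapToLocal when the index fits under MAX_LOCAL_TYPES
    let top = asm.stack_pop(3);     // operand pointing at the stack top before the pops
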
@@ -2556,6 +2564,7 @@ fn gen_branch_stub(
let stub_addr = ocb.get_write_ptr();
let mut asm = Assembler::new();
+ asm.ctx = ctx.clone();
asm.set_reg_temps(ctx.reg_temps);
asm.comment("branch stub hit");
@@ -2566,7 +2575,7 @@ fn gen_branch_stub(
}
// Spill temps to the VM stack as well for jit.peek_at_stack()
- asm.spill_temps(&mut ctx.clone());
+ asm.spill_temps();
// Set up the arguments unique to this stub for:
//
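
Condensing the two hunks above: the branch stub now seeds the assembler's owned context from the branch target before spilling, so spill_temps() can read the stack size and register temps without a separate Context argument. The relevant lines from this diff gathered in one place (surrounding stub setup elided):

    let mut asm = Assembler::new();
    asm.ctx = ctx.clone();              // seed the owned context from the branch target
    asm.set_reg_temps(ctx.reg_temps);
    asm.comment("branch stub hit");
    // Spill temps to the VM stack as well for jit.peek_at_stack()
    asm.spill_temps();                  // stack size and reg temps come from asm.ctx
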
@@ -2756,15 +2765,14 @@ pub fn gen_direct_jump(jit: &mut JITState, ctx: &Context, target0: BlockId, asm:
/// Create a stub to force the code up to this point to be executed
pub fn defer_compilation(
jit: &mut JITState,
- cur_ctx: &Context,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) {
- if cur_ctx.chain_depth != 0 {
+ if asm.ctx.chain_depth != 0 {
panic!("Double defer!");
}
- let mut next_ctx = cur_ctx.clone();
+ let mut next_ctx = asm.ctx.clone();
if next_ctx.chain_depth == u8::MAX {
panic!("max block version chain depth reached!");
@@ -3169,9 +3177,9 @@ mod tests {
assert_eq!(Context::default().diff(&Context::default()), TypeDiff::Compatible(0));
// Try pushing an operand and getting its type
- let mut ctx = Context::default();
- ctx.stack_push(&mut Assembler::new(), Type::Fixnum);
- let top_type = ctx.get_opnd_type(StackOpnd(0));
+ let mut asm = Assembler::new();
+ asm.stack_push(Type::Fixnum);
+ let top_type = asm.ctx.get_opnd_type(StackOpnd(0));
assert!(top_type == Type::Fixnum);
// TODO: write more tests for Context type diff
diff --git a/yjit/src/utils.rs b/yjit/src/utils.rs
index 5291e05c96..d9a75b5302 100644
--- a/yjit/src/utils.rs
+++ b/yjit/src/utils.rs
@@ -1,7 +1,6 @@
#![allow(dead_code)] // Some functions for print debugging in here
use crate::backend::ir::*;
-use crate::core::Context;
use crate::cruby::*;
use std::slice;
@@ -142,7 +141,7 @@ macro_rules! c_callable {
}
pub(crate) use c_callable;
-pub fn print_int(asm: &mut Assembler, ctx: &Context, opnd: Opnd) {
+pub fn print_int(asm: &mut Assembler, opnd: Opnd) {
c_callable!{
fn print_int_fn(val: i64) {
println!("{}", val);
@@ -165,11 +164,11 @@ pub fn print_int(asm: &mut Assembler, ctx: &Context, opnd: Opnd) {
};
asm.ccall(print_int_fn as *const u8, vec![argument]);
- asm.cpop_all(ctx);
+ asm.cpop_all();
}
/// Generate code to print a pointer
-pub fn print_ptr(asm: &mut Assembler, ctx: &Context, opnd: Opnd) {
+pub fn print_ptr(asm: &mut Assembler, opnd: Opnd) {
c_callable!{
fn print_ptr_fn(ptr: *const u8) {
println!("{:p}", ptr);
@@ -180,11 +179,11 @@ pub fn print_ptr(asm: &mut Assembler, ctx: &Context, opnd: Opnd) {
asm.cpush_all();
asm.ccall(print_ptr_fn as *const u8, vec![opnd]);
- asm.cpop_all(ctx);
+ asm.cpop_all();
}
/// Generate code to print a value
-pub fn print_value(asm: &mut Assembler, ctx: &Context, opnd: Opnd) {
+pub fn print_value(asm: &mut Assembler, opnd: Opnd) {
c_callable!{
fn print_value_fn(val: VALUE) {
unsafe { rb_obj_info_dump(val) }
@@ -195,11 +194,11 @@ pub fn print_value(asm: &mut Assembler, ctx: &Context, opnd: Opnd) {
asm.cpush_all();
asm.ccall(print_value_fn as *const u8, vec![opnd]);
- asm.cpop_all(ctx);
+ asm.cpop_all();
}
/// Generate code to print constant string to stdout
-pub fn print_str(asm: &mut Assembler, ctx: &Context, str: &str) {
+pub fn print_str(asm: &mut Assembler, str: &str) {
c_callable!{
fn print_str_cfun(ptr: *const u8, num_bytes: usize) {
unsafe {
@@ -223,7 +222,7 @@ pub fn print_str(asm: &mut Assembler, ctx: &Context, str: &str) {
let opnd = asm.lea_label(string_data);
asm.ccall(print_str_cfun as *const u8, vec![opnd, Opnd::UImm(str.len() as u64)]);
- asm.cpop_all(ctx);
+ asm.cpop_all();
}
#[cfg(test)]
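
After this change the debug-print helpers take no Context at all; they push and pop machine state themselves via cpush_all/cpop_all. A short usage sketch mirroring the tests below (CodeBlock::new_dummy is the test-only constructor already used in this file):

    let mut asm = Assembler::new();
    let mut cb = CodeBlock::new_dummy(1024);
    print_str(&mut asm, "answer:");
    print_int(&mut asm, Opnd::Imm(42));
    asm.compile(&mut cb);
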
@@ -263,7 +262,7 @@ mod tests {
let mut asm = Assembler::new();
let mut cb = CodeBlock::new_dummy(1024);
- print_int(&mut asm, &Context::default(), Opnd::Imm(42));
+ print_int(&mut asm, Opnd::Imm(42));
asm.compile(&mut cb);
}
@@ -272,7 +271,7 @@ mod tests {
let mut asm = Assembler::new();
let mut cb = CodeBlock::new_dummy(1024);
- print_str(&mut asm, &Context::default(), "Hello, world!");
+ print_str(&mut asm, "Hello, world!");
asm.compile(&mut cb);
}
}