author     Takashi Kokubun <takashikkbn@gmail.com>    2023-02-22 13:22:41 -0800
committer  GitHub <noreply@github.com>                2023-02-22 16:22:41 -0500
commit     e9e4e1cb46a1595a83127dd90091bc0951c7d4a9 (patch)
tree       d01c4753f3a726a49d31a126fd0fc74f469de8bd /yjit/src/backend
parent     4f48debdcf59f038cad0a5cf6f6b26c37648778f (diff)
YJIT: Introduce Opnd::Stack (#7352)
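
This commit adds a new Opnd::Stack operand kind to the backend IR, identifying a VM stack slot by (idx, sp_offset, num_bits), together with a target-independent lower_stack() pass that rewrites each such operand into an SP-relative memory operand. Both backends' compile_with_regs() now run lower_stack() before their split passes, and the arm64 lowering/emit paths gain panic/unreachable arms so that any Stack operand reaching them fails loudly.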
Diffstat (limited to 'yjit/src/backend')
-rw-r--r--  yjit/src/backend/arm64/mod.rs  | 10 +++++++---
-rw-r--r--  yjit/src/backend/ir.rs         | 26 +++++++++++++++++++++++++-
-rw-r--r--  yjit/src/backend/x86_64/mod.rs |  2 +-
3 files changed, 33 insertions(+), 5 deletions(-)
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index d06f05dcb1..fc26b9d893 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -54,6 +54,7 @@ impl From<Opnd> for A64Opnd {
},
Opnd::InsnOut { .. } => panic!("attempted to lower an Opnd::InsnOut"),
Opnd::Value(_) => panic!("attempted to lower an Opnd::Value"),
+ Opnd::Stack { .. } => panic!("attempted to lower an Opnd::Stack"),
Opnd::None => panic!(
"Attempted to lower an Opnd::None. This often happens when an out operand was not allocated for an instruction because the output of the instruction was not used. Please ensure you are using the output."
),
@@ -252,7 +253,7 @@ impl Assembler
/// do follow that encoding, and if they don't then we load them first.
fn split_bitmask_immediate(asm: &mut Assembler, opnd: Opnd, dest_num_bits: u8) -> Opnd {
match opnd {
- Opnd::Reg(_) | Opnd::InsnOut { .. } => opnd,
+ Opnd::Reg(_) | Opnd::InsnOut { .. } | Opnd::Stack { .. } => opnd,
Opnd::Mem(_) => split_load_operand(asm, opnd),
Opnd::Imm(imm) => {
if imm == 0 {
@@ -295,7 +296,7 @@ impl Assembler
asm.load(opnd)
}
},
- Opnd::None | Opnd::Value(_) => unreachable!()
+ Opnd::None | Opnd::Value(_) | Opnd::Stack { .. } => unreachable!()
}
}
@@ -863,6 +864,9 @@ impl Assembler
let ptr_offset: u32 = (cb.get_write_pos() as u32) - (SIZEOF_VALUE as u32);
insn_gc_offsets.push(ptr_offset);
},
+ Opnd::Stack { .. } => {
+ unreachable!("Stack operand was not lowered before arm64_emit");
+ }
Opnd::None => {
unreachable!("Attempted to load from None operand");
}
@@ -1072,7 +1076,7 @@ impl Assembler
/// Optimize and compile the stored instructions
pub fn compile_with_regs(self, cb: &mut CodeBlock, regs: Vec<Reg>) -> Vec<u32>
{
- let mut asm = self.arm64_split().alloc_regs(regs);
+ let mut asm = self.lower_stack().arm64_split().alloc_regs(regs);
// Create label instances in the code block
for (idx, name) in asm.label_names.iter().enumerate() {
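
The arm64 arms above can safely reject Opnd::Stack because lower_stack() runs before arm64_split() in compile_with_regs(). Below is a minimal, standalone sketch of the displacement arithmetic that pass applies, assuming a 64-bit VALUE of 8 bytes; the real constant is SIZEOF_VALUE_I32 from crate::cruby, and the helper name stack_disp is hypothetical:

const SIZEOF_VALUE_I32: i32 = 8; // assumption: 64-bit VALUE

fn stack_disp(idx: i32, sp_offset: i16) -> i32 {
    // Slot `idx` counts down from the virtual stack top; `sp_offset` is
    // the distance in slots from the machine SP register to the virtual
    // stack pointer (one past the top element).
    (sp_offset as i32 - idx - 1) * SIZEOF_VALUE_I32
}

fn main() {
    assert_eq!(stack_disp(0, 1), 0); // top of stack -> [SP + 0]
    assert_eq!(stack_disp(0, 2), 8); // top, one un-flushed slot -> [SP + 8]
    assert_eq!(stack_disp(1, 2), 0); // slot below the top -> [SP + 0]
}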
diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
index 1dea189f24..dd0390f39c 100644
--- a/yjit/src/backend/ir.rs
+++ b/yjit/src/backend/ir.rs
@@ -7,7 +7,7 @@ use std::fmt;
use std::convert::From;
use std::io::Write;
use std::mem::take;
-use crate::cruby::{VALUE};
+use crate::cruby::{VALUE, SIZEOF_VALUE_I32};
use crate::virtualmem::{CodePtr};
use crate::asm::{CodeBlock, uimm_num_bits, imm_num_bits};
use crate::core::{Context, Type, TempMapping};
@@ -72,6 +72,9 @@ pub enum Opnd
// Output of a preceding instruction in this block
InsnOut{ idx: usize, num_bits: u8 },
+ // Pointer to a slot on the VM stack
+ Stack { idx: i32, sp_offset: i16, num_bits: u8 },
+
// Low-level operands, for lowering
Imm(i64), // Raw signed immediate
UImm(u64), // Raw unsigned immediate
@@ -85,6 +88,7 @@ impl fmt::Debug for Opnd {
match self {
Self::None => write!(fmt, "None"),
Value(val) => write!(fmt, "Value({val:?})"),
+ Stack { idx, sp_offset, .. } => write!(fmt, "SP[{}]", *sp_offset as i32 - idx - 1),
InsnOut { idx, num_bits } => write!(fmt, "Out{num_bits}({idx})"),
Imm(signed) => write!(fmt, "{signed:x}_i64"),
UImm(unsigned) => write!(fmt, "{unsigned:x}_u64"),
@@ -158,6 +162,7 @@ impl Opnd
Opnd::Reg(reg) => Some(Opnd::Reg(reg.with_num_bits(num_bits))),
Opnd::Mem(Mem { base, disp, .. }) => Some(Opnd::Mem(Mem { base, disp, num_bits })),
Opnd::InsnOut { idx, .. } => Some(Opnd::InsnOut { idx, num_bits }),
+ Opnd::Stack { idx, sp_offset, .. } => Some(Opnd::Stack { idx, sp_offset, num_bits }),
_ => None,
}
}
@@ -914,6 +919,25 @@ impl Assembler
Target::Label(label_idx)
}
+ /// Convert Stack operands to memory operands
+ pub fn lower_stack(mut self) -> Assembler
+ {
+ let mut asm = Assembler::new_with_label_names(take(&mut self.label_names));
+ let mut iterator = self.into_draining_iter();
+
+ while let Some((index, mut insn)) = iterator.next_unmapped() {
+ let mut opnd_iter = insn.opnd_iter_mut();
+ while let Some(opnd) = opnd_iter.next() {
+ if let Opnd::Stack { idx, sp_offset, num_bits } = *opnd {
+ *opnd = Opnd::mem(num_bits, SP, (sp_offset as i32 - idx - 1) * SIZEOF_VALUE_I32);
+ }
+ }
+ asm.push_insn(insn);
+ }
+
+ asm
+ }
+
/// Sets the out field on the various instructions that require allocated
/// registers because their output is used as the operand on a subsequent
/// instruction. This is our implementation of the linear scan algorithm.
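
For illustration, here is a self-contained sketch of the rewrite pattern lower_stack() follows: visit every operand of every instruction and patch Stack operands into SP-relative memory operands in place. The enum and function below are simplified stand-ins, not YJIT's real Insn/Assembler types, and the base register (SP) is omitted so only the displacement is modeled:

const SIZEOF_VALUE_I32: i32 = 8; // assumption: 64-bit VALUE

#[derive(Debug, Clone, Copy, PartialEq)]
enum Opnd {
    Stack { idx: i32, sp_offset: i16, num_bits: u8 },
    Mem { num_bits: u8, disp: i32 },
}

// Mirrors the opnd_iter_mut() loop in lower_stack(): each instruction is
// modeled as a Vec of its operands.
fn lower_stack(insns: &mut [Vec<Opnd>]) {
    for insn_opnds in insns.iter_mut() {
        for opnd in insn_opnds.iter_mut() {
            if let Opnd::Stack { idx, sp_offset, num_bits } = *opnd {
                *opnd = Opnd::Mem {
                    num_bits,
                    disp: (sp_offset as i32 - idx - 1) * SIZEOF_VALUE_I32,
                };
            }
        }
    }
}

fn main() {
    let mut insns = vec![vec![Opnd::Stack { idx: 0, sp_offset: 1, num_bits: 64 }]];
    lower_stack(&mut insns);
    assert_eq!(insns[0][0], Opnd::Mem { num_bits: 64, disp: 0 });
}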
diff --git a/yjit/src/backend/x86_64/mod.rs b/yjit/src/backend/x86_64/mod.rs
index 297a0fd852..0ea943b75e 100644
--- a/yjit/src/backend/x86_64/mod.rs
+++ b/yjit/src/backend/x86_64/mod.rs
@@ -701,7 +701,7 @@ impl Assembler
/// Optimize and compile the stored instructions
pub fn compile_with_regs(self, cb: &mut CodeBlock, regs: Vec<Reg>) -> Vec<u32>
{
- let mut asm = self.x86_split().alloc_regs(regs);
+ let mut asm = self.lower_stack().x86_split().alloc_regs(regs);
// Create label instances in the code block
for (idx, name) in asm.label_names.iter().enumerate() {
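
With both backends calling lower_stack() ahead of their split passes, every Opnd::Stack is rewritten into an SP-relative memory operand before any target-specific code runs, which is what lets the arm64 arms added above treat a surviving Stack operand as unreachable.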