author    Takashi Kokubun <takashikkbn@gmail.com>    2023-04-14 14:00:10 -0700
committer GitHub <noreply@github.com>    2023-04-14 17:00:10 -0400
commit    4501fb8b467cd40da5e160b82db7ea1a10d9e7ca (patch)
tree      da36a1703c584d45de5286d8a94cb985743e5341 /yjit/src/backend
parent    d83e59e6b8b52002cc46a14d7d4dc69416379029 (diff)
download  ruby-4501fb8b467cd40da5e160b82db7ea1a10d9e7ca.tar.gz
YJIT: Introduce Target::SideExit (#7712)
* YJIT: Introduce Target::SideExit
* YJIT: Obviate Insn::SideExitContext
* YJIT: Avoid cloning a Context for each insn
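The new flow in one picture: codegen records the current PC and stack size once per VM instruction, guards jump to `Target::side_exit(...)`, and the backend compiles (and caches) the actual exit stub only when such a jump is emitted. A minimal sketch of a caller; the guard itself (`recv`, the `cmp`/`je` pair) is illustrative, not from this commit:

    // Hypothetical guard using the API introduced here.
    fn gen_nil_guard(asm: &mut Assembler, pc: *mut VALUE, stack_size: u8, recv: Opnd) {
        // Record the side-exit context once for the instruction being compiled;
        // push_insn copies it into any Target::SideExit pushed afterwards.
        asm.set_side_exit_context(pc, stack_size);
        asm.cmp(recv, Qnil.into());
        // None = uncounted exit; Some(counter) would wrap a counter around it.
        asm.je(Target::side_exit(None));
    }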
Diffstat (limited to 'yjit/src/backend')
-rw-r--r--  yjit/src/backend/arm64/mod.rs   |  55
-rw-r--r--  yjit/src/backend/ir.rs          | 115
-rw-r--r--  yjit/src/backend/tests.rs       |   2
-rw-r--r--  yjit/src/backend/x86_64/mod.rs  |  61
4 files changed, 180 insertions, 53 deletions
diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs
index 4a052bd0ef..8aa8124089 100644
--- a/yjit/src/backend/arm64/mod.rs
+++ b/yjit/src/backend/arm64/mod.rs
@@ -2,8 +2,10 @@
#![allow(unused_variables)]
#![allow(unused_imports)]
+use std::mem::take;
+
use crate::asm::x86_64::jmp_ptr;
-use crate::asm::{CodeBlock};
+use crate::asm::{CodeBlock, OutlinedCb};
use crate::asm::arm64::*;
use crate::codegen::{JITState, CodegenGlobals};
use crate::core::Context;
@@ -374,7 +376,7 @@ impl Assembler
}
}
- let mut asm_local = Assembler::new_with_label_names(std::mem::take(&mut self.label_names));
+ let mut asm_local = Assembler::new_with_label_names(take(&mut self.label_names), take(&mut self.side_exits));
let asm = &mut asm_local;
let mut iterator = self.into_draining_iter();
@@ -675,7 +677,7 @@ impl Assembler
/// Emit platform-specific machine code
/// Returns a list of GC offsets. Can return failure to signal caller to retry.
- fn arm64_emit(&mut self, cb: &mut CodeBlock) -> Result<Vec<u32>, ()> {
+ fn arm64_emit(&mut self, cb: &mut CodeBlock, ocb: &mut Option<&mut OutlinedCb>) -> Result<Vec<u32>, ()> {
/// Determine how many instructions it will take to represent moving
/// this value into a register. Note that the return value of this
/// function must correspond to how many instructions are used to
@@ -765,6 +767,9 @@ impl Assembler
bcond(cb, CONDITION, InstructionOffset::from_bytes(bytes));
});
},
+ Target::SideExit { .. } => {
+ unreachable!("Target::SideExit should have been compiled by compile_side_exit")
+ },
};
}
@@ -780,6 +785,20 @@ impl Assembler
ldr_post(cb, opnd, A64Opnd::new_mem(64, C_SP_REG, C_SP_STEP));
}
+ /// Compile a side exit if Target::SideExit is given.
+ fn compile_side_exit(
+ target: Target,
+ asm: &mut Assembler,
+ ocb: &mut Option<&mut OutlinedCb>,
+ ) -> Target {
+ if let Target::SideExit { counter, context } = target {
+ let side_exit = asm.get_side_exit(&context.unwrap(), counter, ocb.as_mut().unwrap());
+ Target::SideExitPtr(side_exit)
+ } else {
+ target
+ }
+ }
+
// dbg!(&self.insns);
// List of GC offsets
@@ -1016,12 +1035,12 @@ impl Assembler
br(cb, opnd.into());
},
Insn::Jmp(target) => {
- match target {
+ match compile_side_exit(*target, self, ocb) {
Target::CodePtr(dst_ptr) => {
- emit_jmp_ptr(cb, *dst_ptr, true);
+ emit_jmp_ptr(cb, dst_ptr, true);
},
Target::SideExitPtr(dst_ptr) => {
- emit_jmp_ptr(cb, *dst_ptr, false);
+ emit_jmp_ptr(cb, dst_ptr, false);
},
Target::Label(label_idx) => {
// Here we're going to save enough space for
@@ -1029,27 +1048,30 @@ impl Assembler
// instruction once we know the offset. We're going
// to assume we can fit into a single b instruction.
// It will panic otherwise.
- cb.label_ref(*label_idx, 4, |cb, src_addr, dst_addr| {
+ cb.label_ref(label_idx, 4, |cb, src_addr, dst_addr| {
let bytes: i32 = (dst_addr - (src_addr - 4)).try_into().unwrap();
b(cb, InstructionOffset::from_bytes(bytes));
});
},
+ Target::SideExit { .. } => {
+ unreachable!("Target::SideExit should have been compiled by compile_side_exit")
+ },
};
},
Insn::Je(target) | Insn::Jz(target) => {
- emit_conditional_jump::<{Condition::EQ}>(cb, *target);
+ emit_conditional_jump::<{Condition::EQ}>(cb, compile_side_exit(*target, self, ocb));
},
Insn::Jne(target) | Insn::Jnz(target) => {
- emit_conditional_jump::<{Condition::NE}>(cb, *target);
+ emit_conditional_jump::<{Condition::NE}>(cb, compile_side_exit(*target, self, ocb));
},
Insn::Jl(target) => {
- emit_conditional_jump::<{Condition::LT}>(cb, *target);
+ emit_conditional_jump::<{Condition::LT}>(cb, compile_side_exit(*target, self, ocb));
},
Insn::Jbe(target) => {
- emit_conditional_jump::<{Condition::LS}>(cb, *target);
+ emit_conditional_jump::<{Condition::LS}>(cb, compile_side_exit(*target, self, ocb));
},
Insn::Jo(target) => {
- emit_conditional_jump::<{Condition::VS}>(cb, *target);
+ emit_conditional_jump::<{Condition::VS}>(cb, compile_side_exit(*target, self, ocb));
},
Insn::IncrCounter { mem, value } => {
let label = cb.new_label("incr_counter_loop".to_string());
@@ -1121,7 +1143,7 @@ impl Assembler
}
/// Optimize and compile the stored instructions
- pub fn compile_with_regs(self, cb: &mut CodeBlock, regs: Vec<Reg>) -> Vec<u32>
+ pub fn compile_with_regs(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>, regs: Vec<Reg>) -> Vec<u32>
{
let asm = self.lower_stack();
let asm = asm.arm64_split();
@@ -1135,14 +1157,15 @@ impl Assembler
let start_ptr = cb.get_write_ptr();
let starting_label_state = cb.get_label_state();
- let gc_offsets = asm.arm64_emit(cb)
+ let mut ocb = ocb; // for &mut
+ let gc_offsets = asm.arm64_emit(cb, &mut ocb)
.unwrap_or_else(|_err| {
// we want to lower jumps to labels to b.cond instructions, which have a 1 MiB
// range limit. We can easily exceed the limit in case the jump straddles two pages.
// In this case, we retry with a fresh page.
cb.set_label_state(starting_label_state);
cb.next_page(start_ptr, emit_jmp_ptr_with_invalidation);
- asm.arm64_emit(cb).expect("should not fail when writing to a fresh code page")
+ asm.arm64_emit(cb, &mut ocb).expect("should not fail when writing to a fresh code page")
});
if cb.has_dropped_bytes() {
@@ -1180,7 +1203,7 @@ mod tests {
let opnd = asm.add(Opnd::Reg(X0_REG), Opnd::Reg(X1_REG));
asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd);
- asm.compile_with_regs(&mut cb, vec![X3_REG]);
+ asm.compile_with_regs(&mut cb, None, vec![X3_REG]);
// Assert that only 2 instructions were written.
assert_eq!(8, cb.get_write_pos());
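A note on the `&mut Option<&mut OutlinedCb>` plumbing above: `arm64_emit` may run twice (the retry after `cb.next_page`), so the `Option<&mut OutlinedCb>` cannot simply be moved into the first call. Taking `&mut Option<...>` and reborrowing with `as_mut()` keeps it usable for the retry; hence the `let mut ocb = ocb; // for &mut` shim. A self-contained sketch of the pattern with dummy types, not YJIT code:

    fn use_once(out: &mut String) { out.push('x'); }

    fn emit_twice(ocb: &mut Option<&mut String>) {
        if let Some(out) = ocb.as_mut() {
            use_once(out); // &mut &mut String reborrows as &mut String
            use_once(out); // still available for the retry path
        }
    }

    fn main() {
        let mut buf = String::new();
        let mut opt = Some(&mut buf);
        emit_twice(&mut opt);
        assert_eq!(buf, "xx");
    }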
diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
index d22a5ff55b..1bf2dca04e 100644
--- a/yjit/src/backend/ir.rs
+++ b/yjit/src/backend/ir.rs
@@ -3,13 +3,15 @@
#![allow(unused_imports)]
use std::cell::Cell;
+use std::collections::HashMap;
use std::fmt;
use std::convert::From;
use std::io::Write;
use std::mem::take;
+use crate::codegen::{gen_outlined_exit, gen_counted_exit};
use crate::cruby::{VALUE, SIZEOF_VALUE_I32};
use crate::virtualmem::{CodePtr};
-use crate::asm::{CodeBlock, uimm_num_bits, imm_num_bits};
+use crate::asm::{CodeBlock, uimm_num_bits, imm_num_bits, OutlinedCb};
use crate::core::{Context, Type, TempMapping, RegTemps, MAX_REG_TEMPS, MAX_TEMP_TYPES};
use crate::options::*;
use crate::stats::*;
@@ -280,13 +282,22 @@ impl From<VALUE> for Opnd {
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Target
{
- CodePtr(CodePtr), // Pointer to a piece of YJIT-generated code
- SideExitPtr(CodePtr), // Pointer to a side exit code
- Label(usize), // A label within the generated code
+ /// Pointer to a piece of YJIT-generated code
+ CodePtr(CodePtr),
+ /// Side exit with a counter
+ SideExit { counter: Option<Counter>, context: Option<SideExitContext> },
+ /// Pointer to a side exit code
+ SideExitPtr(CodePtr),
+ /// A label within the generated code
+ Label(usize),
}
impl Target
{
+ pub fn side_exit(counter: Option<Counter>) -> Target {
+ Target::SideExit { counter, context: None }
+ }
+
pub fn unwrap_label_idx(&self) -> usize {
match self {
Target::Label(idx) => *idx,
@@ -500,6 +511,25 @@ impl Insn {
InsnOpndMutIterator::new(self)
}
+ /// Get a mutable reference to a Target if it exists.
+ pub(super) fn target_mut(&mut self) -> Option<&mut Target> {
+ match self {
+ Insn::Jbe(target) |
+ Insn::Je(target) |
+ Insn::Jl(target) |
+ Insn::Jmp(target) |
+ Insn::Jne(target) |
+ Insn::Jnz(target) |
+ Insn::Jo(target) |
+ Insn::Jz(target) |
+ Insn::Label(target) |
+ Insn::LeaLabel { target, .. } => {
+ Some(target)
+ }
+ _ => None,
+ }
+ }
+
/// Returns a string that describes which operation this instruction is
/// performing. This is used for debugging.
fn op(&self) -> &'static str {
@@ -880,10 +910,19 @@ impl fmt::Debug for Insn {
}
}
+/// Set of variables used for generating side exits
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+pub struct SideExitContext {
+ /// PC of the instruction being compiled
+ pub pc: *mut VALUE,
+
+ /// Context when it started to compile the instruction
+ pub ctx: Context,
+}
+
/// Object into which we assemble instructions to be
/// optimized and lowered
-pub struct Assembler
-{
+pub struct Assembler {
pub(super) insns: Vec<Insn>,
/// Parallel vec with insns
@@ -899,21 +938,33 @@ pub struct Assembler
/// Context for generating the current insn
pub ctx: Context,
+
+ /// Side exit caches for each SideExitContext
+ pub(super) side_exits: HashMap<SideExitContext, CodePtr>,
+
+ /// PC for Target::SideExit
+ side_exit_pc: Option<*mut VALUE>,
+
+ /// Stack size for Target::SideExit
+ side_exit_stack_size: Option<u8>,
}
impl Assembler
{
pub fn new() -> Self {
- Self::new_with_label_names(Vec::default())
+ Self::new_with_label_names(Vec::default(), HashMap::default())
}
- pub fn new_with_label_names(label_names: Vec<String>) -> Self {
+ pub fn new_with_label_names(label_names: Vec<String>, side_exits: HashMap<SideExitContext, CodePtr>) -> Self {
Self {
insns: Vec::default(),
live_ranges: Vec::default(),
reg_temps: Vec::default(),
label_names,
ctx: Context::default(),
+ side_exits,
+ side_exit_pc: None,
+ side_exit_stack_size: None,
}
}
@@ -924,6 +975,12 @@ impl Assembler
regs.drain(0..num_regs).collect()
}
+ /// Set a context for generating side exits
+ pub fn set_side_exit_context(&mut self, pc: *mut VALUE, stack_size: u8) {
+ self.side_exit_pc = Some(pc);
+ self.side_exit_stack_size = Some(stack_size);
+ }
+
/// Build an Opnd::InsnOut from the current index of the assembler and the
/// given number of bits.
pub(super) fn next_opnd_out(&self, num_bits: u8) -> Opnd {
@@ -973,6 +1030,18 @@ impl Assembler
}
}
+ // Set a side exit context to Target::SideExit
+ let mut insn = insn;
+ if let Some(Target::SideExit { context, .. }) = insn.target_mut() {
+ // We should skip this when this instruction is being copied from another Assembler.
+ if context.is_none() {
+ *context = Some(SideExitContext {
+ pc: self.side_exit_pc.unwrap(),
+ ctx: self.ctx.with_stack_size(self.side_exit_stack_size.unwrap()),
+ });
+ }
+ }
+
self.insns.push(insn);
self.live_ranges.push(insn_idx);
self.reg_temps.push(reg_temps);
@@ -983,6 +1052,26 @@ impl Assembler
*self.reg_temps.last().unwrap_or(&RegTemps::default())
}
+ /// Get a cached side exit, wrapping a counter if specified
+ pub fn get_side_exit(&mut self, side_exit_context: &SideExitContext, counter: Option<Counter>, ocb: &mut OutlinedCb) -> CodePtr {
+ // Drop type information from a cache key
+ let mut side_exit_context = side_exit_context.clone();
+ side_exit_context.ctx = side_exit_context.ctx.get_generic_ctx();
+
+ // Get a cached side exit
+ let side_exit = match self.side_exits.get(&side_exit_context) {
+ None => {
+ let exit_code = gen_outlined_exit(side_exit_context.pc, &side_exit_context.ctx, ocb);
+ self.side_exits.insert(side_exit_context.clone(), exit_code);
+ exit_code
+ }
+ Some(code_ptr) => *code_ptr,
+ };
+
+ // Wrap a counter if needed
+ gen_counted_exit(side_exit, ocb, counter)
+ }
+
/// Create a new label instance that we can jump to
pub fn new_label(&mut self, name: &str) -> Target
{
@@ -1016,7 +1105,7 @@ impl Assembler
}
}
- let mut asm = Assembler::new_with_label_names(take(&mut self.label_names));
+ let mut asm = Assembler::new_with_label_names(take(&mut self.label_names), take(&mut self.side_exits));
let regs = Assembler::get_temp_regs();
let reg_temps = take(&mut self.reg_temps);
let mut iterator = self.into_draining_iter();
@@ -1172,7 +1261,7 @@ impl Assembler
}
let live_ranges: Vec<usize> = take(&mut self.live_ranges);
- let mut asm = Assembler::new_with_label_names(take(&mut self.label_names));
+ let mut asm = Assembler::new_with_label_names(take(&mut self.label_names), take(&mut self.side_exits));
let mut iterator = self.into_draining_iter();
while let Some((index, mut insn)) = iterator.next_unmapped() {
@@ -1305,13 +1394,13 @@ impl Assembler
/// Compile the instructions down to machine code
/// NOTE: should compile return a list of block labels to enable
/// compiling multiple blocks at a time?
- pub fn compile(self, cb: &mut CodeBlock) -> Vec<u32>
+ pub fn compile(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>) -> Vec<u32>
{
#[cfg(feature = "disasm")]
let start_addr = cb.get_write_ptr();
let alloc_regs = Self::get_alloc_regs();
- let gc_offsets = self.compile_with_regs(cb, alloc_regs);
+ let gc_offsets = self.compile_with_regs(cb, ocb, alloc_regs);
#[cfg(feature = "disasm")]
if let Some(dump_disasm) = get_option_ref!(dump_disasm) {
@@ -1327,7 +1416,7 @@ impl Assembler
{
let mut alloc_regs = Self::get_alloc_regs();
let alloc_regs = alloc_regs.drain(0..num_regs).collect();
- self.compile_with_regs(cb, alloc_regs)
+ self.compile_with_regs(cb, None, alloc_regs)
}
/// Consume the assembler by creating a new draining iterator.
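The core of the change is `get_side_exit` above: the cache key is the `SideExitContext` with type information dropped via `get_generic_ctx`, so compilations of the same PC that differ only in inferred temp types share one outlined stub, and `gen_counted_exit` wraps a counter around the cached exit only when one is requested. A simplified, self-contained sketch of the caching idea, with a hypothetical `Ctx` standing in for `Context` and a running index standing in for `gen_outlined_exit`:

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct Ctx { stack_size: u8, known_type: Option<u8> }

    impl Ctx {
        // Stands in for Context::get_generic_ctx(): drop type info from the key.
        fn get_generic(self) -> Ctx { Ctx { known_type: None, ..self } }
    }

    fn get_or_make_exit(cache: &mut HashMap<(usize, Ctx), usize>, pc: usize, ctx: Ctx) -> usize {
        let key = (pc, ctx.get_generic());
        let next_stub = cache.len(); // stands in for gen_outlined_exit(...)
        *cache.entry(key).or_insert(next_stub)
    }

    fn main() {
        let mut cache = HashMap::new();
        let a = Ctx { stack_size: 2, known_type: Some(1) };
        let b = Ctx { stack_size: 2, known_type: Some(7) };
        // Same PC, different type info: one shared exit stub.
        assert_eq!(get_or_make_exit(&mut cache, 0x10, a),
                   get_or_make_exit(&mut cache, 0x10, b));
    }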
diff --git a/yjit/src/backend/tests.rs b/yjit/src/backend/tests.rs
index 3098c7e3b0..8ba9f61d25 100644
--- a/yjit/src/backend/tests.rs
+++ b/yjit/src/backend/tests.rs
@@ -199,7 +199,7 @@ fn test_alloc_ccall_regs() {
let out2 = asm.ccall(0 as *const u8, vec![out1]);
asm.mov(EC, out2);
let mut cb = CodeBlock::new_dummy(1024);
- asm.compile_with_regs(&mut cb, Assembler::get_alloc_regs());
+ asm.compile_with_regs(&mut cb, None, Assembler::get_alloc_regs());
}
#[test]
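Call sites that can never emit `Target::SideExit`, like the backend tests above, now pass `None` for the outlined code block; `compile_side_exit` only unwraps the `Option` when a `Target::SideExit` actually reaches the backend. Illustrative call shapes under that assumption (`ocb`, `dummy_cb`, and `regs` are placeholders):

    // Codegen path: side exits are possible, so the OutlinedCb is supplied.
    let gc_offsets = asm.compile(cb, Some(ocb));
    // Test path: no side exits are emitted, so None is safe.
    test_asm.compile_with_regs(&mut dummy_cb, None, regs);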
diff --git a/yjit/src/backend/x86_64/mod.rs b/yjit/src/backend/x86_64/mod.rs
index 170fc888e0..de38cd98d5 100644
--- a/yjit/src/backend/x86_64/mod.rs
+++ b/yjit/src/backend/x86_64/mod.rs
@@ -7,6 +7,7 @@ use std::mem::take;
use crate::asm::*;
use crate::asm::x86_64::*;
use crate::codegen::{JITState};
+use crate::core::Context;
use crate::cruby::*;
use crate::backend::ir::*;
use crate::codegen::CodegenGlobals;
@@ -115,7 +116,7 @@ impl Assembler
fn x86_split(mut self) -> Assembler
{
let live_ranges: Vec<usize> = take(&mut self.live_ranges);
- let mut asm = Assembler::new_with_label_names(take(&mut self.label_names));
+ let mut asm = Assembler::new_with_label_names(take(&mut self.label_names), take(&mut self.side_exits));
let mut iterator = self.into_draining_iter();
while let Some((index, mut insn)) = iterator.next_unmapped() {
@@ -381,7 +382,7 @@ impl Assembler
}
/// Emit platform-specific machine code
- pub fn x86_emit(&mut self, cb: &mut CodeBlock) -> Vec<u32>
+ pub fn x86_emit(&mut self, cb: &mut CodeBlock, ocb: &mut Option<&mut OutlinedCb>) -> Vec<u32>
{
/// For some instructions, we want to be able to lower a 64-bit operand
/// without requiring more registers to be available in the register
@@ -411,6 +412,19 @@ impl Assembler
}
}
+ /// Compile a side exit if Target::SideExit is given.
+ fn compile_side_exit(
+ target: Target,
+ asm: &mut Assembler,
+ ocb: &mut Option<&mut OutlinedCb>,
+ ) -> Target {
+ if let Target::SideExit { counter, context } = target {
+ let side_exit = asm.get_side_exit(&context.unwrap(), counter, ocb.as_mut().unwrap());
+ Target::SideExitPtr(side_exit)
+ } else {
+ target
+ }
+ }
fn emit_csel(cb: &mut CodeBlock, truthy: Opnd, falsy: Opnd, out: Opnd, cmov_fn: fn(&mut CodeBlock, X86Opnd, X86Opnd)) {
if out != truthy {
@@ -426,8 +440,8 @@ impl Assembler
// For each instruction
let start_write_pos = cb.get_write_pos();
- let mut insns_idx: usize = 0;
- while let Some(insn) = self.insns.get(insns_idx) {
+ let mut insn_idx: usize = 0;
+ while let Some(insn) = self.insns.get(insn_idx) {
let src_ptr = cb.get_write_ptr();
let had_dropped_bytes = cb.has_dropped_bytes();
let old_label_state = cb.get_label_state();
@@ -626,58 +640,66 @@ impl Assembler
// Conditional jump to a label
Insn::Jmp(target) => {
- match *target {
+ match compile_side_exit(*target, self, ocb) {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jmp_ptr(cb, code_ptr),
Target::Label(label_idx) => jmp_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
}
}
Insn::Je(target) => {
- match *target {
+ match compile_side_exit(*target, self, ocb) {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => je_ptr(cb, code_ptr),
Target::Label(label_idx) => je_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
}
}
Insn::Jne(target) => {
- match *target {
+ match compile_side_exit(*target, self, ocb) {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jne_ptr(cb, code_ptr),
Target::Label(label_idx) => jne_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
}
}
Insn::Jl(target) => {
- match *target {
+ match compile_side_exit(*target, self, ocb) {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jl_ptr(cb, code_ptr),
Target::Label(label_idx) => jl_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
}
},
Insn::Jbe(target) => {
- match *target {
+ match compile_side_exit(*target, self, ocb) {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jbe_ptr(cb, code_ptr),
Target::Label(label_idx) => jbe_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
}
},
Insn::Jz(target) => {
- match *target {
+ match compile_side_exit(*target, self, ocb) {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jz_ptr(cb, code_ptr),
Target::Label(label_idx) => jz_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
}
}
Insn::Jnz(target) => {
- match *target {
+ match compile_side_exit(*target, self, ocb) {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jnz_ptr(cb, code_ptr),
Target::Label(label_idx) => jnz_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
}
}
Insn::Jo(target) => {
- match *target {
+ match compile_side_exit(*target, self, ocb) {
Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jo_ptr(cb, code_ptr),
Target::Label(label_idx) => jo_label(cb, label_idx),
+ Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exit"),
}
}
@@ -724,13 +746,6 @@ impl Assembler
nop(cb, (cb.jmp_ptr_bytes() - code_size) as u32);
}
}
-
- // We want to keep the panic here because some instructions that
- // we feed to the backend could get lowered into other
- // instructions. So it's possible that some of our backend
- // instructions can never make it to the emit stage.
- #[allow(unreachable_patterns)]
- _ => panic!("unsupported instruction passed to x86 backend: {:?}", insn)
};
// On failure, jump to the next page and retry the current insn
@@ -738,7 +753,7 @@ impl Assembler
// Reset cb states before retrying the current Insn
cb.set_label_state(old_label_state);
} else {
- insns_idx += 1;
+ insn_idx += 1;
gc_offsets.append(&mut insn_gc_offsets);
}
}
@@ -747,8 +762,7 @@ impl Assembler
}
/// Optimize and compile the stored instructions
- pub fn compile_with_regs(self, cb: &mut CodeBlock, regs: Vec<Reg>) -> Vec<u32>
- {
+ pub fn compile_with_regs(self, cb: &mut CodeBlock, ocb: Option<&mut OutlinedCb>, regs: Vec<Reg>) -> Vec<u32> {
let asm = self.lower_stack();
let asm = asm.x86_split();
let mut asm = asm.alloc_regs(regs);
@@ -759,7 +773,8 @@ impl Assembler
assert!(label_idx == idx);
}
- let gc_offsets = asm.x86_emit(cb);
+ let mut ocb = ocb; // for &mut
+ let gc_offsets = asm.x86_emit(cb, &mut ocb);
if cb.has_dropped_bytes() {
cb.clear_labels();
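Finally, the rename of `insns_idx` to `insn_idx` lands in the emit loop whose shape both backends share: on an out-of-space failure the label state is rolled back and the same instruction is retried after moving to a fresh page. A self-contained sketch of that loop with stub types, not the real `CodeBlock` API:

    struct Cb { dropped: bool, label_state: u32 }

    impl Cb {
        fn get_label_state(&self) -> u32 { self.label_state }
        fn set_label_state(&mut self, s: u32) { self.label_state = s; }
        fn has_dropped_bytes(&self) -> bool { self.dropped }
    }

    fn emit_all(cb: &mut Cb, insns: &[&str]) {
        let mut insn_idx = 0;
        while let Some(_insn) = insns.get(insn_idx) {
            let old_label_state = cb.get_label_state();
            // ... encode the instruction into cb here ...
            if cb.has_dropped_bytes() {
                // Ran out of space: roll back label bookkeeping and retry
                // this same instruction (on the next page in the real code).
                cb.set_label_state(old_label_state);
                cb.dropped = false;
            } else {
                insn_idx += 1;
            }
        }
    }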