author     Dan Robertson <dan@dlrobertson.com>  2019-02-02 16:34:09 +0000
committer  Dan Robertson <dan@dlrobertson.com>  2019-02-27 10:21:50 -0500
commit     a618ad6335f7cb70005884542f0548ef29f23b7e (patch)
tree       88d5ba249064ee40bc5cdec76a7c0ced9236b664
parent     1a6e9e24083c3250f76ca1ad6a0142d9ab3223d0 (diff)
download   rust-a618ad6335f7cb70005884542f0548ef29f23b7e.tar.gz
Refactor FunctionCx::codegen_terminator

- Move closures defined in codegen_terminator into a separate helper
  structure and implementation (see the sketch after the diffstat).
- Create helper functions for each of the complex match arms on the
  terminator's kind in codegen_terminator.
-rw-r--r--  src/librustc_codegen_ssa/mir/block.rs  1442
1 file changed, 771 insertions(+), 671 deletions(-)
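For orientation before the diff: the refactor replaces the closures (`funclet`, `lltarget`, `llblock`, `funclet_br`, `do_call`) that captured `bb`, `terminator`, and `funclet_bb` inside `codegen_terminator` with methods on a new `TerminatorCodegenHelper` struct that take the `FunctionCx` explicitly, and it splits each complex match arm into its own `codegen_*_terminator` method. Below is a minimal, self-contained sketch of that pattern only, using hypothetical simplified types rather than the real rustc ones:

// Sketch only: `FunctionCx`, `TerminatorCodegenHelper`, and
// `TerminatorKind` here are simplified stand-ins, not the rustc types.

struct FunctionCx {
    blocks: Vec<String>, // stand-in for per-basic-block codegen state
}

// Holds what the old closures used to capture out of
// `codegen_terminator`'s scope (`bb`, `terminator`, `funclet_bb`, ...).
struct TerminatorCodegenHelper {
    bb: usize,
}

impl TerminatorCodegenHelper {
    // Formerly `let lltarget = |this, target| ...`: the context `fx` is
    // now an explicit parameter instead of a captured `this`.
    fn lltarget<'a>(&self, fx: &'a FunctionCx, target: usize) -> &'a str {
        &fx.blocks[target]
    }
}

enum TerminatorKind {
    Goto { target: usize },
    Unreachable,
}

impl FunctionCx {
    fn codegen_terminator(&mut self, bb: usize, kind: TerminatorKind) {
        let helper = TerminatorCodegenHelper { bb };
        match kind {
            // Each complex arm now delegates to a named helper method
            // instead of holding hundreds of lines inline.
            TerminatorKind::Goto { target } => {
                self.codegen_goto_terminator(helper, target)
            }
            TerminatorKind::Unreachable => {}
        }
    }

    fn codegen_goto_terminator(
        &mut self,
        helper: TerminatorCodegenHelper,
        target: usize,
    ) {
        let block = helper.lltarget(self, target).to_string();
        println!("bb{}: jump to {}", helper.bb, block);
    }
}

fn main() {
    let mut fx = FunctionCx { blocks: vec!["bb0".into(), "bb1".into()] };
    fx.codegen_terminator(0, TerminatorKind::Goto { target: 1 });
    fx.codegen_terminator(1, TerminatorKind::Unreachable);
}

The payoff in the real patch is the same as in this sketch: `do_call`, `funclet_br`, and friends become ordinary methods that can be read and reused across the new `codegen_*_terminator` functions, without the closure-lifetime workaround the old code needed (see the removed `HACK(eddyb)` comment in the diff).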
diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs
index f40aa0cb6d1..684dfac991b 100644
--- a/src/librustc_codegen_ssa/mir/block.rs
+++ b/src/librustc_codegen_ssa/mir/block.rs
@@ -13,6 +13,8 @@ use crate::meth;
use crate::traits::*;
+use std::borrow::Cow;
+
use syntax::symbol::Symbol;
use syntax_pos::Pos;
@@ -21,764 +23,862 @@ use super::place::PlaceRef;
use super::operand::{OperandValue, OperandRef};
use super::operand::OperandValue::{Pair, Ref, Immediate};
-impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
- pub fn codegen_block(
- &mut self,
- bb: mir::BasicBlock,
- ) {
- let mut bx = self.build_block(bb);
- let data = &self.mir[bb];
+/// Used by `FunctionCx::codegen_terminator` for emitting common patterns,
+/// e.g., creating a basic block, calling a function, etc.
+struct TerminatorCodegenHelper<'a, 'tcx> {
+ bb: &'a mir::BasicBlock,
+ terminator: &'a mir::Terminator<'tcx>,
+ funclet_bb: Option<mir::BasicBlock>,
+}
- debug!("codegen_block({:?}={:?})", bb, data);
+impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
+ /// Returns the associated funclet from `FunctionCx::funclets` for the
+ /// `funclet_bb` member if it is not `None`.
+ fn funclet<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
+ &self,
+ fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
+ ) -> Option<&'c Bx::Funclet> {
+ match self.funclet_bb {
+ Some(funcl) => fx.funclets[funcl].as_ref(),
+ None => None,
+ }
+ }
- for statement in &data.statements {
- bx = self.codegen_statement(bx, statement);
+ fn lltarget<'b, 'c, Bx: BuilderMethods<'b, 'tcx>>(
+ &self,
+ fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
+ target: mir::BasicBlock,
+ ) -> (Bx::BasicBlock, bool) {
+ let span = self.terminator.source_info.span;
+ let lltarget = fx.blocks[target];
+ let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
+ match (self.funclet_bb, target_funclet) {
+ (None, None) => (lltarget, false),
+ (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) =>
+ (lltarget, false),
+ // jump *into* cleanup - need a landing pad if GNU
+ (None, Some(_)) => (fx.landing_pad_to(target), false),
+ (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
+ (Some(_), Some(_)) => (fx.landing_pad_to(target), true),
}
+ }
- self.codegen_terminator(bx, bb, data.terminator());
+ /// Create a basic block.
+ fn llblock<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
+ &self,
+ fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
+ target: mir::BasicBlock,
+ ) -> Bx::BasicBlock {
+ let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ if is_cleanupret {
+ // MSVC cross-funclet jump - need a trampoline
+
+ debug!("llblock: creating cleanup trampoline for {:?}", target);
+ let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
+ let mut trampoline = fx.new_block(name);
+ trampoline.cleanup_ret(self.funclet(fx).unwrap(),
+ Some(lltarget));
+ trampoline.llbb()
+ } else {
+ lltarget
+ }
}
- fn codegen_terminator(
- &mut self,
- mut bx: Bx,
- bb: mir::BasicBlock,
- terminator: &mir::Terminator<'tcx>
+ fn funclet_br<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
+ &self,
+ fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
+ bx: &mut Bx,
+ target: mir::BasicBlock,
) {
- debug!("codegen_terminator: {:?}", terminator);
-
- // Create the cleanup bundle, if needed.
- let tcx = self.cx.tcx();
- let span = terminator.source_info.span;
- let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
+ let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ if is_cleanupret {
+ // micro-optimization: generate a `ret` rather than a jump
+ // to a trampoline.
+ bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+ } else {
+ bx.br(lltarget);
+ }
+ }
- // HACK(eddyb) force the right lifetimes, NLL can't figure them out.
- fn funclet_closure_factory<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
- funclet_bb: Option<mir::BasicBlock>
- ) -> impl for<'b> Fn(
- &'b FunctionCx<'a, 'tcx, Bx>,
- ) -> Option<&'b Bx::Funclet> {
- move |this| {
- match funclet_bb {
- Some(funclet_bb) => this.funclets[funclet_bb].as_ref(),
- None => None,
- }
+ /// Call `fn_ptr` of `fn_ty` with the arguments `llargs`, the optional
+ /// return destination `destination` and the cleanup function `cleanup`.
+ fn do_call<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
+ &self,
+ fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
+ bx: &mut Bx,
+ fn_ty: FnType<'tcx, Ty<'tcx>>,
+ fn_ptr: Bx::Value,
+ llargs: &[Bx::Value],
+ destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
+ cleanup: Option<mir::BasicBlock>,
+ ) {
+ if let Some(cleanup) = cleanup {
+ let ret_bx = if let Some((_, target)) = destination {
+ fx.blocks[target]
+ } else {
+ fx.unreachable_block()
+ };
+ let invokeret = bx.invoke(fn_ptr,
+ &llargs,
+ ret_bx,
+ self.llblock(fx, cleanup),
+ self.funclet(fx));
+ bx.apply_attrs_callsite(&fn_ty, invokeret);
+
+ if let Some((ret_dest, target)) = destination {
+ let mut ret_bx = fx.build_block(target);
+ fx.set_debug_loc(&mut ret_bx, self.terminator.source_info);
+ fx.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret);
}
- }
- let funclet = funclet_closure_factory(funclet_bb);
-
- let lltarget = |this: &mut Self, target: mir::BasicBlock| {
- let lltarget = this.blocks[target];
- let target_funclet = this.cleanup_kinds[target].funclet_bb(target);
- match (funclet_bb, target_funclet) {
- (None, None) => (lltarget, false),
- (Some(f), Some(t_f))
- if f == t_f || !base::wants_msvc_seh(tcx.sess)
- => (lltarget, false),
- (None, Some(_)) => {
- // jump *into* cleanup - need a landing pad if GNU
- (this.landing_pad_to(target), false)
- }
- (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", terminator),
- (Some(_), Some(_)) => {
- (this.landing_pad_to(target), true)
- }
+ } else {
+ let llret = bx.call(fn_ptr, &llargs, self.funclet(fx));
+ bx.apply_attrs_callsite(&fn_ty, llret);
+ if fx.mir[*self.bb].is_cleanup {
+ // Cleanup is always the cold path. Don't inline
+ // drop glue. Also, when there is a deeply-nested
+ // struct, there are "symmetry" issues that cause
+ // exponential inlining - see issue #41696.
+ bx.do_not_inline(llret);
}
- };
-
- let llblock = |this: &mut Self, target: mir::BasicBlock| {
- let (lltarget, is_cleanupret) = lltarget(this, target);
- if is_cleanupret {
- // MSVC cross-funclet jump - need a trampoline
- debug!("llblock: creating cleanup trampoline for {:?}", target);
- let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
- let mut trampoline = this.new_block(name);
- trampoline.cleanup_ret(funclet(this).unwrap(), Some(lltarget));
- trampoline.llbb()
+ if let Some((ret_dest, target)) = destination {
+ fx.store_return(bx, ret_dest, &fn_ty.ret, llret);
+ self.funclet_br(fx, bx, target);
} else {
- lltarget
+ bx.unreachable();
}
- };
-
- let funclet_br =
- |this: &mut Self, bx: &mut Bx, target: mir::BasicBlock| {
- let (lltarget, is_cleanupret) = lltarget(this, target);
- if is_cleanupret {
- // micro-optimization: generate a `ret` rather than a jump
- // to a trampoline.
- bx.cleanup_ret(funclet(this).unwrap(), Some(lltarget));
- } else {
- bx.br(lltarget);
- }
- };
+ }
+ }
+}
- let do_call = |
- this: &mut Self,
- bx: &mut Bx,
- fn_ty: FnType<'tcx, Ty<'tcx>>,
- fn_ptr: Bx::Value,
- llargs: &[Bx::Value],
- destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
- cleanup: Option<mir::BasicBlock>
- | {
- if let Some(cleanup) = cleanup {
- let ret_bx = if let Some((_, target)) = destination {
- this.blocks[target]
- } else {
- this.unreachable_block()
- };
- let invokeret = bx.invoke(fn_ptr,
- &llargs,
- ret_bx,
- llblock(this, cleanup),
- funclet(this));
- bx.apply_attrs_callsite(&fn_ty, invokeret);
-
- if let Some((ret_dest, target)) = destination {
- let mut ret_bx = this.build_block(target);
- this.set_debug_loc(&mut ret_bx, terminator.source_info);
- this.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret);
- }
+/// Codegen implementations for some terminator variants.
+impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ /// Generates code for a `Resume` terminator.
+ fn codegen_resume_terminator<'b>(
+ &mut self,
+ helper: TerminatorCodegenHelper<'b, 'tcx>,
+ mut bx: Bx,
+ ) {
+ if let Some(funclet) = helper.funclet(self) {
+ bx.cleanup_ret(funclet, None);
+ } else {
+ let slot = self.get_personality_slot(&mut bx);
+ let lp0 = slot.project_field(&mut bx, 0);
+ let lp0 = bx.load_operand(lp0).immediate();
+ let lp1 = slot.project_field(&mut bx, 1);
+ let lp1 = bx.load_operand(lp1).immediate();
+ slot.storage_dead(&mut bx);
+
+ if !bx.sess().target.target.options.custom_unwind_resume {
+ let mut lp = bx.const_undef(self.landing_pad_type());
+ lp = bx.insert_value(lp, lp0, 0);
+ lp = bx.insert_value(lp, lp1, 1);
+ bx.resume(lp);
} else {
- let llret = bx.call(fn_ptr, &llargs, funclet(this));
- bx.apply_attrs_callsite(&fn_ty, llret);
- if this.mir[bb].is_cleanup {
- // Cleanup is always the cold path. Don't inline
- // drop glue. Also, when there is a deeply-nested
- // struct, there are "symmetry" issues that cause
- // exponential inlining - see issue #41696.
- bx.do_not_inline(llret);
- }
-
- if let Some((ret_dest, target)) = destination {
- this.store_return(bx, ret_dest, &fn_ty.ret, llret);
- funclet_br(this, bx, target);
- } else {
- bx.unreachable();
- }
+ bx.call(bx.eh_unwind_resume(), &[lp0],
+ helper.funclet(self));
+ bx.unreachable();
}
- };
+ }
+ }
- self.set_debug_loc(&mut bx, terminator.source_info);
- match terminator.kind {
- mir::TerminatorKind::Resume => {
- if let Some(funclet) = funclet(self) {
- bx.cleanup_ret(funclet, None);
+ fn codegen_switchint_terminator<'b>(
+ &mut self,
+ helper: TerminatorCodegenHelper<'b, 'tcx>,
+ mut bx: Bx,
+ discr: &mir::Operand<'tcx>,
+ switch_ty: Ty<'tcx>,
+ values: &Cow<'tcx, [u128]>,
+ targets: &Vec<mir::BasicBlock>,
+ ) {
+ let discr = self.codegen_operand(&mut bx, &discr);
+ if targets.len() == 2 {
+ // If there are two targets, emit br instead of switch
+ let lltrue = helper.llblock(self, targets[0]);
+ let llfalse = helper.llblock(self, targets[1]);
+ if switch_ty == bx.tcx().types.bool {
+ // Don't generate trivial icmps when switching on bool
+ if let [0] = values[..] {
+ bx.cond_br(discr.immediate(), llfalse, lltrue);
} else {
- let slot = self.get_personality_slot(&mut bx);
- let lp0 = slot.project_field(&mut bx, 0);
- let lp0 = bx.load_operand(lp0).immediate();
- let lp1 = slot.project_field(&mut bx, 1);
- let lp1 = bx.load_operand(lp1).immediate();
- slot.storage_dead(&mut bx);
-
- if !bx.sess().target.target.options.custom_unwind_resume {
- let mut lp = bx.const_undef(self.landing_pad_type());
- lp = bx.insert_value(lp, lp0, 0);
- lp = bx.insert_value(lp, lp1, 1);
- bx.resume(lp);
- } else {
- bx.call(bx.eh_unwind_resume(), &[lp0], funclet(self));
- bx.unreachable();
- }
+ assert_eq!(&values[..], &[1]);
+ bx.cond_br(discr.immediate(), lltrue, llfalse);
}
+ } else {
+ let switch_llty = bx.immediate_backend_type(
+ bx.layout_of(switch_ty)
+ );
+ let llval = bx.const_uint_big(switch_llty, values[0]);
+ let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
+ bx.cond_br(cmp, lltrue, llfalse);
+ }
+ } else {
+ let (otherwise, targets) = targets.split_last().unwrap();
+ let switch = bx.switch(discr.immediate(),
+ helper.llblock(self, *otherwise),
+ values.len());
+ let switch_llty = bx.immediate_backend_type(
+ bx.layout_of(switch_ty)
+ );
+ for (&value, target) in values.iter().zip(targets) {
+ let llval = bx.const_uint_big(switch_llty, value);
+ let llbb = helper.llblock(self, *target);
+ bx.add_case(switch, llval, llbb)
}
+ }
+ }
- mir::TerminatorKind::Abort => {
- bx.abort();
- bx.unreachable();
+ fn codegen_return_terminator<'b>(
+ &mut self,
+ mut bx: Bx,
+ ) {
+ if self.fn_ty.variadic {
+ if let Some(va_list) = self.va_list_ref {
+ bx.va_end(va_list.llval);
+ }
+ }
+ let llval = match self.fn_ty.ret.mode {
+ PassMode::Ignore(IgnoreMode::Zst) | PassMode::Indirect(..) => {
+ bx.ret_void();
+ return;
}
- mir::TerminatorKind::Goto { target } => {
- funclet_br(self, &mut bx, target);
+ PassMode::Ignore(IgnoreMode::CVarArgs) => {
+ bug!("C-variadic arguments should never be the return type");
}
- mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
- let discr = self.codegen_operand(&mut bx, discr);
- if targets.len() == 2 {
- // If there are two targets, emit br instead of switch
- let lltrue = llblock(self, targets[0]);
- let llfalse = llblock(self, targets[1]);
- if switch_ty == bx.tcx().types.bool {
- // Don't generate trivial icmps when switching on bool
- if let [0] = values[..] {
- bx.cond_br(discr.immediate(), llfalse, lltrue);
- } else {
- assert_eq!(&values[..], &[1]);
- bx.cond_br(discr.immediate(), lltrue, llfalse);
- }
- } else {
- let switch_llty = bx.immediate_backend_type(
- bx.layout_of(switch_ty)
- );
- let llval = bx.const_uint_big(switch_llty, values[0]);
- let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
- bx.cond_br(cmp, lltrue, llfalse);
- }
+ PassMode::Direct(_) | PassMode::Pair(..) => {
+ let op =
+ self.codegen_consume(&mut bx, &mir::Place::Local(mir::RETURN_PLACE));
+ if let Ref(llval, _, align) = op.val {
+ bx.load(llval, align)
} else {
- let (otherwise, targets) = targets.split_last().unwrap();
- let switch = bx.switch(discr.immediate(),
- llblock(self, *otherwise),
- values.len());
- let switch_llty = bx.immediate_backend_type(
- bx.layout_of(switch_ty)
- );
- for (&value, target) in values.iter().zip(targets) {
- let llval = bx.const_uint_big(switch_llty, value);
- let llbb = llblock(self, *target);
- bx.add_case(switch, llval, llbb)
- }
+ op.immediate_or_packed_pair(&mut bx)
}
}
- mir::TerminatorKind::Return => {
- if self.fn_ty.variadic {
- if let Some(va_list) = self.va_list_ref {
- bx.va_end(va_list.llval);
+ PassMode::Cast(cast_ty) => {
+ let op = match self.locals[mir::RETURN_PLACE] {
+ LocalRef::Operand(Some(op)) => op,
+ LocalRef::Operand(None) => bug!("use of return before def"),
+ LocalRef::Place(cg_place) => {
+ OperandRef {
+ val: Ref(cg_place.llval, None, cg_place.align),
+ layout: cg_place.layout
+ }
}
- }
- let llval = match self.fn_ty.ret.mode {
- PassMode::Ignore(IgnoreMode::Zst) | PassMode::Indirect(..) => {
- bx.ret_void();
- return;
+ LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+ };
+ let llslot = match op.val {
+ Immediate(_) | Pair(..) => {
+ let scratch =
+ PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret");
+ op.val.store(&mut bx, scratch);
+ scratch.llval
}
-
- PassMode::Ignore(IgnoreMode::CVarArgs) => {
- bug!("C variadic arguments should never be the return type");
+ Ref(llval, _, align) => {
+ assert_eq!(align, op.layout.align.abi,
+ "return place is unaligned!");
+ llval
}
+ };
+ let addr = bx.pointercast(llslot, bx.type_ptr_to(
+ bx.cast_backend_type(&cast_ty)
+ ));
+ bx.load(addr, self.fn_ty.ret.layout.align.abi)
+ }
+ };
+ bx.ret(llval);
+ }
- PassMode::Direct(_) | PassMode::Pair(..) => {
- let op =
- self.codegen_consume(&mut bx, &mir::Place::Local(mir::RETURN_PLACE));
- if let Ref(llval, _, align) = op.val {
- bx.load(llval, align)
- } else {
- op.immediate_or_packed_pair(&mut bx)
- }
- }
- PassMode::Cast(cast_ty) => {
- let op = match self.locals[mir::RETURN_PLACE] {
- LocalRef::Operand(Some(op)) => op,
- LocalRef::Operand(None) => bug!("use of return before def"),
- LocalRef::Place(cg_place) => {
- OperandRef {
- val: Ref(cg_place.llval, None, cg_place.align),
- layout: cg_place.layout
- }
- }
- LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
- };
- let llslot = match op.val {
- Immediate(_) | Pair(..) => {
- let scratch =
- PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret");
- op.val.store(&mut bx, scratch);
- scratch.llval
- }
- Ref(llval, _, align) => {
- assert_eq!(align, op.layout.align.abi,
- "return place is unaligned!");
- llval
- }
- };
- let addr = bx.pointercast(llslot, bx.type_ptr_to(
- bx.cast_backend_type(&cast_ty)
- ));
- bx.load(addr, self.fn_ty.ret.layout.align.abi)
- }
- };
- bx.ret(llval);
+ fn codegen_drop_terminator<'b>(
+ &mut self,
+ helper: TerminatorCodegenHelper<'b, 'tcx>,
+ mut bx: Bx,
+ location: &mir::Place<'tcx>,
+ target: mir::BasicBlock,
+ unwind: Option<mir::BasicBlock>,
+ ) {
+ let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx());
+ let ty = self.monomorphize(&ty);
+ let drop_fn = monomorphize::resolve_drop_in_place(bx.tcx(), ty);
+
+ if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
+ // we don't actually need to drop anything.
+ helper.funclet_br(self, &mut bx, target);
+ return
+ }
+
+ let place = self.codegen_place(&mut bx, location);
+ let (args1, args2);
+ let mut args = if let Some(llextra) = place.llextra {
+ args2 = [place.llval, llextra];
+ &args2[..]
+ } else {
+ args1 = [place.llval];
+ &args1[..]
+ };
+ let (drop_fn, fn_ty) = match ty.sty {
+ ty::Dynamic(..) => {
+ let sig = drop_fn.fn_sig(self.cx.tcx());
+ let sig = self.cx.tcx().normalize_erasing_late_bound_regions(
+ ty::ParamEnv::reveal_all(),
+ &sig,
+ );
+ let fn_ty = bx.new_vtable(sig, &[]);
+ let vtable = args[1];
+ args = &args[..1];
+ (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty)
}
+ _ => {
+ (bx.get_fn(drop_fn),
+ bx.fn_type_of_instance(&drop_fn))
+ }
+ };
+ helper.do_call(self, &mut bx, fn_ty, drop_fn, args,
+ Some((ReturnDest::Nothing, target)),
+ unwind);
+ }
- mir::TerminatorKind::Unreachable => {
- bx.unreachable();
+ fn codegen_assert_terminator<'b>(
+ &mut self,
+ helper: TerminatorCodegenHelper<'b, 'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ cond: &mir::Operand<'tcx>,
+ expected: bool,
+ msg: &mir::AssertMessage<'tcx>,
+ target: mir::BasicBlock,
+ cleanup: Option<mir::BasicBlock>,
+ ) {
+ let span = terminator.source_info.span;
+ let cond = self.codegen_operand(&mut bx, cond).immediate();
+ let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
+
+ // This case can currently arise only from functions marked
+ // with #[rustc_inherit_overflow_checks] and inlined from
+ // another crate (mostly core::num generic/#[inline] fns),
+ // while the current crate doesn't use overflow checks.
+ // NOTE: Unlike binops, negation doesn't have its own
+ // checked operation, just a comparison with the minimum
+ // value, so we have to check for the assert message.
+ if !bx.check_overflow() {
+ if let mir::interpret::EvalErrorKind::OverflowNeg = *msg {
+ const_cond = Some(expected);
}
+ }
- mir::TerminatorKind::Drop { ref location, target, unwind } => {
- let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx());
- let ty = self.monomorphize(&ty);
- let drop_fn = monomorphize::resolve_drop_in_place(bx.tcx(), ty);
-
- if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
- // we don't actually need to drop anything.
- funclet_br(self, &mut bx, target);
- return
- }
+ // Don't codegen the panic block if success is known.
+ if const_cond == Some(expected) {
+ helper.funclet_br(self, &mut bx, target);
+ return;
+ }
- let place = self.codegen_place(&mut bx, location);
- let (args1, args2);
- let mut args = if let Some(llextra) = place.llextra {
- args2 = [place.llval, llextra];
- &args2[..]
- } else {
- args1 = [place.llval];
- &args1[..]
- };
- let (drop_fn, fn_ty) = match ty.sty {
- ty::Dynamic(..) => {
- let sig = drop_fn.fn_sig(tcx);
- let sig = tcx.normalize_erasing_late_bound_regions(
- ty::ParamEnv::reveal_all(),
- &sig,
- );
- let fn_ty = bx.new_vtable(sig, &[]);
- let vtable = args[1];
- args = &args[..1];
- (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty)
- }
- _ => {
- (bx.get_fn(drop_fn),
- bx.fn_type_of_instance(&drop_fn))
- }
- };
- do_call(self, &mut bx, fn_ty, drop_fn, args,
- Some((ReturnDest::Nothing, target)),
- unwind);
+ // Pass the condition through llvm.expect for branch hinting.
+ let cond = bx.expect(cond, expected);
+
+ // Create the failure block and the conditional branch to it.
+ let lltarget = helper.llblock(self, target);
+ let panic_block = self.new_block("panic");
+ if expected {
+ bx.cond_br(cond, lltarget, panic_block.llbb());
+ } else {
+ bx.cond_br(cond, panic_block.llbb(), lltarget);
+ }
+
+ // After this point, bx is the block for the call to panic.
+ bx = panic_block;
+ self.set_debug_loc(&mut bx, terminator.source_info);
+
+ // Get the location information.
+ let loc = bx.sess().source_map().lookup_char_pos(span.lo());
+ let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
+ let filename = bx.const_str_slice(filename);
+ let line = bx.const_u32(loc.line as u32);
+ let col = bx.const_u32(loc.col.to_usize() as u32 + 1);
+ let align = self.cx.tcx().data_layout.aggregate_align.abi
+ .max(self.cx.tcx().data_layout.i32_align.abi)
+ .max(self.cx.tcx().data_layout.pointer_align.abi);
+
+ // Put together the arguments to the panic entry point.
+ let (lang_item, args) = match *msg {
+ EvalErrorKind::BoundsCheck { ref len, ref index } => {
+ let len = self.codegen_operand(&mut bx, len).immediate();
+ let index = self.codegen_operand(&mut bx, index).immediate();
+
+ let file_line_col = bx.const_struct(&[filename, line, col], false);
+ let file_line_col = bx.static_addr_of(
+ file_line_col,
+ align,
+ Some("panic_bounds_check_loc")
+ );
+ (lang_items::PanicBoundsCheckFnLangItem,
+ vec![file_line_col, index, len])
}
+ _ => {
+ let str = msg.description();
+ let msg_str = Symbol::intern(str).as_str();
+ let msg_str = bx.const_str_slice(msg_str);
+ let msg_file_line_col = bx.const_struct(
+ &[msg_str, filename, line, col],
+ false
+ );
+ let msg_file_line_col = bx.static_addr_of(
+ msg_file_line_col,
+ align,
+ Some("panic_loc")
+ );
+ (lang_items::PanicFnLangItem,
+ vec![msg_file_line_col])
+ }
+ };
- mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
- let cond = self.codegen_operand(&mut bx, cond).immediate();
- let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
-
- // This case can currently arise only from functions marked
- // with #[rustc_inherit_overflow_checks] and inlined from
- // another crate (mostly core::num generic/#[inline] fns),
- // while the current crate doesn't use overflow checks.
- // NOTE: Unlike binops, negation doesn't have its own
- // checked operation, just a comparison with the minimum
- // value, so we have to check for the assert message.
- if !bx.check_overflow() {
- if let mir::interpret::EvalErrorKind::OverflowNeg = *msg {
- const_cond = Some(expected);
- }
- }
+ // Obtain the panic entry point.
+ let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
+ let instance = ty::Instance::mono(bx.tcx(), def_id);
+ let fn_ty = bx.fn_type_of_instance(&instance);
+ let llfn = bx.get_fn(instance);
- // Don't codegen the panic block if success is known.
- if const_cond == Some(expected) {
- funclet_br(self, &mut bx, target);
- return;
- }
+ // Codegen the actual panic invoke/call.
+ helper.do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup);
+ }
- // Pass the condition through llvm.expect for branch hinting.
- let cond = bx.expect(cond, expected);
+ fn codegen_call_terminator<'b>(
+ &mut self,
+ helper: TerminatorCodegenHelper<'b, 'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ func: &mir::Operand<'tcx>,
+ args: &Vec<mir::Operand<'tcx>>,
+ destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>,
+ cleanup: Option<mir::BasicBlock>,
+ ) {
+ let span = terminator.source_info.span;
+ // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
+ let callee = self.codegen_operand(&mut bx, func);
+
+ let (instance, mut llfn) = match callee.layout.ty.sty {
+ ty::FnDef(def_id, substs) => {
+ (Some(ty::Instance::resolve(bx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ substs).unwrap()),
+ None)
+ }
+ ty::FnPtr(_) => {
+ (None, Some(callee.immediate()))
+ }
+ _ => bug!("{} is not callable", callee.layout.ty),
+ };
+ let def = instance.map(|i| i.def);
+ let sig = callee.layout.ty.fn_sig(bx.tcx());
+ let sig = bx.tcx().normalize_erasing_late_bound_regions(
+ ty::ParamEnv::reveal_all(),
+ &sig,
+ );
+ let abi = sig.abi;
+
+ // Handle intrinsics that the old codegen wants Expr's for, ourselves.
+ let intrinsic = match def {
+ Some(ty::InstanceDef::Intrinsic(def_id)) =>
+ Some(bx.tcx().item_name(def_id).as_str()),
+ _ => None
+ };
+ let intrinsic = intrinsic.as_ref().map(|s| &s[..]);
- // Create the failure block and the conditional branch to it.
- let lltarget = llblock(self, target);
- let panic_block = self.new_block("panic");
- if expected {
- bx.cond_br(cond, lltarget, panic_block.llbb());
- } else {
- bx.cond_br(cond, panic_block.llbb(), lltarget);
- }
+ if intrinsic == Some("transmute") {
+ if let Some(destination_ref) = destination.as_ref() {
+ let &(ref dest, target) = destination_ref;
+ self.codegen_transmute(&mut bx, &args[0], dest);
+ helper.funclet_br(self, &mut bx, target);
+ } else {
+ // If we are trying to transmute to an uninhabited type,
+ // it is likely there is no allotted destination. In fact,
+ // transmuting to an uninhabited type is UB, which means
+ // we can do what we like. Here, we declare that transmuting
+ // into an uninhabited type is impossible, so anything following
+ // it must be unreachable.
+ assert_eq!(bx.layout_of(sig.output()).abi, layout::Abi::Uninhabited);
+ bx.unreachable();
+ }
+ return;
+ }
- // After this point, bx is the block for the call to panic.
- bx = panic_block;
- self.set_debug_loc(&mut bx, terminator.source_info);
+ // The "spoofed" `VaList` added to a C-variadic functions signature
+ // should not be included in the `extra_args` calculation.
+ let extra_args_start_idx = sig.inputs().len() - if sig.variadic { 1 } else { 0 };
+ let extra_args = &args[extra_args_start_idx..];
+ let extra_args = extra_args.iter().map(|op_arg| {
+ let op_ty = op_arg.ty(self.mir, bx.tcx());
+ self.monomorphize(&op_ty)
+ }).collect::<Vec<_>>();
+
+ let fn_ty = match def {
+ Some(ty::InstanceDef::Virtual(..)) => {
+ bx.new_vtable(sig, &extra_args)
+ }
+ Some(ty::InstanceDef::DropGlue(_, None)) => {
+ // Empty drop glue; a no-op.
+ let &(_, target) = destination.as_ref().unwrap();
+ helper.funclet_br(self, &mut bx, target);
+ return;
+ }
+ _ => bx.new_fn_type(sig, &extra_args)
+ };
- // Get the location information.
+ // Emit a panic or a no-op for `panic_if_uninhabited`.
+ if intrinsic == Some("panic_if_uninhabited") {
+ let ty = instance.unwrap().substs.type_at(0);
+ let layout = bx.layout_of(ty);
+ if layout.abi.is_uninhabited() {
let loc = bx.sess().source_map().lookup_char_pos(span.lo());
let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
let filename = bx.const_str_slice(filename);
let line = bx.const_u32(loc.line as u32);
let col = bx.const_u32(loc.col.to_usize() as u32 + 1);
- let align = tcx.data_layout.aggregate_align.abi
- .max(tcx.data_layout.i32_align.abi)
- .max(tcx.data_layout.pointer_align.abi);
-
- // Put together the arguments to the panic entry point.
- let (lang_item, args) = match *msg {
- EvalErrorKind::BoundsCheck { ref len, ref index } => {
- let len = self.codegen_operand(&mut bx, len).immediate();
- let index = self.codegen_operand(&mut bx, index).immediate();
-
- let file_line_col = bx.const_struct(&[filename, line, col], false);
- let file_line_col = bx.static_addr_of(
- file_line_col,
- align,
- Some("panic_bounds_check_loc")
- );
- (lang_items::PanicBoundsCheckFnLangItem,
- vec![file_line_col, index, len])
- }
- _ => {
- let str = msg.description();
- let msg_str = Symbol::intern(str).as_str();
- let msg_str = bx.const_str_slice(msg_str);
- let msg_file_line_col = bx.const_struct(
- &[msg_str, filename, line, col],
- false
- );
- let msg_file_line_col = bx.static_addr_of(
- msg_file_line_col,
- align,
- Some("panic_loc")
- );
- (lang_items::PanicFnLangItem,
- vec![msg_file_line_col])
- }
- };
+ let align = self.cx.tcx().data_layout.aggregate_align.abi
+ .max(self.cx.tcx().data_layout.i32_align.abi)
+ .max(self.cx.tcx().data_layout.pointer_align.abi);
+
+ let str = format!(
+ "Attempted to instantiate uninhabited type {}",
+ ty
+ );
+ let msg_str = Symbol::intern(&str).as_str();
+ let msg_str = bx.const_str_slice(msg_str);
+ let msg_file_line_col = bx.const_struct(
+ &[msg_str, filename, line, col],
+ false,
+ );
+ let msg_file_line_col = bx.static_addr_of(
+ msg_file_line_col,
+ align,
+ Some("panic_loc"),
+ );
// Obtain the panic entry point.
- let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
+ let def_id =
+ common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem);
let instance = ty::Instance::mono(bx.tcx(), def_id);
let fn_ty = bx.fn_type_of_instance(&instance);
let llfn = bx.get_fn(instance);
// Codegen the actual panic invoke/call.
- do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup);
- }
-
- mir::TerminatorKind::DropAndReplace { .. } => {
- bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
+ helper.do_call(
+ self,
+ &mut bx,
+ fn_ty,
+ llfn,
+ &[msg_file_line_col],
+ destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)),
+ cleanup,
+ );
+ } else {
+ // A no-op.
+ helper.funclet_br(self, &mut bx, destination.as_ref().unwrap().1)
}
+ return;
+ }
- mir::TerminatorKind::Call {
- ref func,
- ref args,
- ref destination,
- cleanup,
- from_hir_call: _
- } => {
- // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
- let callee = self.codegen_operand(&mut bx, func);
-
- let (instance, mut llfn) = match callee.layout.ty.sty {
- ty::FnDef(def_id, substs) => {
- (Some(ty::Instance::resolve(bx.tcx(),
- ty::ParamEnv::reveal_all(),
- def_id,
- substs).unwrap()),
- None)
- }
- ty::FnPtr(_) => {
- (None, Some(callee.immediate()))
- }
- _ => bug!("{} is not callable", callee.layout.ty)
- };
- let def = instance.map(|i| i.def);
- let sig = callee.layout.ty.fn_sig(bx.tcx());
- let sig = bx.tcx().normalize_erasing_late_bound_regions(
- ty::ParamEnv::reveal_all(),
- &sig,
- );
- let abi = sig.abi;
+ // The arguments we'll be passing. Plus one to account for outptr, if used.
+ let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
+ let mut llargs = Vec::with_capacity(arg_count);
- // Handle intrinsics that the old codegen wants Expr's for, ourselves.
- let intrinsic = match def {
- Some(ty::InstanceDef::Intrinsic(def_id))
- => Some(bx.tcx().item_name(def_id).as_str()),
- _ => None
- };
- let intrinsic = intrinsic.as_ref().map(|s| &s[..]);
+ // Prepare the return value destination
+ let ret_dest = if let Some((ref dest, _)) = *destination {
+ let is_intrinsic = intrinsic.is_some();
+ self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs,
+ is_intrinsic)
+ } else {
+ ReturnDest::Nothing
+ };
- if intrinsic == Some("transmute") {
- if let Some(destination_ref) = destination.as_ref() {
- let &(ref dest, target) = destination_ref;
- self.codegen_transmute(&mut bx, &args[0], dest);
- funclet_br(self, &mut bx, target);
- } else {
- // If we are trying to transmute to an uninhabited type,
- // it is likely there is no allotted destination. In fact,
- // transmuting to an uninhabited type is UB, which means
- // we can do what we like. Here, we declare that transmuting
- // into an uninhabited type is impossible, so anything following
- // it must be unreachable.
- assert_eq!(bx.layout_of(sig.output()).abi, layout::Abi::Uninhabited);
- bx.unreachable();
- }
- return;
- }
+ if intrinsic.is_some() && intrinsic != Some("drop_in_place") {
+ let dest = match ret_dest {
+ _ if fn_ty.ret.is_indirect() => llargs[0],
+ ReturnDest::Nothing =>
+ bx.const_undef(bx.type_ptr_to(bx.memory_ty(&fn_ty.ret))),
+ ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) =>
+ dst.llval,
+ ReturnDest::DirectOperand(_) =>
+ bug!("Cannot use direct operand with an intrinsic call"),
+ };
- // The "spoofed" `VaList` added to a C-variadic functions signature
- // should not be included in the `extra_args` calculation.
- let extra_args_start_idx = sig.inputs().len() - if sig.variadic { 1 } else { 0 };
- let extra_args = &args[extra_args_start_idx..];
- let extra_args = extra_args.iter().map(|op_arg| {
- let op_ty = op_arg.ty(self.mir, bx.tcx());
- self.monomorphize(&op_ty)
- }).collect::<Vec<_>>();
-
- let fn_ty = match def {
- Some(ty::InstanceDef::Virtual(..)) => {
- bx.new_vtable(sig, &extra_args)
- }
- Some(ty::InstanceDef::DropGlue(_, None)) => {
- // empty drop glue - a nop.
- let &(_, target) = destination.as_ref().unwrap();
- funclet_br(self, &mut bx, target);
- return;
- }
- _ => bx.new_fn_type(sig, &extra_args)
- };
+ let args: Vec<_> = args.iter().enumerate().map(|(i, arg)| {
+ // The indices passed to simd_shuffle* in the
+ // third argument must be constant. This is
+ // checked by const-qualification, which also
+ // promotes any complex rvalues to constants.
+ if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") {
+ match *arg {
+ // The shuffle array argument is usually not an explicit constant,
+ // but specified directly in the code. This means it gets promoted
+ // and we can then extract the value by evaluating the promoted.
+ mir::Operand::Copy(mir::Place::Promoted(box(index, ty))) |
+ mir::Operand::Move(mir::Place::Promoted(box(index, ty))) => {
+ let param_env = ty::ParamEnv::reveal_all();
+ let cid = mir::interpret::GlobalId {
+ instance: self.instance,
+ promoted: Some(index),
+ };
+ let c = bx.tcx().const_eval(param_env.and(cid));
+ let (llval, ty) = self.simd_shuffle_indices(
+ &bx,
+ terminator.source_info.span,
+ ty,
+ c,
+ );
+ return OperandRef {
+ val: Immediate(llval),
+ layout: bx.layout_of(ty),
+ };
- // emit a panic or a NOP for `panic_if_uninhabited`
- if intrinsic == Some("panic_if_uninhabited") {
- let ty = instance.unwrap().substs.type_at(0);
- let layout = bx.layout_of(ty);
- if layout.abi.is_uninhabited() {
- let loc = bx.sess().source_map().lookup_char_pos(span.lo());
- let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
- let filename = bx.const_str_slice(filename);
- let line = bx.const_u32(loc.line as u32);
- let col = bx.const_u32(loc.col.to_usize() as u32 + 1);
- let align = tcx.data_layout.aggregate_align.abi
- .max(tcx.data_layout.i32_align.abi)
- .max(tcx.data_layout.pointer_align.abi);
-
- let str = format!(
- "Attempted to instantiate uninhabited type {}",
- ty
- );
- let msg_str = Symbol::intern(&str).as_str();
- let msg_str = bx.const_str_slice(msg_str);
- let msg_file_line_col = bx.const_struct(
- &[msg_str, filename, line, col],
- false,
- );
- let msg_file_line_col = bx.static_addr_of(
- msg_file_line_col,
- align,
- Some("panic_loc"),
- );
-
- // Obtain the panic entry point.
- let def_id =
- common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem);
- let instance = ty::Instance::mono(bx.tcx(), def_id);
- let fn_ty = bx.fn_type_of_instance(&instance);
- let llfn = bx.get_fn(instance);
-
- // Codegen the actual panic invoke/call.
- do_call(
- self,
- &mut bx,
- fn_ty,
- llfn,
- &[msg_file_line_col],
- destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)),
- cleanup,
- );
- } else {
- // a NOP
- funclet_br(self, &mut bx, destination.as_ref().unwrap().1);
+ }
+ mir::Operand::Copy(_) |
+ mir::Operand::Move(_) => {
+ span_bug!(span, "shuffle indices must be constant");
+ }
+ mir::Operand::Constant(ref constant) => {
+ let c = self.eval_mir_constant(&bx, constant);
+ let (llval, ty) = self.simd_shuffle_indices(
+ &bx,
+ constant.span,
+ constant.ty,
+ c,
+ );
+ return OperandRef {
+ val: Immediate(llval),
+ layout: bx.layout_of(ty)
+ };
+ }
}
- return;
}
- // The arguments we'll be passing. Plus one to account for outptr, if used.
- let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
- let mut llargs = Vec::with_capacity(arg_count);
-
- // Prepare the return value destination
- let ret_dest = if let Some((ref dest, _)) = *destination {
- let is_intrinsic = intrinsic.is_some();
- self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs,
- is_intrinsic)
- } else {
- ReturnDest::Nothing
- };
-
- if intrinsic.is_some() && intrinsic != Some("drop_in_place") {
- let dest = match ret_dest {
- _ if fn_ty.ret.is_indirect() => llargs[0],
- ReturnDest::Nothing => {
- bx.const_undef(bx.type_ptr_to(bx.memory_ty(&fn_ty.ret)))
- }
- ReturnDest::IndirectOperand(dst, _) |
- ReturnDest::Store(dst) => dst.llval,
- ReturnDest::DirectOperand(_) =>
- bug!("Cannot use direct operand with an intrinsic call")
- };
-
- let args: Vec<_> = args.iter().enumerate().map(|(i, arg)| {
- // The indices passed to simd_shuffle* in the
- // third argument must be constant. This is
- // checked by const-qualification, which also
- // promotes any complex rvalues to constants.
- if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") {
- match *arg {
- // The shuffle array argument is usually not an explicit constant,
- // but specified directly in the code. This means it gets promoted
- // and we can then extract the value by evaluating the promoted.
- mir::Operand::Copy(mir::Place::Promoted(box(index, ty))) |
- mir::Operand::Move(mir::Place::Promoted(box(index, ty))) => {
- let param_env = ty::ParamEnv::reveal_all();
- let cid = mir::interpret::GlobalId {
- instance: self.instance,
- promoted: Some(index),
- };
- let c = bx.tcx().const_eval(param_env.and(cid));
- let (llval, ty) = self.simd_shuffle_indices(
- &bx,
- terminator.source_info.span,
- ty,
- c,
- );
- return OperandRef {
- val: Immediate(llval),
- layout: bx.layout_of(ty),
- };
-
- },
- mir::Operand::Copy(_) |
- mir::Operand::Move(_) => {
- span_bug!(span, "shuffle indices must be constant");
- }
- mir::Operand::Constant(ref constant) => {
- let c = self.eval_mir_constant(&bx, constant);
- let (llval, ty) = self.simd_shuffle_indices(
- &bx,
- constant.span,
- constant.ty,
- c,
- );
- return OperandRef {
- val: Immediate(llval),
- layout: bx.layout_of(ty)
- };
- }
- }
- }
+ self.codegen_operand(&mut bx, arg)
+ }).collect();
- self.codegen_operand(&mut bx, arg)
- }).collect();
+ let callee_ty = instance.as_ref().unwrap().ty(bx.tcx());
+ bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest,
+ terminator.source_info.span);
- let callee_ty = instance.as_ref().unwrap().ty(bx.tcx());
- bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest,
- terminator.source_info.span);
+ if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+ self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval);
+ }
- if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
- self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval);
- }
+ if let Some((_, target)) = *destination {
+ helper.funclet_br(self, &mut bx, target);
+ } else {
+ bx.unreachable();
+ }
- if let Some((_, target)) = *destination {
- funclet_br(self, &mut bx, target);
- } else {
- bx.unreachable();
- }
+ return;
+ }
- return;
- }
+ // Split the rust-call tupled arguments off.
+ let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
+ let (tup, args) = args.split_last().unwrap();
+ (args, Some(tup))
+ } else {
+ (&args[..], None)
+ };
- // Split the rust-call tupled arguments off.
- let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
- let (tup, args) = args.split_last().unwrap();
- (args, Some(tup))
- } else {
- (&args[..], None)
+ // Useful for determining whether the current argument is the "spoofed" `VaList`.
+ let last_arg_idx = if sig.inputs().is_empty() {
+ None
+ } else {
+ Some(sig.inputs().len() - 1)
+ };
+ 'make_args: for (i, arg) in first_args.iter().enumerate() {
+ // If this is a C-variadic function, the function signature contains
+ // a "spoofed" `VaList`. This argument is ignored, but we need to
+ // populate it with a dummy operand so that the user's real arguments
+ // are not overwritten.
+ let i = if sig.variadic && last_arg_idx.map(|x| x == i).unwrap_or(false) {
+ let layout = match self.cx.tcx().lang_items().va_list() {
+ Some(did) => bx.cx().layout_of(bx.tcx().type_of(did)),
+ None => bug!("`va_list` language item required for C-variadics"),
};
-
- // Useful for determining whether the current argument is the "spoofed" `VaList`.
- let last_arg_idx = if sig.inputs().is_empty() {
- None
- } else {
- Some(sig.inputs().len() - 1)
+ let op = OperandRef {
+ val: OperandValue::Immediate(
+ bx.cx().const_undef(bx.cx().immediate_backend_type(layout)
+ )),
+ layout: layout,
};
- 'make_args: for (i, arg) in first_args.iter().enumerate() {
- // If this is a C-variadic function, the function signature contains
- // a "spoofed" `VaList`. This argument is ignored, but we need to
- // populate it with a dummy operand so that the user's real arguments
- // are not overwritten.
- let i = if sig.variadic && last_arg_idx.map(|x| x == i).unwrap_or(false) {
- let layout = match tcx.lang_items().va_list() {
- Some(did) => bx.cx().layout_of(bx.tcx().type_of(did)),
- None => bug!("va_list language item required for C variadics"),
- };
- let op = OperandRef {
- val: OperandValue::Immediate(
- bx.cx().const_undef(bx.cx().immediate_backend_type(layout))
- ),
- layout: layout,
- };
- self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]);
- if i + 1 < fn_ty.args.len() {
- i + 1
- } else {
- break 'make_args
- }
- } else {
- i
- };
- let mut op = self.codegen_operand(&mut bx, arg);
-
- if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
- if let Pair(..) = op.val {
- // In the case of Rc<Self>, we need to explicitly pass a
- // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
- // that is understood elsewhere in the compiler as a method on
- // `dyn Trait`.
- // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
- // we get a value of a built-in pointer type
- 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
- && !op.layout.ty.is_region_ptr()
- {
- 'iter_fields: for i in 0..op.layout.fields.count() {
- let field = op.extract_field(&mut bx, i);
- if !field.layout.is_zst() {
- // we found the one non-zero-sized field that is allowed
- // now find *its* non-zero-sized field, or stop if it's a
- // pointer
- op = field;
- continue 'descend_newtypes
- }
- }
-
- span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]);
+ if i + 1 < fn_ty.args.len() {
+ i + 1
+ } else {
+ break 'make_args
+ }
+ } else {
+ i
+ };
+ let mut op = self.codegen_operand(&mut bx, arg);
+
+ if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
+ if let Pair(..) = op.val {
+ // In the case of Rc<Self>, we need to explicitly pass a
+ // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
+ // that is understood elsewhere in the compiler as a method on
+ // `dyn Trait`.
+ // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+ // we get a value of a built-in pointer type
+ 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+ && !op.layout.ty.is_region_ptr()
+ {
+ 'iter_fields: for i in 0..op.layout.fields.count() {
+ let field = op.extract_field(&mut bx, i);
+ if !field.layout.is_zst() {
+ // we found the one non-zero-sized field that is allowed
+ // now find *its* non-zero-sized field, or stop if it's a
+ // pointer
+ op = field;
+ continue 'descend_newtypes
}
+ }
- // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
- // data pointer and vtable. Look up the method in the vtable, and pass
- // the data pointer as the first argument
- match op.val {
- Pair(data_ptr, meta) => {
- llfn = Some(meth::VirtualIndex::from_index(idx)
- .get_fn(&mut bx, meta, &fn_ty));
- llargs.push(data_ptr);
- continue 'make_args
- }
- other => bug!("expected a Pair, got {:?}", other)
- }
- } else if let Ref(data_ptr, Some(meta), _) = op.val {
- // by-value dynamic dispatch
+ span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ }
+
+ // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
+ // data pointer and vtable. Look up the method in the vtable, and pass
+ // the data pointer as the first argument
+ match op.val {
+ Pair(data_ptr, meta) => {
llfn = Some(meth::VirtualIndex::from_index(idx)
.get_fn(&mut bx, meta, &fn_ty));
llargs.push(data_ptr);
- continue;
- } else {
- span_bug!(span, "can't codegen a virtual call on {:?}", op);
- }
- }
-
- // The callee needs to own the argument memory if we pass it
- // by-ref, so make a local copy of non-immediate constants.
- match (arg, op.val) {
- (&mir::Operand::Copy(_), Ref(_, None, _)) |
- (&mir::Operand::Constant(_), Ref(_, None, _)) => {
- let tmp = PlaceRef::alloca(&mut bx, op.layout, "const");
- op.val.store(&mut bx, tmp);
- op.val = Ref(tmp.llval, None, tmp.align);
+ continue 'make_args
}
- _ => {}
+ other => bug!("expected a Pair, got {:?}", other),
}
-
- self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]);
+ } else if let Ref(data_ptr, Some(meta), _) = op.val {
+ // by-value dynamic dispatch
+ llfn = Some(meth::VirtualIndex::from_index(idx)
+ .get_fn(&mut bx, meta, &fn_ty));
+ llargs.push(data_ptr);
+ continue;
+ } else {
+ span_bug!(span, "can't codegen a virtual call on {:?}", op);
}
- if let Some(tup) = untuple {
- self.codegen_arguments_untupled(&mut bx, tup, &mut llargs,
- &fn_ty.args[first_args.len()..])
+ }
+
+ // The callee needs to own the argument memory if we pass it
+ // by-ref, so make a local copy of non-immediate constants.
+ match (arg, op.val) {
+ (&mir::Operand::Copy(_), Ref(_, None, _)) |
+ (&mir::Operand::Constant(_), Ref(_, None, _)) => {
+ let tmp = PlaceRef::alloca(&mut bx, op.layout, "const");
+ op.val.store(&mut bx, tmp);
+ op.val = Ref(tmp.llval, None, tmp.align);
}
+ _ => {}
+ }
- let fn_ptr = match (llfn, instance) {
- (Some(llfn), _) => llfn,
- (None, Some(instance)) => bx.get_fn(instance),
- _ => span_bug!(span, "no llfn for call"),
- };
+ self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]);
+ }
+ if let Some(tup) = untuple {
+ self.codegen_arguments_untupled(&mut bx, tup, &mut llargs,
+ &fn_ty.args[first_args.len()..])
+ }
+
+ let fn_ptr = match (llfn, instance) {
+ (Some(llfn), _) => llfn,
+ (None, Some(instance)) => bx.get_fn(instance),
+ _ => span_bug!(span, "no llfn for call"),
+ };
+
+ helper.do_call(self, &mut bx, fn_ty, fn_ptr, &llargs,
+ destination.as_ref().map(|&(_, target)| (ret_dest, target)),
+ cleanup);
+ }
+}
+
+impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn codegen_block(
+ &mut self,
+ bb: mir::BasicBlock,
+ ) {
+ let mut bx = self.build_block(bb);
+ let data = &self.mir[bb];
+
+ debug!("codegen_block({:?}={:?})", bb, data);
+
+ for statement in &data.statements {
+ bx = self.codegen_statement(bx, statement);
+ }
+
+ self.codegen_terminator(bx, bb, data.terminator());
+ }
- do_call(self, &mut bx, fn_ty, fn_ptr, &llargs,
- destination.as_ref().map(|&(_, target)| (ret_dest, target)),
- cleanup);
+ fn codegen_terminator(
+ &mut self,
+ mut bx: Bx,
+ bb: mir::BasicBlock,
+ terminator: &mir::Terminator<'tcx>
+ ) {
+ debug!("codegen_terminator: {:?}", terminator);
+
+ // Create the cleanup bundle, if needed.
+ let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
+ let helper = TerminatorCodegenHelper {
+ bb: &bb, terminator, funclet_bb
+ };
+
+ self.set_debug_loc(&mut bx, terminator.source_info);
+ match terminator.kind {
+ mir::TerminatorKind::Resume => {
+ self.codegen_resume_terminator(helper, bx)
+ }
+
+ mir::TerminatorKind::Abort => {
+ bx.abort();
+ bx.unreachable();
+ }
+
+ mir::TerminatorKind::Goto { target } => {
+ helper.funclet_br(self, &mut bx, target);
+ }
+
+ mir::TerminatorKind::SwitchInt {
+ ref discr, switch_ty, ref values, ref targets
+ } => {
+ self.codegen_switchint_terminator(helper, bx, discr, switch_ty,
+ values, targets);
+ }
+
+ mir::TerminatorKind::Return => {
+ self.codegen_return_terminator(bx);
+ }
+
+ mir::TerminatorKind::Unreachable => {
+ bx.unreachable();
+ }
+
+ mir::TerminatorKind::Drop { ref location, target, unwind } => {
+ self.codegen_drop_terminator(helper, bx, location, target, unwind);
+ }
+
+ mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
+ self.codegen_assert_terminator(helper, bx, terminator, cond,
+ expected, msg, target, cleanup);
+ }
+
+ mir::TerminatorKind::DropAndReplace { .. } => {
+ bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
+ }
+
+ mir::TerminatorKind::Call {
+ ref func,
+ ref args,
+ ref destination,
+ cleanup,
+ from_hir_call: _
+ } => {
+ self.codegen_call_terminator(helper, bx, terminator, func,
+ args, destination, cleanup);
}
mir::TerminatorKind::GeneratorDrop |
mir::TerminatorKind::Yield { .. } => bug!("generator ops in codegen"),