rustc_trans: remove the bulk of old trans and most of its support code.

Eduard Burtescu 2016-08-16 17:41:38 +03:00
parent cb9b0ed91b
commit d0654ae5e5
29 changed files with 462 additions and 10729 deletions

File diff suppressed because it is too large


@ -53,14 +53,9 @@ use rustc::ty::{self, Ty, TyCtxt};
use syntax::ast;
use syntax::attr;
use syntax::attr::IntType;
use _match;
use abi::FAT_PTR_ADDR;
use base::InitAlloca;
use build::*;
use cleanup;
use cleanup::CleanupMethods;
use common::*;
use datum;
use debuginfo::DebugLoc;
use glue;
use machine;
@ -69,6 +64,12 @@ use type_::Type;
use type_of;
use value::Value;
#[derive(Copy, Clone, PartialEq)]
pub enum BranchKind {
Switch,
Single
}
type Hint = attr::ReprAttr;
// Representation of the context surrounding an unsized type. I want
@ -178,14 +179,6 @@ impl MaybeSizedValue {
}
}
/// Convenience for `represent_type`. There should probably be more of
/// these, for places in trans where the `Ty` isn't directly
/// available.
pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
node: ast::NodeId) -> Rc<Repr<'tcx>> {
represent_type(bcx.ccx(), node_id_type(bcx, node))
}
/// Decides how to represent a given type.
pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>)
@ -201,38 +194,8 @@ pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
repr
}
const fn repeat_u8_as_u32(val: u8) -> u32 {
(val as u32) << 24 | (val as u32) << 16 | (val as u32) << 8 | val as u32
}
const fn repeat_u8_as_u64(val: u8) -> u64 {
(repeat_u8_as_u32(val) as u64) << 32 | repeat_u8_as_u32(val) as u64
}
/// `DTOR_NEEDED_HINT` is a stack-local hint that just means
/// "we do not know whether the destructor has run or not; check the
/// drop-flag embedded in the value itself."
pub const DTOR_NEEDED_HINT: u8 = 0x3d;
/// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has
/// definitely been moved; you do not need to run its destructor."
///
/// (However, for now, such values may still end up being explicitly
/// zeroed by the generated code; this is the distinction between
/// `datum::DropFlagInfo::ZeroAndMaintain` versus
/// `datum::DropFlagInfo::DontZeroJustUse`.)
pub const DTOR_MOVED_HINT: u8 = 0x2d;
pub const DTOR_NEEDED: u8 = 0xd4;
#[allow(dead_code)]
pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64(DTOR_NEEDED);
pub const DTOR_DONE: u8 = 0x1d;
#[allow(dead_code)]
pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64(DTOR_DONE);
fn dtor_to_init_u8(dtor: bool) -> u8 {
if dtor { DTOR_NEEDED } else { 0 }
if dtor { 1 } else { 0 }
}
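Side note (illustration only, not part of the diff): the removed `repeat_u8_as_*` helpers just broadcast a sentinel byte across a whole word, which is how the old drop-flag code paved memory with `DTOR_NEEDED`/`DTOR_DONE`. A minimal standalone sketch of that bit pattern in plain Rust:

```rust
// Broadcast a byte into every byte of a u32/u64, mirroring the removed helpers.
const fn repeat_u8_as_u32(val: u8) -> u32 {
    (val as u32) << 24 | (val as u32) << 16 | (val as u32) << 8 | val as u32
}

const fn repeat_u8_as_u64(val: u8) -> u64 {
    (repeat_u8_as_u32(val) as u64) << 32 | repeat_u8_as_u32(val) as u64
}

fn main() {
    // 0xd4 was DTOR_NEEDED; repeated across a word it gives 0xd4d4_d4d4_d4d4_d4d4.
    assert_eq!(repeat_u8_as_u64(0xd4), 0xd4d4_d4d4_d4d4_d4d4);
    // 0x1d was DTOR_DONE, the "filling drop" pattern.
    assert_eq!(repeat_u8_as_u32(0x1d), 0x1d1d_1d1d);
}
```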
pub trait GetDtorType<'tcx> { fn dtor_type(self) -> Ty<'tcx>; }
@ -240,10 +203,6 @@ impl<'a, 'tcx> GetDtorType<'tcx> for TyCtxt<'a, 'tcx, 'tcx> {
fn dtor_type(self) -> Ty<'tcx> { self.types.u8 }
}
fn dtor_active(flag: u8) -> bool {
flag != 0
}
fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>) -> Repr<'tcx> {
match t.sty {
@ -873,22 +832,19 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>,
/// Obtain a representation of the discriminant sufficient to translate
/// destructuring; this may or may not involve the actual discriminant.
///
/// This should ideally be less tightly tied to `_match`.
pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
r: &Repr<'tcx>,
scrutinee: ValueRef,
range_assert: bool)
-> (_match::BranchKind, Option<ValueRef>) {
-> (BranchKind, Option<ValueRef>) {
match *r {
CEnum(..) | General(..) |
RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
(_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None,
range_assert)))
(BranchKind::Switch, Some(trans_get_discr(bcx, r, scrutinee, None, range_assert)))
}
Univariant(..) => {
// N.B.: Univariant means <= 1 enum variants (*not* == 1 variants).
(_match::Single, None)
(BranchKind::Single, None)
}
}
}
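Context for the `RawNullablePointer`/`StructWrappedNullablePointer` arms above: in those representations the discriminant is not stored separately, it is encoded in whether a non-nullable pointer field is null. That is observable from ordinary Rust (a quick check, independent of this code):

```rust
use std::mem::size_of;

fn main() {
    // "Nullable pointer" layout: None is the all-zero pointer, Some(p) is p,
    // so the Option costs no extra space.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    // The struct-wrapped variant applies the same trick when the pointer sits
    // inside a struct-like variant (Box is the simplest such case).
    assert_eq!(size_of::<Option<Box<[u8; 4]>>>(), size_of::<Box<[u8; 4]>>());
}
```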
@ -1001,21 +957,12 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
val);
}
General(ity, ref cases, dtor) => {
if dtor_active(dtor) {
let ptr = trans_field_ptr(bcx, r, MaybeSizedValue::sized(val), discr,
cases[discr.0 as usize].fields.len() - 2);
Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), ptr);
}
General(ity, _, _) => {
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
StructGEP(bcx, val, 0));
}
Univariant(ref st, dtor) => {
Univariant(_, _) => {
assert_eq!(discr, Disr(0));
if dtor_active(dtor) {
Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED),
StructGEP(bcx, val, st.fields.len() - 1));
}
}
RawNullablePointer { nndiscr, nnty, ..} => {
if discr != nndiscr {
@ -1046,28 +993,6 @@ fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) {
}
}
/// The number of fields in a given case; for use when obtaining this
/// information from the type or definition is less convenient.
pub fn num_args(r: &Repr, discr: Disr) -> usize {
match *r {
CEnum(..) => 0,
Univariant(ref st, dtor) => {
assert_eq!(discr, Disr(0));
st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 })
}
General(_, ref cases, dtor) => {
cases[discr.0 as usize].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 })
}
RawNullablePointer { nndiscr, ref nullfields, .. } => {
if discr == nndiscr { 1 } else { nullfields.len() }
}
StructWrappedNullablePointer { ref nonnull, nndiscr,
ref nullfields, .. } => {
if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() }
}
}
}
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef {
@ -1218,108 +1143,6 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
bcx.pointercast(byte_ptr, ll_fty.ptr_to())
}
pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
r: &Repr<'tcx>,
value: ValueRef,
mut f: F)
-> Block<'blk, 'tcx> where
F: FnMut(Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> Block<'blk, 'tcx>,
{
let fcx = bcx.fcx;
match *r {
Univariant(ref st, _) => {
f(bcx, st, value)
}
General(ity, ref cases, _) => {
let ccx = bcx.ccx();
// See the comments in trans/base.rs for more information (inside
// iter_structural_ty), but the gist here is that if the enum's
// discriminant is *not* in the range that we're expecting (in which
// case we'll take the fall-through branch on the switch
// instruction) then we can't just optimize this to an Unreachable
// block.
//
// Currently we still have filling drop, so this means that the drop
// glue for enums may be called when the enum has been paved over
// with the "I've been dropped" value. In this case the default
// branch of the switch instruction will actually be taken at
// runtime, so the basic block isn't actually unreachable, so we
// need to make it do something with defined behavior. In this case
// we just return early from the function.
//
// Note that this is also why the `trans_get_discr` below has
// `false` to indicate that loading the discriminant should
// not have a range assert.
let ret_void_cx = fcx.new_temp_block("enum-variant-iter-ret-void");
RetVoid(ret_void_cx, DebugLoc::None);
let discr_val = trans_get_discr(bcx, r, value, None, false);
let llswitch = Switch(bcx, discr_val, ret_void_cx.llbb, cases.len());
let bcx_next = fcx.new_temp_block("enum-variant-iter-next");
for (discr, case) in cases.iter().enumerate() {
let mut variant_cx = fcx.new_temp_block(
&format!("enum-variant-iter-{}", &discr.to_string())
);
let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true);
AddCase(llswitch, rhs_val, variant_cx.llbb);
let fields = case.fields.iter().map(|&ty|
type_of::type_of(bcx.ccx(), ty)).collect::<Vec<_>>();
let real_ty = Type::struct_(ccx, &fields[..], case.packed);
let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to());
variant_cx = f(variant_cx, case, variant_value);
Br(variant_cx, bcx_next.llbb, DebugLoc::None);
}
bcx_next
}
_ => bug!()
}
}
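The long comment in `fold_variants` explains why the switch's default branch cannot be marked unreachable: with filling drop still present, the loaded discriminant may have been paved over, so the generated code just returns. A rough surface-level analogue of that decision (hypothetical names, not the generated IR):

```rust
// Visit whichever variant `discr` selects; if the value was overwritten by
// filling drop, the discriminant is out of range and we bail out instead of
// treating the default branch as unreachable.
fn fold_variant_fields(discr: u8, fields_per_variant: &[usize]) -> Option<usize> {
    match fields_per_variant.get(discr as usize) {
        Some(&n) => Some(n), // in range: process this variant's fields
        None => None,        // default branch: the "enum-variant-iter-ret-void" case
    }
}

fn main() {
    assert_eq!(fold_variant_fields(1, &[2, 3]), Some(3));
    assert_eq!(fold_variant_fields(0xd4, &[2, 3]), None); // paved-over discriminant
}
```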
/// Access the struct drop flag, if present.
pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
r: &Repr<'tcx>,
val: ValueRef)
-> datum::DatumBlock<'blk, 'tcx, datum::Expr>
{
let tcx = bcx.tcx();
let ptr_ty = bcx.tcx().mk_imm_ptr(tcx.dtor_type());
match *r {
Univariant(ref st, dtor) if dtor_active(dtor) => {
let flag_ptr = StructGEP(bcx, val, st.fields.len() - 1);
datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock()
}
General(_, _, dtor) if dtor_active(dtor) => {
let fcx = bcx.fcx;
let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum(
bcx, tcx.dtor_type(), "drop_flag",
InitAlloca::Uninit("drop flag itself has no dtor"),
cleanup::CustomScope(custom_cleanup_scope), |bcx, _| {
debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}",
tcx.dtor_type());
bcx
}
));
bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
let ptr = struct_field_ptr(&variant_cx.build(), st,
MaybeSizedValue::sized(value),
(st.fields.len() - 1), false);
datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr"))
.store_to(variant_cx, scratch.val)
});
let expr_datum = scratch.to_expr_datum();
fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
datum::DatumBlock::new(bcx, expr_datum)
}
_ => bug!("tried to get drop flag of non-droppable type")
}
}
/// Construct a constant value, suitable for initializing a
/// GlobalVariable, given a case and constant values for its fields.
/// Note that this may have a different LLVM type (and different
@ -1458,28 +1281,6 @@ fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
#[inline]
fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
/// Get the discriminant of a constant value.
pub fn const_get_discrim(r: &Repr, val: ValueRef) -> Disr {
match *r {
CEnum(ity, _, _) => {
match ity {
attr::SignedInt(..) => Disr(const_to_int(val) as u64),
attr::UnsignedInt(..) => Disr(const_to_uint(val)),
}
}
General(ity, _, _) => {
match ity {
attr::SignedInt(..) => Disr(const_to_int(const_get_elt(val, &[0])) as u64),
attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(val, &[0])))
}
}
Univariant(..) => Disr(0),
RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
bug!("const discrim access of non c-like enum")
}
}
}
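The removed `const_get_discrim` read the discriminant back out of an LLVM constant. At the surface-language level the closest analogue is `std::mem::discriminant`, shown here purely as an illustration of what "the discriminant of a value" means:

```rust
use std::mem::discriminant;

enum E {
    A(u8),
    B,
}

fn main() {
    // Same variant, different payload: same discriminant.
    assert_eq!(discriminant(&E::A(1)), discriminant(&E::A(2)));
    // Different variants: different discriminants.
    assert_ne!(discriminant(&E::A(1)), discriminant(&E::B));
}
```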
/// Extract a field of a constant value, as appropriate for its
/// representation.
///


@ -14,28 +14,29 @@ use llvm::{self, ValueRef};
use base;
use build::*;
use common::*;
use datum::{Datum, Lvalue};
use type_of;
use type_::Type;
use rustc::hir as ast;
use rustc::hir;
use rustc::ty::Ty;
use std::ffi::CString;
use syntax::ast::AsmDialect;
use libc::{c_uint, c_char};
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ia: &ast::InlineAsm,
outputs: Vec<Datum<'tcx, Lvalue>>,
ia: &hir::InlineAsm,
outputs: Vec<(ValueRef, Ty<'tcx>)>,
mut inputs: Vec<ValueRef>) {
let mut ext_constraints = vec![];
let mut output_types = vec![];
// Prepare the output operands
let mut indirect_outputs = vec![];
for (i, (out, out_datum)) in ia.outputs.iter().zip(&outputs).enumerate() {
for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() {
let val = if out.is_rw || out.is_indirect {
Some(base::load_ty(bcx, out_datum.val, out_datum.ty))
Some(base::load_ty(bcx, val, ty))
} else {
None
};
@ -46,7 +47,7 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
if out.is_indirect {
indirect_outputs.push(val.unwrap());
} else {
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
output_types.push(type_of::type_of(bcx.ccx(), ty));
}
}
if !indirect_outputs.is_empty() {
@ -100,9 +101,9 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, datum)) in outputs.enumerate() {
for (i, (_, &(val, _))) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) };
Store(bcx, v, datum.val);
Store(bcx, v, val);
}
// Store expn_id in a metadata node so we can map LLVM errors

File diff suppressed because it is too large


@ -15,39 +15,29 @@
//! closure.
pub use self::CalleeData::*;
pub use self::CallArgs::*;
use arena::TypedArena;
use back::symbol_names;
use llvm::{self, ValueRef, get_params};
use llvm::{ValueRef, get_params};
use middle::cstore::LOCAL_CRATE;
use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;
use rustc::traits;
use rustc::hir::map as hir_map;
use abi::{Abi, FnType};
use adt;
use attributes;
use base;
use base::*;
use build::*;
use cleanup;
use cleanup::CleanupMethods;
use closure;
use common::{self, Block, Result, CrateContext, FunctionContext, C_undef};
use common::{self, Block, Result, CrateContext, FunctionContext};
use consts;
use datum::*;
use debuginfo::DebugLoc;
use declare;
use expr;
use glue;
use inline;
use intrinsic;
use machine::llalign_of_min;
use meth;
use monomorphize::{self, Instance};
use trans_item::TransItem;
use type_::Type;
use type_of;
use value::Value;
use Disr;
@ -56,7 +46,6 @@ use rustc::hir;
use syntax_pos::DUMMY_SP;
use errors;
use syntax::ptr::P;
#[derive(Debug)]
pub enum CalleeData {
@ -80,10 +69,10 @@ pub struct Callee<'tcx> {
impl<'tcx> Callee<'tcx> {
/// Function pointer.
pub fn ptr(datum: Datum<'tcx, Rvalue>) -> Callee<'tcx> {
pub fn ptr(llfn: ValueRef, ty: Ty<'tcx>) -> Callee<'tcx> {
Callee {
data: Fn(datum.val),
ty: datum.ty
data: Fn(llfn),
ty: ty
}
}
@ -132,7 +121,10 @@ impl<'tcx> Callee<'tcx> {
abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic
} => Intrinsic,
_ => return Callee::ptr(get_fn(ccx, def_id, substs))
_ => {
let (llfn, ty) = get_fn(ccx, def_id, substs);
return Callee::ptr(llfn, ty);
}
};
Callee {
@ -163,7 +155,8 @@ impl<'tcx> Callee<'tcx> {
// That is because default methods have the same ID as the
// trait method used to look up the impl method that ended
// up here, so calling Callee::def would infinitely recurse.
Callee::ptr(get_fn(ccx, mth.method.def_id, mth.substs))
let (llfn, ty) = get_fn(ccx, mth.method.def_id, mth.substs);
Callee::ptr(llfn, ty)
}
traits::VtableClosure(vtable_closure) => {
// The substitutions should have no type parameters remaining
@ -180,7 +173,7 @@ impl<'tcx> Callee<'tcx> {
_ => bug!("expected fn item type, found {}",
method_ty)
};
Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
Callee::ptr(llfn, fn_ptr_ty)
}
traits::VtableFnPointer(vtable_fn_pointer) => {
let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
@ -192,7 +185,7 @@ impl<'tcx> Callee<'tcx> {
_ => bug!("expected fn item type, found {}",
method_ty)
};
Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
Callee::ptr(llfn, fn_ptr_ty)
}
traits::VtableObject(ref data) => {
Callee {
@ -236,30 +229,22 @@ impl<'tcx> Callee<'tcx> {
/// function.
pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc,
args: CallArgs<'a, 'tcx>,
dest: Option<expr::Dest>)
args: &[ValueRef],
dest: Option<ValueRef>)
-> Result<'blk, 'tcx> {
trans_call_inner(bcx, debug_loc, self, args, dest)
}
/// Turn the callee into a function pointer.
pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>)
-> Datum<'tcx, Rvalue> {
let fn_ptr_ty = match self.ty.sty {
ty::TyFnDef(_, _, f) => ccx.tcx().mk_fn_ptr(f),
_ => self.ty
};
pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
match self.data {
Fn(llfn) => {
immediate_rvalue(llfn, fn_ptr_ty)
}
Fn(llfn) => llfn,
Virtual(idx) => {
let llfn = meth::trans_object_shim(ccx, self.ty, idx);
immediate_rvalue(llfn, fn_ptr_ty)
meth::trans_object_shim(ccx, self.ty, idx)
}
NamedTupleConstructor(_) => match self.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
return get_fn(ccx, def_id, substs);
return get_fn(ccx, def_id, substs).0;
}
_ => bug!("expected fn item type, found {}", self.ty)
},
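`reify` now returns a bare `ValueRef` for the function pointer instead of a `Datum`. The operation it performs corresponds to the surface-language coercion from a zero-sized fn item to a real function pointer, roughly (a sketch, not the trans code):

```rust
fn square(x: i32) -> i32 {
    x * x
}

fn main() {
    // `square` by itself is a zero-sized "fn item"; writing it at type
    // `fn(i32) -> i32` is the reification that Callee::reify performs
    // at the LLVM level.
    let reified: fn(i32) -> i32 = square;
    assert_eq!(std::mem::size_of_val(&square), 0); // fn item: zero-sized
    assert!(std::mem::size_of_val(&reified) > 0);  // fn pointer: a real pointer
    assert_eq!(reified(7), 49);
}
```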
@ -310,7 +295,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>(
let llfnpointer = match bare_fn_ty.sty {
ty::TyFnDef(def_id, substs, _) => {
// Function definitions have to be turned into a pointer.
let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
let llfn = Callee::def(ccx, def_id, substs).reify(ccx);
if !is_by_ref {
// A by-value fn item is ignored, so the shim has
// the same signature as the original function.
@ -380,7 +365,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>(
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
let mut bcx = fcx.init(false, None);
let mut bcx = fcx.init(false);
let llargs = get_params(fcx.llfn);
@ -394,17 +379,13 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>(
}
});
assert!(!fcx.needs_ret_allocas);
let dest = fcx.llretslotptr.get().map(|_|
expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
);
let dest = fcx.llretslotptr.get();
let callee = Callee {
data: Fn(llfnpointer),
ty: bare_fn_ty
};
bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx;
bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).bcx;
fcx.finish(bcx, DebugLoc::None);
@ -424,7 +405,7 @@ pub fn trans_fn_pointer_shim<'a, 'tcx>(
fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>)
-> Datum<'tcx, Rvalue> {
-> (ValueRef, Ty<'tcx>) {
let tcx = ccx.tcx();
debug!("get_fn(def_id={:?}, substs={:?})", def_id, substs);
@ -475,7 +456,7 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
_ => bug!("expected fn item type, found {}", fn_ty)
};
assert_eq!(type_of::type_of(ccx, fn_ptr_ty), common::val_ty(val));
return immediate_rvalue(val, fn_ptr_ty);
return (val, fn_ptr_ty);
}
// Find the actual function pointer.
@ -490,7 +471,7 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let instance = Instance::mono(ccx.shared(), def_id);
if let Some(&llfn) = ccx.instances().borrow().get(&instance) {
return immediate_rvalue(llfn, fn_ptr_ty);
return (llfn, fn_ptr_ty);
}
let local_id = ccx.tcx().map.as_local_node_id(def_id);
@ -573,17 +554,17 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ccx.instances().borrow_mut().insert(instance, llfn);
immediate_rvalue(llfn, fn_ptr_ty)
(llfn, fn_ptr_ty)
}
// ______________________________________________________________________
// Translating calls
fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc,
callee: Callee<'tcx>,
args: CallArgs<'a, 'tcx>,
dest: Option<expr::Dest>)
args: &[ValueRef],
opt_llretslot: Option<ValueRef>)
-> Result<'blk, 'tcx> {
// Introduce a temporary cleanup scope that will contain cleanups
// for the arguments while they are being evaluated. The purpose
@ -595,65 +576,16 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let fcx = bcx.fcx;
let ccx = fcx.ccx;
let abi = callee.ty.fn_abi();
let sig = callee.ty.fn_sig();
let output = bcx.tcx().erase_late_bound_regions(&sig.output());
let output = bcx.tcx().normalize_associated_type(&output);
let extra_args = match args {
ArgExprs(args) if abi != Abi::RustCall => {
args[sig.0.inputs.len()..].iter().map(|expr| {
common::expr_ty_adjusted(bcx, expr)
}).collect()
}
_ => vec![]
};
let fn_ty = callee.direct_fn_type(ccx, &extra_args);
let fn_ret = callee.ty.fn_ret();
let fn_ty = callee.direct_fn_type(ccx, &[]);
let mut callee = match callee.data {
Intrinsic => {
assert!(abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic);
assert!(dest.is_some());
return intrinsic::trans_intrinsic_call(bcx, callee.ty, &fn_ty,
args, dest.unwrap(),
debug_loc);
}
NamedTupleConstructor(disr) => {
assert!(dest.is_some());
return base::trans_named_tuple_constructor(bcx,
callee.ty,
disr,
args,
dest.unwrap(),
debug_loc);
NamedTupleConstructor(_) | Intrinsic => {
bug!("{:?} calls should not go through Callee::call", callee);
}
f => f
};
// Generate a location to store the result. If the user does
// not care about the result, just make a stack slot.
let opt_llretslot = dest.and_then(|dest| match dest {
expr::SaveIn(dst) => Some(dst),
expr::Ignore => {
let needs_drop = || bcx.fcx.type_needs_drop(output);
if fn_ty.ret.is_indirect() || fn_ty.ret.cast.is_some() || needs_drop() {
// Push the out-pointer if we use an out-pointer for this
// return type, otherwise push "undef".
if fn_ty.ret.is_ignore() {
Some(C_undef(fn_ty.ret.original_ty.ptr_to()))
} else {
let llresult = alloca(bcx, fn_ty.ret.original_ty, "__llret");
call_lifetime_start(bcx, llresult);
Some(llresult)
}
} else {
None
}
}
});
// If there no destination, return must be direct, with no cast.
if opt_llretslot.is_none() {
assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());
@ -669,17 +601,24 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
llargs.push(llretslot);
}
let arg_cleanup_scope = fcx.push_custom_cleanup_scope();
bcx = trans_args(bcx, abi, &fn_ty, &mut callee, args, &mut llargs,
cleanup::CustomScope(arg_cleanup_scope));
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
match callee {
Virtual(idx) => {
llargs.push(args[0]);
let fn_ptr = meth::get_virtual_method(bcx, args[1], idx);
let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
callee = Fn(PointerCast(bcx, fn_ptr, llty));
llargs.extend_from_slice(&args[2..]);
}
_ => llargs.extend_from_slice(args)
}
let llfn = match callee {
Fn(f) => f,
_ => bug!("expected fn pointer callee, found {:?}", callee)
};
let (llret, mut bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
if !bcx.unreachable.get() {
fn_ty.apply_attrs_callsite(llret);
@ -695,283 +634,9 @@ fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
}
}
fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_cleanup_scope);
// If the caller doesn't care about the result of this fn call,
// drop the temporary slot we made.
match (dest, opt_llretslot) {
(Some(expr::Ignore), Some(llretslot)) => {
// drop the value if it is not being saved.
bcx = glue::drop_ty(bcx, llretslot, output, debug_loc);
call_lifetime_end(bcx, llretslot);
}
_ => {}
}
// FIXME(canndrew): This is_never should really be an is_uninhabited
if output.is_never() {
if fn_ret.0.is_never() {
Unreachable(bcx);
}
Result::new(bcx, llret)
}
pub enum CallArgs<'a, 'tcx> {
/// Supply value of arguments as a list of expressions that must be
/// translated. This is used in the common case of `foo(bar, qux)`.
ArgExprs(&'a [P<hir::Expr>]),
/// Supply value of arguments as a list of LLVM value refs; frequently
/// used with lang items and so forth, when the argument is an internal
/// value.
ArgVals(&'a [ValueRef]),
/// For overloaded operators: `(lhs, Option(rhs))`.
/// `lhs` is the left-hand-side and `rhs` is the datum
/// of the right-hand-side argument (if any).
ArgOverloadedOp(Datum<'tcx, Expr>, Option<Datum<'tcx, Expr>>),
/// Supply value of arguments as a list of expressions that must be
/// translated, for overloaded call operators.
ArgOverloadedCall(Vec<&'a hir::Expr>),
}
fn trans_args_under_call_abi<'blk, 'tcx>(
mut bcx: Block<'blk, 'tcx>,
arg_exprs: &[P<hir::Expr>],
callee: &mut CalleeData,
fn_ty: &FnType,
llargs: &mut Vec<ValueRef>,
arg_cleanup_scope: cleanup::ScopeId)
-> Block<'blk, 'tcx>
{
let mut arg_idx = 0;
// Translate the `self` argument first.
let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
bcx = trans_arg_datum(bcx,
arg_datum,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
// Now untuple the rest of the arguments.
let tuple_expr = &arg_exprs[1];
let tuple_type = common::node_id_type(bcx, tuple_expr.id);
match tuple_type.sty {
ty::TyTuple(ref field_types) => {
let tuple_datum = unpack_datum!(bcx,
expr::trans(bcx, &tuple_expr));
let tuple_lvalue_datum =
unpack_datum!(bcx,
tuple_datum.to_lvalue_datum(bcx,
"args",
tuple_expr.id));
let repr = adt::represent_type(bcx.ccx(), tuple_type);
let repr_ptr = &repr;
for (i, field_type) in field_types.iter().enumerate() {
let arg_datum = tuple_lvalue_datum.get_element(
bcx,
field_type,
|srcval| {
adt::trans_field_ptr(bcx, repr_ptr, srcval, Disr(0), i)
}).to_expr_datum();
bcx = trans_arg_datum(bcx,
arg_datum,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
}
}
_ => {
span_bug!(tuple_expr.span,
"argument to `.call()` wasn't a tuple?!")
}
};
bcx
}
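`trans_args_under_call_abi` handles the "rust-call" ABI, where the caller passes `self` plus a single tuple that has to be exploded into the real argument list. The same shape in surface Rust looks like this (a sketch under that assumption, not the trans code):

```rust
// A caller that receives its arguments as one tuple and "untuples" them,
// mirroring how the rust-call ABI forwards to the underlying argument list.
fn call_with_tuple<F>(f: F, args: (i32, i32)) -> i32
where
    F: Fn(i32, i32) -> i32,
{
    let (a, b) = args; // the untupling step
    f(a, b)
}

fn main() {
    assert_eq!(call_with_tuple(|a, b| a + b, (2, 3)), 5);
}
```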
pub fn trans_args<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
abi: Abi,
fn_ty: &FnType,
callee: &mut CalleeData,
args: CallArgs<'a, 'tcx>,
llargs: &mut Vec<ValueRef>,
arg_cleanup_scope: cleanup::ScopeId)
-> Block<'blk, 'tcx> {
debug!("trans_args(abi={})", abi);
let _icx = push_ctxt("trans_args");
let mut bcx = bcx;
let mut arg_idx = 0;
// First we figure out the caller's view of the types of the arguments.
// This will be needed if this is a generic call, because the callee has
// to cast her view of the arguments to the caller's view.
match args {
ArgExprs(arg_exprs) => {
if abi == Abi::RustCall {
// This is only used for direct calls to the `call`,
// `call_mut` or `call_once` functions.
return trans_args_under_call_abi(bcx,
arg_exprs, callee, fn_ty,
llargs,
arg_cleanup_scope)
}
for arg_expr in arg_exprs {
let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_expr));
bcx = trans_arg_datum(bcx,
arg_datum,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
}
}
ArgOverloadedCall(arg_exprs) => {
for expr in arg_exprs {
let arg_datum =
unpack_datum!(bcx, expr::trans(bcx, expr));
bcx = trans_arg_datum(bcx,
arg_datum,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
}
}
ArgOverloadedOp(lhs, rhs) => {
bcx = trans_arg_datum(bcx, lhs,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
if let Some(rhs) = rhs {
bcx = trans_arg_datum(bcx, rhs,
callee, fn_ty, &mut arg_idx,
arg_cleanup_scope,
llargs);
}
}
ArgVals(vs) => {
match *callee {
Virtual(idx) => {
llargs.push(vs[0]);
let fn_ptr = meth::get_virtual_method(bcx, vs[1], idx);
let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
*callee = Fn(PointerCast(bcx, fn_ptr, llty));
llargs.extend_from_slice(&vs[2..]);
}
_ => llargs.extend_from_slice(vs)
}
}
}
bcx
}
fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
arg_datum: Datum<'tcx, Expr>,
callee: &mut CalleeData,
fn_ty: &FnType,
next_idx: &mut usize,
arg_cleanup_scope: cleanup::ScopeId,
llargs: &mut Vec<ValueRef>)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_arg_datum");
let mut bcx = bcx;
debug!("trans_arg_datum({:?})", arg_datum);
let arg = &fn_ty.args[*next_idx];
*next_idx += 1;
// Fill padding with undef value, where applicable.
if let Some(ty) = arg.pad {
llargs.push(C_undef(ty));
}
// Determine whether we want a by-ref datum even if not appropriate.
let want_by_ref = arg.is_indirect() || arg.cast.is_some();
let fat_ptr = common::type_is_fat_ptr(bcx.tcx(), arg_datum.ty);
let (by_ref, val) = if fat_ptr && !bcx.fcx.type_needs_drop(arg_datum.ty) {
(true, arg_datum.val)
} else {
// Make this an rvalue, since we are going to be
// passing ownership.
let arg_datum = unpack_datum!(
bcx, arg_datum.to_rvalue_datum(bcx, "arg"));
// Now that arg_datum is owned, get it into the appropriate
// mode (ref vs value).
let arg_datum = unpack_datum!(bcx, if want_by_ref {
arg_datum.to_ref_datum(bcx)
} else {
arg_datum.to_appropriate_datum(bcx)
});
// Technically, ownership of val passes to the callee.
// However, we must cleanup should we panic before the
// callee is actually invoked.
(arg_datum.kind.is_by_ref(),
arg_datum.add_clean(bcx.fcx, arg_cleanup_scope))
};
if arg.is_ignore() {
return bcx;
}
debug!("--- trans_arg_datum passing {:?}", Value(val));
if fat_ptr {
// Fat pointers should be passed without any transformations.
assert!(!arg.is_indirect() && arg.cast.is_none());
llargs.push(Load(bcx, expr::get_dataptr(bcx, val)));
let info_arg = &fn_ty.args[*next_idx];
*next_idx += 1;
assert!(!info_arg.is_indirect() && info_arg.cast.is_none());
let info = Load(bcx, expr::get_meta(bcx, val));
if let Virtual(idx) = *callee {
// We have to grab the fn pointer from the vtable when
// handling the first argument, ensure that here.
assert_eq!(*next_idx, 2);
assert!(info_arg.is_ignore());
let fn_ptr = meth::get_virtual_method(bcx, info, idx);
let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
*callee = Fn(PointerCast(bcx, fn_ptr, llty));
} else {
assert!(!info_arg.is_ignore());
llargs.push(info);
}
return bcx;
}
let mut val = val;
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if arg.original_ty == Type::i1(bcx.ccx()) {
// We store bools as i8 so we need to truncate to i1.
val = LoadRangeAssert(bcx, val, 0, 2, llvm::False);
val = Trunc(bcx, val, arg.original_ty);
} else if let Some(ty) = arg.cast {
val = Load(bcx, PointerCast(bcx, val, ty.ptr_to()));
if !bcx.unreachable.get() {
let llalign = llalign_of_min(bcx.ccx(), arg.ty);
unsafe {
llvm::LLVMSetAlignment(val, llalign);
}
}
} else {
val = Load(bcx, val);
}
}
llargs.push(val);
bcx
}
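The fat-pointer branch above splits a slice or trait-object pointer into its data and metadata words and pushes them as two separate LLVM arguments. The two-word layout is visible from ordinary Rust (illustration only):

```rust
use std::mem::size_of;

fn main() {
    // Thin pointer: one word.
    assert_eq!(size_of::<&u8>(), size_of::<usize>());
    // Fat pointers carry extra metadata (a length or a vtable pointer), which
    // is why trans passes them as a (data, info) pair of arguments.
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
    assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>());
}
```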


@ -114,37 +114,22 @@
//! code for `expr` itself is responsible for freeing any other byproducts
//! that may be in play.
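The cleanup module keeps a stack of scopes, each holding drops to run when the scope is popped, either normally or while unwinding. A toy version of that push/schedule/pop discipline in plain Rust (none of these names are the rustc types):

```rust
// Toy cleanup-scope stack: push a scope, schedule cleanups into the top scope,
// and run them in reverse order when the scope is popped.
struct CleanupStack {
    scopes: Vec<Vec<Box<dyn FnOnce()>>>,
}

impl CleanupStack {
    fn new() -> Self {
        CleanupStack { scopes: Vec::new() }
    }
    fn push_scope(&mut self) {
        self.scopes.push(Vec::new());
    }
    fn schedule(&mut self, cleanup: impl FnOnce() + 'static) {
        self.scopes.last_mut().expect("no scope").push(Box::new(cleanup));
    }
    fn pop_and_run(&mut self) {
        let scope = self.scopes.pop().expect("no scope");
        for cleanup in scope.into_iter().rev() {
            cleanup();
        }
    }
}

fn main() {
    let mut stack = CleanupStack::new();
    stack.push_scope();
    stack.schedule(|| println!("drop a"));
    stack.schedule(|| println!("drop b"));
    stack.pop_and_run(); // prints "drop b" then "drop a"
}
```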
pub use self::ScopeId::*;
pub use self::CleanupScopeKind::*;
pub use self::EarlyExitLabel::*;
pub use self::Heap::*;
use llvm::{BasicBlockRef, ValueRef};
use base;
use build;
use common;
use common::{Block, FunctionContext, NodeIdAndSpan, LandingPad};
use datum::{Datum, Lvalue};
use debuginfo::{DebugLoc, ToDebugLoc};
use common::{Block, FunctionContext, LandingPad};
use debuginfo::{DebugLoc};
use glue;
use middle::region;
use type_::Type;
use value::Value;
use rustc::ty::{Ty, TyCtxt};
use std::fmt;
use syntax::ast;
pub struct CleanupScope<'blk, 'tcx: 'blk> {
// The id of this cleanup scope. If the id is None,
// this is a *temporary scope* that is pushed during trans to
// cleanup miscellaneous garbage that trans may generate whose
// lifetime is a subset of some expression. See module doc for
// more details.
kind: CleanupScopeKind<'blk, 'tcx>,
use rustc::ty::Ty;
pub struct CleanupScope<'tcx> {
// Cleanups to run upon scope exit.
cleanups: Vec<CleanupObj<'tcx>>,
cleanups: Vec<DropValue<'tcx>>,
// The debug location any drop calls generated for this scope will be
// associated with.
@ -159,37 +144,9 @@ pub struct CustomScopeIndex {
index: usize
}
pub const EXIT_BREAK: usize = 0;
pub const EXIT_LOOP: usize = 1;
pub const EXIT_MAX: usize = 2;
pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
CustomScopeKind,
AstScopeKind(ast::NodeId),
LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
}
impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
CustomScopeKind => write!(f, "CustomScopeKind"),
AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
LoopScopeKind(nid, ref blks) => {
write!(f, "LoopScopeKind({}, [", nid)?;
for blk in blks {
write!(f, "{:p}, ", blk)?;
}
write!(f, "])")
}
}
}
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum EarlyExitLabel {
UnwindExit(UnwindKind),
ReturnExit,
LoopExit(ast::NodeId, usize)
}
#[derive(Copy, Clone, Debug)]
@ -205,97 +162,8 @@ pub struct CachedEarlyExit {
last_cleanup: usize,
}
pub trait Cleanup<'tcx> {
fn must_unwind(&self) -> bool;
fn is_lifetime_end(&self) -> bool;
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx>;
}
pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
#[derive(Copy, Clone, Debug)]
pub enum ScopeId {
AstScope(ast::NodeId),
CustomScope(CustomScopeIndex)
}
#[derive(Copy, Clone, Debug)]
pub struct DropHint<K>(pub ast::NodeId, pub K);
pub type DropHintDatum<'tcx> = DropHint<Datum<'tcx, Lvalue>>;
pub type DropHintValue = DropHint<ValueRef>;
impl<K> DropHint<K> {
pub fn new(id: ast::NodeId, k: K) -> DropHint<K> { DropHint(id, k) }
}
impl DropHint<ValueRef> {
pub fn value(&self) -> ValueRef { self.1 }
}
pub trait DropHintMethods {
type ValueKind;
fn to_value(&self) -> Self::ValueKind;
}
impl<'tcx> DropHintMethods for DropHintDatum<'tcx> {
type ValueKind = DropHintValue;
fn to_value(&self) -> DropHintValue { DropHint(self.0, self.1.val) }
}
impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
/// Invoked when we start to trans the code contained within a new cleanup scope.
fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) {
debug!("push_ast_cleanup_scope({})",
self.ccx.tcx().map.node_to_string(debug_loc.id));
// FIXME(#2202) -- currently closure bodies have a parent
// region, which messes up the assertion below, since there
// are no cleanup scopes on the stack at the start of
// trans'ing a closure body. I think though that this should
// eventually be fixed by closure bodies not having a parent
// region, though that's a touch unclear, and it might also be
// better just to narrow this assertion more (i.e., by
// excluding id's that correspond to closure bodies only). For
// now we just say that if there is already an AST scope on the stack,
// this new AST scope had better be its immediate child.
let top_scope = self.top_ast_scope();
let region_maps = &self.ccx.tcx().region_maps;
if top_scope.is_some() {
assert!((region_maps
.opt_encl_scope(region_maps.node_extent(debug_loc.id))
.map(|s|s.node_id(region_maps)) == top_scope)
||
(region_maps
.opt_encl_scope(region_maps.lookup_code_extent(
region::CodeExtentData::DestructionScope(debug_loc.id)))
.map(|s|s.node_id(region_maps)) == top_scope));
}
self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
debug_loc.debug_loc()));
}
fn push_loop_cleanup_scope(&self,
id: ast::NodeId,
exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
debug!("push_loop_cleanup_scope({})",
self.ccx.tcx().map.node_to_string(id));
assert_eq!(Some(id), self.top_ast_scope());
// Just copy the debuginfo source location from the enclosing scope
let debug_loc = self.scopes
.borrow()
.last()
.unwrap()
.debug_loc;
self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
}
fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
pub fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
let index = self.scopes_len();
debug!("push_custom_cleanup_scope(): {}", index);
@ -306,53 +174,14 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
.map(|opt_scope| opt_scope.debug_loc)
.unwrap_or(DebugLoc::None);
self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
self.push_scope(CleanupScope::new(debug_loc));
CustomScopeIndex { index: index }
}
fn push_custom_cleanup_scope_with_debug_loc(&self,
debug_loc: NodeIdAndSpan)
-> CustomScopeIndex {
let index = self.scopes_len();
debug!("push_custom_cleanup_scope(): {}", index);
self.push_scope(CleanupScope::new(CustomScopeKind,
debug_loc.debug_loc()));
CustomScopeIndex { index: index }
}
/// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
/// stack, and generates the code to do its cleanups for normal exit.
fn pop_and_trans_ast_cleanup_scope(&self,
bcx: Block<'blk, 'tcx>,
cleanup_scope: ast::NodeId)
-> Block<'blk, 'tcx> {
debug!("pop_and_trans_ast_cleanup_scope({})",
self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
let scope = self.pop_scope();
self.trans_scope_cleanups(bcx, &scope)
}
/// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
/// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
/// branching to a block generated by `normal_exit_block`.
fn pop_loop_cleanup_scope(&self,
cleanup_scope: ast::NodeId) {
debug!("pop_loop_cleanup_scope({})",
self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
let _ = self.pop_scope();
}
/// Removes the top cleanup scope from the stack without executing its cleanups. The top
/// cleanup scope must be the temporary scope `custom_scope`.
fn pop_custom_cleanup_scope(&self,
custom_scope: CustomScopeIndex) {
pub fn pop_custom_cleanup_scope(&self,
custom_scope: CustomScopeIndex) {
debug!("pop_custom_cleanup_scope({})", custom_scope.index);
assert!(self.is_valid_to_pop_custom_scope(custom_scope));
let _ = self.pop_scope();
@ -360,10 +189,10 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
/// Removes the top cleanup scope from the stack, which must be a temporary scope, and
/// generates the code to do its cleanups for normal exit.
fn pop_and_trans_custom_cleanup_scope(&self,
bcx: Block<'blk, 'tcx>,
custom_scope: CustomScopeIndex)
-> Block<'blk, 'tcx> {
pub fn pop_and_trans_custom_cleanup_scope(&self,
bcx: Block<'blk, 'tcx>,
custom_scope: CustomScopeIndex)
-> Block<'blk, 'tcx> {
debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
assert!(self.is_valid_to_pop_custom_scope(custom_scope));
@ -371,100 +200,27 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
self.trans_scope_cleanups(bcx, &scope)
}
/// Returns the id of the top-most loop scope
fn top_loop_scope(&self) -> ast::NodeId {
for scope in self.scopes.borrow().iter().rev() {
if let LoopScopeKind(id, _) = scope.kind {
return id;
}
}
bug!("no loop scope found");
}
/// Returns a block to branch to which will perform all pending cleanups and
/// then break/continue (depending on `exit`) out of the loop with id
/// `cleanup_scope`
fn normal_exit_block(&'blk self,
cleanup_scope: ast::NodeId,
exit: usize) -> BasicBlockRef {
self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
}
/// Returns a block to branch to which will perform all pending cleanups and
/// then return from this function
fn return_exit_block(&'blk self) -> BasicBlockRef {
self.trans_cleanups_to_exit_scope(ReturnExit)
}
fn schedule_lifetime_end(&self,
cleanup_scope: ScopeId,
val: ValueRef) {
let drop = box LifetimeEnd {
ptr: val,
};
debug!("schedule_lifetime_end({:?}, val={:?})",
cleanup_scope, Value(val));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
/// Schedules a (deep) drop of `val`, which is a pointer to an instance of
/// `ty`
fn schedule_drop_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>,
drop_hint: Option<DropHintDatum<'tcx>>) {
pub fn schedule_drop_mem(&self,
cleanup_scope: CustomScopeIndex,
val: ValueRef,
ty: Ty<'tcx>) {
if !self.type_needs_drop(ty) { return; }
let drop_hint = drop_hint.map(|hint|hint.to_value());
let drop = box DropValue {
let drop = DropValue {
is_immediate: false,
val: val,
ty: ty,
fill_on_drop: false,
skip_dtor: false,
drop_hint: drop_hint,
};
debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
drop.fill_on_drop,
drop.skip_dtor);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
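Each `schedule_drop_*` method above starts with `if !self.type_needs_drop(ty) { return; }`: types with no drop glue never get a cleanup scheduled at all. The same query is available in surface Rust as `std::mem::needs_drop` (shown as illustration):

```rust
use std::mem::needs_drop;

fn main() {
    // Types with no drop glue can have their cleanup skipped entirely,
    // which is what the early returns in schedule_drop_* rely on.
    assert!(!needs_drop::<u32>());
    assert!(!needs_drop::<&String>()); // a reference never owns its target
    assert!(needs_drop::<String>());
    assert!(needs_drop::<Vec<u8>>());
}
```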
/// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty`
fn schedule_drop_and_fill_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>,
drop_hint: Option<DropHintDatum<'tcx>>) {
if !self.type_needs_drop(ty) { return; }
let drop_hint = drop_hint.map(|datum|datum.to_value());
let drop = box DropValue {
is_immediate: false,
val: val,
ty: ty,
fill_on_drop: true,
skip_dtor: false,
drop_hint: drop_hint,
};
debug!("schedule_drop_and_fill_mem({:?}, val={:?}, ty={:?},
fill_on_drop={}, skip_dtor={}, has_drop_hint={})",
cleanup_scope,
Value(val),
ty,
drop.fill_on_drop,
drop.skip_dtor,
drop_hint.is_some());
self.schedule_clean(cleanup_scope, drop as CleanupObj);
self.schedule_clean(cleanup_scope, drop);
}
/// Issue #23611: Schedules a (deep) drop of the contents of
@ -472,110 +228,55 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
/// `ty`. The scheduled code handles extracting the discriminant
/// and dropping the contents associated with that variant
/// *without* executing any associated drop implementation.
fn schedule_drop_adt_contents(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>) {
pub fn schedule_drop_adt_contents(&self,
cleanup_scope: CustomScopeIndex,
val: ValueRef,
ty: Ty<'tcx>) {
// `if` below could be "!contents_needs_drop"; skipping drop
// is just an optimization, so sound to be conservative.
if !self.type_needs_drop(ty) { return; }
let drop = box DropValue {
let drop = DropValue {
is_immediate: false,
val: val,
ty: ty,
fill_on_drop: false,
skip_dtor: true,
drop_hint: None,
};
debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
drop.fill_on_drop,
drop.skip_dtor);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
self.schedule_clean(cleanup_scope, drop);
}
/// Schedules a (deep) drop of `val`, which is an instance of `ty`
fn schedule_drop_immediate(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>) {
pub fn schedule_drop_immediate(&self,
cleanup_scope: CustomScopeIndex,
val: ValueRef,
ty: Ty<'tcx>) {
if !self.type_needs_drop(ty) { return; }
let drop = Box::new(DropValue {
let drop = DropValue {
is_immediate: true,
val: val,
ty: ty,
fill_on_drop: false,
skip_dtor: false,
drop_hint: None,
});
};
debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
drop.fill_on_drop,
drop.skip_dtor);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
/// Schedules a call to `free(val)`. Note that this is a shallow operation.
fn schedule_free_value(&self,
cleanup_scope: ScopeId,
val: ValueRef,
heap: Heap,
content_ty: Ty<'tcx>) {
let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
debug!("schedule_free_value({:?}, val={:?}, heap={:?})",
cleanup_scope, Value(val), heap);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: CleanupObj<'tcx>) {
match cleanup_scope {
AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
}
}
/// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not
/// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary
/// scope.
fn schedule_clean_in_ast_scope(&self,
cleanup_scope: ast::NodeId,
cleanup: CleanupObj<'tcx>) {
debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
cleanup_scope);
for scope in self.scopes.borrow_mut().iter_mut().rev() {
if scope.kind.is_ast_with_id(cleanup_scope) {
scope.cleanups.push(cleanup);
scope.cached_landing_pad = None;
return;
} else {
// will be adding a cleanup to some enclosing scope
scope.clear_cached_exits();
}
}
bug!("no cleanup scope {} found",
self.ccx.tcx().map.node_to_string(cleanup_scope));
self.schedule_clean(cleanup_scope, drop);
}
/// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
fn schedule_clean_in_custom_scope(&self,
custom_scope: CustomScopeIndex,
cleanup: CleanupObj<'tcx>) {
fn schedule_clean(&self, custom_scope: CustomScopeIndex, cleanup: DropValue<'tcx>) {
debug!("schedule_clean_in_custom_scope(custom_scope={})",
custom_scope.index);
@ -588,14 +289,14 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
}
/// Returns true if there are pending cleanups that should execute on panic.
fn needs_invoke(&self) -> bool {
pub fn needs_invoke(&self) -> bool {
self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
}
/// Returns a basic block to branch to in the event of a panic. This block
/// will run the panic cleanups and eventually resume the exception that
/// caused the landing pad to be run.
fn get_landing_pad(&'blk self) -> BasicBlockRef {
pub fn get_landing_pad(&'blk self) -> BasicBlockRef {
let _icx = base::push_ctxt("get_landing_pad");
debug!("get_landing_pad");
@ -625,25 +326,6 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
return llbb;
}
}
impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
/// Returns the id of the current top-most AST scope, if any.
fn top_ast_scope(&self) -> Option<ast::NodeId> {
for scope in self.scopes.borrow().iter().rev() {
match scope.kind {
CustomScopeKind | LoopScopeKind(..) => {}
AstScopeKind(i) => {
return Some(i);
}
}
}
None
}
fn top_nonempty_cleanup_scope(&self) -> Option<usize> {
self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
}
fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
self.is_valid_custom_scope(custom_scope) &&
@ -652,14 +334,13 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
let scopes = self.scopes.borrow();
custom_scope.index < scopes.len() &&
(*scopes)[custom_scope.index].kind.is_temp()
custom_scope.index < scopes.len()
}
/// Generates the cleanups for `scope` into `bcx`
fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
bcx: Block<'blk, 'tcx>,
scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
scope: &CleanupScope<'tcx>) -> Block<'blk, 'tcx> {
let mut bcx = bcx;
if !bcx.unreachable.get() {
@ -674,11 +355,11 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
self.scopes.borrow().len()
}
fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
fn push_scope(&self, scope: CleanupScope<'tcx>) {
self.scopes.borrow_mut().push(scope)
}
fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
fn pop_scope(&self) -> CleanupScope<'tcx> {
debug!("popping cleanup scope {}, {} scopes remaining",
self.top_scope(|s| s.block_name("")),
self.scopes_len() - 1);
@ -686,7 +367,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
self.scopes.borrow_mut().pop().unwrap()
}
fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'tcx>) -> R {
f(self.scopes.borrow().last().unwrap())
}
@ -738,7 +419,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
UnwindExit(val) => {
// Generate a block that will resume unwinding to the
// calling function
let bcx = self.new_block("resume", None);
let bcx = self.new_block("resume");
match val {
UnwindKind::LandingPad => {
let addr = self.landingpad_alloca.get()
@ -755,15 +436,6 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
prev_llbb = bcx.llbb;
break;
}
ReturnExit => {
prev_llbb = self.get_llreturn();
break
}
LoopExit(id, _) => {
bug!("cannot exit from scope {}, not in scope", id);
}
}
}
@ -782,20 +454,6 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
skip = last_cleanup;
break;
}
// If we are searching for a loop exit,
// and this scope is that loop, then stop popping and set
// `prev_llbb` to the appropriate exit block from the loop.
let scope = popped_scopes.last().unwrap();
match label {
UnwindExit(..) | ReturnExit => { }
LoopExit(id, exit) => {
if let Some(exit) = scope.kind.early_exit_block(id, exit) {
prev_llbb = exit;
break
}
}
}
}
debug!("trans_cleanups_to_exit_scope: popped {} scopes",
@ -826,7 +484,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
let name = scope.block_name("clean");
debug!("generating cleanups for {}", name);
let bcx_in = self.new_block(&name[..], None);
let bcx_in = self.new_block(&name[..]);
let exit_label = label.start(bcx_in);
let mut bcx_out = bcx_in;
let len = scope.cleanups.len();
@ -869,7 +527,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
Some(llbb) => return llbb,
None => {
let name = last_scope.block_name("unwind");
pad_bcx = self.new_block(&name[..], None);
pad_bcx = self.new_block(&name[..]);
last_scope.cached_landing_pad = Some(pad_bcx.llbb);
}
}
@ -923,12 +581,9 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
}
}
impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
fn new(kind: CleanupScopeKind<'blk, 'tcx>,
debug_loc: DebugLoc)
-> CleanupScope<'blk, 'tcx> {
impl<'tcx> CleanupScope<'tcx> {
fn new(debug_loc: DebugLoc) -> CleanupScope<'tcx> {
CleanupScope {
kind: kind,
debug_loc: debug_loc,
cleanups: vec!(),
cached_early_exits: vec!(),
@ -936,11 +591,6 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
}
}
fn clear_cached_exits(&mut self) {
self.cached_early_exits = vec!();
self.cached_landing_pad = None;
}
fn cached_early_exit(&self,
label: EarlyExitLabel)
-> Option<(BasicBlockRef, usize)> {
@ -961,62 +611,13 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
/// True if this scope has cleanups that need unwinding
fn needs_invoke(&self) -> bool {
self.cached_landing_pad.is_some() ||
self.cleanups.iter().any(|c| c.must_unwind())
!self.cleanups.is_empty()
}
/// Returns a suitable name to use for the basic block that handles this cleanup scope
fn block_name(&self, prefix: &str) -> String {
match self.kind {
CustomScopeKind => format!("{}_custom_", prefix),
AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
}
}
/// Manipulate cleanup scope for call arguments. Conceptually, each
/// argument to a call is an lvalue, and performing the call moves each
/// of the arguments into a new rvalue (which gets cleaned up by the
/// callee). As an optimization, instead of actually performing all of
/// those moves, trans just manipulates the cleanup scope to obtain the
/// same effect.
pub fn drop_non_lifetime_clean(&mut self) {
self.cleanups.retain(|c| c.is_lifetime_end());
self.clear_cached_exits();
}
}
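`drop_non_lifetime_clean` exists because trans treats passing an argument as a move: ownership, and with it the drop obligation, transfers to the callee, so the caller's scheduled cleanup for that value is discarded rather than run. The same ownership transfer at the language level (illustrative names only):

```rust
struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

// The callee receives ownership, so it (not the caller) runs the destructor.
fn consume(x: Noisy) {
    println!("callee got {}", x.0);
}

fn main() {
    let a = Noisy("a");
    consume(a); // prints "callee got a" then "dropping a"
    println!("caller continues; no second drop of `a`");
}
```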
impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
fn is_temp(&self) -> bool {
match *self {
CustomScopeKind => true,
LoopScopeKind(..) | AstScopeKind(..) => false,
}
}
fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
match *self {
CustomScopeKind | LoopScopeKind(..) => false,
AstScopeKind(i) => i == id
}
}
fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
match *self {
CustomScopeKind | AstScopeKind(..) => false,
LoopScopeKind(i, _) => i == id
}
}
/// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
fn early_exit_block(&self,
id: ast::NodeId,
exit: usize) -> Option<BasicBlockRef> {
match *self {
LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
_ => None,
}
format!("{}_custom_", prefix)
}
}
@ -1057,7 +658,6 @@ impl EarlyExitLabel {
bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu())));
*self
}
label => label,
}
}
}
@ -1080,20 +680,10 @@ pub struct DropValue<'tcx> {
is_immediate: bool,
val: ValueRef,
ty: Ty<'tcx>,
fill_on_drop: bool,
skip_dtor: bool,
drop_hint: Option<DropHintValue>,
}
impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
fn must_unwind(&self) -> bool {
true
}
fn is_lifetime_end(&self) -> bool {
false
}
impl<'tcx> DropValue<'tcx> {
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
@ -1107,180 +697,8 @@ impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
let bcx = if self.is_immediate {
glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
} else {
glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor, self.drop_hint)
glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
};
if self.fill_on_drop {
base::drop_done_fill_mem(bcx, self.val, self.ty);
}
bcx
}
}
#[derive(Copy, Clone, Debug)]
pub enum Heap {
HeapExchange
}
#[derive(Copy, Clone)]
pub struct FreeValue<'tcx> {
ptr: ValueRef,
heap: Heap,
content_ty: Ty<'tcx>
}
impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
fn must_unwind(&self) -> bool {
true
}
fn is_lifetime_end(&self) -> bool {
false
}
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
match self.heap {
HeapExchange => {
glue::trans_exchange_free_ty(bcx,
self.ptr,
self.content_ty,
debug_loc)
}
}
}
}
#[derive(Copy, Clone)]
pub struct LifetimeEnd {
ptr: ValueRef,
}
impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
fn must_unwind(&self) -> bool {
false
}
fn is_lifetime_end(&self) -> bool {
true
}
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
debug_loc.apply(bcx.fcx);
base::call_lifetime_end(bcx, self.ptr);
bcx
}
}
pub fn temporary_scope(tcx: TyCtxt,
id: ast::NodeId)
-> ScopeId {
match tcx.region_maps.temporary_scope(id) {
Some(scope) => {
let r = AstScope(scope.node_id(&tcx.region_maps));
debug!("temporary_scope({}) = {:?}", id, r);
r
}
None => {
bug!("no temporary scope available for expr {}", id)
}
}
}
pub fn var_scope(tcx: TyCtxt,
id: ast::NodeId)
-> ScopeId {
let r = AstScope(tcx.region_maps.var_scope(id).node_id(&tcx.region_maps));
debug!("var_scope({}) = {:?}", id, r);
r
}
///////////////////////////////////////////////////////////////////////////
// These traits just exist to put the methods into this file.
pub trait CleanupMethods<'blk, 'tcx> {
fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan);
fn push_loop_cleanup_scope(&self,
id: ast::NodeId,
exits: [Block<'blk, 'tcx>; EXIT_MAX]);
fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
fn push_custom_cleanup_scope_with_debug_loc(&self,
debug_loc: NodeIdAndSpan)
-> CustomScopeIndex;
fn pop_and_trans_ast_cleanup_scope(&self,
bcx: Block<'blk, 'tcx>,
cleanup_scope: ast::NodeId)
-> Block<'blk, 'tcx>;
fn pop_loop_cleanup_scope(&self,
cleanup_scope: ast::NodeId);
fn pop_custom_cleanup_scope(&self,
custom_scope: CustomScopeIndex);
fn pop_and_trans_custom_cleanup_scope(&self,
bcx: Block<'blk, 'tcx>,
custom_scope: CustomScopeIndex)
-> Block<'blk, 'tcx>;
fn top_loop_scope(&self) -> ast::NodeId;
fn normal_exit_block(&'blk self,
cleanup_scope: ast::NodeId,
exit: usize) -> BasicBlockRef;
fn return_exit_block(&'blk self) -> BasicBlockRef;
fn schedule_lifetime_end(&self,
cleanup_scope: ScopeId,
val: ValueRef);
fn schedule_drop_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>,
drop_hint: Option<DropHintDatum<'tcx>>);
fn schedule_drop_and_fill_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>,
drop_hint: Option<DropHintDatum<'tcx>>);
fn schedule_drop_adt_contents(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>);
fn schedule_drop_immediate(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>);
fn schedule_free_value(&self,
cleanup_scope: ScopeId,
val: ValueRef,
heap: Heap,
content_ty: Ty<'tcx>);
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: CleanupObj<'tcx>);
fn schedule_clean_in_ast_scope(&self,
cleanup_scope: ast::NodeId,
cleanup: CleanupObj<'tcx>);
fn schedule_clean_in_custom_scope(&self,
custom_scope: CustomScopeIndex,
cleanup: CleanupObj<'tcx>);
fn needs_invoke(&self) -> bool;
fn get_landing_pad(&'blk self) -> BasicBlockRef;
}
trait CleanupHelperMethods<'blk, 'tcx> {
fn top_ast_scope(&self) -> Option<ast::NodeId>;
fn top_nonempty_cleanup_scope(&self) -> Option<usize>;
fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
fn trans_scope_cleanups(&self,
bcx: Block<'blk, 'tcx>,
scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
fn trans_cleanups_to_exit_scope(&'blk self,
label: EarlyExitLabel)
-> BasicBlockRef;
fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
fn scopes_len(&self) -> usize;
fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
}


@ -10,113 +10,21 @@
use arena::TypedArena;
use back::symbol_names;
use llvm::{self, ValueRef, get_param, get_params};
use llvm::{self, ValueRef, get_params};
use rustc::hir::def_id::DefId;
use abi::{Abi, FnType};
use adt;
use attributes;
use base::*;
use build::*;
use callee::{self, ArgVals, Callee};
use cleanup::{CleanupMethods, CustomScope, ScopeId};
use callee::{self, Callee};
use common::*;
use datum::{ByRef, Datum, lvalue_scratch_datum};
use datum::{rvalue_scratch_datum, Rvalue};
use debuginfo::{self, DebugLoc};
use debuginfo::{DebugLoc};
use declare;
use expr;
use monomorphize::{Instance};
use value::Value;
use Disr;
use rustc::ty::{self, Ty, TyCtxt};
use session::config::FullDebugInfo;
use syntax::ast;
use rustc::hir;
use libc::c_uint;
fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
closure_def_id: DefId,
arg_scope_id: ScopeId,
id: ast::NodeId) {
let _icx = push_ctxt("closure::load_closure_environment");
let kind = kind_for_closure(bcx.ccx(), closure_def_id);
let env_arg = &bcx.fcx.fn_ty.args[0];
let mut env_idx = bcx.fcx.fn_ty.ret.is_indirect() as usize;
// Special case for small by-value selfs.
let llenv = if kind == ty::ClosureKind::FnOnce && !env_arg.is_indirect() {
let closure_ty = node_id_type(bcx, id);
let llenv = rvalue_scratch_datum(bcx, closure_ty, "closure_env").val;
env_arg.store_fn_arg(&bcx.build(), &mut env_idx, llenv);
llenv
} else {
get_param(bcx.fcx.llfn, env_idx as c_uint)
};
// Store the pointer to closure data in an alloca for debug info because that's what the
// llvm.dbg.declare intrinsic expects
let env_pointer_alloca = if bcx.sess().opts.debuginfo == FullDebugInfo {
let alloc = alloca(bcx, val_ty(llenv), "__debuginfo_env_ptr");
Store(bcx, llenv, alloc);
Some(alloc)
} else {
None
};
bcx.tcx().with_freevars(id, |fv| {
for (i, freevar) in fv.iter().enumerate() {
let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
closure_expr_id: id };
let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap();
let mut upvar_ptr = StructGEP(bcx, llenv, i);
let captured_by_ref = match upvar_capture {
ty::UpvarCapture::ByValue => false,
ty::UpvarCapture::ByRef(..) => {
upvar_ptr = Load(bcx, upvar_ptr);
true
}
};
let node_id = freevar.def.var_id();
bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr);
if kind == ty::ClosureKind::FnOnce && !captured_by_ref {
let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id);
bcx.fcx.schedule_drop_mem(arg_scope_id,
upvar_ptr,
node_id_type(bcx, node_id),
hint)
}
if let Some(env_pointer_alloca) = env_pointer_alloca {
debuginfo::create_captured_var_metadata(
bcx,
node_id,
env_pointer_alloca,
i,
captured_by_ref,
freevar.span);
}
}
})
}
pub enum ClosureEnv {
NotClosure,
Closure(DefId, ast::NodeId),
}
impl ClosureEnv {
pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) {
if let ClosureEnv::Closure(def_id, id) = self {
load_closure_environment(bcx, def_id, arg_scope, id);
}
}
}
fn get_self_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
closure_id: DefId,
fn_ty: Ty<'tcx>)
@ -184,55 +92,12 @@ fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
pub fn trans_closure_body_via_mir<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
closure_def_id: DefId,
closure_substs: ty::ClosureSubsts<'tcx>) {
use syntax::ast::DUMMY_NODE_ID;
use syntax_pos::DUMMY_SP;
use syntax::ptr::P;
trans_closure_expr(Dest::Ignore(ccx),
&hir::FnDecl {
inputs: P::new(),
output: hir::Return(P(hir::Ty {
id: DUMMY_NODE_ID,
span: DUMMY_SP,
node: hir::Ty_::TyNever,
})),
variadic: false
},
&hir::Block {
stmts: P::new(),
expr: None,
id: DUMMY_NODE_ID,
rules: hir::DefaultBlock,
span: DUMMY_SP
},
DUMMY_NODE_ID,
closure_def_id,
closure_substs);
}
pub enum Dest<'a, 'tcx: 'a> {
SaveIn(Block<'a, 'tcx>, ValueRef),
Ignore(&'a CrateContext<'a, 'tcx>)
}
pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
decl: &hir::FnDecl,
body: &hir::Block,
id: ast::NodeId,
closure_def_id: DefId, // (*)
closure_substs: ty::ClosureSubsts<'tcx>)
-> Option<Block<'a, 'tcx>>
{
// (*) Note that in the case of inlined functions, the `closure_def_id` will be the
// defid of the closure in its original crate, whereas `id` will be the id of the local
// inlined copy.
debug!("trans_closure_expr(id={:?}, closure_def_id={:?}, closure_substs={:?})",
id, closure_def_id, closure_substs);
debug!("trans_closure_body_via_mir(closure_def_id={:?}, closure_substs={:?})",
closure_def_id, closure_substs);
let ccx = match dest {
Dest::SaveIn(bcx, _) => bcx.ccx(),
Dest::Ignore(ccx) => ccx
};
let tcx = ccx.tcx();
let _icx = push_ctxt("closure::trans_closure_expr");
@ -275,52 +140,13 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
};
trans_closure(ccx,
decl,
body,
llfn,
Instance::new(closure_def_id, param_substs),
id,
&sig,
Abi::RustCall,
ClosureEnv::Closure(closure_def_id, id));
Abi::RustCall);
ccx.instances().borrow_mut().insert(instance, llfn);
}
// Don't hoist this to the top of the function. It's perfectly legitimate
// to have a zero-size closure (in which case dest will be `Ignore`) and
// we must still generate the closure body.
let (mut bcx, dest_addr) = match dest {
Dest::SaveIn(bcx, p) => (bcx, p),
Dest::Ignore(_) => {
debug!("trans_closure_expr() ignoring result");
return None;
}
};
let repr = adt::represent_type(ccx, node_id_type(bcx, id));
// Create the closure.
tcx.with_freevars(id, |fv| {
for (i, freevar) in fv.iter().enumerate() {
let datum = expr::trans_var(bcx, freevar.def);
let upvar_slot_dest = adt::trans_field_ptr(
bcx, &repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i);
let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
closure_expr_id: id };
match tcx.upvar_capture(upvar_id).unwrap() {
ty::UpvarCapture::ByValue => {
bcx = datum.store_to(bcx, upvar_slot_dest);
}
ty::UpvarCapture::ByRef(..) => {
Store(bcx, datum.to_llref(), upvar_slot_dest);
}
}
}
});
adt::trans_set_discr(bcx, &repr, dest_addr, Disr(0));
Some(bcx)
}
pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
@ -337,23 +163,7 @@ pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
if !ccx.sess().target.target.options.allows_weak_linkage &&
!ccx.sess().opts.single_codegen_unit() {
if let Some(node_id) = ccx.tcx().map.as_local_node_id(closure_def_id) {
// If the closure is defined in the local crate, we can always just
// translate it.
let (decl, body) = match ccx.tcx().map.expect_expr(node_id).node {
hir::ExprClosure(_, ref decl, ref body, _) => (decl, body),
_ => { unreachable!() }
};
trans_closure_expr(Dest::Ignore(ccx),
decl,
body,
node_id,
closure_def_id,
substs);
} else {
trans_closure_body_via_mir(ccx, closure_def_id, substs);
}
trans_closure_body_via_mir(ccx, closure_def_id, substs);
}
// If the closure is a Fn closure, but a FnOnce is needed (etc),
@ -453,28 +263,21 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena);
let mut bcx = fcx.init(false, None);
let mut bcx = fcx.init(false);
// the first argument (`self`) will be the (by value) closure env.
let self_scope = fcx.push_custom_cleanup_scope();
let self_scope_id = CustomScope(self_scope);
let mut llargs = get_params(fcx.llfn);
let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize;
let env_arg = &fcx.fn_ty.args[0];
let llenv = if env_arg.is_indirect() {
Datum::new(llargs[self_idx], closure_ty, Rvalue::new(ByRef))
.add_clean(&fcx, self_scope_id)
llargs[self_idx]
} else {
unpack_datum!(bcx, lvalue_scratch_datum(bcx, closure_ty, "self",
InitAlloca::Dropped,
self_scope_id, |bcx, llval| {
let mut llarg_idx = self_idx;
env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, llval);
bcx.fcx.schedule_lifetime_end(self_scope_id, llval);
bcx
})).val
let scratch = alloc_ty(bcx, closure_ty, "self");
let mut llarg_idx = self_idx;
env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, scratch);
scratch
};
debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv));
@ -491,15 +294,19 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
llargs[self_idx] = llenv;
}
let dest =
fcx.llretslotptr.get().map(
|_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
let dest = fcx.llretslotptr.get();
let callee = Callee {
data: callee::Fn(llreffn),
ty: llref_fn_ty
};
bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[self_idx..]), dest).bcx;
// Call the by-ref closure body with `self` in a cleanup scope,
// to drop `self` when the body returns, or in case it unwinds.
let self_scope = fcx.push_custom_cleanup_scope();
fcx.schedule_drop_mem(self_scope, llenv, closure_ty);
bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx;
fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);

View File

@ -16,7 +16,6 @@ use session::Session;
use llvm;
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind};
use llvm::{True, False, Bool, OperandBundleDef};
use rustc::cfg;
use rustc::hir::def::Def;
use rustc::hir::def_id::DefId;
use rustc::infer::TransNormalize;
@ -30,7 +29,6 @@ use builder::Builder;
use callee::Callee;
use cleanup;
use consts;
use datum;
use debuginfo::{self, DebugLoc};
use declare;
use machine;
@ -43,7 +41,6 @@ use rustc::ty::layout::Layout;
use rustc::traits::{self, SelectionContext, Reveal};
use rustc::ty::fold::TypeFoldable;
use rustc::hir;
use util::nodemap::NodeMap;
use arena::TypedArena;
use libc::{c_uint, c_char};
@ -208,10 +205,6 @@ pub struct NodeIdAndSpan {
pub span: Span,
}
pub fn expr_info(expr: &hir::Expr) -> NodeIdAndSpan {
NodeIdAndSpan { id: expr.id, span: expr.span }
}
/// The concrete version of ty::FieldDef. The name is the field index if
/// the field is numeric.
pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>);
@ -257,17 +250,6 @@ impl<'a, 'tcx> VariantInfo<'tcx> {
}
}
}
/// Return the variant corresponding to a given node (e.g. expr)
pub fn of_node(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, id: ast::NodeId) -> Self {
Self::from_ty(tcx, ty, Some(tcx.expect_def(id)))
}
pub fn field_index(&self, name: ast::Name) -> usize {
self.fields.iter().position(|&Field(n,_)| n == name).unwrap_or_else(|| {
bug!("unknown field `{}`", name)
})
}
}
pub struct BuilderRef_res {
@ -292,37 +274,6 @@ pub fn validate_substs(substs: &Substs) {
assert!(!substs.types.needs_infer());
}
// work around bizarre resolve errors
type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>;
pub type LvalueDatum<'tcx> = datum::Datum<'tcx, datum::Lvalue>;
#[derive(Clone, Debug)]
struct HintEntry<'tcx> {
// The datum for the dropflag-hint itself; note that many
// source-level Lvalues will be associated with the same
// dropflag-hint datum.
datum: cleanup::DropHintDatum<'tcx>,
}
pub struct DropFlagHintsMap<'tcx> {
// Maps the NodeId of expressions that read/write unfragmented state
// to that state's drop-flag "hint." (A stack-local hint indicates
// either that (1) it is certain that no drop is needed, or (2) the
// inline drop-flag must be consulted.)
node_map: NodeMap<HintEntry<'tcx>>,
}
impl<'tcx> DropFlagHintsMap<'tcx> {
pub fn new() -> DropFlagHintsMap<'tcx> { DropFlagHintsMap { node_map: NodeMap() } }
pub fn has_hint(&self, id: ast::NodeId) -> bool { self.node_map.contains_key(&id) }
pub fn insert(&mut self, id: ast::NodeId, datum: cleanup::DropHintDatum<'tcx>) {
self.node_map.insert(id, HintEntry { datum: datum });
}
pub fn hint_datum(&self, id: ast::NodeId) -> Option<cleanup::DropHintDatum<'tcx>> {
self.node_map.get(&id).map(|t|t.datum)
}
}
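// Illustrative sketch, not part of the original sources: how the hint map is
// populated and queried. `node_id` and `hint` stand in for values produced
// by the enclosing function's drop-flag analysis.
fn example_hint_usage<'tcx>(node_id: ast::NodeId,
                            hint: cleanup::DropHintDatum<'tcx>) -> bool {
    let mut hints = DropFlagHintsMap::new();
    hints.insert(node_id, hint);
    // Code that later moves the binding checks for a hint and, if present,
    // marks it as moved instead of drop-filling the memory.
    hints.has_hint(node_id)
}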
// Function context. Every LLVM function we create will have one of
// these.
pub struct FunctionContext<'a, 'tcx: 'a> {
@ -352,12 +303,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// A marker for the place where we want to insert the function's static
// allocas, so that LLVM will coalesce them into a single alloca call.
pub alloca_insert_pt: Cell<Option<ValueRef>>,
pub llreturn: Cell<Option<BasicBlockRef>>,
// If the function has any nested returns, including something like:
// fn foo() -> Option<Foo> { Some(Foo { x: return None }) }, then
// we use a separate alloca for each return.
pub needs_ret_allocas: bool,
// When working with landingpad-based exceptions this value is alloca'd and
// later loaded when using the resume instruction. This ends up being
@ -367,17 +312,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// Note that for cleanuppad-based exceptions this is not used.
pub landingpad_alloca: Cell<Option<ValueRef>>,
// Maps the DefId's for local variables to the allocas created for
// them in llallocas.
pub lllocals: RefCell<NodeMap<LvalueDatum<'tcx>>>,
// Same as above, but for closure upvars
pub llupvars: RefCell<NodeMap<ValueRef>>,
// Carries info about drop-flags for local bindings (longer term,
// paths) for the code being compiled.
pub lldropflag_hints: RefCell<DropFlagHintsMap<'tcx>>,
// Describes the return/argument LLVM types and their ABI handling.
pub fn_ty: FnType,
@ -402,9 +336,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
pub debug_context: debuginfo::FunctionDebugContext,
// Cleanup scopes.
pub scopes: RefCell<Vec<cleanup::CleanupScope<'a, 'tcx>>>,
pub cfg: Option<cfg::CFG>,
pub scopes: RefCell<Vec<cleanup::CleanupScope<'tcx>>>,
}
impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
@ -420,70 +352,18 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
}
}
pub fn get_llreturn(&self) -> BasicBlockRef {
if self.llreturn.get().is_none() {
self.llreturn.set(Some(unsafe {
llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn,
"return\0".as_ptr() as *const _)
}))
}
self.llreturn.get().unwrap()
}
pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>, name: &str) -> ValueRef {
if self.needs_ret_allocas {
base::alloca(bcx, self.fn_ty.ret.memory_ty(self.ccx), name)
} else {
self.llretslotptr.get().unwrap()
}
}
pub fn new_block(&'a self,
name: &str,
opt_node_id: Option<ast::NodeId>)
name: &str)
-> Block<'a, 'tcx> {
unsafe {
let name = CString::new(name).unwrap();
let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
self.llfn,
name.as_ptr());
BlockS::new(llbb, opt_node_id, self)
BlockS::new(llbb, self)
}
}
pub fn new_id_block(&'a self,
name: &str,
node_id: ast::NodeId)
-> Block<'a, 'tcx> {
self.new_block(name, Some(node_id))
}
pub fn new_temp_block(&'a self,
name: &str)
-> Block<'a, 'tcx> {
self.new_block(name, None)
}
pub fn join_blocks(&'a self,
id: ast::NodeId,
in_cxs: &[Block<'a, 'tcx>])
-> Block<'a, 'tcx> {
let out = self.new_id_block("join", id);
let mut reachable = false;
for bcx in in_cxs {
if !bcx.unreachable.get() {
build::Br(*bcx, out.llbb, DebugLoc::None);
reachable = true;
}
}
if !reachable {
build::Unreachable(out);
}
return out;
}
pub fn monomorphize<T>(&self, value: &T) -> T
where T: TransNormalize<'tcx>
{
@ -523,7 +403,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
let tcx = ccx.tcx();
match tcx.lang_items.eh_personality() {
Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => {
Callee::def(ccx, def_id, Substs::empty(tcx)).reify(ccx).val
Callee::def(ccx, def_id, Substs::empty(tcx)).reify(ccx)
}
_ => {
if let Some(llpersonality) = ccx.eh_personality().get() {
@ -565,12 +445,12 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
let unwresume = ccx.eh_unwind_resume();
if let Some(llfn) = unwresume.get() {
return Callee::ptr(datum::immediate_rvalue(llfn, ty));
return Callee::ptr(llfn, ty);
}
let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty);
attributes::unwind(llfn, true);
unwresume.set(Some(llfn));
Callee::ptr(datum::immediate_rvalue(llfn, ty))
Callee::ptr(llfn, ty)
}
}
@ -593,10 +473,6 @@ pub struct BlockS<'blk, 'tcx: 'blk> {
// kind of landing pad it's in, otherwise this is `None`.
pub lpad: Cell<Option<&'blk LandingPad>>,
// AST node-id associated with this block, if any. Used for
// debugging purposes only.
pub opt_node_id: Option<ast::NodeId>,
// The function context for the function to which this block is
// attached.
pub fcx: &'blk FunctionContext<'blk, 'tcx>,
@ -606,7 +482,6 @@ pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;
impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
pub fn new(llbb: BasicBlockRef,
opt_node_id: Option<ast::NodeId>,
fcx: &'blk FunctionContext<'blk, 'tcx>)
-> Block<'blk, 'tcx> {
fcx.block_arena.alloc(BlockS {
@ -614,7 +489,6 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
terminated: Cell::new(false),
unreachable: Cell::new(false),
lpad: Cell::new(None),
opt_node_id: opt_node_id,
fcx: fcx
})
}
@ -883,13 +757,6 @@ pub fn C_integral(t: Type, u: u64, sign_extend: bool) -> ValueRef {
}
}
pub fn C_floating(s: &str, t: Type) -> ValueRef {
unsafe {
let s = CString::new(s).unwrap();
llvm::LLVMConstRealOfString(t.to_ref(), s.as_ptr())
}
}
pub fn C_floating_f64(f: f64, t: Type) -> ValueRef {
unsafe {
llvm::LLVMConstReal(t.to_ref(), f)
@ -1099,24 +966,6 @@ pub fn is_null(val: ValueRef) -> bool {
}
}
pub fn monomorphize_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
bcx.fcx.monomorphize(&t)
}
pub fn node_id_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, id: ast::NodeId) -> Ty<'tcx> {
let tcx = bcx.tcx();
let t = tcx.node_id_to_type(id);
monomorphize_type(bcx, t)
}
pub fn expr_ty<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
node_id_type(bcx, ex.id)
}
pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
monomorphize_type(bcx, bcx.tcx().expr_ty_adjusted(ex))
}
/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
/// guarantee to us that all nested obligations *could be* resolved if we wanted to.

File diff suppressed because it is too large Load Diff

View File

@ -79,7 +79,6 @@ pub struct SharedCrateContext<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
stats: Stats,
check_overflow: bool,
check_drop_flag_for_sanity: bool,
mir_map: &'a MirMap<'tcx>,
mir_cache: RefCell<DepTrackingMap<MirCache<'tcx>>>,
@ -424,8 +423,7 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
symbol_hasher: Sha256,
link_meta: LinkMeta,
reachable: NodeSet,
check_overflow: bool,
check_drop_flag_for_sanity: bool)
check_overflow: bool)
-> SharedCrateContext<'b, 'tcx> {
let (metadata_llcx, metadata_llmod) = unsafe {
create_context_and_module(&tcx.sess, "metadata")
@ -500,7 +498,6 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
fn_stats: RefCell::new(Vec::new()),
},
check_overflow: check_overflow,
check_drop_flag_for_sanity: check_drop_flag_for_sanity,
use_dll_storage_attrs: use_dll_storage_attrs,
translation_items: RefCell::new(FnvHashSet()),
trait_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())),
@ -964,13 +961,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
self.shared.check_overflow
}
pub fn check_drop_flag_for_sanity(&self) -> bool {
// This controls whether we emit a conditional llvm.debugtrap
// guarded on whether the dropflag is one of its (two) valid
// values.
self.shared.check_drop_flag_for_sanity
}
pub fn use_dll_storage_attrs(&self) -> bool {
self.shared.use_dll_storage_attrs()
}

View File

@ -1,434 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::ValueRef;
use rustc::hir::def::Def;
use middle::lang_items::{PanicFnLangItem, PanicBoundsCheckFnLangItem};
use rustc::ty::subst::Substs;
use base::*;
use basic_block::BasicBlock;
use build::*;
use callee::{Callee, ArgVals};
use cleanup::CleanupMethods;
use cleanup;
use common::*;
use consts;
use debuginfo;
use debuginfo::{DebugLoc, ToDebugLoc};
use expr;
use machine;
use rustc::hir;
use syntax::ast;
use syntax::parse::token::InternedString;
use syntax::parse::token;
pub fn trans_stmt<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
s: &hir::Stmt)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_stmt");
let fcx = cx.fcx;
debug!("trans_stmt({:?})", s);
if cx.unreachable.get() {
return cx;
}
if cx.sess().asm_comments() {
add_span_comment(cx, s.span, &format!("{:?}", s));
}
let mut bcx = cx;
let id = s.node.id();
let cleanup_debug_loc =
debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), id, s.span, false);
fcx.push_ast_cleanup_scope(cleanup_debug_loc);
match s.node {
hir::StmtExpr(ref e, _) | hir::StmtSemi(ref e, _) => {
bcx = trans_stmt_semi(bcx, &e);
}
hir::StmtDecl(ref d, _) => {
match d.node {
hir::DeclLocal(ref local) => {
bcx = init_local(bcx, &local);
debuginfo::create_local_var_metadata(bcx, &local);
}
// Inner items are visited by `trans_item`/`trans_meth`.
hir::DeclItem(_) => {},
}
}
}
bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, s.node.id());
return bcx;
}
pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &hir::Expr)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_stmt_semi");
if cx.unreachable.get() {
return cx;
}
let ty = expr_ty(cx, e);
if cx.fcx.type_needs_drop(ty) {
expr::trans_to_lvalue(cx, e, "stmt").bcx
} else {
expr::trans_into(cx, e, expr::Ignore)
}
}
pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
b: &hir::Block,
mut dest: expr::Dest)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_block");
if bcx.unreachable.get() {
return bcx;
}
let fcx = bcx.fcx;
let mut bcx = bcx;
let cleanup_debug_loc =
debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), b.id, b.span, true);
fcx.push_ast_cleanup_scope(cleanup_debug_loc);
for s in &b.stmts {
bcx = trans_stmt(bcx, s);
}
if dest != expr::Ignore {
let block_ty = node_id_type(bcx, b.id);
if b.expr.is_none() || type_is_zero_size(bcx.ccx(), block_ty) {
dest = expr::Ignore;
} else if b.expr.is_some() {
// If the block has an expression, but that expression isn't reachable,
// don't save into the given destination; just ignore it.
if let Some(ref cfg) = bcx.fcx.cfg {
if !cfg.node_is_reachable(b.expr.as_ref().unwrap().id) {
dest = expr::Ignore;
}
}
}
}
match b.expr {
Some(ref e) => {
if !bcx.unreachable.get() {
bcx = expr::trans_into(bcx, &e, dest);
}
}
None => {
assert!(dest == expr::Ignore || bcx.unreachable.get());
}
}
bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, b.id);
return bcx;
}
pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
if_id: ast::NodeId,
cond: &hir::Expr,
thn: &hir::Block,
els: Option<&hir::Expr>,
dest: expr::Dest)
-> Block<'blk, 'tcx> {
debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={:?})",
bcx.to_str(), if_id, cond, thn.id, dest);
let _icx = push_ctxt("trans_if");
if bcx.unreachable.get() {
return bcx;
}
let mut bcx = bcx;
let cond_val = unpack_result!(bcx, expr::trans(bcx, cond).to_llbool());
// Drop branches that are known to be impossible
if let Some(cv) = const_to_opt_uint(cond_val) {
if cv == 1 {
// if true { .. } [else { .. }]
bcx = trans_block(bcx, &thn, dest);
DebugLoc::None.apply(bcx.fcx);
} else {
if let Some(elexpr) = els {
bcx = expr::trans_into(bcx, &elexpr, dest);
DebugLoc::None.apply(bcx.fcx);
}
}
return bcx;
}
let name = format!("then-block-{}-", thn.id);
let then_bcx_in = bcx.fcx.new_id_block(&name[..], thn.id);
let then_bcx_out = trans_block(then_bcx_in, &thn, dest);
DebugLoc::None.apply(bcx.fcx);
let cond_source_loc = cond.debug_loc();
let next_bcx;
match els {
Some(elexpr) => {
let else_bcx_in = bcx.fcx.new_id_block("else-block", elexpr.id);
let else_bcx_out = expr::trans_into(else_bcx_in, &elexpr, dest);
next_bcx = bcx.fcx.join_blocks(if_id,
&[then_bcx_out, else_bcx_out]);
CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb, cond_source_loc);
}
None => {
next_bcx = bcx.fcx.new_id_block("next-block", if_id);
Br(then_bcx_out, next_bcx.llbb, DebugLoc::None);
CondBr(bcx, cond_val, then_bcx_in.llbb, next_bcx.llbb, cond_source_loc);
}
}
// Clear the source location because it is still set to whatever has been translated
// right before.
DebugLoc::None.apply(next_bcx.fcx);
next_bcx
}
pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
loop_expr: &hir::Expr,
cond: &hir::Expr,
body: &hir::Block)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_while");
if bcx.unreachable.get() {
return bcx;
}
let fcx = bcx.fcx;
//        bcx
//         |
//     cond_bcx_in  <--------+
//         |                 |
//     cond_bcx_out          |
//       |      |            |
//       |    body_bcx_in    |
//  cleanup_blk    |         |
//       |    body_bcx_out --+
//    next_bcx_in
let next_bcx_in = fcx.new_id_block("while_exit", loop_expr.id);
let cond_bcx_in = fcx.new_id_block("while_cond", cond.id);
let body_bcx_in = fcx.new_id_block("while_body", body.id);
fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, cond_bcx_in]);
Br(bcx, cond_bcx_in.llbb, loop_expr.debug_loc());
// compile the block where we will handle loop cleanups
let cleanup_llbb = fcx.normal_exit_block(loop_expr.id, cleanup::EXIT_BREAK);
// compile the condition
let Result {bcx: cond_bcx_out, val: cond_val} =
expr::trans(cond_bcx_in, cond).to_llbool();
CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, cleanup_llbb, cond.debug_loc());
// loop body:
let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
Br(body_bcx_out, cond_bcx_in.llbb, DebugLoc::None);
fcx.pop_loop_cleanup_scope(loop_expr.id);
return next_bcx_in;
}
pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
loop_expr: &hir::Expr,
body: &hir::Block)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_loop");
if bcx.unreachable.get() {
return bcx;
}
let fcx = bcx.fcx;
// bcx
// |
// body_bcx_in
// |
// body_bcx_out
//
// next_bcx
//
// Links between body_bcx_in and next_bcx are created by
// break statements.
let next_bcx_in = bcx.fcx.new_id_block("loop_exit", loop_expr.id);
let body_bcx_in = bcx.fcx.new_id_block("loop_body", body.id);
fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, body_bcx_in]);
Br(bcx, body_bcx_in.llbb, loop_expr.debug_loc());
let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
Br(body_bcx_out, body_bcx_in.llbb, DebugLoc::None);
fcx.pop_loop_cleanup_scope(loop_expr.id);
// If there are no predecessors for the next block, we just translated an endless loop and the
// next block is unreachable
if BasicBlock(next_bcx_in.llbb).pred_iter().next().is_none() {
Unreachable(next_bcx_in);
}
return next_bcx_in;
}
pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &hir::Expr,
opt_label: Option<ast::Name>,
exit: usize)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_break_cont");
if bcx.unreachable.get() {
return bcx;
}
let fcx = bcx.fcx;
// Locate loop that we will break to
let loop_id = match opt_label {
None => fcx.top_loop_scope(),
Some(_) => {
match bcx.tcx().expect_def(expr.id) {
Def::Label(loop_id) => loop_id,
r => {
bug!("{:?} in def-map for label", r)
}
}
}
};
// Generate appropriate cleanup code and branch
let cleanup_llbb = fcx.normal_exit_block(loop_id, exit);
Br(bcx, cleanup_llbb, expr.debug_loc());
Unreachable(bcx); // anything afterwards should be ignored
return bcx;
}
pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &hir::Expr,
label_opt: Option<ast::Name>)
-> Block<'blk, 'tcx> {
return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_BREAK);
}
pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &hir::Expr,
label_opt: Option<ast::Name>)
-> Block<'blk, 'tcx> {
return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_LOOP);
}
pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
return_expr: &hir::Expr,
retval_expr: Option<&hir::Expr>)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_ret");
if bcx.unreachable.get() {
return bcx;
}
let fcx = bcx.fcx;
let mut bcx = bcx;
if let Some(x) = retval_expr {
let dest = if fcx.llretslotptr.get().is_some() {
expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
} else {
expr::Ignore
};
bcx = expr::trans_into(bcx, &x, dest);
match dest {
expr::SaveIn(slot) if fcx.needs_ret_allocas => {
Store(bcx, slot, fcx.llretslotptr.get().unwrap());
}
_ => {}
}
}
let cleanup_llbb = fcx.return_exit_block();
Br(bcx, cleanup_llbb, return_expr.debug_loc());
Unreachable(bcx);
return bcx;
}
pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
call_info: NodeIdAndSpan,
fail_str: InternedString)
-> Block<'blk, 'tcx> {
let ccx = bcx.ccx();
let _icx = push_ctxt("trans_fail_value");
if bcx.unreachable.get() {
return bcx;
}
let v_str = C_str_slice(ccx, fail_str);
let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
let filename = token::intern_and_get_ident(&loc.file.name);
let filename = C_str_slice(ccx, filename);
let line = C_u32(ccx, loc.line as u32);
let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false);
let align = machine::llalign_of_min(ccx, val_ty(expr_file_line_const));
let expr_file_line = consts::addr_of(ccx, expr_file_line_const, align, "panic_loc");
let args = vec!(expr_file_line);
let did = langcall(bcx.tcx(), Some(call_info.span), "", PanicFnLangItem);
Callee::def(ccx, did, Substs::empty(ccx.tcx()))
.call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
}
pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
call_info: NodeIdAndSpan,
index: ValueRef,
len: ValueRef)
-> Block<'blk, 'tcx> {
let ccx = bcx.ccx();
let _icx = push_ctxt("trans_fail_bounds_check");
if bcx.unreachable.get() {
return bcx;
}
// Extract the file/line from the span
let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
let filename = token::intern_and_get_ident(&loc.file.name);
// Invoke the lang item
let filename = C_str_slice(ccx, filename);
let line = C_u32(ccx, loc.line as u32);
let file_line_const = C_struct(ccx, &[filename, line], false);
let align = machine::llalign_of_min(ccx, val_ty(file_line_const));
let file_line = consts::addr_of(ccx, file_line_const, align, "panic_bounds_check_loc");
let args = vec!(file_line, index, len);
let did = langcall(bcx.tcx(), Some(call_info.span), "", PanicBoundsCheckFnLangItem);
Callee::def(ccx, did, Substs::empty(ccx.tcx()))
.call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
}

View File

@ -1,828 +0,0 @@
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! ## The Datum module
//!
//! A `Datum` encapsulates the result of evaluating a Rust expression. It
//! contains a `ValueRef` indicating the result, a `Ty` describing
//! the Rust type, but also a *kind*. The kind indicates whether the datum
//! has cleanup scheduled (lvalue) or not (rvalue) and -- in the case of
//! rvalues -- whether or not the value is "by ref" or "by value".
//!
//! The datum API is designed to try and help you avoid memory errors like
//! forgetting to arrange cleanup or duplicating a value. The type of the
//! datum incorporates the kind, and thus reflects whether it has cleanup
//! scheduled:
//!
//! - `Datum<Lvalue>` -- by ref, cleanup scheduled
//! - `Datum<Rvalue>` -- by value or by ref, no cleanup scheduled
//! - `Datum<Expr>` -- either `Datum<Lvalue>` or `Datum<Rvalue>`
//!
//! Rvalue and expr datums are noncopyable, and most of the methods on
//! datums consume the datum itself (with some notable exceptions). This
//! reflects the fact that datums may represent affine values which ought
//! to be consumed exactly once, and if you were to try to (for example)
//! store an affine value multiple times, you would be duplicating it,
//! which would certainly be a bug.
//!
//! Some of the datum methods, however, are designed to work only on
//! copyable values such as ints or pointers. Those methods may borrow the
//! datum (`&self`) rather than consume it, but they always include
//! assertions on the type of the value represented to check that this
//! makes sense. An example is `shallow_copy()`, which duplicates
//! a datum value.
//!
//! Translating an expression always yields a `Datum<Expr>` result, but
//! the methods `to_[lr]value_datum()` can be used to coerce a
//! `Datum<Expr>` into a `Datum<Lvalue>` or `Datum<Rvalue>` as
//! needed. Coercing to an lvalue is fairly common, and generally occurs
//! whenever it is necessary to inspect a value and pull out its
//! subcomponents (for example, a match, or indexing expression). Coercing
//! to an rvalue is more unusual; it occurs when moving values from place
//! to place, such as in an assignment expression or parameter passing.
//!
//! ### Lvalues in detail
//!
//! An lvalue datum is one for which cleanup has been scheduled. Lvalue
//! datums are always located in memory, and thus the `ValueRef` for an
//! LLVM value is always a pointer to the actual Rust value. This means
//! that if the Datum has a Rust type of `int`, then the LLVM type of the
//! `ValueRef` will be `int*` (pointer to int).
//!
//! Because lvalues already have cleanups scheduled, the memory must be
//! zeroed to prevent the cleanup from taking place (presuming that the
//! Rust type needs drop in the first place, otherwise it doesn't
//! matter). The Datum code automatically performs this zeroing when the
//! value is stored to a new location, for example.
//!
//! Lvalues usually result from evaluating lvalue expressions. For
//! example, evaluating a local variable `x` yields an lvalue, as does a
//! reference to a field like `x.f` or an index `x[i]`.
//!
//! Lvalue datums can also arise by *converting* an rvalue into an lvalue.
//! This is done with the `to_lvalue_datum` method defined on
//! `Datum<Expr>`. Basically this method just schedules cleanup if the
//! datum is an rvalue, possibly storing the value into a stack slot first
//! if needed. Converting rvalues into lvalues occurs in constructs like
//! `&foo()` or `match foo() { ref x => ... }`, where the user is
//! implicitly requesting a temporary.
//!
//! ### Rvalues in detail
//!
//! Rvalue datums are values with no cleanup scheduled. One must be
//! careful with rvalue datums to ensure that cleanup is properly
//! arranged, usually by converting to an lvalue datum or by invoking the
//! `add_clean` method.
//!
//! ### Scratch datums
//!
//! Sometimes you need some temporary scratch space. The functions
//! `[lr]value_scratch_datum()` can be used to get temporary stack
//! space. As their name suggests, they yield lvalues and rvalues
//! respectively. That is, the slot from `lvalue_scratch_datum` will have
//! cleanup arranged, and the slot from `rvalue_scratch_datum` does not.
pub use self::Expr::*;
pub use self::RvalueMode::*;
use llvm::ValueRef;
use adt;
use base::*;
use build::{Load, Store};
use common::*;
use cleanup;
use cleanup::{CleanupMethods, DropHintDatum, DropHintMethods};
use expr;
use tvec;
use value::Value;
use rustc::ty::Ty;
use std::fmt;
use syntax::ast;
use syntax_pos::DUMMY_SP;
/// A `Datum` encapsulates the result of evaluating an expression. It
/// describes where the value is stored, what Rust type the value has,
/// whether it is addressed by reference, and so forth. Please refer
/// to the section on datums in `README.md` for more details.
#[derive(Clone, Copy)]
pub struct Datum<'tcx, K> {
/// The llvm value. This is either a pointer to the Rust value or
/// the value itself, depending on `kind` below.
pub val: ValueRef,
/// The rust type of the value.
pub ty: Ty<'tcx>,
/// Indicates whether this is by-ref or by-value.
pub kind: K,
}
impl<'tcx, K: fmt::Debug> fmt::Debug for Datum<'tcx, K> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Datum({:?}, {:?}, {:?})",
Value(self.val), self.ty, self.kind)
}
}
pub struct DatumBlock<'blk, 'tcx: 'blk, K> {
pub bcx: Block<'blk, 'tcx>,
pub datum: Datum<'tcx, K>,
}
#[derive(Debug)]
pub enum Expr {
/// a fresh value that was produced and which has no cleanup yet
/// because it has not yet "landed" into its permanent home
RvalueExpr(Rvalue),
/// `val` is a pointer into memory for which a cleanup is scheduled
/// (and thus has type *T). If you move out of an Lvalue, you must
/// zero out the memory (FIXME #5016).
LvalueExpr(Lvalue),
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum DropFlagInfo {
DontZeroJustUse(ast::NodeId),
ZeroAndMaintain(ast::NodeId),
None,
}
impl DropFlagInfo {
pub fn must_zero(&self) -> bool {
match *self {
DropFlagInfo::DontZeroJustUse(..) => false,
DropFlagInfo::ZeroAndMaintain(..) => true,
DropFlagInfo::None => true,
}
}
pub fn hint_datum<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
-> Option<DropHintDatum<'tcx>> {
let id = match *self {
DropFlagInfo::None => return None,
DropFlagInfo::DontZeroJustUse(id) |
DropFlagInfo::ZeroAndMaintain(id) => id,
};
let hints = bcx.fcx.lldropflag_hints.borrow();
let retval = hints.hint_datum(id);
assert!(retval.is_some(), "An id (={}) means must have a hint", id);
retval
}
}
// FIXME: having Lvalue be `Copy` is a bit of a footgun, since clients
// may not realize that subparts of an Lvalue can have a subset of
// drop-flags associated with them, while this as written will just
// memcpy the drop_flag_info. But, it is an easier way to get `_match`
// off the ground to just let this be `Copy` for now.
#[derive(Copy, Clone, Debug)]
pub struct Lvalue {
pub source: &'static str,
pub drop_flag_info: DropFlagInfo
}
#[derive(Debug)]
pub struct Rvalue {
pub mode: RvalueMode
}
/// Classifies what action we should take when a value is moved away
/// with respect to its drop-flag.
///
/// Long term there will be no need for this classification: all flags
/// (which will be stored on the stack frame) will have the same
/// interpretation and maintenance code associated with them.
#[derive(Copy, Clone, Debug)]
pub enum HintKind {
/// When the value is moved, set the drop-flag to "dropped"
/// (i.e. "zero the flag", even when the specific representation
/// is not literally 0) and when it is reinitialized, set the
/// drop-flag back to "initialized".
ZeroAndMaintain,
/// When the value is moved, do not set the drop-flag to "dropped".
/// However, continue to read the drop-flag in deciding whether to
/// drop. (In essence, the path/fragment in question will never
/// need to be dropped at the points where it is moved away by
/// this code, but we are defending against the scenario where
/// some *other* code could move away (or drop) the value and thus
/// zero the flag, which is why we will still read from it.)
DontZeroJustUse,
}
impl Lvalue { // Constructors for various Lvalues.
pub fn new<'blk, 'tcx>(source: &'static str) -> Lvalue {
debug!("Lvalue at {} no drop flag info", source);
Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
}
pub fn new_dropflag_hint(source: &'static str) -> Lvalue {
debug!("Lvalue at {} is drop flag hint", source);
Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
}
pub fn new_with_hint<'blk, 'tcx>(source: &'static str,
bcx: Block<'blk, 'tcx>,
id: ast::NodeId,
k: HintKind) -> Lvalue {
let (opt_id, info) = {
let hint_available = Lvalue::has_dropflag_hint(bcx, id) &&
bcx.tcx().sess.nonzeroing_move_hints();
let info = match k {
HintKind::ZeroAndMaintain if hint_available =>
DropFlagInfo::ZeroAndMaintain(id),
HintKind::DontZeroJustUse if hint_available =>
DropFlagInfo::DontZeroJustUse(id),
_ =>
DropFlagInfo::None,
};
(Some(id), info)
};
debug!("Lvalue at {}, id: {:?} info: {:?}", source, opt_id, info);
Lvalue { source: source, drop_flag_info: info }
}
} // end Lvalue constructor methods.
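// Illustrative sketch, not part of the original sources: constructing the
// lvalue for a `let`/`match` binding while consulting the drop-flag hints.
// `bcx`, `binding_id`, `llval` and `ty` stand in for values the enclosing
// translation would already have.
fn example_binding_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      binding_id: ast::NodeId,
                                      llval: ValueRef,
                                      ty: Ty<'tcx>)
                                      -> Datum<'tcx, Lvalue> {
    // With a hint available (and non-zeroing move hints enabled), moving out
    // of this lvalue only marks the hint as "moved" instead of drop-filling
    // the memory.
    let kind = Lvalue::new_with_hint("example::binding", bcx, binding_id,
                                     HintKind::DontZeroJustUse);
    Datum::new(llval, ty, kind)
}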
impl Lvalue {
fn has_dropflag_hint<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
id: ast::NodeId) -> bool {
let hints = bcx.fcx.lldropflag_hints.borrow();
hints.has_hint(id)
}
pub fn dropflag_hint<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
-> Option<DropHintDatum<'tcx>> {
self.drop_flag_info.hint_datum(bcx)
}
}
impl Rvalue {
pub fn new(m: RvalueMode) -> Rvalue {
Rvalue { mode: m }
}
}
// Make Datum linear for more type safety.
impl Drop for Rvalue {
fn drop(&mut self) { }
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum RvalueMode {
/// `val` is a pointer to the actual value (and thus has type *T)
ByRef,
/// `val` is the actual value (*only used for immediates* like ints, ptrs)
ByValue,
}
pub fn immediate_rvalue<'tcx>(val: ValueRef, ty: Ty<'tcx>) -> Datum<'tcx, Rvalue> {
return Datum::new(val, ty, Rvalue::new(ByValue));
}
pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
val: ValueRef,
ty: Ty<'tcx>)
-> DatumBlock<'blk, 'tcx, Rvalue> {
return DatumBlock::new(bcx, immediate_rvalue(val, ty))
}
/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
/// it. The memory will be dropped upon exit from `scope`. The callback `populate` should
/// initialize the memory.
///
/// The flag `zero` indicates how the temporary space itself should be
/// initialized at the outset of the function; the only time that
/// `InitAlloca::Uninit` is a valid value for `zero` is when the
/// caller can prove that either (1.) the code injected by `populate`
/// onto `bcx` always dominates the end of `scope`, or (2.) the data
/// being allocated has no associated destructor.
pub fn lvalue_scratch_datum<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
ty: Ty<'tcx>,
name: &str,
zero: InitAlloca,
scope: cleanup::ScopeId,
populate: F)
-> DatumBlock<'blk, 'tcx, Lvalue> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>,
{
// Very subtle: potentially initialize the scratch memory at the point where it is alloca'ed.
// (See discussion at Issue 30530.)
let scratch = alloc_ty_init(bcx, ty, zero, name);
debug!("lvalue_scratch_datum scope={:?} scratch={:?} ty={:?}",
scope, Value(scratch), ty);
// Subtle. Populate the scratch memory *before* scheduling cleanup.
let bcx = populate(bcx, scratch);
bcx.fcx.schedule_drop_mem(scope, scratch, ty, None);
DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum")))
}
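// Illustrative sketch, not part of the original sources: allocating scratch
// space that is dropped when `scope` exits, initialized from an existing
// by-ref source before the cleanup is scheduled. `bcx`, `src`, `ty` and
// `scope` are assumed to be supplied by the caller.
fn example_scratch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               src: ValueRef,
                               ty: Ty<'tcx>,
                               scope: cleanup::ScopeId)
                               -> DatumBlock<'blk, 'tcx, Lvalue> {
    lvalue_scratch_datum(bcx, ty, "example_tmp", InitAlloca::Dropped, scope,
                         |bcx, llval| {
                             // Populate the memory *before* the drop is
                             // scheduled, so an unwind never sees garbage.
                             memcpy_ty(bcx, llval, src, ty);
                             bcx
                         })
}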
/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
/// it. The slot's lifetime is started, but no cleanup is scheduled: you must arrange any
/// cleanups yourself!
pub fn rvalue_scratch_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ty: Ty<'tcx>,
name: &str)
-> Datum<'tcx, Rvalue> {
let scratch = alloc_ty(bcx, ty, name);
call_lifetime_start(bcx, scratch);
Datum::new(scratch, ty, Rvalue::new(ByRef))
}
/// Indicates the "appropriate" mode for this value, which is either by ref or by value, depending
/// on whether the type is immediate or not.
pub fn appropriate_rvalue_mode<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ty: Ty<'tcx>) -> RvalueMode {
if type_is_immediate(ccx, ty) {
ByValue
} else {
ByRef
}
}
fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode,
fcx: &FunctionContext<'a, 'tcx>,
scope: cleanup::ScopeId,
val: ValueRef,
ty: Ty<'tcx>) {
debug!("add_rvalue_clean scope={:?} val={:?} ty={:?}",
scope, Value(val), ty);
match mode {
ByValue => { fcx.schedule_drop_immediate(scope, val, ty); }
ByRef => {
fcx.schedule_lifetime_end(scope, val);
fcx.schedule_drop_mem(scope, val, ty, None);
}
}
}
pub trait KindOps {
/// Take appropriate action after the value in `datum` has been
/// stored to a new location.
fn post_store<'blk, 'tcx>(&self,
bcx: Block<'blk, 'tcx>,
val: ValueRef,
ty: Ty<'tcx>)
-> Block<'blk, 'tcx>;
/// True if this mode is a reference mode, meaning that the datum's
/// val field is a pointer to the actual value
fn is_by_ref(&self) -> bool;
/// Converts to an Expr kind
fn to_expr_kind(self) -> Expr;
}
impl KindOps for Rvalue {
fn post_store<'blk, 'tcx>(&self,
bcx: Block<'blk, 'tcx>,
_val: ValueRef,
_ty: Ty<'tcx>)
-> Block<'blk, 'tcx> {
// No cleanup is scheduled for an rvalue, so we don't have
// to do anything after a move to cancel or duplicate it.
if self.is_by_ref() {
call_lifetime_end(bcx, _val);
}
bcx
}
fn is_by_ref(&self) -> bool {
self.mode == ByRef
}
fn to_expr_kind(self) -> Expr {
RvalueExpr(self)
}
}
impl KindOps for Lvalue {
/// If an lvalue is moved, we must zero out the memory in which it resides so as to cancel
/// cleanup. If an @T lvalue is copied, we must increment the reference count.
fn post_store<'blk, 'tcx>(&self,
bcx: Block<'blk, 'tcx>,
val: ValueRef,
ty: Ty<'tcx>)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("<Lvalue as KindOps>::post_store");
if bcx.fcx.type_needs_drop(ty) {
// cancel cleanup of affine values:
// 1. if it has drop-hint, mark as moved; then code
// aware of drop-hint won't bother calling the
// drop-glue itself.
if let Some(hint_datum) = self.drop_flag_info.hint_datum(bcx) {
let moved_hint_byte = adt::DTOR_MOVED_HINT;
let hint_llval = hint_datum.to_value().value();
Store(bcx, C_u8(bcx.fcx.ccx, moved_hint_byte), hint_llval);
}
// 2. if the drop info says it's necessary, drop-fill the memory.
if self.drop_flag_info.must_zero() {
let () = drop_done_fill_mem(bcx, val, ty);
}
bcx
} else {
// FIXME (#5016) would be nice to assert this, but we have
// to allow for e.g. DontZeroJustUse flags, for now.
//
// (The dropflag hint construction should be taking
// !type_needs_drop into account; earlier analysis phases
// may not have all the info they need to include such
// information properly, I think; in particular the
// fragments analysis works on a non-monomorphized view of
// the code.)
//
// assert_eq!(self.drop_flag_info, DropFlagInfo::None);
bcx
}
}
fn is_by_ref(&self) -> bool {
true
}
fn to_expr_kind(self) -> Expr {
LvalueExpr(self)
}
}
impl KindOps for Expr {
fn post_store<'blk, 'tcx>(&self,
bcx: Block<'blk, 'tcx>,
val: ValueRef,
ty: Ty<'tcx>)
-> Block<'blk, 'tcx> {
match *self {
LvalueExpr(ref l) => l.post_store(bcx, val, ty),
RvalueExpr(ref r) => r.post_store(bcx, val, ty),
}
}
fn is_by_ref(&self) -> bool {
match *self {
LvalueExpr(ref l) => l.is_by_ref(),
RvalueExpr(ref r) => r.is_by_ref()
}
}
fn to_expr_kind(self) -> Expr {
self
}
}
impl<'tcx> Datum<'tcx, Rvalue> {
/// Schedules a cleanup for this datum in the given scope. That means that this datum is no
/// longer an rvalue datum; hence, this function consumes the datum and returns the contained
/// ValueRef.
pub fn add_clean<'a>(self,
fcx: &FunctionContext<'a, 'tcx>,
scope: cleanup::ScopeId)
-> ValueRef {
add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty);
self.val
}
/// Returns an lvalue datum (that is, a by-ref datum with cleanup scheduled). If `self` is not
/// already an lvalue, cleanup will be scheduled in the given `scope`.
pub fn to_lvalue_datum_in_scope<'blk>(self,
bcx: Block<'blk, 'tcx>,
name: &str,
scope: cleanup::ScopeId)
-> DatumBlock<'blk, 'tcx, Lvalue> {
let fcx = bcx.fcx;
match self.kind.mode {
ByRef => {
add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty);
DatumBlock::new(bcx, Datum::new(
self.val,
self.ty,
Lvalue::new("datum::to_lvalue_datum_in_scope")))
}
ByValue => {
lvalue_scratch_datum(
bcx, self.ty, name, InitAlloca::Dropped, scope,
|bcx, llval| {
debug!("populate call for Datum::to_lvalue_datum_in_scope \
self.ty={:?}", self.ty);
// do not call_lifetime_start here; the
// `InitAlloca::Dropped` will start the scratch
// value's lifetime at the opening of the function body.
let bcx = self.store_to(bcx, llval);
bcx.fcx.schedule_lifetime_end(scope, llval);
bcx
})
}
}
}
pub fn to_ref_datum<'blk>(self, bcx: Block<'blk, 'tcx>)
-> DatumBlock<'blk, 'tcx, Rvalue> {
let mut bcx = bcx;
match self.kind.mode {
ByRef => DatumBlock::new(bcx, self),
ByValue => {
let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref");
bcx = self.store_to(bcx, scratch.val);
DatumBlock::new(bcx, scratch)
}
}
}
pub fn to_appropriate_datum<'blk>(self, bcx: Block<'blk, 'tcx>)
-> DatumBlock<'blk, 'tcx, Rvalue> {
match self.appropriate_rvalue_mode(bcx.ccx()) {
ByRef => {
self.to_ref_datum(bcx)
}
ByValue => {
match self.kind.mode {
ByValue => DatumBlock::new(bcx, self),
ByRef => {
let llval = load_ty(bcx, self.val, self.ty);
call_lifetime_end(bcx, self.val);
DatumBlock::new(bcx, Datum::new(llval, self.ty, Rvalue::new(ByValue)))
}
}
}
}
}
}
/// Methods suitable for "expr" datums that could be either lvalues or
/// rvalues. These include coercions into lvalues/rvalues but also a number
/// of more general operations. (Some of those operations could be moved to
/// the more general `impl<K> Datum<K>`, but it's convenient to have them
/// here since we can `match self.kind` rather than having to implement
/// generic methods in `KindOps`.)
impl<'tcx> Datum<'tcx, Expr> {
fn match_kind<R, F, G>(self, if_lvalue: F, if_rvalue: G) -> R where
F: FnOnce(Datum<'tcx, Lvalue>) -> R,
G: FnOnce(Datum<'tcx, Rvalue>) -> R,
{
let Datum { val, ty, kind } = self;
match kind {
LvalueExpr(l) => if_lvalue(Datum::new(val, ty, l)),
RvalueExpr(r) => if_rvalue(Datum::new(val, ty, r)),
}
}
/// Asserts that this datum *is* an lvalue and returns it.
#[allow(dead_code)] // potentially useful
pub fn assert_lvalue(self) -> Datum<'tcx, Lvalue> {
self.match_kind(
|d| d,
|_| bug!("assert_lvalue given rvalue"))
}
pub fn store_to_dest<'blk>(self,
bcx: Block<'blk, 'tcx>,
dest: expr::Dest,
expr_id: ast::NodeId)
-> Block<'blk, 'tcx> {
match dest {
expr::Ignore => {
self.add_clean_if_rvalue(bcx, expr_id);
bcx
}
expr::SaveIn(addr) => {
self.store_to(bcx, addr)
}
}
}
/// Arranges cleanup for `self` if it is an rvalue. Use when you are done working with a value
/// that may need drop.
pub fn add_clean_if_rvalue<'blk>(self,
bcx: Block<'blk, 'tcx>,
expr_id: ast::NodeId) {
self.match_kind(
|_| { /* Nothing to do, cleanup already arranged */ },
|r| {
let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
r.add_clean(bcx.fcx, scope);
})
}
pub fn to_lvalue_datum<'blk>(self,
bcx: Block<'blk, 'tcx>,
name: &str,
expr_id: ast::NodeId)
-> DatumBlock<'blk, 'tcx, Lvalue> {
debug!("to_lvalue_datum self: {:?}", self);
self.match_kind(
|l| DatumBlock::new(bcx, l),
|r| {
let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
r.to_lvalue_datum_in_scope(bcx, name, scope)
})
}
/// Ensures that we have an rvalue datum (that is, a datum with no cleanup scheduled).
pub fn to_rvalue_datum<'blk>(self,
bcx: Block<'blk, 'tcx>,
name: &'static str)
-> DatumBlock<'blk, 'tcx, Rvalue> {
self.match_kind(
|l| {
let mut bcx = bcx;
match l.appropriate_rvalue_mode(bcx.ccx()) {
ByRef => {
let scratch = rvalue_scratch_datum(bcx, l.ty, name);
bcx = l.store_to(bcx, scratch.val);
DatumBlock::new(bcx, scratch)
}
ByValue => {
let v = load_ty(bcx, l.val, l.ty);
bcx = l.kind.post_store(bcx, l.val, l.ty);
DatumBlock::new(bcx, Datum::new(v, l.ty, Rvalue::new(ByValue)))
}
}
},
|r| DatumBlock::new(bcx, r))
}
}
/// Methods suitable only for lvalues. These include the various
/// operations to extract components out of compound data structures,
/// such as extracting the field from a struct or a particular element
/// from an array.
impl<'tcx> Datum<'tcx, Lvalue> {
/// Converts a datum into a by-ref value. The datum type must be one which is always passed by
/// reference.
pub fn to_llref(self) -> ValueRef {
self.val
}
// Extracts a component of a compound data structure (e.g., a field from a
// struct). Note that if `self` is an opened, unsized type then the returned
// datum may also be unsized _without the size information_. It is the
// caller's responsibility to package the result in some way to make a valid
// datum in that case (e.g., by making a fat pointer or opened pair).
pub fn get_element<'blk, F>(&self, bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>,
gep: F)
-> Datum<'tcx, Lvalue> where
F: FnOnce(adt::MaybeSizedValue) -> ValueRef,
{
let val = if type_is_sized(bcx.tcx(), self.ty) {
let val = adt::MaybeSizedValue::sized(self.val);
gep(val)
} else {
let val = adt::MaybeSizedValue::unsized_(
Load(bcx, expr::get_dataptr(bcx, self.val)),
Load(bcx, expr::get_meta(bcx, self.val)));
gep(val)
};
Datum {
val: val,
kind: Lvalue::new("Datum::get_element"),
ty: ty,
}
}
pub fn get_vec_base_and_len<'blk>(&self, bcx: Block<'blk, 'tcx>)
-> (ValueRef, ValueRef) {
//! Converts a vector into the slice pair.
tvec::get_base_and_len(bcx, self.val, self.ty)
}
}
/// Generic methods applicable to any sort of datum.
impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> {
pub fn new(val: ValueRef, ty: Ty<'tcx>, kind: K) -> Datum<'tcx, K> {
Datum { val: val, ty: ty, kind: kind }
}
pub fn to_expr_datum(self) -> Datum<'tcx, Expr> {
let Datum { val, ty, kind } = self;
Datum { val: val, ty: ty, kind: kind.to_expr_kind() }
}
/// Moves or copies this value into a new home, as appropriate depending on the type of the
/// datum. This method consumes the datum, since it would be incorrect to go on using the datum
/// if the value represented is affine (and hence the value is moved).
pub fn store_to<'blk>(self,
bcx: Block<'blk, 'tcx>,
dst: ValueRef)
-> Block<'blk, 'tcx> {
self.shallow_copy_raw(bcx, dst);
self.kind.post_store(bcx, self.val, self.ty)
}
/// Helper function that performs a shallow copy of this value into `dst`, which should be a
/// pointer to a memory location suitable for `self.ty`. `dst` should contain uninitialized
/// memory (either newly allocated, zeroed, or dropped).
///
/// This function is private to datums because it leaves memory in an unstable state, where the
/// source value has been copied but not zeroed. Public methods are `store_to` (if you no
/// longer need the source value) or `shallow_copy` (if you wish the source value to remain
/// valid).
fn shallow_copy_raw<'blk>(&self,
bcx: Block<'blk, 'tcx>,
dst: ValueRef)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("copy_to_no_check");
if type_is_zero_size(bcx.ccx(), self.ty) {
return bcx;
}
if self.kind.is_by_ref() {
memcpy_ty(bcx, dst, self.val, self.ty);
} else {
store_ty(bcx, self.val, dst, self.ty);
}
return bcx;
}
/// Copies the value into a new location. This function always preserves the existing datum as
/// a valid value. Therefore, it does not consume `self` and, also, cannot be applied to affine
/// values (since they must never be duplicated).
pub fn shallow_copy<'blk>(&self,
bcx: Block<'blk, 'tcx>,
dst: ValueRef)
-> Block<'blk, 'tcx> {
assert!(!self.ty.moves_by_default(bcx.tcx(),
&bcx.tcx().empty_parameter_environment(), DUMMY_SP));
self.shallow_copy_raw(bcx, dst)
}
/// See the `appropriate_rvalue_mode()` function
pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>)
-> RvalueMode {
appropriate_rvalue_mode(ccx, self.ty)
}
/// Converts `self` into a by-value `ValueRef`. Consumes this datum (i.e., absolves you of
/// responsibility to clean up the value). For this to work, the value must be something
/// scalar-ish (like an int or a pointer) which (1) does not require drop glue and (2) is
/// naturally passed around by value, and not by reference.
pub fn to_llscalarish<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
assert!(!bcx.fcx.type_needs_drop(self.ty));
assert!(self.appropriate_rvalue_mode(bcx.ccx()) == ByValue);
if self.kind.is_by_ref() {
load_ty(bcx, self.val, self.ty)
} else {
self.val
}
}
pub fn to_llbool<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
assert!(self.ty.is_bool());
self.to_llscalarish(bcx)
}
}
impl<'blk, 'tcx, K> DatumBlock<'blk, 'tcx, K> {
pub fn new(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, K>)
-> DatumBlock<'blk, 'tcx, K> {
DatumBlock { bcx: bcx, datum: datum }
}
}
impl<'blk, 'tcx, K: KindOps + fmt::Debug> DatumBlock<'blk, 'tcx, K> {
pub fn to_expr_datumblock(self) -> DatumBlock<'blk, 'tcx, Expr> {
DatumBlock::new(self.bcx, self.datum.to_expr_datum())
}
}
impl<'blk, 'tcx> DatumBlock<'blk, 'tcx, Expr> {
pub fn store_to_dest(self,
dest: expr::Dest,
expr_id: ast::NodeId) -> Block<'blk, 'tcx> {
let DatumBlock { bcx, datum } = self;
datum.store_to_dest(bcx, dest, expr_id)
}
pub fn to_llbool(self) -> Result<'blk, 'tcx> {
let DatumBlock { datum, bcx } = self;
Result::new(bcx, datum.to_llbool(bcx))
}
}

View File

@ -15,58 +15,15 @@ use super::utils::{DIB, span_start};
use llvm;
use llvm::debuginfo::{DIScope, DISubprogram};
use common::{CrateContext, FunctionContext};
use rustc::hir::pat_util;
use rustc::mir::repr::{Mir, VisibilityScope};
use rustc::util::nodemap::NodeMap;
use libc::c_uint;
use std::ptr;
use syntax_pos::{Span, Pos};
use syntax::{ast, codemap};
use syntax_pos::Pos;
use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use rustc::hir::{self, PatKind};
// This procedure builds the *scope map* for a given function, which maps any
// given ast::NodeId in the function's AST to the correct DIScope metadata instance.
//
// This builder procedure walks the AST in execution order and keeps track of
// what belongs to which scope, creating DIScope DIEs along the way, and
// introducing *artificial* lexical scope descriptors where necessary. These
// artificial scopes allow GDB to correctly handle name shadowing.
pub fn create_scope_map(cx: &CrateContext,
args: &[hir::Arg],
fn_entry_block: &hir::Block,
fn_metadata: DISubprogram,
fn_ast_id: ast::NodeId)
-> NodeMap<DIScope> {
let mut scope_map = NodeMap();
let mut scope_stack = vec!(ScopeStackEntry { scope_metadata: fn_metadata, name: None });
scope_map.insert(fn_ast_id, fn_metadata);
// Push argument identifiers onto the stack so arguments integrate nicely
// with variable shadowing.
for arg in args {
pat_util::pat_bindings(&arg.pat, |_, node_id, _, path1| {
scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata,
name: Some(path1.node) });
scope_map.insert(node_id, fn_metadata);
})
}
// Clang creates a separate scope for function bodies, so let's do this too.
with_new_scope(cx,
fn_entry_block.span,
&mut scope_stack,
&mut scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, fn_entry_block, scope_stack, scope_map);
});
return scope_map;
}
/// Produce DIScope DIEs for each MIR Scope which has variables defined in it.
/// If debuginfo is disabled, the returned vector is empty.
@ -141,405 +98,3 @@ fn make_mir_scope(ccx: &CrateContext,
loc.col.to_usize() as c_uint)
};
}
// local helper functions for walking the AST.
fn with_new_scope<F>(cx: &CrateContext,
scope_span: Span,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>,
inner_walk: F) where
F: FnOnce(&CrateContext, &mut Vec<ScopeStackEntry>, &mut NodeMap<DIScope>),
{
// Create a new lexical scope and push it onto the stack
let loc = span_start(cx, scope_span);
let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path);
let parent_scope = scope_stack.last().unwrap().scope_metadata;
let scope_metadata = unsafe {
llvm::LLVMRustDIBuilderCreateLexicalBlock(
DIB(cx),
parent_scope,
file_metadata,
loc.line as c_uint,
loc.col.to_usize() as c_uint)
};
scope_stack.push(ScopeStackEntry { scope_metadata: scope_metadata, name: None });
inner_walk(cx, scope_stack, scope_map);
// pop artificial scopes
while scope_stack.last().unwrap().name.is_some() {
scope_stack.pop();
}
if scope_stack.last().unwrap().scope_metadata != scope_metadata {
span_bug!(scope_span, "debuginfo: Inconsistency in scope management.");
}
scope_stack.pop();
}
struct ScopeStackEntry {
scope_metadata: DIScope,
name: Option<ast::Name>
}
fn walk_block(cx: &CrateContext,
block: &hir::Block,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
scope_map.insert(block.id, scope_stack.last().unwrap().scope_metadata);
// The interesting things here are statements and the concluding expression.
for statement in &block.stmts {
scope_map.insert(statement.node.id(),
scope_stack.last().unwrap().scope_metadata);
match statement.node {
hir::StmtDecl(ref decl, _) =>
walk_decl(cx, &decl, scope_stack, scope_map),
hir::StmtExpr(ref exp, _) |
hir::StmtSemi(ref exp, _) =>
walk_expr(cx, &exp, scope_stack, scope_map),
}
}
if let Some(ref exp) = block.expr {
walk_expr(cx, &exp, scope_stack, scope_map);
}
}
fn walk_decl(cx: &CrateContext,
decl: &hir::Decl,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
match *decl {
codemap::Spanned { node: hir::DeclLocal(ref local), .. } => {
scope_map.insert(local.id, scope_stack.last().unwrap().scope_metadata);
walk_pattern(cx, &local.pat, scope_stack, scope_map);
if let Some(ref exp) = local.init {
walk_expr(cx, &exp, scope_stack, scope_map);
}
}
_ => ()
}
}
fn walk_pattern(cx: &CrateContext,
pat: &hir::Pat,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
// Unfortunately, we cannot just use pat_util::pat_bindings() or
// ast_util::walk_pat() here because we have to visit *all* nodes in
// order to put them into the scope map. The above functions don't do that.
match pat.node {
PatKind::Binding(_, ref path1, ref sub_pat_opt) => {
// LLVM does not properly generate 'DW_AT_start_scope' fields
// for variable DIEs. For this reason we have to introduce
// an artificial scope at bindings whenever a variable with
// the same name is declared in *any* parent scope.
//
// Otherwise the following error occurs:
//
// let x = 10;
//
// do_something(); // 'gdb print x' correctly prints 10
//
// {
// do_something(); // 'gdb print x' prints 0, because it
// // already reads the uninitialized 'x'
// // from the next line...
// let x = 100;
// do_something(); // 'gdb print x' correctly prints 100
// }
// Is there already a binding with that name?
// N.B.: this comparison must be UNhygienic... because
// gdb knows nothing about the context, so any two
// variables with the same name will cause the problem.
let name = path1.node;
let need_new_scope = scope_stack
.iter()
.any(|entry| entry.name == Some(name));
if need_new_scope {
// Create a new lexical scope and push it onto the stack
let loc = span_start(cx, pat.span);
let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path);
let parent_scope = scope_stack.last().unwrap().scope_metadata;
let scope_metadata = unsafe {
llvm::LLVMRustDIBuilderCreateLexicalBlock(
DIB(cx),
parent_scope,
file_metadata,
loc.line as c_uint,
loc.col.to_usize() as c_uint)
};
scope_stack.push(ScopeStackEntry {
scope_metadata: scope_metadata,
name: Some(name)
});
} else {
// Push a new entry anyway so the name can be found
let prev_metadata = scope_stack.last().unwrap().scope_metadata;
scope_stack.push(ScopeStackEntry {
scope_metadata: prev_metadata,
name: Some(name)
});
}
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
if let Some(ref sub_pat) = *sub_pat_opt {
walk_pattern(cx, &sub_pat, scope_stack, scope_map);
}
}
PatKind::Wild => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
}
PatKind::TupleStruct(_, ref sub_pats, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for p in sub_pats {
walk_pattern(cx, &p, scope_stack, scope_map);
}
}
PatKind::Path(..) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
}
PatKind::Struct(_, ref field_pats, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for &codemap::Spanned {
node: hir::FieldPat { pat: ref sub_pat, .. },
..
} in field_pats {
walk_pattern(cx, &sub_pat, scope_stack, scope_map);
}
}
PatKind::Tuple(ref sub_pats, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for sub_pat in sub_pats {
walk_pattern(cx, &sub_pat, scope_stack, scope_map);
}
}
PatKind::Box(ref sub_pat) | PatKind::Ref(ref sub_pat, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_pattern(cx, &sub_pat, scope_stack, scope_map);
}
PatKind::Lit(ref exp) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_expr(cx, &exp, scope_stack, scope_map);
}
PatKind::Range(ref exp1, ref exp2) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_expr(cx, &exp1, scope_stack, scope_map);
walk_expr(cx, &exp2, scope_stack, scope_map);
}
PatKind::Vec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for sub_pat in front_sub_pats {
walk_pattern(cx, &sub_pat, scope_stack, scope_map);
}
if let Some(ref sub_pat) = *middle_sub_pats {
walk_pattern(cx, &sub_pat, scope_stack, scope_map);
}
for sub_pat in back_sub_pats {
walk_pattern(cx, &sub_pat, scope_stack, scope_map);
}
}
}
}
fn walk_expr(cx: &CrateContext,
exp: &hir::Expr,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut NodeMap<DIScope>) {
scope_map.insert(exp.id, scope_stack.last().unwrap().scope_metadata);
match exp.node {
hir::ExprLit(_) |
hir::ExprBreak(_) |
hir::ExprAgain(_) |
hir::ExprPath(..) => {}
hir::ExprCast(ref sub_exp, _) |
hir::ExprType(ref sub_exp, _) |
hir::ExprAddrOf(_, ref sub_exp) |
hir::ExprField(ref sub_exp, _) |
hir::ExprTupField(ref sub_exp, _) =>
walk_expr(cx, &sub_exp, scope_stack, scope_map),
hir::ExprBox(ref sub_expr) => {
walk_expr(cx, &sub_expr, scope_stack, scope_map);
}
hir::ExprRet(ref exp_opt) => match *exp_opt {
Some(ref sub_exp) => walk_expr(cx, &sub_exp, scope_stack, scope_map),
None => ()
},
hir::ExprUnary(_, ref sub_exp) => {
walk_expr(cx, &sub_exp, scope_stack, scope_map);
}
hir::ExprAssignOp(_, ref lhs, ref rhs) |
hir::ExprIndex(ref lhs, ref rhs) |
hir::ExprBinary(_, ref lhs, ref rhs) => {
walk_expr(cx, &lhs, scope_stack, scope_map);
walk_expr(cx, &rhs, scope_stack, scope_map);
}
hir::ExprVec(ref init_expressions) |
hir::ExprTup(ref init_expressions) => {
for ie in init_expressions {
walk_expr(cx, &ie, scope_stack, scope_map);
}
}
hir::ExprAssign(ref sub_exp1, ref sub_exp2) |
hir::ExprRepeat(ref sub_exp1, ref sub_exp2) => {
walk_expr(cx, &sub_exp1, scope_stack, scope_map);
walk_expr(cx, &sub_exp2, scope_stack, scope_map);
}
hir::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => {
walk_expr(cx, &cond_exp, scope_stack, scope_map);
with_new_scope(cx,
then_block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &then_block, scope_stack, scope_map);
});
match *opt_else_exp {
Some(ref else_exp) =>
walk_expr(cx, &else_exp, scope_stack, scope_map),
_ => ()
}
}
hir::ExprWhile(ref cond_exp, ref loop_body, _) => {
walk_expr(cx, &cond_exp, scope_stack, scope_map);
with_new_scope(cx,
loop_body.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &loop_body, scope_stack, scope_map);
})
}
hir::ExprLoop(ref block, _) |
hir::ExprBlock(ref block) => {
with_new_scope(cx,
block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
walk_block(cx, &block, scope_stack, scope_map);
})
}
hir::ExprClosure(_, ref decl, ref block, _) => {
with_new_scope(cx,
block.span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
for &hir::Arg { pat: ref pattern, .. } in &decl.inputs {
walk_pattern(cx, &pattern, scope_stack, scope_map);
}
walk_block(cx, &block, scope_stack, scope_map);
})
}
hir::ExprCall(ref fn_exp, ref args) => {
walk_expr(cx, &fn_exp, scope_stack, scope_map);
for arg_exp in args {
walk_expr(cx, &arg_exp, scope_stack, scope_map);
}
}
hir::ExprMethodCall(_, _, ref args) => {
for arg_exp in args {
walk_expr(cx, &arg_exp, scope_stack, scope_map);
}
}
hir::ExprMatch(ref discriminant_exp, ref arms, _) => {
walk_expr(cx, &discriminant_exp, scope_stack, scope_map);
// For each arm we have to first walk the pattern as these might
// introduce new artificial scopes. It should be sufficient to
// walk only one pattern per arm, as they all must contain the
// same binding names.
for arm_ref in arms {
let arm_span = arm_ref.pats[0].span;
with_new_scope(cx,
arm_span,
scope_stack,
scope_map,
|cx, scope_stack, scope_map| {
for pat in &arm_ref.pats {
walk_pattern(cx, &pat, scope_stack, scope_map);
}
if let Some(ref guard_exp) = arm_ref.guard {
walk_expr(cx, &guard_exp, scope_stack, scope_map)
}
walk_expr(cx, &arm_ref.body, scope_stack, scope_map);
})
}
}
hir::ExprStruct(_, ref fields, ref base_exp) => {
for &hir::Field { expr: ref exp, .. } in fields {
walk_expr(cx, &exp, scope_stack, scope_map);
}
match *base_exp {
Some(ref exp) => walk_expr(cx, &exp, scope_stack, scope_map),
None => ()
}
}
hir::ExprInlineAsm(_, ref outputs, ref inputs) => {
for output in outputs {
walk_expr(cx, output, scope_stack, scope_map);
}
for input in inputs {
walk_expr(cx, input, scope_stack, scope_map);
}
}
}
}

View File

@ -14,11 +14,10 @@ use self::MemberDescriptionFactory::*;
use self::EnumDiscriminantInfo::*;
use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of,
get_namespace_and_span_for_item, create_DIArray,
fn_should_be_ignored, is_node_local_to_unit};
get_namespace_and_span_for_item, create_DIArray, is_node_local_to_unit};
use super::namespace::mangled_name_of_item;
use super::type_names::{compute_debuginfo_type_name, push_debuginfo_type_name};
use super::{declare_local, VariableKind, VariableAccess, CrateDebugContext};
use super::{CrateDebugContext};
use context::SharedCrateContext;
use session::Session;
@ -26,16 +25,13 @@ use llvm::{self, ValueRef};
use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType};
use rustc::hir::def_id::DefId;
use rustc::hir::pat_util;
use rustc::ty::subst::Substs;
use rustc::hir::map as hir_map;
use rustc::hir::{self, PatKind};
use rustc::hir;
use {type_of, adt, machine, monomorphize};
use common::{self, CrateContext, FunctionContext, Block};
use _match::{BindingInfo, TransBindingMode};
use common::{CrateContext, FunctionContext};
use type_::Type;
use rustc::ty::{self, Ty};
use session::config::{self, FullDebugInfo};
use session::config;
use util::nodemap::FnvHashMap;
use util::common::path2cstr;
@ -1863,226 +1859,3 @@ pub fn create_global_var_metadata(cx: &CrateContext,
ptr::null_mut());
}
}
/// Creates debug information for the given local variable.
///
/// This function assumes that there's a datum for each pattern component of the
/// local in `bcx.fcx.lllocals`.
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_local_var_metadata(bcx: Block, local: &hir::Local) {
if bcx.unreachable.get() ||
fn_should_be_ignored(bcx.fcx) ||
bcx.sess().opts.debuginfo != FullDebugInfo {
return;
}
let locals = bcx.fcx.lllocals.borrow();
pat_util::pat_bindings(&local.pat, |_, node_id, span, var_name| {
let datum = match locals.get(&node_id) {
Some(datum) => datum,
None => {
span_bug!(span,
"no entry in lllocals table for {}",
node_id);
}
};
if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
span_bug!(span, "debuginfo::create_local_var_metadata() - \
Referenced variable location is not an alloca!");
}
let scope_metadata = scope_metadata(bcx.fcx, node_id, span);
declare_local(bcx,
var_name.node,
datum.ty,
scope_metadata,
VariableAccess::DirectVariable { alloca: datum.val },
VariableKind::LocalVariable,
span);
})
}
/// Creates debug information for a variable captured in a closure.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_captured_var_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
node_id: ast::NodeId,
env_pointer: ValueRef,
env_index: usize,
captured_by_ref: bool,
span: Span) {
if bcx.unreachable.get() ||
fn_should_be_ignored(bcx.fcx) ||
bcx.sess().opts.debuginfo != FullDebugInfo {
return;
}
let cx = bcx.ccx();
let ast_item = cx.tcx().map.find(node_id);
let variable_name = match ast_item {
None => {
span_bug!(span, "debuginfo::create_captured_var_metadata: node not found");
}
Some(hir_map::NodeLocal(pat)) => {
match pat.node {
PatKind::Binding(_, ref path1, _) => {
path1.node
}
_ => {
span_bug!(span,
"debuginfo::create_captured_var_metadata() - \
Captured var-id refers to unexpected \
hir_map variant: {:?}",
ast_item);
}
}
}
_ => {
span_bug!(span,
"debuginfo::create_captured_var_metadata() - \
Captured var-id refers to unexpected \
hir_map variant: {:?}",
ast_item);
}
};
let variable_type = common::node_id_type(bcx, node_id);
let scope_metadata = bcx.fcx.debug_context.get_ref(span).fn_metadata;
// env_pointer is the alloca containing the pointer to the environment,
// so its type is **EnvironmentType. In order to find out the type of
// the environment we have to "dereference" two times.
let llvm_env_data_type = common::val_ty(env_pointer).element_type()
.element_type();
let byte_offset_of_var_in_env = machine::llelement_offset(cx,
llvm_env_data_type,
env_index);
let address_operations = unsafe {
[llvm::LLVMRustDIBuilderCreateOpDeref(),
llvm::LLVMRustDIBuilderCreateOpPlus(),
byte_offset_of_var_in_env as i64,
llvm::LLVMRustDIBuilderCreateOpDeref()]
};
let address_op_count = if captured_by_ref {
address_operations.len()
} else {
address_operations.len() - 1
};
let variable_access = VariableAccess::IndirectVariable {
alloca: env_pointer,
address_operations: &address_operations[..address_op_count]
};
declare_local(bcx,
variable_name,
variable_type,
scope_metadata,
variable_access,
VariableKind::CapturedVariable,
span);
}
/// Creates debug information for a local variable introduced in the head of a
/// match-statement arm.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_match_binding_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
variable_name: ast::Name,
binding: BindingInfo<'tcx>) {
if bcx.unreachable.get() ||
fn_should_be_ignored(bcx.fcx) ||
bcx.sess().opts.debuginfo != FullDebugInfo {
return;
}
let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span);
let aops = unsafe {
[llvm::LLVMRustDIBuilderCreateOpDeref()]
};
// Regardless of the actual type (`T`) we're always passed the stack slot
// (alloca) for the binding. For ByRef bindings that's a `T*` but for ByMove
// bindings we actually have `T**`. So to get the actual variable we need to
// dereference once more. For ByCopy we just use the stack slot we created
// for the binding.
let var_access = match binding.trmode {
TransBindingMode::TrByCopy(llbinding) |
TransBindingMode::TrByMoveIntoCopy(llbinding) => VariableAccess::DirectVariable {
alloca: llbinding
},
TransBindingMode::TrByMoveRef => VariableAccess::IndirectVariable {
alloca: binding.llmatch,
address_operations: &aops
},
TransBindingMode::TrByRef => VariableAccess::DirectVariable {
alloca: binding.llmatch
}
};
declare_local(bcx,
variable_name,
binding.ty,
scope_metadata,
var_access,
VariableKind::LocalVariable,
binding.span);
}
/// Creates debug information for the given function argument.
///
/// This function assumes that there's a datum for each pattern component of the
/// argument in `bcx.fcx.lllocals`.
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_argument_metadata(bcx: Block, arg: &hir::Arg) {
if bcx.unreachable.get() ||
fn_should_be_ignored(bcx.fcx) ||
bcx.sess().opts.debuginfo != FullDebugInfo {
return;
}
let scope_metadata = bcx
.fcx
.debug_context
.get_ref(arg.pat.span)
.fn_metadata;
let locals = bcx.fcx.lllocals.borrow();
pat_util::pat_bindings(&arg.pat, |_, node_id, span, var_name| {
let datum = match locals.get(&node_id) {
Some(v) => v,
None => {
span_bug!(span, "no entry in lllocals table for {}", node_id);
}
};
if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
span_bug!(span, "debuginfo::create_argument_metadata() - \
Referenced variable location is not an alloca!");
}
let argument_index = {
let counter = &bcx
.fcx
.debug_context
.get_ref(span)
.argument_counter;
let argument_index = counter.get();
counter.set(argument_index + 1);
argument_index
};
declare_local(bcx,
var_name.node,
datum.ty,
scope_metadata,
VariableAccess::DirectVariable { alloca: datum.val },
VariableKind::ArgumentVariable(argument_index),
span);
})
}

View File

@ -56,13 +56,7 @@ mod source_loc;
pub use self::create_scope_map::create_mir_scopes;
pub use self::source_loc::start_emitting_source_locations;
pub use self::source_loc::get_cleanup_debug_loc_for_ast_node;
pub use self::source_loc::with_source_location_override;
pub use self::metadata::create_match_binding_metadata;
pub use self::metadata::create_argument_metadata;
pub use self::metadata::create_captured_var_metadata;
pub use self::metadata::create_global_var_metadata;
pub use self::metadata::create_local_var_metadata;
#[allow(non_upper_case_globals)]
const DW_TAG_auto_variable: c_uint = 0x100;
@ -142,7 +136,6 @@ impl FunctionDebugContext {
pub struct FunctionDebugContextData {
scope_map: RefCell<NodeMap<DIScope>>,
fn_metadata: DISubprogram,
argument_counter: Cell<usize>,
source_locations_enabled: Cell<bool>,
source_location_override: Cell<bool>,
}
@ -307,7 +300,6 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let fn_debug_context = box FunctionDebugContextData {
scope_map: RefCell::new(NodeMap()),
fn_metadata: fn_metadata,
argument_counter: Cell::new(1),
source_locations_enabled: Cell::new(false),
source_location_override: Cell::new(false),
};
@ -455,25 +447,6 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
}
}
/// Computes the scope map for a function given its declaration and body.
pub fn fill_scope_map_for_function<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
fn_decl: &hir::FnDecl,
top_level_block: &hir::Block,
fn_ast_id: ast::NodeId) {
match fcx.debug_context {
FunctionDebugContext::RegularContext(box ref data) => {
let scope_map = create_scope_map::create_scope_map(fcx.ccx,
&fn_decl.inputs,
top_level_block,
data.fn_metadata,
fn_ast_id);
*data.scope_map.borrow_mut() = scope_map;
}
FunctionDebugContext::DebugInfoDisabled |
FunctionDebugContext::FunctionWithoutDebugInfo => {}
}
}
pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
variable_name: ast::Name,
variable_type: Ty<'tcx>,

View File

@ -17,73 +17,11 @@ use super::{FunctionDebugContext, DebugLoc};
use llvm;
use llvm::debuginfo::DIScope;
use builder::Builder;
use common::{NodeIdAndSpan, CrateContext, FunctionContext};
use common::{CrateContext, FunctionContext};
use libc::c_uint;
use std::ptr;
use syntax_pos::{self, Span, Pos};
use syntax::ast;
pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
node_id: ast::NodeId,
node_span: Span,
is_block: bool)
-> NodeIdAndSpan {
// A debug location needs two things:
// (1) A span (of which only the beginning will actually be used)
// (2) An AST node-id which will be used to look up the lexical scope
// for the location in the functions scope-map
//
// This function will calculate the debug location for compiler-generated
// cleanup calls that are executed when control-flow leaves the
// scope identified by `node_id`.
//
// For everything but block-like things we can simply take id and span of
// the given expression, meaning that from a debugger's view cleanup code is
// executed at the same source location as the statement/expr itself.
//
// Blocks are a special case. Here we want the cleanup to be linked to the
// closing curly brace of the block. The *scope* the cleanup is executed in
// is up for debate: It could either still be *within* the block being
// cleaned up, meaning that locals from the block are still visible in the
// debugger.
// Or it could be in the scope that the block is contained in, so any locals
// from within the block are already considered out-of-scope and thus not
// accessible in the debugger anymore.
//
// The current implementation opts for the second option: cleanup of a block
// already happens in the parent scope of the block. The main reason for
// this decision is that scoping becomes control-flow dependent when variable
// shadowing is involved and it's impossible to decide statically which
// scope is actually left when the cleanup code is executed.
// In practice it shouldn't make much of a difference.
let mut cleanup_span = node_span;
if is_block {
// Not all blocks actually have curly braces (e.g. simple closure
// bodies), in which case we also just want to return the span of the
// whole expression.
let code_snippet = cx.sess().codemap().span_to_snippet(node_span);
if let Ok(code_snippet) = code_snippet {
let bytes = code_snippet.as_bytes();
if !bytes.is_empty() && &bytes[bytes.len()-1..] == b"}" {
cleanup_span = Span {
lo: node_span.hi - syntax_pos::BytePos(1),
hi: node_span.hi,
expn_id: node_span.expn_id
};
}
}
}
NodeIdAndSpan {
id: node_id,
span: cleanup_span
}
}
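A hedged restatement of the span trimming performed above, isolated as a standalone helper (the name is illustrative, not from the patch):
// Sketch only: narrow a block's span to its closing brace so that cleanup
// code appears to execute at the `}` in a debugger.
fn closing_brace_span(node_span: Span) -> Span {
    Span {
        lo: node_span.hi - syntax_pos::BytePos(1),
        hi: node_span.hi,
        expn_id: node_span.expn_id,
    }
}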
use syntax_pos::Pos;
/// Sets the current debug location at the beginning of the span.
///
@ -129,35 +67,6 @@ pub fn set_source_location(fcx: &FunctionContext,
set_debug_location(fcx.ccx, builder, dbg_loc);
}
/// This function makes sure that all debug locations emitted while executing
/// `wrapped_function` are set to the given `debug_loc`.
pub fn with_source_location_override<F, R>(fcx: &FunctionContext,
debug_loc: DebugLoc,
wrapped_function: F) -> R
where F: FnOnce() -> R
{
match fcx.debug_context {
FunctionDebugContext::DebugInfoDisabled => {
wrapped_function()
}
FunctionDebugContext::FunctionWithoutDebugInfo => {
set_debug_location(fcx.ccx, None, UnknownLocation);
wrapped_function()
}
FunctionDebugContext::RegularContext(box ref function_debug_context) => {
if function_debug_context.source_location_override.get() {
wrapped_function()
} else {
debug_loc.apply(fcx);
function_debug_context.source_location_override.set(true);
let result = wrapped_function();
function_debug_context.source_location_override.set(false);
result
}
}
}
}
/// Enables emitting source locations for the given functions.
///
/// Since we don't want source locations to be emitted for the function prelude,

View File

@ -10,7 +10,7 @@
// Utility Functions.
use super::{FunctionDebugContext, CrateDebugContext};
use super::{CrateDebugContext};
use super::namespace::item_namespace;
use rustc::hir::def_id::DefId;
@ -18,7 +18,7 @@ use rustc::hir::def_id::DefId;
use llvm;
use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray};
use machine;
use common::{CrateContext, FunctionContext};
use common::{CrateContext};
use type_::Type;
use syntax_pos::{self, Span};
@ -70,13 +70,6 @@ pub fn DIB(cx: &CrateContext) -> DIBuilderRef {
cx.dbg_cx().as_ref().unwrap().builder
}
pub fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
match fcx.debug_context {
FunctionDebugContext::RegularContext(_) => false,
_ => true
}
}
pub fn get_namespace_and_span_for_item(cx: &CrateContext, def_id: DefId)
-> (DIScope, Span) {
let containing_scope = item_namespace(cx, DefId {

File diff suppressed because it is too large Load Diff

View File

@ -21,15 +21,11 @@ use rustc::ty::subst::{Substs};
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use adt;
use adt::GetDtorType; // for tcx.dtor_type()
use base::*;
use build::*;
use callee::{Callee, ArgVals};
use cleanup;
use cleanup::CleanupMethods;
use callee::{Callee};
use common::*;
use debuginfo::DebugLoc;
use expr;
use machine::*;
use monomorphize;
use trans_item::TransItem;
@ -51,7 +47,7 @@ pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem);
let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align];
Callee::def(bcx.ccx(), def_id, Substs::empty(bcx.tcx()))
.call(bcx, debug_loc, ArgVals(&args), None).bcx
.call(bcx, debug_loc, &args, None).bcx
}
pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
@ -133,20 +129,18 @@ pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
drop_ty_core(bcx, v, t, debug_loc, false, None)
drop_ty_core(bcx, v, t, debug_loc, false)
}
pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool,
drop_hint: Option<cleanup::DropHintValue>)
skip_dtor: bool)
-> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor);
let _icx = push_ctxt("drop_ty");
let mut bcx = bcx;
if bcx.fcx.type_needs_drop(t) {
let ccx = bcx.ccx();
let g = if skip_dtor {
@ -162,23 +156,8 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v
};
match drop_hint {
Some(drop_hint) => {
let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
let moved_val =
C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
let may_need_drop =
ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
bcx = with_cond(bcx, may_need_drop, |cx| {
Call(cx, glue, &[ptr], debug_loc);
cx
})
}
None => {
// No drop-hint ==> call standard drop glue
Call(bcx, glue, &[ptr], debug_loc);
}
}
// No drop-hint ==> call standard drop glue
Call(bcx, glue, &[ptr], debug_loc);
}
bcx
}
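For clarity, a minimal call-site sketch under the new signature (identifiers other than `drop_ty_core` are assumptions):
// Sketch only: with drop hints gone, callers pass just the value alias, its
// type, a debug location and the skip_dtor flag.
let bcx = drop_ty_core(bcx, lldest, ty, DebugLoc::None, /* skip_dtor */ false);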
@ -193,7 +172,7 @@ pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let vp = alloc_ty(bcx, t, "");
call_lifetime_start(bcx, vp);
store_ty(bcx, v, vp, t);
let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None);
let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor);
call_lifetime_end(bcx, vp);
bcx
}
@ -273,7 +252,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &arena);
let bcx = fcx.init(false, None);
let bcx = fcx.init(false);
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
@ -288,40 +267,6 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
fcx.finish(bcx, DebugLoc::None);
}
fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
struct_data: ValueRef)
-> Block<'blk, 'tcx> {
assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
let repr = adt::represent_type(bcx.ccx(), t);
let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &repr, struct_data));
let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
bcx
} else {
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
let drop_flag_neither_initialized_nor_cleared =
And(bcx, not_init, not_done, DebugLoc::None);
with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
Call(cx, llfn, &[], DebugLoc::None);
cx
})
};
let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
with_cond(bcx, drop_flag_dtor_needed, |cx| {
trans_struct_drop(cx, t, struct_data)
})
}
fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
v0: ValueRef)
@ -343,14 +288,17 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
bcx.fcx.schedule_drop_adt_contents(contents_scope, v0, t);
let (sized_args, unsized_args);
let args: &[ValueRef] = if type_is_sized(tcx, t) {
sized_args = [v0];
&sized_args
} else {
unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))];
unsized_args = [
Load(bcx, get_dataptr(bcx, v0)),
Load(bcx, get_meta(bcx, v0))
];
&unsized_args
};
@ -364,7 +312,7 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
};
let dtor_did = def.destructor().unwrap();
bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs)
.call(bcx, DebugLoc::None, ArgVals(args), None).bcx;
.call(bcx, DebugLoc::None, args, None).bcx;
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}
@ -492,9 +440,6 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.
let inttype = Type::int(bcx.ccx());
let dropped_pattern = C_integral(inttype, adt::DTOR_DONE_U64, false);
match t.sty {
ty::TyBox(content_ty) => {
// Support for TyBox is built-in and its drop glue is
@ -502,59 +447,33 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK
// a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor);
if !type_is_sized(bcx.tcx(), content_ty) {
let llval = expr::get_dataptr(bcx, v0);
let llval = get_dataptr(bcx, v0);
let llbox = Load(bcx, llval);
let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
let drop_flag_not_dropped_already =
ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
let info = expr::get_meta(bcx, v0);
let info = Load(bcx, info);
let (llsize, llalign) =
size_and_align_of_dst(&bcx.build(), content_ty, info);
let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
let info = get_meta(bcx, v0);
let info = Load(bcx, info);
let (llsize, llalign) =
size_and_align_of_dst(&bcx.build(), content_ty, info);
// `Box<ZeroSizeType>` does not allocate.
let needs_free = ICmp(bcx,
llvm::IntNE,
llsize,
C_uint(bcx.ccx(), 0u64),
DebugLoc::None);
with_cond(bcx, needs_free, |bcx| {
trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
})
// `Box<ZeroSizeType>` does not allocate.
let needs_free = ICmp(bcx,
llvm::IntNE,
llsize,
C_uint(bcx.ccx(), 0u64),
DebugLoc::None);
with_cond(bcx, needs_free, |bcx| {
trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
})
} else {
let llval = v0;
let llbox = Load(bcx, llval);
let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
let drop_flag_not_dropped_already =
ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
})
let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
}
}
ty::TyStruct(def, _) | ty::TyEnum(def, _) => {
match (def.dtor_kind(), skip_dtor) {
(ty::TraitDtor(true), false) => {
// FIXME(16758) Since the struct is unsized, it is hard to
// find the drop flag (which is at the end of the struct).
// Let's just ignore the flag and pretend everything will be
// OK.
if type_is_sized(bcx.tcx(), t) {
trans_struct_drop_flag(bcx, t, v0)
} else {
// Give the user a heads up that we are doing something
// stupid and dangerous.
bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \
because the struct is unsized. See issue \
#16758", t));
trans_struct_drop(bcx, t, v0)
}
}
(ty::TraitDtor(false), false) => {
(ty::TraitDtor(_), false) => {
trans_struct_drop(bcx, t, v0)
}
(ty::NoDtor, _) | (_, true) => {
@ -568,8 +487,8 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
assert!(!skip_dtor);
let data_ptr = expr::get_dataptr(bcx, v0);
let vtable_ptr = Load(bcx, expr::get_meta(bcx, v0));
let data_ptr = get_dataptr(bcx, v0);
let vtable_ptr = Load(bcx, get_meta(bcx, v0));
let dtor = Load(bcx, vtable_ptr);
Call(bcx,
dtor,

View File

@ -14,21 +14,14 @@ use arena::TypedArena;
use intrinsics::{self, Intrinsic};
use libc;
use llvm;
use llvm::{ValueRef, TypeKind};
use rustc::ty::subst::Substs;
use llvm::{ValueRef};
use abi::{Abi, FnType};
use adt;
use base::*;
use build::*;
use callee::{self, Callee};
use cleanup;
use cleanup::CleanupMethods;
use common::*;
use consts;
use datum::*;
use debuginfo::DebugLoc;
use declare;
use expr;
use glue;
use type_of;
use machine;
@ -37,11 +30,9 @@ use rustc::ty::{self, Ty};
use Disr;
use rustc::hir;
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::token;
use rustc::session::Session;
use rustc_const_eval::fatal_const_eval_err;
use syntax_pos::{Span, DUMMY_SP};
use std::cmp::Ordering;
@ -98,8 +89,8 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
callee_ty: Ty<'tcx>,
fn_ty: &FnType,
args: callee::CallArgs<'a, 'tcx>,
dest: expr::Dest,
llargs: &[ValueRef],
llresult: ValueRef,
call_debug_location: DebugLoc)
-> Result<'blk, 'tcx> {
let fcx = bcx.fcx;
@ -127,210 +118,19 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
}
};
let cleanup_scope = fcx.push_custom_cleanup_scope();
// For `transmute` we can just trans the input expr directly into dest
if name == "transmute" {
let llret_ty = type_of::type_of(ccx, ret_ty);
match args {
callee::ArgExprs(arg_exprs) => {
assert_eq!(arg_exprs.len(), 1);
let (in_type, out_type) = (substs.types[0],
substs.types[1]);
let llintype = type_of::type_of(ccx, in_type);
let llouttype = type_of::type_of(ccx, out_type);
let in_type_size = machine::llbitsize_of_real(ccx, llintype);
let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle.
let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
let llfnty = val_ty(llfn);
let llresult = match dest {
expr::SaveIn(d) => d,
expr::Ignore => alloc_ty(bcx, out_type, "ret")
};
Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
if dest == expr::Ignore {
bcx = glue::drop_ty(bcx, llresult, out_type,
call_debug_location);
}
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
return Result::new(bcx, llresult);
}
}
// This should be caught by the intrinsicck pass
assert_eq!(in_type_size, out_type_size);
let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
use llvm::TypeKind::*;
match llkind {
Half | Float | Double | X86_FP80 | FP128 |
PPC_FP128 | Integer | Vector | X86_MMX => true,
_ => false
}
};
// An approximation to which types can be directly cast via
// LLVM's bitcast. This doesn't cover pointer -> pointer casts,
// but does, importantly, cover SIMD types.
let in_kind = llintype.kind();
let ret_kind = llret_ty.kind();
let bitcast_compatible =
(nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
};
let dest = if bitcast_compatible {
// if we're here, the type is scalar-like (a primitive, a
// SIMD type or a pointer), and so can be handled as a
// by-value ValueRef and can also be directly bitcast to the
// target type. Doing this special case makes conversions
// like `u32x4` -> `u64x2` much nicer for LLVM and so more
// efficient (these are done efficiently implicitly in C
// with the `__m128i` type and so this means Rust doesn't
// lose out there).
let expr = &arg_exprs[0];
let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
let val = if datum.kind.is_by_ref() {
load_ty(bcx, datum.val, datum.ty)
} else {
from_immediate(bcx, datum.val)
};
let cast_val = BitCast(bcx, val, llret_ty);
match dest {
expr::SaveIn(d) => {
// this often occurs in a sequence like `Store(val,
// d); val2 = Load(d)`, so disappears easily.
Store(bcx, cast_val, d);
}
expr::Ignore => {}
}
dest
} else {
// The types are too complicated to do with a by-value
// bitcast, so pointer cast instead. We need to cast the
// dest so the types work out.
let dest = match dest {
expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
expr::Ignore => expr::Ignore
};
bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
dest
};
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
return match dest {
expr::SaveIn(d) => Result::new(bcx, d),
expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
};
}
_ => {
bug!("expected expr as argument for transmute");
}
}
}
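To isolate the rule the removed transmute path applied when choosing between a direct bitcast and a pointer-cast spill, here is a hedged standalone restatement (a sketch, not code from the patch):
// Sketch only: scalar-like LLVM types (plus pointer-to-pointer pairs) can be
// converted with a plain BitCast; aggregates must go through memory instead.
fn bitcast_compatible(in_kind: TypeKind, ret_kind: TypeKind) -> bool {
    use llvm::TypeKind::*;
    let scalarish = |k: TypeKind| match k {
        Half | Float | Double | X86_FP80 | FP128 |
        PPC_FP128 | Integer | Vector | X86_MMX => true,
        _ => false,
    };
    (scalarish(in_kind) && scalarish(ret_kind)) ||
        (in_kind == Pointer && ret_kind == Pointer)
}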
// For `move_val_init` we can evaluate the destination address
// (the first argument) and then trans the source value (the
// second argument) directly into the resulting destination
// address.
if name == "move_val_init" {
if let callee::ArgExprs(ref exprs) = args {
let (dest_expr, source_expr) = if exprs.len() != 2 {
bug!("expected two exprs as arguments for `move_val_init` intrinsic");
} else {
(&exprs[0], &exprs[1])
};
// evaluate destination address
let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
let dest_datum = unpack_datum!(
bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
let dest_datum = unpack_datum!(
bcx, dest_datum.to_appropriate_datum(bcx));
// `expr::trans_into(bcx, expr, dest)` is equiv to
//
// `trans(bcx, expr).store_to_dest(dest)`,
//
// which for `dest == expr::SaveIn(addr)`, is equivalent to:
//
// `trans(bcx, expr).store_to(bcx, addr)`.
let lldest = expr::Dest::SaveIn(dest_datum.val);
bcx = expr::trans_into(bcx, source_expr, lldest);
let llresult = C_nil(ccx);
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
return Result::new(bcx, llresult);
} else {
bug!("expected two exprs as arguments for `move_val_init` intrinsic");
}
}
// save the actual AST arguments for later (some places need to do
// const-evaluation on them)
let expr_arguments = match args {
callee::ArgExprs(args) => Some(args),
_ => None,
};
// Push the arguments.
let mut llargs = Vec::new();
bcx = callee::trans_args(bcx,
Abi::RustIntrinsic,
fn_ty,
&mut callee::Intrinsic,
args,
&mut llargs,
cleanup::CustomScope(cleanup_scope));
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
// These are the only intrinsic functions that diverge.
if name == "abort" {
let llfn = ccx.get_intrinsic(&("llvm.trap"));
Call(bcx, llfn, &[], call_debug_location);
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
Unreachable(bcx);
return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
} else if &name[..] == "unreachable" {
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
Unreachable(bcx);
return Result::new(bcx, C_nil(ccx));
}
let llret_ty = type_of::type_of(ccx, ret_ty);
// Get location to store the result. If the user does
// not care about the result, just make a stack slot
let llresult = match dest {
expr::SaveIn(d) => d,
expr::Ignore => {
if !type_is_zero_size(ccx, ret_ty) {
let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
call_lifetime_start(bcx, llresult);
llresult
} else {
C_undef(llret_ty.ptr_to())
}
}
};
let simple = get_simple_intrinsic(ccx, &name);
let llval = match (simple, &name[..]) {
(Some(llfn), _) => {
@ -382,16 +182,20 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
}
(_, "drop_in_place") => {
let tp_ty = substs.types[0];
let ptr = if type_is_sized(tcx, tp_ty) {
let is_sized = type_is_sized(tcx, tp_ty);
let ptr = if is_sized {
llargs[0]
} else {
let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
scratch.val
let scratch = alloc_ty(bcx, tp_ty, "drop");
call_lifetime_start(bcx, scratch);
Store(bcx, llargs[0], get_dataptr(bcx, scratch));
Store(bcx, llargs[1], get_meta(bcx, scratch));
scratch
};
glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
if !is_sized {
call_lifetime_end(bcx, ptr);
}
C_nil(ccx)
}
(_, "type_name") => {
@ -403,11 +207,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
C_u64(ccx, ccx.tcx().type_id_hash(substs.types[0]))
}
(_, "init_dropped") => {
let tp_ty = substs.types[0];
if !type_is_zero_size(ccx, tp_ty) {
drop_done_fill_mem(bcx, llresult, tp_ty);
}
C_nil(ccx)
span_bug!(span, "init_dropped intrinsic unsupported");
}
(_, "init") => {
let tp_ty = substs.types[0];
@ -511,8 +311,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
(_, "volatile_store") => {
let tp_ty = substs.types[0];
if type_is_fat_ptr(bcx.tcx(), tp_ty) {
VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
VolatileStore(bcx, llargs[1], get_dataptr(bcx, llargs[0]));
VolatileStore(bcx, llargs[2], get_meta(bcx, llargs[0]));
} else {
let val = if fn_ty.args[1].is_indirect() {
Load(bcx, llargs[1])
@ -621,9 +421,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
}
(_, name) if name.starts_with("simd_") => {
generic_simd_intrinsic(bcx, name,
substs,
callee_ty,
expr_arguments,
&llargs,
ret_ty, llret_ty,
call_debug_location,
@ -868,13 +666,13 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let llargs = if !any_changes_needed {
// no aggregates to flatten, so no change needed
llargs
llargs.to_vec()
} else {
// there are some aggregates that need to be flattened
// in the LLVM call, so we need to run over the types
// again to find them and extract the arguments
intr.inputs.iter()
.zip(&llargs)
.zip(llargs)
.zip(&arg_tys)
.flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
.collect()
@ -919,17 +717,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
}
}
// If we made a temporary stack slot, let's clean it up
match dest {
expr::Ignore => {
bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
call_lifetime_end(bcx, llresult);
}
expr::SaveIn(_) => {}
}
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
Result::new(bcx, llresult)
}
@ -1064,10 +851,10 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
SetPersonalityFn(bcx, bcx.fcx.eh_personality());
let normal = bcx.fcx.new_temp_block("normal");
let catchswitch = bcx.fcx.new_temp_block("catchswitch");
let catchpad = bcx.fcx.new_temp_block("catchpad");
let caught = bcx.fcx.new_temp_block("caught");
let normal = bcx.fcx.new_block("normal");
let catchswitch = bcx.fcx.new_block("catchswitch");
let catchpad = bcx.fcx.new_block("catchpad");
let caught = bcx.fcx.new_block("caught");
let func = llvm::get_param(bcx.fcx.llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1);
@ -1123,7 +910,7 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let tcx = ccx.tcx();
let tydesc = match tcx.lang_items.msvc_try_filter() {
Some(did) => ::consts::get_static(ccx, did).to_llref(),
Some(did) => ::consts::get_static(ccx, did),
None => bug!("msvc_try_filter not defined"),
};
let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
@ -1184,8 +971,8 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// expected to be `*mut *mut u8` for this to actually work, but that's
// managed by the standard library.
let then = bcx.fcx.new_temp_block("then");
let catch = bcx.fcx.new_temp_block("catch");
let then = bcx.fcx.new_block("then");
let catch = bcx.fcx.new_block("catch");
let func = llvm::get_param(bcx.fcx.llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1);
@ -1240,8 +1027,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
let (fcx, block_arena);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
let bcx = fcx.init(true, None);
trans(bcx);
trans(fcx.init(true));
fcx.cleanup();
llfn
}
@ -1283,9 +1069,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
fn generic_simd_intrinsic<'blk, 'tcx, 'a>
(bcx: Block<'blk, 'tcx>,
name: &str,
substs: &'tcx Substs<'tcx>,
callee_ty: Ty<'tcx>,
args: Option<&[P<hir::Expr>]>,
llargs: &[ValueRef],
ret_ty: Ty<'tcx>,
llret_ty: Type,
@ -1386,20 +1170,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>
let total_len = in_len as u64 * 2;
let vector = match args {
Some(args) => {
match consts::const_expr(bcx.ccx(), &args[2], substs, None,
// this should probably help simd error reporting
consts::TrueConst::Yes) {
Ok((vector, _)) => vector,
Err(err) => {
fatal_const_eval_err(bcx.tcx(), err.as_inner(), span,
"shuffle indices");
}
}
}
None => llargs[2]
};
let vector = llargs[2];
let indices: Option<Vec<_>> = (0..n)
.map(|i| {

View File

@ -110,17 +110,13 @@ mod collector;
mod common;
mod consts;
mod context;
mod controlflow;
mod datum;
mod debuginfo;
mod declare;
mod disr;
mod expr;
mod glue;
mod inline;
mod intrinsic;
mod machine;
mod _match;
mod meth;
mod mir;
mod monomorphize;

View File

@ -20,13 +20,12 @@ use rustc::traits::{self, Reveal};
use abi::FnType;
use base::*;
use build::*;
use callee::{Callee, Virtual, ArgVals, trans_fn_pointer_shim};
use callee::{Callee, Virtual, trans_fn_pointer_shim};
use closure;
use common::*;
use consts;
use debuginfo::DebugLoc;
use declare;
use expr;
use glue;
use machine;
use type_::Type;
@ -96,25 +95,21 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
let mut bcx = fcx.init(false, None);
assert!(!fcx.needs_ret_allocas);
let mut bcx = fcx.init(false);
let dest =
fcx.llretslotptr.get().map(
|_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
let dest = fcx.llretslotptr.get();
debug!("trans_object_shim: method_offset_in_vtable={}",
vtable_index);
let llargs = get_params(fcx.llfn);
let args = ArgVals(&llargs[fcx.fn_ty.ret.is_indirect() as usize..]);
let callee = Callee {
data: Virtual(vtable_index),
ty: method_ty
};
bcx = callee.call(bcx, DebugLoc::None, args, dest).bcx;
bcx = callee.call(bcx, DebugLoc::None,
&llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).bcx;
fcx.finish(bcx, DebugLoc::None);
@ -160,7 +155,7 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
get_vtable_methods(tcx, id, substs)
.into_iter()
.map(|opt_mth| opt_mth.map_or(nullptr, |mth| {
Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx).val
Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx)
}))
.collect::<Vec<_>>()
.into_iter()

View File

@ -9,7 +9,7 @@
// except according to those terms.
use llvm::{self, ValueRef};
use rustc_const_eval::ErrKind;
use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err};
use rustc::middle::lang_items;
use rustc::ty;
use rustc::mir::repr as mir;
@ -78,7 +78,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
debug!("llblock: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
let trampoline = this.fcx.new_block(name, None).build();
let trampoline = this.fcx.new_block(name).build();
trampoline.set_personality_fn(this.fcx.eh_personality());
trampoline.cleanup_ret(cp, Some(lltarget));
trampoline.llbb()
@ -291,7 +291,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// Create the failure block and the conditional branch to it.
let lltarget = llblock(self, target);
let panic_block = self.fcx.new_block("panic", None);
let panic_block = self.fcx.new_block("panic");
if expected {
bcx.cond_br(cond, lltarget, panic_block.llbb);
} else {
@ -354,9 +354,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// is also constant, then we can produce a warning.
if const_cond == Some(!expected) {
if let Some(err) = const_err {
let _ = consts::const_err(bcx.ccx(), span,
Err::<(), _>(err),
consts::TrueConst::No);
let err = ConstEvalErr{ span: span, kind: err };
let mut diag = bcx.tcx().sess.struct_span_warn(
span, "this expression will panic at run-time");
note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag);
diag.emit();
}
}
@ -364,7 +366,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item);
let callee = Callee::def(bcx.ccx(), def_id,
bcx.ccx().empty_substs_for_def_id(def_id));
let llfn = callee.reify(bcx.ccx()).val;
let llfn = callee.reify(bcx.ccx());
// Translate the actual panic invoke/call.
if let Some(unwind) = cleanup {
@ -497,28 +499,27 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let fn_ptr = match callee.data {
NamedTupleConstructor(_) => {
// FIXME translate this like mir::Rvalue::Aggregate.
callee.reify(bcx.ccx()).val
callee.reify(bcx.ccx())
}
Intrinsic => {
use callee::ArgVals;
use expr::{Ignore, SaveIn};
use intrinsic::trans_intrinsic_call;
let (dest, llargs) = match ret_dest {
_ if fn_ty.ret.is_indirect() => {
(SaveIn(llargs[0]), &llargs[1..])
(llargs[0], &llargs[1..])
}
ReturnDest::Nothing => {
(C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..])
}
ReturnDest::Nothing => (Ignore, &llargs[..]),
ReturnDest::IndirectOperand(dst, _) |
ReturnDest::Store(dst) => (SaveIn(dst), &llargs[..]),
ReturnDest::Store(dst) => (dst, &llargs[..]),
ReturnDest::DirectOperand(_) =>
bug!("Cannot use direct operand with an intrinsic call")
};
bcx.with_block(|bcx| {
trans_intrinsic_call(bcx, callee.ty, &fn_ty,
ArgVals(llargs), dest,
debug_loc);
&llargs, dest, debug_loc);
});
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
@ -766,7 +767,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let target = self.bcx(target_bb);
let block = self.fcx.new_block("cleanup", None);
let block = self.fcx.new_block("cleanup");
self.landing_pads[target_bb] = Some(block);
let bcx = block.build();
@ -809,7 +810,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
self.unreachable_block.unwrap_or_else(|| {
let bl = self.fcx.new_block("unreachable", None);
let bl = self.fcx.new_block("unreachable");
bl.build().unreachable();
self.unreachable_block = Some(bl);
bl
@ -878,10 +879,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle.
let f = Callee::def(bcx.ccx(), def_id, substs);
let datum = f.reify(bcx.ccx());
let ty = match f.ty.sty {
ty::TyFnDef(_, _, f) => bcx.tcx().mk_fn_ptr(f),
_ => f.ty
};
val = OperandRef {
val: Immediate(datum.val),
ty: datum.ty
val: Immediate(f.reify(bcx.ccx())),
ty: ty
};
}
}

View File

@ -10,10 +10,10 @@
use llvm::{self, ValueRef};
use rustc::middle::const_val::ConstVal;
use rustc_const_eval::ErrKind;
use rustc_const_eval::{ErrKind, ConstEvalErr, report_const_eval_err};
use rustc_const_math::ConstInt::*;
use rustc_const_math::ConstFloat::*;
use rustc_const_math::ConstMathErr;
use rustc_const_math::{ConstInt, ConstIsize, ConstUsize, ConstMathErr};
use rustc::hir::def_id::DefId;
use rustc::infer::TransNormalize;
use rustc::mir::repr as mir;
@ -28,12 +28,14 @@ use callee::Callee;
use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty};
use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral};
use common::{C_null, C_struct, C_str_slice, C_undef, C_uint};
use consts::{self, ConstEvalFailure, TrueConst, to_const_int};
use common::{const_to_opt_int, const_to_opt_uint};
use consts;
use monomorphize::{self, Instance};
use type_of;
use type_::Type;
use value::Value;
use syntax::ast;
use syntax_pos::{Span, DUMMY_SP};
use std::ptr;
@ -230,7 +232,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
fn trans_def(ccx: &'a CrateContext<'a, 'tcx>,
mut instance: Instance<'tcx>,
args: IndexVec<mir::Arg, Const<'tcx>>)
-> Result<Const<'tcx>, ConstEvalFailure> {
-> Result<Const<'tcx>, ConstEvalErr> {
// Try to resolve associated constants.
if let Some(trait_id) = ccx.tcx().trait_of_item(instance.def) {
let trait_ref = ty::TraitRef::new(trait_id, instance.substs);
@ -261,7 +263,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
value)
}
fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalFailure> {
fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalErr> {
let tcx = self.ccx.tcx();
let mut bb = mir::START_BLOCK;
@ -320,10 +322,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
ErrKind::Math(err.clone())
}
};
match consts::const_err(self.ccx, span, Err(err), TrueConst::Yes) {
Ok(()) => {}
Err(err) => if failure.is_ok() { failure = Err(err); }
}
let err = ConstEvalErr{ span: span, kind: err };
report_const_eval_err(tcx, &err, span, "expression").emit();
failure = Err(err);
}
target
}
@ -370,7 +372,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
}
fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span)
-> Result<ConstLvalue<'tcx>, ConstEvalFailure> {
-> Result<ConstLvalue<'tcx>, ConstEvalErr> {
let tcx = self.ccx.tcx();
if let Some(index) = self.mir.local_index(lvalue) {
@ -386,7 +388,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
mir::Lvalue::ReturnPointer => bug!(), // handled above
mir::Lvalue::Static(def_id) => {
ConstLvalue {
base: Base::Static(consts::get_static(self.ccx, def_id).val),
base: Base::Static(consts::get_static(self.ccx, def_id)),
llextra: ptr::null_mut(),
ty: lvalue.ty(self.mir, tcx).to_ty(tcx)
}
@ -411,11 +413,18 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
} else if let ty::TyStr = projected_ty.sty {
(Base::Str(base), extra)
} else {
let val = consts::load_const(self.ccx, base, projected_ty);
let v = base;
let v = self.ccx.const_unsized().borrow().get(&v).map_or(v, |&v| v);
let mut val = unsafe { llvm::LLVMGetInitializer(v) };
if val.is_null() {
span_bug!(span, "dereference of non-constant pointer `{:?}`",
Value(base));
}
if projected_ty.is_bool() {
unsafe {
val = llvm::LLVMConstTrunc(val, Type::i1(self.ccx).to_ref());
}
}
(Base::Value(val), extra)
}
}
@ -462,7 +471,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
}
fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span)
-> Result<Const<'tcx>, ConstEvalFailure> {
-> Result<Const<'tcx>, ConstEvalErr> {
match *operand {
mir::Operand::Consume(ref lvalue) => {
Ok(self.const_lvalue(lvalue, span)?.to_const(span))
@ -497,7 +506,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>,
dest_ty: Ty<'tcx>, span: Span)
-> Result<Const<'tcx>, ConstEvalFailure> {
-> Result<Const<'tcx>, ConstEvalErr> {
let tcx = self.ccx.tcx();
let val = match *rvalue {
mir::Rvalue::Use(ref operand) => self.const_operand(operand, span)?,
@ -565,7 +574,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
match operand.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
Callee::def(self.ccx, def_id, substs)
.reify(self.ccx).val
.reify(self.ccx)
}
_ => {
span_bug!(span, "{} cannot be reified to a fn ptr",
@ -782,6 +791,54 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
}
fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option<ConstInt> {
match t.sty {
ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type {
ast::IntTy::I8 => {
assert_eq!(input as i8 as i64, input);
Some(ConstInt::I8(input as i8))
},
ast::IntTy::I16 => {
assert_eq!(input as i16 as i64, input);
Some(ConstInt::I16(input as i16))
},
ast::IntTy::I32 => {
assert_eq!(input as i32 as i64, input);
Some(ConstInt::I32(input as i32))
},
ast::IntTy::I64 => {
Some(ConstInt::I64(input))
},
ast::IntTy::Is => {
ConstIsize::new(input, tcx.sess.target.int_type)
.ok().map(ConstInt::Isize)
},
}),
ty::TyUint(uint_type) => const_to_opt_uint(value).and_then(|input| match uint_type {
ast::UintTy::U8 => {
assert_eq!(input as u8 as u64, input);
Some(ConstInt::U8(input as u8))
},
ast::UintTy::U16 => {
assert_eq!(input as u16 as u64, input);
Some(ConstInt::U16(input as u16))
},
ast::UintTy::U32 => {
assert_eq!(input as u32 as u64, input);
Some(ConstInt::U32(input as u32))
},
ast::UintTy::U64 => {
Some(ConstInt::U64(input))
},
ast::UintTy::Us => {
ConstUsize::new(input, tcx.sess.target.uint_type)
.ok().map(ConstInt::Usize)
},
}),
_ => None,
}
}
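A hedged usage sketch for the helper above (`value`, `ty` and `tcx` are assumed to be in scope):
// Sketch only: recover a typed ConstInt from an LLVM scalar constant, e.g.
// before checked arithmetic; None means the value was not an integer constant.
if let Some(ci) = to_const_int(value, ty, tcx) {
    debug!("constant operand folded to {:?}", ci);
}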
pub fn const_scalar_binop(op: mir::BinOp,
lhs: ValueRef,
rhs: ValueRef,
@ -902,25 +959,17 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
};
match result {
Ok(v) => v,
Err(ConstEvalFailure::Compiletime(_)) => {
// We've errored, so we don't have to produce working code.
let llty = type_of::type_of(bcx.ccx(), ty);
Const::new(C_undef(llty), ty)
}
Err(ConstEvalFailure::Runtime(err)) => {
span_bug!(constant.span,
"MIR constant {:?} results in runtime panic: {:?}",
constant, err.description())
}
}
result.unwrap_or_else(|_| {
// We've errored, so we don't have to produce working code.
let llty = type_of::type_of(bcx.ccx(), ty);
Const::new(C_undef(llty), ty)
})
}
}
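The reasoning behind the `unwrap_or_else` fallback above is that once a const-eval error has been reported, codegen only needs some value of the right LLVM type, not a meaningful one. A hedged analogue of that shape in plain Rust, with `Default::default()` standing in for `C_undef` (names are illustrative only):
fn const_or_placeholder(result: Result<i32, String>) -> i32 {
    // A diagnostic was already emitted on the Err path; any value of the right type will do.
    result.unwrap_or_else(|_| i32::default())
}
fn main() {
    assert_eq!(const_or_placeholder(Ok(7)), 7);
    assert_eq!(const_or_placeholder(Err("oops".into())), 0); // placeholder, never meaningful
}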
pub fn trans_static_initializer(ccx: &CrateContext, def_id: DefId)
-> Result<ValueRef, ConstEvalFailure> {
-> Result<ValueRef, ConstEvalErr> {
let instance = Instance::mono(ccx.shared(), def_id);
MirConstContext::trans_def(ccx, instance, IndexVec::new()).map(|c| c.llval)
}

View File

@ -109,7 +109,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Lvalue::ReturnPointer => bug!(), // handled above
mir::Lvalue::Static(def_id) => {
let const_ty = self.monomorphized_lvalue_ty(lvalue);
LvalueRef::new_sized(consts::get_static(ccx, def_id).val,
LvalueRef::new_sized(consts::get_static(ccx, def_id),
LvalueTy::from_ty(const_ty))
},
mir::Lvalue::Projection(box mir::Projection {

View File

@ -145,7 +145,7 @@ impl<'tcx> LocalRef<'tcx> {
///////////////////////////////////////////////////////////////////////////
pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
let bcx = fcx.init(true, None).build();
let bcx = fcx.init(true).build();
let mir = bcx.mir();
// Analyze the temps to determine which must be lvalues
@ -207,9 +207,9 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
let block_bcxs: IndexVec<mir::BasicBlock, Block<'blk,'tcx>> =
mir.basic_blocks().indices().map(|bb| {
if bb == mir::START_BLOCK {
fcx.new_block("start", None)
fcx.new_block("start")
} else {
fcx.new_block(&format!("{:?}", bb), None)
fcx.new_block(&format!("{:?}", bb))
}
}).collect();

View File

@ -17,7 +17,6 @@ use asm;
use base;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result};
use datum::{Datum, Lvalue};
use debuginfo::DebugLoc;
use adt;
use machine;
@ -157,8 +156,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| {
let lvalue = self.trans_lvalue(&bcx, output);
Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()),
Lvalue::new("out"))
(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
}).collect();
let input_vals = inputs.iter().map(|input| {
@ -202,7 +200,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
ty::TyFnDef(def_id, substs, _) => {
OperandValue::Immediate(
Callee::def(bcx.ccx(), def_id, substs)
.reify(bcx.ccx()).val)
.reify(bcx.ccx()))
}
_ => {
bug!("{} cannot be reified to a fn ptr", operand.ty)

View File

@ -88,13 +88,13 @@ impl<'a, 'tcx> TransItem<'tcx> {
let def_id = ccx.tcx().map.local_def_id(node_id);
let _task = ccx.tcx().dep_graph.in_task(DepNode::TransCrateItem(def_id)); // (*)
let item = ccx.tcx().map.expect_item(node_id);
if let hir::ItemStatic(_, m, ref expr) = item.node {
match consts::trans_static(&ccx, m, expr, item.id, &item.attrs) {
if let hir::ItemStatic(_, m, _) = item.node {
match consts::trans_static(&ccx, m, item.id, &item.attrs) {
Ok(_) => { /* Cool, everything's alright. */ },
Err(err) => {
// FIXME: shouldn't this be a `span_err`?
fatal_const_eval_err(
ccx.tcx(), &err, expr.span, "static");
ccx.tcx(), &err, item.span, "static");
}
};
} else {

View File

@ -13,347 +13,10 @@
use llvm;
use llvm::ValueRef;
use base::*;
use base;
use build::*;
use cleanup;
use cleanup::CleanupMethods;
use common::*;
use consts;
use datum::*;
use debuginfo::DebugLoc;
use expr::{Dest, Ignore, SaveIn};
use expr;
use machine::llsize_of_alloc;
use type_::Type;
use type_of;
use value::Value;
use rustc::ty::{self, Ty};
use rustc::hir;
use rustc_const_eval::eval_length;
use syntax::ast;
use syntax::parse::token::InternedString;
#[derive(Copy, Clone, Debug)]
struct VecTypes<'tcx> {
unit_ty: Ty<'tcx>,
llunit_ty: Type
}
pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &hir::Expr,
dest: expr::Dest)
-> Block<'blk, 'tcx> {
//! A `[...]` expression allocates a fixed-size array and moves it around
//! "by value". In this case, the caller has already given us a location to
//! store an array of the suitable size, so all we have to do is generate
//! the content.
debug!("trans_fixed_vstore(expr={:?}, dest={:?})", expr, dest);
let vt = vec_types_from_expr(bcx, expr);
return match dest {
Ignore => write_content(bcx, &vt, expr, expr, dest),
SaveIn(lldest) => {
// lldest will have type *[T x N], but we want the type *T,
// so use GEP to convert:
let lldest = StructGEP(bcx, lldest, 0);
write_content(bcx, &vt, expr, expr, SaveIn(lldest))
}
};
}
/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the
/// caller must make the reference). "..." is similar except that the memory can be statically
/// allocated and we return a reference (strings are always by-ref).
pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
slice_expr: &hir::Expr,
content_expr: &hir::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let fcx = bcx.fcx;
let mut bcx = bcx;
debug!("trans_slice_vec(slice_expr={:?})",
slice_expr);
let vec_ty = node_id_type(bcx, slice_expr.id);
// Handle the "..." case (returns a slice since strings are always unsized):
if let hir::ExprLit(ref lit) = content_expr.node {
if let ast::LitKind::Str(ref s, _) = lit.node {
let scratch = rvalue_scratch_datum(bcx, vec_ty, "");
bcx = trans_lit_str(bcx,
content_expr,
s.clone(),
SaveIn(scratch.val));
return DatumBlock::new(bcx, scratch.to_expr_datum());
}
}
// Handle the &[...] case:
let vt = vec_types_from_expr(bcx, content_expr);
let count = elements_required(bcx, content_expr);
debug!(" vt={:?}, count={}", vt, count);
let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count);
// Always create an alloca even if zero-sized, to preserve
// the non-null invariant of the inner slice ptr
let llfixed;
// Issue 30018: ensure state is initialized as dropped if necessary.
if fcx.type_needs_drop(vt.unit_ty) {
llfixed = base::alloc_ty_init(bcx, fixed_ty, InitAlloca::Dropped, "");
} else {
let uninit = InitAlloca::Uninit("fcx says vt.unit_ty is non-drop");
llfixed = base::alloc_ty_init(bcx, fixed_ty, uninit, "");
call_lifetime_start(bcx, llfixed);
};
if count > 0 {
// Arrange for the backing array to be cleaned up.
let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id);
fcx.schedule_lifetime_end(cleanup_scope, llfixed);
fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty, None);
// Generate the content into the backing array.
// llfixed has type *[T x N], but we want the type *T,
// so use GEP to convert
bcx = write_content(bcx, &vt, slice_expr, content_expr,
SaveIn(StructGEP(bcx, llfixed, 0)));
};
immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
}
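At the surface-language level, the two paths described in the doc comment above behave as in the sketch below: both forms yield a fat (pointer, length) slice, but `&[...]` borrows a freshly written backing array while a string literal borrows static memory without copying. Plain Rust, illustration only:
use std::mem;
fn main() {
    let n: i32 = "3".parse().unwrap();   // runtime value, so the array cannot be promoted
    let on_stack: &[i32] = &[1, 2, n];   // backing array is a stack temporary
    let in_static: &'static str = "abc"; // bytes live in static memory, no copy
    assert_eq!(on_stack.len(), 3);
    assert_eq!(in_static.len(), 3);
    // Both are fat pointers: a data pointer plus a length.
    assert_eq!(mem::size_of_val(&on_stack), 2 * mem::size_of::<usize>());
    assert_eq!(mem::size_of::<&str>(), 2 * mem::size_of::<usize>());
}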
/// Literal strings translate to slices into static memory. This is different from
/// trans_slice_vec() above because it doesn't need to copy the content anywhere.
pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lit_expr: &hir::Expr,
str_lit: InternedString,
dest: Dest)
-> Block<'blk, 'tcx> {
debug!("trans_lit_str(lit_expr={:?}, dest={:?})", lit_expr, dest);
match dest {
Ignore => bcx,
SaveIn(lldest) => {
let bytes = str_lit.len();
let llbytes = C_uint(bcx.ccx(), bytes);
let llcstr = C_cstr(bcx.ccx(), str_lit, false);
let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx()));
Store(bcx, llcstr, expr::get_dataptr(bcx, lldest));
Store(bcx, llbytes, expr::get_meta(bcx, lldest));
bcx
}
}
}
fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
vt: &VecTypes<'tcx>,
vstore_expr: &hir::Expr,
content_expr: &hir::Expr,
dest: Dest)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("tvec::write_content");
let fcx = bcx.fcx;
let mut bcx = bcx;
debug!("write_content(vt={:?}, dest={:?}, vstore_expr={:?})",
vt, dest, vstore_expr);
match content_expr.node {
hir::ExprLit(ref lit) => {
match lit.node {
ast::LitKind::Str(ref s, _) => {
match dest {
Ignore => return bcx,
SaveIn(lldest) => {
let bytes = s.len();
let llbytes = C_uint(bcx.ccx(), bytes);
let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false);
if !bcx.unreachable.get() {
base::call_memcpy(&B(bcx), lldest, llcstr, llbytes, 1);
}
return bcx;
}
}
}
_ => {
span_bug!(content_expr.span, "unexpected evec content");
}
}
}
hir::ExprVec(ref elements) => {
match dest {
Ignore => {
for element in elements {
bcx = expr::trans_into(bcx, &element, Ignore);
}
}
SaveIn(lldest) => {
let temp_scope = fcx.push_custom_cleanup_scope();
for (i, element) in elements.iter().enumerate() {
let lleltptr = GEPi(bcx, lldest, &[i]);
debug!("writing index {} with lleltptr={:?}",
i, Value(lleltptr));
bcx = expr::trans_into(bcx, &element,
SaveIn(lleltptr));
let scope = cleanup::CustomScope(temp_scope);
// Issue #30822: mark memory as dropped after running destructor
fcx.schedule_drop_and_fill_mem(scope, lleltptr, vt.unit_ty, None);
}
fcx.pop_custom_cleanup_scope(temp_scope);
}
}
return bcx;
}
hir::ExprRepeat(ref element, ref count_expr) => {
match dest {
Ignore => {
return expr::trans_into(bcx, &element, Ignore);
}
SaveIn(lldest) => {
match eval_length(bcx.tcx(), &count_expr, "repeat count").unwrap() {
0 => expr::trans_into(bcx, &element, Ignore),
1 => expr::trans_into(bcx, &element, SaveIn(lldest)),
count => {
let elem = unpack_datum!(bcx, expr::trans(bcx, &element));
let bcx = iter_vec_loop(bcx, lldest, vt,
C_uint(bcx.ccx(), count),
|set_bcx, lleltptr, _| {
elem.shallow_copy(set_bcx, lleltptr)
});
bcx
}
}
}
}
}
_ => {
span_bug!(content_expr.span, "unexpected vec content");
}
}
}
fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &hir::Expr)
-> VecTypes<'tcx> {
let vec_ty = node_id_type(bcx, vec_expr.id);
vec_types(bcx, vec_ty.sequence_element_type(bcx.tcx()))
}
fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>)
-> VecTypes<'tcx> {
VecTypes {
unit_ty: unit_ty,
llunit_ty: type_of::type_of(bcx.ccx(), unit_ty)
}
}
fn elements_required(bcx: Block, content_expr: &hir::Expr) -> usize {
//! Figure out the number of elements we need to store this content
match content_expr.node {
hir::ExprLit(ref lit) => {
match lit.node {
ast::LitKind::Str(ref s, _) => s.len(),
_ => {
span_bug!(content_expr.span, "unexpected evec content")
}
}
},
hir::ExprVec(ref es) => es.len(),
hir::ExprRepeat(_, ref count_expr) => {
eval_length(bcx.tcx(), &count_expr, "repeat count").unwrap()
}
_ => span_bug!(content_expr.span, "unexpected vec content")
}
}
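The mapping computed by `elements_required` is easy to state in surface terms: a string literal needs one slot per byte, an `[a, b, c]` expression one per element, and `[x; N]` exactly N. A trivial, hedged analogue over plain values (helper names invented for illustration):
fn elements_for_str(s: &str) -> usize { s.len() }        // LitKind::Str: byte count
fn elements_for_vec<T>(v: &[T]) -> usize { v.len() }     // ExprVec: element count
fn elements_for_repeat(count: usize) -> usize { count }  // ExprRepeat: evaluated count
fn main() {
    assert_eq!(elements_for_str("abc"), 3);
    assert_eq!(elements_for_vec(&[1, 2, 3]), 3);
    assert_eq!(elements_for_repeat(4), 4);
}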
/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval`
/// which should be by ref.
pub fn get_fixed_base_and_len(bcx: Block,
llval: ValueRef,
vec_length: usize)
-> (ValueRef, ValueRef) {
let ccx = bcx.ccx();
let base = expr::get_dataptr(bcx, llval);
let len = C_uint(ccx, vec_length);
(base, len)
}
/// Converts a vector into the slice pair. The vector should be stored in `llval` which should be
/// by-reference. If you have a datum, you would probably prefer to call
/// `Datum::get_base_and_len()` which will handle any conversions for you.
pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
llval: ValueRef,
vec_ty: Ty<'tcx>)
-> (ValueRef, ValueRef) {
match vec_ty.sty {
ty::TyArray(_, n) => get_fixed_base_and_len(bcx, llval, n),
ty::TySlice(_) | ty::TyStr => {
let base = Load(bcx, expr::get_dataptr(bcx, llval));
let len = Load(bcx, expr::get_meta(bcx, llval));
(base, len)
}
// Only used for pattern matching.
ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => {
let inner = if type_is_sized(bcx.tcx(), ty) {
Load(bcx, llval)
} else {
llval
};
get_base_and_len(bcx, inner, ty)
},
_ => bug!("unexpected type in get_base_and_len"),
}
}
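What `get_base_and_len` produces corresponds, at the surface level, to a slice's (as_ptr, len) pair: fixed arrays have a statically known length, slices and strings carry it in the fat pointer, and boxes or references just peel off one level of indirection first. A hedged illustration in ordinary Rust (no trans involved):
fn base_and_len<T>(v: &[T]) -> (*const T, usize) {
    (v.as_ptr(), v.len())
}
fn main() {
    let arr = [1u8, 2, 3];                                // TyArray: length known statically
    let boxed: Box<[u8]> = vec![4, 5].into_boxed_slice(); // TyBox of an unsized slice
    assert_eq!(base_and_len(&arr).1, 3);
    assert_eq!(base_and_len(&boxed).1, 2);                // deref the box, then read the fat pointer
}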
fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
vt: &VecTypes<'tcx>,
count: ValueRef,
f: F)
-> Block<'blk, 'tcx> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
{
let _icx = push_ctxt("tvec::iter_vec_loop");
if bcx.unreachable.get() {
return bcx;
}
let fcx = bcx.fcx;
let loop_bcx = fcx.new_temp_block("expr_repeat");
let next_bcx = fcx.new_temp_block("expr_repeat: next");
Br(bcx, loop_bcx.llbb, DebugLoc::None);
let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
&[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
let bcx = loop_bcx;
let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
data_ptr
} else {
InBoundsGEP(bcx, data_ptr, &[loop_counter])
};
let bcx = f(bcx, lleltptr, vt.unit_ty);
let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None);
AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
let cond_val = ICmp(bcx, llvm::IntULT, plusone, count, DebugLoc::None);
CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
next_bcx
}
use rustc::ty::Ty;
pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
@ -366,24 +29,42 @@ pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
let _icx = push_ctxt("tvec::iter_vec_raw");
let fcx = bcx.fcx;
let vt = vec_types(bcx, unit_ty);
if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
if type_is_zero_size(bcx.ccx(), unit_ty) {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
iter_vec_loop(bcx, data_ptr, &vt, len, f)
if bcx.unreachable.get() {
return bcx;
}
let loop_bcx = fcx.new_block("expr_repeat");
let next_bcx = fcx.new_block("expr_repeat: next");
Br(bcx, loop_bcx.llbb, DebugLoc::None);
let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
&[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
let bcx = loop_bcx;
let bcx = f(bcx, data_ptr, unit_ty);
let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None);
AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
let cond_val = ICmp(bcx, llvm::IntULT, plusone, len, DebugLoc::None);
CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
next_bcx
} else {
// Calculate the last pointer address we want to handle.
let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]);
// Now perform the iteration.
let header_bcx = fcx.new_temp_block("iter_vec_loop_header");
let header_bcx = fcx.new_block("iter_vec_loop_header");
Br(bcx, header_bcx.llbb, DebugLoc::None);
let data_ptr =
Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]);
let not_yet_at_end =
ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None);
let body_bcx = fcx.new_temp_block("iter_vec_loop_body");
let next_bcx = fcx.new_temp_block("iter_vec_next");
let body_bcx = fcx.new_block("iter_vec_loop_body");
let next_bcx = fcx.new_block("iter_vec_next");
CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
let body_bcx = f(body_bcx, data_ptr, unit_ty);
AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr,