Use LvalueRef instead of MaybeSizedValue

Mark Simulacrum 2017-01-01 15:50:15 -07:00
parent 4c9995a3f9
commit 8038489357
10 changed files with 119 additions and 123 deletions
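For orientation, here is a minimal standalone sketch of the new shape, under the assumption that a toy model is enough (the stand-in ValueRef and LvalueTy below are simplifications, not the real rustc_trans definitions): LvalueRef carries the same pointer/metadata pair that MaybeSizedValue did (llval/llextra instead of value/meta), plus the lvalue's type, so the type no longer has to be threaded alongside the value at every call site.

// Illustrative stand-ins; the real ValueRef and LvalueTy live in rustc_trans / rustc::mir::tcx.
type ValueRef = *mut u8;

#[derive(Copy, Clone, Debug)]
struct LvalueTy(&'static str); // simplified stand-in for rustc::mir::tcx::LvalueTy

#[derive(Copy, Clone)]
pub struct LvalueRef {
    pub llval: ValueRef,   // data pointer (was MaybeSizedValue::value)
    pub llextra: ValueRef, // unsized metadata, null when sized (was MaybeSizedValue::meta)
    pub ty: LvalueTy,      // the type now travels with the value
}

impl LvalueRef {
    pub fn new_sized_ty(llval: ValueRef, ty: LvalueTy) -> LvalueRef {
        LvalueRef { llval, llextra: std::ptr::null_mut(), ty }
    }
    pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: LvalueTy) -> LvalueRef {
        LvalueRef { llval, llextra, ty }
    }
    pub fn has_extra(&self) -> bool {
        !self.llextra.is_null()
    }
}

fn main() {
    let sized = LvalueRef::new_sized_ty(0x1000 as ValueRef, LvalueTy("i32"));
    let slice = LvalueRef::new_unsized_ty(0x2000 as ValueRef, 8 as ValueRef, LvalueTy("[u8]"));
    println!("{:?}: has_extra = {}", sized.ty, sized.has_extra());                            // false
    println!("{:?}: has_extra = {}, data at {:p}", slice.ty, slice.has_extra(), slice.llval); // true
}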

View File

@@ -48,6 +48,7 @@ use std;
use llvm::{ValueRef, True, IntEQ, IntNE};
use rustc::ty::layout;
use rustc::ty::{self, Ty, AdtKind};
use mir::lvalue::LvalueRef;
use common::*;
use builder::Builder;
use glue;
@@ -64,32 +65,6 @@ pub enum BranchKind {
Single
}
#[derive(Copy, Clone)]
pub struct MaybeSizedValue {
pub value: ValueRef,
pub meta: ValueRef,
}
impl MaybeSizedValue {
pub fn sized(value: ValueRef) -> MaybeSizedValue {
MaybeSizedValue {
value: value,
meta: std::ptr::null_mut()
}
}
pub fn unsized_(value: ValueRef, meta: ValueRef) -> MaybeSizedValue {
MaybeSizedValue {
value: value,
meta: meta
}
}
pub fn has_meta(&self) -> bool {
!self.meta.is_null()
}
}
/// Given an enum, struct, closure, or tuple, extracts fields.
/// Treats closures as a struct with one variant.
/// `empty_if_no_variants` is a switch to deal with empty enums.
@@ -500,11 +475,11 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
t: Ty<'tcx>,
val: MaybeSizedValue,
val: LvalueRef<'tcx>,
discr: Disr,
ix: usize
) -> ValueRef {
let t = val.ty.to_ty(bcx.tcx());
let l = bcx.ccx.layout_of(t);
debug!("trans_field_ptr on {} represented as {:#?}", t, l);
// Note: if this ever needs to generate conditionals (e.g., if we
@@ -520,7 +495,7 @@ pub fn trans_field_ptr<'a, 'tcx>(
layout::Vector { count, .. } => {
assert_eq!(discr.0, 0);
assert!((ix as u64) < count);
bcx.struct_gep(val.value, ix)
bcx.struct_gep(val.llval, ix)
}
layout::General { discr: d, ref variants, .. } => {
let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false);
@@ -532,7 +507,7 @@ pub fn trans_field_ptr<'a, 'tcx>(
layout::UntaggedUnion { .. } => {
let fields = compute_fields(bcx.ccx, t, 0, false);
let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]);
bcx.pointercast(val.value, ty.ptr_to())
bcx.pointercast(val.llval, ty.ptr_to())
}
layout::RawNullablePointer { nndiscr, .. } |
layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => {
@@ -541,14 +516,14 @@ pub fn trans_field_ptr<'a, 'tcx>(
// (e.g., Result or Either with () as one side.)
let ty = type_of::type_of(bcx.ccx, nullfields[ix]);
assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0);
bcx.pointercast(val.value, ty.ptr_to())
bcx.pointercast(val.llval, ty.ptr_to())
}
layout::RawNullablePointer { nndiscr, .. } => {
let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
assert_eq!(ix, 0);
assert_eq!(discr.0, nndiscr);
let ty = type_of::type_of(bcx.ccx, nnty);
bcx.pointercast(val.value, ty.ptr_to())
bcx.pointercast(val.llval, ty.ptr_to())
}
layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
assert_eq!(discr.0, nndiscr);
@@ -564,7 +539,7 @@ fn struct_field_ptr<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
st: &layout::Struct,
fields: &Vec<Ty<'tcx>>,
val: MaybeSizedValue,
val: LvalueRef,
ix: usize,
needs_cast: bool
) -> ValueRef {
@@ -576,9 +551,9 @@ fn struct_field_ptr<'a, 'tcx>(
type_of::in_memory_type_of(ccx, fields[i])
}).collect::<Vec<_>>();
let real_ty = Type::struct_(ccx, &fields[..], st.packed);
bcx.pointercast(val.value, real_ty.ptr_to())
bcx.pointercast(val.llval, real_ty.ptr_to())
} else {
val.value
val.llval
};
// Simple case - we can just GEP the field
@@ -600,7 +575,7 @@ fn struct_field_ptr<'a, 'tcx>(
}
// There's no metadata available; log the case and just do the GEP.
if !val.has_meta() {
if !val.has_extra() {
debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, Value(ptr_val));
return bcx.struct_gep(ptr_val, ix);
@@ -621,7 +596,7 @@ fn struct_field_ptr<'a, 'tcx>(
// The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
// the `y` field has 16-bit alignment.
let meta = val.meta;
let meta = val.llextra;
let offset = st.offsets[ix].bytes();
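The net effect of the adt.rs hunks above: trans_field_ptr (and struct_field_ptr) no longer take the type `t` as a separate argument next to an untyped MaybeSizedValue; the LvalueRef carries its type, and the function recovers it with `let t = val.ty.to_ty(bcx.tcx())`. A self-contained toy of that pattern follows; every name and the layout arithmetic in it are invented for illustration, not rustc APIs.

// Toy sketch: the handle carries its own type, so field access needs no extra `t` argument.
#[derive(Copy, Clone)]
enum Ty { PairOfU64, Unit }

#[derive(Copy, Clone)]
struct Lvalue { addr: usize, ty: Ty } // plays the role of LvalueRef: pointer + type together

fn field_addr(val: Lvalue, ix: usize) -> usize {
    match val.ty {
        Ty::PairOfU64 => {
            assert!(ix < 2);
            val.addr + ix * 8 // pretend each field is 8 bytes
        }
        Ty::Unit => val.addr, // zero-sized: any "field" aliases the base
    }
}

fn main() {
    let pair = Lvalue { addr: 0x1000, ty: Ty::PairOfU64 };
    let unit = Lvalue { addr: 0x2000, ty: Ty::Unit };
    // Old shape, for comparison: field_addr(Ty::PairOfU64, untyped_pair, 1)
    println!("pair field 1 at {:#x}", field_addr(pair, 1));
    println!("unit field 0 at {:#x}", field_addr(unit, 0));
}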

View File

@@ -47,6 +47,7 @@ use session::config::{self, NoDebugInfo};
use rustc_incremental::IncrementalHashesMap;
use session::{self, DataTypeKind, Session};
use abi::{self, Abi, FnType};
use mir::lvalue::LvalueRef;
use adt;
use attributes;
use builder::Builder;
@@ -278,8 +279,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
monomorphize::field_ty(bcx.tcx(), substs_b, f)
});
let src = adt::MaybeSizedValue::sized(src);
let dst = adt::MaybeSizedValue::sized(dst);
let src = LvalueRef::new_sized_ty(src, src_ty);
let dst = LvalueRef::new_sized_ty(dst, dst_ty);
let iter = src_fields.zip(dst_fields).enumerate();
for (i, (src_fty, dst_fty)) in iter {
@@ -287,8 +288,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
continue;
}
let src_f = adt::trans_field_ptr(bcx, src_ty, src, Disr(0), i);
let dst_f = adt::trans_field_ptr(bcx, dst_ty, dst, Disr(0), i);
let src_f = adt::trans_field_ptr(bcx, src, Disr(0), i);
let dst_f = adt::trans_field_ptr(bcx, dst, Disr(0), i);
if src_fty == dst_fty {
memcpy_ty(bcx, dst_f, src_f, src_fty, None);
} else {
@@ -620,11 +621,12 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// final ret value
bcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot")
};
let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
// Can return unsized value
let dest_val = LvalueRef::new_sized_ty(dest, sig.output());
let mut llarg_idx = fn_ty.ret.is_indirect() as usize;
let mut arg_idx = 0;
for (i, arg_ty) in sig.inputs().iter().enumerate() {
let lldestptr = adt::trans_field_ptr(&bcx, sig.output(), dest_val, Disr::from(disr), i);
let lldestptr = adt::trans_field_ptr(&bcx, dest_val, Disr::from(disr), i);
let arg = &fn_ty.args[arg_idx];
arg_idx += 1;
if common::type_is_fat_ptr(bcx.ccx, arg_ty) {

View File

@@ -26,7 +26,7 @@ use base;
use builder::Builder;
use common::{self, CrateContext, SharedCrateContext};
use cleanup::CleanupScope;
use adt::MaybeSizedValue;
use mir::lvalue::LvalueRef;
use consts;
use declare;
use value::Value;
@@ -364,7 +364,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
// Call the by-ref closure body with `self` in a cleanup scope,
// to drop `self` when the body returns, or in case it unwinds.
let self_scope = CleanupScope::schedule_drop_mem(
&bcx, MaybeSizedValue::sized(llenv), closure_ty
&bcx, LvalueRef::new_sized_ty(llenv, closure_ty)
);
let llfn = callee.reify(bcx.ccx);

View File

@@ -20,12 +20,12 @@
use llvm::BasicBlockRef;
use base;
use adt::MaybeSizedValue;
use mir::lvalue::LvalueRef;
use rustc::mir::tcx::LvalueTy;
use builder::Builder;
use common::Funclet;
use glue;
use type_::Type;
use rustc::ty::Ty;
pub struct CleanupScope<'tcx> {
// Cleanup to run upon scope exit.
@@ -37,14 +37,13 @@ pub struct CleanupScope<'tcx> {
#[derive(Copy, Clone)]
pub struct DropValue<'tcx> {
val: MaybeSizedValue,
ty: Ty<'tcx>,
val: LvalueRef<'tcx>,
skip_dtor: bool,
}
impl<'tcx> DropValue<'tcx> {
fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &Builder<'a, 'tcx>) {
glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet)
glue::call_drop_glue(bcx, self.val, self.skip_dtor, funclet)
}
/// Creates a landing pad for the top scope. The landing pad will perform all cleanups necessary
@@ -96,12 +95,16 @@ impl<'tcx> DropValue<'tcx> {
impl<'a, 'tcx> CleanupScope<'tcx> {
/// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
pub fn schedule_drop_mem(
bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx>
bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx>
) -> CleanupScope<'tcx> {
if !bcx.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
if let LvalueTy::Downcast { .. } = val.ty {
bug!("Cannot drop downcast ty yet");
}
if !bcx.ccx.shared().type_needs_drop(val.ty.to_ty(bcx.tcx())) {
return CleanupScope::noop();
}
let drop = DropValue {
val: val,
ty: ty,
skip_dtor: false,
};
@@ -114,15 +117,19 @@ impl<'a, 'tcx> CleanupScope<'tcx> {
/// and dropping the contents associated with that variant
/// *without* executing any associated drop implementation.
pub fn schedule_drop_adt_contents(
bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx>
bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx>
) -> CleanupScope<'tcx> {
if let LvalueTy::Downcast { .. } = val.ty {
bug!("Cannot drop downcast ty yet");
}
// `if` below could be "!contents_needs_drop"; skipping drop
// is just an optimization, so sound to be conservative.
if !bcx.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
if !bcx.ccx.shared().type_needs_drop(val.ty.to_ty(bcx.tcx())) {
return CleanupScope::noop();
}
let drop = DropValue {
val: val,
ty: ty,
skip_dtor: true,
};
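The cleanup changes above follow the same pattern: DropValue loses its separate `ty` field, schedule_drop_mem and schedule_drop_adt_contents read the type back from `val.ty` for the type_needs_drop check, and downcast lvalues are rejected with `bug!`. A self-contained toy of that control flow (all names and the needs_drop flag below are stand-ins, not rustc APIs):

#[allow(dead_code)] // Downcast is modelled but never constructed in this toy
#[derive(Copy, Clone)]
enum LvalueTy { Ty { needs_drop: bool }, Downcast }

#[derive(Copy, Clone)]
struct Lvalue { ty: LvalueTy }

#[allow(dead_code)] // the fields exist only to mirror the real struct's shape
struct DropValue { val: Lvalue, skip_dtor: bool } // no separate `ty` field any more

fn schedule_drop_mem(val: Lvalue) -> Option<DropValue> {
    let needs_drop = match val.ty {
        LvalueTy::Downcast => panic!("cannot drop downcast ty yet"), // mirrors the bug!() guard
        LvalueTy::Ty { needs_drop } => needs_drop, // mirrors type_needs_drop(val.ty.to_ty(tcx))
    };
    if !needs_drop {
        return None; // mirrors CleanupScope::noop()
    }
    Some(DropValue { val, skip_dtor: false })
}

fn main() {
    let droppable = Lvalue { ty: LvalueTy::Ty { needs_drop: true } };
    let trivial = Lvalue { ty: LvalueTy::Ty { needs_drop: false } };
    println!("droppable scheduled: {}", schedule_drop_mem(droppable).is_some());
    println!("trivial scheduled:   {}", schedule_drop_mem(trivial).is_some());
}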

View File

@@ -22,7 +22,8 @@ use rustc::ty::subst::{Substs};
use rustc::traits;
use rustc::ty::{self, AdtKind, Ty, TypeFoldable};
use rustc::ty::subst::Kind;
use adt::{self, MaybeSizedValue};
use mir::lvalue::LvalueRef;
use adt;
use base::*;
use callee::Callee;
use cleanup::CleanupScope;
@@ -39,11 +40,8 @@ use builder::Builder;
use syntax_pos::DUMMY_SP;
pub fn trans_exchange_free_ty<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
ptr: MaybeSizedValue,
content_ty: Ty<'tcx>
) {
pub fn trans_exchange_free_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) {
let content_ty = ptr.ty.to_ty(bcx.tcx());
let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem);
let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
let callee = Callee::def(bcx.ccx, def_id, substs);
@@ -51,7 +49,7 @@ pub fn trans_exchange_free_ty<'a, 'tcx>(
let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
let llret = bcx.call(callee.reify(bcx.ccx),
&[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize], None);
&[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize], None);
fn_ty.apply_attrs_callsite(llret);
}
@@ -94,17 +92,17 @@ pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'t
}
}
fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: MaybeSizedValue, t: Ty<'tcx>) {
call_drop_glue(bcx, args, t, false, None)
fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: LvalueRef<'tcx>) {
call_drop_glue(bcx, args, false, None)
}
pub fn call_drop_glue<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
mut args: MaybeSizedValue,
t: Ty<'tcx>,
mut args: LvalueRef<'tcx>,
skip_dtor: bool,
funclet: Option<&'a Funclet>,
) {
let t = args.ty.to_ty(bcx.tcx());
// NB: v is an *alias* of type t here, not a direct value.
debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
if bcx.ccx.shared().type_needs_drop(t) {
@@ -117,11 +115,11 @@ pub fn call_drop_glue<'a, 'tcx>(
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx.shared(), t);
if glue_type != t {
args.value = bcx.pointercast(args.value, type_of(ccx, glue_type).ptr_to());
args.llval = bcx.pointercast(args.llval, type_of(ccx, glue_type).ptr_to());
}
// No drop-hint ==> call standard drop glue
bcx.call(glue, &[args.value, args.meta][..1 + args.has_meta() as usize],
bcx.call(glue, &[args.llval, args.llextra][..1 + args.has_extra() as usize],
funclet.map(|b| b.bundle()));
}
}
@@ -194,9 +192,9 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
let value = get_param(llfn, 0);
let ptr = if ccx.shared().type_is_sized(t) {
MaybeSizedValue::sized(value)
LvalueRef::new_sized_ty(value, t)
} else {
MaybeSizedValue::unsized_(value, get_param(llfn, 1))
LvalueRef::new_unsized_ty(value, get_param(llfn, 1), t)
};
let skip_dtor = match g {
@@ -211,14 +209,14 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
// a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor);
let ptr = if !bcx.ccx.shared().type_is_sized(content_ty) {
let llbox = bcx.load(get_dataptr(&bcx, ptr.value));
let info = bcx.load(get_meta(&bcx, ptr.value));
MaybeSizedValue::unsized_(llbox, info)
let llbox = bcx.load(get_dataptr(&bcx, ptr.llval));
let info = bcx.load(get_meta(&bcx, ptr.llval));
LvalueRef::new_unsized_ty(llbox, info, content_ty)
} else {
MaybeSizedValue::sized(bcx.load(ptr.value))
LvalueRef::new_sized_ty(bcx.load(ptr.llval), content_ty)
};
drop_ty(&bcx, ptr, content_ty);
trans_exchange_free_ty(&bcx, ptr, content_ty);
drop_ty(&bcx, ptr);
trans_exchange_free_ty(&bcx, ptr);
bcx
}
ty::TyDynamic(..) => {
@@ -226,8 +224,8 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
assert!(!skip_dtor);
let dtor = bcx.load(ptr.meta);
bcx.call(dtor, &[ptr.value], None);
let dtor = bcx.load(ptr.llextra);
bcx.call(dtor, &[ptr.llval], None);
bcx
}
ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => {
@@ -245,7 +243,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
let contents_scope = if !shallow_drop {
CleanupScope::schedule_drop_adt_contents(&bcx, ptr, t)
CleanupScope::schedule_drop_adt_contents(&bcx, ptr)
} else {
CleanupScope::noop()
};
@@ -262,7 +260,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
let callee = Callee::def(bcx.ccx, dtor_did, vtbl.substs);
let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
let llret;
let args = &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize];
let args = &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize];
if let Some(landing_pad) = contents_scope.landing_pad {
let normal_bcx = bcx.build_new_block("normal-return");
llret = bcx.invoke(callee.reify(ccx), args, normal_bcx.llbb(), landing_pad, None);
@@ -279,7 +277,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
}
_ => {
if bcx.ccx.shared().type_needs_drop(t) {
drop_structural_ty(bcx, ptr, t)
drop_structural_ty(bcx, ptr)
} else {
bcx
}
@@ -396,60 +394,57 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
}
// Iterates through the elements of a structural type, dropping them.
fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>,
ptr: MaybeSizedValue,
t: Ty<'tcx>)
-> Builder<'a, 'tcx> {
fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) -> Builder<'a, 'tcx> {
fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
t: Ty<'tcx>,
av: adt::MaybeSizedValue,
av: LvalueRef<'tcx>,
variant: &'tcx ty::VariantDef,
substs: &Substs<'tcx>) {
let tcx = cx.tcx();
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i);
drop_ty(&cx, MaybeSizedValue::sized(field_ptr), arg);
let field_ptr = adt::trans_field_ptr(&cx, av, Disr::from(variant.disr_val), i);
drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg));
}
}
let mut cx = cx;
let t = ptr.ty.to_ty(cx.tcx());
match t.sty {
ty::TyClosure(def_id, substs) => {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
let llupvar = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i);
drop_ty(&cx, MaybeSizedValue::sized(llupvar), upvar_ty);
let llupvar = adt::trans_field_ptr(&cx, ptr, Disr(0), i);
drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty));
}
}
ty::TyArray(_, n) => {
let base = get_dataptr(&cx, ptr.value);
let base = get_dataptr(&cx, ptr.llval);
let len = C_uint(cx.ccx, n);
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, base, unit_ty, len,
|bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
|bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty)));
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, ptr.value, unit_ty, ptr.meta,
|bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
cx = tvec::slice_for_each(&cx, ptr.llval, unit_ty, ptr.llextra,
|bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty)));
}
ty::TyTuple(ref args) => {
for (i, arg) in args.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i);
drop_ty(&cx, MaybeSizedValue::sized(llfld_a), *arg);
let llfld_a = adt::trans_field_ptr(&cx, ptr, Disr(0), i);
drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg));
}
}
ty::TyAdt(adt, substs) => match adt.adt_kind() {
AdtKind::Struct => {
let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr::from(discr), i);
let llfld_a = adt::trans_field_ptr(&cx, ptr, Disr::from(discr), i);
let ptr = if cx.ccx.shared().type_is_sized(field_ty) {
MaybeSizedValue::sized(llfld_a)
LvalueRef::new_sized_ty(llfld_a, field_ty)
} else {
MaybeSizedValue::unsized_(llfld_a, ptr.meta)
LvalueRef::new_unsized_ty(llfld_a, ptr.llextra, field_ty)
};
drop_ty(&cx, ptr, field_ty);
drop_ty(&cx, ptr);
}
}
AdtKind::Union => {
@@ -461,16 +456,16 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>,
// NB: we must hit the discriminant first so that structural
// comparison knows not to proceed when the discriminants differ.
match adt::trans_switch(&cx, t, ptr.value, false) {
match adt::trans_switch(&cx, t, ptr.llval, false) {
(adt::BranchKind::Single, None) => {
if n_variants != 0 {
assert!(n_variants == 1);
iter_variant(&cx, t, ptr, &adt.variants[0], substs);
iter_variant(&cx, ptr, &adt.variants[0], substs);
}
}
(adt::BranchKind::Switch, Some(lldiscrim_a)) => {
let tcx = cx.tcx();
drop_ty(&cx, MaybeSizedValue::sized(lldiscrim_a), tcx.types.isize);
drop_ty(&cx, LvalueRef::new_sized_ty(lldiscrim_a, tcx.types.isize));
// Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that
@@ -496,7 +491,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: Builder<'a, 'tcx>,
let variant_cx = cx.build_new_block(&variant_cx_name);
let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val));
variant_cx.add_case(llswitch, case_val, variant_cx.llbb());
iter_variant(&variant_cx, t, ptr, variant, substs);
iter_variant(&variant_cx, ptr, variant, substs);
variant_cx.br(next_cx.llbb());
}
cx = next_cx;
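One idiom that recurs throughout this commit is `&[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize]`: build a two-element array and slice it to length 1 or 2, so a single call site passes either just the data pointer (sized case) or the pointer plus its unsized metadata (fat-pointer case). A standalone illustration, with plain integers standing in for LLVM ValueRefs:

fn main() {
    let llval: u64 = 0x1000; // stand-in for the data pointer
    let llextra: u64 = 16;   // stand-in for the metadata (slice length or vtable pointer)
    for has_extra in [false, true] {
        // `false as usize` is 0 and `true as usize` is 1, so the slice keeps
        // one element for sized data and two for unsized data.
        let args = &[llval, llextra][..1 + has_extra as usize];
        println!("has_extra = {} -> passing {} arg(s): {:?}", has_extra, args.len(), args);
    }
}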

View File

@@ -16,6 +16,7 @@ use llvm;
use llvm::{ValueRef};
use abi::{Abi, FnType};
use adt;
use mir::lvalue::LvalueRef;
use base::*;
use common::*;
use declare;
@@ -549,10 +550,10 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// destructors, and the contents are SIMD
// etc.
assert!(!bcx.ccx.shared().type_needs_drop(arg_type));
let arg = adt::MaybeSizedValue::sized(llarg);
let arg = LvalueRef::new_sized_ty(llarg, arg_type);
(0..contents.len())
.map(|i| {
bcx.load(adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i))
bcx.load(adt::trans_field_ptr(bcx, arg, Disr(0), i))
})
.collect()
}

View File

@@ -14,7 +14,7 @@ use rustc::middle::lang_items;
use rustc::ty::{self, layout};
use rustc::mir;
use abi::{Abi, FnType, ArgType};
use adt::{self, MaybeSizedValue};
use adt;
use base::{self, Lifetime};
use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
use builder::Builder;
@@ -37,7 +37,7 @@ use std::cmp;
use super::{MirContext, LocalRef};
use super::analyze::CleanupKind;
use super::constant::Const;
use super::lvalue::{LvalueRef};
use super::lvalue::LvalueRef;
use super::operand::OperandRef;
use super::operand::OperandValue::{Pair, Ref, Immediate};
@@ -251,11 +251,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} else {
lvalue.llval
};
MaybeSizedValue::sized(value)
LvalueRef::new_sized_ty(value, ty)
} else {
MaybeSizedValue::unsized_(lvalue.llval, lvalue.llextra)
LvalueRef::new_unsized_ty(lvalue.llval, lvalue.llextra, ty)
};
let args = &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize];
let args = &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize];
if let Some(unwind) = unwind {
bcx.invoke(
drop_fn,
@@ -707,9 +707,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Handle both by-ref and immediate tuples.
match tuple.val {
Ref(llval) => {
let base = adt::MaybeSizedValue::sized(llval);
for (n, &ty) in arg_types.iter().enumerate() {
let ptr = adt::trans_field_ptr(bcx, tuple.ty, base, Disr(0), n);
let ptr = adt::trans_field_ptr(
bcx, LvalueRef::new_sized_ty(llval, tuple.ty), Disr(0), n
);
let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty);
Pair(lldata, llextra)

View File

@@ -44,6 +44,18 @@ impl<'tcx> LvalueRef<'tcx> {
LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
}
pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> {
LvalueRef::new_sized(llval, LvalueTy::from_ty(ty))
}
pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> {
LvalueRef {
llval: llval,
llextra: llextra,
ty: LvalueTy::from_ty(ty),
}
}
pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
let ty = self.ty.to_ty(ccx.tcx());
match ty.sty {
@@ -55,6 +67,10 @@ impl<'tcx> LvalueRef<'tcx> {
_ => bug!("unexpected type `{}` in LvalueRef::len", ty)
}
}
pub fn has_extra(&self) -> bool {
!self.llextra.is_null()
}
}
impl<'a, 'tcx> MirContext<'a, 'tcx> {
@@ -132,11 +148,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let discr = discr as u64;
let is_sized = self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx));
let base = if is_sized {
adt::MaybeSizedValue::sized(tr_base.llval)
LvalueRef::new_sized_ty(tr_base.llval, base_ty)
} else {
adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra)
LvalueRef::new_unsized_ty(tr_base.llval, tr_base.llextra, base_ty)
};
let llprojected = adt::trans_field_ptr(bcx, base_ty, base, Disr(discr),
let llprojected = adt::trans_field_ptr(bcx, base, Disr(discr),
field.index());
let llextra = if is_sized {
ptr::null_mut()

View File

@@ -38,7 +38,7 @@ use rustc_data_structures::indexed_vec::{IndexVec, Idx};
pub use self::constant::trans_static_initializer;
use self::analyze::CleanupKind;
use self::lvalue::{LvalueRef};
use self::lvalue::LvalueRef;
use rustc::mir::traversal;
use self::operand::{OperandRef, OperandValue};
@@ -578,7 +578,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
mod analyze;
mod block;
mod constant;
mod lvalue;
pub mod lvalue;
mod operand;
mod rvalue;
mod statement;

View File

@@ -110,10 +110,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPs for zero-sized fields.
if !common::type_is_zero_size(bcx.ccx, op.ty) {
let val = adt::MaybeSizedValue::sized(dest.llval);
let val = LvalueRef::new_sized_ty(dest.llval, dest_ty);
let field_index = active_field_index.unwrap_or(i);
let lldest_i = adt::trans_field_ptr(&bcx, dest_ty, val, disr,
field_index);
let lldest_i = adt::trans_field_ptr(&bcx, val, disr, field_index);
self.store_operand(&bcx, lldest_i, op, None);
}
}