rustc_trans: keep a layout instead of a type in {Lvalue,Operand}Ref.

Eduard-Mihai Burtescu 2017-09-20 18:17:23 +03:00
parent 88f70323e4
commit 1477119344
17 changed files with 373 additions and 413 deletions
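The heart of the change, condensed from the mir/lvalue.rs and mir/operand.rs hunks further down (all other fields and imports as in those hunks): LvalueRef and OperandRef stop carrying a bare monomorphized type and keep the precomputed FullLayout instead, so the type stays reachable as layout.ty and call sites no longer re-query ccx.layout_of at every use.

    // Before: the type only; layout recomputed at each use site.
    pub struct LvalueRef<'tcx> {
        pub llval: ValueRef,
        pub llextra: ValueRef,
        /// Monomorphized type of this lvalue, including variant information
        pub ty: LvalueTy<'tcx>,
        pub alignment: Alignment,
    }

    // After: the layout is kept; the type rides along as `layout.ty`.
    pub struct LvalueRef<'tcx> {
        pub llval: ValueRef,
        pub llextra: ValueRef,
        pub layout: FullLayout<'tcx>,
        pub alignment: Alignment,
    }

OperandRef undergoes the same swap (ty: Ty<'tcx> becomes layout: FullLayout<'tcx>), and a new is_zst helper on layouts absorbs the scattered zero-size checks.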

View File

@ -771,6 +771,15 @@ impl Abi {
}
}
/// Returns true if the type is a ZST and not unsized.
pub fn is_zst(&self) -> bool {
match *self {
Abi::Scalar(_) => false,
Abi::Vector { count, .. } => count == 0,
Abi::Aggregate { sized, size, .. } => sized && size.bytes() == 0
}
}
pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
let dl = cx.data_layout();
@ -1377,7 +1386,7 @@ impl<'a, 'tcx> Layout {
no_explicit_discriminants {
// Nullable pointer optimization
for i in 0..2 {
if !variants[1 - i].iter().all(|f| f.size(dl).bytes() == 0) {
if !variants[1 - i].iter().all(|f| f.is_zst()) {
continue;
}
@ -1456,7 +1465,7 @@ impl<'a, 'tcx> Layout {
for i in st.fields.index_by_increasing_offset() {
let field = field_layouts[i];
let field_align = field.align(dl);
if field.size(dl).bytes() != 0 || field_align.abi() != 1 {
if !field.is_zst() || field_align.abi() != 1 {
start_align = start_align.min(field_align);
break;
}
@ -2145,6 +2154,11 @@ impl<'a, 'tcx> FullLayout<'tcx> {
self.abi.is_packed()
}
/// Returns true if the type is a ZST and not unsized.
pub fn is_zst(&self) -> bool {
self.abi.is_zst()
}
pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
self.abi.size(cx)
}
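A note on the `sized &&` guard in Abi::is_zst: an unsized type reports a zero-byte size for its sized prefix but must not count as a ZST. The mapping below is an illustration of that rule, not part of the diff:

    // ()   -> Abi::Aggregate { sized: true,  size: 0, .. } -> is_zst() == true
    // [u8] -> Abi::Aggregate { sized: false, size: 0, .. } -> is_zst() == false
    // u32  -> Abi::Scalar(..)                               -> is_zst() == false

This mirrors the `!layout.is_unsized() && layout.size(ccx).bytes() == 0` tests that the helper replaces in the hunks below.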

View File

@ -697,7 +697,7 @@ impl<'a, 'tcx> FnType<'tcx> {
if ty.is_bool() {
arg.attrs.set(ArgAttribute::ZExt);
} else {
if arg.layout.size(ccx).bytes() == 0 {
if arg.layout.is_zst() {
// For some forsaken reason, x86_64-pc-windows-gnu
// doesn't ignore zero-sized struct arguments.
// The same is true for s390x-unknown-linux-gnu.

View File

@ -16,10 +16,9 @@ use type_::Type;
use builder::Builder;
use rustc::hir;
use rustc::ty::Ty;
use rustc::ty::layout::Align;
use mir::lvalue::{LvalueRef, Alignment};
use mir::lvalue::LvalueRef;
use mir::operand::OperandValue;
use std::ffi::CString;
use syntax::ast::AsmDialect;
@ -29,7 +28,7 @@ use libc::{c_uint, c_char};
pub fn trans_inline_asm<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
ia: &hir::InlineAsm,
outputs: Vec<(ValueRef, Ty<'tcx>)>,
outputs: Vec<LvalueRef<'tcx>>,
mut inputs: Vec<ValueRef>
) {
let mut ext_constraints = vec![];
@ -37,21 +36,15 @@ pub fn trans_inline_asm<'a, 'tcx>(
// Prepare the output operands
let mut indirect_outputs = vec![];
for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() {
let val = if out.is_rw || out.is_indirect {
Some(LvalueRef::new_sized(val, ty,
Alignment::Packed(Align::from_bytes(1, 1).unwrap())).load(bcx))
} else {
None
};
for (i, (out, lvalue)) in ia.outputs.iter().zip(&outputs).enumerate() {
if out.is_rw {
inputs.push(val.unwrap().immediate());
inputs.push(lvalue.load(bcx).immediate());
ext_constraints.push(i.to_string());
}
if out.is_indirect {
indirect_outputs.push(val.unwrap().immediate());
indirect_outputs.push(lvalue.load(bcx).immediate());
} else {
output_types.push(bcx.ccx.llvm_type_of(ty));
output_types.push(bcx.ccx.llvm_type_of(lvalue.layout.ty));
}
}
if !indirect_outputs.is_empty() {
@ -106,9 +99,9 @@ pub fn trans_inline_asm<'a, 'tcx>(
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, &(val, _))) in outputs.enumerate() {
for (i, (_, &lvalue)) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i as u64) };
bcx.store(v, val, None);
OperandValue::Immediate(v).store(bcx, lvalue);
}
// Store mark in a metadata node so we can map LLVM errors
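Condensed, the asm change: trans_inline_asm now receives whole lvalues for its outputs instead of (ValueRef, Ty) pairs, so reads and writes go through the layout-carrying helpers rather than through a hand-built LvalueRef with a forced 1-byte packed alignment. The new shape, as in the hunk above:

    // outputs: Vec<(ValueRef, Ty<'tcx>)>  ->  outputs: Vec<LvalueRef<'tcx>>
    // read-write / indirect outputs are read with:
    //     lvalue.load(bcx).immediate()
    // and results are written back with:
    //     OperandValue::Immediate(v).store(bcx, lvalue);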

View File

@ -40,7 +40,7 @@ use rustc::middle::lang_items::StartFnLangItem;
use rustc::middle::trans::{Linkage, Visibility, Stats};
use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::Align;
use rustc::ty::layout::{Align, FullLayout};
use rustc::ty::maps::Providers;
use rustc::dep_graph::{DepNode, DepKind, DepConstructor};
use rustc::middle::cstore::{self, LinkMeta, LinkagePreference};
@ -55,10 +55,7 @@ use builder::Builder;
use callee;
use common::{C_bool, C_bytes_in_context, C_i32, C_usize};
use collector::{self, TransItemCollectionMode};
use common::{C_struct_in_context, C_array};
use common::CrateContext;
use common::{type_is_zero_size, val_ty};
use common;
use common::{self, C_struct_in_context, C_array, CrateContext, val_ty};
use consts;
use context::{self, LocalCrateContext, SharedCrateContext};
use debuginfo;
@ -88,7 +85,7 @@ use syntax::attr;
use rustc::hir;
use syntax::ast;
use mir::operand::{OperandRef, OperandValue};
use mir::operand::OperandValue;
pub use rustc_trans_utils::{find_exported_symbols, check_for_rustc_errors_attr};
pub use rustc_trans_utils::trans_item::linkage_by_name;
@ -249,8 +246,8 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
src: LvalueRef<'tcx>,
dst: LvalueRef<'tcx>) {
let src_ty = src.ty.to_ty(bcx.tcx());
let dst_ty = dst.ty.to_ty(bcx.tcx());
let src_ty = src.layout.ty;
let dst_ty = dst.layout.ty;
let coerce_ptr = || {
let (base, info) = match src.load(bcx).val {
OperandValue::Pair(base, info) => {
@ -266,10 +263,7 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
}
OperandValue::Ref(..) => bug!()
};
OperandRef {
val: OperandValue::Pair(base, info),
ty: dst_ty
}.store(bcx, dst);
OperandValue::Pair(base, info).store(bcx, dst);
};
match (&src_ty.sty, &dst_ty.sty) {
(&ty::TyRef(..), &ty::TyRef(..)) |
@ -288,15 +282,12 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let src_f = src.project_field(bcx, i);
let dst_f = dst.project_field(bcx, i);
let src_f_ty = src_f.ty.to_ty(bcx.tcx());
let dst_f_ty = dst_f.ty.to_ty(bcx.tcx());
if type_is_zero_size(bcx.ccx, dst_f_ty) {
if dst_f.layout.is_zst() {
continue;
}
if src_f_ty == dst_f_ty {
memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f_ty,
if src_f.layout.ty == dst_f.layout.ty {
memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f.layout,
(src_f.alignment | dst_f.alignment).non_abi());
} else {
coerce_unsized_into(bcx, src_f, dst_f);
@ -409,17 +400,17 @@ pub fn memcpy_ty<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
dst: ValueRef,
src: ValueRef,
t: Ty<'tcx>,
layout: FullLayout<'tcx>,
align: Option<Align>,
) {
let ccx = bcx.ccx;
let size = ccx.size_of(t).bytes();
let size = layout.size(ccx).bytes();
if size == 0 {
return;
}
let align = align.unwrap_or_else(|| ccx.align_of(t));
let align = align.unwrap_or_else(|| layout.align(ccx));
call_memcpy(bcx, dst, src, C_usize(ccx, size), align);
}
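memcpy_ty now takes a FullLayout rather than a Ty, so both the size and the fallback alignment come from the one layout already in hand. The coerce_unsized_into call above shows the resulting call-site shape:

    // was: memcpy_ty(bcx, dst, src, ty, align)
    memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f.layout,
              (src_f.alignment | dst_f.alignment).non_abi());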

View File

@ -58,7 +58,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
off: Size)
-> Result<(), Memory> {
if !off.is_abi_aligned(layout.align(ccx)) {
if layout.size(ccx).bytes() > 0 {
if !layout.is_zst() {
return Err(Memory);
}
return Ok(());

View File

@ -58,9 +58,7 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -
match layout.abi {
layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true,
layout::Abi::Aggregate { .. } => {
!layout.is_unsized() && layout.size(ccx).bytes() == 0
}
layout::Abi::Aggregate { .. } => layout.is_zst()
}
}
@ -83,12 +81,6 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
}
}
/// Identify types which have size zero at runtime.
pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
let layout = ccx.layout_of(ty);
!layout.is_unsized() && layout.size(ccx).bytes() == 0
}
pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
ty.needs_drop(tcx, ty::ParamEnv::empty(traits::Reveal::All))
}
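The deleted type_is_zero_size survives only as a pattern; the rewrite used throughout the commit (see the analyze.rs and intrinsic.rs hunks) is, schematically:

    // old: if common::type_is_zero_size(ccx, ty) { ... }
    // new, when only a type is at hand:
    if ccx.layout_of(ty).is_zst() { /* ... */ }
    // new, when the value already carries its layout:
    if lvalue.layout.is_zst() { /* ... */ }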

View File

@ -335,8 +335,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
signature.extend(inputs.iter().map(|&t| {
let t = match t.sty {
ty::TyArray(ct, _)
if (ct == cx.tcx().types.u8) ||
(cx.layout_of(ct).size(cx).bytes() == 0) => {
if (ct == cx.tcx().types.u8) || cx.layout_of(ct).is_zst() => {
cx.tcx().mk_imm_ptr(ct)
}
_ => t

View File

@ -22,7 +22,7 @@ use declare;
use glue;
use type_::Type;
use rustc::ty::{self, Ty};
use rustc::ty::layout::HasDataLayout;
use rustc::ty::layout::{HasDataLayout, LayoutOf};
use rustc::hir;
use syntax::ast;
use syntax::symbol::Symbol;
@ -86,7 +86,7 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
callee_ty: Ty<'tcx>,
fn_ty: &FnType,
fn_ty: &FnType<'tcx>,
args: &[OperandRef<'tcx>],
llresult: ValueRef,
span: Span) {
@ -105,7 +105,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let name = &*tcx.item_name(def_id);
let llret_ty = ccx.llvm_type_of(ret_ty);
let result = LvalueRef::new_sized(llresult, ret_ty, Alignment::AbiAligned);
let result = LvalueRef::new_sized(llresult, fn_ty.ret.layout, Alignment::AbiAligned);
let simple = get_simple_intrinsic(ccx, name);
let llval = match name {
@ -179,7 +179,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
}
"init" => {
let ty = substs.type_at(0);
if !type_is_zero_size(ccx, ty) {
if !ccx.layout_of(ty).is_zst() {
// Just zero out the stack slot.
// If we store a zero constant, LLVM will drown in vreg allocation for large data
// structures, and the generated code will be awful. (A telltale sign of this is
@ -247,7 +247,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
},
"volatile_store" => {
let tp_ty = substs.type_at(0);
let dst = LvalueRef::new_sized(args[0].immediate(), tp_ty, Alignment::AbiAligned);
let dst = args[0].deref(bcx.ccx);
if let OperandValue::Pair(a, b) = args[1].val {
bcx.volatile_store(a, dst.project_field(bcx, 0).llval);
bcx.volatile_store(b, dst.project_field(bcx, 1).llval);
@ -255,7 +255,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let val = if let OperandValue::Ref(ptr, align) = args[1].val {
bcx.load(ptr, align.non_abi())
} else {
if type_is_zero_size(ccx, tp_ty) {
if dst.layout.is_zst() {
return;
}
from_immediate(bcx, args[1].immediate())
@ -393,13 +393,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
},
"discriminant_value" => {
let val_ty = substs.type_at(0);
let adt_val = LvalueRef::new_sized(args[0].immediate(),
val_ty,
Alignment::AbiAligned);
match val_ty.sty {
match substs.type_at(0).sty {
ty::TyAdt(adt, ..) if adt.is_enum() => {
adt_val.trans_get_discr(bcx, ret_ty)
args[0].deref(bcx.ccx).trans_get_discr(bcx, ret_ty)
}
_ => C_null(llret_ty)
}
@ -612,12 +608,12 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// This assumes the type is "simple", i.e. no
// destructors, and the contents are SIMD
// etc.
assert!(!bcx.ccx.shared().type_needs_drop(arg.ty));
assert!(!bcx.ccx.shared().type_needs_drop(arg.layout.ty));
let (ptr, align) = match arg.val {
OperandValue::Ref(ptr, align) => (ptr, align),
_ => bug!()
};
let arg = LvalueRef::new_sized(ptr, arg.ty, align);
let arg = LvalueRef::new_sized(ptr, arg.layout, align);
(0..contents.len()).map(|i| {
arg.project_field(bcx, i).load(bcx).immediate()
}).collect()
@ -685,8 +681,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
} else {
OperandRef {
val: OperandValue::Immediate(llval),
ty: ret_ty
}.unpack_if_pair(bcx).store(bcx, result);
layout: result.layout
}.unpack_if_pair(bcx).val.store(bcx, result);
}
}
}

View File

@ -18,6 +18,7 @@ use rustc::mir::{self, Location, TerminatorKind, Literal};
use rustc::mir::visit::{Visitor, LvalueContext};
use rustc::mir::traversal;
use rustc::ty;
use rustc::ty::layout::LayoutOf;
use common;
use super::MirContext;
@ -34,7 +35,7 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
ty.is_box() ||
ty.is_region_ptr() ||
ty.is_simd() ||
common::type_is_zero_size(mircx.ccx, ty)
mircx.ccx.layout_of(ty).is_zst()
{
// These sorts of types are immediates that we can store
// in a ValueRef without an alloca.

View File

@ -11,7 +11,7 @@
use llvm::{self, ValueRef, BasicBlockRef};
use rustc::middle::lang_items;
use rustc::middle::const_val::{ConstEvalErr, ConstInt, ErrKind};
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::{self, TypeFoldable};
use rustc::ty::layout::LayoutOf;
use rustc::traits;
use rustc::mir;
@ -116,11 +116,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
fn_ty: FnType<'tcx>,
fn_ptr: ValueRef,
llargs: &[ValueRef],
destination: Option<(ReturnDest<'tcx>, Ty<'tcx>, mir::BasicBlock)>,
destination: Option<(ReturnDest<'tcx>, mir::BasicBlock)>,
cleanup: Option<mir::BasicBlock>
| {
if let Some(cleanup) = cleanup {
let ret_bcx = if let Some((_, _, target)) = destination {
let ret_bcx = if let Some((_, target)) = destination {
this.blocks[target]
} else {
this.unreachable_block()
@ -132,12 +132,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
cleanup_bundle);
fn_ty.apply_attrs_callsite(invokeret);
if let Some((ret_dest, ret_ty, target)) = destination {
if let Some((ret_dest, target)) = destination {
let ret_bcx = this.get_builder(target);
this.set_debug_loc(&ret_bcx, terminator.source_info);
let op = OperandRef {
val: Immediate(invokeret),
ty: ret_ty,
layout: fn_ty.ret.layout,
};
this.store_return(&ret_bcx, ret_dest, &fn_ty.ret, op);
}
@ -152,10 +152,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
}
if let Some((ret_dest, ret_ty, target)) = destination {
if let Some((ret_dest, target)) = destination {
let op = OperandRef {
val: Immediate(llret),
ty: ret_ty,
layout: fn_ty.ret.layout,
};
this.store_return(&bcx, ret_dest, &fn_ty.ret, op);
funclet_br(this, bcx, target);
@ -227,14 +227,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
LocalRef::Lvalue(tr_lvalue) => {
OperandRef {
val: Ref(tr_lvalue.llval, tr_lvalue.alignment),
ty: tr_lvalue.ty.to_ty(bcx.tcx())
layout: tr_lvalue.layout
}
}
};
let llslot = match op.val {
Immediate(_) | Pair(..) => {
let scratch = LvalueRef::alloca(&bcx, self.fn_ty.ret.layout.ty, "ret");
op.store(&bcx, scratch);
let scratch = LvalueRef::alloca(&bcx, self.fn_ty.ret.layout, "ret");
op.val.store(&bcx, scratch);
scratch.llval
}
Ref(llval, align) => {
@ -282,7 +282,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
};
let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize];
do_call(self, bcx, fn_ty, drop_fn, args,
Some((ReturnDest::Nothing, tcx.mk_nil(), target)),
Some((ReturnDest::Nothing, target)),
unwind);
}
@ -427,7 +427,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
let callee = self.trans_operand(&bcx, func);
let (instance, mut llfn) = match callee.ty.sty {
let (instance, mut llfn) = match callee.layout.ty.sty {
ty::TyFnDef(def_id, substs) => {
(Some(ty::Instance::resolve(bcx.ccx.tcx(),
ty::ParamEnv::empty(traits::Reveal::All),
@ -438,10 +438,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
ty::TyFnPtr(_) => {
(None, Some(callee.immediate()))
}
_ => bug!("{} is not callable", callee.ty)
_ => bug!("{} is not callable", callee.layout.ty)
};
let def = instance.map(|i| i.def);
let sig = callee.ty.fn_sig(bcx.tcx());
let sig = callee.layout.ty.fn_sig(bcx.tcx());
let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
let abi = sig.abi;
@ -520,7 +520,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let val = self.trans_constant(&bcx, constant);
return OperandRef {
val: Immediate(val.llval),
ty: val.ty
layout: bcx.ccx.layout_of(val.ty)
};
}
}
@ -539,7 +539,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Make a fake operand for store_return
let op = OperandRef {
val: Ref(dst.llval, Alignment::AbiAligned),
ty: sig.output(),
layout: fn_ty.ret.layout,
};
self.store_return(&bcx, ret_dest, &fn_ty.ret, op);
}
@ -577,8 +577,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// The callee needs to own the argument memory if we pass it
// by-ref, so make a local copy of non-immediate constants.
if let (&mir::Operand::Constant(_), Ref(..)) = (arg, op.val) {
let tmp = LvalueRef::alloca(&bcx, op.ty, "const");
op.store(&bcx, tmp);
let tmp = LvalueRef::alloca(&bcx, op.layout, "const");
op.val.store(&bcx, tmp);
op.val = Ref(tmp.llval, tmp.alignment);
}
@ -596,7 +596,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
};
do_call(self, bcx, fn_ty, fn_ptr, &llargs,
destination.as_ref().map(|&(_, target)| (ret_dest, sig.output(), target)),
destination.as_ref().map(|&(_, target)| (ret_dest, target)),
cleanup);
}
mir::TerminatorKind::GeneratorDrop |
@ -617,7 +617,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let imm_op = |x| OperandRef {
val: Immediate(x),
// We won't be checking the type again.
ty: bcx.tcx().types.err
layout: bcx.ccx.layout_of(bcx.tcx().types.never)
};
self.trans_argument(bcx, imm_op(a), llargs, &arg.nested[0]);
self.trans_argument(bcx, imm_op(b), llargs, &arg.nested[1]);
@ -638,8 +638,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => {
if arg.is_indirect() || arg.cast.is_some() {
let scratch = LvalueRef::alloca(bcx, arg.layout.ty, "arg");
op.store(bcx, scratch);
let scratch = LvalueRef::alloca(bcx, arg.layout, "arg");
op.val.store(bcx, scratch);
(scratch.llval, Alignment::AbiAligned, true)
} else {
(op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false)
@ -650,8 +650,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
// have scary latent bugs around.
let scratch = LvalueRef::alloca(bcx, arg.layout.ty, "arg");
base::memcpy_ty(bcx, scratch.llval, llval, op.ty, align.non_abi());
let scratch = LvalueRef::alloca(bcx, arg.layout, "arg");
base::memcpy_ty(bcx, scratch.llval, llval, op.layout, align.non_abi());
(scratch.llval, Alignment::AbiAligned, true)
}
Ref(llval, align) => (llval, align, true)
@ -682,16 +682,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
args: &[ArgType<'tcx>]) {
let tuple = self.trans_operand(bcx, operand);
let arg_types = match tuple.ty.sty {
let arg_types = match tuple.layout.ty.sty {
ty::TyTuple(ref tys, _) => tys,
_ => span_bug!(self.mir.span,
"bad final argument to \"rust-call\" fn {:?}", tuple.ty)
"bad final argument to \"rust-call\" fn {:?}", tuple.layout.ty)
};
// Handle both by-ref and immediate tuples.
match tuple.val {
Ref(llval, align) => {
let tuple_ptr = LvalueRef::new_sized(llval, tuple.ty, align);
let tuple_ptr = LvalueRef::new_sized(llval, tuple.layout, align);
for n in 0..arg_types.len() {
let field_ptr = tuple_ptr.project_field(bcx, n);
self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[n]);
@ -699,15 +699,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
Immediate(llval) => {
let layout = bcx.ccx.layout_of(tuple.ty);
for (n, &ty) in arg_types.iter().enumerate() {
let mut elem = bcx.extract_value(llval, layout.llvm_field_index(n));
let mut elem = bcx.extract_value(llval, tuple.layout.llvm_field_index(n));
// Truncate bools to i1, if needed
elem = base::to_immediate(bcx, elem, ty);
// If the tuple is immediate, the elements are as well
let op = OperandRef {
val: Immediate(elem),
ty,
layout: bcx.ccx.layout_of(ty),
};
self.trans_argument(bcx, op, llargs, &args[n]);
}
@ -719,7 +718,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Pair is always made up of immediates
let op = OperandRef {
val: Immediate(elem),
ty,
layout: bcx.ccx.layout_of(ty),
};
self.trans_argument(bcx, op, llargs, &args[n]);
}
@ -733,11 +732,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
if let Some(slot) = self.personality_slot {
slot
} else {
let ty = ccx.tcx().intern_tup(&[
let layout = ccx.layout_of(ccx.tcx().intern_tup(&[
ccx.tcx().mk_mut_ptr(ccx.tcx().types.u8),
ccx.tcx().types.i32
], false);
let slot = LvalueRef::alloca(bcx, ty, "personalityslot");
], false));
let slot = LvalueRef::alloca(bcx, layout, "personalityslot");
self.personality_slot = Some(slot);
slot
}
@ -764,7 +763,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let bcx = self.new_block("cleanup");
let ccx = bcx.ccx;
let llpersonality = self.ccx.eh_personality();
let llretty = self.landing_pad_type();
let lp = bcx.landing_pad(llretty, llpersonality, 1, self.llfn);
@ -772,10 +770,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let slot = self.get_personality_slot(&bcx);
slot.storage_live(&bcx);
OperandRef {
val: Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)),
ty: slot.ty.to_ty(ccx.tcx())
}.store(&bcx, slot);
Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)).store(&bcx, slot);
bcx.br(target_bb);
bcx.llbb()
@ -806,24 +801,23 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>,
dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType,
dest: &mir::Lvalue<'tcx>, fn_ret: &ArgType<'tcx>,
llargs: &mut Vec<ValueRef>, is_intrinsic: bool)
-> ReturnDest<'tcx> {
// If the return is ignored, we can just return a do-nothing ReturnDest
if fn_ret_ty.is_ignore() {
if fn_ret.is_ignore() {
return ReturnDest::Nothing;
}
let dest = if let mir::Lvalue::Local(index) = *dest {
let ret_ty = self.monomorphized_lvalue_ty(dest);
match self.locals[index] {
LocalRef::Lvalue(dest) => dest,
LocalRef::Operand(None) => {
// Handle temporary lvalues, specifically Operand ones, as
// they don't have allocas
return if fn_ret_ty.is_indirect() {
return if fn_ret.is_indirect() {
// Odd, but possible, case, we have an operand temporary,
// but the calling convention has an indirect return.
let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret");
let tmp = LvalueRef::alloca(bcx, fn_ret.layout, "tmp_ret");
tmp.storage_live(bcx);
llargs.push(tmp.llval);
ReturnDest::IndirectOperand(tmp, index)
@ -831,7 +825,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Currently, intrinsics always need a location to store
// the result, so we create a temporary alloca for the
// result
let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret");
let tmp = LvalueRef::alloca(bcx, fn_ret.layout, "tmp_ret");
tmp.storage_live(bcx);
ReturnDest::IndirectOperand(tmp, index)
} else {
@ -845,7 +839,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} else {
self.trans_lvalue(bcx, dest)
};
if fn_ret_ty.is_indirect() {
if fn_ret.is_indirect() {
match dest.alignment {
Alignment::AbiAligned => {
llargs.push(dest.llval);
@ -873,18 +867,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
match self.locals[index] {
LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, lvalue),
LocalRef::Operand(None) => {
let lvalue_ty = self.monomorphized_lvalue_ty(dst);
assert!(!lvalue_ty.has_erasable_regions());
let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "transmute_temp");
let dst_layout = bcx.ccx.layout_of(self.monomorphized_lvalue_ty(dst));
assert!(!dst_layout.ty.has_erasable_regions());
let lvalue = LvalueRef::alloca(bcx, dst_layout, "transmute_temp");
lvalue.storage_live(bcx);
self.trans_transmute_into(bcx, src, lvalue);
let op = lvalue.load(bcx);
lvalue.storage_dead(bcx);
self.locals[index] = LocalRef::Operand(Some(op));
}
LocalRef::Operand(Some(_)) => {
let ty = self.monomorphized_lvalue_ty(dst);
assert!(common::type_is_zero_size(bcx.ccx, ty),
LocalRef::Operand(Some(op)) => {
assert!(op.layout.is_zst(),
"assigning to initialized SSAtemp");
}
}
@ -897,14 +890,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
fn trans_transmute_into(&mut self, bcx: &Builder<'a, 'tcx>,
src: &mir::Operand<'tcx>,
dst: LvalueRef<'tcx>) {
let val = self.trans_operand(bcx, src);
let llty = bcx.ccx.llvm_type_of(val.ty);
let src = self.trans_operand(bcx, src);
let llty = bcx.ccx.llvm_type_of(src.layout.ty);
let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
let in_type = val.ty;
let out_type = dst.ty.to_ty(bcx.tcx());
let align = bcx.ccx.align_of(in_type).min(bcx.ccx.align_of(out_type));
val.store(bcx,
LvalueRef::new_sized(cast_ptr, val.ty, Alignment::Packed(align)));
let align = src.layout.align(bcx.ccx).min(dst.layout.align(bcx.ccx));
src.val.store(bcx,
LvalueRef::new_sized(cast_ptr, src.layout, Alignment::Packed(align)));
}
@ -927,7 +918,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
let op = if ret_ty.cast.is_some() {
let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret");
let tmp = LvalueRef::alloca(bcx, op.layout, "tmp_ret");
tmp.storage_live(bcx);
ret_ty.store(bcx, op.immediate(), tmp);
let op = tmp.load(bcx);

View File

@ -158,7 +158,7 @@ impl<'a, 'tcx> Const<'tcx> {
OperandRef {
val,
ty: self.ty
layout: ccx.layout_of(self.ty)
}
}
}
@ -1100,11 +1100,11 @@ fn trans_const_adt<'a, 'tcx>(
_ => 0,
};
let discr_ty = l.field(ccx, 0).ty;
let discr = Const::new(C_int(ccx.llvm_type_of(discr_ty), discr as i64),
discr_ty);
let discr = C_int(ccx.llvm_type_of(discr_ty), discr as i64);
if let layout::Abi::Scalar(_) = l.abi {
discr
Const::new(discr, t)
} else {
let discr = Const::new(discr, discr_ty);
build_const_struct(ccx, l.for_variant(variant_index), vals, Some(discr))
}
}
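A subtle point in the trans_const_adt hunk above: the discriminant is first built as a raw C_int and only given a Rust type at the point of use. When the enum's ABI is a bare scalar, the constant is now wrapped as Const::new(discr, t) with the enum type t itself (previously it carried the discriminant field's type discr_ty); the aggregate path still wraps it in discr_ty before handing it to build_const_struct. Condensed:

    // scalar-ABI enum:     Const::new(discr, t)
    // aggregate-ABI enum:  build_const_struct(ccx, l.for_variant(variant_index),
    //                                         vals, Some(Const::new(discr, discr_ty)))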

View File

@ -9,7 +9,7 @@
// except according to those terms.
use llvm::{self, ValueRef};
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, FullLayout, LayoutOf};
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
@ -86,36 +86,44 @@ pub struct LvalueRef<'tcx> {
pub llextra: ValueRef,
/// Monomorphized type of this lvalue, including variant information
pub ty: LvalueTy<'tcx>,
pub layout: FullLayout<'tcx>,
/// Whether this lvalue is known to be aligned according to its layout
pub alignment: Alignment,
}
impl<'a, 'tcx> LvalueRef<'tcx> {
pub fn new_sized(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> {
LvalueRef { llval, llextra: ptr::null_mut(), ty: LvalueTy::from_ty(ty), alignment }
pub fn new_sized(llval: ValueRef,
layout: FullLayout<'tcx>,
alignment: Alignment)
-> LvalueRef<'tcx> {
LvalueRef {
llval,
llextra: ptr::null_mut(),
layout,
alignment
}
}
pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> {
debug!("alloca({:?}: {:?})", name, ty);
pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: FullLayout<'tcx>, name: &str)
-> LvalueRef<'tcx> {
debug!("alloca({:?}: {:?})", name, layout);
let tmp = bcx.alloca(
bcx.ccx.llvm_type_of(ty), name, bcx.ccx.over_align_of(ty));
assert!(!ty.has_param_types());
Self::new_sized(tmp, ty, Alignment::AbiAligned)
bcx.ccx.llvm_type_of(layout.ty), name, layout.over_align(bcx.ccx));
Self::new_sized(tmp, layout, Alignment::AbiAligned)
}
pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
let ty = self.ty.to_ty(ccx.tcx());
match ty.sty {
ty::TyArray(_, n) => {
common::C_usize(ccx, n.val.to_const_int().unwrap().to_u64().unwrap())
}
ty::TySlice(_) | ty::TyStr => {
assert!(self.llextra != ptr::null_mut());
if let layout::FieldPlacement::Array { count, .. } = *self.layout.fields {
if self.layout.is_unsized() {
assert!(self.has_extra());
assert_eq!(count, 0);
self.llextra
} else {
common::C_usize(ccx, count)
}
_ => bug!("unexpected type `{}` in LvalueRef::len", ty)
} else {
bug!("unexpected layout `{:#?}` in LvalueRef::len", self.layout)
}
}
@ -128,15 +136,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
assert!(!self.has_extra());
let ty = self.ty.to_ty(bcx.tcx());
if common::type_is_zero_size(bcx.ccx, ty) {
return OperandRef::new_zst(bcx.ccx, ty);
if self.layout.is_zst() {
return OperandRef::new_zst(bcx.ccx, self.layout);
}
let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
let val = if common::type_is_fat_ptr(bcx.ccx, self.layout.ty) {
let data = self.project_field(bcx, abi::FAT_PTR_ADDR);
let lldata = if ty.is_region_ptr() || ty.is_box() {
let lldata = if self.layout.ty.is_region_ptr() || self.layout.ty.is_box() {
bcx.load_nonnull(data.llval, data.alignment.non_abi())
} else {
bcx.load(data.llval, data.alignment.non_abi())
@ -153,11 +159,11 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
};
OperandValue::Pair(lldata, llextra)
} else if common::type_is_imm_pair(bcx.ccx, ty) {
} else if common::type_is_imm_pair(bcx.ccx, self.layout.ty) {
OperandValue::Pair(
self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(),
self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate())
} else if common::type_is_immediate(bcx.ccx, ty) {
} else if common::type_is_immediate(bcx.ccx, self.layout.ty) {
let mut const_llval = ptr::null_mut();
unsafe {
let global = llvm::LLVMIsAGlobalVariable(self.llval);
@ -168,48 +174,43 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
let llval = if !const_llval.is_null() {
const_llval
} else if ty.is_bool() {
} else if self.layout.ty.is_bool() {
bcx.load_range_assert(self.llval, 0, 2, llvm::False,
self.alignment.non_abi())
} else if ty.is_char() {
} else if self.layout.ty.is_char() {
// a char is a Unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
bcx.load_range_assert(self.llval, 0, 0x10FFFF + 1, llvm::False,
self.alignment.non_abi())
} else if ty.is_region_ptr() || ty.is_box() || ty.is_fn() {
} else if self.layout.ty.is_region_ptr() ||
self.layout.ty.is_box() ||
self.layout.ty.is_fn() {
bcx.load_nonnull(self.llval, self.alignment.non_abi())
} else {
bcx.load(self.llval, self.alignment.non_abi())
};
OperandValue::Immediate(base::to_immediate(bcx, llval, ty))
OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout.ty))
} else {
OperandValue::Ref(self.llval, self.alignment)
};
OperandRef { val, ty }
OperandRef { val, layout: self.layout }
}
/// Access a field, at a point when the value's case is known.
pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx> {
let ccx = bcx.ccx;
let mut l = ccx.layout_of(self.ty.to_ty(bcx.tcx()));
match self.ty {
LvalueTy::Ty { .. } => {}
LvalueTy::Downcast { variant_index, .. } => {
l = l.for_variant(variant_index)
}
}
let field = l.field(ccx, ix);
let offset = l.fields.offset(ix).bytes();
let field = self.layout.field(ccx, ix);
let offset = self.layout.fields.offset(ix).bytes();
let alignment = self.alignment | Alignment::from(l);
let alignment = self.alignment | Alignment::from(self.layout);
// Unions and newtypes only use an offset of 0.
let has_llvm_fields = match *l.fields {
let has_llvm_fields = match *self.layout.fields {
layout::FieldPlacement::Union(_) => false,
layout::FieldPlacement::Array { .. } => true,
layout::FieldPlacement::Arbitrary { .. } => {
match l.abi {
match self.layout.abi {
layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => false,
layout::Abi::Aggregate { .. } => true
}
@ -219,7 +220,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
let simple = || {
LvalueRef {
llval: if has_llvm_fields {
bcx.struct_gep(self.llval, l.llvm_field_index(ix))
bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
} else {
assert_eq!(offset, 0);
let ty = ccx.llvm_type_of(field.ty);
@ -230,7 +231,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
} else {
ptr::null_mut()
},
ty: LvalueTy::from_ty(field.ty),
layout: field,
alignment,
}
};
@ -238,7 +239,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
// Simple case - we can just GEP the field
// * Packed struct - There is no alignment padding
// * Field is sized - pointer is properly aligned already
if l.is_packed() || !field.is_unsized() {
if self.layout.is_packed() || !field.is_unsized() {
return simple();
}
@ -301,29 +302,26 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
LvalueRef {
llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()),
llextra: self.llextra,
ty: LvalueTy::from_ty(field.ty),
layout: field,
alignment,
}
}
/// Obtain the actual discriminant of a value.
pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx()));
let cast_to = bcx.ccx.immediate_llvm_type_of(cast_to);
match *l.layout {
match *self.layout.layout {
layout::Layout::Univariant { .. } |
layout::Layout::UntaggedUnion { .. } => return C_uint(cast_to, 0),
_ => {}
}
let discr = self.project_field(bcx, 0);
let discr_layout = bcx.ccx.layout_of(discr.ty.to_ty(bcx.tcx()));
let discr_scalar = match discr_layout.abi {
let discr_scalar = match discr.layout.abi {
layout::Abi::Scalar(discr) => discr,
_ => bug!("discriminant not scalar: {:#?}", discr_layout)
_ => bug!("discriminant not scalar: {:#?}", discr.layout)
};
let (min, max) = match *l.layout {
let (min, max) = match *self.layout.layout {
layout::Layout::General { ref discr_range, .. } => (discr_range.start, discr_range.end),
_ => (0, u64::max_value()),
};
@ -349,7 +347,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
bcx.load(discr.llval, discr.alignment.non_abi())
}
};
match *l.layout {
match *self.layout.layout {
layout::Layout::General { .. } => {
let signed = match discr_scalar {
layout::Int(_, signed) => signed,
@ -359,29 +357,28 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
}
layout::Layout::NullablePointer { nndiscr, .. } => {
let cmp = if nndiscr == 0 { llvm::IntEQ } else { llvm::IntNE };
let zero = C_null(bcx.ccx.llvm_type_of(discr_layout.ty));
let zero = C_null(bcx.ccx.llvm_type_of(discr.layout.ty));
bcx.intcast(bcx.icmp(cmp, lldiscr, zero), cast_to, false)
}
_ => bug!("{} is not an enum", l.ty)
_ => bug!("{} is not an enum", self.layout.ty)
}
}
/// Set the discriminant for a new value of the given case of the given
/// representation.
pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) {
let l = bcx.ccx.layout_of(self.ty.to_ty(bcx.tcx()));
let to = l.ty.ty_adt_def().unwrap()
let to = self.layout.ty.ty_adt_def().unwrap()
.discriminant_for_variant(bcx.tcx(), variant_index)
.to_u128_unchecked() as u64;
match *l.layout {
match *self.layout.layout {
layout::Layout::General { .. } => {
let ptr = self.project_field(bcx, 0);
bcx.store(C_int(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx())), to as i64),
bcx.store(C_int(bcx.ccx.llvm_type_of(ptr.layout.ty), to as i64),
ptr.llval, ptr.alignment.non_abi());
}
layout::Layout::NullablePointer { nndiscr, .. } => {
if to != nndiscr {
let use_memset = match l.abi {
let use_memset = match self.layout.abi {
layout::Abi::Scalar(_) => false,
_ => target_sets_discr_via_memset(bcx)
};
@ -391,13 +388,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
// than storing null to single target field.
let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to());
let fill_byte = C_u8(bcx.ccx, 0);
let (size, align) = l.size_and_align(bcx.ccx);
let (size, align) = self.layout.size_and_align(bcx.ccx);
let size = C_usize(bcx.ccx, size.bytes());
let align = C_u32(bcx.ccx, align.abi() as u32);
base::call_memset(bcx, llptr, fill_byte, size, align, false);
} else {
let ptr = self.project_field(bcx, 0);
bcx.store(C_null(bcx.ccx.llvm_type_of(ptr.ty.to_ty(bcx.tcx()))),
bcx.store(C_null(bcx.ccx.llvm_type_of(ptr.layout.ty)),
ptr.llval, ptr.alignment.non_abi());
}
}
@ -410,48 +407,40 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef)
-> LvalueRef<'tcx> {
let ptr = bcx.inbounds_gep(self.llval, &[common::C_usize(bcx.ccx, 0), llindex]);
let elem_ty = self.ty.to_ty(bcx.tcx()).builtin_index().unwrap();
LvalueRef::new_sized(ptr, elem_ty, self.alignment)
LvalueRef {
llval: bcx.inbounds_gep(self.llval, &[common::C_usize(bcx.ccx, 0), llindex]),
llextra: ptr::null_mut(),
layout: self.layout.field(bcx.ccx, 0),
alignment: self.alignment
}
}
pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize)
-> LvalueRef<'tcx> {
let ty = self.ty.to_ty(bcx.tcx());
if let ty::TyAdt(adt_def, substs) = ty.sty {
let mut downcast = *self;
downcast.ty = LvalueTy::Downcast {
adt_def,
substs,
variant_index,
};
let mut downcast = *self;
downcast.layout = self.layout.for_variant(variant_index);
// If this is an enum, cast to the appropriate variant struct type.
let layout = bcx.ccx.layout_of(ty);
match *layout.layout {
layout::Layout::NullablePointer { .. } |
layout::Layout::General { .. } => {
let variant_layout = layout.for_variant(variant_index);
let variant_ty = Type::struct_(bcx.ccx,
&type_of::struct_llfields(bcx.ccx, variant_layout),
variant_layout.is_packed());
downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
}
_ => {}
// If this is an enum, cast to the appropriate variant struct type.
match *self.layout.layout {
layout::Layout::NullablePointer { .. } |
layout::Layout::General { .. } => {
let variant_ty = Type::struct_(bcx.ccx,
&type_of::struct_llfields(bcx.ccx, downcast.layout),
downcast.layout.is_packed());
downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
}
downcast
} else {
bug!("unexpected type `{}` in LvalueRef::project_downcast", ty)
_ => {}
}
downcast
}
pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) {
bcx.lifetime_start(self.llval, bcx.ccx.size_of(self.ty.to_ty(bcx.tcx())));
bcx.lifetime_start(self.llval, self.layout.size(bcx.ccx));
}
pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) {
bcx.lifetime_end(self.llval, bcx.ccx.size_of(self.ty.to_ty(bcx.tcx())));
bcx.lifetime_end(self.llval, self.layout.size(bcx.ccx));
}
}
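project_downcast no longer needs to inspect the type at all: instead of rebuilding an LvalueTy::Downcast (and bug!-ing out on non-ADTs), it swaps in the variant's layout and, for General/NullablePointer enums, casts the pointer to the variant struct type. Condensed from the hunk above:

    let mut downcast = *self;
    downcast.layout = self.layout.for_variant(variant_index);
    // enums only: pointercast self.llval to the variant's LLVM struct type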
@ -480,7 +469,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::Lvalue::Local(_) => bug!(), // handled above
mir::Lvalue::Static(box mir::Static { def_id, ty }) => {
LvalueRef::new_sized(consts::get_static(ccx, def_id),
self.monomorphize(&ty),
ccx.layout_of(self.monomorphize(&ty)),
Alignment::AbiAligned)
},
mir::Lvalue::Projection(box mir::Projection {
@ -488,7 +477,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
elem: mir::ProjectionElem::Deref
}) => {
// Load the pointer from its location.
self.trans_consume(bcx, base).deref()
self.trans_consume(bcx, base).deref(bcx.ccx)
}
mir::Lvalue::Projection(ref projection) => {
let tr_base = self.trans_lvalue(bcx, &projection.base);
@ -521,23 +510,20 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::ProjectionElem::Subslice { from, to } => {
let mut subslice = tr_base.project_index(bcx,
C_usize(bcx.ccx, from as u64));
subslice.ty = tr_base.ty.projection_ty(tcx, &projection.elem);
subslice.ty = self.monomorphize(&subslice.ty);
let projected_ty = LvalueTy::Ty { ty: tr_base.layout.ty }
.projection_ty(tcx, &projection.elem).to_ty(bcx.tcx());
subslice.layout = bcx.ccx.layout_of(self.monomorphize(&projected_ty));
match subslice.ty.to_ty(tcx).sty {
ty::TyArray(..) => {}
ty::TySlice(..) => {
assert!(tr_base.has_extra());
subslice.llextra = bcx.sub(tr_base.llextra,
C_usize(bcx.ccx, (from as u64) + (to as u64)));
}
_ => bug!("unexpected type {:?} in Subslice", subslice.ty)
if subslice.layout.is_unsized() {
assert!(tr_base.has_extra());
subslice.llextra = bcx.sub(tr_base.llextra,
C_usize(bcx.ccx, (from as u64) + (to as u64)));
}
// Cast the lvalue pointer type to the new
// array or slice type (*[%_; new_len]).
subslice.llval = bcx.pointercast(subslice.llval,
bcx.ccx.llvm_type_of(subslice.ty.to_ty(tcx)).ptr_to());
bcx.ccx.llvm_type_of(subslice.layout.ty).ptr_to());
subslice
}

View File

@ -11,8 +11,8 @@
use libc::c_uint;
use llvm::{self, ValueRef, BasicBlockRef};
use llvm::debuginfo::DIScope;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::LayoutOf;
use rustc::ty::{self, TypeFoldable};
use rustc::ty::layout::{LayoutOf, FullLayout};
use rustc::mir::{self, Mir};
use rustc::ty::subst::Substs;
use rustc::infer::TransNormalize;
@ -177,12 +177,12 @@ enum LocalRef<'tcx> {
}
impl<'a, 'tcx> LocalRef<'tcx> {
fn new_operand(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> LocalRef<'tcx> {
if common::type_is_zero_size(ccx, ty) {
fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: FullLayout<'tcx>) -> LocalRef<'tcx> {
if layout.is_zst() {
// Zero-size temporaries aren't always initialized, which
// doesn't matter because they don't contain data, but
// we need something in the operand.
LocalRef::Operand(Some(OperandRef::new_zst(ccx, ty)))
LocalRef::Operand(Some(OperandRef::new_zst(ccx, layout)))
} else {
LocalRef::Operand(None)
}
@ -253,7 +253,8 @@ pub fn trans_mir<'a, 'tcx: 'a>(
let mut allocate_local = |local| {
let decl = &mir.local_decls[local];
let ty = mircx.monomorphize(&decl.ty);
let layout = bcx.ccx.layout_of(mircx.monomorphize(&decl.ty));
assert!(!layout.ty.has_erasable_regions());
if let Some(name) = decl.name {
// User variable
@ -262,15 +263,14 @@ pub fn trans_mir<'a, 'tcx: 'a>(
if !lvalue_locals.contains(local.index()) && !dbg {
debug!("alloc: {:?} ({}) -> operand", local, name);
return LocalRef::new_operand(bcx.ccx, ty);
return LocalRef::new_operand(bcx.ccx, layout);
}
debug!("alloc: {:?} ({}) -> lvalue", local, name);
assert!(!ty.has_erasable_regions());
let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str());
let lvalue = LvalueRef::alloca(&bcx, layout, &name.as_str());
if dbg {
let (scope, span) = mircx.debug_loc(decl.source_info);
declare_local(&bcx, &mircx.debug_context, name, ty, scope,
declare_local(&bcx, &mircx.debug_context, name, layout.ty, scope,
VariableAccess::DirectVariable { alloca: lvalue.llval },
VariableKind::LocalVariable, span);
}
@ -280,17 +280,18 @@ pub fn trans_mir<'a, 'tcx: 'a>(
if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return pointer) -> lvalue", local);
let llretptr = llvm::get_param(llfn, 0);
LocalRef::Lvalue(LvalueRef::new_sized(llretptr, ty, Alignment::AbiAligned))
LocalRef::Lvalue(LvalueRef::new_sized(llretptr,
layout,
Alignment::AbiAligned))
} else if lvalue_locals.contains(local.index()) {
debug!("alloc: {:?} -> lvalue", local);
assert!(!ty.has_erasable_regions());
LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local)))
LocalRef::Lvalue(LvalueRef::alloca(&bcx, layout, &format!("{:?}", local)))
} else {
// If this is an immediate local, we do not create an
// alloca in advance. Instead we wait until we see the
// definition and update the operand there.
debug!("alloc: {:?} -> operand", local);
LocalRef::new_operand(bcx.ccx, ty)
LocalRef::new_operand(bcx.ccx, layout)
}
}
};
@ -381,7 +382,6 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
mir.args_iter().enumerate().map(|(arg_index, local)| {
let arg_decl = &mir.local_decls[local];
let arg_ty = mircx.monomorphize(&arg_decl.ty);
let name = if let Some(name) = arg_decl.name {
name.as_str().to_string()
@ -395,12 +395,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// to reconstruct it into a tuple local variable, from multiple
// individual LLVM function arguments.
let arg_ty = mircx.monomorphize(&arg_decl.ty);
let tupled_arg_tys = match arg_ty.sty {
ty::TyTuple(ref tys, _) => tys,
_ => bug!("spread argument isn't a tuple?!")
};
let lvalue = LvalueRef::alloca(bcx, arg_ty, &name);
let lvalue = LvalueRef::alloca(bcx, bcx.ccx.layout_of(arg_ty), &name);
for i in 0..tupled_arg_tys.len() {
let arg = &mircx.fn_ty.args[idx];
idx += 1;
@ -439,7 +440,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
bcx.set_value_name(llarg, &name);
llarg_idx += 1;
LvalueRef::new_sized(llarg, arg_ty, Alignment::AbiAligned)
LvalueRef::new_sized(llarg, arg.layout, Alignment::AbiAligned)
} else if !lvalue_locals.contains(local.index()) &&
!arg.nested.is_empty() {
assert_eq!(arg.nested.len(), 2);
@ -453,15 +454,15 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let mut b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
llarg_idx += 1;
if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
if common::type_is_fat_ptr(bcx.ccx, arg.layout.ty) {
// FIXME(eddyb) As we can't perfectly represent the data and/or
// vtable pointer in fat pointers in Rust's typesystem, and
// because we split fat pointers into two ArgType's, they're
// not the right type so we have to cast them for now.
let pointee = match arg_ty.sty {
let pointee = match arg.layout.ty.sty {
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => ty,
ty::TyAdt(def, _) if def.is_box() => arg_ty.boxed_ty(),
ty::TyAdt(def, _) if def.is_box() => arg.layout.ty.boxed_ty(),
_ => bug!()
};
let data_llty = bcx.ccx.llvm_type_of(pointee);
@ -475,13 +476,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
return LocalRef::Operand(Some(OperandRef {
val: OperandValue::Pair(a, b),
ty: arg_ty
layout: arg.layout
}));
} else if !lvalue_locals.contains(local.index()) &&
!arg.is_indirect() && arg.cast.is_none() &&
arg_scope.is_none() {
if arg.is_ignore() {
return LocalRef::new_operand(bcx.ccx, arg_ty);
return LocalRef::new_operand(bcx.ccx, arg.layout);
}
// We don't have to cast or keep the argument in the alloca.
@ -495,11 +496,11 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
llarg_idx += 1;
let operand = OperandRef {
val: OperandValue::Immediate(llarg),
ty: arg_ty
layout: arg.layout
};
return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
} else {
let tmp = LvalueRef::alloca(bcx, arg_ty, &name);
let tmp = LvalueRef::alloca(bcx, arg.layout, &name);
arg.store_fn_arg(bcx, &mut llarg_idx, tmp);
tmp
};
@ -523,7 +524,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
bcx,
&mircx.debug_context,
arg_decl.name.unwrap_or(keywords::Invalid.name()),
arg_ty,
arg.layout.ty,
scope,
variable_access,
VariableKind::ArgumentVariable(arg_index + 1),
@ -533,15 +534,15 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
}
// Or is it the closure environment?
let (closure_ty, env_ref) = match arg_ty.sty {
ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (mt.ty, true),
_ => (arg_ty, false)
let (closure_layout, env_ref) = match arg.layout.ty.sty {
ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (bcx.ccx.layout_of(mt.ty), true),
_ => (arg.layout, false)
};
let upvar_tys = match closure_ty.sty {
let upvar_tys = match closure_layout.ty.sty {
ty::TyClosure(def_id, substs) |
ty::TyGenerator(def_id, substs, _) => substs.upvar_tys(def_id, tcx),
_ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_ty)
_ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_layout.ty)
};
// Store the pointer to closure data in an alloca for debuginfo
@ -552,17 +553,17 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// doesn't actually strip the offset when splitting the closure
// environment into its components so it ends up out of bounds.
let env_ptr = if !env_ref {
let alloc_ty = tcx.mk_mut_ptr(arg_ty);
let alloc = LvalueRef::alloca(bcx, alloc_ty, "__debuginfo_env_ptr");
let alloc = LvalueRef::alloca(bcx,
bcx.ccx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
"__debuginfo_env_ptr");
bcx.store(lvalue.llval, alloc.llval, None);
alloc.llval
} else {
lvalue.llval
};
let layout = bcx.ccx.layout_of(closure_ty);
for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() {
let byte_offset_of_var_in_env = layout.fields.offset(i).bytes();
let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes();
let ops = unsafe {
[llvm::LLVMRustDIBuilderCreateOpDeref(),
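Local allocation in trans_mir now computes each local's layout once and threads it through every branch; ZST locals still get a pre-made undef operand via OperandRef::new_zst since they carry no data. The flow, condensed from the hunks above:

    let layout = bcx.ccx.layout_of(mircx.monomorphize(&decl.ty));
    assert!(!layout.ty.has_erasable_regions());
    // memory locals:
    let lvalue = LvalueRef::alloca(&bcx, layout, &name.as_str());
    // immediate locals (ZSTs get Some(OperandRef::new_zst(..))):
    LocalRef::new_operand(bcx.ccx, layout)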

View File

@ -9,10 +9,9 @@
// except according to those terms.
use llvm::ValueRef;
use rustc::ty::{self, Ty};
use rustc::ty::layout::LayoutOf;
use rustc::ty;
use rustc::ty::layout::{LayoutOf, FullLayout};
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
use base;
@ -42,6 +41,22 @@ pub enum OperandValue {
Pair(ValueRef, ValueRef)
}
impl fmt::Debug for OperandValue {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
OperandValue::Ref(r, align) => {
write!(f, "Ref({:?}, {:?})", Value(r), align)
}
OperandValue::Immediate(i) => {
write!(f, "Immediate({:?})", Value(i))
}
OperandValue::Pair(a, b) => {
write!(f, "Pair({:?}, {:?})", Value(a), Value(b))
}
}
}
}
/// An `OperandRef` is an "SSA" reference to a Rust value, along with
/// its type.
///
@ -55,35 +70,24 @@ pub struct OperandRef<'tcx> {
// The value.
pub val: OperandValue,
// The type of value being returned.
pub ty: Ty<'tcx>
// The layout of value, based on its Rust type.
pub layout: FullLayout<'tcx>,
}
impl<'tcx> fmt::Debug for OperandRef<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.val {
OperandValue::Ref(r, align) => {
write!(f, "OperandRef(Ref({:?}, {:?}) @ {:?})",
Value(r), align, self.ty)
}
OperandValue::Immediate(i) => {
write!(f, "OperandRef(Immediate({:?}) @ {:?})",
Value(i), self.ty)
}
OperandValue::Pair(a, b) => {
write!(f, "OperandRef(Pair({:?}, {:?}) @ {:?})",
Value(a), Value(b), self.ty)
}
}
write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
}
}
impl<'a, 'tcx> OperandRef<'tcx> {
pub fn new_zst(ccx: &CrateContext<'a, 'tcx>,
ty: Ty<'tcx>) -> OperandRef<'tcx> {
assert!(common::type_is_zero_size(ccx, ty));
let llty = ccx.llvm_type_of(ty);
Const::new(C_undef(llty), ty).to_operand(ccx)
layout: FullLayout<'tcx>) -> OperandRef<'tcx> {
assert!(layout.is_zst());
let llty = ccx.llvm_type_of(layout.ty);
// FIXME(eddyb) ZSTs should always be immediate, not pairs.
// This hack only exists to unpack a constant undef pair.
Const::new(C_undef(llty), layout.ty).to_operand(ccx)
}
/// Asserts that this operand refers to a scalar and returns
@ -95,8 +99,8 @@ impl<'a, 'tcx> OperandRef<'tcx> {
}
}
pub fn deref(self) -> LvalueRef<'tcx> {
let projected_ty = self.ty.builtin_deref(true, ty::NoPreference)
pub fn deref(self, ccx: &CrateContext<'a, 'tcx>) -> LvalueRef<'tcx> {
let projected_ty = self.layout.ty.builtin_deref(true, ty::NoPreference)
.unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty;
let (llptr, llextra) = match self.val {
OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
@ -106,7 +110,7 @@ impl<'a, 'tcx> OperandRef<'tcx> {
LvalueRef {
llval: llptr,
llextra,
ty: LvalueTy::from_ty(projected_ty),
layout: ccx.layout_of(projected_ty),
alignment: Alignment::AbiAligned,
}
}
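OperandRef::deref now takes the CrateContext because it must compute the pointee's FullLayout itself; this is what lets intrinsic.rs drop its hand-built LvalueRefs:

    // was: LvalueRef::new_sized(args[0].immediate(), tp_ty, Alignment::AbiAligned)
    let dst = args[0].deref(bcx.ccx);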
@ -115,15 +119,14 @@ impl<'a, 'tcx> OperandRef<'tcx> {
/// Immediate aggregate with the two values.
pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
if let OperandValue::Pair(a, b) = self.val {
let llty = bcx.ccx.llvm_type_of(self.ty);
let llty = bcx.ccx.llvm_type_of(self.layout.ty);
debug!("Operand::pack_if_pair: packing {:?} into {:?}", self, llty);
// Reconstruct the immediate aggregate.
let mut llpair = C_undef(llty);
let elems = [a, b];
let layout = bcx.ccx.layout_of(self.ty);
for i in 0..2 {
let elem = base::from_immediate(bcx, elems[i]);
llpair = bcx.insert_value(llpair, elem, layout.llvm_field_index(i));
llpair = bcx.insert_value(llpair, elem, self.layout.llvm_field_index(i));
}
self.val = OperandValue::Immediate(llpair);
}
@ -135,33 +138,33 @@ impl<'a, 'tcx> OperandRef<'tcx> {
pub fn unpack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
if let OperandValue::Immediate(llval) = self.val {
// Deconstruct the immediate aggregate.
if common::type_is_imm_pair(bcx.ccx, self.ty) {
if common::type_is_imm_pair(bcx.ccx, self.layout.ty) {
debug!("Operand::unpack_if_pair: unpacking {:?}", self);
let layout = bcx.ccx.layout_of(self.ty);
let a = bcx.extract_value(llval, self.layout.llvm_field_index(0));
let a = base::to_immediate(bcx, a, self.layout.field(bcx.ccx, 0).ty);
let a = bcx.extract_value(llval, layout.llvm_field_index(0));
let a = base::to_immediate(bcx, a, layout.field(bcx.ccx, 0).ty);
let b = bcx.extract_value(llval, layout.llvm_field_index(1));
let b = base::to_immediate(bcx, b, layout.field(bcx.ccx, 1).ty);
let b = bcx.extract_value(llval, self.layout.llvm_field_index(1));
let b = base::to_immediate(bcx, b, self.layout.field(bcx.ccx, 1).ty);
self.val = OperandValue::Pair(a, b);
}
}
self
}
}
impl<'a, 'tcx> OperandValue {
pub fn store(self, bcx: &Builder<'a, 'tcx>, dest: LvalueRef<'tcx>) {
debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
// Avoid generating stores of zero-sized values, because the only way to have a zero-sized
// value is through `undef`, and store itself is useless.
if common::type_is_zero_size(bcx.ccx, self.ty) {
if dest.layout.is_zst() {
return;
}
match self.val {
match self {
OperandValue::Ref(r, source_align) =>
base::memcpy_ty(bcx, dest.llval, r, self.ty,
base::memcpy_ty(bcx, dest.llval, r, dest.layout,
(source_align | dest.alignment).non_abi()),
OperandValue::Immediate(s) => {
bcx.store(base::from_immediate(bcx, s), dest.llval, dest.alignment.non_abi());
@ -169,12 +172,12 @@ impl<'a, 'tcx> OperandRef<'tcx> {
OperandValue::Pair(a, b) => {
// See comment above about zero-sized values.
let dest_a = dest.project_field(bcx, 0);
if !common::type_is_zero_size(bcx.ccx, dest_a.ty.to_ty(bcx.tcx())) {
if !dest_a.layout.is_zst() {
let a = base::from_immediate(bcx, a);
bcx.store(a, dest_a.llval, dest_a.alignment.non_abi());
}
let dest_b = dest.project_field(bcx, 1);
if !common::type_is_zero_size(bcx.ccx, dest_b.ty.to_ty(bcx.tcx())) {
if !dest_b.layout.is_zst() {
let b = base::from_immediate(bcx, b);
bcx.store(b, dest_b.llval, dest_b.alignment.non_abi());
}
@ -217,7 +220,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let llval = [a, b][f.index()];
let op = OperandRef {
val: OperandValue::Immediate(llval),
ty: self.monomorphize(&ty)
layout: bcx.ccx.layout_of(self.monomorphize(&ty))
};
// Handle nested pairs.
@ -251,7 +254,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let operand = val.to_operand(bcx.ccx);
if let OperandValue::Ref(ptr, align) = operand.val {
// If this is a OperandValue::Ref to an immediate constant, load it.
LvalueRef::new_sized(ptr, operand.ty, align).load(bcx)
LvalueRef::new_sized(ptr, operand.layout, align).load(bcx)
} else {
operand
}
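store also migrates from OperandRef down to OperandValue: only the value is passed, and all type and layout information is read from the destination lvalue. The resulting call-site rewrite, visible throughout the rvalue.rs hunks below:

    // was: op.store(&bcx, dest);
    op.val.store(&bcx, dest);
    // or, when only a raw immediate is at hand:
    OperandValue::Immediate(v).store(bcx, lvalue);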

View File

@ -49,7 +49,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let tr_operand = self.trans_operand(&bcx, operand);
// FIXME: consider not copying constants through stack. (fixable by translating
// constants into OperandValue::Ref; why don't we do that yet if we don't?)
tr_operand.store(&bcx, dest);
tr_operand.val.store(&bcx, dest);
bcx
}
@ -60,7 +60,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
temp.store(&bcx, dest);
temp.val.store(&bcx, dest);
return bcx;
}
@ -80,14 +80,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// index into the struct, and this case isn't
// important enough for it.
debug!("trans_rvalue: creating ugly alloca");
let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp");
let scratch = LvalueRef::alloca(&bcx, operand.layout, "__unsize_temp");
scratch.storage_live(&bcx);
operand.store(&bcx, scratch);
operand.val.store(&bcx, scratch);
base::coerce_unsized_into(&bcx, scratch, dest);
scratch.storage_dead(&bcx);
}
OperandValue::Ref(llref, align) => {
let source = LvalueRef::new_sized(llref, operand.ty, align);
let source = LvalueRef::new_sized(llref, operand.layout, align);
base::coerce_unsized_into(&bcx, source, dest);
}
}
@ -98,8 +98,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let tr_elem = self.trans_operand(&bcx, elem);
// Do not generate the loop for zero-sized elements or empty arrays.
let dest_ty = dest.ty.to_ty(bcx.tcx());
if common::type_is_zero_size(bcx.ccx, dest_ty) {
if dest.layout.is_zst() {
return bcx;
}
@ -107,9 +106,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
if let OperandValue::Immediate(v) = tr_elem.val {
let align = dest.alignment.non_abi()
.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty));
.unwrap_or_else(|| tr_elem.layout.align(bcx.ccx));
let align = C_i32(bcx.ccx, align.abi() as i32);
let size = C_usize(bcx.ccx, bcx.ccx.size_of(dest_ty).bytes());
let size = C_usize(bcx.ccx, dest.layout.size(bcx.ccx).bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays
if common::is_const_integral(v) && common::const_to_uint(v) == 0 {
@ -139,8 +138,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let keep_going = header_bcx.icmp(llvm::IntNE, current, end);
header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());
tr_elem.store(&body_bcx,
LvalueRef::new_sized(current, tr_elem.ty, dest.alignment));
tr_elem.val.store(&body_bcx,
LvalueRef::new_sized(current, tr_elem.layout, dest.alignment));
let next = body_bcx.inbounds_gep(current, &[C_usize(bcx.ccx, 1)]);
body_bcx.br(header_bcx.llbb());
@ -164,9 +163,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPs for zero-sized fields.
if !common::type_is_zero_size(bcx.ccx, op.ty) {
if !op.layout.is_zst() {
let field_index = active_field_index.unwrap_or(i);
op.store(&bcx, dest.project_field(&bcx, field_index));
op.val.store(&bcx, dest.project_field(&bcx, field_index));
}
}
bcx
@ -175,7 +174,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
_ => {
assert!(self.rvalue_creates_operand(rvalue));
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
temp.store(&bcx, dest);
temp.val.store(&bcx, dest);
bcx
}
}
@ -189,32 +188,32 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
let operand = self.trans_operand(&bcx, source);
debug!("cast operand is {:?}", operand);
let cast_ty = self.monomorphize(&cast_ty);
let cast = bcx.ccx.layout_of(self.monomorphize(&mir_cast_ty));
let val = match *kind {
mir::CastKind::ReifyFnPointer => {
match operand.ty.sty {
match operand.layout.ty.sty {
ty::TyFnDef(def_id, substs) => {
OperandValue::Immediate(
callee::resolve_and_get_fn(bcx.ccx, def_id, substs))
}
_ => {
bug!("{} cannot be reified to a fn ptr", operand.ty)
bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
}
}
}
mir::CastKind::ClosureFnPointer => {
match operand.ty.sty {
match operand.layout.ty.sty {
ty::TyClosure(def_id, substs) => {
let instance = monomorphize::resolve_closure(
bcx.ccx.tcx(), def_id, substs, ty::ClosureKind::FnOnce);
OperandValue::Immediate(callee::get_fn(bcx.ccx, instance))
}
_ => {
bug!("{} cannot be cast to a fn ptr", operand.ty)
bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
}
}
}
@ -225,7 +224,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::CastKind::Unsize => {
// unsize targets other than a fat pointer currently
// can't be operands.
assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty));
assert!(common::type_is_fat_ptr(bcx.ccx, cast.ty));
match operand.val {
OperandValue::Pair(lldata, llextra) => {
@ -235,14 +234,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// &'a fmt::Debug+Send => &'a fmt::Debug,
// So we need to pointercast the base to ensure
// the types match up.
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty);
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty);
let lldata = bcx.pointercast(lldata, llcast_ty);
OperandValue::Pair(lldata, llextra)
}
OperandValue::Immediate(lldata) => {
// "standard" unsize
let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata,
operand.ty, cast_ty);
operand.layout.ty, cast.ty);
OperandValue::Pair(lldata, llextra)
}
OperandValue::Ref(..) => {
@ -251,16 +250,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
}
}
mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => {
mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.layout.ty) => {
if let OperandValue::Pair(data_ptr, meta) = operand.val {
if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty);
if common::type_is_fat_ptr(bcx.ccx, cast.ty) {
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty);
let data_cast = bcx.pointercast(data_ptr, llcast_ty);
OperandValue::Pair(data_cast, meta)
} else { // cast to thin-ptr
// Cast of fat-ptr to thin-ptr is an extraction of the data-ptr and a
// pointer-cast of that pointer to the desired pointer type.
let llcast_ty = bcx.ccx.immediate_llvm_type_of(cast_ty);
let llcast_ty = bcx.ccx.immediate_llvm_type_of(cast.ty);
let llval = bcx.pointercast(data_ptr, llcast_ty);
OperandValue::Immediate(llval)
}
@ -269,15 +268,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
}
mir::CastKind::Misc => {
debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty));
let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
let ll_t_in = bcx.ccx.immediate_llvm_type_of(operand.ty);
let ll_t_out = bcx.ccx.immediate_llvm_type_of(cast_ty);
debug_assert!(common::type_is_immediate(bcx.ccx, cast.ty));
let r_t_in = CastTy::from_ty(operand.layout.ty)
.expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
let ll_t_in = bcx.ccx.immediate_llvm_type_of(operand.layout.ty);
let ll_t_out = bcx.ccx.immediate_llvm_type_of(cast.ty);
let llval = operand.immediate();
let l = bcx.ccx.layout_of(operand.ty);
if let Layout::General { ref discr_range, .. } = *l.layout {
if let Layout::General { ref discr_range, .. } = *operand.layout.layout {
if discr_range.end > discr_range.start {
// We want `table[e as usize]` to not
// have bounds checks, and this is the most
@ -291,7 +290,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
}
let signed = match l.abi {
let signed = match operand.layout.abi {
layout::Abi::Scalar(layout::Int(_, signed)) => signed,
_ => false
};
@ -326,49 +325,43 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
cast_float_to_int(&bcx, true, llval, ll_t_in, ll_t_out),
(CastTy::Float, CastTy::Int(_)) =>
cast_float_to_int(&bcx, false, llval, ll_t_in, ll_t_out),
_ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
_ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty)
};
OperandValue::Immediate(newval)
}
};
let operand = OperandRef {
(bcx, OperandRef {
val,
ty: cast_ty
};
(bcx, operand)
layout: cast
})
}
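// Each scalar `Misc` cast above lowers to a single conversion chosen
// from the (input, output) `CastTy` pair, e.g. float-to-int goes
// through `cast_float_to_int`.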
mir::Rvalue::Ref(_, bk, ref lvalue) => {
let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
let ref_ty = bcx.tcx().mk_ref(
bcx.tcx().types.re_erased,
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
);
let ty = tr_lvalue.layout.ty;
// Note: lvalues are indirect, so storing the `llval` into the
// destination effectively creates a reference.
let operand = if !bcx.ccx.shared().type_has_metadata(ty) {
OperandRef {
val: OperandValue::Immediate(tr_lvalue.llval),
ty: ref_ty,
}
let val = if !bcx.ccx.shared().type_has_metadata(ty) {
OperandValue::Immediate(tr_lvalue.llval)
} else {
OperandRef {
val: OperandValue::Pair(tr_lvalue.llval,
tr_lvalue.llextra),
ty: ref_ty,
}
OperandValue::Pair(tr_lvalue.llval, tr_lvalue.llextra)
};
(bcx, operand)
(bcx, OperandRef {
val,
layout: self.ccx.layout_of(self.ccx.tcx().mk_ref(
self.ccx.tcx().types.re_erased,
ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() }
)),
})
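// Only types with metadata (slices, trait objects, ...) need the
// `Pair` form carrying `llextra`; a thin reference is just the
// lvalue's address.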
}
mir::Rvalue::Len(ref lvalue) => {
let size = self.evaluate_array_len(&bcx, lvalue);
let operand = OperandRef {
val: OperandValue::Immediate(size),
ty: bcx.tcx().types.usize,
layout: bcx.ccx.layout_of(bcx.tcx().types.usize),
};
(bcx, operand)
}
@ -376,14 +369,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
let lhs = self.trans_operand(&bcx, lhs);
let rhs = self.trans_operand(&bcx, rhs);
let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) {
let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.layout.ty) {
match (lhs.val, rhs.val) {
(OperandValue::Pair(lhs_addr, lhs_extra),
OperandValue::Pair(rhs_addr, rhs_extra)) => {
self.trans_fat_ptr_binop(&bcx, op,
lhs_addr, lhs_extra,
rhs_addr, rhs_extra,
lhs.ty)
lhs.layout.ty)
}
_ => bug!()
}
@ -391,11 +384,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
} else {
self.trans_scalar_binop(&bcx, op,
lhs.immediate(), rhs.immediate(),
lhs.ty)
lhs.layout.ty)
};
let operand = OperandRef {
val: OperandValue::Immediate(llresult),
ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
layout: bcx.ccx.layout_of(
op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty)),
};
(bcx, operand)
}
@ -404,12 +398,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let rhs = self.trans_operand(&bcx, rhs);
let result = self.trans_scalar_checked_binop(&bcx, op,
lhs.immediate(), rhs.immediate(),
lhs.ty);
let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
lhs.layout.ty);
let val_ty = op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty);
let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false);
let operand = OperandRef {
val: result,
ty: operand_ty
layout: bcx.ccx.layout_of(operand_ty)
};
(bcx, operand)
@ -418,7 +412,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::Rvalue::UnaryOp(op, ref operand) => {
let operand = self.trans_operand(&bcx, operand);
let lloperand = operand.immediate();
let is_float = operand.ty.is_fp();
let is_float = operand.layout.ty.is_fp();
let llval = match op {
mir::UnOp::Not => bcx.not(lloperand),
mir::UnOp::Neg => if is_float {
@ -429,7 +423,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
};
(bcx, OperandRef {
val: OperandValue::Immediate(llval),
ty: operand.ty,
layout: operand.layout,
})
}
@ -439,7 +433,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
.trans_get_discr(&bcx, discr_ty);
(bcx, OperandRef {
val: OperandValue::Immediate(discr),
ty: discr_ty
layout: self.ccx.layout_of(discr_ty)
})
}
@ -449,7 +443,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let tcx = bcx.tcx();
(bcx, OperandRef {
val: OperandValue::Immediate(val),
ty: tcx.types.usize,
layout: self.ccx.layout_of(tcx.types.usize),
})
}
@ -458,14 +452,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let (size, align) = bcx.ccx.size_and_align_of(content_ty);
let llsize = C_usize(bcx.ccx, size.bytes());
let llalign = C_usize(bcx.ccx, align.abi());
let box_ty = bcx.tcx().mk_box(content_ty);
let llty_ptr = bcx.ccx.llvm_type_of(box_ty);
let box_layout = bcx.ccx.layout_of(bcx.tcx().mk_box(content_ty));
let llty_ptr = bcx.ccx.llvm_type_of(box_layout.ty);
// Allocate space:
let def_id = match bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
Ok(id) => id,
Err(s) => {
bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
bcx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
}
};
let instance = ty::Instance::mono(bcx.tcx(), def_id);
@ -474,7 +468,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let operand = OperandRef {
val: OperandValue::Immediate(val),
ty: box_ty,
layout: box_layout,
};
(bcx, operand)
}
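// `box` lowering: compute (size, align) of the contents and resolve
// the `exchange_malloc` lang item; the allocation call itself falls
// outside this hunk, but its result becomes the `Box<T>` immediate.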
@ -487,7 +481,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// According to `rvalue_creates_operand`, only ZST
// aggregate rvalues are allowed to be operands.
let ty = rvalue.ty(self.mir, self.ccx.tcx());
(bcx, OperandRef::new_zst(self.ccx, self.monomorphize(&ty)))
(bcx, OperandRef::new_zst(self.ccx,
self.ccx.layout_of(self.monomorphize(&ty))))
}
}
}
@ -500,11 +495,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// because trans_lvalue() panics if the Local is an operand.
if let mir::Lvalue::Local(index) = *lvalue {
if let LocalRef::Operand(Some(op)) = self.locals[index] {
if common::type_is_zero_size(bcx.ccx, op.ty) {
if let ty::TyArray(_, n) = op.ty.sty {
let n = n.val.to_const_int().unwrap().to_u64().unwrap();
return common::C_usize(bcx.ccx, n);
}
if let ty::TyArray(_, n) = op.layout.ty.sty {
let n = n.val.to_const_int().unwrap().to_u64().unwrap();
return common::C_usize(bcx.ccx, n);
}
}
}
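// For a local of array type we can answer `Len` with a constant
// without materializing the lvalue at all; everything else is handled
// by the general path that follows (outside this hunk).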
@ -709,7 +702,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::Rvalue::Aggregate(..) => {
let ty = rvalue.ty(self.mir, self.ccx.tcx());
let ty = self.monomorphize(&ty);
common::type_is_zero_size(self.ccx, ty)
self.ccx.layout_of(ty).is_zst()
}
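// i.e. an aggregate may become an operand only when it is a ZST;
// anything with actual bytes needs a memory destination.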
}

View File

@ -11,7 +11,6 @@
use rustc::mir;
use asm;
use common;
use builder::Builder;
use super::MirContext;
@ -37,18 +36,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
self.locals[index] = LocalRef::Operand(Some(operand));
bcx
}
LocalRef::Operand(Some(_)) => {
let ty = self.monomorphized_lvalue_ty(lvalue);
if !common::type_is_zero_size(bcx.ccx, ty) {
LocalRef::Operand(Some(op)) => {
if !op.layout.is_zst() {
span_bug!(statement.source_info.span,
"operand {:?} already assigned",
rvalue);
} else {
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we translate the operand
self.trans_rvalue_operand(bcx, rvalue).0
}
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we translate the operand
self.trans_rvalue_operand(bcx, rvalue).0
}
}
} else {
@ -75,8 +72,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| {
let lvalue = self.trans_lvalue(&bcx, output);
(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
self.trans_lvalue(&bcx, output)
}).collect();
let input_vals = inputs.iter().map(|input| {

View File

@ -209,14 +209,7 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> {
/// Returns the alignment if it is different from the primitive alignment.
pub fn over_align_of(&self, ty: Ty<'tcx>) -> Option<Align> {
let layout = self.layout_of(ty);
let align = layout.align(self);
let primitive_align = layout.primitive_align(self);
if align != primitive_align {
Some(align)
} else {
None
}
self.layout_of(ty).over_align(self)
}
/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`.
@ -275,10 +268,21 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> {
}
pub trait LayoutLlvmExt {
fn over_align(&self, ccx: &CrateContext) -> Option<Align>;
fn llvm_field_index(&self, index: usize) -> u64;
}
impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> {
fn over_align(&self, ccx: &CrateContext) -> Option<Align> {
let align = self.align(ccx);
let primitive_align = self.primitive_align(ccx);
if align != primitive_align {
Some(align)
} else {
None
}
}
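// `over_align` reports `Some` only when the overall alignment exceeds
// the primitive alignment (e.g. raised by an alignment attribute),
// letting callers emit explicit alignment where the default would be
// wrong.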
fn llvm_field_index(&self, index: usize) -> u64 {
if let layout::Abi::Scalar(_) = self.abi {
bug!("FullLayout::llvm_field_index({:?}): not applicable", self);