From f16068e577a916122ff1f24719aad2b80e40c975 Mon Sep 17 00:00:00 2001 From: Austin Hicks Date: Sun, 28 Aug 2016 20:44:19 -0400 Subject: [PATCH] Completely kill `represent_type` and the `adt::Repr` type that goes with it. --- src/librustc/ty/context.rs | 4 + src/librustc/ty/layout.rs | 41 +- src/librustc/ty/util.rs | 9 + src/librustc_trans/abi.rs | 16 +- src/librustc_trans/adt.rs | 1052 +++++++--------------- src/librustc_trans/base.rs | 30 +- src/librustc_trans/common.rs | 6 - src/librustc_trans/context.rs | 6 +- src/librustc_trans/debuginfo/metadata.rs | 92 +- src/librustc_trans/debuginfo/mod.rs | 4 +- src/librustc_trans/glue.rs | 23 +- src/librustc_trans/intrinsic.rs | 8 +- src/librustc_trans/machine.rs | 29 - src/librustc_trans/mir/block.rs | 8 +- src/librustc_trans/mir/constant.rs | 21 +- src/librustc_trans/mir/lvalue.rs | 3 +- src/librustc_trans/mir/rvalue.rs | 15 +- src/librustc_trans/mir/statement.rs | 3 +- src/librustc_trans/type_.rs | 21 + src/librustc_trans/type_of.rs | 38 +- 20 files changed, 517 insertions(+), 912 deletions(-) diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index d5e5f4402bb..c24dd4aaed6 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -489,6 +489,9 @@ pub struct GlobalCtxt<'tcx> { /// Cache for layouts computed from types. pub layout_cache: RefCell, &'tcx Layout>>, + //Used to prevent layout from recursing too deeply. + pub layout_depth: Cell, + /// Map from function to the `#[derive]` mode that it's defining. Only used /// by `rustc-macro` crates. pub derive_macros: RefCell>, @@ -760,6 +763,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { crate_name: token::intern_and_get_ident(crate_name), data_layout: data_layout, layout_cache: RefCell::new(FnvHashMap()), + layout_depth: Cell::new(0), derive_macros: RefCell::new(NodeMap()), }, f) } diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 5e7a2bc0266..c8bcda8c530 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -328,6 +328,33 @@ pub enum Integer { } impl Integer { + + pub fn size(&self) -> Size { + match *self { + I1 => Size::from_bits(1), + I8 => Size::from_bytes(1), + I16 => Size::from_bytes(2), + I32 => Size::from_bytes(4), + I64 => Size::from_bytes(8), + } + } + + pub fn to_ty<'a, 'tcx>(&self, tcx: &ty::TyCtxt<'a, 'tcx, 'tcx>, + signed: bool) -> Ty<'tcx> { + match (*self, signed) { + (I1, false) => tcx.types.u8, + (I8, false) => tcx.types.u8, + (I16, false) => tcx.types.u16, + (I32, false) => tcx.types.u32, + (I64, false) => tcx.types.u64, + (I1, true) => tcx.types.i8, + (I8, true) => tcx.types.i8, + (I16, true) => tcx.types.i16, + (I32, true) => tcx.types.i32, + (I64, true) => tcx.types.i64, + } + } + /// Find the smallest Integer type which can represent the signed value. pub fn fit_signed(x: i64) -> Integer { match x { @@ -912,7 +939,7 @@ impl<'a, 'gcx, 'tcx> Layout { Univariant { variant: unit, non_zero: false } } - // Tuples. + // Tuples and closures. ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) | ty::TyTuple(tys) => { let mut st = Struct::new(dl, false); @@ -975,7 +1002,7 @@ impl<'a, 'gcx, 'tcx> Layout { if def.variants.len() == 1 { // Struct, or union, or univariant enum equivalent to a struct. // (Typechecking will reject discriminant-sizing attrs.) 
- assert!(!def.is_enum() || hint == attr::ReprAny); + let fields = def.variants[0].fields.iter().map(|field| { field.ty(tcx, substs).layout(infcx) }); @@ -1003,6 +1030,16 @@ impl<'a, 'gcx, 'tcx> Layout { } } + if def.variants.len() == 1 && hint == attr::ReprAny{ + // Equivalent to a struct/tuple/newtype. + let fields = def.variants[0].fields.iter().map(|field| { + field.ty(tcx, substs).layout(infcx) + }); + let mut st = Struct::new(dl, false); + st.extend(dl, fields, ty)?; + return success(Univariant { variant: st, non_zero: false }); + } + // Cache the substituted and normalized variant field types. let variants = def.variants.iter().map(|v| { v.fields.iter().map(|field| field.ty(tcx, substs)).collect::>() diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index d834a7d485a..c8fd27f066c 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -608,10 +608,19 @@ impl<'a, 'tcx> ty::TyS<'tcx> { } } + let rec_limit = tcx.sess.recursion_limit.get(); + let depth = tcx.layout_depth.get(); + if depth > rec_limit { + tcx.sess.fatal( + &format!("overflow representing the type `{}`", self)); + } + + tcx.layout_depth.set(depth+1); let layout = Layout::compute_uncached(self, infcx)?; if can_cache { tcx.layout_cache.borrow_mut().insert(self, layout); } + tcx.layout_depth.set(depth); Ok(layout) } diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 1a6c34b55af..683ad76952a 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -24,7 +24,7 @@ use cabi_s390x; use cabi_mips; use cabi_mips64; use cabi_asmjs; -use machine::{llalign_of_min, llsize_of, llsize_of_real, llsize_of_store}; +use machine::{llalign_of_min, llsize_of, llsize_of_alloc}; use type_::Type; use type_of; @@ -102,7 +102,7 @@ impl ArgType { // Wipe old attributes, likely not valid through indirection. self.attrs = llvm::Attributes::default(); - let llarg_sz = llsize_of_real(ccx, self.ty); + let llarg_sz = llsize_of_alloc(ccx, self.ty); // For non-immediate arguments the callee gets its own copy of // the value on the stack, so there are no aliases. It's also @@ -200,7 +200,7 @@ impl ArgType { base::call_memcpy(bcx, bcx.pointercast(dst, Type::i8p(ccx)), bcx.pointercast(llscratch, Type::i8p(ccx)), - C_uint(ccx, llsize_of_store(ccx, self.ty)), + C_uint(ccx, llsize_of_alloc(ccx, self.ty)), cmp::min(llalign_of_min(ccx, self.ty), llalign_of_min(ccx, ty)) as u32); @@ -327,7 +327,7 @@ impl FnType { if let Layout::CEnum { signed, .. } = *ccx.layout_of(ty) { arg.signedness = Some(signed); } - if llsize_of_real(ccx, arg.ty) == 0 { + if llsize_of_alloc(ccx, arg.ty) == 0 { // For some forsaken reason, x86_64-pc-windows-gnu // doesn't ignore zero-sized struct arguments. // The same is true for s390x-unknown-linux-gnu. @@ -358,7 +358,7 @@ impl FnType { ty::TyRef(_, ty::TypeAndMut { ty, .. 
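The two hunks above, the new `layout_depth: Cell<usize>` field on `GlobalCtxt` and the check added in `ty/util.rs`, work together as a simple re-entrancy counter: layout computation bumps the counter on entry, aborts with a fatal "overflow representing the type" error once the nesting exceeds the session's recursion limit, and restores the old value on exit. Below is a minimal standalone sketch of that pattern; the `Ctxt` type, its field names, and the `Result`-based error handling are illustrative stand-ins, not the real compiler API.

use std::cell::Cell;

// Illustrative stand-in for the relevant bits of the real context:
// `GlobalCtxt::layout_depth` and `Session::recursion_limit`.
struct Ctxt {
    layout_depth: Cell<usize>,
    recursion_limit: usize,
}

impl Ctxt {
    // Guard a (possibly self-recursive) computation the same way the patch
    // guards `Layout::compute_uncached`: fail once the nesting gets too
    // deep, otherwise run it and restore the previous depth afterwards.
    fn with_layout_depth<T>(&self, f: impl FnOnce(&Ctxt) -> T) -> Result<T, String> {
        let depth = self.layout_depth.get();
        if depth > self.recursion_limit {
            return Err("overflow representing the type".to_string());
        }
        self.layout_depth.set(depth + 1);
        let result = f(self);
        self.layout_depth.set(depth);
        Ok(result)
    }
}

fn main() {
    let cx = Ctxt { layout_depth: Cell::new(0), recursion_limit: 64 };
    let computed = cx.with_layout_depth(|_| "layout computed");
    assert_eq!(computed, Ok("layout computed"));
}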
}) | ty::TyBox(ty) => { let llty = type_of::sizing_type_of(ccx, ty); - let llsz = llsize_of_real(ccx, llty); + let llsz = llsize_of_alloc(ccx, llty); ret.attrs.set_dereferenceable(llsz); } _ => {} @@ -427,7 +427,7 @@ impl FnType { } else { if let Some(inner) = rust_ptr_attrs(ty, &mut arg) { let llty = type_of::sizing_type_of(ccx, inner); - let llsz = llsize_of_real(ccx, llty); + let llsz = llsize_of_alloc(ccx, llty); arg.attrs.set_dereferenceable(llsz); } args.push(arg); @@ -469,8 +469,8 @@ impl FnType { return; } - let size = llsize_of_real(ccx, llty); - if size > llsize_of_real(ccx, ccx.int_type()) { + let size = llsize_of_alloc(ccx, llty); + if size > llsize_of_alloc(ccx, ccx.int_type()) { arg.make_indirect(ccx); } else if size > 0 { // We want to pass small aggregates as immediates, but using diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index fdbee50992d..e3b15c8e2b9 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -45,20 +45,17 @@ pub use self::Repr::*; use super::Disr; use std; -use std::rc::Rc; use llvm::{ValueRef, True, IntEQ, IntNE}; -use rustc::ty::subst::Substs; -use rustc::ty::{self, AdtKind, Ty, TyCtxt}; -use syntax::ast; +use rustc::ty::layout; +use rustc::ty::{self, Ty, AdtKind}; use syntax::attr; use syntax::attr::IntType; -use abi::FAT_PTR_ADDR; -use base; use build::*; use common::*; use debuginfo::DebugLoc; use glue; +use base; use machine; use monomorphize; use type_::Type; @@ -159,507 +156,34 @@ impl MaybeSizedValue { } } -/// Decides how to represent a given type. -pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>) - -> Rc> { - debug!("Representing: {}", t); - if let Some(repr) = cx.adt_reprs().borrow().get(&t) { - return repr.clone(); - } - - let repr = Rc::new(represent_type_uncached(cx, t)); - debug!("Represented as: {:?}", repr); - cx.adt_reprs().borrow_mut().insert(t, repr.clone()); - repr -} - -fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>) -> Repr<'tcx> { +//Given an enum, struct, closure, or tuple, extracts fields. +//treats closures as a struct with one variant. +//`empty_if_no_variants` is a switch to deal with empty enums. +//if true, `variant_index` is disregarded and an empty Vec returned in this case. +fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, + variant_index: usize, + empty_if_no_variants: bool) -> Vec> { match t.sty { - ty::TyTuple(ref elems) => { - Univariant(mk_struct(cx, &elems[..], false, t)) - } - ty::TyClosure(_, ref substs) => { - Univariant(mk_struct(cx, &substs.upvar_tys, false, t)) - } - ty::TyAdt(def, substs) => match def.adt_kind() { - AdtKind::Struct => { - let ftys = def.struct_variant().fields.iter().map(|field| { - monomorphize::field_ty(cx.tcx(), substs, field) - }).collect::>(); - let packed = cx.tcx().lookup_packed(def.did); - - Univariant(mk_struct(cx, &ftys[..], packed, t)) - } - AdtKind::Union => { - let ftys = def.struct_variant().fields.iter().map(|field| { - monomorphize::field_ty(cx.tcx(), substs, field) - }).collect::>(); - let packed = cx.tcx().lookup_packed(def.did); - UntaggedUnion(mk_union(cx, &ftys[..], packed, t)) - } - AdtKind::Enum => { - let cases = get_cases(cx.tcx(), def, substs); - let hint = *cx.tcx().lookup_repr_hints(def.did).get(0) - .unwrap_or(&attr::ReprAny); - - if cases.is_empty() { - // Uninhabitable; represent as unit - // (Typechecking will reject discriminant-sizing attrs.) 
- assert_eq!(hint, attr::ReprAny); - return Univariant(mk_struct(cx, &[], false, t)); - } - - if cases.iter().all(|c| c.tys.is_empty()) { - // All bodies empty -> intlike - let discrs: Vec<_> = cases.iter().map(|c| Disr::from(c.discr)).collect(); - let bounds = IntBounds { - ulo: discrs.iter().min().unwrap().0, - uhi: discrs.iter().max().unwrap().0, - slo: discrs.iter().map(|n| n.0 as i64).min().unwrap(), - shi: discrs.iter().map(|n| n.0 as i64).max().unwrap() - }; - return mk_cenum(cx, hint, &bounds); - } - - // Since there's at least one - // non-empty body, explicit discriminants should have - // been rejected by a checker before this point. - if !cases.iter().enumerate().all(|(i,c)| c.discr == Disr::from(i)) { - bug!("non-C-like enum {} with specified discriminants", - cx.tcx().item_path_str(def.did)); - } - - if cases.len() == 1 && hint == attr::ReprAny { - // Equivalent to a struct or tuple. - return Univariant(mk_struct(cx, &cases[0].tys, false, t)); - } - - if cases.len() == 2 && hint == attr::ReprAny { - // Nullable pointer optimization - let mut discr = 0; - while discr < 2 { - if cases[1 - discr].is_zerolen(cx, t) { - let st = mk_struct(cx, &cases[discr].tys, - false, t); - match cases[discr].find_ptr(cx) { - Some(ref df) if df.len() == 1 && st.fields.len() == 1 => { - return RawNullablePointer { - nndiscr: Disr::from(discr), - nnty: st.fields[0], - nullfields: cases[1 - discr].tys.clone() - }; - } - Some(mut discrfield) => { - discrfield.push(0); - discrfield.reverse(); - return StructWrappedNullablePointer { - nndiscr: Disr::from(discr), - nonnull: st, - discrfield: discrfield, - nullfields: cases[1 - discr].tys.clone() - }; - } - None => {} - } - } - discr += 1; - } - } - - // The general case. - assert!((cases.len() - 1) as i64 >= 0); - let bounds = IntBounds { ulo: 0, uhi: (cases.len() - 1) as u64, - slo: 0, shi: (cases.len() - 1) as i64 }; - let min_ity = range_to_inttype(cx, hint, &bounds); - - // Create the set of structs that represent each variant - // Use the minimum integer type we figured out above - let fields : Vec<_> = cases.iter().map(|c| { - let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity)); - ftys.extend_from_slice(&c.tys); - mk_struct(cx, &ftys, false, t) - }).collect(); - - - // Check to see if we should use a different type for the - // discriminant. If the overall alignment of the type is - // the same as the first field in each variant, we can safely use - // an alignment-sized type. - // We increase the size of the discriminant to avoid LLVM copying - // padding when it doesn't need to. This normally causes unaligned - // load/stores and excessive memcpy/memset operations. By using a - // bigger integer size, LLVM can be sure about it's contents and - // won't be so conservative. - // This check is needed to avoid increasing the size of types when - // the alignment of the first field is smaller than the overall - // alignment of the type. 
- let (_, align) = union_size_and_align(&fields); - let mut use_align = true; - for st in &fields { - // Get the first non-zero-sized field - let field = st.fields.iter().skip(1).filter(|ty| { - let t = type_of::sizing_type_of(cx, **ty); - machine::llsize_of_real(cx, t) != 0 || - // This case is only relevant for zero-sized types with large alignment - machine::llalign_of_min(cx, t) != 1 - }).next(); - - if let Some(field) = field { - let field_align = type_of::align_of(cx, *field); - if field_align != align { - use_align = false; - break; - } - } - } - - // If the alignment is smaller than the chosen discriminant size, don't use the - // alignment as the final size. - let min_ty = ll_inttype(&cx, min_ity); - let min_size = machine::llsize_of_real(cx, min_ty); - if (align as u64) < min_size { - use_align = false; - } - - let ity = if use_align { - // Use the overall alignment - match align { - 1 => attr::UnsignedInt(ast::UintTy::U8), - 2 => attr::UnsignedInt(ast::UintTy::U16), - 4 => attr::UnsignedInt(ast::UintTy::U32), - 8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 => - attr::UnsignedInt(ast::UintTy::U64), - _ => min_ity // use min_ity as a fallback - } - } else { - min_ity - }; - - let fields : Vec<_> = cases.iter().map(|c| { - let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity)); - ftys.extend_from_slice(&c.tys); - mk_struct(cx, &ftys[..], false, t) - }).collect(); - - ensure_enum_fits_in_address_space(cx, &fields[..], t); - - General(ity, fields) - } + ty::TyAdt(ref def, _) if def.variants.len() == 0 && empty_if_no_variants => { + Vec::default() }, - _ => bug!("adt::represent_type called on non-ADT type: {}", t) + ty::TyAdt(ref def, ref substs) => { + def.variants[variant_index].fields.iter().map(|f| { + monomorphize::field_ty(cx.tcx(), substs, f) + }).collect::>() + }, + ty::TyTuple(fields) => fields.to_vec(), + ty::TyClosure(_, substs) => { + if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);} + substs.upvar_tys.to_vec() + }, + _ => bug!("{} is not a type that can have fields.", t) } } -// this should probably all be in ty -struct Case<'tcx> { - discr: Disr, - tys: Vec> -} - /// This represents the (GEP) indices to follow to get to the discriminant field pub type DiscrField = Vec; -fn find_discr_field_candidate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - mut path: DiscrField) - -> Option { - match ty.sty { - // Fat &T/&mut T/Box i.e. T is [T], str, or Trait - ty::TyRef(_, ty::TypeAndMut { ty, .. }) | ty::TyBox(ty) if !type_is_sized(tcx, ty) => { - path.push(FAT_PTR_ADDR); - Some(path) - }, - - // Regular thin pointer: &T/&mut T/Box - ty::TyRef(..) | ty::TyBox(..) => Some(path), - - // Function pointer: `fn() -> i32` - ty::TyFnPtr(_) => Some(path), - - // Is this the NonZero lang item wrapping a pointer or integer type? - ty::TyAdt(def, substs) if Some(def.did) == tcx.lang_items.non_zero() => { - let nonzero_fields = &def.struct_variant().fields; - assert_eq!(nonzero_fields.len(), 1); - let field_ty = monomorphize::field_ty(tcx, substs, &nonzero_fields[0]); - match field_ty.sty { - ty::TyRawPtr(ty::TypeAndMut { ty, .. }) if !type_is_sized(tcx, ty) => { - path.extend_from_slice(&[0, FAT_PTR_ADDR]); - Some(path) - }, - ty::TyRawPtr(..) | ty::TyInt(..) | ty::TyUint(..) 
=> { - path.push(0); - Some(path) - }, - _ => None - } - }, - - // Perhaps one of the fields of this struct is non-zero - // let's recurse and find out - ty::TyAdt(def, substs) if def.is_struct() => { - for (j, field) in def.struct_variant().fields.iter().enumerate() { - let field_ty = monomorphize::field_ty(tcx, substs, field); - if let Some(mut fpath) = find_discr_field_candidate(tcx, field_ty, path.clone()) { - fpath.push(j); - return Some(fpath); - } - } - None - }, - - // Perhaps one of the upvars of this struct is non-zero - // Let's recurse and find out! - ty::TyClosure(_, ref substs) => { - for (j, &ty) in substs.upvar_tys.iter().enumerate() { - if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) { - fpath.push(j); - return Some(fpath); - } - } - None - }, - - // Can we use one of the fields in this tuple? - ty::TyTuple(ref tys) => { - for (j, &ty) in tys.iter().enumerate() { - if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) { - fpath.push(j); - return Some(fpath); - } - } - None - }, - - // Is this a fixed-size array of something non-zero - // with at least one element? - ty::TyArray(ety, d) if d > 0 => { - if let Some(mut vpath) = find_discr_field_candidate(tcx, ety, path) { - vpath.push(0); - Some(vpath) - } else { - None - } - }, - - // Anything else is not a pointer - _ => None - } -} - -impl<'tcx> Case<'tcx> { - fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>) -> bool { - mk_struct(cx, &self.tys, false, scapegoat).size == 0 - } - - fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option { - for (i, &ty) in self.tys.iter().enumerate() { - if let Some(mut path) = find_discr_field_candidate(cx.tcx(), ty, vec![]) { - path.push(i); - return Some(path); - } - } - None - } -} - -fn get_cases<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - adt: ty::AdtDef<'tcx>, - substs: &Substs<'tcx>) - -> Vec> { - adt.variants.iter().map(|vi| { - let field_tys = vi.fields.iter().map(|field| { - monomorphize::field_ty(tcx, substs, field) - }).collect(); - Case { discr: Disr::from(vi.disr_val), tys: field_tys } - }).collect() -} - -fn mk_struct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - tys: &[Ty<'tcx>], packed: bool, - scapegoat: Ty<'tcx>) - -> Struct<'tcx> { - let sized = tys.iter().all(|&ty| type_is_sized(cx.tcx(), ty)); - let lltys : Vec = if sized { - tys.iter().map(|&ty| type_of::sizing_type_of(cx, ty)).collect() - } else { - tys.iter().filter(|&ty| type_is_sized(cx.tcx(), *ty)) - .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() - }; - - ensure_struct_fits_in_address_space(cx, &lltys[..], packed, scapegoat); - - let llty_rec = Type::struct_(cx, &lltys[..], packed); - Struct { - size: machine::llsize_of_alloc(cx, llty_rec), - align: machine::llalign_of_min(cx, llty_rec), - sized: sized, - packed: packed, - fields: tys.to_vec(), - } -} - -fn mk_union<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - tys: &[Ty<'tcx>], packed: bool, - _scapegoat: Ty<'tcx>) - -> Union<'tcx> { - let mut min_size = 0; - let mut align = 0; - for llty in tys.iter().map(|&ty| type_of::sizing_type_of(cx, ty)) { - let field_size = machine::llsize_of_alloc(cx, llty); - if min_size < field_size { - min_size = field_size; - } - let field_align = machine::llalign_of_min(cx, llty); - if align < field_align { - align = field_align; - } - } - - Union { - min_size: min_size, - align: if packed { 1 } else { align }, - packed: packed, - fields: tys.to_vec(), - } -} - -#[derive(Debug)] -struct IntBounds { - slo: i64, - shi: i64, - ulo: u64, - uhi: u64 -} - -fn 
mk_cenum<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - hint: Hint, bounds: &IntBounds) - -> Repr<'tcx> { - let it = range_to_inttype(cx, hint, bounds); - match it { - attr::SignedInt(_) => CEnum(it, Disr(bounds.slo as u64), Disr(bounds.shi as u64)), - attr::UnsignedInt(_) => CEnum(it, Disr(bounds.ulo), Disr(bounds.uhi)) - } -} - -fn range_to_inttype(cx: &CrateContext, hint: Hint, bounds: &IntBounds) -> IntType { - debug!("range_to_inttype: {:?} {:?}", hint, bounds); - // Lists of sizes to try. u64 is always allowed as a fallback. - #[allow(non_upper_case_globals)] - const choose_shortest: &'static [IntType] = &[ - attr::UnsignedInt(ast::UintTy::U8), attr::SignedInt(ast::IntTy::I8), - attr::UnsignedInt(ast::UintTy::U16), attr::SignedInt(ast::IntTy::I16), - attr::UnsignedInt(ast::UintTy::U32), attr::SignedInt(ast::IntTy::I32)]; - #[allow(non_upper_case_globals)] - const at_least_32: &'static [IntType] = &[ - attr::UnsignedInt(ast::UintTy::U32), attr::SignedInt(ast::IntTy::I32)]; - - let attempts; - match hint { - attr::ReprInt(ity) => { - if !bounds_usable(cx, ity, bounds) { - bug!("representation hint insufficient for discriminant range") - } - return ity; - } - attr::ReprExtern => { - attempts = match &cx.sess().target.target.arch[..] { - // WARNING: the ARM EABI has two variants; the one corresponding to `at_least_32` - // appears to be used on Linux and NetBSD, but some systems may use the variant - // corresponding to `choose_shortest`. However, we don't run on those yet...? - "arm" => at_least_32, - _ => at_least_32, - } - } - attr::ReprAny => { - attempts = choose_shortest; - }, - attr::ReprPacked => { - bug!("range_to_inttype: found ReprPacked on an enum"); - } - attr::ReprSimd => { - bug!("range_to_inttype: found ReprSimd on an enum"); - } - } - for &ity in attempts { - if bounds_usable(cx, ity, bounds) { - return ity; - } - } - return attr::UnsignedInt(ast::UintTy::U64); -} - -pub fn ll_inttype(cx: &CrateContext, ity: IntType) -> Type { - match ity { - attr::SignedInt(t) => Type::int_from_ty(cx, t), - attr::UnsignedInt(t) => Type::uint_from_ty(cx, t) - } -} - -fn bounds_usable(cx: &CrateContext, ity: IntType, bounds: &IntBounds) -> bool { - debug!("bounds_usable: {:?} {:?}", ity, bounds); - match ity { - attr::SignedInt(_) => { - let lllo = C_integral(ll_inttype(cx, ity), bounds.slo as u64, true); - let llhi = C_integral(ll_inttype(cx, ity), bounds.shi as u64, true); - bounds.slo == const_to_int(lllo) as i64 && bounds.shi == const_to_int(llhi) as i64 - } - attr::UnsignedInt(_) => { - let lllo = C_integral(ll_inttype(cx, ity), bounds.ulo, false); - let llhi = C_integral(ll_inttype(cx, ity), bounds.uhi, false); - bounds.ulo == const_to_uint(lllo) as u64 && bounds.uhi == const_to_uint(llhi) as u64 - } - } -} - -pub fn ty_of_inttype<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ity: IntType) -> Ty<'tcx> { - match ity { - attr::SignedInt(t) => tcx.mk_mach_int(t), - attr::UnsignedInt(t) => tcx.mk_mach_uint(t) - } -} - -// LLVM doesn't like types that don't fit in the address space -fn ensure_struct_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fields: &[Type], - packed: bool, - scapegoat: Ty<'tcx>) { - let mut offset = 0; - for &llty in fields { - // Invariant: offset < ccx.obj_size_bound() <= 1<<61 - if !packed { - let type_align = machine::llalign_of_min(ccx, llty); - offset = roundup(offset, type_align); - } - // type_align is a power-of-2, so still offset < ccx.obj_size_bound() - // llsize_of_alloc(ccx, llty) is also less than ccx.obj_size_bound() - // so the sum is less than 
1<<62 (and therefore can't overflow). - offset += machine::llsize_of_alloc(ccx, llty); - - if offset >= ccx.obj_size_bound() { - ccx.report_overbig_object(scapegoat); - } - } -} - -fn union_size_and_align(sts: &[Struct]) -> (machine::llsize, machine::llalign) { - let size = sts.iter().map(|st| st.size).max().unwrap(); - let align = sts.iter().map(|st| st.align).max().unwrap(); - (roundup(size, align), align) -} - -fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fields: &[Struct], - scapegoat: Ty<'tcx>) { - let (total_size, _) = union_size_and_align(fields); - - if total_size >= ccx.obj_size_bound() { - ccx.report_overbig_object(scapegoat); - } -} - - /// LLVM-level types are a little complicated. /// /// C-like enums need to be actual ints, not wrapped in a struct, @@ -668,8 +192,8 @@ fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, /// For nominal types, in some cases, we need to use LLVM named structs /// and fill in the actual contents in a second pass to prevent /// unbounded recursion; see also the comments in `trans::type_of`. -pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type { - generic_type_of(cx, r, None, false, false) +pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { + generic_type_of(cx, t, None, false, false) } @@ -677,41 +201,65 @@ pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type { // this out, but if you call this on an unsized type without realising it, you // are going to get the wrong type (it will not include the unsized parts of it). pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, dst: bool) -> Type { - generic_type_of(cx, r, None, true, dst) + t: Ty<'tcx>, dst: bool) -> Type { + generic_type_of(cx, t, None, true, dst) } pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, name: &str) -> Type { - generic_type_of(cx, r, Some(name), false, false) + t: Ty<'tcx>, name: &str) -> Type { + generic_type_of(cx, t, Some(name), false, false) } pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, llty: &mut Type) { - match *r { - CEnum(..) | General(..) | UntaggedUnion(..) | RawNullablePointer { .. } => { } - Univariant(ref st) | StructWrappedNullablePointer { nonnull: ref st, .. } => - llty.set_struct_body(&struct_llfields(cx, st, false, false), - st.packed) + t: Ty<'tcx>, llty: &mut Type) { + let l = cx.layout_of(t); + debug!("finish_type_of: {} with layout {:#?}", t, l); + match *l { + layout::CEnum { .. } | layout::General { .. } + | layout::UntaggedUnion { .. } | layout::RawNullablePointer { .. } => { } + layout::Univariant { ..} + | layout::StructWrappedNullablePointer { .. } + | layout::Vector { .. } => { + let (nonnull_variant, packed) = match *l { + layout::Univariant { ref variant, .. } => (0, variant.packed), + layout::Vector { .. } => (0, true), + layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. 
} => + (nndiscr, nonnull.packed), + _ => unreachable!() + }; + let fields = compute_fields(cx, t, nonnull_variant as usize, true); + llty.set_struct_body(&struct_llfields(cx, &fields, false, false), + packed) + }, + _ => bug!("This function cannot handle {} with layout {:#?}", t, l) } } fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, + t: Ty<'tcx>, name: Option<&str>, sizing: bool, dst: bool) -> Type { - debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {}", - r, name, sizing, dst); - match *r { - CEnum(ity, ..) => ll_inttype(cx, ity), - RawNullablePointer { nnty, .. } => - type_of::sizing_type_of(cx, nnty), - StructWrappedNullablePointer { nonnull: ref st, .. } => { + let l = cx.layout_of(t); + debug!("adt::generic_type_of t: {:?} name: {:?} sizing: {} dst: {}", + t, name, sizing, dst); + match *l { + layout::CEnum { discr, .. } => Type::from_integer(cx, discr), + layout::RawNullablePointer { nndiscr, .. } => { + let (def, substs) = match t.sty { + ty::TyAdt(d, s) => (d, s), + _ => bug!("{} is not an ADT", t) + }; + let nnty = monomorphize::field_ty(cx.tcx(), substs, + &def.variants[nndiscr as usize].fields[0]); + type_of::sizing_type_of(cx, nnty) + } + layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { + let fields = compute_fields(cx, t, nndiscr as usize, false); match name { None => { - Type::struct_(cx, &struct_llfields(cx, st, sizing, dst), - st.packed) + Type::struct_(cx, &struct_llfields(cx, &fields, sizing, dst), + nonnull.packed) } Some(name) => { assert_eq!(sizing, false); @@ -719,11 +267,14 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } } - Univariant(ref st) => { + layout::Univariant { ref variant, .. } => { + //note that this case also handles empty enums. + //Thus the true as the final parameter here. + let fields = compute_fields(cx, t, 0, true); match name { None => { - let fields = struct_llfields(cx, st, sizing, dst); - Type::struct_(cx, &fields, st.packed) + let fields = struct_llfields(cx, &fields, sizing, dst); + Type::struct_(cx, &fields, variant.packed) } Some(name) => { // Hypothesis: named_struct's can never need a @@ -733,35 +284,27 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } } - UntaggedUnion(ref un) => { + layout::Vector { element, count } => { + let elem_ty = Type::from_primitive(cx, element); + Type::vector(&elem_ty, count) + } + layout::UntaggedUnion { ref variants, .. }=> { // Use alignment-sized ints to fill all the union storage. 
- let (size, align) = (roundup(un.min_size, un.align), un.align); - - let align_s = align as u64; - assert_eq!(size % align_s, 0); // Ensure division in align_units comes out evenly - let align_units = size / align_s; - let fill_ty = match align_s { - 1 => Type::array(&Type::i8(cx), align_units), - 2 => Type::array(&Type::i16(cx), align_units), - 4 => Type::array(&Type::i32(cx), align_units), - 8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 => - Type::array(&Type::i64(cx), align_units), - a if a.count_ones() == 1 => Type::array(&Type::vector(&Type::i32(cx), a / 4), - align_units), - _ => bug!("unsupported union alignment: {}", align) - }; + let size = variants.stride().bytes(); + let align = variants.align.abi(); + let fill = union_fill(cx, size, align); match name { None => { - Type::struct_(cx, &[fill_ty], un.packed) + Type::struct_(cx, &[fill], variants.packed) } Some(name) => { let mut llty = Type::named_struct(cx, name); - llty.set_struct_body(&[fill_ty], un.packed); + llty.set_struct_body(&[fill], variants.packed); llty } } } - General(ity, ref sts) => { + layout::General { discr, size, align, .. } => { // We need a representation that has: // * The alignment of the most-aligned field // * The size of the largest variant (rounded up to that alignment) @@ -774,29 +317,20 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // of the size. // // FIXME #10604: this breaks when vector types are present. - let (size, align) = union_size_and_align(&sts[..]); - let align_s = align as u64; - let discr_ty = ll_inttype(cx, ity); - let discr_size = machine::llsize_of_alloc(cx, discr_ty); - let padded_discr_size = roundup(discr_size, align); - assert_eq!(size % align_s, 0); // Ensure division in align_units comes out evenly - let align_units = (size - padded_discr_size) / align_s; - let fill_ty = match align_s { - 1 => Type::array(&Type::i8(cx), align_units), - 2 => Type::array(&Type::i16(cx), align_units), - 4 => Type::array(&Type::i32(cx), align_units), - 8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 => - Type::array(&Type::i64(cx), align_units), - a if a.count_ones() == 1 => Type::array(&Type::vector(&Type::i32(cx), a / 4), - align_units), - _ => bug!("unsupported enum alignment: {}", align) - }; - assert_eq!(machine::llalign_of_min(cx, fill_ty), align); + let size = size.bytes(); + let align = align.abi(); + let discr_ty = Type::from_integer(cx, discr); + let discr_size = discr.size().bytes(); + let padded_discr_size = roundup(discr_size, align as u32); + let variant_part_size = size-padded_discr_size; + let variant_fill = union_fill(cx, variant_part_size, align); + + assert_eq!(machine::llalign_of_min(cx, variant_fill), align as u32); assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly let fields: Vec = [discr_ty, Type::array(&discr_ty, (padded_discr_size - discr_size)/discr_size), - fill_ty].iter().cloned().collect(); + variant_fill].iter().cloned().collect(); match name { None => { Type::struct_(cx, &fields[..], false) @@ -808,100 +342,127 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } } + _ => bug!("Unsupported type {} represented as {:#?}", t, l) } } -fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>, +fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type { + assert_eq!(size%align, 0); + let align_units = size/align; + match align { + 1 => Type::array(&Type::i8(cx), align_units), + 2 => Type::array(&Type::i16(cx), align_units), + 4 => Type::array(&Type::i32(cx), align_units), + 8 
if machine::llalign_of_min(cx, Type::i64(cx)) == 8 => + Type::array(&Type::i64(cx), align_units), + a if a.count_ones() == 1 => Type::array(&Type::vector(&Type::i32(cx), a / 4), + align_units), + _ => bug!("unsupported union alignment: {}", align) + } +} + + +fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec>, sizing: bool, dst: bool) -> Vec { if sizing { - st.fields.iter().filter(|&ty| !dst || type_is_sized(cx.tcx(), *ty)) + fields.iter().filter(|&ty| !dst || type_is_sized(cx.tcx(), *ty)) .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() } else { - st.fields.iter().map(|&ty| type_of::in_memory_type_of(cx, ty)).collect() + fields.iter().map(|&ty| type_of::in_memory_type_of(cx, ty)).collect() } } /// Obtain a representation of the discriminant sufficient to translate /// destructuring; this may or may not involve the actual discriminant. pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - r: &Repr<'tcx>, + t: Ty<'tcx>, scrutinee: ValueRef, range_assert: bool) -> (BranchKind, Option) { - match *r { - CEnum(..) | General(..) | - RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { - (BranchKind::Switch, Some(trans_get_discr(bcx, r, scrutinee, None, range_assert))) + let l = bcx.ccx().layout_of(t); + match *l { + layout::CEnum { .. } | layout::General { .. } | + layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => { + (BranchKind::Switch, Some(trans_get_discr(bcx, t, scrutinee, None, range_assert))) } - Univariant(..) | UntaggedUnion(..) => { + layout::Univariant { .. } | layout::UntaggedUnion { .. } => { // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants). (BranchKind::Single, None) - } + }, + _ => bug!("{} is not an enum.", t) } } -pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool { - match *r { - CEnum(ity, ..) => ity.is_signed(), - General(ity, _) => ity.is_signed(), - Univariant(..) | UntaggedUnion(..) => false, - RawNullablePointer { .. } => false, - StructWrappedNullablePointer { .. } => false, +pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { + match *l { + layout::CEnum { signed, .. }=> signed, + _ => false, } } /// Obtain the actual discriminant of a value. -pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, +pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, scrutinee: ValueRef, cast_to: Option, range_assert: bool) -> ValueRef { - debug!("trans_get_discr r: {:?}", r); - let val = match *r { - CEnum(ity, min, max) => { - load_discr(bcx, ity, scrutinee, min, max, range_assert) + let (def, substs) = match t.sty { + ty::TyAdt(ref def, substs) if def.adt_kind() == AdtKind::Enum => (def, substs), + _ => bug!("{} is not an enum", t) + }; + + debug!("trans_get_discr t: {:?}", t); + let l = bcx.ccx().layout_of(t); + + let val = match *l { + layout::CEnum { discr, min, max, .. } => { + load_discr(bcx, discr, scrutinee, min, max, range_assert) } - General(ity, ref cases) => { + layout::General { discr, .. } => { let ptr = StructGEP(bcx, scrutinee, 0); - load_discr(bcx, ity, ptr, Disr(0), Disr(cases.len() as u64 - 1), + load_discr(bcx, discr, ptr, 0, def.variants.len() as u64 - 1, range_assert) } - Univariant(..) | UntaggedUnion(..) => C_u8(bcx.ccx(), 0), - RawNullablePointer { nndiscr, nnty, .. } => { - let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE }; - let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); + layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx(), 0), + layout::RawNullablePointer { nndiscr, .. 
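The new `union_fill` helper above replaces the two copies of this logic that previously lived inline in `generic_type_of`: it fills the whole union or variant area with an array whose element size equals the alignment, so LLVM sees a type with the right size and alignment. The following is a hedged, LLVM-free sketch of just the size arithmetic; `union_fill_shape` and its `(element_bits, count)` return value are made up for illustration, and the vector fallback for larger power-of-two alignments is omitted.

// Pick a filler element whose size equals the alignment and repeat it
// `size / align` times, mirroring the arithmetic in `union_fill`.
fn union_fill_shape(size: u64, align: u64) -> (u64, u64) {
    assert_eq!(size % align, 0, "the stride must be a multiple of the alignment");
    let align_units = size / align;
    match align {
        1 => (8, align_units),
        2 => (16, align_units),
        4 => (32, align_units),
        8 => (64, align_units),
        _ => panic!("unsupported union alignment: {}", align),
    }
}

fn main() {
    // A union with stride 24 bytes and alignment 8 is filled with [i64; 3].
    assert_eq!(union_fill_shape(24, 8), (64, 3));
}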
} => { + let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; + let llptrty = type_of::sizing_type_of(bcx.ccx(), + monomorphize::field_ty(bcx.ccx().tcx(), substs, + &def.variants[nndiscr as usize].fields[0])); ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None) } - StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { + layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee) - } + }, + _ => bug!("{} is not an enum", t) }; match cast_to { None => val, - Some(llty) => if is_discr_signed(r) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) } + Some(llty) => if is_discr_signed(&l) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) } } } -fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, discrfield: &DiscrField, +fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: u64, discrfield: &layout::FieldPath, scrutinee: ValueRef) -> ValueRef { - let llptrptr = GEPi(bcx, scrutinee, &discrfield[..]); + let llptrptr = GEPi(bcx, scrutinee, + &discrfield.iter().map(|f| *f as usize).collect::>()[..]); let llptr = Load(bcx, llptrptr); - let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE }; + let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None) } /// Helper for cases where the discriminant is simply loaded. -fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr, +fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, range_assert: bool) -> ValueRef { - let llty = ll_inttype(bcx.ccx(), ity); + let llty = Type::from_integer(bcx.ccx(), ity); assert_eq!(val_ty(ptr), llty.ptr_to()); - let bits = machine::llbitsize_of_real(bcx.ccx(), llty); + let bits = ity.size().bits(); assert!(bits <= 64); let bits = bits as usize; - let mask = Disr(!0u64 >> (64 - bits)); + let mask = !0u64 >> (64 - bits); // For a (max) discr of -1, max will be `-1 as usize`, which overflows. // However, that is fine here (it would still represent the full range), - if max.wrapping_add(Disr(1)) & mask == min & mask || !range_assert { + if max.wrapping_add(1) & mask == min & mask || !range_assert { // i.e., if the range is everything. The lo==hi case would be // rejected by the LLVM verifier (it would mean either an // empty set, which is impossible, or the entire range of the @@ -910,7 +471,7 @@ fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr, } else { // llvm::ConstantRange can deal with ranges that wrap around, // so an overflow on (max + 1) is fine. - LoadRangeAssert(bcx, ptr, min.0, max.0.wrapping_add(1), /* signed: */ True) + LoadRangeAssert(bcx, ptr, min, max.wrapping_add(1), /* signed: */ True) } } @@ -918,54 +479,54 @@ fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr, /// discriminant-like value returned by `trans_switch`. /// /// This should ideally be less tightly tied to `_match`. -pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr) +pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef { - match *r { - CEnum(ity, ..) => { - C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true) + let l = bcx.ccx().layout_of(t); + match *l { + layout::CEnum { discr, .. } + | layout::General { discr, .. }=> { + C_integral(Type::from_integer(bcx.ccx(), discr), value.0, true) } - General(ity, _) => { - C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true) + layout::RawNullablePointer { .. 
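`load_discr` above only attaches range metadata when the discriminant's value range does not already cover every representable value; otherwise LLVM's verifier would reject the degenerate lo == hi range. Here is a small self-contained sketch of that test using the same mask arithmetic; the helper name and the example values are illustrative, not from the patch.

// True when [min, max] covers every value representable in `bits` bits,
// i.e. when max + 1 wraps around to min modulo 2^bits.
fn range_is_everything(min: u64, max: u64, bits: u32) -> bool {
    assert!(bits >= 1 && bits <= 64);
    let mask = !0u64 >> (64 - bits);
    max.wrapping_add(1) & mask == min & mask
}

fn main() {
    assert!(range_is_everything(0, 255, 8));            // an 8-bit discriminant using all of 0..=255
    assert!(!range_is_everything(0, 2, 8));             // only 0, 1, 2 are valid
    assert!(range_is_everything(!0u64, !0u64 - 1, 64)); // a fully wrapped 64-bit range
}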
} | +layout::StructWrappedNullablePointer { .. } => { + assert!(value == Disr(0) || value == Disr(1)); + C_bool(bcx.ccx(), value != Disr(0)) } - Univariant(..) | UntaggedUnion(..) => { - bug!("no cases for univariants, structs or unions") - } - RawNullablePointer { .. } | - StructWrappedNullablePointer { .. } => { - assert!(discr == Disr(0) || discr == Disr(1)); - C_bool(bcx.ccx(), discr != Disr(0)) + _ => { + bug!("{} does not have a discriminant. Represented as {:#?}", t, l); } } } /// Set the discriminant for a new value of the given case of the given /// representation. -pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, - val: ValueRef, discr: Disr) { - match *r { - CEnum(ity, min, max) => { - assert_discr_in_range(ity, min, max, discr); - Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true), +pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, + val: ValueRef, to: Disr) { + let l = bcx.ccx().layout_of(t); + match *l { + layout::CEnum{ discr, min, max, .. } => { + assert_discr_in_range(Disr(min), Disr(max), to); + Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), val); } - General(ity, _) => { - Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true), + layout::General{ discr, .. } => { + Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), StructGEP(bcx, val, 0)); } - Univariant(_) => { - assert_eq!(discr, Disr(0)); + layout::Univariant { .. } + | layout::UntaggedUnion { .. } + | layout::Vector { .. } => { + assert_eq!(to, Disr(0)); } - UntaggedUnion(..) => { - assert_eq!(discr, Disr(0)); - } - RawNullablePointer { nndiscr, nnty, ..} => { - if discr != nndiscr { + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; + if to.0 != nndiscr { let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); Store(bcx, C_null(llptrty), val); } } - StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => { - if discr != nndiscr { + layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. 
} => { + if to.0 != nndiscr { if target_sets_discr_via_memset(bcx) { // Issue #34427: As workaround for LLVM bug on // ARM, use memset of 0 on whole struct rather @@ -973,16 +534,18 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, let b = B(bcx); let llptr = b.pointercast(val, Type::i8(b.ccx).ptr_to()); let fill_byte = C_u8(b.ccx, 0); - let size = C_uint(b.ccx, nonnull.size); - let align = C_i32(b.ccx, nonnull.align as i32); + let size = C_uint(b.ccx, nonnull.stride().bytes()); + let align = C_i32(b.ccx, nonnull.align.abi() as i32); base::call_memset(&b, llptr, fill_byte, size, align, false); } else { - let llptrptr = GEPi(bcx, val, &discrfield[..]); + let path = discrfield.iter().map(|&i| i as usize).collect::>(); + let llptrptr = GEPi(bcx, val, &path[..]); let llptrty = val_ty(llptrptr).element_type(); Store(bcx, C_null(llptrty), llptrptr); } } } + _ => bug!("Cannot handle {} represented as {:#?}", t, l) } } @@ -990,52 +553,59 @@ fn target_sets_discr_via_memset<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) -> bool { bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" } -fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) { - match ity { - attr::UnsignedInt(_) => { - assert!(min <= discr); - assert!(discr <= max); - }, - attr::SignedInt(_) => { - assert!(min.0 as i64 <= discr.0 as i64); - assert!(discr.0 as i64 <= max.0 as i64); - }, +fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { + if min <= max { + assert!(min <= discr && discr <= max) + } else { + assert!(min <= discr || discr <= max) } } /// Access a field, at a point when the value's case is known. -pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, +pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { - trans_field_ptr_builder(&bcx.build(), r, val, discr, ix) + trans_field_ptr_builder(&bcx.build(), t, val, discr, ix) } /// Access a field, at a point when the value's case is known. pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - r: &Repr<'tcx>, + t: Ty<'tcx>, val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { + let l = bcx.ccx().layout_of(t); + debug!("trans_field_ptr_builder on {} represented as {:#?}", t, l); // Note: if this ever needs to generate conditionals (e.g., if we // decide to do some kind of cdr-coding-like non-unique repr // someday), it will need to return a possibly-new bcx as well. - match *r { - CEnum(..) => { - bug!("element access in C-like enum") - } - Univariant(ref st) => { + match *l { + layout::Univariant { ref variant, .. } => { assert_eq!(discr, Disr(0)); - struct_field_ptr(bcx, st, val, ix, false) + struct_field_ptr(bcx, &variant, + &compute_fields(bcx.ccx(), t, 0, false), + val, ix, false) } - General(_, ref cases) => { - struct_field_ptr(bcx, &cases[discr.0 as usize], val, ix + 1, true) + layout::Vector { count, .. } => { + assert_eq!(discr.0, 0); + assert!((ix as u64) < count); + bcx.struct_gep(val.value, ix) } - UntaggedUnion(ref un) => { - let ty = type_of::in_memory_type_of(bcx.ccx(), un.fields[ix]); + layout::General { discr: d, ref variants, .. } => { + let mut fields = compute_fields(bcx.ccx(), t, discr.0 as usize, false); + fields.insert(0, d.to_ty(&bcx.ccx().tcx(), false)); + struct_field_ptr(bcx, &variants[discr.0 as usize], + &fields, + val, ix + 1, true) + } + layout::UntaggedUnion { .. 
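The rewritten `assert_discr_in_range` above no longer looks at signedness: with discriminants handled as raw `u64` values, a signed range such as -1..=1 shows up as `min > max` after wrapping, and the valid values are exactly those outside the gap between `max` and `min`. A standalone sketch of that check follows; the function name and the sample range are illustrative only.

// Returns true when `discr` lies in the possibly wrapping range [min, max].
fn discr_in_range(min: u64, max: u64, discr: u64) -> bool {
    if min <= max {
        min <= discr && discr <= max
    } else {
        // The range wraps around: everything except the gap (max, min) is valid.
        min <= discr || discr <= max
    }
}

fn main() {
    // A hypothetical C-like enum with discriminants -1, 0 and 1 stored as u64:
    let (min, max) = (!0u64, 1); // -1 wraps to !0u64
    assert!(discr_in_range(min, max, !0u64));
    assert!(discr_in_range(min, max, 0));
    assert!(discr_in_range(min, max, 1));
    assert!(!discr_in_range(min, max, 2));
}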
} => { + let fields = compute_fields(bcx.ccx(), t, 0, false); + let ty = type_of::in_memory_type_of(bcx.ccx(), fields[ix]); if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } bcx.pointercast(val.value, ty.ptr_to()) } - RawNullablePointer { nndiscr, ref nullfields, .. } | - StructWrappedNullablePointer { nndiscr, ref nullfields, .. } if discr != nndiscr => { + layout::RawNullablePointer { nndiscr, .. } | + layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => { + let nullfields = compute_fields(bcx.ccx(), t, (1-nndiscr) as usize, false); // The unit-like case might have a nonzero number of unit-like fields. // (e.d., Result of Either with (), as one side.) let ty = type_of::type_of(bcx.ccx(), nullfields[ix]); @@ -1045,32 +615,36 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } bcx.pointercast(val.value, ty.ptr_to()) } - RawNullablePointer { nndiscr, nnty, .. } => { + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; assert_eq!(ix, 0); - assert_eq!(discr, nndiscr); + assert_eq!(discr.0, nndiscr); let ty = type_of::type_of(bcx.ccx(), nnty); if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } bcx.pointercast(val.value, ty.ptr_to()) } - StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - assert_eq!(discr, nndiscr); - struct_field_ptr(bcx, nonnull, val, ix, false) + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { + assert_eq!(discr.0, nndiscr); + struct_field_ptr(bcx, &nonnull, + &compute_fields(bcx.ccx(), t, discr.0 as usize, false), + val, ix, false) } + _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) } } fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - st: &Struct<'tcx>, val: MaybeSizedValue, + st: &layout::Struct, fields: &Vec>, val: MaybeSizedValue, ix: usize, needs_cast: bool) -> ValueRef { let ccx = bcx.ccx(); - let fty = st.fields[ix]; + let fty = fields[ix]; let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); if bcx.is_unreachable() { return C_undef(ll_fty.ptr_to()); } let ptr_val = if needs_cast { - let fields = st.fields.iter().map(|&ty| { + let fields = fields.iter().map(|&ty| { type_of::in_memory_type_of(ccx, ty) }).collect::>(); let real_ty = Type::struct_(ccx, &fields[..], st.packed); @@ -1124,7 +698,7 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // Calculate the unaligned offset of the unsized field. let mut offset = 0; - for &ty in &st.fields[0..ix] { + for &ty in &fields[0..ix] { let llty = type_of::sizing_type_of(ccx, ty); let type_align = type_of::align_of(ccx, ty); offset = roundup(offset, type_align); @@ -1177,82 +751,76 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, /// Currently the returned value has the same size as the type, but /// this could be changed in the future to avoid allocating unnecessary /// space after values of shorter-than-maximum cases. -pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr: Disr, +pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, discr: Disr, vals: &[ValueRef]) -> ValueRef { - match *r { - CEnum(ity, min, max) => { + let l = ccx.layout_of(t); + let dl = &ccx.tcx().data_layout; + match *l { + layout::CEnum { discr: d, min, max, .. 
} => { assert_eq!(vals.len(), 0); - assert_discr_in_range(ity, min, max, discr); - C_integral(ll_inttype(ccx, ity), discr.0, true) + assert_discr_in_range(Disr(min), Disr(max), discr); + C_integral(Type::from_integer(ccx, d), discr.0, true) } - General(ity, ref cases) => { - let case = &cases[discr.0 as usize]; - let (max_sz, _) = union_size_and_align(&cases[..]); - let lldiscr = C_integral(ll_inttype(ccx, ity), discr.0 as u64, true); - let mut f = vec![lldiscr]; - f.extend_from_slice(vals); - let mut contents = build_const_struct(ccx, case, &f[..]); - contents.extend_from_slice(&[padding(ccx, max_sz - case.size)]); + layout::General { discr: d, ref variants, .. } => { + let variant = &variants[discr.0 as usize]; + let lldiscr = C_integral(Type::from_integer(ccx, d), discr.0 as u64, true); + let mut vals_with_discr = vec![lldiscr]; + vals_with_discr.extend_from_slice(vals); + let mut contents = build_const_struct(ccx, &variant.offset_after_field[..], + &vals_with_discr[..], variant.packed); + let needed_padding = l.size(dl).bytes() - variant.min_size().bytes(); + if needed_padding > 0 { + contents.push(padding(ccx, needed_padding)); + } C_struct(ccx, &contents[..], false) } - UntaggedUnion(ref un) => { + layout::UntaggedUnion { ref variants, .. }=> { assert_eq!(discr, Disr(0)); - let contents = build_const_union(ccx, un, vals[0]); - C_struct(ccx, &contents, un.packed) + let contents = build_const_union(ccx, variants, vals[0]); + C_struct(ccx, &contents, variants.packed) } - Univariant(ref st) => { + layout::Univariant { ref variant, .. } => { assert_eq!(discr, Disr(0)); - let contents = build_const_struct(ccx, st, vals); - C_struct(ccx, &contents[..], st.packed) + let contents = build_const_struct(ccx, + &variant.offset_after_field[..], vals, variant.packed); + C_struct(ccx, &contents[..], variant.packed) } - RawNullablePointer { nndiscr, nnty, .. } => { - if discr == nndiscr { + layout::Vector { .. } => { + C_vector(vals) + } + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = compute_fields(ccx, t, nndiscr as usize, false)[0]; + if discr.0 == nndiscr { assert_eq!(vals.len(), 1); vals[0] } else { C_null(type_of::sizing_type_of(ccx, nnty)) } } - StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - if discr == nndiscr { + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { + if discr.0 == nndiscr { C_struct(ccx, &build_const_struct(ccx, - nonnull, - vals), + &nonnull.offset_after_field[..], + vals, nonnull.packed), false) } else { - let vals = nonnull.fields.iter().map(|&ty| { + let fields = compute_fields(ccx, t, nndiscr as usize, false); + let vals = fields.iter().map(|&ty| { // Always use null even if it's not the `discrfield`th // field; see #8506. C_null(type_of::sizing_type_of(ccx, ty)) }).collect::>(); C_struct(ccx, &build_const_struct(ccx, - nonnull, - &vals[..]), + &nonnull.offset_after_field[..], + &vals[..], + false), false) } } + _ => bug!("trans_const: cannot handle type {} repreented as {:#?}", t, l) } } -/// Compute struct field offsets relative to struct begin. 
-fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - st: &Struct<'tcx>) -> Vec { - let mut offsets = vec!(); - - let mut offset = 0; - for &ty in &st.fields { - let llty = type_of::sizing_type_of(ccx, ty); - if !st.packed { - let type_align = type_of::align_of(ccx, ty); - offset = roundup(offset, type_align); - } - offsets.push(offset); - offset += machine::llsize_of_alloc(ccx, llty); - } - assert_eq!(st.fields.len(), offsets.len()); - offsets -} - /// Building structs is a little complicated, because we might need to /// insert padding if a field's value is less aligned than its type. /// @@ -1262,17 +830,27 @@ fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, /// a two-element struct will locate it at offset 4, and accesses to it /// will read the wrong memory. fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - st: &Struct<'tcx>, vals: &[ValueRef]) + offset_after_field: &[layout::Size], + vals: &[ValueRef], + packed: bool) -> Vec { - assert_eq!(vals.len(), st.fields.len()); + assert_eq!(vals.len(), offset_after_field.len()); - let target_offsets = compute_struct_field_offsets(ccx, st); + if vals.len() == 0 { + return Vec::new(); + } // offset of current value let mut offset = 0; let mut cfields = Vec::new(); - for (&val, target_offset) in vals.iter().zip(target_offsets) { - if !st.packed { + for (&val, target_offset) in + vals.iter().zip( + offset_after_field.iter().map(|i| i.bytes()) + ) { + assert!(!is_undef(val)); + cfields.push(val); + offset += machine::llsize_of_alloc(ccx, val_ty(val)); + if !packed { let val_align = machine::llalign_of_min(ccx, val_ty(val)); offset = roundup(offset, val_align); } @@ -1280,27 +858,24 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, cfields.push(padding(ccx, target_offset - offset)); offset = target_offset; } - assert!(!is_undef(val)); - cfields.push(val); - offset += machine::llsize_of_alloc(ccx, val_ty(val)); } - assert!(st.sized && offset <= st.size); - if offset != st.size { - cfields.push(padding(ccx, st.size - offset)); + let size = offset_after_field.last().unwrap(); + if offset < size.bytes() { + cfields.push(padding(ccx, size.bytes() - offset)); } cfields } fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - un: &Union<'tcx>, + un: &layout::Union, field_val: ValueRef) -> Vec { let mut cfields = vec![field_val]; let offset = machine::llsize_of_alloc(ccx, val_ty(field_val)); - let size = roundup(un.min_size, un.align); + let size = un.stride().bytes(); if offset != size { cfields.push(padding(ccx, size - offset)); } @@ -1321,18 +896,21 @@ fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } /// /// (Not to be confused with `common::const_get_elt`, which operates on /// raw LLVM-level structs and arrays.) -pub fn const_get_field(r: &Repr, val: ValueRef, _discr: Disr, +pub fn const_get_field<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, + val: ValueRef, _discr: Disr, ix: usize) -> ValueRef { - match *r { - CEnum(..) => bug!("element access in C-like enum const"), - Univariant(..) => const_struct_field(val, ix), - UntaggedUnion(..) => const_struct_field(val, 0), - General(..) => const_struct_field(val, ix + 1), - RawNullablePointer { .. } => { + let l = ccx.layout_of(t); + match *l { + layout::CEnum { .. } => bug!("element access in C-like enum const"), + layout::Univariant { .. } | layout::Vector { .. } => const_struct_field(val, ix), + layout::UntaggedUnion { .. } => const_struct_field(val, 0), + layout::General { .. 
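Several of the hunks above (the removed `compute_struct_field_offsets`, the rewritten `build_const_struct`, and the `roundup` helper) revolve around the same arithmetic: each field starts at the running offset rounded up to its alignment, and any gap becomes explicit padding. Below is a minimal sketch of that layout rule with made-up `(size, align)` pairs; `field_offsets` is not a function in the patch.

// Round `x` up to a multiple of `a`, exactly like the `roundup` helper.
fn roundup(x: u64, a: u64) -> u64 {
    ((x + (a - 1)) / a) * a
}

// Compute the offset of each field from its (size, align) pair.
fn field_offsets(fields: &[(u64, u64)]) -> Vec<u64> {
    let mut offset = 0;
    let mut offsets = Vec::new();
    for &(size, align) in fields {
        offset = roundup(offset, align); // insert padding if needed
        offsets.push(offset);
        offset += size;
    }
    offsets
}

fn main() {
    // (u8, u32, u16) without repr(packed): the u32 gets pushed out to offset 4.
    assert_eq!(field_offsets(&[(1, 1), (4, 4), (2, 2)]), vec![0, 4, 8]);
}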
} => const_struct_field(val, ix + 1), + layout::RawNullablePointer { .. } => { assert_eq!(ix, 0); val }, - StructWrappedNullablePointer{ .. } => const_struct_field(val, ix) + layout::StructWrappedNullablePointer{ .. } => const_struct_field(val, ix), + _ => bug!("{} does not have fields.", t) } } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 1e05b31eead..446042b839a 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -466,32 +466,27 @@ pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, store_fat_ptr(bcx, base, info, dst, dst_ty); } - // This can be extended to enums and tuples in the future. - (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { + (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => { assert_eq!(def_a, def_b); - let src_repr = adt::represent_type(bcx.ccx(), src_ty); - let src_fields = match &*src_repr { - &adt::Repr::Univariant(ref s) => &s.fields, - _ => bug!("struct has non-univariant repr"), - }; - let dst_repr = adt::represent_type(bcx.ccx(), dst_ty); - let dst_fields = match &*dst_repr { - &adt::Repr::Univariant(ref s) => &s.fields, - _ => bug!("struct has non-univariant repr"), - }; + let src_fields = def_a.variants[0].fields.iter().map(|f| { + monomorphize::field_ty(bcx.tcx(), substs_a, f) + }); + let dst_fields = def_b.variants[0].fields.iter().map(|f| { + monomorphize::field_ty(bcx.tcx(), substs_b, f) + }); let src = adt::MaybeSizedValue::sized(src); let dst = adt::MaybeSizedValue::sized(dst); - let iter = src_fields.iter().zip(dst_fields).enumerate(); + let iter = src_fields.zip(dst_fields).enumerate(); for (i, (src_fty, dst_fty)) in iter { if type_is_zero_size(bcx.ccx(), dst_fty) { continue; } - let src_f = adt::trans_field_ptr(bcx, &src_repr, src, Disr(0), i); - let dst_f = adt::trans_field_ptr(bcx, &dst_repr, dst, Disr(0), i); + let src_f = adt::trans_field_ptr(bcx, src_ty, src, Disr(0), i); + let dst_f = adt::trans_field_ptr(bcx, dst_ty, dst, Disr(0), i); if src_fty == dst_fty { memcpy_ty(bcx, dst_f, src_f, src_fty); } else { @@ -1164,11 +1159,10 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, if !fcx.fn_ty.ret.is_ignore() { let dest = fcx.llretslotptr.get().unwrap(); let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value - let repr = adt::represent_type(ccx, sig.output); let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; let mut arg_idx = 0; for (i, arg_ty) in sig.inputs.into_iter().enumerate() { - let lldestptr = adt::trans_field_ptr(bcx, &repr, dest_val, Disr::from(disr), i); + let lldestptr = adt::trans_field_ptr(bcx, sig.output, dest_val, Disr::from(disr), i); let arg = &fcx.fn_ty.args[arg_idx]; arg_idx += 1; let b = &bcx.build(); @@ -1181,7 +1175,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg.store_fn_arg(b, &mut llarg_idx, lldestptr); } } - adt::trans_set_discr(bcx, &repr, dest, disr); + adt::trans_set_discr(bcx, sig.output, dest, disr); } fcx.finish(bcx, DebugLoc::None); diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index db1a5419190..5b1f691af8d 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -875,12 +875,6 @@ pub fn const_get_elt(v: ValueRef, us: &[c_uint]) } } -pub fn const_to_int(v: ValueRef) -> i64 { - unsafe { - llvm::LLVMConstIntGetSExtValue(v) - } -} - pub fn const_to_uint(v: ValueRef) -> u64 { unsafe { llvm::LLVMConstIntGetZExtValue(v) diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index b10129d1019..f7b89f6f1bb 
100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -994,7 +994,11 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { pub fn layout_of(&self, ty: Ty<'tcx>) -> &'tcx ty::layout::Layout { self.tcx().infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| { ty.layout(&infcx).unwrap_or_else(|e| { - bug!("failed to get layout for `{}`: {}", ty, e); + match e { + ty::layout::LayoutError::SizeOverflow(_) => + self.sess().fatal(&e.to_string()), + _ => bug!("failed to get layout for `{}`: {}", ty, e) + } }) }) } diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index 31df49609cb..458127c7ada 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -27,10 +27,10 @@ use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType, DI use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::hir; -use {type_of, adt, machine, monomorphize}; +use {type_of, machine, monomorphize}; use common::CrateContext; use type_::Type; -use rustc::ty::{self, AdtKind, Ty}; +use rustc::ty::{self, AdtKind, Ty, layout}; use session::config; use util::nodemap::FnvHashMap; use util::common::path2cstr; @@ -40,7 +40,6 @@ use std::ffi::CString; use std::path::Path; use std::ptr; use std::rc::Rc; -use syntax; use syntax::util::interner::Interner; use syntax::ast; use syntax::parse::token; @@ -1281,7 +1280,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // offset of zero bytes). struct EnumMemberDescriptionFactory<'tcx> { enum_type: Ty<'tcx>, - type_rep: Rc>, + type_rep: &'tcx layout::Layout, discriminant_type_metadata: Option, containing_scope: DIScope, file_metadata: DIFile, @@ -1292,11 +1291,15 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let adt = &self.enum_type.ty_adt_def().unwrap(); + let substs = match self.enum_type.sty { + ty::TyAdt(def, ref s) if def.adt_kind() == AdtKind::Enum => s, + ref t @ _ => bug!("{} is not an enum", t) + }; match *self.type_rep { - adt::General(_, ref struct_defs) => { + layout::General { ref variants, .. } => { let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata .expect("")); - struct_defs + variants .iter() .enumerate() .map(|(i, struct_def)| { @@ -1327,7 +1330,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } }).collect() }, - adt::Univariant(ref struct_def) => { + layout::Univariant{ ref variant, .. } => { assert!(adt.variants.len() <= 1); if adt.variants.is_empty() { @@ -1338,7 +1341,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { member_description_factory) = describe_enum_variant(cx, self.enum_type, - struct_def, + variant, &adt.variants[0], NoDiscriminant, self.containing_scope, @@ -1362,16 +1365,17 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { ] } } - adt::RawNullablePointer { nndiscr: non_null_variant_index, nnty, .. } => { + layout::RawNullablePointer { nndiscr: non_null_variant_index, .. } => { // As far as debuginfo is concerned, the pointer this enum // represents is still wrapped in a struct. This is to make the // DWARF representation of enums uniform. 
// First create a description of the artificial wrapper struct: - let non_null_variant = &adt.variants[non_null_variant_index.0 as usize]; + let non_null_variant = &adt.variants[non_null_variant_index as usize]; let non_null_variant_name = non_null_variant.name.as_str(); // The llvm type and metadata of the pointer + let nnty = monomorphize::field_ty(cx.tcx(), &substs, &non_null_variant.fields[0] ); let non_null_llvm_type = type_of::type_of(cx, nnty); let non_null_type_metadata = type_metadata(cx, nnty, self.span); @@ -1416,7 +1420,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // Encode the information about the null variant in the union // member's name. - let null_variant_index = (1 - non_null_variant_index.0) as usize; + let null_variant_index = (1 - non_null_variant_index) as usize; let null_variant_name = adt.variants[null_variant_index].name; let union_member_name = format!("RUST$ENCODED$ENUM${}${}", 0, @@ -1434,7 +1438,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } ] }, - adt::StructWrappedNullablePointer { nonnull: ref struct_def, + layout::StructWrappedNullablePointer { nonnull: ref struct_def, nndiscr, ref discrfield, ..} => { // Create a description of the non-null variant @@ -1442,7 +1446,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { describe_enum_variant(cx, self.enum_type, struct_def, - &adt.variants[nndiscr.0 as usize], + &adt.variants[nndiscr as usize], OptimizedDiscriminant, self.containing_scope, self.span); @@ -1457,7 +1461,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // Encode the information about the null variant in the union // member's name. - let null_variant_index = (1 - nndiscr.0) as usize; + let null_variant_index = (1 - nndiscr) as usize; let null_variant_name = adt.variants[null_variant_index].name; let discrfield = discrfield.iter() .skip(1) @@ -1478,9 +1482,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } ] }, - adt::CEnum(..) | adt::UntaggedUnion(..) => { - span_bug!(self.span, "This should be unreachable.") - } + layout::CEnum { .. } => span_bug!(self.span, "This should be unreachable."), + ref l @ _ => bug!("Not an enum layout: {:#?}", l) } } } @@ -1523,16 +1526,39 @@ enum EnumDiscriminantInfo { // full RecursiveTypeDescription. fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_type: Ty<'tcx>, - struct_def: &adt::Struct<'tcx>, + struct_def: &layout::Struct, variant: ty::VariantDef<'tcx>, discriminant_info: EnumDiscriminantInfo, containing_scope: DIScope, span: Span) -> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) { + let substs = match enum_type.sty { + ty::TyAdt(def, s) if def.adt_kind() == AdtKind::Enum => s, + ref t @ _ => bug!("{:#?} is not an enum", t) + }; + + let maybe_discr_and_signed: Option<(layout::Integer, bool)> = match *cx.layout_of(enum_type) { + layout::CEnum {discr, ..} => Some((discr, true)), + layout::General{discr, ..} => Some((discr, false)), + layout::Univariant { .. } + | layout::RawNullablePointer { .. } + | layout::StructWrappedNullablePointer { .. } => None, + ref l @ _ => bug!("This should be unreachable. 
Type is {:#?} layout is {:#?}", enum_type, l) + }; + + let mut field_tys = variant.fields.iter().map(|f: ty::FieldDef<'tcx>| { + monomorphize::field_ty(cx.tcx(), &substs, f) + }).collect::>(); + + if let Some((discr, signed)) = maybe_discr_and_signed { + field_tys.insert(0, discr.to_ty(&cx.tcx(), signed)); + } + + let variant_llvm_type = - Type::struct_(cx, &struct_def.fields + Type::struct_(cx, &field_tys .iter() - .map(|&t| type_of::type_of(cx, t)) + .map(|t| type_of::type_of(cx, t)) .collect::>() , struct_def.packed); @@ -1578,7 +1604,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // Build an array of (field name, field type) pairs to be captured in the factory closure. let args: Vec<(String, Ty)> = arg_names.iter() - .zip(&struct_def.fields) + .zip(field_tys.iter()) .map(|(s, &t)| (s.to_string(), t)) .collect(); @@ -1615,7 +1641,6 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let file_metadata = unknown_file_metadata(cx); let variants = &enum_type.ty_adt_def().unwrap().variants; - let enumerators_metadata: Vec = variants .iter() .map(|v| { @@ -1630,7 +1655,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }) .collect(); - let discriminant_type_metadata = |inttype: syntax::attr::IntType| { + let discriminant_type_metadata = |inttype: layout::Integer, signed: bool| { let disr_type_key = (enum_def_id, inttype); let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types .borrow() @@ -1638,12 +1663,12 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, match cached_discriminant_type_metadata { Some(discriminant_type_metadata) => discriminant_type_metadata, None => { - let discriminant_llvm_type = adt::ll_inttype(cx, inttype); + let discriminant_llvm_type = Type::from_integer(cx, inttype); let (discriminant_size, discriminant_align) = size_and_align_of(cx, discriminant_llvm_type); let discriminant_base_type_metadata = type_metadata(cx, - adt::ty_of_inttype(cx.tcx(), inttype), + inttype.to_ty(&cx.tcx(), signed), syntax_pos::DUMMY_SP); let discriminant_name = get_enum_discriminant_name(cx, enum_def_id); @@ -1670,16 +1695,17 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } }; - let type_rep = adt::represent_type(cx, enum_type); + let type_rep = cx.layout_of(enum_type); let discriminant_type_metadata = match *type_rep { - adt::CEnum(inttype, ..) => { - return FinalMetadata(discriminant_type_metadata(inttype)) + layout::CEnum { discr, signed, .. } => { + return FinalMetadata(discriminant_type_metadata(discr, signed)) }, - adt::RawNullablePointer { .. } | - adt::StructWrappedNullablePointer { .. } | - adt::Univariant(..) | adt::UntaggedUnion(..) => None, - adt::General(inttype, _) => Some(discriminant_type_metadata(inttype)), + layout::RawNullablePointer { .. } | + layout::StructWrappedNullablePointer { .. } | + layout::Univariant { .. } => None, + layout::General { discr, .. 
} => Some(discriminant_type_metadata(discr, false)), + ref l @ _ => bug!("Not an enum layout: {:#?}", l) }; let enum_llvm_type = type_of::type_of(cx, enum_type); @@ -1715,7 +1741,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_llvm_type, EnumMDF(EnumMemberDescriptionFactory { enum_type: enum_type, - type_rep: type_rep.clone(), + type_rep: type_rep, discriminant_type_metadata: discriminant_type_metadata, containing_scope: containing_scope, file_metadata: file_metadata, diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index bcd288671bc..a23fd3ab8b3 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -43,7 +43,7 @@ use std::ptr; use syntax_pos::{self, Span, Pos}; use syntax::ast; -use syntax::attr::IntType; +use rustc::ty::layout; pub mod gdb; mod utils; @@ -69,7 +69,7 @@ pub struct CrateDebugContext<'tcx> { builder: DIBuilderRef, current_debug_location: Cell, created_files: RefCell>, - created_enum_disr_types: RefCell>, + created_enum_disr_types: RefCell>, type_map: RefCell>, namespace_map: RefCell>, diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 2a20728f09b..fe76ec05f6e 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -513,7 +513,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, let _icx = push_ctxt("drop_structural_ty"); fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - repr: &adt::Repr<'tcx>, + t: Ty<'tcx>, av: adt::MaybeSizedValue, variant: ty::VariantDef<'tcx>, substs: &Substs<'tcx>) @@ -525,7 +525,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, for (i, field) in variant.fields.iter().enumerate() { let arg = monomorphize::field_ty(tcx, substs, field); cx = drop_ty(cx, - adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i), + adt::trans_field_ptr(cx, t, av, Disr::from(variant.disr_val), i), arg, DebugLoc::None); } return cx; @@ -543,9 +543,8 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, let mut cx = cx; match t.sty { ty::TyClosure(_, ref substs) => { - let repr = adt::represent_type(cx.ccx(), t); for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() { - let llupvar = adt::trans_field_ptr(cx, &repr, value, Disr(0), i); + let llupvar = adt::trans_field_ptr(cx, t, value, Disr(0), i); cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None); } } @@ -562,18 +561,16 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None)); } ty::TyTuple(ref args) => { - let repr = adt::represent_type(cx.ccx(), t); for (i, arg) in args.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr(0), i); + let llfld_a = adt::trans_field_ptr(cx, t, value, Disr(0), i); cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None); } } ty::TyAdt(adt, substs) => match adt.adt_kind() { AdtKind::Struct => { - let repr = adt::represent_type(cx.ccx(), t); let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); for (i, &Field(_, field_ty)) in fields.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr::from(discr), i); + let llfld_a = adt::trans_field_ptr(cx, t, value, Disr::from(discr), i); let val = if type_is_sized(cx.tcx(), field_ty) { llfld_a @@ -593,18 +590,16 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, AdtKind::Enum => { let fcx = cx.fcx; let ccx = fcx.ccx; - - let repr = adt::represent_type(ccx, t); let n_variants = adt.variants.len(); // NB: we must hit the discriminant first 
so that structural // comparison know not to proceed when the discriminants differ. - match adt::trans_switch(cx, &repr, av, false) { + match adt::trans_switch(cx, t, av, false) { (adt::BranchKind::Single, None) => { if n_variants != 0 { assert!(n_variants == 1); - cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av), + cx = iter_variant(cx, t, adt::MaybeSizedValue::sized(av), &adt.variants[0], substs); } } @@ -633,10 +628,10 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}", &variant.disr_val .to_string())); - let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val)); + let case_val = adt::trans_case(cx, t, Disr::from(variant.disr_val)); AddCase(llswitch, case_val, variant_cx.llbb); let variant_cx = iter_variant(variant_cx, - &repr, + t, value, variant, substs); diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 0d919cb7757..4cacbc0f35e 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -418,8 +418,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let val_ty = substs.type_at(0); match val_ty.sty { ty::TyAdt(adt, ..) if adt.is_enum() => { - let repr = adt::represent_type(ccx, val_ty); - adt::trans_get_discr(bcx, &repr, llargs[0], + adt::trans_get_discr(bcx, val_ty, llargs[0], Some(llret_ty), true) } _ => C_null(llret_ty) @@ -629,13 +628,10 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, // destructors, and the contents are SIMD // etc. assert!(!bcx.fcx.type_needs_drop(arg_type)); - - let repr = adt::represent_type(bcx.ccx(), arg_type); - let repr_ptr = &repr; let arg = adt::MaybeSizedValue::sized(llarg); (0..contents.len()) .map(|i| { - Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i)) + Load(bcx, adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i)) }) .collect() } diff --git a/src/librustc_trans/machine.rs b/src/librustc_trans/machine.rs index 59020b38ddc..cd31f02842a 100644 --- a/src/librustc_trans/machine.rs +++ b/src/librustc_trans/machine.rs @@ -24,13 +24,6 @@ pub type llalign = u32; // ______________________________________________________________________ // compute sizeof / alignof -// Returns the number of bytes clobbered by a Store to this type. -pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> llsize { - unsafe { - return llvm::LLVMStoreSizeOfType(cx.td(), ty.to_ref()); - } -} - // Returns the number of bytes between successive elements of type T in an // array of T. This is the "ABI" size. It includes any ABI-mandated padding. pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize { @@ -39,28 +32,6 @@ pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize { } } -// Returns, as near as we can figure, the "real" size of a type. As in, the -// bits in this number of bytes actually carry data related to the datum -// with the type. Not junk, accidentally-damaged words, or whatever. -// Note that padding of the type will be included for structs, but not for the -// other types (i.e. SIMD types). -// Rounds up to the nearest byte though, so if you have a 1-bit -// value, we return 1 here, not 0. Most of rustc works in bytes. Be warned -// that LLVM *does* distinguish between e.g. a 1-bit value and an 8-bit value -// at the codegen level! In general you should prefer `llbitsize_of_real` -// below. 
-pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> llsize { - unsafe { - let nbits = llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref()); - if nbits & 7 != 0 { - // Not an even number of bytes, spills into "next" byte. - 1 + (nbits >> 3) - } else { - nbits >> 3 - } - } -} - /// Returns the "real" size of the type in bits. pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits { unsafe { diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index baeafbe3e34..003830123ff 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -139,9 +139,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => { let discr_lvalue = self.trans_lvalue(&bcx, discr); let ty = discr_lvalue.ty.to_ty(bcx.tcx()); - let repr = adt::represent_type(bcx.ccx(), ty); let discr = bcx.with_block(|bcx| - adt::trans_get_discr(bcx, &repr, discr_lvalue.llval, None, true) + adt::trans_get_discr(bcx, ty, discr_lvalue.llval, None, true) ); let mut bb_hist = FnvHashMap(); @@ -167,7 +166,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { if default_bb != Some(target) { let llbb = llblock(self, target); let llval = bcx.with_block(|bcx| adt::trans_case( - bcx, &repr, Disr::from(adt_variant.disr_val))); + bcx, ty, Disr::from(adt_variant.disr_val))); build::AddCase(switch, llval, llbb) } } @@ -701,10 +700,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // Handle both by-ref and immediate tuples. match tuple.val { Ref(llval) => { - let base_repr = adt::represent_type(bcx.ccx(), tuple.ty); let base = adt::MaybeSizedValue::sized(llval); for (n, &ty) in arg_types.iter().enumerate() { - let ptr = adt::trans_field_ptr_builder(bcx, &base_repr, base, Disr(0), n); + let ptr = adt::trans_field_ptr_builder(bcx, tuple.ty, base, Disr(0), n); let val = if common::type_is_fat_ptr(bcx.tcx(), ty) { let (lldata, llextra) = load_fat_ptr(bcx, ptr); Pair(lldata, llextra) diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index f00da120799..aba50053b09 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -23,9 +23,9 @@ use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::subst::Substs; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use {abi, adt, base, Disr}; +use {abi, adt, base, Disr, machine}; use callee::Callee; -use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty}; +use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty, type_is_sized}; use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral}; use common::{C_null, C_struct, C_str_slice, C_undef, C_uint}; use common::{const_to_opt_int, const_to_opt_uint}; @@ -441,8 +441,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } } mir::ProjectionElem::Field(ref field, _) => { - let base_repr = adt::represent_type(self.ccx, tr_base.ty); - let llprojected = adt::const_get_field(&base_repr, base.llval, + let llprojected = adt::const_get_field(self.ccx, tr_base.ty, base.llval, Disr(0), field.index()); let llextra = if is_sized { ptr::null_mut() @@ -585,9 +584,8 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } _ => Disr(0) }; - let repr = adt::represent_type(self.ccx, dest_ty); Const::new( - adt::trans_const(self.ccx, &repr, disr, &fields), + adt::trans_const(self.ccx, dest_ty, disr, &fields), dest_ty ) } @@ -658,8 +656,8 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let ll_t_out = type_of::immediate_type_of(self.ccx, 
cast_ty); let llval = operand.llval; let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in { - let repr = adt::represent_type(self.ccx, operand.ty); - adt::is_discr_signed(&repr) + let l = self.ccx.layout_of(operand.ty); + adt::is_discr_signed(&l) } else { operand.ty.is_signed() }; @@ -735,7 +733,12 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let base = match tr_lvalue.base { Base::Value(llval) => { - let align = type_of::align_of(self.ccx, ty); + //Fixme: may be wrong for &*(&simd_vec as &fmt::Debug) + let align = if type_is_sized(self.ccx.tcx(), ty) { + type_of::align_of(self.ccx, ty) + } else { + self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign + }; if bk == mir::BorrowKind::Mut { consts::addr_of_mut(self.ccx, llval, align, "ref_mut") } else { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 5e180887a36..0ce5544c3bf 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -152,7 +152,6 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::ProjectionElem::Deref => bug!(), mir::ProjectionElem::Field(ref field, _) => { let base_ty = tr_base.ty.to_ty(tcx); - let base_repr = adt::represent_type(ccx, base_ty); let discr = match tr_base.ty { LvalueTy::Ty { .. } => 0, LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v, @@ -164,7 +163,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } else { adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra) }; - let llprojected = adt::trans_field_ptr_builder(bcx, &base_repr, base, + let llprojected = adt::trans_field_ptr_builder(bcx, base_ty, base, Disr(discr), field.index()); let llextra = if is_sized { ptr::null_mut() diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 21b019d7e24..b643dcd9871 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -111,10 +111,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::Rvalue::Aggregate(ref kind, ref operands) => { match *kind { mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { - let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx())); let disr = Disr::from(adt_def.variants[variant_index].disr_val); bcx.with_block(|bcx| { - adt::trans_set_discr(bcx, &repr, dest.llval, Disr::from(disr)); + adt::trans_set_discr(bcx, + dest.ty.to_ty(bcx.tcx()), dest.llval, Disr::from(disr)); }); for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); @@ -122,8 +122,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { if !common::type_is_zero_size(bcx.ccx(), op.ty) { let val = adt::MaybeSizedValue::sized(dest.llval); let field_index = active_field_index.unwrap_or(i); - let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr, val, - disr, field_index); + let lldest_i = adt::trans_field_ptr_builder(&bcx, + dest.ty.to_ty(bcx.tcx()), + val, disr, field_index); self.store_operand(&bcx, lldest_i, op); } } @@ -270,17 +271,17 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty); let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty); let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in { - let repr = adt::represent_type(bcx.ccx(), operand.ty); + let l = bcx.ccx().layout_of(operand.ty); let discr = match operand.val { OperandValue::Immediate(llval) => llval, OperandValue::Ref(llptr) => { bcx.with_block(|bcx| { - adt::trans_get_discr(bcx, &repr, llptr, None, true) + adt::trans_get_discr(bcx, operand.ty, llptr, None, true) }) } 
OperandValue::Pair(..) => bug!("Unexpected Pair operand") }; - (discr, adt::is_discr_signed(&repr)) + (discr, adt::is_discr_signed(&l)) } else { (operand.immediate(), operand.ty.is_signed()) }; diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 325bd655266..9943acbc88e 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -62,11 +62,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => { let ty = self.monomorphized_lvalue_ty(lvalue); - let repr = adt::represent_type(bcx.ccx(), ty); let lvalue_transed = self.trans_lvalue(&bcx, lvalue); bcx.with_block(|bcx| adt::trans_set_discr(bcx, - &repr, + ty, lvalue_transed.llval, Disr::from(variant_index)) ); diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index d191591e082..f0f3dd0bc11 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -18,6 +18,7 @@ use context::CrateContext; use util::nodemap::FnvHashMap; use syntax::ast; +use rustc::ty::layout; use std::ffi::CString; use std::fmt; @@ -299,6 +300,26 @@ impl Type { llvm::LLVMGetIntTypeWidth(self.to_ref()) as u64 } } + + pub fn from_integer(cx: &CrateContext, i: layout::Integer)->Type { + use rustc::ty::layout::Integer::*; + match i { + I1 => Type::i1(cx), + I8 => Type::i8(cx), + I16 => Type::i16(cx), + I32 => Type::i32(cx), + I64 => Type::i64(cx), + } + } + + pub fn from_primitive(ccx: &CrateContext, p: layout::Primitive)->Type { + match p { + layout::Int(i) => Type::from_integer(ccx, i), + layout::F32 => Type::f32(ccx), + layout::F64 => Type::f64(ccx), + layout::Pointer => bug!("It is not possible to convert Pointer directly to Type.") + } + } } /* Memory-managed object interface to type handles. */ diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 141b8506c39..8183639ae22 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -22,17 +22,6 @@ use type_::Type; use syntax::ast; -// LLVM doesn't like objects that are too big. Issue #17913 -fn ensure_array_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - llet: Type, - size: machine::llsize, - scapegoat: Ty<'tcx>) { - let esz = machine::llsize_of_alloc(ccx, llet); - match esz.checked_mul(size) { - Some(n) if n < ccx.obj_size_bound() => {} - _ => { ccx.report_overbig_object(scapegoat) } - } -} // A "sizing type" is an LLVM type, the size and alignment of which are // guaranteed to be equivalent to what you would get out of `type_of()`. It's @@ -81,7 +70,6 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ ty::TyArray(ty, size) => { let llty = sizing_type_of(cx, ty); let size = size as u64; - ensure_array_fits_in_address_space(cx, llty, size, t); Type::array(&llty, size) } @@ -98,13 +86,11 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ } let llet = type_of(cx, e); let n = t.simd_size(cx.tcx()) as u64; - ensure_array_fits_in_address_space(cx, llet, n, t); Type::vector(&llet, n) } ty::TyTuple(..) | ty::TyAdt(..) | ty::TyClosure(..) => { - let repr = adt::represent_type(cx, t); - adt::sizing_type_of(cx, &repr, false) + adt::sizing_type_of(cx, t, false) } ty::TyProjection(..) | ty::TyInfer(..) | ty::TyParam(..) | @@ -242,8 +228,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ty::TyClosure(..) => { // Only create the named struct, but don't fill it in. 
We // fill it in *after* placing it into the type cache. - let repr = adt::represent_type(cx, t); - adt::incomplete_type_of(cx, &repr, "closure") + adt::incomplete_type_of(cx, t, "closure") } ty::TyBox(ty) | @@ -266,11 +251,6 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ty::TyArray(ty, size) => { let size = size as u64; - // we must use `sizing_type_of` here as the type may - // not be fully initialized. - let szty = sizing_type_of(cx, ty); - ensure_array_fits_in_address_space(cx, szty, size, t); - let llty = in_memory_type_of(cx, ty); Type::array(&llty, size) } @@ -290,8 +270,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> } ty::TyTuple(ref tys) if tys.is_empty() => Type::nil(cx), ty::TyTuple(..) => { - let repr = adt::represent_type(cx, t); - adt::type_of(cx, &repr) + adt::type_of(cx, t) } ty::TyAdt(..) if t.is_simd() => { let e = t.simd_type(cx.tcx()); @@ -302,7 +281,6 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> } let llet = in_memory_type_of(cx, e); let n = t.simd_size(cx.tcx()) as u64; - ensure_array_fits_in_address_space(cx, llet, n, t); Type::vector(&llet, n) } ty::TyAdt(def, substs) => { @@ -310,9 +288,8 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> // fill it in *after* placing it into the type cache. This // avoids creating more than one copy of the enum when one // of the enum's variants refers to the enum itself. - let repr = adt::represent_type(cx, t); let name = llvm_type_name(cx, def.did, substs); - adt::incomplete_type_of(cx, &repr, &name[..]) + adt::incomplete_type_of(cx, t, &name[..]) } ty::TyInfer(..) | @@ -329,8 +306,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> // If this was an enum or struct, fill in the type now. match t.sty { ty::TyAdt(..) | ty::TyClosure(..) if !t.is_simd() => { - let repr = adt::represent_type(cx, t); - adt::finish_type_of(cx, &repr, &mut llty); + adt::finish_type_of(cx, t, &mut llty); } _ => () } @@ -340,8 +316,8 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> pub fn align_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> machine::llalign { - let llty = sizing_type_of(cx, t); - machine::llalign_of_min(cx, llty) + let layout = cx.layout_of(t); + layout.align(&cx.tcx().data_layout).abi() as machine::llalign } fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
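
As background for the new `build_const_struct` signature in this patch: constant ADT values are emitted as LLVM structs whose field values may be less aligned than the type's layout requires, so the loop pushes explicit padding whenever the running offset falls short of the offset the layout prescribes after each field. The standalone sketch below only mirrors the shape of that arithmetic; plain integers stand in for `llsize_of_alloc`/`llalign_of_min`, the `offset_after_field` numbers are illustrative rather than real layout output, and pushed elements are modeled as (is_padding, byte length) pairs instead of `ValueRef`s.

    // Round `x` up to the next multiple of `a`; same helper the patch keeps.
    fn roundup(x: u64, a: u64) -> u64 {
        ((x + (a - 1)) / a) * a
    }

    // Model of the element stream pushed by `build_const_struct`:
    // `false` entries are field constants, `true` entries are padding bytes.
    fn const_struct_elems(
        vals: &[(u64, u64)],        // (alloc size, min align) of each constant value
        offset_after_field: &[u64], // layout-prescribed offset after each field
        packed: bool,
    ) -> Vec<(bool, u64)> {
        assert_eq!(vals.len(), offset_after_field.len());
        let mut out = Vec::new();
        let mut offset = 0u64;
        for (&(size, align), &target) in vals.iter().zip(offset_after_field) {
            out.push((false, size)); // the field's own constant
            offset += size;
            if !packed {
                offset = roundup(offset, align); // natural alignment of the value
            }
            if offset < target {
                out.push((true, target - offset)); // explicit padding up to the layout offset
                offset = target;
            }
        }
        out
    }

    fn main() {
        // A 1-byte value stored where the layout reserves 8 bytes gets 7 padding bytes.
        assert_eq!(
            const_struct_elems(&[(1, 1)], &[8], false),
            vec![(false, 1), (true, 7)]
        );
    }

The patch additionally pads up to `offset_after_field.last()` after the loop; in the sketch the last iteration already reaches that offset, so the effect is the same.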
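
The rewritten `const_get_field` now picks the LLVM struct element from the type's layout instead of from an `adt::Repr`. The toy dispatch below only illustrates the index arithmetic implied by those match arms (a `General` layout stores the discriminant as element 0, so field indices shift by one, and an untagged union always reads element 0); the real function matches on `rustc::ty::layout::Layout`, and the variant names here are a simplified stand-in, not the real type.

    // Simplified stand-in for the layout variants that `const_get_field` matches on.
    enum FieldLayout {
        Univariant,    // plain struct/tuple/closure: element index == field index
        UntaggedUnion, // the single stored field always lives at element 0
        General,       // tagged enum: element 0 is the discriminant, fields follow
    }

    fn const_field_index(layout: FieldLayout, ix: usize) -> usize {
        match layout {
            FieldLayout::Univariant => ix,
            FieldLayout::UntaggedUnion => 0,
            FieldLayout::General => ix + 1,
        }
    }

    fn main() {
        assert_eq!(const_field_index(FieldLayout::General, 2), 3);
        assert_eq!(const_field_index(FieldLayout::UntaggedUnion, 5), 0);
        assert_eq!(const_field_index(FieldLayout::Univariant, 4), 4);
    }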