rustc_trans: go through layouts uniformly for fat pointers and variants.

This commit is contained in:
Eduard-Mihai Burtescu 2017-09-21 20:40:50 +03:00
parent 026214c858
commit b723af284a
25 changed files with 364 additions and 448 deletions

View File

@ -35,7 +35,7 @@ use rustc_serialize::{Decoder, Decodable, Encoder, Encodable};
use session::{config, early_error, Session};
use traits::Reveal;
use ty::{self, TyCtxt, Ty};
use ty::layout::{FullLayout, LayoutError, LayoutOf};
use ty::layout::{LayoutError, LayoutOf, TyLayout};
use util::nodemap::FxHashMap;
use std::default::Default as StdDefault;
@ -628,9 +628,9 @@ impl<'a, 'tcx> LateContext<'a, 'tcx> {
}
impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a LateContext<'a, 'tcx> {
type FullLayout = Result<FullLayout<'tcx>, LayoutError<'tcx>>;
type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
(self.tcx, self.param_env.reveal_all()).layout_of(ty)
}
}

View File

@ -23,7 +23,7 @@ use std::fmt;
use std::i64;
use std::iter;
use std::mem;
use std::ops::{Add, Sub, Mul, AddAssign, RangeInclusive};
use std::ops::{Add, Sub, Mul, AddAssign, Deref, RangeInclusive};
use ich::StableHashingContext;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
@ -907,6 +907,7 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> {
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct CachedLayout {
pub variant_index: Option<usize>,
pub layout: Layout,
pub fields: FieldPlacement,
pub abi: Abi,
@ -948,6 +949,7 @@ impl<'a, 'tcx> Layout {
let dl = cx.data_layout();
let scalar = |value| {
tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::Scalar,
fields: FieldPlacement::Union(0),
abi: Abi::Scalar(value)
@ -962,7 +964,7 @@ impl<'a, 'tcx> Layout {
/// A univariant, but part of an enum.
EnumVariant(Integer),
}
let univariant_uninterned = |fields: &[FullLayout], repr: &ReprOptions, kind| {
let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| {
let packed = repr.packed();
if packed && repr.align > 0 {
bug!("struct cannot be packed and aligned");
@ -1085,6 +1087,7 @@ impl<'a, 'tcx> Layout {
}
Ok(CachedLayout {
variant_index: None,
layout: Layout::Univariant,
fields: FieldPlacement::Arbitrary {
offsets,
@ -1099,7 +1102,7 @@ impl<'a, 'tcx> Layout {
}
})
};
let univariant = |fields: &[FullLayout], repr: &ReprOptions, kind| {
let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
};
assert!(!ty.has_infer_types());
@ -1129,6 +1132,7 @@ impl<'a, 'tcx> Layout {
memory_index: vec![0, 1]
};
Ok(tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::Univariant,
fields,
abi: Abi::Aggregate {
@ -1185,6 +1189,7 @@ impl<'a, 'tcx> Layout {
.ok_or(LayoutError::SizeOverflow(ty))?;
tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::Array,
fields: FieldPlacement::Array {
stride: element_size,
@ -1202,6 +1207,7 @@ impl<'a, 'tcx> Layout {
ty::TySlice(element) => {
let element = cx.layout_of(element)?;
tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::Array,
fields: FieldPlacement::Array {
stride: element.size(dl),
@ -1218,6 +1224,7 @@ impl<'a, 'tcx> Layout {
}
ty::TyStr => {
tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::Array,
fields: FieldPlacement::Array {
stride: Size::from_bytes(1),
@ -1286,6 +1293,7 @@ impl<'a, 'tcx> Layout {
}
};
tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::Vector,
fields: FieldPlacement::Array {
stride: element.size(tcx),
@ -1343,6 +1351,7 @@ impl<'a, 'tcx> Layout {
}
return Ok(tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::UntaggedUnion,
fields: FieldPlacement::Union(variants[0].len()),
abi: Abi::Aggregate {
@ -1372,7 +1381,11 @@ impl<'a, 'tcx> Layout {
else { StructKind::AlwaysSized }
};
return univariant(&variants[0], &def.repr, kind);
let mut cached = univariant_uninterned(&variants[0], &def.repr, kind)?;
if def.is_enum() {
cached.variant_index = Some(0);
}
return Ok(tcx.intern_layout(cached));
}
let no_explicit_discriminants = def.variants.iter().enumerate()
@ -1389,12 +1402,14 @@ impl<'a, 'tcx> Layout {
for (field_index, field) in variants[i].iter().enumerate() {
if let Some((offset, discr)) = field.non_zero_field(cx)? {
let st = vec![
let mut st = vec![
univariant_uninterned(&variants[0],
&def.repr, StructKind::AlwaysSized)?,
univariant_uninterned(&variants[1],
&def.repr, StructKind::AlwaysSized)?
];
st[0].variant_index = Some(0);
st[1].variant_index = Some(1);
let offset = st[i].fields.offset(field_index) + offset;
let mut abi = st[i].abi;
if offset.bytes() == 0 && discr.size(dl) == abi.size(dl) {
@ -1418,6 +1433,7 @@ impl<'a, 'tcx> Layout {
_ => {}
}
return Ok(tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::NullablePointer {
nndiscr: i as u64,
@ -1454,13 +1470,13 @@ impl<'a, 'tcx> Layout {
assert_eq!(Integer::for_abi_align(dl, start_align), None);
// Create the set of structs that represent each variant.
let mut variants = variants.into_iter().map(|field_layouts| {
let st = univariant_uninterned(&field_layouts,
let mut variants = variants.into_iter().enumerate().map(|(i, field_layouts)| {
let mut st = univariant_uninterned(&field_layouts,
&def.repr, StructKind::EnumVariant(min_ity))?;
st.variant_index = Some(i);
// Find the first field we can't move later
// to make room for a larger discriminant.
for i in st.fields.index_by_increasing_offset() {
let field = field_layouts[i];
for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
let field_align = field.align(dl);
if !field.is_zst() || field_align.abi() != 1 {
start_align = start_align.min(field_align);
@ -1539,6 +1555,7 @@ impl<'a, 'tcx> Layout {
let discr = Int(ity, signed);
tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::General {
discr,
@ -1587,7 +1604,7 @@ impl<'a, 'tcx> Layout {
fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty: Ty<'tcx>,
param_env: ty::ParamEnv<'tcx>,
layout: FullLayout<'tcx>) {
layout: TyLayout<'tcx>) {
// If we are running with `-Zprint-type-sizes`, record layouts for
// dumping later. Ignore layouts that are done with non-empty
// environments or non-monomorphic layouts, as the user only wants
@ -1607,7 +1624,7 @@ impl<'a, 'tcx> Layout {
fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty: Ty<'tcx>,
param_env: ty::ParamEnv<'tcx>,
layout: FullLayout<'tcx>) {
layout: TyLayout<'tcx>) {
let cx = (tcx, param_env);
// (delay format until we actually need it)
let record = |kind, opt_discr_size, variants| {
@ -1644,7 +1661,7 @@ impl<'a, 'tcx> Layout {
let build_variant_info = |n: Option<ast::Name>,
flds: &[ast::Name],
layout: FullLayout<'tcx>| {
layout: TyLayout<'tcx>| {
let mut min_size = Size::from_bytes(0);
let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
match layout.field(cx, i) {
@ -1685,7 +1702,7 @@ impl<'a, 'tcx> Layout {
}
};
match *layout.layout {
match layout.layout {
Layout::Univariant => {
let variant_names = || {
adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::<Vec<_>>()
@ -1723,7 +1740,7 @@ impl<'a, 'tcx> Layout {
layout.for_variant(i))
})
.collect();
record(adt_kind.into(), match *layout.layout {
record(adt_kind.into(), match layout.layout {
Layout::General { discr, .. } => Some(discr.size(tcx)),
_ => None
}, variant_infos);
@ -1901,12 +1918,16 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> {
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e. discriminants) and fat pointers.
#[derive(Copy, Clone, Debug)]
pub struct FullLayout<'tcx> {
pub struct TyLayout<'tcx> {
pub ty: Ty<'tcx>,
pub variant_index: Option<usize>,
pub layout: &'tcx Layout,
pub fields: &'tcx FieldPlacement,
pub abi: Abi,
cached: &'tcx CachedLayout
}
// Forward field access (`layout`, `fields`, `abi`, `variant_index`) from
// `TyLayout` to the interned `CachedLayout` it wraps, so callers can write
// `layout.abi` instead of `layout.cached.abi`.
// NOTE(review): the target is `&'tcx CachedLayout` (a reference), so `deref`
// yields `&&'tcx CachedLayout`; auto-deref flattens this at use sites.
impl<'tcx> Deref for TyLayout<'tcx> {
type Target = &'tcx CachedLayout;
fn deref(&self) -> &&'tcx CachedLayout {
&self.cached
}
}
pub trait HasTyCtxt<'tcx>: HasDataLayout {
@ -1937,29 +1958,42 @@ impl<'a, 'gcx, 'tcx, T: Copy> HasTyCtxt<'gcx> for (TyCtxt<'a, 'gcx, 'tcx>, T) {
}
}
pub trait LayoutOf<T> {
type FullLayout;
/// Abstracts over a plain `T` and a `Result<T, E>`, letting generic layout
/// code apply a `T -> T` transformation to whichever form a context's
/// `layout_of` returns (infallible or fallible).
pub trait MaybeResult<T> {
/// Apply `f` to the contained `T`, preserving the carrier
/// (identity for `T`; `Result::map` for `Result<T, E>`).
fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}
fn layout_of(self, ty: T) -> Self::FullLayout;
// Infallible case: the value *is* the `T`, so mapping is just application.
impl<T> MaybeResult<T> for T {
fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
f(self)
}
}
// Fallible case: map over the `Ok` value, passing any `Err` through untouched.
impl<T, E> MaybeResult<T> for Result<T, E> {
fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
self.map(f)
}
}
/// A context that can compute the layout of a type `T`.
/// The associated `TyLayout` is the result carrier: implementors in this
/// commit use either `TyLayout<'tcx>` directly (contexts that panic on
/// layout errors) or `Result<TyLayout<'tcx>, LayoutError<'tcx>>`.
pub trait LayoutOf<T> {
type TyLayout;
fn layout_of(self, ty: T) -> Self::TyLayout;
}
impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx>) {
type FullLayout = Result<FullLayout<'tcx>, LayoutError<'tcx>>;
type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
/// Computes the layout of a type. Note that this implicitly
/// executes in "reveal all" mode.
#[inline]
fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
let (tcx, param_env) = self;
let ty = tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all());
let cached = tcx.layout_raw(param_env.reveal_all().and(ty))?;
let layout = FullLayout {
let layout = TyLayout {
ty,
variant_index: None,
layout: &cached.layout,
fields: &cached.fields,
abi: cached.abi
cached
};
// NB: This recording is normally disabled; when enabled, it
@ -1976,22 +2010,19 @@ impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx
impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>,
ty::ParamEnv<'tcx>) {
type FullLayout = Result<FullLayout<'tcx>, LayoutError<'tcx>>;
type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
/// Computes the layout of a type. Note that this implicitly
/// executes in "reveal all" mode.
#[inline]
fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
let (tcx_at, param_env) = self;
let ty = tcx_at.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all());
let cached = tcx_at.layout_raw(param_env.reveal_all().and(ty))?;
let layout = FullLayout {
let layout = TyLayout {
ty,
variant_index: None,
layout: &cached.layout,
fields: &cached.fields,
abi: cached.abi
cached
};
// NB: This recording is normally disabled; when enabled, it
@ -2006,79 +2037,57 @@ impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>,
}
}
impl<'a, 'tcx> FullLayout<'tcx> {
impl<'a, 'tcx> TyLayout<'tcx> {
pub fn for_variant(&self, variant_index: usize) -> Self {
let variants = match self.ty.sty {
ty::TyAdt(def, _) if def.is_enum() => &def.variants[..],
_ => &[]
};
let count = if variants.is_empty() {
0
} else {
variants[variant_index].fields.len()
};
let (layout, fields, abi) = match *self.layout {
Layout::Univariant => (self.layout, self.fields, self.abi),
let cached = match self.layout {
Layout::NullablePointer { ref variants, .. } |
Layout::General { ref variants, .. } => {
let variant = &variants[variant_index];
(&variant.layout, &variant.fields, variant.abi)
&variants[variant_index]
}
_ => bug!()
_ => self.cached
};
assert_eq!(fields.count(), count);
assert_eq!(cached.variant_index, Some(variant_index));
FullLayout {
variant_index: Some(variant_index),
layout,
fields,
abi,
..*self
TyLayout {
ty: self.ty,
cached
}
}
fn field_type_unnormalized(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, i: usize) -> Ty<'tcx> {
let ptr_field_type = |pointee: Ty<'tcx>| {
pub fn field<C>(&self, cx: C, i: usize) -> C::TyLayout
where C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>,
C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
let tcx = cx.tcx();
let ptr_field_layout = |pointee: Ty<'tcx>| {
assert!(i < 2);
let mk_ptr = |ty: Ty<'tcx>| {
match self.ty.sty {
ty::TyRef(r, ty::TypeAndMut { mutbl, .. }) => {
tcx.mk_ref(r, ty::TypeAndMut { ty, mutbl })
}
ty::TyRawPtr(ty::TypeAndMut { mutbl, .. }) => {
tcx.mk_ptr(ty::TypeAndMut { ty, mutbl })
}
ty::TyAdt(def, _) if def.is_box() => {
tcx.mk_box(ty)
}
_ => bug!()
}
};
let slice = |element: Ty<'tcx>| {
if i == 0 {
mk_ptr(element)
} else {
tcx.types.usize
}
};
match tcx.struct_tail(pointee).sty {
ty::TySlice(element) => slice(element),
ty::TyStr => slice(tcx.types.u8),
ty::TyDynamic(..) => {
if i == 0 {
mk_ptr(tcx.mk_nil())
} else {
Pointer.to_ty(tcx)
}
}
_ => bug!("FullLayout::field_type({:?}): not applicable", self)
// Reuse the fat *T type as its own thin pointer data field.
// This provides information about e.g. DST struct pointees
// (which may have no non-DST form), and will work as long
// as the `Abi` or `FieldPlacement` is checked by users.
if i == 0 {
return cx.layout_of(Pointer.to_ty(tcx)).map_same(|mut ptr_layout| {
ptr_layout.ty = self.ty;
ptr_layout
});
}
let meta_ty = match tcx.struct_tail(pointee).sty {
ty::TySlice(_) |
ty::TyStr => tcx.types.usize,
ty::TyDynamic(..) => {
// FIXME(eddyb) use an usize/fn() array with
// the correct number of vtables slots.
tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil())
}
_ => bug!("TyLayout::field_type({:?}): not applicable", self)
};
cx.layout_of(meta_ty)
};
match self.ty.sty {
cx.layout_of(match self.ty.sty {
ty::TyBool |
ty::TyChar |
ty::TyInt(_) |
@ -2089,16 +2098,16 @@ impl<'a, 'tcx> FullLayout<'tcx> {
ty::TyFnDef(..) |
ty::TyDynamic(..) |
ty::TyForeign(..) => {
bug!("FullLayout::field_type({:?}): not applicable", self)
bug!("TyLayout::field_type({:?}): not applicable", self)
}
// Potentially-fat pointers.
ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
ptr_field_type(pointee)
return ptr_field_layout(pointee);
}
ty::TyAdt(def, _) if def.is_box() => {
ptr_field_type(self.ty.boxed_ty())
return ptr_field_layout(self.ty.boxed_ty());
}
// Arrays and slices.
@ -2126,16 +2135,16 @@ impl<'a, 'tcx> FullLayout<'tcx> {
ty::TyAdt(def, substs) => {
let v = if def.is_enum() {
match self.variant_index {
None => match *self.layout {
None => match self.layout {
// Discriminant field for enums (where applicable).
Layout::General { discr, .. } |
Layout::NullablePointer { discr, .. } => {
return [discr.to_ty(tcx)][i];
return cx.layout_of([discr.to_ty(tcx)][i]);
}
_ => {
bug!("TyLayout::field_type: enum `{}` has no discriminant",
self.ty)
}
_ if def.variants.len() > 1 => return [][i],
// Enums with one variant behave like structs.
_ => 0
},
Some(v) => v
}
@ -2148,16 +2157,9 @@ impl<'a, 'tcx> FullLayout<'tcx> {
ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
ty::TyInfer(_) | ty::TyError => {
bug!("FullLayout::field_type: unexpected type `{}`", self.ty)
bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
}
}
}
pub fn field<C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>>(&self,
cx: C,
i: usize)
-> C::FullLayout {
cx.layout_of(self.field_type_unnormalized(cx.tcx(), i))
})
}
/// Returns true if the layout corresponds to an unsized type.
@ -2198,11 +2200,11 @@ impl<'a, 'tcx> FullLayout<'tcx> {
// FIXME(eddyb) track value ranges and traverse already optimized enums.
fn non_zero_field<C>(&self, cx: C)
-> Result<Option<(Size, Primitive)>, LayoutError<'tcx>>
where C: LayoutOf<Ty<'tcx>, FullLayout = Result<Self, LayoutError<'tcx>>> +
where C: LayoutOf<Ty<'tcx>, TyLayout = Result<Self, LayoutError<'tcx>>> +
HasTyCtxt<'tcx>
{
let tcx = cx.tcx();
match (self.layout, self.abi, &self.ty.sty) {
match (&self.layout, self.abi, &self.ty.sty) {
// FIXME(eddyb) check this via value ranges on scalars.
(&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyRef(..)) |
(&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyFnPtr(..)) => {
@ -2238,7 +2240,7 @@ impl<'a, 'tcx> FullLayout<'tcx> {
// Perhaps one of the fields is non-zero, let's recurse and find out.
_ => {
if let FieldPlacement::Array { count, .. } = *self.fields {
if let FieldPlacement::Array { count, .. } = self.fields {
if count > 0 {
return self.field(cx, 0)?.non_zero_field(cx);
}
@ -2341,6 +2343,7 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for Abi {
}
impl_stable_hash_for!(struct ::ty::layout::CachedLayout {
variant_index,
layout,
fields,
abi

View File

@ -753,7 +753,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences {
bug!("failed to get layout for `{}`: {}", t, e)
});
if let Layout::General { ref variants, discr, .. } = *layout.layout {
if let Layout::General { ref variants, discr, .. } = layout.layout {
let discr_size = discr.size(cx.tcx).bytes();
debug!("enum `{}` is {} bytes large with layout:\n{:#?}",

View File

@ -1316,11 +1316,6 @@ extern "C" {
ElementCount: c_uint,
Packed: Bool);
pub fn LLVMConstNamedStruct(S: TypeRef,
ConstantVals: *const ValueRef,
Count: c_uint)
-> ValueRef;
/// Enables LLVM debug output.
pub fn LLVMRustSetDebug(Enabled: c_int);

View File

@ -36,7 +36,7 @@ use type_of::LayoutLlvmExt;
use rustc::hir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, Size, FullLayout};
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::ty::layout::{HasDataLayout, LayoutOf};
use rustc_back::PanicStrategy;
@ -275,7 +275,7 @@ pub trait LayoutExt<'tcx> {
fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>;
}
impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> {
impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
fn is_aggregate(&self) -> bool {
match self.abi {
layout::Abi::Scalar(_) |
@ -311,7 +311,7 @@ impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> {
let mut total = Size::from_bytes(0);
let mut result = None;
let is_union = match *self.fields {
let is_union = match self.fields {
layout::FieldPlacement::Array { count, .. } => {
if count > 0 {
return self.field(ccx, 0).homogeneous_aggregate(ccx);
@ -424,7 +424,7 @@ impl CastTarget {
#[derive(Debug)]
pub struct ArgType<'tcx> {
kind: ArgKind,
pub layout: FullLayout<'tcx>,
pub layout: TyLayout<'tcx>,
/// Cast target, either a single uniform or a pair of registers.
pub cast: Option<CastTarget>,
/// Dummy argument, which is emitted before the real argument.
@ -435,7 +435,7 @@ pub struct ArgType<'tcx> {
}
impl<'a, 'tcx> ArgType<'tcx> {
fn new(layout: FullLayout<'tcx>) -> ArgType<'tcx> {
fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
ArgType {
kind: ArgKind::Direct,
layout,
@ -610,7 +610,7 @@ impl<'a, 'tcx> FnType<'tcx> {
let fn_ty = instance_ty(ccx.tcx(), &instance);
let sig = ty_fn_sig(ccx, fn_ty);
let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
Self::new(ccx, sig, &[])
FnType::new(ccx, sig, &[])
}
pub fn new(ccx: &CrateContext<'a, 'tcx>,

View File

@ -28,6 +28,7 @@ use super::ModuleSource;
use super::ModuleTranslation;
use super::ModuleKind;
use abi;
use assert_module_sources;
use back::link;
use back::symbol_export;
@ -40,7 +41,7 @@ use rustc::middle::lang_items::StartFnLangItem;
use rustc::middle::trans::{Linkage, Visibility, Stats};
use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, FullLayout, LayoutOf};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
use rustc::ty::maps::Providers;
use rustc::dep_graph::{DepNode, DepKind, DepConstructor};
use rustc::middle::cstore::{self, LinkMeta, LinkagePreference};
@ -68,7 +69,7 @@ use symbol_names_test;
use time_graph;
use trans_item::{TransItem, BaseTransItemExt, TransItemExt, DefPathBasedNames};
use type_::Type;
use type_of::{self, LayoutLlvmExt};
use type_of::LayoutLlvmExt;
use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet};
use CrateInfo;
@ -203,8 +204,10 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
old_info.expect("unsized_info: missing old info for trait upcast")
}
(_, &ty::TyDynamic(ref data, ..)) => {
let vtable_ptr = ccx.layout_of(ccx.tcx().mk_mut_ptr(target))
.field(ccx, abi::FAT_PTR_EXTRA);
consts::ptrcast(meth::get_vtable(ccx, source, data.principal()),
Type::vtable_ptr(ccx))
vtable_ptr.llvm_type(ccx))
}
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
source,
@ -255,8 +258,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// i.e. &'a fmt::Debug+Send => &'a fmt::Debug
// So we need to pointercast the base to ensure
// the types match up.
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty);
(bcx.pointercast(base, llcast_ty), info)
let thin_ptr = dst.layout.field(bcx.ccx, abi::FAT_PTR_ADDR);
(bcx.pointercast(base, thin_ptr.llvm_type(bcx.ccx)), info)
}
OperandValue::Immediate(base) => {
unsize_thin_ptr(bcx, base, src_ty, dst_ty)
@ -371,7 +374,7 @@ pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
}
}
pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::FullLayout) -> ValueRef {
pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef {
if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = layout.abi {
bcx.trunc(val, Type::i1(bcx.ccx))
} else {
@ -400,7 +403,7 @@ pub fn memcpy_ty<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
dst: ValueRef,
src: ValueRef,
layout: FullLayout<'tcx>,
layout: TyLayout<'tcx>,
align: Option<Align>,
) {
let ccx = bcx.ccx;

View File

@ -14,7 +14,7 @@
use abi::{FnType, ArgType, LayoutExt, Reg};
use context::CrateContext;
use rustc::ty::layout::{self, FullLayout};
use rustc::ty::layout::{self, TyLayout};
fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 {
@ -25,7 +25,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
}
fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout: FullLayout<'tcx>) -> bool {
layout: TyLayout<'tcx>) -> bool {
match layout.abi {
layout::Abi::Scalar(layout::F32) |
layout::Abi::Scalar(layout::F64) => true,

View File

@ -11,7 +11,7 @@
use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind};
use common::CrateContext;
use rustc::ty::layout::{self, FullLayout};
use rustc::ty::layout::{self, TyLayout};
#[derive(PartialEq)]
pub enum Flavor {
@ -20,7 +20,7 @@ pub enum Flavor {
}
fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout: FullLayout<'tcx>) -> bool {
layout: TyLayout<'tcx>) -> bool {
match layout.abi {
layout::Abi::Scalar(layout::F32) |
layout::Abi::Scalar(layout::F64) => true,

View File

@ -14,7 +14,7 @@
use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind};
use context::CrateContext;
use rustc::ty::layout::{self, Layout, FullLayout, Size};
use rustc::ty::layout::{self, Layout, TyLayout, Size};
#[derive(Clone, Copy, PartialEq, Debug)]
enum Class {
@ -53,7 +53,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
}
fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout: FullLayout<'tcx>,
layout: TyLayout<'tcx>,
cls: &mut [Class],
off: Size)
-> Result<(), Memory> {
@ -90,7 +90,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
// FIXME(eddyb) have to work around Rust enums for now.
// Fix is either guarantee no data where there is no field,
// by putting variants in fields, or be more clever.
match *layout.layout {
match layout.layout {
Layout::General { .. } |
Layout::NullablePointer { .. } => return Err(Memory),
_ => {}

View File

@ -54,20 +54,11 @@ pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) ->
}
}
pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
let layout = ccx.layout_of(ty);
match layout.abi {
layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true,
layout::Abi::Aggregate { .. } => layout.is_zst()
}
}
/// Returns true if the type is represented as a pair of immediates.
pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
-> bool {
let layout = ccx.layout_of(ty);
match *layout.fields {
match layout.fields {
layout::FieldPlacement::Arbitrary { .. } => {
// There must be only 2 fields.
if layout.fields.count() != 2 {
@ -75,8 +66,8 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
}
// The two fields must be both immediates.
type_is_immediate(ccx, layout.field(ccx, 0).ty) &&
type_is_immediate(ccx, layout.field(ccx, 1).ty)
layout.field(ccx, 0).is_llvm_immediate() &&
layout.field(ccx, 1).is_llvm_immediate()
}
_ => false
}
@ -256,16 +247,7 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
let len = s.len();
let cs = consts::ptrcast(C_cstr(cx, s, false),
cx.layout_of(cx.tcx().mk_str()).llvm_type(cx).ptr_to());
let empty = C_array(Type::i8(cx), &[]);
assert_eq!(abi::FAT_PTR_ADDR, 0);
assert_eq!(abi::FAT_PTR_EXTRA, 1);
C_named_struct(cx.str_slice_type(), &[
empty,
cs,
empty,
C_usize(cx, len as u64),
empty
])
C_fat_ptr(cx, cs, C_usize(cx, len as u64))
}
pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef {
@ -293,12 +275,6 @@ pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) ->
}
}
pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef {
unsafe {
llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint)
}
}
pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef {
unsafe {
return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint);

View File

@ -24,14 +24,13 @@ use monomorphize::Instance;
use partitioning::CodegenUnit;
use type_::Type;
use type_of::LayoutLlvmExt;
use rustc_data_structures::base_n;
use rustc::middle::trans::Stats;
use rustc_data_structures::stable_hasher::StableHashingContextProvider;
use rustc::session::config::{self, NoDebugInfo};
use rustc::session::Session;
use rustc::ty::layout::{LayoutError, LayoutOf, FullLayout};
use rustc::ty::layout::{LayoutError, LayoutOf, TyLayout};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::util::nodemap::FxHashMap;
use rustc_trans_utils;
@ -101,9 +100,9 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> {
/// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details
used_statics: RefCell<Vec<ValueRef>>,
lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>>,
scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
isize_ty: Type,
str_slice_type: Type,
dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
@ -378,8 +377,8 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> {
statics_to_rauw: RefCell::new(Vec::new()),
used_statics: RefCell::new(Vec::new()),
lltypes: RefCell::new(FxHashMap()),
scalar_lltypes: RefCell::new(FxHashMap()),
isize_ty: Type::from_ref(ptr::null_mut()),
str_slice_type: Type::from_ref(ptr::null_mut()),
dbg_cx,
eh_personality: Cell::new(None),
eh_unwind_resume: Cell::new(None),
@ -389,28 +388,19 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> {
placeholder: PhantomData,
};
let (isize_ty, str_slice_ty, mut local_ccx) = {
let (isize_ty, mut local_ccx) = {
// Do a little dance to create a dummy CrateContext, so we can
// create some things in the LLVM module of this codegen unit
let mut local_ccxs = vec![local_ccx];
let (isize_ty, str_slice_ty) = {
let isize_ty = {
let dummy_ccx = LocalCrateContext::dummy_ccx(shared,
local_ccxs.as_mut_slice());
let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice");
str_slice_ty.set_struct_body(&[
Type::array(&Type::i8(&dummy_ccx), 0),
dummy_ccx.layout_of(shared.tcx.mk_str()).llvm_type(&dummy_ccx).ptr_to(),
Type::array(&Type::i8(&dummy_ccx), 0),
Type::isize(&dummy_ccx),
Type::array(&Type::i8(&dummy_ccx), 0)
], false);
(Type::isize(&dummy_ccx), str_slice_ty)
Type::isize(&dummy_ccx)
};
(isize_ty, str_slice_ty, local_ccxs.pop().unwrap())
(isize_ty, local_ccxs.pop().unwrap())
};
local_ccx.isize_ty = isize_ty;
local_ccx.str_slice_type = str_slice_ty;
local_ccx
}
@ -515,10 +505,14 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.local().used_statics
}
pub fn lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Type>> {
pub fn lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>> {
&self.local().lltypes
}
pub fn scalar_lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Type>> {
&self.local().scalar_lltypes
}
pub fn stats<'a>(&'a self) -> &'a RefCell<Stats> {
&self.local().stats
}
@ -527,10 +521,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
self.local().isize_ty
}
pub fn str_slice_type(&self) -> Type {
self.local().str_slice_type
}
pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext<'tcx>> {
&self.local().dbg_cx
}
@ -669,9 +659,9 @@ impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CrateContext<'a, 'tcx> {
}
impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a SharedCrateContext<'a, 'tcx> {
type FullLayout = FullLayout<'tcx>;
type TyLayout = TyLayout<'tcx>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
(self.tcx, ty::ParamEnv::empty(traits::Reveal::All))
.layout_of(ty)
.unwrap_or_else(|e| match e {
@ -682,10 +672,10 @@ impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a SharedCrateContext<'a, 'tcx> {
}
impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a CrateContext<'a, 'tcx> {
type FullLayout = FullLayout<'tcx>;
type TyLayout = TyLayout<'tcx>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
self.shared.layout_of(ty)
}
}

View File

@ -32,7 +32,7 @@ use rustc::ty::util::TypeIdHasher;
use rustc::ich::Fingerprint;
use common::{self, CrateContext};
use rustc::ty::{self, AdtKind, Ty};
use rustc::ty::layout::{self, Align, LayoutOf, Size, FullLayout};
use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
use rustc::session::{Session, config};
use rustc::util::nodemap::FxHashMap;
use rustc::util::common::path2cstr;
@ -1052,7 +1052,7 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
//=-----------------------------------------------------------------------------
struct UnionMemberDescriptionFactory<'tcx> {
layout: FullLayout<'tcx>,
layout: TyLayout<'tcx>,
variant: &'tcx ty::VariantDef,
span: Span,
}
@ -1119,7 +1119,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
// offset of zero bytes).
struct EnumMemberDescriptionFactory<'tcx> {
enum_type: Ty<'tcx>,
type_rep: FullLayout<'tcx>,
type_rep: TyLayout<'tcx>,
discriminant_type_metadata: Option<DIType>,
containing_scope: DIScope,
span: Span,
@ -1129,7 +1129,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
let adt = &self.enum_type.ty_adt_def().unwrap();
match *self.type_rep.layout {
match self.type_rep.layout {
layout::Layout::General { ref variants, .. } => {
let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
.expect(""));
@ -1220,7 +1220,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
// of discriminant instead of us having to recover its path.
fn compute_field_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
name: &mut String,
layout: FullLayout<'tcx>,
layout: TyLayout<'tcx>,
offset: Size,
size: Size) {
for i in 0..layout.fields.count() {
@ -1300,7 +1300,7 @@ enum EnumDiscriminantInfo {
// descriptions of the fields of the variant. This is a rudimentary version of a
// full RecursiveTypeDescription.
fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
layout: layout::FullLayout<'tcx>,
layout: layout::TyLayout<'tcx>,
variant: &'tcx ty::VariantDef,
discriminant_info: EnumDiscriminantInfo,
containing_scope: DIScope,
@ -1431,7 +1431,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let type_rep = cx.layout_of(enum_type);
let discriminant_type_metadata = match *type_rep.layout {
let discriminant_type_metadata = match type_rep.layout {
layout::Layout::NullablePointer { .. } |
layout::Layout::Univariant { .. } => None,
layout::Layout::General { discr, .. } => Some(discriminant_type_metadata(discr)),

View File

@ -9,6 +9,7 @@
// except according to those terms.
use llvm::ValueRef;
use abi::FnType;
use callee;
use common::*;
use builder::Builder;
@ -32,10 +33,13 @@ impl<'a, 'tcx> VirtualIndex {
VirtualIndex(index as u64 + 3)
}
pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
pub fn get_fn(self, bcx: &Builder<'a, 'tcx>,
llvtable: ValueRef,
fn_ty: &FnType<'tcx>) -> ValueRef {
// Load the data pointer from the object.
debug!("get_fn({:?}, {:?})", Value(llvtable), self);
let llvtable = bcx.pointercast(llvtable, fn_ty.llvm_type(bcx.ccx).ptr_to().ptr_to());
let ptr = bcx.load_nonnull(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);

View File

@ -20,6 +20,7 @@ use rustc::mir::traversal;
use rustc::ty;
use rustc::ty::layout::LayoutOf;
use common;
use type_of::LayoutLlvmExt;
use super::MirContext;
pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
@ -31,21 +32,14 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() {
let ty = mircx.monomorphize(&ty);
debug!("local {} has type {:?}", index, ty);
if ty.is_scalar() ||
ty.is_box() ||
ty.is_region_ptr() ||
ty.is_simd() ||
mircx.ccx.layout_of(ty).is_zst()
{
if mircx.ccx.layout_of(ty).is_llvm_immediate() {
// These sorts of types are immediates that we can store
// in an ValueRef without an alloca.
assert!(common::type_is_immediate(mircx.ccx, ty) ||
common::type_is_fat_ptr(mircx.ccx, ty));
} else if common::type_is_imm_pair(mircx.ccx, ty) {
// We allow pairs and uses of any of their 2 fields.
} else {
// These sorts of types require an alloca. Note that
// type_is_immediate() may *still* be true, particularly
// is_llvm_immediate() may *still* be true, particularly
// for newtypes, but we currently force some types
// (e.g. structs) into an alloca unconditionally, just so
// that we don't have to deal with having two pathways
@ -179,9 +173,9 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
LvalueContext::StorageLive |
LvalueContext::StorageDead |
LvalueContext::Validate |
LvalueContext::Inspect |
LvalueContext::Consume => {}
LvalueContext::Inspect |
LvalueContext::Store |
LvalueContext::Borrow { .. } |
LvalueContext::Projection(..) => {

View File

@ -274,13 +274,22 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
let lvalue = self.trans_lvalue(&bcx, location);
let fn_ty = FnType::of_instance(bcx.ccx, &drop_fn);
let (drop_fn, need_extra) = match ty.sty {
ty::TyDynamic(..) => (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra),
false),
_ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra())
let mut args: &[_] = &[lvalue.llval, lvalue.llextra];
args = &args[..1 + lvalue.has_extra() as usize];
let (drop_fn, fn_ty) = match ty.sty {
ty::TyDynamic(..) => {
let fn_ty = common::instance_ty(bcx.ccx.tcx(), &drop_fn);
let sig = common::ty_fn_sig(bcx.ccx, fn_ty);
let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
let fn_ty = FnType::new_vtable(bcx.ccx, sig, &[]);
args = &args[..1];
(meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra, &fn_ty), fn_ty)
}
_ => {
(callee::get_fn(bcx.ccx, drop_fn),
FnType::of_instance(bcx.ccx, &drop_fn))
}
};
let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize];
do_call(self, bcx, fn_ty, drop_fn, args,
Some((ReturnDest::Nothing, target)),
unwind);
@ -561,15 +570,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
(&args[..], None)
};
for (idx, arg) in first_args.iter().enumerate() {
for (i, arg) in first_args.iter().enumerate() {
let mut op = self.trans_operand(&bcx, arg);
if idx == 0 {
if i == 0 {
if let Pair(_, meta) = op.val {
if let Some(ty::InstanceDef::Virtual(_, idx)) = def {
let llmeth = meth::VirtualIndex::from_index(idx)
.get_fn(&bcx, meta);
let llty = fn_ty.llvm_type(bcx.ccx).ptr_to();
llfn = Some(bcx.pointercast(llmeth, llty));
llfn = Some(meth::VirtualIndex::from_index(idx)
.get_fn(&bcx, meta, &fn_ty));
}
}
}
@ -582,7 +589,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
op.val = Ref(tmp.llval, tmp.alignment);
}
self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[idx]);
self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[i]);
}
if let Some(tup) = untuple {
self.trans_arguments_untupled(&bcx, tup, &mut llargs,

View File

@ -32,7 +32,7 @@ use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u
use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr};
use common::const_to_opt_u128;
use consts;
use type_of::{self, LayoutLlvmExt};
use type_of::LayoutLlvmExt;
use type_::Type;
use value::Value;
@ -145,7 +145,7 @@ impl<'a, 'tcx> Const<'tcx> {
let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) {
let (a, b) = self.get_pair(ccx);
OperandValue::Pair(a, b)
} else if llty == llvalty && common::type_is_immediate(ccx, self.ty) {
} else if llty == llvalty && ccx.layout_of(self.ty).is_llvm_immediate() {
// If the types match, we can use the value directly.
OperandValue::Immediate(self.llval)
} else {
@ -677,11 +677,12 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
}
C_fat_ptr(self.ccx, base, info)
}
mir::CastKind::Misc if common::type_is_immediate(self.ccx, operand.ty) => {
debug_assert!(common::type_is_immediate(self.ccx, cast_ty));
mir::CastKind::Misc if self.ccx.layout_of(operand.ty).is_llvm_immediate() => {
let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
let ll_t_out = self.ccx.layout_of(cast_ty).immediate_llvm_type(self.ccx);
let cast_layout = self.ccx.layout_of(cast_ty);
assert!(cast_layout.is_llvm_immediate());
let ll_t_out = cast_layout.immediate_llvm_type(self.ccx);
let llval = operand.llval;
let signed = match self.ccx.layout_of(operand.ty).abi {
layout::Abi::Scalar(layout::Int(_, signed)) => signed,
@ -728,8 +729,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
if common::type_is_fat_ptr(self.ccx, operand.ty) {
let (data_ptr, meta) = operand.get_fat_ptr(self.ccx);
if common::type_is_fat_ptr(self.ccx, cast_ty) {
let llcast_ty = type_of::fat_ptr_base_ty(self.ccx, cast_ty);
let data_cast = consts::ptrcast(data_ptr, llcast_ty);
let thin_ptr = self.ccx.layout_of(cast_ty)
.field(self.ccx, abi::FAT_PTR_ADDR);
let data_cast = consts::ptrcast(data_ptr,
thin_ptr.llvm_type(self.ccx));
C_fat_ptr(self.ccx, data_cast, meta)
} else { // cast to thin-ptr
// Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
@ -1091,7 +1094,7 @@ fn trans_const_adt<'a, 'tcx>(
mir::AggregateKind::Adt(_, index, _, _) => index,
_ => 0,
};
match *l.layout {
match l.layout {
layout::Layout::General { .. } => {
let discr = match *kind {
mir::AggregateKind::Adt(adt_def, _, _, _) => {
@ -1147,7 +1150,7 @@ fn trans_const_adt<'a, 'tcx>(
/// a two-element struct will locate it at offset 4, and accesses to it
/// will read the wrong memory.
fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout: layout::FullLayout<'tcx>,
layout: layout::TyLayout<'tcx>,
vals: &[Const<'tcx>],
discr: Option<Const<'tcx>>)
-> Const<'tcx> {

View File

@ -10,7 +10,7 @@
use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, FullLayout, LayoutOf};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
@ -19,7 +19,7 @@ use base;
use builder::Builder;
use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, val_ty};
use consts;
use type_of::{self, LayoutLlvmExt};
use type_of::LayoutLlvmExt;
use type_::Type;
use value::Value;
use glue;
@ -54,8 +54,8 @@ impl ops::BitOr for Alignment {
}
}
impl<'a> From<FullLayout<'a>> for Alignment {
fn from(layout: FullLayout) -> Self {
impl<'a> From<TyLayout<'a>> for Alignment {
fn from(layout: TyLayout) -> Self {
if let layout::Abi::Aggregate { packed: true, align, .. } = layout.abi {
Alignment::Packed(align)
} else {
@ -86,7 +86,7 @@ pub struct LvalueRef<'tcx> {
pub llextra: ValueRef,
/// Monomorphized type of this lvalue, including variant information
pub layout: FullLayout<'tcx>,
pub layout: TyLayout<'tcx>,
/// Whether this lvalue is known to be aligned according to its layout
pub alignment: Alignment,
@ -94,7 +94,7 @@ pub struct LvalueRef<'tcx> {
impl<'a, 'tcx> LvalueRef<'tcx> {
pub fn new_sized(llval: ValueRef,
layout: FullLayout<'tcx>,
layout: TyLayout<'tcx>,
alignment: Alignment)
-> LvalueRef<'tcx> {
LvalueRef {
@ -105,7 +105,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
}
}
pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: FullLayout<'tcx>, name: &str)
pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str)
-> LvalueRef<'tcx> {
debug!("alloca({:?}: {:?})", name, layout);
let tmp = bcx.alloca(
@ -114,7 +114,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
}
pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
if let layout::FieldPlacement::Array { count, .. } = *self.layout.fields {
if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
if self.layout.is_unsized() {
assert!(self.has_extra());
assert_eq!(count, 0);
@ -163,7 +163,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
OperandValue::Pair(
self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(),
self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate())
} else if common::type_is_immediate(bcx.ccx, self.layout.ty) {
} else if self.layout.is_llvm_immediate() {
let mut const_llval = ptr::null_mut();
unsafe {
let global = llvm::LLVMIsAGlobalVariable(self.llval);
@ -202,28 +202,15 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
let ccx = bcx.ccx;
let field = self.layout.field(ccx, ix);
let offset = self.layout.fields.offset(ix).bytes();
let alignment = self.alignment | Alignment::from(self.layout);
// Unions and newtypes only use an offset of 0.
let has_llvm_fields = match *self.layout.fields {
layout::FieldPlacement::Union(_) => false,
layout::FieldPlacement::Array { .. } => true,
layout::FieldPlacement::Arbitrary { .. } => {
match self.layout.abi {
layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => false,
layout::Abi::Aggregate { .. } => true
}
}
};
let simple = || {
LvalueRef {
llval: if has_llvm_fields {
bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
} else {
assert_eq!(offset, 0);
// Unions and newtypes only use an offset of 0.
llval: if offset == 0 {
bcx.pointercast(self.llval, field.llvm_type(ccx).ptr_to())
} else {
bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
},
llextra: if ccx.shared().type_has_metadata(field.ty) {
self.llextra
@ -309,7 +296,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
/// Obtain the actual discriminant of a value.
pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx);
match *self.layout.layout {
match self.layout.layout {
layout::Layout::Univariant { .. } |
layout::Layout::UntaggedUnion { .. } => return C_uint(cast_to, 0),
_ => {}
@ -320,7 +307,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
layout::Abi::Scalar(discr) => discr,
_ => bug!("discriminant not scalar: {:#?}", discr.layout)
};
let (min, max) = match *self.layout.layout {
let (min, max) = match self.layout.layout {
layout::Layout::General { ref discr_range, .. } => (discr_range.start, discr_range.end),
_ => (0, u64::max_value()),
};
@ -346,7 +333,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
bcx.load(discr.llval, discr.alignment.non_abi())
}
};
match *self.layout.layout {
match self.layout.layout {
layout::Layout::General { .. } => {
let signed = match discr_scalar {
layout::Int(_, signed) => signed,
@ -369,7 +356,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
let to = self.layout.ty.ty_adt_def().unwrap()
.discriminant_for_variant(bcx.tcx(), variant_index)
.to_u128_unchecked() as u64;
match *self.layout.layout {
match self.layout.layout {
layout::Layout::General { .. } => {
let ptr = self.project_field(bcx, 0);
bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64),
@ -419,17 +406,9 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
let mut downcast = *self;
downcast.layout = self.layout.for_variant(variant_index);
// If this is an enum, cast to the appropriate variant struct type.
match *self.layout.layout {
layout::Layout::NullablePointer { .. } |
layout::Layout::General { .. } => {
let variant_ty = Type::struct_(bcx.ccx,
&type_of::struct_llfields(bcx.ccx, downcast.layout),
downcast.layout.is_packed());
downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
}
_ => {}
}
// Cast to the appropriate variant struct type.
let variant_ty = downcast.layout.llvm_type(bcx.ccx);
downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
downcast
}

View File

@ -12,18 +12,17 @@ use libc::c_uint;
use llvm::{self, ValueRef, BasicBlockRef};
use llvm::debuginfo::DIScope;
use rustc::ty::{self, TypeFoldable};
use rustc::ty::layout::{LayoutOf, FullLayout};
use rustc::ty::layout::{LayoutOf, TyLayout};
use rustc::mir::{self, Mir};
use rustc::ty::subst::Substs;
use rustc::infer::TransNormalize;
use rustc::session::config::FullDebugInfo;
use base;
use builder::Builder;
use common::{self, CrateContext, Funclet};
use common::{CrateContext, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
use monomorphize::Instance;
use abi::{ArgAttribute, FnType};
use type_of::{self, LayoutLlvmExt};
use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
use syntax::symbol::keywords;
@ -85,7 +84,7 @@ pub struct MirContext<'a, 'tcx:'a> {
/// directly using an `OperandRef`, which makes for tighter LLVM
/// IR. The conditions for using an `OperandRef` are as follows:
///
/// - the type of the local must be judged "immediate" by `type_is_immediate`
/// - the type of the local must be judged "immediate" by `is_llvm_immediate`
/// - the operand must never be referenced indirectly
/// - we should not take its address using the `&` operator
/// - nor should it appear in an lvalue path like `tmp.a`
@ -177,7 +176,7 @@ enum LocalRef<'tcx> {
}
impl<'a, 'tcx> LocalRef<'tcx> {
fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: FullLayout<'tcx>) -> LocalRef<'tcx> {
fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'tcx> {
if layout.is_zst() {
// Zero-size temporaries aren't always initialized, which
// doesn't matter because they don't contain data, but
@ -448,32 +447,14 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
assert!(!a.is_ignore() && a.cast.is_none() && a.pad.is_none());
assert!(!b.is_ignore() && b.cast.is_none() && b.pad.is_none());
let mut a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
bcx.set_value_name(a, &(name.clone() + ".0"));
llarg_idx += 1;
let mut b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
bcx.set_value_name(b, &(name + ".1"));
llarg_idx += 1;
if common::type_is_fat_ptr(bcx.ccx, arg.layout.ty) {
// FIXME(eddyb) As we can't perfectly represent the data and/or
// vtable pointer in a fat pointers in Rust's typesystem, and
// because we split fat pointers into two ArgType's, they're
// not the right type so we have to cast them for now.
let pointee = match arg.layout.ty.sty {
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => ty,
ty::TyAdt(def, _) if def.is_box() => arg.layout.ty.boxed_ty(),
_ => bug!()
};
let data_llty = bcx.ccx.layout_of(pointee).llvm_type(bcx.ccx);
let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee);
a = bcx.pointercast(a, data_llty.ptr_to());
bcx.set_value_name(a, &(name.clone() + ".ptr"));
b = bcx.pointercast(b, meta_llty);
bcx.set_value_name(b, &(name + ".meta"));
}
return LocalRef::Operand(Some(OperandRef {
val: OperandValue::Pair(a, b),
layout: arg.layout

View File

@ -10,7 +10,7 @@
use llvm::ValueRef;
use rustc::ty;
use rustc::ty::layout::{LayoutOf, FullLayout};
use rustc::ty::layout::{LayoutOf, TyLayout};
use rustc::mir;
use rustc_data_structures::indexed_vec::Idx;
@ -71,7 +71,7 @@ pub struct OperandRef<'tcx> {
pub val: OperandValue,
// The layout of value, based on its Rust type.
pub layout: FullLayout<'tcx>,
pub layout: TyLayout<'tcx>,
}
impl<'tcx> fmt::Debug for OperandRef<'tcx> {
@ -82,7 +82,7 @@ impl<'tcx> fmt::Debug for OperandRef<'tcx> {
impl<'a, 'tcx> OperandRef<'tcx> {
pub fn new_zst(ccx: &CrateContext<'a, 'tcx>,
layout: FullLayout<'tcx>) -> OperandRef<'tcx> {
layout: TyLayout<'tcx>) -> OperandRef<'tcx> {
assert!(layout.is_zst());
let llty = layout.llvm_type(ccx);
// FIXME(eddyb) ZSTs should always be immediate, not pairs.

View File

@ -18,6 +18,7 @@ use rustc_apfloat::{ieee, Float, Status, Round};
use rustc_const_math::MAX_F32_PLUS_HALF_ULP;
use std::{u128, i128};
use abi;
use base;
use builder::Builder;
use callee;
@ -26,7 +27,7 @@ use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_i
use consts;
use monomorphize;
use type_::Type;
use type_of::{self, LayoutLlvmExt};
use type_of::LayoutLlvmExt;
use value::Value;
use super::{MirContext, LocalRef};
@ -234,8 +235,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// &'a fmt::Debug+Send => &'a fmt::Debug,
// So we need to pointercast the base to ensure
// the types match up.
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty);
let lldata = bcx.pointercast(lldata, llcast_ty);
let thin_ptr = cast.field(bcx.ccx, abi::FAT_PTR_ADDR);
let lldata = bcx.pointercast(lldata, thin_ptr.llvm_type(bcx.ccx));
OperandValue::Pair(lldata, llextra)
}
OperandValue::Immediate(lldata) => {
@ -253,8 +254,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.layout.ty) => {
if let OperandValue::Pair(data_ptr, meta) = operand.val {
if common::type_is_fat_ptr(bcx.ccx, cast.ty) {
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty);
let data_cast = bcx.pointercast(data_ptr, llcast_ty);
let thin_ptr = cast.field(bcx.ccx, abi::FAT_PTR_ADDR);
let data_cast = bcx.pointercast(data_ptr,
thin_ptr.llvm_type(bcx.ccx));
OperandValue::Pair(data_cast, meta)
} else { // cast to thin-ptr
// Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
@ -268,7 +270,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
}
}
mir::CastKind::Misc => {
debug_assert!(common::type_is_immediate(bcx.ccx, cast.ty));
assert!(cast.is_llvm_immediate());
let r_t_in = CastTy::from_ty(operand.layout.ty)
.expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
@ -276,7 +278,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let ll_t_out = cast.immediate_llvm_type(bcx.ccx);
let llval = operand.immediate();
if let Layout::General { ref discr_range, .. } = *operand.layout.layout {
if let Layout::General { ref discr_range, .. } = operand.layout.layout {
if discr_range.end > discr_range.start {
// We want `table[e as usize]` to not
// have bound checks, and this is the most

View File

@ -207,10 +207,6 @@ impl Type {
ty!(llvm::LLVMVectorType(ty.to_ref(), len as c_uint))
}
pub fn vtable_ptr(ccx: &CrateContext) -> Type {
Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to()
}
pub fn kind(&self) -> TypeKind {
unsafe {
llvm::LLVMRustGetTypeKind(self.to_ref())

View File

@ -11,131 +11,68 @@
use abi::FnType;
use common::*;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, HasDataLayout, Align, LayoutOf, Size, FullLayout};
use rustc::ty::layout::{self, HasDataLayout, Align, LayoutOf, Size, TyLayout};
use trans_item::DefPathBasedNames;
use type_::Type;
use syntax::ast;
use std::fmt::Write;
pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
match ty.sty {
ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if ccx.shared().type_has_metadata(t) => {
ccx.layout_of(t).llvm_type(ccx).ptr_to()
}
ty::TyAdt(def, _) if def.is_box() => {
ccx.layout_of(ty.boxed_ty()).llvm_type(ccx).ptr_to()
}
_ => bug!("expected fat ptr ty but got {:?}", ty)
}
}
pub fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
let unsized_part = ccx.tcx().struct_tail(ty);
match unsized_part.sty {
ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => {
Type::uint_from_ty(ccx, ast::UintTy::Us)
}
ty::TyDynamic(..) => Type::vtable_ptr(ccx),
_ => bug!("Unexpected tail in unsized_info_ty: {:?} for ty={:?}",
unsized_part, ty)
}
}
fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
ty: Ty<'tcx>,
defer: &mut Option<(Type, FullLayout<'tcx>)>)
fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout: TyLayout<'tcx>,
defer: &mut Option<(Type, TyLayout<'tcx>)>)
-> Type {
let ptr_ty = |ty: Ty<'tcx>| {
if cx.shared().type_has_metadata(ty) {
if let ty::TyStr = ty.sty {
// This means we get a nicer name in the output (str is always
// unsized).
cx.str_slice_type()
} else {
let ptr_ty = cx.layout_of(ty).llvm_type(cx).ptr_to();
let info_ty = unsized_info_ty(cx, ty);
Type::struct_(cx, &[
Type::array(&Type::i8(cx), 0),
ptr_ty,
Type::array(&Type::i8(cx), 0),
info_ty,
Type::array(&Type::i8(cx), 0)
], false)
}
} else {
cx.layout_of(ty).llvm_type(cx).ptr_to()
match layout.abi {
layout::Abi::Scalar(_) => bug!("handled elsewhere"),
layout::Abi::Vector { .. } => {
return Type::vector(&layout.field(ccx, 0).llvm_type(ccx),
layout.fields.count() as u64);
}
};
match ty.sty {
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
return ptr_ty(ty);
}
ty::TyAdt(def, _) if def.is_box() => {
return ptr_ty(ty.boxed_ty());
}
ty::TyFnPtr(sig) => {
let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig);
return FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to();
}
_ => {}
layout::Abi::Aggregate { .. } => {}
}
let layout = cx.layout_of(ty);
if let layout::Abi::Scalar(value) = layout.abi {
let llty = match value {
layout::Int(layout::I1, _) => Type::i8(cx),
layout::Int(i, _) => Type::from_integer(cx, i),
layout::F32 => Type::f32(cx),
layout::F64 => Type::f64(cx),
layout::Pointer => {
cx.layout_of(layout::Pointer.to_ty(cx.tcx())).llvm_type(cx)
}
};
return llty;
}
if let layout::Abi::Vector { .. } = layout.abi {
return Type::vector(&layout.field(cx, 0).llvm_type(cx),
layout.fields.count() as u64);
}
let name = match ty.sty {
ty::TyClosure(..) | ty::TyGenerator(..) | ty::TyAdt(..) => {
let name = match layout.ty.sty {
ty::TyClosure(..) |
ty::TyGenerator(..) |
ty::TyAdt(..) |
ty::TyDynamic(..) |
ty::TyForeign(..) |
ty::TyStr => {
let mut name = String::with_capacity(32);
let printer = DefPathBasedNames::new(cx.tcx(), true, true);
printer.push_type_name(ty, &mut name);
let printer = DefPathBasedNames::new(ccx.tcx(), true, true);
printer.push_type_name(layout.ty, &mut name);
if let (&ty::TyAdt(def, _), Some(v)) = (&layout.ty.sty, layout.variant_index) {
write!(&mut name, "::{}", def.variants[v].name).unwrap();
}
Some(name)
}
_ => None
};
match *layout.fields {
match layout.fields {
layout::FieldPlacement::Union(_) => {
let size = layout.size(cx).bytes();
let fill = Type::array(&Type::i8(cx), size);
let size = layout.size(ccx).bytes();
let fill = Type::array(&Type::i8(ccx), size);
match name {
None => {
Type::struct_(cx, &[fill], layout.is_packed())
Type::struct_(ccx, &[fill], layout.is_packed())
}
Some(ref name) => {
let mut llty = Type::named_struct(cx, name);
let mut llty = Type::named_struct(ccx, name);
llty.set_struct_body(&[fill], layout.is_packed());
llty
}
}
}
layout::FieldPlacement::Array { count, .. } => {
Type::array(&layout.field(cx, 0).llvm_type(cx), count)
Type::array(&layout.field(ccx, 0).llvm_type(ccx), count)
}
layout::FieldPlacement::Arbitrary { .. } => {
match name {
None => {
Type::struct_(cx, &struct_llfields(cx, layout), layout.is_packed())
Type::struct_(ccx, &struct_llfields(ccx, layout), layout.is_packed())
}
Some(ref name) => {
let llty = Type::named_struct(cx, name);
let llty = Type::named_struct(ccx, name);
*defer = Some((llty, layout));
llty
}
@ -144,37 +81,37 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
}
}
pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
layout: FullLayout<'tcx>) -> Vec<Type> {
fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout: TyLayout<'tcx>) -> Vec<Type> {
debug!("struct_llfields: {:#?}", layout);
let align = layout.align(cx);
let size = layout.size(cx);
let align = layout.align(ccx);
let size = layout.size(ccx);
let field_count = layout.fields.count();
let mut offset = Size::from_bytes(0);
let mut result: Vec<Type> = Vec::with_capacity(1 + field_count * 2);
for i in layout.fields.index_by_increasing_offset() {
let field = layout.field(cx, i);
let field = layout.field(ccx, i);
let target_offset = layout.fields.offset(i as usize);
debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}",
i, field, offset, target_offset);
assert!(target_offset >= offset);
let padding = target_offset - offset;
result.push(Type::array(&Type::i8(cx), padding.bytes()));
result.push(Type::array(&Type::i8(ccx), padding.bytes()));
debug!(" padding before: {:?}", padding);
result.push(field.llvm_type(cx));
result.push(field.llvm_type(ccx));
if layout.is_packed() {
assert_eq!(padding.bytes(), 0);
} else {
let field_align = field.align(cx);
let field_align = field.align(ccx);
assert!(field_align.abi() <= align.abi(),
"non-packed type has field with larger align ({}): {:#?}",
field_align.abi(), layout);
}
offset = target_offset + field.size(cx);
offset = target_offset + field.size(ccx);
}
if !layout.is_unsized() && field_count > 0 {
if offset > size {
@ -184,7 +121,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let padding = size - offset;
debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
padding, offset, size);
result.push(Type::array(&Type::i8(cx), padding.bytes()));
result.push(Type::array(&Type::i8(ccx), padding.bytes()));
assert!(result.len() == 1 + field_count * 2);
} else {
debug!("struct_llfields: offset: {:?} stride: {:?}",
@ -210,13 +147,22 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> {
}
pub trait LayoutLlvmExt<'tcx> {
fn is_llvm_immediate(&self) -> bool;
fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
fn over_align(&self, ccx: &CrateContext) -> Option<Align>;
fn llvm_field_index(&self, index: usize) -> u64;
}
impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> {
impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
fn is_llvm_immediate(&self) -> bool {
match self.abi {
layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true,
layout::Abi::Aggregate { .. } => self.is_zst()
}
}
/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`.
/// The pointee type of the pointer in `LvalueRef` is always this type.
/// For sized types, it is also the right LLVM type for an `alloca`
@ -229,8 +175,42 @@ impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> {
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
if let layout::Abi::Scalar(value) = self.abi {
// Use a different cache for scalars because pointers to DSTs
// can be either fat or thin (data pointers of fat pointers).
if let Some(&llty) = ccx.scalar_lltypes().borrow().get(&self.ty) {
return llty;
}
let llty = match value {
layout::Int(layout::I1, _) => Type::i8(ccx),
layout::Int(i, _) => Type::from_integer(ccx, i),
layout::F32 => Type::f32(ccx),
layout::F64 => Type::f64(ccx),
layout::Pointer => {
let pointee = match self.ty.sty {
ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
ccx.layout_of(ty).llvm_type(ccx)
}
ty::TyAdt(def, _) if def.is_box() => {
ccx.layout_of(self.ty.boxed_ty()).llvm_type(ccx)
}
ty::TyFnPtr(sig) => {
let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
FnType::new(ccx, sig, &[]).llvm_type(ccx)
}
_ => Type::i8(ccx)
};
pointee.ptr_to()
}
};
ccx.scalar_lltypes().borrow_mut().insert(self.ty, llty);
return llty;
}
// Check the cache.
if let Some(&llty) = ccx.lltypes().borrow().get(&self.ty) {
if let Some(&llty) = ccx.lltypes().borrow().get(&(self.ty, self.variant_index)) {
return llty;
}
@ -244,13 +224,17 @@ impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> {
let mut defer = None;
let llty = if self.ty != normal_ty {
ccx.layout_of(normal_ty).llvm_type(ccx)
let mut layout = ccx.layout_of(normal_ty);
if let Some(v) = self.variant_index {
layout = layout.for_variant(v);
}
layout.llvm_type(ccx)
} else {
uncached_llvm_type(ccx, self.ty, &mut defer)
uncached_llvm_type(ccx, *self, &mut defer)
};
debug!("--> mapped {:#?} to llty={:?}", self, llty);
ccx.lltypes().borrow_mut().insert(self.ty, llty);
ccx.lltypes().borrow_mut().insert((self.ty, self.variant_index), llty);
if let Some((mut llty, layout)) = defer {
llty.set_struct_body(&struct_llfields(ccx, layout), layout.is_packed())
@ -279,11 +263,11 @@ impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> {
fn llvm_field_index(&self, index: usize) -> u64 {
if let layout::Abi::Scalar(_) = self.abi {
bug!("FullLayout::llvm_field_index({:?}): not applicable", self);
bug!("TyLayout::llvm_field_index({:?}): not applicable", self);
}
match *self.fields {
match self.fields {
layout::FieldPlacement::Union(_) => {
bug!("FullLayout::llvm_field_index({:?}): not applicable", self)
bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
}
layout::FieldPlacement::Array { .. } => {

View File

@ -24,10 +24,9 @@ pub fn helper(_: usize) {
pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
// We used to generate an extra alloca and memcpy for the block's trailing expression value, so
// check that we copy directly to the return value slot
// CHECK: %x.ptr = bitcast i8* %0 to [0 x i8]*
// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.ptr, 1
// CHECK: %2 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1, [[USIZE]] %x.meta, 3
// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %2
// CHECK: %0 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.0, 1
// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %0, [[USIZE]] %x.1, 3
// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1
{ x }
}

View File

@ -97,43 +97,43 @@ pub fn struct_return() -> S {
pub fn helper(_: usize) {
}
// CHECK: @slice(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta)
// CHECK: @slice([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1)
// FIXME #25759 This should also have `nocapture`
#[no_mangle]
pub fn slice(_: &[u8]) {
}
// CHECK: @mutable_slice(i8* nonnull %arg0.ptr, [[USIZE]] %arg0.meta)
// CHECK: @mutable_slice([0 x i8]* nonnull %arg0.0, [[USIZE]] %arg0.1)
// FIXME #25759 This should also have `nocapture`
// ... there's this LLVM bug that forces us to not use noalias, see #29485
#[no_mangle]
pub fn mutable_slice(_: &mut [u8]) {
}
// CHECK: @unsafe_slice(%UnsafeInner* nonnull %arg0.ptr, [[USIZE]] %arg0.meta)
// CHECK: @unsafe_slice([0 x %UnsafeInner]* nonnull %arg0.0, [[USIZE]] %arg0.1)
// unsafe interior means this isn't actually readonly and there may be aliases ...
#[no_mangle]
pub fn unsafe_slice(_: &[UnsafeInner]) {
}
// CHECK: @str(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta)
// CHECK: @str([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1)
// FIXME #25759 This should also have `nocapture`
#[no_mangle]
pub fn str(_: &[u8]) {
}
// CHECK: @trait_borrow({}* nonnull, {}* noalias nonnull readonly)
// CHECK: @trait_borrow(%"core::ops::drop::Drop"* nonnull %arg0.0, {}* noalias nonnull readonly %arg0.1)
// FIXME #25759 This should also have `nocapture`
#[no_mangle]
pub fn trait_borrow(_: &Drop) {
}
// CHECK: @trait_box({}* noalias nonnull, {}* noalias nonnull readonly)
// CHECK: @trait_box(%"core::ops::drop::Drop"* noalias nonnull, {}* noalias nonnull readonly)
#[no_mangle]
pub fn trait_box(_: Box<Drop>) {
}
// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta)
// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1)
#[no_mangle]
pub fn return_slice(x: &[u16]) -> &[u16] {
x

View File

@ -24,10 +24,10 @@ pub fn helper(_: usize) {
pub fn ref_dst(s: &[u8]) {
// We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
// directly to the alloca for "x"
// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 1
// CHECK: store [0 x i8]* %s.ptr, [0 x i8]** [[X0]]
// CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x to [0 x i8]**
// CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]]
// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 3
// CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]]
// CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]]
let x = &*s;
&x; // keep variable in an alloca