rustc: unpack scalar pair newtype layout ABIs.

Eduard-Mihai Burtescu 2017-10-09 19:56:41 +03:00
parent 37a7521ef9
commit 7a36141465
9 changed files with 185 additions and 156 deletions


@@ -1087,10 +1087,11 @@ impl<'a, 'tcx> CachedLayout {
                 // We have exactly one non-ZST field.
                 match (non_zst_fields.next(), non_zst_fields.next()) {
                     (Some(field), None) => {
-                        // Field size match and it has a scalar ABI.
+                        // Field size matches and it has a scalar or scalar pair ABI.
                         if size == field.size {
                             match field.abi {
-                                Abi::Scalar(_) => {
+                                Abi::Scalar(_) |
+                                Abi::ScalarPair(..) => {
                                     abi = field.abi.clone();
                                 }
                                 _ => {}
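Before this hunk, a univariant struct whose single non-ZST field filled it completely only inherited that field's ABI when the field was a single scalar; fat-pointer newtypes stayed aggregates. A minimal sketch of the kind of type that now picks up a `ScalarPair` ABI (illustrative, not part of the diff; the size/align equalities hold in practice but are not `repr(Rust)` guarantees):

    struct Wrapper<'a>(&'a [u8]); // single non-ZST field, same size as `&[u8]`

    fn main() {
        // With the unpacked ABI, `Wrapper` is passed as two immediates
        // (data pointer + length), just like the `&[u8]` it wraps.
        assert_eq!(std::mem::size_of::<Wrapper<'static>>(),
                   std::mem::size_of::<&[u8]>());
        assert_eq!(std::mem::align_of::<Wrapper<'static>>(),
                   std::mem::align_of::<&[u8]>());
    }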
@@ -2228,17 +2229,7 @@ impl<'a, 'tcx> TyLayout<'tcx> {
             ty::TyAdt(def, substs) => {
                 match self.variants {
                     Variants::Single { index } => {
-                        let mut field_ty = def.variants[index].fields[i].ty(tcx, substs);
-
-                        // Treat NonZero<*T> as containing &T.
-                        // This is especially useful for fat pointers.
-                        if Some(def.did) == tcx.lang_items().non_zero() {
-                            if let ty::TyRawPtr(mt) = field_ty.sty {
-                                field_ty = tcx.mk_ref(tcx.types.re_erased, mt);
-                            }
-                        }
-
-                        field_ty
+                        def.variants[index].fields[i].ty(tcx, substs)
                     }
 
                     // Discriminant field for enums (where applicable).
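The deleted block was a layout-level hack: for the old `NonZero` lang item it rewrote a raw-pointer field type into a reference so the data pointer would be treated as non-null. With scalar valid ranges carrying that information directly, the hack is no longer needed. The payoff it was buying, shown with today's stable equivalent (assuming current `std`; this code is not from the commit):

    use std::num::NonZeroUsize;

    fn main() {
        // The niche created by a non-zero valid range lets `None` be
        // encoded as 0, so the Option costs nothing.
        assert_eq!(std::mem::size_of::<Option<NonZeroUsize>>(),
                   std::mem::size_of::<usize>());
    }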
@@ -2294,21 +2285,22 @@ impl<'a, 'tcx> TyLayout<'tcx> {
         where C: LayoutOf<Ty<'tcx>, TyLayout = Result<Self, LayoutError<'tcx>>> +
                  HasTyCtxt<'tcx>
     {
-        if let Abi::Scalar(Scalar { value, ref valid_range }) = self.abi {
+        let scalar_component = |scalar: &Scalar, offset| {
             // FIXME(eddyb) support negative/wrap-around discriminant ranges.
-            return if valid_range.start < valid_range.end {
+            let Scalar { value, ref valid_range } = *scalar;
+            if valid_range.start < valid_range.end {
                 let bits = value.size(cx).bits();
                 assert!(bits <= 128);
                 let max_value = !0u128 >> (128 - bits);
                 if valid_range.start > 0 {
                     let niche = valid_range.start - 1;
-                    Ok(Some((self.fields.offset(0), Scalar {
+                    Ok(Some((offset, Scalar {
                         value,
                         valid_range: niche..=valid_range.end
                     }, niche)))
                 } else if valid_range.end < max_value {
                     let niche = valid_range.end + 1;
-                    Ok(Some((self.fields.offset(0), Scalar {
+                    Ok(Some((offset, Scalar {
                         value,
                         valid_range: valid_range.start..=niche
                     }, niche)))
@@ -2317,7 +2309,20 @@ impl<'a, 'tcx> TyLayout<'tcx> {
                 }
             } else {
                 Ok(None)
-            };
-        }
+            }
+        };
+
+        match self.abi {
+            Abi::Scalar(ref scalar) => {
+                return scalar_component(scalar, Size::from_bytes(0));
+            }
+            Abi::ScalarPair(ref a, ref b) => {
+                if let Some(result) = scalar_component(a, Size::from_bytes(0))? {
+                    return Ok(Some(result));
+                }
+                return scalar_component(b, a.value.size(cx).abi_align(b.value.align(cx)));
+            }
+            _ => {}
+        }
 
         // Perhaps one of the fields is non-zero, let's recurse and find out.
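`scalar_component` factors the old scalar-only niche search into a closure so it can run over each half of a `ScalarPair`, with the second half's offset computed as the first component's size aligned up to the second's alignment. A small demo of the payoff (assuming standard layouts): the non-null data pointer of a fat reference provides a niche even though the full value is two words:

    fn main() {
        // `&str` is a ScalarPair (pointer, length); the pointer half's
        // non-null valid range gives `Option<&str>` a free discriminant.
        assert_eq!(std::mem::size_of::<Option<&str>>(),
                   std::mem::size_of::<&str>());
    }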


@@ -240,6 +240,31 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
             let ptr_ty = bcx.ccx.layout_of(b).llvm_type(bcx.ccx).ptr_to();
             (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None))
         }
+        (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
+            assert_eq!(def_a, def_b);
+
+            let src_layout = bcx.ccx.layout_of(src_ty);
+            let dst_layout = bcx.ccx.layout_of(dst_ty);
+            let mut result = None;
+            for i in 0..src_layout.fields.count() {
+                let src_f = src_layout.field(bcx.ccx, i);
+                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
+                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
+                if src_f.is_zst() {
+                    continue;
+                }
+                assert_eq!(src_layout.size, src_f.size);
+
+                let dst_f = dst_layout.field(bcx.ccx, i);
+                assert_ne!(src_f.ty, dst_f.ty);
+                assert_eq!(result, None);
+                result = Some(unsize_thin_ptr(bcx, src, src_f.ty, dst_f.ty));
+            }
+            let (lldata, llextra) = result.unwrap();
+            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+            (bcx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bcx.ccx, 0)),
+             bcx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bcx.ccx, 1)))
+        }
         _ => bug!("unsize_thin_ptr: called on bad types"),
     }
 }
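The new `TyAdt` arm recurses through the unique non-ZST field of a pointer newtype, then bitcasts both halves to the destination's scalar-pair component types. The kind of user code that exercises it is a `CoerceUnsized` newtype; a sketch, assuming the nightly `unsize`/`coerce_unsized` features (not part of the diff):

    #![feature(unsize, coerce_unsized)]

    use std::marker::Unsize;
    use std::ops::CoerceUnsized;

    struct MyPtr<T: ?Sized>(*const T);

    impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<MyPtr<U>> for MyPtr<T> {}

    fn main() {
        let x = [1u8, 2, 3];
        let thin: MyPtr<[u8; 3]> = MyPtr(&x);
        // Unsizing recurses into the single non-ZST field (`*const [u8; 3]`),
        // producing the (data, length) pair for `*const [u8]`.
        let _fat: MyPtr<[u8]> = thin;
    }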


@@ -685,46 +685,19 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         let tuple = self.trans_operand(bcx, operand);
 
         // Handle both by-ref and immediate tuples.
-        match tuple.val {
-            Ref(llval, align) => {
-                let tuple_ptr = LvalueRef::new_sized(llval, tuple.layout, align);
-                for i in 0..tuple.layout.fields.count() {
-                    let field_ptr = tuple_ptr.project_field(bcx, i);
-                    self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[i]);
-                }
-            }
-            Immediate(llval) => {
-                for i in 0..tuple.layout.fields.count() {
-                    let field = tuple.layout.field(bcx.ccx, i);
-                    let elem = if field.is_zst() {
-                        C_undef(field.llvm_type(bcx.ccx))
-                    } else {
-                        // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                        bcx.bitcast(llval, field.immediate_llvm_type(bcx.ccx))
-                    };
-                    // If the tuple is immediate, the elements are as well
-                    let op = OperandRef {
-                        val: Immediate(elem),
-                        layout: field,
-                    };
-                    self.trans_argument(bcx, op, llargs, &args[i]);
-                }
-            }
-            Pair(a, b) => {
-                let elems = [a, b];
-                assert_eq!(tuple.layout.fields.count(), 2);
-                for i in 0..2 {
-                    // Pair is always made up of immediates
-                    let op = OperandRef {
-                        val: Immediate(elems[i]),
-                        layout: tuple.layout.field(bcx.ccx, i),
-                    };
-                    self.trans_argument(bcx, op, llargs, &args[i]);
-                }
-            }
+        if let Ref(llval, align) = tuple.val {
+            let tuple_ptr = LvalueRef::new_sized(llval, tuple.layout, align);
+            for i in 0..tuple.layout.fields.count() {
+                let field_ptr = tuple_ptr.project_field(bcx, i);
+                self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[i]);
+            }
+        } else {
+            // If the tuple is immediate, the elements are as well.
+            for i in 0..tuple.layout.fields.count() {
+                let op = tuple.extract_field(bcx, i);
+                self.trans_argument(bcx, op, llargs, &args[i]);
+            }
         }
     }
 
     fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> LvalueRef<'tcx> {
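With `extract_field` available, the by-value cases (immediate and pair) collapse into one loop; only the by-ref case still loads each field from memory. This path handles "spread" tuple arguments, for example an explicit `Fn` call; a sketch assuming the nightly `fn_traits` feature (not part of the diff):

    #![feature(fn_traits)]

    fn main() {
        let f = |a: i32, b: &str| format!("{}{}", b, a);
        // The tuple `(1, "x")` is the spread operand that trans untuples
        // into the two real arguments of the closure.
        let s = std::ops::Fn::call(&f, (1, "x"));
        assert_eq!(s, "x1");
    }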


@@ -127,8 +127,12 @@ impl<'a, 'tcx> Const<'tcx> {
             layout::Abi::ScalarPair(ref a, ref b) => {
                 let offset = layout.fields.offset(i);
                 if offset.bytes() == 0 {
-                    assert_eq!(field.size, a.value.size(ccx));
-                    const_get_elt(self.llval, 0)
+                    if field.size == layout.size {
+                        self.llval
+                    } else {
+                        assert_eq!(field.size, a.value.size(ccx));
+                        const_get_elt(self.llval, 0)
+                    }
                 } else {
                     assert_eq!(offset, a.value.size(ccx)
                         .abi_align(b.value.align(ccx)));
@@ -166,8 +170,9 @@ impl<'a, 'tcx> Const<'tcx> {
         let llvalty = val_ty(self.llval);
 
         let val = if llty == llvalty && layout.is_llvm_scalar_pair() {
-            let (a, b) = self.get_pair(ccx);
-            OperandValue::Pair(a, b)
+            OperandValue::Pair(
+                const_get_elt(self.llval, 0),
+                const_get_elt(self.llval, 1))
         } else if llty == llvalty && layout.is_llvm_immediate() {
             // If the types match, we can use the value directly.
             OperandValue::Immediate(self.llval)
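The first hunk lets a field projection reuse the whole constant when the field spans the entire layout (the newtype case); the second builds the pair directly from the constant's elements instead of round-tripping through `get_pair`. A constant of such a newtype type, for illustration (not from the commit):

    struct Slice<'a>(&'a [u8]);

    // `Slice` is a newtype of a scalar pair, so projecting `.0` out of
    // the constant can now reuse the pair value as-is.
    const C: Slice<'static> = Slice(b"abc");

    fn main() {
        assert_eq!(C.0.len(), 3);
    }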


@@ -135,6 +135,31 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
             return OperandRef::new_zst(bcx.ccx, self.layout);
         }
 
+        let scalar_load_metadata = |load, scalar: &layout::Scalar| {
+            let (min, max) = (scalar.valid_range.start, scalar.valid_range.end);
+            let max_next = max.wrapping_add(1);
+            let bits = scalar.value.size(bcx.ccx).bits();
+            assert!(bits <= 128);
+            let mask = !0u128 >> (128 - bits);
+            // For a (max) value of -1, max will be `-1 as usize`, which overflows.
+            // However, that is fine here (it would still represent the full range),
+            // i.e., if the range is everything. The lo==hi case would be
+            // rejected by the LLVM verifier (it would mean either an
+            // empty set, which is impossible, or the entire range of the
+            // type, which is pointless).
+            match scalar.value {
+                layout::Int(..) if max_next & mask != min & mask => {
+                    // llvm::ConstantRange can deal with ranges that wrap around,
+                    // so an overflow on (max + 1) is fine.
+                    bcx.range_metadata(load, min..max_next);
+                }
+                layout::Pointer if 0 < min && min < max => {
+                    bcx.nonnull_metadata(load);
+                }
+                _ => {}
+            }
+        };
+
         let val = if self.layout.is_llvm_immediate() {
             let mut const_llval = ptr::null_mut();
             unsafe {
@@ -149,39 +174,27 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
             } else {
                 let load = bcx.load(self.llval, self.alignment.non_abi());
                 if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
-                    let (min, max) = (scalar.valid_range.start, scalar.valid_range.end);
-                    let max_next = max.wrapping_add(1);
-                    let bits = scalar.value.size(bcx.ccx).bits();
-                    assert!(bits <= 128);
-                    let mask = !0u128 >> (128 - bits);
-                    // For a (max) value of -1, max will be `-1 as usize`, which overflows.
-                    // However, that is fine here (it would still represent the full range),
-                    // i.e., if the range is everything. The lo==hi case would be
-                    // rejected by the LLVM verifier (it would mean either an
-                    // empty set, which is impossible, or the entire range of the
-                    // type, which is pointless).
-                    match scalar.value {
-                        layout::Int(..) if max_next & mask != min & mask => {
-                            // llvm::ConstantRange can deal with ranges that wrap around,
-                            // so an overflow on (max + 1) is fine.
-                            bcx.range_metadata(load, min..max_next);
-                        }
-                        layout::Pointer if 0 < min && min < max => {
-                            bcx.nonnull_metadata(load);
-                        }
-                        _ => {}
-                    }
+                    scalar_load_metadata(load, scalar);
                 }
                 load
             };
             OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout))
-        } else if self.layout.is_llvm_scalar_pair() {
-            let load = |i| {
-                let x = self.project_field(bcx, i).load(bcx).immediate();
-                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                bcx.bitcast(x, self.layout.scalar_pair_element_llvm_type(bcx.ccx, i))
+        } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
+            let load = |i, scalar: &layout::Scalar| {
+                let mut llptr = bcx.struct_gep(self.llval, i as u64);
+                // Make sure to always load i1 as i8.
+                if scalar.is_bool() {
+                    llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
+                }
+                let load = bcx.load(llptr, self.alignment.non_abi());
+                scalar_load_metadata(load, scalar);
+                if scalar.is_bool() {
+                    bcx.trunc(load, Type::i1(bcx.ccx))
+                } else {
+                    load
+                }
             };
-            OperandValue::Pair(load(0), load(1))
+            OperandValue::Pair(load(0, a), load(1, b))
         } else {
             OperandValue::Ref(self.llval, self.alignment)
         };
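Hoisting the range/nonnull metadata into the `scalar_load_metadata` closure lets the new `ScalarPair` branch annotate each half of the pair as it is loaded, and the `i1`/`i8` casts keep `bool` stored as a byte in memory while remaining an `i1` SSA value. A type that hits the new branch, as a sketch (assuming `(bool, u32)` is laid out as a ScalarPair, which it is in practice):

    // Loading `pair` from memory now emits two scalar loads: the bool half
    // is loaded as i8, given range metadata, then truncated back to i1.
    fn flip(pair: (bool, u32)) -> (bool, u32) {
        (!pair.0, pair.1 + 1)
    }

    fn main() {
        assert_eq!(flip((false, 1)), (true, 2));
    }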


@@ -15,10 +15,11 @@ use rustc::mir;
 use rustc_data_structures::indexed_vec::Idx;
 
 use base;
-use common::{CrateContext, C_undef, C_usize};
+use common::{self, CrateContext, C_undef, C_usize};
 use builder::Builder;
 use value::Value;
 use type_of::LayoutLlvmExt;
+use type_::Type;
 
 use std::fmt;
 use std::ptr;
@@ -84,7 +85,7 @@ impl<'a, 'tcx> OperandRef<'tcx> {
                       layout: TyLayout<'tcx>) -> OperandRef<'tcx> {
         assert!(layout.is_zst());
         OperandRef {
-            val: OperandValue::Immediate(C_undef(layout.llvm_type(ccx))),
+            val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(ccx))),
             layout
         }
     }
@@ -148,6 +149,66 @@ impl<'a, 'tcx> OperandRef<'tcx> {
         };
         OperandRef { val, layout }
     }
+
+    pub fn extract_field(&self, bcx: &Builder<'a, 'tcx>, i: usize) -> OperandRef<'tcx> {
+        let field = self.layout.field(bcx.ccx, i);
+        let offset = self.layout.fields.offset(i);
+
+        let mut val = match (self.val, &self.layout.abi) {
+            // If we're uninhabited, or the field is ZST, it has no data.
+            _ if self.layout.abi == layout::Abi::Uninhabited || field.is_zst() => {
+                return OperandRef {
+                    val: OperandValue::Immediate(C_undef(field.immediate_llvm_type(bcx.ccx))),
+                    layout: field
+                };
+            }
+
+            // Newtype of a scalar or scalar pair.
+            (OperandValue::Immediate(_), _) |
+            (OperandValue::Pair(..), _) if field.size == self.layout.size => {
+                assert_eq!(offset.bytes(), 0);
+                self.val
+            }
+
+            // Extract a scalar component from a pair.
+            (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => {
+                if offset.bytes() == 0 {
+                    assert_eq!(field.size, a.value.size(bcx.ccx));
+                    OperandValue::Immediate(a_llval)
+                } else {
+                    assert_eq!(offset, a.value.size(bcx.ccx)
+                        .abi_align(b.value.align(bcx.ccx)));
+                    assert_eq!(field.size, b.value.size(bcx.ccx));
+                    OperandValue::Immediate(b_llval)
+                }
+            }
+
+            // `#[repr(simd)]` types are also immediate.
+            (OperandValue::Immediate(llval), &layout::Abi::Vector) => {
+                OperandValue::Immediate(
+                    bcx.extract_element(llval, C_usize(bcx.ccx, i as u64)))
+            }
+
+            _ => bug!("OperandRef::extract_field({:?}): not applicable", self)
+        };
+
+        // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+        match val {
+            OperandValue::Immediate(ref mut llval) => {
+                *llval = bcx.bitcast(*llval, field.immediate_llvm_type(bcx.ccx));
+            }
+            OperandValue::Pair(ref mut a, ref mut b) => {
+                *a = bcx.bitcast(*a, field.scalar_pair_element_llvm_type(bcx.ccx, 0));
+                *b = bcx.bitcast(*b, field.scalar_pair_element_llvm_type(bcx.ccx, 1));
+            }
+            OperandValue::Ref(..) => bug!()
+        }
+
+        OperandRef {
+            val,
+            layout: field
+        }
+    }
 }
 
 impl<'a, 'tcx> OperandValue {
@@ -167,11 +228,12 @@ impl<'a, 'tcx> OperandValue {
             }
             OperandValue::Pair(a, b) => {
                 for (i, &x) in [a, b].iter().enumerate() {
-                    let field = dest.project_field(bcx, i);
-                    // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                    let x = bcx.bitcast(x, field.layout.immediate_llvm_type(bcx.ccx));
-                    bcx.store(base::from_immediate(bcx, x),
-                              field.llval, field.alignment.non_abi());
+                    let mut llptr = bcx.struct_gep(dest.llval, i as u64);
+                    // Make sure to always store i1 as i8.
+                    if common::val_ty(x) == Type::i1(bcx.ccx) {
+                        llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
+                    }
+                    bcx.store(base::from_immediate(bcx, x), llptr, dest.alignment.non_abi());
                 }
             }
         }
@@ -202,52 +264,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             }
         }
 
-        // Moves out of pair fields are trivial.
+        // Moves out of scalar and scalar pair fields are trivial.
         if let &mir::Lvalue::Projection(ref proj) = lvalue {
             if let mir::ProjectionElem::Field(ref f, _) = proj.elem {
                 if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) {
-                    let layout = o.layout.field(bcx.ccx, f.index());
-                    let offset = o.layout.fields.offset(f.index());
-
-                    // Handled in `trans_consume`.
-                    assert!(!layout.is_zst());
-
-                    // Offset has to match a scalar component.
-                    let llval = match (o.val, &o.layout.abi) {
-                        (OperandValue::Immediate(llval),
-                         &layout::Abi::Scalar(ref scalar)) => {
-                            assert_eq!(offset.bytes(), 0);
-                            assert_eq!(layout.size, scalar.value.size(bcx.ccx));
-                            llval
-                        }
-
-                        (OperandValue::Pair(a_llval, b_llval),
-                         &layout::Abi::ScalarPair(ref a, ref b)) => {
-                            if offset.bytes() == 0 {
-                                assert_eq!(layout.size, a.value.size(bcx.ccx));
-                                a_llval
-                            } else {
-                                assert_eq!(offset, a.value.size(bcx.ccx)
-                                    .abi_align(b.value.align(bcx.ccx)));
-                                assert_eq!(layout.size, b.value.size(bcx.ccx));
-                                b_llval
-                            }
-                        }
-
-                        // `#[repr(simd)]` types are also immediate.
-                        (OperandValue::Immediate(llval),
-                         &layout::Abi::Vector) => {
-                            bcx.extract_element(llval, C_usize(bcx.ccx, f.index() as u64))
-                        }
-
-                        _ => return None
-                    };
-
-                    // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                    let llval = bcx.bitcast(llval, layout.immediate_llvm_type(bcx.ccx));
-                    return Some(OperandRef {
-                        val: OperandValue::Immediate(llval),
-                        layout
-                    });
+                    return Some(o.extract_field(bcx, f.index()));
                 }
             }
         }
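`extract_field` centralizes what `maybe_trans_consume_direct` used to open-code: moving a scalar or scalar-pair component out of an immediate operand, including the whole-value newtype case and the final pointer bitcasts. MIR like the following now consumes the field without touching memory (illustrative, not from the commit):

    struct Wrapper<'a>(&'a str);

    fn unwrap_field<'a>(w: Wrapper<'a>) -> &'a str {
        // `w` is an immediate ScalarPair; `w.0` is extract_field(0),
        // which reuses both halves of the pair unchanged.
        w.0
    }

    fn main() {
        assert_eq!(unwrap_field(Wrapper("hi")), "hi");
    }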


@@ -223,20 +223,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         operand.val
                     }
                     mir::CastKind::Unsize => {
-                        // unsize targets other than to a fat pointer currently
-                        // can't be operands.
-                        assert!(common::type_is_fat_ptr(bcx.ccx, cast.ty));
                         match operand.val {
                             OperandValue::Pair(lldata, llextra) => {
                                 // unsize from a fat pointer - this is a
                                 // "trait-object-to-supertrait" coercion, for
                                 // example,
                                 //   &'a fmt::Debug+Send => &'a fmt::Debug,
-                                // So we need to pointercast the base to ensure
-                                // the types match up.
-                                let thin_ptr = cast.field(bcx.ccx, abi::FAT_PTR_ADDR);
-                                let lldata = bcx.pointercast(lldata, thin_ptr.llvm_type(bcx.ccx));
+                                // HACK(eddyb) have to bitcast pointers
+                                // until LLVM removes pointee types.
+                                let lldata = bcx.pointercast(lldata,
+                                    cast.scalar_pair_element_llvm_type(bcx.ccx, 0));
                                 OperandValue::Pair(lldata, llextra)
                             }
                             OperandValue::Immediate(lldata) => {
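The removed assert relied on `type_is_fat_ptr`, which does not cover fat-pointer newtypes now that they are operands too; the cast's first scalar-pair component type is used for the pointercast instead. The `Pair` arm corresponds to coercions such as dropping an auto trait from a trait object, as in this sketch:

    use std::fmt::Debug;

    // Fat pointer to fat pointer: only the data half is pointercast,
    // the vtable half is reused unchanged.
    fn upcast<'a>(x: &'a (dyn Debug + Send)) -> &'a dyn Debug {
        x
    }

    fn main() {
        println!("{:?}", upcast(&1u32));
    }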


@@ -491,16 +491,6 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
                 }
             }
         }
-
-        if let ty::TyAdt(def, _) = self.ty.sty {
-            if Some(def.did) == ccx.tcx().lang_items().non_zero() {
-                // FIXME(eddyb) Don't treat NonZero<*T> as
-                // as containing &T in ty::layout.
-                if let Some(ref mut pointee) = result {
-                    pointee.safe = None;
-                }
-            }
-        }
     }
 }


@@ -24,7 +24,7 @@ pub fn helper(_: usize) {
 pub fn ref_dst(s: &[u8]) {
     // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
     // directly to the alloca for "x"
-    // CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8]*, [[USIZE]] }* %x to [0 x i8]**
+    // CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 0
     // CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]]
     // CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1
     // CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]]