rustc_trans: always require alignment for load/store/memcpy.

Eduard-Mihai Burtescu 2017-12-02 00:28:43 +02:00
parent 16307465d5
commit 5cab0bf0ad
10 changed files with 56 additions and 61 deletions
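
In short, the hunks below make Builder::load, Builder::store, memcpy_ty, and their call sites in rustc_trans take a plain Align instead of Option<Align>: every load, store, and memcpy must now state its alignment explicitly rather than falling back to an unspecified default. A minimal standalone sketch of that signature change (the Align and Builder types here are simplified stand-ins, not the real rustc_trans ones):

// Standalone sketch (not rustc_trans code): models the change from an
// optional alignment hint to a required alignment parameter.
#[derive(Copy, Clone)]
pub struct Align(u64); // hypothetical stand-in for ty::layout::Align

impl Align {
    pub fn abi(self) -> u64 {
        self.0
    }
}

pub struct Builder;

impl Builder {
    // Before this commit the parameter was `align: Option<Align>`, and `None`
    // meant "emit the load without an explicit alignment". After it, callers
    // must always pass the pointee's alignment.
    pub fn load(&self, _ptr: *const u8, align: Align) -> u64 {
        // A real builder would emit LLVMBuildLoad and then LLVMSetAlignment here.
        align.abi()
    }
}

fn main() {
    let bcx = Builder;
    let x: u32 = 42;
    // Call sites now spell the alignment out, e.g. `layout.align` or `dest.align`.
    let used = bcx.load(&x as *const u32 as *const u8, Align(4));
    println!("load emitted with alignment {}", used);
}

This mirrors the builder.rs hunk below, where the `if let Some(align) = align` guard around LLVMSetAlignment is dropped and the alignment is always applied.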

@@ -568,7 +568,7 @@ impl<'a, 'tcx> ArgType<'tcx> {
let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr {
let cast_dst = bcx.pointercast(dst.llval, cast.llvm_type(ccx).ptr_to());
bcx.store(val, cast_dst, Some(self.layout.align));
bcx.store(val, cast_dst, self.layout.align);
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
@@ -585,19 +585,20 @@ impl<'a, 'tcx> ArgType<'tcx> {
// bitcasting to the struct type yields invalid cast errors.
// We instead thus allocate some scratch space...
let llscratch = bcx.alloca(cast.llvm_type(ccx), "abi_cast", cast.align(ccx));
let scratch_size = cast.size(ccx);
let scratch_align = cast.align(ccx);
let llscratch = bcx.alloca(cast.llvm_type(ccx), "abi_cast", scratch_align);
bcx.lifetime_start(llscratch, scratch_size);
// ...where we first store the value...
bcx.store(val, llscratch, None);
bcx.store(val, llscratch, scratch_align);
// ...and then memcpy it to the intended destination.
base::call_memcpy(bcx,
bcx.pointercast(dst.llval, Type::i8p(ccx)),
bcx.pointercast(llscratch, Type::i8p(ccx)),
C_usize(ccx, self.layout.size.bytes()),
self.layout.align.min(cast.align(ccx)));
self.layout.align.min(scratch_align));
bcx.lifetime_end(llscratch, scratch_size);
}
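
Aside: the comment in this hunk describes the workaround for cast types that cannot be bitcast to the struct type directly — store the value into scratch space that has the cast type's size and alignment, then memcpy only the struct's own size into the destination. A byte-level, standalone illustration of that store-then-copy pattern (plain Rust, not rustc_trans; the sizes are made up and alignment handling is elided):

// Standalone illustration (not rustc_trans): write the scalar value into scratch
// space sized for the cast type, then copy only the destination's own size,
// mirroring the alloca + store + call_memcpy sequence above.
fn store_via_scratch(dst: &mut [u8; 3], val: u32) {
    // Scratch space: big enough for the cast type (u32 here).
    let scratch = val.to_ne_bytes();
    // Copy only the destination's size, as call_memcpy does with layout.size.
    dst.copy_from_slice(&scratch[..3]);
}

fn main() {
    let mut dst = [0u8; 3];
    store_via_scratch(&mut dst, 0x0043_4241);
    // Prints the three low-order bytes of the value, e.g. [65, 66, 67] on little-endian.
    println!("{:?}", dst);
}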

@@ -316,7 +316,7 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
if src_f.layout.ty == dst_f.layout.ty {
memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f.layout,
Some(src_f.align.min(dst_f.align)));
src_f.align.min(dst_f.align));
} else {
coerce_unsized_into(bcx, src_f, dst_f);
}
@@ -430,14 +430,13 @@ pub fn memcpy_ty<'a, 'tcx>(
dst: ValueRef,
src: ValueRef,
layout: TyLayout<'tcx>,
align: Option<Align>,
align: Align,
) {
let size = layout.size.bytes();
if size == 0 {
return;
}
let align = align.unwrap_or(layout.align);
call_memcpy(bcx, dst, src, C_usize(bcx.ccx, size), align);
}

@@ -518,13 +518,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
pub fn load(&self, ptr: ValueRef, align: Option<Align>) -> ValueRef {
pub fn load(&self, ptr: ValueRef, align: Align) -> ValueRef {
self.count_insn("load");
unsafe {
let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
if let Some(align) = align {
llvm::LLVMSetAlignment(load, align.abi() as c_uint);
}
llvm::LLVMSetAlignment(load, align.abi() as c_uint);
load
}
}
@@ -573,16 +571,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option<Align>) -> ValueRef {
pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Align) -> ValueRef {
debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
assert!(!self.llbuilder.is_null());
self.count_insn("store");
let ptr = self.check_store(val, ptr);
unsafe {
let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
if let Some(align) = align {
llvm::LLVMSetAlignment(store, align.abi() as c_uint);
}
llvm::LLVMSetAlignment(store, align.abi() as c_uint);
store
}
}

@@ -254,7 +254,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
bcx.volatile_store(b, dst.project_field(bcx, 1).llval);
} else {
let val = if let OperandValue::Ref(ptr, align) = args[1].val {
bcx.load(ptr, Some(align))
bcx.load(ptr, align)
} else {
if dst.layout.is_zst() {
return;
@@ -330,9 +330,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let overflow = bcx.zext(bcx.extract_value(pair, 1), Type::bool(ccx));
let dest = result.project_field(bcx, 0);
bcx.store(val, dest.llval, dest.non_abi_align());
bcx.store(val, dest.llval, dest.align);
let dest = result.project_field(bcx, 1);
bcx.store(overflow, dest.llval, dest.non_abi_align());
bcx.store(overflow, dest.llval, dest.align);
return;
},
@@ -473,9 +473,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let success = bcx.zext(bcx.extract_value(pair, 1), Type::bool(bcx.ccx));
let dest = result.project_field(bcx, 0);
bcx.store(val, dest.llval, dest.non_abi_align());
bcx.store(val, dest.llval, dest.align);
let dest = result.project_field(bcx, 1);
bcx.store(success, dest.llval, dest.non_abi_align());
bcx.store(success, dest.llval, dest.align);
return;
} else {
return invalid_monomorphization(ty);
@@ -544,7 +544,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let tp_ty = substs.type_at(0);
let dst = args[0].deref(bcx.ccx);
let val = if let OperandValue::Ref(ptr, align) = args[1].val {
bcx.load(ptr, Some(align))
bcx.load(ptr, align)
} else {
from_immediate(bcx, args[1].immediate())
};
@@ -677,7 +677,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
for i in 0..elems.len() {
let dest = result.project_field(bcx, i);
let val = bcx.extract_value(val, i as u64);
bcx.store(val, dest.llval, dest.non_abi_align());
bcx.store(val, dest.llval, dest.align);
}
return;
}
@@ -688,8 +688,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
if !fn_ty.ret.is_ignore() {
if let PassMode::Cast(ty) = fn_ty.ret.mode {
let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to());
bcx.store(llval, ptr, Some(ccx.align_of(ret_ty)));
let ptr = bcx.pointercast(result.llval, ty.llvm_type(ccx).ptr_to());
bcx.store(llval, ptr, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(bcx, llval, result.layout)
.val.store(bcx, result);
@@ -758,7 +758,8 @@ fn try_intrinsic<'a, 'tcx>(
) {
if bcx.sess().no_landing_pads() {
bcx.call(func, &[data], None);
bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None);
let ptr_align = bcx.tcx().data_layout.pointer_align;
bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, ptr_align);
} else if wants_msvc_seh(bcx.sess()) {
trans_msvc_try(bcx, ccx, func, data, local_ptr, dest);
} else {
@@ -833,7 +834,8 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
//
// More information can be found in libstd's seh.rs implementation.
let i64p = Type::i64(ccx).ptr_to();
let slot = bcx.alloca(i64p, "slot", ccx.data_layout().pointer_align);
let ptr_align = bcx.tcx().data_layout.pointer_align;
let slot = bcx.alloca(i64p, "slot", ptr_align);
bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
None);
@@ -848,13 +850,15 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
None => bug!("msvc_try_filter not defined"),
};
let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(ccx, 0), slot]);
let addr = catchpad.load(slot, None);
let arg1 = catchpad.load(addr, None);
let addr = catchpad.load(slot, ptr_align);
let i64_align = bcx.tcx().data_layout.i64_align;
let arg1 = catchpad.load(addr, i64_align);
let val1 = C_i32(ccx, 1);
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), None);
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
let local_ptr = catchpad.bitcast(local_ptr, i64p);
catchpad.store(arg1, local_ptr, None);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), None);
catchpad.store(arg1, local_ptr, i64_align);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
catchpad.catch_ret(tok, caught.llbb());
caught.ret(C_i32(ccx, 1));
@@ -863,7 +867,8 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bcx.call(llfn, &[func, data, local_ptr], None);
bcx.store(ret, dest, None);
let i32_align = bcx.tcx().data_layout.i32_align;
bcx.store(ret, dest, i32_align);
}
// Definition of the standard "try" function for Rust using the GNU-like model
@@ -923,14 +928,16 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.llfn());
catch.add_clause(vals, C_null(Type::i8p(ccx)));
let ptr = catch.extract_value(vals, 0);
catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), None);
let ptr_align = bcx.tcx().data_layout.pointer_align;
catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), ptr_align);
catch.ret(C_i32(ccx, 1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bcx.call(llfn, &[func, data, local_ptr], None);
bcx.store(ret, dest, None);
let i32_align = bcx.tcx().data_layout.i32_align;
bcx.store(ret, dest, i32_align);
}
// Helper function to give a Block to a closure to translate a shim function.

@@ -40,7 +40,8 @@ impl<'a, 'tcx> VirtualIndex {
debug!("get_fn({:?}, {:?})", Value(llvtable), self);
let llvtable = bcx.pointercast(llvtable, fn_ty.llvm_type(bcx.ccx).ptr_to().ptr_to());
let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None);
let ptr_align = bcx.tcx().data_layout.pointer_align;
let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), ptr_align);
bcx.nonnull_metadata(ptr);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
@@ -52,7 +53,8 @@ impl<'a, 'tcx> VirtualIndex {
debug!("get_int({:?}, {:?})", Value(llvtable), self);
let llvtable = bcx.pointercast(llvtable, Type::isize(bcx.ccx).ptr_to());
let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None);
let usize_align = bcx.tcx().data_layout.pointer_align;
let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), usize_align);
// Vtable loads are invariant
bcx.set_invariant_load(ptr);
ptr

@@ -216,7 +216,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
PassMode::Direct(_) | PassMode::Pair(..) => {
let op = self.trans_consume(&bcx, &mir::Place::Local(mir::RETURN_PLACE));
if let Ref(llval, align) = op.val {
bcx.load(llval, Some(align))
bcx.load(llval, align)
} else {
op.immediate_or_packed_pair(&bcx)
}
@@ -247,7 +247,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
};
bcx.load(
bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()),
Some(self.fn_ty.ret.layout.align))
self.fn_ty.ret.layout.align)
}
};
bcx.ret(llval);
@@ -653,7 +653,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// have scary latent bugs around.
let scratch = PlaceRef::alloca(bcx, arg.layout, "arg");
base::memcpy_ty(bcx, scratch.llval, llval, op.layout, Some(align));
base::memcpy_ty(bcx, scratch.llval, llval, op.layout, align);
(scratch.llval, scratch.align, true)
} else {
(llval, align, true)
@@ -665,14 +665,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Have to load the argument, maybe while casting it.
if let PassMode::Cast(ty) = arg.mode {
llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()),
Some(align.min(arg.layout.align)));
align.min(arg.layout.align));
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI
// used for this call is passing it by-value. In that case,
// the load would just produce `OperandValue::Ref` instead
// of the `OperandValue::Immediate` we need for the call.
llval = bcx.load(llval, Some(align));
llval = bcx.load(llval, align);
if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
if scalar.is_bool() {
bcx.range_metadata(llval, 0..2);

@@ -530,11 +530,11 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// doesn't actually strip the offset when splitting the closure
// environment into its components so it ends up out of bounds.
let env_ptr = if !env_ref {
let alloc = PlaceRef::alloca(bcx,
let scratch = PlaceRef::alloca(bcx,
bcx.ccx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
"__debuginfo_env_ptr");
bcx.store(place.llval, alloc.llval, None);
alloc.llval
bcx.store(place.llval, scratch.llval, scratch.align);
scratch.llval
} else {
place.llval
};

@@ -223,9 +223,9 @@ impl<'a, 'tcx> OperandValue {
match self {
OperandValue::Ref(r, source_align) =>
base::memcpy_ty(bcx, dest.llval, r, dest.layout,
Some(source_align.min(dest.align))),
source_align.min(dest.align)),
OperandValue::Immediate(s) => {
bcx.store(base::from_immediate(bcx, s), dest.llval, dest.non_abi_align());
bcx.store(base::from_immediate(bcx, s), dest.llval, dest.align);
}
OperandValue::Pair(a, b) => {
for (i, &x) in [a, b].iter().enumerate() {
@@ -234,7 +234,7 @@ impl<'a, 'tcx> OperandValue {
if common::val_ty(x) == Type::i1(bcx.ccx) {
llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
}
bcx.store(base::from_immediate(bcx, x), llptr, dest.non_abi_align());
bcx.store(base::from_immediate(bcx, x), llptr, dest.align);
}
}
}

@@ -81,14 +81,6 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
!self.llextra.is_null()
}
pub fn non_abi_align(self) -> Option<Align> {
if self.align.abi() >= self.layout.align.abi() {
None
} else {
Some(self.align)
}
}
pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
debug!("PlaceRef::load: {:?}", self);
@@ -135,7 +127,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
let llval = if !const_llval.is_null() {
const_llval
} else {
let load = bcx.load(self.llval, self.non_abi_align());
let load = bcx.load(self.llval, self.align);
if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
scalar_load_metadata(load, scalar);
}
@@ -149,7 +141,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
if scalar.is_bool() {
llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
}
let load = bcx.load(llptr, self.non_abi_align());
let load = bcx.load(llptr, self.align);
scalar_load_metadata(load, scalar);
if scalar.is_bool() {
bcx.trunc(load, Type::i1(bcx.ccx))
@@ -338,7 +330,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
.discriminant_for_variant(bcx.tcx(), variant_index)
.to_u128_unchecked() as u64;
bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64),
ptr.llval, ptr.non_abi_align());
ptr.llval, ptr.align);
}
layout::Variants::NicheFilling {
dataful_variant,

@@ -104,9 +104,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let start = dest.project_index(&bcx, C_usize(bcx.ccx, 0)).llval;
if let OperandValue::Immediate(v) = tr_elem.val {
let align = dest.non_abi_align()
.unwrap_or(tr_elem.layout.align);
let align = C_i32(bcx.ccx, align.abi() as i32);
let align = C_i32(bcx.ccx, dest.align.abi() as i32);
let size = C_usize(bcx.ccx, dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays