miri: track the Align instead of packedness in PtrAndAlign.

Eduard-Mihai Burtescu, 2017-12-16 14:07:04 +02:00
parent 5cab0bf0ad
commit ff080d389d
7 changed files with 152 additions and 182 deletions
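
The diffs below all follow from one data-structure change: `PtrAndAlign` stops carrying an `aligned: bool` flag and records a concrete `Align` instead, so the interpreter tracks exactly how aligned a place is known to be rather than just whether it came from somewhere packed. A side effect is that the `Value::by_ref` helper, which silently assumed `aligned: true`, goes away; callers now build `Value::ByRef` with an explicit `layout.align`. The following is a minimal, self-contained sketch of that shape; `Align` and `Pointer` here are simplified stand-ins, not the real rustc definitions.

// Stand-in types only; the real `Align` lives in `ty::layout` and also
// tracks a preferred alignment.
#[derive(Copy, Clone, Debug)]
struct Align(u64); // ABI alignment in bytes

#[derive(Copy, Clone, Debug)]
struct Pointer(u64);

// Before this commit the struct carried `aligned: bool`, which could only say
// "assume the place is aligned" or "assume nothing". Now it records the
// alignment the place actually has.
#[derive(Copy, Clone, Debug)]
struct PtrAndAlign {
    ptr: Pointer,
    align: Align,
}

// The old `Value::by_ref(ptr)` constructor defaulted to `aligned: true`; with
// an explicit `Align` there is no obvious default, so callers construct the
// variant themselves from the type's layout.
#[derive(Copy, Clone, Debug)]
enum Value {
    ByRef(PtrAndAlign),
}

fn main() {
    let layout_align = Align(8); // would come from `self.layout_of(ty)?.align`
    let v = Value::ByRef(PtrAndAlign { ptr: Pointer(0x1000), align: layout_align });
    println!("{:?}", v);
}

In the real `ty::layout::Align`, ABI and preferred alignment are stored separately, which is why the diff prints `align.abi()` and builds the trivial alignment with `Align::from_bytes(1, 1)`.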


@ -1,6 +1,6 @@
#![allow(unknown_lints)]
use ty::layout::HasDataLayout;
use ty::layout::{Align, HasDataLayout};
use super::{EvalResult, MemoryPointer, PointerArithmetic};
use syntax::ast::FloatTy;
@ -9,8 +9,7 @@ use rustc_const_math::ConstFloat;
#[derive(Copy, Clone, Debug)]
pub struct PtrAndAlign {
pub ptr: Pointer,
/// Remember whether this place is *supposed* to be aligned.
pub aligned: bool,
pub align: Align,
}
impl PtrAndAlign {
@ -20,7 +19,7 @@ impl PtrAndAlign {
pub fn offset<'tcx, C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
Ok(PtrAndAlign {
ptr: self.ptr.offset(i, cx)?,
aligned: self.aligned,
align: self.align,
})
}
}
@ -182,13 +181,6 @@ pub enum PrimValKind {
Char,
}
impl<'a, 'tcx: 'a> Value {
#[inline]
pub fn by_ref(ptr: Pointer) -> Self {
Value::ByRef(PtrAndAlign { ptr, aligned: true })
}
}
impl<'tcx> PrimVal {
pub fn from_u128(n: u128) -> Self {
PrimVal::Bytes(n)


@ -77,7 +77,7 @@ pub fn eval_body<'a, 'tcx>(
instance,
mir.span,
mir,
Place::from_ptr(ptr),
Place::from_ptr(ptr, layout.align),
cleanup.clone(),
)?;
@ -357,10 +357,11 @@ pub fn const_eval_provider<'a, 'tcx>(
(_, Err(err)) => Err(err),
(Ok((miri_val, miri_ty)), Ok(ctfe)) => {
let mut ecx = mk_eval_cx(tcx, instance, key.param_env).unwrap();
check_ctfe_against_miri(&mut ecx, PtrAndAlign {
let miri_ptr = PtrAndAlign {
ptr: miri_val,
aligned: true
}, miri_ty, ctfe.val);
align: ecx.layout_of(miri_ty).unwrap().align
};
check_ctfe_against_miri(&mut ecx, miri_ptr, miri_ty, ctfe.val);
Ok(ctfe)
}
}
@ -380,7 +381,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
use rustc::ty::TypeVariants::*;
match miri_ty.sty {
TyInt(int_ty) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
let value = ecx.read_with_align(miri_val.align, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let prim = get_prim(ecx, value);
@ -391,7 +392,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
assert_eq!(c, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", c, ctfe);
},
TyUint(uint_ty) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
let value = ecx.read_with_align(miri_val.align, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let prim = get_prim(ecx, value);
@ -402,7 +403,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
assert_eq!(c, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", c, ctfe);
},
TyFloat(ty) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
let value = ecx.read_with_align(miri_val.align, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let prim = get_prim(ecx, value);
@ -410,7 +411,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
assert_eq!(f, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", f, ctfe);
},
TyBool => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
let value = ecx.read_with_align(miri_val.align, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let bits = get_prim(ecx, value);
@ -421,7 +422,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
assert_eq!(b, ctfe, "miri evaluated to {:?}, but ctfe yielded {:?}", b, ctfe);
},
TyChar => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
let value = ecx.read_with_align(miri_val.align, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let bits = get_prim(ecx, value);
@ -435,7 +436,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
}
},
TyStr => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
let value = ecx.read_with_align(miri_val.align, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
if let Ok(Some(Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::Bytes(len)))) = value {
@ -522,8 +523,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
Field::new(field),
layout,
).unwrap();
let ptr = place.to_ptr_extra_aligned().0;
check_ctfe_against_miri(ecx, ptr, elem.ty, elem.val);
check_ctfe_against_miri(ecx, place.to_ptr_align(), elem.ty, elem.val);
}
},
TySlice(_) => bug!("miri produced a slice?"),
@ -543,7 +543,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
// should be fine
TyFnDef(..) => {}
TyFnPtr(_) => {
let value = ecx.read_maybe_aligned(miri_val.aligned, |ectx| {
let value = ecx.read_with_align(miri_val.align, |ectx| {
ectx.try_read_value(miri_val.ptr, miri_ty)
});
let ptr = match value {


@ -241,7 +241,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
))
}
pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> {
pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
use rustc::middle::const_val::ConstVal::*;
let primval = match *const_val {
@ -264,7 +264,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
return Ok(self.read_global_as_value(GlobalId {
instance,
promoted: None,
}));
}, self.layout_of(ty)?));
}
Aggregate(..) |
@ -637,7 +637,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let src = self.eval_place(place)?;
// We ignore the alignment of the place here -- special handling for packed structs ends
// at the `&` operator.
let (ptr, extra) = self.force_allocation(src)?.to_ptr_extra_aligned();
let (ptr, extra) = self.force_allocation(src)?.to_ptr_align_extra();
let val = match extra {
PlaceExtra::None => ptr.ptr.to_value(),
@ -677,7 +677,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
match kind {
Unsize => {
let src = self.eval_operand(operand)?;
self.unsize_into(src.value, src.ty, dest, dest_ty)?;
let src_layout = self.layout_of(src.ty)?;
let dst_layout = self.layout_of(dest_ty)?;
self.unsize_into(src.value, src_layout, dest, dst_layout)?;
}
Misc => {
@ -830,13 +832,13 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
use rustc::mir::Literal;
let mir::Constant { ref literal, .. } = **constant;
let value = match *literal {
Literal::Value { ref value } => self.const_to_value(&value.val)?,
Literal::Value { ref value } => self.const_to_value(&value.val, ty)?,
Literal::Promoted { index } => {
self.read_global_as_value(GlobalId {
instance: self.frame().instance,
promoted: Some(index),
})
}, self.layout_of(ty)?)
}
};
@ -948,10 +950,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Ok(())
}
pub fn read_global_as_value(&self, gid: GlobalId) -> Value {
pub fn read_global_as_value(&self, gid: GlobalId, layout: TyLayout) -> Value {
Value::ByRef(PtrAndAlign {
ptr: self.tcx.interpret_interner.borrow().get_cached(gid).expect("global not cached"),
aligned: true
align: layout.align
})
}
@ -979,11 +981,15 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Some(val) => {
let ty = self.stack[frame].mir.local_decls[local].ty;
let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
let layout = self.layout_of(ty)?;
let ptr = self.alloc_ptr(ty)?;
self.stack[frame].locals[local.index() - 1] =
Some(Value::by_ref(ptr.into())); // it stays live
Some(Value::ByRef(PtrAndAlign {
ptr: ptr.into(),
align: layout.align
})); // it stays live
self.write_value_to_ptr(val, ptr.into(), ty)?;
Place::from_ptr(ptr)
Place::from_ptr(ptr, layout.align)
}
}
}
@ -999,8 +1005,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
ty: Ty<'tcx>,
) -> EvalResult<'tcx, Value> {
match value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty))
Value::ByRef(PtrAndAlign { ptr, align }) => {
self.read_with_align(align, |ectx| ectx.read_value(ptr, ty))
}
other => Ok(other),
}
@ -1056,14 +1062,12 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
match dest {
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned },
ptr: PtrAndAlign { ptr, align },
extra,
} => {
assert_eq!(extra, PlaceExtra::None);
self.write_maybe_aligned_mut(
aligned,
|ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty),
)
self.write_with_align_mut(align,
|ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty))
}
Place::Local { frame, local } => {
@ -1088,7 +1092,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
) -> EvalResult<'tcx> {
if let Value::ByRef(PtrAndAlign {
ptr: dest_ptr,
aligned,
align,
}) = old_dest_val
{
// If the value is already `ByRef` (that is, backed by an `Allocation`),
@ -1098,13 +1102,13 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
//
// Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
// knew for certain that there were no outstanding pointers to this allocation.
self.write_maybe_aligned_mut(aligned, |ectx| {
self.write_with_align_mut(align, |ectx| {
ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty)
})?;
} else if let Value::ByRef(PtrAndAlign {
ptr: src_ptr,
aligned,
align,
}) = src_val
{
// If the value is not `ByRef`, then we know there are no pointers to it
@ -1118,13 +1122,17 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// It is a valid optimization to attempt reading a primitive value out of the
// source and write that into the destination without making an allocation, so
// we do so here.
self.read_maybe_aligned_mut(aligned, |ectx| {
self.read_with_align_mut(align, |ectx| {
if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) {
write_dest(ectx, src_val)?;
} else {
let dest_ptr = ectx.alloc_ptr(dest_ty)?.into();
ectx.copy(src_ptr, dest_ptr, dest_ty)?;
write_dest(ectx, Value::by_ref(dest_ptr))?;
let layout = ectx.layout_of(dest_ty)?;
write_dest(ectx, Value::ByRef(PtrAndAlign {
ptr: dest_ptr,
align: layout.align
}))?;
}
Ok(())
})?;
@ -1145,8 +1153,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
) -> EvalResult<'tcx> {
trace!("write_value_to_ptr: {:#?}", value);
match value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty))
Value::ByRef(PtrAndAlign { ptr, align }) => {
self.read_with_align_mut(align, |ectx| ectx.copy(ptr, dest, dest_ty))
}
Value::ByVal(primval) => {
let layout = self.layout_of(dest_ty)?;
@ -1441,92 +1449,62 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
fn unsize_into(
&mut self,
src: Value,
src_ty: Ty<'tcx>,
src_layout: TyLayout<'tcx>,
dst: Place,
dst_ty: Ty<'tcx>,
dst_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx> {
let src_layout = self.layout_of(src_ty)?;
let dst_layout = self.layout_of(dst_ty)?;
match (&src_ty.sty, &dst_ty.sty) {
match (&src_layout.ty.sty, &dst_layout.ty.sty) {
(&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) |
(&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) |
(&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => {
self.unsize_into_ptr(src, src_ty, dst, dst_ty, s.ty, d.ty)
self.unsize_into_ptr(src, src_layout.ty, dst, dst_layout.ty, s.ty, d.ty)
}
(&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
assert_eq!(def_a, def_b);
if def_a.is_box() || def_b.is_box() {
if !def_a.is_box() || !def_b.is_box() {
panic!("invalid unsizing between {:?} -> {:?}", src_ty, dst_ty);
bug!("invalid unsizing between {:?} -> {:?}", src_layout, dst_layout);
}
return self.unsize_into_ptr(
src,
src_ty,
src_layout.ty,
dst,
dst_ty,
src_ty.boxed_ty(),
dst_ty.boxed_ty(),
dst_layout.ty,
src_layout.ty.boxed_ty(),
dst_layout.ty.boxed_ty(),
);
}
if self.ty_to_primval_kind(src_ty).is_ok() {
// TODO: We ignore the packed flag here
let sty = src_layout.field(&self, 0)?.ty;
let dty = dst_layout.field(&self, 0)?.ty;
return self.unsize_into(src, sty, dst, dty);
}
// unsizing of generic struct with pointer fields
// Example: `Arc<T>` -> `Arc<Trait>`
// here we need to increase the size of every &T thin ptr field to a fat ptr
assert_eq!(def_a, def_b);
let src_ptr = match src {
Value::ByRef(PtrAndAlign { ptr, aligned: true }) => ptr,
// the entire struct is just a pointer
Value::ByVal(_) => {
for i in 0..src_layout.fields.count() {
let src_field = src_layout.field(&self, i)?;
let dst_field = dst_layout.field(&self, i)?;
if dst_layout.is_zst() {
continue;
}
assert_eq!(src_layout.fields.offset(i).bytes(), 0);
assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
assert_eq!(src_field.size, src_layout.size);
assert_eq!(dst_field.size, dst_layout.size);
return self.unsize_into(
src,
src_field.ty,
dst,
dst_field.ty,
);
}
bug!("by val unsize into where the value doesn't cover the entire type")
}
// TODO: Is it possible for unaligned pointers to occur here?
_ => bug!("expected aligned pointer, got {:?}", src),
};
// FIXME(solson)
let dst = self.force_allocation(dst)?.to_ptr()?;
for i in 0..src_layout.fields.count() {
let src_field = src_layout.field(&self, i)?;
let dst_field = dst_layout.field(&self, i)?;
let (dst_f_place, dst_field) =
self.place_field(dst, mir::Field::new(i), dst_layout)?;
if dst_field.is_zst() {
continue;
}
let src_field_offset = src_layout.fields.offset(i).bytes();
let dst_field_offset = dst_layout.fields.offset(i).bytes();
let src_f_ptr = src_ptr.offset(src_field_offset, &self)?;
let dst_f_ptr = dst.offset(dst_field_offset, &self)?;
let (src_f_value, src_field) = match src {
Value::ByRef(PtrAndAlign { ptr, align }) => {
let src_place = Place::from_primval_ptr(ptr, align);
let (src_f_place, src_field) =
self.place_field(src_place, mir::Field::new(i), src_layout)?;
(self.read_place(src_f_place)?, src_field)
}
Value::ByVal(_) | Value::ByValPair(..) => {
let src_field = src_layout.field(&self, i)?;
assert_eq!(src_layout.fields.offset(i).bytes(), 0);
assert_eq!(src_field.size, src_layout.size);
(src, src_field)
}
};
if src_field.ty == dst_field.ty {
self.copy(src_f_ptr, dst_f_ptr.into(), src_field.ty)?;
self.write_value(ValTy {
value: src_f_value,
ty: src_field.ty,
}, dst_f_place)?;
} else {
self.unsize_into(
Value::by_ref(src_f_ptr),
src_field.ty,
Place::from_ptr(dst_f_ptr),
dst_field.ty,
)?;
self.unsize_into(src_f_value, src_field, dst_f_place, dst_field)?;
}
}
Ok(())
@ -1534,8 +1512,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
_ => {
bug!(
"unsize_into: invalid conversion: {:?} -> {:?}",
src_ty,
dst_ty
src_layout,
dst_layout
)
}
}
@ -1559,11 +1537,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Err(err) => {
panic!("Failed to access local: {:?}", err);
}
Ok(Value::ByRef(PtrAndAlign { ptr, aligned })) => {
Ok(Value::ByRef(PtrAndAlign { ptr, align })) => {
match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => {
write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " })
.unwrap();
write!(msg, " by align({}) ref:", align.abi()).unwrap();
allocs.push(ptr.alloc_id);
}
ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
@ -1589,10 +1566,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
trace!("{}", msg);
self.memory.dump_allocs(allocs);
}
Place::Ptr { ptr: PtrAndAlign { ptr, aligned }, .. } => {
Place::Ptr { ptr: PtrAndAlign { ptr, align }, .. } => {
match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => {
trace!("by {}ref:", if aligned { "" } else { "unaligned " });
trace!("by align({}) ref:", align.abi());
self.memory.dump_alloc(ptr.alloc_id);
}
ptr => trace!(" integral by ref: {:?}", ptr),


@ -4,7 +4,7 @@ use std::{ptr, mem, io};
use std::cell::Cell;
use rustc::ty::{Instance, TyCtxt};
use rustc::ty::layout::{self, TargetDataLayout};
use rustc::ty::layout::{self, Align, TargetDataLayout};
use syntax::ast::Mutability;
use rustc::mir::interpret::{MemoryPointer, AllocId, Allocation, AccessKind, UndefMask, PtrAndAlign, Value, Pointer,
@ -51,10 +51,10 @@ pub struct Memory<'a, 'tcx: 'a, M: Machine<'tcx>> {
/// Maximum number of virtual bytes that may be allocated.
memory_size: u64,
/// To avoid having to pass flags to every single memory access, we have some global state saying whether
/// To avoid having to pass flags to every single memory access, we have some global state saying how
/// alignment checking is currently enforced for read and/or write accesses.
reads_are_aligned: Cell<bool>,
writes_are_aligned: Cell<bool>,
read_align_override: Cell<Option<Align>>,
write_align_override: Cell<Option<Align>>,
/// The current stack frame. Used to check accesses against locks.
pub cur_frame: usize,
@ -72,8 +72,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
tcx,
memory_size: max_memory,
memory_usage: 0,
reads_are_aligned: Cell::new(true),
writes_are_aligned: Cell::new(true),
read_align_override: Cell::new(None),
write_align_override: Cell::new(None),
cur_frame: usize::max_value(),
}
}
@ -272,14 +272,12 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
PrimVal::Undef => return err!(ReadUndefBytes),
};
// See if alignment checking is disabled
let enforce_alignment = match access {
Some(AccessKind::Read) => self.reads_are_aligned.get(),
Some(AccessKind::Write) => self.writes_are_aligned.get(),
None => true,
let align_override = match access {
Some(AccessKind::Read) => self.read_align_override.get(),
Some(AccessKind::Write) => self.write_align_override.get(),
None => None,
};
if !enforce_alignment {
return Ok(());
}
let align = align_override.map_or(align, |o| o.abi().min(align));
// Check alignment
if alloc_align < align {
return err!(AlignmentCheckFailed {
@ -1005,39 +1003,39 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
fn memory(&self) -> &Memory<'a, 'tcx, M>;
// These are not supposed to be overridden.
fn read_maybe_aligned<F, T>(&self, aligned: bool, f: F) -> EvalResult<'tcx, T>
fn read_with_align<F, T>(&self, align: Align, f: F) -> EvalResult<'tcx, T>
where
F: FnOnce(&Self) -> EvalResult<'tcx, T>,
{
let old = self.memory().reads_are_aligned.get();
// Do alignment checking if *all* nested calls say it has to be aligned.
self.memory().reads_are_aligned.set(old && aligned);
let old = self.memory().read_align_override.get();
// Do alignment checking for the minimum align out of *all* nested calls.
self.memory().read_align_override.set(Some(old.map_or(align, |old| old.min(align))));
let t = f(self);
self.memory().reads_are_aligned.set(old);
self.memory().read_align_override.set(old);
t
}
fn read_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
fn read_with_align_mut<F, T>(&mut self, align: Align, f: F) -> EvalResult<'tcx, T>
where
F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
{
let old = self.memory().reads_are_aligned.get();
// Do alignment checking if *all* nested calls say it has to be aligned.
self.memory().reads_are_aligned.set(old && aligned);
let old = self.memory().read_align_override.get();
// Do alignment checking for the minimum align out of *all* nested calls.
self.memory().read_align_override.set(Some(old.map_or(align, |old| old.min(align))));
let t = f(self);
self.memory().reads_are_aligned.set(old);
self.memory().read_align_override.set(old);
t
}
fn write_maybe_aligned_mut<F, T>(&mut self, aligned: bool, f: F) -> EvalResult<'tcx, T>
fn write_with_align_mut<F, T>(&mut self, align: Align, f: F) -> EvalResult<'tcx, T>
where
F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
{
let old = self.memory().writes_are_aligned.get();
// Do alignment checking if *all* nested calls say it has to be aligned.
self.memory().writes_are_aligned.set(old && aligned);
let old = self.memory().write_align_override.get();
// Do alignment checking for the minimum align out of *all* nested calls.
self.memory().write_align_override.set(Some(old.map_or(align, |old| old.min(align))));
let t = f(self);
self.memory().writes_are_aligned.set(old);
self.memory().write_align_override.set(old);
t
}
@ -1048,8 +1046,8 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
value: Value,
) -> EvalResult<'tcx, Pointer> {
Ok(match value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
self.memory().read_maybe_aligned(aligned, |mem| mem.read_ptr_sized_unsigned(ptr.to_ptr()?))?
Value::ByRef(PtrAndAlign { ptr, align }) => {
self.memory().read_with_align(align, |mem| mem.read_ptr_sized_unsigned(ptr.to_ptr()?))?
}
Value::ByVal(ptr) |
Value::ByValPair(ptr, _) => ptr,
@ -1063,9 +1061,9 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
match value {
Value::ByRef(PtrAndAlign {
ptr: ref_ptr,
aligned,
align,
}) => {
self.memory().read_maybe_aligned(aligned, |mem| {
self.memory().read_with_align(align, |mem| {
let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into();
let vtable = mem.read_ptr_sized_unsigned(
ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
@ -1088,9 +1086,9 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
match value {
Value::ByRef(PtrAndAlign {
ptr: ref_ptr,
aligned,
align,
}) => {
self.memory().read_maybe_aligned(aligned, |mem| {
self.memory().read_with_align(align, |mem| {
let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into();
let len = mem.read_ptr_sized_unsigned(
ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
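
The memory changes above replace the global `reads_are_aligned`/`writes_are_aligned` booleans with `read_align_override`/`write_align_override` cells of type `Cell<Option<Align>>`: entering `read_with_align` or `write_with_align_mut` lowers the override to the minimum of all nested requests, and the alignment check then only requires the smaller of the type's alignment and the current override. Below is a standalone sketch of that save/restore-with-minimum pattern; it uses a plain `u64` in place of `Align` and a `String` error instead of `EvalResult`, so it illustrates the pattern rather than reproducing the rustc code.

use std::cell::Cell;

struct Memory {
    // Stand-in for `read_align_override: Cell<Option<Align>>`.
    read_align_override: Cell<Option<u64>>,
}

impl Memory {
    fn new() -> Self {
        Memory { read_align_override: Cell::new(None) }
    }

    // Run `f` with the override lowered to at most `align`, restoring the
    // previous value afterwards (the save/restore done by `read_with_align`).
    fn with_read_align<T>(&self, align: u64, f: impl FnOnce(&Self) -> T) -> T {
        let old = self.read_align_override.get();
        self.read_align_override
            .set(Some(old.map_or(align, |old| old.min(align))));
        let result = f(self);
        self.read_align_override.set(old);
        result
    }

    // The check itself: the required alignment is the minimum of what the
    // type demands and what the current override allows.
    fn check_align(&self, required: u64, actual: u64) -> Result<(), String> {
        let required = self
            .read_align_override
            .get()
            .map_or(required, |o| o.min(required));
        if actual >= required {
            Ok(())
        } else {
            Err(format!("alignment check failed: have {}, need {}", actual, required))
        }
    }
}

fn main() {
    let mem = Memory::new();
    // Without an override, an access that needs 8-byte alignment fails on a
    // pointer that is only 4-aligned.
    assert!(mem.check_align(8, 4).is_err());
    // Inside a scope that only guarantees 2-byte alignment (say, a field that
    // is only 2-aligned), the same access is checked against 2 bytes instead.
    mem.with_read_align(2, |m| assert!(m.check_align(8, 4).is_ok()));
}

Taking the minimum, instead of and-ing booleans as before, means a partially aligned access is still checked against the alignment it does have, rather than having the check switched off entirely.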


@ -1,6 +1,6 @@
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, LayoutOf, TyLayout};
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use rustc_data_structures::indexed_vec::Idx;
use rustc::mir::interpret::{GlobalId, PtrAndAlign};
@ -35,21 +35,21 @@ pub enum PlaceExtra {
impl<'tcx> Place {
/// Produces a Place that will error if attempted to be read from
pub fn undef() -> Self {
Self::from_primval_ptr(PrimVal::Undef.into())
Self::from_primval_ptr(PrimVal::Undef.into(), Align::from_bytes(1, 1).unwrap())
}
pub fn from_primval_ptr(ptr: Pointer) -> Self {
pub fn from_primval_ptr(ptr: Pointer, align: Align) -> Self {
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned: true },
ptr: PtrAndAlign { ptr, align },
extra: PlaceExtra::None,
}
}
pub fn from_ptr(ptr: MemoryPointer) -> Self {
Self::from_primval_ptr(ptr.into())
pub fn from_ptr(ptr: MemoryPointer, align: Align) -> Self {
Self::from_primval_ptr(ptr.into(), align)
}
pub fn to_ptr_extra_aligned(self) -> (PtrAndAlign, PlaceExtra) {
pub fn to_ptr_align_extra(self) -> (PtrAndAlign, PlaceExtra) {
match self {
Place::Ptr { ptr, extra } => (ptr, extra),
_ => bug!("to_ptr_and_extra: expected Place::Ptr, got {:?}", self),
@ -57,12 +57,15 @@ impl<'tcx> Place {
}
}
pub fn to_ptr_align(self) -> PtrAndAlign {
let (ptr, _extra) = self.to_ptr_align_extra();
ptr
}
pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
let (ptr, extra) = self.to_ptr_extra_aligned();
// At this point, we forget about the alignment information -- the place has been turned into a reference,
// and no matter where it came from, it now must be aligned.
assert_eq!(extra, PlaceExtra::None);
ptr.to_ptr()
self.to_ptr_align().to_ptr()
}
pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) {
@ -102,11 +105,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// Directly reading a static will always succeed
Static(ref static_) => {
let instance = ty::Instance::mono(self.tcx, static_.def_id);
let cid = GlobalId {
Ok(Some(self.read_global_as_value(GlobalId {
instance,
promoted: None,
};
Ok(Some(self.read_global_as_value(cid)))
}, self.layout_of(self.place_ty(place))?)))
}
Projection(ref proj) => self.try_read_place_projection(proj),
}
@ -190,10 +192,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
instance,
promoted: None,
};
let layout = self.layout_of(self.place_ty(mir_place))?;
Place::Ptr {
ptr: PtrAndAlign {
ptr: self.tcx.interpret_interner.borrow().get_cached(gid).expect("uncached global"),
aligned: true
align: layout.align
},
extra: PlaceExtra::None,
}
@ -241,7 +244,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
{
return Ok((base, field));
}
_ => self.force_allocation(base)?.to_ptr_extra_aligned(),
_ => self.force_allocation(base)?.to_ptr_align_extra(),
}
}
};
@ -258,7 +261,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
};
let mut ptr = base_ptr.offset(offset, &self)?;
ptr.aligned &= base_layout.align.abi() >= field.align.abi();
ptr.align = ptr.align.min(base_layout.align).min(field.align);
let extra = if !field.is_unsized() {
PlaceExtra::None
@ -278,22 +281,23 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
pub fn val_to_place(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Place> {
let layout = self.layout_of(ty)?;
Ok(match self.tcx.struct_tail(ty).sty {
ty::TyDynamic(..) => {
let (ptr, vtable) = self.into_ptr_vtable_pair(val)?;
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned: true },
ptr: PtrAndAlign { ptr, align: layout.align },
extra: PlaceExtra::Vtable(vtable),
}
}
ty::TyStr | ty::TySlice(_) => {
let (ptr, len) = self.into_slice(val)?;
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned: true },
ptr: PtrAndAlign { ptr, align: layout.align },
extra: PlaceExtra::Length(len),
}
}
_ => Place::from_primval_ptr(self.into_ptr(val)?),
_ => Place::from_primval_ptr(self.into_ptr(val)?, layout.align),
})
}
@ -305,7 +309,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
) -> EvalResult<'tcx, Place> {
// Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
let base = self.force_allocation(base)?;
let (base_ptr, _) = base.to_ptr_extra_aligned();
let base_ptr = base.to_ptr_align();
let (elem_ty, len) = base.elem_ty_and_len(outer_ty);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
@ -329,7 +333,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
) -> EvalResult<'tcx, Place> {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (ptr, _) = base.to_ptr_extra_aligned();
let ptr = base.to_ptr_align();
let extra = PlaceExtra::DowncastVariant(variant);
Ok(Place::Ptr { ptr, extra })
}
@ -380,7 +384,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
} => {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (base_ptr, _) = base.to_ptr_extra_aligned();
let base_ptr = base.to_ptr_align();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
@ -399,7 +403,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Subslice { from, to } => {
// FIXME(solson)
let base = self.force_allocation(base)?;
let (base_ptr, _) = base.to_ptr_extra_aligned();
let base_ptr = base.to_ptr_align();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
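
In the place code above, a place can no longer be built from a raw pointer without saying how aligned it is (`Place::from_ptr(ptr, layout.align)`, with `Align::from_bytes(1, 1)` as the trivial alignment for `Place::undef`), and projecting to a field updates the alignment with `ptr.align.min(base_layout.align).min(field.align)`. A small standalone sketch of those two rules, again with simplified stand-in types rather than the actual interpreter ones:

// `Align` as a plain byte count; the real type also carries a preferred
// alignment and is constructed via `Align::from_bytes`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Align(u64);

#[derive(Copy, Clone, Debug)]
struct Place {
    ptr: u64,
    align: Align,
}

impl Place {
    // Mirrors `Place::from_ptr(ptr, align)`: no implicit `aligned: true` left.
    fn from_ptr(ptr: u64, align: Align) -> Self {
        Place { ptr, align }
    }

    // Mirrors the field projection above: the projected place keeps the
    // weakest of the pointer's, the base layout's and the field's alignment.
    fn project_field(self, offset: u64, base_align: Align, field_align: Align) -> Self {
        Place {
            ptr: self.ptr + offset,
            align: self.align.min(base_align).min(field_align),
        }
    }
}

fn main() {
    let base = Place::from_ptr(0x2000, Align(8));
    // A field whose layout only guarantees 1-byte alignment (e.g. inside a
    // packed struct) drops the projected place to 1-byte alignment.
    let field = base.project_field(3, Align(8), Align(1));
    assert_eq!(field.align, Align(1));
    println!("{:?}", field);
}

This replaces the old `ptr.aligned &= base_layout.align.abi() >= field.align.abi()` rule, which could only turn alignment checking off for the place, never weaken it to a specific smaller alignment.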


@ -197,7 +197,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
instance,
span,
mir,
Place::from_ptr(ptr),
Place::from_ptr(ptr, layout.align),
cleanup,
)?;
Ok(true)
@ -273,7 +273,7 @@ impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b,
this.instance,
constant.span,
mir,
Place::from_ptr(ptr),
Place::from_ptr(ptr, layout.align),
StackPopCleanup::MarkStatic(Mutability::Immutable),
)?;
Ok(true)


@ -327,15 +327,14 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
if let ty::TyTuple(..) = args[1].ty.sty {
if self.frame().mir.args_iter().count() == layout.fields.count() + 1 {
match args[1].value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
assert!(
aligned,
"Unaligned ByRef-values cannot occur as function arguments"
);
Value::ByRef(PtrAndAlign { ptr, align }) => {
for (i, arg_local) in arg_locals.enumerate() {
let field = layout.field(&self, i)?;
let offset = layout.fields.offset(i).bytes();
let arg = Value::by_ref(ptr.offset(offset, &self)?);
let arg = Value::ByRef(PtrAndAlign {
ptr: ptr.offset(offset, &self)?,
align: align.min(field.align)
});
let dest =
self.eval_place(&mir::Place::Local(arg_local))?;
trace!(