miri: pass pointer alignments directly instead of contextually.

This commit is contained in:
Eduard-Mihai Burtescu 2017-12-17 08:47:22 +02:00
parent 08646c6c2c
commit 7dc79cc49b
7 changed files with 137 additions and 195 deletions

View File

@ -13,11 +13,10 @@ pub use self::error::{EvalError, EvalResult, EvalErrorKind};
pub use self::value::{PrimVal, PrimValKind, Value, Pointer, bytes_to_f32, bytes_to_f64}; pub use self::value::{PrimVal, PrimValKind, Value, Pointer, bytes_to_f32, bytes_to_f64};
use std::collections::BTreeMap; use std::collections::BTreeMap;
use ty::layout::HasDataLayout;
use std::fmt; use std::fmt;
use ty::layout;
use mir; use mir;
use ty; use ty;
use ty::layout::{self, Align, HasDataLayout};
use middle::region; use middle::region;
use std::iter; use std::iter;
@ -166,7 +165,7 @@ pub struct Allocation {
/// Denotes undefined memory. Reading from undefined memory is forbidden in miri /// Denotes undefined memory. Reading from undefined memory is forbidden in miri
pub undef_mask: UndefMask, pub undef_mask: UndefMask,
/// The alignment of the allocation to detect unaligned reads. /// The alignment of the allocation to detect unaligned reads.
pub align: u64, pub align: Align,
} }
impl Allocation { impl Allocation {
@ -177,7 +176,7 @@ impl Allocation {
bytes: slice.to_owned(), bytes: slice.to_owned(),
relocations: BTreeMap::new(), relocations: BTreeMap::new(),
undef_mask, undef_mask,
align: 1, align: Align::from_bytes(1, 1).unwrap(),
} }
} }
} }

View File

@ -66,7 +66,7 @@ pub fn eval_body<'a, 'tcx>(
assert!(!layout.is_unsized()); assert!(!layout.is_unsized());
let ptr = ecx.memory.allocate( let ptr = ecx.memory.allocate(
layout.size.bytes(), layout.size.bytes(),
layout.align.abi(), layout.align,
None, None,
)?; )?;
tcx.interpret_interner.borrow_mut().cache(cid, ptr.into()); tcx.interpret_interner.borrow_mut().cache(cid, ptr.into());
@ -95,7 +95,7 @@ pub fn eval_body_as_integer<'a, 'tcx>(
let ptr_ty = eval_body(tcx, instance, param_env); let ptr_ty = eval_body(tcx, instance, param_env);
let (ptr, ty) = ptr_ty?; let (ptr, ty) = ptr_ty?;
let ecx = mk_eval_cx(tcx, instance, param_env)?; let ecx = mk_eval_cx(tcx, instance, param_env)?;
let prim = match ecx.try_read_value(ptr, ty)? { let prim = match ecx.try_read_value(ptr, ecx.layout_of(ty)?.align, ty)? {
Some(Value::ByVal(prim)) => prim.to_bytes()?, Some(Value::ByVal(prim)) => prim.to_bytes()?,
_ => return err!(TypeNotPrimitive(ty)), _ => return err!(TypeNotPrimitive(ty)),
}; };

View File

@ -211,8 +211,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
assert!(!layout.is_unsized(), "cannot alloc memory for unsized type"); assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
let size = layout.size.bytes(); let size = layout.size.bytes();
let align = layout.align.abi(); self.memory.allocate(size, layout.align, Some(MemoryKind::Stack))
self.memory.allocate(size, align, Some(MemoryKind::Stack))
} }
pub fn memory(&self) -> &Memory<'a, 'tcx, M> { pub fn memory(&self) -> &Memory<'a, 'tcx, M> {
@ -612,12 +611,12 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let elem_size = self.layout_of(elem_ty)?.size.bytes(); let elem_size = self.layout_of(elem_ty)?.size.bytes();
let value = self.eval_operand(operand)?.value; let value = self.eval_operand(operand)?.value;
let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?); let (dest, dest_align) = self.force_allocation(dest)?.to_ptr_align();
// FIXME: speed up repeat filling // FIXME: speed up repeat filling
for i in 0..length { for i in 0..length {
let elem_dest = dest.offset(i * elem_size, &self)?; let elem_dest = dest.offset(i * elem_size, &self)?;
self.write_value_to_ptr(value, elem_dest, elem_ty)?; self.write_value_to_ptr(value, elem_dest, dest_align, elem_ty)?;
} }
} }
@ -955,15 +954,6 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
layout.align) layout.align)
} }
fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> {
let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(), "cannot copy from an unsized type");
let size = layout.size.bytes();
let align = layout.align.abi();
self.memory.copy(src, dest, size, align, false)?;
Ok(())
}
pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> { pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> {
let new_place = match place { let new_place = match place {
Place::Local { frame, local } => { Place::Local { frame, local } => {
@ -984,8 +974,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let ptr = self.alloc_ptr(ty)?; let ptr = self.alloc_ptr(ty)?;
self.stack[frame].locals[local.index() - 1] = self.stack[frame].locals[local.index() - 1] =
Some(Value::ByRef(ptr.into(), layout.align)); // it stays live Some(Value::ByRef(ptr.into(), layout.align)); // it stays live
self.write_value_to_ptr(val, ptr.into(), ty)?; let place = Place::from_ptr(ptr, layout.align);
Place::from_ptr(ptr, layout.align) self.write_value(ValTy { value: val, ty }, place)?;
place
} }
} }
} }
@ -1002,7 +993,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
) -> EvalResult<'tcx, Value> { ) -> EvalResult<'tcx, Value> {
match value { match value {
Value::ByRef(ptr, align) => { Value::ByRef(ptr, align) => {
self.read_with_align(align, |ectx| ectx.read_value(ptr, ty)) self.read_value(ptr, align, ty)
} }
other => Ok(other), other => Ok(other),
} }
@ -1059,8 +1050,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
match dest { match dest {
Place::Ptr { ptr, align, extra } => { Place::Ptr { ptr, align, extra } => {
assert_eq!(extra, PlaceExtra::None); assert_eq!(extra, PlaceExtra::None);
self.write_with_align_mut(align, self.write_value_to_ptr(src_val, ptr, align, dest_ty)
|ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty))
} }
Place::Local { frame, local } => { Place::Local { frame, local } => {
@ -1091,10 +1081,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// //
// Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
// knew for certain that there were no outstanding pointers to this allocation. // knew for certain that there were no outstanding pointers to this allocation.
self.write_with_align_mut(align, |ectx| { self.write_value_to_ptr(src_val, dest_ptr, align, dest_ty)?;
ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty)
})?;
} else if let Value::ByRef(src_ptr, align) = src_val { } else if let Value::ByRef(src_ptr, align) = src_val {
// If the value is not `ByRef`, then we know there are no pointers to it // If the value is not `ByRef`, then we know there are no pointers to it
// and we can simply overwrite the `Value` in the locals array directly. // and we can simply overwrite the `Value` in the locals array directly.
@ -1107,18 +1094,14 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// It is a valid optimization to attempt reading a primitive value out of the // It is a valid optimization to attempt reading a primitive value out of the
// source and write that into the destination without making an allocation, so // source and write that into the destination without making an allocation, so
// we do so here. // we do so here.
self.read_with_align_mut(align, |ectx| { if let Ok(Some(src_val)) = self.try_read_value(src_ptr, align, dest_ty) {
if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) { write_dest(self, src_val)?;
write_dest(ectx, src_val)?; } else {
} else { let dest_ptr = self.alloc_ptr(dest_ty)?.into();
let dest_ptr = ectx.alloc_ptr(dest_ty)?.into(); let layout = self.layout_of(dest_ty)?;
ectx.copy(src_ptr, dest_ptr, dest_ty)?; self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size.bytes(), false)?;
let layout = ectx.layout_of(dest_ty)?; write_dest(self, Value::ByRef(dest_ptr, layout.align))?;
write_dest(ectx, Value::ByRef(dest_ptr, layout.align))?; }
}
Ok(())
})?;
} else { } else {
// Finally, we have the simple case where neither source nor destination are // Finally, we have the simple case where neither source nor destination are
// `ByRef`. We may simply copy the source value over the destination. // `ByRef`. We may simply copy the source value over the destination.
@ -1131,26 +1114,26 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
&mut self, &mut self,
value: Value, value: Value,
dest: Pointer, dest: Pointer,
dest_align: Align,
dest_ty: Ty<'tcx>, dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> { ) -> EvalResult<'tcx> {
trace!("write_value_to_ptr: {:#?}", value); trace!("write_value_to_ptr: {:#?}", value);
let layout = self.layout_of(dest_ty)?;
match value { match value {
Value::ByRef(ptr, align) => { Value::ByRef(ptr, align) => {
self.read_with_align_mut(align, |ectx| ectx.copy(ptr, dest, dest_ty)) self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size.bytes(), false)
} }
Value::ByVal(primval) => { Value::ByVal(primval) => {
let layout = self.layout_of(dest_ty)?;
match layout.abi { match layout.abi {
layout::Abi::Scalar(_) => {} layout::Abi::Scalar(_) => {}
_ if primval.is_undef() => {} _ if primval.is_undef() => {}
_ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout) _ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout)
} }
// TODO: Do we need signedness? // TODO: Do we need signedness?
self.memory.write_primval(dest.to_ptr()?, primval, layout.size.bytes(), false) self.memory.write_primval(dest.to_ptr()?, dest_align, primval, layout.size.bytes(), false)
} }
Value::ByValPair(a_val, b_val) => { Value::ByValPair(a_val, b_val) => {
let ptr = dest.to_ptr()?; let ptr = dest.to_ptr()?;
let mut layout = self.layout_of(dest_ty)?;
trace!("write_value_to_ptr valpair: {:#?}", layout); trace!("write_value_to_ptr valpair: {:#?}", layout);
let (a, b) = match layout.abi { let (a, b) = match layout.abi {
layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value), layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
@ -1161,9 +1144,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let b_offset = a_size.abi_align(b.align(&self)); let b_offset = a_size.abi_align(b.align(&self));
let b_ptr = ptr.offset(b_offset.bytes(), &self)?.into(); let b_ptr = ptr.offset(b_offset.bytes(), &self)?.into();
// TODO: What about signedness? // TODO: What about signedness?
self.memory.write_primval(a_ptr, a_val, a_size.bytes(), false)?; self.memory.write_primval(a_ptr, dest_align, a_val, a_size.bytes(), false)?;
self.memory.write_primval(b_ptr, b_val, b_size.bytes(), false)?; self.memory.write_primval(b_ptr, dest_align, b_val, b_size.bytes(), false)
Ok(())
} }
} }
} }
@ -1246,8 +1228,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
} }
} }
pub fn read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { pub fn read_value(&self, ptr: Pointer, align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
if let Some(val) = self.try_read_value(ptr, ty)? { if let Some(val) = self.try_read_value(ptr, align, ty)? {
Ok(val) Ok(val)
} else { } else {
bug!("primitive read failed for type: {:?}", ty); bug!("primitive read failed for type: {:?}", ty);
@ -1257,10 +1239,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub(crate) fn read_ptr( pub(crate) fn read_ptr(
&self, &self,
ptr: MemoryPointer, ptr: MemoryPointer,
ptr_align: Align,
pointee_ty: Ty<'tcx>, pointee_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Value> { ) -> EvalResult<'tcx, Value> {
let ptr_size = self.memory.pointer_size(); let ptr_size = self.memory.pointer_size();
let p : Pointer = self.memory.read_ptr_sized_unsigned(ptr)?.into(); let p: Pointer = self.memory.read_ptr_sized_unsigned(ptr, ptr_align)?.into();
if self.type_is_sized(pointee_ty) { if self.type_is_sized(pointee_ty) {
Ok(p.to_value()) Ok(p.to_value())
} else { } else {
@ -1268,23 +1251,23 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let extra = ptr.offset(ptr_size, self)?; let extra = ptr.offset(ptr_size, self)?;
match self.tcx.struct_tail(pointee_ty).sty { match self.tcx.struct_tail(pointee_ty).sty {
ty::TyDynamic(..) => Ok(p.to_value_with_vtable( ty::TyDynamic(..) => Ok(p.to_value_with_vtable(
self.memory.read_ptr_sized_unsigned(extra)?.to_ptr()?, self.memory.read_ptr_sized_unsigned(extra, ptr_align)?.to_ptr()?,
)), )),
ty::TySlice(..) | ty::TyStr => Ok( ty::TySlice(..) | ty::TyStr => Ok(
p.to_value_with_len(self.memory.read_ptr_sized_unsigned(extra)?.to_bytes()? as u64), p.to_value_with_len(self.memory.read_ptr_sized_unsigned(extra, ptr_align)?.to_bytes()? as u64),
), ),
_ => bug!("unsized primval ptr read from {:?}", pointee_ty), _ => bug!("unsized primval ptr read from {:?}", pointee_ty),
} }
} }
} }
pub fn try_read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> { pub fn try_read_value(&self, ptr: Pointer, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
use syntax::ast::FloatTy; use syntax::ast::FloatTy;
let ptr = ptr.to_ptr()?; let ptr = ptr.to_ptr()?;
let val = match ty.sty { let val = match ty.sty {
ty::TyBool => { ty::TyBool => {
let val = self.memory.read_primval(ptr, 1, false)?; let val = self.memory.read_primval(ptr, ptr_align, 1, false)?;
let val = match val { let val = match val {
PrimVal::Bytes(0) => false, PrimVal::Bytes(0) => false,
PrimVal::Bytes(1) => true, PrimVal::Bytes(1) => true,
@ -1294,7 +1277,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
PrimVal::from_bool(val) PrimVal::from_bool(val)
} }
ty::TyChar => { ty::TyChar => {
let c = self.memory.read_primval(ptr, 4, false)?.to_bytes()? as u32; let c = self.memory.read_primval(ptr, ptr_align, 4, false)?.to_bytes()? as u32;
match ::std::char::from_u32(c) { match ::std::char::from_u32(c) {
Some(ch) => PrimVal::from_char(ch), Some(ch) => PrimVal::from_char(ch),
None => return err!(InvalidChar(c as u128)), None => return err!(InvalidChar(c as u128)),
@ -1311,7 +1294,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
I128 => 16, I128 => 16,
Is => self.memory.pointer_size(), Is => self.memory.pointer_size(),
}; };
self.memory.read_primval(ptr, size, true)? self.memory.read_primval(ptr, ptr_align, size, true)?
} }
ty::TyUint(uint_ty) => { ty::TyUint(uint_ty) => {
@ -1324,19 +1307,23 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
U128 => 16, U128 => 16,
Us => self.memory.pointer_size(), Us => self.memory.pointer_size(),
}; };
self.memory.read_primval(ptr, size, false)? self.memory.read_primval(ptr, ptr_align, size, false)?
} }
ty::TyFloat(FloatTy::F32) => PrimVal::Bytes(self.memory.read_primval(ptr, 4, false)?.to_bytes()?), ty::TyFloat(FloatTy::F32) => {
ty::TyFloat(FloatTy::F64) => PrimVal::Bytes(self.memory.read_primval(ptr, 8, false)?.to_bytes()?), PrimVal::Bytes(self.memory.read_primval(ptr, ptr_align, 4, false)?.to_bytes()?)
}
ty::TyFloat(FloatTy::F64) => {
PrimVal::Bytes(self.memory.read_primval(ptr, ptr_align, 8, false)?.to_bytes()?)
}
ty::TyFnPtr(_) => self.memory.read_ptr_sized_unsigned(ptr)?, ty::TyFnPtr(_) => self.memory.read_ptr_sized_unsigned(ptr, ptr_align)?,
ty::TyRef(_, ref tam) | ty::TyRef(_, ref tam) |
ty::TyRawPtr(ref tam) => return self.read_ptr(ptr, tam.ty).map(Some), ty::TyRawPtr(ref tam) => return self.read_ptr(ptr, ptr_align, tam.ty).map(Some),
ty::TyAdt(def, _) => { ty::TyAdt(def, _) => {
if def.is_box() { if def.is_box() {
return self.read_ptr(ptr, ty.boxed_ty()).map(Some); return self.read_ptr(ptr, ptr_align, ty.boxed_ty()).map(Some);
} }
if let layout::Abi::Scalar(ref scalar) = self.layout_of(ty)?.abi { if let layout::Abi::Scalar(ref scalar) = self.layout_of(ty)?.abi {
@ -1345,7 +1332,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
signed = s; signed = s;
} }
let size = scalar.value.size(self).bytes(); let size = scalar.value.size(self).bytes();
self.memory.read_primval(ptr, size, signed)? self.memory.read_primval(ptr, ptr_align, size, signed)?
} else { } else {
return Ok(None); return Ok(None);
} }

View File

@ -1,7 +1,6 @@
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian}; use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian, BigEndian};
use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque}; use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque};
use std::{ptr, mem, io}; use std::{ptr, mem, io};
use std::cell::Cell;
use rustc::ty::{Instance, TyCtxt}; use rustc::ty::{Instance, TyCtxt};
use rustc::ty::layout::{self, Align, TargetDataLayout}; use rustc::ty::layout::{self, Align, TargetDataLayout};
@ -51,11 +50,6 @@ pub struct Memory<'a, 'tcx: 'a, M: Machine<'tcx>> {
/// Maximum number of virtual bytes that may be allocated. /// Maximum number of virtual bytes that may be allocated.
memory_size: u64, memory_size: u64,
/// To avoid having to pass flags to every single memory access, we have some global state saying how
/// alignment checking is currently enforced for read and/or write accesses.
read_align_override: Cell<Option<Align>>,
write_align_override: Cell<Option<Align>>,
/// The current stack frame. Used to check accesses against locks. /// The current stack frame. Used to check accesses against locks.
pub cur_frame: usize, pub cur_frame: usize,
@ -72,8 +66,6 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
tcx, tcx,
memory_size: max_memory, memory_size: max_memory,
memory_usage: 0, memory_usage: 0,
read_align_override: Cell::new(None),
write_align_override: Cell::new(None),
cur_frame: usize::max_value(), cur_frame: usize::max_value(),
} }
} }
@ -98,12 +90,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub fn allocate( pub fn allocate(
&mut self, &mut self,
size: u64, size: u64,
align: u64, align: Align,
kind: Option<MemoryKind<M::MemoryKinds>>, kind: Option<MemoryKind<M::MemoryKinds>>,
) -> EvalResult<'tcx, MemoryPointer> { ) -> EvalResult<'tcx, MemoryPointer> {
assert_ne!(align, 0);
assert!(align.is_power_of_two());
if self.memory_size - self.memory_usage < size { if self.memory_size - self.memory_usage < size {
return err!(OutOfMemory { return err!(OutOfMemory {
allocation_size: size, allocation_size: size,
@ -139,13 +128,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
&mut self, &mut self,
ptr: MemoryPointer, ptr: MemoryPointer,
old_size: u64, old_size: u64,
old_align: u64, old_align: Align,
new_size: u64, new_size: u64,
new_align: u64, new_align: Align,
kind: MemoryKind<M::MemoryKinds>, kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, MemoryPointer> { ) -> EvalResult<'tcx, MemoryPointer> {
use std::cmp::min;
if ptr.offset != 0 { if ptr.offset != 0 {
return err!(ReallocateNonBasePtr); return err!(ReallocateNonBasePtr);
} }
@ -163,9 +150,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
let new_ptr = self.allocate(new_size, new_align, Some(kind))?; let new_ptr = self.allocate(new_size, new_align, Some(kind))?;
self.copy( self.copy(
ptr.into(), ptr.into(),
old_align,
new_ptr.into(), new_ptr.into(),
min(old_size, new_size), new_align,
min(old_align, new_align), old_size.min(new_size),
/*nonoverlapping*/ /*nonoverlapping*/
true, true,
)?; )?;
@ -190,7 +178,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub fn deallocate( pub fn deallocate(
&mut self, &mut self,
ptr: MemoryPointer, ptr: MemoryPointer,
size_and_align: Option<(u64, u64)>, size_and_align: Option<(u64, Align)>,
kind: MemoryKind<M::MemoryKinds>, kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx> { ) -> EvalResult<'tcx> {
if ptr.offset != 0 { if ptr.offset != 0 {
@ -236,7 +224,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
} }
if let Some((size, align)) = size_and_align { if let Some((size, align)) = size_and_align {
if size != alloc.bytes.len() as u64 || align != alloc.align { if size != alloc.bytes.len() as u64 || align != alloc.align {
return err!(IncorrectAllocationInformation(size, alloc.bytes.len(), align, alloc.align)); return err!(IncorrectAllocationInformation(size, alloc.bytes.len(), align.abi(), alloc.align.abi()));
} }
} }
@ -255,7 +243,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
} }
/// Check that the pointer is aligned AND non-NULL. /// Check that the pointer is aligned AND non-NULL.
pub fn check_align(&self, ptr: Pointer, align: u64, access: Option<AccessKind>) -> EvalResult<'tcx> { pub fn check_align(&self, ptr: Pointer, required_align: Align) -> EvalResult<'tcx> {
// Check non-NULL/Undef, extract offset // Check non-NULL/Undef, extract offset
let (offset, alloc_align) = match ptr.into_inner_primval() { let (offset, alloc_align) = match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => { PrimVal::Ptr(ptr) => {
@ -267,30 +255,24 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
if v == 0 { if v == 0 {
return err!(InvalidNullPointerUsage); return err!(InvalidNullPointerUsage);
} }
(v, align) // the base address of the "integer allocation" is 0 and hence always aligned // the base address of the "integer allocation" is 0 and hence always aligned
(v, required_align)
} }
PrimVal::Undef => return err!(ReadUndefBytes), PrimVal::Undef => return err!(ReadUndefBytes),
}; };
// See if alignment checking is disabled
let align_override = match access {
Some(AccessKind::Read) => self.read_align_override.get(),
Some(AccessKind::Write) => self.write_align_override.get(),
None => None,
};
let align = align_override.map_or(align, |o| o.abi().min(align));
// Check alignment // Check alignment
if alloc_align < align { if alloc_align.abi() < required_align.abi() {
return err!(AlignmentCheckFailed { return err!(AlignmentCheckFailed {
has: alloc_align, has: alloc_align.abi(),
required: align, required: required_align.abi(),
}); });
} }
if offset % align == 0 { if offset % required_align.abi() == 0 {
Ok(()) Ok(())
} else { } else {
err!(AlignmentCheckFailed { err!(AlignmentCheckFailed {
has: offset % align, has: offset % required_align.abi(),
required: align, required: required_align.abi(),
}) })
} }
} }
@ -435,7 +417,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
"{}({} bytes, alignment {}){}", "{}({} bytes, alignment {}){}",
msg, msg,
alloc.bytes.len(), alloc.bytes.len(),
alloc.align, alloc.align.abi(),
immutable immutable
); );
@ -480,10 +462,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
&self, &self,
ptr: MemoryPointer, ptr: MemoryPointer,
size: u64, size: u64,
align: u64, align: Align,
) -> EvalResult<'tcx, &[u8]> { ) -> EvalResult<'tcx, &[u8]> {
// Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
self.check_align(ptr.into(), align, Some(AccessKind::Read))?; self.check_align(ptr.into(), align)?;
if size == 0 { if size == 0 {
return Ok(&[]); return Ok(&[]);
} }
@ -500,10 +482,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
&mut self, &mut self,
ptr: MemoryPointer, ptr: MemoryPointer,
size: u64, size: u64,
align: u64, align: Align,
) -> EvalResult<'tcx, &mut [u8]> { ) -> EvalResult<'tcx, &mut [u8]> {
// Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
self.check_align(ptr.into(), align, Some(AccessKind::Write))?; self.check_align(ptr.into(), align)?;
if size == 0 { if size == 0 {
return Ok(&mut []); return Ok(&mut []);
} }
@ -516,7 +498,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
Ok(&mut alloc.bytes[offset..offset + size as usize]) Ok(&mut alloc.bytes[offset..offset + size as usize])
} }
fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> { fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: Align) -> EvalResult<'tcx, &[u8]> {
assert_ne!(size, 0); assert_ne!(size, 0);
if self.relocations(ptr, size)?.count() != 0 { if self.relocations(ptr, size)?.count() != 0 {
return err!(ReadPointerAsBytes); return err!(ReadPointerAsBytes);
@ -529,7 +511,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
&mut self, &mut self,
ptr: MemoryPointer, ptr: MemoryPointer,
size: u64, size: u64,
align: u64, align: Align,
) -> EvalResult<'tcx, &mut [u8]> { ) -> EvalResult<'tcx, &mut [u8]> {
assert_ne!(size, 0); assert_ne!(size, 0);
self.clear_relocations(ptr, size)?; self.clear_relocations(ptr, size)?;
@ -627,14 +609,15 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub fn copy( pub fn copy(
&mut self, &mut self,
src: Pointer, src: Pointer,
src_align: Align,
dest: Pointer, dest: Pointer,
dest_align: Align,
size: u64, size: u64,
align: u64,
nonoverlapping: bool, nonoverlapping: bool,
) -> EvalResult<'tcx> { ) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be aligned // Empty accesses don't need to be valid pointers, but they should still be aligned
self.check_align(src, align, Some(AccessKind::Read))?; self.check_align(src, src_align)?;
self.check_align(dest, align, Some(AccessKind::Write))?; self.check_align(dest, dest_align)?;
if size == 0 { if size == 0 {
return Ok(()); return Ok(());
} }
@ -653,8 +636,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
}) })
.collect(); .collect();
let src_bytes = self.get_bytes_unchecked(src, size, align)?.as_ptr(); let src_bytes = self.get_bytes_unchecked(src, size, src_align)?.as_ptr();
let dest_bytes = self.get_bytes_mut(dest, size, align)?.as_mut_ptr(); let dest_bytes = self.get_bytes_mut(dest, size, dest_align)?.as_mut_ptr();
// SAFE: The above indexing would have panicked if there weren't at least `size` bytes // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
// behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
@ -703,41 +686,44 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> { pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL // Empty accesses don't need to be valid pointers, but they should still be non-NULL
self.check_align(ptr, 1, Some(AccessKind::Read))?; let align = Align::from_bytes(1, 1).unwrap();
self.check_align(ptr, align)?;
if size == 0 { if size == 0 {
return Ok(&[]); return Ok(&[]);
} }
self.get_bytes(ptr.to_ptr()?, size, 1) self.get_bytes(ptr.to_ptr()?, size, align)
} }
pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> { pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL // Empty accesses don't need to be valid pointers, but they should still be non-NULL
self.check_align(ptr, 1, Some(AccessKind::Write))?; let align = Align::from_bytes(1, 1).unwrap();
self.check_align(ptr, align)?;
if src.is_empty() { if src.is_empty() {
return Ok(()); return Ok(());
} }
let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?; let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, align)?;
bytes.clone_from_slice(src); bytes.clone_from_slice(src);
Ok(()) Ok(())
} }
pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> { pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL // Empty accesses don't need to be valid pointers, but they should still be non-NULL
self.check_align(ptr, 1, Some(AccessKind::Write))?; let align = Align::from_bytes(1, 1).unwrap();
self.check_align(ptr, align)?;
if count == 0 { if count == 0 {
return Ok(()); return Ok(());
} }
let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?; let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?;
for b in bytes { for b in bytes {
*b = val; *b = val;
} }
Ok(()) Ok(())
} }
pub fn read_primval(&self, ptr: MemoryPointer, size: u64, signed: bool) -> EvalResult<'tcx, PrimVal> { pub fn read_primval(&self, ptr: MemoryPointer, ptr_align: Align, size: u64, signed: bool) -> EvalResult<'tcx, PrimVal> {
self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
let endianess = self.endianess(); let endianess = self.endianess();
let bytes = self.get_bytes_unchecked(ptr, size, self.int_align(size))?; let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?;
// Undef check happens *after* we established that the alignment is correct. // Undef check happens *after* we established that the alignment is correct.
// We must not return Ok() for unaligned pointers! // We must not return Ok() for unaligned pointers!
if self.check_defined(ptr, size).is_err() { if self.check_defined(ptr, size).is_err() {
@ -765,11 +751,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
Ok(PrimVal::Bytes(bytes)) Ok(PrimVal::Bytes(bytes))
} }
pub fn read_ptr_sized_unsigned(&self, ptr: MemoryPointer) -> EvalResult<'tcx, PrimVal> { pub fn read_ptr_sized_unsigned(&self, ptr: MemoryPointer, ptr_align: Align) -> EvalResult<'tcx, PrimVal> {
self.read_primval(ptr, self.pointer_size(), false) self.read_primval(ptr, ptr_align, self.pointer_size(), false)
} }
pub fn write_primval(&mut self, ptr: MemoryPointer, val: PrimVal, size: u64, signed: bool) -> EvalResult<'tcx> { pub fn write_primval(&mut self, ptr: MemoryPointer, ptr_align: Align, val: PrimVal, size: u64, signed: bool) -> EvalResult<'tcx> {
let endianess = self.endianess(); let endianess = self.endianess();
let bytes = match val { let bytes = match val {
@ -800,7 +786,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
{ {
let align = self.int_align(size); let align = self.int_align(size);
let dst = self.get_bytes_mut(ptr, size, align)?; let dst = self.get_bytes_mut(ptr, size, ptr_align.min(align))?;
if signed { if signed {
write_target_int(endianess, dst, bytes as i128).unwrap(); write_target_int(endianess, dst, bytes as i128).unwrap();
} else { } else {
@ -822,22 +808,23 @@ impl<'a, 'tcx, M: Machine<'tcx>> Memory<'a, 'tcx, M> {
Ok(()) Ok(())
} }
pub fn write_ptr_sized_unsigned(&mut self, ptr: MemoryPointer, val: PrimVal) -> EvalResult<'tcx> { pub fn write_ptr_sized_unsigned(&mut self, ptr: MemoryPointer, ptr_align: Align, val: PrimVal) -> EvalResult<'tcx> {
let ptr_size = self.pointer_size(); let ptr_size = self.pointer_size();
self.write_primval(ptr, val, ptr_size, false) self.write_primval(ptr, ptr_align, val, ptr_size, false)
} }
fn int_align(&self, size: u64) -> u64 { fn int_align(&self, size: u64) -> Align {
// We assume pointer-sized integers have the same alignment as pointers. // We assume pointer-sized integers have the same alignment as pointers.
// We also assume signed and unsigned integers of the same size have the same alignment. // We also assume signed and unsigned integers of the same size have the same alignment.
match size { let ity = match size {
1 => self.tcx.data_layout.i8_align.abi(), 1 => layout::I8,
2 => self.tcx.data_layout.i16_align.abi(), 2 => layout::I16,
4 => self.tcx.data_layout.i32_align.abi(), 4 => layout::I32,
8 => self.tcx.data_layout.i64_align.abi(), 8 => layout::I64,
16 => self.tcx.data_layout.i128_align.abi(), 16 => layout::I128,
_ => bug!("bad integer size: {}", size), _ => bug!("bad integer size: {}", size),
} };
ity.align(self)
} }
} }
@ -1002,43 +989,6 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M>; fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M>;
fn memory(&self) -> &Memory<'a, 'tcx, M>; fn memory(&self) -> &Memory<'a, 'tcx, M>;
// These are not supposed to be overridden.
fn read_with_align<F, T>(&self, align: Align, f: F) -> EvalResult<'tcx, T>
where
F: FnOnce(&Self) -> EvalResult<'tcx, T>,
{
let old = self.memory().read_align_override.get();
// Do alignment checking for the minimum align out of *all* nested calls.
self.memory().read_align_override.set(Some(old.map_or(align, |old| old.min(align))));
let t = f(self);
self.memory().read_align_override.set(old);
t
}
fn read_with_align_mut<F, T>(&mut self, align: Align, f: F) -> EvalResult<'tcx, T>
where
F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
{
let old = self.memory().read_align_override.get();
// Do alignment checking for the minimum align out of *all* nested calls.
self.memory().read_align_override.set(Some(old.map_or(align, |old| old.min(align))));
let t = f(self);
self.memory().read_align_override.set(old);
t
}
fn write_with_align_mut<F, T>(&mut self, align: Align, f: F) -> EvalResult<'tcx, T>
where
F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
{
let old = self.memory().write_align_override.get();
// Do alignment checking for the minimum align out of *all* nested calls.
self.memory().write_align_override.set(Some(old.map_or(align, |old| old.min(align))));
let t = f(self);
self.memory().write_align_override.set(old);
t
}
/// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef, /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef,
/// this may have to perform a load. /// this may have to perform a load.
fn into_ptr( fn into_ptr(
@ -1047,7 +997,7 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
) -> EvalResult<'tcx, Pointer> { ) -> EvalResult<'tcx, Pointer> {
Ok(match value { Ok(match value {
Value::ByRef(ptr, align) => { Value::ByRef(ptr, align) => {
self.memory().read_with_align(align, |mem| mem.read_ptr_sized_unsigned(ptr.to_ptr()?))? self.memory().read_ptr_sized_unsigned(ptr.to_ptr()?, align)?
} }
Value::ByVal(ptr) | Value::ByVal(ptr) |
Value::ByValPair(ptr, _) => ptr, Value::ByValPair(ptr, _) => ptr,
@ -1060,13 +1010,13 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
) -> EvalResult<'tcx, (Pointer, MemoryPointer)> { ) -> EvalResult<'tcx, (Pointer, MemoryPointer)> {
match value { match value {
Value::ByRef(ref_ptr, align) => { Value::ByRef(ref_ptr, align) => {
self.memory().read_with_align(align, |mem| { let mem = self.memory();
let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into(); let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?, align)?.into();
let vtable = mem.read_ptr_sized_unsigned( let vtable = mem.read_ptr_sized_unsigned(
ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
)?.to_ptr()?; align
Ok((ptr, vtable)) )?.to_ptr()?;
}) Ok((ptr, vtable))
} }
Value::ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)), Value::ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)),
@ -1082,13 +1032,13 @@ pub trait HasMemory<'a, 'tcx: 'a, M: Machine<'tcx>> {
) -> EvalResult<'tcx, (Pointer, u64)> { ) -> EvalResult<'tcx, (Pointer, u64)> {
match value { match value {
Value::ByRef(ref_ptr, align) => { Value::ByRef(ref_ptr, align) => {
self.memory().read_with_align(align, |mem| { let mem = self.memory();
let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?)?.into(); let ptr = mem.read_ptr_sized_unsigned(ref_ptr.to_ptr()?, align)?.into();
let len = mem.read_ptr_sized_unsigned( let len = mem.read_ptr_sized_unsigned(
ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?,
)?.to_bytes()? as u64; align
Ok((ptr, len)) )?.to_bytes()? as u64;
}) Ok((ptr, len))
} }
Value::ByValPair(ptr, val) => { Value::ByValPair(ptr, val) => {
let len = val.to_u128()?; let len = val.to_u128()?;

View File

@ -179,7 +179,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
assert!(!layout.is_unsized()); assert!(!layout.is_unsized());
let ptr = self.memory.allocate( let ptr = self.memory.allocate(
layout.size.bytes(), layout.size.bytes(),
layout.align.abi(), layout.align,
None, None,
)?; )?;
self.tcx.interpret_interner.borrow_mut().cache(cid, ptr.into()); self.tcx.interpret_interner.borrow_mut().cache(cid, ptr.into());
@ -264,7 +264,7 @@ impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b,
assert!(!layout.is_unsized()); assert!(!layout.is_unsized());
let ptr = this.ecx.memory.allocate( let ptr = this.ecx.memory.allocate(
layout.size.bytes(), layout.size.bytes(),
layout.align.abi(), layout.align,
None, None,
)?; )?;
this.ecx.tcx.interpret_interner.borrow_mut().cache(cid, ptr.into()); this.ecx.tcx.interpret_interner.borrow_mut().cache(cid, ptr.into());

View File

@ -400,9 +400,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// cannot use the shim here, because that will only result in infinite recursion // cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => { ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory.pointer_size(); let ptr_size = self.memory.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?; let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?;
let fn_ptr = self.memory.read_ptr_sized_unsigned( let fn_ptr = self.memory.read_ptr_sized_unsigned(
vtable.offset(ptr_size * (idx as u64 + 3), &self)? vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
ptr_align
)?.to_ptr()?; )?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?; let instance = self.memory.get_fn(fn_ptr)?;
let mut args = args.to_vec(); let mut args = args.to_vec();

View File

@ -26,28 +26,29 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let align = layout.align.abi(); let align = layout.align.abi();
let ptr_size = self.memory.pointer_size(); let ptr_size = self.memory.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let methods = self.tcx.vtable_methods(trait_ref); let methods = self.tcx.vtable_methods(trait_ref);
let vtable = self.memory.allocate( let vtable = self.memory.allocate(
ptr_size * (3 + methods.len() as u64), ptr_size * (3 + methods.len() as u64),
ptr_size, ptr_align,
None, None,
)?; )?;
let drop = eval_context::resolve_drop_in_place(self.tcx, ty); let drop = eval_context::resolve_drop_in_place(self.tcx, ty);
let drop = self.memory.create_fn_alloc(drop); let drop = self.memory.create_fn_alloc(drop);
self.memory.write_ptr_sized_unsigned(vtable, PrimVal::Ptr(drop))?; self.memory.write_ptr_sized_unsigned(vtable, ptr_align, PrimVal::Ptr(drop))?;
let size_ptr = vtable.offset(ptr_size, &self)?; let size_ptr = vtable.offset(ptr_size, &self)?;
self.memory.write_ptr_sized_unsigned(size_ptr, PrimVal::Bytes(size as u128))?; self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, PrimVal::Bytes(size as u128))?;
let align_ptr = vtable.offset(ptr_size * 2, &self)?; let align_ptr = vtable.offset(ptr_size * 2, &self)?;
self.memory.write_ptr_sized_unsigned(align_ptr, PrimVal::Bytes(align as u128))?; self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, PrimVal::Bytes(align as u128))?;
for (i, method) in methods.iter().enumerate() { for (i, method) in methods.iter().enumerate() {
if let Some((def_id, substs)) = *method { if let Some((def_id, substs)) = *method {
let instance = self.resolve(def_id, substs)?; let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory.create_fn_alloc(instance); let fn_ptr = self.memory.create_fn_alloc(instance);
let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?; let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?;
self.memory.write_ptr_sized_unsigned(method_ptr, PrimVal::Ptr(fn_ptr))?; self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, PrimVal::Ptr(fn_ptr))?;
} }
} }
@ -64,7 +65,8 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
vtable: MemoryPointer, vtable: MemoryPointer,
) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> { ) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> {
// we don't care about the pointee type, we just want a pointer // we don't care about the pointee type, we just want a pointer
match self.read_ptr(vtable, self.tcx.mk_nil_ptr())? { let pointer_align = self.tcx.data_layout.pointer_align;
match self.read_ptr(vtable, pointer_align, self.tcx.mk_nil_ptr())? {
// some values don't need to call a drop impl, so the value is null // some values don't need to call a drop impl, so the value is null
Value::ByVal(PrimVal::Bytes(0)) => Ok(None), Value::ByVal(PrimVal::Bytes(0)) => Ok(None),
Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some), Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some),
@ -77,9 +79,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
vtable: MemoryPointer, vtable: MemoryPointer,
) -> EvalResult<'tcx, (Size, Align)> { ) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.memory.pointer_size(); let pointer_size = self.memory.pointer_size();
let size = self.memory.read_ptr_sized_unsigned(vtable.offset(pointer_size, self)?)?.to_bytes()? as u64; let pointer_align = self.tcx.data_layout.pointer_align;
let size = self.memory.read_ptr_sized_unsigned(vtable.offset(pointer_size, self)?, pointer_align)?.to_bytes()? as u64;
let align = self.memory.read_ptr_sized_unsigned( let align = self.memory.read_ptr_sized_unsigned(
vtable.offset(pointer_size * 2, self)? vtable.offset(pointer_size * 2, self)?,
pointer_align
)?.to_bytes()? as u64; )?.to_bytes()? as u64;
Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap())) Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap()))
} }