Adjust rustc_mir::interpret to changes in Allocation/Memory methods

commit 07e7804110
parent 9ecde5712e
Author: Oliver Scherer
Date:   2018-11-12 13:26:53 +01:00

7 changed files with 146 additions and 76 deletions
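The pattern behind every hunk below is the same: byte-level accessors that used to live on `Memory` (which can resolve an `AllocId` and reach the target data layout through `self.tcx`) now live on `Allocation`, which has neither, so each method gains a `cx: &impl HasDataLayout` parameter and takes an already-resolved `Pointer<Tag>`, while callers look the allocation up themselves via `memory.get(ptr.alloc_id)?`. A minimal, self-contained sketch of that call shape; every type here is an invented stand-in, not rustc's real definition:

    use std::collections::HashMap;

    // Invented stand-ins for the rustc types in this diff; only the
    // shape of the API is meant to match.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct AllocId(u64);

    struct Allocation { bytes: Vec<u8> }

    struct DataLayout { pointer_size: u64 }

    trait HasDataLayout {
        fn data_layout(&self) -> &DataLayout;
    }

    struct Memory {
        layout: DataLayout,
        allocs: HashMap<AllocId, Allocation>,
    }

    impl HasDataLayout for Memory {
        fn data_layout(&self) -> &DataLayout { &self.layout }
    }

    impl Memory {
        // Memory keeps the id -> allocation map ...
        fn get(&self, id: AllocId) -> Result<&Allocation, String> {
            self.allocs.get(&id).ok_or_else(|| "dangling AllocId".to_string())
        }
    }

    impl Allocation {
        // ... while Allocation does the byte work, with target facts coming
        // in through `cx` because it has no tcx of its own.
        fn read_bytes(&self, _cx: &impl HasDataLayout, offset: u64, size: u64)
            -> Result<&[u8], String>
        {
            self.bytes
                .get(offset as usize..(offset + size) as usize)
                .ok_or_else(|| "out of bounds".to_string())
        }
    }

    fn main() -> Result<(), String> {
        let mut allocs = HashMap::new();
        allocs.insert(AllocId(0), Allocation { bytes: b"hello\0".to_vec() });
        let mem = Memory { layout: DataLayout { pointer_size: 8 }, allocs };
        assert_eq!(mem.data_layout().pointer_size, 8);
        // The caller-side shape used throughout this commit:
        let bytes = mem.get(AllocId(0))?.read_bytes(&mem, 0, 5)?;
        assert_eq!(bytes, &b"hello"[..]);
        Ok(())
    }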

(file 1 of 7)

@@ -10,9 +10,12 @@
 //! The virtual memory representation of the MIR interpreter

-use super::{Pointer, EvalResult, AllocId};
+use super::{
+    Pointer, EvalResult, AllocId, ScalarMaybeUndef, write_target_uint, read_target_uint, Scalar,
+    truncate,
+};

-use ty::layout::{Size, Align};
+use ty::layout::{self, Size, Align};
 use syntax::ast::Mutability;
 use std::iter;
 use mir;
@@ -88,16 +91,19 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
 /// Reading and writing
 impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
-    pub fn read_c_str(&self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx, &[u8]> {
-        let alloc = self.get(ptr.alloc_id)?;
+    pub fn read_c_str(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+    ) -> EvalResult<'tcx, &[u8]> {
         assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
         let offset = ptr.offset.bytes() as usize;
-        match alloc.bytes[offset..].iter().position(|&c| c == 0) {
+        match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
                 let p1 = Size::from_bytes((size + 1) as u64);
-                self.check_relocations(ptr, p1)?;
+                self.check_relocations(cx, ptr, p1)?;
                 self.check_defined(ptr, p1)?;
-                Ok(&alloc.bytes[offset..offset + size])
+                Ok(&self.bytes[offset..offset + size])
             }
             None => err!(UnterminatedCString(ptr.erase_tag())),
         }
@@ -105,7 +111,8 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     pub fn check_bytes(
         &self,
-        ptr: Scalar<M::PointerTag>,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
         size: Size,
         allow_ptr_and_undef: bool,
     ) -> EvalResult<'tcx> {
@@ -115,42 +122,54 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
             self.check_align(ptr, align)?;
             return Ok(());
         }
-        let ptr = ptr.to_ptr()?;
         // Check bounds, align and relocations on the edges
-        self.get_bytes_with_undef_and_ptr(ptr, size, align)?;
+        self.get_bytes_with_undef_and_ptr(cx, ptr, size, align)?;
         // Check undef and ptr
         if !allow_ptr_and_undef {
             self.check_defined(ptr, size)?;
-            self.check_relocations(ptr, size)?;
+            self.check_relocations(cx, ptr, size)?;
         }
         Ok(())
     }

-    pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> EvalResult<'tcx, &[u8]> {
+    pub fn read_bytes(
+        &self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        size: Size,
+    ) -> EvalResult<'tcx, &[u8]> {
         // Empty accesses don't need to be valid pointers, but they should still be non-NULL
         let align = Align::from_bytes(1).unwrap();
         if size.bytes() == 0 {
             self.check_align(ptr, align)?;
             return Ok(&[]);
         }
-        self.get_bytes(ptr.to_ptr()?, size, align)
+        self.get_bytes(cx, ptr, size, align)
     }

-    pub fn write_bytes(&mut self, ptr: Scalar<M::PointerTag>, src: &[u8]) -> EvalResult<'tcx> {
+    pub fn write_bytes(
+        &mut self,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
+        src: &[u8],
+    ) -> EvalResult<'tcx> {
         // Empty accesses don't need to be valid pointers, but they should still be non-NULL
         let align = Align::from_bytes(1).unwrap();
         if src.is_empty() {
             self.check_align(ptr, align)?;
             return Ok(());
         }
-        let bytes = self.get_bytes_mut(ptr.to_ptr()?, Size::from_bytes(src.len() as u64), align)?;
+        let bytes = self.get_bytes_mut(
+            cx, ptr, Size::from_bytes(src.len() as u64), align,
+        )?;
         bytes.clone_from_slice(src);
         Ok(())
     }

     pub fn write_repeat(
         &mut self,
-        ptr: Scalar<M::PointerTag>,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
         val: u8,
         count: Size
     ) -> EvalResult<'tcx> {
@@ -160,7 +179,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
             self.check_align(ptr, align)?;
             return Ok(());
         }
-        let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?;
+        let bytes = self.get_bytes_mut(cx, ptr, count, align)?;
         for b in bytes {
             *b = val;
         }
@@ -170,13 +189,14 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     /// Read a *non-ZST* scalar
     pub fn read_scalar(
         &self,
-        ptr: Pointer<M::PointerTag>,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
         ptr_align: Align,
         size: Size
-    ) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
+    ) -> EvalResult<'tcx, ScalarMaybeUndef<Tag>> {
         // get_bytes_unchecked tests alignment and relocation edges
         let bytes = self.get_bytes_with_undef_and_ptr(
-            ptr, size, ptr_align.min(self.int_align(size))
+            cx, ptr, size, ptr_align.min(self.int_align(cx, size))
         )?;
         // Undef check happens *after* we established that the alignment is correct.
         // We must not return Ok() for unaligned pointers!
@@ -186,14 +206,13 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
             return Ok(ScalarMaybeUndef::Undef);
         }
         // Now we do the actual reading
-        let bits = read_target_uint(self.tcx.data_layout.endian, bytes).unwrap();
+        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
         // See if we got a pointer
-        if size != self.pointer_size() {
+        if size != cx.data_layout().pointer_size {
             // *Now* better make sure that the inside also is free of relocations.
-            self.check_relocations(ptr, size)?;
+            self.check_relocations(cx, ptr, size)?;
         } else {
-            let alloc = self.get(ptr.alloc_id)?;
-            match alloc.relocations.get(&ptr.offset) {
+            match self.relocations.get(&ptr.offset) {
                 Some(&(tag, alloc_id)) => {
                     let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
                     return Ok(ScalarMaybeUndef::Scalar(ptr.into()))
@@ -207,18 +226,20 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     pub fn read_ptr_sized(
         &self,
-        ptr: Pointer<M::PointerTag>,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
         ptr_align: Align
-    ) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
-        self.read_scalar(ptr, ptr_align, self.pointer_size())
+    ) -> EvalResult<'tcx, ScalarMaybeUndef<Tag>> {
+        self.read_scalar(cx, ptr, ptr_align, cx.data_layout().pointer_size)
     }

     /// Write a *non-ZST* scalar
     pub fn write_scalar(
         &mut self,
-        ptr: Pointer<M::PointerTag>,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
         ptr_align: Align,
-        val: ScalarMaybeUndef<M::PointerTag>,
+        val: ScalarMaybeUndef<Tag>,
         type_size: Size,
     ) -> EvalResult<'tcx> {
         let val = match val {
@@ -228,7 +249,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let bytes = match val {
             Scalar::Ptr(val) => {
-                assert_eq!(type_size, self.pointer_size());
+                assert_eq!(type_size, cx.data_layout().pointer_size);
                 val.offset.bytes() as u128
             }
@@ -242,15 +263,15 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         {
             // get_bytes_mut checks alignment
-            let endian = self.tcx.data_layout.endian;
-            let dst = self.get_bytes_mut(ptr, type_size, ptr_align)?;
+            let endian = cx.data_layout().endian;
+            let dst = self.get_bytes_mut(cx, ptr, type_size, ptr_align)?;
             write_target_uint(endian, dst, bytes).unwrap();
         }
         // See if we have to also write a relocation
         match val {
             Scalar::Ptr(val) => {
-                self.get_mut(ptr.alloc_id)?.relocations.insert(
+                self.relocations.insert(
                     ptr.offset,
                     (val.tag, val.alloc_id),
                 );
@@ -263,15 +284,20 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     pub fn write_ptr_sized(
         &mut self,
-        ptr: Pointer<M::PointerTag>,
+        cx: &impl HasDataLayout,
+        ptr: Pointer<Tag>,
         ptr_align: Align,
-        val: ScalarMaybeUndef<M::PointerTag>
+        val: ScalarMaybeUndef<Tag>
     ) -> EvalResult<'tcx> {
-        let ptr_size = self.pointer_size();
-        self.write_scalar(ptr.into(), ptr_align, val, ptr_size)
+        let ptr_size = cx.data_layout().pointer_size;
+        self.write_scalar(cx, ptr.into(), ptr_align, val, ptr_size)
     }

-    fn int_align(&self, size: Size) -> Align {
+    fn int_align(
+        &self,
+        cx: &impl HasDataLayout,
+        size: Size,
+    ) -> Align {
         // We assume pointer-sized integers have the same alignment as pointers.
         // We also assume signed and unsigned integers of the same size have the same alignment.
         let ity = match size.bytes() {
@@ -282,7 +308,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
             16 => layout::I128,
             _ => bug!("bad integer size: {}", size.bytes()),
         };
-        ity.align(self).abi
+        ity.align(cx).abi
     }
 }
@@ -337,7 +363,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     /// It is the caller's responsibility to handle undefined and pointer bytes.
     /// However, this still checks that there are no relocations on the *edges*.
     #[inline]
-    fn get_bytes_with_undef_and_ptr(
+    pub fn get_bytes_with_undef_and_ptr(
         &self,
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
@@ -349,7 +375,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     /// Just calling this already marks everything as defined and removes relocations,
     /// so be sure to actually put data there!
-    fn get_bytes_mut(
+    pub fn get_bytes_mut(
         &mut self,
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
@@ -375,7 +401,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
 /// Relocations
 impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
     /// Return all relocations overlapping with the given ptr-offset pair.
-    fn relocations(
+    pub fn relocations(
         &self,
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
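`read_scalar` and `write_scalar` above funnel every multi-byte value through `read_target_uint`/`write_target_uint`, with the endianness now taken from `cx.data_layout()` instead of `self.tcx`. A rough, self-contained sketch of what those helpers compute (the real ones are re-exported from `rustc::mir::interpret` and handle errors differently):

    #[derive(Clone, Copy)]
    enum Endian { Little, Big }

    fn read_target_uint(endian: Endian, bytes: &[u8]) -> u128 {
        assert!(!bytes.is_empty() && bytes.len() <= 16);
        match endian {
            // least significant byte is stored first, so fold from the far end
            Endian::Little => bytes.iter().rev().fold(0, |acc, &b| (acc << 8) | b as u128),
            // most significant byte is stored first, so fold in order
            Endian::Big => bytes.iter().fold(0, |acc, &b| (acc << 8) | b as u128),
        }
    }

    fn write_target_uint(endian: Endian, dst: &mut [u8], mut value: u128) {
        let n = dst.len();
        for i in 0..n {
            let byte = (value & 0xff) as u8;
            match endian {
                Endian::Little => dst[i] = byte,
                Endian::Big => dst[n - 1 - i] = byte,
            }
            value >>= 8;
        }
    }

    fn main() {
        let mut buf = [0u8; 4];
        write_target_uint(Endian::Little, &mut buf, 0xdead);
        assert_eq!(buf, [0xad, 0xde, 0x00, 0x00]);
        assert_eq!(read_target_uint(Endian::Little, &buf), 0xdead);
        assert_eq!(read_target_uint(Endian::Big, &[0xde, 0xad]), 0xdead);
    }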

(file 2 of 7)

@@ -21,7 +21,7 @@ use std::ptr;
 use std::borrow::Cow;

 use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
-use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout};
+use rustc::ty::layout::{Align, TargetDataLayout, Size, HasDataLayout};
 pub use rustc::mir::interpret::{truncate, write_target_uint, read_target_uint};

 use rustc_data_structures::fx::{FxHashSet, FxHashMap};
@@ -30,7 +30,7 @@ use syntax::ast::Mutability;
 use super::{
     Pointer, AllocId, Allocation, GlobalId, AllocationExtra, InboundsCheck,
     EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic,
-    Machine, AllocMap, MayLeak, ScalarMaybeUndef, ErrorHandled,
+    Machine, AllocMap, MayLeak, ErrorHandled,
 };

 #[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
@@ -655,7 +655,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
         // (`get_bytes_with_undef_and_ptr` below checks that there are no
         // relocations overlapping the edges; those would not be handled correctly).
         let relocations = {
-            let relocations = self.relocations(src, size)?;
+            let relocations = self.get(src.alloc_id)?.relocations(self, src, size)?;
             let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
             for i in 0..length {
                 new_relocations.extend(
@@ -671,9 +671,15 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
             new_relocations
         };

+        let tcx = self.tcx.tcx;
         // This also checks alignment, and relocation edges on the src.
-        let src_bytes = self.get_bytes_with_undef_and_ptr(src, size, src_align)?.as_ptr();
-        let dest_bytes = self.get_bytes_mut(dest, size * length, dest_align)?.as_mut_ptr();
+        let src_bytes = self.get(src.alloc_id)?
+            .get_bytes_with_undef_and_ptr(&tcx, src, size, src_align)?
+            .as_ptr();
+        let dest_bytes = self.get_mut(dest.alloc_id)?
+            .get_bytes_mut(&tcx, dest, size * length, dest_align)?
+            .as_mut_ptr();

         // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
         // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
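The `copy` hunk above duplicates relocations when one source range is copied `length` times: each relocation offset is rebased from the source range into the i-th destination copy. A sketch of just that arithmetic, with plain integers standing in for `Size` and `AllocId` (names and representation invented for illustration):

    fn shifted_relocations(
        relocations: &[(u64, u32)], // (offset within source alloc, target alloc id)
        src: u64,                   // start of the copied range in the source
        dest: u64,                  // start of the first copy in the destination
        size: u64,                  // length of one copy
        length: u64,                // number of repetitions
    ) -> Vec<(u64, u32)> {
        let mut new_relocations = Vec::with_capacity(relocations.len() * length as usize);
        for i in 0..length {
            new_relocations.extend(relocations.iter().map(|&(offset, alloc_id)| {
                // keep the position relative to the range start, then place it
                // into the i-th copy of that range in the destination
                (offset + dest - src + i * size, alloc_id)
            }));
        }
        new_relocations
    }

    fn main() {
        // one relocation 2 bytes into an 8-byte range, copied 3 times
        let r = shifted_relocations(&[(2, 7)], 0, 100, 8, 3);
        assert_eq!(r, vec![(102, 7), (110, 7), (118, 7)]);
    }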

(file 3 of 7)

@@ -278,7 +278,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
         let ptr = ptr.to_ptr()?;
         match mplace.layout.abi {
             layout::Abi::Scalar(..) => {
-                let scalar = self.memory.read_scalar(ptr, ptr_align, mplace.layout.size)?;
+                let scalar = self.memory
+                    .get(ptr.alloc_id)?
+                    .read_scalar(self, ptr, ptr_align, mplace.layout.size)?;
                 Ok(Some(Immediate::Scalar(scalar)))
             }
             layout::Abi::ScalarPair(ref a, ref b) => {
@@ -288,8 +290,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
                 let b_offset = a_size.align_to(b.align(self).abi);
                 assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use
                 let b_ptr = ptr.offset(b_offset, self)?.into();
-                let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
-                let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?;
+                let a_val = self.memory
+                    .get(ptr.alloc_id)?
+                    .read_scalar(self, a_ptr, ptr_align, a_size)?;
+                let b_val = self.memory
+                    .get(ptr.alloc_id)?
+                    .read_scalar(self, b_ptr, ptr_align, b_size)?;
                 Ok(Some(Immediate::ScalarPair(a_val, b_val)))
             }
             _ => Ok(None),
@@ -345,7 +351,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
         mplace: MPlaceTy<'tcx, M::PointerTag>,
     ) -> EvalResult<'tcx, &str> {
         let len = mplace.len(self)?;
-        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?;
+        let ptr = mplace.ptr.to_ptr()?;
+        let bytes = self.memory
+            .get(ptr.alloc_id)?
+            .read_bytes(self, ptr, Size::from_bytes(len as u64))?;
         let str = ::std::str::from_utf8(bytes)
             .map_err(|err| EvalErrorKind::ValidationFailure(err.to_string()))?;
         Ok(str)
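`read_str` above resolves the place to a pointer, pulls `len` bytes through the new `Allocation::read_bytes`, and turns UTF-8 failure into a validation error. Roughly, with a stand-in error type instead of `EvalErrorKind`:

    #[derive(Debug)]
    struct EvalError(String);

    fn read_str(bytes: &[u8], len: usize) -> Result<&str, EvalError> {
        // fetch exactly `len` bytes, erroring on out-of-bounds ...
        let bytes = bytes.get(..len).ok_or_else(|| EvalError("out of bounds".into()))?;
        // ... then report invalid UTF-8 as a validation failure
        std::str::from_utf8(bytes).map_err(|err| EvalError(err.to_string()))
    }

    fn main() {
        assert_eq!(read_str(b"hello world", 5).unwrap(), "hello");
        assert!(read_str(&[0xff, 0xfe], 2).is_err());
    }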

(file 4 of 7)

@@ -718,6 +718,7 @@ where
         }

         let ptr = ptr.to_ptr()?;
+        let tcx = &*self.tcx;
         // FIXME: We should check that there are dest.layout.size many bytes available in
         // memory. The code below is not sufficient, with enough padding it might not
         // cover all the bytes!
@@ -729,8 +730,8 @@ where
                         dest.layout)
                 }
-                self.memory.write_scalar(
-                    ptr, ptr_align.min(dest.layout.align.abi), scalar, dest.layout.size
+                self.memory.get_mut(ptr.alloc_id)?.write_scalar(
+                    tcx, ptr, ptr_align.min(dest.layout.align.abi), scalar, dest.layout.size
                 )
             }
             Immediate::ScalarPair(a_val, b_val) => {
@@ -742,14 +743,18 @@ where
                 let (a_size, b_size) = (a.size(self), b.size(self));
                 let (a_align, b_align) = (a.align(self).abi, b.align(self).abi);
                 let b_offset = a_size.align_to(b_align);
-                let b_ptr = ptr.offset(b_offset, self)?.into();
+                let b_ptr = ptr.offset(b_offset, self)?;

                 // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                 // but that does not work: We could be a newtype around a pair, then the
                 // fields do not match the `ScalarPair` components.

-                self.memory.write_scalar(ptr, ptr_align.min(a_align), a_val, a_size)?;
-                self.memory.write_scalar(b_ptr, ptr_align.min(b_align), b_val, b_size)
+                self.memory
+                    .get_mut(ptr.alloc_id)?
+                    .write_scalar(tcx, ptr, ptr_align.min(a_align), a_val, a_size)?;
+                self.memory
+                    .get_mut(b_ptr.alloc_id)?
+                    .write_scalar(tcx, b_ptr, ptr_align.min(b_align), b_val, b_size)
             }
         }
     }
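Both here and in the previous file, the second `ScalarPair` component is located at `a_size.align_to(b_align)` rather than at `layout.fields.offset(1)`, for the newtype reason given in the comment. The rounding itself is ordinary align-up arithmetic; a sketch, checked against a `repr(C)` pair where the offset is guaranteed:

    #[repr(C)]
    struct Pair { a: u8, b: u32 }

    // round `size` up to the next multiple of `align` (rustc: Size::align_to)
    fn align_to(size: usize, align: usize) -> usize {
        assert!(align.is_power_of_two());
        (size + align - 1) & !(align - 1)
    }

    fn main() {
        let p = Pair { a: 1, b: 2 };
        let base = &p as *const Pair as usize;
        let b_addr = &p.b as *const u32 as usize;
        // the u32 of a (u8, u32) pair starts at align_to(1, 4) == 4, not at 1
        assert_eq!(
            b_addr - base,
            align_to(std::mem::size_of::<u8>(), std::mem::align_of::<u32>()),
        );
        assert_eq!(align_to(4, 4), 4); // already aligned: unchanged
    }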

(file 5 of 7)

@@ -404,7 +404,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
                 let ptr_align = self.tcx.data_layout.pointer_align.abi;
                 let ptr = self.deref_operand(args[0])?;
                 let vtable = ptr.vtable()?;
-                let fn_ptr = self.memory.read_ptr_sized(
+                let fn_ptr = self.memory.get(vtable.alloc_id)?.read_ptr_sized(
+                    self,
                     vtable.offset(ptr_size * (idx as u64 + 3), self)?,
                     ptr_align
                 )?.to_ptr()?;

(file 6 of 7)

@@ -55,23 +55,31 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
             ptr_align,
             MemoryKind::Vtable,
         )?.with_default_tag();
+        let tcx = &*self.tcx;

-        let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
+        let drop = ::monomorphize::resolve_drop_in_place(*tcx, ty);
         let drop = self.memory.create_fn_alloc(drop).with_default_tag();
-        self.memory.write_ptr_sized(vtable, ptr_align, Scalar::Ptr(drop).into())?;
+        self.memory
+            .get_mut(vtable.alloc_id)?
+            .write_ptr_sized(tcx, vtable, ptr_align, Scalar::Ptr(drop).into())?;

         let size_ptr = vtable.offset(ptr_size, self)?;
-        self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::from_uint(size, ptr_size).into())?;
+        self.memory
+            .get_mut(size_ptr.alloc_id)?
+            .write_ptr_sized(tcx, size_ptr, ptr_align, Scalar::from_uint(size, ptr_size).into())?;
         let align_ptr = vtable.offset(ptr_size * 2, self)?;
-        self.memory.write_ptr_sized(align_ptr, ptr_align,
-            Scalar::from_uint(align, ptr_size).into())?;
+        self.memory
+            .get_mut(align_ptr.alloc_id)?
+            .write_ptr_sized(tcx, align_ptr, ptr_align, Scalar::from_uint(align, ptr_size).into())?;

         for (i, method) in methods.iter().enumerate() {
             if let Some((def_id, substs)) = *method {
                 let instance = self.resolve(def_id, substs)?;
                 let fn_ptr = self.memory.create_fn_alloc(instance).with_default_tag();
                 let method_ptr = vtable.offset(ptr_size * (3 + i as u64), self)?;
-                self.memory.write_ptr_sized(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?;
+                self.memory
+                    .get_mut(method_ptr.alloc_id)?
+                    .write_ptr_sized(tcx, method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?;
             }
         }
@@ -88,7 +96,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
     ) -> EvalResult<'tcx, (ty::Instance<'tcx>, ty::Ty<'tcx>)> {
         // we don't care about the pointee type, we just want a pointer
         let pointer_align = self.tcx.data_layout.pointer_align.abi;
-        let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?;
+        let drop_fn = self.memory
+            .get(vtable.alloc_id)?
+            .read_ptr_sized(self, vtable, pointer_align)?
+            .to_ptr()?;
         let drop_instance = self.memory.get_fn(drop_fn)?;
         trace!("Found drop fn: {:?}", drop_instance);
         let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx);
@@ -104,9 +115,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
     ) -> EvalResult<'tcx, (Size, Align)> {
         let pointer_size = self.pointer_size();
         let pointer_align = self.tcx.data_layout.pointer_align.abi;
-        let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?,pointer_align)?
+        let alloc = self.memory.get(vtable.alloc_id)?;
+        let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?, pointer_align)?
             .to_bits(pointer_size)? as u64;
-        let align = self.memory.read_ptr_sized(
+        let align = alloc.read_ptr_sized(
+            self,
             vtable.offset(pointer_size * 2, self)?,
             pointer_align
         )?.to_bits(pointer_size)? as u64;
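The offsets used above encode the vtable layout the interpreter assumes: pointer-sized slot 0 holds the drop-in-place function, slot 1 the size, slot 2 the alignment, and slot `3 + i` the i-th trait method, which is why the virtual-call path in the previous file reads at `ptr_size * (idx as u64 + 3)`. A toy model with `u64` slots standing in for interpreter pointers:

    struct Vtable {
        slots: Vec<u64>,
    }

    impl Vtable {
        // mirrors the write order in get_vtable: drop fn, size, align, methods
        fn new(drop_fn: u64, size: u64, align: u64, methods: &[u64]) -> Self {
            let mut slots = vec![drop_fn, size, align];
            slots.extend_from_slice(methods);
            Vtable { slots }
        }

        // mirrors `vtable.offset(ptr_size * (3 + i), ...)` plus a
        // pointer-sized read
        fn method(&self, idx: usize) -> u64 {
            self.slots[3 + idx]
        }
    }

    fn main() {
        let vtable = Vtable::new(0xd0, 16, 8, &[0xf1, 0xf2]);
        assert_eq!(vtable.method(1), 0xf2);
        assert_eq!(vtable.slots[1], 16); // the size lives at slot 1
    }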

(file 7 of 7)

@@ -21,7 +21,7 @@ use rustc::mir::interpret::{
 };

 use super::{
-    OpTy, MPlaceTy, Machine, EvalContext, ValueVisitor
+    OpTy, MPlaceTy, Machine, EvalContext, ValueVisitor, Operand,
 };

 macro_rules! validation_failure {
@@ -396,7 +396,9 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
             // Maintain the invariant that the place we are checking is
             // already verified to be in-bounds.
             try_validation!(
-                self.ecx.memory.check_bounds(ptr, size, InboundsCheck::Live),
+                self.ecx.memory
+                    .get(ptr.alloc_id)?
+                    .check_bounds(self.ecx, ptr, size),
                 "dangling (not entirely in bounds) reference", self.path);
         }
         // Check if we have encountered this pointer+layout combination
@@ -520,12 +522,14 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
                 _ => false,
             }
         } => {
-            let mplace = if op.layout.is_zst() {
+            let mplace = match *op {
                 // it's a ZST, the memory content cannot matter
-                MPlaceTy::dangling(op.layout, self.ecx)
-            } else {
-                // non-ZST array/slice/str cannot be immediate
-                op.to_mem_place()
+                Operand::Immediate(_) if op.layout.is_zst() =>
+                    // invent an aligned mplace
+                    MPlaceTy::dangling(op.layout, self.ecx),
+                // FIXME: what about single element arrays? They can be Scalar layout I think
+                Operand::Immediate(_) => bug!("non-ZST array/slice cannot be immediate"),
+                Operand::Indirect(_) => op.to_mem_place(),
             };
             // This is the length of the array/slice.
             let len = mplace.len(self.ecx)?;
@@ -534,6 +538,11 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
             // This is the size in bytes of the whole array.
             let size = ty_size * len;

+            if op.layout.is_zst() {
+                return self.ecx.memory.check_align(mplace.ptr, op.layout.align);
+            }
+            let ptr = mplace.ptr.to_ptr()?;
+
             // NOTE: Keep this in sync with the handling of integer and float
             // types above, in `visit_primitive`.
             // In run-time mode, we accept pointers in here. This is actually more
@@ -543,8 +552,9 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
             // to reject those pointers, we just do not have the machinery to
             // talk about parts of a pointer.
             // We also accept undef, for consistency with the type-based checks.
-            match self.ecx.memory.check_bytes(
-                mplace.ptr,
+            match self.ecx.memory.get(ptr.alloc_id)?.check_bytes(
+                self.ecx,
+                ptr,
                 size,
                 /*allow_ptr_and_undef*/!self.const_mode,
            ) {
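The validation hunks above split into a ZST fast path (alignment check only, since a dangling but aligned pointer is fine when there are no bytes to look at) and a bulk `check_bytes` call whose `allow_ptr_and_undef` flag is `!self.const_mode`. A sketch of what that flag gates, with an invented per-byte representation (rustc tracks undef via a mask and pointer bytes via relocations):

    fn check_zst_align(ptr_addr: u64, align: u64) -> Result<(), String> {
        // ZST path: no bytes to inspect, but the pointer must be aligned
        if ptr_addr % align == 0 { Ok(()) } else { Err("unaligned ZST reference".into()) }
    }

    fn check_bytes(
        defined: &[bool],      // per-byte "initialized" mask (rustc: undef mask)
        relocations: &[u64],   // offsets holding pointer bytes (rustc: relocations)
        allow_ptr_and_undef: bool,
    ) -> Result<(), String> {
        if !allow_ptr_and_undef {
            // const mode: an integer array must be fully initialized plain bytes
            if defined.iter().any(|&d| !d) {
                return Err("undefined bytes".into());
            }
            if !relocations.is_empty() {
                return Err("pointer bytes".into());
            }
        }
        Ok(())
    }

    fn main() {
        assert!(check_zst_align(8, 4).is_ok());
        assert!(check_zst_align(6, 4).is_err());
        // run-time mode (allow_ptr_and_undef = true) tolerates undef bytes,
        // const mode rejects them
        assert!(check_bytes(&[true, false], &[], true).is_ok());
        assert!(check_bytes(&[true, false], &[], false).is_err());
    }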