Auto merge of #54461 - RalfJung:pointer-provenance, r=oli-obk

miri engine: basic support for pointer provenance tracking

This enriches pointers with a new member, `tag`, that can be used for provenance tracking. The tag is a new type parameter that propagates through essentially all interpreter data structures. It defaults to `()` (no tag), which is also the type CTFE uses -- but miri will use another type.
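
To make the shape concrete, here is a self-contained toy version of the pattern -- a sketch only, not the actual rustc definitions, and `Borrow` is a made-up machine tag:

```rust
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct AllocId(u64);

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct Pointer<Tag = ()> {
    alloc_id: AllocId,
    offset: u64,
    tag: Tag,
}

impl Pointer<()> {
    // CTFE only ever constructs untagged pointers.
    fn new(alloc_id: AllocId, offset: u64) -> Self {
        Pointer { alloc_id, offset, tag: () }
    }
    // A machine (like miri) lifts them into its own tag type.
    fn with_default_tag<Tag: Default>(self) -> Pointer<Tag> {
        Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: Tag::default() }
    }
}

// A hypothetical machine tag; anything `Debug + Default + Copy + Eq + Hash` works.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Hash)]
struct Borrow(u64);

fn main() {
    let p = Pointer::new(AllocId(0), 8);           // CTFE pointer, tag is `()`
    let q: Pointer<Borrow> = p.with_default_tag(); // miri pointer, tag is `Borrow(0)`
    assert_eq!(q.tag, Borrow(0));
}
```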

The only actually interesting piece here, I think, is what I had to do in the memory's `get`. The problem is that `tcx` (storing the allocations for statics) uses `()` as the provenance information, but the machine might need another tag. The machine has a function to do the conversion, but if a conversion actually happened, we need to store the result *somewhere* -- we cannot return a pointer into `tcx` as we usually would.
So I introduced `MonoHashMap`, which uses `RefCell` to be able to insert new entries even when we only have a shared reference. However, it is important that we can also return shared references into the map without holding the `RefCell` open. This is achieved by boxing the values stored in the map, so their addresses remain stable even when the map's table gets reallocated. This is all implemented in `mono_hash_map.rs`.
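
A minimal sketch of that idea, assuming a plain `HashMap` (the real `mono_hash_map.rs` uses `FxHashMap` and differs in details; `get_or` mirrors the `AllocMap` trait this PR adds):

```rust
use std::cell::RefCell;
use std::collections::HashMap;
use std::hash::Hash;

// Values are boxed so a `&V` handed out earlier stays valid even when the
// table reallocates; `RefCell` lets us insert through a shared reference.
pub struct MonoHashMap<K: Hash + Eq, V>(RefCell<HashMap<K, Box<V>>>);

impl<K: Hash + Eq, V> MonoHashMap<K, V> {
    /// Return a reference to the value for `k`, first inserting the result
    /// of `vacant` if the entry is missing -- all through `&self`.
    pub fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
        if let Some(v) = self.0.borrow().get(&k) {
            // Sound because the `Box` pins the value's address and entries
            // are never removed through a shared reference.
            let ptr: *const V = &**v;
            return Ok(unsafe { &*ptr });
        }
        let v = Box::new(vacant()?);
        let ptr: *const V = &*v; // stable heap address, survives the move below
        self.0.borrow_mut().insert(k, v);
        Ok(unsafe { &*ptr })
    }
}
```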

NOTE: This PR also contains the commits from https://github.com/rust-lang/rust/pull/54380#issuecomment-423130753. Only the [last two commits](8e74ee0998..HEAD) are new.
bors 2018-10-10 12:13:03 +00:00
commit 2243fabd8f
19 changed files with 992 additions and 549 deletions

View File

@ -391,10 +391,39 @@ for ::mir::interpret::ConstValue<'gcx> {
}
}
impl_stable_hash_for!(struct mir::interpret::Pointer {
alloc_id,
offset
});
impl<'a, Tag> HashStable<StableHashingContext<'a>>
for ::mir::interpret::Pointer<Tag>
where Tag: HashStable<StableHashingContext<'a>>
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher<W>) {
let ::mir::interpret::Pointer { alloc_id, offset, tag } = self;
alloc_id.hash_stable(hcx, hasher);
offset.hash_stable(hcx, hasher);
tag.hash_stable(hcx, hasher);
}
}
impl<'a, Tag> HashStable<StableHashingContext<'a>>
for ::mir::interpret::Scalar<Tag>
where Tag: HashStable<StableHashingContext<'a>>
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher<W>) {
use mir::interpret::Scalar::*;
mem::discriminant(self).hash_stable(hcx, hasher);
match self {
Bits { bits, size } => {
bits.hash_stable(hcx, hasher);
size.hash_stable(hcx, hasher);
},
Ptr(ptr) => ptr.hash_stable(hcx, hasher),
}
}
}
impl<'a> HashStable<StableHashingContext<'a>> for mir::interpret::AllocId {
fn hash_stable<W: StableHasherResult>(
@ -449,25 +478,6 @@ impl_stable_hash_for!(enum ::syntax::ast::Mutability {
Mutable
});
impl<'a> HashStable<StableHashingContext<'a>>
for ::mir::interpret::Scalar {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher<W>) {
use mir::interpret::Scalar::*;
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
Bits { bits, size } => {
bits.hash_stable(hcx, hasher);
size.hash_stable(hcx, hasher);
},
Ptr(ptr) => ptr.hash_stable(hcx, hasher),
}
}
}
impl_stable_hash_for!(struct ty::Const<'tcx> {
ty,
val

View File

@ -138,54 +138,82 @@ impl<T: layout::HasDataLayout> PointerArithmetic for T {}
/// each context.
///
/// Defaults to the index based and loosely coupled AllocId.
///
/// Pointer is also generic over the `Tag` associated with each pointer,
/// which is used to do provenance tracking during execution.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
pub struct Pointer<Id=AllocId> {
pub struct Pointer<Tag=(), Id=AllocId> {
pub alloc_id: Id,
pub offset: Size,
pub tag: Tag,
}
/// Produces a `Pointer` which points to the beginning of the Allocation
impl From<AllocId> for Pointer {
#[inline(always)]
fn from(alloc_id: AllocId) -> Self {
Pointer::new(alloc_id, Size::ZERO)
}
}
impl<'tcx> Pointer {
impl<'tcx> Pointer<()> {
#[inline(always)]
pub fn new(alloc_id: AllocId, offset: Size) -> Self {
Pointer { alloc_id, offset }
Pointer { alloc_id, offset, tag: () }
}
#[inline(always)]
pub fn with_default_tag<Tag>(self) -> Pointer<Tag>
where Tag: Default
{
Pointer::new_with_tag(self.alloc_id, self.offset, Default::default())
}
}
impl<'tcx, Tag> Pointer<Tag> {
#[inline(always)]
pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self {
Pointer { alloc_id, offset, tag }
}
pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
Pointer::new(
Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)),
self.tag,
)
}
pub fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
(Pointer::new(self.alloc_id, Size::from_bytes(res)), over)
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
}
pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
Ok(Pointer::new(
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
self.tag,
))
}
pub fn overflowing_offset<C: HasDataLayout>(self, i: Size, cx: C) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
(Pointer::new(self.alloc_id, Size::from_bytes(res)), over)
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
}
pub fn offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
Ok(Pointer::new(
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
self.tag
))
}
#[inline]
pub fn erase_tag(self) -> Pointer {
Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () }
}
}
@ -496,15 +524,15 @@ impl<'tcx, M: fmt::Debug + Eq + Hash + Clone> AllocMap<'tcx, M> {
}
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub struct Allocation {
pub struct Allocation<Tag=()> {
/// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer
pub bytes: Vec<u8>,
/// Maps from byte addresses to allocations.
/// Maps from byte addresses to extra data for each pointer.
/// Only the first byte of a pointer is inserted into the map; i.e.,
/// every entry in this map applies to `pointer_size` consecutive bytes starting
/// at the given offset.
pub relocations: Relocations,
pub relocations: Relocations<Tag>,
/// Denotes undefined memory. Reading from undefined memory is forbidden in miri
pub undef_mask: UndefMask,
/// The alignment of the allocation to detect unaligned reads.
@ -515,7 +543,7 @@ pub struct Allocation {
pub mutability: Mutability,
}
impl Allocation {
impl<Tag> Allocation<Tag> {
/// Creates a read-only allocation initialized by the given bytes
pub fn from_bytes(slice: &[u8], align: Align) -> Self {
let mut undef_mask = UndefMask::new(Size::ZERO);
@ -548,29 +576,29 @@ impl Allocation {
impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {}
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct Relocations<Id=AllocId>(SortedMap<Size, Id>);
pub struct Relocations<Tag=(), Id=AllocId>(SortedMap<Size, (Tag, Id)>);
impl<Id> Relocations<Id> {
impl<Tag, Id> Relocations<Tag, Id> {
pub fn new() -> Self {
Relocations(SortedMap::new())
}
// The caller must guarantee that the given relocations are already sorted
// by address and contain no duplicates.
pub fn from_presorted(r: Vec<(Size, Id)>) -> Self {
pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
Relocations(SortedMap::from_presorted_elements(r))
}
}
impl Deref for Relocations {
type Target = SortedMap<Size, AllocId>;
impl<Tag> Deref for Relocations<Tag> {
type Target = SortedMap<Size, (Tag, AllocId)>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Relocations {
impl<Tag> DerefMut for Relocations<Tag> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}

View File

@ -79,7 +79,47 @@ impl<'tcx> ConstValue<'tcx> {
}
}
impl<'tcx> Scalar {
/// A `Scalar` represents an immediate, primitive value existing outside of a
/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 8 bytes in
/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes
/// of a simple value or a pointer into another `Allocation`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
pub enum Scalar<Tag=(), Id=AllocId> {
/// The raw bytes of a simple value.
Bits {
/// The first `size` bytes are the value.
/// Do not try to read less or more bytes than that. The remaining bytes must be 0.
size: u8,
bits: u128,
},
/// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
/// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
/// relocation and its associated offset together as a `Pointer` here.
Ptr(Pointer<Tag, Id>),
}
impl<'tcx> Scalar<()> {
#[inline]
pub fn with_default_tag<Tag>(self) -> Scalar<Tag>
where Tag: Default
{
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.with_default_tag()),
Scalar::Bits { bits, size } => Scalar::Bits { bits, size },
}
}
}
impl<'tcx, Tag> Scalar<Tag> {
#[inline]
pub fn erase_tag(self) -> Scalar {
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_tag()),
Scalar::Bits { bits, size } => Scalar::Bits { bits, size },
}
}
#[inline]
pub fn ptr_null(cx: impl HasDataLayout) -> Self {
Scalar::Bits {
@ -208,7 +248,7 @@ impl<'tcx> Scalar {
}
#[inline]
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
match self {
Scalar::Bits { bits: 0, .. } => err!(InvalidNullPointerUsage),
Scalar::Bits { .. } => err!(ReadBytesAsPointer),
@ -317,29 +357,9 @@ impl<'tcx> Scalar {
}
}
impl From<Pointer> for Scalar {
impl<Tag> From<Pointer<Tag>> for Scalar<Tag> {
#[inline(always)]
fn from(ptr: Pointer) -> Self {
fn from(ptr: Pointer<Tag>) -> Self {
Scalar::Ptr(ptr)
}
}
/// A `Scalar` represents an immediate, primitive value existing outside of a
/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 8 bytes in
/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes
/// of a simple value or a pointer into another `Allocation`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
pub enum Scalar<Id=AllocId> {
/// The raw bytes of a simple value.
Bits {
/// The first `size` bytes are the value.
/// Do not try to read less or more bytes than that. The remaining bytes must be 0.
size: u8,
bits: u128,
},
/// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
/// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
/// relocation and its associated offset together as a `Pointer` here.
Ptr(Pointer<Id>),
}

View File

@ -92,7 +92,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
let pointer_size = layout.pointer_size.bytes() as usize;
let mut next_offset = 0;
for &(offset, alloc_id) in alloc.relocations.iter() {
for &(offset, ((), alloc_id)) in alloc.relocations.iter() {
let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
@ -105,7 +105,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
llvals.push(scalar_to_llvm(
cx,
Pointer { alloc_id, offset: Size::from_bytes(ptr_offset) }.into(),
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
&layout::Scalar {
value: layout::Primitive::Pointer,
valid_range: 0..=!0

View File

@ -12,6 +12,9 @@
use std::fmt;
use std::error::Error;
use std::borrow::{Borrow, Cow};
use std::hash::Hash;
use std::collections::hash_map::Entry;
use rustc::hir::{self, def_id::DefId};
use rustc::mir::interpret::ConstEvalErr;
@ -20,13 +23,14 @@ use rustc::ty::{self, TyCtxt, Instance, query::TyCtxtAt};
use rustc::ty::layout::{self, LayoutOf, TyLayout};
use rustc::ty::subst::Subst;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::fx::FxHashMap;
use syntax::ast::Mutability;
use syntax::source_map::{Span, DUMMY_SP};
use rustc::mir::interpret::{
EvalResult, EvalError, EvalErrorKind, GlobalId,
Scalar, Allocation, ConstValue,
Scalar, Allocation, AllocId, ConstValue,
};
use interpret::{self,
Place, PlaceTy, MemPlace, OpTy, Operand, Value,
@ -118,9 +122,9 @@ pub fn op_to_const<'tcx>(
}
};
let val = match normalized_op {
Err(MemPlace { ptr, align, extra }) => {
Err(MemPlace { ptr, align, meta }) => {
// extract alloc-offset pair
assert!(extra.is_none());
assert!(meta.is_none());
let ptr = ptr.to_ptr()?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align.abi() >= align.abi());
@ -264,6 +268,67 @@ impl<'a, 'mir, 'tcx> CompileTimeInterpreter<'a, 'mir, 'tcx> {
}
}
impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
#[inline(always)]
fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
where K: Borrow<Q>
{
FxHashMap::contains_key(self, k)
}
#[inline(always)]
fn insert(&mut self, k: K, v: V) -> Option<V>
{
FxHashMap::insert(self, k, v)
}
#[inline(always)]
fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
where K: Borrow<Q>
{
FxHashMap::remove(self, k)
}
#[inline(always)]
fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
self.iter()
.filter_map(move |(k, v)| f(k, &*v))
.collect()
}
#[inline(always)]
fn get_or<E>(
&self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&V, E>
{
match self.get(&k) {
Some(v) => Ok(v),
None => {
vacant()?;
bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
}
}
}
#[inline(always)]
fn get_mut_or<E>(
&mut self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&mut V, E>
{
match self.entry(k) {
Entry::Occupied(e) => Ok(e.into_mut()),
Entry::Vacant(e) => {
let v = vacant()?;
Ok(e.insert(v))
}
}
}
}
type CompileTimeEvalContext<'a, 'mir, 'tcx> =
EvalContext<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>;
@ -272,8 +337,11 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx>
{
type MemoryData = ();
type MemoryKinds = !;
type PointerTag = ();
const MUT_STATIC_KIND: Option<!> = None; // no mutating of statics allowed
type MemoryMap = FxHashMap<AllocId, (MemoryKind<!>, Allocation<()>)>;
const STATIC_KIND: Option<!> = None; // no copying of statics allowed
const ENFORCE_VALIDITY: bool = false; // for now, we don't
fn find_fn(
@ -339,10 +407,18 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx>
fn find_foreign_static(
_tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
_def_id: DefId,
) -> EvalResult<'tcx, &'tcx Allocation> {
) -> EvalResult<'tcx, Cow<'tcx, Allocation<Self::PointerTag>>> {
err!(ReadForeignStatic)
}
#[inline(always)]
fn static_with_default_tag(
alloc: &'_ Allocation
) -> Cow<'_, Allocation<Self::PointerTag>> {
// We do not use a tag so we can just cheaply forward the reference
Cow::Borrowed(alloc)
}
fn box_alloc(
_ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
_dest: PlaceTy<'tcx>,

View File

@ -33,9 +33,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn cast(
&mut self,
src: OpTy<'tcx>,
src: OpTy<'tcx, M::PointerTag>,
kind: CastKind,
dest: PlaceTy<'tcx>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
let src_layout = src.layout;
let dst_layout = dest.layout;
@ -143,10 +143,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub(super) fn cast_scalar(
&self,
val: Scalar,
val: Scalar<M::PointerTag>,
src_layout: TyLayout<'tcx>,
dest_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Scalar> {
) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
use rustc::ty::TyKind::*;
trace!("Casting {:?}: {:?} to {:?}", val, src_layout.ty, dest_layout.ty);
@ -182,7 +182,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
v: u128,
src_layout: TyLayout<'tcx>,
dest_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Scalar> {
) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
let signed = src_layout.abi.is_signed();
let v = if signed {
self.sign_extend(v, src_layout)
@ -239,7 +239,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
bits: u128,
fty: FloatTy,
dest_ty: Ty<'tcx>
) -> EvalResult<'tcx, Scalar> {
) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
use rustc::ty::TyKind::*;
use rustc_apfloat::FloatConvert;
match dest_ty.sty {
@ -283,7 +283,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
}
}
fn cast_from_ptr(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> {
fn cast_from_ptr(
&self,
ptr: Pointer<M::PointerTag>,
ty: Ty<'tcx>
) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
use rustc::ty::TyKind::*;
match ty.sty {
// Casting to a reference or fn pointer is not permitted by rustc,
@ -298,8 +302,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
fn unsize_into_ptr(
&mut self,
src: OpTy<'tcx>,
dest: PlaceTy<'tcx>,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
// The pointee types
sty: Ty<'tcx>,
dty: Ty<'tcx>,
@ -339,8 +343,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
fn unsize_into(
&mut self,
src: OpTy<'tcx>,
dest: PlaceTy<'tcx>,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
match (&src.layout.ty.sty, &dest.layout.ty.sty) {
(&ty::Ref(_, s, _), &ty::Ref(_, d, _)) |

View File

@ -49,12 +49,12 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> {
pub memory: Memory<'a, 'mir, 'tcx, M>,
/// The virtual call stack.
pub(crate) stack: Vec<Frame<'mir, 'tcx>>,
pub(crate) stack: Vec<Frame<'mir, 'tcx, M::PointerTag>>,
}
/// A stack frame.
#[derive(Clone)]
pub struct Frame<'mir, 'tcx: 'mir> {
pub struct Frame<'mir, 'tcx: 'mir, Tag=()> {
////////////////////////////////////////////////////////////////////////////////
// Function and callsite information
////////////////////////////////////////////////////////////////////////////////
@ -74,14 +74,14 @@ pub struct Frame<'mir, 'tcx: 'mir> {
pub return_to_block: StackPopCleanup,
/// The location where the result of the current stack frame should be written to.
pub return_place: Place,
pub return_place: Place<Tag>,
/// The list of locals for this stack frame, stored in order as
/// `[return_ptr, arguments..., variables..., temporaries...]`.
/// The locals are stored as `Option<Value>`s.
/// `None` represents a local that is currently dead, while a live local
/// can either directly contain `Scalar` or refer to some part of an `Allocation`.
pub locals: IndexVec<mir::Local, LocalValue<AllocId>>,
pub locals: IndexVec<mir::Local, LocalValue<Tag>>,
////////////////////////////////////////////////////////////////////////////////
// Current position within the function
@ -108,24 +108,24 @@ pub enum StackPopCleanup {
// State of a local variable
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub enum LocalValue<Id=AllocId> {
pub enum LocalValue<Tag=(), Id=AllocId> {
Dead,
// Mostly for convenience, we re-use the `Operand` type here.
// This is an optimization over just always having a pointer here;
// we can thus avoid doing an allocation when the local just stores
// immediate values *and* never has its address taken.
Live(Operand<Id>),
Live(Operand<Tag, Id>),
}
impl<'tcx> LocalValue {
pub fn access(&self) -> EvalResult<'tcx, &Operand> {
impl<'tcx, Tag> LocalValue<Tag> {
pub fn access(&self) -> EvalResult<'tcx, &Operand<Tag>> {
match self {
LocalValue::Dead => err!(DeadLocal),
LocalValue::Live(ref val) => Ok(val),
}
}
pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand> {
pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand<Tag>> {
match self {
LocalValue::Dead => err!(DeadLocal),
LocalValue::Live(ref mut val) => Ok(val),
@ -218,7 +218,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
&mut self.memory
}
pub fn stack(&self) -> &[Frame<'mir, 'tcx>] {
pub fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag>] {
&self.stack
}
@ -230,7 +230,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
/// Mark a storage as live, killing the previous content and returning it.
/// Remember to deallocate that!
pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> {
pub fn storage_live(
&mut self,
local: mir::Local
) -> EvalResult<'tcx, LocalValue<M::PointerTag>> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
trace!("{:?} is now live", local);
@ -242,14 +245,14 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
/// Returns the old value of the local.
/// Remember to deallocate that!
pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue {
pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
trace!("{:?} is now dead", local);
mem::replace(&mut self.frame_mut().locals[local], LocalValue::Dead)
}
pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value<M::PointerTag>> {
let ptr = self.memory.allocate_static_bytes(s.as_bytes());
Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx))
}
@ -327,10 +330,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
}
/// Return the actual dynamic size and alignment of the place at the given type.
/// Only the "extra" (metadata) part of the place matters.
/// Only the `meta` part of the place matters.
pub(super) fn size_and_align_of(
&self,
metadata: Option<Scalar>,
metadata: Option<Scalar<M::PointerTag>>,
layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Size, Align)> {
let metadata = match metadata {
@ -411,9 +414,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
#[inline]
pub fn size_and_align_of_mplace(
&self,
mplace: MPlaceTy<'tcx>
mplace: MPlaceTy<'tcx, M::PointerTag>
) -> EvalResult<'tcx, (Size, Align)> {
self.size_and_align_of(mplace.extra, mplace.layout)
self.size_and_align_of(mplace.meta, mplace.layout)
}
pub fn push_stack_frame(
@ -421,7 +424,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
instance: ty::Instance<'tcx>,
span: source_map::Span,
mir: &'mir mir::Mir<'tcx>,
return_place: Place,
return_place: Place<M::PointerTag>,
return_to_block: StackPopCleanup,
) -> EvalResult<'tcx> {
::log_settings::settings().indentation += 1;
@ -519,7 +522,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
Ok(())
}
pub(super) fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> {
pub(super) fn deallocate_local(
&mut self,
local: LocalValue<M::PointerTag>,
) -> EvalResult<'tcx> {
// FIXME: should we tell the user that there was a local which was never written to?
if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
trace!("deallocating local");
@ -541,12 +547,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
}
#[inline(always)]
pub fn frame(&self) -> &Frame<'mir, 'tcx> {
pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag> {
self.stack.last().expect("no call frames exist")
}
#[inline(always)]
pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> {
pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag> {
self.stack.last_mut().expect("no call frames exist")
}
@ -562,7 +568,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
}
}
pub fn dump_place(&self, place: Place) {
pub fn dump_place(&self, place: Place<M::PointerTag>) {
// Debug output
if !log_enabled!(::log::Level::Trace) {
return;

View File

@ -25,11 +25,11 @@ use super::{
};
fn numeric_intrinsic<'tcx>(
fn numeric_intrinsic<'tcx, Tag>(
name: &str,
bits: u128,
kind: Primitive,
) -> EvalResult<'tcx, Scalar> {
) -> EvalResult<'tcx, Scalar<Tag>> {
let size = match kind {
Primitive::Int(integer, _) => integer.size(),
_ => bug!("invalid `{}` argument: {:?}", name, bits),
@ -51,8 +51,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn emulate_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
args: &[OpTy<'tcx, M::PointerTag>],
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, bool> {
let substs = instance.substs;
@ -169,8 +169,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn hook_fn(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
args: &[OpTy<'tcx, M::PointerTag>],
dest: Option<PlaceTy<'tcx, M::PointerTag>>,
) -> EvalResult<'tcx, bool> {
let def_id = instance.def_id();
// Some fn calls are actually BinOp intrinsics

View File

@ -12,17 +12,55 @@
//! This separation exists to ensure that no fancy miri features like
//! interpreting common C functions leak into CTFE.
use std::borrow::{Borrow, Cow};
use std::hash::Hash;
use rustc::hir::def_id::DefId;
use rustc::mir::interpret::{Allocation, EvalResult, Scalar};
use rustc::mir::interpret::{Allocation, AllocId, EvalResult, Scalar};
use rustc::mir;
use rustc::ty::{self, layout::TyLayout, query::TyCtxtAt};
use super::{EvalContext, PlaceTy, OpTy};
use super::{EvalContext, PlaceTy, OpTy, MemoryKind};
/// The functionality needed by memory to manage its allocations
pub trait AllocMap<K: Hash + Eq, V> {
/// Test if the map contains the given key.
/// Deliberately takes `&mut` because that is sufficient, and some implementations
/// can then be more efficient (using `RefCell::get_mut`).
fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
where K: Borrow<Q>;
/// Insert new entry into the map.
fn insert(&mut self, k: K, v: V) -> Option<V>;
/// Remove entry from the map.
fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
where K: Borrow<Q>;
/// Return data based on the keys and values in the map.
fn filter_map_collect<T>(&self, f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T>;
/// Return a reference to entry `k`. If no such entry exists, call
/// `vacant` and either forward its error, or add its result to the map
/// and return a reference to *that*.
fn get_or<E>(
&self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&V, E>;
/// Return a mutable reference to entry `k`. If no such entry exists, call
/// `vacant` and either forward its error, or add its result to the map
/// and return a reference to *that*.
fn get_mut_or<E>(
&mut self,
k: K,
vacant: impl FnOnce() -> Result<V, E>
) -> Result<&mut V, E>;
}
/// Methods of this trait signify points where CTFE evaluation would fail,
/// and where use-case-dependent behaviour can instead be applied.
/// FIXME: We should be able to get rid of the 'a here if we can get rid of the 'a in
/// `snapshot::EvalSnapshot`.
pub trait Machine<'a, 'mir, 'tcx>: Sized {
/// Additional data that can be accessed via the Memory
type MemoryData;
@ -30,8 +68,22 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized {
/// Additional memory kinds a machine wishes to distinguish from the builtin ones
type MemoryKinds: ::std::fmt::Debug + Copy + Eq;
/// The memory kind to use for mutated statics -- or None if those are not supported.
const MUT_STATIC_KIND: Option<Self::MemoryKinds>;
/// Memory's allocation map
type MemoryMap:
AllocMap<AllocId, (MemoryKind<Self::MemoryKinds>, Allocation<Self::PointerTag>)> +
Default +
Clone;
/// Tag tracked alongside every pointer. This is inert for now, in preparation for
/// a future implementation of "Stacked Borrows"
/// <https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html>.
type PointerTag: ::std::fmt::Debug + Default + Copy + Eq + Hash + 'static;
/// The memory kind to use for copied statics -- or None if those are not supported.
/// Statics are copied under two circumstances: when they are mutated, and when
/// `static_with_default_tag` or `find_foreign_static` (see below) returns an owned allocation,
/// which is then added to the memory so that the work is not done twice.
const STATIC_KIND: Option<Self::MemoryKinds>;
/// Whether to enforce the validity invariant
const ENFORCE_VALIDITY: bool;
@ -53,8 +105,8 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized {
fn find_fn(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
args: &[OpTy<'tcx, Self::PointerTag>],
dest: Option<PlaceTy<'tcx, Self::PointerTag>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>>;
@ -63,18 +115,30 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized {
fn call_intrinsic(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
args: &[OpTy<'tcx, Self::PointerTag>],
dest: PlaceTy<'tcx, Self::PointerTag>,
) -> EvalResult<'tcx>;
/// Called for read access to a foreign static item.
/// This can be called multiple times for the same static item and should return consistent
/// results. Once the item is *written* the first time, as usual for statics a copy is
/// made and this function is not called again.
///
/// This will only be called once per static and machine; the result is cached in
/// the machine memory. (This relies on `AllocMap::get_or` being able to add the
/// owned allocation to the map even when the map is shared.)
fn find_foreign_static(
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
def_id: DefId,
) -> EvalResult<'tcx, &'tcx Allocation>;
) -> EvalResult<'tcx, Cow<'tcx, Allocation<Self::PointerTag>>>;
/// Called to turn an allocation obtained from the `tcx` into one that has
/// the appropriate tags on each pointer.
///
/// This should avoid copying if no work has to be done! If this returns an owned
/// allocation (because a copy had to be done to add the tags), machine memory will
/// cache the result. (This relies on `AllocMap::get_or` being able to add the
/// owned allocation to the map even when the map is shared.)
fn static_with_default_tag(
alloc: &'_ Allocation
) -> Cow<'_, Allocation<Self::PointerTag>>;
/// Called for all binary operations on integer(-like) types when one operand is a pointer
/// value, and for the `Offset` operation that is inherently about pointers.
@ -83,18 +147,18 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized {
fn ptr_op(
ecx: &EvalContext<'a, 'mir, 'tcx, Self>,
bin_op: mir::BinOp,
left: Scalar,
left: Scalar<Self::PointerTag>,
left_layout: TyLayout<'tcx>,
right: Scalar,
right: Scalar<Self::PointerTag>,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)>;
) -> EvalResult<'tcx, (Scalar<Self::PointerTag>, bool)>;
/// Heap allocations via the `box` keyword
///
/// Returns a pointer to the allocated memory
fn box_alloc(
ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>,
dest: PlaceTy<'tcx>,
dest: PlaceTy<'tcx, Self::PointerTag>,
) -> EvalResult<'tcx>;
/// Execute a validation operation

View File

@ -18,23 +18,28 @@
use std::collections::VecDeque;
use std::ptr;
use std::borrow::Cow;
use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout};
use rustc::mir::interpret::{Pointer, AllocId, Allocation, ConstValue, GlobalId,
EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic,
truncate};
use rustc::mir::interpret::{
Pointer, AllocId, Allocation, ConstValue, GlobalId,
EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic,
truncate
};
pub use rustc::mir::interpret::{write_target_uint, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
use syntax::ast::Mutability;
use super::{Machine, ScalarMaybeUndef};
use super::{Machine, AllocMap, ScalarMaybeUndef};
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub enum MemoryKind<T> {
/// Error if deallocated except during a stack pop
Stack,
/// Error if ever deallocated
Vtable,
/// Additional memory kinds a machine wishes to distinguish from the builtin ones
Machine(T),
}
@ -48,9 +53,13 @@ pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> {
/// Allocations local to this instance of the miri engine. The kind
/// helps ensure that the same mechanism is used for allocation and
/// deallocation. When an allocation is not found here, it is a
/// static and looked up in the `tcx` for read access. Writing to
/// a static creates a copy here, in the machine.
alloc_map: FxHashMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation)>,
/// static and looked up in the `tcx` for read access. Some machines may
/// have to mutate this map even on a read-only access to a static (because
/// they do pointer provenance tracking and the allocations in `tcx` have
/// the wrong type), so we let the machine override this type.
/// Either way, if the machine allows writing to a static, doing so will
/// create a copy of the static allocation here.
alloc_map: M::MemoryMap,
/// To be able to compare pointers with NULL, and to check alignment for accesses
/// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
@ -98,23 +107,23 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn new(tcx: TyCtxtAt<'a, 'tcx, 'tcx>, data: M::MemoryData) -> Self {
Memory {
data,
alloc_map: FxHashMap::default(),
alloc_map: Default::default(),
dead_alloc_map: FxHashMap::default(),
tcx,
}
}
pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer {
self.tcx.alloc_map.lock().create_fn_alloc(instance).into()
pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer<M::PointerTag> {
Pointer::from(self.tcx.alloc_map.lock().create_fn_alloc(instance)).with_default_tag()
}
pub fn allocate_static_bytes(&mut self, bytes: &[u8]) -> Pointer {
self.tcx.allocate_bytes(bytes).into()
pub fn allocate_static_bytes(&mut self, bytes: &[u8]) -> Pointer<M::PointerTag> {
Pointer::from(self.tcx.allocate_bytes(bytes)).with_default_tag()
}
pub fn allocate_with(
&mut self,
alloc: Allocation,
alloc: Allocation<M::PointerTag>,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, AllocId> {
let id = self.tcx.alloc_map.lock().reserve();
@ -127,19 +136,20 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
size: Size,
align: Align,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, Pointer> {
self.allocate_with(Allocation::undef(size, align), kind).map(Pointer::from)
) -> EvalResult<'tcx, Pointer<M::PointerTag>> {
let ptr = Pointer::from(self.allocate_with(Allocation::undef(size, align), kind)?);
Ok(ptr.with_default_tag())
}
pub fn reallocate(
&mut self,
ptr: Pointer,
ptr: Pointer<M::PointerTag>,
old_size: Size,
old_align: Align,
new_size: Size,
new_align: Align,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, Pointer> {
) -> EvalResult<'tcx, Pointer<M::PointerTag>> {
if ptr.offset.bytes() != 0 {
return err!(ReallocateNonBasePtr);
}
@ -160,7 +170,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
}
/// Deallocate a local, or do nothing if that local has been made into a static
pub fn deallocate_local(&mut self, ptr: Pointer) -> EvalResult<'tcx> {
pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx> {
// The allocation might be already removed by static interning.
// This can only really happen in the CTFE instance, not in miri.
if self.alloc_map.contains_key(&ptr.alloc_id) {
@ -172,7 +182,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn deallocate(
&mut self,
ptr: Pointer,
ptr: Pointer<M::PointerTag>,
size_and_align: Option<(Size, Align)>,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx> {
@ -231,7 +241,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Check that the pointer is aligned AND non-NULL. This supports ZSTs in two ways:
/// You can pass a scalar, and a `Pointer` does not have to actually still be allocated.
pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx> {
pub fn check_align(
&self,
ptr: Scalar<M::PointerTag>,
required_align: Align
) -> EvalResult<'tcx> {
// Check non-NULL/Undef, extract offset
let (offset, alloc_align) = match ptr {
Scalar::Ptr(ptr) => {
@ -240,7 +254,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
// of some (potentially dead) allocation.
if ptr.offset > size {
return err!(PointerOutOfBounds {
ptr,
ptr: ptr.erase_tag(),
access: true,
allocation_size: size,
});
@ -284,12 +298,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// If you want to check bounds before doing a memory access, be sure to
/// check the pointer one past the end of your access, then everything will
/// work out exactly.
pub fn check_bounds_ptr(&self, ptr: Pointer, access: bool) -> EvalResult<'tcx> {
pub fn check_bounds_ptr(&self, ptr: Pointer<M::PointerTag>, access: bool) -> EvalResult<'tcx> {
let alloc = self.get(ptr.alloc_id)?;
let allocation_size = alloc.bytes.len() as u64;
if ptr.offset.bytes() > allocation_size {
return err!(PointerOutOfBounds {
ptr,
ptr: ptr.erase_tag(),
access,
allocation_size: Size::from_bytes(allocation_size),
});
@ -299,7 +313,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Check if the memory range beginning at `ptr` and of size `size` is "in-bounds".
#[inline(always)]
pub fn check_bounds(&self, ptr: Pointer, size: Size, access: bool) -> EvalResult<'tcx> {
pub fn check_bounds(
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
access: bool
) -> EvalResult<'tcx> {
// if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
self.check_bounds_ptr(ptr.offset(size, &*self)?, access)
}
@ -307,15 +326,21 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Allocation accessors
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Helper function to obtain the global (tcx) allocation for a static
/// Helper function to obtain the global (tcx) allocation for a static.
/// This attempts to return a reference to an existing allocation if
/// one can be found in `tcx`. That, however, is only possible if `tcx` and
/// this machine use the same pointer tag, so it is indirected through
/// `M::static_with_default_tag`.
fn get_static_alloc(
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
id: AllocId,
) -> EvalResult<'tcx, &'tcx Allocation> {
) -> EvalResult<'tcx, Cow<'tcx, Allocation<M::PointerTag>>> {
let alloc = tcx.alloc_map.lock().get(id);
let def_id = match alloc {
Some(AllocType::Memory(mem)) => {
return Ok(mem)
// We got tcx memory. Let the machine figure out whether and how to
// turn that into memory with the right pointer tag.
return Ok(M::static_with_default_tag(mem))
}
Some(AllocType::Function(..)) => {
return err!(DerefFunctionPointer)
@ -342,20 +367,73 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
EvalErrorKind::ReferencedConstant(err).into()
}).map(|const_val| {
if let ConstValue::ByRef(_, allocation, _) = const_val.val {
allocation
// We got tcx memory. Let the machine figure out whether and how to
// turn that into memory with the right pointer tag.
M::static_with_default_tag(allocation)
} else {
bug!("Matching on non-ByRef static")
}
})
}
pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation> {
match self.alloc_map.get(&id) {
// Normal alloc?
Some(alloc) => Ok(&alloc.1),
// Static. No need to make any copies, just provide read access to the global static
// memory in tcx.
None => Self::get_static_alloc(self.tcx, id),
pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<M::PointerTag>> {
// The error type of the inner closure here is somewhat funny. We have two
// ways of "erroring": an actual error, or getting a reference from
// `get_static_alloc` that we can use directly without inserting anything anywhere.
// So the "error" type is `EvalResult<'tcx, &Allocation<M::PointerTag>>`.
let a = self.alloc_map.get_or(id, || {
let alloc = Self::get_static_alloc(self.tcx, id).map_err(Err)?;
match alloc {
Cow::Borrowed(alloc) => {
// We got a ref, cheaply return that as an "error" so that the
// map does not get mutated.
Err(Ok(alloc))
}
Cow::Owned(alloc) => {
// Need to put it into the map and return a ref to that
let kind = M::STATIC_KIND.expect(
"I got an owned allocation that I have to copy but the machine does \
not expect that to happen"
);
Ok((MemoryKind::Machine(kind), alloc))
}
}
});
// Now unpack that funny error type
match a {
Ok(a) => Ok(&a.1),
Err(a) => a
}
}
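// Editor's note -- an illustrative toy (not part of this diff) of the "funny
// error" pattern above: `Err` carries both outcomes that must leave the map
// untouched, namely a real error and a borrowed value we can return directly.
fn unpack_demo<'a>(
cached: Option<&'a u32>,              // what the alloc_map already holds
global: Result<&'a u32, String>,      // what `get_static_alloc` would yield
) -> Result<&'a u32, String> {
let r: Result<&'a u32, Result<&'a u32, String>> = match cached {
Some(v) => Ok(v),                 // found in the map: plain success
None => match global {
Ok(v) => Err(Ok(v)),          // borrowed global: "error" carrying success
Err(e) => Err(Err(e)),        // actual error
},
};
// Now unpack that funny error type, just like `get` does above.
match r {
Ok(v) => Ok(v),
Err(inner) => inner,
}
}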
pub fn get_mut(
&mut self,
id: AllocId,
) -> EvalResult<'tcx, &mut Allocation<M::PointerTag>> {
let tcx = self.tcx;
let a = self.alloc_map.get_mut_or(id, || {
// Need to make a copy, even if `get_static_alloc` is able
// to give us a cheap reference.
let alloc = Self::get_static_alloc(tcx, id)?;
if alloc.mutability == Mutability::Immutable {
return err!(ModifiedConstantMemory);
}
let kind = M::STATIC_KIND.expect(
"An allocation is being mutated but the machine does not expect that to happen"
);
Ok((MemoryKind::Machine(kind), alloc.into_owned()))
});
// Unpack the error type manually because type inference doesn't
// work otherwise (and we cannot help it because of `impl Trait`)
match a {
Err(e) => Err(e),
Ok(a) => {
let a = &mut a.1;
if a.mutability == Mutability::Immutable {
return err!(ModifiedConstantMemory);
}
Ok(a)
}
}
}
@ -367,7 +445,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
match self.tcx.alloc_map.lock().get(id) {
Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1, 1).unwrap()),
Some(AllocType::Static(did)) => {
// The only way `get` couldnÄt have worked here is if this is an extern static
// The only way `get` couldn't have worked here is if this is an extern static
assert!(self.tcx.is_foreign_item(did));
// Use size and align of the type
let ty = self.tcx.type_of(did);
@ -383,31 +461,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
}
}
pub fn get_mut(
&mut self,
id: AllocId,
) -> EvalResult<'tcx, &mut Allocation> {
// Static?
if !self.alloc_map.contains_key(&id) {
// Ask the machine for what to do
if let Some(kind) = M::MUT_STATIC_KIND {
// The machine supports mutating statics. Make a copy, use that.
self.deep_copy_static(id, MemoryKind::Machine(kind))?;
} else {
return err!(ModifiedConstantMemory)
}
}
// If we come here, we know the allocation is in our map
let alloc = &mut self.alloc_map.get_mut(&id).unwrap().1;
// See if we are allowed to mutate this
if alloc.mutability == Mutability::Immutable {
err!(ModifiedConstantMemory)
} else {
Ok(alloc)
}
}
pub fn get_fn(&self, ptr: Pointer) -> EvalResult<'tcx, Instance<'tcx>> {
pub fn get_fn(&self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx, Instance<'tcx>> {
if ptr.offset.bytes() != 0 {
return err!(InvalidFunctionPointer);
}
@ -418,108 +472,132 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
}
}
pub fn mark_immutable(&mut self, id: AllocId) -> EvalResult<'tcx> {
self.get_mut(id)?.mutability = Mutability::Immutable;
Ok(())
}
/// For debugging, print an allocation and all allocations it points to, recursively.
pub fn dump_alloc(&self, id: AllocId) {
if !log_enabled!(::log::Level::Trace) {
return;
}
self.dump_allocs(vec![id]);
}
fn dump_alloc_helper<Tag>(
&self,
allocs_seen: &mut FxHashSet<AllocId>,
allocs_to_print: &mut VecDeque<AllocId>,
mut msg: String,
alloc: &Allocation<Tag>,
extra: String,
) {
use std::fmt::Write;
let prefix_len = msg.len();
let mut relocations = vec![];
for i in 0..(alloc.bytes.len() as u64) {
let i = Size::from_bytes(i);
if let Some(&(_, target_id)) = alloc.relocations.get(&i) {
if allocs_seen.insert(target_id) {
allocs_to_print.push_back(target_id);
}
relocations.push((i, target_id));
}
if alloc.undef_mask.is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
// this `as usize` is fine, since `i` came from a `usize`
write!(msg, "{:02x} ", alloc.bytes[i.bytes() as usize]).unwrap();
} else {
msg.push_str("__ ");
}
}
trace!(
"{}({} bytes, alignment {}){}",
msg,
alloc.bytes.len(),
alloc.align.abi(),
extra
);
if !relocations.is_empty() {
msg.clear();
write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
let mut pos = Size::ZERO;
let relocation_width = (self.pointer_size().bytes() - 1) * 3;
for (i, target_id) in relocations {
// this `as usize` is fine, since we can't print more chars than `usize::MAX`
write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
let target = format!("({})", target_id);
// this `as usize` is fine, since we can't print more chars than `usize::MAX`
write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
pos = i + self.pointer_size();
}
trace!("{}", msg);
}
}
/// For debugging, print a list of allocations and all allocations they point to, recursively.
pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
if !log_enabled!(::log::Level::Trace) {
return;
}
use std::fmt::Write;
allocs.sort();
allocs.dedup();
let mut allocs_to_print = VecDeque::from(allocs);
let mut allocs_seen = FxHashSet::default();
while let Some(id) = allocs_to_print.pop_front() {
let mut msg = format!("Alloc {:<5} ", format!("{}:", id));
let prefix_len = msg.len();
let mut relocations = vec![];
let msg = format!("Alloc {:<5} ", format!("{}:", id));
let (alloc, immutable) =
// normal alloc?
match self.alloc_map.get(&id) {
Some((kind, alloc)) => (alloc, match kind {
// normal alloc?
match self.alloc_map.get_or(id, || Err(())) {
Ok((kind, alloc)) => {
let extra = match kind {
MemoryKind::Stack => " (stack)".to_owned(),
MemoryKind::Vtable => " (vtable)".to_owned(),
MemoryKind::Machine(m) => format!(" ({:?})", m),
}),
None => {
// static alloc?
match self.tcx.alloc_map.lock().get(id) {
Some(AllocType::Memory(a)) => (a, " (immutable)".to_owned()),
Some(AllocType::Function(func)) => {
trace!("{} {}", msg, func);
continue;
}
Some(AllocType::Static(did)) => {
trace!("{} {:?}", msg, did);
continue;
}
None => {
trace!("{} (deallocated)", msg);
continue;
}
};
self.dump_alloc_helper(
&mut allocs_seen, &mut allocs_to_print,
msg, alloc, extra
);
},
Err(()) => {
// static alloc?
match self.tcx.alloc_map.lock().get(id) {
Some(AllocType::Memory(alloc)) => {
self.dump_alloc_helper(
&mut allocs_seen, &mut allocs_to_print,
msg, alloc, " (immutable)".to_owned()
);
}
Some(AllocType::Function(func)) => {
trace!("{} {}", msg, func);
}
Some(AllocType::Static(did)) => {
trace!("{} {:?}", msg, did);
}
None => {
trace!("{} (deallocated)", msg);
}
},
};
for i in 0..(alloc.bytes.len() as u64) {
let i = Size::from_bytes(i);
if let Some(&target_id) = alloc.relocations.get(&i) {
if allocs_seen.insert(target_id) {
allocs_to_print.push_back(target_id);
}
relocations.push((i, target_id));
}
if alloc.undef_mask.is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
// this `as usize` is fine, since `i` came from a `usize`
write!(msg, "{:02x} ", alloc.bytes[i.bytes() as usize]).unwrap();
} else {
msg.push_str("__ ");
}
}
},
};
trace!(
"{}({} bytes, alignment {}){}",
msg,
alloc.bytes.len(),
alloc.align.abi(),
immutable
);
if !relocations.is_empty() {
msg.clear();
write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
let mut pos = Size::ZERO;
let relocation_width = (self.pointer_size().bytes() - 1) * 3;
for (i, target_id) in relocations {
// this `as usize` is fine, since we can't print more chars than `usize::MAX`
write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
let target = format!("({})", target_id);
// this `as usize` is fine, since we can't print more chars than `usize::MAX`
write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
pos = i + self.pointer_size();
}
trace!("{}", msg);
}
}
}
pub fn leak_report(&self) -> usize {
trace!("### LEAK REPORT ###");
let mut_static_kind = M::MUT_STATIC_KIND.map(|k| MemoryKind::Machine(k));
let leaks: Vec<_> = self.alloc_map
.iter()
.filter_map(|(&id, &(kind, _))|
// exclude mutable statics
if Some(kind) == mut_static_kind { None } else { Some(id) } )
.collect();
let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
// exclude statics and vtables
let exclude = match kind {
MemoryKind::Stack => false,
MemoryKind::Vtable => true,
MemoryKind::Machine(k) => Some(k) == M::STATIC_KIND,
};
if exclude { None } else { Some(id) }
});
let n = leaks.len();
self.dump_allocs(leaks);
n
@ -531,9 +609,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// The last argument controls whether we error out when there are undefined
/// or pointer bytes. You should never call this; call `get_bytes` or
/// `get_bytes_with_undef_and_ptr` instead.
///
/// This function also guarantees that the resulting pointer will remain stable
/// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
/// on that.
fn get_bytes_internal(
&self,
ptr: Pointer,
ptr: Pointer<M::PointerTag>,
size: Size,
align: Align,
check_defined_and_ptr: bool,
@ -558,7 +640,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
}
#[inline]
fn get_bytes(&self, ptr: Pointer, size: Size, align: Align) -> EvalResult<'tcx, &[u8]> {
fn get_bytes(
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: Align
) -> EvalResult<'tcx, &[u8]> {
self.get_bytes_internal(ptr, size, align, true)
}
@ -567,7 +654,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
#[inline]
fn get_bytes_with_undef_and_ptr(
&self,
ptr: Pointer,
ptr: Pointer<M::PointerTag>,
size: Size,
align: Align
) -> EvalResult<'tcx, &[u8]> {
@ -578,7 +665,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// so be sure to actually put data there!
fn get_bytes_mut(
&mut self,
ptr: Pointer,
ptr: Pointer<M::PointerTag>,
size: Size,
align: Align,
) -> EvalResult<'tcx, &mut [u8]> {
@ -597,8 +684,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
}
}
/// Reading and writing
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Interning (for CTFE)
impl<'a, 'mir, 'tcx, M> Memory<'a, 'mir, 'tcx, M>
where
M: Machine<'a, 'mir, 'tcx, PointerTag=()>,
M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation<()>)>,
{
/// mark an allocation as static and initialized, either mutable or not
pub fn intern_static(
&mut self,
@ -614,14 +705,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
let (kind, mut alloc) = self.alloc_map.remove(&alloc_id).unwrap();
match kind {
MemoryKind::Machine(_) => bug!("Static cannot refer to machine memory"),
MemoryKind::Stack => {},
MemoryKind::Stack | MemoryKind::Vtable => {},
}
// ensure llvm knows not to put this into immutable memory
alloc.mutability = mutability;
let alloc = self.tcx.intern_const_alloc(alloc);
self.tcx.alloc_map.lock().set_id_memory(alloc_id, alloc);
// recurse into inner allocations
for &alloc in alloc.relocations.values() {
for &(_, alloc) in alloc.relocations.values() {
// FIXME: Reusing the mutability here is likely incorrect. It is originally
// determined via `is_freeze`, and data is considered frozen if there is no
// `UnsafeCell` *immediately* in that data -- however, this search stops
@ -635,28 +726,15 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
}
Ok(())
}
}
/// The alloc_id must refer to a (mutable) static; a deep copy of that
/// static is made into this memory.
fn deep_copy_static(
&mut self,
id: AllocId,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx> {
let alloc = Self::get_static_alloc(self.tcx, id)?;
if alloc.mutability == Mutability::Immutable {
return err!(ModifiedConstantMemory);
}
let old = self.alloc_map.insert(id, (kind, alloc.clone()));
assert!(old.is_none(), "deep_copy_static: must not overwrite existing memory");
Ok(())
}
/// Reading and writing
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn copy(
&mut self,
src: Scalar,
src: Scalar<M::PointerTag>,
src_align: Align,
dest: Scalar,
dest: Scalar<M::PointerTag>,
dest_align: Align,
size: Size,
nonoverlapping: bool,
@ -666,9 +744,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn copy_repeatedly(
&mut self,
src: Scalar,
src: Scalar<M::PointerTag>,
src_align: Align,
dest: Scalar,
dest: Scalar<M::PointerTag>,
dest_align: Align,
size: Size,
length: u64,
@ -695,9 +773,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
new_relocations.extend(
relocations
.iter()
.map(|&(offset, alloc_id)| {
.map(|&(offset, reloc)| {
(offset + dest.offset - src.offset + (i * size * relocations.len() as u64),
alloc_id)
reloc)
})
);
}
@ -712,6 +790,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
// SAFE: The above indexing would have panicked if there weren't at least `size` bytes
// behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
// `dest` could possibly overlap.
// The pointers above remain valid even if the `HashMap` table is moved around because they
// point into the `Vec` storing the bytes.
unsafe {
assert_eq!(size.bytes() as usize as u64, size.bytes());
if src.alloc_id == dest.alloc_id {
@ -747,7 +827,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
Ok(())
}
pub fn read_c_str(&self, ptr: Pointer) -> EvalResult<'tcx, &[u8]> {
pub fn read_c_str(&self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx, &[u8]> {
let alloc = self.get(ptr.alloc_id)?;
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
let offset = ptr.offset.bytes() as usize;
@ -758,11 +838,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
self.check_defined(ptr, p1)?;
Ok(&alloc.bytes[offset..offset + size])
}
None => err!(UnterminatedCString(ptr)),
None => err!(UnterminatedCString(ptr.erase_tag())),
}
}
pub fn read_bytes(&self, ptr: Scalar, size: Size) -> EvalResult<'tcx, &[u8]> {
pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> EvalResult<'tcx, &[u8]> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
if size.bytes() == 0 {
@ -772,7 +852,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
self.get_bytes(ptr.to_ptr()?, size, align)
}
pub fn write_bytes(&mut self, ptr: Scalar, src: &[u8]) -> EvalResult<'tcx> {
pub fn write_bytes(&mut self, ptr: Scalar<M::PointerTag>, src: &[u8]) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
if src.is_empty() {
@ -784,7 +864,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
Ok(())
}
pub fn write_repeat(&mut self, ptr: Scalar, val: u8, count: Size) -> EvalResult<'tcx> {
pub fn write_repeat(
&mut self,
ptr: Scalar<M::PointerTag>,
val: u8,
count: Size
) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
if count.bytes() == 0 {
@ -801,10 +886,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Read a *non-ZST* scalar
pub fn read_scalar(
&self,
ptr: Pointer,
ptr: Pointer<M::PointerTag>,
ptr_align: Align,
size: Size
) -> EvalResult<'tcx, ScalarMaybeUndef> {
) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
// get_bytes_unchecked tests alignment and relocation edges
let bytes = self.get_bytes_with_undef_and_ptr(
ptr, size, ptr_align.min(self.int_align(size))
@ -825,8 +910,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
} else {
let alloc = self.get(ptr.alloc_id)?;
match alloc.relocations.get(&ptr.offset) {
Some(&alloc_id) => {
let ptr = Pointer::new(alloc_id, Size::from_bytes(bits as u64));
Some(&(tag, alloc_id)) => {
let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
return Ok(ScalarMaybeUndef::Scalar(ptr.into()))
}
None => {},
@ -836,17 +921,20 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
}
pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align)
-> EvalResult<'tcx, ScalarMaybeUndef> {
pub fn read_ptr_sized(
&self,
ptr: Pointer<M::PointerTag>,
ptr_align: Align
) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
self.read_scalar(ptr, ptr_align, self.pointer_size())
}
/// Write a *non-ZST* scalar
pub fn write_scalar(
&mut self,
ptr: Pointer,
ptr: Pointer<M::PointerTag>,
ptr_align: Align,
val: ScalarMaybeUndef,
val: ScalarMaybeUndef<M::PointerTag>,
type_size: Size,
) -> EvalResult<'tcx> {
let val = match val {
@ -880,7 +968,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
Scalar::Ptr(val) => {
self.get_mut(ptr.alloc_id)?.relocations.insert(
ptr.offset,
val.alloc_id,
(val.tag, val.alloc_id),
);
}
_ => {}
@ -889,8 +977,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
Ok(())
}
pub fn write_ptr_sized(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef)
-> EvalResult<'tcx> {
pub fn write_ptr_sized(
&mut self,
ptr: Pointer<M::PointerTag>,
ptr_align: Align,
val: ScalarMaybeUndef<M::PointerTag>
) -> EvalResult<'tcx> {
let ptr_size = self.pointer_size();
self.write_scalar(ptr.into(), ptr_align, val, ptr_size)
}
@ -915,9 +1007,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Return all relocations overlapping with the given ptr-offset pair.
fn relocations(
&self,
ptr: Pointer,
ptr: Pointer<M::PointerTag>,
size: Size,
) -> EvalResult<'tcx, &[(Size, AllocId)]> {
) -> EvalResult<'tcx, &[(Size, (M::PointerTag, AllocId))]> {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = ptr.offset.bytes().saturating_sub(self.pointer_size().bytes() - 1);
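// Worked example (assuming 8-byte pointers): a relocation recorded at offset 5
// covers bytes 5..13, so a query for a range starting at offset 8 must scan
// back to offset 8 - 7 = 1 to find it; `saturating_sub` clamps `start` to 0
// for queries near the beginning of the allocation.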
@ -927,7 +1019,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Check that there are no relocations overlapping with the given range.
#[inline(always)]
fn check_relocations(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> {
fn check_relocations(&self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
if self.relocations(ptr, size)?.len() != 0 {
err!(ReadPointerAsBytes)
} else {
@ -941,7 +1033,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// uninitialized. This is a somewhat odd "spooky action at a distance",
/// but it allows strictly more code to run than if we would just error
/// immediately in that case.
fn clear_relocations(&mut self, ptr: Pointer, size: Size) -> EvalResult<'tcx> {
fn clear_relocations(&mut self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
// Find the start and end of the given range and its outermost relocations.
let (first, last) = {
// Find all relocations overlapping the given range.
@ -976,7 +1068,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Error if there are relocations overlapping with the edges of the
/// given memory range.
#[inline]
fn check_relocation_edges(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> {
fn check_relocation_edges(&self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
self.check_relocations(ptr, Size::ZERO)?;
self.check_relocations(ptr.offset(size, self)?, Size::ZERO)?;
Ok(())
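// Note (hedged): a zero-sized query at each edge still catches relocations
// straddling that edge, because `relocations` widens every query backwards by
// `pointer_size - 1` bytes.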
@ -988,8 +1080,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
// FIXME: Add a fast version for the common, nonoverlapping case
fn copy_undef_mask(
&mut self,
src: Pointer,
dest: Pointer,
src: Pointer<M::PointerTag>,
dest: Pointer<M::PointerTag>,
size: Size,
repeat: u64,
) -> EvalResult<'tcx> {
@ -1016,7 +1108,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
/// Checks that a range of bytes is defined. If not, returns the `ReadUndefBytes`
/// error, which reports the first undefined byte.
#[inline]
fn check_defined(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> {
fn check_defined(&self, ptr: Pointer<M::PointerTag>, size: Size) -> EvalResult<'tcx> {
let alloc = self.get(ptr.alloc_id)?;
alloc.undef_mask.is_range_defined(
ptr.offset,
@ -1026,7 +1118,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn mark_definedness(
&mut self,
ptr: Pointer,
ptr: Pointer<M::PointerTag>,
size: Size,
new_state: bool,
) -> EvalResult<'tcx> {

View File

@ -32,7 +32,7 @@ pub use self::place::{Place, PlaceTy, MemPlace, MPlaceTy};
pub use self::memory::{Memory, MemoryKind};
pub use self::machine::Machine;
pub use self::machine::{Machine, AllocMap};
pub use self::operand::{ScalarMaybeUndef, Value, ValTy, Operand, OpTy};

View File

@ -25,21 +25,42 @@ use rustc::mir::interpret::{
use super::{EvalContext, Machine, MemPlace, MPlaceTy, MemoryKind};
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
pub enum ScalarMaybeUndef<Id=AllocId> {
Scalar(Scalar<Id>),
pub enum ScalarMaybeUndef<Tag=(), Id=AllocId> {
Scalar(Scalar<Tag, Id>),
Undef,
}
impl From<Scalar> for ScalarMaybeUndef {
impl<Tag> From<Scalar<Tag>> for ScalarMaybeUndef<Tag> {
#[inline(always)]
fn from(s: Scalar) -> Self {
fn from(s: Scalar<Tag>) -> Self {
ScalarMaybeUndef::Scalar(s)
}
}
impl<'tcx> ScalarMaybeUndef {
impl<'tcx> ScalarMaybeUndef<()> {
#[inline]
pub fn not_undef(self) -> EvalResult<'static, Scalar> {
pub fn with_default_tag<Tag>(self) -> ScalarMaybeUndef<Tag>
where Tag: Default
{
match self {
ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.with_default_tag()),
ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef,
}
}
}
impl<'tcx, Tag> ScalarMaybeUndef<Tag> {
#[inline]
pub fn erase_tag(self) -> ScalarMaybeUndef
{
match self {
ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.erase_tag()),
ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef,
}
}
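// Usage sketch (illustrative; `MyTag` is a hypothetical machine tag type that
// implements `Default`): tagging and untagging round-trip, and the type
// defaults mean `ScalarMaybeUndef` without parameters is the untagged form.
//
//     let s: ScalarMaybeUndef = ScalarMaybeUndef::Undef;
//     let tagged: ScalarMaybeUndef<MyTag> = s.with_default_tag();
//     assert_eq!(tagged.erase_tag(), s);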
#[inline]
pub fn not_undef(self) -> EvalResult<'static, Scalar<Tag>> {
match self {
ScalarMaybeUndef::Scalar(scalar) => Ok(scalar),
ScalarMaybeUndef::Undef => err!(ReadUndefBytes(Size::from_bytes(0))),
@ -47,7 +68,7 @@ impl<'tcx> ScalarMaybeUndef {
}
#[inline(always)]
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
self.not_undef()?.to_ptr()
}
@ -126,26 +147,49 @@ impl<'tcx> ScalarMaybeUndef {
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Value`, and do not have to work with a `Place`.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Value<Id=AllocId> {
Scalar(ScalarMaybeUndef<Id>),
ScalarPair(ScalarMaybeUndef<Id>, ScalarMaybeUndef<Id>),
pub enum Value<Tag=(), Id=AllocId> {
Scalar(ScalarMaybeUndef<Tag, Id>),
ScalarPair(ScalarMaybeUndef<Tag, Id>, ScalarMaybeUndef<Tag, Id>),
}
impl<'tcx> Value {
impl Value {
#[inline]
pub fn with_default_tag<Tag>(self) -> Value<Tag>
where Tag: Default
{
match self {
Value::Scalar(x) => Value::Scalar(x.with_default_tag()),
Value::ScalarPair(x, y) =>
Value::ScalarPair(x.with_default_tag(), y.with_default_tag()),
}
}
}
impl<'tcx, Tag> Value<Tag> {
#[inline]
pub fn erase_tag(self) -> Value
{
match self {
Value::Scalar(x) => Value::Scalar(x.erase_tag()),
Value::ScalarPair(x, y) =>
Value::ScalarPair(x.erase_tag(), y.erase_tag()),
}
}
pub fn new_slice(
val: Scalar,
val: Scalar<Tag>,
len: u64,
cx: impl HasDataLayout
) -> Self {
Value::ScalarPair(val.into(), Scalar::from_uint(len, cx.data_layout().pointer_size).into())
}
pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self {
pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>) -> Self {
Value::ScalarPair(val.into(), Scalar::Ptr(vtable).into())
}
#[inline]
pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef {
pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef<Tag> {
match self {
Value::Scalar(val) => val,
Value::ScalarPair(..) => bug!("Got a fat pointer where a scalar was expected"),
@ -153,12 +197,12 @@ impl<'tcx> Value {
}
#[inline]
pub fn to_scalar(self) -> EvalResult<'tcx, Scalar> {
pub fn to_scalar(self) -> EvalResult<'tcx, Scalar<Tag>> {
self.to_scalar_or_undef().not_undef()
}
#[inline]
pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar, Scalar)> {
pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
match self {
Value::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"),
Value::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?))
@ -168,7 +212,7 @@ impl<'tcx> Value {
/// Convert the value into a pointer (or a pointer-sized integer).
/// Throws away the second half of a ScalarPair!
#[inline]
pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> {
pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar<Tag>> {
match self {
Value::Scalar(ptr) |
Value::ScalarPair(ptr, _) => ptr.not_undef(),
@ -179,15 +223,15 @@ impl<'tcx> Value {
// ScalarPair needs a type to interpret, so we often have a value and a type together
// as input for binary and cast operations.
#[derive(Copy, Clone, Debug)]
pub struct ValTy<'tcx> {
value: Value,
pub struct ValTy<'tcx, Tag=()> {
value: Value<Tag>,
pub layout: TyLayout<'tcx>,
}
impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
type Target = Value;
impl<'tcx, Tag> ::std::ops::Deref for ValTy<'tcx, Tag> {
type Target = Value<Tag>;
#[inline(always)]
fn deref(&self) -> &Value {
fn deref(&self) -> &Value<Tag> {
&self.value
}
}
@ -196,14 +240,37 @@ impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Operand<Id=AllocId> {
Immediate(Value<Id>),
Indirect(MemPlace<Id>),
pub enum Operand<Tag=(), Id=AllocId> {
Immediate(Value<Tag, Id>),
Indirect(MemPlace<Tag, Id>),
}
impl Operand {
#[inline]
pub fn to_mem_place(self) -> MemPlace {
pub fn with_default_tag<Tag>(self) -> Operand<Tag>
where Tag: Default
{
match self {
Operand::Immediate(x) => Operand::Immediate(x.with_default_tag()),
Operand::Indirect(x) => Operand::Indirect(x.with_default_tag()),
}
}
}
impl<Tag> Operand<Tag> {
#[inline]
pub fn erase_tag(self) -> Operand
{
match self {
Operand::Immediate(x) => Operand::Immediate(x.erase_tag()),
Operand::Indirect(x) => Operand::Indirect(x.erase_tag()),
}
}
#[inline]
pub fn to_mem_place(self) -> MemPlace<Tag>
where Tag: ::std::fmt::Debug
{
match self {
Operand::Indirect(mplace) => mplace,
_ => bug!("to_mem_place: expected Operand::Indirect, got {:?}", self),
@ -212,7 +279,9 @@ impl Operand {
}
#[inline]
pub fn to_immediate(self) -> Value {
pub fn to_immediate(self) -> Value<Tag>
where Tag: ::std::fmt::Debug
{
match self {
Operand::Immediate(val) => val,
_ => bug!("to_immediate: expected Operand::Immediate, got {:?}", self),
@ -222,22 +291,22 @@ impl Operand {
}
#[derive(Copy, Clone, Debug)]
pub struct OpTy<'tcx> {
crate op: Operand, // ideally we'd make this private, but const_prop needs this
pub struct OpTy<'tcx, Tag=()> {
crate op: Operand<Tag>, // ideally we'd make this private, but const_prop needs this
pub layout: TyLayout<'tcx>,
}
impl<'tcx> ::std::ops::Deref for OpTy<'tcx> {
type Target = Operand;
impl<'tcx, Tag> ::std::ops::Deref for OpTy<'tcx, Tag> {
type Target = Operand<Tag>;
#[inline(always)]
fn deref(&self) -> &Operand {
fn deref(&self) -> &Operand<Tag> {
&self.op
}
}
impl<'tcx> From<MPlaceTy<'tcx>> for OpTy<'tcx> {
impl<'tcx, Tag: Copy> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx>) -> Self {
fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
OpTy {
op: Operand::Indirect(*mplace),
layout: mplace.layout
@ -245,9 +314,9 @@ impl<'tcx> From<MPlaceTy<'tcx>> for OpTy<'tcx> {
}
}
impl<'tcx> From<ValTy<'tcx>> for OpTy<'tcx> {
impl<'tcx, Tag> From<ValTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
#[inline(always)]
fn from(val: ValTy<'tcx>) -> Self {
fn from(val: ValTy<'tcx, Tag>) -> Self {
OpTy {
op: Operand::Immediate(val.value),
layout: val.layout
@ -256,18 +325,36 @@ impl<'tcx> From<ValTy<'tcx>> for OpTy<'tcx> {
}
// Validation needs to hash OpTy, but we cannot hash Layout -- so we just hash the type
impl<'tcx> Hash for OpTy<'tcx> {
impl<'tcx, Tag> Hash for OpTy<'tcx, Tag>
where Tag: Hash
{
fn hash<H: Hasher>(&self, state: &mut H) {
self.op.hash(state);
self.layout.ty.hash(state);
}
}
impl<'tcx> PartialEq for OpTy<'tcx> {
impl<'tcx, Tag> PartialEq for OpTy<'tcx, Tag>
where Tag: PartialEq
{
fn eq(&self, other: &Self) -> bool {
self.op == other.op && self.layout.ty == other.layout.ty
}
}
impl<'tcx> Eq for OpTy<'tcx> {}
impl<'tcx, Tag> Eq for OpTy<'tcx, Tag>
where Tag: Eq
{}
impl<'tcx, Tag> OpTy<'tcx, Tag>
{
#[inline]
pub fn erase_tag(self) -> OpTy<'tcx>
{
OpTy {
op: self.op.erase_tag(),
layout: self.layout,
}
}
}
// Use the existing layout if given (but sanity check in debug mode),
// or compute the layout.
@ -295,8 +382,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Return None if the layout does not permit loading this as a value.
pub(super) fn try_read_value_from_mplace(
&self,
mplace: MPlaceTy<'tcx>,
) -> EvalResult<'tcx, Option<Value>> {
mplace: MPlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, Option<Value<M::PointerTag>>> {
if mplace.layout.is_unsized() {
// Don't touch unsized
return Ok(None);
@ -339,8 +426,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// in a `Value`, not on which data is stored there currently.
pub(crate) fn try_read_value(
&self,
src: OpTy<'tcx>,
) -> EvalResult<'tcx, Result<Value, MemPlace>> {
src: OpTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, Result<Value<M::PointerTag>, MemPlace<M::PointerTag>>> {
Ok(match src.try_as_mplace() {
Ok(mplace) => {
if let Some(val) = self.try_read_value_from_mplace(mplace)? {
@ -355,7 +442,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Read a value from a place, asserting that that is possible with the given layout.
#[inline(always)]
pub fn read_value(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
pub fn read_value(
&self,
op: OpTy<'tcx, M::PointerTag>
) -> EvalResult<'tcx, ValTy<'tcx, M::PointerTag>> {
if let Ok(value) = self.try_read_value(op)? {
Ok(ValTy { value, layout: op.layout })
} else {
@ -364,7 +454,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
}
/// Read a scalar from a place
pub fn read_scalar(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ScalarMaybeUndef> {
pub fn read_scalar(
&self,
op: OpTy<'tcx, M::PointerTag>
) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
match *self.read_value(op)? {
Value::ScalarPair(..) => bug!("got ScalarPair for type: {:?}", op.layout.ty),
Value::Scalar(val) => Ok(val),
@ -374,7 +467,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
// Turn the MPlace into a string (must already be dereferenced!)
pub fn read_str(
&self,
mplace: MPlaceTy<'tcx>,
mplace: MPlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, &str> {
let len = mplace.len(self)?;
let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?;
@ -383,7 +476,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
Ok(str)
}
pub fn uninit_operand(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Operand> {
pub fn uninit_operand(
&mut self,
layout: TyLayout<'tcx>
) -> EvalResult<'tcx, Operand<M::PointerTag>> {
// This decides which types we will use the Immediate optimization for, and hence should
// match what `try_read_value` and `eval_place_to_op` support.
if layout.is_zst() {
@ -410,9 +506,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Projection functions
pub fn operand_field(
&self,
op: OpTy<'tcx>,
op: OpTy<'tcx, M::PointerTag>,
field: u64,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let base = match op.try_as_mplace() {
Ok(mplace) => {
// The easy case
@ -445,9 +541,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn operand_downcast(
&self,
op: OpTy<'tcx>,
op: OpTy<'tcx, M::PointerTag>,
variant: usize,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout
Ok(match op.try_as_mplace() {
Ok(mplace) => {
@ -464,8 +560,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
// will always be a MemPlace.
pub(super) fn deref_operand(
&self,
src: OpTy<'tcx>,
) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
src: OpTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let val = self.read_value(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val);
Ok(self.ref_to_mplace(val)?)
@ -473,9 +569,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn operand_projection(
&self,
base: OpTy<'tcx>,
base: OpTy<'tcx, M::PointerTag>,
proj_elem: &mir::PlaceElem<'tcx>,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
Field(field, _) => self.operand_field(base, field.index() as u64)?,
@ -503,7 +599,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
&self,
mir_place: &mir::Place<'tcx>,
layout: Option<TyLayout<'tcx>>,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::Place::*;
let op = match *mir_place {
Local(mir::RETURN_PLACE) => return err!(ReadFromReturnPointer),
@ -533,7 +629,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
&self,
mir_op: &mir::Operand<'tcx>,
layout: Option<TyLayout<'tcx>>,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::Operand::*;
let op = match *mir_op {
// FIXME: do some more logic on `move` to invalidate the old location
@ -558,7 +654,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub(super) fn eval_operands(
&self,
ops: &[mir::Operand<'tcx>],
) -> EvalResult<'tcx, Vec<OpTy<'tcx>>> {
) -> EvalResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
ops.into_iter()
.map(|op| self.eval_operand(op, None))
.collect()
@ -568,7 +664,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub(super) fn const_value_to_op(
&self,
val: ConstValue<'tcx>,
) -> EvalResult<'tcx, Operand> {
) -> EvalResult<'tcx, Operand<M::PointerTag>> {
trace!("const_value_to_op: {:?}", val);
match val {
ConstValue::Unevaluated(def_id, substs) => {
@ -581,23 +677,28 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
ConstValue::ByRef(id, alloc, offset) => {
// We rely on mutability being set correctly in that allocation to prevent writes
// where none should happen -- and for `static mut`, we copy on demand anyway.
Ok(Operand::Indirect(MemPlace::from_ptr(Pointer::new(id, offset), alloc.align)))
Ok(Operand::Indirect(
MemPlace::from_ptr(Pointer::new(id, offset), alloc.align)
).with_default_tag())
},
ConstValue::ScalarPair(a, b) =>
Ok(Operand::Immediate(Value::ScalarPair(a.into(), b.into()))),
Ok(Operand::Immediate(Value::ScalarPair(a.into(), b.into())).with_default_tag()),
ConstValue::Scalar(x) =>
Ok(Operand::Immediate(Value::Scalar(x.into()))),
Ok(Operand::Immediate(Value::Scalar(x.into())).with_default_tag()),
}
}
pub fn const_to_op(
&self,
cnst: &ty::Const<'tcx>,
) -> EvalResult<'tcx, OpTy<'tcx>> {
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let op = self.const_value_to_op(cnst.val)?;
Ok(OpTy { op, layout: self.layout_of(cnst.ty)? })
}
pub(super) fn global_to_op(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Operand> {
pub(super) fn global_to_op(
&self,
gid: GlobalId<'tcx>
) -> EvalResult<'tcx, Operand<M::PointerTag>> {
let cv = self.const_eval(gid)?;
self.const_value_to_op(cv.val)
}
@ -605,7 +706,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Read discriminant, return the runtime value as well as the variant index.
pub fn read_discriminant(
&self,
rval: OpTy<'tcx>,
rval: OpTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, (u128, usize)> {
trace!("read_discriminant_value {:#?}", rval.layout);
if rval.layout.abi.is_uninhabited() {

View File

@ -24,9 +24,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn binop_with_overflow(
&mut self,
op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
dest: PlaceTy<'tcx>,
left: ValTy<'tcx, M::PointerTag>,
right: ValTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
let (val, overflowed) = self.binary_op_val(op, left, right)?;
let val = Value::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
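// Example (hedged): the pair just built has the `(T, bool)` shape that MIR's
// checked arithmetic expects, so a checked `3 + 4` on `i32` would store
// `ScalarPair(7, false)` into `dest`.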
@ -38,9 +38,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn binop_ignore_overflow(
&mut self,
op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
dest: PlaceTy<'tcx>,
left: ValTy<'tcx, M::PointerTag>,
right: ValTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
let (val, _overflowed) = self.binary_op_val(op, left, right)?;
self.write_scalar(val, dest)
@ -53,7 +53,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
bin_op: mir::BinOp,
l: char,
r: char,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
let res = match bin_op {
@ -73,7 +73,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
bin_op: mir::BinOp,
l: bool,
r: bool,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
let res = match bin_op {
@ -98,7 +98,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
// passing in raw bits
l: u128,
r: u128,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
macro_rules! float_math {
@ -138,7 +138,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
left_layout: TyLayout<'tcx>,
r: u128,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
// Shift ops can have an RHS with a different numeric type.
@ -288,9 +288,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn binary_op_val(
&self,
bin_op: mir::BinOp,
left: ValTy<'tcx>,
right: ValTy<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)> {
left: ValTy<'tcx, M::PointerTag>,
right: ValTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
self.binary_op(
bin_op,
left.to_scalar()?, left.layout,
@ -302,11 +302,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn binary_op(
&self,
bin_op: mir::BinOp,
left: Scalar,
left: Scalar<M::PointerTag>,
left_layout: TyLayout<'tcx>,
right: Scalar,
right: Scalar<M::PointerTag>,
right_layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, (Scalar, bool)> {
) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op, left, left_layout.ty, right, right_layout.ty);
@ -352,9 +352,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn unary_op(
&self,
un_op: mir::UnOp,
val: Scalar,
val: Scalar<M::PointerTag>,
layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Scalar> {
) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
use rustc::mir::UnOp::*;
use rustc_apfloat::ieee::{Single, Double};
use rustc_apfloat::Float;

View File

@ -13,33 +13,37 @@
//! All high-level functions to write to memory work on places as destinations.
use std::convert::TryFrom;
use std::hash::Hash;
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Size, Align, LayoutOf, TyLayout, HasDataLayout};
use rustc::mir::interpret::{
GlobalId, AllocId, Scalar, EvalResult, Pointer, PointerArithmetic
GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic
};
use super::{
EvalContext, Machine, AllocMap,
Value, ValTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind
};
use super::{EvalContext, Machine, Value, ValTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind};
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct MemPlace<Id=AllocId> {
pub struct MemPlace<Tag=(), Id=AllocId> {
/// A place may have an integral pointer for ZSTs, since it might
/// be turned back into a reference before ever being dereferenced.
/// However, it may never be undef.
pub ptr: Scalar<Id>,
pub ptr: Scalar<Tag, Id>,
pub align: Align,
/// Metadata for unsized places. Interpretation is up to the type.
/// Must not be present for sized types, but can be missing for unsized types
/// (e.g. `extern type`).
pub extra: Option<Scalar<Id>>,
pub meta: Option<Scalar<Tag, Id>>,
}
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Place<Id=AllocId> {
pub enum Place<Tag=(), Id=AllocId> {
/// A place referring to a value allocated in the `Memory` system.
Ptr(MemPlace<Id>),
Ptr(MemPlace<Tag, Id>),
/// To support alloc-free locals, we are able to write directly to a local.
/// (Without that optimization, we'd just always be a `MemPlace`.)
@ -50,37 +54,37 @@ pub enum Place<Id=AllocId> {
}
#[derive(Copy, Clone, Debug)]
pub struct PlaceTy<'tcx> {
place: Place,
pub struct PlaceTy<'tcx, Tag=()> {
place: Place<Tag>,
pub layout: TyLayout<'tcx>,
}
impl<'tcx> ::std::ops::Deref for PlaceTy<'tcx> {
type Target = Place;
impl<'tcx, Tag> ::std::ops::Deref for PlaceTy<'tcx, Tag> {
type Target = Place<Tag>;
#[inline(always)]
fn deref(&self) -> &Place {
fn deref(&self) -> &Place<Tag> {
&self.place
}
}
/// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Debug)]
pub struct MPlaceTy<'tcx> {
mplace: MemPlace,
pub struct MPlaceTy<'tcx, Tag=()> {
mplace: MemPlace<Tag>,
pub layout: TyLayout<'tcx>,
}
impl<'tcx> ::std::ops::Deref for MPlaceTy<'tcx> {
type Target = MemPlace;
impl<'tcx, Tag> ::std::ops::Deref for MPlaceTy<'tcx, Tag> {
type Target = MemPlace<Tag>;
#[inline(always)]
fn deref(&self) -> &MemPlace {
fn deref(&self) -> &MemPlace<Tag> {
&self.mplace
}
}
impl<'tcx> From<MPlaceTy<'tcx>> for PlaceTy<'tcx> {
impl<'tcx, Tag> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
#[inline(always)]
fn from(mplace: MPlaceTy<'tcx>) -> Self {
fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
PlaceTy {
place: Place::Ptr(mplace.mplace),
layout: mplace.layout
@ -89,29 +93,52 @@ impl<'tcx> From<MPlaceTy<'tcx>> for PlaceTy<'tcx> {
}
impl MemPlace {
#[inline(always)]
pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self {
#[inline]
pub fn with_default_tag<Tag>(self) -> MemPlace<Tag>
where Tag: Default
{
MemPlace {
ptr,
align,
extra: None,
ptr: self.ptr.with_default_tag(),
align: self.align,
meta: self.meta.map(Scalar::with_default_tag),
}
}
}
impl<Tag> MemPlace<Tag> {
#[inline]
pub fn erase_tag(self) -> MemPlace
{
MemPlace {
ptr: self.ptr.erase_tag(),
align: self.align,
meta: self.meta.map(Scalar::erase_tag),
}
}
#[inline(always)]
pub fn from_ptr(ptr: Pointer, align: Align) -> Self {
pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
MemPlace {
ptr,
align,
meta: None,
}
}
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self {
Self::from_scalar_ptr(ptr.into(), align)
}
#[inline(always)]
pub fn to_scalar_ptr_align(self) -> (Scalar, Align) {
assert_eq!(self.extra, None);
pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, Align) {
assert!(self.meta.is_none());
(self.ptr, self.align)
}
/// Extract the ptr part of the mplace
#[inline(always)]
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
// At this point, we forget about the alignment information --
// the place has been turned into a reference, and no matter where it came from,
// it now must be aligned.
@ -120,17 +147,17 @@ impl MemPlace {
/// Turn a mplace into a (thin or fat) pointer, as a reference, pointing to the same space.
/// This is the inverse of `ref_to_mplace`.
pub fn to_ref(self) -> Value {
pub fn to_ref(self) -> Value<Tag> {
// We ignore the alignment of the place here -- special handling for packed structs ends
// at the `&` operator.
match self.extra {
match self.meta {
None => Value::Scalar(self.ptr.into()),
Some(extra) => Value::ScalarPair(self.ptr.into(), extra.into()),
Some(meta) => Value::ScalarPair(self.ptr.into(), meta.into()),
}
}
}
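// Example (illustrative): for `&u8` the place is thin (`meta == None`), so
// `to_ref` yields `Value::Scalar(ptr)`; for `&[u8]` the place carries
// `meta == Some(len)` and `to_ref` yields `Value::ScalarPair(ptr, len)` --
// exactly what `ref_to_mplace` takes apart again.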
impl<'tcx> MPlaceTy<'tcx> {
impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
/// Produces a MemPlace that works for ZST but nothing else
#[inline]
pub fn dangling(layout: TyLayout<'tcx>, cx: impl HasDataLayout) -> Self {
@ -144,17 +171,17 @@ impl<'tcx> MPlaceTy<'tcx> {
}
#[inline]
fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self {
fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyLayout<'tcx>) -> Self {
MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align), layout }
}
#[inline]
pub(super) fn len(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> {
if self.layout.is_unsized() {
// We need to consult `extra` metadata
// We need to consult the place metadata in `meta`
match self.layout.ty.sty {
ty::Slice(..) | ty::Str =>
return self.extra.unwrap().to_usize(cx),
return self.mplace.meta.unwrap().to_usize(cx),
_ => bug!("len not supported on unsized type {:?}", self.layout.ty),
}
} else {
@ -168,30 +195,30 @@ impl<'tcx> MPlaceTy<'tcx> {
}
#[inline]
pub(super) fn vtable(self) -> EvalResult<'tcx, Pointer> {
pub(super) fn vtable(self) -> EvalResult<'tcx, Pointer<Tag>> {
match self.layout.ty.sty {
ty::Dynamic(..) => self.extra.unwrap().to_ptr(),
ty::Dynamic(..) => self.mplace.meta.unwrap().to_ptr(),
_ => bug!("vtable not supported on type {:?}", self.layout.ty),
}
}
}
impl<'tcx> OpTy<'tcx> {
impl<'tcx, Tag: ::std::fmt::Debug> OpTy<'tcx, Tag> {
#[inline(always)]
pub fn try_as_mplace(self) -> Result<MPlaceTy<'tcx>, Value> {
match *self {
pub fn try_as_mplace(self) -> Result<MPlaceTy<'tcx, Tag>, Value<Tag>> {
match self.op {
Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
Operand::Immediate(value) => Err(value),
}
}
#[inline(always)]
pub fn to_mem_place(self) -> MPlaceTy<'tcx> {
pub fn to_mem_place(self) -> MPlaceTy<'tcx, Tag> {
self.try_as_mplace().unwrap()
}
}
impl<'tcx> Place {
impl<'tcx, Tag: ::std::fmt::Debug> Place<Tag> {
/// Produces a Place that will error if attempted to be read from or written to
#[inline]
pub fn null(cx: impl HasDataLayout) -> Self {
@ -199,17 +226,17 @@ impl<'tcx> Place {
}
#[inline]
pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self {
pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
Place::Ptr(MemPlace::from_scalar_ptr(ptr, align))
}
#[inline]
pub fn from_ptr(ptr: Pointer, align: Align) -> Self {
pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self {
Place::Ptr(MemPlace::from_ptr(ptr, align))
}
#[inline]
pub fn to_mem_place(self) -> MemPlace {
pub fn to_mem_place(self) -> MemPlace<Tag> {
match self {
Place::Ptr(mplace) => mplace,
_ => bug!("to_mem_place: expected Place::Ptr, got {:?}", self),
@ -218,17 +245,17 @@ impl<'tcx> Place {
}
#[inline]
pub fn to_scalar_ptr_align(self) -> (Scalar, Align) {
pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, Align) {
self.to_mem_place().to_scalar_ptr_align()
}
#[inline]
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
self.to_mem_place().to_ptr()
}
}
impl<'tcx> PlaceTy<'tcx> {
impl<'tcx, Tag: ::std::fmt::Debug> PlaceTy<'tcx, Tag> {
/// Produces a Place that will error if attempted to be read from or written to
#[inline]
pub fn null(cx: impl HasDataLayout, layout: TyLayout<'tcx>) -> Self {
@ -236,25 +263,31 @@ impl<'tcx> PlaceTy<'tcx> {
}
#[inline]
pub fn to_mem_place(self) -> MPlaceTy<'tcx> {
pub fn to_mem_place(self) -> MPlaceTy<'tcx, Tag> {
MPlaceTy { mplace: self.place.to_mem_place(), layout: self.layout }
}
}
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
// separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385
impl<'a, 'mir, 'tcx, Tag, M> EvalContext<'a, 'mir, 'tcx, M>
where
Tag: ::std::fmt::Debug+Default+Copy+Eq+Hash+'static,
M: Machine<'a, 'mir, 'tcx, PointerTag=Tag>,
M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation<Tag>)>,
{
/// Take a value, which represents a (thin or fat) reference, and make it a place.
/// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref`.
pub fn ref_to_mplace(
&self, val: ValTy<'tcx>
) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
&self, val: ValTy<'tcx, M::PointerTag>
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let pointee_type = val.layout.ty.builtin_deref(true).unwrap().ty;
let layout = self.layout_of(pointee_type)?;
let align = layout.align;
let mplace = match *val {
Value::Scalar(ptr) =>
MemPlace { ptr: ptr.not_undef()?, align, extra: None },
Value::ScalarPair(ptr, extra) =>
MemPlace { ptr: ptr.not_undef()?, align, extra: Some(extra.not_undef()?) },
MemPlace { ptr: ptr.not_undef()?, align, meta: None },
Value::ScalarPair(ptr, meta) =>
MemPlace { ptr: ptr.not_undef()?, align, meta: Some(meta.not_undef()?) },
};
Ok(MPlaceTy { mplace, layout })
}
@ -265,9 +298,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
#[inline(always)]
pub fn mplace_field(
&self,
base: MPlaceTy<'tcx>,
base: MPlaceTy<'tcx, M::PointerTag>,
field: u64,
) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// Not using the layout method because we want to compute on u64
let offset = match base.layout.fields {
layout::FieldPlacement::Arbitrary { ref offsets, .. } =>
@ -290,13 +323,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?;
// Offset may need adjustment for unsized fields
let (extra, offset) = if field_layout.is_unsized() {
let (meta, offset) = if field_layout.is_unsized() {
// re-use parent metadata to determine dynamic field layout
let (_, align) = self.size_and_align_of(base.extra, field_layout)?;
(base.extra, offset.abi_align(align))
let (_, align) = self.size_and_align_of(base.meta, field_layout)?;
(base.meta, offset.abi_align(align))
} else {
// base.extra could be present; we might be accessing a sized field of an unsized
// base.meta could be present; we might be accessing a sized field of an unsized
// struct.
(None, offset)
};
@ -307,15 +340,17 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
// codegen -- mostly to see if we can get away with that
.restrict_for_offset(offset); // must be last thing that happens
Ok(MPlaceTy { mplace: MemPlace { ptr, align, extra }, layout: field_layout })
Ok(MPlaceTy { mplace: MemPlace { ptr, align, meta }, layout: field_layout })
}
// Iterates over all fields of an array. Much more efficient than doing the
// same by repeatedly calling `mplace_array`.
pub fn mplace_array_fields(
&self,
base: MPlaceTy<'tcx>,
) -> EvalResult<'tcx, impl Iterator<Item=EvalResult<'tcx, MPlaceTy<'tcx>>> + 'a> {
base: MPlaceTy<'tcx, Tag>,
) ->
EvalResult<'tcx, impl Iterator<Item=EvalResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a>
{
let len = base.len(self)?; // also asserts that we have a type where this makes sense
let stride = match base.layout.fields {
layout::FieldPlacement::Array { stride, .. } => stride,
@ -326,7 +361,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
Ok((0..len).map(move |i| {
let ptr = base.ptr.ptr_offset(i * stride, dl)?;
Ok(MPlaceTy {
mplace: MemPlace { ptr, align: base.align, extra: None },
mplace: MemPlace { ptr, align: base.align, meta: None },
layout
})
}))
@ -334,10 +369,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn mplace_subslice(
&self,
base: MPlaceTy<'tcx>,
base: MPlaceTy<'tcx, M::PointerTag>,
from: u64,
to: u64,
) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let len = base.len(self)?; // also asserts that we have a type where this makes sense
assert!(from <= len - to);
@ -350,9 +385,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
};
let ptr = base.ptr.ptr_offset(from_offset, self)?;
// Compute extra and new layout
// Compute meta and new layout
let inner_len = len - to - from;
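// Worked example (hedged): for `len == 10`, `from == 2`, `to == 3` (counted
// from the end), the subslice keeps indices 2..7, so `inner_len == 5`.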
let (extra, ty) = match base.layout.ty.sty {
let (meta, ty) = match base.layout.ty.sty {
// It is not nice to match on the type, but that seems to be the only way to
// implement this.
ty::Array(inner, _) =>
@ -367,27 +402,27 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
let layout = self.layout_of(ty)?;
Ok(MPlaceTy {
mplace: MemPlace { ptr, align: base.align, extra },
mplace: MemPlace { ptr, align: base.align, meta },
layout
})
}
pub fn mplace_downcast(
&self,
base: MPlaceTy<'tcx>,
base: MPlaceTy<'tcx, M::PointerTag>,
variant: usize,
) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout
assert_eq!(base.extra, None);
assert!(base.meta.is_none());
Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base })
}
/// Project into an mplace
pub fn mplace_projection(
&self,
base: MPlaceTy<'tcx>,
base: MPlaceTy<'tcx, M::PointerTag>,
proj_elem: &mir::PlaceElem<'tcx>,
) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
Field(field, _) => self.mplace_field(base, field.index() as u64)?,
@ -428,9 +463,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Just a convenience function, but used quite a bit.
pub fn place_field(
&mut self,
base: PlaceTy<'tcx>,
base: PlaceTy<'tcx, M::PointerTag>,
field: u64,
) -> EvalResult<'tcx, PlaceTy<'tcx>> {
) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
// FIXME: We could try to be smarter and avoid allocation for fields that span the
// entire place.
let mplace = self.force_allocation(base)?;
@ -439,9 +474,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn place_downcast(
&mut self,
base: PlaceTy<'tcx>,
base: PlaceTy<'tcx, M::PointerTag>,
variant: usize,
) -> EvalResult<'tcx, PlaceTy<'tcx>> {
) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
// Downcast just changes the layout
Ok(match base.place {
Place::Ptr(mplace) =>
@ -456,9 +491,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Project into a place
pub fn place_projection(
&mut self,
base: PlaceTy<'tcx>,
base: PlaceTy<'tcx, M::PointerTag>,
proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
) -> EvalResult<'tcx, PlaceTy<'tcx>> {
) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
Field(field, _) => self.place_field(base, field.index() as u64)?,
@ -478,7 +513,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub(super) fn eval_place_to_mplace(
&self,
mir_place: &mir::Place<'tcx>
) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::Place::*;
Ok(match *mir_place {
Promoted(ref promoted) => {
@ -515,7 +550,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
// and miri: They use the same query to eventually obtain a `ty::Const`
// and use that for further computation.
let alloc = self.tcx.alloc_map.lock().intern_static(cid.instance.def_id());
MPlaceTy::from_aligned_ptr(alloc.into(), layout)
MPlaceTy::from_aligned_ptr(Pointer::from(alloc).with_default_tag(), layout)
}
_ => bug!("eval_place_to_mplace called on {:?}", mir_place),
@ -524,7 +559,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Compute a place. You should only use this if you intend to write into this
/// place; for reading, a more efficient alternative is `eval_place_for_read`.
pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, PlaceTy<'tcx>> {
pub fn eval_place(
&mut self,
mir_place: &mir::Place<'tcx>
) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::Place::*;
let place = match *mir_place {
Local(mir::RETURN_PLACE) => PlaceTy {
@ -554,8 +592,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Write a scalar to a place
pub fn write_scalar(
&mut self,
val: impl Into<ScalarMaybeUndef>,
dest: PlaceTy<'tcx>,
val: impl Into<ScalarMaybeUndef<M::PointerTag>>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
self.write_value(Value::Scalar(val.into()), dest)
}
@ -563,8 +601,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Write a value to a place
pub fn write_value(
&mut self,
src_val: Value,
dest: PlaceTy<'tcx>,
src_val: Value<M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
trace!("write_value: {:?} <- {:?}", *dest, src_val);
// Check that the value actually is okay for that type
@ -599,8 +637,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// done that before calling this!
fn write_value_to_mplace(
&mut self,
value: Value,
dest: MPlaceTy<'tcx>,
value: Value<M::PointerTag>,
dest: MPlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
let (ptr, ptr_align) = dest.to_scalar_ptr_align();
// Note that it is really important that the type here is the right one, and matches the
@ -641,8 +679,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Copy the data from an operand to a place
pub fn copy_op(
&mut self,
src: OpTy<'tcx>,
dest: PlaceTy<'tcx>,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(),
"Cannot copy unsized data");
@ -678,8 +716,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// This is essentially `force_to_memplace`.
pub fn force_allocation(
&mut self,
place: PlaceTy<'tcx>,
) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
place: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let mplace = match place.place {
Place::Local { frame, local } => {
match *self.stack[frame].locals[local].access()? {
@ -715,7 +753,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
&mut self,
layout: TyLayout<'tcx>,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
let ptr = self.memory.allocate(layout.size, layout.align, kind)?;
Ok(MPlaceTy::from_aligned_ptr(ptr, layout))
@ -724,7 +762,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn write_discriminant_index(
&mut self,
variant_index: usize,
dest: PlaceTy<'tcx>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
match dest.layout.variants {
layout::Variants::Single { index } => {
@ -772,7 +810,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Every place can be read from, so we can turn it into an operand
#[inline(always)]
pub fn place_to_op(&self, place: PlaceTy<'tcx>) -> EvalResult<'tcx, OpTy<'tcx>> {
pub fn place_to_op(
&self,
place: PlaceTy<'tcx, M::PointerTag>
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let op = match place.place {
Place::Ptr(mplace) => {
Operand::Indirect(mplace)
@ -785,8 +826,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
/// Also return some more information so drop doesn't have to run the same code twice.
pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx>)
-> EvalResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx>)> {
pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx, M::PointerTag>)
-> EvalResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
let vtable = mplace.vtable()?; // also sanity checks the type
let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
let layout = self.layout_of(ty)?;
@ -799,7 +840,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
}
let mplace = MPlaceTy {
mplace: MemPlace { extra: None, ..*mplace },
mplace: MemPlace { meta: None, ..*mplace },
layout
};
Ok((instance, mplace))

View File

@ -99,6 +99,8 @@ macro_rules! __impl_snapshot_field {
($field:ident, $ctx:expr, $delegate:expr) => ($delegate);
}
// This assumes the type has two type parameters, first for the tag (set to `()`),
// then for the id
macro_rules! impl_snapshot_for {
// FIXME(mark-i-m): Some of these should be `?` rather than `*`.
(enum $enum_name:ident {
@ -108,7 +110,7 @@ macro_rules! impl_snapshot_for {
impl<'a, Ctx> self::Snapshot<'a, Ctx> for $enum_name
where Ctx: self::SnapshotContext<'a>,
{
type Item = $enum_name<AllocIdSnapshot<'a>>;
type Item = $enum_name<(), AllocIdSnapshot<'a>>;
#[inline]
fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item {
@ -129,7 +131,7 @@ macro_rules! impl_snapshot_for {
impl<'a, Ctx> self::Snapshot<'a, Ctx> for $struct_name
where Ctx: self::SnapshotContext<'a>,
{
type Item = $struct_name<AllocIdSnapshot<'a>>;
type Item = $struct_name<(), AllocIdSnapshot<'a>>;
#[inline]
fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item {
@ -175,12 +177,13 @@ impl<'a, Ctx> Snapshot<'a, Ctx> for AllocId
impl_snapshot_for!(struct Pointer {
alloc_id,
offset -> *offset, // just copy offset verbatim
tag -> *tag, // just copy tag
});
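// Expansion sketch (hedged, following the struct arm of the macro above): the
// `Pointer` invocation generates roughly
//
//     impl<'a, Ctx> Snapshot<'a, Ctx> for Pointer
//         where Ctx: SnapshotContext<'a>,
//     {
//         type Item = Pointer<(), AllocIdSnapshot<'a>>;
//         fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
//             let Pointer { alloc_id, offset, tag } = self;
//             Pointer {
//                 alloc_id: alloc_id.snapshot(ctx),
//                 offset: *offset, // copied verbatim
//                 tag: *tag,       // copied verbatim
//             }
//         }
//     }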
impl<'a, Ctx> Snapshot<'a, Ctx> for Scalar
where Ctx: SnapshotContext<'a>,
{
type Item = Scalar<AllocIdSnapshot<'a>>;
type Item = Scalar<(), AllocIdSnapshot<'a>>;
fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
match self {
@ -206,11 +209,11 @@ impl_snapshot_for!(enum ScalarMaybeUndef {
impl_stable_hash_for!(struct ::interpret::MemPlace {
ptr,
align,
extra,
meta,
});
impl_snapshot_for!(struct MemPlace {
ptr,
extra,
meta,
align -> *align, // just copy alignment verbatim
});
@ -234,7 +237,7 @@ impl<'a> HashStable<StableHashingContext<'a>> for Place {
impl<'a, Ctx> Snapshot<'a, Ctx> for Place
where Ctx: SnapshotContext<'a>,
{
type Item = Place<AllocIdSnapshot<'a>>;
type Item = Place<(), AllocIdSnapshot<'a>>;
fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
match self {
@ -278,11 +281,11 @@ impl_snapshot_for!(enum LocalValue {
impl<'a, Ctx> Snapshot<'a, Ctx> for Relocations
where Ctx: SnapshotContext<'a>,
{
type Item = Relocations<AllocIdSnapshot<'a>>;
type Item = Relocations<(), AllocIdSnapshot<'a>>;
fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
Relocations::from_presorted(self.iter()
.map(|(size, id)| (*size, id.snapshot(ctx)))
.map(|(size, ((), id))| (*size, ((), id.snapshot(ctx))))
.collect())
}
}
@ -290,7 +293,7 @@ impl<'a, Ctx> Snapshot<'a, Ctx> for Relocations
#[derive(Eq, PartialEq)]
struct AllocationSnapshot<'a> {
bytes: &'a [u8],
relocations: Relocations<AllocIdSnapshot<'a>>,
relocations: Relocations<(), AllocIdSnapshot<'a>>,
undef_mask: &'a UndefMask,
align: &'a Align,
mutability: &'a Mutability,
@ -334,8 +337,8 @@ struct FrameSnapshot<'a, 'tcx: 'a> {
instance: &'a ty::Instance<'tcx>,
span: &'a Span,
return_to_block: &'a StackPopCleanup,
return_place: Place<AllocIdSnapshot<'a>>,
locals: IndexVec<mir::Local, LocalValue<AllocIdSnapshot<'a>>>,
return_place: Place<(), AllocIdSnapshot<'a>>,
locals: IndexVec<mir::Local, LocalValue<(), AllocIdSnapshot<'a>>>,
block: &'a mir::BasicBlock,
stmt: usize,
}

View File

@ -205,8 +205,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
fn pass_argument(
&mut self,
skip_zst: bool,
caller_arg: &mut impl Iterator<Item=OpTy<'tcx>>,
callee_arg: PlaceTy<'tcx>,
caller_arg: &mut impl Iterator<Item=OpTy<'tcx, M::PointerTag>>,
callee_arg: PlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx> {
if skip_zst && callee_arg.layout.is_zst() {
// Nothing to do.
@ -231,8 +231,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
instance: ty::Instance<'tcx>,
span: Span,
caller_abi: Abi,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
args: &[OpTy<'tcx, M::PointerTag>],
dest: Option<PlaceTy<'tcx, M::PointerTag>>,
ret: Option<mir::BasicBlock>,
) -> EvalResult<'tcx> {
trace!("eval_fn_call: {:#?}", instance);
@ -330,7 +330,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
// last incoming argument. These two iterators do not have the same type,
// so to keep the code paths uniform we accept an allocation
// (for RustCall ABI only).
let caller_args : Cow<[OpTy<'tcx>]> =
let caller_args : Cow<[OpTy<'tcx, M::PointerTag>]> =
if caller_abi == Abi::RustCall && !args.is_empty() {
// Untuple
let (&untuple_arg, args) = args.split_last().unwrap();
@ -339,7 +339,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
.chain((0..untuple_arg.layout.fields.count()).into_iter()
.map(|i| self.operand_field(untuple_arg, i as u64))
)
.collect::<EvalResult<Vec<OpTy<'tcx>>>>()?)
.collect::<EvalResult<Vec<OpTy<'tcx, M::PointerTag>>>>()?)
} else {
// Plain arg passing
Cow::from(args)
@ -426,7 +426,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
fn drop_in_place(
&mut self,
place: PlaceTy<'tcx>,
place: PlaceTy<'tcx, M::PointerTag>,
instance: ty::Instance<'tcx>,
span: Span,
target: mir::BasicBlock,

View File

@ -12,8 +12,6 @@ use rustc::ty::{self, Ty};
use rustc::ty::layout::{Size, Align, LayoutOf};
use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};
use syntax::ast::Mutability;
use super::{EvalContext, Machine, MemoryKind};
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
@ -27,9 +25,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
&mut self,
ty: Ty<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
) -> EvalResult<'tcx, Pointer> {
) -> EvalResult<'tcx, Pointer<M::PointerTag>> {
debug!("get_vtable(trait_ref={:?})", trait_ref);
// FIXME: Cache this!
let layout = self.layout_of(trait_ref.self_ty())?;
assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
let size = layout.size.bytes();
@ -41,7 +41,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
let vtable = self.memory.allocate(
ptr_size * (3 + methods.len() as u64),
ptr_align,
MemoryKind::Stack,
MemoryKind::Vtable,
)?;
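// Layout sketch (inferred from the size computation above): the vtable is
// `3 + methods.len()` pointer-sized words -- drop glue, size, align, then one
// word per method. With 8-byte pointers and two methods, that is 40 bytes.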
let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
@ -63,10 +63,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
}
}
self.memory.intern_static(
vtable.alloc_id,
Mutability::Immutable,
)?;
self.memory.mark_immutable(vtable.alloc_id)?;
Ok(vtable)
}
@ -74,7 +71,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Return the drop fn instance as well as the actual dynamic type
pub fn read_drop_type_from_vtable(
&self,
vtable: Pointer,
vtable: Pointer<M::PointerTag>,
) -> EvalResult<'tcx, (ty::Instance<'tcx>, ty::Ty<'tcx>)> {
// we don't care about the pointee type, we just want a pointer
let pointer_align = self.tcx.data_layout.pointer_align;
@ -90,7 +87,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn read_size_and_align_from_vtable(
&self,
vtable: Pointer,
vtable: Pointer<M::PointerTag>,
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align;

View File

@ -9,6 +9,7 @@
// except according to those terms.
use std::fmt::Write;
use std::hash::Hash;
use syntax_pos::symbol::Symbol;
use rustc::ty::layout::{self, Size, Align, TyLayout};
@ -80,13 +81,13 @@ pub enum PathElem {
}
/// State for tracking recursive validation of references
pub struct RefTracking<'tcx> {
pub seen: FxHashSet<(OpTy<'tcx>)>,
pub todo: Vec<(OpTy<'tcx>, Vec<PathElem>)>,
pub struct RefTracking<'tcx, Tag> {
pub seen: FxHashSet<(OpTy<'tcx, Tag>)>,
pub todo: Vec<(OpTy<'tcx, Tag>, Vec<PathElem>)>,
}
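// Driving sketch (hypothetical caller; `ecx` and the literal arguments are
// assumptions): validation pops work items and only re-queues references it
// has not seen before, so cyclic data terminates.
//
//     let mut ref_tracking = RefTracking::new(op);
//     while let Some((op, mut path)) = ref_tracking.todo.pop() {
//         ecx.validate_operand(op, &mut path, Some(&mut ref_tracking), true)?;
//     }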
impl<'tcx> RefTracking<'tcx> {
pub fn new(op: OpTy<'tcx>) -> Self {
impl<'tcx, Tag: Copy+Eq+Hash> RefTracking<'tcx, Tag> {
pub fn new(op: OpTy<'tcx, Tag>) -> Self {
let mut ref_tracking = RefTracking {
seen: FxHashSet(),
todo: vec![(op, Vec::new())],
@ -128,7 +129,7 @@ fn path_format(path: &Vec<PathElem>) -> String {
out
}
fn scalar_format(value: ScalarMaybeUndef) -> String {
fn scalar_format<Tag>(value: ScalarMaybeUndef<Tag>) -> String {
match value {
ScalarMaybeUndef::Undef =>
"uninitialized bytes".to_owned(),
@ -143,9 +144,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Make sure that `value` is valid for `ty`, *assuming* `ty` is a primitive type.
fn validate_primitive_type(
&self,
value: ValTy<'tcx>,
value: ValTy<'tcx, M::PointerTag>,
path: &Vec<PathElem>,
ref_tracking: Option<&mut RefTracking<'tcx>>,
ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>,
const_mode: bool,
) -> EvalResult<'tcx> {
// Go over all the primitive types
@ -185,7 +186,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
let tail = self.tcx.struct_tail(place.layout.ty);
match tail.sty {
ty::Dynamic(..) => {
let vtable = try_validation!(place.extra.unwrap().to_ptr(),
let vtable = try_validation!(place.meta.unwrap().to_ptr(),
"non-pointer vtable in fat pointer", path);
try_validation!(self.read_drop_type_from_vtable(vtable),
"invalid drop fn in vtable", path);
@ -194,7 +195,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
// FIXME: More checks for the vtable.
}
ty::Slice(..) | ty::Str => {
try_validation!(place.extra.unwrap().to_usize(self),
try_validation!(place.meta.unwrap().to_usize(self),
"non-integer slice length in fat pointer", path);
}
ty::Foreign(..) => {
@ -207,7 +208,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
// for safe ptrs, also check the ptr values itself
if !ty.is_unsafe_ptr() {
// Make sure this is non-NULL and aligned
let (size, align) = self.size_and_align_of(place.extra, place.layout)?;
let (size, align) = self.size_and_align_of(place.meta, place.layout)?;
match self.memory.check_align(place.ptr, align) {
Ok(_) => {},
Err(err) => match err.kind {
@ -272,7 +273,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Make sure that `value` matches the given scalar `layout`.
fn validate_scalar_layout(
&self,
value: ScalarMaybeUndef,
value: ScalarMaybeUndef<M::PointerTag>,
size: Size,
path: &Vec<PathElem>,
layout: &layout::Scalar,
@ -363,9 +364,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// validation (e.g., pointer values are fine in integers at runtime).
pub fn validate_operand(
&self,
dest: OpTy<'tcx>,
dest: OpTy<'tcx, M::PointerTag>,
path: &mut Vec<PathElem>,
mut ref_tracking: Option<&mut RefTracking<'tcx>>,
mut ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>,
const_mode: bool,
) -> EvalResult<'tcx> {
trace!("validate_operand: {:?}, {:?}", *dest, dest.layout.ty);

View File

@ -1163,7 +1163,7 @@ fn collect_miri<'a, 'tcx>(
}
Some(AllocType::Memory(alloc)) => {
trace!("collecting {:?} with {:#?}", alloc_id, alloc);
for &inner in alloc.relocations.values() {
for &((), inner) in alloc.relocations.values() {
collect_miri(tcx, inner, output);
}
},
@ -1272,7 +1272,7 @@ fn collect_const<'a, 'tcx>(
ConstValue::Scalar(Scalar::Ptr(ptr)) =>
collect_miri(tcx, ptr.alloc_id, output),
ConstValue::ByRef(_id, alloc, _offset) => {
for &id in alloc.relocations.values() {
for &((), id) in alloc.relocations.values() {
collect_miri(tcx, id, output);
}
}