diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index b7281cf0445..90081d5b85e 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -632,7 +632,7 @@ define_dep_nodes!( <'tcx> // queries). Making them anonymous avoids hashing the result, which // may save a bit of time. [anon] EraseRegionsTy { ty: Ty<'tcx> }, - [anon] ConstValueToAllocation { val: &'tcx ty::Const<'tcx> }, + [anon] ConstToAllocation { val: &'tcx ty::Const<'tcx> }, [input] Freevars(DefId), [input] MaybeUnusedTraitImport(DefId), diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs index f13e26fee3e..46f4ed4ec47 100644 --- a/src/librustc/ich/impls_ty.rs +++ b/src/librustc/ich/impls_ty.rs @@ -397,12 +397,6 @@ impl_stable_hash_for!(enum mir::interpret::ScalarMaybeUndef { Undef }); -impl_stable_hash_for!(enum mir::interpret::Value { - Scalar(v), - ScalarPair(a, b), - ByRef(ptr, align) -}); - impl_stable_hash_for!(struct mir::interpret::Pointer { alloc_id, offset diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index a0980b06230..50b8c271233 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -13,7 +13,7 @@ pub use self::error::{ FrameInfo, ConstEvalResult, }; -pub use self::value::{Scalar, Value, ConstValue, ScalarMaybeUndef}; +pub use self::value::{Scalar, ConstValue, ScalarMaybeUndef}; use std::fmt; use mir; @@ -135,7 +135,7 @@ impl<'tcx> Pointer { Pointer { alloc_id, offset } } - pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self { + pub fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self { Pointer::new( self.alloc_id, Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)), @@ -147,7 +147,7 @@ impl<'tcx> Pointer { (Pointer::new(self.alloc_id, Size::from_bytes(res)), over) } - pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + pub fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { Ok(Pointer::new( self.alloc_id, Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?), diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index 3e8b44b87fe..3f8130ec04c 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -1,14 +1,13 @@ #![allow(unknown_lints)] -use ty::layout::{Align, HasDataLayout, Size}; -use ty; +use ty::layout::{HasDataLayout, Size}; use ty::subst::Substs; use hir::def_id::DefId; use super::{EvalResult, Pointer, PointerArithmetic, Allocation}; /// Represents a constant value in Rust. Scalar and ScalarPair are optimizations which -/// matches Value's optimizations for easy conversions between these two types +/// match the LocalValue optimizations for easy conversions between Value and ConstValue.
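+/// +/// For example, a `1u8` constant is `ConstValue::Scalar(Scalar::Bits { bits: 1, size: 1 })`, and a `&str` constant is a `ScalarPair` of a pointer to the string bytes plus the length (cf. `new_slice` below).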
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, RustcEncodable, RustcDecodable, Hash)] pub enum ConstValue<'tcx> { /// Never returned from the `const_eval` query, but the HIR contains these frequently in order @@ -16,6 +15,8 @@ pub enum ConstValue<'tcx> { /// evaluation Unevaluated(DefId, &'tcx Substs<'tcx>), /// Used only for types with layout::abi::Scalar ABI and ZSTs + /// + /// Not using the enum `Value` to encode that this must not be `Undef` Scalar(Scalar), /// Used only for types with layout::abi::ScalarPair /// @@ -26,25 +27,6 @@ pub enum ConstValue<'tcx> { } impl<'tcx> ConstValue<'tcx> { - #[inline] - pub fn from_byval_value(val: Value) -> EvalResult<'static, Self> { - Ok(match val { - Value::ByRef(..) => bug!(), - Value::ScalarPair(a, b) => ConstValue::ScalarPair(a.unwrap_or_err()?, b), - Value::Scalar(val) => ConstValue::Scalar(val.unwrap_or_err()?), - }) - } - - #[inline] - pub fn to_byval_value(&self) -> Option<Value> { - match *self { - ConstValue::Unevaluated(..) | - ConstValue::ByRef(..) => None, - ConstValue::ScalarPair(a, b) => Some(Value::ScalarPair(a.into(), b)), - ConstValue::Scalar(val) => Some(Value::Scalar(val.into())), - } - } - #[inline] pub fn try_to_scalar(&self) -> Option<Scalar> { match *self { @@ -56,58 +38,44 @@ impl<'tcx> ConstValue<'tcx> { } #[inline] - pub fn to_bits(&self, size: Size) -> Option<u128> { + pub fn try_to_bits(&self, size: Size) -> Option<u128> { self.try_to_scalar()?.to_bits(size).ok() } #[inline] - pub fn to_ptr(&self) -> Option<Pointer> { + pub fn try_to_ptr(&self) -> Option<Pointer> { self.try_to_scalar()?.to_ptr().ok() } -} -/// A `Value` represents a single self-contained Rust value. -/// -/// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitive -/// value held directly, outside of any allocation (`Scalar`). For `ByRef`-values, we remember -/// whether the pointer is supposed to be aligned or not (also see Place). -/// -/// For optimization of a few very common cases, there is also a representation for a pair of -/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary -/// operations and fat pointers. This idea was taken from rustc's codegen.
-#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub enum Value { - ByRef(Scalar, Align), - Scalar(ScalarMaybeUndef), - ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef), -} - -impl<'tcx> ty::TypeFoldable<'tcx> for Value { - fn super_fold_with<'gcx: 'tcx, F: ty::fold::TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> Self { - *self + pub fn new_slice( + val: Scalar, + len: u64, + cx: impl HasDataLayout + ) -> Self { + ConstValue::ScalarPair(val, Scalar::Bits { + bits: len as u128, + size: cx.data_layout().pointer_size.bytes() as u8, + }.into()) } - fn super_visit_with<V: ty::fold::TypeVisitor<'tcx>>(&self, _: &mut V) -> bool { - false + + pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self { + ConstValue::ScalarPair(val, Scalar::Ptr(vtable).into()) } } impl<'tcx> Scalar { - pub fn ptr_null<C: HasDataLayout>(cx: C) -> Self { + pub fn ptr_null(cx: impl HasDataLayout) -> Self { Scalar::Bits { bits: 0, size: cx.data_layout().pointer_size.bytes() as u8, } } - pub fn to_value_with_len<C: HasDataLayout>(self, len: u64, cx: C) -> Value { - ScalarMaybeUndef::Scalar(self).to_value_with_len(len, cx) + pub fn zst() -> Self { + Scalar::Bits { bits: 0, size: 0 } } - pub fn to_value_with_vtable(self, vtable: Pointer) -> Value { - ScalarMaybeUndef::Scalar(self).to_value_with_vtable(vtable) - } - - pub fn ptr_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> { let layout = cx.data_layout(); match self { Scalar::Bits { bits, size } => { @@ -121,7 +89,7 @@ impl<'tcx> Scalar { } } - pub fn ptr_offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { + pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> { let layout = cx.data_layout(); match self { Scalar::Bits { bits, size } => { @@ -135,7 +103,7 @@ impl<'tcx> Scalar { } } - pub fn ptr_wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self { + pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self { let layout = cx.data_layout(); match self { Scalar::Bits { bits, size } => { @@ -149,7 +117,7 @@ impl<'tcx> Scalar { } } - pub fn is_null_ptr<C: HasDataLayout>(self, cx: C) -> bool { + pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool { match self { Scalar::Bits { bits, size } => { assert_eq!(size as u64, cx.data_layout().pointer_size.bytes()); @@ -159,79 +127,6 @@ impl<'tcx> Scalar { } } - pub fn to_value(self) -> Value { - Value::Scalar(ScalarMaybeUndef::Scalar(self)) - } -} - -impl From<Pointer> for Scalar { - fn from(ptr: Pointer) -> Self { - Scalar::Ptr(ptr) - } -} - -/// A `Scalar` represents an immediate, primitive value existing outside of a -/// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in -/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes -/// of a simple value or a pointer into another `Allocation` -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub enum Scalar { - /// The raw bytes of a simple value. - Bits { - /// The first `size` bytes are the value. - /// Do not try to read less or more bytes that that - size: u8, - bits: u128, - }, - - /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of - /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the - /// relocation and its associated offset together as a `Pointer` here.
- Ptr(Pointer), -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub enum ScalarMaybeUndef { - Scalar(Scalar), - Undef, -} - -impl From<Scalar> for ScalarMaybeUndef { - fn from(s: Scalar) -> Self { - ScalarMaybeUndef::Scalar(s) - } -} - -impl<'tcx> ScalarMaybeUndef { - pub fn unwrap_or_err(self) -> EvalResult<'static, Scalar> { - match self { - ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), - ScalarMaybeUndef::Undef => err!(ReadUndefBytes), - } - } - - pub fn to_value_with_len<C: HasDataLayout>(self, len: u64, cx: C) -> Value { - Value::ScalarPair(self, Scalar::Bits { - bits: len as u128, - size: cx.data_layout().pointer_size.bytes() as u8, - }.into()) - } - - pub fn to_value_with_vtable(self, vtable: Pointer) -> Value { - Value::ScalarPair(self, Scalar::Ptr(vtable).into()) - } - - pub fn ptr_offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { - match self { - ScalarMaybeUndef::Scalar(scalar) => { - scalar.ptr_offset(i, cx).map(ScalarMaybeUndef::Scalar) - }, - ScalarMaybeUndef::Undef => Ok(ScalarMaybeUndef::Undef) - } - } -} - -impl<'tcx> Scalar { pub fn from_bool(b: bool) -> Self { Scalar::Bits { bits: b as u128, size: 1 } } @@ -280,3 +175,62 @@ impl<'tcx> Scalar { } } } + +impl From<Pointer> for Scalar { + fn from(ptr: Pointer) -> Self { + Scalar::Ptr(ptr) + } +} + +/// A `Scalar` represents an immediate, primitive value existing outside of a +/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 8 bytes in +/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes +/// of a simple value or a pointer into another `Allocation` +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub enum Scalar { + /// The raw bytes of a simple value. + Bits { + /// The first `size` bytes are the value. + /// Do not try to read less or more bytes than that + size: u8, + bits: u128, + }, + + /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of + /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the + /// relocation and its associated offset together as a `Pointer` here.
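+ /// (A `Pointer` is just an `AllocId` paired with a byte offset into that allocation.)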
+ Ptr(Pointer), +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub enum ScalarMaybeUndef { + Scalar(Scalar), + Undef, +} + +impl From<Scalar> for ScalarMaybeUndef { + fn from(s: Scalar) -> Self { + ScalarMaybeUndef::Scalar(s) + } +} + +impl<'tcx> ScalarMaybeUndef { + pub fn not_undef(self) -> EvalResult<'static, Scalar> { + match self { + ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), + ScalarMaybeUndef::Undef => err!(ReadUndefBytes), + } + } + + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { + self.not_undef()?.to_ptr() + } + + pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { + self.not_undef()?.to_bits(target_size) + } + + pub fn to_bool(self) -> EvalResult<'tcx, bool> { + self.not_undef()?.to_bool() + } +} diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index bafeb5dd128..e958ca9b9bb 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -17,7 +17,7 @@ use hir::def::CtorKind; use hir::def_id::DefId; use hir::{self, HirId, InlineAsm}; use middle::region; -use mir::interpret::{EvalErrorKind, Scalar, Value, ScalarMaybeUndef}; +use mir::interpret::{EvalErrorKind, Scalar, ScalarMaybeUndef, ConstValue}; use mir::visit::MirVisitable; use rustc_apfloat::ieee::{Double, Single}; use rustc_apfloat::Float; @@ -1469,14 +1469,14 @@ impl<'tcx> TerminatorKind<'tcx> { .iter() .map(|&u| { let mut s = String::new(); - print_miri_value( - Scalar::Bits { - bits: u, - size: size.bytes() as u8, - }.to_value(), - switch_ty, - &mut s, - ).unwrap(); + let c = ty::Const { + val: ConstValue::Scalar(Scalar::Bits { + bits: u, + size: size.bytes() as u8, + }.into()), + ty: switch_ty, + }; + fmt_const_val(&mut s, &c).unwrap(); s.into() }) .chain(iter::once(String::from("otherwise").into())) @@ -2220,18 +2220,12 @@ impl<'tcx> Debug for Constant<'tcx> { } /// Write a `ConstValue` in a way closer to the original source code than the `Debug` output. -pub fn fmt_const_val<W: Write>(fmt: &mut W, const_val: &ty::Const) -> fmt::Result { - if let Some(value) = const_val.to_byval_value() { - print_miri_value(value, const_val.ty, fmt) - } else { - write!(fmt, "{:?}:{}", const_val.val, const_val.ty) - } -} - -pub fn print_miri_value<'tcx, W: Write>(value: Value, ty: Ty<'tcx>, f: &mut W) -> fmt::Result { +pub fn fmt_const_val(f: &mut impl Write, const_val: &ty::Const) -> fmt::Result { use ty::TypeVariants::*; + let value = const_val.val; + let ty = const_val.ty; // print some primitives - if let Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. })) = value { + if let ConstValue::Scalar(Scalar::Bits { bits, .. }) = value { match ty.sty { TyBool if bits == 0 => return write!(f, "false"), TyBool if bits == 1 => return write!(f, "true"), @@ -2258,8 +2252,8 @@ pub fn print_miri_value<'tcx, W: Write>(value: Value, ty: Ty<'tcx>, f: &mut W) - return write!(f, "{}", item_path_str(did)); } // print string literals - if let Value::ScalarPair(ptr, len) = value { - if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = ptr { + if let ConstValue::ScalarPair(ptr, len) = value { + if let Scalar::Ptr(ptr) = ptr { if let ScalarMaybeUndef::Scalar(Scalar::Bits { bits: len, .. }) = len { if let TyRef(_, &ty::TyS { sty: TyStr, ..
}, _) = ty.sty { return ty::tls::with(|tcx| { diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index ecf35c1b0da..b5093d0a1fc 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -198,9 +198,9 @@ impl<'tcx> QueryDescription<'tcx> for queries::super_predicates_of<'tcx> { } } -impl<'tcx> QueryDescription<'tcx> for queries::const_value_to_allocation<'tcx> { +impl<'tcx> QueryDescription<'tcx> for queries::const_to_allocation<'tcx> { fn describe(_tcx: TyCtxt, val: &'tcx ty::Const<'tcx>) -> String { - format!("converting value `{:?}` to an allocation", val) + format!("converting constant `{:?}` to an allocation", val) } } diff --git a/src/librustc/ty/query/mod.rs b/src/librustc/ty/query/mod.rs index ef22ebef9d7..c1372293a1b 100644 --- a/src/librustc/ty/query/mod.rs +++ b/src/librustc/ty/query/mod.rs @@ -287,8 +287,8 @@ define_queries! { <'tcx> [] fn const_eval: const_eval_dep_node(ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) -> ConstEvalResult<'tcx>, - /// Converts a constant value to an constant allocation - [] fn const_value_to_allocation: const_value_to_allocation( + /// Converts a constant value to a constant allocation + [] fn const_to_allocation: const_to_allocation( &'tcx ty::Const<'tcx> ) -> &'tcx Allocation, }, @@ -706,10 +706,10 @@ fn erase_regions_ty<'tcx>(ty: Ty<'tcx>) -> DepConstructor<'tcx> { DepConstructor::EraseRegionsTy { ty } } -fn const_value_to_allocation<'tcx>( +fn const_to_allocation<'tcx>( val: &'tcx ty::Const<'tcx>, ) -> DepConstructor<'tcx> { - DepConstructor::ConstValueToAllocation { val } + DepConstructor::ConstToAllocation { val } } fn type_param_predicates<'tcx>((item_id, param_id): (DefId, DefId)) -> DepConstructor<'tcx> { diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index bf721ddd13f..8473e4af40e 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -1062,7 +1062,7 @@ pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>, DepKind::FulfillObligation | DepKind::VtableMethods | DepKind::EraseRegionsTy | - DepKind::ConstValueToAllocation | + DepKind::ConstToAllocation | DepKind::NormalizeProjectionTy | DepKind::NormalizeTyAfterErasingRegions | DepKind::ImpliedOutlivesBounds | diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index 226388c9efa..7c0857cd2f9 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -20,7 +20,7 @@ use ty::subst::{Substs, Subst, Kind, UnpackedKind}; use ty::{self, AdtDef, TypeFlags, Ty, TyCtxt, TypeFoldable}; use ty::{Slice, TyS, ParamEnvAnd, ParamEnv}; use util::captures::Captures; -use mir::interpret::{Scalar, Pointer, Value}; +use mir::interpret::{Scalar, Pointer}; use std::iter; use std::cmp::Ordering; @@ -1973,17 +1973,12 @@ impl<'tcx> Const<'tcx> { } let ty = tcx.lift_to_global(&ty).unwrap(); let size = tcx.layout_of(ty).ok()?.size; - self.val.to_bits(size) + self.val.try_to_bits(size) } #[inline] pub fn to_ptr(&self) -> Option<Pointer> { - self.val.to_ptr() - } - - #[inline] - pub fn to_byval_value(&self) -> Option<Value> { - self.val.to_byval_value() + self.val.try_to_ptr() } #[inline] @@ -1995,7 +1990,7 @@ impl<'tcx> Const<'tcx> { assert_eq!(self.ty, ty.value); let ty = tcx.lift_to_global(&ty).unwrap(); let size = tcx.layout_of(ty).ok()?.size; - self.val.to_bits(size) + self.val.try_to_bits(size) } #[inline] diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index cac2ae0302e..c79a1a4bd04 100644 ---
a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -1359,6 +1359,7 @@ fn describe_enum_variant( // If this is not a univariant enum, there is also the discriminant field. let (discr_offset, discr_arg) = match discriminant_info { RegularDiscriminant(_) => { + // We have the layout of an enum variant, we need the layout of the outer enum let enum_layout = cx.layout_of(layout.ty); (Some(enum_layout.fields.offset(0)), Some(("RUST$ENUM$DISR".to_string(), enum_layout.field(cx, 0).ty))) diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index 47fd92682fd..6774ce818c1 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -9,8 +9,8 @@ // except according to those terms. use llvm; -use rustc::mir::interpret::ConstEvalErr; -use rustc_mir::interpret::{read_target_uint, const_val_field}; +use rustc::mir::interpret::{ConstEvalErr, read_target_uint}; +use rustc_mir::interpret::{const_field}; use rustc::hir::def_id::DefId; use rustc::mir; use rustc_data_structures::indexed_vec::Idx; @@ -186,7 +186,7 @@ impl FunctionCx<'a, 'll, 'tcx> { ref other => bug!("invalid simd shuffle type: {}", other), }; let values: Result<Vec<_>, Lrc<_>> = (0..fields).map(|field| { - let field = const_val_field( + let field = const_field( bx.tcx(), ty::ParamEnv::reveal_all(), self.instance, diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index 038d53a3547..bcbf6d167df 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -1615,20 +1615,19 @@ fn validate_const<'a, 'tcx>( ) { let mut ecx = ::rustc_mir::interpret::mk_eval_cx(tcx, gid.instance, param_env).unwrap(); let result = (|| { - let val = ecx.const_to_value(constant.val)?; use rustc_target::abi::LayoutOf; + use rustc_mir::interpret::OpTy; + + let op = ecx.const_value_to_op(constant.val)?; let layout = ecx.layout_of(constant.ty)?; - let place = ecx.allocate_place_for_value(val, layout, None)?; - let ptr = place.to_ptr()?; - let mut todo = vec![(ptr, layout.ty, String::new())]; + let place = ecx.allocate_op(OpTy { op, layout })?.into(); + + let mut todo = vec![(place, String::new())]; let mut seen = FxHashSet(); - seen.insert((ptr, layout.ty)); - while let Some((ptr, ty, path)) = todo.pop() { - let layout = ecx.layout_of(ty)?; - ecx.validate_ptr_target( - ptr, - layout.align, - layout, + seen.insert(place); + while let Some((place, path)) = todo.pop() { + ecx.validate_mplace( + place, path, &mut seen, &mut todo, diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index 70148fc9176..79483e454ec 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -167,8 +167,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { LitKind::Str(ref s, _) => { let s = s.as_str(); let id = self.tcx.allocate_bytes(s.as_bytes()); - let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, self.tcx); - ConstValue::from_byval_value(value).unwrap() + ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, self.tcx) }, LitKind::ByteStr(ref data) => { let id = self.tcx.allocate_bytes(data); diff --git a/src/librustc_mir/hair/pattern/mod.rs b/src/librustc_mir/hair/pattern/mod.rs index 7ec93731304..dda589fd20d 100644 --- a/src/librustc_mir/hair/pattern/mod.rs +++ b/src/librustc_mir/hair/pattern/mod.rs @@ -16,7 +16,7 @@ mod check_match; pub use self::check_match::check_crate; pub(crate) use self::check_match::check_match; -use
interpret::{const_val_field, const_variant_index, self}; +use interpret::{const_field, const_variant_index, self}; use rustc::mir::{fmt_const_val, Field, BorrowKind, Mutability}; use rustc::mir::interpret::{Scalar, GlobalId, ConstValue}; @@ -795,7 +795,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { debug!("const_to_pat: cv={:#?}", cv); let adt_subpattern = |i, variant_opt| { let field = Field::new(i); - let val = const_val_field( + let val = const_field( self.tcx, self.param_env, instance, variant_opt, field, cv, ).expect("field access failed"); @@ -1106,8 +1106,8 @@ pub fn compare_const_vals<'a, 'tcx>( len_b, ), ) if ptr_a.offset.bytes() == 0 && ptr_b.offset.bytes() == 0 => { - let len_a = len_a.unwrap_or_err().ok(); - let len_b = len_b.unwrap_or_err().ok(); + let len_a = len_a.not_undef().ok(); + let len_b = len_b.not_undef().ok(); if len_a.is_none() || len_b.is_none() { tcx.sess.struct_err("str slice len is undef").delay_as_bug(); } @@ -1153,8 +1153,7 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, LitKind::Str(ref s, _) => { let s = s.as_str(); let id = tcx.allocate_bytes(s.as_bytes()); - let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, tcx); - ConstValue::from_byval_value(value).unwrap() + ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, tcx) }, LitKind::ByteStr(ref data) => { let id = tcx.allocate_bytes(data); diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index 4e705254331..036b84ee1fb 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -1,87 +1,82 @@ -use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, LayoutOf, TyLayout}; +use rustc::ty::{self, Ty, TypeAndMut}; +use rustc::ty::layout::{self, TyLayout}; use syntax::ast::{FloatTy, IntTy, UintTy}; use rustc_apfloat::ieee::{Single, Double}; -use super::{EvalContext, Machine}; -use rustc::mir::interpret::{Scalar, EvalResult, Pointer, PointerArithmetic, Value, EvalErrorKind}; +use rustc::mir::interpret::{Scalar, EvalResult, Pointer, PointerArithmetic, EvalErrorKind}; use rustc::mir::CastKind; use rustc_apfloat::Float; -use interpret::eval_context::ValTy; -use interpret::Place; + +use super::{EvalContext, Machine, PlaceTy, OpTy, Value}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { + fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool { + match ty.sty { + ty::TyRawPtr(ty::TypeAndMut { ty, .. }) | + ty::TyRef(_, ty, _) => !self.type_is_sized(ty), + ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()), + _ => false, + } + } + crate fn cast( &mut self, - src: ValTy<'tcx>, + src: OpTy<'tcx>, kind: CastKind, - dest_ty: Ty<'tcx>, - dest: Place, + dest: PlaceTy<'tcx>, ) -> EvalResult<'tcx> { - let src_layout = self.layout_of(src.ty)?; - let dst_layout = self.layout_of(dest_ty)?; + let src_layout = src.layout; + let dst_layout = dest.layout; use rustc::mir::CastKind::*; match kind { Unsize => { - self.unsize_into(src.value, src_layout, dest, dst_layout)?; + self.unsize_into(src, dest)?; } Misc => { - if self.type_is_fat_ptr(src.ty) { - match (src.value, self.type_is_fat_ptr(dest_ty)) { - (Value::ByRef { .. 
}, _) | + let src = self.read_value(src)?; + if self.type_is_fat_ptr(src_layout.ty) { + match (src.value, self.type_is_fat_ptr(dest.layout.ty)) { // pointers to extern types (Value::Scalar(_),_) | // slices and trait objects to other slices/trait objects (Value::ScalarPair(..), true) => { - let valty = ValTy { - value: src.value, - ty: dest_ty, - }; - self.write_value(valty, dest)?; + // No change to value + self.write_value(src.value, dest)?; } // slices and trait objects to thin pointers (dropping the metadata) (Value::ScalarPair(data, _), false) => { - let valty = ValTy { - value: Value::Scalar(data), - ty: dest_ty, - }; - self.write_value(valty, dest)?; + self.write_scalar(data, dest)?; } } } else { - let src_layout = self.layout_of(src.ty)?; match src_layout.variants { layout::Variants::Single { index } => { - if let Some(def) = src.ty.ty_adt_def() { + if let Some(def) = src_layout.ty.ty_adt_def() { let discr_val = def .discriminant_for_variant(*self.tcx, index) .val; return self.write_scalar( - dest, Scalar::Bits { bits: discr_val, size: dst_layout.size.bytes() as u8, }, - dest_ty); + dest); } } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. } => {}, } - let src_val = self.value_to_scalar(src)?; - let dest_val = self.cast_scalar(src_val, src_layout, dst_layout)?; - let valty = ValTy { - value: Value::Scalar(dest_val.into()), - ty: dest_ty, - }; - self.write_value(valty, dest)?; + let src = src.to_scalar()?; + let dest_val = self.cast_scalar(src, src_layout, dest.layout)?; + self.write_scalar(dest_val, dest)?; } } ReifyFnPointer => { - match src.ty.sty { + // The src operand does not matter, just its type + match src_layout.ty.sty { ty::TyFnDef(def_id, substs) => { if self.tcx.has_attr(def_id, "rustc_args_required_const") { bug!("reifying a fn ptr that requires \ @@ -94,29 +89,26 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { substs, ).ok_or_else(|| EvalErrorKind::TooGeneric.into()); let fn_ptr = self.memory.create_fn_alloc(instance?); - let valty = ValTy { - value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()), - ty: dest_ty, - }; - self.write_value(valty, dest)?; + self.write_scalar(Scalar::Ptr(fn_ptr.into()), dest)?; } ref other => bug!("reify fn pointer on {:?}", other), } } UnsafeFnPointer => { - match dest_ty.sty { + let src = self.read_value(src)?; + match dest.layout.ty.sty { ty::TyFnPtr(_) => { - let mut src = src; - src.ty = dest_ty; - self.write_value(src, dest)?; + // No change to value + self.write_value(*src, dest)?; } ref other => bug!("fn to unsafe fn cast on {:?}", other), } } ClosureFnPointer => { - match src.ty.sty { + // The src operand does not matter, just its type + match src_layout.ty.sty { ty::TyClosure(def_id, substs) => { let substs = self.tcx.subst_and_normalize_erasing_regions( self.substs(), @@ -130,11 +122,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ty::ClosureKind::FnOnce, ); let fn_ptr = self.memory.create_fn_alloc(instance); - let valty = ValTy { - value: Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()), - ty: dest_ty, - }; - self.write_value(valty, dest)?; + let val = Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()); + self.write_value(val, dest)?; } ref other => bug!("closure fn pointer on {:?}", other), } @@ -292,4 +281,111 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { _ => err!(Unimplemented(format!("ptr to {:?} cast", ty))), } } + + fn unsize_into_ptr( + &mut self, + src: OpTy<'tcx>, + dest: PlaceTy<'tcx>, + // The 
pointee types + sty: Ty<'tcx>, + dty: Ty<'tcx>, + ) -> EvalResult<'tcx> { + // A<Struct> -> A<Trait> conversion + let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty); + + match (&src_pointee_ty.sty, &dest_pointee_ty.sty) { + (&ty::TyArray(_, length), &ty::TySlice(_)) => { + let ptr = self.read_value(src)?.to_scalar_ptr()?; + // u64 cast is from usize to u64, which is always good + let val = Value::new_slice(ptr, length.unwrap_usize(self.tcx.tcx), self.tcx.tcx); + self.write_value(val, dest) + } + (&ty::TyDynamic(..), &ty::TyDynamic(..)) => { + // For now, upcasts are limited to changes in marker + // traits, and hence never require an actual + // change to the vtable. + self.copy_op(src, dest) + } + (_, &ty::TyDynamic(ref data, _)) => { + // Initial cast from sized to dyn trait + let trait_ref = data.principal().unwrap().with_self_ty( + *self.tcx, + src_pointee_ty, + ); + let trait_ref = self.tcx.erase_regions(&trait_ref); + let vtable = self.get_vtable(src_pointee_ty, trait_ref)?; + let ptr = self.read_value(src)?.to_scalar_ptr()?; + let val = Value::new_dyn_trait(ptr, vtable); + self.write_value(val, dest) + } + + _ => bug!("invalid unsizing {:?} -> {:?}", src.layout.ty, dest.layout.ty), + } + } + + fn unsize_into( + &mut self, + src: OpTy<'tcx>, + dest: PlaceTy<'tcx>, + ) -> EvalResult<'tcx> { + match (&src.layout.ty.sty, &dest.layout.ty.sty) { + (&ty::TyRef(_, s, _), &ty::TyRef(_, d, _)) | + (&ty::TyRef(_, s, _), &ty::TyRawPtr(TypeAndMut { ty: d, .. })) | + (&ty::TyRawPtr(TypeAndMut { ty: s, .. }), + &ty::TyRawPtr(TypeAndMut { ty: d, .. })) => { + self.unsize_into_ptr(src, dest, s, d) + } + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { + assert_eq!(def_a, def_b); + if def_a.is_box() || def_b.is_box() { + if !def_a.is_box() || !def_b.is_box() { + bug!("invalid unsizing between {:?} -> {:?}", src.layout, dest.layout); + } + return self.unsize_into_ptr( + src, + dest, + src.layout.ty.boxed_ty(), + dest.layout.ty.boxed_ty(), + ); + } + + // unsizing of generic struct with pointer fields + // Example: `Arc<T>` -> `Arc<Trait>` + // here we need to increase the size of every &T thin ptr field to a fat ptr + for i in 0..src.layout.fields.count() { + let dst_field = self.place_field(dest, i as u64)?; + if dst_field.layout.is_zst() { + continue; + } + let src_field = match src.try_as_mplace() { + Ok(mplace) => { + let src_field = self.mplace_field(mplace, i as u64)?; + src_field.into() + } + Err(..)
=> { + let src_field_layout = src.layout.field(&self, i)?; + // this must be a field covering the entire thing + assert_eq!(src.layout.fields.offset(i).bytes(), 0); + assert_eq!(src_field_layout.size, src.layout.size); + // just swap out the layout + OpTy { op: src.op, layout: src_field_layout } + } + }; + if src_field.layout.ty == dst_field.layout.ty { + self.copy_op(src_field, dst_field)?; + } else { + self.unsize_into(src_field, dst_field)?; + } + } + Ok(()) + } + _ => { + bug!( + "unsize_into: invalid conversion: {:?} -> {:?}", + src.layout, + dest.layout + ) + } + } + } } diff --git a/src/librustc_mir/interpret/const_eval.rs b/src/librustc_mir/interpret/const_eval.rs index dd298d9becb..d98f98bca60 100644 --- a/src/librustc_mir/interpret/const_eval.rs +++ b/src/librustc_mir/interpret/const_eval.rs @@ -2,12 +2,12 @@ use std::fmt; use std::error::Error; use rustc::hir; -use rustc::mir::interpret::{ConstEvalErr, ScalarMaybeUndef}; +use rustc::mir::interpret::ConstEvalErr; use rustc::mir; -use rustc::ty::{self, TyCtxt, Ty, Instance}; -use rustc::ty::layout::{self, LayoutOf, Primitive, TyLayout}; +use rustc::ty::{self, TyCtxt, Instance}; +use rustc::ty::layout::{LayoutOf, Primitive, TyLayout}; use rustc::ty::subst::Subst; -use rustc_data_structures::indexed_vec::IndexVec; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use syntax::ast::Mutability; use syntax::source_map::Span; @@ -15,9 +15,12 @@ use syntax::source_map::DUMMY_SP; use rustc::mir::interpret::{ EvalResult, EvalError, EvalErrorKind, GlobalId, - Value, Scalar, AllocId, Allocation, ConstValue, + Scalar, AllocId, Allocation, ConstValue, +}; +use super::{ + Place, PlaceExtra, PlaceTy, MemPlace, OpTy, Operand, Value, + EvalContext, StackPopCleanup, Memory, MemoryKind }; -use super::{Place, EvalContext, StackPopCleanup, ValTy, Memory, MemoryKind}; pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -35,7 +38,7 @@ pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>( instance, span, mir, - return_place: Place::undef(), + return_place: Place::null(tcx), return_to_block: StackPopCleanup::None, stmt: 0, }); @@ -56,7 +59,7 @@ pub fn mk_eval_cx<'a, 'tcx>( instance, mir.span, mir, - Place::undef(), + Place::null(tcx), StackPopCleanup::None, )?; Ok(ecx) @@ -67,39 +70,51 @@ pub fn eval_promoted<'a, 'mir, 'tcx>( cid: GlobalId<'tcx>, mir: &'mir mir::Mir<'tcx>, param_env: ty::ParamEnv<'tcx>, -) -> EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)> { +) -> EvalResult<'tcx, OpTy<'tcx>> { ecx.with_fresh_body(|ecx| { eval_body_using_ecx(ecx, cid, Some(mir), param_env) }) } -pub fn value_to_const_value<'tcx>( +pub fn op_to_const<'tcx>( ecx: &EvalContext<'_, '_, 'tcx, CompileTimeEvaluator>, - val: Value, - layout: TyLayout<'tcx>, + op: OpTy<'tcx>, + normalize: bool, ) -> EvalResult<'tcx, &'tcx ty::Const<'tcx>> { - match (val, &layout.abi) { - (Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size: 0, ..})), _) if layout.is_zst() => {}, - (Value::ByRef(..), _) | - (Value::Scalar(_), &layout::Abi::Scalar(_)) | - (Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) => {}, - _ => bug!("bad value/layout combo: {:#?}, {:#?}", val, layout), - } - let val = match val { - Value::Scalar(val) => ConstValue::Scalar(val.unwrap_or_err()?), - Value::ScalarPair(a, b) => ConstValue::ScalarPair(a.unwrap_or_err()?, b), - Value::ByRef(ptr, align) => { - let ptr = ptr.to_ptr().unwrap(); + let normalized_op = if normalize { + ecx.try_read_value(op)?
+ } else { + match op.op { + Operand::Indirect(mplace) => Err(mplace), + Operand::Immediate(val) => Ok(val) + } + }; + let val = match normalized_op { + Err(MemPlace { ptr, align, extra }) => { + // extract alloc-offset pair + assert_eq!(extra, PlaceExtra::None); + let ptr = ptr.to_ptr()?; let alloc = ecx.memory.get(ptr.alloc_id)?; assert!(alloc.align.abi() >= align.abi()); - assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= layout.size.bytes()); + assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes()); let mut alloc = alloc.clone(); alloc.align = align; let alloc = ecx.tcx.intern_const_alloc(alloc); ConstValue::ByRef(alloc, ptr.offset) - } + }, + Ok(Value::Scalar(x)) => + ConstValue::Scalar(x.not_undef()?), + Ok(Value::ScalarPair(a, b)) => + ConstValue::ScalarPair(a.not_undef()?, b), }; - Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, layout.ty)) + Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, op.layout.ty)) +} +pub fn const_to_op<'tcx>( + ecx: &mut EvalContext<'_, '_, 'tcx, CompileTimeEvaluator>, + cnst: &'tcx ty::Const<'tcx>, +) -> EvalResult<'tcx, OpTy<'tcx>> { + let op = ecx.const_value_to_op(cnst.val)?; + Ok(OpTy { op, layout: ecx.layout_of(cnst.ty)? }) } fn eval_body_and_ecx<'a, 'mir, 'tcx>( @@ -107,7 +122,7 @@ fn eval_body_and_ecx<'a, 'mir, 'tcx>( cid: GlobalId<'tcx>, mir: Option<&'mir mir::Mir<'tcx>>, param_env: ty::ParamEnv<'tcx>, -) -> (EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) { +) -> (EvalResult<'tcx, OpTy<'tcx>>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) { debug!("eval_body_and_ecx: {:?}, {:?}", cid, param_env); // we start out with the best span we have // and try improving it down the road when more information is available @@ -118,12 +133,13 @@ fn eval_body_and_ecx<'a, 'mir, 'tcx>( (r, ecx) } +// Returns a pointer to where the result lives fn eval_body_using_ecx<'a, 'mir, 'tcx>( ecx: &mut EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>, cid: GlobalId<'tcx>, mir: Option<&'mir mir::Mir<'tcx>>, param_env: ty::ParamEnv<'tcx>, -) -> EvalResult<'tcx, (Value, Scalar, TyLayout<'tcx>)> { +) -> EvalResult<'tcx, OpTy<'tcx>> { debug!("eval_body: {:?}, {:?}", cid, param_env); let tcx = ecx.tcx.tcx; let mut mir = match mir { @@ -135,11 +151,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>( } let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?; assert!(!layout.is_unsized()); - let ptr = ecx.memory.allocate( - layout.size, - layout.align, - MemoryKind::Stack, - )?; + let ret = ecx.allocate(layout, MemoryKind::Stack)?; let internally_mutable = !layout.ty.is_freeze(tcx, param_env, mir.span); let is_static = tcx.is_static(cid.instance.def_id()); let mutability = if is_static == Some(hir::Mutability::MutMutable) || internally_mutable { @@ -156,19 +168,14 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>( cid.instance, mir.span, mir, - Place::from_ptr(ptr, layout.align), + Place::Ptr(*ret), cleanup, )?; + // The main interpreter loop. while ecx.step()? {} - let ptr = ptr.into(); - // always try to read the value and report errors - let value = match ecx.try_read_value(ptr, layout.align, layout.ty)? 
{ - Some(val) if is_static.is_none() && cid.promoted.is_none() => val, - // point at the allocation - _ => Value::ByRef(ptr, layout.align), - }; - Ok((value, ptr, layout)) + + Ok(ret.into()) } #[derive(Debug, Clone, Eq, PartialEq, Hash)] @@ -222,14 +229,14 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { fn eval_fn_call<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - destination: Option<(Place, mir::BasicBlock)>, - args: &[ValTy<'tcx>], + destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>, + args: &[OpTy<'tcx>], span: Span, - sig: ty::FnSig<'tcx>, ) -> EvalResult<'tcx, bool> { debug!("eval_fn_call: {:?}", instance); if !ecx.tcx.is_const_fn(instance.def_id()) { let def_id = instance.def_id(); + // Some fn calls are actually BinOp intrinsics let (op, oflo) = if let Some(op) = ecx.tcx.is_binop_lang_item(def_id) { op } else { return Err( ); }; let (dest, bb) = destination.expect("128 lowerings can't diverge"); - let dest_ty = sig.output(); + let l = ecx.read_value(args[0])?; + let r = ecx.read_value(args[1])?; if oflo { - ecx.intrinsic_with_overflow(op, args[0], args[1], dest, dest_ty)?; + ecx.binop_with_overflow(op, l, r, dest)?; } else { - ecx.intrinsic_overflowing(op, args[0], args[1], dest, dest_ty)?; + ecx.binop_ignore_overflow(op, l, r, dest)?; } ecx.goto_block(bb); return Ok(true); } @@ -260,8 +268,8 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { } }; let (return_place, return_to_block) = match destination { - Some((place, block)) => (place, StackPopCleanup::Goto(block)), - None => (Place::undef(), StackPopCleanup::None), + Some((place, block)) => (*place, StackPopCleanup::Goto(block)), + None => (Place::null(&ecx), StackPopCleanup::None), }; ecx.push_stack_frame( @@ -279,9 +287,8 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { fn call_intrinsic<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - args: &[ValTy<'tcx>], - dest: Place, - dest_layout: layout::TyLayout<'tcx>, + args: &[OpTy<'tcx>], + dest: PlaceTy<'tcx>, target: mir::BasicBlock, ) -> EvalResult<'tcx> { let substs = instance.substs; @@ -293,9 +300,9 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { let elem_align = ecx.layout_of(elem_ty)?.align.abi(); let align_val = Scalar::Bits { bits: elem_align as u128, - size: dest_layout.size.bytes() as u8, + size: dest.layout.size.bytes() as u8, }; - ecx.write_scalar(dest, align_val, dest_layout.ty)?; + ecx.write_scalar(align_val, dest)?; } "size_of" => { @@ -303,9 +310,9 @@ let size = ecx.layout_of(ty)?.size.bytes() as u128; let size_val = Scalar::Bits { bits: size, - size: dest_layout.size.bytes() as u8, + size: dest.layout.size.bytes() as u8, }; - ecx.write_scalar(dest, size_val, dest_layout.ty)?; + ecx.write_scalar(size_val, dest)?; } "type_id" => { @@ -313,14 +320,14 @@ let type_id = ecx.tcx.type_id_hash(ty) as u128; let id_val = Scalar::Bits { bits: type_id, - size: dest_layout.size.bytes() as u8, + size: dest.layout.size.bytes() as u8, }; - ecx.write_scalar(dest, id_val, dest_layout.ty)?; + ecx.write_scalar(id_val, dest)?; } "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => { let ty = substs.type_at(0); let layout_of = ecx.layout_of(ty)?; - let bits =
ecx.value_to_scalar(args[0])?.to_bits(layout_of.size)?; + let bits = ecx.read_scalar(args[0])?.to_bits(layout_of.size)?; let kind = match layout_of.abi { ty::layout::Abi::Scalar(ref scalar) => scalar.value, _ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?, @@ -333,7 +340,7 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { } else { numeric_intrinsic(intrinsic_name, bits, kind)? }; - ecx.write_scalar(dest, out_val, ty)?; + ecx.write_scalar(out_val, dest)?; } name => return Err( @@ -353,9 +360,9 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { _ecx: &EvalContext<'a, 'mir, 'tcx, Self>, _bin_op: mir::BinOp, left: Scalar, - _left_ty: Ty<'tcx>, + _left_layout: TyLayout<'tcx>, right: Scalar, - _right_ty: Ty<'tcx>, + _right_layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, Option<(Scalar, bool)>> { if left.is_bits() && right.is_bits() { Ok(None) @@ -387,8 +394,7 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { fn box_alloc<'a>( _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - _ty: Ty<'tcx>, - _dest: Place, + _dest: PlaceTy<'tcx>, ) -> EvalResult<'tcx> { Err( ConstEvalError::NeedsRfc("heap allocations via `box` keyword".to_string()).into(), @@ -406,7 +412,8 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { } } -pub fn const_val_field<'a, 'tcx>( +/// Project to a field of a (variant of a) const +pub fn const_field<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, instance: ty::Instance<'tcx>, @@ -414,30 +421,21 @@ pub fn const_val_field<'a, 'tcx>( field: mir::Field, value: &'tcx ty::Const<'tcx>, ) -> ::rustc::mir::interpret::ConstEvalResult<'tcx> { - trace!("const_val_field: {:?}, {:?}, {:?}", instance, field, value); + trace!("const_field: {:?}, {:?}, {:?}", instance, field, value); let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); let result = (|| { - let ty = value.ty; - let value = ecx.const_to_value(value.val)?; - let layout = ecx.layout_of(ty)?; - let place = ecx.allocate_place_for_value(value, layout, variant)?; - let (place, layout) = ecx.place_field(place, field, layout)?; - let (ptr, align) = place.to_ptr_align(); - let mut new_value = Value::ByRef(ptr.unwrap_or_err()?, align); - new_value = ecx.try_read_by_ref(new_value, layout.ty)?; - use rustc_data_structures::indexed_vec::Idx; - match (value, new_value) { - (Value::Scalar(_), Value::ByRef(..)) | - (Value::ScalarPair(..), Value::ByRef(..)) | - (Value::Scalar(_), Value::ScalarPair(..)) => bug!( - "field {} of {:?} yielded {:?}", - field.index(), - value, - new_value, - ), - _ => {}, - } - value_to_const_value(&ecx, new_value, layout) + // get the operand again + let op = const_to_op(&mut ecx, value)?; + // downcast + let down = match variant { + None => op, + Some(variant) => ecx.operand_downcast(op, variant)? + }; + // then project + let field = ecx.operand_field(down, field.index() as u64)?; + // and finally move back to the const world, always normalizing because + // this is not called for statics. + op_to_const(&ecx, field, true) })(); result.map_err(|err| { let (trace, span) = ecx.generate_stacktrace(None); @@ -457,21 +455,11 @@ pub fn const_variant_index<'a, 'tcx>( ) -> EvalResult<'tcx, usize> { trace!("const_variant_index: {:?}, {:?}", instance, val); let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); - let value = ecx.const_to_value(val.val)?; - let layout = ecx.layout_of(val.ty)?; - let (ptr, align) = match value { - Value::ScalarPair(..) 
| Value::Scalar(_) => { - let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?.into(); - ecx.write_value_to_ptr(value, ptr, layout.align, val.ty)?; - (ptr, layout.align) - }, - Value::ByRef(ptr, align) => (ptr, align), - }; - let place = Place::from_scalar_ptr(ptr.into(), align); - ecx.read_discriminant_as_variant_index(place, layout) + let op = const_to_op(&mut ecx, val)?; + ecx.read_discriminant_as_variant_index(op) } -pub fn const_value_to_allocation_provider<'a, 'tcx>( +pub fn const_to_allocation_provider<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, val: &'tcx ty::Const<'tcx>, ) -> &'tcx Allocation { @@ -488,11 +476,11 @@ pub fn const_value_to_allocation_provider<'a, 'tcx>( ty::ParamEnv::reveal_all(), CompileTimeEvaluator, ()); - let value = ecx.const_to_value(val.val)?; - let layout = ecx.layout_of(val.ty)?; - let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?; - ecx.write_value_to_ptr(value, ptr.into(), layout.align, val.ty)?; - let alloc = ecx.memory.get(ptr.alloc_id)?; + let op = const_to_op(&mut ecx, val)?; + // Make a new allocation, copy things there + let ptr = ecx.allocate(op.layout, MemoryKind::Stack)?; + ecx.copy_op(op, ptr.into())?; + let alloc = ecx.memory.get(ptr.to_ptr()?.alloc_id)?; Ok(tcx.intern_const_alloc(alloc.clone())) }; result().expect("unable to convert ConstValue to Allocation") @@ -534,11 +522,16 @@ pub fn const_eval_provider<'a, 'tcx>( }; let (res, ecx) = eval_body_and_ecx(tcx, cid, None, key.param_env); - res.and_then(|(mut val, _, layout)| { - if tcx.is_static(def_id).is_none() && cid.promoted.is_none() { - val = ecx.try_read_by_ref(val, layout.ty)?; + res.and_then(|op| { + let normalize = tcx.is_static(def_id).is_none() && cid.promoted.is_none(); + if !normalize { + // Sanity check: These must always be a MemPlace + match op.op { + Operand::Indirect(_) => { /* all is good */ }, + Operand::Immediate(_) => bug!("const eval gave us an Immediate"), + } } - value_to_const_value(&ecx, val, layout) + op_to_const(&ecx, op, normalize) }).map_err(|err| { let (trace, span) = ecx.generate_stacktrace(None); let err = ConstEvalErr { diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index f93607cecb9..cede6d4b22b 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -6,24 +6,27 @@ use rustc::hir::def_id::DefId; use rustc::hir::def::Def; use rustc::hir::map::definitions::DefPathData; use rustc::mir; -use rustc::ty::layout::{self, Size, Align, HasDataLayout, LayoutOf, TyLayout, Primitive}; +use rustc::ty::layout::{ + self, Size, Align, HasDataLayout, LayoutOf, TyLayout, Primitive +}; use rustc::ty::subst::{Subst, Substs}; -use rustc::ty::{self, Ty, TyCtxt, TypeAndMut}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::query::TyCtxtAt; use rustc_data_structures::fx::{FxHashSet, FxHasher}; -use rustc_data_structures::indexed_vec::{IndexVec, Idx}; +use rustc_data_structures::indexed_vec::IndexVec; use rustc::mir::interpret::{ - GlobalId, Value, Scalar, FrameInfo, AllocType, - EvalResult, EvalErrorKind, Pointer, + GlobalId, Scalar, FrameInfo, AllocType, + EvalResult, EvalErrorKind, ScalarMaybeUndef, }; use syntax::source_map::{self, Span}; use syntax::ast::Mutability; -use super::{Place, PlaceExtra, Memory, - HasMemory, MemoryKind, - Machine, LocalValue}; +use super::{ + Value, ValTy, Operand, MemPlace, MPlaceTy, Place, + Memory, Machine +}; macro_rules! 
validation_failure{ ($what:expr, $where:expr, $details:expr) => {{ @@ -167,6 +170,33 @@ impl<'mir, 'tcx: 'mir> Hash for Frame<'mir, 'tcx> { } } +// State of a local variable +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +pub enum LocalValue { + Dead, + // Mostly for convenience, we re-use the `Operand` type here. + // This is an optimization over just always having a pointer here; + // we can thus avoid doing an allocation when the local just stores + // immediate values *and* never has its address taken. + Live(Operand), +} + +impl<'tcx> LocalValue { + pub fn access(&self) -> EvalResult<'tcx, &Operand> { + match self { + LocalValue::Dead => err!(DeadLocal), + LocalValue::Live(ref val) => Ok(val), + } + } + + pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand> { + match self { + LocalValue::Dead => err!(DeadLocal), + LocalValue::Live(ref mut val) => Ok(val), + } + } +} + /// The virtual machine state during const-evaluation at a given point in time. type EvalSnapshot<'a, 'mir, 'tcx, M> = (M, Vec<Frame<'mir, 'tcx>>, Memory<'a, 'mir, 'tcx, M>); @@ -248,25 +278,6 @@ pub enum StackPopCleanup { None, } -#[derive(Copy, Clone, Debug)] -pub struct TyAndPacked<'tcx> { - pub ty: Ty<'tcx>, - pub packed: bool, -} - -#[derive(Copy, Clone, Debug)] -pub struct ValTy<'tcx> { - pub value: Value, - pub ty: Ty<'tcx>, -} - -impl<'tcx> ::std::ops::Deref for ValTy<'tcx> { - type Target = Value; - fn deref(&self) -> &Value { - &self.value - } -} - impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for &'a EvalContext<'a, 'mir, 'tcx, M> { #[inline] fn data_layout(&self) -> &layout::TargetDataLayout { @@ -348,12 +359,6 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M r } - pub fn alloc_ptr(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Pointer> { - assert!(!layout.is_unsized(), "cannot alloc memory for unsized type"); - - self.memory.allocate(layout.size, layout.align, MemoryKind::Stack) - } - pub fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> { &self.memory } @@ -372,6 +377,30 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M self.stack.len() - 1 } + /// Mark a storage as live, killing the previous content and returning it. + /// Remember to deallocate that! + pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> { + trace!("{:?} is now live", local); + + let layout = self.layout_of_local(self.cur_frame(), local)?; + let init = LocalValue::Live(self.uninit_operand(layout)?); + // StorageLive *always* kills the value that's currently stored + Ok(mem::replace(&mut self.frame_mut().locals[local], init)) + } + + /// Returns the old value of the local. + /// Remember to deallocate that!
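+ /// (The returned `LocalValue` can be handed to `deallocate_local` to free any backing allocation the local owned.)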
+ pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue { + trace!("{:?} is now dead", local); + + mem::replace(&mut self.frame_mut().locals[local], LocalValue::Dead) + } + + pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { + let ptr = self.memory.allocate_bytes(s.as_bytes()); + Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx)) + } + pub(super) fn resolve(&self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, ty::Instance<'tcx>> { trace!("resolve: {:?}, {:#?}", def_id, substs); trace!("substs: {:#?}", self.substs()); @@ -420,41 +449,55 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M self.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substituted) } + pub fn layout_of_local( + &self, + frame: usize, + local: mir::Local + ) -> EvalResult<'tcx, TyLayout<'tcx>> { + let local_ty = self.stack[frame].mir.local_decls[local].ty; + let local_ty = self.monomorphize( + local_ty, + self.stack[frame].instance.substs + ); + self.layout_of(local_ty) + } + /// Return the size and alignment of the value at the given type. /// Note that the value does not matter if the type is sized. For unsized types, /// the value has to be a fat pointer, and we only care about the "extra" data in it. pub fn size_and_align_of_dst( &self, - ty: Ty<'tcx>, - value: Value, + val: ValTy<'tcx>, ) -> EvalResult<'tcx, (Size, Align)> { - let layout = self.layout_of(ty)?; - if !layout.is_unsized() { - Ok(layout.size_and_align()) + if !val.layout.is_unsized() { + Ok(val.layout.size_and_align()) } else { - match ty.sty { + match val.layout.ty.sty { ty::TyAdt(..) | ty::TyTuple(..) => { // First get the size of all statically known fields. // Don't use type_of::sizing_type_of because that expects t to be sized, // and it also rounds up to alignment, which we want to avoid, // as the unsized field's alignment could be smaller. - assert!(!ty.is_simd()); - debug!("DST {} layout: {:?}", ty, layout); + assert!(!val.layout.ty.is_simd()); + debug!("DST layout: {:?}", val.layout); - let sized_size = layout.fields.offset(layout.fields.count() - 1); - let sized_align = layout.align; + let sized_size = val.layout.fields.offset(val.layout.fields.count() - 1); + let sized_align = val.layout.align; debug!( "DST {} statically sized prefix size: {:?} align: {:?}", - ty, + val.layout.ty, sized_size, sized_align ); // Recurse to get the size of the dynamically sized field (must be // the last field). - let field_ty = layout.field(self, layout.fields.count() - 1)?.ty; + let field_layout = val.layout.field(self, val.layout.fields.count() - 1)?; let (unsized_size, unsized_align) = - self.size_and_align_of_dst(field_ty, value)?; + self.size_and_align_of_dst(ValTy { + value: val.value, + layout: field_layout + })?; // FIXME (#26403, #27023): We should be adding padding // to `sized_size` (to accommodate the `unsized_align` @@ -484,18 +527,18 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M Ok((size.abi_align(align), align)) } ty::TyDynamic(..) => { - let (_, vtable) = self.into_ptr_vtable_pair(value)?; + let (_, vtable) = val.to_scalar_dyn_trait()?; // the second entry in the vtable is the dynamic size of the object. 
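+ // (the vtable is laid out as [drop_in_place, size, align, methods...], so the size lives at index 1 and the alignment at index 2)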
self.read_size_and_align_from_vtable(vtable) } ty::TySlice(_) | ty::TyStr => { - let (elem_size, align) = layout.field(self, 0)?.size_and_align(); - let (_, len) = self.into_slice(value)?; + let (elem_size, align) = val.layout.field(self, 0)?.size_and_align(); + let (_, len) = val.to_scalar_slice(self)?; Ok((elem_size * len, align)) } - _ => bug!("size_of_val::<{:?}>", ty), + _ => bug!("size_of_val::<{:?}>", val.layout.ty), } } } @@ -526,10 +569,13 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M // don't allocate at all for trivial constants if mir.local_decls.len() > 1 { - let mut locals = IndexVec::from_elem(LocalValue::Dead, &mir.local_decls); - for (local, decl) in locals.iter_mut().zip(mir.local_decls.iter()) { - *local = LocalValue::Live(self.init_value(decl.ty)?); - } + // We put some marker value into the locals that we later want to initialize. + // This can be anything except for LocalValue::Dead -- because *that* is the + // value we use for things that we know are initially dead. + let dummy = + LocalValue::Live(Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef))); + self.frame_mut().locals = IndexVec::from_elem(dummy, &mir.local_decls); + // Now mark those locals as dead that we do not want to initialize match self.tcx.describe_def(instance.def_id()) { // statics and constants don't have `Storage*` statements, no need to look for them Some(Def::Static(..)) | Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => {}, @@ -540,14 +586,22 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M use rustc::mir::StatementKind::{StorageDead, StorageLive}; match stmt.kind { StorageLive(local) | - StorageDead(local) => locals[local] = LocalValue::Dead, + StorageDead(local) => { + // Worst case we are overwriting a dummy, no deallocation needed + self.storage_dead(local); + } _ => {} } } } }, } - self.frame_mut().locals = locals; + // Finally, properly initialize all those that still have the dummy value + for local in mir.local_decls.indices() { + if self.frame().locals[local] == dummy { + self.storage_live(local)?; + } + } } self.memory.cur_frame = self.cur_frame(); @@ -571,10 +625,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } match frame.return_to_block { StackPopCleanup::MarkStatic(mutable) => { - if let Place::Ptr { ptr, .. } = frame.return_place { + if let Place::Ptr(MemPlace { ptr, .. }) = frame.return_place { // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions self.memory.mark_static_initialized( - ptr.unwrap_or_err()?.to_ptr()?.alloc_id, + ptr.to_ptr()?.alloc_id, mutable, )? } else { @@ -592,18 +646,15 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M Ok(()) } - pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool { - match ty.sty { - ty::TyRawPtr(ty::TypeAndMut { ty, .. }) | - ty::TyRef(_, ty, _) => !self.type_is_sized(ty), - ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()), - _ => false, - } - } - - pub fn read_global_as_value(&mut self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Value> { - let cv = self.const_eval(gid)?; - self.const_to_value(cv.val) + crate fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> { + // FIXME: should we tell the user that there was a local which was never written to? + if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. 
})) = local { + trace!("deallocating local"); + let ptr = ptr.to_ptr()?; + self.memory.dump_alloc(ptr.alloc_id); + self.memory.deallocate_local(ptr)?; + }; + Ok(()) } pub fn const_eval(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, &'tcx ty::Const<'tcx>> { @@ -704,62 +755,56 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M /// This function checks the memory where `ptr` points to. /// It will error if the bits at the destination do not match the ones described by the layout. - pub fn validate_ptr_target( + pub fn validate_mplace( &self, - ptr: Pointer, - ptr_align: Align, - mut layout: TyLayout<'tcx>, + dest: MPlaceTy<'tcx>, path: String, - seen: &mut FxHashSet<(Pointer, Ty<'tcx>)>, - todo: &mut Vec<(Pointer, Ty<'tcx>, String)>, + seen: &mut FxHashSet<(MPlaceTy<'tcx>)>, + todo: &mut Vec<(MPlaceTy<'tcx>, String)>, ) -> EvalResult<'tcx> { - self.memory.dump_alloc(ptr.alloc_id); - trace!("validate_ptr_target: {:?}, {:#?}", ptr, layout); + self.memory.dump_alloc(dest.to_ptr()?.alloc_id); + trace!("validate_mplace: {:?}, {:#?}", *dest, dest.layout); - let variant; - match layout.variants { + // Find the right variant + let (variant, dest) = match dest.layout.variants { layout::Variants::NicheFilling { niche: ref tag, .. } | layout::Variants::Tagged { ref tag, .. } => { let size = tag.value.size(self); - let (tag_value, tag_layout) = self.read_field( - Value::ByRef(ptr.into(), ptr_align), - None, - mir::Field::new(0), - layout, - )?; - let tag_value = self.value_to_scalar(ValTy { - value: tag_value, - ty: tag_layout.ty - })?; + // we first read the tag value as scalar, to be able to validate it + let tag_mplace = self.mplace_field(dest, 0)?; + let tag_value = self.read_scalar(tag_mplace.into())?; let path = format!("{}.TAG", path); self.validate_scalar( - ScalarMaybeUndef::Scalar(tag_value), size, tag, &path, tag_layout.ty + tag_value, size, tag, &path, tag_mplace.layout.ty )?; - let variant_index = self.read_discriminant_as_variant_index( - Place::from_ptr(ptr, ptr_align), - layout, - )?; - variant = variant_index; - layout = layout.for_variant(self, variant_index); - trace!("variant layout: {:#?}", layout); + // then we read it again to get the index, to continue + let variant = self.read_discriminant_as_variant_index(dest.into())?; + let dest = self.mplace_downcast(dest, variant)?; + trace!("variant layout: {:#?}", dest.layout); + (variant, dest) }, - layout::Variants::Single { index } => variant = index, - } - match layout.fields { + layout::Variants::Single { index } => { + (index, dest) + } + }; + + // Validate all fields + match dest.layout.fields { // primitives are unions with zero fields layout::FieldPlacement::Union(0) => { - match layout.abi { + match dest.layout.abi { // nothing to do, whatever the pointer points to, it is never going to be read layout::Abi::Uninhabited => validation_failure!("a value of an uninhabited type", path), // check that the scalar is a valid pointer or that its bit range matches the // expectation. 
- layout::Abi::Scalar(ref scalar) => { - let size = scalar.value.size(self); - let value = self.memory.read_scalar(ptr, ptr_align, size)?; - self.validate_scalar(value, size, scalar, &path, layout.ty)?; - if scalar.value == Primitive::Pointer { + layout::Abi::Scalar(ref scalar_layout) => { + let size = scalar_layout.value.size(self); + let value = self.read_value(dest.into())?; + let scalar = value.to_scalar_or_undef(); + self.validate_scalar(scalar, size, scalar_layout, &path, dest.layout.ty)?; + if scalar_layout.value == Primitive::Pointer { // ignore integer pointers, we can't reason about the final hardware - if let Scalar::Ptr(ptr) = value.unwrap_or_err()? { + if let Scalar::Ptr(ptr) = scalar.not_undef()? { let alloc_kind = self.tcx.alloc_map.lock().get(ptr.alloc_id); if let Some(AllocType::Static(did)) = alloc_kind { // statics from other crates are already checked @@ -768,17 +813,19 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M return Ok(()); } } - if let Some(tam) = layout.ty.builtin_deref(false) { + if value.layout.ty.builtin_deref(false).is_some() { + trace!("Recursing below ptr {:#?}", value); + let ptr_place = self.ref_to_mplace(value)?; // we have not encountered this pointer+layout combination before - if seen.insert((ptr, tam.ty)) { - todo.push((ptr, tam.ty, format!("(*{})", path))) + if seen.insert(ptr_place) { + todo.push((ptr_place, format!("(*{})", path))) } } } } Ok(()) }, - _ => bug!("bad abi for FieldPlacement::Union(0): {:#?}", layout.abi), + _ => bug!("bad abi for FieldPlacement::Union(0): {:#?}", dest.layout.abi), } } layout::FieldPlacement::Union(_) => { @@ -787,52 +834,63 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M // See https://github.com/rust-lang/rust/issues/32836#issuecomment-406875389 Ok(()) }, - layout::FieldPlacement::Array { stride, count } => { - let elem_layout = layout.field(self, 0)?; + layout::FieldPlacement::Array { count, .. } => { for i in 0..count { let mut path = path.clone(); - self.write_field_name(&mut path, layout.ty, i as usize, variant).unwrap(); - self.validate_ptr_target(ptr.offset(stride * i, self)?, ptr_align, elem_layout, path, seen, todo)?; + self.dump_field_name(&mut path, dest.layout.ty, i as usize, variant).unwrap(); + let field = self.mplace_field(dest, i)?; + self.validate_mplace(field, path, seen, todo)?; } Ok(()) }, layout::FieldPlacement::Arbitrary { ref offsets, .. 
} => { - - // check length field and vtable field - match layout.ty.builtin_deref(false).map(|tam| &tam.ty.sty) { + // fat pointers need special treatment + match dest.layout.ty.builtin_deref(false).map(|tam| &tam.ty.sty) { | Some(ty::TyStr) | Some(ty::TySlice(_)) => { - let (len, len_layout) = self.read_field( - Value::ByRef(ptr.into(), ptr_align), - None, - mir::Field::new(1), - layout, - )?; - let len = self.value_to_scalar(ValTy { value: len, ty: len_layout.ty })?; - if len.to_bits(len_layout.size).is_err() { - return validation_failure!("length is not a valid integer", path); + // check the length + let len_mplace = self.mplace_field(dest, 1)?; + let len = self.read_scalar(len_mplace.into())?; + let len = match len.to_bits(len_mplace.layout.size) { + Err(_) => return validation_failure!("length is not a valid integer", path), + Ok(len) => len as u64, + }; + // get the fat ptr + let ptr = self.ref_to_mplace(self.read_value(dest.into())?)?; + let mut path = path.clone(); + self.dump_field_name(&mut path, dest.layout.ty, 0, variant).unwrap(); + // check all fields + for i in 0..len { + let mut path = path.clone(); + self.dump_field_name(&mut path, ptr.layout.ty, i as usize, 0).unwrap(); + let field = self.mplace_field(ptr, i)?; + self.validate_mplace(field, path, seen, todo)?; } + // FIXME: For a TyStr, check that this is valid UTF-8 }, Some(ty::TyDynamic(..)) => { - let (vtable, vtable_layout) = self.read_field( - Value::ByRef(ptr.into(), ptr_align), - None, - mir::Field::new(1), - layout, - )?; - let vtable = self.value_to_scalar(ValTy { value: vtable, ty: vtable_layout.ty })?; + let vtable_mplace = self.mplace_field(dest, 1)?; + let vtable = self.read_scalar(vtable_mplace.into())?; if vtable.to_ptr().is_err() { return validation_failure!("vtable address is not a pointer", path); } - } - _ => {}, - } - for (i, &offset) in offsets.iter().enumerate() { - let field_layout = layout.field(self, i)?; - let mut path = path.clone(); - self.write_field_name(&mut path, layout.ty, i, variant).unwrap(); - self.validate_ptr_target(ptr.offset(offset, self)?, ptr_align, field_layout, path, seen, todo)?; + // get the fat ptr + let _ptr = self.ref_to_mplace(self.read_value(dest.into())?)?; + // FIXME: What can we verify about this? 
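On the `FIXME: What can we verify about this?`: a Rust vtable begins with the drop-in-place function pointer followed by the concrete type's size and alignment, so at minimum those two words could be sanity-checked (the interpreter already reads them in `read_size_and_align_from_vtable`). A standalone sketch of such a check on a toy word-array vtable, not part of this PR:

```rust
// Toy model: words[0] is the drop fn, words[1] the size, words[2] the align.
// Rust guarantees alignment is a power of two and size a multiple of it.
fn vtable_sanity(words: &[usize]) -> Result<(), String> {
    let (size, align) = (words[1], words[2]);
    if !align.is_power_of_two() {
        return Err(format!("alignment {} is not a power of two", align));
    }
    if size % align != 0 {
        return Err(format!("size {} is not a multiple of alignment {}", size, align));
    }
    Ok(())
}

fn main() {
    assert!(vtable_sanity(&[0xdead, 16, 8]).is_ok());
    assert!(vtable_sanity(&[0xdead, 12, 8]).is_err()); // 12 is not a multiple of 8
}
```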
+ }, + Some(ty) => + bug!("Unexpected fat pointer target type {:?}", ty), + None => { + // Not a pointer, perform regular aggregate handling below + for i in 0..offsets.len() { + let mut path = path.clone(); + self.dump_field_name(&mut path, dest.layout.ty, i, variant).unwrap(); + let field = self.mplace_field(dest, i as u64)?; + self.validate_mplace(field, path, seen, todo)?; + } + }, } + Ok(()) } } @@ -858,132 +916,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } } - fn unsize_into_ptr( - &mut self, - src: Value, - src_ty: Ty<'tcx>, - dest: Place, - dest_ty: Ty<'tcx>, - sty: Ty<'tcx>, - dty: Ty<'tcx>, - ) -> EvalResult<'tcx> { - // A<Struct> -> A<Trait> conversion - let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty); - - match (&src_pointee_ty.sty, &dest_pointee_ty.sty) { - (&ty::TyArray(_, length), &ty::TySlice(_)) => { - let ptr = self.into_ptr(src)?; - // u64 cast is from usize to u64, which is always good - let valty = ValTy { - value: ptr.to_value_with_len(length.unwrap_usize(self.tcx.tcx), self.tcx.tcx), - ty: dest_ty, - }; - self.write_value(valty, dest) - } - (&ty::TyDynamic(..), &ty::TyDynamic(..)) => { - // For now, upcasts are limited to changes in marker - // traits, and hence never actually require an actual - // change to the vtable. - let valty = ValTy { - value: src, - ty: dest_ty, - }; - self.write_value(valty, dest) - } - (_, &ty::TyDynamic(ref data, _)) => { - let trait_ref = data.principal().unwrap().with_self_ty( - *self.tcx, - src_pointee_ty, - ); - let trait_ref = self.tcx.erase_regions(&trait_ref); - let vtable = self.get_vtable(src_pointee_ty, trait_ref)?; - let ptr = self.into_ptr(src)?; - let valty = ValTy { - value: ptr.to_value_with_vtable(vtable), - ty: dest_ty, - }; - self.write_value(valty, dest) - } - - _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty), - } - } - - crate fn unsize_into( - &mut self, - src: Value, - src_layout: TyLayout<'tcx>, - dst: Place, - dst_layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx> { - match (&src_layout.ty.sty, &dst_layout.ty.sty) { - (&ty::TyRef(_, s, _), &ty::TyRef(_, d, _)) | - (&ty::TyRef(_, s, _), &ty::TyRawPtr(TypeAndMut { ty: d, .. })) | - (&ty::TyRawPtr(TypeAndMut { ty: s, .. }), - &ty::TyRawPtr(TypeAndMut { ty: d, .. })) => { - self.unsize_into_ptr(src, src_layout.ty, dst, dst_layout.ty, s, d) - } - (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { - assert_eq!(def_a, def_b); - if def_a.is_box() || def_b.is_box() { - if !def_a.is_box() || !def_b.is_box() { - bug!("invalid unsizing between {:?} -> {:?}", src_layout, dst_layout); - } - return self.unsize_into_ptr( - src, - src_layout.ty, - dst, - dst_layout.ty, - src_layout.ty.boxed_ty(), - dst_layout.ty.boxed_ty(), - ); - } - - // unsizing of generic struct with pointer fields - // Example: `Arc<T>` -> `Arc<Trait>` - // here we need to increase the size of every &T thin ptr field to a fat ptr - for i in 0..src_layout.fields.count() { - let (dst_f_place, dst_field) = - self.place_field(dst, mir::Field::new(i), dst_layout)?; - if dst_field.is_zst() { - continue; - } - let (src_f_value, src_field) = match src { - Value::ByRef(ptr, align) => { - let src_place = Place::from_scalar_ptr(ptr.into(), align); - let (src_f_place, src_field) = - self.place_field(src_place, mir::Field::new(i), src_layout)?; - (self.read_place(src_f_place)?, src_field) - } - Value::Scalar(_) | Value::ScalarPair(..)
=> { - let src_field = src_layout.field(&self, i)?; - assert_eq!(src_layout.fields.offset(i).bytes(), 0); - assert_eq!(src_field.size, src_layout.size); - (src, src_field) - } - }; - if src_field.ty == dst_field.ty { - self.write_value(ValTy { - value: src_f_value, - ty: src_field.ty, - }, dst_f_place)?; - } else { - self.unsize_into(src_f_value, src_field, dst_f_place, dst_field)?; - } - } - Ok(()) - } - _ => { - bug!( - "unsize_into: invalid conversion: {:?} -> {:?}", - src_layout, - dst_layout - ) - } - } - } - - pub fn dump_local(&self, place: Place) { + pub fn dump_place(&self, place: Place) { // Debug output if !log_enabled!(::log::Level::Trace) { return; @@ -1005,22 +938,23 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M panic!("Failed to access local: {:?}", err); } } - Ok(Value::ByRef(ptr, align)) => { + Ok(Operand::Indirect(mplace)) => { + let (ptr, align) = mplace.to_scalar_ptr_align(); match ptr { Scalar::Ptr(ptr) => { write!(msg, " by align({}) ref:", align.abi()).unwrap(); allocs.push(ptr.alloc_id); } - ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(), + ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(), } } - Ok(Value::Scalar(val)) => { + Ok(Operand::Immediate(Value::Scalar(val))) => { write!(msg, " {:?}", val).unwrap(); if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val { allocs.push(ptr.alloc_id); } } - Ok(Value::ScalarPair(val1, val2)) => { + Ok(Operand::Immediate(Value::ScalarPair(val1, val2))) => { write!(msg, " ({:?}, {:?})", val1, val2).unwrap(); if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 { allocs.push(ptr.alloc_id); @@ -1034,9 +968,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M trace!("{}", msg); self.memory.dump_allocs(allocs); } - Place::Ptr { ptr, align, .. } => { + Place::Ptr(mplace) => { + let (ptr, align) = mplace.to_scalar_ptr_align(); match ptr { - ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) => { + Scalar::Ptr(ptr) => { trace!("by align({}) ref:", align.abi()); self.memory.dump_alloc(ptr.alloc_id); } @@ -1092,7 +1027,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M super::truncate(value, ty) } - fn write_field_name(&self, s: &mut String, ty: Ty<'tcx>, i: usize, variant: usize) -> ::std::fmt::Result { + fn dump_field_name(&self, s: &mut String, ty: Ty<'tcx>, i: usize, variant: usize) -> ::std::fmt::Result { match ty.sty { ty::TyBool | ty::TyChar | @@ -1154,8 +1089,25 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ty::TyProjection(_) | ty::TyAnon(..) 
| ty::TyParam(_) | ty::TyInfer(_) | ty::TyError => { - bug!("write_field_name: unexpected type `{}`", ty) + bug!("dump_field_name: unexpected type `{}`", ty) } } } } + +pub fn sign_extend(value: u128, layout: TyLayout<'_>) -> u128 { + let size = layout.size.bits(); + assert!(layout.abi.is_signed()); + // sign extend + let shift = 128 - size; + // shift the unsigned value to the left + // and back to the right as signed (essentially fills with FF on the left) + (((value << shift) as i128) >> shift) as u128 +} + +pub fn truncate(value: u128, layout: TyLayout<'_>) -> u128 { + let size = layout.size.bits(); + let shift = 128 - size; + // truncate (shift left to drop out leftover values, shift right to fill with zeroes) + (value << shift) >> shift +} diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index 112d8759c68..84556c7249d 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -5,10 +5,10 @@ use std::hash::Hash; use rustc::mir::interpret::{AllocId, EvalResult, Scalar, Pointer, AccessKind, GlobalId}; -use super::{EvalContext, Place, ValTy, Memory}; +use super::{EvalContext, PlaceTy, OpTy, Memory}; use rustc::mir; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, layout::TyLayout}; use rustc::ty::layout::Size; use syntax::source_map::Span; use syntax::ast::Mutability; @@ -31,19 +31,17 @@ pub trait Machine<'mir, 'tcx>: Clone + Eq + Hash { fn eval_fn_call<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - destination: Option<(Place, mir::BasicBlock)>, - args: &[ValTy<'tcx>], + destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>, + args: &[OpTy<'tcx>], span: Span, - sig: ty::FnSig<'tcx>, ) -> EvalResult<'tcx, bool>; /// directly process an intrinsic without pushing a stack frame. 
fn call_intrinsic<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, - args: &[ValTy<'tcx>], - dest: Place, - dest_layout: ty::layout::TyLayout<'tcx>, + args: &[OpTy<'tcx>], + dest: PlaceTy<'tcx>, target: mir::BasicBlock, ) -> EvalResult<'tcx>; @@ -57,9 +55,9 @@ pub trait Machine<'mir, 'tcx>: Clone + Eq + Hash { ecx: &EvalContext<'a, 'mir, 'tcx, Self>, bin_op: mir::BinOp, left: Scalar, - left_ty: Ty<'tcx>, + left_layout: TyLayout<'tcx>, right: Scalar, - right_ty: Ty<'tcx>, + right_layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, Option<(Scalar, bool)>>; /// Called when trying to mark machine defined `MemoryKinds` as static @@ -81,8 +79,7 @@ pub trait Machine<'mir, 'tcx>: Clone + Eq + Hash { /// Returns a pointer to the allocated memory fn box_alloc<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - ty: Ty<'tcx>, - dest: Place, + dest: PlaceTy<'tcx>, ) -> EvalResult<'tcx>; /// Called when trying to access a global declared with a `linkage` attribute diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 636b04a8d16..461b98e4ff3 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -7,7 +7,7 @@ use rustc::ty::Instance; use rustc::ty::ParamEnv; use rustc::ty::query::TyCtxtAt; use rustc::ty::layout::{self, Align, TargetDataLayout, Size}; -use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, Value, ScalarMaybeUndef, +use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, ScalarMaybeUndef, EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType}; pub use rustc::mir::interpret::{write_target_uint, write_target_int, read_target_uint}; use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher}; @@ -301,6 +301,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } } + /// Check if the pointer is "in-bounds". Notice that a pointer pointing at the end + /// of an allocation (i.e., at the first *inaccessible* location) *is* considered + /// in-bounds! This follows C's/LLVM's rules. pub fn check_bounds(&self, ptr: Pointer, access: bool) -> EvalResult<'tcx> { let alloc = self.get(ptr.alloc_id)?; let allocation_size = alloc.bytes.len() as u64; @@ -331,7 +334,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { assert!(self.tcx.is_static(def_id).is_some()); EvalErrorKind::ReferencedConstant(err).into() }).map(|val| { - self.tcx.const_value_to_allocation(val) + self.tcx.const_to_allocation(val) }) } @@ -828,6 +831,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { { let dst = self.get_bytes_mut(ptr, type_size, ptr_align.min(type_align))?; + // TODO: Why do we still need `signed` here? We do NOT have it for loading! if signed { write_target_int(endianness, dst, bytes as i128).unwrap(); } else { @@ -992,63 +996,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M>; fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M>; - - /// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef, - /// this may have to perform a load. - fn into_ptr( - &self, - value: Value, - ) -> EvalResult<'tcx, ScalarMaybeUndef> { - Ok(match value { - Value::ByRef(ptr, align) => { - self.memory().read_ptr_sized(ptr.to_ptr()?, align)? 
- } - Value::Scalar(ptr) | - Value::ScalarPair(ptr, _) => ptr, - }.into()) - } - - fn into_ptr_vtable_pair( - &self, - value: Value, - ) -> EvalResult<'tcx, (ScalarMaybeUndef, Pointer)> { - match value { - Value::ByRef(ref_ptr, align) => { - let mem = self.memory(); - let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into(); - let vtable = mem.read_ptr_sized( - ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, - align - )?.unwrap_or_err()?.to_ptr()?; - Ok((ptr, vtable)) - } - - Value::ScalarPair(ptr, vtable) => Ok((ptr, vtable.unwrap_or_err()?.to_ptr()?)), - _ => bug!("expected ptr and vtable, got {:?}", value), - } - } - - fn into_slice( - &self, - value: Value, - ) -> EvalResult<'tcx, (ScalarMaybeUndef, u64)> { - match value { - Value::ByRef(ref_ptr, align) => { - let mem = self.memory(); - let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into(); - let len = mem.read_ptr_sized( - ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, - align - )?.unwrap_or_err()?.to_bits(mem.pointer_size())? as u64; - Ok((ptr, len)) - } - Value::ScalarPair(ptr, val) => { - let len = val.unwrap_or_err()?.to_bits(self.memory().pointer_size())?; - Ok((ptr, len as u64)) - } - Value::Scalar(_) => bug!("expected ptr and length, got {:?}", value), - } - } } impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for Memory<'a, 'mir, 'tcx, M> { diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs index 1bae930fc3f..45270817900 100644 --- a/src/librustc_mir/interpret/mod.rs +++ b/src/librustc_mir/interpret/mod.rs @@ -1,23 +1,23 @@ //! An interpreter for MIR used in CTFE and by miri mod cast; -mod const_eval; mod eval_context; mod place; +mod operand; mod machine; mod memory; mod operator; mod step; mod terminator; mod traits; -mod value; +mod const_eval; pub use self::eval_context::{ EvalContext, Frame, StackPopCleanup, - TyAndPacked, ValTy, + sign_extend, truncate, LocalValue, }; -pub use self::place::{Place, PlaceExtra}; +pub use self::place::{Place, PlaceExtra, PlaceTy, MemPlace, MPlaceTy}; pub use self::memory::{Memory, MemoryKind, HasMemory}; @@ -26,34 +26,13 @@ pub use self::const_eval::{ mk_borrowck_eval_cx, mk_eval_cx, CompileTimeEvaluator, - const_value_to_allocation_provider, + const_to_allocation_provider, const_eval_provider, - const_val_field, + const_field, const_variant_index, - value_to_const_value, + op_to_const, }; pub use self::machine::Machine; -pub use self::memory::{write_target_uint, write_target_int, read_target_uint}; - -use self::value::LocalValue; - -use rustc::ty::layout::TyLayout; - -pub fn sign_extend(value: u128, layout: TyLayout<'_>) -> u128 { - let size = layout.size.bits(); - assert!(layout.abi.is_signed()); - // sign extend - let shift = 128 - size; - // shift the unsigned value to the left - // and back to the right as signed (essentially fills with FF on the left) - (((value << shift) as i128) >> shift) as u128 -} - -pub fn truncate(value: u128, layout: TyLayout<'_>) -> u128 { - let size = layout.size.bits(); - let shift = 128 - size; - // truncate (shift left to drop out leftover values, shift right to fill with zeroes) - (value << shift) >> shift -} +pub use self::operand::{Value, ValTy, Operand, OpTy}; diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs new file mode 100644 index 00000000000..ed2a9f06a91 --- /dev/null +++ b/src/librustc_mir/interpret/operand.rs @@ -0,0 +1,572 @@ +//! 
Functions concerning immediate values and operands, and reading from operands. +//! All high-level functions to read from memory work on operands as sources. + +use std::convert::TryInto; + +use rustc::mir; +use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, HasDataLayout, IntegerExt}; +use rustc_data_structures::indexed_vec::Idx; + +use rustc::mir::interpret::{ + GlobalId, ConstValue, Scalar, EvalResult, Pointer, ScalarMaybeUndef, EvalErrorKind +}; +use super::{EvalContext, Machine, MemPlace, MPlaceTy, PlaceExtra, MemoryKind}; + +/// A `Value` represents a single immediate self-contained Rust value. +/// +/// For optimization of a few very common cases, there is also a representation for a pair of +/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary +/// operations and fat pointers. This idea was taken from rustc's codegen. +/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely +/// defined on `Value`, and do not have to work with a `Place`. +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub enum Value { + Scalar(ScalarMaybeUndef), + ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef), +} + +impl<'tcx> Value { + pub fn new_slice( + val: Scalar, + len: u64, + cx: impl HasDataLayout + ) -> Self { + Value::ScalarPair(val.into(), Scalar::Bits { + bits: len as u128, + size: cx.data_layout().pointer_size.bytes() as u8, + }.into()) + } + + pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self { + Value::ScalarPair(val.into(), Scalar::Ptr(vtable).into()) + } + + pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef { + match self { + Value::Scalar(val) => val, + Value::ScalarPair(..) => bug!("Got a fat pointer where a scalar was expected"), + } + } + + pub fn to_scalar(self) -> EvalResult<'tcx, Scalar> { + self.to_scalar_or_undef().not_undef() + } + + /// Convert the value into a pointer (or a pointer-sized integer). + pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> { + match self { + Value::Scalar(ptr) | + Value::ScalarPair(ptr, _) => ptr.not_undef(), + } + } + + pub fn to_scalar_dyn_trait(self) -> EvalResult<'tcx, (Scalar, Pointer)> { + match self { + Value::ScalarPair(ptr, vtable) => + Ok((ptr.not_undef()?, vtable.to_ptr()?)), + _ => bug!("expected ptr and vtable, got {:?}", self), + } + } + + pub fn to_scalar_slice(self, cx: impl HasDataLayout) -> EvalResult<'tcx, (Scalar, u64)> { + match self { + Value::ScalarPair(ptr, val) => { + let len = val.to_bits(cx.data_layout().pointer_size)?; + Ok((ptr.not_undef()?, len as u64)) + } + _ => bug!("expected ptr and length, got {:?}", self), + } + } +} + +// ScalarPair needs a type to interpret, so we often have a value and a type together +// as input for binary and cast operations. +#[derive(Copy, Clone, Debug)] +pub struct ValTy<'tcx> { + pub value: Value, + pub layout: TyLayout<'tcx>, +} + +impl<'tcx> ::std::ops::Deref for ValTy<'tcx> { + type Target = Value; + fn deref(&self) -> &Value { + &self.value + } +} + +/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate, +/// or still in memory. The latter is an optimization, to delay reading that chunk of +/// memory and to avoid having to store arbitrary-sized data here. 
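To make the `Immediate`/`Indirect` split concrete, here is a stripped-down model of the idea (toy types with `u128` scalars and a plain integer address; the real code uses the `Value` and `MemPlace` types defined in this file):

```rust
// Toy model of the Immediate/Indirect split: small values live directly in the
// interpreter, larger ones stay behind an "address" until someone reads them.
#[derive(Debug)]
enum Imm {
    Scalar(u128),
    ScalarPair(u128, u128),
}

#[derive(Debug)]
enum Op {
    Immediate(Imm),  // the data itself, no memory access needed
    Indirect(usize), // where the data lives; reading it is deferred
}

fn main() {
    // A plain integer is a single scalar:
    let answer = Op::Immediate(Imm::Scalar(42));
    // A checked add can produce its (result, overflow-flag) pair without
    // ever touching interpreter memory:
    let (sum, overflowed) = 200u8.overflowing_add(100);
    let pair = Op::Immediate(Imm::ScalarPair(sum as u128, overflowed as u128));
    // An aggregate like [u8; 64] would instead be left in memory:
    let big = Op::Indirect(0x1000);
    println!("{:?}\n{:?}\n{:?}", answer, pair, big);
}
```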
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub enum Operand { + Immediate(Value), + Indirect(MemPlace), +} + +impl Operand { + #[inline] + pub fn from_ptr(ptr: Pointer, align: Align) -> Self { + Operand::Indirect(MemPlace::from_ptr(ptr, align)) + } + + #[inline] + pub fn from_scalar_value(val: Scalar) -> Self { + Operand::Immediate(Value::Scalar(val.into())) + } + + #[inline] + pub fn to_mem_place(self) -> MemPlace { + match self { + Operand::Indirect(mplace) => mplace, + _ => bug!("to_mem_place: expected Operand::Indirect, got {:?}", self), + + } + } + + #[inline] + pub fn to_immediate(self) -> Value { + match self { + Operand::Immediate(val) => val, + _ => bug!("to_immediate: expected Operand::Immediate, got {:?}", self), + + } + } +} + +#[derive(Copy, Clone, Debug)] +pub struct OpTy<'tcx> { + pub op: Operand, + pub layout: TyLayout<'tcx>, +} + +impl<'tcx> ::std::ops::Deref for OpTy<'tcx> { + type Target = Operand; + fn deref(&self) -> &Operand { + &self.op + } +} + +impl<'tcx> From<MPlaceTy<'tcx>> for OpTy<'tcx> { + fn from(mplace: MPlaceTy<'tcx>) -> Self { + OpTy { + op: Operand::Indirect(*mplace), + layout: mplace.layout + } + } +} + +impl<'tcx> From<ValTy<'tcx>> for OpTy<'tcx> { + fn from(val: ValTy<'tcx>) -> Self { + OpTy { + op: Operand::Immediate(val.value), + layout: val.layout + } + } +} + +impl<'tcx> OpTy<'tcx> { + #[inline] + pub fn from_ptr(ptr: Pointer, align: Align, layout: TyLayout<'tcx>) -> Self { + OpTy { op: Operand::from_ptr(ptr, align), layout } + } + + #[inline] + pub fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self { + OpTy { op: Operand::from_ptr(ptr, layout.align), layout } + } + + #[inline] + pub fn from_scalar_value(val: Scalar, layout: TyLayout<'tcx>) -> Self { + OpTy { op: Operand::Immediate(Value::Scalar(val.into())), layout } + } +} + +impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { + /// Try reading a value in memory; this is interesting particularly for ScalarPair. + /// Return None if the layout does not permit loading this as a value. + fn try_read_value_from_ptr( + &self, + ptr: Scalar, + ptr_align: Align, + layout: TyLayout<'tcx>, + ) -> EvalResult<'tcx, Option<Value>> { + self.memory.check_align(ptr, ptr_align)?; + + if layout.size.bytes() == 0 { + return Ok(Some(Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 })))); + } + + let ptr = ptr.to_ptr()?; + + match layout.abi { + layout::Abi::Scalar(..) => { + let scalar = self.memory.read_scalar(ptr, ptr_align, layout.size)?; + Ok(Some(Value::Scalar(scalar))) + } + layout::Abi::ScalarPair(ref a, ref b) => { + let (a, b) = (&a.value, &b.value); + let (a_size, b_size) = (a.size(self), b.size(self)); + let a_ptr = ptr; + let b_offset = a_size.abi_align(b.align(self)); + assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use + let b_ptr = ptr.offset(b_offset, self)?.into(); + let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?; + let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?; + Ok(Some(Value::ScalarPair(a_val, b_val))) + } + _ => Ok(None), + } + } + + /// Try returning an immediate value for the operand. + /// If the layout does not permit loading this as a value, return where in memory + /// we can find the data. + /// Note that for a given layout, this operation will either always fail or always + /// succeed! Whether it succeeds depends on whether the layout can be represented + /// in a `Value`, not on which data is stored there currently.
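So, for example, an `i32` (Scalar ABI) or a `(i32, bool)` (ScalarPair ABI) always reads as an immediate, while a `[u8; 4]` (aggregate ABI) never does, regardless of the bytes stored. A schematic of that dispatch (a toy `Abi` enum mirroring the two arms of `try_read_value_from_ptr`):

```rust
// Which ABIs load as an immediate `Value`: Scalar and ScalarPair do,
// everything else stays in memory.
#[derive(Debug)]
enum Abi {
    Scalar,
    ScalarPair,
    Aggregate,
}

fn loads_as_immediate(abi: &Abi) -> bool {
    match abi {
        Abi::Scalar | Abi::ScalarPair => true,
        Abi::Aggregate => false,
    }
}

fn main() {
    // e.g. i32 is Scalar, (i32, bool) is ScalarPair, [u8; 4] is Aggregate
    for (ty, abi) in &[
        ("i32", Abi::Scalar),
        ("(i32, bool)", Abi::ScalarPair),
        ("[u8; 4]", Abi::Aggregate),
    ] {
        println!("{}: {}", ty, loads_as_immediate(abi));
    }
}
```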
+ pub(super) fn try_read_value( + &self, + OpTy { op: src, layout }: OpTy<'tcx>, + ) -> EvalResult<'tcx, Result<Value, MemPlace>> { + match src { + Operand::Indirect(mplace) => { + if mplace.extra == PlaceExtra::None { + if let Some(val) = + self.try_read_value_from_ptr(mplace.ptr, mplace.align, layout)? + { + return Ok(Ok(val)); + } + } + Ok(Err(mplace)) + }, + Operand::Immediate(val) => Ok(Ok(val)), + } + } + + /// Read a value from a place, asserting that this is possible with the given layout. + #[inline(always)] + pub fn read_value(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> { + if let Ok(value) = self.try_read_value(op)? { + Ok(ValTy { value, layout: op.layout }) + } else { + bug!("primitive read failed for type: {:?}", op.layout.ty); + } + } + + /// Read a scalar from a place + pub fn read_scalar(&self, op: OpTy<'tcx>) -> EvalResult<'tcx, ScalarMaybeUndef> { + match *self.read_value(op)? { + Value::ScalarPair(..) => bug!("got ScalarPair for type: {:?}", op.layout.ty), + Value::Scalar(val) => Ok(val), + } + } + + pub fn uninit_operand(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Operand> { + // FIXME: Aren't we supposed to also be immediate for a ZST? + // This decides which types we will use the Immediate optimization for, and hence should + // match what `try_read_value` and `eval_place_to_op` support. + Ok(match layout.abi { + layout::Abi::Scalar(..) => + Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef)), + layout::Abi::ScalarPair(..) => + Operand::Immediate(Value::ScalarPair( + ScalarMaybeUndef::Undef, + ScalarMaybeUndef::Undef, + )), + _ => { + trace!("Forcing allocation for local of type {:?}", layout.ty); + Operand::Indirect( + *self.allocate(layout, MemoryKind::Stack)? + ) + } + }) + } + + /// Projection functions + pub fn operand_field( + &self, + op: OpTy<'tcx>, + field: u64, + ) -> EvalResult<'tcx, OpTy<'tcx>> { + let base = match op.try_as_mplace() { + Ok(mplace) => { + // The easy case + let field = self.mplace_field(mplace, field)?; + return Ok(field.into()); + }, + Err(value) => value + }; + + let field = field.try_into().unwrap(); + let field_layout = op.layout.field(self, field)?; + if field_layout.size.bytes() == 0 { + let val = Value::Scalar(Scalar::zst().into()); + return Ok(OpTy { op: Operand::Immediate(val), layout: field_layout }); + } + let offset = op.layout.fields.offset(field); + let value = match base { + // the field covers the entire type + _ if offset.bytes() == 0 && field_layout.size == op.layout.size => base, + // extract fields from types with `ScalarPair` ABI + Value::ScalarPair(a, b) => { + let val = if offset.bytes() == 0 { a } else { b }; + Value::Scalar(val) + }, + Value::Scalar(val) => + bug!("field access on non-aggregate {:#?}, {:#?}", val, op.layout), + }; + Ok(OpTy { op: Operand::Immediate(value), layout: field_layout }) + } + + pub(super) fn operand_downcast( + &self, + op: OpTy<'tcx>, + variant: usize, + ) -> EvalResult<'tcx, OpTy<'tcx>> { + // Downcasts only change the layout + Ok(match op.try_as_mplace() { + Ok(mplace) => { + self.mplace_downcast(mplace, variant)?.into() + }, + Err(..) => { + let layout = op.layout.for_variant(self, variant); + OpTy { layout, ..op } + } + }) + } + + // Take an operand, representing a pointer, and dereference it -- that + // will always be a MemPlace. + pub(super) fn deref_operand( + &self, + src: OpTy<'tcx>, + ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { + let val = self.read_value(src)?; + trace!("deref to {} on {:?}", val.layout.ty, val); + Ok(self.ref_to_mplace(val)?)
+ } + + pub fn operand_projection( + &self, + base: OpTy<'tcx>, + proj_elem: &mir::PlaceElem<'tcx>, + ) -> EvalResult<'tcx, OpTy<'tcx>> { + use rustc::mir::ProjectionElem::*; + Ok(match *proj_elem { + Field(field, _) => self.operand_field(base, field.index() as u64)?, + Downcast(_, variant) => self.operand_downcast(base, variant)?, + Deref => self.deref_operand(base)?.into(), + // The rest should only occur as mplace, we do not use Immediates for types + // allowing such operations. This matches place_projection forcing an allocation. + Subslice { .. } | ConstantIndex { .. } | Index(_) => { + let mplace = base.to_mem_place(); + self.mplace_projection(mplace, proj_elem)?.into() + } + }) + } + + // Evaluate a place with the goal of reading from it. This lets us sometimes + // avoid allocations. + fn eval_place_to_op( + &mut self, + mir_place: &mir::Place<'tcx>, + ) -> EvalResult<'tcx, OpTy<'tcx>> { + use rustc::mir::Place::*; + Ok(match *mir_place { + Local(mir::RETURN_PLACE) => return err!(ReadFromReturnPointer), + Local(local) => { + let op = *self.frame().locals[local].access()?; + OpTy { op, layout: self.layout_of_local(self.cur_frame(), local)? } + }, + + Projection(ref proj) => { + let op = self.eval_place_to_op(&proj.base)?; + self.operand_projection(op, &proj.elem)? + } + + // Everything else is an mplace, so we just call `eval_place`. + // Note that getting an mplace for a static always requires `&mut`, + // so this does not "cost" us anything in terms of mutability. + Promoted(_) | Static(_) => { + let place = self.eval_place(mir_place)?; + place.to_mem_place().into() + } + }) + } + + /// Evaluate the operand, returning a place where you can then find the data. + pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, OpTy<'tcx>> { + use rustc::mir::Operand::*; + match *op { + // FIXME: do some more logic on `move` to invalidate the old location + Copy(ref place) | + Move(ref place) => + self.eval_place_to_op(place), + + Constant(ref constant) => { + let ty = self.monomorphize(op.ty(self.mir(), *self.tcx), self.substs()); + let layout = self.layout_of(ty)?; + let op = self.const_value_to_op(constant.literal.val)?; + Ok(OpTy { op, layout }) + } + } + } + + /// Evaluate a bunch of operands at once + pub(crate) fn eval_operands( + &mut self, + ops: &[mir::Operand<'tcx>], + ) -> EvalResult<'tcx, Vec<OpTy<'tcx>>> { + ops.into_iter() + .map(|op| self.eval_operand(op)) + .collect() + } + + // Also used e.g. when miri runs into a constant. + // Unfortunately, this needs an `&mut` to be able to allocate a copy of a `ByRef` + // constant. This bleeds up to `eval_operand` needing `&mut`.
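A minimal illustration of why the `&mut` is forced: copying a `ByRef` constant into the interpreter means inserting a fresh allocation into its memory map, which mutates interpreter state (hypothetical `Memory` stand-in, not the real API):

```rust
use std::collections::HashMap;

// Minimal stand-in for the allocation map: copying a by-ref constant in
// necessarily mutates the interpreter, hence `&mut self` all the way up.
struct Memory {
    next_id: usize,
    allocs: HashMap<usize, Vec<u8>>,
}

impl Memory {
    fn allocate_copy(&mut self, bytes: &[u8]) -> usize {
        let id = self.next_id;
        self.next_id += 1;
        self.allocs.insert(id, bytes.to_vec());
        id
    }
}

fn main() {
    let mut mem = Memory { next_id: 0, allocs: HashMap::new() };
    // a by-ref constant holding 42i32 (little-endian)
    let id = mem.allocate_copy(&42i32.to_le_bytes());
    println!("constant copied into allocation {} ({} allocs total)", id, mem.allocs.len());
}
```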
+ pub fn const_value_to_op( + &mut self, + val: ConstValue<'tcx>, + ) -> EvalResult<'tcx, Operand> { + match val { + ConstValue::Unevaluated(def_id, substs) => { + let instance = self.resolve(def_id, substs)?; + self.global_to_op(GlobalId { + instance, + promoted: None, + }) + } + ConstValue::ByRef(alloc, offset) => { + // FIXME: Allocate new AllocId for all constants inside + let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?; + Ok(Operand::from_ptr(Pointer::new(id, offset), alloc.align)) + }, + ConstValue::ScalarPair(a, b) => + Ok(Operand::Immediate(Value::ScalarPair(a.into(), b))), + ConstValue::Scalar(x) => + Ok(Operand::Immediate(Value::Scalar(x.into()))), + } + } + + pub(super) fn global_to_op(&mut self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Operand> { + let cv = self.const_eval(gid)?; + self.const_value_to_op(cv.val) + } + + /// We cannot do self.read_value(self.eval_operand) due to eval_operand taking &mut self, + /// so this helps avoid unnecessary let. + pub fn eval_operand_and_read_valty( + &mut self, + op: &mir::Operand<'tcx>, + ) -> EvalResult<'tcx, ValTy<'tcx>> { + let op = self.eval_operand(op)?; + self.read_value(op) + } + pub fn eval_operand_and_read_scalar( + &mut self, + op: &mir::Operand<'tcx>, + ) -> EvalResult<'tcx, ScalarMaybeUndef> { + Ok(self.eval_operand_and_read_valty(op)?.to_scalar_or_undef()) + } + + /// reads a tag and produces the corresponding variant index + pub fn read_discriminant_as_variant_index( + &self, + rval: OpTy<'tcx>, + ) -> EvalResult<'tcx, usize> { + match rval.layout.variants { + layout::Variants::Single { index } => Ok(index), + layout::Variants::Tagged { .. } => { + let discr_val = self.read_discriminant_value(rval)?; + rval.layout.ty + .ty_adt_def() + .expect("tagged layout for non adt") + .discriminants(self.tcx.tcx) + .position(|var| var.val == discr_val) + .ok_or_else(|| EvalErrorKind::InvalidDiscriminant.into()) + } + layout::Variants::NicheFilling { .. } => { + let discr_val = self.read_discriminant_value(rval)?; + assert_eq!(discr_val as usize as u128, discr_val); + Ok(discr_val as usize) + }, + } + } + + pub fn read_discriminant_value( + &self, + rval: OpTy<'tcx>, + ) -> EvalResult<'tcx, u128> { + trace!("read_discriminant_value {:#?}", rval.layout); + if rval.layout.abi == layout::Abi::Uninhabited { + return err!(Unreachable); + } + + match rval.layout.variants { + layout::Variants::Single { index } => { + let discr_val = rval.layout.ty.ty_adt_def().map_or( + index as u128, + |def| def.discriminant_for_variant(*self.tcx, index).val); + return Ok(discr_val); + } + layout::Variants::Tagged { .. } | + layout::Variants::NicheFilling { .. } => {}, + } + let discr_op = self.operand_field(rval, 0)?; + let discr_val = self.read_value(discr_op)?; + trace!("discr value: {:?}", discr_val); + let raw_discr = discr_val.to_scalar()?; + Ok(match rval.layout.variants { + layout::Variants::Single { .. } => bug!(), + // FIXME: We should catch invalid discriminants here! + layout::Variants::Tagged { .. } => { + if discr_val.layout.ty.is_signed() { + let i = raw_discr.to_bits(discr_val.layout.size)? 
as i128; + // going from layout tag type to typeck discriminant type + // requires first sign extending with the layout discriminant + let shift = 128 - discr_val.layout.size.bits(); + let sexted = (i << shift) >> shift; + // and then zeroing with the typeck discriminant type + let discr_ty = rval.layout.ty + .ty_adt_def().expect("tagged layout corresponds to adt") + .repr + .discr_type(); + let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty); + let shift = 128 - discr_ty.size().bits(); + let truncatee = sexted as u128; + (truncatee << shift) >> shift + } else { + raw_discr.to_bits(discr_val.layout.size)? + } + }, + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. + } => { + let variants_start = *niche_variants.start() as u128; + let variants_end = *niche_variants.end() as u128; + match raw_discr { + Scalar::Ptr(_) => { + assert!(niche_start == 0); + assert!(variants_start == variants_end); + dataful_variant as u128 + }, + Scalar::Bits { bits: raw_discr, size } => { + assert_eq!(size as u64, discr_val.layout.size.bytes()); + let discr = raw_discr.wrapping_sub(niche_start) + .wrapping_add(variants_start); + if variants_start <= discr && discr <= variants_end { + discr + } else { + dataful_variant as u128 + } + }, + } + } + }) + } + +} diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs index 732c85bd014..c5475f9a4c0 100644 --- a/src/librustc_mir/interpret/operator.rs +++ b/src/librustc_mir/interpret/operator.rs @@ -1,58 +1,39 @@ use rustc::mir; -use rustc::ty::{self, Ty, layout}; +use rustc::ty::{self, layout::{self, TyLayout}}; use syntax::ast::FloatTy; -use rustc::ty::layout::{LayoutOf, TyLayout}; use rustc_apfloat::ieee::{Double, Single}; use rustc_apfloat::Float; +use rustc::mir::interpret::{EvalResult, Scalar}; -use super::{EvalContext, Place, Machine, ValTy}; +use super::{EvalContext, PlaceTy, Value, Machine, ValTy}; -use rustc::mir::interpret::{EvalResult, Scalar, Value}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - fn binop_with_overflow( - &self, - op: mir::BinOp, - left: ValTy<'tcx>, - right: ValTy<'tcx>, - ) -> EvalResult<'tcx, (Scalar, bool)> { - let left_val = self.value_to_scalar(left)?; - let right_val = self.value_to_scalar(right)?; - self.binary_op(op, left_val, left.ty, right_val, right.ty) - } - /// Applies the binary operation `op` to the two operands and writes a tuple of the result /// and a boolean signifying the potential overflow to the destination. - pub fn intrinsic_with_overflow( + pub fn binop_with_overflow( &mut self, op: mir::BinOp, left: ValTy<'tcx>, right: ValTy<'tcx>, - dest: Place, - dest_ty: Ty<'tcx>, + dest: PlaceTy<'tcx>, ) -> EvalResult<'tcx> { - let (val, overflowed) = self.binop_with_overflow(op, left, right)?; + let (val, overflowed) = self.binary_op(op, left, right)?; let val = Value::ScalarPair(val.into(), Scalar::from_bool(overflowed).into()); - let valty = ValTy { - value: val, - ty: dest_ty, - }; - self.write_value(valty, dest) + self.write_value(val, dest) } /// Applies the binary operation `op` to the arguments and writes the result to the - /// destination. Returns `true` if the operation overflowed. - pub fn intrinsic_overflowing( + /// destination. 
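The shift trick used for the discriminant tag in `read_discriminant_value` above is the same computation as the `sign_extend`/`truncate` helpers that moved to the bottom of `eval_context.rs`; a worked example, with a plain bit width standing in for `layout.size.bits()`:

```rust
// Same arithmetic as the interpreter's sign_extend/truncate helpers.
fn sign_extend(value: u128, bits: u32) -> u128 {
    let shift = 128 - bits;
    // shift the value up so its sign bit becomes bit 127, then shift back down
    // as i128 so the arithmetic right shift drags the sign bit along (fills 1s)
    (((value << shift) as i128) >> shift) as u128
}

fn truncate(value: u128, bits: u32) -> u128 {
    let shift = 128 - bits;
    // shift out the high bits, then shift back, refilling with zeroes
    (value << shift) >> shift
}

fn main() {
    let raw = 0xFEu128; // an 8-bit tag storing -2
    let extended = sign_extend(raw, 8);
    assert_eq!(extended as i128, -2); // the high 120 bits are now all ones
    assert_eq!(truncate(extended, 8), 0xFE); // dropping them recovers the tag
}
```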
+ pub fn binop_ignore_overflow( &mut self, op: mir::BinOp, left: ValTy<'tcx>, right: ValTy<'tcx>, - dest: Place, - dest_ty: Ty<'tcx>, - ) -> EvalResult<'tcx, bool> { - let (val, overflowed) = self.binop_with_overflow(op, left, right)?; - self.write_scalar(dest, val, dest_ty)?; - Ok(overflowed) + dest: PlaceTy<'tcx>, + ) -> EvalResult<'tcx> { + let (val, _overflowed) = self.binary_op(op, left, right)?; + self.write_scalar(val, dest) } } @@ -61,29 +42,29 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { pub fn binary_op( &self, bin_op: mir::BinOp, - left: Scalar, - left_ty: Ty<'tcx>, - right: Scalar, - right_ty: Ty<'tcx>, + ValTy { value: left, layout: left_layout }: ValTy<'tcx>, + ValTy { value: right, layout: right_layout }: ValTy<'tcx>, ) -> EvalResult<'tcx, (Scalar, bool)> { use rustc::mir::BinOp::*; - let left_layout = self.layout_of(left_ty)?; - let right_layout = self.layout_of(right_ty)?; + let left = left.to_scalar()?; + let right = right.to_scalar()?; let left_kind = match left_layout.abi { layout::Abi::Scalar(ref scalar) => scalar.value, - _ => return err!(TypeNotPrimitive(left_ty)), + _ => return err!(TypeNotPrimitive(left_layout.ty)), }; let right_kind = match right_layout.abi { layout::Abi::Scalar(ref scalar) => scalar.value, - _ => return err!(TypeNotPrimitive(right_ty)), + _ => return err!(TypeNotPrimitive(right_layout.ty)), }; trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind); // I: Handle operations that support pointers if !left_kind.is_float() && !right_kind.is_float() { - if let Some(handled) = M::try_ptr_op(self, bin_op, left, left_ty, right, right_ty)? { + if let Some(handled) = + M::try_ptr_op(self, bin_op, left, left_layout, right, right_layout)? + { return Ok(handled); } } @@ -188,7 +169,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } } - if let ty::TyFloat(fty) = left_ty.sty { + if let ty::TyFloat(fty) = left_layout.ty.sty { macro_rules! float_math { ($ty:path, $size:expr) => {{ let l = <$ty>::from_bits(l); @@ -220,7 +201,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } } - let size = self.layout_of(left_ty).unwrap().size.bytes() as u8; + let size = left_layout.size.bytes() as u8; // only ints left let val = match bin_op { @@ -260,9 +241,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { "unimplemented binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, - left_ty, + left_layout.ty, right, - right_ty, + right_layout.ty, ); return err!(Unimplemented(msg)); } diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 91c25192306..c3ae78dbecf 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -1,423 +1,374 @@ +//! Computations on places -- field projections, going from mir::Place, and writing +//! into a place. +//! All high-level functions to write to memory work on places as destinations. 
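The projections this module implements reduce to offset arithmetic: a field access is `base + offset(i)`, an array or slice index is `base + stride * i` (cf. `mplace_field` below). Schematically, with integer addresses standing in for `Scalar` pointers:

```rust
// FieldPlacement::Arbitrary: each field has a precomputed byte offset.
fn field_ptr(base: usize, offsets: &[usize], field: usize) -> usize {
    base + offsets[field]
}

// FieldPlacement::Array: all elements share one stride, bounds-checked.
fn index_ptr(base: usize, stride: usize, i: usize, len: usize) -> usize {
    assert!(i < len, "index {} out of bounds for length {}", i, len);
    base + stride * i
}

fn main() {
    // struct { a: u8, b: u32 } laid out with offsets [0, 4]
    assert_eq!(field_ptr(0x1000, &[0, 4], 1), 0x1004);
    // [u32; 8]: stride 4
    assert_eq!(index_ptr(0x1000, 4, 3, 8), 0x100C);
}
```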
+ +use std::hash::{Hash, Hasher}; +use std::convert::TryFrom; + use rustc::mir; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, HasDataLayout}; use rustc_data_structures::indexed_vec::Idx; -use rustc::mir::interpret::{GlobalId, Value, Scalar, EvalResult, Pointer, ScalarMaybeUndef}; -use super::{EvalContext, Machine, ValTy}; -use interpret::memory::HasMemory; +use rustc::mir::interpret::{ + GlobalId, Scalar, EvalResult, Pointer, ScalarMaybeUndef +}; +use super::{EvalContext, Machine, Value, ValTy, Operand, OpTy, MemoryKind}; + +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub struct MemPlace { + /// A place may have an integral pointer for ZSTs, since it might + /// be turned back into a reference before ever being dereferenced. + /// However, it may never be undef. + pub ptr: Scalar, + pub align: Align, + pub extra: PlaceExtra, +} #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub enum Place { /// A place referring to a value allocated in the `Memory` system. - Ptr { - /// A place may have an invalid (integral or undef) pointer, - /// since it might be turned back into a reference - /// before ever being dereferenced. - ptr: ScalarMaybeUndef, - align: Align, - extra: PlaceExtra, - }, + Ptr(MemPlace), - /// A place referring to a value on the stack. Represented by a stack frame index paired with - /// a Mir local index. - Local { frame: usize, local: mir::Local }, + /// To support alloc-free locals, we are able to write directly to a local. + /// (Without that optimization, a `Place` would just always be a `MemPlace`.) + Local { + frame: usize, + local: mir::Local, + }, } +// Extra information for fat pointers / places #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub enum PlaceExtra { None, Length(u64), Vtable(Pointer), - DowncastVariant(usize), } -impl<'tcx> Place { - /// Produces a Place that will error if attempted to be read from - pub fn undef() -> Self { - Self::from_scalar_ptr(ScalarMaybeUndef::Undef, Align::from_bytes(1, 1).unwrap()) - } +#[derive(Copy, Clone, Debug)] +pub struct PlaceTy<'tcx> { + place: Place, + pub layout: TyLayout<'tcx>, +} - pub fn from_scalar_ptr(ptr: ScalarMaybeUndef, align: Align) -> Self { - Place::Ptr { +impl<'tcx> ::std::ops::Deref for PlaceTy<'tcx> { + type Target = Place; + fn deref(&self) -> &Place { + &self.place + } +} + +/// A MemPlace with its layout. Constructing it is only possible in this module.
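For intuition on the `PlaceExtra` field that `MemPlace` carries: it is exactly the metadata word of a fat pointer, kept alongside the place so that unsized data remains self-describing. A toy rendering with integer addresses:

```rust
// Toy model of PlaceExtra: the metadata half of a (possibly fat) pointer.
#[derive(Debug)]
enum PlaceExtra {
    None,          // thin pointer
    Length(u64),   // slice: element count
    Vtable(usize), // trait object: vtable address
}

#[derive(Debug)]
struct MemPlace {
    addr: usize,
    extra: PlaceExtra,
}

fn main() {
    // *const i32: thin pointer, no extra metadata
    let thin = MemPlace { addr: 0x1000, extra: PlaceExtra::None };
    // *const [i32]: the place carries the element count
    let slice = MemPlace { addr: 0x1000, extra: PlaceExtra::Length(8) };
    // *const dyn Trait: the place carries the vtable pointer
    let trait_obj = MemPlace { addr: 0x1000, extra: PlaceExtra::Vtable(0x2000) };
    println!("{:?}\n{:?}\n{:?}", thin, slice, trait_obj);
}
```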
+#[derive(Copy, Clone, Debug)] +pub struct MPlaceTy<'tcx> { + mplace: MemPlace, + pub layout: TyLayout<'tcx>, +} + +impl<'tcx> ::std::ops::Deref for MPlaceTy<'tcx> { + type Target = MemPlace; + fn deref(&self) -> &MemPlace { + &self.mplace + } +} + +impl<'tcx> From<MPlaceTy<'tcx>> for PlaceTy<'tcx> { + fn from(mplace: MPlaceTy<'tcx>) -> Self { + PlaceTy { + place: Place::Ptr(mplace.mplace), + layout: mplace.layout + } + } +} + +impl MemPlace { + #[inline(always)] + pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self { + MemPlace { ptr, align, extra: PlaceExtra::None, } } + #[inline(always)] pub fn from_ptr(ptr: Pointer, align: Align) -> Self { - Self::from_scalar_ptr(ScalarMaybeUndef::Scalar(ptr.into()), align) + Self::from_scalar_ptr(ptr.into(), align) } - pub fn to_ptr_align_extra(self) -> (ScalarMaybeUndef, Align, PlaceExtra) { - match self { - Place::Ptr { ptr, align, extra } => (ptr, align, extra), - _ => bug!("to_ptr_and_extra: expected Place::Ptr, got {:?}", self), - - } - } - - pub fn to_ptr_align(self) -> (ScalarMaybeUndef, Align) { - let (ptr, align, _extra) = self.to_ptr_align_extra(); - (ptr, align) + #[inline(always)] + pub fn to_scalar_ptr_align(self) -> (Scalar, Align) { + assert_eq!(self.extra, PlaceExtra::None); + (self.ptr, self.align) } + /// Extract the ptr part of the mplace + #[inline(always)] pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { // At this point, we forget about the alignment information -- the place has been turned into a reference, // and no matter where it came from, it now must be aligned. - self.to_ptr_align().0.unwrap_or_err()?.to_ptr() + self.to_scalar_ptr_align().0.to_ptr() } - pub(super) fn elem_ty_and_len( - self, - ty: Ty<'tcx>, - tcx: TyCtxt<'_, 'tcx, '_> - ) -> (Ty<'tcx>, u64) { - match ty.sty { - ty::TyArray(elem, n) => (elem, n.unwrap_usize(tcx)), - - ty::TySlice(elem) => { - match self { - Place::Ptr { extra: PlaceExtra::Length(len), .. } => (elem, len), - _ => { - bug!( - "elem_ty_and_len of a TySlice given non-slice place: {:?}", - self - ) - } - } - } - - _ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty), + /// Turn an mplace into a (thin or fat) pointer, as a reference, pointing to the same space. + /// This is the inverse of `ref_to_mplace`. + pub fn to_ref(self, cx: impl HasDataLayout) -> Value { + // We ignore the alignment of the place here -- special handling for packed structs ends + // at the `&` operator. + match self.extra { + PlaceExtra::None => Value::Scalar(self.ptr.into()), + PlaceExtra::Length(len) => Value::new_slice(self.ptr.into(), len, cx), + PlaceExtra::Vtable(vtable) => Value::new_dyn_trait(self.ptr.into(), vtable), } } } -impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - /// Reads a value from the place without going through the intermediate step of obtaining - /// a `miri::Place` - pub fn try_read_place( - &self, - place: &mir::Place<'tcx>, - ) -> EvalResult<'tcx, Option<Value>> { - use rustc::mir::Place::*; - match *place { - // Might allow this in the future, right now there's no way to do this from Rust code anyway - Local(mir::RETURN_PLACE) => err!(ReadFromReturnPointer), - // Directly reading a local will always succeed - Local(local) => self.frame().locals[local].access().map(Some), - // No fast path for statics. Reading from statics is rare and would require another - // Machine function to handle differently in miri.
- Promoted(_) | - Static(_) => Ok(None), - Projection(ref proj) => self.try_read_place_projection(proj), - } +impl<'tcx> MPlaceTy<'tcx> { + #[inline] + fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self { + MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align), layout } } - pub fn read_field( - &self, - base: Value, - variant: Option, - field: mir::Field, - mut base_layout: TyLayout<'tcx>, - ) -> EvalResult<'tcx, (Value, TyLayout<'tcx>)> { - if let Some(variant_index) = variant { - base_layout = base_layout.for_variant(self, variant_index); - } - let field_index = field.index(); - let field = base_layout.field(self, field_index)?; - if field.size.bytes() == 0 { - return Ok(( - Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 })), - field, - )); - } - let offset = base_layout.fields.offset(field_index); - let value = match base { - // the field covers the entire type - Value::ScalarPair(..) | - Value::Scalar(_) if offset.bytes() == 0 && field.size == base_layout.size => base, - // extract fields from types with `ScalarPair` ABI - Value::ScalarPair(a, b) => { - let val = if offset.bytes() == 0 { a } else { b }; - Value::Scalar(val) - }, - Value::ByRef(base_ptr, align) => { - let offset = base_layout.fields.offset(field_index); - let ptr = base_ptr.ptr_offset(offset, self)?; - let align = align.min(base_layout.align).min(field.align); - assert!(!field.is_unsized()); - Value::ByRef(ptr, align) - }, - Value::Scalar(val) => bug!("field access on non aggregate {:#?}, {:#?}", val, base_layout), + #[inline] + pub(super) fn len(self) -> u64 { + // Sanity check + let ty_len = match self.layout.fields { + layout::FieldPlacement::Array { count, .. } => count, + _ => bug!("Length for non-array layout {:?} requested", self.layout), }; - Ok((value, field)) - } - - fn try_read_place_projection( - &self, - proj: &mir::PlaceProjection<'tcx>, - ) -> EvalResult<'tcx, Option> { - use rustc::mir::ProjectionElem::*; - let base = match self.try_read_place(&proj.base)? { - Some(base) => base, - None => return Ok(None), - }; - let base_ty = self.place_ty(&proj.base); - let base_layout = self.layout_of(base_ty)?; - match proj.elem { - Field(field, _) => Ok(Some(self.read_field(base, None, field, base_layout)?.0)), - // The NullablePointer cases should work fine, need to take care for normal enums - Downcast(..) | - Subslice { .. } | - // reading index 0 or index 1 from a ByVal or ByVal pair could be optimized - ConstantIndex { .. } | Index(_) | - // No way to optimize this projection any better than the normal place path - Deref => Ok(None), - } - } - - /// Returns a value and (in case of a ByRef) if we are supposed to use aligned accesses. - pub(super) fn eval_and_read_place( - &mut self, - place: &mir::Place<'tcx>, - ) -> EvalResult<'tcx, Value> { - // Shortcut for things like accessing a fat pointer's field, - // which would otherwise (in the `eval_place` path) require moving a `ScalarPair` to memory - // and returning an `Place::Ptr` to it - if let Some(val) = self.try_read_place(place)? 
{
-            return Ok(val);
-        }
-        let place = self.eval_place(place)?;
-        self.read_place(place)
-    }
-
-    pub fn read_place(&self, place: Place) -> EvalResult<'tcx, Value> {
-        match place {
-            Place::Ptr { ptr, align, extra } => {
-                assert_eq!(extra, PlaceExtra::None);
-                Ok(Value::ByRef(ptr.unwrap_or_err()?, align))
-            }
-            Place::Local { frame, local } => self.stack[frame].locals[local].access(),
-        }
-    }
-
-    pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, Place> {
-        use rustc::mir::Place::*;
-        let place = match *mir_place {
-            Local(mir::RETURN_PLACE) => self.frame().return_place,
-            Local(local) => Place::Local {
-                frame: self.cur_frame(),
-                local,
-            },
-
-            Promoted(ref promoted) => {
-                let instance = self.frame().instance;
-                let val = self.read_global_as_value(GlobalId {
-                    instance,
-                    promoted: Some(promoted.0),
-                })?;
-                if let Value::ByRef(ptr, align) = val {
-                    Place::Ptr {
-                        ptr: ptr.into(),
-                        align,
-                        extra: PlaceExtra::None,
-                    }
-                } else {
-                    bug!("evaluated promoted and got {:#?}", val);
-                }
-            }
-
-            Static(ref static_) => {
-                let layout = self.layout_of(self.place_ty(mir_place))?;
-                let instance = ty::Instance::mono(*self.tcx, static_.def_id);
-                let cid = GlobalId {
-                    instance,
-                    promoted: None
-                };
-                let alloc = Machine::init_static(self, cid)?;
-                Place::Ptr {
-                    ptr: ScalarMaybeUndef::Scalar(Scalar::Ptr(alloc.into())),
-                    align: layout.align,
-                    extra: PlaceExtra::None,
-                }
-            }
-
-            Projection(ref proj) => {
-                let ty = self.place_ty(&proj.base);
-                let place = self.eval_place(&proj.base)?;
-                return self.eval_place_projection(place, ty, &proj.elem);
-            }
-        };
-
-        self.dump_local(place);
-
-        Ok(place)
-    }
-
-    pub fn place_field(
-        &mut self,
-        base: Place,
-        field: mir::Field,
-        mut base_layout: TyLayout<'tcx>,
-    ) -> EvalResult<'tcx, (Place, TyLayout<'tcx>)> {
-        match base {
-            Place::Ptr { extra: PlaceExtra::DowncastVariant(variant_index), .. } => {
-                base_layout = base_layout.for_variant(&self, variant_index);
-            }
-            _ => {}
-        }
-        let field_index = field.index();
-        let field = base_layout.field(&self, field_index)?;
-        let offset = base_layout.fields.offset(field_index);
-
-        // Do not allocate in trivial cases
-        let (base_ptr, base_align, base_extra) = match base {
-            Place::Ptr { ptr, align, extra } => (ptr, align, extra),
-            Place::Local { frame, local } => {
-                match (self.stack[frame].locals[local].access()?, &base_layout.abi) {
-                    // in case the field covers the entire type, just return the value
-                    (Value::Scalar(_), &layout::Abi::Scalar(_)) |
-                    (Value::ScalarPair(..), &layout::Abi::ScalarPair(..))
-                        if offset.bytes() == 0 && field.size == base_layout.size => {
-                        return Ok((base, field))
-                    },
-                    _ => self.force_allocation(base)?.to_ptr_align_extra(),
-                }
-            }
-        };
-
-        let offset = match base_extra {
-            PlaceExtra::Vtable(tab) => {
-                let (_, align) = self.size_and_align_of_dst(
-                    base_layout.ty,
-                    base_ptr.to_value_with_vtable(tab),
-                )?;
-                offset.abi_align(align)
-            }
-            _ => offset,
-        };
-
-        let ptr = base_ptr.ptr_offset(offset, &self)?;
-        let align = base_align.min(base_layout.align).min(field.align);
-        let extra = if !field.is_unsized() {
-            PlaceExtra::None
+        if let PlaceExtra::Length(len) = self.extra {
+            len
         } else {
-            match base_extra {
-                PlaceExtra::None => bug!("expected fat pointer"),
-                PlaceExtra::DowncastVariant(..) => {
-                    bug!("Rust doesn't support unsized fields in enum variants")
-                }
-                PlaceExtra::Vtable(_) |
-                PlaceExtra::Length(_) => {}
-            }
-            base_extra
-        };
+            ty_len
+        }
+    }
+}
 
-        Ok((Place::Ptr { ptr, align, extra }, field))
+// Validation needs to hash MPlaceTy, but we cannot hash Layout -- so we just hash the type
+impl<'tcx> Hash for MPlaceTy<'tcx> {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.mplace.hash(state);
+        self.layout.ty.hash(state);
+    }
+}
+impl<'tcx> PartialEq for MPlaceTy<'tcx> {
+    fn eq(&self, other: &Self) -> bool {
+        self.mplace == other.mplace && self.layout.ty == other.layout.ty
+    }
+}
+impl<'tcx> Eq for MPlaceTy<'tcx> {}
+
+impl<'tcx> OpTy<'tcx> {
+    pub fn try_as_mplace(self) -> Result<MPlaceTy<'tcx>, Value> {
+        match *self {
+            Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
+            Operand::Immediate(value) => Err(value),
+        }
     }
 
-    pub fn val_to_place(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Place> {
-        let layout = self.layout_of(ty)?;
-        Ok(match self.tcx.struct_tail(ty).sty {
+    #[inline]
+    pub fn to_mem_place(self) -> MPlaceTy<'tcx> {
+        self.try_as_mplace().unwrap()
+    }
+}
+
+impl<'tcx> Place {
+    /// Produces a Place that will error when read from or written to
+    #[inline]
+    pub fn null(cx: impl HasDataLayout) -> Self {
+        Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1, 1).unwrap())
+    }
+
+    #[inline]
+    pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self {
+        Place::Ptr(MemPlace::from_scalar_ptr(ptr, align))
+    }
+
+    #[inline]
+    pub fn from_ptr(ptr: Pointer, align: Align) -> Self {
+        Place::Ptr(MemPlace::from_ptr(ptr, align))
+    }
+
+    #[inline]
+    pub fn to_mem_place(self) -> MemPlace {
+        match self {
+            Place::Ptr(mplace) => mplace,
+            _ => bug!("to_mem_place: expected Place::Ptr, got {:?}", self),
+        }
+    }
+
+    #[inline]
+    pub fn to_scalar_ptr_align(self) -> (Scalar, Align) {
+        self.to_mem_place().to_scalar_ptr_align()
+    }
+
+    #[inline]
+    pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
+        self.to_mem_place().to_ptr()
+    }
+}
+
+impl<'tcx> PlaceTy<'tcx> {
+    /// Produces a Place that will error when read from or written to
+    #[inline]
+    pub fn null(cx: impl HasDataLayout, layout: TyLayout<'tcx>) -> Self {
+        PlaceTy { place: Place::from_scalar_ptr(Scalar::ptr_null(cx), layout.align), layout }
+    }
+
+    #[inline]
+    pub fn to_mem_place(self) -> MPlaceTy<'tcx> {
+        MPlaceTy { mplace: self.place.to_mem_place(), layout: self.layout }
+    }
+}
+
+impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
+    /// Take a value, which represents a (thin or fat) reference, and make it a place.
+    /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref`.
+    pub fn ref_to_mplace(
+        &self, val: ValTy<'tcx>
+    ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+        let pointee_type = val.layout.ty.builtin_deref(true).unwrap().ty;
+        let layout = self.layout_of(pointee_type)?;
+        let mplace = match self.tcx.struct_tail(pointee_type).sty {
             ty::TyDynamic(..) => {
-                let (ptr, vtable) = self.into_ptr_vtable_pair(val)?;
-                Place::Ptr {
+                let (ptr, vtable) = val.to_scalar_dyn_trait()?;
+                MemPlace {
                     ptr,
                     align: layout.align,
                     extra: PlaceExtra::Vtable(vtable),
                 }
             }
             ty::TyStr | ty::TySlice(_) => {
-                let (ptr, len) = self.into_slice(val)?;
-                Place::Ptr {
+                let (ptr, len) = val.to_scalar_slice(self)?;
+                MemPlace {
                     ptr,
                     align: layout.align,
                     extra: PlaceExtra::Length(len),
                 }
             }
-            _ => Place::from_scalar_ptr(self.into_ptr(val)?, layout.align),
+            _ => MemPlace {
+                ptr: val.to_scalar()?,
+                align: layout.align,
+                extra: PlaceExtra::None,
+            },
+        };
+        Ok(MPlaceTy { mplace, layout })
+    }
+
+    /// Offset a pointer to project to a field. Unlike place_field, this is always
+    /// possible without allocating, so it can take &self. Also return the field's layout.
+    /// This supports both struct and array fields.
+    #[inline(always)]
+    pub fn mplace_field(
+        &self,
+        base: MPlaceTy<'tcx>,
+        field: u64,
+    ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+        // Not using the layout method because we want to compute on u64
+        let offset = match base.layout.fields {
+            layout::FieldPlacement::Arbitrary { ref offsets, .. } =>
+                offsets[usize::try_from(field).unwrap()],
+            layout::FieldPlacement::Array { stride, .. } => {
+                let len = base.len();
+                assert!(field < len, "Tried to access element {} of array/slice with length {}", field, len);
+                stride * field
+            }
+            _ => bug!("Unexpected layout for field access: {:#?}", base.layout),
+        };
+        // the only way conversion can fail is if this is an array (otherwise we already panicked
+        // above). In that case, all fields have the same layout.
+        let field = base.layout.field(self, usize::try_from(field).unwrap_or(0))?;
+
+        // Adjust offset
+        let offset = match base.extra {
+            PlaceExtra::Vtable(tab) => {
+                let (_, align) = self.size_and_align_of_dst(ValTy {
+                    layout: base.layout,
+                    value: Value::new_dyn_trait(base.ptr, tab),
+                })?;
+                offset.abi_align(align)
+            }
+            _ => offset,
+        };
+
+        let ptr = base.ptr.ptr_offset(offset, self)?;
+        let align = base.align.min(field.align);
+        let extra = if !field.is_unsized() {
+            PlaceExtra::None
+        } else {
+            assert!(base.extra != PlaceExtra::None, "Expected fat ptr");
+            base.extra
+        };
+
+        Ok(MPlaceTy { mplace: MemPlace { ptr, align, extra }, layout: field })
+    }
+
+    pub fn mplace_subslice(
+        &self,
+        base: MPlaceTy<'tcx>,
+        from: u64,
+        to: u64,
+    ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+        let len = base.len();
+        assert!(from <= len - to);
+
+        // Not using layout method because that works with usize, and does not work with slices
+        // (that have count 0 in their layout).
+        let from_offset = match base.layout.fields {
+            layout::FieldPlacement::Array { stride, .. } =>
+                stride * from,
+            _ => bug!("Unexpected layout of index access: {:#?}", base.layout),
+        };
+        let ptr = base.ptr.ptr_offset(from_offset, self)?;
+
+        // Compute extra and new layout
+        let inner_len = len - to - from;
+        let (extra, ty) = match base.layout.ty.sty {
+            ty::TyArray(inner, _) =>
+                (PlaceExtra::None, self.tcx.mk_array(inner, inner_len)),
+            ty::TySlice(..) =>
+                (PlaceExtra::Length(inner_len), base.layout.ty),
+            _ =>
+                bug!("cannot subslice non-array type: `{:?}`", base.layout.ty),
+        };
+        let layout = self.layout_of(ty)?;
+
+        Ok(MPlaceTy {
+            mplace: MemPlace { ptr, align: base.align, extra },
+            layout })
     }
 
-    pub fn place_index(
-        &mut self,
-        base: Place,
-        outer_ty: Ty<'tcx>,
-        n: u64,
-    ) -> EvalResult<'tcx, Place> {
-        // Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
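The removed comment above captures what this hunk changes: the old `place_index` had to be handed the outer array type to learn the element count, whereas the new `mplace_field` reads the length off the place itself (`base.len()`) and computes a plain `stride * index` byte offset. A minimal, self-contained sketch of that arithmetic, with bare integers standing in for the interpreter's `Size` type (names are illustrative, not from the patch):

    // Offset of element `index` in an array/slice whose elements are
    // `stride` bytes apart, with the bounds check `mplace_field` performs.
    fn array_elem_offset(stride: u64, index: u64, len: u64) -> u64 {
        assert!(index < len,
            "Tried to access element {} of array/slice with length {}", index, len);
        stride * index
    }

    fn main() {
        // e.g. a `[u32; 10]`: 4-byte stride, so element 3 sits at byte 12
        assert_eq!(array_elem_offset(4, 3, 10), 12);
    }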
-        let base = self.force_allocation(base)?;
-        let (base_ptr, align) = base.to_ptr_align();
-
-        let (elem_ty, len) = base.elem_ty_and_len(outer_ty, self.tcx.tcx);
-        let elem_size = self.layout_of(elem_ty)?.size;
-        assert!(
-            n < len,
-            "Tried to access element {} of array/slice with length {}",
-            n,
-            len
-        );
-        let ptr = base_ptr.ptr_offset(elem_size * n, &*self)?;
-        Ok(Place::Ptr {
-            ptr,
-            align,
-            extra: PlaceExtra::None,
-        })
-    }
-
-    pub(super) fn place_downcast(
-        &mut self,
-        base: Place,
+    pub fn mplace_downcast(
+        &self,
+        base: MPlaceTy<'tcx>,
         variant: usize,
-    ) -> EvalResult<'tcx, Place> {
-        // FIXME(solson)
-        let base = self.force_allocation(base)?;
-        let (ptr, align) = base.to_ptr_align();
-        let extra = PlaceExtra::DowncastVariant(variant);
-        Ok(Place::Ptr { ptr, align, extra })
+    ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+        // Downcasts only change the layout
+        assert_eq!(base.extra, PlaceExtra::None);
+        Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base })
     }
 
-    pub fn eval_place_projection(
-        &mut self,
-        base: Place,
-        base_ty: Ty<'tcx>,
-        proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
-    ) -> EvalResult<'tcx, Place> {
+    /// Project into an mplace
+    pub fn mplace_projection(
+        &self,
+        base: MPlaceTy<'tcx>,
+        proj_elem: &mir::PlaceElem<'tcx>,
+    ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
         use rustc::mir::ProjectionElem::*;
-        match *proj_elem {
-            Field(field, _) => {
-                let layout = self.layout_of(base_ty)?;
-                Ok(self.place_field(base, field, layout)?.0)
-            }
-
-            Downcast(_, variant) => {
-                self.place_downcast(base, variant)
-            }
-
-            Deref => {
-                let val = self.read_place(base)?;
-
-                let pointee_type = match base_ty.sty {
-                    ty::TyRawPtr(ref tam) => tam.ty,
-                    ty::TyRef(_, ty, _) => ty,
-                    ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(),
-                    _ => bug!("can only deref pointer types"),
-                };
-
-                trace!("deref to {} on {:?}", pointee_type, val);
-
-                self.val_to_place(val, pointee_type)
-            }
+        Ok(match *proj_elem {
+            Field(field, _) => self.mplace_field(base, field.index() as u64)?,
+            Downcast(_, variant) => self.mplace_downcast(base, variant)?,
+            Deref => self.deref_operand(base.into())?,
             Index(local) => {
-                let value = self.frame().locals[local].access()?;
-                let ty = self.tcx.types.usize;
-                let n = self
-                    .value_to_scalar(ValTy { value, ty })?
-                    .to_bits(self.tcx.data_layout.pointer_size)?;
-                self.place_index(base, base_ty, n as u64)
+                let n = *self.frame().locals[local].access()?;
+                let n_layout = self.layout_of(self.tcx.types.usize)?;
+                let n = self.read_scalar(OpTy { op: n, layout: n_layout })?;
+                let n = n.to_bits(self.tcx.data_layout.pointer_size)?;
+                self.mplace_field(base, u64::try_from(n).unwrap())?
             }
 
             ConstantIndex {
@@ -425,12 +376,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                 min_length,
                 from_end,
             } => {
-                // FIXME(solson)
-                let base = self.force_allocation(base)?;
-                let (base_ptr, align) = base.to_ptr_align();
-
-                let (elem_ty, n) = base.elem_ty_and_len(base_ty, self.tcx.tcx);
-                let elem_size = self.layout_of(elem_ty)?.size;
+                let n = base.len();
                 assert!(n >= min_length as u64);
 
                 let index = if from_end {
@@ -439,34 +385,334 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                     u64::from(offset)
                 };
 
-                let ptr = base_ptr.ptr_offset(elem_size * index, &self)?;
-                Ok(Place::Ptr { ptr, align, extra: PlaceExtra::None })
+                self.mplace_field(base, index)?
             }
 
-            Subslice { from, to } => {
-                // FIXME(solson)
-                let base = self.force_allocation(base)?;
-                let (base_ptr, align) = base.to_ptr_align();
+            Subslice { from, to } =>
+                self.mplace_subslice(base, u64::from(from), u64::from(to))?,
+        })
+    }
 
-                let (elem_ty, n) = base.elem_ty_and_len(base_ty, self.tcx.tcx);
-                let elem_size = self.layout_of(elem_ty)?.size;
-                assert!(u64::from(from) <= n - u64::from(to));
-                let ptr = base_ptr.ptr_offset(elem_size * u64::from(from), &self)?;
-                // sublicing arrays produces arrays
-                let extra = if self.type_is_sized(base_ty) {
-                    PlaceExtra::None
-                } else {
-                    PlaceExtra::Length(n - u64::from(to) - u64::from(from))
+    /// Get the place of a field inside the place, and also the field's type.
+    /// Just a convenience function, but used quite a bit.
+    pub fn place_field(
+        &mut self,
+        base: PlaceTy<'tcx>,
+        field: u64,
+    ) -> EvalResult<'tcx, PlaceTy<'tcx>> {
+        // FIXME: We could try to be smarter and avoid allocation for fields that span the
+        // entire place.
+        let mplace = self.force_allocation(base)?;
+        Ok(self.mplace_field(mplace, field)?.into())
+    }
+
+    pub fn place_downcast(
+        &mut self,
+        base: PlaceTy<'tcx>,
+        variant: usize,
+    ) -> EvalResult<'tcx, PlaceTy<'tcx>> {
+        // Downcast just changes the layout
+        Ok(match base.place {
+            Place::Ptr(mplace) =>
+                self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into(),
+            Place::Local { .. } => {
+                let layout = base.layout.for_variant(&self, variant);
+                PlaceTy { layout, ..base }
+            }
+        })
+    }
+
+    /// Project into a place
+    pub fn place_projection(
+        &mut self,
+        base: PlaceTy<'tcx>,
+        proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>,
+    ) -> EvalResult<'tcx, PlaceTy<'tcx>> {
+        use rustc::mir::ProjectionElem::*;
+        Ok(match *proj_elem {
+            Field(field, _) => self.place_field(base, field.index() as u64)?,
+            Downcast(_, variant) => self.place_downcast(base, variant)?,
+            Deref => self.deref_operand(self.place_to_op(base)?)?.into(),
+            // For the other variants, we have to force an allocation.
+            // This matches `operand_projection`.
+            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
+                let mplace = self.force_allocation(base)?;
+                self.mplace_projection(mplace, proj_elem)?.into()
+            }
+        })
+    }
+
+    /// Compute a place. You should only use this if you intend to write into this
+    /// place; for reading, a more efficient alternative is `eval_place_for_read`.
+    pub fn eval_place(&mut self, mir_place: &mir::Place<'tcx>) -> EvalResult<'tcx, PlaceTy<'tcx>> {
+        use rustc::mir::Place::*;
+        let place = match *mir_place {
+            Local(mir::RETURN_PLACE) => PlaceTy {
+                place: self.frame().return_place,
+                layout: self.layout_of_local(self.cur_frame(), mir::RETURN_PLACE)?,
+            },
+            Local(local) => PlaceTy {
+                place: Place::Local {
+                    frame: self.cur_frame(),
+                    local,
+                },
+                layout: self.layout_of_local(self.cur_frame(), local)?,
+            },
+
+            Promoted(ref promoted) => {
+                let instance = self.frame().instance;
+                let op = self.global_to_op(GlobalId {
+                    instance,
+                    promoted: Some(promoted.0),
+                })?;
+                let mplace = op.to_mem_place();
+                let ty = self.monomorphize(promoted.1, self.substs());
+                PlaceTy {
+                    place: Place::Ptr(mplace),
+                    layout: self.layout_of(ty)?,
+                }
+            }
+
+            Static(ref static_) => {
+                let ty = self.monomorphize(static_.ty, self.substs());
+                let layout = self.layout_of(ty)?;
+                let instance = ty::Instance::mono(*self.tcx, static_.def_id);
+                let cid = GlobalId {
+                    instance,
+                    promoted: None
+                };
+                let alloc = Machine::init_static(self, cid)?;
+                MPlaceTy::from_aligned_ptr(alloc.into(), layout).into()
+            }
+
+            Projection(ref proj) => {
+                let place = self.eval_place(&proj.base)?;
+                self.place_projection(place, &proj.elem)?
+            }
+        };
+
+        self.dump_place(place.place);
+
+        Ok(place)
+    }
+
+    /// Write a scalar to a place
+    pub fn write_scalar(
+        &mut self,
+        val: impl Into<ScalarMaybeUndef>,
+        dest: PlaceTy<'tcx>,
+    ) -> EvalResult<'tcx> {
+        self.write_value(Value::Scalar(val.into()), dest)
+    }
+
+    /// Write a value to a place
+    pub fn write_value(
+        &mut self,
+        src_val: Value,
+        dest: PlaceTy<'tcx>,
+    ) -> EvalResult<'tcx> {
+        // See if we can avoid an allocation. This is the counterpart to `try_read_value`,
+        // but not factored as a separate function.
+        match dest.place {
+            Place::Local { frame, local } => {
+                match *self.stack[frame].locals[local].access_mut()? {
+                    Operand::Immediate(ref mut dest_val) => {
+                        // Yay, we can just change the local directly.
+                        *dest_val = src_val;
+                        return Ok(());
+                    },
+                    _ => {},
+                }
+            },
+            _ => {},
+        };
+
+        // Slow path: write to memory
+        let dest = self.force_allocation(dest)?;
+        self.write_value_to_mplace(src_val, dest)
+    }
+
+    /// Write a value to memory
+    fn write_value_to_mplace(
+        &mut self,
+        value: Value,
+        dest: MPlaceTy<'tcx>,
+    ) -> EvalResult<'tcx> {
+        trace!("write_value_to_mplace: {:#?}, {:#?}", value, dest.layout);
+        assert_eq!(dest.extra, PlaceExtra::None);
+        // Note that it is really important that the type here is the right one, and matches
+        // the type things are read at.
+        // In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding
+        // properly, which is only correct if we never look at this data with the wrong type.
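The `match` that follows writes a `Value` according to the destination's ABI: a lone `Scalar` is written at the layout's own size, while a `ScalarPair` is laid out as two scalars with the second placed at `a_size.abi_align(b_align)`. A self-contained sketch of that pair-offset computation, using bare integers instead of `Size`/`Align` (the round-up semantics of `abi_align` are assumed from context):

    // Where the second scalar of a ScalarPair lands, mirroring
    // `let b_offset = a_size.abi_align(b_align)` in the code below.
    fn abi_align(offset: u64, align: u64) -> u64 {
        (offset + align - 1) / align * align // round up to a multiple of `align`
    }

    fn main() {
        // e.g. a pair of (u8, u32): the u32 is 4-byte aligned,
        // so it starts at byte 4, leaving 3 bytes of padding.
        let (a_size, b_align) = (1u64, 4u64);
        assert_eq!(abi_align(a_size, b_align), 4);
    }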
+        match value {
+            Value::Scalar(scalar) => {
+                let signed = match dest.layout.abi {
+                    layout::Abi::Scalar(ref scal) => match scal.value {
+                        layout::Primitive::Int(_, signed) => signed,
+                        _ => false,
+                    },
+                    _ => false,
+                };
+                self.memory.write_scalar(
+                    dest.ptr, dest.align, scalar, dest.layout.size, dest.layout.align, signed
+                )
+            }
+            Value::ScalarPair(a_val, b_val) => {
+                let (a, b) = match dest.layout.abi {
+                    layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
+                    _ => bug!("write_value_to_mplace: invalid ScalarPair layout: {:#?}", dest.layout)
+                };
+                let (a_size, b_size) = (a.size(&self), b.size(&self));
+                let (a_align, b_align) = (a.align(&self), b.align(&self));
+                let a_ptr = dest.ptr;
+                let b_offset = a_size.abi_align(b_align);
+                let b_ptr = a_ptr.ptr_offset(b_offset, &self)?.into();
+                // TODO: What about signedness?
+                self.memory.write_scalar(a_ptr, dest.align, a_val, a_size, a_align, false)?;
+                self.memory.write_scalar(b_ptr, dest.align, b_val, b_size, b_align, false)
             }
         }
     }
 
-    pub fn place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
-        self.monomorphize(
-            place.ty(self.mir(), *self.tcx).to_ty(*self.tcx),
-            self.substs(),
+    /// Copy the data from an operand to a place
+    pub fn copy_op(
+        &mut self,
+        src: OpTy<'tcx>,
+        dest: PlaceTy<'tcx>,
+    ) -> EvalResult<'tcx> {
+        trace!("Copying {:?} to {:?}", src, dest);
+        assert_eq!(src.layout.size, dest.layout.size, "Size mismatch when copying!");
+
+        // Let us see if the layout is simple enough to take a shortcut and avoid
+        // forcing an allocation.
+        let (src_ptr, src_align) = match self.try_read_value(src)? {
+            Ok(src_val) =>
+                // Yay, we got a value that we can write directly.
+                return self.write_value(src_val, dest),
+            Err(mplace) => mplace.to_scalar_ptr_align(),
+        };
+        // Slow path, this does not fit into an immediate. Just memcpy.
+        let (dest_ptr, dest_align) = self.force_allocation(dest)?.to_scalar_ptr_align();
+        self.memory.copy(
+            src_ptr, src_align,
+            dest_ptr, dest_align,
+            src.layout.size, false
         )
     }
+
+    /// Make sure that a place is in memory, and return where it is.
+    /// This is essentially `force_to_memplace`.
+    pub fn force_allocation(
+        &mut self,
+        place: PlaceTy<'tcx>,
+    ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+        let mplace = match place.place {
+            Place::Local { frame, local } => {
+                // We need the layout of the local. We can NOT use the layout we got,
+                // that might e.g. be a downcast variant!
+                let local_layout = self.layout_of_local(frame, local)?;
+                // Make sure it has a place
+                let rval = *self.stack[frame].locals[local].access()?;
+                let mplace = self.allocate_op(OpTy { op: rval, layout: local_layout })?.mplace;
+                // Point the local at the (possibly freshly allocated) place
+                *self.stack[frame].locals[local].access_mut()? =
+                    Operand::Indirect(mplace);
+                // done
+                mplace
+            }
+            Place::Ptr(mplace) => mplace
+        };
+        // Return with the original layout, so that the caller can go on
+        Ok(MPlaceTy { mplace, layout: place.layout })
+    }
+
+    pub fn allocate(
+        &mut self,
+        layout: TyLayout<'tcx>,
+        kind: MemoryKind<M::MemoryKinds>,
+    ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+        assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
+        let ptr = self.memory.allocate(layout.size, layout.align, kind)?;
+        Ok(MPlaceTy::from_aligned_ptr(ptr, layout))
+    }
+
+    /// Make a place for an operand, allocating if needed
+    pub fn allocate_op(
+        &mut self,
+        OpTy { op, layout }: OpTy<'tcx>,
+    ) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+        Ok(match op {
+            Operand::Indirect(mplace) => MPlaceTy { mplace, layout },
+            Operand::Immediate(value) => {
+                // FIXME: Is stack always right here?
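`force_allocation` above and the `Operand::Immediate` arm this FIXME sits in implement the same policy: a value held directly in a local is spilled to a fresh allocation the moment a memory place for it is required, and the local is then redirected at that allocation. A toy model of that policy, with an integer `Vec` standing in for the interpreter's memory (all names here are illustrative, not the patch's types):

    // Locals are either held by value or point into `memory`.
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Local {
        Immediate(u64),
        Indirect(usize), // index of the backing allocation
    }

    // Counterpart of `force_allocation`: guarantee the local has a memory
    // place, spilling the immediate value on first use.
    fn force_allocation(local: &mut Local, memory: &mut Vec<u64>) -> usize {
        match *local {
            Local::Indirect(idx) => idx,       // already in memory
            Local::Immediate(val) => {
                memory.push(val);              // allocate and write the value
                let idx = memory.len() - 1;
                *local = Local::Indirect(idx); // later writes go through memory
                idx
            }
        }
    }

    fn main() {
        let mut memory = Vec::new();
        let mut local = Local::Immediate(42);
        let idx = force_allocation(&mut local, &mut memory);
        assert_eq!(memory[idx], 42);
        assert_eq!(local, Local::Indirect(idx));
    }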
+                let ptr = self.allocate(layout, MemoryKind::Stack)?;
+                self.write_value_to_mplace(value, ptr)?;
+                ptr
+            },
+        })
+    }
+
+    pub fn write_discriminant_value(
+        &mut self,
+        variant_index: usize,
+        dest: PlaceTy<'tcx>,
+    ) -> EvalResult<'tcx> {
+        match dest.layout.variants {
+            layout::Variants::Single { index } => {
+                if index != variant_index {
+                    // If the layout of an enum is `Single`, all
+                    // other variants are necessarily uninhabited.
+                    assert_eq!(dest.layout.for_variant(&self, variant_index).abi,
+                               layout::Abi::Uninhabited);
+                }
+            }
+            layout::Variants::Tagged { ref tag, .. } => {
+                let discr_val = dest.layout.ty.ty_adt_def().unwrap()
+                    .discriminant_for_variant(*self.tcx, variant_index)
+                    .val;
+
+                // raw discriminants for enums are isize or bigger during
+                // their computation, but the in-memory tag is the smallest possible
+                // representation
+                let size = tag.value.size(self.tcx.tcx);
+                let shift = 128 - size.bits();
+                let discr_val = (discr_val << shift) >> shift;
+
+                let discr_dest = self.place_field(dest, 0)?;
+                self.write_scalar(Scalar::Bits {
+                    bits: discr_val,
+                    size: size.bytes() as u8,
+                }, discr_dest)?;
+            }
+            layout::Variants::NicheFilling {
+                dataful_variant,
+                ref niche_variants,
+                niche_start,
+                ..
+            } => {
+                if variant_index != dataful_variant {
+                    let niche_dest =
+                        self.place_field(dest, 0)?;
+                    let niche_value = ((variant_index - niche_variants.start()) as u128)
+                        .wrapping_add(niche_start);
+                    self.write_scalar(Scalar::Bits {
+                        bits: niche_value,
+                        size: niche_dest.layout.size.bytes() as u8,
+                    }, niche_dest)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Every place can be read from, so we can turn it into an operand
+    pub fn place_to_op(&self, place: PlaceTy<'tcx>) -> EvalResult<'tcx, OpTy<'tcx>> {
+        let op = match place.place {
+            Place::Ptr(mplace) => {
+                Operand::Indirect(mplace)
+            }
+            Place::Local { frame, local } =>
+                *self.stack[frame].locals[local].access()?
+        };
+        Ok(OpTy { op, layout: place.layout })
+    }
 }
diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs
index 27a5fcdaf2e..33ed1862fc3 100644
--- a/src/librustc_mir/interpret/step.rs
+++ b/src/librustc_mir/interpret/step.rs
@@ -2,12 +2,11 @@
 //!
 //! The main entry point is the `step` method.
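One detail of `write_discriminant_value` above is easy to read past: discriminants are computed at 128-bit width, but the stored tag uses the layout's smaller size, so the value is truncated with a shift pair, `(discr_val << shift) >> shift` where `shift = 128 - size.bits()`. A standalone sketch of that trick (the signed variant is the sign extension the old `read_discriminant_value` used; the function names are mine, not the patch's):

    // Keep only the low `size_bits` bits of a 128-bit discriminant.
    fn truncate(value: u128, size_bits: u32) -> u128 {
        let shift = 128 - size_bits;
        (value << shift) >> shift
    }

    // The same shifts on i128 sign-extend instead of zero-extending.
    fn sign_extend(value: u128, size_bits: u32) -> i128 {
        let shift = 128 - size_bits;
        ((value as i128) << shift) >> shift
    }

    fn main() {
        assert_eq!(truncate(0x1_00, 8), 0);   // bit 8 does not fit a u8 tag
        assert_eq!(sign_extend(0xFF, 8), -1); // 0xFF read back as i8 is -1
    }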
-use rustc::{mir, ty};
+use rustc::mir;
 use rustc::ty::layout::LayoutOf;
-use rustc::mir::interpret::{EvalResult, Scalar, Value};
-use rustc_data_structures::indexed_vec::Idx;
+use rustc::mir::interpret::{EvalResult, Scalar};
 
-use super::{EvalContext, Machine, PlaceExtra, ValTy};
+use super::{EvalContext, Machine};
 
 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
     pub fn inc_step_counter_and_detect_loops(&mut self) -> EvalResult<'tcx, ()> {
@@ -86,8 +85,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                 variant_index,
             } => {
                 let dest = self.eval_place(place)?;
-                let dest_ty = self.place_ty(place);
-                self.write_discriminant_value(dest_ty, dest, variant_index)?;
+                self.write_discriminant_value(variant_index, dest)?;
             }
 
             // Mark locals as alive
@@ -98,7 +96,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
 
             // Mark locals as dead
             StorageDead(local) => {
-                let old_val = self.frame_mut().storage_dead(local);
+                let old_val = self.storage_dead(local);
                 self.deallocate_local(old_val)?;
             }
@@ -139,58 +137,46 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
         place: &mir::Place<'tcx>,
     ) -> EvalResult<'tcx> {
         let dest = self.eval_place(place)?;
-        let dest_ty = self.place_ty(place);
-        let dest_layout = self.layout_of(dest_ty)?;
 
         use rustc::mir::Rvalue::*;
         match *rvalue {
             Use(ref operand) => {
-                let value = self.eval_operand(operand)?.value;
-                let valty = ValTy {
-                    value,
-                    ty: dest_ty,
-                };
-                self.write_value(valty, dest)?;
+                let op = self.eval_operand(operand)?;
+                self.copy_op(op, dest)?;
             }
 
             BinaryOp(bin_op, ref left, ref right) => {
-                let left = self.eval_operand(left)?;
-                let right = self.eval_operand(right)?;
-                self.intrinsic_overflowing(
+                let left = self.eval_operand_and_read_valty(left)?;
+                let right = self.eval_operand_and_read_valty(right)?;
+                self.binop_ignore_overflow(
                     bin_op,
                     left,
                     right,
                     dest,
-                    dest_ty,
                 )?;
             }
 
             CheckedBinaryOp(bin_op, ref left, ref right) => {
-                let left = self.eval_operand(left)?;
-                let right = self.eval_operand(right)?;
-                self.intrinsic_with_overflow(
+                let left = self.eval_operand_and_read_valty(left)?;
+                let right = self.eval_operand_and_read_valty(right)?;
+                self.binop_with_overflow(
                     bin_op,
                     left,
                     right,
                     dest,
-                    dest_ty,
                 )?;
             }
 
             UnaryOp(un_op, ref operand) => {
-                let val = self.eval_operand_to_scalar(operand)?;
-                let val = self.unary_op(un_op, val, dest_layout)?;
-                self.write_scalar(
-                    dest,
-                    val,
-                    dest_ty,
-                )?;
+                let val = self.eval_operand_and_read_scalar(operand)?;
+                let val = self.unary_op(un_op, val.not_undef()?, dest.layout)?;
+                self.write_scalar(val, dest)?;
             }
 
             Aggregate(ref kind, ref operands) => {
                 let (dest, active_field_index) = match **kind {
                     mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
-                        self.write_discriminant_value(dest_ty, dest, variant_index)?;
+                        self.write_discriminant_value(variant_index, dest)?;
                         if adt_def.is_enum() {
                             (self.place_downcast(dest, variant_index)?, active_field_index)
                         } else {
@@ -200,41 +186,34 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                     _ => (dest, None)
                 };
 
-                let layout = self.layout_of(dest_ty)?;
                 for (i, operand) in operands.iter().enumerate() {
-                    let value = self.eval_operand(operand)?;
+                    let op = self.eval_operand(operand)?;
                     // Ignore zero-sized fields.
-                    if !self.layout_of(value.ty)?.is_zst() {
+                    if !op.layout.is_zst() {
                         let field_index = active_field_index.unwrap_or(i);
-                        let (field_dest, _) = self.place_field(dest, mir::Field::new(field_index), layout)?;
-                        self.write_value(value, field_dest)?;
+                        let field_dest = self.place_field(dest, field_index as u64)?;
+                        self.copy_op(op, field_dest)?;
                     }
                 }
             }
 
             Repeat(ref operand, _) => {
-                let (elem_ty, length) = match dest_ty.sty {
-                    ty::TyArray(elem_ty, n) => (elem_ty, n.unwrap_usize(self.tcx.tcx)),
-                    _ => {
-                        bug!(
-                            "tried to assign array-repeat to non-array type {:?}",
-                            dest_ty
-                        )
-                    }
-                };
-                let elem_size = self.layout_of(elem_ty)?.size;
-                let value = self.eval_operand(operand)?.value;
-
-                let (dest, dest_align) = self.force_allocation(dest)?.to_ptr_align();
+                let op = self.eval_operand(operand)?;
+                let dest = self.force_allocation(dest)?;
+                let length = dest.len();
 
                 if length > 0 {
-                    let dest = dest.unwrap_or_err()?;
-                    //write the first value
-                    self.write_value_to_ptr(value, dest, dest_align, elem_ty)?;
+                    // write the first
+                    let first = self.mplace_field(dest, 0)?;
+                    self.copy_op(op, first.into())?;
 
                     if length > 1 {
-                        let rest = dest.ptr_offset(elem_size * 1 as u64, &self)?;
-                        self.memory.copy_repeatedly(dest, dest_align, rest, dest_align, elem_size, length - 1, false)?;
+                        // copy the rest
+                        let (dest, dest_align) = first.to_scalar_ptr_align();
+                        let rest = dest.ptr_offset(first.layout.size, &self)?;
+                        self.memory.copy_repeatedly(
+                            dest, dest_align, rest, dest_align, first.layout.size, length - 1, true
+                        )?;
                     }
                 }
             }
@@ -242,43 +221,26 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
             Len(ref place) => {
                 // FIXME(CTFE): don't allow computing the length of arrays in const eval
                 let src = self.eval_place(place)?;
-                let ty = self.place_ty(place);
-                let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx);
+                let mplace = self.force_allocation(src)?;
+                let len = mplace.len();
                 let size = self.memory.pointer_size().bytes() as u8;
                 self.write_scalar(
-                    dest,
                     Scalar::Bits {
                         bits: len as u128,
                         size,
                     },
-                    dest_ty,
+                    dest,
                 )?;
             }
 
             Ref(_, _, ref place) => {
                 let src = self.eval_place(place)?;
-                // We ignore the alignment of the place here -- special handling for packed structs ends
-                // at the `&` operator.
-                let (ptr, _align, extra) = self.force_allocation(src)?.to_ptr_align_extra();
-
-                let val = match extra {
-                    PlaceExtra::None => Value::Scalar(ptr),
-                    PlaceExtra::Length(len) => ptr.to_value_with_len(len, self.tcx.tcx),
-                    PlaceExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable),
-                    PlaceExtra::DowncastVariant(..) => {
-                        bug!("attempted to take a reference to an enum downcast place")
-                    }
-                };
-                let valty = ValTy {
-                    value: val,
-                    ty: dest_ty,
-                };
-                self.write_value(valty, dest)?;
+                let val = self.force_allocation(src)?.to_ref(&self);
+                self.write_value(val, dest)?;
             }
 
-            NullaryOp(mir::NullOp::Box, ty) => {
-                let ty = self.monomorphize(ty, self.substs());
-                M::box_alloc(self, ty, dest)?;
+            NullaryOp(mir::NullOp::Box, _) => {
+                M::box_alloc(self, dest)?;
             }
 
             NullaryOp(mir::NullOp::SizeOf, ty) => {
@@ -288,35 +250,32 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                     "SizeOf nullary MIR operator called for unsized type");
                 let size = self.memory.pointer_size().bytes() as u8;
                 self.write_scalar(
-                    dest,
                     Scalar::Bits {
                         bits: layout.size.bytes() as u128,
                         size,
                     },
-                    dest_ty,
+                    dest,
                 )?;
             }
 
             Cast(kind, ref operand, cast_ty) => {
-                debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty);
+                debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest.layout.ty);
                 let src = self.eval_operand(operand)?;
-                self.cast(src, kind, dest_ty, dest)?;
+                self.cast(src, kind, dest)?;
             }
 
             Discriminant(ref place) => {
-                let ty = self.place_ty(place);
-                let layout = self.layout_of(ty)?;
                 let place = self.eval_place(place)?;
-                let discr_val = self.read_discriminant_value(place, layout)?;
-                let size = self.layout_of(dest_ty).unwrap().size.bytes() as u8;
-                self.write_scalar(dest, Scalar::Bits {
+                let discr_val = self.read_discriminant_value(self.place_to_op(place)?)?;
+                let size = dest.layout.size.bytes() as u8;
+                self.write_scalar(Scalar::Bits {
                     bits: discr_val,
                     size,
-                }, dest_ty)?;
+                }, dest)?;
             }
         }
 
-        self.dump_local(dest);
+        self.dump_place(*dest);
 
         Ok(())
     }
diff --git a/src/librustc_mir/interpret/terminator/drop.rs b/src/librustc_mir/interpret/terminator/drop.rs
index f86c0e89954..df40f904616 100644
--- a/src/librustc_mir/interpret/terminator/drop.rs
+++ b/src/librustc_mir/interpret/terminator/drop.rs
@@ -1,16 +1,15 @@
 use rustc::mir::BasicBlock;
-use rustc::ty::{self, Ty};
+use rustc::ty::{self, Ty, layout::LayoutOf};
 use syntax::source_map::Span;
 
-use rustc::mir::interpret::{EvalResult, Value};
-use interpret::{Machine, ValTy, EvalContext, Place, PlaceExtra};
+use rustc::mir::interpret::{EvalResult};
+use interpret::{Machine, EvalContext, PlaceTy, Value, OpTy, Operand};
 
 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
     pub(crate) fn drop_place(
         &mut self,
-        place: Place,
+        place: PlaceTy<'tcx>,
         instance: ty::Instance<'tcx>,
-        ty: Ty<'tcx>,
         span: Span,
         target: BasicBlock,
     ) -> EvalResult<'tcx> {
@@ -18,25 +17,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
         // We take the address of the object. This may well be unaligned, which is fine for us here.
         // However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared
         // by rustc.
-        let val = match self.force_allocation(place)? {
-            Place::Ptr {
-                ptr,
-                align: _,
-                extra: PlaceExtra::Vtable(vtable),
-            } => ptr.to_value_with_vtable(vtable),
-            Place::Ptr {
-                ptr,
-                align: _,
-                extra: PlaceExtra::Length(len),
-            } => ptr.to_value_with_len(len, self.tcx.tcx),
-            Place::Ptr {
-                ptr,
-                align: _,
-                extra: PlaceExtra::None,
-            } => Value::Scalar(ptr),
-            _ => bug!("force_allocation broken"),
-        };
-        self.drop(val, instance, ty, span, target)
+        let val = self.force_allocation(place)?.to_ref(&self);
+        self.drop(val, instance, place.layout.ty, span, target)
     }
 
     fn drop(
@@ -52,7 +34,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
         let instance = match ty.sty {
             ty::TyDynamic(..) => {
                 if let Value::ScalarPair(_, vtable) = arg {
-                    self.read_drop_type_from_vtable(vtable.unwrap_or_err()?.to_ptr()?)?
+                    self.read_drop_type_from_vtable(vtable.to_ptr()?)?
                 } else {
                     bug!("expected fat ptr, got {:?}", arg);
                 }
@@ -61,17 +43,20 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
         };
 
         // the drop function expects a reference to the value
-        let valty = ValTy {
-            value: arg,
-            ty: self.tcx.mk_mut_ptr(ty),
+        let arg = OpTy {
+            op: Operand::Immediate(arg),
+            layout: self.layout_of(self.tcx.mk_mut_ptr(ty))?,
         };
 
         let fn_sig = self.tcx.fn_sig(instance.def_id()).skip_binder().clone();
 
+        // This should always be (), but getting it from the sig seems
+        // easier than creating a layout of ().
+        let dest = PlaceTy::null(&self, self.layout_of(fn_sig.output())?);
+
         self.eval_fn_call(
             instance,
-            Some((Place::undef(), target)),
-            &[valty],
+            Some((dest, target)),
+            &[arg],
             span,
             fn_sig,
         )
diff --git a/src/librustc_mir/interpret/terminator/mod.rs b/src/librustc_mir/interpret/terminator/mod.rs
index 3a772559d6d..25ce70e4f3b 100644
--- a/src/librustc_mir/interpret/terminator/mod.rs
+++ b/src/librustc_mir/interpret/terminator/mod.rs
@@ -1,14 +1,13 @@
 use rustc::mir;
 use rustc::ty::{self, Ty};
-use rustc::ty::layout::{LayoutOf, Size};
+use rustc::ty::layout::LayoutOf;
 use syntax::source_map::Span;
 use rustc_target::spec::abi::Abi;
 
-use rustc::mir::interpret::{EvalResult, Scalar, Value};
-use super::{EvalContext, Place, Machine, ValTy};
+use rustc::mir::interpret::{EvalResult, Scalar};
+use super::{EvalContext, Machine, Value, OpTy, PlaceTy, ValTy, Operand};
 use rustc_data_structures::indexed_vec::Idx;
-use interpret::memory::HasMemory;
 
 mod drop;
 
@@ -25,7 +24,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
         use rustc::mir::TerminatorKind::*;
         match terminator.kind {
             Return => {
-                self.dump_local(self.frame().return_place);
+                self.dump_place(self.frame().return_place);
                 self.pop_stack_frame()?
            }
 
@@ -38,21 +37,20 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                 ..
             } => {
                 let discr_val = self.eval_operand(discr)?;
-                let discr_prim = self.value_to_scalar(discr_val)?;
-                let discr_layout = self.layout_of(discr_val.ty).unwrap();
-                trace!("SwitchInt({:?}, {:#?})", discr_prim, discr_layout);
+                let discr = self.read_value(discr_val)?;
+                trace!("SwitchInt({:#?})", *discr);
 
                 // Branch to the `otherwise` case by default, if no match is found.
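The comment above states the convention the loop below relies on: a MIR `SwitchInt` carries one more target than it has values, and the trailing target is the `otherwise` edge. A minimal sketch of the selection logic, detached from the interpreter's `Scalar`/`binary_op` machinery (plain `u128` values stand in for scalars):

    // Pick a SwitchInt target: `targets` has exactly one more entry than
    // `values`; the last entry is the `otherwise` block.
    fn switch_target(discr: u128, values: &[u128], targets: &[usize]) -> usize {
        assert_eq!(targets.len(), values.len() + 1);
        for (i, &v) in values.iter().enumerate() {
            if v == discr {
                return targets[i];
            }
        }
        *targets.last().unwrap()
    }

    fn main() {
        assert_eq!(switch_target(1, &[0, 1], &[10, 11, 99]), 11); // matched 1
        assert_eq!(switch_target(7, &[0, 1], &[10, 11, 99]), 99); // otherwise
    }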
                 let mut target_block = targets[targets.len() - 1];
 
                 for (index, &const_int) in values.iter().enumerate() {
                     // Compare using binary_op
-                    let const_int = Scalar::Bits { bits: const_int, size: discr_layout.size.bytes() as u8 };
-                    let res = self.binary_op(mir::BinOp::Eq,
-                        discr_prim, discr_val.ty,
-                        const_int, discr_val.ty
+                    let const_int = Scalar::Bits { bits: const_int, size: discr.layout.size.bytes() as u8 };
+                    let (res, _) = self.binary_op(mir::BinOp::Eq,
+                        discr,
+                        ValTy { value: Value::Scalar(const_int.into()), layout: discr.layout }
                     )?;
-                    if res.0.to_bits(Size::from_bytes(1))? != 0 {
+                    if res.to_bool()? {
                         target_block = targets[index];
                         break;
                     }
@@ -73,9 +71,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                 };
 
                 let func = self.eval_operand(func)?;
-                let (fn_def, sig) = match func.ty.sty {
+                let (fn_def, sig) = match func.layout.ty.sty {
                     ty::TyFnPtr(sig) => {
-                        let fn_ptr = self.value_to_scalar(func)?.to_ptr()?;
+                        let fn_ptr = self.read_scalar(func)?.to_ptr()?;
                         let instance = self.memory.get_fn(fn_ptr)?;
                         let instance_ty = instance.ty(*self.tcx);
                         match instance_ty.sty {
@@ -99,14 +97,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                     }
                     ty::TyFnDef(def_id, substs) => (
                         self.resolve(def_id, substs)?,
-                        func.ty.fn_sig(*self.tcx),
+                        func.layout.ty.fn_sig(*self.tcx),
                     ),
                     _ => {
-                        let msg = format!("can't handle callee of type {:?}", func.ty);
+                        let msg = format!("can't handle callee of type {:?}", func.layout.ty);
                         return err!(Unimplemented(msg));
                     }
                 };
-                let args = self.operands_to_args(args)?;
+                let args = self.eval_operands(args)?;
                 let sig = self.tcx.normalize_erasing_late_bound_regions(
                     ty::ParamEnv::reveal_all(),
                     &sig,
@@ -114,7 +112,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                 self.eval_fn_call(
                     fn_def,
                     destination,
-                    &args,
+                    &args[..],
                     terminator.source_info.span,
                     sig,
                 )?;
@@ -127,19 +125,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
             } => {
                 // FIXME(CTFE): forbid drop in const eval
                 let place = self.eval_place(location)?;
-                let ty = self.place_ty(location);
-                let ty = self.tcx.subst_and_normalize_erasing_regions(
-                    self.substs(),
-                    ty::ParamEnv::reveal_all(),
-                    &ty,
-                );
+                let ty = place.layout.ty;
                 trace!("TerminatorKind::drop: {:?}, type {}", location, ty);
 
                 let instance = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
                 self.drop_place(
                     place,
                     instance,
-                    ty,
                     terminator.source_info.span,
                     target,
                 )?;
@@ -152,17 +144,17 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                 target,
                 ..
             } => {
-                let cond_val = self.eval_operand_to_scalar(cond)?.to_bool()?;
+                let cond_val = self.eval_operand_and_read_scalar(cond)?.not_undef()?.to_bool()?;
                 if expected == cond_val {
                     self.goto_block(target);
                 } else {
                     use rustc::mir::interpret::EvalErrorKind::*;
                     return match *msg {
                         BoundsCheck { ref len, ref index } => {
-                            let len = self.eval_operand_to_scalar(len)
+                            let len = self.eval_operand_and_read_scalar(len)
                                 .expect("can't eval len")
                                 .to_bits(self.memory().pointer_size())? as u64;
-                            let index = self.eval_operand_to_scalar(index)
+                            let index = self.eval_operand_and_read_scalar(index)
                                 .expect("can't eval index")
                                 .to_bits(self.memory().pointer_size())? as u64;
                             err!(BoundsCheck { len, index })
@@ -259,8 +251,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
     fn eval_fn_call(
         &mut self,
         instance: ty::Instance<'tcx>,
-        destination: Option<(Place, mir::BasicBlock)>,
-        args: &[ValTy<'tcx>],
+        destination: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+        args: &[OpTy<'tcx>],
         span: Span,
         sig: ty::FnSig<'tcx>,
     ) -> EvalResult<'tcx> {
@@ -271,24 +263,22 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                     Some(dest) => dest,
                     _ => return err!(Unreachable),
                 };
-                let ty = sig.output();
-                let layout = self.layout_of(ty)?;
-                M::call_intrinsic(self, instance, args, ret, layout, target)?;
-                self.dump_local(ret);
+                M::call_intrinsic(self, instance, args, ret, target)?;
+                self.dump_place(*ret);
                 Ok(())
             }
             // FIXME: figure out why we can't just go through the shim
             ty::InstanceDef::ClosureOnceShim { .. } => {
-                if M::eval_fn_call(self, instance, destination, args, span, sig)? {
+                if M::eval_fn_call(self, instance, destination, args, span)? {
                     return Ok(());
                 }
                 let mut arg_locals = self.frame().mir.args_iter();
                 match sig.abi {
                     // closure as closure once
                     Abi::RustCall => {
-                        for (arg_local, &valty) in arg_locals.zip(args) {
+                        for (arg_local, &op) in arg_locals.zip(args) {
                             let dest = self.eval_place(&mir::Place::Local(arg_local))?;
-                            self.write_value(valty, dest)?;
+                            self.copy_op(op, dest)?;
                         }
                     }
                     // non capture closure as fn ptr
@@ -301,12 +291,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                         );
                         trace!("args: {:#?}", args);
                         let local = arg_locals.nth(1).unwrap();
-                        for (i, &valty) in args.into_iter().enumerate() {
+                        for (i, &op) in args.into_iter().enumerate() {
                             let dest = self.eval_place(&mir::Place::Local(local).field(
                                 mir::Field::new(i),
-                                valty.ty,
+                                op.layout.ty,
                             ))?;
-                            self.write_value(valty, dest)?;
+                            self.copy_op(op, dest)?;
                         }
                     }
                     _ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi),
@@ -318,7 +308,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
             ty::InstanceDef::CloneShim(..) |
             ty::InstanceDef::Item(_) => {
                 // Push the stack frame, and potentially be entirely done if the call got hooked
-                if M::eval_fn_call(self, instance, destination, args, span, sig)? {
+                if M::eval_fn_call(self, instance, destination, args, span)? {
+                    // TODO: Can we make it return the frame to push, instead
+                    // of the hook doing half of the work and us doing the argument
+                    // initialization?
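Further down in this hunk, the `InstanceDef::Virtual` arm resolves a trait-object call by reading the function pointer out of the vtable at `ptr_size * (idx + 3)`; together with the traits.rs changes below (size read at offset `ptr_size`, align at `2 * ptr_size`, drop glue at 0), this pins down the vtable layout the interpreter assumes: drop function, size, align, then the methods. A small sketch of that slot arithmetic:

    // Vtable slot offsets as the interpreter uses them: slot 0 is the drop
    // function, slots 1 and 2 are size and align, methods start at slot 3.
    fn method_offset(ptr_size: u64, method_idx: u64) -> u64 {
        ptr_size * (method_idx + 3)
    }

    fn main() {
        // On a 64-bit target the first trait method sits 24 bytes in.
        assert_eq!(method_offset(8, 0), 24);
    }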
                     return Ok(());
                 }
 
@@ -338,26 +331,21 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                         // write first argument
                         let first_local = arg_locals.next().unwrap();
                         let dest = self.eval_place(&mir::Place::Local(first_local))?;
-                        self.write_value(args[0], dest)?;
+                        self.copy_op(args[0], dest)?;
                     }
 
                     // unpack and write all other args
-                    let layout = self.layout_of(args[1].ty)?;
-                    if let ty::TyTuple(_) = args[1].ty.sty {
+                    let layout = args[1].layout;
+                    if let ty::TyTuple(_) = layout.ty.sty {
                         if layout.is_zst() {
                             // Nothing to do, no need to unpack zsts
                             return Ok(());
                         }
                         if self.frame().mir.args_iter().count() == layout.fields.count() + 1 {
                             for (i, arg_local) in arg_locals.enumerate() {
-                                let field = mir::Field::new(i);
-                                let (value, layout) = self.read_field(args[1].value, None, field, layout)?;
+                                let arg = self.operand_field(args[1], i as u64)?;
                                 let dest = self.eval_place(&mir::Place::Local(arg_local))?;
-                                let valty = ValTy {
-                                    value,
-                                    ty: layout.ty,
-                                };
-                                self.write_value(valty, dest)?;
+                                self.copy_op(arg, dest)?;
                             }
                         } else {
                             trace!("manual impl of rust-call ABI");
@@ -365,20 +353,19 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
                             let dest = self.eval_place(
                                 &mir::Place::Local(arg_locals.next().unwrap()),
                             )?;
-                            self.write_value(args[1], dest)?;
+                            self.copy_op(args[1], dest)?;
                         }
                     } else {
                         bug!(
-                            "rust-call ABI tuple argument was {:#?}, {:#?}",
-                            args[1].ty,
+                            "rust-call ABI tuple argument was {:#?}",
                             layout
                         );
                     }
                 }
                 _ => {
-                    for (arg_local, &valty) in arg_locals.zip(args) {
+                    for (arg_local, &op) in arg_locals.zip(args) {
                         let dest = self.eval_place(&mir::Place::Local(arg_local))?;
-                        self.write_value(valty, dest)?;
+                        self.copy_op(op, dest)?;
                     }
                 }
             }
@@ -388,16 +375,16 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
             ty::InstanceDef::Virtual(_, idx) => {
                 let ptr_size = self.memory.pointer_size();
                 let ptr_align = self.tcx.data_layout.pointer_align;
-                let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?;
+                let (ptr, vtable) = self.read_value(args[0])?.to_scalar_dyn_trait()?;
                 let fn_ptr = self.memory.read_ptr_sized(
                     vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
                     ptr_align
-                )?.unwrap_or_err()?.to_ptr()?;
+                )?.to_ptr()?;
                 let instance = self.memory.get_fn(fn_ptr)?;
                 let mut args = args.to_vec();
-                let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty;
-                args[0].ty = ty;
-                args[0].value = Value::Scalar(ptr);
+                let layout = args[0].layout.field(&self, 0)?;
+                args[0].layout = layout;
+                args[0].op = Operand::Immediate(Value::Scalar(ptr.into()));
                 // recurse with concrete function
                 self.eval_fn_call(instance, destination, &args, span, sig)
             }
diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs
index 84583680988..1cc8644629e 100644
--- a/src/librustc_mir/interpret/traits.rs
+++ b/src/librustc_mir/interpret/traits.rs
@@ -72,7 +72,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
     ) -> EvalResult<'tcx, ty::Instance<'tcx>> {
         // we don't care about the pointee type, we just want a pointer
         let pointer_align = self.tcx.data_layout.pointer_align;
-        let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.unwrap_or_err()?.to_ptr()?;
+        let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?;
         self.memory.get_fn(drop_fn)
     }
 
@@ -82,11 +82,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
     ) -> EvalResult<'tcx, (Size, Align)> {
         let pointer_size = self.memory.pointer_size();
         let pointer_align = self.tcx.data_layout.pointer_align;
-        let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.unwrap_or_err()?.to_bits(pointer_size)? as u64;
+        let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64;
         let align = self.memory.read_ptr_sized(
             vtable.offset(pointer_size * 2, self)?,
             pointer_align
-        )?.unwrap_or_err()?.to_bits(pointer_size)? as u64;
+        )?.to_bits(pointer_size)? as u64;
         Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap()))
     }
 }
diff --git a/src/librustc_mir/interpret/value.rs b/src/librustc_mir/interpret/value.rs
deleted file mode 100644
index c450901eec8..00000000000
--- a/src/librustc_mir/interpret/value.rs
+++ /dev/null
@@ -1,572 +0,0 @@
-//! Reading and writing values from/to memory, handling LocalValue and the ByRef optimization,
-//! reading/writing discriminants
-
-use std::mem;
-
-use rustc::mir;
-use rustc::ty::layout::{self, Size, Align, IntegerExt, LayoutOf, TyLayout, Primitive};
-use rustc::ty::{self, Ty, TyCtxt, TypeAndMut};
-use rustc_data_structures::indexed_vec::{IndexVec, Idx};
-use rustc::mir::interpret::{
-    GlobalId, Value, Scalar, FrameInfo, AllocType,
-    EvalResult, EvalErrorKind, Pointer, ConstValue,
-    ScalarMaybeUndef,
-};
-
-use super::{Place, PlaceExtra, Memory, Frame,
-            HasMemory, MemoryKind,
-            Machine, ValTy, EvalContext};
-
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub enum LocalValue {
-    Dead,
-    Live(Value),
-}
-
-impl LocalValue {
-    pub fn access(self) -> EvalResult<'static, Value> {
-        match self {
-            LocalValue::Dead => err!(DeadLocal),
-            LocalValue::Live(val) => Ok(val),
-        }
-    }
-}
-
-impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
-    pub fn write_ptr(&mut self, dest: Place, val: Scalar, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> {
-        let valty = ValTy {
-            value: val.to_value(),
-            ty: dest_ty,
-        };
-        self.write_value(valty, dest)
-    }
-
-    pub fn write_scalar(
-        &mut self,
-        dest: Place,
-        val: impl Into<ScalarMaybeUndef>,
-        dest_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx> {
-        let valty = ValTy {
-            value: Value::Scalar(val.into()),
-            ty: dest_ty,
-        };
-        self.write_value(valty, dest)
-    }
-
-    pub fn write_value(
-        &mut self,
-        ValTy { value: src_val, ty: dest_ty } : ValTy<'tcx>,
-        dest: Place,
-    ) -> EvalResult<'tcx> {
-        //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty);
-        // Note that it is really important that the type here is the right one, and matches the type things are read at.
-        // In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding properly, which is only
-        // correct if we never look at this data with the wrong type.
-
-        match dest {
-            Place::Ptr { ptr, align, extra } => {
-                assert_eq!(extra, PlaceExtra::None);
-                self.write_value_to_ptr(src_val, ptr.unwrap_or_err()?, align, dest_ty)
-            }
-
-            Place::Local { frame, local } => {
-                let old_val = self.stack[frame].locals[local].access()?;
-                self.write_value_possibly_by_val(
-                    src_val,
-                    |this, val| this.stack[frame].set_local(local, val),
-                    old_val,
-                    dest_ty,
-                )
-            }
-        }
-    }
-
-    // The cases here can be a bit subtle. Read carefully!
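The removed function that follows (and its long comment) is the pre-refactor statement of an invariant that survives in the new `write_value`: once a destination is backed by an allocation, writes must go through memory, because other pointers may alias it; only a purely immediate local may be overwritten in place. A toy rendering of that rule (the types are stand-ins, not interpreter types):

    #[derive(Clone, Copy)]
    enum Dest {
        Immediate,    // local holds the value directly
        ByRef(usize), // local is backed by allocation `memory[idx]`
    }

    // Route a write the way `write_value`/`write_value_possibly_by_val` do:
    // in place for immediates, through memory for ByRef destinations.
    fn write(dest: Dest, val: u64, local: &mut u64, memory: &mut [u64]) {
        match dest {
            Dest::Immediate => *local = val,       // nobody else can observe it
            Dest::ByRef(idx) => memory[idx] = val, // aliases must see the store
        }
    }

    fn main() {
        let (mut local, mut memory) = (0u64, vec![0u64; 1]);
        write(Dest::Immediate, 7, &mut local, &mut memory);
        write(Dest::ByRef(0), 9, &mut local, &mut memory);
        assert_eq!((local, memory[0]), (7, 9));
    }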
-    fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>(
-        &mut self,
-        src_val: Value,
-        write_dest: F,
-        old_dest_val: Value,
-        dest_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx> {
-        // FIXME: this should be a layout check, not underlying value
-        if let Value::ByRef(dest_ptr, align) = old_dest_val {
-            // If the value is already `ByRef` (that is, backed by an `Allocation`),
-            // then we must write the new value into this allocation, because there may be
-            // other pointers into the allocation. These other pointers are logically
-            // pointers into the local variable, and must be able to observe the change.
-            //
-            // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
-            // knew for certain that there were no outstanding pointers to this allocation.
-            self.write_value_to_ptr(src_val, dest_ptr, align, dest_ty)?;
-        } else if let Value::ByRef(src_ptr, align) = src_val {
-            // If the value is not `ByRef`, then we know there are no pointers to it
-            // and we can simply overwrite the `Value` in the locals array directly.
-            //
-            // In this specific case, where the source value is `ByRef`, we must duplicate
-            // the allocation, because this is a by-value operation. It would be incorrect
-            // if they referred to the same allocation, since then a change to one would
-            // implicitly change the other.
-            //
-            // It is a valid optimization to attempt reading a primitive value out of the
-            // source and write that into the destination without making an allocation, so
-            // we do so here.
-            if let Ok(Some(src_val)) = self.try_read_value(src_ptr, align, dest_ty) {
-                write_dest(self, src_val)?;
-            } else {
-                let layout = self.layout_of(dest_ty)?;
-                let dest_ptr = self.alloc_ptr(layout)?.into();
-                self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?;
-                write_dest(self, Value::ByRef(dest_ptr, layout.align))?;
-            }
-        } else {
-            // Finally, we have the simple case where neither source nor destination are
-            // `ByRef`. We may simply copy the source value over the the destintion.
-            write_dest(self, src_val)?;
-        }
-        Ok(())
-    }
-
-    pub fn write_value_to_ptr(
-        &mut self,
-        value: Value,
-        dest: Scalar,
-        dest_align: Align,
-        dest_ty: Ty<'tcx>,
-    ) -> EvalResult<'tcx> {
-        let layout = self.layout_of(dest_ty)?;
-        trace!("write_value_to_ptr: {:#?}, {}, {:#?}", value, dest_ty, layout);
-        match value {
-            Value::ByRef(ptr, align) => {
-                self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false)
-            }
-            Value::Scalar(scalar) => {
-                let signed = match layout.abi {
-                    layout::Abi::Scalar(ref scal) => match scal.value {
-                        layout::Primitive::Int(_, signed) => signed,
-                        _ => false,
-                    },
-                    _ => false,
-                };
-                self.memory.write_scalar(dest, dest_align, scalar, layout.size, layout.align, signed)
-            }
-            Value::ScalarPair(a_val, b_val) => {
-                trace!("write_value_to_ptr valpair: {:#?}", layout);
-                let (a, b) = match layout.abi {
-                    layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
-                    _ => bug!("write_value_to_ptr: invalid ScalarPair layout: {:#?}", layout)
-                };
-                let (a_size, b_size) = (a.size(&self), b.size(&self));
-                let (a_align, b_align) = (a.align(&self), b.align(&self));
-                let a_ptr = dest;
-                let b_offset = a_size.abi_align(b_align);
-                let b_ptr = dest.ptr_offset(b_offset, &self)?.into();
-                // TODO: What about signedess?
-                self.memory.write_scalar(a_ptr, dest_align, a_val, a_size, a_align, false)?;
-                self.memory.write_scalar(b_ptr, dest_align, b_val, b_size, b_align, false)
-            }
-        }
-    }
-
-    pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
-        let layout = self.layout_of(ty)?;
-        self.memory.check_align(ptr, ptr_align)?;
-
-        if layout.size.bytes() == 0 {
-            return Ok(Some(Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits: 0, size: 0 }))));
-        }
-
-        let ptr = ptr.to_ptr()?;
-
-        match layout.abi {
-            layout::Abi::Scalar(..) => {
-                let scalar = self.memory.read_scalar(ptr, ptr_align, layout.size)?;
-                Ok(Some(Value::Scalar(scalar)))
-            }
-            layout::Abi::ScalarPair(ref a, ref b) => {
-                let (a, b) = (&a.value, &b.value);
-                let (a_size, b_size) = (a.size(self), b.size(self));
-                let a_ptr = ptr;
-                let b_offset = a_size.abi_align(b.align(self));
-                let b_ptr = ptr.offset(b_offset, self)?.into();
-                let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
-                let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?;
-                Ok(Some(Value::ScalarPair(a_val, b_val)))
-            }
-            _ => Ok(None),
-        }
-    }
-
-    pub fn read_value(&self, ptr: Scalar, align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
-        if let Some(val) = self.try_read_value(ptr, align, ty)? {
-            Ok(val)
-        } else {
-            bug!("primitive read failed for type: {:?}", ty);
-        }
-    }
-
-    pub(super) fn eval_operand_to_scalar(
-        &mut self,
-        op: &mir::Operand<'tcx>,
-    ) -> EvalResult<'tcx, Scalar> {
-        let valty = self.eval_operand(op)?;
-        self.value_to_scalar(valty)
-    }
-
-    pub(crate) fn operands_to_args(
-        &mut self,
-        ops: &[mir::Operand<'tcx>],
-    ) -> EvalResult<'tcx, Vec<ValTy<'tcx>>> {
-        ops.into_iter()
-            .map(|op| self.eval_operand(op))
-            .collect()
-    }
-
-    pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
-        use rustc::mir::Operand::*;
-        let ty = self.monomorphize(op.ty(self.mir(), *self.tcx), self.substs());
-        match *op {
-            // FIXME: do some more logic on `move` to invalidate the old location
-            Copy(ref place) |
-            Move(ref place) => {
-                Ok(ValTy {
-                    value: self.eval_and_read_place(place)?,
-                    ty
-                })
-            },
-
-            Constant(ref constant) => {
-                let value = self.const_to_value(constant.literal.val)?;
-
-                Ok(ValTy {
-                    value,
-                    ty,
-                })
-            }
-        }
-    }
-
-    pub fn deallocate_local(&mut self, local: LocalValue) -> EvalResult<'tcx> {
-        // FIXME: should we tell the user that there was a local which was never written to?
-        if let LocalValue::Live(Value::ByRef(ptr, _align)) = local {
-            trace!("deallocating local");
-            let ptr = ptr.to_ptr()?;
-            self.memory.dump_alloc(ptr.alloc_id);
-            self.memory.deallocate_local(ptr)?;
-        };
-        Ok(())
-    }
-
-    pub fn allocate_place_for_value(
-        &mut self,
-        value: Value,
-        layout: TyLayout<'tcx>,
-        variant: Option<usize>,
-    ) -> EvalResult<'tcx, Place> {
-        let (ptr, align) = match value {
-            Value::ByRef(ptr, align) => (ptr, align),
-            Value::ScalarPair(..) | Value::Scalar(_) => {
-                let ptr = self.alloc_ptr(layout)?.into();
-                self.write_value_to_ptr(value, ptr, layout.align, layout.ty)?;
-                (ptr, layout.align)
-            },
-        };
-        Ok(Place::Ptr {
-            ptr: ptr.into(),
-            align,
-            extra: variant.map_or(PlaceExtra::None, PlaceExtra::DowncastVariant),
-        })
-    }
-
-    pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> {
-        let new_place = match place {
-            Place::Local { frame, local } => {
-                match self.stack[frame].locals[local].access()? {
-                    Value::ByRef(ptr, align) => {
-                        Place::Ptr {
-                            ptr: ptr.into(),
-                            align,
-                            extra: PlaceExtra::None,
-                        }
-                    }
-                    val => {
-                        let ty = self.stack[frame].mir.local_decls[local].ty;
-                        let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
-                        let layout = self.layout_of(ty)?;
-                        let ptr = self.alloc_ptr(layout)?;
-                        self.stack[frame].locals[local] =
-                            LocalValue::Live(Value::ByRef(ptr.into(), layout.align)); // it stays live
-
-                        let place = Place::from_ptr(ptr, layout.align);
-                        self.write_value(ValTy { value: val, ty }, place)?;
-                        place
-                    }
-                }
-            }
-            Place::Ptr { .. } => place,
-        };
-        Ok(new_place)
-    }
-
-    /// Convert to ByVal or ScalarPair *if possible*, leave `ByRef` otherwise
-    pub fn try_read_by_ref(&self, mut val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
-        if let Value::ByRef(ptr, align) = val {
-            if let Some(read_val) = self.try_read_value(ptr, align, ty)? {
-                val = read_val;
-            }
-        }
-        Ok(val)
-    }
-
-    pub fn value_to_scalar(
-        &self,
-        ValTy { value, ty } : ValTy<'tcx>,
-    ) -> EvalResult<'tcx, Scalar> {
-        let value = match value {
-            Value::ByRef(ptr, align) => self.read_value(ptr, align, ty)?,
-            scalar_or_pair => scalar_or_pair,
-        };
-        match value {
-            Value::ByRef(..) => bug!("read_value can't result in `ByRef`"),
-
-            Value::Scalar(scalar) => scalar.unwrap_or_err(),
-
-            Value::ScalarPair(..) => bug!("value_to_scalar can't work with fat pointers"),
-        }
-    }
-
-    pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, LocalValue> {
-        trace!("{:?} is now live", local);
-
-        let ty = self.frame().mir.local_decls[local].ty;
-        let init = self.init_value(ty)?;
-        // StorageLive *always* kills the value that's currently stored
-        Ok(mem::replace(&mut self.frame_mut().locals[local], LocalValue::Live(init)))
-    }
-
-    pub(super) fn init_value(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
-        let ty = self.monomorphize(ty, self.substs());
-        let layout = self.layout_of(ty)?;
-        Ok(match layout.abi {
-            layout::Abi::Scalar(..) => Value::Scalar(ScalarMaybeUndef::Undef),
-            layout::Abi::ScalarPair(..) => Value::ScalarPair(
-                ScalarMaybeUndef::Undef,
-                ScalarMaybeUndef::Undef,
-            ),
-            _ => Value::ByRef(self.alloc_ptr(layout)?.into(), layout.align),
-        })
-    }
-
-    /// reads a tag and produces the corresponding variant index
-    pub fn read_discriminant_as_variant_index(
-        &self,
-        place: Place,
-        layout: TyLayout<'tcx>,
-    ) -> EvalResult<'tcx, usize> {
-        match layout.variants {
-            ty::layout::Variants::Single { index } => Ok(index),
-            ty::layout::Variants::Tagged { .. } => {
-                let discr_val = self.read_discriminant_value(place, layout)?;
-                layout
-                    .ty
-                    .ty_adt_def()
-                    .expect("tagged layout for non adt")
-                    .discriminants(self.tcx.tcx)
-                    .position(|var| var.val == discr_val)
-                    .ok_or_else(|| EvalErrorKind::InvalidDiscriminant.into())
-            }
-            ty::layout::Variants::NicheFilling { .. } => {
-                let discr_val = self.read_discriminant_value(place, layout)?;
-                assert_eq!(discr_val as usize as u128, discr_val);
-                Ok(discr_val as usize)
-            },
-        }
-    }
-
-    pub fn read_discriminant_value(
-        &self,
-        place: Place,
-        layout: TyLayout<'tcx>,
-    ) -> EvalResult<'tcx, u128> {
-        trace!("read_discriminant_value {:#?}", layout);
-        if layout.abi == layout::Abi::Uninhabited {
-            return Ok(0);
-        }
-
-        match layout.variants {
-            layout::Variants::Single { index } => {
-                let discr_val = layout.ty.ty_adt_def().map_or(
-                    index as u128,
-                    |def| def.discriminant_for_variant(*self.tcx, index).val);
-                return Ok(discr_val);
-            }
-            layout::Variants::Tagged { .. } |
-            layout::Variants::NicheFilling { .. } => {},
-        }
-        let discr_place_val = self.read_place(place)?;
-        let (discr_val, discr) = self.read_field(discr_place_val, None, mir::Field::new(0), layout)?;
-        trace!("discr value: {:?}, {:?}", discr_val, discr);
-        let raw_discr = self.value_to_scalar(ValTy {
-            value: discr_val,
-            ty: discr.ty
-        })?;
-        let discr_val = match layout.variants {
-            layout::Variants::Single { .. } => bug!(),
-            // FIXME: should we catch invalid discriminants here?
-            layout::Variants::Tagged { .. } => {
-                if discr.ty.is_signed() {
-                    let i = raw_discr.to_bits(discr.size)? as i128;
-                    // going from layout tag type to typeck discriminant type
-                    // requires first sign extending with the layout discriminant
-                    let shift = 128 - discr.size.bits();
-                    let sexted = (i << shift) >> shift;
-                    // and then zeroing with the typeck discriminant type
-                    let discr_ty = layout
-                        .ty
-                        .ty_adt_def().expect("tagged layout corresponds to adt")
-                        .repr
-                        .discr_type();
-                    let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty);
-                    let shift = 128 - discr_ty.size().bits();
-                    let truncatee = sexted as u128;
-                    (truncatee << shift) >> shift
-                } else {
-                    raw_discr.to_bits(discr.size)?
-                }
-            },
-            layout::Variants::NicheFilling {
-                dataful_variant,
-                ref niche_variants,
-                niche_start,
-                ..
-            } => {
-                let variants_start = *niche_variants.start() as u128;
-                let variants_end = *niche_variants.end() as u128;
-                match raw_discr {
-                    Scalar::Ptr(_) => {
-                        assert!(niche_start == 0);
-                        assert!(variants_start == variants_end);
-                        dataful_variant as u128
-                    },
-                    Scalar::Bits { bits: raw_discr, size } => {
-                        assert_eq!(size as u64, discr.size.bytes());
-                        let discr = raw_discr.wrapping_sub(niche_start)
-                            .wrapping_add(variants_start);
-                        if variants_start <= discr && discr <= variants_end {
-                            discr
-                        } else {
-                            dataful_variant as u128
-                        }
-                    },
-                }
-            }
-        };
-
-        Ok(discr_val)
-    }
-
-
-    pub fn write_discriminant_value(
-        &mut self,
-        dest_ty: Ty<'tcx>,
-        dest: Place,
-        variant_index: usize,
-    ) -> EvalResult<'tcx> {
-        let layout = self.layout_of(dest_ty)?;
-
-        match layout.variants {
-            layout::Variants::Single { index } => {
-                if index != variant_index {
-                    // If the layout of an enum is `Single`, all
-                    // other variants are necessarily uninhabited.
-                    assert_eq!(layout.for_variant(&self, variant_index).abi,
-                               layout::Abi::Uninhabited);
-                }
-            }
-            layout::Variants::Tagged { ref tag, .. } => {
-                let discr_val = dest_ty.ty_adt_def().unwrap()
-                    .discriminant_for_variant(*self.tcx, variant_index)
-                    .val;
-
-                // raw discriminants for enums are isize or bigger during
-                // their computation, but the in-memory tag is the smallest possible
-                // representation
-                let size = tag.value.size(self.tcx.tcx);
-                let shift = 128 - size.bits();
-                let discr_val = (discr_val << shift) >> shift;
-
-                let (discr_dest, tag) = self.place_field(dest, mir::Field::new(0), layout)?;
-                self.write_scalar(discr_dest, Scalar::Bits {
-                    bits: discr_val,
-                    size: size.bytes() as u8,
-                }, tag.ty)?;
-            }
-            layout::Variants::NicheFilling {
-                dataful_variant,
-                ref niche_variants,
-                niche_start,
-                ..
-            } => {
-                if variant_index != dataful_variant {
-                    let (niche_dest, niche) =
-                        self.place_field(dest, mir::Field::new(0), layout)?;
-                    let niche_value = ((variant_index - niche_variants.start()) as u128)
-                        .wrapping_add(niche_start);
-                    self.write_scalar(niche_dest, Scalar::Bits {
-                        bits: niche_value,
-                        size: niche.size.bytes() as u8,
-                    }, niche.ty)?;
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
-        let ptr = self.memory.allocate_bytes(s.as_bytes());
-        Ok(Scalar::Ptr(ptr).to_value_with_len(s.len() as u64, self.tcx.tcx))
-    }
-
-    pub fn const_to_value(
-        &mut self,
-        val: ConstValue<'tcx>,
-    ) -> EvalResult<'tcx, Value> {
-        match val {
-            ConstValue::Unevaluated(def_id, substs) => {
-                let instance = self.resolve(def_id, substs)?;
-                self.read_global_as_value(GlobalId {
-                    instance,
-                    promoted: None,
-                })
-            }
-            ConstValue::ByRef(alloc, offset) => {
-                // FIXME: Allocate new AllocId for all constants inside
-                let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?;
-                Ok(Value::ByRef(Pointer::new(id, offset).into(), alloc.align))
-            },
-            ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a.into(), b.into())),
-            ConstValue::Scalar(val) => Ok(Value::Scalar(val.into())),
-        }
-    }
-}
-
-impl<'mir, 'tcx> Frame<'mir, 'tcx> {
-    pub(super) fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> {
-        match self.locals[local] {
-            LocalValue::Dead => err!(DeadLocal),
-            LocalValue::Live(ref mut local) => {
-                *local = value;
-                Ok(())
-            }
-        }
-    }
-
-    /// Returns the old value of the local
-    pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue {
-        trace!("{:?} is now dead", local);
-
-        mem::replace(&mut self.locals[local], LocalValue::Dead)
-    }
-}
diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs
index 05494131f32..35b8f63c664 100644
--- a/src/librustc_mir/lib.rs
+++ b/src/librustc_mir/lib.rs
@@ -37,6 +37,7 @@ Rust MIR: a lowered representation of Rust. Also: an experiment!
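The hunk below adds the `try_from` feature gate: the new place code converts field indices between `u64` and `usize` with `usize::try_from(..)`, which was still unstable at the time (it has since been stabilized). The pattern it enables, in isolation:

    use std::convert::TryFrom;

    fn main() {
        let field: u64 = 3;
        // Fails only if the index does not fit in usize on the host.
        let idx = usize::try_from(field).unwrap();
        assert_eq!(idx, 3);
    }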
#![feature(step_trait)] #![feature(slice_concat_ext)] #![feature(if_while_or_patterns)] +#![feature(try_from)] #![recursion_limit="256"] @@ -82,7 +83,7 @@ pub fn provide(providers: &mut Providers) { shim::provide(providers); transform::provide(providers); providers.const_eval = interpret::const_eval_provider; - providers.const_value_to_allocation = interpret::const_value_to_allocation_provider; + providers.const_to_allocation = interpret::const_to_allocation_provider; providers.check_match = hair::pattern::check_match; } diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs index 47c45adb85f..3f77e69b7dc 100644 --- a/src/librustc_mir/transform/const_prop.rs +++ b/src/librustc_mir/transform/const_prop.rs @@ -17,16 +17,16 @@ use rustc::mir::{Constant, Location, Place, Mir, Operand, Rvalue, Local}; use rustc::mir::{NullOp, StatementKind, Statement, BasicBlock, LocalKind}; use rustc::mir::{TerminatorKind, ClearCrossCrate, SourceInfo, BinOp, ProjectionElem}; use rustc::mir::visit::{Visitor, PlaceContext}; -use rustc::mir::interpret::{ConstEvalErr, EvalErrorKind, ScalarMaybeUndef}; +use rustc::mir::interpret::{ + ConstEvalErr, EvalErrorKind, ScalarMaybeUndef, Scalar, GlobalId, EvalResult +}; use rustc::ty::{TyCtxt, self, Instance}; -use rustc::mir::interpret::{Value, Scalar, GlobalId, EvalResult}; -use interpret::EvalContext; -use interpret::CompileTimeEvaluator; -use interpret::{eval_promoted, mk_borrowck_eval_cx, ValTy}; +use interpret::{EvalContext, CompileTimeEvaluator, eval_promoted, mk_borrowck_eval_cx}; +use interpret::{Value, OpTy, MemoryKind}; use transform::{MirPass, MirSource}; use syntax::source_map::{Span, DUMMY_SP}; use rustc::ty::subst::Substs; -use rustc_data_structures::indexed_vec::IndexVec; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc::ty::ParamEnv; use rustc::ty::layout::{ LayoutOf, TyLayout, LayoutError, @@ -65,7 +65,7 @@ impl MirPass for ConstProp { } } -type Const<'tcx> = (Value, TyLayout<'tcx>, Span); +type Const<'tcx> = (OpTy<'tcx>, Span); /// Finds optimization opportunities on the MIR. struct ConstPropagator<'b, 'a, 'tcx:'a+'b> { @@ -257,10 +257,10 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { source_info: SourceInfo, ) -> Option> { self.ecx.tcx.span = source_info.span; - match self.ecx.const_to_value(c.literal.val) { - Ok(val) => { + match self.ecx.const_value_to_op(c.literal.val) { + Ok(op) => { let layout = self.tcx.layout_of(self.param_env.and(c.literal.ty)).ok()?; - Some((val, layout, c.span)) + Some((OpTy { op, layout }, c.span)) }, Err(error) => { let (stacktrace, span) = self.ecx.generate_stacktrace(None); @@ -284,12 +284,15 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { Place::Projection(ref proj) => match proj.elem { ProjectionElem::Field(field, _) => { trace!("field proj on {:?}", proj.base); - let (base, layout, span) = self.eval_place(&proj.base, source_info)?; - let valty = self.use_ecx(source_info, |this| { - this.ecx.read_field(base, None, field, layout) + let (base, span) = self.eval_place(&proj.base, source_info)?; + let res = self.use_ecx(source_info, |this| { + this.ecx.operand_field(base, field.index() as u64) })?; - Some((valty.0, valty.1, span)) + Some((res, span)) }, + // We could get more projections by using e.g. `operand_projection`, + // but we do not even have the stack frame set up properly so + // an `Index` projection would throw us off-track. 
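(Aside: the reason `type Const<'tcx>` can shrink from a triple to a pair is that an `OpTy` already bundles the operand with its layout. A minimal stand-in, assuming simplified types rather than rustc's real `Operand`/`TyLayout`:

// Stand-in types, for illustration only.
#[derive(Clone, Copy, Debug)]
struct TyLayout { size_bytes: u64 }

#[derive(Clone, Copy, Debug)]
enum Operand {
    Immediate(u128),                        // value held directly
    Indirect { alloc_id: u64, offset: u64 } // value behind a pointer
}

// The pairing that lets `(Value, TyLayout<'tcx>, Span)` become `(OpTy<'tcx>, Span)`:
#[derive(Clone, Copy, Debug)]
struct OpTy { op: Operand, layout: TyLayout }

impl OpTy {
    // the shape of the `OpTy::from_scalar_value` calls in the hunks above
    fn from_scalar_value(bits: u128, layout: TyLayout) -> Self {
        OpTy { op: Operand::Immediate(bits), layout }
    }
}

fn main() {
    let usize_layout = TyLayout { size_bytes: 8 };
    let op = OpTy::from_scalar_value(42, usize_layout);
    // downstream code reads the layout off the operand itself
    println!("{:?}, {} bytes", op.op, op.layout.size_bytes);
}
)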
_ => None, }, Place::Promoted(ref promoted) => { @@ -306,12 +309,11 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { }; // cannot use `const_eval` here, because that would require having the MIR // for the current function available, but we're producing said MIR right now - let (value, _, ty) = self.use_ecx(source_info, |this| { + let res = self.use_ecx(source_info, |this| { eval_promoted(&mut this.ecx, cid, this.mir, this.param_env) })?; - let val = (value, ty, source_info.span); - trace!("evaluated promoted {:?} to {:?}", promoted, val); - Some(val) + trace!("evaluated promoted {:?} to {:?}", promoted, res); + Some((res, source_info.span)) }, _ => None, } @@ -343,17 +345,11 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { Rvalue::Discriminant(..) => None, Rvalue::Cast(kind, ref operand, _) => { - let (value, layout, span) = self.eval_operand(operand, source_info)?; + let (op, span) = self.eval_operand(operand, source_info)?; self.use_ecx(source_info, |this| { - let dest_ptr = this.ecx.alloc_ptr(place_layout)?; - let place_align = place_layout.align; - let dest = ::interpret::Place::from_ptr(dest_ptr, place_align); - this.ecx.cast(ValTy { value, ty: layout.ty }, kind, place_layout.ty, dest)?; - Ok(( - Value::ByRef(dest_ptr.into(), place_align), - place_layout, - span, - )) + let dest = this.ecx.allocate(place_layout, MemoryKind::Stack)?; + this.ecx.cast(op, kind, dest.into())?; + Ok((dest.into(), span)) }) } @@ -361,11 +357,13 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { Rvalue::Len(_) => None, Rvalue::NullaryOp(NullOp::SizeOf, ty) => { type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some(( - Value::Scalar(Scalar::Bits { - bits: n as u128, - size: self.tcx.data_layout.pointer_size.bytes() as u8, - }.into()), - self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?, + OpTy::from_scalar_value( + Scalar::Bits { + bits: n as u128, + size: self.tcx.data_layout.pointer_size.bytes() as u8, + }, + self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?, + ), span, ))) } @@ -381,12 +379,12 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { return None; } - let val = self.eval_operand(arg, source_info)?; - let prim = self.use_ecx(source_info, |this| { - this.ecx.value_to_scalar(ValTy { value: val.0, ty: val.1.ty }) + let (arg, _) = self.eval_operand(arg, source_info)?; + let val = self.use_ecx(source_info, |this| { + let prim = this.ecx.read_scalar(arg)?.not_undef()?; + this.ecx.unary_op(op, prim, arg.layout) })?; - let val = self.use_ecx(source_info, |this| this.ecx.unary_op(op, prim, val.1))?; - Some((Value::Scalar(val.into()), place_layout, span)) + Some((OpTy::from_scalar_value(val, place_layout), span)) } Rvalue::CheckedBinaryOp(op, ref left, ref right) | Rvalue::BinaryOp(op, ref left, ref right) => { @@ -404,7 +402,7 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { } let r = self.use_ecx(source_info, |this| { - this.ecx.value_to_scalar(ValTy { value: right.0, ty: right.1.ty }) + this.ecx.read_value(right.0) })?; if op == BinOp::Shr || op == BinOp::Shl { let left_ty = left.ty(self.mir, self.tcx); @@ -414,8 +412,9 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { .unwrap() .size .bits(); - let right_size = right.1.size; - if r.to_bits(right_size).ok().map_or(false, |b| b >= left_bits as u128) { + let right_size = right.0.layout.size; + let r_bits = r.to_scalar().and_then(|r| r.to_bits(right_size)); + if r_bits.ok().map_or(false, |b| b >= left_bits as u128) { let source_scope_local_data = match 
self.mir.source_scope_local_data { ClearCrossCrate::Set(ref data) => data, ClearCrossCrate::Clear => return None, @@ -436,11 +435,11 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { } let left = self.eval_operand(left, source_info)?; let l = self.use_ecx(source_info, |this| { - this.ecx.value_to_scalar(ValTy { value: left.0, ty: left.1.ty }) + this.ecx.read_value(left.0) })?; trace!("const evaluating {:?} for {:?} and {:?}", op, left, right); let (val, overflow) = self.use_ecx(source_info, |this| { - this.ecx.binary_op(op, l, left.1.ty, r, right.1.ty) + this.ecx.binary_op(op, l, r) })?; let val = if let Rvalue::CheckedBinaryOp(..) = *rvalue { Value::ScalarPair( @@ -455,7 +454,11 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { } Value::Scalar(val.into()) }; - Some((val, place_layout, span)) + let res = OpTy { + op: ::interpret::Operand::Immediate(val), + layout: place_layout, + }; + Some((res, span)) }, } } @@ -571,7 +574,8 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { if let TerminatorKind::Assert { expected, msg, cond, .. } = kind { if let Some(value) = self.eval_operand(cond, source_info) { trace!("assertion on {:?} should be {:?}", value, expected); - if Value::Scalar(Scalar::from_bool(*expected).into()) != value.0 { + let expected = Value::Scalar(Scalar::from_bool(*expected).into()); + if expected != value.0.to_immediate() { // poison all places this operand references so that further code // doesn't use the invalid value match cond { @@ -607,7 +611,7 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { let len = self .eval_operand(len, source_info) .expect("len must be const"); - let len = match len.0 { + let len = match len.0.to_immediate() { Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. })) => bits, @@ -616,7 +620,7 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { let index = self .eval_operand(index, source_info) .expect("index must be const"); - let index = match index.0 { + let index = match index.0.to_immediate() { Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. })) => bits, diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index c9f717e6059..010ca1f7ab4 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -1375,7 +1375,7 @@ fn maybe_check_static_with_link_section(tcx: TyCtxt, id: DefId, span: Span) { }; let param_env = ty::ParamEnv::reveal_all(); if let Ok(static_) = tcx.const_eval(param_env.and(cid)) { - let alloc = tcx.const_value_to_allocation(static_); + let alloc = tcx.const_to_allocation(static_); if alloc.relocations.len() != 0 { let msg = "statics with a custom `#[link_section]` must be a \ simple list of bytes on the wasm target with no \
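(Aside: the two guards const-prop applies in the hunks above, distilled — a shift is only folded when the RHS is known to be smaller than the LHS's bit width, and an indexing operation is only folded when the index is known to be in bounds. A standalone sketch with assumed helper names, not rustc code:

// Mirrors the `Shr`/`Shl` guard: folding proceeds unless the shift
// amount is provably >= the LHS width (which would overflow at runtime).
fn may_fold_shift(left_bits: u64, rhs_bits: Option<u128>) -> bool {
    // an unknown RHS cannot prove an overflow, so folding continues
    !rhs_bits.map_or(false, |r| r >= left_bits as u128)
}

// Mirrors the bounds-check handling: a constant index is only valid
// when it is strictly less than the constant length.
fn check_index(index: u128, len: u128) -> Result<u128, String> {
    if index < len {
        Ok(index)
    } else {
        Err(format!("index out of bounds: the len is {} but the index is {}", len, index))
    }
}

fn main() {
    assert!(may_fold_shift(32, Some(31)));  // 1u32 << 31: fine to fold
    assert!(!may_fold_shift(32, Some(32))); // 1u32 << 32: overflow, don't fold
    assert!(may_fold_shift(32, None));      // RHS unknown: nothing to report
    assert!(check_index(2, 4).is_ok());
    assert!(check_index(4, 4).is_err());
}
)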