diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs
index 8c4ff718aa6..43c9fc0b8fd 100644
--- a/src/librustc/ich/impls_ty.rs
+++ b/src/librustc/ich/impls_ty.rs
@@ -394,10 +394,10 @@ for ::mir::interpret::ConstValue<'gcx> {
         mem::discriminant(self).hash_stable(hcx, hasher);
         match *self {
-            ByVal(val) => {
+            Scalar(val) => {
                 val.hash_stable(hcx, hasher);
             }
-            ByValPair(a, b) => {
+            ScalarPair(a, b) => {
                 a.hash_stable(hcx, hasher);
                 b.hash_stable(hcx, hasher);
             }
@@ -410,12 +410,12 @@ for ::mir::interpret::ConstValue<'gcx> {
 }

 impl_stable_hash_for!(enum mir::interpret::Value {
-    ByVal(v),
-    ByValPair(a, b),
+    Scalar(v),
+    ScalarPair(a, b),
     ByRef(ptr, align)
 });

-impl_stable_hash_for!(struct mir::interpret::MemoryPointer {
+impl_stable_hash_for!(struct mir::interpret::Pointer {
     alloc_id,
     offset
 });
@@ -473,13 +473,24 @@ impl_stable_hash_for!(enum ::syntax::ast::Mutability {
     Mutable
 });

-impl_stable_hash_for!(struct mir::interpret::Pointer{primval});
-impl_stable_hash_for!(enum mir::interpret::PrimVal {
-    Bytes(b),
-    Ptr(p),
-    Undef
-});
+impl<'a> HashStable<StableHashingContext<'a>>
+for ::mir::interpret::Scalar {
+    fn hash_stable<W: StableHasherResult>(&self,
+                                          hcx: &mut StableHashingContext<'a>,
+                                          hasher: &mut StableHasher<W>) {
+        use mir::interpret::Scalar::*;
+
+        mem::discriminant(self).hash_stable(hcx, hasher);
+        match *self {
+            Bits { bits, defined } => {
+                bits.hash_stable(hcx, hasher);
+                defined.hash_stable(hcx, hasher);
+            },
+            Ptr(ptr) => ptr.hash_stable(hcx, hasher),
+        }
+    }
+}

 impl_stable_hash_for!(struct ty::Const<'tcx> {
     ty,
diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs
index c67f09c88bd..d138c6a85a0 100644
--- a/src/librustc/lib.rs
+++ b/src/librustc/lib.rs
@@ -68,6 +68,7 @@
 #![feature(trusted_len)]
 #![feature(catch_expr)]
 #![feature(test)]
+#![feature(in_band_lifetimes)]
 #![recursion_limit="512"]
diff --git a/src/librustc/mir/interpret/error.rs b/src/librustc/mir/interpret/error.rs
index 6885bf89cc8..45819afca3f 100644
--- a/src/librustc/mir/interpret/error.rs
+++ b/src/librustc/mir/interpret/error.rs
@@ -5,7 +5,7 @@ use ty::{FnSig, Ty, layout};
 use ty::layout::{Size, Align};

 use super::{
-    MemoryPointer, Lock, AccessKind
+    Pointer, Lock, AccessKind
 };

 use backtrace::Backtrace;
@@ -38,7 +38,7 @@ pub enum EvalErrorKind<'tcx, O> {
     MachineError(String),
     FunctionPointerTyMismatch(FnSig<'tcx>, FnSig<'tcx>),
     NoMirFor(String),
-    UnterminatedCString(MemoryPointer),
+    UnterminatedCString(Pointer),
     DanglingPointerDeref,
     DoubleFree,
     InvalidMemoryAccess,
@@ -46,7 +46,7 @@ pub enum EvalErrorKind<'tcx, O> {
     InvalidBool,
     InvalidDiscriminant,
     PointerOutOfBounds {
-        ptr: MemoryPointer,
+        ptr: Pointer,
         access: bool,
         allocation_size: Size,
     },
@@ -76,26 +76,26 @@ pub enum EvalErrorKind<'tcx, O> {
         has: Align,
     },
     MemoryLockViolation {
-        ptr: MemoryPointer,
+        ptr: Pointer,
         len: u64,
         frame: usize,
         access: AccessKind,
         lock: Lock,
     },
     MemoryAcquireConflict {
-        ptr: MemoryPointer,
+        ptr: Pointer,
         len: u64,
         kind: AccessKind,
         lock: Lock,
     },
     InvalidMemoryLockRelease {
-        ptr: MemoryPointer,
+        ptr: Pointer,
         len: u64,
         frame: usize,
         lock: Lock,
     },
     DeallocatedLockedMemory {
-        ptr: MemoryPointer,
+        ptr: Pointer,
         lock: Lock,
     },
     ValidationFailure(String),
diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs
index 48954b1f0aa..b41652469ae 100644
--- a/src/librustc/mir/interpret/mod.rs
+++ b/src/librustc/mir/interpret/mod.rs
@@ -10,7 +10,7 @@ mod value;

 pub use self::error::{EvalError, EvalResult, EvalErrorKind, AssertMessage};

-pub use self::value::{PrimVal, PrimValKind, Value, Pointer, ConstValue};
+pub use self::value::{Scalar, Value, ConstValue};

 use std::fmt;
 use mir;
@@ -110,18 +110,25 @@ impl PointerArithmetic for T {}

 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
-pub struct MemoryPointer {
+pub struct Pointer {
     pub alloc_id: AllocId,
     pub offset: Size,
 }

-impl<'tcx> MemoryPointer {
+/// Produces a `Pointer` which points to the beginning of the Allocation
+impl From<AllocId> for Pointer {
+    fn from(alloc_id: AllocId) -> Self {
+        Pointer::new(alloc_id, Size::ZERO)
+    }
+}
+
+impl<'tcx> Pointer {
     pub fn new(alloc_id: AllocId, offset: Size) -> Self {
-        MemoryPointer { alloc_id, offset }
+        Pointer { alloc_id, offset }
     }

     pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
-        MemoryPointer::new(
+        Pointer::new(
             self.alloc_id,
             Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)),
         )
@@ -129,11 +136,11 @@ impl<'tcx> MemoryPointer {

     pub fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
         let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
-        (MemoryPointer::new(self.alloc_id, Size::from_bytes(res)), over)
+        (Pointer::new(self.alloc_id, Size::from_bytes(res)), over)
     }

     pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
-        Ok(MemoryPointer::new(
+        Ok(Pointer::new(
             self.alloc_id,
             Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
         ))
@@ -141,11 +148,11 @@ impl<'tcx> MemoryPointer {

     pub fn overflowing_offset<C: HasDataLayout>(self, i: Size, cx: C) -> (Self, bool) {
         let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
-        (MemoryPointer::new(self.alloc_id, Size::from_bytes(res)), over)
+        (Pointer::new(self.alloc_id, Size::from_bytes(res)), over)
     }

     pub fn offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
-        Ok(MemoryPointer::new(
+        Ok(Pointer::new(
             self.alloc_id,
             Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
         ))
@@ -355,7 +362,7 @@ pub struct Allocation {

 impl Allocation {
     pub fn from_bytes(slice: &[u8], align: Align) -> Self {
-        let mut undef_mask = UndefMask::new(Size::from_bytes(0));
+        let mut undef_mask = UndefMask::new(Size::ZERO);
         undef_mask.grow(Size::from_bytes(slice.len() as u64), true);
         Self {
             bytes: slice.to_owned(),
@@ -467,7 +474,7 @@ impl UndefMask {
     pub fn new(size: Size) -> Self {
         let mut m = UndefMask {
             blocks: vec![],
-            len: Size::from_bytes(0),
+            len: Size::ZERO,
         };
         m.grow(size, false);
         m
diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs
index 7ad6826b2f6..9e3d4e60603 100644
--- a/src/librustc/mir/interpret/value.rs
+++ b/src/librustc/mir/interpret/value.rs
@@ -3,16 +3,16 @@
 use ty::layout::{Align, HasDataLayout, Size};
 use ty;

-use super::{EvalResult, MemoryPointer, PointerArithmetic, Allocation};
+use super::{EvalResult, Pointer, PointerArithmetic, Allocation};

-/// Represents a constant value in Rust. 
ByVal and ScalarPair are optimizations which /// matches Value's optimizations for easy conversions between these two types #[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord, RustcEncodable, RustcDecodable, Hash)] pub enum ConstValue<'tcx> { - /// Used only for types with layout::abi::Scalar ABI and ZSTs which use PrimVal::Undef - ByVal(PrimVal), + /// Used only for types with layout::abi::Scalar ABI and ZSTs which use Scalar::undef() + Scalar(Scalar), /// Used only for types with layout::abi::ScalarPair - ByValPair(PrimVal, PrimVal), + ScalarPair(Scalar, Scalar), /// Used only for the remaining cases. An allocation + offset into the allocation ByRef(&'tcx Allocation, Size), } @@ -22,8 +22,8 @@ impl<'tcx> ConstValue<'tcx> { pub fn from_byval_value(val: Value) -> Self { match val { Value::ByRef(..) => bug!(), - Value::ByValPair(a, b) => ConstValue::ByValPair(a, b), - Value::ByVal(val) => ConstValue::ByVal(val), + Value::ScalarPair(a, b) => ConstValue::ScalarPair(a, b), + Value::Scalar(val) => ConstValue::Scalar(val), } } @@ -31,39 +31,33 @@ impl<'tcx> ConstValue<'tcx> { pub fn to_byval_value(&self) -> Option { match *self { ConstValue::ByRef(..) => None, - ConstValue::ByValPair(a, b) => Some(Value::ByValPair(a, b)), - ConstValue::ByVal(val) => Some(Value::ByVal(val)), + ConstValue::ScalarPair(a, b) => Some(Value::ScalarPair(a, b)), + ConstValue::Scalar(val) => Some(Value::Scalar(val)), } } #[inline] - pub fn from_primval(val: PrimVal) -> Self { - ConstValue::ByVal(val) + pub fn from_scalar(val: Scalar) -> Self { + ConstValue::Scalar(val) } #[inline] - pub fn to_primval(&self) -> Option { + pub fn to_scalar(&self) -> Option { match *self { ConstValue::ByRef(..) => None, - ConstValue::ByValPair(..) => None, - ConstValue::ByVal(val) => Some(val), + ConstValue::ScalarPair(..) => None, + ConstValue::Scalar(val) => Some(val), } } #[inline] - pub fn to_bits(&self) -> Option { - match self.to_primval() { - Some(PrimVal::Bytes(val)) => Some(val), - _ => None, - } + pub fn to_bits(&self, size: Size) -> Option { + self.to_scalar()?.to_bits(size).ok() } #[inline] - pub fn to_ptr(&self) -> Option { - match self.to_primval() { - Some(PrimVal::Ptr(ptr)) => Some(ptr), - _ => None, - } + pub fn to_ptr(&self) -> Option { + self.to_scalar()?.to_ptr().ok() } } @@ -74,13 +68,13 @@ impl<'tcx> ConstValue<'tcx> { /// whether the pointer is supposed to be aligned or not (also see Place). /// /// For optimization of a few very common cases, there is also a representation for a pair of -/// primitive values (`ByValPair`). It allows Miri to avoid making allocations for checked binary +/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary /// operations and fat pointers. This idea was taken from rustc's codegen. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] pub enum Value { - ByRef(Pointer, Align), - ByVal(PrimVal), - ByValPair(PrimVal, PrimVal), + ByRef(Scalar, Align), + Scalar(Scalar), + ScalarPair(Scalar, Scalar), } impl<'tcx> ty::TypeFoldable<'tcx> for Value { @@ -92,277 +86,171 @@ impl<'tcx> ty::TypeFoldable<'tcx> for Value { } } -/// A wrapper type around `PrimVal` that cannot be turned back into a `PrimVal` accidentally. -/// This type clears up a few APIs where having a `PrimVal` argument for something that is -/// potentially an integer pointer or a pointer to an allocation was unclear. 
-/// -/// I (@oli-obk) believe it is less easy to mix up generic primvals and primvals that are just -/// the representation of pointers. Also all the sites that convert between primvals and pointers -/// are explicit now (and rare!) -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub struct Pointer { - pub primval: PrimVal, -} - -impl<'tcx> Pointer { - pub fn null() -> Self { - PrimVal::Bytes(0).into() - } - pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { - self.primval.to_ptr() - } - pub fn into_inner_primval(self) -> PrimVal { - self.primval +impl<'tcx> Scalar { + pub fn ptr_null(cx: C) -> Self { + Scalar::Bits { + bits: 0, + defined: cx.data_layout().pointer_size.bits() as u8, + } } - pub fn signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + pub fn ptr_signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { let layout = cx.data_layout(); - match self.primval { - PrimVal::Bytes(b) => { - assert_eq!(b as u64 as u128, b); - Ok(Pointer::from( - PrimVal::Bytes(layout.signed_offset(b as u64, i)? as u128), - )) + match self { + Scalar::Bits { bits, defined } => { + let pointer_size = layout.pointer_size.bits() as u8; + if defined < pointer_size { + err!(ReadUndefBytes) + } else { + Ok(Scalar::Bits { + bits: layout.signed_offset(bits as u64, i)? as u128, + defined: pointer_size, + }) } - PrimVal::Ptr(ptr) => ptr.signed_offset(i, layout).map(Pointer::from), - PrimVal::Undef => err!(ReadUndefBytes), + } + Scalar::Ptr(ptr) => ptr.signed_offset(i, layout).map(Scalar::Ptr), } } - pub fn offset(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { + pub fn ptr_offset(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { let layout = cx.data_layout(); - match self.primval { - PrimVal::Bytes(b) => { - assert_eq!(b as u64 as u128, b); - Ok(Pointer::from( - PrimVal::Bytes(layout.offset(b as u64, i.bytes())? as u128), - )) + match self { + Scalar::Bits { bits, defined } => { + let pointer_size = layout.pointer_size.bits() as u8; + if defined < pointer_size { + err!(ReadUndefBytes) + } else { + Ok(Scalar::Bits { + bits: layout.offset(bits as u64, i.bytes())? 
as u128, + defined: pointer_size, + }) } - PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(Pointer::from), - PrimVal::Undef => err!(ReadUndefBytes), + } + Scalar::Ptr(ptr) => ptr.offset(i, layout).map(Scalar::Ptr), } } - pub fn wrapping_signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + pub fn ptr_wrapping_signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { let layout = cx.data_layout(); - match self.primval { - PrimVal::Bytes(b) => { - assert_eq!(b as u64 as u128, b); - Ok(Pointer::from(PrimVal::Bytes( - layout.wrapping_signed_offset(b as u64, i) as u128, - ))) + match self { + Scalar::Bits { bits, defined } => { + let pointer_size = layout.pointer_size.bits() as u8; + if defined < pointer_size { + err!(ReadUndefBytes) + } else { + Ok(Scalar::Bits { + bits: layout.wrapping_signed_offset(bits as u64, i) as u128, + defined: pointer_size, + }) } - PrimVal::Ptr(ptr) => Ok(Pointer::from(ptr.wrapping_signed_offset(i, layout))), - PrimVal::Undef => err!(ReadUndefBytes), + } + Scalar::Ptr(ptr) => Ok(Scalar::Ptr(ptr.wrapping_signed_offset(i, layout))), } } - pub fn is_null(self) -> EvalResult<'tcx, bool> { - match self.primval { - PrimVal::Bytes(b) => Ok(b == 0), - PrimVal::Ptr(_) => Ok(false), - PrimVal::Undef => err!(ReadUndefBytes), + pub fn is_null_ptr(self, cx: C) -> EvalResult<'tcx, bool> { + match self { + Scalar::Bits { + bits, defined, + } => if defined < cx.data_layout().pointer_size.bits() as u8 { + err!(ReadUndefBytes) + } else { + Ok(bits == 0) + }, + Scalar::Ptr(_) => Ok(false), } } - pub fn to_value_with_len(self, len: u64) -> Value { - Value::ByValPair(self.primval, PrimVal::from_u128(len as u128)) + pub fn to_value_with_len(self, len: u64, cx: C) -> Value { + Value::ScalarPair(self, Scalar::Bits { + bits: len as u128, + defined: cx.data_layout().pointer_size.bits() as u8, + }) } - pub fn to_value_with_vtable(self, vtable: MemoryPointer) -> Value { - Value::ByValPair(self.primval, PrimVal::Ptr(vtable)) + pub fn to_value_with_vtable(self, vtable: Pointer) -> Value { + Value::ScalarPair(self, Scalar::Ptr(vtable)) } pub fn to_value(self) -> Value { - Value::ByVal(self.primval) + Value::Scalar(self) } } -impl ::std::convert::From for Pointer { - fn from(primval: PrimVal) -> Self { - Pointer { primval } +impl From for Scalar { + fn from(ptr: Pointer) -> Self { + Scalar::Ptr(ptr) } } -impl ::std::convert::From for Pointer { - fn from(ptr: MemoryPointer) -> Self { - PrimVal::Ptr(ptr).into() - } -} - -/// A `PrimVal` represents an immediate, primitive value existing outside of a +/// A `Scalar` represents an immediate, primitive value existing outside of a /// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in -/// size. Like a range of bytes in an `Allocation`, a `PrimVal` can either represent the raw bytes -/// of a simple value, a pointer into another `Allocation`, or be undefined. +/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes +/// of a simple value or a pointer into another `Allocation` #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub enum PrimVal { +pub enum Scalar { /// The raw bytes of a simple value. - Bytes(u128), + Bits { + /// The first `defined` number of bits are valid + defined: u8, + bits: u128, + }, /// A pointer into an `Allocation`. 
An `Allocation` in the `memory` module has a list of - /// relocations, but a `PrimVal` is only large enough to contain one, so we just represent the - /// relocation and its associated offset together as a `MemoryPointer` here. - Ptr(MemoryPointer), - - /// An undefined `PrimVal`, for representing values that aren't safe to examine, but are safe - /// to copy around, just like undefined bytes in an `Allocation`. - Undef, + /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the + /// relocation and its associated offset together as a `Pointer` here. + Ptr(Pointer), } -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum PrimValKind { - I8, I16, I32, I64, I128, - U8, U16, U32, U64, U128, - F32, F64, - Ptr, FnPtr, - Bool, - Char, -} - -impl<'tcx> PrimVal { - pub fn from_u128(n: u128) -> Self { - PrimVal::Bytes(n) - } - - pub fn from_i128(n: i128) -> Self { - PrimVal::Bytes(n as u128) +impl<'tcx> Scalar { + pub fn undef() -> Self { + Scalar::Bits { bits: 0, defined: 0 } } pub fn from_bool(b: bool) -> Self { - PrimVal::Bytes(b as u128) + // FIXME: can we make defined `1`? + Scalar::Bits { bits: b as u128, defined: 8 } } pub fn from_char(c: char) -> Self { - PrimVal::Bytes(c as u128) + Scalar::Bits { bits: c as u128, defined: 32 } } - pub fn to_bytes(self) -> EvalResult<'tcx, u128> { + pub fn to_bits(self, size: Size) -> EvalResult<'tcx, u128> { match self { - PrimVal::Bytes(b) => Ok(b), - PrimVal::Ptr(_) => err!(ReadPointerAsBytes), - PrimVal::Undef => err!(ReadUndefBytes), + Scalar::Bits { .. } if size.bits() == 0 => bug!("to_bits cannot be used with zsts"), + Scalar::Bits { bits, defined } if size.bits() <= defined as u64 => Ok(bits), + Scalar::Bits { .. } => err!(ReadUndefBytes), + Scalar::Ptr(_) => err!(ReadPointerAsBytes), } } - pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { match self { - PrimVal::Bytes(_) => err!(ReadBytesAsPointer), - PrimVal::Ptr(p) => Ok(p), - PrimVal::Undef => err!(ReadUndefBytes), + Scalar::Bits {..} => err!(ReadBytesAsPointer), + Scalar::Ptr(p) => Ok(p), } } - pub fn is_bytes(self) -> bool { + pub fn is_bits(self) -> bool { match self { - PrimVal::Bytes(_) => true, + Scalar::Bits { .. } => true, _ => false, } } pub fn is_ptr(self) -> bool { match self { - PrimVal::Ptr(_) => true, + Scalar::Ptr(_) => true, _ => false, } } - pub fn is_undef(self) -> bool { - match self { - PrimVal::Undef => true, - _ => false, - } - } - - pub fn to_u128(self) -> EvalResult<'tcx, u128> { - self.to_bytes() - } - - pub fn to_u64(self) -> EvalResult<'tcx, u64> { - self.to_bytes().map(|b| { - assert_eq!(b as u64 as u128, b); - b as u64 - }) - } - - pub fn to_i32(self) -> EvalResult<'tcx, i32> { - self.to_bytes().map(|b| { - assert_eq!(b as i32 as u128, b); - b as i32 - }) - } - - pub fn to_i128(self) -> EvalResult<'tcx, i128> { - self.to_bytes().map(|b| b as i128) - } - - pub fn to_i64(self) -> EvalResult<'tcx, i64> { - self.to_bytes().map(|b| { - assert_eq!(b as i64 as u128, b); - b as i64 - }) - } - pub fn to_bool(self) -> EvalResult<'tcx, bool> { - match self.to_bytes()? 
{ - 0 => Ok(false), - 1 => Ok(true), + match self { + Scalar::Bits { bits: 0, defined: 8 } => Ok(false), + Scalar::Bits { bits: 1, defined: 8 } => Ok(true), _ => err!(InvalidBool), } } } - -impl PrimValKind { - pub fn is_int(self) -> bool { - use self::PrimValKind::*; - match self { - I8 | I16 | I32 | I64 | I128 | U8 | U16 | U32 | U64 | U128 => true, - _ => false, - } - } - - pub fn is_signed_int(self) -> bool { - use self::PrimValKind::*; - match self { - I8 | I16 | I32 | I64 | I128 => true, - _ => false, - } - } - - pub fn is_float(self) -> bool { - use self::PrimValKind::*; - match self { - F32 | F64 => true, - _ => false, - } - } - - pub fn from_uint_size(size: Size) -> Self { - match size.bytes() { - 1 => PrimValKind::U8, - 2 => PrimValKind::U16, - 4 => PrimValKind::U32, - 8 => PrimValKind::U64, - 16 => PrimValKind::U128, - _ => bug!("can't make uint with size {}", size.bytes()), - } - } - - pub fn from_int_size(size: Size) -> Self { - match size.bytes() { - 1 => PrimValKind::I8, - 2 => PrimValKind::I16, - 4 => PrimValKind::I32, - 8 => PrimValKind::I64, - 16 => PrimValKind::I128, - _ => bug!("can't make int with size {}", size.bytes()), - } - } - - pub fn is_ptr(self) -> bool { - use self::PrimValKind::*; - match self { - Ptr | FnPtr => true, - _ => false, - } - } -} diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index ffb8031b83b..e44b1dc886b 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -24,7 +24,7 @@ use rustc_serialize as serialize; use hir::def::CtorKind; use hir::def_id::DefId; use mir::visit::MirVisitable; -use mir::interpret::{Value, PrimVal, EvalErrorKind}; +use mir::interpret::{Value, Scalar, EvalErrorKind}; use ty::subst::{Subst, Substs}; use ty::{self, AdtDef, CanonicalTy, ClosureSubsts, GeneratorSubsts, Region, Ty, TyCtxt}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; @@ -1149,11 +1149,16 @@ impl<'tcx> TerminatorKind<'tcx> { Return | Resume | Abort | Unreachable | GeneratorDrop => vec![], Goto { .. } => vec!["".into()], SwitchInt { ref values, switch_ty, .. } => { + let size = ty::tls::with(|tcx| { + let param_env = ty::ParamEnv::empty(); + let switch_ty = tcx.lift_to_global(&switch_ty).unwrap(); + tcx.layout_of(param_env.and(switch_ty)).unwrap().size + }); values.iter() .map(|&u| { let mut s = String::new(); print_miri_value( - Value::ByVal(PrimVal::Bytes(u)), + Value::Scalar(Scalar::Bits { bits: u, defined: size.bits() as u8 }), switch_ty, &mut s, ).unwrap(); @@ -1893,19 +1898,26 @@ pub fn fmt_const_val(fmt: &mut W, const_val: &ty::Const) -> fmt::Resul pub fn print_miri_value(value: Value, ty: Ty, f: &mut W) -> fmt::Result { use ty::TypeVariants::*; match (value, &ty.sty) { - (Value::ByVal(PrimVal::Bytes(0)), &TyBool) => write!(f, "false"), - (Value::ByVal(PrimVal::Bytes(1)), &TyBool) => write!(f, "true"), - (Value::ByVal(PrimVal::Bytes(bits)), &TyFloat(ast::FloatTy::F32)) => + (Value::Scalar(Scalar::Bits { bits: 0, .. }), &TyBool) => write!(f, "false"), + (Value::Scalar(Scalar::Bits { bits: 1, .. }), &TyBool) => write!(f, "true"), + (Value::Scalar(Scalar::Bits { bits, .. }), &TyFloat(ast::FloatTy::F32)) => write!(f, "{}f32", Single::from_bits(bits)), - (Value::ByVal(PrimVal::Bytes(bits)), &TyFloat(ast::FloatTy::F64)) => + (Value::Scalar(Scalar::Bits { bits, .. 
}), &TyFloat(ast::FloatTy::F64)) => write!(f, "{}f64", Double::from_bits(bits)), - (Value::ByVal(PrimVal::Bytes(n)), &TyUint(ui)) => write!(f, "{:?}{}", n, ui), - (Value::ByVal(PrimVal::Bytes(n)), &TyInt(i)) => write!(f, "{:?}{}", n as i128, i), - (Value::ByVal(PrimVal::Bytes(n)), &TyChar) => - write!(f, "{:?}", ::std::char::from_u32(n as u32).unwrap()), - (Value::ByVal(PrimVal::Undef), &TyFnDef(did, _)) => + (Value::Scalar(Scalar::Bits { bits, .. }), &TyUint(ui)) => write!(f, "{:?}{}", bits, ui), + (Value::Scalar(Scalar::Bits { bits, .. }), &TyInt(i)) => { + let bit_width = ty::tls::with(|tcx| { + let ty = tcx.lift_to_global(&ty).unwrap(); + tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size.bits() + }); + let shift = 128 - bit_width; + write!(f, "{:?}{}", ((bits as i128) << shift) >> shift, i) + }, + (Value::Scalar(Scalar::Bits { bits, .. }), &TyChar) => + write!(f, "{:?}", ::std::char::from_u32(bits as u32).unwrap()), + (_, &TyFnDef(did, _)) => write!(f, "{}", item_path_str(did)), - (Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::Bytes(len)), + (Value::ScalarPair(Scalar::Ptr(ptr), Scalar::Bits { bits: len, .. }), &TyRef(_, &ty::TyS { sty: TyStr, .. }, _)) => { ty::tls::with(|tcx| { match tcx.alloc_map.lock().get(ptr.alloc_id) { diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 235a541f07b..6169b3bc33f 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -11,7 +11,7 @@ use session::{self, DataTypeKind}; use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions}; -use syntax::ast::{self, FloatTy, IntTy, UintTy}; +use syntax::ast::{self, IntTy, UintTy}; use syntax::attr; use syntax_pos::DUMMY_SP; @@ -130,8 +130,8 @@ impl PrimitiveExt for Primitive { fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { match *self { Int(i, signed) => i.to_ty(tcx, signed), - F32 => tcx.types.f32, - F64 => tcx.types.f64, + Float(FloatTy::F32) => tcx.types.f32, + Float(FloatTy::F64) => tcx.types.f64, Pointer => tcx.mk_mut_ptr(tcx.mk_nil()), } } @@ -231,7 +231,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Arbitrary { - offsets: vec![Size::from_bytes(0), b_offset], + offsets: vec![Size::ZERO, b_offset], memory_index: vec![0, 1] }, abi: Abi::ScalarPair(a, b), @@ -267,7 +267,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { }; let mut sized = true; - let mut offsets = vec![Size::from_bytes(0); fields.len()]; + let mut offsets = vec![Size::ZERO; fields.len()]; let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); let mut optimize = !repr.inhibit_struct_field_reordering_opt(); @@ -307,7 +307,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { // field 5 with offset 0 puts 0 in offsets[5]. // At the bottom of this function, we use inverse_memory_index to produce memory_index. 
- let mut offset = Size::from_bytes(0); + let mut offset = Size::ZERO; if let StructKind::Prefixed(prefix_size, prefix_align) = kind { if packed { @@ -488,8 +488,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { ty::TyUint(ity) => { scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)) } - ty::TyFloat(FloatTy::F32) => scalar(F32), - ty::TyFloat(FloatTy::F64) => scalar(F64), + ty::TyFloat(fty) => scalar(Float(fty)), ty::TyFnPtr(_) => { let mut ptr = scalar_unit(Pointer); ptr.valid_range = 1..=*ptr.valid_range.end(); @@ -503,7 +502,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { fields: FieldPlacement::Union(0), abi: Abi::Uninhabited, align: dl.i8_align, - size: Size::from_bytes(0) + size: Size::ZERO }) } @@ -575,7 +574,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { }, abi: Abi::Aggregate { sized: false }, align: element.align, - size: Size::from_bytes(0) + size: Size::ZERO }) } ty::TyStr => { @@ -587,7 +586,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { }, abi: Abi::Aggregate { sized: false }, align: dl.i8_align, - size: Size::from_bytes(0) + size: Size::ZERO }) } @@ -696,7 +695,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { Align::from_bytes(repr_align, repr_align).unwrap()); } - let mut size = Size::from_bytes(0); + let mut size = Size::ZERO; for field in &variants[0] { assert!(!field.is_unsized()); @@ -908,7 +907,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); let mut align = dl.aggregate_align; - let mut size = Size::from_bytes(0); + let mut size = Size::ZERO; // We're interested in the smallest alignment, so start large. let mut start_align = Align::from_bytes(256, 256).unwrap(); @@ -1078,7 +1077,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } _ => bug!() }; - if pair_offsets[0] == Size::from_bytes(0) && + if pair_offsets[0] == Size::ZERO && pair_offsets[1] == *offset && align == pair.align && size == pair.size { @@ -1099,7 +1098,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { variants: layout_variants, }, fields: FieldPlacement::Arbitrary { - offsets: vec![Size::from_bytes(0)], + offsets: vec![Size::ZERO], memory_index: vec![0] }, abi, @@ -1182,7 +1181,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let build_variant_info = |n: Option, flds: &[ast::Name], layout: TyLayout<'tcx>| { - let mut min_size = Size::from_bytes(0); + let mut min_size = Size::ZERO; let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| { match layout.field(self, i) { Err(err) => { @@ -1514,28 +1513,28 @@ impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>> { } // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users. -impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { +impl TyCtxt<'a, 'tcx, '_> { /// Computes the layout of a type. Note that this implicitly /// executes in "reveal all" mode. #[inline] pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> Result, LayoutError<'tcx>> { let cx = LayoutCx { - tcx: self, + tcx: self.global_tcx(), param_env: param_env_and_ty.param_env }; cx.layout_of(param_env_and_ty.value) } } -impl<'a, 'tcx> ty::maps::TyCtxtAt<'a, 'tcx, 'tcx> { +impl ty::maps::TyCtxtAt<'a, 'tcx, '_> { /// Computes the layout of a type. Note that this implicitly /// executes in "reveal all" mode. 
#[inline] pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> Result, LayoutError<'tcx>> { let cx = LayoutCx { - tcx: self, + tcx: self.global_tcx().at(self.span), param_env: param_env_and_ty.param_env }; cx.layout_of(param_env_and_ty.value) @@ -1567,7 +1566,7 @@ impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx> fields: FieldPlacement::Union(fields), abi: Abi::Uninhabited, align: tcx.data_layout.i8_align, - size: Size::from_bytes(0) + size: Size::ZERO }) } @@ -1746,19 +1745,19 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { match layout.abi { Abi::Scalar(ref scalar) => { - return Ok(scalar_niche(scalar, Size::from_bytes(0))); + return Ok(scalar_niche(scalar, Size::ZERO)); } Abi::ScalarPair(ref a, ref b) => { // HACK(nox): We iter on `b` and then `a` because `max_by_key` // returns the last maximum. let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self)))) - .chain(iter::once((a, Size::from_bytes(0)))) + .chain(iter::once((a, Size::ZERO))) .filter_map(|(scalar, offset)| scalar_niche(scalar, offset)) .max_by_key(|niche| niche.available); return Ok(niche); } Abi::Vector { ref element, .. } => { - return Ok(scalar_niche(element, Size::from_bytes(0))); + return Ok(scalar_niche(element, Size::ZERO)); } _ => {} } @@ -1908,8 +1907,7 @@ impl_stable_hash_for!(enum ::ty::layout::Integer { impl_stable_hash_for!(enum ::ty::layout::Primitive { Int(integer, signed), - F32, - F64, + Float(fty), Pointer }); diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 4507da1c698..5ee6329e860 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -1982,7 +1982,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { match tcx.const_eval(param_env.and(cid)) { Ok(val) => { // FIXME: Find the right type and use it instead of `val.ty` here - if let Some(b) = val.assert_bits(val.ty) { + if let Some(b) = val.assert_bits(tcx.global_tcx(), param_env.and(val.ty)) { trace!("discriminants: {} ({:?})", b, repr_type); Some(Discr { val: b, diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index faf93ab30b7..3347d47a4e8 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -17,9 +17,9 @@ use middle::region; use rustc_data_structures::indexed_vec::Idx; use ty::subst::{Substs, Subst, Kind, UnpackedKind}; use ty::{self, AdtDef, TypeFlags, Ty, TyCtxt, TypeFoldable}; -use ty::{Slice, TyS}; +use ty::{Slice, TyS, ParamEnvAnd, ParamEnv}; use util::captures::Captures; -use mir::interpret::{PrimVal, MemoryPointer, Value, ConstValue}; +use mir::interpret::{Scalar, Pointer, Value, ConstValue}; use std::iter; use std::cmp::Ordering; @@ -1809,51 +1809,64 @@ impl<'tcx> Const<'tcx> { } #[inline] - pub fn from_primval( + pub fn from_scalar( tcx: TyCtxt<'_, '_, 'tcx>, - val: PrimVal, + val: Scalar, ty: Ty<'tcx>, ) -> &'tcx Self { - Self::from_const_value(tcx, ConstValue::from_primval(val), ty) + Self::from_const_value(tcx, ConstValue::from_scalar(val), ty) } #[inline] pub fn from_bits( tcx: TyCtxt<'_, '_, 'tcx>, - val: u128, - ty: Ty<'tcx>, + bits: u128, + ty: ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> &'tcx Self { - Self::from_primval(tcx, PrimVal::Bytes(val), ty) + let ty = tcx.lift_to_global(&ty).unwrap(); + let size = tcx.layout_of(ty).unwrap_or_else(|e| { + panic!("could not compute layout for {:?}: {:?}", ty, e) + }).size; + let shift = 128 - size.bits(); + let truncated = (bits << shift) >> shift; + assert_eq!(truncated, bits, "from_bits called with untruncated value"); + Self::from_scalar(tcx, Scalar::Bits { bits, defined: size.bits() as u8 }, ty.value) } 
#[inline] pub fn zero_sized(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> &'tcx Self { - Self::from_primval(tcx, PrimVal::Undef, ty) + Self::from_scalar(tcx, Scalar::undef(), ty) } #[inline] pub fn from_bool(tcx: TyCtxt<'_, '_, 'tcx>, v: bool) -> &'tcx Self { - Self::from_bits(tcx, v as u128, tcx.types.bool) + Self::from_bits(tcx, v as u128, ParamEnv::empty().and(tcx.types.bool)) } #[inline] pub fn from_usize(tcx: TyCtxt<'_, '_, 'tcx>, n: u64) -> &'tcx Self { - Self::from_bits(tcx, n as u128, tcx.types.usize) + Self::from_bits(tcx, n as u128, ParamEnv::empty().and(tcx.types.usize)) } #[inline] - pub fn to_bits(&self, ty: Ty<'_>) -> Option { - if self.ty != ty { + pub fn to_bits( + &self, + tcx: TyCtxt<'_, '_, 'tcx>, + ty: ParamEnvAnd<'tcx, Ty<'tcx>>, + ) -> Option { + if self.ty != ty.value { return None; } + let ty = tcx.lift_to_global(&ty).unwrap(); + let size = tcx.layout_of(ty).ok()?.size; match self.val { - ConstVal::Value(val) => val.to_bits(), + ConstVal::Value(val) => val.to_bits(size), _ => None, } } #[inline] - pub fn to_ptr(&self) -> Option { + pub fn to_ptr(&self) -> Option { match self.val { ConstVal::Value(val) => val.to_ptr(), _ => None, @@ -1869,25 +1882,31 @@ impl<'tcx> Const<'tcx> { } #[inline] - pub fn to_primval(&self) -> Option { + pub fn to_scalar(&self) -> Option { match self.val { - ConstVal::Value(val) => val.to_primval(), + ConstVal::Value(val) => val.to_scalar(), _ => None, } } #[inline] - pub fn assert_bits(&self, ty: Ty<'_>) -> Option { - assert_eq!(self.ty, ty); + pub fn assert_bits( + &self, + tcx: TyCtxt<'_, '_, '_>, + ty: ParamEnvAnd<'tcx, Ty<'tcx>>, + ) -> Option { + assert_eq!(self.ty, ty.value); + let ty = tcx.lift_to_global(&ty).unwrap(); + let size = tcx.layout_of(ty).ok()?.size; match self.val { - ConstVal::Value(val) => val.to_bits(), + ConstVal::Value(val) => val.to_bits(size), _ => None, } } #[inline] pub fn assert_bool(&self, tcx: TyCtxt<'_, '_, '_>) -> Option { - self.assert_bits(tcx.types.bool).and_then(|v| match v { + self.assert_bits(tcx, ParamEnv::empty().and(tcx.types.bool)).and_then(|v| match v { 0 => Some(false), 1 => Some(true), _ => None, @@ -1896,14 +1915,18 @@ impl<'tcx> Const<'tcx> { #[inline] pub fn assert_usize(&self, tcx: TyCtxt<'_, '_, '_>) -> Option { - self.assert_bits(tcx.types.usize).map(|v| v as u64) + self.assert_bits(tcx, ParamEnv::empty().and(tcx.types.usize)).map(|v| v as u64) } #[inline] - pub fn unwrap_bits(&self, ty: Ty<'_>) -> u128 { - match self.assert_bits(ty) { + pub fn unwrap_bits( + &self, + tcx: TyCtxt<'_, '_, '_>, + ty: ParamEnvAnd<'tcx, Ty<'tcx>>, + ) -> u128 { + match self.assert_bits(tcx, ty) { Some(val) => val, - None => bug!("expected bits of {}, got {:#?}", ty, self), + None => bug!("expected bits of {}, got {:#?}", ty.value, self), } } diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index e3db4972edc..9b7443f97ef 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -68,14 +68,14 @@ impl<'tcx> Discr<'tcx> { }; let bit_size = int.size().bits(); - let amt = 128 - bit_size; + let shift = 128 - bit_size; if signed { let sext = |u| { let i = u as i128; - (i << amt) >> amt + (i << shift) >> shift }; let min = sext(1_u128 << (bit_size - 1)); - let max = i128::max_value() >> amt; + let max = i128::max_value() >> shift; let val = sext(self.val); assert!(n < (i128::max_value() as u128)); let n = n as i128; @@ -87,13 +87,13 @@ impl<'tcx> Discr<'tcx> { }; // zero the upper bits let val = val as u128; - let val = (val << amt) >> amt; + let val = (val << shift) >> shift; (Self { val: val 
as u128, ty: self.ty, }, oflo) } else { - let max = u128::max_value() >> amt; + let max = u128::max_value() >> shift; let val = self.val; let oflo = val > max - n; let val = if oflo { diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 221012903d9..6b5baa402b4 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -454,7 +454,7 @@ impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> { adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, - Size::from_bytes(0), + Size::ZERO, false); adjust_for_rust_scalar(&mut b_attrs, b, @@ -471,7 +471,7 @@ impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> { adjust_for_rust_scalar(attrs, scalar, arg.layout, - Size::from_bytes(0), + Size::ZERO, is_return); } } diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index ae0f6067f47..99f08540c71 100644 --- a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -325,7 +325,7 @@ fn vec_slice_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, MemberDescription { name: "data_ptr".to_string(), type_metadata: data_ptr_metadata, - offset: Size::from_bytes(0), + offset: Size::ZERO, size: pointer_size, align: pointer_align, flags: DIFlags::FlagZero, @@ -1074,7 +1074,7 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> { MemberDescription { name: f.name.to_string(), type_metadata: type_metadata(cx, field.ty, self.span), - offset: Size::from_bytes(0), + offset: Size::ZERO, size, align, flags: DIFlags::FlagZero, @@ -1158,7 +1158,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { MemberDescription { name: "".to_string(), type_metadata: variant_type_metadata, - offset: Size::from_bytes(0), + offset: Size::ZERO, size: self.layout.size, align: self.layout.align, flags: DIFlags::FlagZero @@ -1187,7 +1187,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { MemberDescription { name: "".to_string(), type_metadata: variant_type_metadata, - offset: Size::from_bytes(0), + offset: Size::ZERO, size: variant.size, align: variant.align, flags: DIFlags::FlagZero @@ -1248,7 +1248,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { MemberDescription { name, type_metadata: variant_type_metadata, - offset: Size::from_bytes(0), + offset: Size::ZERO, size: variant.size, align: variant.align, flags: DIFlags::FlagZero @@ -1747,7 +1747,7 @@ pub fn create_vtable_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - Size::from_bytes(0).bits(), + Size::ZERO.bits(), cx.tcx.data_layout.pointer_align.abi_bits() as u32, DIFlags::FlagArtificial, ptr::null_mut(), diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs index a4fe85135de..ef0bc3ed330 100644 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -14,9 +14,9 @@ use rustc_mir::interpret::{read_target_uint, const_val_field}; use rustc::hir::def_id::DefId; use rustc::mir; use rustc_data_structures::indexed_vec::Idx; -use rustc::mir::interpret::{GlobalId, MemoryPointer, PrimVal, Allocation, ConstValue, AllocType}; +use rustc::mir::interpret::{GlobalId, Pointer, Scalar, Allocation, ConstValue, AllocType}; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Scalar, Size}; +use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size}; use builder::Builder; use common::{CodegenCx}; use common::{C_bytes, C_struct, C_uint_big, C_undef, 
C_usize}; @@ -28,22 +28,24 @@ use syntax::ast::Mutability; use super::super::callee; use super::FunctionCx; -pub fn primval_to_llvm(cx: &CodegenCx, - cv: PrimVal, - scalar: &Scalar, +pub fn scalar_to_llvm(cx: &CodegenCx, + cv: Scalar, + layout: &layout::Scalar, llty: Type) -> ValueRef { - let bits = if scalar.is_bool() { 1 } else { scalar.value.size(cx).bits() }; + let bitsize = if layout.is_bool() { 1 } else { layout.value.size(cx).bits() }; match cv { - PrimVal::Undef => C_undef(Type::ix(cx, bits)), - PrimVal::Bytes(b) => { - let llval = C_uint_big(Type::ix(cx, bits), b); - if scalar.value == layout::Pointer { + Scalar::Bits { defined, .. } if (defined as u64) < bitsize || defined == 0 => { + C_undef(Type::ix(cx, bitsize)) + }, + Scalar::Bits { bits, .. } => { + let llval = C_uint_big(Type::ix(cx, bitsize), bits); + if layout.value == layout::Pointer { unsafe { llvm::LLVMConstIntToPtr(llval, llty.to_ref()) } } else { consts::bitcast(llval, llty) } }, - PrimVal::Ptr(ptr) => { + Scalar::Ptr(ptr) => { let alloc_type = cx.tcx.alloc_map.lock().get(ptr.alloc_id); let base_addr = match alloc_type { Some(AllocType::Memory(alloc)) => { @@ -68,7 +70,7 @@ pub fn primval_to_llvm(cx: &CodegenCx, &C_usize(cx, ptr.offset.bytes()), 1, ) }; - if scalar.value != layout::Pointer { + if layout.value != layout::Pointer { unsafe { llvm::LLVMConstPtrToInt(llval, llty.to_ref()) } } else { consts::bitcast(llval, llty) @@ -94,10 +96,10 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx, alloc: &Allocation) -> ValueRef { layout.endian, &alloc.bytes[offset..(offset + pointer_size)], ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; - llvals.push(primval_to_llvm( + llvals.push(scalar_to_llvm( cx, - PrimVal::Ptr(MemoryPointer { alloc_id, offset: Size::from_bytes(ptr_offset) }), - &Scalar { + Pointer { alloc_id, offset: Size::from_bytes(ptr_offset) }.into(), + &layout::Scalar { value: layout::Primitive::Pointer, valid_range: 0..=!0 }, @@ -197,13 +199,13 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { c, constant.ty, )?; - if let Some(prim) = field.to_primval() { + if let Some(prim) = field.to_scalar() { let layout = bx.cx.layout_of(field_ty); let scalar = match layout.abi { layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; - Ok(primval_to_llvm( + Ok(scalar_to_llvm( bx.cx, prim, scalar, layout.immediate_llvm_type(bx.cx), )) diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs index e096b5495e9..98383e882c4 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -28,7 +28,7 @@ use std::fmt; use std::ptr; use super::{FunctionCx, LocalRef}; -use super::constant::{primval_to_llvm, const_alloc_to_llvm}; +use super::constant::{scalar_to_llvm, const_alloc_to_llvm}; use super::place::PlaceRef; /// The representation of a Rust value. 
The enum variant is in fact @@ -105,12 +105,12 @@ impl<'a, 'tcx> OperandRef<'tcx> { } let val = match val { - ConstValue::ByVal(x) => { + ConstValue::Scalar(x) => { let scalar = match layout.abi { layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; - let llval = primval_to_llvm( + let llval = scalar_to_llvm( bx.cx, x, scalar, @@ -118,18 +118,18 @@ impl<'a, 'tcx> OperandRef<'tcx> { ); OperandValue::Immediate(llval) }, - ConstValue::ByValPair(a, b) => { + ConstValue::ScalarPair(a, b) => { let (a_scalar, b_scalar) = match layout.abi { layout::Abi::ScalarPair(ref a, ref b) => (a, b), - _ => bug!("from_const: invalid ByValPair layout: {:#?}", layout) + _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout) }; - let a_llval = primval_to_llvm( + let a_llval = scalar_to_llvm( bx.cx, a, a_scalar, layout.scalar_pair_element_llvm_type(bx.cx, 0), ); - let b_llval = primval_to_llvm( + let b_llval = scalar_to_llvm( bx.cx, b, b_scalar, diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index 21436b74731..88b75ff9c09 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -15,6 +15,7 @@ use rustc::hir; use rustc::ty::{self, Ty, TypeFoldable}; use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout}; use rustc_target::spec::PanicStrategy; +use rustc_target::abi::FloatTy; use mono_item::DefPathBasedNames; use type_::Type; @@ -40,7 +41,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, if use_x86_mmx { return Type::x86_mmx(cx) } else { - let element = layout.scalar_llvm_type_at(cx, element, Size::from_bytes(0)); + let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); return Type::vector(&element, count); } } @@ -120,7 +121,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, let field_count = layout.fields.count(); let mut packed = false; - let mut offset = Size::from_bytes(0); + let mut offset = Size::ZERO; let mut prev_align = layout.align; let mut result: Vec = Vec::with_capacity(1 + field_count * 2); for i in layout.fields.index_by_increasing_offset() { @@ -265,7 +266,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { ); FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to() } - _ => self.scalar_llvm_type_at(cx, scalar, Size::from_bytes(0)) + _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO) }; cx.scalar_lltypes.borrow_mut().insert(self.ty, llty); return llty; @@ -324,8 +325,8 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { scalar: &layout::Scalar, offset: Size) -> Type { match scalar.value { layout::Int(i, _) => Type::from_integer(cx, i), - layout::F32 => Type::f32(cx), - layout::F64 => Type::f64(cx), + layout::Float(FloatTy::F32) => Type::f32(cx), + layout::Float(FloatTy::F64) => Type::f64(cx), layout::Pointer => { // If we know the alignment, pick something better than i8. 
let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { @@ -372,7 +373,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } let offset = if index == 0 { - Size::from_bytes(0) + Size::ZERO } else { a.value.size(cx).abi_align(b.value.align(cx)) }; diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index 4115dbe6274..d660b40e9cb 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ b/src/librustc_mir/build/expr/as_rvalue.rs @@ -202,7 +202,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { value: ty::Const::from_bits( this.hir.tcx(), 0, - this.hir.tcx().types.u32), + ty::ParamEnv::empty().and(this.hir.tcx().types.u32)), }, })); box AggregateKind::Generator(closure_id, substs, movability) @@ -374,10 +374,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // Helper to get a `-1` value of the appropriate type fn neg_1_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> { - let bits = self.hir.integer_bit_width(ty); + let param_ty = ty::ParamEnv::empty().and(self.hir.tcx().lift_to_global(&ty).unwrap()); + let bits = self.hir.tcx().layout_of(param_ty).unwrap().size.bits(); let n = (!0u128) >> (128 - bits); let literal = Literal::Value { - value: ty::Const::from_bits(self.hir.tcx(), n, ty) + value: ty::Const::from_bits(self.hir.tcx(), n, param_ty) }; self.literal_operand(span, ty, literal) @@ -386,10 +387,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // Helper to get the minimum value of the appropriate type fn minval_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> { assert!(ty.is_signed()); - let bits = self.hir.integer_bit_width(ty); + let param_ty = ty::ParamEnv::empty().and(self.hir.tcx().lift_to_global(&ty).unwrap()); + let bits = self.hir.tcx().layout_of(param_ty).unwrap().size.bits(); let n = 1 << (bits - 1); let literal = Literal::Value { - value: ty::Const::from_bits(self.hir.tcx(), n, ty) + value: ty::Const::from_bits(self.hir.tcx(), n, param_ty) }; self.literal_operand(span, ty, literal) diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index 5dbe8d850bd..aa5727ee5c7 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -122,9 +122,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { match *match_pair.pattern.kind { PatternKind::Constant { value } => { + let switch_ty = ty::ParamEnv::empty().and(switch_ty); indices.entry(value) .or_insert_with(|| { - options.push(value.unwrap_bits(switch_ty)); + options.push(value.unwrap_bits(self.hir.tcx(), switch_ty)); options.len() - 1 }); true diff --git a/src/librustc_mir/build/misc.rs b/src/librustc_mir/build/misc.rs index 6501dd00fe8..5907a0cff8e 100644 --- a/src/librustc_mir/build/misc.rs +++ b/src/librustc_mir/build/misc.rs @@ -52,17 +52,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // Returns a zero literal operand for the appropriate type, works for // bool, char and integers. 
pub fn zero_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> { - match ty.sty { - ty::TyBool | - ty::TyChar | - ty::TyUint(_) | - ty::TyInt(_) => {} - _ => { - span_bug!(span, "Invalid type for zero_literal: `{:?}`", ty) - } - } let literal = Literal::Value { - value: ty::Const::from_bits(self.hir.tcx(), 0, ty) + value: ty::Const::from_bits(self.hir.tcx(), 0, ty::ParamEnv::empty().and(ty)) }; self.literal_operand(span, ty, literal) diff --git a/src/librustc_mir/hair/cx/expr.rs b/src/librustc_mir/hair/cx/expr.rs index 0d93634981f..f6f98f0732e 100644 --- a/src/librustc_mir/hair/cx/expr.rs +++ b/src/librustc_mir/hair/cx/expr.rs @@ -614,7 +614,8 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let idx = adt_def.variant_index_with_id(variant_id); let (d, o) = adt_def.discriminant_def_for_variant(idx); use rustc::ty::util::IntTypeExt; - let ty = adt_def.repr.discr_type().to_ty(cx.tcx()); + let ty = adt_def.repr.discr_type(); + let ty = ty.to_ty(cx.tcx()); Some((d, o, ty)) } _ => None, @@ -634,7 +635,11 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, }, }, }.to_ref(); - let offset = mk_const(ty::Const::from_bits(cx.tcx, offset as u128, ty)); + let offset = mk_const(ty::Const::from_bits( + cx.tcx, + offset as u128, + cx.param_env.and(ty), + )); match did { Some(did) => { // in case we are offsetting from a computed discriminant diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index 390b82af48a..8ff1738394e 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -21,9 +21,8 @@ use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::hir::map::blocks::FnLikeNode; use rustc::middle::region; use rustc::infer::InferCtxt; -use rustc::ty::layout::{IntegerExt, Size}; use rustc::ty::subst::Subst; -use rustc::ty::{self, Ty, TyCtxt, layout}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::subst::{Kind, Substs}; use syntax::ast::{self, LitKind}; use syntax::attr; @@ -139,18 +138,6 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { } } - pub fn integer_bit_width( - &self, - ty: Ty, - ) -> u64 { - let ty = match ty.sty { - ty::TyInt(ity) => attr::IntType::SignedInt(ity), - ty::TyUint(uty) => attr::IntType::UnsignedInt(uty), - _ => bug!("{} is not an integer", ty), - }; - layout::Integer::from_attr(self.tcx, ty).size().bits() - } - // FIXME: Combine with rustc_mir::hair::pattern::lit_to_const pub fn const_eval_literal( &mut self, @@ -168,13 +155,17 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { }) }; - let clamp = |n| { - let size = self.integer_bit_width(ty); - trace!("clamp {} with size {} and amt {}", n, size, 128 - size); - let amt = 128 - size; - let result = (n << amt) >> amt; - trace!("clamp result: {}", result); - result + let trunc = |n| { + let param_ty = self.param_env.and(self.tcx.lift_to_global(&ty).unwrap()); + let bit_width = self.tcx.layout_of(param_ty).unwrap().size.bits(); + trace!("trunc {} with size {} and shift {}", n, bit_width, 128 - bit_width); + let shift = 128 - bit_width; + let result = (n << shift) >> shift; + trace!("trunc result: {}", result); + ConstValue::Scalar(Scalar::Bits { + bits: result, + defined: bit_width as u8, + }) }; use rustc::mir::interpret::*; @@ -182,25 +173,23 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { LitKind::Str(ref s, _) => { let s = s.as_str(); let id = self.tcx.allocate_bytes(s.as_bytes()); - let ptr = MemoryPointer::new(id, Size::from_bytes(0)); - ConstValue::ByValPair( - PrimVal::Ptr(ptr), - PrimVal::from_u128(s.len() as u128), - ) + let 
value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, self.tcx); + ConstValue::from_byval_value(value) }, LitKind::ByteStr(ref data) => { let id = self.tcx.allocate_bytes(data); - let ptr = MemoryPointer::new(id, Size::from_bytes(0)); - ConstValue::ByVal(PrimVal::Ptr(ptr)) + ConstValue::Scalar(Scalar::Ptr(id.into())) }, - LitKind::Byte(n) => ConstValue::ByVal(PrimVal::Bytes(n as u128)), + LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bits { + bits: n as u128, + defined: 8, + }), LitKind::Int(n, _) if neg => { let n = n as i128; let n = n.overflowing_neg().0; - let n = clamp(n as u128); - ConstValue::ByVal(PrimVal::Bytes(n)) + trunc(n as u128) }, - LitKind::Int(n, _) => ConstValue::ByVal(PrimVal::Bytes(clamp(n))), + LitKind::Int(n, _) => trunc(n), LitKind::Float(n, fty) => { parse_float(n, fty) } @@ -211,8 +200,14 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { }; parse_float(n, fty) } - LitKind::Bool(b) => ConstValue::ByVal(PrimVal::Bytes(b as u128)), - LitKind::Char(c) => ConstValue::ByVal(PrimVal::Bytes(c as u128)), + LitKind::Bool(b) => ConstValue::Scalar(Scalar::Bits { + bits: b as u128, + defined: 8, + }), + LitKind::Char(c) => ConstValue::Scalar(Scalar::Bits { + bits: c as u128, + defined: 32, + }), }; Literal::Value { value: ty::Const::from_const_value(self.tcx, lit, ty) diff --git a/src/librustc_mir/hair/pattern/_match.rs b/src/librustc_mir/hair/pattern/_match.rs index 70e8cd336a3..a7b2e205d00 100644 --- a/src/librustc_mir/hair/pattern/_match.rs +++ b/src/librustc_mir/hair/pattern/_match.rs @@ -198,7 +198,7 @@ impl<'a, 'tcx> MatchCheckCtxt<'a, 'tcx> { value: ty::Const::from_bits( tcx, *b as u128, - tcx.types.u8) + ty::ParamEnv::empty().and(tcx.types.u8)) } }) }).collect() @@ -958,7 +958,7 @@ fn slice_pat_covered_by_constructor<'tcx>( { match pat.kind { box PatternKind::Constant { value } => { - let b = value.unwrap_bits(pat.ty); + let b = value.unwrap_bits(tcx, ty::ParamEnv::empty().and(pat.ty)); assert_eq!(b as u8 as u128, b); if b as u8 != *ch { return Ok(false); @@ -979,9 +979,9 @@ fn constructor_covered_by_range<'a, 'tcx>( ty: Ty<'tcx>, ) -> Result { trace!("constructor_covered_by_range {:#?}, {:#?}, {:#?}, {}", ctor, from, to, ty); - let cmp_from = |c_from| compare_const_vals(tcx, c_from, from, ty) + let cmp_from = |c_from| compare_const_vals(tcx, c_from, from, ty::ParamEnv::empty().and(ty)) .map(|res| res != Ordering::Less); - let cmp_to = |c_to| compare_const_vals(tcx, c_to, to, ty); + let cmp_to = |c_to| compare_const_vals(tcx, c_to, to, ty::ParamEnv::empty().and(ty)); macro_rules! 
some_or_ok { ($e:expr) => { match $e { diff --git a/src/librustc_mir/hair/pattern/mod.rs b/src/librustc_mir/hair/pattern/mod.rs index 95ff5c24ecc..32cad88edb0 100644 --- a/src/librustc_mir/hair/pattern/mod.rs +++ b/src/librustc_mir/hair/pattern/mod.rs @@ -20,9 +20,8 @@ use interpret::{const_val_field, const_variant_index, self}; use rustc::middle::const_val::ConstVal; use rustc::mir::{fmt_const_val, Field, BorrowKind, Mutability}; -use rustc::mir::interpret::{PrimVal, GlobalId, ConstValue, Value}; +use rustc::mir::interpret::{Scalar, GlobalId, ConstValue, Value}; use rustc::ty::{self, TyCtxt, AdtDef, Ty, Region}; -use rustc::ty::layout::Size; use rustc::ty::subst::{Substs, Kind}; use rustc::hir::{self, PatKind, RangeEnd}; use rustc::hir::def::{Def, CtorKind}; @@ -360,8 +359,14 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { (PatternKind::Constant { value: lo }, PatternKind::Constant { value: hi }) => { use std::cmp::Ordering; - match (end, compare_const_vals(self.tcx, lo, hi, ty).unwrap()) { - (RangeEnd::Excluded, Ordering::Less) => + let cmp = compare_const_vals( + self.tcx, + lo, + hi, + self.param_env.and(ty), + ); + match (end, cmp) { + (RangeEnd::Excluded, Some(Ordering::Less)) => PatternKind::Range { lo, hi, end }, (RangeEnd::Excluded, _) => { span_err!( @@ -372,7 +377,8 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ); PatternKind::Wild }, - (RangeEnd::Included, Ordering::Greater) => { + (RangeEnd::Included, None) | + (RangeEnd::Included, Some(Ordering::Greater)) => { let mut err = struct_span_err!( self.tcx.sess, lo_expr.span, @@ -393,7 +399,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { err.emit(); PatternKind::Wild }, - (RangeEnd::Included, _) => PatternKind::Range { lo, hi, end }, + (RangeEnd::Included, Some(_)) => PatternKind::Range { lo, hi, end }, } } _ => PatternKind::Wild @@ -1037,7 +1043,7 @@ pub fn compare_const_vals<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, a: &'tcx ty::Const<'tcx>, b: &'tcx ty::Const<'tcx>, - ty: Ty<'tcx>, + ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> Option { trace!("compare_const_vals: {:?}, {:?}", a, b); @@ -1052,15 +1058,15 @@ pub fn compare_const_vals<'a, 'tcx>( let fallback = || from_bool(a == b); // Use the fallback if any type differs - if a.ty != b.ty || a.ty != ty { + if a.ty != b.ty || a.ty != ty.value { return fallback(); } // FIXME: This should use assert_bits(ty) instead of use_bits // but triggers possibly bugs due to mismatching of arrays and slices - if let (Some(a), Some(b)) = (a.to_bits(ty), b.to_bits(ty)) { + if let (Some(a), Some(b)) = (a.to_bits(tcx, ty), b.to_bits(tcx, ty)) { use ::rustc_apfloat::Float; - return match ty.sty { + return match ty.value.sty { ty::TyFloat(ast::FloatTy::F32) => { let l = ::rustc_apfloat::ieee::Single::from_bits(a); let r = ::rustc_apfloat::ieee::Single::from_bits(b); @@ -1072,33 +1078,37 @@ pub fn compare_const_vals<'a, 'tcx>( l.partial_cmp(&r) }, ty::TyInt(_) => { - let a = interpret::sign_extend(tcx, a, ty).expect("layout error for TyInt"); - let b = interpret::sign_extend(tcx, b, ty).expect("layout error for TyInt"); + let a = interpret::sign_extend(tcx, a, ty.value).expect("layout error for TyInt"); + let b = interpret::sign_extend(tcx, b, ty.value).expect("layout error for TyInt"); Some((a as i128).cmp(&(b as i128))) }, _ => Some(a.cmp(&b)), } } - if let ty::TyRef(_, rty, _) = ty.sty { + if let ty::TyRef(_, rty, _) = ty.value.sty { if let ty::TyStr = rty.sty { match (a.to_byval_value(), b.to_byval_value()) { ( - Some(Value::ByValPair( - PrimVal::Ptr(ptr_a), - PrimVal::Bytes(size_a)) - ), - 
Some(Value::ByValPair( - PrimVal::Ptr(ptr_b), - PrimVal::Bytes(size_b)) - ) - ) if size_a == size_b => { - if ptr_a.offset == Size::from_bytes(0) && ptr_b.offset == Size::from_bytes(0) { - let map = tcx.alloc_map.lock(); - let alloc_a = map.unwrap_memory(ptr_a.alloc_id); - let alloc_b = map.unwrap_memory(ptr_b.alloc_id); - if alloc_a.bytes.len() as u64 == size_a as u64 { - return from_bool(alloc_a == alloc_b); + Some(Value::ScalarPair( + Scalar::Ptr(ptr_a), + len_a, + )), + Some(Value::ScalarPair( + Scalar::Ptr(ptr_b), + len_b, + )) + ) if ptr_a.offset.bytes() == 0 && ptr_b.offset.bytes() == 0 => { + if let Ok(len_a) = len_a.to_bits(tcx.data_layout.pointer_size) { + if let Ok(len_b) = len_b.to_bits(tcx.data_layout.pointer_size) { + if len_a == len_b { + let map = tcx.alloc_map.lock(); + let alloc_a = map.unwrap_memory(ptr_a.alloc_id); + let alloc_b = map.unwrap_memory(ptr_b.alloc_id); + if alloc_a.bytes.len() as u128 == len_a { + return from_bool(alloc_a == alloc_b); + } + } } } } @@ -1123,24 +1133,23 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, LitKind::Str(ref s, _) => { let s = s.as_str(); let id = tcx.allocate_bytes(s.as_bytes()); - let ptr = MemoryPointer::new(id, Size::from_bytes(0)); - ConstValue::ByValPair( - PrimVal::Ptr(ptr), - PrimVal::from_u128(s.len() as u128), - ) + let value = Scalar::Ptr(id.into()).to_value_with_len(s.len() as u64, tcx); + ConstValue::from_byval_value(value) }, LitKind::ByteStr(ref data) => { let id = tcx.allocate_bytes(data); - let ptr = MemoryPointer::new(id, Size::from_bytes(0)); - ConstValue::ByVal(PrimVal::Ptr(ptr)) + ConstValue::Scalar(Scalar::Ptr(id.into())) }, - LitKind::Byte(n) => ConstValue::ByVal(PrimVal::Bytes(n as u128)), + LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bits { + bits: n as u128, + defined: 8, + }), LitKind::Int(n, _) => { enum Int { Signed(IntTy), Unsigned(UintTy), } - let ty = match ty.sty { + let ity = match ty.sty { ty::TyInt(IntTy::Isize) => Int::Signed(tcx.sess.target.isize_ty), ty::TyInt(other) => Int::Signed(other), ty::TyUint(UintTy::Usize) => Int::Unsigned(tcx.sess.target.usize_ty), @@ -1148,8 +1157,8 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, _ => bug!(), }; // This converts from LitKind::Int (which is sign extended) to - // PrimVal::Bytes (which is zero extended) - let n = match ty { + // Scalar::Bytes (which is zero extended) + let n = match ity { // FIXME(oli-obk): are these casts correct? Int::Signed(IntTy::I8) if neg => (n as i8).overflowing_neg().0 as u8 as u128, @@ -1168,7 +1177,11 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, Int::Signed(IntTy::I128)| Int::Unsigned(UintTy::U128) => n, _ => bug!(), }; - ConstValue::ByVal(PrimVal::Bytes(n)) + let defined = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size.bits() as u8; + ConstValue::Scalar(Scalar::Bits { + bits: n, + defined, + }) }, LitKind::Float(n, fty) => { parse_float(n, fty, neg)? @@ -1180,8 +1193,14 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, }; parse_float(n, fty, neg)? 
} - LitKind::Bool(b) => ConstValue::ByVal(PrimVal::Bytes(b as u128)), - LitKind::Char(c) => ConstValue::ByVal(PrimVal::Bytes(c as u128)), + LitKind::Bool(b) => ConstValue::Scalar(Scalar::Bits { + bits: b as u128, + defined: 8, + }), + LitKind::Char(c) => ConstValue::Scalar(Scalar::Bits { + bits: c as u128, + defined: 32, + }), }; Ok(ty::Const::from_const_value(tcx, lit, ty)) } @@ -1194,7 +1213,7 @@ pub fn parse_float<'tcx>( let num = num.as_str(); use rustc_apfloat::ieee::{Single, Double}; use rustc_apfloat::Float; - let bits = match fty { + let (bits, defined) = match fty { ast::FloatTy::F32 => { num.parse::<f32>().map_err(|_| ())?; let mut f = num.parse::<Single>().unwrap_or_else(|e| { @@ -1203,7 +1222,7 @@ if neg { f = -f; } - f.to_bits() + (f.to_bits(), 32) } ast::FloatTy::F64 => { num.parse::<f64>().map_err(|_| ())?; let mut f = num.parse::<Double>().unwrap_or_else(|e| { if neg { f = -f; } - f.to_bits() + (f.to_bits(), 64) } }; - Ok(ConstValue::ByVal(PrimVal::Bytes(bits))) + Ok(ConstValue::Scalar(Scalar::Bits { bits, defined })) } diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index b5568b83339..e69e7a522ab 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -4,26 +4,27 @@ use syntax::ast::{FloatTy, IntTy, UintTy}; use rustc_apfloat::ieee::{Single, Double}; use super::{EvalContext, Machine}; -use rustc::mir::interpret::{PrimVal, EvalResult, MemoryPointer, PointerArithmetic}; +use rustc::mir::interpret::{Scalar, EvalResult, Pointer, PointerArithmetic}; use rustc_apfloat::Float; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - pub(super) fn cast_primval( + pub(super) fn cast_scalar( &self, - val: PrimVal, + val: Scalar, src_ty: Ty<'tcx>, dest_ty: Ty<'tcx>, - ) -> EvalResult<'tcx, PrimVal> { + ) -> EvalResult<'tcx, Scalar> { use rustc::ty::TypeVariants::*; trace!("Casting {:?}: {:?} to {:?}", val, src_ty, dest_ty); match val { - PrimVal::Undef => Ok(PrimVal::Undef), - PrimVal::Ptr(ptr) => self.cast_from_ptr(ptr, dest_ty), - PrimVal::Bytes(b) => { + Scalar::Bits { defined: 0, .. } => Ok(val), + Scalar::Ptr(ptr) => self.cast_from_ptr(ptr, dest_ty), + Scalar::Bits { bits, .. } => { + // TODO(oli-obk): check defined bits here match src_ty.sty { - TyFloat(fty) => self.cast_from_float(b, fty, dest_ty), - _ => self.cast_from_int(b, src_ty, dest_ty), + TyFloat(fty) => self.cast_from_float(bits, fty, dest_ty), + _ => self.cast_from_int(bits, src_ty, dest_ty), } } } @@ -34,7 +35,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { v: u128, src_ty: Ty<'tcx>, dest_ty: Ty<'tcx>, - ) -> EvalResult<'tcx, PrimVal> { + ) -> EvalResult<'tcx, Scalar> { let signed = self.layout_of(src_ty)?.abi.is_signed(); let v = if signed { self.sign_extend(v, src_ty)?
@@ -46,20 +47,38 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { match dest_ty.sty { TyInt(_) | TyUint(_) => { let v = self.truncate(v, dest_ty)?; - Ok(PrimVal::Bytes(v)) + Ok(Scalar::Bits { + bits: v, + defined: self.layout_of(dest_ty).unwrap().size.bits() as u8, + }) } - TyFloat(FloatTy::F32) if signed => Ok(PrimVal::Bytes(Single::from_i128(v as i128).value.to_bits())), - TyFloat(FloatTy::F64) if signed => Ok(PrimVal::Bytes(Double::from_i128(v as i128).value.to_bits())), - TyFloat(FloatTy::F32) => Ok(PrimVal::Bytes(Single::from_u128(v).value.to_bits())), - TyFloat(FloatTy::F64) => Ok(PrimVal::Bytes(Double::from_u128(v).value.to_bits())), + TyFloat(FloatTy::F32) if signed => Ok(Scalar::Bits { + bits: Single::from_i128(v as i128).value.to_bits(), + defined: 32, + }), + TyFloat(FloatTy::F64) if signed => Ok(Scalar::Bits { + bits: Double::from_i128(v as i128).value.to_bits(), + defined: 64, + }), + TyFloat(FloatTy::F32) => Ok(Scalar::Bits { + bits: Single::from_u128(v).value.to_bits(), + defined: 32, + }), + TyFloat(FloatTy::F64) => Ok(Scalar::Bits { + bits: Double::from_u128(v).value.to_bits(), + defined: 64, + }), - TyChar if v as u8 as u128 == v => Ok(PrimVal::Bytes(v)), + TyChar if v as u8 as u128 == v => Ok(Scalar::Bits { bits: v, defined: 32 }), TyChar => err!(InvalidChar(v)), // No alignment check needed for raw pointers. But we have to truncate to target ptr size. TyRawPtr(_) => { - Ok(PrimVal::Bytes(self.memory.truncate_to_ptr(v).0 as u128)) + Ok(Scalar::Bits { + bits: self.memory.truncate_to_ptr(v).0 as u128, + defined: self.memory.pointer_size().bits() as u8, + }) }, // Casts to bool are not permitted by rustc, no need to handle them here. @@ -67,47 +86,72 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } } - fn cast_from_float(&self, bits: u128, fty: FloatTy, dest_ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + fn cast_from_float(&self, bits: u128, fty: FloatTy, dest_ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> { use rustc::ty::TypeVariants::*; use rustc_apfloat::FloatConvert; match dest_ty.sty { // float -> uint TyUint(t) => { - let width = t.bit_width().unwrap_or(self.memory.pointer_size().bytes() as usize * 8); + let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); match fty { - FloatTy::F32 => Ok(PrimVal::Bytes(Single::from_bits(bits).to_u128(width).value)), - FloatTy::F64 => Ok(PrimVal::Bytes(Double::from_bits(bits).to_u128(width).value)), + FloatTy::F32 => Ok(Scalar::Bits { + bits: Single::from_bits(bits).to_u128(width).value, + defined: width as u8, + }), + FloatTy::F64 => Ok(Scalar::Bits { + bits: Double::from_bits(bits).to_u128(width).value, + defined: width as u8, + }), } }, // float -> int TyInt(t) => { - let width = t.bit_width().unwrap_or(self.memory.pointer_size().bytes() as usize * 8); + let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); match fty { - FloatTy::F32 => Ok(PrimVal::from_i128(Single::from_bits(bits).to_i128(width).value)), - FloatTy::F64 => Ok(PrimVal::from_i128(Double::from_bits(bits).to_i128(width).value)), + FloatTy::F32 => Ok(Scalar::Bits { + bits: Single::from_bits(bits).to_i128(width).value as u128, + defined: width as u8, + }), + FloatTy::F64 => Ok(Scalar::Bits { + bits: Double::from_bits(bits).to_i128(width).value as u128, + defined: width as u8, + }), } }, // f64 -> f32 TyFloat(FloatTy::F32) if fty == FloatTy::F64 => { - Ok(PrimVal::Bytes(Single::to_bits(Double::from_bits(bits).convert(&mut false).value))) + Ok(Scalar::Bits { 
+ bits: Single::to_bits(Double::from_bits(bits).convert(&mut false).value), + defined: 32, + }) }, // f32 -> f64 TyFloat(FloatTy::F64) if fty == FloatTy::F32 => { - Ok(PrimVal::Bytes(Double::to_bits(Single::from_bits(bits).convert(&mut false).value))) + Ok(Scalar::Bits { + bits: Double::to_bits(Single::from_bits(bits).convert(&mut false).value), + defined: 64, + }) }, // identity cast - TyFloat(_) => Ok(PrimVal::Bytes(bits)), + TyFloat(FloatTy:: F64) => Ok(Scalar::Bits { + bits, + defined: 64, + }), + TyFloat(FloatTy:: F32) => Ok(Scalar::Bits { + bits, + defined: 32, + }), _ => err!(Unimplemented(format!("float to {:?} cast", dest_ty))), } } - fn cast_from_ptr(&self, ptr: MemoryPointer, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> { + fn cast_from_ptr(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> { use rustc::ty::TypeVariants::*; match ty.sty { // Casting to a reference or fn pointer is not permitted by rustc, no need to support it here. TyRawPtr(_) | TyInt(IntTy::Isize) | - TyUint(UintTy::Usize) => Ok(PrimVal::Ptr(ptr)), + TyUint(UintTy::Usize) => Ok(ptr.into()), TyInt(_) | TyUint(_) => err!(ReadPointerAsBytes), _ => err!(Unimplemented(format!("ptr to {:?} cast", ty))), } diff --git a/src/librustc_mir/interpret/const_eval.rs b/src/librustc_mir/interpret/const_eval.rs index 1b4cde2f6ca..b1f290d7b61 100644 --- a/src/librustc_mir/interpret/const_eval.rs +++ b/src/librustc_mir/interpret/const_eval.rs @@ -12,7 +12,7 @@ use syntax::codemap::DUMMY_SP; use rustc::mir::interpret::{ EvalResult, EvalError, EvalErrorKind, GlobalId, - Value, Pointer, PrimVal, AllocId, Allocation, ConstValue, + Value, Scalar, AllocId, Allocation, ConstValue, }; use super::{Place, EvalContext, StackPopCleanup, ValTy, PlaceExtra, Memory, MemoryKind}; @@ -65,7 +65,7 @@ pub fn eval_promoted<'a, 'mir, 'tcx>( cid: GlobalId<'tcx>, mir: &'mir mir::Mir<'tcx>, param_env: ty::ParamEnv<'tcx>, -) -> Option<(Value, Pointer, Ty<'tcx>)> { +) -> Option<(Value, Scalar, Ty<'tcx>)> { ecx.with_fresh_body(|ecx| { let res = eval_body_using_ecx(ecx, cid, Some(mir), param_env); match res { @@ -82,7 +82,7 @@ pub fn eval_body<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, cid: GlobalId<'tcx>, param_env: ty::ParamEnv<'tcx>, -) -> Option<(Value, Pointer, Ty<'tcx>)> { +) -> Option<(Value, Scalar, Ty<'tcx>)> { let (res, ecx) = eval_body_and_ecx(tcx, cid, None, param_env); match res { Ok(val) => Some(val), @@ -100,18 +100,18 @@ pub fn value_to_const_value<'tcx>( ) -> &'tcx ty::Const<'tcx> { let layout = ecx.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty)).unwrap(); match (val, &layout.abi) { - (Value::ByVal(PrimVal::Undef), _) if layout.is_zst() => {}, + (Value::Scalar(Scalar::Bits { defined: 0, ..}), _) if layout.is_zst() => {}, (Value::ByRef(..), _) | - (Value::ByVal(_), &layout::Abi::Scalar(_)) | - (Value::ByValPair(..), &layout::Abi::ScalarPair(..)) => {}, + (Value::Scalar(_), &layout::Abi::Scalar(_)) | + (Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) => {}, _ => bug!("bad value/layout combo: {:#?}, {:#?}", val, layout), } let val = (|| { match val { - Value::ByVal(val) => Ok(ConstValue::ByVal(val)), - Value::ByValPair(a, b) => Ok(ConstValue::ByValPair(a, b)), + Value::Scalar(val) => Ok(ConstValue::Scalar(val)), + Value::ScalarPair(a, b) => Ok(ConstValue::ScalarPair(a, b)), Value::ByRef(ptr, align) => { - let ptr = ptr.primval.to_ptr().unwrap(); + let ptr = ptr.to_ptr().unwrap(); let alloc = ecx.memory.get(ptr.alloc_id)?; assert!(alloc.align.abi() >= align.abi()); assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= 
layout.size.bytes()); @@ -136,7 +136,7 @@ fn eval_body_and_ecx<'a, 'mir, 'tcx>( cid: GlobalId<'tcx>, mir: Option<&'mir mir::Mir<'tcx>>, param_env: ty::ParamEnv<'tcx>, -) -> (EvalResult<'tcx, (Value, Pointer, Ty<'tcx>)>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) { +) -> (EvalResult<'tcx, (Value, Scalar, Ty<'tcx>)>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) { debug!("eval_body_and_ecx: {:?}, {:?}", cid, param_env); // we start out with the best span we have // and try improving it down the road when more information is available @@ -152,7 +152,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>( cid: GlobalId<'tcx>, mir: Option<&'mir mir::Mir<'tcx>>, param_env: ty::ParamEnv<'tcx>, -) -> EvalResult<'tcx, (Value, Pointer, Ty<'tcx>)> { +) -> EvalResult<'tcx, (Value, Scalar, Ty<'tcx>)> { debug!("eval_body: {:?}, {:?}", cid, param_env); let tcx = ecx.tcx.tcx; let mut mir = match mir { @@ -319,20 +319,31 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { "min_align_of" => { let elem_ty = substs.type_at(0); let elem_align = ecx.layout_of(elem_ty)?.align.abi(); - let align_val = PrimVal::from_u128(elem_align as u128); - ecx.write_primval(dest, align_val, dest_layout.ty)?; + let align_val = Scalar::Bits { + bits: elem_align as u128, + defined: dest_layout.size.bits() as u8, + }; + ecx.write_scalar(dest, align_val, dest_layout.ty)?; } "size_of" => { let ty = substs.type_at(0); let size = ecx.layout_of(ty)?.size.bytes() as u128; - ecx.write_primval(dest, PrimVal::from_u128(size), dest_layout.ty)?; + let size_val = Scalar::Bits { + bits: size, + defined: dest_layout.size.bits() as u8, + }; + ecx.write_scalar(dest, size_val, dest_layout.ty)?; } "type_id" => { let ty = substs.type_at(0); let type_id = ecx.tcx.type_id_hash(ty) as u128; - ecx.write_primval(dest, PrimVal::from_u128(type_id), dest_layout.ty)?; + let id_val = Scalar::Bits { + bits: type_id, + defined: dest_layout.size.bits() as u8, + }; + ecx.write_scalar(dest, id_val, dest_layout.ty)?; } name => return Err(ConstEvalError::NeedsRfc(format!("calling intrinsic `{}`", name)).into()), @@ -349,12 +360,12 @@ impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { fn try_ptr_op<'a>( _ecx: &EvalContext<'a, 'mir, 'tcx, Self>, _bin_op: mir::BinOp, - left: PrimVal, + left: Scalar, _left_ty: Ty<'tcx>, - right: PrimVal, + right: Scalar, _right_ty: Ty<'tcx>, - ) -> EvalResult<'tcx, Option<(PrimVal, bool)>> { - if left.is_bytes() && right.is_bytes() { + ) -> EvalResult<'tcx, Option<(Scalar, bool)>> { + if left.is_bits() && right.is_bits() { Ok(None) } else { Err( @@ -419,7 +430,7 @@ pub fn const_val_field<'a, 'tcx>( let layout = ecx.layout_of(ty)?; let (ptr, align) = match value { Value::ByRef(ptr, align) => (ptr, align), - Value::ByValPair(..) | Value::ByVal(_) => { + Value::ScalarPair(..) 
| Value::Scalar(_) => { let ptr = ecx.alloc_ptr(ty)?.into(); ecx.write_value_to_ptr(value, ptr, layout.align, ty)?; (ptr, layout.align) } }; @@ -436,9 +447,9 @@ pub fn const_val_field<'a, 'tcx>( new_value = ecx.try_read_by_ref(new_value, layout.ty)?; use rustc_data_structures::indexed_vec::Idx; match (value, new_value) { - (Value::ByVal(_), Value::ByRef(..)) | - (Value::ByValPair(..), Value::ByRef(..)) | - (Value::ByVal(_), Value::ByValPair(..)) => bug!( + (Value::Scalar(_), Value::ByRef(..)) | + (Value::ScalarPair(..), Value::ByRef(..)) | + (Value::Scalar(_), Value::ScalarPair(..)) => bug!( "field {} of {:?} yielded {:?}", field.index(), value, @@ -469,16 +480,15 @@ pub fn const_variant_index<'a, 'tcx>( let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); let value = ecx.const_value_to_value(val, ty)?; let (ptr, align) = match value { - Value::ByValPair(..) | Value::ByVal(_) => { + Value::ScalarPair(..) | Value::Scalar(_) => { let layout = ecx.layout_of(ty)?; - let ptr = ecx.memory.allocate(layout.size, layout.align, Some(MemoryKind::Stack))?; - let ptr: Pointer = ptr.into(); + let ptr = ecx.memory.allocate(layout.size, layout.align, Some(MemoryKind::Stack))?.into(); ecx.write_value_to_ptr(value, ptr, layout.align, ty)?; (ptr, layout.align) }, Value::ByRef(ptr, align) => (ptr, align), }; - let place = Place::from_primval_ptr(ptr, align); + let place = Place::from_scalar_ptr(ptr, align); ecx.read_discriminant_as_variant_index(place, ty) } diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 15103b78ca8..9f6e376d306 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -14,8 +14,8 @@ use rustc::middle::const_val::FrameInfo; use syntax::codemap::{self, Span}; use syntax::ast::Mutability; use rustc::mir::interpret::{ - GlobalId, Value, Pointer, PrimVal, PrimValKind, - EvalError, EvalResult, EvalErrorKind, MemoryPointer, ConstValue, + GlobalId, Value, Scalar, + EvalError, EvalResult, EvalErrorKind, Pointer, ConstValue, }; use std::mem; @@ -74,9 +74,9 @@ pub struct Frame<'mir, 'tcx: 'mir> { /// The list of locals for this stack frame, stored in order as /// `[return_ptr, arguments..., variables..., temporaries...]`. The locals are stored as `Option<Value>`s. /// `None` represents a local that is currently dead, while a live local - /// can either directly contain `PrimVal` or refer to some part of an `Allocation`. + /// can either directly contain `Scalar` or refer to some part of an `Allocation`. /// - /// Before being initialized, arguments are `Value::ByVal(PrimVal::Undef)` and other locals are `None`. + /// Before being initialized, arguments are `Value::Scalar(Scalar::undef())` and other locals are `None`.
pub locals: IndexVec<mir::Local, Option<Value>>, //////////////////////////////////////////////////////////////////////////////// @@ -203,7 +203,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M r } - pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, MemoryPointer> { + pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, Pointer> { let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "cannot alloc memory for unsized type"); @@ -230,10 +230,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { let ptr = self.memory.allocate_bytes(s.as_bytes()); - Ok(Value::ByValPair( - PrimVal::Ptr(ptr), - PrimVal::from_u128(s.len() as u128), - )) + Ok(Scalar::Ptr(ptr).to_value_with_len(s.len() as u64, self.tcx.tcx)) } pub fn const_value_to_value( @@ -245,10 +242,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ConstValue::ByRef(alloc, offset) => { // FIXME: Allocate new AllocId for all constants inside let id = self.memory.allocate_value(alloc.clone(), Some(MemoryKind::Stack))?; - Ok(Value::ByRef(MemoryPointer::new(id, offset).into(), alloc.align)) + Ok(Value::ByRef(Pointer::new(id, offset).into(), alloc.align)) }, - ConstValue::ByValPair(a, b) => Ok(Value::ByValPair(a, b)), - ConstValue::ByVal(val) => Ok(Value::ByVal(val)), + ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a, b)), + ConstValue::Scalar(val) => Ok(Value::Scalar(val)), } } @@ -408,7 +405,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ::log_settings::settings().indentation += 1; let locals = if mir.local_decls.len() > 1 { - let mut locals = IndexVec::from_elem(Some(Value::ByVal(PrimVal::Undef)), &mir.local_decls); + let mut locals = IndexVec::from_elem(Some(Value::Scalar(Scalar::undef())), &mir.local_decls); match self.tcx.describe_def(instance.def_id()) { // statics and constants don't have `Storage*` statements, no need to look for them Some(Def::Static(..)) | Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => {}, @@ -543,9 +540,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } UnaryOp(un_op, ref operand) => { - let val = self.eval_operand_to_primval(operand)?; + let val = self.eval_operand_to_scalar(operand)?; let val = self.unary_op(un_op, val, dest_ty)?; - self.write_primval( + self.write_scalar( dest, val, dest_ty, @@ -596,7 +593,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M // FIXME: speed up repeat filling for i in 0..length { - let elem_dest = dest.offset(elem_size * i as u64, &self)?; + let elem_dest = dest.ptr_offset(elem_size * i as u64, &self)?; self.write_value_to_ptr(value, elem_dest, dest_align, elem_ty)?; } } @@ -606,9 +603,13 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let src = self.eval_place(place)?; let ty = self.place_ty(place); let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx); - self.write_primval( + let defined = self.memory.pointer_size().bits() as u8; + self.write_scalar( dest, - PrimVal::from_u128(len as u128), + Scalar::Bits { + bits: len as u128, + defined, + }, dest_ty, )?; } @@ -621,7 +622,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let val = match extra { PlaceExtra::None => ptr.to_value(), - PlaceExtra::Length(len) => ptr.to_value_with_len(len), + PlaceExtra::Length(len) => ptr.to_value_with_len(len, self.tcx.tcx),
PlaceExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable), PlaceExtra::DowncastVariant(..) => { bug!("attempted to take a reference to an enum downcast place") @@ -644,9 +645,13 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "SizeOf nullary MIR operator called for unsized type"); - self.write_primval( + let defined = self.memory.pointer_size().bits() as u8; + self.write_scalar( dest, - PrimVal::from_u128(layout.size.bytes() as u128), + Scalar::Bits { + bits: layout.size.bytes() as u128, + defined, + }, dest_ty, )?; } @@ -668,9 +673,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M match (src.value, self.type_is_fat_ptr(dest_ty)) { (Value::ByRef { .. }, _) | // pointers to extern types - (Value::ByVal(_),_) | + (Value::Scalar(_),_) | // slices and trait objects to other slices/trait objects - (Value::ByValPair(..), true) => { + (Value::ScalarPair(..), true) => { let valty = ValTy { value: src.value, ty: dest_ty, @@ -678,9 +683,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M self.write_value(valty, dest)?; } // slices and trait objects to thin pointers (dropping the metadata) - (Value::ByValPair(data, _), false) => { + (Value::ScalarPair(data, _), false) => { let valty = ValTy { - value: Value::ByVal(data), + value: Value::Scalar(data), ty: dest_ty, }; self.write_value(valty, dest)?; @@ -694,9 +699,17 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let discr_val = def .discriminant_for_variant(*self.tcx, index) .val; - return self.write_primval( + let defined = self + .layout_of(dest_ty) + .unwrap() + .size + .bits() as u8; + return self.write_scalar( dest, - PrimVal::Bytes(discr_val), + Scalar::Bits { + bits: discr_val, + defined, + }, dest_ty); } } @@ -704,10 +717,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M layout::Variants::NicheFilling { .. 
} => {}, } - let src_val = self.value_to_primval(src)?; - let dest_val = self.cast_primval(src_val, src.ty, dest_ty)?; + let src_val = self.value_to_scalar(src)?; + let dest_val = self.cast_scalar(src_val, src.ty, dest_ty)?; let valty = ValTy { - value: Value::ByVal(dest_val), + value: Value::Scalar(dest_val), ty: dest_ty, }; self.write_value(valty, dest)?; @@ -729,7 +742,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ).ok_or_else(|| EvalErrorKind::TypeckError.into()); let fn_ptr = self.memory.create_fn_alloc(instance?); let valty = ValTy { - value: Value::ByVal(PrimVal::Ptr(fn_ptr)), + value: Value::Scalar(fn_ptr.into()), ty: dest_ty, }; self.write_value(valty, dest)?; @@ -765,7 +778,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ); let fn_ptr = self.memory.create_fn_alloc(instance); let valty = ValTy { - value: Value::ByVal(PrimVal::Ptr(fn_ptr)), + value: Value::Scalar(fn_ptr.into()), ty: dest_ty, }; self.write_value(valty, dest)?; @@ -780,7 +793,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let ty = self.place_ty(place); let place = self.eval_place(place)?; let discr_val = self.read_discriminant_value(place, ty)?; - self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?; + let defined = self.layout_of(ty).unwrap().size.bits() as u8; + self.write_scalar(dest, Scalar::Bits { + bits: discr_val, + defined, + }, dest_ty)?; } } @@ -798,12 +815,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } } - pub(super) fn eval_operand_to_primval( + pub(super) fn eval_operand_to_scalar( &mut self, op: &mir::Operand<'tcx>, - ) -> EvalResult<'tcx, PrimVal> { + ) -> EvalResult<'tcx, Scalar> { let valty = self.eval_operand(op)?; - self.value_to_primval(valty) + self.value_to_scalar(valty) } pub(crate) fn operands_to_args( @@ -901,7 +918,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let (discr_place, discr) = self.place_field(place, mir::Field::new(0), layout)?; trace!("discr place: {:?}, {:?}", discr_place, discr); - let raw_discr = self.value_to_primval(ValTy { + let raw_discr = self.value_to_scalar(ValTy { value: self.read_place(discr_place)?, ty: discr.ty })?; @@ -910,22 +927,22 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M // FIXME: should we catch invalid discriminants here? layout::Variants::Tagged { .. } => { if discr.ty.is_signed() { - let i = raw_discr.to_bytes()? as i128; + let i = raw_discr.to_bits(discr.size)? as i128; // going from layout tag type to typeck discriminant type // requires first sign extending with the layout discriminant - let amt = 128 - discr.size.bits(); - let sexted = (i << amt) >> amt; + let shift = 128 - discr.size.bits(); + let sexted = (i << shift) >> shift; // and then zeroing with the typeck discriminant type let discr_ty = ty .ty_adt_def().expect("tagged layout corresponds to adt") .repr .discr_type(); let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty); - let amt = 128 - discr_ty.size().bits(); + let shift = 128 - discr_ty.size().bits(); let truncatee = sexted as u128; - (truncatee << amt) >> amt + (truncatee << shift) >> shift } else { - raw_discr.to_bytes()? + raw_discr.to_bits(discr.size)? 
} }, layout::Variants::NicheFilling { @@ -937,12 +954,15 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let variants_start = *niche_variants.start() as u128; let variants_end = *niche_variants.end() as u128; match raw_discr { - PrimVal::Ptr(_) => { + Scalar::Ptr(_) => { assert!(niche_start == 0); assert!(variants_start == variants_end); dataful_variant as u128 }, - PrimVal::Bytes(raw_discr) => { + Scalar::Bits { bits: raw_discr, defined } => { + if defined < discr.size.bits() as u8 { + return err!(ReadUndefBytes); + } let discr = raw_discr.wrapping_sub(niche_start) .wrapping_add(variants_start); if variants_start <= discr && discr <= variants_end { @@ -951,7 +971,6 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M dataful_variant as u128 } }, - PrimVal::Undef => return err!(ReadUndefBytes), } } }; @@ -986,11 +1005,14 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M // their computation, but the in-memory tag is the smallest possible // representation let size = tag.value.size(self.tcx.tcx).bits(); - let amt = 128 - size; - let discr_val = (discr_val << amt) >> amt; + let shift = 128 - size; + let discr_val = (discr_val << shift) >> shift; let (discr_dest, tag) = self.place_field(dest, mir::Field::new(0), layout)?; - self.write_primval(discr_dest, PrimVal::Bytes(discr_val), tag.ty)?; + self.write_scalar(discr_dest, Scalar::Bits { + bits: discr_val, + defined: size as u8, + }, tag.ty)?; } layout::Variants::NicheFilling { dataful_variant, @@ -1003,7 +1025,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M self.place_field(dest, mir::Field::new(0), layout)?; let niche_value = ((variant_index - niche_variants.start()) as u128) .wrapping_add(niche_start); - self.write_primval(niche_dest, PrimVal::Bytes(niche_value), niche.ty)?; + self.write_scalar(niche_dest, Scalar::Bits { + bits: niche_value, + defined: niche.size.bits() as u8, + }, niche.ty)?; } } } @@ -1019,8 +1044,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M .lock() .intern_static(gid.instance.def_id()); let layout = self.layout_of(ty)?; - let ptr = MemoryPointer::new(alloc_id, Size::from_bytes(0)); - return Ok(Value::ByRef(ptr.into(), layout.align)) + return Ok(Value::ByRef(Scalar::Ptr(alloc_id.into()), layout.align)) } let cv = self.const_eval(gid)?; self.const_to_value(&cv.val, ty) @@ -1087,24 +1111,24 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } } - pub fn value_to_primval( + pub fn value_to_scalar( &self, ValTy { value, ty } : ValTy<'tcx>, - ) -> EvalResult<'tcx, PrimVal> { + ) -> EvalResult<'tcx, Scalar> { match self.follow_by_ref_value(value, ty)? { Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"), - Value::ByVal(primval) => { + Value::Scalar(scalar) => { // TODO: Do we really want insta-UB here? - self.ensure_valid_value(primval, ty)?; - Ok(primval) + self.ensure_valid_value(scalar, ty)?; + Ok(scalar) } - Value::ByValPair(..) => bug!("value_to_primval can't work with fat pointers"), + Value::ScalarPair(..) 
=> bug!("value_to_scalar can't work with fat pointers"), } } - pub fn write_ptr(&mut self, dest: Place, val: Pointer, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> { + pub fn write_ptr(&mut self, dest: Place, val: Scalar, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> { let valty = ValTy { value: val.to_value(), ty: dest_ty, @@ -1112,14 +1136,14 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M self.write_value(valty, dest) } - pub fn write_primval( + pub fn write_scalar( &mut self, dest: Place, - val: PrimVal, + val: Scalar, dest_ty: Ty<'tcx>, ) -> EvalResult<'tcx> { let valty = ValTy { - value: Value::ByVal(val), + value: Value::Scalar(val), ty: dest_ty, }; self.write_value(valty, dest) @@ -1132,7 +1156,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ) -> EvalResult<'tcx> { //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty); // Note that it is really important that the type here is the right one, and matches the type things are read at. - // In case `src_val` is a `ByValPair`, we don't do any magic here to handle padding properly, which is only + // In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding properly, which is only // correct if we never look at this data with the wrong type. match dest { @@ -1201,7 +1225,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub fn write_value_to_ptr( &mut self, value: Value, - dest: Pointer, + dest: Scalar, dest_align: Align, dest_ty: Ty<'tcx>, ) -> EvalResult<'tcx> { @@ -1211,115 +1235,49 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M Value::ByRef(ptr, align) => { self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false) } - Value::ByVal(primval) => { + Value::Scalar(scalar) => { let signed = match layout.abi { layout::Abi::Scalar(ref scal) => match scal.value { layout::Primitive::Int(_, signed) => signed, _ => false, }, - _ if primval.is_undef() => false, - _ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout) + _ => match scalar { + Scalar::Bits { defined: 0, .. } => false, + _ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout), + } }; - self.memory.write_primval(dest, dest_align, primval, layout.size, signed) + self.memory.write_scalar(dest, dest_align, scalar, layout.size, signed) } - Value::ByValPair(a_val, b_val) => { + Value::ScalarPair(a_val, b_val) => { trace!("write_value_to_ptr valpair: {:#?}", layout); let (a, b) = match layout.abi { layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value), - _ => bug!("write_value_to_ptr: invalid ByValPair layout: {:#?}", layout) + _ => bug!("write_value_to_ptr: invalid ScalarPair layout: {:#?}", layout) }; let (a_size, b_size) = (a.size(&self), b.size(&self)); let a_ptr = dest; let b_offset = a_size.abi_align(b.align(&self)); - let b_ptr = dest.offset(b_offset, &self)?.into(); + let b_ptr = dest.ptr_offset(b_offset, &self)?.into(); // TODO: What about signedess? 
- self.memory.write_primval(a_ptr, dest_align, a_val, a_size, false)?; - self.memory.write_primval(b_ptr, dest_align, b_val, b_size, false) + self.memory.write_scalar(a_ptr, dest_align, a_val, a_size, false)?; + self.memory.write_scalar(b_ptr, dest_align, b_val, b_size, false) } } } - pub fn ty_to_primval_kind(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimValKind> { - use syntax::ast::FloatTy; - - let kind = match ty.sty { - ty::TyBool => PrimValKind::Bool, - ty::TyChar => PrimValKind::Char, - - ty::TyInt(int_ty) => { - use syntax::ast::IntTy::*; - let size = match int_ty { - I8 => Size::from_bytes(1), - I16 => Size::from_bytes(2), - I32 => Size::from_bytes(4), - I64 => Size::from_bytes(8), - I128 => Size::from_bytes(16), - Isize => self.memory.pointer_size(), - }; - PrimValKind::from_int_size(size) - } - - ty::TyUint(uint_ty) => { - use syntax::ast::UintTy::*; - let size = match uint_ty { - U8 => Size::from_bytes(1), - U16 => Size::from_bytes(2), - U32 => Size::from_bytes(4), - U64 => Size::from_bytes(8), - U128 => Size::from_bytes(16), - Usize => self.memory.pointer_size(), - }; - PrimValKind::from_uint_size(size) - } - - ty::TyFloat(FloatTy::F32) => PrimValKind::F32, - ty::TyFloat(FloatTy::F64) => PrimValKind::F64, - - ty::TyFnPtr(_) => PrimValKind::FnPtr, - - ty::TyRef(_, ty, _) | - ty::TyRawPtr(ty::TypeAndMut { ty, .. }) if self.type_is_sized(ty) => { - PrimValKind::Ptr - } - - ty::TyAdt(def, _) if def.is_box() => PrimValKind::Ptr, - - ty::TyAdt(..) => { - match self.layout_of(ty)?.abi { - layout::Abi::Scalar(ref scalar) => { - use rustc::ty::layout::Primitive::*; - match scalar.value { - Int(i, false) => PrimValKind::from_uint_size(i.size()), - Int(i, true) => PrimValKind::from_int_size(i.size()), - F32 => PrimValKind::F32, - F64 => PrimValKind::F64, - Pointer => PrimValKind::Ptr, - } - } - - _ => return err!(TypeNotPrimitive(ty)), - } - } - - _ => return err!(TypeNotPrimitive(ty)), - }; - - Ok(kind) - } - - fn ensure_valid_value(&self, val: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx> { + fn ensure_valid_value(&self, val: Scalar, ty: Ty<'tcx>) -> EvalResult<'tcx> { match ty.sty { - ty::TyBool if val.to_bytes()? > 1 => err!(InvalidBool), + ty::TyBool => val.to_bool().map(|_| ()), - ty::TyChar if ::std::char::from_u32(val.to_bytes()? as u32).is_none() => { - err!(InvalidChar(val.to_bytes()? as u32 as u128)) + ty::TyChar if ::std::char::from_u32(val.to_bits(Size::from_bytes(4))? as u32).is_none() => { + err!(InvalidChar(val.to_bits(Size::from_bytes(4))? as u32 as u128)) } _ => Ok(()), } } - pub fn read_value(&self, ptr: Pointer, align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { + pub fn read_value(&self, ptr: Scalar, align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { if let Some(val) = self.try_read_value(ptr, align, ty)? { Ok(val) } else { @@ -1329,12 +1287,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub(crate) fn read_ptr( &self, - ptr: MemoryPointer, + ptr: Pointer, ptr_align: Align, pointee_ty: Ty<'tcx>, ) -> EvalResult<'tcx, Value> { let ptr_size = self.memory.pointer_size(); - let p: Pointer = self.memory.read_ptr_sized(ptr, ptr_align)?.into(); + let p: Scalar = self.memory.read_ptr_sized(ptr, ptr_align)?.into(); if self.type_is_sized(pointee_ty) { Ok(p.to_value()) } else { @@ -1348,31 +1306,26 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let len = self .memory .read_ptr_sized(extra, ptr_align)? 
- .to_bytes()?; - Ok(p.to_value_with_len(len as u64)) + .to_bits(ptr_size)?; + Ok(p.to_value_with_len(len as u64, self.tcx.tcx)) }, - _ => bug!("unsized primval ptr read from {:?}", pointee_ty), + _ => bug!("unsized scalar ptr read from {:?}", pointee_ty), } } } pub fn validate_ptr_target( &self, - ptr: MemoryPointer, + ptr: Pointer, ptr_align: Align, ty: Ty<'tcx> ) -> EvalResult<'tcx> { match ty.sty { ty::TyBool => { - let val = self.memory.read_primval(ptr, ptr_align, Size::from_bytes(1))?; - match val { - PrimVal::Bytes(0) | PrimVal::Bytes(1) => (), - // TODO: This seems a little overeager, should reading at bool type already be insta-UB? - _ => return err!(InvalidBool), - } + self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(1))?.to_bool()?; } ty::TyChar => { - let c = self.memory.read_primval(ptr, ptr_align, Size::from_bytes(4))?.to_bytes()? as u32; + let c = self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(4))?.to_bits(Size::from_bytes(4))? as u32; match ::std::char::from_u32(c) { Some(..) => (), None => return err!(InvalidChar(c as u128)), @@ -1395,7 +1348,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M if let layout::Abi::Scalar(ref scalar) = self.layout_of(ty)?.abi { let size = scalar.value.size(self); - self.memory.read_primval(ptr, ptr_align, size)?; + self.memory.read_scalar(ptr, ptr_align, size)?; } } @@ -1405,7 +1358,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } pub fn try_read_by_ref(&self, mut val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> { - // Convert to ByVal or ByValPair if possible + // Convert to ByVal or ScalarPair if possible if let Value::ByRef(ptr, align) = val { if let Some(read_val) = self.try_read_value(ptr, align, ty)? { val = read_val; @@ -1414,12 +1367,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M Ok(val) } - pub fn try_read_value(&self, ptr: Pointer, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option> { + pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option> { let layout = self.layout_of(ty)?; self.memory.check_align(ptr, ptr_align)?; if layout.size.bytes() == 0 { - return Ok(Some(Value::ByVal(PrimVal::Undef))); + return Ok(Some(Value::Scalar(Scalar::undef()))); } let ptr = ptr.to_ptr()?; @@ -1429,8 +1382,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M match layout.abi { layout::Abi::Scalar(..) 
=> { - let primval = self.memory.read_primval(ptr, ptr_align, layout.size)?; - Ok(Some(Value::ByVal(primval))) + let scalar = self.memory.read_scalar(ptr, ptr_align, layout.size)?; + Ok(Some(Value::Scalar(scalar))) } layout::Abi::ScalarPair(ref a, ref b) => { let (a, b) = (&a.value, &b.value); @@ -1438,9 +1391,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let a_ptr = ptr; let b_offset = a_size.abi_align(b.align(self)); let b_ptr = ptr.offset(b_offset, self)?.into(); - let a_val = self.memory.read_primval(a_ptr, ptr_align, a_size)?; - let b_val = self.memory.read_primval(b_ptr, ptr_align, b_size)?; - Ok(Some(Value::ByValPair(a_val, b_val))) + let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?; + let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?; + Ok(Some(Value::ScalarPair(a_val, b_val))) } _ => Ok(None), } @@ -1483,7 +1436,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let ptr = self.into_ptr(src)?; // u64 cast is from usize to u64, which is always good let valty = ValTy { - value: ptr.to_value_with_len(length.unwrap_usize(self.tcx.tcx)), + value: ptr.to_value_with_len(length.unwrap_usize(self.tcx.tcx), self.tcx.tcx), ty: dest_ty, }; self.write_value(valty, dest) @@ -1558,12 +1511,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } let (src_f_value, src_field) = match src { Value::ByRef(ptr, align) => { - let src_place = Place::from_primval_ptr(ptr, align); + let src_place = Place::from_scalar_ptr(ptr, align); let (src_f_place, src_field) = self.place_field(src_place, mir::Field::new(i), src_layout)?; (self.read_place(src_f_place)?, src_field) } - Value::ByVal(_) | Value::ByValPair(..) => { + Value::Scalar(_) | Value::ScalarPair(..) => { let src_field = src_layout.field(&self, i)?; assert_eq!(src_layout.fields.offset(i).bytes(), 0); assert_eq!(src_field.size, src_layout.size); @@ -1614,26 +1567,26 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } } Ok(Value::ByRef(ptr, align)) => { - match ptr.into_inner_primval() { - PrimVal::Ptr(ptr) => { + match ptr { + Scalar::Ptr(ptr) => { write!(msg, " by align({}) ref:", align.abi()).unwrap(); allocs.push(ptr.alloc_id); } ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(), } } - Ok(Value::ByVal(val)) => { + Ok(Value::Scalar(val)) => { write!(msg, " {:?}", val).unwrap(); - if let PrimVal::Ptr(ptr) = val { + if let Scalar::Ptr(ptr) = val { allocs.push(ptr.alloc_id); } } - Ok(Value::ByValPair(val1, val2)) => { + Ok(Value::ScalarPair(val1, val2)) => { write!(msg, " ({:?}, {:?})", val1, val2).unwrap(); - if let PrimVal::Ptr(ptr) = val1 { + if let Scalar::Ptr(ptr) = val1 { allocs.push(ptr.alloc_id); } - if let PrimVal::Ptr(ptr) = val2 { + if let Scalar::Ptr(ptr) = val2 { allocs.push(ptr.alloc_id); } } @@ -1643,8 +1596,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M self.memory.dump_allocs(allocs); } Place::Ptr { ptr, align, .. 
} => { - match ptr.into_inner_primval() { - PrimVal::Ptr(ptr) => { + match ptr { + Scalar::Ptr(ptr) => { trace!("by align({}) ref:", align.abi()); self.memory.dump_alloc(ptr.alloc_id); } @@ -1797,7 +1750,7 @@ impl<'mir, 'tcx> Frame<'mir, 'tcx> { trace!("{:?} is now live", local); // StorageLive *always* kills the value that's currently stored - mem::replace(&mut self.locals[local], Some(Value::ByVal(PrimVal::Undef))) + mem::replace(&mut self.locals[local], Some(Value::Scalar(Scalar::undef()))) } /// Returns the old value of the local diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index a5c94e4fcec..4d04900320f 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -2,7 +2,7 @@ //! This separation exists to ensure that no fancy miri features like //! interpreting common C functions leak into CTFE. -use rustc::mir::interpret::{AllocId, EvalResult, PrimVal, MemoryPointer, AccessKind, GlobalId}; +use rustc::mir::interpret::{AllocId, EvalResult, Scalar, Pointer, AccessKind, GlobalId}; use super::{EvalContext, Place, ValTy, Memory}; use rustc::mir; @@ -54,11 +54,11 @@ pub trait Machine<'mir, 'tcx>: Sized { fn try_ptr_op<'a>( ecx: &EvalContext<'a, 'mir, 'tcx, Self>, bin_op: mir::BinOp, - left: PrimVal, + left: Scalar, left_ty: Ty<'tcx>, - right: PrimVal, + right: Scalar, right_ty: Ty<'tcx>, - ) -> EvalResult<'tcx, Option<(PrimVal, bool)>>; + ) -> EvalResult<'tcx, Option<(Scalar, bool)>>; /// Called when trying to mark machine defined `MemoryKinds` as static fn mark_static_initialized<'a>( @@ -92,7 +92,7 @@ pub trait Machine<'mir, 'tcx>: Sized { fn check_locks<'a>( _mem: &Memory<'a, 'mir, 'tcx, Self>, - _ptr: MemoryPointer, + _ptr: Pointer, _size: Size, _access: AccessKind, ) -> EvalResult<'tcx> { diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 3f7ecf9dfb2..912fc64d342 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -10,8 +10,8 @@ use syntax::ast::Mutability; use rustc::middle::const_val::{ConstVal, ErrKind}; use rustc_data_structures::fx::{FxHashSet, FxHashMap}; -use rustc::mir::interpret::{MemoryPointer, AllocId, Allocation, AccessKind, Value, Pointer, - EvalResult, PrimVal, EvalErrorKind, GlobalId, AllocType}; +use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, Value, + EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType}; pub use rustc::mir::interpret::{write_target_uint, write_target_int, read_target_uint}; use super::{EvalContext, Machine}; @@ -71,14 +71,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { self.alloc_map.iter().map(|(&id, alloc)| (id, alloc)) } - pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> MemoryPointer { - let id = self.tcx.alloc_map.lock().create_fn_alloc(instance); - MemoryPointer::new(id, Size::from_bytes(0)) + pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer { + self.tcx.alloc_map.lock().create_fn_alloc(instance).into() } - pub fn allocate_bytes(&mut self, bytes: &[u8]) -> MemoryPointer { - let id = self.tcx.allocate_bytes(bytes); - MemoryPointer::new(id, Size::from_bytes(0)) + pub fn allocate_bytes(&mut self, bytes: &[u8]) -> Pointer { + self.tcx.allocate_bytes(bytes).into() } /// kind is `None` for statics @@ -108,20 +106,19 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { size: Size, align: Align, kind: Option<MemoryKind<M::MemoryKinds>>, - ) -> EvalResult<'tcx, MemoryPointer> { - let id =
self.allocate_value(Allocation::undef(size, align), kind)?; - Ok(MemoryPointer::new(id, Size::from_bytes(0))) + ) -> EvalResult<'tcx, Pointer> { + self.allocate_value(Allocation::undef(size, align), kind).map(Pointer::from) } pub fn reallocate( &mut self, - ptr: MemoryPointer, + ptr: Pointer, old_size: Size, old_align: Align, new_size: Size, new_align: Align, kind: MemoryKind<M::MemoryKinds>, - ) -> EvalResult<'tcx, MemoryPointer> { + ) -> EvalResult<'tcx, Pointer> { if ptr.offset.bytes() != 0 { return err!(ReallocateNonBasePtr); } @@ -151,7 +148,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(new_ptr) } - pub fn deallocate_local(&mut self, ptr: MemoryPointer) -> EvalResult<'tcx> { + pub fn deallocate_local(&mut self, ptr: Pointer) -> EvalResult<'tcx> { match self.alloc_kind.get(&ptr.alloc_id).cloned() { Some(MemoryKind::Stack) => self.deallocate(ptr, None, MemoryKind::Stack), // Happens if the memory was interned into immutable memory @@ -162,7 +159,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn deallocate( &mut self, - ptr: MemoryPointer, + ptr: Pointer, size_and_align: Option<(Size, Align)>, kind: MemoryKind<M::MemoryKinds>, ) -> EvalResult<'tcx> { @@ -228,22 +225,25 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } /// Check that the pointer is aligned AND non-NULL. - pub fn check_align(&self, ptr: Pointer, required_align: Align) -> EvalResult<'tcx> { + pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx> { // Check non-NULL/Undef, extract offset - let (offset, alloc_align) = match ptr.into_inner_primval() { - PrimVal::Ptr(ptr) => { + let (offset, alloc_align) = match ptr { + Scalar::Ptr(ptr) => { let alloc = self.get(ptr.alloc_id)?; (ptr.offset.bytes(), alloc.align) } - PrimVal::Bytes(bytes) => { - let v = ((bytes as u128) % (1 << self.pointer_size().bytes())) as u64; + Scalar::Bits { bits, defined } => { + if (defined as u64) < self.pointer_size().bits() { + return err!(ReadUndefBytes); + } + // FIXME: what on earth does this line do? docs or fix needed! + let v = ((bits as u128) % (1 << self.pointer_size().bytes())) as u64; if v == 0 { return err!(InvalidNullPointerUsage); } // the base address if the "integer allocation" is 0 and hence always aligned (v, required_align) } - PrimVal::Undef => return err!(ReadUndefBytes), }; // Check alignment if alloc_align.abi() < required_align.abi() { @@ -263,7 +263,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } } - pub fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResult<'tcx> { + pub fn check_bounds(&self, ptr: Pointer, access: bool) -> EvalResult<'tcx> { let alloc = self.get(ptr.alloc_id)?; let allocation_size = alloc.bytes.len() as u64; if ptr.offset.bytes() > allocation_size { @@ -351,7 +351,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } } - pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, Instance<'tcx>> { + pub fn get_fn(&self, ptr: Pointer) -> EvalResult<'tcx, Instance<'tcx>> { if ptr.offset.bytes() != 0 { return err!(InvalidFunctionPointer); } @@ -448,7 +448,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { if !relocations.is_empty() { msg.clear(); write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
- let mut pos = Size::from_bytes(0); + let mut pos = Size::ZERO; let relocation_width = (self.pointer_size().bytes() - 1) * 3; for (i, target_id) in relocations { // this `as usize` is fine, since we can't print more chars than `usize::MAX` @@ -479,7 +479,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { fn get_bytes_unchecked( &self, - ptr: MemoryPointer, + ptr: Pointer, size: Size, align: Align, ) -> EvalResult<'tcx, &[u8]> { @@ -499,7 +499,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { fn get_bytes_unchecked_mut( &mut self, - ptr: MemoryPointer, + ptr: Pointer, size: Size, align: Align, ) -> EvalResult<'tcx, &mut [u8]> { @@ -517,7 +517,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(&mut alloc.bytes[offset..offset + size.bytes() as usize]) } - fn get_bytes(&self, ptr: MemoryPointer, size: Size, align: Align) -> EvalResult<'tcx, &[u8]> { + fn get_bytes(&self, ptr: Pointer, size: Size, align: Align) -> EvalResult<'tcx, &[u8]> { assert_ne!(size.bytes(), 0); if self.relocations(ptr, size)?.len() != 0 { return err!(ReadPointerAsBytes); @@ -528,7 +528,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { fn get_bytes_mut( &mut self, - ptr: MemoryPointer, + ptr: Pointer, size: Size, align: Align, ) -> EvalResult<'tcx, &mut [u8]> { @@ -594,9 +594,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn copy( &mut self, - src: Pointer, + src: Scalar, src_align: Align, - dest: Pointer, + dest: Scalar, dest_align: Align, size: Size, nonoverlapping: bool, @@ -653,7 +653,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> { + pub fn read_c_str(&self, ptr: Pointer) -> EvalResult<'tcx, &[u8]> { let alloc = self.get(ptr.alloc_id)?; assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); let offset = ptr.offset.bytes() as usize; @@ -671,7 +671,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } } - pub fn read_bytes(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx, &[u8]> { + pub fn read_bytes(&self, ptr: Scalar, size: Size) -> EvalResult<'tcx, &[u8]> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL let align = Align::from_bytes(1, 1).unwrap(); self.check_align(ptr, align)?; @@ -681,7 +681,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { self.get_bytes(ptr.to_ptr()?, size, align) } - pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> { + pub fn write_bytes(&mut self, ptr: Scalar, src: &[u8]) -> EvalResult<'tcx> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL let align = Align::from_bytes(1, 1).unwrap(); self.check_align(ptr, align)?; @@ -693,7 +693,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: Size) -> EvalResult<'tcx> { + pub fn write_repeat(&mut self, ptr: Scalar, val: u8, count: Size) -> EvalResult<'tcx> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL let align = Align::from_bytes(1, 1).unwrap(); self.check_align(ptr, align)?; @@ -707,17 +707,17 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - pub fn read_primval(&self, ptr: 
MemoryPointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, PrimVal> { + pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, Scalar> { self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer let endianness = self.endianness(); let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?; // Undef check happens *after* we established that the alignment is correct. // We must not return Ok() for unaligned pointers! if self.check_defined(ptr, size).is_err() { - return Ok(PrimVal::Undef.into()); + return Ok(Scalar::undef().into()); } // Now we do the actual reading - let bytes = read_target_uint(endianness, bytes).unwrap(); + let bits = read_target_uint(endianness, bytes).unwrap(); // See if we got a pointer if size != self.pointer_size() { if self.relocations(ptr, size)?.len() != 0 { @@ -726,30 +726,33 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } else { let alloc = self.get(ptr.alloc_id)?; match alloc.relocations.get(&ptr.offset) { - Some(&alloc_id) => return Ok(PrimVal::Ptr(MemoryPointer::new(alloc_id, Size::from_bytes(bytes as u64)))), + Some(&alloc_id) => return Ok(Pointer::new(alloc_id, Size::from_bytes(bits as u64)).into()), None => {}, } } - // We don't. Just return the bytes. - Ok(PrimVal::Bytes(bytes)) + // We don't. Just return the bits. + Ok(Scalar::Bits { + bits, + defined: size.bits() as u8, + }) } - pub fn read_ptr_sized(&self, ptr: MemoryPointer, ptr_align: Align) -> EvalResult<'tcx, PrimVal> { - self.read_primval(ptr, ptr_align, self.pointer_size()) + pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align) -> EvalResult<'tcx, Scalar> { + self.read_scalar(ptr, ptr_align, self.pointer_size()) } - pub fn write_primval(&mut self, ptr: Pointer, ptr_align: Align, val: PrimVal, size: Size, signed: bool) -> EvalResult<'tcx> { + pub fn write_scalar(&mut self, ptr: Scalar, ptr_align: Align, val: Scalar, size: Size, signed: bool) -> EvalResult<'tcx> { let endianness = self.endianness(); let bytes = match val { - PrimVal::Ptr(val) => { + Scalar::Ptr(val) => { assert_eq!(size, self.pointer_size()); val.offset.bytes() as u128 } - PrimVal::Bytes(bytes) => bytes, + Scalar::Bits { bits, defined } if defined as u64 >= size.bits() && size.bits() != 0 => bits, - PrimVal::Undef => { + Scalar::Bits { .. 
} => { self.check_align(ptr.into(), ptr_align)?; self.mark_definedness(ptr, size, false)?; return Ok(()); @@ -770,7 +773,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // See if we have to also write a relocation match val { - PrimVal::Ptr(val) => { + Scalar::Ptr(val) => { self.get_mut(ptr.alloc_id)?.relocations.insert( ptr.offset, val.alloc_id, @@ -782,9 +785,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - pub fn write_ptr_sized_unsigned(&mut self, ptr: MemoryPointer, ptr_align: Align, val: PrimVal) -> EvalResult<'tcx> { + pub fn write_ptr_sized_unsigned(&mut self, ptr: Pointer, ptr_align: Align, val: Scalar) -> EvalResult<'tcx> { let ptr_size = self.pointer_size(); - self.write_primval(ptr.into(), ptr_align, val, ptr_size, false) + self.write_scalar(ptr.into(), ptr_align, val, ptr_size, false) } fn int_align(&self, size: Size) -> Align { @@ -806,7 +809,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { fn relocations( &self, - ptr: MemoryPointer, + ptr: Pointer, size: Size, ) -> EvalResult<'tcx, &[(Size, AllocId)]> { let start = ptr.offset.bytes().saturating_sub(self.pointer_size().bytes() - 1); @@ -814,7 +817,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(self.get(ptr.alloc_id)?.relocations.range(Size::from_bytes(start)..end)) } - fn clear_relocations(&mut self, ptr: MemoryPointer, size: Size) -> EvalResult<'tcx> { + fn clear_relocations(&mut self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { // Find the start and end of the given range and its outermost relocations. let (first, last) = { // Find all relocations overlapping the given range. @@ -846,9 +849,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - fn check_relocation_edges(&self, ptr: MemoryPointer, size: Size) -> EvalResult<'tcx> { - let overlapping_start = self.relocations(ptr, Size::from_bytes(0))?.len(); - let overlapping_end = self.relocations(ptr.offset(size, self)?, Size::from_bytes(0))?.len(); + fn check_relocation_edges(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { + let overlapping_start = self.relocations(ptr, Size::ZERO)?.len(); + let overlapping_end = self.relocations(ptr.offset(size, self)?, Size::ZERO)?.len(); if overlapping_start + overlapping_end != 0 { return err!(ReadPointerAsBytes); } @@ -861,8 +864,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // FIXME(solson): This is a very naive, slow version. fn copy_undef_mask( &mut self, - src: MemoryPointer, - dest: MemoryPointer, + src: Pointer, + dest: Pointer, size: Size, ) -> EvalResult<'tcx> { // The bits have to be saved locally before writing to dest in case src and dest overlap. 
@@ -882,7 +885,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - fn check_defined(&self, ptr: MemoryPointer, size: Size) -> EvalResult<'tcx> { + fn check_defined(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { let alloc = self.get(ptr.alloc_id)?; if !alloc.undef_mask.is_range_defined( ptr.offset, @@ -896,7 +899,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn mark_definedness( &mut self, - ptr: Pointer, + ptr: Scalar, size: Size, new_state: bool, ) -> EvalResult<'tcx> { @@ -927,34 +930,32 @@ pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { fn into_ptr( &self, value: Value, - ) -> EvalResult<'tcx, Pointer> { + ) -> EvalResult<'tcx, Scalar> { Ok(match value { Value::ByRef(ptr, align) => { self.memory().read_ptr_sized(ptr.to_ptr()?, align)? } - Value::ByVal(ptr) | - Value::ByValPair(ptr, _) => ptr, + Value::Scalar(ptr) | + Value::ScalarPair(ptr, _) => ptr, }.into()) } fn into_ptr_vtable_pair( &self, value: Value, - ) -> EvalResult<'tcx, (Pointer, MemoryPointer)> { + ) -> EvalResult<'tcx, (Scalar, Pointer)> { match value { Value::ByRef(ref_ptr, align) => { let mem = self.memory(); let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into(); let vtable = mem.read_ptr_sized( - ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, + ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, align )?.to_ptr()?; Ok((ptr, vtable)) } - Value::ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)), - - Value::ByVal(PrimVal::Undef) => err!(ReadUndefBytes), + Value::ScalarPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)), _ => bug!("expected ptr and vtable, got {:?}", value), } } @@ -962,24 +963,22 @@ pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { fn into_slice( &self, value: Value, - ) -> EvalResult<'tcx, (Pointer, u64)> { + ) -> EvalResult<'tcx, (Scalar, u64)> { match value { Value::ByRef(ref_ptr, align) => { let mem = self.memory(); let ptr = mem.read_ptr_sized(ref_ptr.to_ptr()?, align)?.into(); let len = mem.read_ptr_sized( - ref_ptr.offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, + ref_ptr.ptr_offset(mem.pointer_size(), &mem.tcx.data_layout)?.to_ptr()?, align - )?.to_bytes()? as u64; + )?.to_bits(mem.pointer_size())? 
as u64; Ok((ptr, len)) } - Value::ByValPair(ptr, val) => { - let len = val.to_u128()?; - assert_eq!(len as u64 as u128, len); + Value::ScalarPair(ptr, val) => { + let len = val.to_bits(self.memory().pointer_size())?; Ok((ptr.into(), len as u64)) } - Value::ByVal(PrimVal::Undef) => err!(ReadUndefBytes), - Value::ByVal(_) => bug!("expected ptr and length, got {:?}", value), + Value::Scalar(_) => bug!("expected ptr and length, got {:?}", value), } } } diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs index d39bae5e8db..b5b4ac6df6b 100644 --- a/src/librustc_mir/interpret/mod.rs +++ b/src/librustc_mir/interpret/mod.rs @@ -43,17 +43,17 @@ pub fn sign_extend<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, value: u128, ty: Ty<'t let size = layout.size.bits(); assert!(layout.abi.is_signed()); // sign extend - let amt = 128 - size; + let shift = 128 - size; // shift the unsigned value to the left // and back to the right as signed (essentially fills with FF on the left) - Ok((((value << amt) as i128) >> amt) as u128) + Ok((((value << shift) as i128) >> shift) as u128) } pub fn truncate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, value: u128, ty: Ty<'tcx>) -> EvalResult<'tcx, u128> { let param_env = ParamEnv::empty(); let layout = tcx.layout_of(param_env.and(ty)).map_err(|layout| EvalErrorKind::Layout(layout))?; let size = layout.size.bits(); - let amt = 128 - size; + let shift = 128 - size; // truncate (shift left to drop out leftover values, shift right to fill with zeroes) - Ok((value << amt) >> amt) + Ok((value << shift) >> shift) } diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs index a4a36b0b355..8a2a78daa35 100644 --- a/src/librustc_mir/interpret/operator.rs +++ b/src/librustc_mir/interpret/operator.rs @@ -1,5 +1,5 @@ use rustc::mir; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, Ty, layout}; use syntax::ast::FloatTy; use rustc::ty::layout::LayoutOf; use rustc_apfloat::ieee::{Double, Single}; @@ -7,7 +7,7 @@ use rustc_apfloat::Float; use super::{EvalContext, Place, Machine, ValTy}; -use rustc::mir::interpret::{EvalResult, PrimVal, Value}; +use rustc::mir::interpret::{EvalResult, Scalar, Value}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { fn binop_with_overflow( @@ -15,9 +15,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { op: mir::BinOp, left: ValTy<'tcx>, right: ValTy<'tcx>, - ) -> EvalResult<'tcx, (PrimVal, bool)> { - let left_val = self.value_to_primval(left)?; - let right_val = self.value_to_primval(right)?; + ) -> EvalResult<'tcx, (Scalar, bool)> { + let left_val = self.value_to_scalar(left)?; + let right_val = self.value_to_scalar(right)?; self.binary_op(op, left_val, left.ty, right_val, right.ty) } @@ -32,7 +32,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { dest_ty: Ty<'tcx>, ) -> EvalResult<'tcx> { let (val, overflowed) = self.binop_with_overflow(op, left, right)?; - let val = Value::ByValPair(val, PrimVal::from_bool(overflowed)); + let val = Value::ScalarPair(val, Scalar::from_bool(overflowed)); let valty = ValTy { value: val, ty: dest_ty, @@ -51,7 +51,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { dest_ty: Ty<'tcx>, ) -> EvalResult<'tcx, bool> { let (val, overflowed) = self.binop_with_overflow(op, left, right)?; - self.write_primval(dest, val, dest_ty)?; + self.write_scalar(dest, val, dest_ty)?; Ok(overflowed) } } @@ -61,15 +61,24 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> 
EvalContext<'a, 'mir, 'tcx, M> { pub fn binary_op( &self, bin_op: mir::BinOp, - left: PrimVal, + left: Scalar, left_ty: Ty<'tcx>, - right: PrimVal, + right: Scalar, right_ty: Ty<'tcx>, - ) -> EvalResult<'tcx, (PrimVal, bool)> { + ) -> EvalResult<'tcx, (Scalar, bool)> { use rustc::mir::BinOp::*; - let left_kind = self.ty_to_primval_kind(left_ty)?; - let right_kind = self.ty_to_primval_kind(right_ty)?; + let left_layout = self.layout_of(left_ty)?; + let right_layout = self.layout_of(right_ty)?; + + let left_kind = match left_layout.abi { + layout::Abi::Scalar(ref scalar) => scalar.value, + _ => return err!(TypeNotPrimitive(left_ty)), + }; + let right_kind = match right_layout.abi { + layout::Abi::Scalar(ref scalar) => scalar.value, + _ => return err!(TypeNotPrimitive(right_ty)), + }; trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind); // I: Handle operations that support pointers @@ -80,10 +89,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } // II: From now on, everything must be bytes, no pointers - let l = left.to_bytes()?; - let r = right.to_bytes()?; - - let left_layout = self.layout_of(left_ty)?; + let l = left.to_bits(left_layout.size)?; + let r = right.to_bits(right_layout.size)?; // These ops can have an RHS with a different numeric type. if right_kind.is_int() && (bin_op == Shl || bin_op == Shr) { @@ -110,7 +117,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } }; let truncated = self.truncate(result, left_ty)?; - return Ok((PrimVal::Bytes(truncated), oflo)); + return Ok((Scalar::Bits { + bits: truncated, + defined: size as u8, + }, oflo)); } if left_kind != right_kind { @@ -136,7 +146,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { if let Some(op) = op { let l = self.sign_extend(l, left_ty)? as i128; let r = self.sign_extend(r, right_ty)? as i128; - return Ok((PrimVal::from_bool(op(&l, &r)), false)); + return Ok((Scalar::from_bool(op(&l, &r)), false)); } let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op { Div if r == 0 => return err!(DivisionByZero), @@ -156,7 +166,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Rem | Div => { // int_min / -1 if r == -1 && l == (1 << (size - 1)) { - return Ok((PrimVal::Bytes(l), true)); + return Ok((Scalar::Bits { bits: l, defined: size as u8 }, true)); } }, _ => {}, @@ -170,51 +180,60 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } let result = result as u128; let truncated = self.truncate(result, left_ty)?; - return Ok((PrimVal::Bytes(truncated), oflo)); + return Ok((Scalar::Bits { + bits: truncated, + defined: size as u8, + }, oflo)); } } if let ty::TyFloat(fty) = left_ty.sty { macro_rules! 
float_math { - ($ty:path) => {{ + ($ty:path, $bitsize:expr) => {{ let l = <$ty>::from_bits(l); let r = <$ty>::from_bits(r); + let bitify = |res: ::rustc_apfloat::StatusAnd<$ty>| Scalar::Bits { + bits: res.value.to_bits(), + defined: $bitsize, + }; let val = match bin_op { - Eq => PrimVal::from_bool(l == r), - Ne => PrimVal::from_bool(l != r), - Lt => PrimVal::from_bool(l < r), - Le => PrimVal::from_bool(l <= r), - Gt => PrimVal::from_bool(l > r), - Ge => PrimVal::from_bool(l >= r), - Add => PrimVal::Bytes((l + r).value.to_bits()), - Sub => PrimVal::Bytes((l - r).value.to_bits()), - Mul => PrimVal::Bytes((l * r).value.to_bits()), - Div => PrimVal::Bytes((l / r).value.to_bits()), - Rem => PrimVal::Bytes((l % r).value.to_bits()), + Eq => Scalar::from_bool(l == r), + Ne => Scalar::from_bool(l != r), + Lt => Scalar::from_bool(l < r), + Le => Scalar::from_bool(l <= r), + Gt => Scalar::from_bool(l > r), + Ge => Scalar::from_bool(l >= r), + Add => bitify(l + r), + Sub => bitify(l - r), + Mul => bitify(l * r), + Div => bitify(l / r), + Rem => bitify(l % r), _ => bug!("invalid float op: `{:?}`", bin_op), }; return Ok((val, false)); }}; } match fty { - FloatTy::F32 => float_math!(Single), - FloatTy::F64 => float_math!(Double), + FloatTy::F32 => float_math!(Single, 32), + FloatTy::F64 => float_math!(Double, 64), } } + let bit_width = self.layout_of(left_ty).unwrap().size.bits() as u8; + // only ints left let val = match bin_op { - Eq => PrimVal::from_bool(l == r), - Ne => PrimVal::from_bool(l != r), + Eq => Scalar::from_bool(l == r), + Ne => Scalar::from_bool(l != r), - Lt => PrimVal::from_bool(l < r), - Le => PrimVal::from_bool(l <= r), - Gt => PrimVal::from_bool(l > r), - Ge => PrimVal::from_bool(l >= r), + Lt => Scalar::from_bool(l < r), + Le => Scalar::from_bool(l <= r), + Gt => Scalar::from_bool(l > r), + Ge => Scalar::from_bool(l >= r), - BitOr => PrimVal::Bytes(l | r), - BitAnd => PrimVal::Bytes(l & r), - BitXor => PrimVal::Bytes(l ^ r), + BitOr => Scalar::Bits { bits: l | r, defined: bit_width }, + BitAnd => Scalar::Bits { bits: l & r, defined: bit_width }, + BitXor => Scalar::Bits { bits: l ^ r, defined: bit_width }, Add | Sub | Mul | Rem | Div => { let op: fn(u128, u128) -> (u128, bool) = match bin_op { @@ -229,7 +248,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { }; let (result, oflo) = op(l, r); let truncated = self.truncate(result, left_ty)?; - return Ok((PrimVal::Bytes(truncated), oflo || truncated != result)); + return Ok((Scalar::Bits { + bits: truncated, + defined: bit_width, + }, oflo || truncated != result)); } _ => { @@ -251,15 +273,16 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { pub fn unary_op( &self, un_op: mir::UnOp, - val: PrimVal, + val: Scalar, ty: Ty<'tcx>, - ) -> EvalResult<'tcx, PrimVal> { + ) -> EvalResult<'tcx, Scalar> { use rustc::mir::UnOp::*; use rustc_apfloat::ieee::{Single, Double}; use rustc_apfloat::Float; - let bytes = val.to_bytes()?; - let size = self.layout_of(ty)?.size.bits(); + let size = self.layout_of(ty)?.size; + let bytes = val.to_bits(size)?; + let size = size.bits(); let result_bytes = match (un_op, &ty.sty) { @@ -274,6 +297,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { (Neg, _) => (-(bytes as i128)) as u128, }; - Ok(PrimVal::Bytes(self.truncate(result_bytes, ty)?)) + Ok(Scalar::Bits { + bits: self.truncate(result_bytes, ty)?, + defined: size as u8, + }) } } diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs 
index 6ccbcf07370..c1bcffe7e9a 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -1,9 +1,9 @@ use rustc::mir; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, Size}; +use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; use rustc_data_structures::indexed_vec::Idx; -use rustc::mir::interpret::{GlobalId, Value, PrimVal, EvalResult, Pointer, MemoryPointer}; +use rustc::mir::interpret::{GlobalId, Value, Scalar, EvalResult, Pointer}; use super::{EvalContext, Machine, ValTy}; use interpret::memory::HasMemory; @@ -14,7 +14,7 @@ pub enum Place { /// A place may have an invalid (integral or undef) pointer, /// since it might be turned back into a reference /// before ever being dereferenced. - ptr: Pointer, + ptr: Scalar, align: Align, extra: PlaceExtra, }, @@ -28,17 +28,17 @@ pub enum Place { pub enum PlaceExtra { None, Length(u64), - Vtable(MemoryPointer), + Vtable(Pointer), DowncastVariant(usize), } impl<'tcx> Place { /// Produces a Place that will error if attempted to be read from pub fn undef() -> Self { - Self::from_primval_ptr(PrimVal::Undef.into(), Align::from_bytes(1, 1).unwrap()) + Self::from_scalar_ptr(Scalar::undef().into(), Align::from_bytes(1, 1).unwrap()) } - pub fn from_primval_ptr(ptr: Pointer, align: Align) -> Self { + pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self { Place::Ptr { ptr, align, @@ -46,11 +46,11 @@ impl<'tcx> Place { } } - pub fn from_ptr(ptr: MemoryPointer, align: Align) -> Self { - Self::from_primval_ptr(ptr.into(), align) + pub fn from_ptr(ptr: Pointer, align: Align) -> Self { + Self::from_scalar_ptr(ptr.into(), align) } - pub fn to_ptr_align_extra(self) -> (Pointer, Align, PlaceExtra) { + pub fn to_ptr_align_extra(self) -> (Scalar, Align, PlaceExtra) { match self { Place::Ptr { ptr, align, extra } => (ptr, align, extra), _ => bug!("to_ptr_and_extra: expected Place::Ptr, got {:?}", self), @@ -58,12 +58,12 @@ impl<'tcx> Place { } } - pub fn to_ptr_align(self) -> (Pointer, Align) { + pub fn to_ptr_align(self) -> (Scalar, Align) { let (ptr, align, _extra) = self.to_ptr_align_extra(); (ptr, align) } - pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { // At this point, we forget about the alignment information -- the place has been turned into a reference, // and no matter where it came from, it now must be aligned. self.to_ptr_align().0.to_ptr() @@ -128,17 +128,17 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let field_index = field.index(); let field = base_layout.field(self, field_index)?; if field.size.bytes() == 0 { - return Ok(Some((Value::ByVal(PrimVal::Undef), field.ty))) + return Ok(Some((Value::Scalar(Scalar::undef()), field.ty))) } let offset = base_layout.fields.offset(field_index); match base { // the field covers the entire type - Value::ByValPair(..) | - Value::ByVal(_) if offset.bytes() == 0 && field.size == base_layout.size => Ok(Some((base, field.ty))), + Value::ScalarPair(..) | + Value::Scalar(_) if offset.bytes() == 0 && field.size == base_layout.size => Ok(Some((base, field.ty))), // split fat pointers, 2 element tuples, ... 
- Value::ByValPair(a, b) if base_layout.fields.count() == 2 => { + Value::ScalarPair(a, b) if base_layout.fields.count() == 2 => { let val = [a, b][field_index]; - Ok(Some((Value::ByVal(val), field.ty))) + Ok(Some((Value::Scalar(val), field.ty))) }, // FIXME(oli-obk): figure out whether we should be calling `try_read_value` here _ => Ok(None), @@ -173,7 +173,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { place: &mir::Place<'tcx>, ) -> EvalResult<'tcx, Value> { // Shortcut for things like accessing a fat pointer's field, - // which would otherwise (in the `eval_place` path) require moving a `ByValPair` to memory + // which would otherwise (in the `eval_place` path) require moving a `ScalarPair` to memory // and returning an `Place::Ptr` to it if let Some(val) = self.try_read_place(place)? { return Ok(val); @@ -210,7 +210,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { }; let alloc = Machine::init_static(self, cid)?; Place::Ptr { - ptr: MemoryPointer::new(alloc, Size::from_bytes(0)).into(), + ptr: Scalar::Ptr(alloc.into()), align: layout.align, extra: PlaceExtra::None, } @@ -250,8 +250,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Place::Local { frame, local } => { match (&self.stack[frame].get_local(local)?, &base_layout.abi) { // in case the field covers the entire type, just return the value - (&Value::ByVal(_), &layout::Abi::Scalar(_)) | - (&Value::ByValPair(..), &layout::Abi::ScalarPair(..)) + (&Value::Scalar(_), &layout::Abi::Scalar(_)) | + (&Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) if offset.bytes() == 0 && field.size == base_layout.size => { return Ok((base, field)); @@ -272,7 +272,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { _ => offset, }; - let ptr = base_ptr.offset(offset, &self)?; + let ptr = base_ptr.ptr_offset(offset, &self)?; let align = base_align.min(base_layout.align).min(field.align); let extra = if !field.is_unsized() { PlaceExtra::None @@ -310,7 +310,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { extra: PlaceExtra::Length(len), } } - _ => Place::from_primval_ptr(self.into_ptr(val)?, layout.align), + _ => Place::from_scalar_ptr(self.into_ptr(val)?, layout.align), }) } @@ -332,7 +332,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { n, len ); - let ptr = base_ptr.offset(elem_size * n, &*self)?; + let ptr = base_ptr.ptr_offset(elem_size * n, &*self)?; Ok(Place::Ptr { ptr, align, @@ -387,8 +387,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { Index(local) => { let value = self.frame().get_local(local)?; let ty = self.tcx.types.usize; - let n = self.value_to_primval(ValTy { value, ty })?.to_u64()?; - self.place_index(base, base_ty, n) + let n = self + .value_to_scalar(ValTy { value, ty })? 
+ .to_bits(self.tcx.data_layout.pointer_size)?; + self.place_index(base, base_ty, n as u64) } ConstantIndex { @@ -410,7 +412,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { u64::from(offset) }; - let ptr = base_ptr.offset(elem_size * index, &self)?; + let ptr = base_ptr.ptr_offset(elem_size * index, &self)?; Ok(Place::Ptr { ptr, align, extra: PlaceExtra::None }) } @@ -422,7 +424,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let (elem_ty, n) = base.elem_ty_and_len(base_ty, self.tcx.tcx); let elem_size = self.layout_of(elem_ty)?.size; assert!(u64::from(from) <= n - u64::from(to)); - let ptr = base_ptr.offset(elem_size * u64::from(from), &self)?; + let ptr = base_ptr.ptr_offset(elem_size * u64::from(from), &self)?; // sublicing arrays produces arrays let extra = if self.type_is_sized(base_ty) { PlaceExtra::None diff --git a/src/librustc_mir/interpret/terminator/drop.rs b/src/librustc_mir/interpret/terminator/drop.rs index fbc0c499e59..c0fafa7f83e 100644 --- a/src/librustc_mir/interpret/terminator/drop.rs +++ b/src/librustc_mir/interpret/terminator/drop.rs @@ -2,7 +2,7 @@ use rustc::mir::BasicBlock; use rustc::ty::{self, Ty}; use syntax::codemap::Span; -use rustc::mir::interpret::{EvalResult, PrimVal, Value}; +use rustc::mir::interpret::{EvalResult, Scalar, Value}; use interpret::{Machine, ValTy, EvalContext, Place, PlaceExtra}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { @@ -28,7 +28,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { ptr, align: _, extra: PlaceExtra::Length(len), - } => ptr.to_value_with_len(len), + } => ptr.to_value_with_len(len, self.tcx.tcx), Place::Ptr { ptr, align: _, @@ -52,7 +52,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let instance = match ty.sty { ty::TyDynamic(..) => { let vtable = match arg { - Value::ByValPair(_, PrimVal::Ptr(vtable)) => vtable, + Value::ScalarPair(_, Scalar::Ptr(vtable)) => vtable, _ => bug!("expected fat ptr, got {:?}", arg), }; match self.read_drop_type_from_vtable(vtable)? { diff --git a/src/librustc_mir/interpret/terminator/mod.rs b/src/librustc_mir/interpret/terminator/mod.rs index c5b823ca87b..cc250fb68c9 100644 --- a/src/librustc_mir/interpret/terminator/mod.rs +++ b/src/librustc_mir/interpret/terminator/mod.rs @@ -4,7 +4,7 @@ use rustc::ty::layout::LayoutOf; use syntax::codemap::Span; use rustc_target::spec::abi::Abi; -use rustc::mir::interpret::{EvalResult, PrimVal, Value}; +use rustc::mir::interpret::{EvalResult, Scalar, Value}; use super::{EvalContext, Place, Machine, ValTy}; use rustc_data_structures::indexed_vec::Idx; @@ -38,13 +38,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { .. } => { let discr_val = self.eval_operand(discr)?; - let discr_prim = self.value_to_primval(discr_val)?; + let discr_prim = self.value_to_scalar(discr_val)?; // Branch to the `otherwise` case by default, if no match is found. let mut target_block = targets[targets.len() - 1]; for (index, &const_int) in values.iter().enumerate() { - if discr_prim.to_bytes()? == const_int { + if discr_prim.to_bits(self.layout_of(discr_val.ty).unwrap().size)? 
== const_int { target_block = targets[index]; break; } @@ -67,7 +67,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let func = self.eval_operand(func)?; let (fn_def, sig) = match func.ty.sty { ty::TyFnPtr(sig) => { - let fn_ptr = self.value_to_primval(func)?.to_ptr()?; + let fn_ptr = self.value_to_scalar(func)?.to_ptr()?; let instance = self.memory.get_fn(fn_ptr)?; let instance_ty = instance.ty(*self.tcx); match instance_ty.sty { @@ -144,19 +144,19 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { target, .. } => { - let cond_val = self.eval_operand_to_primval(cond)?.to_bool()?; + let cond_val = self.eval_operand_to_scalar(cond)?.to_bool()?; if expected == cond_val { self.goto_block(target); } else { use rustc::mir::interpret::EvalErrorKind::*; return match *msg { BoundsCheck { ref len, ref index } => { - let len = self.eval_operand_to_primval(len) + let len = self.eval_operand_to_scalar(len) .expect("can't eval len") - .to_u64()?; - let index = self.eval_operand_to_primval(index) + .to_bits(self.memory().pointer_size())? as u64; + let index = self.eval_operand_to_scalar(index) .expect("can't eval index") - .to_u64()?; + .to_bits(self.memory().pointer_size())? as u64; err!(BoundsCheck { len, index }) } Overflow(op) => Err(Overflow(op).into()), @@ -342,7 +342,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { for (i, arg_local) in arg_locals.enumerate() { let field = layout.field(&self, i)?; let offset = layout.fields.offset(i); - let arg = Value::ByRef(ptr.offset(offset, &self)?, + let arg = Value::ByRef(ptr.ptr_offset(offset, &self)?, align.min(field.align)); let dest = self.eval_place(&mir::Place::Local(arg_local))?; @@ -359,7 +359,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { self.write_value(valty, dest)?; } } - Value::ByVal(PrimVal::Undef) => {} + Value::Scalar(Scalar::Bits { defined: 0, .. 
}) => {} other => { trace!("{:#?}, {:#?}", other, layout); let mut layout = layout; diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index ded27108e71..373a0b0d0bf 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -2,7 +2,7 @@ use rustc::ty::{self, Ty}; use rustc::ty::layout::{Size, Align, LayoutOf}; use syntax::ast::Mutability; -use rustc::mir::interpret::{PrimVal, Value, MemoryPointer, EvalResult}; +use rustc::mir::interpret::{Scalar, Value, Pointer, EvalResult}; use super::{EvalContext, Machine}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { @@ -16,7 +16,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { &mut self, ty: Ty<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>, - ) -> EvalResult<'tcx, MemoryPointer> { + ) -> EvalResult<'tcx, Pointer> { debug!("get_vtable(trait_ref={:?})", trait_ref); let layout = self.layout_of(trait_ref.self_ty())?; @@ -35,19 +35,25 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty); let drop = self.memory.create_fn_alloc(drop); - self.memory.write_ptr_sized_unsigned(vtable, ptr_align, PrimVal::Ptr(drop))?; + self.memory.write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?; let size_ptr = vtable.offset(ptr_size, &self)?; - self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, PrimVal::Bytes(size as u128))?; + self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits { + bits: size as u128, + defined: ptr_size.bits() as u8, + })?; let align_ptr = vtable.offset(ptr_size * 2, &self)?; - self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, PrimVal::Bytes(align as u128))?; + self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits { + bits: align as u128, + defined: ptr_size.bits() as u8, + })?; for (i, method) in methods.iter().enumerate() { if let Some((def_id, substs)) = *method { let instance = self.resolve(def_id, substs)?; let fn_ptr = self.memory.create_fn_alloc(instance); let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?; - self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, PrimVal::Ptr(fn_ptr))?; + self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?; } } @@ -61,29 +67,30 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { pub fn read_drop_type_from_vtable( &self, - vtable: MemoryPointer, + vtable: Pointer, ) -> EvalResult<'tcx, Option<ty::Instance<'tcx>>> { // we don't care about the pointee type, we just want a pointer let pointer_align = self.tcx.data_layout.pointer_align; + let pointer_size = self.tcx.data_layout.pointer_size.bits() as u8; match self.read_ptr(vtable, pointer_align, self.tcx.mk_nil_ptr())? 
{ // some values don't need to call a drop impl, so the value is null - Value::ByVal(PrimVal::Bytes(0)) => Ok(None), - Value::ByVal(PrimVal::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some), + Value::Scalar(Scalar::Bits { bits: 0, defined} ) if defined == pointer_size => Ok(None), + Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some), _ => err!(ReadBytesAsPointer), } } pub fn read_size_and_align_from_vtable( &self, - vtable: MemoryPointer, + vtable: Pointer, ) -> EvalResult<'tcx, (Size, Align)> { let pointer_size = self.memory.pointer_size(); let pointer_align = self.tcx.data_layout.pointer_align; - let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bytes()? as u64; + let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64; let align = self.memory.read_ptr_sized( vtable.offset(pointer_size * 2, self)?, pointer_align - )?.to_bytes()? as u64; + )?.to_bits(pointer_size)? as u64; Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap())) } } diff --git a/src/librustc_mir/monomorphize/collector.rs b/src/librustc_mir/monomorphize/collector.rs index b181a281ef4..a8a50e14c68 100644 --- a/src/librustc_mir/monomorphize/collector.rs +++ b/src/librustc_mir/monomorphize/collector.rs @@ -203,7 +203,7 @@ use rustc::session::config; use rustc::mir::{self, Location, Promoted}; use rustc::mir::visit::Visitor as MirVisitor; use rustc::mir::mono::MonoItem; -use rustc::mir::interpret::{PrimVal, GlobalId, AllocType}; +use rustc::mir::interpret::{Scalar, GlobalId, AllocType}; use monomorphize::{self, Instance}; use rustc::util::nodemap::{FxHashSet, FxHashMap, DefIdMap}; @@ -1245,13 +1245,13 @@ fn collect_const<'a, 'tcx>( }; match val { ConstVal::Unevaluated(..) 
=> bug!("const eval yielded unevaluated const"), - ConstVal::Value(ConstValue::ByValPair(PrimVal::Ptr(a), PrimVal::Ptr(b))) => { + ConstVal::Value(ConstValue::ScalarPair(Scalar::Ptr(a), Scalar::Ptr(b))) => { collect_miri(tcx, a.alloc_id, output); collect_miri(tcx, b.alloc_id, output); } - ConstVal::Value(ConstValue::ByValPair(_, PrimVal::Ptr(ptr))) | - ConstVal::Value(ConstValue::ByValPair(PrimVal::Ptr(ptr), _)) | - ConstVal::Value(ConstValue::ByVal(PrimVal::Ptr(ptr))) => + ConstVal::Value(ConstValue::ScalarPair(_, Scalar::Ptr(ptr))) | + ConstVal::Value(ConstValue::ScalarPair(Scalar::Ptr(ptr), _)) | + ConstVal::Value(ConstValue::Scalar(Scalar::Ptr(ptr))) => collect_miri(tcx, ptr.alloc_id, output), ConstVal::Value(ConstValue::ByRef(alloc, _offset)) => { for &id in alloc.relocations.values() { diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs index 80603959ec2..fa740876091 100644 --- a/src/librustc_mir/transform/const_prop.rs +++ b/src/librustc_mir/transform/const_prop.rs @@ -19,7 +19,7 @@ use rustc::mir::{TerminatorKind, ClearCrossCrate, SourceInfo, BinOp, ProjectionE use rustc::mir::visit::{Visitor, PlaceContext}; use rustc::middle::const_val::ConstVal; use rustc::ty::{TyCtxt, self, Instance}; -use rustc::mir::interpret::{Value, PrimVal, GlobalId, EvalResult}; +use rustc::mir::interpret::{Value, Scalar, GlobalId, EvalResult}; use interpret::EvalContext; use interpret::CompileTimeEvaluator; use interpret::{eval_promoted, mk_borrowck_eval_cx, ValTy}; @@ -215,7 +215,7 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { trace!("field proj on {:?}", proj.base); let (base, ty, span) = self.eval_place(&proj.base)?; match base { - Value::ByValPair(a, b) => { + Value::ScalarPair(a, b) => { trace!("by val pair: {:?}, {:?}", a, b); let base_layout = self.tcx.layout_of(self.param_env.and(ty)).ok()?; trace!("layout computed"); @@ -228,7 +228,7 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { }; let field = base_layout.field(cx, field_index).ok()?; trace!("projection resulted in: {:?}", val); - Some((Value::ByVal(val), field.ty, span)) + Some((Value::Scalar(val), field.ty, span)) }, _ => None, } @@ -283,7 +283,10 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { Rvalue::NullaryOp(NullOp::SizeOf, ty) => { let param_env = self.tcx.param_env(self.source.def_id); type_size_of(self.tcx, param_env, ty).map(|n| ( - Value::ByVal(PrimVal::Bytes(n as u128)), + Value::Scalar(Scalar::Bits { + bits: n as u128, + defined: self.tcx.data_layout.pointer_size.bits() as u8, + }), self.tcx.types.usize, span, )) @@ -302,10 +305,10 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { let val = self.eval_operand(arg)?; let prim = self.use_ecx(span, |this| { - this.ecx.value_to_primval(ValTy { value: val.0, ty: val.1 }) + this.ecx.value_to_scalar(ValTy { value: val.0, ty: val.1 }) })?; let val = self.use_ecx(span, |this| this.ecx.unary_op(op, prim, val.1))?; - Some((Value::ByVal(val), place_ty, span)) + Some((Value::Scalar(val), place_ty, span)) } Rvalue::CheckedBinaryOp(op, ref left, ref right) | Rvalue::BinaryOp(op, ref left, ref right) => { @@ -323,13 +326,18 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { } let r = self.use_ecx(span, |this| { - this.ecx.value_to_primval(ValTy { value: right.0, ty: right.1 }) + this.ecx.value_to_scalar(ValTy { value: right.0, ty: right.1 }) })?; if op == BinOp::Shr || op == BinOp::Shl { - let param_env = self.tcx.param_env(self.source.def_id); let left_ty = left.ty(self.mir, self.tcx); - let bits = 
self.tcx.layout_of(param_env.and(left_ty)).unwrap().size.bits(); - if r.to_bytes().ok().map_or(false, |b| b >= bits as u128) { + let left_bits = self + .tcx + .layout_of(self.param_env.and(left_ty)) + .unwrap() + .size + .bits(); + let right_size = self.tcx.layout_of(self.param_env.and(right.1)).unwrap().size; + if r.to_bits(right_size).ok().map_or(false, |b| b >= left_bits as u128) { let scope_info = match self.mir.visibility_scope_info { ClearCrossCrate::Set(ref data) => data, ClearCrossCrate::Clear => return None, @@ -350,16 +358,16 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { } let left = self.eval_operand(left)?; let l = self.use_ecx(span, |this| { - this.ecx.value_to_primval(ValTy { value: left.0, ty: left.1 }) + this.ecx.value_to_scalar(ValTy { value: left.0, ty: left.1 }) })?; trace!("const evaluating {:?} for {:?} and {:?}", op, left, right); let (val, overflow) = self.use_ecx(span, |this| { this.ecx.binary_op(op, l, left.1, r, right.1) })?; let val = if let Rvalue::CheckedBinaryOp(..) = *rvalue { - Value::ByValPair( + Value::ScalarPair( val, - PrimVal::from_bool(overflow), + Scalar::from_bool(overflow), ) } else { if overflow { @@ -371,7 +379,7 @@ impl<'b, 'a, 'tcx:'b> ConstPropagator<'b, 'a, 'tcx> { }); return None; } - Value::ByVal(val) + Value::Scalar(val) }; Some((val, place_ty, span)) }, @@ -485,7 +493,7 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { if let TerminatorKind::Assert { expected, msg, cond, .. } = kind { if let Some(value) = self.eval_operand(cond) { trace!("assertion on {:?} should be {:?}", value, expected); - if Value::ByVal(PrimVal::from_bool(*expected)) != value.0 { + if Value::Scalar(Scalar::from_bool(*expected)) != value.0 { // poison all places this operand references so that further code // doesn't use the invalid value match cond { @@ -520,14 +528,14 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { BoundsCheck { ref len, ref index } => { let len = self.eval_operand(len).expect("len must be const"); let len = match len.0 { - Value::ByVal(PrimVal::Bytes(n)) => n, + Value::Scalar(Scalar::Bits { bits, ..}) => bits, _ => bug!("const len not primitive: {:?}", len), }; let index = self .eval_operand(index) .expect("index must be const"); let index = match index.0 { - Value::ByVal(PrimVal::Bytes(n)) => n, + Value::Scalar(Scalar::Bits { bits, .. }) => bits, _ => bug!("const index not primitive: {:?}", index), }; format!( diff --git a/src/librustc_mir/transform/generator.rs b/src/librustc_mir/transform/generator.rs index c9727f55d20..74b6d721882 100644 --- a/src/librustc_mir/transform/generator.rs +++ b/src/librustc_mir/transform/generator.rs @@ -181,7 +181,7 @@ impl<'a, 'tcx> TransformVisitor<'a, 'tcx> { value: ty::Const::from_bits( self.tcx, state_disc.into(), - self.tcx.types.u32), + ty::ParamEnv::empty().and(self.tcx.types.u32)), }, }); Statement { diff --git a/src/librustc_mir/transform/simplify_branches.rs b/src/librustc_mir/transform/simplify_branches.rs index 72bee040c06..2dbba897fa8 100644 --- a/src/librustc_mir/transform/simplify_branches.rs +++ b/src/librustc_mir/transform/simplify_branches.rs @@ -10,7 +10,7 @@ //! A pass that simplifies branches when their condition is known. -use rustc::ty::TyCtxt; +use rustc::ty::{TyCtxt, ParamEnv}; use rustc::mir::*; use transform::{MirPass, MirSource}; @@ -39,7 +39,8 @@ impl MirPass for SimplifyBranches { TerminatorKind::SwitchInt { discr: Operand::Constant(box Constant { literal: Literal::Value { ref value }, .. }), switch_ty, ref values, ref targets, .. 
} => { - if let Some(constint) = value.assert_bits(switch_ty) { + let switch_ty = ParamEnv::empty().and(switch_ty); + if let Some(constint) = value.assert_bits(tcx, switch_ty) { let (otherwise, targets) = targets.split_last().unwrap(); let mut ret = TerminatorKind::Goto { target: *otherwise }; for (&v, t) in values.iter().zip(targets.iter()) { diff --git a/src/librustc_target/abi/call/mips.rs b/src/librustc_target/abi/call/mips.rs index 5001499ea5d..1e8af52e3e8 100644 --- a/src/librustc_target/abi/call/mips.rs +++ b/src/librustc_target/abi/call/mips.rs @@ -47,7 +47,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType, offset: &mut Size) pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { - let mut offset = Size::from_bytes(0); + let mut offset = Size::ZERO; if !fty.ret.is_ignore() { classify_ret_ty(cx, &mut fty.ret, &mut offset); } diff --git a/src/librustc_target/abi/call/mips64.rs b/src/librustc_target/abi/call/mips64.rs index e5cbc6424a4..3734e563d58 100644 --- a/src/librustc_target/abi/call/mips64.rs +++ b/src/librustc_target/abi/call/mips64.rs @@ -33,8 +33,8 @@ fn float_reg<'a, Ty, C>(cx: C, ret: &ArgType<'a, Ty>, i: usize) -> Option { match ret.layout.field(cx, i).abi { abi::Abi::Scalar(ref scalar) => match scalar.value { - abi::F32 => Some(Reg::f32()), - abi::F64 => Some(Reg::f64()), + abi::Float(abi::FloatTy::F32) => Some(Reg::f32()), + abi::Float(abi::FloatTy::F64) => Some(Reg::f64()), _ => None }, _ => None @@ -109,7 +109,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) abi::FieldPlacement::Arbitrary { .. } => { // Structures are split up into a series of 64-bit integer chunks, but any aligned // doubles not part of another aggregate are passed as floats. - let mut last_offset = Size::from_bytes(0); + let mut last_offset = Size::ZERO; for i in 0..arg.layout.fields.count() { let field = arg.layout.field(cx, i); @@ -117,7 +117,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) // We only care about aligned doubles if let abi::Abi::Scalar(ref scalar) = field.abi { - if let abi::F64 = scalar.value { + if let abi::Float(abi::FloatTy::F64) = scalar.value { if offset.is_abi_aligned(dl.f64_align) { // Insert enough integers to cover [last_offset, offset) assert!(last_offset.is_abi_aligned(dl.f64_align)); diff --git a/src/librustc_target/abi/call/mod.rs b/src/librustc_target/abi/call/mod.rs index 2d8996bb5ae..fcae9ea22bb 100644 --- a/src/librustc_target/abi/call/mod.rs +++ b/src/librustc_target/abi/call/mod.rs @@ -83,7 +83,7 @@ impl ArgAttributes { pub fn new() -> Self { ArgAttributes { regular: ArgAttribute::default(), - pointee_size: Size::from_bytes(0), + pointee_size: Size::ZERO, pointee_align: None, } } @@ -206,7 +206,7 @@ impl From for CastTarget { fn from(uniform: Uniform) -> CastTarget { CastTarget { prefix: [None; 8], - prefix_chunk: Size::from_bytes(0), + prefix_chunk: Size::ZERO, rest: uniform } } @@ -256,8 +256,7 @@ impl<'a, Ty> TyLayout<'a, Ty> { let kind = match scalar.value { abi::Int(..) | abi::Pointer => RegKind::Integer, - abi::F32 | - abi::F64 => RegKind::Float + abi::Float(_) => RegKind::Float, }; Some(Reg { kind, @@ -274,7 +273,7 @@ impl<'a, Ty> TyLayout<'a, Ty> { Abi::ScalarPair(..) | Abi::Aggregate { .. 
} => { - let mut total = Size::from_bytes(0); + let mut total = Size::ZERO; let mut result = None; let is_union = match self.fields { diff --git a/src/librustc_target/abi/call/powerpc.rs b/src/librustc_target/abi/call/powerpc.rs index 8c3c2422d7f..3be3034143a 100644 --- a/src/librustc_target/abi/call/powerpc.rs +++ b/src/librustc_target/abi/call/powerpc.rs @@ -47,7 +47,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType, offset: &mut Size) pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { - let mut offset = Size::from_bytes(0); + let mut offset = Size::ZERO; if !fty.ret.is_ignore() { classify_ret_ty(cx, &mut fty.ret, &mut offset); } diff --git a/src/librustc_target/abi/call/s390x.rs b/src/librustc_target/abi/call/s390x.rs index 3002a3c46c8..37be6ea41c6 100644 --- a/src/librustc_target/abi/call/s390x.rs +++ b/src/librustc_target/abi/call/s390x.rs @@ -29,12 +29,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool C: LayoutOf> + HasDataLayout { match layout.abi { - abi::Abi::Scalar(ref scalar) => { - match scalar.value { - abi::F32 | abi::F64 => true, - _ => false - } - } + abi::Abi::Scalar(ref scalar) => scalar.value.is_float(), abi::Abi::Aggregate { .. } => { if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { is_single_fp_element(cx, layout.field(cx, 0)) diff --git a/src/librustc_target/abi/call/sparc.rs b/src/librustc_target/abi/call/sparc.rs index 5001499ea5d..1e8af52e3e8 100644 --- a/src/librustc_target/abi/call/sparc.rs +++ b/src/librustc_target/abi/call/sparc.rs @@ -47,7 +47,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType, offset: &mut Size) pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { - let mut offset = Size::from_bytes(0); + let mut offset = Size::ZERO; if !fty.ret.is_ignore() { classify_ret_ty(cx, &mut fty.ret, &mut offset); } diff --git a/src/librustc_target/abi/call/x86.rs b/src/librustc_target/abi/call/x86.rs index e803b96b21c..0c0040de9df 100644 --- a/src/librustc_target/abi/call/x86.rs +++ b/src/librustc_target/abi/call/x86.rs @@ -23,12 +23,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool C: LayoutOf> + HasDataLayout { match layout.abi { - abi::Abi::Scalar(ref scalar) => { - match scalar.value { - abi::F32 | abi::F64 => true, - _ => false - } - } + abi::Abi::Scalar(ref scalar) => scalar.value.is_float(), abi::Abi::Aggregate { .. } => { if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { is_single_fp_element(cx, layout.field(cx, 0)) diff --git a/src/librustc_target/abi/call/x86_64.rs b/src/librustc_target/abi/call/x86_64.rs index 0ba1ee736e7..a443255b970 100644 --- a/src/librustc_target/abi/call/x86_64.rs +++ b/src/librustc_target/abi/call/x86_64.rs @@ -55,8 +55,7 @@ fn classify_arg<'a, Ty, C>(cx: C, arg: &ArgType<'a, Ty>) match scalar.value { abi::Int(..) 
| abi::Pointer => Class::Int, - abi::F32 | - abi::F64 => Class::Sse + abi::Float(_) => Class::Sse } } @@ -101,7 +100,7 @@ fn classify_arg<'a, Ty, C>(cx: C, arg: &ArgType<'a, Ty>) } let mut cls = [None; MAX_EIGHTBYTES]; - classify(cx, arg.layout, &mut cls, Size::from_bytes(0))?; + classify(cx, arg.layout, &mut cls, Size::ZERO)?; if n > 2 { if cls[0] != Some(Class::Sse) { return Err(Memory); @@ -175,7 +174,7 @@ fn cast_target(cls: &[Option], size: Size) -> CastTarget { target = CastTarget::pair(lo, hi); } } - assert_eq!(reg_component(cls, &mut i, Size::from_bytes(0)), None); + assert_eq!(reg_component(cls, &mut i, Size::ZERO), None); target } diff --git a/src/librustc_target/abi/mod.rs b/src/librustc_target/abi/mod.rs index 4b11de09773..7ff04df6233 100644 --- a/src/librustc_target/abi/mod.rs +++ b/src/librustc_target/abi/mod.rs @@ -13,7 +13,7 @@ pub use self::Primitive::*; use spec::Target; -use std::cmp; +use std::{cmp, fmt}; use std::ops::{Add, Deref, Sub, Mul, AddAssign, Range, RangeInclusive}; pub mod call; @@ -227,6 +227,8 @@ pub struct Size { } impl Size { + pub const ZERO: Size = Self::from_bytes(0); + pub fn from_bits(bits: u64) -> Size { // Avoid potential overflow from `bits + 7`. Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8) @@ -486,6 +488,42 @@ impl Integer { } } + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy, + PartialOrd, Ord)] +pub enum FloatTy { + F32, + F64, +} + +impl fmt::Debug for FloatTy { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for FloatTy { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.ty_to_string()) + } +} + +impl FloatTy { + pub fn ty_to_string(&self) -> &'static str { + match *self { + FloatTy::F32 => "f32", + FloatTy::F64 => "f64", + } + } + + pub fn bit_width(&self) -> usize { + match *self { + FloatTy::F32 => 32, + FloatTy::F64 => 64, + } + } +} + /// Fundamental unit of memory access and layout. #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum Primitive { @@ -497,8 +535,7 @@ pub enum Primitive { /// a negative integer passed by zero-extension will appear positive in /// the callee, and most operations on it will produce the wrong values. Int(Integer, bool), - F32, - F64, + Float(FloatTy), Pointer } @@ -508,8 +545,8 @@ impl<'a, 'tcx> Primitive { match self { Int(i, _) => i.size(), - F32 => Size::from_bits(32), - F64 => Size::from_bits(64), + Float(FloatTy::F32) => Size::from_bits(32), + Float(FloatTy::F64) => Size::from_bits(64), Pointer => dl.pointer_size } } @@ -519,11 +556,25 @@ impl<'a, 'tcx> Primitive { match self { Int(i, _) => i.align(dl), - F32 => dl.f32_align, - F64 => dl.f64_align, + Float(FloatTy::F32) => dl.f32_align, + Float(FloatTy::F64) => dl.f64_align, Pointer => dl.pointer_align } } + + pub fn is_float(self) -> bool { + match self { + Float(_) => true, + _ => false + } + } + + pub fn is_int(self) -> bool { + match self { + Int(..) => true, + _ => false, + } + } } /// Information about one scalar component of a Rust type. 
@@ -614,7 +665,7 @@ impl FieldPlacement { pub fn offset(&self, i: usize) -> Size { match *self { - FieldPlacement::Union(_) => Size::from_bytes(0), + FieldPlacement::Union(_) => Size::ZERO, FieldPlacement::Array { stride, count } => { let i = i as u64; assert!(i < count); diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs index 6ae1729295f..c9843de547f 100644 --- a/src/libsyntax/ast.rs +++ b/src/libsyntax/ast.rs @@ -33,6 +33,8 @@ use std::fmt; use rustc_data_structures::sync::Lrc; use std::u32; +pub use rustc_target::abi::FloatTy; + #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)] pub struct Label { pub ident: Ident, @@ -1519,41 +1521,6 @@ impl fmt::Display for UintTy { } } -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy, - PartialOrd, Ord)] -pub enum FloatTy { - F32, - F64, -} - -impl fmt::Debug for FloatTy { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for FloatTy { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.ty_to_string()) - } -} - -impl FloatTy { - pub fn ty_to_string(&self) -> &'static str { - match *self { - FloatTy::F32 => "f32", - FloatTy::F64 => "f64", - } - } - - pub fn bit_width(&self) -> usize { - match *self { - FloatTy::F32 => 32, - FloatTy::F64 => 64, - } - } -} - // Bind a type to an associated type: `A=Foo`. #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] pub struct TypeBinding { diff --git a/src/test/ui/const-eval/ref_to_float_transmute.rs b/src/test/ui/const-eval/ref_to_float_transmute.rs new file mode 100644 index 00000000000..77d5222cb9c --- /dev/null +++ b/src/test/ui/const-eval/ref_to_float_transmute.rs @@ -0,0 +1,28 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//compile-pass + +fn main() {} + +static FOO: u32 = 42; + +union Foo { + f: Float, + r: &'static u32, +} + +#[cfg(target_pointer_width="64")] +type Float = f64; + +#[cfg(target_pointer_width="32")] +type Float = f32; + +static BAR: Float = unsafe { Foo { r: &FOO }.f }; diff --git a/src/test/ui/const-eval/ref_to_int_match.rs b/src/test/ui/const-eval/ref_to_int_match.rs new file mode 100644 index 00000000000..4c5fc6c3797 --- /dev/null +++ b/src/test/ui/const-eval/ref_to_int_match.rs @@ -0,0 +1,31 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +fn main() { + let n: Int = 40; + match n { + 0...10 => {}, + 10...BAR => {}, //~ ERROR lower range bound must be less than or equal to upper + _ => {}, + } +} + +union Foo { + f: Int, + r: &'static u32, +} + +#[cfg(target_pointer_width="64")] +type Int = u64; + +#[cfg(target_pointer_width="32")] +type Int = u32; + +const BAR: Int = unsafe { Foo { r: &42 }.f }; diff --git a/src/test/ui/const-eval/ref_to_int_match.stderr b/src/test/ui/const-eval/ref_to_int_match.stderr new file mode 100644 index 00000000000..eef7b6df252 --- /dev/null +++ b/src/test/ui/const-eval/ref_to_int_match.stderr @@ -0,0 +1,9 @@ +error[E0030]: lower range bound must be less than or equal to upper + --> $DIR/ref_to_int_match.rs:15:9 + | +LL | 10...BAR => {}, //~ ERROR lower range bound must be less than or equal to upper + | ^^ lower bound larger than upper bound + +error: aborting due to previous error + +For more information about this error, try `rustc --explain E0030`.
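
The memory and operator hunks above replace the old `PrimVal::{Bytes, Ptr, Undef}` with a `Scalar` whose `Bits` variant records how many bits are defined; a fully undefined value is simply `defined: 0`, as the new `Value::Scalar(Scalar::Bits { defined: 0, .. })` match arms show. The following is a minimal, self-contained sketch of that shape, not the actual rustc definitions: the `Ptr` fields, the error type, and the `from_bool` width used here are simplified stand-ins.

```rust
// Standalone model of the Scalar shape introduced in this diff.
#[derive(Copy, Clone, Debug, PartialEq)]
enum Scalar {
    Bits { bits: u128, defined: u8 },
    // Stand-in for the real `Pointer { alloc_id, offset }`.
    Ptr { alloc_id: u64, offset: u64 },
}

impl Scalar {
    // The role of the old `PrimVal::Undef`: zero defined bits.
    fn undef() -> Self {
        Scalar::Bits { bits: 0, defined: 0 }
    }

    fn from_bool(b: bool) -> Self {
        Scalar::Bits { bits: b as u128, defined: 8 }
    }

    // Mirrors the guard visible in `write_scalar`/`to_bits`: the raw bits are
    // only usable if at least `size_bits` of them are defined.
    fn to_bits(self, size_bits: u64) -> Result<u128, &'static str> {
        match self {
            Scalar::Bits { bits, defined }
                if u64::from(defined) >= size_bits && size_bits != 0 => Ok(bits),
            Scalar::Bits { .. } => Err("undefined or too few defined bits"),
            Scalar::Ptr { .. } => Err("tried to read a pointer as raw bits"),
        }
    }
}

fn main() {
    let x = Scalar::Bits { bits: 0x1234, defined: 32 }; // e.g. a u32 value
    assert_eq!(x.to_bits(32), Ok(0x1234));
    assert!(Scalar::undef().to_bits(32).is_err());
    assert_eq!(Scalar::from_bool(true).to_bits(8), Ok(1));
}
```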
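
The `sign_extend`/`truncate` hunk in `librustc_mir/interpret/mod.rs` only renames `amt` to `shift`, but the shift arithmetic is easy to check in isolation. Below is a hedged sketch: the real functions take a `TyCtxt` and a `Ty` and read the bit width from the type's layout, whereas this version takes the width directly.

```rust
// Standalone sketch of the shift-based sign extension and truncation.
fn sign_extend(value: u128, size: u64) -> u128 {
    let shift = 128 - size;
    // shift the value to the left, then arithmetically back to the right,
    // so the (new) sign bit is copied into all the upper bits
    (((value << shift) as i128) >> shift) as u128
}

fn truncate(value: u128, size: u64) -> u128 {
    let shift = 128 - size;
    // shift left to drop the leftover high bits, shift right to refill with zeroes
    (value << shift) >> shift
}

fn main() {
    // 0xFF read as an 8-bit signed value is -1, i.e. all ones once extended to 128 bits
    assert_eq!(sign_extend(0xFF, 8), u128::MAX);
    // truncating 0x1FF to 8 bits keeps only the low byte
    assert_eq!(truncate(0x1FF, 8), 0xFF);
}
```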
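
The `librustc_target` hunks fold the separate `F32`/`F64` primitives into a single `Float(FloatTy)` variant so ABI code can ask `scalar.value.is_float()` instead of matching both float variants. A rough standalone illustration follows; these are not the rustc definitions, and the `Int` shape and `bit_width` helper are simplified stand-ins invented for the example.

```rust
// Simplified model of the consolidated Primitive enum.
#[derive(Copy, Clone)]
enum FloatTy { F32, F64 }

#[derive(Copy, Clone)]
enum Primitive {
    Int { bits: u8 }, // the real variant is Int(Integer, bool)
    Float(FloatTy),
    Pointer,
}

impl Primitive {
    fn is_float(self) -> bool {
        matches!(self, Primitive::Float(_))
    }

    fn bit_width(self, pointer_bits: u8) -> u8 {
        match self {
            Primitive::Int { bits } => bits,
            Primitive::Float(FloatTy::F32) => 32,
            Primitive::Float(FloatTy::F64) => 64,
            Primitive::Pointer => pointer_bits,
        }
    }
}

fn main() {
    assert!(Primitive::Float(FloatTy::F64).is_float());
    assert!(!Primitive::Int { bits: 32 }.is_float());
    assert_eq!(Primitive::Float(FloatTy::F32).bit_width(64), 32);
    assert_eq!(Primitive::Pointer.bit_width(64), 64);
}
```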