make miri InterpCx TyCtxtAt a TyCtxt, and separately remember the root span of the evaluation

Ralf Jung 2020-06-01 10:15:17 +02:00
parent 871513d02c
commit dc6ffaebd5
15 changed files with 123 additions and 109 deletions
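In API terms: `InterpCx` stops storing a `TyCtxtAt` (a `TyCtxt` bundled with a
mutable "current" span) and instead stores a plain `TyCtxt` plus an immutable
`root_span`; a span-carrying `TyCtxtAt` is rebuilt on demand through the new
`tcx_at()` helper. A condensed sketch of that shape, with toy stand-in types
rather than rustc's real definitions, to make the hunks below easier to follow:

    // Toy stand-ins for the rustc types involved.
    #[derive(Clone, Copy)]
    struct Span(u32);
    #[derive(Clone, Copy)]
    struct TyCtxt;
    #[derive(Clone, Copy)]
    struct TyCtxtAt { tcx: TyCtxt, span: Span }
    impl TyCtxt {
        fn at(self, span: Span) -> TyCtxtAt {
            TyCtxtAt { tcx: self, span }
        }
    }

    // Before: the span rode along inside `tcx` and was mutated via `set_span`.
    struct InterpCxOld { tcx: TyCtxtAt }

    // After: a plain `TyCtxt` plus the fixed root span; the "current" span is
    // derived on demand (from the top stack frame in the real code).
    struct InterpCxNew { tcx: TyCtxt, root_span: Span }
    impl InterpCxNew {
        fn cur_span(&self) -> Span { self.root_span } // stack lookup elided here
        fn tcx_at(&self) -> TyCtxtAt { self.tcx.at(self.cur_span()) }
    }

    fn main() {}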

View File

@@ -705,6 +705,7 @@ impl<'tcx> ty::TyS<'tcx> {
     /// optimization as well as the rules around static values. Note
     /// that the `Freeze` trait is not exposed to end users and is
     /// effectively an implementation detail.
+    // FIXME: use `TyCtxtAt` instead of separate `Span`.
     pub fn is_freeze(
         &'tcx self,
         tcx: TyCtxt<'tcx>,

View File

@@ -56,5 +56,5 @@ pub fn error_to_const_error<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>(
 ) -> ConstEvalErr<'tcx> {
     error.print_backtrace();
     let stacktrace = ecx.generate_stacktrace();
-    ConstEvalErr { error: error.kind, stacktrace, span: ecx.tcx.span }
+    ConstEvalErr { error: error.kind, stacktrace, span: ecx.cur_span() }
 }

View File

@@ -27,7 +27,7 @@ fn eval_body_using_ecx<'mir, 'tcx>(
     body: &'mir mir::Body<'tcx>,
 ) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
     debug!("eval_body_using_ecx: {:?}, {:?}", cid, ecx.param_env);
-    let tcx = ecx.tcx.tcx;
+    let tcx = ecx.tcx;
     let layout = ecx.layout_of(body.return_ty().subst(tcx, cid.instance.substs))?;
     assert!(!layout.is_unsized());
     let ret = ecx.allocate(layout, MemoryKind::Stack);
@@ -81,13 +81,14 @@
 /// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument.
 pub(super) fn mk_eval_cx<'mir, 'tcx>(
     tcx: TyCtxt<'tcx>,
-    span: Span,
+    root_span: Span,
     param_env: ty::ParamEnv<'tcx>,
     can_access_statics: bool,
 ) -> CompileTimeEvalContext<'mir, 'tcx> {
     debug!("mk_eval_cx: {:?}", param_env);
     InterpCx::new(
-        tcx.at(span),
+        tcx,
+        root_span,
         param_env,
         CompileTimeInterpreter::new(tcx.sess.const_eval_limit()),
         MemoryExtra { can_access_statics },
@@ -163,7 +164,7 @@ pub(super) fn op_to_const<'tcx>(
                 0,
             ),
         };
-        let len = b.to_machine_usize(&ecx.tcx.tcx).unwrap();
+        let len = b.to_machine_usize(ecx).unwrap();
         let start = start.try_into().unwrap();
         let len: usize = len.try_into().unwrap();
         ConstValue::Slice { data, start, end: start + len }
@@ -213,7 +214,7 @@ fn validate_and_turn_into_const<'tcx>(
     val.map_err(|error| {
         let err = error_to_const_error(&ecx, error);
-        err.struct_error(ecx.tcx, "it is undefined behavior to use this value", |mut diag| {
+        err.struct_error(ecx.tcx_at(), "it is undefined behavior to use this value", |mut diag| {
             diag.note(note_on_undefined_behavior_error());
             diag.emit();
         })
@@ -299,9 +300,9 @@ pub fn const_eval_raw_provider<'tcx>(
     let is_static = tcx.is_static(def_id);

-    let span = tcx.def_span(cid.instance.def_id());
     let mut ecx = InterpCx::new(
-        tcx.at(span),
+        tcx,
+        tcx.def_span(cid.instance.def_id()),
         key.param_env,
         CompileTimeInterpreter::new(tcx.sess.const_eval_limit()),
         MemoryExtra { can_access_statics: is_static },
@@ -316,7 +317,7 @@ pub fn const_eval_raw_provider<'tcx>(
     if is_static {
         // Ensure that if the above error was either `TooGeneric` or `Reported`
         // an error must be reported.
-        let v = err.report_as_error(ecx.tcx, "could not evaluate static initializer");
+        let v = err.report_as_error(ecx.tcx_at(), "could not evaluate static initializer");

         // If this is `Reveal:All`, then we need to make sure an error is reported but if
         // this is `Reveal::UserFacing`, then it's expected that we could get a
@@ -372,13 +373,13 @@ pub fn const_eval_raw_provider<'tcx>(
                 // anything else (array lengths, enum initializers, constant patterns) are
                 // reported as hard errors
             } else {
-                err.report_as_error(ecx.tcx, "evaluation of constant value failed")
+                err.report_as_error(ecx.tcx_at(), "evaluation of constant value failed")
             }
         }
     }
 } else {
     // use of broken constant from other crate
-    err.report_as_error(ecx.tcx, "could not evaluate constant")
+    err.report_as_error(ecx.tcx_at(), "could not evaluate constant")
 }
 })
 }

View File

@@ -56,7 +56,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
         let instance = ty::Instance::resolve_for_fn_ptr(
-            *self.tcx,
+            self.tcx,
             self.param_env,
             def_id,
             substs,
@@ -91,7 +91,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
         let instance = ty::Instance::resolve_closure(
-            *self.tcx,
+            self.tcx,
             def_id,
             substs,
             ty::ClosureKind::FnOnce,
@@ -140,7 +140,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Handle cast from a univariant (ZST) enum.
         match src.layout.variants {
             Variants::Single { index } => {
-                if let Some(discr) = src.layout.ty.discriminant_for_variant(*self.tcx, index) {
+                if let Some(discr) = src.layout.ty.discriminant_for_variant(self.tcx, index) {
                     assert!(src.layout.is_zst());
                     let discr_layout = self.layout_of(discr.ty)?;
                     return Ok(self.cast_from_scalar(discr.val, discr_layout, cast_ty).into());
@@ -270,7 +270,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // u64 cast is from usize to u64, which is always good
                 let val = Immediate::new_slice(
                     ptr,
-                    length.eval_usize(self.tcx.tcx, self.param_env),
+                    length.eval_usize(self.tcx, self.param_env),
                     self,
                 );
                 self.write_immediate(val, dest)

View File

@@ -33,7 +33,11 @@ pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
     pub machine: M,

     /// The results of the type checker, from rustc.
-    pub tcx: TyCtxtAt<'tcx>,
+    pub tcx: TyCtxt<'tcx>,
+
+    /// The span of the "root" of the evaluation, i.e., the const
+    /// we are evaluating (if this is CTFE).
+    pub(super) root_span: Span,

     /// Bounds in scope for polymorphic evaluations.
     pub(crate) param_env: ty::ParamEnv<'tcx>,
@@ -196,7 +200,7 @@ where
 {
     #[inline]
     fn tcx(&self) -> TyCtxt<'tcx> {
-        *self.tcx
+        self.tcx
     }
 }
@@ -209,13 +213,13 @@ where
     }
 }

-impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf for InterpCx<'mir, 'tcx, M> {
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOf for InterpCx<'mir, 'tcx, M> {
     type Ty = Ty<'tcx>;
     type TyAndLayout = InterpResult<'tcx, TyAndLayout<'tcx>>;

     #[inline]
     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
-        self.tcx
+        self.tcx_at()
             .layout_of(self.param_env.and(ty))
             .map_err(|layout| err_inval!(Layout(layout)).into())
     }
@@ -292,7 +296,8 @@ pub(super) fn from_known_layout<'tcx>(
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     pub fn new(
-        tcx: TyCtxtAt<'tcx>,
+        tcx: TyCtxt<'tcx>,
+        root_span: Span,
         param_env: ty::ParamEnv<'tcx>,
         machine: M,
         memory_extra: M::MemoryExtra,
@@ -300,15 +305,26 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         InterpCx {
             machine,
             tcx,
+            root_span,
             param_env,
-            memory: Memory::new(*tcx, memory_extra),
+            memory: Memory::new(tcx, memory_extra),
             vtables: FxHashMap::default(),
         }
     }

     #[inline(always)]
-    pub fn set_span(&mut self, span: Span) {
-        self.tcx.span = span;
+    pub fn cur_span(&self) -> Span {
+        self
+            .stack()
+            .last()
+            .and_then(|f| f.current_source_info())
+            .map(|si| si.span)
+            .unwrap_or(self.root_span)
+    }
+
+    #[inline(always)]
+    pub fn tcx_at(&self) -> TyCtxtAt<'tcx> {
+        self.tcx.at(self.cur_span())
     }

     #[inline(always)]
@@ -386,12 +402,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     #[inline]
     pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
-        ty.is_sized(self.tcx, self.param_env)
+        ty.is_sized(self.tcx_at(), self.param_env)
     }

     #[inline]
     pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
-        ty.is_freeze(*self.tcx, self.param_env, DUMMY_SP)
+        ty.is_freeze(self.tcx, self.param_env, self.cur_span())
     }

     pub fn load_mir(
@@ -402,20 +418,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // do not continue if typeck errors occurred (can only occur in local crate)
         let did = instance.def_id();
         if let Some(did) = did.as_local() {
-            if self.tcx.has_typeck_tables(did) {
-                if let Some(error_reported) = self.tcx.typeck_tables_of(did).tainted_by_errors {
+            if self.tcx_at().has_typeck_tables(did) {
+                if let Some(error_reported) = self.tcx_at().typeck_tables_of(did).tainted_by_errors {
                     throw_inval!(TypeckError(error_reported))
                 }
             }
         }
         trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
         if let Some(promoted) = promoted {
-            return Ok(&self.tcx.promoted_mir(did)[promoted]);
+            return Ok(&self.tcx_at().promoted_mir(did)[promoted]);
         }
         match instance {
             ty::InstanceDef::Item(def_id) => {
-                if self.tcx.is_mir_available(did) {
-                    Ok(self.tcx.optimized_mir(did))
+                if self.tcx_at().is_mir_available(did) {
+                    Ok(self.tcx_at().optimized_mir(did))
                 } else {
                     throw_unsup!(NoMirFor(def_id))
                 }
@@ -456,7 +472,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         trace!("resolve: {:?}, {:#?}", def_id, substs);
         trace!("param_env: {:#?}", self.param_env);
         trace!("substs: {:#?}", substs);
-        match ty::Instance::resolve(*self.tcx, self.param_env, def_id, substs) {
+        match ty::Instance::resolve(self.tcx, self.param_env, def_id, substs) {
             Ok(Some(instance)) => Ok(instance),
             Ok(None) => throw_inval!(TooGeneric),
@@ -475,7 +491,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // have to support that case (mostly by skipping all caching).
         match frame.locals.get(local).and_then(|state| state.layout.get()) {
             None => {
-                let layout = from_known_layout(self.tcx, layout, || {
+                let layout = from_known_layout(self.tcx_at(), layout, || {
                     let local_ty = frame.body.local_decls[local].ty;
                     let local_ty =
                         self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty);
@@ -560,7 +576,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let size = size.align_to(align);

                 // Check if this brought us over the size limit.
-                if size.bytes() >= self.tcx.data_layout().obj_size_bound() {
+                if size.bytes() >= self.tcx.data_layout.obj_size_bound() {
                     throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
                 }
                 Ok(Some((size, align)))
@@ -576,7 +592,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let elem = layout.field(self, 0)?;

                 // Make sure the slice is not too big.
-                let size = elem.size.checked_mul(len, &*self.tcx).ok_or_else(|| {
+                let size = elem.size.checked_mul(len, self).ok_or_else(|| {
                     err_ub!(InvalidMeta("slice is bigger than largest supported object"))
                 })?;
                 Ok(Some((size, elem.align.abi)))
@@ -627,7 +643,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let mut locals = IndexVec::from_elem(dummy, &body.local_decls);

         // Now mark those locals as dead that we do not want to initialize
-        match self.tcx.def_kind(instance.def_id()) {
+        match self.tcx_at().def_kind(instance.def_id()) {
             // statics and constants don't have `Storage*` statements, no need to look for them
             //
             // FIXME: The above is likely untrue. See
@@ -842,7 +858,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         } else {
             self.param_env
         };
-        let val = self.tcx.const_eval_global_id(param_env, gid, Some(self.tcx.span))?;
+        let val = self.tcx.const_eval_global_id(param_env, gid, Some(self.cur_span()))?;

         // Even though `ecx.const_eval` is called from `eval_const_to_op` we can never have a
         // recursion deeper than one level, because the `tcx.const_eval` above is guaranteed to not
@@ -873,7 +889,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // FIXME: We can hit delay_span_bug if this is an invalid const, interning finds
         // that problem, but we never run validation to show an error. Can we ensure
         // this does not happen?
-        let val = self.tcx.const_eval_raw(param_env.and(gid))?;
+        let val = self.tcx_at().const_eval_raw(param_env.and(gid))?;
         self.raw_const_to_mplace(val)
     }
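The `cur_span` fallback above is the heart of the change: the span of the
topmost frame's current statement while the interpreter is executing, else the
root span of the whole evaluation (e.g. the `def_span` of the const being
evaluated). A minimal runnable model of just that logic, with plain types
standing in for rustc's (here `current_source_info` yields the span directly,
where the real code goes through a `SourceInfo`):

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Span(u32);

    struct Frame {
        source_info: Option<Span>,
    }

    impl Frame {
        fn current_source_info(&self) -> Option<Span> {
            self.source_info
        }
    }

    struct Evaluator {
        stack: Vec<Frame>,
        root_span: Span,
    }

    impl Evaluator {
        fn stack(&self) -> &[Frame] {
            &self.stack
        }

        // Mirrors the new `cur_span`: the topmost frame's span, else the root.
        fn cur_span(&self) -> Span {
            self.stack()
                .last()
                .and_then(|f| f.current_source_info())
                .unwrap_or(self.root_span)
        }
    }

    fn main() {
        let mut ecx = Evaluator { stack: vec![], root_span: Span(0) };
        assert_eq!(ecx.cur_span(), Span(0)); // no frames yet: the root span
        ecx.stack.push(Frame { source_info: Some(Span(42)) });
        assert_eq!(ecx.cur_span(), Span(42)); // executing: the statement's span
    }

Compared with the old `set_span` approach, nothing has to be written back on
every statement and terminator; the span is recomputed only when an error or a
query actually needs it, which is why the step and const-propagation hunks
further down simply delete their `set_span` calls.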

View File

@@ -93,7 +93,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>>(
         // in the value the dangling reference lies.
         // The `delay_span_bug` ensures that we don't forget such a check in validation.
         if tcx.get_global_alloc(alloc_id).is_none() {
-            tcx.sess.delay_span_bug(ecx.tcx.span, "tried to intern dangling pointer");
+            tcx.sess.delay_span_bug(ecx.root_span, "tried to intern dangling pointer");
         }
         // treat dangling pointers like other statics
         // just to stop trying to recurse into them
@@ -111,7 +111,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>>(
     if let InternMode::Static(mutability) = mode {
         // For this, we need to take into account `UnsafeCell`. When `ty` is `None`, we assume
         // no interior mutability.
-        let frozen = ty.map_or(true, |ty| ty.is_freeze(ecx.tcx.tcx, ecx.param_env, ecx.tcx.span));
+        let frozen = ty.map_or(true, |ty| ty.is_freeze(ecx.tcx, ecx.param_env, ecx.root_span));
         // For statics, allocation mutability is the combination of the place mutability and
         // the type mutability.
         // The entire allocation needs to be mutable if it contains an `UnsafeCell` anywhere.
@@ -174,7 +174,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx>> ValueVisitor<'mir
                 // they caused. It also helps us to find cases where const-checking
                 // failed to prevent an `UnsafeCell` (but as `ignore_interior_mut_in_const`
                 // shows that part is not airtight).
-                mutable_memory_in_const(self.ecx.tcx, "`UnsafeCell`");
+                mutable_memory_in_const(self.ecx.tcx_at(), "`UnsafeCell`");
             }
             // We are crossing over an `UnsafeCell`, we can mutate again. This means that
             // References we encounter inside here are interned as pointing to mutable
@@ -192,7 +192,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx>> ValueVisitor<'mir
     fn visit_value(&mut self, mplace: MPlaceTy<'tcx>) -> InterpResult<'tcx> {
         // Handle Reference types, as these are the only relocations supported by const eval.
         // Raw pointers (and boxes) are handled by the `leftover_relocations` logic.
-        let tcx = self.ecx.tcx;
+        let tcx = self.ecx.tcx.at(self.ecx.root_span);
         let ty = mplace.layout.ty;
         if let ty::Ref(_, referenced_ty, ref_mutability) = ty.kind {
             let value = self.ecx.read_immediate(mplace.into())?;
@@ -254,7 +254,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx>> ValueVisitor<'mir
             if ref_mutability == Mutability::Mut {
                 match referenced_ty.kind {
                     ty::Array(_, n)
-                        if n.eval_usize(tcx.tcx, self.ecx.param_env) == 0 => {}
+                        if n.eval_usize(self.ecx.tcx, self.ecx.param_env) == 0 => {}
                     ty::Slice(_)
                         if mplace.meta.unwrap_meta().to_machine_usize(self.ecx)?
                             == 0 => {}
@@ -358,7 +358,7 @@ pub fn intern_const_alloc_recursive<M: CompileTimeMachine<'mir, 'tcx>>(
         Ok(()) => {}
         Err(error) => {
             ecx.tcx.sess.delay_span_bug(
-                ecx.tcx.span,
+                ecx.root_span,
                 "error during interning should later cause validation failure",
             );
             // Some errors shouldn't come up because creating them causes
@@ -407,7 +407,7 @@ pub fn intern_const_alloc_recursive<M: CompileTimeMachine<'mir, 'tcx>>(
                 // such as `const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;`.
                 ecx.tcx
                     .sess
-                    .span_err(ecx.tcx.span, "untyped pointers are not allowed in constant");
+                    .span_err(ecx.root_span, "untyped pointers are not allowed in constant");
                 // For better errors later, mark the allocation as immutable.
                 alloc.mutability = Mutability::Not;
             }
@@ -422,11 +422,11 @@ pub fn intern_const_alloc_recursive<M: CompileTimeMachine<'mir, 'tcx>>(
         } else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
             // Codegen does not like dangling pointers, and generally `tcx` assumes that
             // all allocations referenced anywhere actually exist. So, make sure we error here.
-            ecx.tcx.sess.span_err(ecx.tcx.span, "encountered dangling pointer in final constant");
+            ecx.tcx.sess.span_err(ecx.root_span, "encountered dangling pointer in final constant");
         } else if ecx.tcx.get_global_alloc(alloc_id).is_none() {
             // We have hit an `AllocId` that is neither in local or global memory and isn't
             // marked as dangling by local memory. That should be impossible.
-            span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id);
+            span_bug!(ecx.root_span, "encountered unknown alloc id {:?}", alloc_id);
         }
     }
 }
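A pattern worth noting in the interning hunks above: they use `ecx.root_span`
directly rather than `ecx.cur_span()`, presumably because interning runs after
evaluation has finished, when there is no active frame whose span could be
more precise than the root of the evaluation.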

View File

@@ -347,7 +347,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
                 let elem = args[2];
                 let input = args[0];
-                let (len, e_ty) = input.layout.ty.simd_size_and_type(self.tcx.tcx);
+                let (len, e_ty) = input.layout.ty.simd_size_and_type(self.tcx);
                 assert!(
                     index < len,
                     "Index `{}` must be in bounds of vector type `{}`: `[0, {})`",
@@ -374,7 +374,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             sym::simd_extract => {
                 let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
-                let (len, e_ty) = args[0].layout.ty.simd_size_and_type(self.tcx.tcx);
+                let (len, e_ty) = args[0].layout.ty.simd_size_and_type(self.tcx);
                 assert!(
                     index < len,
                     "index `{}` is out-of-bounds of vector type `{}` with length `{}`",

View File

@@ -25,7 +25,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     "find_closest_untracked_caller_location: checking frame {:?}",
                     frame.instance
                 );
-                !frame.instance.def.requires_caller_location(*self.tcx)
+                !frame.instance.def.requires_caller_location(self.tcx)
             })
             // Assert that there is always such a frame.
             .unwrap();
@@ -58,7 +58,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let loc_ty = self
             .tcx
             .type_of(self.tcx.require_lang_item(PanicLocationLangItem, None))
-            .subst(*self.tcx, self.tcx.mk_substs([self.tcx.lifetimes.re_erased.into()].iter()));
+            .subst(self.tcx, self.tcx.mk_substs([self.tcx.lifetimes.re_erased.into()].iter()));
         let loc_layout = self.layout_of(loc_ty).unwrap();
         let location = self.allocate(loc_layout, MemoryKind::CallerLocation);

View File

@@ -14,7 +14,7 @@ use std::ptr;

 use rustc_ast::ast::Mutability;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_middle::ty::{self, TyCtxt, Instance, ParamEnv};
+use rustc_middle::ty::{self, Instance, ParamEnv, TyCtxt};
 use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};

 use super::{

View File

@@ -471,9 +471,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         trace!("eval_place_to_op: got {:?}", *op);
         // Sanity-check the type we ended up with.
         debug_assert!(mir_assign_valid_types(
-            *self.tcx,
+            self.tcx,
             self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
-                place.ty(&self.frame().body.local_decls, *self.tcx).ty
+                place.ty(&self.frame().body.local_decls, self.tcx).ty
             ))?,
             op.layout,
         ));
@@ -554,7 +554,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // documentation).
         let val_val = M::adjust_global_const(self, val_val)?;
         // Other cases need layout.
-        let layout = from_known_layout(self.tcx, layout, || self.layout_of(val.ty))?;
+        let layout = from_known_layout(self.tcx_at(), layout, || self.layout_of(val.ty))?;
         let op = match val_val {
             ConstValue::ByRef { alloc, offset } => {
                 let id = self.tcx.create_memory_alloc(alloc);
@@ -589,7 +589,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         trace!("read_discriminant_value {:#?}", op.layout);

         // Get type and layout of the discriminant.
-        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(self.tcx))?;
         trace!("discriminant type: {:?}", discr_layout.ty);

         // We use "discriminant" to refer to the value associated with a particular enum variant.
@@ -601,7 +601,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // rather confusing.
         let (tag_scalar_layout, tag_kind, tag_index) = match op.layout.variants {
             Variants::Single { index } => {
-                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
+                let discr = match op.layout.ty.discriminant_for_variant(self.tcx, index) {
                     Some(discr) => {
                         // This type actually has discriminants.
                         assert_eq!(discr.ty, discr_layout.ty);
@@ -630,7 +630,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.

         // Get layout for tag.
-        let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?;
+        let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(self.tcx))?;

         // Read tag and sanity-check `tag_layout`.
         let tag_val = self.read_immediate(self.operand_field(op, tag_index)?)?;
@@ -651,12 +651,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // Convert discriminant to variant index, and catch invalid discriminants.
                 let index = match op.layout.ty.kind {
                     ty::Adt(adt, _) => {
-                        adt.discriminants(self.tcx.tcx).find(|(_, var)| var.val == discr_bits)
+                        adt.discriminants(self.tcx).find(|(_, var)| var.val == discr_bits)
                     }
                     ty::Generator(def_id, substs, _) => {
                         let substs = substs.as_generator();
                         substs
-                            .discriminants(def_id, self.tcx.tcx)
+                            .discriminants(def_id, self.tcx)
                             .find(|(_, var)| var.val == discr_bits)
                     }
                     _ => bug!("tagged layout for non-adt non-generator"),

View File

@@ -404,7 +404,7 @@ where
                     // to get some code to work that probably ought to work.
                     field_layout.align.abi
                 }
-                None => bug!("Cannot compute offset for extern type field at non-0 offset"),
+                None => span_bug!(self.cur_span(), "cannot compute offset for extern type field at non-0 offset"),
             };
             (base.meta, offset.align_to(align))
         } else {
@@ -440,7 +440,7 @@ where
                 assert!(!field_layout.is_unsized());
                 base.offset(offset, MemPlaceMeta::None, field_layout, self)
             }
-            _ => bug!("`mplace_index` called on non-array type {:?}", base.layout.ty),
+            _ => span_bug!(self.cur_span(), "`mplace_index` called on non-array type {:?}", base.layout.ty),
         }
     }
@@ -454,7 +454,7 @@ where
         let len = base.len(self)?; // also asserts that we have a type where this makes sense
         let stride = match base.layout.fields {
             FieldsShape::Array { stride, .. } => stride,
-            _ => bug!("mplace_array_fields: expected an array layout"),
+            _ => span_bug!(self.cur_span(), "mplace_array_fields: expected an array layout"),
         };
         let layout = base.layout.field(self, 0)?;
         let dl = &self.tcx.data_layout;
@@ -484,7 +484,7 @@ where
         // (that have count 0 in their layout).
         let from_offset = match base.layout.fields {
             FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
-            _ => bug!("Unexpected layout of index access: {:#?}", base.layout),
+            _ => span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout),
         };

         // Compute meta and new layout
@@ -497,7 +497,7 @@ where
                 let len = Scalar::from_machine_usize(inner_len, self);
                 (MemPlaceMeta::Meta(len), base.layout.ty)
             }
-            _ => bug!("cannot subslice non-array type: `{:?}`", base.layout.ty),
+            _ => span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty),
         };
         let layout = self.layout_of(ty)?;
         base.offset(from_offset, meta, layout, self)
@@ -640,9 +640,9 @@ where
         self.dump_place(place_ty.place);
         // Sanity-check the type we ended up with.
         debug_assert!(mir_assign_valid_types(
-            *self.tcx,
+            self.tcx,
             self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
-                place.ty(&self.frame().body.local_decls, *self.tcx).ty
+                place.ty(&self.frame().body.local_decls, self.tcx).ty
             ))?,
             place_ty.layout,
         ));
@@ -768,7 +768,7 @@ where
             None => return Ok(()), // zero-sized access
         };

-        let tcx = &*self.tcx;
+        let tcx = self.tcx;
        // FIXME: We should check that there are dest.layout.size many bytes available in
        // memory. The code below is not sufficient, with enough padding it might not
        // cover all the bytes!
@@ -777,11 +777,11 @@ where
                match dest.layout.abi {
                    Abi::Scalar(_) => {} // fine
                    _ => {
-                        bug!("write_immediate_to_mplace: invalid Scalar layout: {:#?}", dest.layout)
+                        span_bug!(self.cur_span(), "write_immediate_to_mplace: invalid Scalar layout: {:#?}", dest.layout)
                    }
                }
                self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(
-                    tcx,
+                    &tcx,
                    ptr,
                    scalar,
                    dest.layout.size,
@@ -793,7 +793,8 @@ where
                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                let (a, b) = match dest.layout.abi {
                    Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
-                    _ => bug!(
+                    _ => span_bug!(
+                        self.cur_span(),
                        "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
                        dest.layout
                    ),
@@ -806,8 +807,8 @@ where
                // but that does not work: We could be a newtype around a pair, then the
                // fields do not match the `ScalarPair` components.

-                self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(tcx, ptr, a_val, a_size)?;
-                self.memory.get_raw_mut(b_ptr.alloc_id)?.write_scalar(tcx, b_ptr, b_val, b_size)
+                self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(&tcx, ptr, a_val, a_size)?;
+                self.memory.get_raw_mut(b_ptr.alloc_id)?.write_scalar(&tcx, b_ptr, b_val, b_size)
            }
        }
    }
@@ -841,9 +842,9 @@ where
    ) -> InterpResult<'tcx> {
        // We do NOT compare the types for equality, because well-typed code can
        // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
-        if !mir_assign_valid_types(self.tcx.tcx, src.layout, dest.layout) {
+        if !mir_assign_valid_types(self.tcx, src.layout, dest.layout) {
            span_bug!(
-                self.tcx.span,
+                self.cur_span(),
                "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
                src.layout.ty,
                dest.layout.ty,
@@ -898,7 +899,7 @@ where
        src: OpTy<'tcx, M::PointerTag>,
        dest: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
-        if mir_assign_valid_types(self.tcx.tcx, src.layout, dest.layout) {
+        if mir_assign_valid_types(self.tcx, src.layout, dest.layout) {
            // Fast path: Just use normal `copy_op`
            return self.copy_op(src, dest);
        }
@@ -910,7 +911,7 @@ where
        // on `typeck_tables().has_errors` at all const eval entry points.
        debug!("Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
        self.tcx.sess.delay_span_bug(
-            self.tcx.span,
+            self.cur_span(),
            "size-changing transmute, should have been caught by transmute checking",
        );
        throw_inval!(TransmuteSizeDiff(src.layout.ty, dest.layout.ty));
@@ -1056,7 +1057,7 @@ where
        // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.

        let discr_val =
-            dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
+            dest.layout.ty.discriminant_for_variant(self.tcx, variant_index).unwrap().val;

        // raw discriminants for enums are isize or bigger during
        // their computation, but the in-memory tag is the smallest possible
@@ -1085,7 +1086,7 @@ where
            .expect("overflow computing relative variant idx");
        // We need to use machine arithmetic when taking into account `niche_start`:
        // discr_val = variant_index_relative + niche_start_val
-        let discr_layout = self.layout_of(discr_layout.value.to_int_ty(*self.tcx))?;
+        let discr_layout = self.layout_of(discr_layout.value.to_int_ty(self.tcx))?;
        let niche_start_val = ImmTy::from_uint(niche_start, discr_layout);
        let variant_index_relative_val =
            ImmTy::from_uint(variant_index_relative, discr_layout);

View File

@@ -76,7 +76,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
         info!("{:?}", stmt);
-        self.set_span(stmt.source_info.span);

         use rustc_middle::mir::StatementKind::*;
@@ -279,7 +278,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
         info!("{:?}", terminator.kind);
-        self.set_span(terminator.source_info.span);

         self.eval_terminator(terminator)?;
         if !self.stack().is_empty() {

View File

@@ -69,7 +69,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 (fn_val, caller_abi)
             }
             ty::FnDef(def_id, substs) => {
-                let sig = func.layout.ty.fn_sig(*self.tcx);
+                let sig = func.layout.ty.fn_sig(self.tcx);
                 (FnVal::Instance(self.resolve(def_id, substs)?), sig.abi())
             }
             _ => span_bug!(
@@ -96,7 +96,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let ty = place.layout.ty;
                 trace!("TerminatorKind::drop: {:?}, type {}", location, ty);

-                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
+                let instance = Instance::resolve_drop_in_place(self.tcx, ty);
                 self.drop_in_place(place, instance, target, unwind)?;
             }
@@ -227,9 +227,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // ABI check
         {
             let callee_abi = {
-                let instance_ty = instance.ty_env(*self.tcx, self.param_env);
+                let instance_ty = instance.ty_env(self.tcx, self.param_env);
                 match instance_ty.kind {
-                    ty::FnDef(..) => instance_ty.fn_sig(*self.tcx).abi(),
+                    ty::FnDef(..) => instance_ty.fn_sig(self.tcx).abi(),
                     ty::Closure(..) => Abi::RustCall,
                     ty::Generator(..) => Abi::Rust,
                     _ => bug!("unexpected callee ty: {:?}", instance_ty),

View File

@@ -2,7 +2,7 @@ use std::convert::TryFrom;

 use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
 use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
-use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size};
+use rustc_target::abi::{Align, LayoutOf, Size};

 use super::{FnVal, InterpCx, Machine, MemoryKind};
@@ -36,10 +36,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }

         let methods = if let Some(poly_trait_ref) = poly_trait_ref {
-            let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty);
+            let trait_ref = poly_trait_ref.with_self_ty(self.tcx, ty);
             let trait_ref = self.tcx.erase_regions(&trait_ref);
-            self.tcx.vtable_methods(trait_ref)
+            self.tcx_at().vtable_methods(trait_ref)
         } else {
             &[]
         };
@@ -49,8 +49,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let size = layout.size.bytes();
         let align = layout.align.abi.bytes();

+        let tcx = self.tcx;
         let ptr_size = self.pointer_size();
-        let ptr_align = self.tcx.data_layout.pointer_align.abi;
+        let ptr_align = tcx.data_layout.pointer_align.abi;
         // /////////////////////////////////////////////////////////////////////////////////////////
         // If you touch this code, be sure to also make the corresponding changes to
         // `get_vtable` in `rust_codegen_llvm/meth.rs`.
@@ -60,33 +61,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             ptr_align,
             MemoryKind::Vtable,
         );
-        let tcx = &*self.tcx;

-        let drop = Instance::resolve_drop_in_place(*tcx, ty);
+        let drop = Instance::resolve_drop_in_place(tcx, ty);
         let drop = self.memory.create_fn_alloc(FnVal::Instance(drop));

         // No need to do any alignment checks on the memory accesses below, because we know the
         // allocation is correctly aligned as we created it above. Also we're only offsetting by
         // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
         let vtable_alloc = self.memory.get_raw_mut(vtable.alloc_id)?;
-        vtable_alloc.write_ptr_sized(tcx, vtable, drop.into())?;
+        vtable_alloc.write_ptr_sized(&tcx, vtable, drop.into())?;

-        let size_ptr = vtable.offset(ptr_size, tcx)?;
-        vtable_alloc.write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
-        let align_ptr = vtable.offset(ptr_size * 2, tcx)?;
-        vtable_alloc.write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;
+        let size_ptr = vtable.offset(ptr_size, &tcx)?;
+        vtable_alloc.write_ptr_sized(&tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
+        let align_ptr = vtable.offset(ptr_size * 2, &tcx)?;
+        vtable_alloc.write_ptr_sized(&tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;

         for (i, method) in methods.iter().enumerate() {
             if let Some((def_id, substs)) = *method {
                 // resolve for vtable: insert shims where needed
                 let instance =
-                    ty::Instance::resolve_for_vtable(*tcx, self.param_env, def_id, substs)
+                    ty::Instance::resolve_for_vtable(tcx, self.param_env, def_id, substs)
                         .ok_or_else(|| err_inval!(TooGeneric))?;
                 let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
                 // We cannot use `vtable_allic` as we are creating fn ptrs in this loop.
-                let method_ptr = vtable.offset(ptr_size * (3 + i as u64), tcx)?;
+                let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &tcx)?;
                 self.memory.get_raw_mut(vtable.alloc_id)?.write_ptr_sized(
-                    tcx,
+                    &tcx,
                     method_ptr,
                     fn_ptr.into(),
                 )?;
@@ -142,7 +142,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // to determine the type.
         let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?;
         trace!("Found drop fn: {:?}", drop_instance);
-        let fn_sig = drop_instance.ty_env(*self.tcx, self.param_env).fn_sig(*self.tcx);
+        let fn_sig = drop_instance.ty_env(self.tcx, self.param_env).fn_sig(self.tcx);
         let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);

         // The drop function takes `*mut T` where `T` is the type being dropped, so get that.
         let args = fn_sig.inputs();
@@ -171,7 +171,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             alloc.read_ptr_sized(self, vtable.offset(pointer_size * 2, self)?)?.not_undef()?;
         let align = u64::try_from(self.force_bits(align, pointer_size)?).unwrap();

-        if size >= self.tcx.data_layout().obj_size_bound() {
+        if size >= self.tcx.data_layout.obj_size_bound() {
             throw_ub_format!(
                 "invalid vtable: \
                  size is bigger than largest supported object"

View File

@@ -313,7 +313,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         let param_env = tcx.param_env(def_id).with_reveal_all();

         let span = tcx.def_span(def_id);
-        let mut ecx = InterpCx::new(tcx.at(span), param_env, ConstPropMachine::new(), ());
+        let mut ecx = InterpCx::new(tcx, span, param_env, ConstPropMachine::new(), ());
         let can_const_prop = CanConstProp::check(body);

         let ret = ecx
@@ -404,8 +404,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         match self.ecx.eval_const_to_op(c.literal, None) {
             Ok(op) => Some(op),
             Err(error) => {
-                // Make sure errors point at the constant.
-                self.ecx.set_span(c.span);
+                let tcx = self.ecx.tcx.at(c.span);
                 let err = error_to_const_error(&self.ecx, error);
                 if let Some(lint_root) = self.lint_root(source_info) {
                     let lint_only = match c.literal.val {
@@ -419,16 +418,16 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                         // Out of backwards compatibility we cannot report hard errors in unused
                         // generic functions using associated constants of the generic parameters.
                         err.report_as_lint(
-                            self.ecx.tcx,
+                            tcx,
                             "erroneous constant used",
                             lint_root,
                             Some(c.span),
                         );
                     } else {
-                        err.report_as_error(self.ecx.tcx, "erroneous constant used");
+                        err.report_as_error(tcx, "erroneous constant used");
                     }
                 } else {
-                    err.report_as_error(self.ecx.tcx, "erroneous constant used");
+                    err.report_as_error(tcx, "erroneous constant used");
                 }
                 None
             }
@@ -851,7 +850,6 @@ impl<'mir, 'tcx> MutVisitor<'tcx> for ConstPropagator<'mir, 'tcx> {
     fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
         trace!("visit_statement: {:?}", statement);
         let source_info = statement.source_info;
-        self.ecx.set_span(source_info.span);
         self.source_info = Some(source_info);
         if let StatementKind::Assign(box (place, ref mut rval)) = statement.kind {
             let place_ty: Ty<'tcx> = place.ty(&self.local_decls, self.tcx).ty;
@@ -864,7 +862,7 @@ impl<'mir, 'tcx> MutVisitor<'tcx> for ConstPropagator<'mir, 'tcx> {
                     if let Some(value) = self.get_const(place) {
                         if self.should_const_prop(value) {
                             trace!("replacing {:?} with {:?}", rval, value);
-                            self.replace_with_const(rval, value, statement.source_info);
+                            self.replace_with_const(rval, value, source_info);
                             if can_const_prop == ConstPropMode::FullConstProp
                                 || can_const_prop == ConstPropMode::OnlyInsideOwnBlock
                             {
@@ -927,7 +925,6 @@ impl<'mir, 'tcx> MutVisitor<'tcx> for ConstPropagator<'mir, 'tcx> {
     fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
         let source_info = terminator.source_info;
-        self.ecx.set_span(source_info.span);
         self.source_info = Some(source_info);
         self.super_terminator(terminator, location);
         match &mut terminator.kind {