Auto merge of #45380 - dotdash:arg_copies, r=arielb1

Avoid unnecessary copies of arguments that are simple bindings

Initially MIR differentiated between arguments and locals, which
introduced a need to add extra copies assigning the argument to a
local, even for simple bindings. This differentiation no longer exists,
but we're still creating those copies, bloating the MIR and LLVM IR we
emit.

Additionally, the current approach means that we create debug info for
both the incoming argument (marking it as an argument), and then
immediately shadow it with a local that goes by the same name. This can be
confusing when using e.g. "info args" in gdb, or when e.g. a debugger
with a GUI displays the function arguments separately from the local
variables, especially when the binding is mutable, because the argument
doesn't change, while the local variable does.
This commit is contained in:
bors 2017-10-26 14:14:15 +00:00
commit b0b80f8c22
21 changed files with 145 additions and 122 deletions

View File

@ -650,7 +650,9 @@ pub enum TerminatorKind<'tcx> {
Call {
/// The function that's being called
func: Operand<'tcx>,
/// Arguments the function is called with
/// Arguments the function is called with. These are owned by the callee, which is free to
/// modify them. This is important as "by-value" arguments might be passed by-reference at
/// the ABI level.
args: Vec<Operand<'tcx>>,
/// Destination for the return value. If some, the call is converging.
destination: Option<(Lvalue<'tcx>, BasicBlock)>,

View File

@ -247,7 +247,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
} else {
let args: Vec<_> =
args.into_iter()
.map(|arg| unpack!(block = this.as_local_operand(block, arg)))
.map(|arg| {
let scope = this.local_scope();
// Function arguments are owned by the callee, so we need as_temp()
// instead of as_operand() to enforce copies
let operand = unpack!(block = this.as_temp(block, scope, arg));
Operand::Consume(Lvalue::Local(operand))
})
.collect();
let success = this.cfg.start_new_block();

View File

@ -21,6 +21,7 @@ use rustc::mir::visit::{MutVisitor, Lookup};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::subst::Substs;
use rustc::util::nodemap::NodeMap;
use rustc_const_eval::pattern::{BindingMode, PatternKind};
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use shim;
use std::mem;
@ -571,13 +572,24 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
// Bind the argument patterns
for (index, &(ty, pattern)) in arguments.iter().enumerate() {
// Function arguments always get the first Local indices after the return pointer
let lvalue = Lvalue::Local(Local::new(index + 1));
let local = Local::new(index + 1);
let lvalue = Lvalue::Local(local);
if let Some(pattern) = pattern {
let pattern = self.hir.pattern_from_hir(pattern);
scope = self.declare_bindings(scope, ast_body.span,
LintLevel::Inherited, &pattern);
unpack!(block = self.lvalue_into_pattern(block, pattern, &lvalue));
match *pattern.kind {
// Don't introduce extra copies for simple bindings
PatternKind::Binding { mutability, var, mode: BindingMode::ByValue, .. } => {
self.local_decls[local].mutability = mutability;
self.var_indices.insert(var, local);
}
_ => {
scope = self.declare_bindings(scope, ast_body.span,
LintLevel::Inherited, &pattern);
unpack!(block = self.lvalue_into_pattern(block, pattern, &lvalue));
}
}
}
// Make sure we drop (parts of) the argument even when not matched on.

View File

@ -112,6 +112,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
/// Assigns `name` to the given LLVM `value` (e.g. so arguments and temps
/// show up with readable names in the emitted IR instead of `%0`, `%1`, …).
pub fn set_value_name(&self, value: ValueRef, name: &str) {
// LLVM's C API takes a NUL-terminated string; CString::new only fails on an
// interior NUL byte, which value names produced here never contain.
let cname = CString::new(name.as_bytes()).unwrap();
unsafe {
llvm::LLVMSetValueName(value, cname.as_ptr());
}
}
pub fn position_before(&self, insn: ValueRef) {
unsafe {
llvm::LLVMPositionBuilderBefore(self.llbuilder, insn);

View File

@ -64,11 +64,18 @@ struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> {
impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> {
fn new(mircx: &'mir MirContext<'a, 'tcx>) -> LocalAnalyzer<'mir, 'a, 'tcx> {
LocalAnalyzer {
let mut analyzer = LocalAnalyzer {
cx: mircx,
lvalue_locals: BitVector::new(mircx.mir.local_decls.len()),
seen_assigned: BitVector::new(mircx.mir.local_decls.len())
};
// Arguments get assigned to by means of the function being called
for idx in 0..mircx.mir.arg_count {
analyzer.seen_assigned.insert(idx + 1);
}
analyzer
}
fn mark_as_lvalue(&mut self, local: mir::Local) {

View File

@ -386,6 +386,12 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let arg_decl = &mir.local_decls[local];
let arg_ty = mircx.monomorphize(&arg_decl.ty);
let name = if let Some(name) = arg_decl.name {
name.as_str().to_string()
} else {
format!("arg{}", arg_index)
};
if Some(local) == mir.spread_arg {
// This argument (e.g. the last argument in the "rust-call" ABI)
// is a tuple that was spread at the ABI level and now we have
@ -397,7 +403,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
_ => bug!("spread argument isn't a tuple?!")
};
let lvalue = LvalueRef::alloca(bcx, arg_ty, &format!("arg{}", arg_index));
let lvalue = LvalueRef::alloca(bcx, arg_ty, &name);
for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
let (dst, _) = lvalue.trans_field_ptr(bcx, i);
let arg = &mircx.fn_ty.args[idx];
@ -444,6 +450,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
llarg_idx += 1;
}
let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
bcx.set_value_name(llarg, &name);
llarg_idx += 1;
llarg
} else if !lvalue_locals.contains(local.index()) &&
@ -481,10 +488,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee);
let llarg = bcx.pointercast(llarg, data_llty.ptr_to());
bcx.set_value_name(llarg, &(name.clone() + ".ptr"));
let llmeta = bcx.pointercast(llmeta, meta_llty);
bcx.set_value_name(llmeta, &(name + ".meta"));
OperandValue::Pair(llarg, llmeta)
} else {
bcx.set_value_name(llarg, &name);
OperandValue::Immediate(llarg)
};
let operand = OperandRef {
@ -493,7 +503,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
};
return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
} else {
let lltemp = LvalueRef::alloca(bcx, arg_ty, &format!("arg{}", arg_index));
let lltemp = LvalueRef::alloca(bcx, arg_ty, &name);
if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
// we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words,

View File

@ -13,7 +13,7 @@
#![crate_type = "lib"]
// Hack to get the correct size for the length part in slices
// CHECK: @helper([[USIZE:i[0-9]+]])
// CHECK: @helper([[USIZE:i[0-9]+]] %arg0)
#[no_mangle]
fn helper(_: usize) {
}
@ -23,9 +23,9 @@ fn helper(_: usize) {
pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
// We used to generate an extra alloca and memcpy for the block's trailing expression value, so
// check that we copy directly to the return value slot
// CHECK: %2 = insertvalue { i8*, [[USIZE]] } undef, i8* %0, 0
// CHECK: %3 = insertvalue { i8*, [[USIZE]] } %2, [[USIZE]] %1, 1
// CHECK: ret { i8*, [[USIZE]] } %3
// CHECK: %0 = insertvalue { i8*, [[USIZE]] } undef, i8* %x.ptr, 0
// CHECK: %1 = insertvalue { i8*, [[USIZE]] } %0, [[USIZE]] %x.meta, 1
// CHECK: ret { i8*, [[USIZE]] } %1
{ x }
}

View File

@ -42,7 +42,6 @@ pub fn align64(i : i32) -> Align64 {
#[no_mangle]
pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
// CHECK: %n64 = alloca %Nested64, align 64
// CHECK: %a = alloca %Align64, align 64
let n64 = Nested64 { a, b, c, d };
n64
}
@ -51,7 +50,6 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
#[no_mangle]
pub fn enum64(a: Align64) -> Enum64 {
// CHECK: %e64 = alloca %Enum64, align 64
// CHECK: %a = alloca %Align64, align 64
let e64 = Enum64::A(a);
e64
}

View File

@ -60,27 +60,27 @@
#![crate_type = "lib"]
mod tests {
// CHECK: @f1(i32 inreg, i32 inreg, i32)
// CHECK: @f1(i32 inreg %arg0, i32 inreg %arg1, i32 %arg2)
#[no_mangle]
extern "fastcall" fn f1(_: i32, _: i32, _: i32) {}
// CHECK: @f2(i32* inreg, i32* inreg, i32*)
// CHECK: @f2(i32* inreg %arg0, i32* inreg %arg1, i32* %arg2)
#[no_mangle]
extern "fastcall" fn f2(_: *const i32, _: *const i32, _: *const i32) {}
// CHECK: @f3(float, i32 inreg, i32 inreg, i32)
// CHECK: @f3(float %arg0, i32 inreg %arg1, i32 inreg %arg2, i32 %arg3)
#[no_mangle]
extern "fastcall" fn f3(_: f32, _: i32, _: i32, _: i32) {}
// CHECK: @f4(i32 inreg, float, i32 inreg, i32)
// CHECK: @f4(i32 inreg %arg0, float %arg1, i32 inreg %arg2, i32 %arg3)
#[no_mangle]
extern "fastcall" fn f4(_: i32, _: f32, _: i32, _: i32) {}
// CHECK: @f5(i64, i32)
// CHECK: @f5(i64 %arg0, i32 %arg1)
#[no_mangle]
extern "fastcall" fn f5(_: i64, _: i32) {}
// CHECK: @f6(i1 inreg zeroext, i32 inreg, i32)
// CHECK: @f6(i1 inreg zeroext %arg0, i32 inreg %arg1, i32 %arg2)
#[no_mangle]
extern "fastcall" fn f6(_: bool, _: i32, _: i32) {}
}

View File

@ -21,62 +21,62 @@ pub struct UnsafeInner {
_field: std::cell::UnsafeCell<i16>,
}
// CHECK: zeroext i1 @boolean(i1 zeroext)
// CHECK: zeroext i1 @boolean(i1 zeroext %x)
#[no_mangle]
pub fn boolean(x: bool) -> bool {
x
}
// CHECK: @readonly_borrow(i32* noalias readonly dereferenceable(4))
// CHECK: @readonly_borrow(i32* noalias readonly dereferenceable(4) %arg0)
// FIXME #25759 This should also have `nocapture`
#[no_mangle]
pub fn readonly_borrow(_: &i32) {
}
// CHECK: @static_borrow(i32* noalias readonly dereferenceable(4))
// CHECK: @static_borrow(i32* noalias readonly dereferenceable(4) %arg0)
// static borrow may be captured
#[no_mangle]
pub fn static_borrow(_: &'static i32) {
}
// CHECK: @named_borrow(i32* noalias readonly dereferenceable(4))
// CHECK: @named_borrow(i32* noalias readonly dereferenceable(4) %arg0)
// borrow with named lifetime may be captured
#[no_mangle]
pub fn named_borrow<'r>(_: &'r i32) {
}
// CHECK: @unsafe_borrow(%UnsafeInner* dereferenceable(2))
// CHECK: @unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
// unsafe interior means this isn't actually readonly and there may be aliases ...
#[no_mangle]
pub fn unsafe_borrow(_: &UnsafeInner) {
}
// CHECK: @mutable_unsafe_borrow(%UnsafeInner* dereferenceable(2))
// CHECK: @mutable_unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
// ... unless this is a mutable borrow, those never alias
// ... except that there's this LLVM bug that forces us to not use noalias, see #29485
#[no_mangle]
pub fn mutable_unsafe_borrow(_: &mut UnsafeInner) {
}
// CHECK: @mutable_borrow(i32* dereferenceable(4))
// CHECK: @mutable_borrow(i32* dereferenceable(4) %arg0)
// FIXME #25759 This should also have `nocapture`
// ... there's this LLVM bug that forces us to not use noalias, see #29485
#[no_mangle]
pub fn mutable_borrow(_: &mut i32) {
}
// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32))
// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32) %arg0)
#[no_mangle]
pub fn indirect_struct(_: S) {
}
// CHECK: @borrowed_struct(%S* noalias readonly dereferenceable(32))
// CHECK: @borrowed_struct(%S* noalias readonly dereferenceable(32) %arg0)
// FIXME #25759 This should also have `nocapture`
#[no_mangle]
pub fn borrowed_struct(_: &S) {
}
// CHECK: noalias dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4))
// CHECK: noalias dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x)
#[no_mangle]
pub fn _box(x: Box<i32>) -> Box<i32> {
x
@ -91,31 +91,31 @@ pub fn struct_return() -> S {
}
// Hack to get the correct size for the length part in slices
// CHECK: @helper([[USIZE:i[0-9]+]])
// CHECK: @helper([[USIZE:i[0-9]+]] %arg0)
#[no_mangle]
fn helper(_: usize) {
}
// CHECK: @slice(i8* noalias nonnull readonly, [[USIZE]])
// CHECK: @slice(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta)
// FIXME #25759 This should also have `nocapture`
#[no_mangle]
fn slice(_: &[u8]) {
}
// CHECK: @mutable_slice(i8* nonnull, [[USIZE]])
// CHECK: @mutable_slice(i8* nonnull %arg0.ptr, [[USIZE]] %arg0.meta)
// FIXME #25759 This should also have `nocapture`
// ... there's this LLVM bug that forces us to not use noalias, see #29485
#[no_mangle]
fn mutable_slice(_: &mut [u8]) {
}
// CHECK: @unsafe_slice(%UnsafeInner* nonnull, [[USIZE]])
// CHECK: @unsafe_slice(%UnsafeInner* nonnull %arg0.ptr, [[USIZE]] %arg0.meta)
// unsafe interior means this isn't actually readonly and there may be aliases ...
#[no_mangle]
pub fn unsafe_slice(_: &[UnsafeInner]) {
}
// CHECK: @str(i8* noalias nonnull readonly, [[USIZE]])
// CHECK: @str(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta)
// FIXME #25759 This should also have `nocapture`
#[no_mangle]
fn str(_: &[u8]) {
@ -132,7 +132,7 @@ fn trait_borrow(_: &Drop) {
fn trait_box(_: Box<Drop>) {
}
// CHECK: { i16*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly, [[USIZE]])
// CHECK: { i16*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta)
#[no_mangle]
fn return_slice(x: &[u16]) -> &[u16] {
x

View File

@ -24,6 +24,6 @@ pub struct Big {
// CHECK-LABEL: @test_mvi
#[no_mangle]
pub unsafe fn test_mvi(target: *mut Big, make_big: fn() -> Big) {
// CHECK: call void %1(%Big*{{[^%]*}} %0)
// CHECK: call void %make_big(%Big*{{[^%]*}} %target)
move_val_init(target, make_big());
}

View File

@ -13,7 +13,7 @@
#![crate_type = "lib"]
// Hack to get the correct size for the length part in slices
// CHECK: @helper([[USIZE:i[0-9]+]])
// CHECK: @helper([[USIZE:i[0-9]+]] %arg0)
#[no_mangle]
fn helper(_: usize) {
}
@ -24,9 +24,9 @@ pub fn ref_dst(s: &[u8]) {
// We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
// directly to the alloca for "x"
// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 0
// CHECK: store i8* %0, i8** [[X0]]
// CHECK: store i8* %s.ptr, i8** [[X0]]
// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 1
// CHECK: store [[USIZE]] %1, [[USIZE]]* [[X1]]
// CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]]
let x = &*s;
&x; // keep variable in an alloca

View File

@ -25,9 +25,9 @@ pub struct Bytes {
#[no_mangle]
pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
// CHECK: [[TMP:%.+]] = alloca i32
// CHECK: %arg1 = alloca [4 x i8]
// CHECK: store i32 %1, i32* [[TMP]]
// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %arg1 to i8*
// CHECK: %y = alloca [4 x i8]
// CHECK: store i32 %0, i32* [[TMP]]
// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %y to i8*
// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*
// CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false)
*x = y;
@ -39,9 +39,9 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
#[no_mangle]
pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) {
// CHECK: [[TMP:%.+]] = alloca i32
// CHECK: %arg1 = alloca %Bytes
// CHECK: store i32 %1, i32* [[TMP]]
// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %arg1 to i8*
// CHECK: %y = alloca %Bytes
// CHECK: store i32 %0, i32* [[TMP]]
// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %y to i8*
// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*
// CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false)
*x = y;

View File

@ -19,13 +19,12 @@ fn main() { }
// START rustc.node4.CopyPropagation.before.mir
// bb0: {
// ...
// _2 = _1;
// _3 = _1;
// ...
// _2 = _3;
// ...
// _4 = _2;
// _3 = _4;
// ...
// _5 = _3;
// _0 = _5;
// _0 = _4;
// ...
// return;
// }

View File

@ -26,8 +26,7 @@ fn main() {}
// ...
// _2 = _1;
// ...
// _3 = _2;
// _0 = Baz { x: _3, y: const 0f32, z: const false };
// _0 = Baz { x: _2, y: const 0f32, z: const false };
// ...
// return;
// }
@ -37,8 +36,7 @@ fn main() {}
// ...
// _2 = _1;
// ...
// _3 = _2;
// (_0.0: usize) = _3;
// (_0.0: usize) = _2;
// (_0.1: f32) = const 0f32;
// (_0.2: bool) = const false;
// ...

View File

@ -30,10 +30,7 @@ fn main() {
// bb0: {
// StorageLive(_2);
// _2 = _1;
// StorageLive(_3);
// _3 = _2;
// _0 = Baz::Foo { x: _3 };
// StorageDead(_3);
// _0 = Baz::Foo { x: _2 };
// StorageDead(_2);
// return;
// }
@ -42,11 +39,8 @@ fn main() {
// bb0: {
// StorageLive(_2);
// _2 = _1;
// StorageLive(_3);
// _3 = _2;
// ((_0 as Foo).0: usize) = _3;
// ((_0 as Foo).0: usize) = _2;
// discriminant(_0) = 1;
// StorageDead(_3);
// StorageDead(_2);
// return;
// }

View File

@ -28,35 +28,35 @@ fn main() {}
// END RUST SOURCE
// START rustc.node12.Deaggregator.before.mir
// bb1: {
// StorageLive(_6);
// _6 = _4;
// _0 = Foo::A(_6,);
// StorageDead(_6);
// StorageLive(_4);
// _4 = _2;
// _0 = Foo::A(_4,);
// StorageDead(_4);
// goto -> bb3;
// }
// bb2: {
// StorageLive(_7);
// _7 = _4;
// _0 = Foo::B(_7,);
// StorageDead(_7);
// StorageLive(_5);
// _5 = _2;
// _0 = Foo::B(_5,);
// StorageDead(_5);
// goto -> bb3;
// }
// END rustc.node12.Deaggregator.before.mir
// START rustc.node12.Deaggregator.after.mir
// bb1: {
// StorageLive(_6);
// _6 = _4;
// ((_0 as A).0: i32) = _6;
// StorageLive(_4);
// _4 = _2;
// ((_0 as A).0: i32) = _4;
// discriminant(_0) = 0;
// StorageDead(_6);
// StorageDead(_4);
// goto -> bb3;
// }
// bb2: {
// StorageLive(_7);
// _7 = _4;
// ((_0 as B).0: i32) = _7;
// StorageLive(_5);
// _5 = _2;
// ((_0 as B).0: i32) = _5;
// discriminant(_0) = 1;
// StorageDead(_7);
// StorageDead(_5);
// goto -> bb3;
// }
// END rustc.node12.Deaggregator.after.mir

View File

@ -25,15 +25,14 @@ fn main() { }
// START rustc.node10.Deaggregator.before.mir
// bb0: {
// ...
// _2 = _1;
// _3 = _1;
// ...
// _4 = _2;
// _3 = Foo::A(_4,);
// _2 = Foo::A(_3,);
// ...
// _6 = _2;
// _5 = Foo::A(_6,);
// _5 = _1;
// _4 = Foo::A(_5,);
// ...
// _0 = [_3, _5];
// _0 = [_2, _4];
// ...
// return;
// }
@ -41,17 +40,16 @@ fn main() { }
// START rustc.node10.Deaggregator.after.mir
// bb0: {
// ...
// _2 = _1;
// _3 = _1;
// ...
// _4 = _2;
// ((_3 as A).0: i32) = _4;
// discriminant(_3) = 0;
// ((_2 as A).0: i32) = _3;
// discriminant(_2) = 0;
// ...
// _6 = _2;
// ((_5 as A).0: i32) = _6;
// discriminant(_5) = 0;
// _5 = _1;
// ((_4 as A).0: i32) = _5;
// discriminant(_4) = 0;
// ...
// _0 = [_3, _5];
// _0 = [_2, _4];
// ...
// return;
// }

View File

@ -64,17 +64,14 @@ fn main() {
// bb0: {
// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:11) => validate_1[317d]::main[0]::{{closure}}[0] }, BrEnv) [closure@NodeId(50)], _2: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:11) => validate_1[317d]::main[0]::{{closure}}[0] }, BrAnon(1)) mut i32]);
// StorageLive(_3);
// _3 = _2;
// Validate(Suspend(ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 }))), [(*_2): i32]);
// _3 = &ReErased (*_2);
// Validate(Acquire, [(*_3): i32/ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 })) (imm)]);
// StorageLive(_4);
// Validate(Suspend(ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 }))), [(*_3): i32]);
// _4 = &ReErased (*_3);
// Validate(Acquire, [(*_4): i32/ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 })) (imm)]);
// StorageLive(_5);
// _5 = (*_4);
// _0 = _5;
// StorageDead(_5);
// EndRegion(ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 })));
// _4 = (*_3);
// _0 = _4;
// StorageDead(_4);
// EndRegion(ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 })));
// StorageDead(_3);
// return;
// }

View File

@ -53,10 +53,7 @@ fn main() {
// bb0: {
// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:9) => validate_4[317d]::write_42[0]::{{closure}}[0] }, BrEnv) [closure@NodeId(22)], _2: *mut i32]);
// Validate(Release, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:9) => validate_4[317d]::write_42[0]::{{closure}}[0] }, BrEnv) [closure@NodeId(22)], _2: *mut i32]);
// StorageLive(_3);
// _3 = _2;
// (*_3) = const 23i32;
// StorageDead(_3);
// (*_2) = const 23i32;
// return;
// }
// }
@ -68,11 +65,11 @@ fn main() {
// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(0:4) => validate_4[317d]::test[0] }, BrAnon(0)) mut i32]);
// Validate(Release, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(0:4) => validate_4[317d]::test[0] }, BrAnon(0)) mut i32]);
// ...
// _3 = const write_42(_4) -> bb1;
// _2 = const write_42(_3) -> bb1;
// }
// bb1: {
// Validate(Acquire, [_3: bool]);
// Validate(Release, [_3: bool]);
// Validate(Acquire, [_2: bool]);
// Validate(Release, [_2: bool]);
// ...
// }
// }
@ -85,7 +82,7 @@ fn main() {
// Validate(Release, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:10) => validate_4[317d]::main[0]::{{closure}}[0] }, BrEnv) [closure@NodeId(60)], _2: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:10) => validate_4[317d]::main[0]::{{closure}}[0] }, BrAnon(1)) mut i32]);
// StorageLive(_3);
// ...
// _0 = const write_42(_4) -> bb1;
// _0 = const write_42(_3) -> bb1;
// }
// ...
// }

View File

@ -39,8 +39,8 @@ fn main() {
// bb0: {
// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(0:4) => validate_5[317d]::test[0] }, BrAnon(0)) mut i32]);
// ...
// Validate(Release, [_3: bool, _4: *mut i32]);
// _3 = const write_42(_4) -> bb1;
// Validate(Release, [_2: bool, _3: *mut i32]);
// _2 = const write_42(_3) -> bb1;
// }
// ...
// }
@ -51,17 +51,15 @@ fn main() {
// bb0: {
// Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:9) => validate_5[317d]::main[0]::{{closure}}[0] }, BrEnv) [closure@NodeId(46)], _2: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:9) => validate_5[317d]::main[0]::{{closure}}[0] }, BrAnon(1)) mut i32]);
// StorageLive(_3);
// _3 = _2;
// StorageLive(_4);
// StorageLive(_5);
// Validate(Suspend(ReScope(Node(ItemLocalId(9)))), [(*_3): i32]);
// _5 = &ReErased mut (*_3);
// Validate(Acquire, [(*_5): i32/ReScope(Node(ItemLocalId(9)))]);
// _4 = _5 as *mut i32 (Misc);
// Validate(Suspend(ReScope(Node(ItemLocalId(9)))), [(*_2): i32]);
// _4 = &ReErased mut (*_2);
// Validate(Acquire, [(*_4): i32/ReScope(Node(ItemLocalId(9)))]);
// _3 = _4 as *mut i32 (Misc);
// EndRegion(ReScope(Node(ItemLocalId(9))));
// StorageDead(_5);
// Validate(Release, [_0: bool, _4: *mut i32]);
// _0 = const write_42(_4) -> bb1;
// StorageDead(_4);
// Validate(Release, [_0: bool, _3: *mut i32]);
// _0 = const write_42(_3) -> bb1;
// }
// ...
// }