From a091cfd4f3be8677481a3a502bd96bdebd0de1bb Mon Sep 17 00:00:00 2001
From: Ariel Ben-Yehuda
Date: Tue, 17 May 2016 02:26:18 +0300
Subject: [PATCH] implement drop elaboration

Fixes #30380
---
 src/librustc/infer/mod.rs                     |   18 +
 .../borrowck/mir/dataflow/mod.rs              |    6 +
 .../borrowck/mir/elaborate_drops.rs           | 1038 +++++++++++++++++
 src/librustc_borrowck/borrowck/mir/mod.rs     |   35 +-
 src/librustc_borrowck/borrowck/mir/patch.rs   |  184 +++
 src/librustc_borrowck/borrowck/mod.rs         |    2 +
 src/librustc_borrowck/lib.rs                  |    2 +-
 src/librustc_driver/driver.rs                 |    4 +
 src/test/run-fail/issue-30380.rs              |   44 +
 src/test/run-pass/dynamic-drop.rs             |  100 ++
 10 files changed, 1431 insertions(+), 2 deletions(-)
 create mode 100644 src/librustc_borrowck/borrowck/mir/elaborate_drops.rs
 create mode 100644 src/librustc_borrowck/borrowck/mir/patch.rs
 create mode 100644 src/test/run-fail/issue-30380.rs
 create mode 100644 src/test/run-pass/dynamic-drop.rs

diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs
index 7c9c52baa63..d22b7bd6d5a 100644
--- a/src/librustc/infer/mod.rs
+++ b/src/librustc/infer/mod.rs
@@ -613,6 +613,24 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
             value.trans_normalize(&infcx)
         })
     }
+
+    pub fn normalize_associated_type_in_env<T>(
+        self, value: &T, env: &'a ty::ParameterEnvironment<'tcx>
+    ) -> T
+        where T: TransNormalize<'tcx>
+    {
+        debug!("normalize_associated_type_in_env(t={:?})", value);
+
+        let value = self.erase_regions(value);
+
+        if !value.has_projection_types() {
+            return value;
+        }
+
+        self.infer_ctxt(None, Some(env.clone()), ProjectionMode::Any).enter(|infcx| {
+            value.trans_normalize(&infcx)
+        })
+    }
 }
 
 impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs b/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs
index 99592e5d60f..113d3ff8512 100644
--- a/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs
+++ b/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs
@@ -200,6 +200,12 @@ impl<'a, 'tcx: 'a, O> DataflowAnalysis<'a, 'tcx, O>
 
 pub struct DataflowResults<O>(DataflowState<O>) where O: BitDenotation;
 
+impl<O: BitDenotation> DataflowResults<O> {
+    pub fn sets(&self) -> &AllSets {
+        &self.0.sets
+    }
+}
+
 // FIXME: This type shouldn't be public, but the graphviz::MirWithFlowState trait
 // references it in a method signature. Look into using `pub(crate)` to address this.
 pub struct DataflowState<O: BitDenotation>
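At its core, drop elaboration turns the unconditional `Drop` terminators that MIR construction emits into drops guarded by boolean drop flags wherever initialization is only known dynamically. A minimal stand-alone analogue of the transformation, written against today's `std::mem::MaybeUninit` (stable APIs chosen for illustration; the pass itself emits MIR, not source like this):

    use std::mem::MaybeUninit;

    // Hand-written equivalent of what `fn f(c: bool) { let x; if c { x =
    // String::from("x"); } }` elaborates to: a boolean flag tracks whether
    // `x` is initialized, and the scope-end drop tests and clears it.
    fn f(c: bool) {
        let mut drop_flag = false;              // flags start out dead
        let mut x: MaybeUninit<String> = MaybeUninit::uninit();
        if c {
            x = MaybeUninit::new(String::from("x"));
            drop_flag = true;                   // initialization sets the flag
        }
        if drop_flag {                          // elaborated drop: test the flag,
            unsafe { x.assume_init_drop() };    // run the destructor,
            drop_flag = false;                  // and clear the flag
        }
        let _ = drop_flag;
    }

    fn main() {
        f(true);
        f(false);
    }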
diff --git a/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs
new file mode 100644
index 00000000000..e299d47aee6
--- /dev/null
+++ b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs
@@ -0,0 +1,1038 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use bitslice::BitSlice;
+use super::gather_moves::{MoveData, MovePathIndex, MovePathContent, Location};
+use super::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals};
+use super::dataflow::{DataflowResults};
+use super::{drop_flag_effects_for_location, on_all_children_bits};
+use super::{DropFlagState};
+use super::patch::MirPatch;
+use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::subst::{Subst, Substs, VecPerParamSpace};
+use rustc::mir::repr::*;
+use rustc::mir::transform::{Pass, MirPass, MirSource};
+use rustc::middle::const_val::ConstVal;
+use rustc::middle::lang_items;
+use rustc::util::nodemap::FnvHashMap;
+use rustc_mir::pretty;
+use syntax::codemap::Span;
+
+use std::fmt;
+use std::u32;
+
+pub struct ElaborateDrops;
+
+impl<'tcx> MirPass<'tcx> for ElaborateDrops {
+    fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                    src: MirSource, mir: &mut Mir<'tcx>)
+    {
+        debug!("elaborate_drops({:?} @ {:?})", src, mir.span);
+        match src {
+            MirSource::Fn(..) => {},
+            _ => return
+        }
+        let id = src.item_id();
+        let param_env = ty::ParameterEnvironment::for_item(tcx, id);
+        let move_data = MoveData::gather_moves(mir, tcx);
+        let elaborate_patch = {
+            let mir = &*mir;
+            let ((_, _, move_data), flow_inits) =
+                super::do_dataflow(tcx, mir, id, &[], (tcx, mir, move_data),
+                                   MaybeInitializedLvals::default());
+            let ((_, _, move_data), flow_uninits) =
+                super::do_dataflow(tcx, mir, id, &[], (tcx, mir, move_data),
+                                   MaybeUninitializedLvals::default());
+
+            match (tcx, mir, move_data) {
+                ref ctxt => ElaborateDropsCtxt {
+                    ctxt: ctxt,
+                    param_env: &param_env,
+                    flow_inits: flow_inits,
+                    flow_uninits: flow_uninits,
+                    drop_flags: FnvHashMap(),
+                    patch: MirPatch::new(mir),
+                }.elaborate()
+            }
+        };
+        pretty::dump_mir(tcx, "elaborate_drops", &0, src, mir, None);
+        elaborate_patch.apply(mir);
+        pretty::dump_mir(tcx, "elaborate_drops", &1, src, mir, None);
+    }
+}
+
+impl Pass for ElaborateDrops {}
+
+struct InitializationData {
+    live: Vec<usize>,
+    dead: Vec<usize>
+}
+
+impl InitializationData {
+    fn apply_location<'a,'tcx>(&mut self,
+                               tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                               mir: &Mir<'tcx>,
+                               move_data: &MoveData<'tcx>,
+                               loc: Location)
+    {
+        drop_flag_effects_for_location(tcx, mir, move_data, loc, |path, df| {
+            debug!("at location {:?}: setting {:?} to {:?}",
+                   loc, path, df);
+            match df {
+                DropFlagState::Live => {
+                    self.live.set_bit(path.idx());
+                    self.dead.clear_bit(path.idx());
+                }
+                DropFlagState::Dead => {
+                    self.dead.set_bit(path.idx());
+                    self.live.clear_bit(path.idx());
+                }
+            }
+        });
+    }
+
+    fn state(&self, path: MovePathIndex) -> (bool, bool) {
+        (self.live.get_bit(path.idx()), self.dead.get_bit(path.idx()))
+    }
+}
+
+impl fmt::Debug for InitializationData {
+    fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        Ok(())
+    }
+}
+
+struct ElaborateDropsCtxt<'a, 'tcx: 'a> {
+    ctxt: &'a (TyCtxt<'a, 'tcx, 'tcx>, &'a Mir<'tcx>, MoveData<'tcx>),
+    param_env: &'a ty::ParameterEnvironment<'tcx>,
+    flow_inits: DataflowResults<MaybeInitializedLvals<'a, 'tcx>>,
+    flow_uninits: DataflowResults<MaybeUninitializedLvals<'a, 'tcx>>,
+    drop_flags: FnvHashMap<MovePathIndex, u32>,
+    patch: MirPatch<'tcx>,
+}
+
+#[derive(Copy, Clone, Debug)]
+struct DropCtxt<'a, 'tcx: 'a> {
+    span: Span,
+    scope: ScopeId,
+    is_cleanup: bool,
+
+    init_data: &'a InitializationData,
+
+    lvalue: &'a Lvalue<'tcx>,
+    path: MovePathIndex,
+    succ: BasicBlock,
+    unwind: Option<BasicBlock>
+}
+
+impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'b, 'tcx, 'tcx> { self.ctxt.0 }
+    fn mir(&self) -> &'b Mir<'tcx> { self.ctxt.1 }
+    fn move_data(&self) -> &'b MoveData<'tcx> { &self.ctxt.2 }
+
+    fn initialization_data_at(&self, loc: Location) -> InitializationData {
+        let mut data = InitializationData {
+            live: self.flow_inits.sets().on_entry_set_for(loc.block.index())
+                .to_owned(),
+            dead: self.flow_uninits.sets().on_entry_set_for(loc.block.index())
+                .to_owned(),
+        };
+        for stmt in 0..loc.index {
+            data.apply_location(self.ctxt.0, self.ctxt.1, &self.ctxt.2,
+                                Location { block: loc.block, index: stmt });
+        }
+        data
+    }
+
+    fn create_drop_flag(&mut self, index: MovePathIndex) {
+        let tcx = self.tcx();
+        let patch = &mut self.patch;
+        self.drop_flags.entry(index).or_insert_with(|| {
+            patch.new_temp(tcx.types.bool)
+        });
+    }
+
+    fn drop_flag(&mut self, index: MovePathIndex) -> Option<Lvalue<'tcx>> {
+        self.drop_flags.get(&index).map(|t| Lvalue::Temp(*t))
+    }
+
+    /// Create a patch that elaborates all drops in the input
+    /// MIR.
+    fn elaborate(mut self) -> MirPatch<'tcx>
+    {
+        self.collect_drop_flags();
+
+        self.elaborate_drops();
+
+        self.drop_flags_on_init();
+        self.drop_flags_for_fn_rets();
+        self.drop_flags_for_args();
+        self.drop_flags_for_locs();
+
+        self.patch
+    }
+
+    fn path_needs_drop(&self, path: MovePathIndex) -> bool
+    {
+        match self.move_data().move_paths[path].content {
+            MovePathContent::Lvalue(ref lvalue) => {
+                let ty = self.mir().lvalue_ty(self.tcx(), lvalue)
+                    .to_ty(self.tcx());
+                debug!("path_needs_drop({:?}, {:?} : {:?})", path, lvalue, ty);
+
+                self.tcx().type_needs_drop_given_env(ty, &self.param_env)
+            }
+            _ => false
+        }
+    }
+
+    /// Returns whether this lvalue is tracked by drop elaboration. This
+    /// includes all lvalues, except those behind references or arrays.
+    ///
+    /// Lvalues behind references or arrays are not tracked by elaboration
+    /// and are always assumed to be initialized when accessible. As
+    /// references and indexes can be reseated, trying to track them
+    /// can only lead to trouble.
+    fn lvalue_is_tracked(&self, lv: &Lvalue<'tcx>) -> bool
+    {
+        if let &Lvalue::Projection(ref data) = lv {
+            self.lvalue_contents_are_tracked(&data.base)
+        } else {
+            true
+        }
+    }
+
+    fn lvalue_contents_are_tracked(&self, lv: &Lvalue<'tcx>) -> bool {
+        let ty = self.mir().lvalue_ty(self.tcx(), lv).to_ty(self.tcx());
+        match ty.sty {
+            ty::TyArray(..) | ty::TySlice(..) | ty::TyRef(..) | ty::TyRawPtr(..) => {
+                false
+            }
+            _ => self.lvalue_is_tracked(lv)
+        }
+    }
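An untracked lvalue therefore keeps its unconditional drop, which is sound because the borrow checker guarantees such locations are initialized whenever they are reachable. A stand-alone illustration (not part of this patch):

    // `*slot` lives behind a reference, so no drop flag tracks it: the
    // assignment below always drops the old value first, unconditionally.
    fn overwrite(slot: &mut String) {
        *slot = String::from("replacement");
    }

    fn main() {
        let mut s = String::from("original");
        overwrite(&mut s);
        println!("{}", s);
    }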
+    fn collect_drop_flags(&mut self)
+    {
+        for bb in self.mir().all_basic_blocks() {
+            let data = self.mir().basic_block_data(bb);
+            let terminator = data.terminator();
+            let location = match terminator.kind {
+                TerminatorKind::Drop { ref location, .. } |
+                TerminatorKind::DropAndReplace { ref location, .. } => location,
+                _ => continue
+            };
+
+            if !self.lvalue_is_tracked(location) {
+                continue
+            }
+
+            let init_data = self.initialization_data_at(Location {
+                block: bb,
+                index: data.statements.len()
+            });
+
+            let path = self.move_data().rev_lookup.find(location);
+            debug!("collect_drop_flags: {:?}, lv {:?} (index {:?})",
+                   bb, location, path);
+
+            on_all_children_bits(self.tcx(), self.mir(), self.move_data(), path, |child| {
+                if self.path_needs_drop(child) {
+                    let (maybe_live, maybe_dead) = init_data.state(child);
+                    debug!("collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
+                           child, location, path, (maybe_live, maybe_dead));
+                    if maybe_live && maybe_dead {
+                        self.create_drop_flag(child)
+                    }
+                }
+            });
+        }
+    }
+
+    fn elaborate_drops(&mut self)
+    {
+        for bb in self.mir().all_basic_blocks() {
+            let data = self.mir().basic_block_data(bb);
+            let loc = Location { block: bb, index: data.statements.len() };
+            let terminator = data.terminator();
+
+            let resume_block = self.patch.resume_block();
+            match terminator.kind {
+                TerminatorKind::Drop { ref location, target, unwind } => {
+                    let init_data = self.initialization_data_at(loc);
+                    let path = self.move_data().rev_lookup.find(location);
+                    self.elaborate_drop(&DropCtxt {
+                        span: terminator.span,
+                        scope: terminator.scope,
+                        is_cleanup: data.is_cleanup,
+                        init_data: &init_data,
+                        lvalue: location,
+                        path: path,
+                        succ: target,
+                        unwind: if data.is_cleanup {
+                            None
+                        } else {
+                            Some(Option::unwrap_or(unwind, resume_block))
+                        }
+                    }, bb);
+                }
+                TerminatorKind::DropAndReplace { ref location, ref value,
+                                                 target, unwind } =>
+                {
+                    assert!(!data.is_cleanup);
+
+                    self.elaborate_replace(
+                        loc,
+                        location, value,
+                        target, unwind);
+                }
+                _ => continue
+            }
+        }
+    }
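The `DropAndReplace` terminators dispatched above typically originate from assignments to possibly-moved-from locals, where whether the old value still needs dropping depends on the path taken at runtime. A stand-alone illustration (not part of this patch):

    struct Noisy(u32);
    impl Drop for Noisy {
        fn drop(&mut self) { println!("dropping Noisy({})", self.0); }
    }

    fn assign(c: bool) {
        let mut v = Noisy(0);
        if c {
            drop(v);       // `v` is maybe-moved from here on
        }
        v = Noisy(1);      // DropAndReplace: the old value is dropped
                           // only if it is still present
        println!("v = {}", v.0);
    }

    fn main() {
        assign(false);     // Noisy(0) is dropped at the reassignment
        assign(true);      // Noisy(0) was already dropped; the flag decides
    }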
+ debug!("elaborate_drop_and_replace({:?}) - untracked", terminator); + self.patch.patch_terminator(bb, TerminatorKind::Drop { + location: location.clone(), + target: target, + unwind: unwind + }); + } else { + debug!("elaborate_drop_and_replace({:?}) - tracked", terminator); + let init_data = self.initialization_data_at(loc); + let path = self.move_data().rev_lookup.find(location); + + self.elaborate_drop(&DropCtxt { + span: terminator.span, + scope: terminator.scope, + is_cleanup: data.is_cleanup, + init_data: &init_data, + lvalue: location, + path: path, + succ: target, + unwind: unwind + }, bb); + on_all_children_bits(self.tcx(), self.mir(), self.move_data(), path, |child| { + self.set_drop_flag(Location { block: target, index: 0 }, + child, DropFlagState::Live); + if let Some(unwind) = unwind { + self.set_drop_flag(Location { block: unwind, index: 0 }, + child, DropFlagState::Live); + } + }); + } + + self.patch.add_assign(Location { block: target, index: 0 }, + location.clone(), Rvalue::Use(value.clone())); + if let Some(unwind) = unwind { + self.patch.add_assign(Location { block: unwind, index: 0 }, + location.clone(), Rvalue::Use(value.clone())); + } + } + + /// This elaborates a single drop instruction, located at `bb`, and + /// patches over it. + /// + /// The elaborated drop checks the drop flags to only drop what + /// is initialized. + /// + /// In addition, the relevant drop flags also need to be cleared + /// to avoid double-drops. However, in the middle of a complex + /// drop, one must avoid clearing some of the flags before they + /// are read, as that would cause a memory leak. + /// + /// In particular, when dropping an ADT, multiple fields may be + /// joined together under the `rest` subpath. They are all controlled + /// by the primary drop flag, but only the last rest-field dropped + /// should clear it (and it must also not clear anything else). + /// + /// FIXME: I think we should just control the flags externally + /// and then we do not need this machinery. + fn elaborate_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, bb: BasicBlock) { + debug!("elaborate_drop({:?})", c); + + let mut some_live = false; + let mut some_dead = false; + let mut children_count = 0; + on_all_children_bits( + self.tcx(), self.mir(), self.move_data(), + c.path, |child| { + if self.path_needs_drop(child) { + let (live, dead) = c.init_data.state(child); + debug!("elaborate_drop: state({:?}) = {:?}", + child, (live, dead)); + some_live |= live; + some_dead |= dead; + children_count += 1; + } + }); + + debug!("elaborate_drop({:?}): live - {:?}", c, + (some_live, some_dead)); + match (some_live, some_dead) { + (false, false) | (false, true) => { + // dead drop - patch it out + self.patch.patch_terminator(bb, TerminatorKind::Goto { + target: c.succ + }); + } + (true, false) => { + // static drop - just set the flag + self.patch.patch_terminator(bb, TerminatorKind::Drop { + location: c.lvalue.clone(), + target: c.succ, + unwind: c.unwind + }); + self.drop_flags_for_drop(c, bb); + } + (true, true) => { + // dynamic drop + let drop_bb = if children_count == 1 || self.must_complete_drop(c) { + self.conditional_drop(c) + } else { + self.open_drop(c) + }; + self.patch.patch_terminator(bb, TerminatorKind::Goto { + target: drop_bb + }); + } + } + } + + /// Return the lvalue and move path for each field of `variant`, + /// (the move path is `None` if the field is a rest field). 
+    /// Return the lvalue and move path for each field of `variant`
+    /// (the move path is `None` if the field is a rest field).
+    fn move_paths_for_fields(&self,
+                             base_lv: &Lvalue<'tcx>,
+                             variant_path: MovePathIndex,
+                             variant: ty::VariantDef<'tcx>,
+                             substs: &'tcx Substs<'tcx>)
+                             -> Vec<(Lvalue<'tcx>, Option<MovePathIndex>)>
+    {
+        let move_paths = &self.move_data().move_paths;
+        variant.fields.iter().enumerate().map(|(i, f)| {
+            let subpath =
+                super::move_path_children_matching(move_paths, variant_path, |p| {
+                    match p {
+                        &Projection {
+                            elem: ProjectionElem::Field(idx, _), ..
+                        } => idx.index() == i,
+                        _ => false
+                    }
+                });
+
+            let field_ty =
+                self.tcx().normalize_associated_type_in_env(
+                    &f.ty(self.tcx(), substs),
+                    &self.param_env
+                );
+            (base_lv.clone().field(Field::new(i), field_ty), subpath)
+        }).collect()
+    }
+
+    /// Create one-half of the drop ladder for a list of fields, and return
+    /// the list of steps in it in reverse order.
+    ///
+    /// `unwind_ladder` is such a list of steps in reverse order,
+    /// which is called instead of the next step if the drop unwinds
+    /// (the first field is never reached). If it is `None`, all
+    /// unwind targets are left blank.
+    fn drop_halfladder<'a>(&mut self,
+                           c: &DropCtxt<'a, 'tcx>,
+                           unwind_ladder: Option<Vec<BasicBlock>>,
+                           succ: BasicBlock,
+                           fields: &[(Lvalue<'tcx>, Option<MovePathIndex>)],
+                           is_cleanup: bool)
+                           -> Vec<BasicBlock>
+    {
+        let mut succ = succ;
+        let mut unwind_succ = if is_cleanup {
+            None
+        } else {
+            c.unwind
+        };
+        let mut update_drop_flag = true;
+
+        fields.iter().rev().enumerate().map(|(i, &(ref lv, path))| {
+            let drop_block = match path {
+                Some(path) => {
+                    debug!("drop_ladder: for std field {} ({:?})", i, lv);
+
+                    self.elaborated_drop_block(&DropCtxt {
+                        span: c.span,
+                        scope: c.scope,
+                        is_cleanup: is_cleanup,
+                        init_data: c.init_data,
+                        lvalue: lv,
+                        path: path,
+                        succ: succ,
+                        unwind: unwind_succ,
+                    })
+                }
+                None => {
+                    debug!("drop_ladder: for rest field {} ({:?})", i, lv);
+
+                    let blk = self.complete_drop(&DropCtxt {
+                        span: c.span,
+                        scope: c.scope,
+                        is_cleanup: is_cleanup,
+                        init_data: c.init_data,
+                        lvalue: lv,
+                        path: c.path,
+                        succ: succ,
+                        unwind: unwind_succ,
+                    }, update_drop_flag);
+
+                    // the drop flag has been updated - updating
+                    // it again would clobber it.
+                    update_drop_flag = false;
+
+                    blk
+                }
+            };
+
+            succ = drop_block;
+            unwind_succ = unwind_ladder.as_ref().map(|p| p[i]);
+
+            drop_block
+        }).collect()
+    }
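The ladder exists because a value can be moved out of field-by-field; only the still-initialized fields may be dropped, and each drop needs its own unwind continuation. At the source level (stand-alone illustration, not from this patch):

    struct Pair { a: String, b: String }

    fn partial(p: Pair) {
        drop(p.a);  // child path `p.a` is now dead
    }               // the ladder at scope end drops only `p.b`

    fn main() {
        partial(Pair { a: String::from("a"), b: String::from("b") });
    }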
+    /// Create a full drop ladder, consisting of 2 connected half-drop-ladders
+    ///
+    /// For example, with 3 fields, the drop ladder is
+    ///
+    /// .d0:
+    ///     ELAB(drop location.0 [target=.d1, unwind=.c1])
+    /// .d1:
+    ///     ELAB(drop location.1 [target=.d2, unwind=.c2])
+    /// .d2:
+    ///     ELAB(drop location.2 [target=`c.succ`, unwind=`c.unwind`])
+    /// .c1:
+    ///     ELAB(drop location.1 [target=.c2])
+    /// .c2:
+    ///     ELAB(drop location.2 [target=`c.unwind`])
+    fn drop_ladder<'a>(&mut self,
+                       c: &DropCtxt<'a, 'tcx>,
+                       fields: &[(Lvalue<'tcx>, Option<MovePathIndex>)])
+                       -> BasicBlock
+    {
+        debug!("drop_ladder({:?}, {:?})", c, fields);
+        let unwind_ladder = if c.is_cleanup {
+            None
+        } else {
+            Some(self.drop_halfladder(c, None, c.unwind.unwrap(), &fields, true))
+        };
+
+        self.drop_halfladder(c, unwind_ladder, c.succ, fields, c.is_cleanup)
+            .last().cloned().unwrap_or(c.succ)
+    }
+
+    fn open_drop_for_tuple<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, tys: &[Ty<'tcx>])
+                               -> BasicBlock
+    {
+        debug!("open_drop_for_tuple({:?}, {:?})", c, tys);
+
+        let fields: Vec<_> = tys.iter().enumerate().map(|(i, &ty)| {
+            (c.lvalue.clone().field(Field::new(i), ty),
+             super::move_path_children_matching(
+                 &self.move_data().move_paths, c.path, |proj| match proj {
+                     &Projection {
+                         elem: ProjectionElem::Field(f, _), ..
+                     } => f.index() == i,
+                     _ => false
+                 }
+             ))
+        }).collect();
+
+        self.drop_ladder(c, &fields)
+    }
+
+    fn open_drop_for_box<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, ty: Ty<'tcx>)
+                             -> BasicBlock
+    {
+        debug!("open_drop_for_box({:?}, {:?})", c, ty);
+
+        let interior_path = super::move_path_children_matching(
+            &self.move_data().move_paths, c.path, |proj| match proj {
+                &Projection { elem: ProjectionElem::Deref, .. } => true,
+                _ => false
+            }).unwrap();
+
+        let interior = c.lvalue.clone().deref();
+        let inner_c = DropCtxt {
+            lvalue: &interior,
+            unwind: c.unwind.map(|u| {
+                self.box_free_block(c, ty, u, true)
+            }),
+            succ: self.box_free_block(c, ty, c.succ, c.is_cleanup),
+            path: interior_path,
+            ..*c
+        };
+
+        self.elaborated_drop_block(&inner_c)
+    }
+
+    fn open_drop_for_variant<'a>(&mut self,
+                                 c: &DropCtxt<'a, 'tcx>,
+                                 drop_block: &mut Option<BasicBlock>,
+                                 adt: ty::AdtDef<'tcx>,
+                                 substs: &'tcx Substs<'tcx>,
+                                 variant_index: usize)
+                                 -> BasicBlock
+    {
+        let move_paths = &self.move_data().move_paths;
+
+        let subpath = super::move_path_children_matching(
+            move_paths, c.path, |proj| match proj {
+                &Projection {
+                    elem: ProjectionElem::Downcast(_, idx), ..
+                } => idx == variant_index,
+                _ => false
+            });
+
+        if let Some(variant_path) = subpath {
+            let base_lv = c.lvalue.clone().elem(
+                ProjectionElem::Downcast(adt, variant_index)
+            );
+            let fields = self.move_paths_for_fields(
+                &base_lv,
+                variant_path,
+                &adt.variants[variant_index],
+                substs);
+            self.drop_ladder(c, &fields)
+        } else {
+            // variant not found - drop the entire enum
+            if let None = *drop_block {
+                *drop_block = Some(self.complete_drop(c, true));
+            }
+            return drop_block.unwrap();
+        }
+    }
+
+    fn open_drop_for_adt<'a>(&mut self, c: &DropCtxt<'a, 'tcx>,
+                             adt: ty::AdtDef<'tcx>, substs: &'tcx Substs<'tcx>)
+                             -> BasicBlock {
+        debug!("open_drop_for_adt({:?}, {:?}, {:?})", c, adt, substs);
+
+        let mut drop_block = None;
+
+        match adt.variants.len() {
+            1 => {
+                let fields = self.move_paths_for_fields(
+                    c.lvalue,
+                    c.path,
+                    &adt.variants[0],
+                    substs
+                );
+                self.drop_ladder(c, &fields)
+            }
+            _ => {
+                let variant_drops : Vec<BasicBlock> =
+                    (0..adt.variants.len()).map(|i| {
+                        self.open_drop_for_variant(c, &mut drop_block,
+                                                   adt, substs, i)
+                    }).collect();
+
+                // If there are multiple variants, then if something
+                // is present within the enum, the discriminant, tracked
+                // by the rest path, must be initialized.
+                //
+                // Additionally, we do not want to switch on the
+                // discriminant after it is freed, because that
+                // way lies only trouble.
+
+                let switch_block = self.new_block(
+                    c, c.is_cleanup, TerminatorKind::Switch {
+                        discr: c.lvalue.clone(),
+                        adt_def: adt,
+                        targets: variant_drops
+                    });
+
+                self.drop_flag_test_block(c, c.is_cleanup, switch_block)
+            }
+        }
+    }
+
+    /// The slow-path - create an "open", elaborated drop for a type
+    /// which is moved-out-of only partially, and patch `bb` to a jump
+    /// to it. This must not be called on ADTs with a destructor,
+    /// as these can't be moved-out-of, except for `Box`, which is
+    /// special-cased.
+    ///
+    /// This creates a "drop ladder" that drops the needed fields of the
+    /// ADT, both in the success case and if one of the destructors fails.
+    fn open_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock {
+        let ty = self.mir().lvalue_ty(self.tcx(), c.lvalue).to_ty(self.tcx());
+        match ty.sty {
+            ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
+                self.open_drop_for_adt(c, def, substs)
+            }
+            ty::TyTuple(tys) | ty::TyClosure(_, ty::ClosureSubsts {
+                upvar_tys: tys, ..
+            }) => {
+                self.open_drop_for_tuple(c, tys)
+            }
+            ty::TyBox(ty) => {
+                self.open_drop_for_box(c, ty)
+            }
+            _ => bug!("open drop from non-ADT `{:?}`", ty)
+        }
+    }
+
+    /// Return a basic block that drops an lvalue using the context
+    /// and path in `c`. If `update_drop_flag` is true, also
+    /// clear the drop flag for `c.path`.
+    ///
+    /// if FLAG(c.path)
+    ///     if (update_drop_flag) FLAG(c.path) = false
+    ///     drop(c.lv)
+    fn complete_drop<'a>(
+        &mut self,
+        c: &DropCtxt<'a, 'tcx>,
+        update_drop_flag: bool)
+        -> BasicBlock
+    {
+        debug!("complete_drop({:?},{:?})", c, update_drop_flag);
+
+        let drop_block = self.drop_block(c);
+        if update_drop_flag {
+            self.set_drop_flag(
+                Location { block: drop_block, index: 0 },
+                c.path,
+                DropFlagState::Dead
+            );
+        }
+
+        self.drop_flag_test_block(c, c.is_cleanup, drop_block)
+    }
+    /// Create a simple conditional drop.
+    ///
+    /// if FLAG(c.lv)
+    ///     FLAG(c.lv) = false
+    ///     drop(c.lv)
+    fn conditional_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>)
+                            -> BasicBlock
+    {
+        debug!("conditional_drop({:?})", c);
+        let drop_bb = self.drop_block(c);
+        self.drop_flags_for_drop(c, drop_bb);
+
+        self.drop_flag_test_block(c, c.is_cleanup, drop_bb)
+    }
+
+    fn new_block<'a>(&mut self,
+                     c: &DropCtxt<'a, 'tcx>,
+                     is_cleanup: bool,
+                     k: TerminatorKind<'tcx>)
+                     -> BasicBlock
+    {
+        self.patch.new_block(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator {
+                scope: c.scope, span: c.span, kind: k
+            }),
+            is_cleanup: is_cleanup
+        })
+    }
+
+    fn elaborated_drop_block<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock {
+        debug!("elaborated_drop_block({:?})", c);
+        let blk = self.drop_block(c);
+        self.elaborate_drop(c, blk);
+        blk
+    }
+
+    fn drop_flag_test_block<'a>(&mut self,
+                                c: &DropCtxt<'a, 'tcx>,
+                                is_cleanup: bool,
+                                on_set: BasicBlock)
+                                -> BasicBlock
+    {
+        let (maybe_live, maybe_dead) = c.init_data.state(c.path);
+        debug!("drop_flag_test_block({:?},{:?},{:?}) - {:?}",
+               c, is_cleanup, on_set, (maybe_live, maybe_dead));
+
+        match (maybe_live, maybe_dead) {
+            (false, _) => c.succ,
+            (true, false) => on_set,
+            (true, true) => {
+                let flag = self.drop_flag(c.path).unwrap();
+                self.new_block(c, is_cleanup, TerminatorKind::If {
+                    cond: Operand::Consume(flag),
+                    targets: (on_set, c.succ)
+                })
+            }
+        }
+    }
+
+    fn drop_block<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock {
+        self.new_block(c, c.is_cleanup, TerminatorKind::Drop {
+            location: c.lvalue.clone(),
+            target: c.succ,
+            unwind: c.unwind
+        })
+    }
+
+    fn jump_to_resume_block<'a>(&mut self, scope: ScopeId, span: Span) -> BasicBlock {
+        let resume_block = self.patch.resume_block();
+        self.patch.new_block(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator {
+                scope: scope, span: span, kind: TerminatorKind::Goto {
+                    target: resume_block
+                }
+            }),
+            is_cleanup: true
+        })
+    }
+
+    fn box_free_block<'a>(
+        &mut self,
+        c: &DropCtxt<'a, 'tcx>,
+        ty: Ty<'tcx>,
+        target: BasicBlock,
+        is_cleanup: bool
+    ) -> BasicBlock {
+        let block = self.unelaborated_free_block(c, ty, target, is_cleanup);
+        self.drop_flag_test_block(c, is_cleanup, block)
+    }
+
+    fn unelaborated_free_block<'a>(
+        &mut self,
+        c: &DropCtxt<'a, 'tcx>,
+        ty: Ty<'tcx>,
+        target: BasicBlock,
+        is_cleanup: bool
+    ) -> BasicBlock {
+        let mut statements = vec![];
+        if let Some(&flag) = self.drop_flags.get(&c.path) {
+            statements.push(Statement {
+                span: c.span,
+                scope: c.scope,
+                kind: StatementKind::Assign(
+                    Lvalue::Temp(flag),
+                    self.constant_bool(c.span, false)
+                )
+            });
+        }
+
+        let tcx = self.tcx();
+        let unit_temp = Lvalue::Temp(self.patch.new_temp(tcx.mk_nil()));
+        let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
+            .unwrap_or_else(|e| tcx.sess.fatal(&e));
+        let substs = tcx.mk_substs(Substs::new(
+            VecPerParamSpace::new(vec![], vec![], vec![ty]),
+            VecPerParamSpace::new(vec![], vec![], vec![])
+        ));
+        let fty = tcx.lookup_item_type(free_func).ty.subst(tcx, substs);
+
+        self.patch.new_block(BasicBlockData {
+            statements: statements,
+            terminator: Some(Terminator {
+                scope: c.scope, span: c.span, kind: TerminatorKind::Call {
+                    func: Operand::Constant(Constant {
+                        span: c.span,
+                        ty: fty,
+                        literal: Literal::Item {
+                            def_id: free_func,
+                            substs: substs
+                        }
+                    }),
+                    args: vec![Operand::Consume(c.lvalue.clone())],
+                    destination: Some((unit_temp, target)),
+                    cleanup: None
+                }
+            }),
+            is_cleanup: is_cleanup
+        })
+    }
+
+    fn must_complete_drop<'a>(&self, c: &DropCtxt<'a, 'tcx>) -> bool {
+        // if we have a destructor, we must *not* split the drop.
+
+        // dataflow can create unneeded children in some cases
+        // - be sure to ignore them.
+
+        let ty = self.mir().lvalue_ty(self.tcx(), c.lvalue).to_ty(self.tcx());
+
+        match ty.sty {
+            ty::TyStruct(def, _) | ty::TyEnum(def, _) => {
+                if def.has_dtor() {
+                    self.tcx().sess.span_warn(
+                        c.span,
+                        &format!("dataflow bug??? moving out of type with dtor {:?}",
+                                 c));
+                    true
+                } else {
+                    false
+                }
+            }
+            _ => false
+        }
+    }
+
+    fn constant_bool(&self, span: Span, val: bool) -> Rvalue<'tcx> {
+        Rvalue::Use(Operand::Constant(Constant {
+            span: span,
+            ty: self.tcx().types.bool,
+            literal: Literal::Value { value: ConstVal::Bool(val) }
+        }))
+    }
+
+    fn set_drop_flag(&mut self, loc: Location, path: MovePathIndex, val: DropFlagState) {
+        if let Some(&flag) = self.drop_flags.get(&path) {
+            let span = self.patch.context_for_location(self.mir(), loc).0;
+            let val = self.constant_bool(span, val.value());
+            self.patch.add_assign(loc, Lvalue::Temp(flag), val);
+        }
+    }
+
+    fn drop_flags_on_init(&mut self) {
+        let loc = Location { block: START_BLOCK, index: 0 };
+        let span = self.patch.context_for_location(self.mir(), loc).0;
+        let false_ = self.constant_bool(span, false);
+        for flag in self.drop_flags.values() {
+            self.patch.add_assign(loc, Lvalue::Temp(*flag), false_.clone());
+        }
+    }
+
+    fn drop_flags_for_fn_rets(&mut self) {
+        for bb in self.mir().all_basic_blocks() {
+            let data = self.mir().basic_block_data(bb);
+            if let TerminatorKind::Call {
+                destination: Some((ref lv, tgt)), cleanup: Some(_), ..
+            } = data.terminator().kind {
+                assert!(!self.patch.is_patched(bb));
+
+                let loc = Location { block: tgt, index: 0 };
+                let path = self.move_data().rev_lookup.find(lv);
+                on_all_children_bits(
+                    self.tcx(), self.mir(), self.move_data(), path,
+                    |child| self.set_drop_flag(loc, child, DropFlagState::Live)
+                );
+            }
+        }
+    }
+
+    fn drop_flags_for_args(&mut self) {
+        let loc = Location { block: START_BLOCK, index: 0 };
+        super::drop_flag_effects_for_function_entry(
+            self.tcx(), self.mir(), self.move_data(), |path, ds| {
+                self.set_drop_flag(loc, path, ds);
+            }
+        )
+    }
+    fn drop_flags_for_locs(&mut self) {
+        // We intentionally iterate only over the *old* basic blocks.
+        //
+        // Basic blocks created by drop elaboration update their
+        // drop flags by themselves, to avoid the drop flags being
+        // clobbered before they are read.
+
+        for bb in self.mir().all_basic_blocks() {
+            let data = self.mir().basic_block_data(bb);
+            debug!("drop_flags_for_locs({:?})", data);
+            for i in 0..(data.statements.len()+1) {
+                debug!("drop_flag_for_locs: stmt {}", i);
+                let mut allow_initializations = true;
+                if i == data.statements.len() {
+                    match data.terminator().kind {
+                        TerminatorKind::Drop { .. } => {
+                            // drop elaboration should handle that by itself
+                            continue
+                        }
+                        TerminatorKind::DropAndReplace { .. } => {
+                            // this contains the consume of the source and
+                            // the initialization of the destination. We
+                            // only want the latter
+                            assert!(self.patch.is_patched(bb));
+                            allow_initializations = false;
+                        }
+                        _ => {
+                            assert!(!self.patch.is_patched(bb));
+                        }
+                    }
+                }
+                let loc = Location { block: bb, index: i };
+                super::drop_flag_effects_for_location(
+                    self.tcx(), self.mir(), self.move_data(), loc, |path, ds| {
+                        if ds == DropFlagState::Dead || allow_initializations {
+                            self.set_drop_flag(loc, path, ds)
+                        }
+                    }
+                )
+            }
+
+            // There may be a critical edge after this call,
+            // so mark the return as initialized *before* the
+            // call.
+            if let TerminatorKind::Call {
+                destination: Some((ref lv, _)), cleanup: None, ..
+            } = data.terminator().kind {
+                assert!(!self.patch.is_patched(bb));
+
+                let loc = Location { block: bb, index: data.statements.len() };
+                let path = self.move_data().rev_lookup.find(lv);
+                on_all_children_bits(
+                    self.tcx(), self.mir(), self.move_data(), path,
+                    |child| self.set_drop_flag(loc, child, DropFlagState::Live)
+                );
+            }
+        }
+    }
+
+    fn drop_flags_for_drop<'a>(&mut self,
+                               c: &DropCtxt<'a, 'tcx>,
+                               bb: BasicBlock)
+    {
+        let loc = self.patch.terminator_loc(self.mir(), bb);
+        on_all_children_bits(
+            self.tcx(), self.mir(), self.move_data(), c.path,
+            |child| self.set_drop_flag(loc, child, DropFlagState::Dead)
+        );
+    }
+}
diff --git a/src/librustc_borrowck/borrowck/mir/mod.rs b/src/librustc_borrowck/borrowck/mir/mod.rs
index 38ebecf248f..7e1a196629f 100644
--- a/src/librustc_borrowck/borrowck/mir/mod.rs
+++ b/src/librustc_borrowck/borrowck/mir/mod.rs
@@ -24,8 +24,10 @@ use rustc::session::Session;
 use rustc::ty::{self, TyCtxt};
 
 mod abs_domain;
+pub mod elaborate_drops;
 mod dataflow;
 mod gather_moves;
+mod patch;
 // mod graphviz;
 
 use self::dataflow::{BitDenotation};
@@ -34,7 +36,7 @@ use self::dataflow::{Dataflow, DataflowAnalysis, DataflowResults};
 use self::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals};
 use self::dataflow::{DefinitelyInitializedLvals};
 use self::gather_moves::{MoveData, MovePathIndex, Location};
-use self::gather_moves::{MovePathContent};
+use self::gather_moves::{MovePathContent, MovePathData};
 
 fn has_rustc_mir_with(attrs: &[ast::Attribute], name: &str) -> Option<P<MetaItem>> {
     for attr in attrs {
@@ -202,6 +204,37 @@ enum DropFlagState {
     Dead, // i.e. deinitialized or "moved"
 }
 
+impl DropFlagState {
+    fn value(self) -> bool {
+        match self {
+            DropFlagState::Live => true,
+            DropFlagState::Dead => false
+        }
+    }
+}
+
+fn move_path_children_matching<'tcx, F>(move_paths: &MovePathData<'tcx>,
+                                        path: MovePathIndex,
+                                        mut cond: F)
+                                        -> Option<MovePathIndex>
+    where F: FnMut(&repr::LvalueProjection<'tcx>) -> bool
+{
+    let mut next_child = move_paths[path].first_child;
+    while let Some(child_index) = next_child {
+        match move_paths[child_index].content {
+            MovePathContent::Lvalue(repr::Lvalue::Projection(ref proj)) => {
+                if cond(proj) {
+                    return Some(child_index)
+                }
+            }
+            _ => {}
+        }
+        next_child = move_paths[child_index].next_sibling;
+    }
+
+    None
+}
+
 fn on_all_children_bits<'a, 'tcx, F>(
     tcx: TyCtxt<'a, 'tcx, 'tcx>,
     mir: &Mir<'tcx>,
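`move_path_children_matching`, added to `mod.rs` above, walks the move-path forest, which is stored in first-child/next-sibling form. The same traversal shape over plain data (self-contained sketch, types invented for illustration):

    struct Node {
        first_child: Option<usize>,
        next_sibling: Option<usize>,
        tag: u32,
    }

    // Find the first child of `parent` whose tag matches, walking the
    // sibling chain with a cursor exactly as the helper above does.
    fn child_matching(nodes: &[Node], parent: usize, want: u32) -> Option<usize> {
        let mut next = nodes[parent].first_child;
        while let Some(i) = next {
            if nodes[i].tag == want {
                return Some(i);
            }
            next = nodes[i].next_sibling;
        }
        None
    }

    fn main() {
        let nodes = vec![
            Node { first_child: Some(1), next_sibling: None, tag: 0 },
            Node { first_child: None, next_sibling: Some(2), tag: 7 },
            Node { first_child: None, next_sibling: None, tag: 9 },
        ];
        assert_eq!(child_matching(&nodes, 0, 9), Some(2));
    }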
+pub struct MirPatch<'tcx> { + patch_map: Vec>>, + new_blocks: Vec>, + new_statements: Vec<(Location, StatementKind<'tcx>)>, + new_temps: Vec>, + resume_block: BasicBlock, + next_temp: u32, +} + +impl<'tcx> MirPatch<'tcx> { + pub fn new(mir: &Mir<'tcx>) -> Self { + let mut result = MirPatch { + patch_map: iter::repeat(None) + .take(mir.basic_blocks.len()).collect(), + new_blocks: vec![], + new_temps: vec![], + new_statements: vec![], + next_temp: mir.temp_decls.len() as u32, + resume_block: START_BLOCK + }; + + // make sure the MIR we create has a resume block. It is + // completely legal to convert jumps to the resume block + // to jumps to None, but we occasionally have to add + // instructions just before that. + + let mut resume_block = None; + let mut resume_stmt_block = None; + for block in mir.all_basic_blocks() { + let data = mir.basic_block_data(block); + if let TerminatorKind::Resume = data.terminator().kind { + if data.statements.len() > 0 { + resume_stmt_block = Some(block); + } else { + resume_block = Some(block); + } + break + } + } + let resume_block = resume_block.unwrap_or_else(|| { + result.new_block(BasicBlockData { + statements: vec![], + terminator: Some(Terminator { + span: mir.span, + scope: ScopeId::new(0), + kind: TerminatorKind::Resume + }), + is_cleanup: true + })}); + result.resume_block = resume_block; + if let Some(resume_stmt_block) = resume_stmt_block { + result.patch_terminator(resume_stmt_block, TerminatorKind::Goto { + target: resume_block + }); + } + result + } + + pub fn resume_block(&self) -> BasicBlock { + self.resume_block + } + + pub fn is_patched(&self, bb: BasicBlock) -> bool { + self.patch_map[bb.index()].is_some() + } + + pub fn terminator_loc(&self, mir: &Mir<'tcx>, bb: BasicBlock) -> Location { + let offset = match bb.index().checked_sub(mir.basic_blocks.len()) { + Some(index) => self.new_blocks[index].statements.len(), + None => mir.basic_block_data(bb).statements.len() + }; + Location { + block: bb, + index: offset + } + } + + pub fn new_temp(&mut self, ty: Ty<'tcx>) -> u32 { + let index = self.next_temp; + assert!(self.next_temp < u32::MAX); + self.next_temp += 1; + self.new_temps.push(TempDecl { ty: ty }); + index + } + + pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock { + let block = BasicBlock::new(self.patch_map.len()); + debug!("MirPatch: new_block: {:?}: {:?}", block, data); + self.new_blocks.push(data); + self.patch_map.push(None); + block + } + + pub fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) { + assert!(self.patch_map[block.index()].is_none()); + debug!("MirPatch: patch_terminator({:?}, {:?})", block, new); + self.patch_map[block.index()] = Some(new); + } + + pub fn add_statement(&mut self, loc: Location, stmt: StatementKind<'tcx>) { + debug!("MirPatch: add_statement({:?}, {:?})", loc, stmt); + self.new_statements.push((loc, stmt)); + } + + pub fn add_assign(&mut self, loc: Location, lv: Lvalue<'tcx>, rv: Rvalue<'tcx>) { + self.add_statement(loc, StatementKind::Assign(lv, rv)); + } + + pub fn apply(self, mir: &mut Mir<'tcx>) { + debug!("MirPatch: {:?} new temps, starting from index {}: {:?}", + self.new_temps.len(), mir.temp_decls.len(), self.new_temps); + debug!("MirPatch: {} new blocks, starting from index {}", + self.new_blocks.len(), mir.basic_blocks.len()); + mir.basic_blocks.extend(self.new_blocks); + mir.temp_decls.extend(self.new_temps); + for (src, patch) in self.patch_map.into_iter().enumerate() { + if let Some(patch) = patch { + debug!("MirPatch: patching block 
{:?}", src); + mir.basic_blocks[src].terminator_mut().kind = patch; + } + } + + let mut new_statements = self.new_statements; + new_statements.sort_by(|u,v| u.0.cmp(&v.0)); + + let mut delta = 0; + let mut last_bb = START_BLOCK; + for (mut loc, stmt) in new_statements { + if loc.block != last_bb { + delta = 0; + last_bb = loc.block; + } + debug!("MirPatch: adding statement {:?} at loc {:?}+{}", + stmt, loc, delta); + loc.index += delta; + let (span, scope) = Self::context_for_index( + mir.basic_block_data(loc.block), loc + ); + mir.basic_block_data_mut(loc.block).statements.insert( + loc.index, Statement { + span: span, + scope: scope, + kind: stmt + }); + delta += 1; + } + } + + pub fn context_for_index(data: &BasicBlockData, loc: Location) -> (Span, ScopeId) { + match data.statements.get(loc.index) { + Some(stmt) => (stmt.span, stmt.scope), + None => (data.terminator().span, data.terminator().scope) + } + } + + pub fn context_for_location(&self, mir: &Mir, loc: Location) -> (Span, ScopeId) { + let data = match loc.block.index().checked_sub(mir.basic_blocks.len()) { + Some(new) => &self.new_blocks[new], + None => mir.basic_block_data(loc.block) + }; + Self::context_for_index(data, loc) + } +} diff --git a/src/librustc_borrowck/borrowck/mod.rs b/src/librustc_borrowck/borrowck/mod.rs index 819717628d6..5acbb18a2ff 100644 --- a/src/librustc_borrowck/borrowck/mod.rs +++ b/src/librustc_borrowck/borrowck/mod.rs @@ -18,6 +18,8 @@ pub use self::bckerr_code::*; pub use self::AliasableViolationKind::*; pub use self::MovedValueUseKind::*; +pub use self::mir::elaborate_drops::ElaborateDrops; + use self::InteriorKind::*; use rustc::dep_graph::DepNode; diff --git a/src/librustc_borrowck/lib.rs b/src/librustc_borrowck/lib.rs index 9d7e05ed9fa..cc694c59245 100644 --- a/src/librustc_borrowck/lib.rs +++ b/src/librustc_borrowck/lib.rs @@ -39,7 +39,7 @@ extern crate core; // for NonZero pub use borrowck::check_crate; pub use borrowck::build_borrowck_dataflow_data_for_fn; -pub use borrowck::{AnalysisData, BorrowckCtxt}; +pub use borrowck::{AnalysisData, BorrowckCtxt, ElaborateDrops}; // NB: This module needs to be declared first so diagnostics are // registered before they are used. diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index 911626bd2c2..bfad281702f 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -1033,6 +1033,10 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, passes.push_pass(box mir::transform::remove_dead_blocks::RemoveDeadBlocks); passes.push_pass(box mir::transform::erase_regions::EraseRegions); passes.push_pass(box mir::transform::break_cleanup_edges::BreakCleanupEdges); + passes.push_pass(box borrowck::ElaborateDrops); + passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads); + passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg); + passes.push_pass(box mir::transform::break_cleanup_edges::BreakCleanupEdges); passes.run_passes(tcx, &mut mir_map); }); diff --git a/src/test/run-fail/issue-30380.rs b/src/test/run-fail/issue-30380.rs new file mode 100644 index 00000000000..7bd9adcba9b --- /dev/null +++ b/src/test/run-fail/issue-30380.rs @@ -0,0 +1,44 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
diff --git a/src/test/run-fail/issue-30380.rs b/src/test/run-fail/issue-30380.rs
new file mode 100644
index 00000000000..7bd9adcba9b
--- /dev/null
+++ b/src/test/run-fail/issue-30380.rs
@@ -0,0 +1,44 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// check that panics in destructors during assignment do not leave
+// destroyed values lying around for other destructors to observe.
+
+// error-pattern:panicking destructors ftw!
+
+struct Observer<'a>(&'a mut FilledOnDrop);
+
+struct FilledOnDrop(u32);
+impl Drop for FilledOnDrop {
+    fn drop(&mut self) {
+        if self.0 == 0 {
+            // this is only set during the destructor - safe
+            // code should not be able to observe this.
+            self.0 = 0x1c1c1c1c;
+            panic!("panicking destructors ftw!");
+        }
+    }
+}
+
+impl<'a> Drop for Observer<'a> {
+    fn drop(&mut self) {
+        assert_eq!(self.0 .0, 1);
+    }
+}
+
+fn foo(b: &mut Observer) {
+    *b.0 = FilledOnDrop(1);
+}
+
+fn main() {
+    let mut bomb = FilledOnDrop(0);
+    let mut observer = Observer(&mut bomb);
+    foo(&mut observer);
+}
diff --git a/src/test/run-pass/dynamic-drop.rs b/src/test/run-pass/dynamic-drop.rs
new file mode 100644
index 00000000000..48e7b7ca576
--- /dev/null
+++ b/src/test/run-pass/dynamic-drop.rs
@@ -0,0 +1,100 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cell::RefCell;
+
+struct Allocator {
+    data: RefCell<Vec<bool>>,
+}
+
+impl Drop for Allocator {
+    fn drop(&mut self) {
+        let data = self.data.borrow();
+        if data.iter().any(|d| *d) {
+            panic!("missing free: {:?}", data);
+        }
+    }
+}
+
+impl Allocator {
+    fn new() -> Self { Allocator { data: RefCell::new(vec![]) } }
+    fn alloc(&self) -> Ptr {
+        let mut data = self.data.borrow_mut();
+        let addr = data.len();
+        data.push(true);
+        Ptr(addr, self)
+    }
+}
+
+struct Ptr<'a>(usize, &'a Allocator);
+impl<'a> Drop for Ptr<'a> {
+    fn drop(&mut self) {
+        match self.1.data.borrow_mut()[self.0] {
+            false => {
+                panic!("double free at index {:?}", self.0)
+            }
+            ref mut d => *d = false
+        }
+    }
+}
+
+fn dynamic_init(a: &Allocator, c: bool) {
+    let _x;
+    if c {
+        _x = Some(a.alloc());
+    }
+}
+
+fn dynamic_drop(a: &Allocator, c: bool) -> Option<Ptr> {
+    let x = a.alloc();
+    if c {
+        Some(x)
+    } else {
+        None
+    }
+}
+
+fn assignment2(a: &Allocator, c0: bool, c1: bool) {
+    let mut _v = a.alloc();
+    let mut _w = a.alloc();
+    if c0 {
+        drop(_v);
+    }
+    _v = _w;
+    if c1 {
+        _w = a.alloc();
+    }
+}
+
+fn assignment1(a: &Allocator, c0: bool) {
+    let mut _v = a.alloc();
+    let mut _w = a.alloc();
+    if c0 {
+        drop(_v);
+    }
+    _v = _w;
+}
+
+
+fn main() {
+    let a = Allocator::new();
+    dynamic_init(&a, false);
+    dynamic_init(&a, true);
+    dynamic_drop(&a, false);
+    dynamic_drop(&a, true);
+
+    assignment2(&a, false, false);
+    assignment2(&a, false, true);
+    assignment2(&a, true, false);
+    assignment2(&a, true, true);
+
+    assignment1(&a, false);
+    assignment1(&a, true);
+}
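In the spirit of `dynamic-drop.rs`, a further pattern the elaborated flags must handle is reinitialization inside a loop; a hypothetical extra case (not part of this patch) reusing the test's `Allocator`:

    fn loop_reinit(a: &Allocator, n: usize) {
        let mut p = a.alloc();
        for _ in 0..n {
            p = a.alloc();  // DropAndReplace: the previous allocation is
                            // freed exactly once per iteration, and the
                            // drop flag for `p` stays live throughout
        }
        drop(p);
    }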