diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs
index 03273419432..b88b61f1818 100644
--- a/src/librustc_mir/build/scope.rs
+++ b/src/librustc_mir/build/scope.rs
@@ -154,6 +154,11 @@ struct CachedBlock {
     unwind: Option<BasicBlock>,
 
     /// The cached block for unwinds during cleanups-on-generator-drop path
+    ///
+    /// This is split from the standard unwind path here to prevent drop
+    /// elaboration from creating drop flags that would have to be captured
+    /// by the generator. I'm not sure how important this optimization is,
+    /// but it is here.
     generator_drop: Option<BasicBlock>,
 }
 
@@ -217,13 +222,26 @@ impl<'tcx> Scope<'tcx> {
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
     ///
-    /// `unwind` controls whether caches for the unwind branch are also invalidated.
-    fn invalidate_cache(&mut self, unwind: bool) {
+    /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
+    /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
+    /// top-of-scope (as opposed to dependent scopes).
+    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
+        // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
+        // with lots of `try!`?
+
+        // cached exits drop storage and refer to the top-of-scope
         self.cached_exits.clear();
-        if !unwind { return; }
-        for dropdata in &mut self.drops {
-            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
-                cached_block.invalidate();
+
+        if !storage_only {
+            // the current generator drop ignores storage but refers to top-of-scope
+            self.cached_generator_drop = None;
+        }
+
+        if !storage_only && !this_scope_only {
+            for dropdata in &mut self.drops {
+                if let DropKind::Value { ref mut cached_block } = dropdata.kind {
+                    cached_block.invalidate();
+                }
             }
         }
     }
@@ -672,8 +690,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             // invalidating caches of each scope visited. This way bare minimum of the
            // caches gets invalidated. i.e. if a new drop is added into the middle scope, the
            // cache of outer scpoe stays intact.
-            let invalidate_unwind = needs_drop && !this_scope;
-            scope.invalidate_cache(invalidate_unwind);
+            scope.invalidate_cache(!needs_drop, this_scope);
             if this_scope {
                 if let DropKind::Value { .. } = drop_kind {
                     scope.needs_cleanup = true;
                 }
@@ -942,5 +959,7 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
         target = block
     }
 
+    debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
+
     target
 }
diff --git a/src/test/run-pass/dynamic-drop.rs b/src/test/run-pass/dynamic-drop.rs
index 1aba47af1e9..d8b6dbe48f1 100644
--- a/src/test/run-pass/dynamic-drop.rs
+++ b/src/test/run-pass/dynamic-drop.rs
@@ -8,9 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(untagged_unions)]
+#![feature(generators, generator_trait, untagged_unions)]
 
 use std::cell::{Cell, RefCell};
+use std::ops::Generator;
 use std::panic;
 use std::usize;
 
@@ -161,6 +162,21 @@ fn vec_simple(a: &Allocator) {
     let _x = vec![a.alloc(), a.alloc(), a.alloc(), a.alloc()];
 }
 
+fn generator(a: &Allocator, run_count: usize) {
+    assert!(run_count < 4);
+
+    let mut gen = || {
+        (a.alloc(),
+         yield a.alloc(),
+         a.alloc(),
+         yield a.alloc()
+         );
+    };
+    for _ in 0..run_count {
+        gen.resume();
+    }
+}
+
 #[allow(unreachable_code)]
 fn vec_unreachable(a: &Allocator) {
     let _x = vec![a.alloc(), a.alloc(), a.alloc(), return];
 }
@@ -228,5 +244,11 @@ fn main() {
     run_test(|a| field_assignment(a, false));
     run_test(|a| field_assignment(a, true));
 
+    // FIXME: fix leaks on panics
+    run_test_nopanic(|a| generator(a, 0));
+    run_test_nopanic(|a| generator(a, 1));
+    run_test_nopanic(|a| generator(a, 2));
+    run_test_nopanic(|a| generator(a, 3));
+
     run_test_nopanic(|a| union1(a));
 }
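
Note (not part of the patch): below is a minimal, self-contained sketch of the new invalidation rules, assuming simplified stand-in types — `BasicBlock` here is just a `usize` index, the block numbers are arbitrary, and `Scope::drops` holds `DropKind` directly rather than the real `DropData` — so the `storage_only` / `this_scope_only` behaviour can be exercised outside the compiler.

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct BasicBlock(usize);

struct CachedBlock {
    unwind: Option<BasicBlock>,
    // Kept separate from `unwind` so that drop elaboration on the
    // generator-drop path does not create drop flags that the generator
    // would have to capture (mirrors the comment added in the diff).
    generator_drop: Option<BasicBlock>,
}

impl CachedBlock {
    fn invalidate(&mut self) {
        self.unwind = None;
        self.generator_drop = None;
    }
}

#[allow(dead_code)]
enum DropKind {
    Value { cached_block: CachedBlock },
    Storage,
}

struct Scope {
    drops: Vec<DropKind>,
    cached_exits: Vec<BasicBlock>,
    cached_generator_drop: Option<BasicBlock>,
}

impl Scope {
    // Same shape as the patched `invalidate_cache`:
    // * cached exits always refer to the top of this scope, so they always go;
    // * the generator-drop cache ignores storage, so it survives
    //   storage-only invalidations;
    // * per-drop unwind caches only go when the invalidation is not limited
    //   to paths that refer to the current top-of-scope.
    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
        self.cached_exits.clear();

        if !storage_only {
            self.cached_generator_drop = None;
        }

        if !storage_only && !this_scope_only {
            for dropdata in &mut self.drops {
                if let DropKind::Value { ref mut cached_block } = *dropdata {
                    cached_block.invalidate();
                }
            }
        }
    }
}

fn main() {
    let mut scope = Scope {
        drops: vec![DropKind::Value {
            cached_block: CachedBlock {
                unwind: Some(BasicBlock(5)),
                generator_drop: Some(BasicBlock(6)),
            },
        }],
        cached_exits: vec![BasicBlock(3)],
        cached_generator_drop: Some(BasicBlock(7)),
    };

    // A storage-only drop scheduled for this scope: only the exit cache goes.
    scope.invalidate_cache(true, true);
    assert!(scope.cached_exits.is_empty());
    assert_eq!(scope.cached_generator_drop, Some(BasicBlock(7)));

    // A value drop visible to enclosing scopes: everything is invalidated.
    scope.invalidate_cache(false, false);
    assert_eq!(scope.cached_generator_drop, None);
    if let DropKind::Value { ref cached_block } = scope.drops[0] {
        assert_eq!(cached_block.unwind, None);
        assert_eq!(cached_block.generator_drop, None);
    }
}
```

The `unwind` / `generator_drop` split in `CachedBlock` follows the rationale in the new doc comment: keeping the generator-drop path separate avoids drop flags that would otherwise have to be captured by the generator.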