Auto merge of #39648 - Aatch:mir-inlining-2, r=eddyb

[MIR] Implement Inlining

A fairly basic implementation of inlining for MIR, using conservative
heuristics to decide when to inline.

Doesn't handle a number of cases yet, but can be extended later. This is basically the same as the previous inlining PR, but without the span-related changes (the bugs those changes were working around have since been fixed).
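As a rough, hypothetical illustration of the kind of call the pass targets (not code from this PR): with `-Z mir-opt-level=2`, a small non-recursive callee like the one below stays well under the default cost threshold and can be folded into its caller's MIR, while `#[inline(never)]` or large functions are left alone.

    // Hypothetical example of a callee cheap enough for MIR inlining.
    #[inline]
    fn square(x: i32) -> i32 {
        x * x
    }

    fn caller(x: i32) -> i32 {
        // After the inlining pass, this call can be replaced by the body of
        // `square`, leaving a plain multiplication in the caller's MIR.
        square(x) + 1
    }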

/cc @rust-lang/compiler
bors 2017-03-11 08:25:44 +00:00
commit 8c72b7651f
12 changed files with 1505 additions and 8 deletions

View File

@ -19,6 +19,7 @@ use hir::def::CtorKind;
use hir::def_id::DefId;
use ty::subst::Substs;
use ty::{self, AdtDef, ClosureSubsts, Region, Ty};
use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use util::ppaux;
use rustc_back::slice;
use hir::InlineAsm;
@ -63,8 +64,7 @@ macro_rules! newtype_index {
}
/// Lowered representation of a single function.
// Do not implement clone for Mir, which can be accidentally done and is kind of expensive.
#[derive(RustcEncodable, RustcDecodable, Debug)]
#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
pub struct Mir<'tcx> {
/// List of basic blocks. References to basic blocks use a newtyped index type `BasicBlock`
/// that indexes into this vector.
@ -1333,3 +1333,347 @@ impl Location {
}
}
}
/*
* TypeFoldable implementations for MIR types
*/
impl<'tcx> TypeFoldable<'tcx> for Mir<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
Mir {
basic_blocks: self.basic_blocks.fold_with(folder),
visibility_scopes: self.visibility_scopes.clone(),
promoted: self.promoted.fold_with(folder),
return_ty: self.return_ty.fold_with(folder),
local_decls: self.local_decls.fold_with(folder),
arg_count: self.arg_count,
upvar_decls: self.upvar_decls.clone(),
spread_arg: self.spread_arg,
span: self.span,
cache: cache::Cache::new()
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
self.basic_blocks.visit_with(visitor) ||
self.promoted.visit_with(visitor) ||
self.return_ty.visit_with(visitor) ||
self.local_decls.visit_with(visitor)
}
}
impl<'tcx> TypeFoldable<'tcx> for LocalDecl<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
LocalDecl {
ty: self.ty.fold_with(folder),
..self.clone()
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
self.ty.visit_with(visitor)
}
}
impl<'tcx> TypeFoldable<'tcx> for BasicBlockData<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
BasicBlockData {
statements: self.statements.fold_with(folder),
terminator: self.terminator.fold_with(folder),
is_cleanup: self.is_cleanup
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
self.statements.visit_with(visitor) || self.terminator.visit_with(visitor)
}
}
impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
use mir::StatementKind::*;
let kind = match self.kind {
Assign(ref lval, ref rval) => Assign(lval.fold_with(folder), rval.fold_with(folder)),
SetDiscriminant { ref lvalue, variant_index } => SetDiscriminant {
lvalue: lvalue.fold_with(folder),
variant_index: variant_index
},
StorageLive(ref lval) => StorageLive(lval.fold_with(folder)),
StorageDead(ref lval) => StorageDead(lval.fold_with(folder)),
InlineAsm { ref asm, ref outputs, ref inputs } => InlineAsm {
asm: asm.clone(),
outputs: outputs.fold_with(folder),
inputs: inputs.fold_with(folder)
},
Nop => Nop,
};
Statement {
source_info: self.source_info,
kind: kind
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
use mir::StatementKind::*;
match self.kind {
Assign(ref lval, ref rval) => { lval.visit_with(visitor) || rval.visit_with(visitor) }
SetDiscriminant { ref lvalue, .. } |
StorageLive(ref lvalue) |
StorageDead(ref lvalue) => lvalue.visit_with(visitor),
InlineAsm { ref outputs, ref inputs, .. } =>
outputs.visit_with(visitor) || inputs.visit_with(visitor),
Nop => false,
}
}
}
impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
use mir::TerminatorKind::*;
let kind = match self.kind {
Goto { target } => Goto { target: target },
SwitchInt { ref discr, switch_ty, ref values, ref targets } => SwitchInt {
discr: discr.fold_with(folder),
switch_ty: switch_ty.fold_with(folder),
values: values.clone(),
targets: targets.clone()
},
Drop { ref location, target, unwind } => Drop {
location: location.fold_with(folder),
target: target,
unwind: unwind
},
DropAndReplace { ref location, ref value, target, unwind } => DropAndReplace {
location: location.fold_with(folder),
value: value.fold_with(folder),
target: target,
unwind: unwind
},
Call { ref func, ref args, ref destination, cleanup } => {
let dest = destination.as_ref().map(|&(ref loc, dest)| {
(loc.fold_with(folder), dest)
});
Call {
func: func.fold_with(folder),
args: args.fold_with(folder),
destination: dest,
cleanup: cleanup
}
},
Assert { ref cond, expected, ref msg, target, cleanup } => {
let msg = if let AssertMessage::BoundsCheck { ref len, ref index } = *msg {
AssertMessage::BoundsCheck {
len: len.fold_with(folder),
index: index.fold_with(folder),
}
} else {
msg.clone()
};
Assert {
cond: cond.fold_with(folder),
expected: expected,
msg: msg,
target: target,
cleanup: cleanup
}
},
Resume => Resume,
Return => Return,
Unreachable => Unreachable,
};
Terminator {
source_info: self.source_info,
kind: kind
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
use mir::TerminatorKind::*;
match self.kind {
SwitchInt { ref discr, switch_ty, .. } =>
discr.visit_with(visitor) || switch_ty.visit_with(visitor),
Drop { ref location, ..} => location.visit_with(visitor),
DropAndReplace { ref location, ref value, ..} =>
location.visit_with(visitor) || value.visit_with(visitor),
Call { ref func, ref args, ref destination, .. } => {
let dest = if let Some((ref loc, _)) = *destination {
loc.visit_with(visitor)
} else { false };
dest || func.visit_with(visitor) || args.visit_with(visitor)
},
Assert { ref cond, ref msg, .. } => {
if cond.visit_with(visitor) {
if let AssertMessage::BoundsCheck { ref len, ref index } = *msg {
len.visit_with(visitor) || index.visit_with(visitor)
} else {
false
}
} else {
false
}
},
Goto { .. } |
Resume |
Return |
Unreachable => false
}
}
}
impl<'tcx> TypeFoldable<'tcx> for Lvalue<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match self {
&Lvalue::Projection(ref p) => Lvalue::Projection(p.fold_with(folder)),
_ => self.clone()
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
if let &Lvalue::Projection(ref p) = self {
p.visit_with(visitor)
} else {
false
}
}
}
impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
use mir::Rvalue::*;
match *self {
Use(ref op) => Use(op.fold_with(folder)),
Repeat(ref op, len) => Repeat(op.fold_with(folder), len),
Ref(region, bk, ref lval) => Ref(region.fold_with(folder), bk, lval.fold_with(folder)),
Len(ref lval) => Len(lval.fold_with(folder)),
Cast(kind, ref op, ty) => Cast(kind, op.fold_with(folder), ty.fold_with(folder)),
BinaryOp(op, ref rhs, ref lhs) =>
BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
CheckedBinaryOp(op, ref rhs, ref lhs) =>
CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)),
UnaryOp(op, ref val) => UnaryOp(op, val.fold_with(folder)),
Discriminant(ref lval) => Discriminant(lval.fold_with(folder)),
Box(ty) => Box(ty.fold_with(folder)),
Aggregate(ref kind, ref fields) => {
let kind = match *kind {
AggregateKind::Array(ty) => AggregateKind::Array(ty.fold_with(folder)),
AggregateKind::Tuple => AggregateKind::Tuple,
AggregateKind::Adt(def, v, substs, n) =>
AggregateKind::Adt(def, v, substs.fold_with(folder), n),
AggregateKind::Closure(id, substs) =>
AggregateKind::Closure(id, substs.fold_with(folder))
};
Aggregate(kind, fields.fold_with(folder))
}
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
use mir::Rvalue::*;
match *self {
Use(ref op) => op.visit_with(visitor),
Repeat(ref op, _) => op.visit_with(visitor),
Ref(region, _, ref lval) => region.visit_with(visitor) || lval.visit_with(visitor),
Len(ref lval) => lval.visit_with(visitor),
Cast(_, ref op, ty) => op.visit_with(visitor) || ty.visit_with(visitor),
BinaryOp(_, ref rhs, ref lhs) |
CheckedBinaryOp(_, ref rhs, ref lhs) =>
rhs.visit_with(visitor) || lhs.visit_with(visitor),
UnaryOp(_, ref val) => val.visit_with(visitor),
Discriminant(ref lval) => lval.visit_with(visitor),
Box(ty) => ty.visit_with(visitor),
Aggregate(ref kind, ref fields) => {
(match *kind {
AggregateKind::Array(ty) => ty.visit_with(visitor),
AggregateKind::Tuple => false,
AggregateKind::Adt(_, _, substs, _) => substs.visit_with(visitor),
AggregateKind::Closure(_, substs) => substs.visit_with(visitor)
}) || fields.visit_with(visitor)
}
}
}
}
impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
Operand::Consume(ref lval) => Operand::Consume(lval.fold_with(folder)),
Operand::Constant(ref c) => Operand::Constant(c.fold_with(folder)),
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
match *self {
Operand::Consume(ref lval) => lval.visit_with(visitor),
Operand::Constant(ref c) => c.visit_with(visitor)
}
}
}
impl<'tcx, B, V> TypeFoldable<'tcx> for Projection<'tcx, B, V>
where B: TypeFoldable<'tcx>, V: TypeFoldable<'tcx>
{
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
use mir::ProjectionElem::*;
let base = self.base.fold_with(folder);
let elem = match self.elem {
Deref => Deref,
Field(f, ty) => Field(f, ty.fold_with(folder)),
Index(ref v) => Index(v.fold_with(folder)),
ref elem => elem.clone()
};
Projection {
base: base,
elem: elem
}
}
fn super_visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> bool {
use mir::ProjectionElem::*;
self.base.visit_with(visitor) ||
match self.elem {
Field(_, ty) => ty.visit_with(visitor),
Index(ref v) => v.visit_with(visitor),
_ => false
}
}
}
impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
Constant {
span: self.span.clone(),
ty: self.ty.fold_with(folder),
literal: self.literal.fold_with(folder)
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
self.ty.visit_with(visitor) || self.literal.visit_with(visitor)
}
}
impl<'tcx> TypeFoldable<'tcx> for Literal<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
Literal::Item { def_id, substs } => Literal::Item {
def_id: def_id,
substs: substs.fold_with(folder)
},
_ => self.clone()
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
match *self {
Literal::Item { substs, .. } => substs.visit_with(visitor),
_ => false
}
}
}

View File

@ -2302,6 +2302,20 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
queries::mir::get(self, DUMMY_SP, did).borrow()
}
/// Given the DefId of an item, returns its MIR, borrowed immutably.
/// Returns `None` if there is no MIR for the `DefId`.
pub fn maybe_item_mir(self, did: DefId) -> Option<Ref<'gcx, Mir<'gcx>>> {
if did.is_local() && !self.maps.mir.borrow().contains_key(&did) {
return None;
}
if !did.is_local() && !self.sess.cstore.is_item_mir_available(did) {
return None;
}
Some(self.item_mir(did))
}
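// A hedged usage sketch (illustration only, not part of this patch):
// `maybe_item_mir` lets a pass probe for MIR without panicking when none is
// available, e.g.
//
//     if let Some(mir) = tcx.maybe_item_mir(def_id) {
//         for (bb, bb_data) in mir.basic_blocks().iter_enumerated() {
//             // inspect `bb` / `bb_data` here
//         }
//     }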
/// If `type_needs_drop` returns true, then `ty` is definitely
/// non-copy and *might* have a destructor attached; if it returns
/// false, then `ty` definitely has no destructor (i.e. no drop glue).

View File

@ -12,6 +12,7 @@ use infer::type_variable;
use ty::{self, Lift, Ty, TyCtxt};
use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use rustc_data_structures::accumulate_vec::AccumulateVec;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use std::rc::Rc;
use syntax::abi;
@ -834,3 +835,13 @@ impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::error::ExpectedFoun
self.expected.visit_with(visitor) || self.found.visit_with(visitor)
}
}
impl<'tcx, T: TypeFoldable<'tcx>, I: Idx> TypeFoldable<'tcx> for IndexVec<I, T> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
self.iter().map(|x| x.fold_with(folder)).collect()
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
self.iter().any(|t| t.visit_with(visitor))
}
}

View File

@ -654,11 +654,12 @@ impl<'a, 'tcx> ty::TyS<'tcx> {
}
tcx.layout_depth.set(depth+1);
let layout = Layout::compute_uncached(self, infcx)?;
let layout = Layout::compute_uncached(self, infcx);
tcx.layout_depth.set(depth);
let layout = layout?;
if can_cache {
tcx.layout_cache.borrow_mut().insert(self, layout);
}
tcx.layout_depth.set(depth);
Ok(layout)
}

View File

@ -8,6 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::collections::range::RangeArgument;
use std::fmt::Debug;
use std::iter::{self, FromIterator};
use std::slice;
@ -145,6 +146,18 @@ impl<I: Idx, T> IndexVec<I, T> {
self.raw.iter_mut().enumerate().map(IntoIdx { _marker: PhantomData })
}
#[inline]
pub fn drain<'a, R: RangeArgument<usize>>(
&'a mut self, range: R) -> impl Iterator<Item=T> + 'a {
self.raw.drain(range)
}
#[inline]
pub fn drain_enumerated<'a, R: RangeArgument<usize>>(
&'a mut self, range: R) -> impl Iterator<Item=(I, T)> + 'a {
self.raw.drain(range).enumerate().map(IntoIdx { _marker: PhantomData })
}
#[inline]
pub fn last(&self) -> Option<I> {
self.len().checked_sub(1).map(I::new)
@ -164,6 +177,16 @@ impl<I: Idx, T> IndexVec<I, T> {
pub fn truncate(&mut self, a: usize) {
self.raw.truncate(a)
}
#[inline]
pub fn get(&self, index: I) -> Option<&T> {
self.raw.get(index.index())
}
#[inline]
pub fn get_mut(&mut self, index: I) -> Option<&mut T> {
self.raw.get_mut(index.index())
}
}
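// Hedged usage sketch (illustration only, not part of this patch): the new
// methods mirror the corresponding `Vec` APIs but speak in typed indices:
//
//     // drain_enumerated yields (I, T) pairs while emptying the vector
//     for (idx, elem) in vec.drain_enumerated(..) { /* ... */ }
//     // get/get_mut are the non-panicking counterparts of Index/IndexMut
//     if let Some(elem) = vec.get(some_index) { /* ... */ }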
impl<I: Idx, T> Index<I> for IndexVec<I, T> {

View File

@ -38,6 +38,7 @@
#![feature(associated_consts)]
#![feature(unsize)]
#![feature(i128_type)]
#![feature(conservative_impl_trait)]
#![cfg_attr(unix, feature(libc))]
#![cfg_attr(test, feature(test))]

View File

@ -1051,6 +1051,7 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("elaborate-drops"));
// No lifetime analysis based on borrowing can be done from here on out.
passes.push_pass(box mir::transform::inline::Inline);
passes.push_pass(box mir::transform::instcombine::InstCombine::new());
passes.push_pass(box mir::transform::deaggregator::Deaggregator);
passes.push_pass(box mir::transform::copy_prop::CopyPropagation);

View File

@ -0,0 +1,252 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! MIR-based callgraph.
//!
//! This only considers direct calls.
use rustc::hir::def_id::DefId;
use rustc_data_structures::graph;
use rustc::mir::*;
use rustc::mir::visit::*;
use rustc::ty;
use rustc::util::nodemap::DefIdMap;
pub struct CallGraph {
node_map: DefIdMap<graph::NodeIndex>,
graph: graph::Graph<DefId, ()>
}
impl CallGraph {
// FIXME: allow for construction of a callgraph that inspects
// cross-crate MIRs if available.
pub fn build<'a, 'tcx>(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> CallGraph {
let def_ids = tcx.maps.mir.borrow().keys();
let mut callgraph = CallGraph {
node_map: DefIdMap(),
graph: graph::Graph::new()
};
for def_id in def_ids {
if !def_id.is_local() { continue; }
let idx = callgraph.add_node(def_id);
let mut call_visitor = CallVisitor {
caller: idx,
graph: &mut callgraph
};
let mir = tcx.item_mir(def_id);
call_visitor.visit_mir(&mir);
}
callgraph
}
// Iterate over the strongly-connected components of the graph
pub fn scc_iter(&self) -> SCCIterator {
SCCIterator::new(&self.graph)
}
// Get the def_id for the given graph node
pub fn def_id(&self, node: graph::NodeIndex) -> DefId {
*self.graph.node_data(node)
}
fn add_node(&mut self, id: DefId) -> graph::NodeIndex {
let graph = &mut self.graph;
*self.node_map.entry(id).or_insert_with(|| {
graph.add_node(id)
})
}
}
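// Hedged usage sketch (mirrors how the inlining pass drives this module):
//
//     let callgraph = CallGraph::build(tcx);
//     for scc in callgraph.scc_iter() {
//         for &node in &scc {
//             let def_id = callgraph.def_id(node);
//             // process each function of the strongly-connected component
//         }
//     }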
struct CallVisitor<'a> {
caller: graph::NodeIndex,
graph: &'a mut CallGraph
}
impl<'a, 'tcx> Visitor<'tcx> for CallVisitor<'a> {
fn visit_terminator_kind(&mut self, _block: BasicBlock,
kind: &TerminatorKind<'tcx>, _loc: Location) {
if let TerminatorKind::Call {
func: Operand::Constant(ref f)
, .. } = *kind {
if let ty::TyFnDef(def_id, _, _) = f.ty.sty {
let callee = self.graph.add_node(def_id);
self.graph.graph.add_edge(self.caller, callee, ());
}
}
}
}
struct StackElement<'g> {
node: graph::NodeIndex,
lowlink: usize,
children: graph::AdjacentTargets<'g, DefId, ()>
}
/**
* Iterator over strongly connected components using Tarjan's algorithm [1].
*
* [1]: https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
*/
pub struct SCCIterator<'g> {
graph: &'g graph::Graph<DefId, ()>,
index: usize,
node_indices: Vec<Option<usize>>,
scc_stack: Vec<graph::NodeIndex>,
current_scc: Vec<graph::NodeIndex>,
visit_stack: Vec<StackElement<'g>>,
}
impl<'g> SCCIterator<'g> {
pub fn new(graph: &'g graph::Graph<DefId, ()>) -> SCCIterator<'g> {
if graph.len_nodes() == 0 {
return SCCIterator {
graph: graph,
index: 0,
node_indices: Vec::new(),
scc_stack: Vec::new(),
current_scc: Vec::new(),
visit_stack: Vec::new()
};
}
let first = graph::NodeIndex(0);
SCCIterator::with_entry(graph, first)
}
pub fn with_entry(graph: &'g graph::Graph<DefId, ()>,
entry: graph::NodeIndex) -> SCCIterator<'g> {
let mut iter = SCCIterator {
graph: graph,
index: 0,
node_indices: Vec::with_capacity(graph.len_nodes()),
scc_stack: Vec::new(),
current_scc: Vec::new(),
visit_stack: Vec::new()
};
iter.visit_one(entry);
iter
}
fn get_next(&mut self) {
self.current_scc.clear();
while !self.visit_stack.is_empty() {
self.visit_children();
let node = self.visit_stack.pop().unwrap();
if let Some(last) = self.visit_stack.last_mut() {
if last.lowlink > node.lowlink {
last.lowlink = node.lowlink;
}
}
debug!("TarjanSCC: Popped node {:?} : lowlink = {:?}; index = {:?}",
node.node, node.lowlink, self.node_index(node.node).unwrap());
if node.lowlink != self.node_index(node.node).unwrap() {
continue;
}
loop {
let n = self.scc_stack.pop().unwrap();
self.current_scc.push(n);
self.set_node_index(n, !0);
if n == node.node { return; }
}
}
}
fn visit_one(&mut self, node: graph::NodeIndex) {
self.index += 1;
let idx = self.index;
self.set_node_index(node, idx);
self.scc_stack.push(node);
self.visit_stack.push(StackElement {
node: node,
lowlink: self.index,
children: self.graph.successor_nodes(node)
});
debug!("TarjanSCC: Node {:?} : index = {:?}", node, idx);
}
fn visit_children(&mut self) {
while let Some(child) = self.visit_stack.last_mut().unwrap().children.next() {
if let Some(child_num) = self.node_index(child) {
let cur = self.visit_stack.last_mut().unwrap();
if cur.lowlink > child_num {
cur.lowlink = child_num;
}
} else {
self.visit_one(child);
}
}
}
fn node_index(&self, node: graph::NodeIndex) -> Option<usize> {
self.node_indices.get(node.node_id()).and_then(|&idx| idx)
}
fn set_node_index(&mut self, node: graph::NodeIndex, idx: usize) {
let i = node.node_id();
if i >= self.node_indices.len() {
self.node_indices.resize(i + 1, None);
}
self.node_indices[i] = Some(idx);
}
}
impl<'g> Iterator for SCCIterator<'g> {
type Item = Vec<graph::NodeIndex>;
fn next(&mut self) -> Option<Vec<graph::NodeIndex>> {
self.get_next();
if self.current_scc.is_empty() {
// Try a new root for the next SCC: if the node_indices map
// doesn't yet cover all nodes, start from the first node past
// the end of the map; otherwise find the first node with no
// index assigned.
//
// FIXME: This should probably use a set of precomputed
// roots instead
if self.node_indices.len() < self.graph.len_nodes() {
let idx = graph::NodeIndex(self.node_indices.len());
self.visit_one(idx);
} else {
for idx in 0..self.node_indices.len() {
if self.node_indices[idx].is_none() {
let idx = graph::NodeIndex(idx);
self.visit_one(idx);
break;
}
}
}
self.get_next();
}
if self.current_scc.is_empty() {
None
} else {
Some(self.current_scc.clone())
}
}
}

View File

@ -46,6 +46,7 @@ extern crate rustc_const_eval;
pub mod diagnostics;
pub mod build;
pub mod callgraph;
pub mod def_use;
pub mod graphviz;
mod hair;
@ -58,4 +59,4 @@ use rustc::ty::maps::Providers;
pub fn provide(providers: &mut Providers) {
mir_map::provide(providers);
transform::qualify_consts::provide(providers);
}
}

View File

@ -0,0 +1,836 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Inlining pass for MIR functions
use rustc::hir::def_id::DefId;
use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use rustc_data_structures::graph;
use rustc::dep_graph::DepNode;
use rustc::mir::*;
use rustc::mir::transform::{MirMapPass, MirPassHook, MirSource, Pass};
use rustc::mir::visit::*;
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::subst::{Subst,Substs};
use rustc::util::nodemap::{DefIdSet};
use super::simplify::{remove_dead_blocks, CfgSimplifier};
use syntax::{attr};
use syntax::abi::Abi;
use callgraph;
const DEFAULT_THRESHOLD: usize = 50;
const HINT_THRESHOLD: usize = 100;
const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const UNKNOWN_SIZE_COST: usize = 10;
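// A rough worked example of this cost model (an illustration, not part of the
// patch): a callee whose blocks hold 3 plain statements plus a Return
// terminator (4 * INSTR_COST = 20), one nested call (CALL_PENALTY = 25), and
// one 16-byte local on a 64-bit target (16 / 8 = 2 machine words) costs
// 20 + 25 + 2 = 47, which fits under DEFAULT_THRESHOLD (50); an #[inline]
// hint raises the budget to HINT_THRESHOLD (100).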
pub struct Inline;
impl<'tcx> MirMapPass<'tcx> for Inline {
fn run_pass<'a>(
&mut self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
hooks: &mut [Box<for<'s> MirPassHook<'s>>]) {
if tcx.sess.opts.debugging_opts.mir_opt_level < 2 { return; }
let _ignore = tcx.dep_graph.in_ignore();
let callgraph = callgraph::CallGraph::build(tcx);
let mut inliner = Inliner {
tcx: tcx,
};
let def_ids = tcx.maps.mir.borrow().keys();
for &def_id in &def_ids {
if !def_id.is_local() { continue; }
let _task = tcx.dep_graph.in_task(DepNode::Mir(def_id));
let mut mir = if let Some(mir) = tcx.maps.mir.borrow().get(&def_id) {
mir.borrow_mut()
} else {
continue;
};
tcx.dep_graph.write(DepNode::Mir(def_id));
let id = tcx.hir.as_local_node_id(def_id).unwrap();
let src = MirSource::from_node(tcx, id);
for hook in &mut *hooks {
hook.on_mir_pass(tcx, src, &mut mir, self, false);
}
}
for scc in callgraph.scc_iter() {
inliner.inline_scc(&callgraph, &scc);
}
for def_id in def_ids {
if !def_id.is_local() { continue; }
let _task = tcx.dep_graph.in_task(DepNode::Mir(def_id));
let mut mir = tcx.maps.mir.borrow()[&def_id].borrow_mut();
tcx.dep_graph.write(DepNode::Mir(def_id));
let id = tcx.hir.as_local_node_id(def_id).unwrap();
let src = MirSource::from_node(tcx, id);
for hook in &mut *hooks {
hook.on_mir_pass(tcx, src, &mut mir, self, true);
}
}
}
}
impl<'tcx> Pass for Inline { }
struct Inliner<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
#[derive(Copy, Clone)]
struct CallSite<'tcx> {
caller: DefId,
callee: DefId,
substs: &'tcx Substs<'tcx>,
bb: BasicBlock,
location: SourceInfo,
}
impl<'a, 'tcx> Inliner<'a, 'tcx> {
fn inline_scc(&mut self, callgraph: &callgraph::CallGraph, scc: &[graph::NodeIndex]) -> bool {
let mut callsites = Vec::new();
let mut in_scc = DefIdSet();
let mut inlined_into = DefIdSet();
for &node in scc {
let def_id = callgraph.def_id(node);
// Don't inspect functions from other crates
let id = if let Some(id) = self.tcx.hir.as_local_node_id(def_id) {
id
} else {
continue;
};
let src = MirSource::from_node(self.tcx, id);
if let MirSource::Fn(_) = src {
if let Some(mir) = self.tcx.maybe_item_mir(def_id) {
for (bb, bb_data) in mir.basic_blocks().iter_enumerated() {
// Don't inline calls that are in cleanup blocks.
if bb_data.is_cleanup { continue; }
// Only consider direct calls to functions
let terminator = bb_data.terminator();
if let TerminatorKind::Call {
func: Operand::Constant(ref f), .. } = terminator.kind {
if let ty::TyFnDef(callee_def_id, substs, _) = f.ty.sty {
callsites.push(CallSite {
caller: def_id,
callee: callee_def_id,
substs: substs,
bb: bb,
location: terminator.source_info
});
}
}
}
in_scc.insert(def_id);
}
}
}
// Move callsites that are in the SCC to the end so
// they're inlined after calls to outside the SCC
let mut first_call_in_scc = callsites.len();
let mut i = 0;
while i < first_call_in_scc {
let f = callsites[i].caller;
if in_scc.contains(&f) {
first_call_in_scc -= 1;
callsites.swap(i, first_call_in_scc);
} else {
i += 1;
}
}
let mut local_change;
let mut changed = false;
loop {
local_change = false;
let mut csi = 0;
while csi < callsites.len() {
let callsite = callsites[csi];
csi += 1;
let _task = self.tcx.dep_graph.in_task(DepNode::Mir(callsite.caller));
self.tcx.dep_graph.write(DepNode::Mir(callsite.caller));
let callee_mir = {
if let Some(callee_mir) = self.tcx.maybe_item_mir(callsite.callee) {
if !self.should_inline(callsite, &callee_mir) {
continue;
}
callee_mir.subst(self.tcx, callsite.substs)
} else {
continue;
}
};
let mut caller_mir = {
let map = self.tcx.maps.mir.borrow();
let mir = map.get(&callsite.caller).unwrap();
mir.borrow_mut()
};
let start = caller_mir.basic_blocks().len();
if !self.inline_call(callsite, &mut caller_mir, callee_mir) {
continue;
}
inlined_into.insert(callsite.caller);
// Add callsites from the inlined function
for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated().skip(start) {
// Only consider direct calls to functions
let terminator = bb_data.terminator();
if let TerminatorKind::Call {
func: Operand::Constant(ref f), .. } = terminator.kind {
if let ty::TyFnDef(callee_def_id, substs, _) = f.ty.sty {
// Don't inline the same function multiple times.
if callsite.callee != callee_def_id {
callsites.push(CallSite {
caller: callsite.caller,
callee: callee_def_id,
substs: substs,
bb: bb,
location: terminator.source_info
});
}
}
}
}
csi -= 1;
if scc.len() == 1 {
callsites.swap_remove(csi);
} else {
callsites.remove(csi);
}
local_change = true;
changed = true;
}
if !local_change {
break;
}
}
// Simplify functions we inlined into.
for def_id in inlined_into {
let _task = self.tcx.dep_graph.in_task(DepNode::Mir(def_id));
self.tcx.dep_graph.write(DepNode::Mir(def_id));
let mut caller_mir = {
let map = self.tcx.maps.mir.borrow();
let mir = map.get(&def_id).unwrap();
mir.borrow_mut()
};
debug!("Running simplify cfg on {:?}", def_id);
CfgSimplifier::new(&mut caller_mir).simplify();
remove_dead_blocks(&mut caller_mir);
}
changed
}
fn should_inline(&self, callsite: CallSite<'tcx>,
callee_mir: &'a Mir<'tcx>) -> bool {
let tcx = self.tcx;
// Don't inline closures that have captures
// FIXME: Handle closures better
if callee_mir.upvar_decls.len() > 0 {
return false;
}
let attrs = tcx.get_attrs(callsite.callee);
let hint = attr::find_inline_attr(None, &attrs[..]);
let hinted = match hint {
// Just treat inline(always) as a hint for now;
// there are cases that prevent inlining that we
// need to check for first.
attr::InlineAttr::Always => true,
attr::InlineAttr::Never => return false,
attr::InlineAttr::Hint => true,
attr::InlineAttr::None => false,
};
// Only inline local functions if they would be eligible for cross-crate
// inlining. This is to ensure that the final crate doesn't have MIR that
// references unexported symbols.
if callsite.callee.is_local() {
if callsite.substs.types().count() == 0 && !hinted {
return false;
}
}
let mut threshold = if hinted {
HINT_THRESHOLD
} else {
DEFAULT_THRESHOLD
};
// Significantly lower the threshold for inlining cold functions
if attr::contains_name(&attrs[..], "cold") {
threshold /= 5;
}
// Give a bonus to functions with a small number of blocks;
// we normally have two or three blocks for even
// very small functions.
if callee_mir.basic_blocks().len() <= 3 {
threshold += threshold / 4;
}
// FIXME: Give a bonus to functions with only a single caller
let id = tcx.hir.as_local_node_id(callsite.caller).expect("Caller not local");
let param_env = ty::ParameterEnvironment::for_item(tcx, id);
let mut first_block = true;
let mut cost = 0;
// Traverse the MIR manually so we can account for the effects of
// inlining on the CFG.
let mut work_list = vec![START_BLOCK];
let mut visited = BitVector::new(callee_mir.basic_blocks().len());
while let Some(bb) = work_list.pop() {
if !visited.insert(bb.index()) { continue; }
let blk = &callee_mir.basic_blocks()[bb];
for stmt in &blk.statements {
// Don't count StorageLive/StorageDead in the inlining cost.
match stmt.kind {
StatementKind::StorageLive(_) |
StatementKind::StorageDead(_) |
StatementKind::Nop => {}
_ => cost += INSTR_COST
}
}
let term = blk.terminator();
let mut is_drop = false;
match term.kind {
TerminatorKind::Drop { ref location, target, unwind } |
TerminatorKind::DropAndReplace { ref location, target, unwind, .. } => {
is_drop = true;
work_list.push(target);
// If the location doesn't actually need dropping, treat it like
// a regular goto.
let ty = location.ty(&callee_mir, tcx).subst(tcx, callsite.substs);
let ty = ty.to_ty(tcx);
if tcx.type_needs_drop_given_env(ty, &param_env) {
cost += CALL_PENALTY;
if let Some(unwind) = unwind {
work_list.push(unwind);
}
} else {
cost += INSTR_COST;
}
}
TerminatorKind::Unreachable |
TerminatorKind::Call { destination: None, .. } if first_block => {
// If the function always diverges, don't inline
// unless the cost is zero
threshold = 0;
}
TerminatorKind::Call {func: Operand::Constant(ref f), .. } => {
if let ty::TyFnDef(.., f) = f.ty.sty {
// Don't give intrinsics the extra penalty for calls
if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
cost += INSTR_COST;
} else {
cost += CALL_PENALTY;
}
}
}
TerminatorKind::Assert { .. } => cost += CALL_PENALTY,
_ => cost += INSTR_COST
}
if !is_drop {
for &succ in &term.successors()[..] {
work_list.push(succ);
}
}
first_block = false;
}
// Count up the cost of local variables and temps; if we know the size,
// use that, otherwise use a moderately-large dummy cost.
let ptr_size = tcx.data_layout.pointer_size.bytes();
for v in callee_mir.vars_and_temps_iter() {
let v = &callee_mir.local_decls[v];
let ty = v.ty.subst(tcx, callsite.substs);
// The cost of the var is its size in machine words, if we
// know it.
if let Some(size) = type_size_of(tcx, param_env.clone(), ty) {
cost += (size / ptr_size) as usize;
} else {
cost += UNKNOWN_SIZE_COST;
}
}
debug!("Inline cost for {:?} is {}", callsite.callee, cost);
if let attr::InlineAttr::Always = hint {
true
} else {
cost <= threshold
}
}
fn inline_call(&self, callsite: CallSite<'tcx>,
caller_mir: &mut Mir<'tcx>, mut callee_mir: Mir<'tcx>) -> bool {
// Don't inline a function into itself
if callsite.caller == callsite.callee { return false; }
let _task = self.tcx.dep_graph.in_task(DepNode::Mir(callsite.caller));
let terminator = caller_mir[callsite.bb].terminator.take().unwrap();
match terminator.kind {
// FIXME: Handle inlining of diverging calls
TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
debug!("Inlined {:?} into {:?}", callsite.callee, callsite.caller);
let is_box_free = Some(callsite.callee) == self.tcx.lang_items.box_free_fn();
let mut local_map = IndexVec::with_capacity(callee_mir.local_decls.len());
let mut scope_map = IndexVec::with_capacity(callee_mir.visibility_scopes.len());
let mut promoted_map = IndexVec::with_capacity(callee_mir.promoted.len());
for mut scope in callee_mir.visibility_scopes.iter().cloned() {
if scope.parent_scope.is_none() {
scope.parent_scope = Some(callsite.location.scope);
scope.span = callee_mir.span;
}
scope.span = callsite.location.span;
let idx = caller_mir.visibility_scopes.push(scope);
scope_map.push(idx);
}
for loc in callee_mir.vars_and_temps_iter() {
let mut local = callee_mir.local_decls[loc].clone();
if let Some(ref mut source_info) = local.source_info {
source_info.scope = scope_map[source_info.scope];
source_info.span = callsite.location.span;
}
let idx = caller_mir.local_decls.push(local);
local_map.push(idx);
}
for p in callee_mir.promoted.iter().cloned() {
let idx = caller_mir.promoted.push(p);
promoted_map.push(idx);
}
// If the call is something like `a[*i] = f(i)`, where
// `i : &mut usize`, then just duplicating the `a[*i]`
// Lvalue could result in two different locations if `f`
// writes to `i`. To prevent this we need to create a temporary
// borrow of the lvalue and pass the destination as `*temp` instead.
fn dest_needs_borrow(lval: &Lvalue) -> bool {
match *lval {
Lvalue::Projection(ref p) => {
match p.elem {
ProjectionElem::Deref |
ProjectionElem::Index(_) => true,
_ => dest_needs_borrow(&p.base)
}
}
// Static variables need a borrow because the callee
// might modify the same static.
Lvalue::Static(_) => true,
_ => false
}
}
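// Illustrative sketch of the rewrite performed below for such destinations:
// inlining `a[*i] = f(i)` effectively becomes
//
//     tmp = &mut a[*i];        // borrow the destination once, up front
//     /* inlined body of f, writing its return value through *tmp */
//
// so writes to `i` inside the inlined body can no longer change which slot
// the return value lands in.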
let dest = if dest_needs_borrow(&destination.0) {
debug!("Creating temp for return destination");
let dest = Rvalue::Ref(
self.tcx.mk_region(ty::ReErased),
BorrowKind::Mut,
destination.0);
let ty = dest.ty(caller_mir, self.tcx);
let temp = LocalDecl::new_temp(ty);
let tmp = caller_mir.local_decls.push(temp);
let tmp = Lvalue::Local(tmp);
let stmt = Statement {
source_info: callsite.location,
kind: StatementKind::Assign(tmp.clone(), dest)
};
caller_mir[callsite.bb]
.statements.push(stmt);
tmp.deref()
} else {
destination.0
};
let return_block = destination.1;
let args : Vec<_> = if is_box_free {
assert!(args.len() == 1);
// box_free takes a Box, but is defined with a *mut T, so inlining
// needs to generate the cast.
// FIXME: we should probably just generate correct MIR in the first place...
let arg = if let Operand::Consume(ref lval) = args[0] {
lval.clone()
} else {
bug!("Constant arg to \"box_free\"");
};
let ptr_ty = args[0].ty(caller_mir, self.tcx);
vec![self.cast_box_free_arg(arg, ptr_ty, &callsite, caller_mir)]
} else {
// Copy the arguments if needed.
self.make_call_args(args, &callsite, caller_mir)
};
let bb_len = caller_mir.basic_blocks().len();
let mut integrator = Integrator {
block_idx: bb_len,
args: &args,
local_map: local_map,
scope_map: scope_map,
promoted_map: promoted_map,
_callsite: callsite,
destination: dest,
return_block: return_block,
cleanup_block: cleanup,
in_cleanup_block: false
};
for (bb, mut block) in callee_mir.basic_blocks_mut().drain_enumerated(..) {
integrator.visit_basic_block_data(bb, &mut block);
caller_mir.basic_blocks_mut().push(block);
}
let terminator = Terminator {
source_info: callsite.location,
kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) }
};
caller_mir[callsite.bb].terminator = Some(terminator);
true
}
kind => {
caller_mir[callsite.bb].terminator = Some(Terminator {
source_info: terminator.source_info,
kind: kind
});
false
}
}
}
fn cast_box_free_arg(&self, arg: Lvalue<'tcx>, ptr_ty: Ty<'tcx>,
callsite: &CallSite<'tcx>, caller_mir: &mut Mir<'tcx>) -> Operand<'tcx> {
let arg = Rvalue::Ref(
self.tcx.mk_region(ty::ReErased),
BorrowKind::Mut,
arg.deref());
let ty = arg.ty(caller_mir, self.tcx);
let ref_tmp = LocalDecl::new_temp(ty);
let ref_tmp = caller_mir.local_decls.push(ref_tmp);
let ref_tmp = Lvalue::Local(ref_tmp);
let ref_stmt = Statement {
source_info: callsite.location,
kind: StatementKind::Assign(ref_tmp.clone(), arg)
};
caller_mir[callsite.bb]
.statements.push(ref_stmt);
let pointee_ty = match ptr_ty.sty {
ty::TyRawPtr(tm) | ty::TyRef(_, tm) => tm.ty,
_ if ptr_ty.is_box() => ptr_ty.boxed_ty(),
_ => bug!("Invalid type `{:?}` for call to box_free", ptr_ty)
};
let ptr_ty = self.tcx.mk_mut_ptr(pointee_ty);
let raw_ptr = Rvalue::Cast(CastKind::Misc, Operand::Consume(ref_tmp), ptr_ty);
let cast_tmp = LocalDecl::new_temp(ptr_ty);
let cast_tmp = caller_mir.local_decls.push(cast_tmp);
let cast_tmp = Lvalue::Local(cast_tmp);
let cast_stmt = Statement {
source_info: callsite.location,
kind: StatementKind::Assign(cast_tmp.clone(), raw_ptr)
};
caller_mir[callsite.bb]
.statements.push(cast_stmt);
Operand::Consume(cast_tmp)
}
fn make_call_args(&self, args: Vec<Operand<'tcx>>,
callsite: &CallSite<'tcx>, caller_mir: &mut Mir<'tcx>) -> Vec<Operand<'tcx>> {
let tcx = self.tcx;
// FIXME: Analyze the usage of the arguments to avoid
// unnecessary temporaries.
args.into_iter().map(|a| {
if let Operand::Consume(Lvalue::Local(local)) = a {
if caller_mir.local_kind(local) == LocalKind::Temp {
// Reuse the operand if it's a temporary already
return a;
}
}
debug!("Creating temp for argument");
// Otherwise, create a temporary for the arg
let arg = Rvalue::Use(a);
let ty = arg.ty(caller_mir, tcx);
let arg_tmp = LocalDecl::new_temp(ty);
let arg_tmp = caller_mir.local_decls.push(arg_tmp);
let arg_tmp = Lvalue::Local(arg_tmp);
let stmt = Statement {
source_info: callsite.location,
kind: StatementKind::Assign(arg_tmp.clone(), arg)
};
caller_mir[callsite.bb].statements.push(stmt);
Operand::Consume(arg_tmp)
}).collect()
}
}
fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParameterEnvironment<'tcx>,
ty: Ty<'tcx>) -> Option<u64> {
tcx.infer_ctxt(param_env, traits::Reveal::All).enter(|infcx| {
ty.layout(&infcx).ok().map(|layout| {
layout.size(&tcx.data_layout).bytes()
})
})
}
/**
* Integrator.
*
* Integrates blocks from the callee function into the calling function.
* Updates block indices, references to locals, and other control-flow
* details.
*/
struct Integrator<'a, 'tcx: 'a> {
block_idx: usize,
args: &'a [Operand<'tcx>],
local_map: IndexVec<Local, Local>,
scope_map: IndexVec<VisibilityScope, VisibilityScope>,
promoted_map: IndexVec<Promoted, Promoted>,
_callsite: CallSite<'tcx>,
destination: Lvalue<'tcx>,
return_block: BasicBlock,
cleanup_block: Option<BasicBlock>,
in_cleanup_block: bool,
}
impl<'a, 'tcx> Integrator<'a, 'tcx> {
fn update_target(&self, tgt: BasicBlock) -> BasicBlock {
let new = BasicBlock::new(tgt.index() + self.block_idx);
debug!("Updating target `{:?}`, new: `{:?}`", tgt, new);
new
}
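// Callee locals are numbered with the return pointer first, then the
// arguments, then vars and temps. `update_local` only remaps vars and temps;
// the return pointer and argument locals are rewritten separately in
// `visit_lvalue`/`visit_operand` below.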
fn update_local(&self, local: Local) -> Option<Local> {
let idx = local.index();
if idx < (self.args.len() + 1) {
return None;
}
let idx = idx - (self.args.len() + 1);
let local = Local::new(idx);
self.local_map.get(local).cloned()
}
fn arg_index(&self, arg: Local) -> Option<usize> {
let idx = arg.index();
if idx > 0 && idx <= self.args.len() {
Some(idx - 1)
} else {
None
}
}
}
impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
fn visit_lvalue(&mut self,
lvalue: &mut Lvalue<'tcx>,
_ctxt: LvalueContext<'tcx>,
_location: Location) {
if let Lvalue::Local(ref mut local) = *lvalue {
if let Some(l) = self.update_local(*local) {
// Temp or Var; update the local reference
*local = l;
return;
}
}
if let Lvalue::Local(local) = *lvalue {
if local == RETURN_POINTER {
// Return pointer; update the lvalue itself
*lvalue = self.destination.clone();
} else if local.index() < (self.args.len() + 1) {
// Argument; once again, update the lvalue itself
let idx = local.index() - 1;
if let Operand::Consume(ref lval) = self.args[idx] {
*lvalue = lval.clone();
} else {
bug!("Arg operand `{:?}` is not an Lvalue use.", idx)
}
}
} else {
self.super_lvalue(lvalue, _ctxt, _location)
}
}
fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) {
if let Operand::Consume(Lvalue::Local(arg)) = *operand {
if let Some(idx) = self.arg_index(arg) {
let new_arg = self.args[idx].clone();
*operand = new_arg;
return;
}
}
self.super_operand(operand, location);
}
fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
self.in_cleanup_block = data.is_cleanup;
self.super_basic_block_data(block, data);
self.in_cleanup_block = false;
}
fn visit_terminator_kind(&mut self, block: BasicBlock,
kind: &mut TerminatorKind<'tcx>, loc: Location) {
self.super_terminator_kind(block, kind, loc);
match *kind {
TerminatorKind::Goto { ref mut target} => {
*target = self.update_target(*target);
}
TerminatorKind::SwitchInt { ref mut targets, .. } => {
for tgt in targets {
*tgt = self.update_target(*tgt);
}
}
TerminatorKind::Drop { ref mut target, ref mut unwind, .. } |
TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
*target = self.update_target(*target);
if let Some(tgt) = *unwind {
*unwind = Some(self.update_target(tgt));
} else if !self.in_cleanup_block {
// Unless this drop is in a cleanup block, add an unwind edge to
// the original call's cleanup block
*unwind = self.cleanup_block;
}
}
TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
if let Some((_, ref mut tgt)) = *destination {
*tgt = self.update_target(*tgt);
}
if let Some(tgt) = *cleanup {
*cleanup = Some(self.update_target(tgt));
} else if !self.in_cleanup_block {
// Unless this call is in a cleanup block, add an unwind edge to
// the original call's cleanup block
*cleanup = self.cleanup_block;
}
}
TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
*target = self.update_target(*target);
if let Some(tgt) = *cleanup {
*cleanup = Some(self.update_target(tgt));
} else if !self.in_cleanup_block {
// Unless this assert is in a cleanup block, add an unwind edge to
// the original call's cleanup block
*cleanup = self.cleanup_block;
}
}
TerminatorKind::Return => {
*kind = TerminatorKind::Goto { target: self.return_block };
}
TerminatorKind::Resume => {
if let Some(tgt) = self.cleanup_block {
*kind = TerminatorKind::Goto { target: tgt }
}
}
TerminatorKind::Unreachable => { }
}
}
fn visit_visibility_scope(&mut self, scope: &mut VisibilityScope) {
*scope = self.scope_map[*scope];
}
fn visit_literal(&mut self, literal: &mut Literal<'tcx>, loc: Location) {
if let Literal::Promoted { ref mut index } = *literal {
if let Some(p) = self.promoted_map.get(*index).cloned() {
*index = p;
}
} else {
self.super_literal(literal, loc);
}
}
}

View File

@ -20,3 +20,4 @@ pub mod dump_mir;
pub mod deaggregator;
pub mod instcombine;
pub mod copy_prop;
pub mod inline;

View File

@ -79,7 +79,7 @@ pub struct CfgSimplifier<'a, 'tcx: 'a> {
}
impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> {
fn new(mir: &'a mut Mir<'tcx>) -> Self {
pub fn new(mir: &'a mut Mir<'tcx>) -> Self {
let mut pred_count = IndexVec::from_elem(0u32, mir.basic_blocks());
// we can't use mir.predecessors() here because that counts
@ -102,7 +102,7 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> {
}
}
fn simplify(mut self) {
pub fn simplify(mut self) {
loop {
let mut changed = false;
@ -137,6 +137,8 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> {
if !changed { break }
}
self.strip_nops()
}
// Collapse a goto chain starting from `start`
@ -231,9 +233,19 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> {
terminator.kind = TerminatorKind::Goto { target: first_succ };
true
}
fn strip_nops(&mut self) {
for blk in self.basic_blocks.iter_mut() {
blk.statements.retain(|stmt| if let StatementKind::Nop = stmt.kind {
false
} else {
true
})
}
}
}
fn remove_dead_blocks(mir: &mut Mir) {
pub fn remove_dead_blocks(mir: &mut Mir) {
let mut seen = BitVector::new(mir.basic_blocks().len());
for (bb, _) in traversal::preorder(mir) {
seen.insert(bb.index());