Auto merge of #33030 - nagisa:mir-unrequire-end-block, r=nikomatsakis

MIR: Do not require END_BLOCK to always exist

All this does is remove the requirement that `END_BLOCK` exist past the first invocation of the `RemoveDeadBlocks` pass. As a result, functions whose CFG never reaches `END_BLOCK` end up not containing the block at all.
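For example, a diverging function never reaches a normal return, so after this change its MIR simply has no return block (illustrative example, not taken from the PR itself):

```rust
// A diverging function: control flow never reaches a `Return` terminator,
// so the MIR built for it no longer needs to carry an unused end block.
fn spin() -> ! {
    loop {}
}
```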

As far as the implementation goes, I’m not entirely satisfied with `BasicBlock::end_block`. I had hoped to make `new` a `const fn` and then just have a `const END_BLOCK` private to `mir::build`, but it turns out that constant functions don’t yet support conditionals or any way to assert.
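Instead, the change caches the return block lazily on the `Builder` (see the `return_block` helper in the diff below). A minimal standalone sketch of that caching pattern, using simplified placeholder types rather than the real rustc ones:

```rust
// Standalone sketch of the lazy caching used by `Builder::return_block`.
// `BasicBlock`, `Cfg`, and `Builder` here are simplified stand-ins.
#[derive(Copy, Clone, Debug, PartialEq)]
struct BasicBlock(u32);

#[derive(Default)]
struct Cfg {
    num_blocks: u32,
}

impl Cfg {
    // Allocate a fresh, empty basic block and return its index.
    fn start_new_block(&mut self) -> BasicBlock {
        let bb = BasicBlock(self.num_blocks);
        self.num_blocks += 1;
        bb
    }
}

#[derive(Default)]
struct Builder {
    cfg: Cfg,
    cached_return_block: Option<BasicBlock>,
}

impl Builder {
    // Create the return block on first use and reuse it afterwards; if no
    // code path ever asks for it, the block is never created at all.
    fn return_block(&mut self) -> BasicBlock {
        match self.cached_return_block {
            Some(rb) => rb,
            None => {
                let rb = self.cfg.start_new_block();
                self.cached_return_block = Some(rb);
                rb
            }
        }
    }
}

fn main() {
    let mut builder = Builder::default();
    let first = builder.return_block();
    let second = builder.return_block();
    assert_eq!(first, second); // the cached block is reused on later calls
}
```

The block is only created on the first call, which is what lets MIR bodies that never return omit it entirely.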
bors 2016-04-20 21:25:26 -07:00
commit 6e03608209
5 changed files with 35 additions and 29 deletions

@@ -63,9 +63,6 @@ pub struct Mir<'tcx> {
/// where execution begins
pub const START_BLOCK: BasicBlock = BasicBlock(0);
/// where execution ends, on normal return
pub const END_BLOCK: BasicBlock = BasicBlock(1);
impl<'tcx> Mir<'tcx> {
pub fn all_basic_blocks(&self) -> Vec<BasicBlock> {
(0..self.basic_blocks.len())
@@ -322,8 +319,7 @@ pub enum TerminatorKind<'tcx> {
Resume,
/// Indicates a normal return. The ReturnPointer lvalue should
/// have been filled in by now. This should only occur in the
/// `END_BLOCK`.
/// have been filled in by now. This should occur at most once.
Return,
/// Drop the Lvalue

@@ -262,7 +262,8 @@ impl<'a,'tcx> Builder<'a,'tcx> {
}
};
let extent = this.extent_of_return_scope();
this.exit_scope(expr_span, extent, block, END_BLOCK);
let return_block = this.return_block();
this.exit_scope(expr_span, extent, block, return_block);
this.cfg.start_new_block().unit()
}
ExprKind::Call { ty, fun, args } => {

@@ -26,23 +26,23 @@ pub struct Builder<'a, 'tcx: 'a> {
fn_span: Span,
// the current set of scopes, updated as we traverse;
// see the `scope` module for more details
/// the current set of scopes, updated as we traverse;
/// see the `scope` module for more details
scopes: Vec<scope::Scope<'tcx>>,
// for each scope, a span of blocks that defines it;
// we track these for use in region and borrow checking,
// but these are liable to get out of date once optimization
// begins. They are also hopefully temporary, and will be
// no longer needed when we adopt graph-based regions.
/// for each scope, a span of blocks that defines it;
/// we track these for use in region and borrow checking,
/// but these are liable to get out of date once optimization
/// begins. They are also hopefully temporary, and will be
/// no longer needed when we adopt graph-based regions.
scope_auxiliary: ScopeAuxiliaryVec,
// the current set of loops; see the `scope` module for more
// details
/// the current set of loops; see the `scope` module for more
/// details
loop_scopes: Vec<scope::LoopScope>,
// the vector of all scopes that we have created thus far;
// we track this for debuginfo later
/// the vector of all scopes that we have created thus far;
/// we track this for debuginfo later
scope_datas: Vec<ScopeData>,
var_decls: Vec<VarDecl<'tcx>>,
@@ -50,9 +50,11 @@ pub struct Builder<'a, 'tcx: 'a> {
temp_decls: Vec<TempDecl<'tcx>>,
unit_temp: Option<Lvalue<'tcx>>,
// cached block with a RESUME terminator; we create this at the
// first panic
/// cached block with the RESUME terminator; this is created
/// when first set of cleanups are built.
cached_resume_block: Option<BasicBlock>,
/// cached block with the RETURN terminator
cached_return_block: Option<BasicBlock>,
}
struct CFG<'tcx> {
@@ -182,11 +184,10 @@ pub fn construct<'a,'tcx>(hir: Cx<'a,'tcx>,
var_indices: FnvHashMap(),
unit_temp: None,
cached_resume_block: None,
cached_return_block: None
};
assert_eq!(builder.cfg.start_new_block(), START_BLOCK);
assert_eq!(builder.cfg.start_new_block(), END_BLOCK);
let mut arg_decls = None; // assigned to `Some` in closures below
let call_site_extent =
@@ -206,12 +207,12 @@ pub fn construct<'a,'tcx>(hir: Cx<'a,'tcx>,
block.unit()
}));
let return_block = builder.return_block();
builder.cfg.terminate(block, call_site_scope_id, span,
TerminatorKind::Goto { target: END_BLOCK });
builder.cfg.terminate(END_BLOCK, call_site_scope_id, span,
TerminatorKind::Goto { target: return_block });
builder.cfg.terminate(return_block, call_site_scope_id, span,
TerminatorKind::Return);
END_BLOCK.unit()
return_block.unit()
});
assert!(
@@ -329,6 +330,17 @@ impl<'a,'tcx> Builder<'a,'tcx> {
}
}
}
fn return_block(&mut self) -> BasicBlock {
match self.cached_return_block {
Some(rb) => rb,
None => {
let rb = self.cfg.start_new_block();
self.cached_return_block = Some(rb);
rb
}
}
}
}
///////////////////////////////////////////////////////////////////////////

@@ -43,9 +43,8 @@ pub struct RemoveDeadBlocks;
impl<'tcx> MirPass<'tcx> for RemoveDeadBlocks {
fn run_pass(&mut self, _: &TyCtxt<'tcx>, _: NodeId, mir: &mut Mir<'tcx>) {
let mut seen = BitVector::new(mir.basic_blocks.len());
// These blocks are always required.
// This block is always required.
seen.insert(START_BLOCK.index());
seen.insert(END_BLOCK.index());
let mut worklist = Vec::with_capacity(4);
worklist.push(START_BLOCK);

@@ -164,8 +164,6 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
.map(|&bb|{
if bb == mir::START_BLOCK {
fcx.new_block("start", None)
} else if bb == mir::END_BLOCK {
fcx.new_block("end", None)
} else {
fcx.new_block(&format!("{:?}", bb), None)
}