auto merge of #7186 : dotdash/rust/landing_pads, r=pcwalton

Currently, cleanup blocks are only reused when there are nested scopes: the
child scope's cleanup block will terminate with a jump to the parent
scope's cleanup block.
any cleanup will force a fresh cleanup block. This means quadratic
growth with the number of allocations in a scope, because each
allocation needs a landing pad.

Instead of forcing a fresh cleanup block, we can keep a list of chained
cleanup blocks that form a prefix of the currently required cleanups.
That way, the next cleanup block only has to handle newly added
cleanups. And by keeping the whole list instead of just the latest
block, we can also handle revocations more efficiently, by only
dropping those blocks that are no longer required, instead of all of
them.

Reduces the size of librustc by about 5% and the time required to build
it by about 10%.
This commit is contained in:
bors 2013-06-16 11:48:57 -07:00
commit 81506a6b81
2 changed files with 31 additions and 13 deletions

View File

@ -1317,26 +1317,38 @@ pub fn cleanup_and_leave(bcx: block,
match cur.kind {
block_scope(inf) if !inf.empty_cleanups() => {
let (sub_cx, inf_cleanups) = {
let (sub_cx, dest, inf_cleanups) = {
let inf = &mut *inf; // FIXME(#5074) workaround stage0
let mut skip = 0;
let mut dest = None;
{
let r = vec::find((*inf).cleanup_paths, |cp| cp.target == leave);
let r = vec::rfind((*inf).cleanup_paths, |cp| cp.target == leave);
for r.iter().advance |cp| {
Br(bcx, cp.dest);
return;
if cp.size == inf.cleanups.len() {
Br(bcx, cp.dest);
return;
}
skip = cp.size;
dest = Some(cp.dest);
}
}
let sub_cx = sub_block(bcx, "cleanup");
Br(bcx, sub_cx.llbb);
inf.cleanup_paths.push(cleanup_path {
target: leave,
size: inf.cleanups.len(),
dest: sub_cx.llbb
});
(sub_cx, copy inf.cleanups)
(sub_cx, dest, inf.cleanups.tailn(skip).to_owned())
};
bcx = trans_block_cleanups_(sub_cx,
inf_cleanups,
is_lpad);
for dest.iter().advance |&dest| {
Br(bcx, dest);
return;
}
}
_ => ()
}

View File

@ -325,11 +325,17 @@ pub enum cleanup {
// target: none means the path ends in a resume instruction
// A memoized cleanup path: records that a block (`dest`) has already been
// generated which performs a prefix of this scope's cleanups on the way
// toward `target`, so later exits can reuse it instead of re-emitting them.
pub struct cleanup_path {
// Final destination of this path; None means the path ends in a resume
// instruction (per the comment above in the original source).
target: Option<BasicBlockRef>,
// Number of cleanups (a leading prefix of the scope's cleanup list) that
// this path already handles; compared against cleanups.len() by callers.
size: uint,
// Basic block to branch to in order to take this path.
dest: BasicBlockRef
}
pub fn scope_clean_changed(scope_info: &mut scope_info) {
if scope_info.cleanup_paths.len() > 0u { scope_info.cleanup_paths = ~[]; }
// Called when cleanups are revoked, leaving `size` of them in effect.
// Invalidates the cached landing pad and drops only those cached cleanup
// paths that cover more than `size` cleanups; paths covering a prefix of at
// most `size` cleanups are still valid and are kept (this is the
// revocation-efficiency win described in the commit message).
pub fn shrink_scope_clean(scope_info: &mut scope_info, size: uint) {
scope_info.landing_pad = None;
// cleanup_paths is ordered by increasing coverage, so take_while keeps
// exactly the still-valid leading chain of blocks.
scope_info.cleanup_paths = scope_info.cleanup_paths.iter()
.take_while(|&cu| cu.size <= size).transform(|&x|x).collect();
}
// Called after a new cleanup is pushed onto a scope. The cached landing pad
// no longer covers every cleanup, so it is invalidated; cached cleanup_paths
// remain valid because each still handles a correct prefix of the cleanups
// (the next cleanup block only has to handle the newly added ones).
pub fn grow_scope_clean(scope_info: &mut scope_info) {
scope_info.landing_pad = None;
}
@ -374,7 +380,7 @@ pub fn add_clean(bcx: block, val: ValueRef, t: ty::t) {
scope_info.cleanups.push(
clean(|a| glue::drop_ty_root(a, root, rooted, t),
cleanup_type));
scope_clean_changed(scope_info);
grow_scope_clean(scope_info);
}
}
@ -388,7 +394,7 @@ pub fn add_clean_temp_immediate(cx: block, val: ValueRef, ty: ty::t) {
scope_info.cleanups.push(
clean_temp(val, |a| glue::drop_ty_immediate(a, val, ty),
cleanup_type));
scope_clean_changed(scope_info);
grow_scope_clean(scope_info);
}
}
pub fn add_clean_temp_mem(bcx: block, val: ValueRef, t: ty::t) {
@ -402,7 +408,7 @@ pub fn add_clean_temp_mem(bcx: block, val: ValueRef, t: ty::t) {
scope_info.cleanups.push(
clean_temp(val, |a| glue::drop_ty_root(a, root, rooted, t),
cleanup_type));
scope_clean_changed(scope_info);
grow_scope_clean(scope_info);
}
}
pub fn add_clean_return_to_mut(bcx: block,
@ -434,7 +440,7 @@ pub fn add_clean_return_to_mut(bcx: block,
filename_val,
line_val),
normal_exit_only));
scope_clean_changed(scope_info);
grow_scope_clean(scope_info);
}
}
pub fn add_clean_free(cx: block, ptr: ValueRef, heap: heap) {
@ -451,7 +457,7 @@ pub fn add_clean_free(cx: block, ptr: ValueRef, heap: heap) {
do in_scope_cx(cx) |scope_info| {
scope_info.cleanups.push(clean_temp(ptr, free_fn,
normal_exit_and_unwind));
scope_clean_changed(scope_info);
grow_scope_clean(scope_info);
}
}
@ -474,7 +480,7 @@ pub fn revoke_clean(cx: block, val: ValueRef) {
vec::slice(scope_info.cleanups,
*i + 1u,
scope_info.cleanups.len()));
scope_clean_changed(scope_info);
shrink_scope_clean(scope_info, *i);
}
}
}