Add KillHandle and implement exit code propagation to replace join_latch

This commit is contained in:
Ben Blum 2013-07-02 21:15:34 -04:00
parent 2a99163f5d
commit 52ca256d7b
3 changed files with 134 additions and 3 deletions

128
src/libstd/rt/kill.rs Normal file
View File

@ -0,0 +1,128 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Task death: asynchronous killing, linked failure, exit code propagation.
use cell::Cell;
use option::{Option, Some, None};
use prelude::*;
use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
use util;
// FIXME(#7544)(bblum): think about the cache efficiency of this
// The state behind a KillHandle's atomic refcount. One of these is shared
// by a task and all of its watched children for exit-status bookkeeping.
struct KillHandleInner {
    // ((more fields to be added in a future commit))

    // Shared state between task and children for exit code propagation. These
    // are here so we can re-use the kill handle to implement watched children
    // tasks. Using a separate ARClike would introduce extra atomic adds/subs
    // into common spawn paths, so this is just for speed.

    // Locklessly accessed; protected by the enclosing refcount's barriers.
    // Set by notify_immediate_failure() or propagated up from a child handle
    // in reparent_children_to(). A benign race on writes is tolerated; only
    // the final unwrapper's read must be consistent.
    any_child_failed: bool,
    // A lazy list, consuming which may unwrap() many child tombstones.
    // Each closure returns true iff the subtree it represents succeeded;
    // closures chain to older tombstones, forming a linked list of thunks.
    child_tombstones: Option<~fn() -> bool>,
    // Protects multiple children simultaneously creating tombstones.
    // (A pthread-backed LittleLock; see add_lazy_tombstone below.)
    graveyard_lock: LittleLock,
}
/// State shared between tasks used for task killing during linked failure.
/// Newtype over an atomically-refcounted box; cloning presumably just bumps
/// the refcount so parent and children can share one KillHandleInner.
/// NOTE(review): relies on UnsafeAtomicRcBox's barriers for visibility of
/// the lockless `any_child_failed` flag — confirm against unstable::sync.
#[deriving(Clone)]
pub struct KillHandle(UnsafeAtomicRcBox<KillHandleInner>);
impl KillHandle {
    /// Create a fresh handle: no child has failed yet and there are no
    /// tombstones to collect.
    pub fn new() -> KillHandle {
        KillHandle(UnsafeAtomicRcBox::new(KillHandleInner {
            // Linked failure fields
            // ((none yet))
            // Exit code propagation fields
            any_child_failed: false,
            child_tombstones: None,
            graveyard_lock: LittleLock(),
        }))
    }

    /// Flag this handle as having a failed child, without taking any lock.
    pub fn notify_immediate_failure(&mut self) {
        // A benign data race may happen here if there are failing sibling
        // tasks that were also spawned-watched. The refcount's write barriers
        // in UnsafeAtomicRcBox ensure that this write will be seen by the
        // unwrapper/destructor, whichever task may unwrap it.
        unsafe { (*self.get()).any_child_failed = true; }
    }

    // For use when a task does not need to collect its children's exit
    // statuses, but the task has a parent which might want them.
    //
    // Consumes self; after this call, any pending child exit statuses are
    // either folded into `parent` immediately (failure flag) or deferred as
    // a lazy tombstone closure for the parent to evaluate later.
    pub fn reparent_children_to(self, parent: &mut KillHandle) {
        // Optimistic path: If another child of the parent's already failed,
        // we don't need to worry about any of this.
        if unsafe { (*parent.get()).any_child_failed } {
            return;
        }

        // Try to see if all our children are gone already.
        // try_unwrap yields Right(inner) only when we hold the last
        // reference (all children have dropped theirs).
        match unsafe { self.try_unwrap() } {
            // Couldn't unwrap; children still alive. Reparent entire handle as
            // our own tombstone, to be unwrapped later.
            Left(this) => {
                let this = Cell::new(this); // :(
                do add_lazy_tombstone(parent) |other_tombstones| {
                    let this = Cell::new(this.take()); // :(
                    let others = Cell::new(other_tombstones); // :(
                    || {
                        // Prefer to check tombstones that were there first,
                        // being "more fair" at the expense of tail-recursion.
                        // Short-circuits: a prior failure skips unwrapping us.
                        others.take().map_consume_default(true, |f| f()) && {
                            // By the time this tombstone runs, our children
                            // should be gone, so unwrap() should succeed.
                            let mut inner = unsafe { this.take().unwrap() };
                            (!inner.any_child_failed) &&
                            inner.child_tombstones.take_map_default(true, |f| f())
                        }
                    }
                }
            }
            // Whether or not all children exited, one or more already failed.
            Right(KillHandleInner { any_child_failed: true, _ }) => {
                parent.notify_immediate_failure();
            }
            // All children exited, but some left behind tombstones that we
            // don't want to wait on now. Give them to our parent.
            Right(KillHandleInner { any_child_failed: false,
                                    child_tombstones: Some(f), _ }) => {
                let f = Cell::new(f); // :(
                do add_lazy_tombstone(parent) |other_tombstones| {
                    let f = Cell::new(f.take()); // :(
                    let others = Cell::new(other_tombstones); // :(
                    || {
                        // Prefer fairness to tail-recursion, as in above case.
                        others.take().map_consume_default(true, |f| f()) &&
                            f.take()()
                    }
                }
            }
            // All children exited, none failed. Nothing to do!
            Right(KillHandleInner { any_child_failed: false,
                                    child_tombstones: None, _ }) => { }
        }

        // Helper to prepend a new tombstone closure onto the parent's lazy
        // list while holding the graveyard lock (siblings may race here).
        // NB: Takes a pthread mutex -- 'blk' not allowed to reschedule.
        fn add_lazy_tombstone(parent: &mut KillHandle,
                              blk: &fn(Option<~fn() -> bool>) -> ~fn() -> bool) {
            let inner: &mut KillHandleInner = unsafe { &mut *parent.get() };
            unsafe {
                do inner.graveyard_lock.lock {
                    // Update the current "head node" of the lazy list: hand
                    // the old head to 'blk', which returns the new head that
                    // chains to it.
                    inner.child_tombstones =
                        Some(blk(util::replace(&mut inner.child_tombstones, None)));
                }
            }
        }
    }
}

View File

@ -83,6 +83,9 @@ pub mod global_heap;
/// Implementations of language-critical runtime features like @.
pub mod task;
/// Facilities related to task failure, killing, and death.
mod kill;
/// The coroutine task scheduler, built on the `io` event loop.
mod sched;

View File

@ -225,9 +225,9 @@ impl<T> Drop for UnsafeAtomicRcBox<T>{
/****************************************************************************/
#[allow(non_camel_case_types)] // runtime type
pub type rust_little_lock = *libc::c_void;
type rust_little_lock = *libc::c_void;
struct LittleLock {
pub struct LittleLock {
l: rust_little_lock,
}
@ -239,7 +239,7 @@ impl Drop for LittleLock {
}
}
fn LittleLock() -> LittleLock {
pub fn LittleLock() -> LittleLock {
unsafe {
LittleLock {
l: rust_create_little_lock()