Remove libgreen

With runtime removal complete, there is no longer any reason to provide
libgreen.

[breaking-change]
Aaron Turon 2014-11-14 13:56:15 -08:00
parent 3ee916e50b
commit 91a2c0d512
11 changed files with 0 additions and 3862 deletions
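For context, programs opted into M:N scheduling through an explicit entry point; the pattern below is reproduced from the crate documentation deleted in this commit (pre-1.0 syntax, no longer compilable):

```rust
extern crate green;

// Override the program entry point so that `main` runs inside a pool of
// green-thread schedulers rather than directly on the native runtime.
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
    green::start(argc, argv, green::basic::event_loop, main)
}

fn main() {
    // this code is running in a pool of schedulers
}
```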


@@ -1,259 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This is a basic event loop implementation not meant for any "real purposes"
//! other than testing the scheduler and proving that it's possible to have a
//! pluggable event loop.
//!
//! This implementation is also used as the fallback implementation of an event
//! loop if no other one is provided (and M:N scheduling is desired).
use self::Message::*;
use alloc::arc::Arc;
use std::sync::atomic;
use std::mem;
use std::rt::rtio::{EventLoop, RemoteCallback};
use std::rt::rtio::{PausableIdleCallback, Callback};
use std::rt::exclusive::Exclusive;
/// This is the only exported function from this module.
pub fn event_loop() -> Box<EventLoop + Send> {
box BasicLoop::new() as Box<EventLoop + Send>
}
struct BasicLoop {
work: Vec<proc(): Send>, // pending work
remotes: Vec<(uint, Box<Callback + Send>)>,
next_remote: uint,
messages: Arc<Exclusive<Vec<Message>>>,
idle: Option<Box<Callback + Send>>,
idle_active: Option<Arc<atomic::AtomicBool>>,
}
enum Message { RunRemote(uint), RemoveRemote(uint) }
impl BasicLoop {
fn new() -> BasicLoop {
BasicLoop {
work: vec![],
idle: None,
idle_active: None,
next_remote: 0,
remotes: vec![],
messages: Arc::new(Exclusive::new(Vec::new())),
}
}
/// Process everything in the work queue (continually)
fn work(&mut self) {
while self.work.len() > 0 {
for work in mem::replace(&mut self.work, vec![]).into_iter() {
work();
}
}
}
fn remote_work(&mut self) {
let messages = unsafe {
mem::replace(&mut *self.messages.lock(), Vec::new())
};
for message in messages.into_iter() {
self.message(message);
}
}
fn message(&mut self, message: Message) {
match message {
RunRemote(i) => {
match self.remotes.iter_mut().find(|& &(id, _)| id == i) {
Some(&(_, ref mut f)) => f.call(),
None => panic!("bad remote: {}", i),
}
}
RemoveRemote(i) => {
match self.remotes.iter().position(|&(id, _)| id == i) {
Some(i) => { self.remotes.remove(i).unwrap(); }
None => panic!("bad remote: {}", i),
}
}
}
}
/// Run the idle callback if one is registered
fn idle(&mut self) {
match self.idle {
Some(ref mut idle) => {
if self.idle_active.as_ref().unwrap().load(atomic::SeqCst) {
idle.call();
}
}
None => {}
}
}
fn has_idle(&self) -> bool {
self.idle.is_some() && self.idle_active.as_ref().unwrap().load(atomic::SeqCst)
}
}
impl EventLoop for BasicLoop {
fn run(&mut self) {
// Not exactly efficient, but it gets the job done.
while self.remotes.len() > 0 || self.work.len() > 0 || self.has_idle() {
self.work();
self.remote_work();
if self.has_idle() {
self.idle();
continue
}
unsafe {
let messages = self.messages.lock();
// We block here if we have no messages to process and we may
// receive a message at a later date
if self.remotes.len() > 0 && messages.len() == 0 &&
self.work.len() == 0 {
messages.wait()
}
}
}
}
fn callback(&mut self, f: proc():Send) {
self.work.push(f);
}
// FIXME: Seems like a really weird requirement to have an event loop provide.
fn pausable_idle_callback(&mut self, cb: Box<Callback + Send>)
-> Box<PausableIdleCallback + Send> {
rtassert!(self.idle.is_none());
self.idle = Some(cb);
let a = Arc::new(atomic::AtomicBool::new(true));
self.idle_active = Some(a.clone());
box BasicPausable { active: a } as Box<PausableIdleCallback + Send>
}
fn remote_callback(&mut self, f: Box<Callback + Send>)
-> Box<RemoteCallback + Send> {
let id = self.next_remote;
self.next_remote += 1;
self.remotes.push((id, f));
box BasicRemote::new(self.messages.clone(), id) as
Box<RemoteCallback + Send>
}
fn has_active_io(&self) -> bool { false }
}
struct BasicRemote {
queue: Arc<Exclusive<Vec<Message>>>,
id: uint,
}
impl BasicRemote {
fn new(queue: Arc<Exclusive<Vec<Message>>>, id: uint) -> BasicRemote {
BasicRemote { queue: queue, id: id }
}
}
impl RemoteCallback for BasicRemote {
fn fire(&mut self) {
let mut queue = unsafe { self.queue.lock() };
queue.push(RunRemote(self.id));
queue.signal();
}
}
impl Drop for BasicRemote {
fn drop(&mut self) {
let mut queue = unsafe { self.queue.lock() };
queue.push(RemoveRemote(self.id));
queue.signal();
}
}
struct BasicPausable {
active: Arc<atomic::AtomicBool>,
}
impl PausableIdleCallback for BasicPausable {
fn pause(&mut self) {
self.active.store(false, atomic::SeqCst);
}
fn resume(&mut self) {
self.active.store(true, atomic::SeqCst);
}
}
impl Drop for BasicPausable {
fn drop(&mut self) {
self.active.store(false, atomic::SeqCst);
}
}
#[cfg(test)]
mod test {
use std::rt::task::TaskOpts;
use basic;
use PoolConfig;
use SchedPool;
fn pool() -> SchedPool {
SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: basic::event_loop,
})
}
fn run(f: proc():Send) {
let mut pool = pool();
pool.spawn(TaskOpts::new(), f);
pool.shutdown();
}
#[test]
fn smoke() {
run(proc() {});
}
#[test]
fn some_channels() {
run(proc() {
let (tx, rx) = channel();
spawn(proc() {
tx.send(());
});
rx.recv();
});
}
#[test]
fn multi_thread() {
let mut pool = SchedPool::new(PoolConfig {
threads: 2,
event_loop_factory: basic::event_loop,
});
for _ in range(0u, 20) {
pool.spawn(TaskOpts::new(), proc() {
let (tx, rx) = channel();
spawn(proc() {
tx.send(());
});
rx.recv();
});
}
pool.shutdown();
}
}


@@ -1,325 +0,0 @@
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use stack::Stack;
use std::uint;
use std::mem::transmute;
use std::rt::stack;
use std::raw;
#[cfg(target_arch = "x86_64")]
use std::simd;
use libc;
// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing
// SSE regs. It would be marginally better not to do this. In C++ we
// use an attribute on a struct.
// FIXME #7761: It would be nice to define regs as `Box<Option<Registers>>`
// since the registers are sometimes empty, but the discriminant would
// then misalign the regs again.
pub struct Context {
/// Hold the registers while the task or scheduler is suspended
regs: Box<Registers>,
/// Lower bound and upper bound for the stack
stack_bounds: Option<(uint, uint)>,
}
pub type InitFn = extern "C" fn(uint, *mut (), *mut ()) -> !;
impl Context {
pub fn empty() -> Context {
Context {
regs: new_regs(),
stack_bounds: None,
}
}
/// Create a new context that will resume execution by running proc()
///
/// The `init` function will be run with `arg` and the `start` procedure
/// split up into code and env pointers. It is required that the `init`
/// function never return.
///
/// FIXME: this is basically an awful interface. The main reason for
/// this is to reduce the number of allocations made when a green
/// task is spawned as much as possible
pub fn new(init: InitFn, arg: uint, start: proc():Send,
stack: &mut Stack) -> Context {
let sp: *const uint = stack.end();
let sp: *mut uint = sp as *mut uint;
// Save and then immediately load the current context,
// which we will then modify to call the given function when restored
let mut regs = new_regs();
initialize_call_frame(&mut *regs,
init,
arg,
unsafe { transmute(start) },
sp);
// Scheduler tasks don't have a stack in the "we allocated it" sense,
// but rather they run on pthreads stacks. We have complete control over
// them in terms of the code running on them (and hopefully they don't
// overflow). Additionally, their coroutine stacks are listed as being
// zero-length, so that's how we detect what's what here.
let stack_base: *const uint = stack.start();
let bounds = if sp as libc::uintptr_t == stack_base as libc::uintptr_t {
None
} else {
Some((stack_base as uint, sp as uint))
};
return Context {
regs: regs,
stack_bounds: bounds,
}
}
/* Switch contexts
Suspend the current execution context and resume another by
saving the registers values of the executing thread to a Context
then loading the registers from a previously saved Context.
*/
pub fn swap(out_context: &mut Context, in_context: &Context) {
rtdebug!("swapping contexts");
let out_regs: &mut Registers = match out_context {
&Context { regs: box ref mut r, .. } => r
};
let in_regs: &Registers = match in_context {
&Context { regs: box ref r, .. } => r
};
rtdebug!("noting the stack limit and doing raw swap");
unsafe {
// Right before we switch to the new context, set the new context's
// stack limit in the OS-specified TLS slot. This also means that
// we cannot call any more rust functions after record_stack_bounds
// returns because they would all likely panic due to the limit being
// invalid for the current task. Lucky for us `rust_swap_registers`
// is a C function so we don't have to worry about that!
match in_context.stack_bounds {
Some((lo, hi)) => stack::record_rust_managed_stack_bounds(lo, hi),
// If we're going back to one of the original contexts or
// something that's possibly not a "normal task", then reset
// the stack limit to 0 to make morestack never panic
None => stack::record_rust_managed_stack_bounds(0, uint::MAX),
}
rust_swap_registers(out_regs, in_regs);
}
}
}
#[link(name = "context_switch", kind = "static")]
extern {
fn rust_swap_registers(out_regs: *mut Registers, in_regs: *const Registers);
}
// Register contexts used in various architectures
//
// These structures all represent a context of one task throughout its
// execution. Each struct is a representation of the architecture's register
// set. When swapping between tasks, these register sets are used to save off
// the current registers into one struct, and load them all from another.
//
// Note that this is only used for context switching, which means that some of
// the registers may go unused. For example, for architectures with
// callee/caller saved registers, the context will only reflect the callee-saved
// registers. This is because the caller saved registers are already stored
// elsewhere on the stack (if it was necessary anyway).
//
// Additionally, there may be fields on various architectures which are unused
// entirely because they only reflect what is theoretically possible for a
// "complete register set" to show, but user-space cannot alter these registers.
// An example of this would be the segment selectors for x86.
//
// These structures/functions are roughly in-sync with the source files inside
// of src/rt/arch/$arch. The only currently used function from those folders is
// the `rust_swap_registers` function, but that's only because for now segmented
// stacks are disabled.
#[cfg(target_arch = "x86")]
#[repr(C)]
struct Registers {
eax: u32, ebx: u32, ecx: u32, edx: u32,
ebp: u32, esi: u32, edi: u32, esp: u32,
cs: u16, ds: u16, ss: u16, es: u16, fs: u16, gs: u16,
eflags: u32, eip: u32
}
#[cfg(target_arch = "x86")]
fn new_regs() -> Box<Registers> {
box Registers {
eax: 0, ebx: 0, ecx: 0, edx: 0,
ebp: 0, esi: 0, edi: 0, esp: 0,
cs: 0, ds: 0, ss: 0, es: 0, fs: 0, gs: 0,
eflags: 0, eip: 0
}
}
#[cfg(target_arch = "x86")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
let sp = sp as *mut uint;
// x86 has interesting stack alignment requirements, so do some alignment
// plus some offsetting to figure out what the actual stack should be.
let sp = align_down(sp);
let sp = mut_offset(sp, -4);
unsafe { *mut_offset(sp, 2) = procedure.env as uint };
unsafe { *mut_offset(sp, 1) = procedure.code as uint };
unsafe { *mut_offset(sp, 0) = arg as uint };
let sp = mut_offset(sp, -1);
unsafe { *sp = 0 }; // The final return address
regs.esp = sp as u32;
regs.eip = fptr as u32;
// Last base pointer on the stack is 0
regs.ebp = 0;
}
// windows requires saving more registers (both general and XMM), so the windows
// register context must be larger.
#[cfg(all(windows, target_arch = "x86_64"))]
#[repr(C)]
struct Registers {
gpr:[libc::uintptr_t, ..14],
_xmm:[simd::u32x4, ..10]
}
#[cfg(all(not(windows), target_arch = "x86_64"))]
#[repr(C)]
struct Registers {
gpr:[libc::uintptr_t, ..10],
_xmm:[simd::u32x4, ..6]
}
#[cfg(all(windows, target_arch = "x86_64"))]
fn new_regs() -> Box<Registers> {
box() Registers {
gpr:[0,..14],
_xmm:[simd::u32x4(0,0,0,0),..10]
}
}
#[cfg(all(not(windows), target_arch = "x86_64"))]
fn new_regs() -> Box<Registers> {
box() Registers {
gpr:[0,..10],
_xmm:[simd::u32x4(0,0,0,0),..6]
}
}
#[cfg(target_arch = "x86_64")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
extern { fn rust_bootstrap_green_task(); }
// Redefinitions from rt/arch/x86_64/regs.h
static RUSTRT_RSP: uint = 1;
static RUSTRT_IP: uint = 8;
static RUSTRT_RBP: uint = 2;
static RUSTRT_R12: uint = 4;
static RUSTRT_R13: uint = 5;
static RUSTRT_R14: uint = 6;
static RUSTRT_R15: uint = 7;
let sp = align_down(sp);
let sp = mut_offset(sp, -1);
// The final return address. 0 indicates the bottom of the stack
unsafe { *sp = 0; }
rtdebug!("creating call frame");
rtdebug!("fptr {:#x}", fptr as libc::uintptr_t);
rtdebug!("arg {:#x}", arg);
rtdebug!("sp {}", sp);
// These registers are frobbed by rust_bootstrap_green_task into the right
// location so we can invoke the "real init function", `fptr`.
regs.gpr[RUSTRT_R12] = arg as libc::uintptr_t;
regs.gpr[RUSTRT_R13] = procedure.code as libc::uintptr_t;
regs.gpr[RUSTRT_R14] = procedure.env as libc::uintptr_t;
regs.gpr[RUSTRT_R15] = fptr as libc::uintptr_t;
// These registers are picked up by the regular context switch paths. These
// will put us in "mostly the right context" except for frobbing all the
// arguments to the right place. We have the small trampoline code inside of
// rust_bootstrap_green_task to do that.
regs.gpr[RUSTRT_RSP] = sp as libc::uintptr_t;
regs.gpr[RUSTRT_IP] = rust_bootstrap_green_task as libc::uintptr_t;
// Last base pointer on the stack should be 0
regs.gpr[RUSTRT_RBP] = 0;
}
#[cfg(target_arch = "arm")]
type Registers = [libc::uintptr_t, ..32];
#[cfg(target_arch = "arm")]
fn new_regs() -> Box<Registers> { box {[0, .. 32]} }
#[cfg(target_arch = "arm")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
extern { fn rust_bootstrap_green_task(); }
let sp = align_down(sp);
// sp of arm eabi is 8-byte aligned
let sp = mut_offset(sp, -2);
// The final return address. 0 indicates the bottom of the stack
unsafe { *sp = 0; }
// ARM uses the same technique as x86_64 to have a landing pad for the start
// of all new green tasks. Neither r1/r2 are saved on a context switch, so
// the shim will copy r3/r4 into r1/r2 and then execute the function in r5
regs[0] = arg as libc::uintptr_t; // r0
regs[3] = procedure.code as libc::uintptr_t; // r3
regs[4] = procedure.env as libc::uintptr_t; // r4
regs[5] = fptr as libc::uintptr_t; // r5
regs[13] = sp as libc::uintptr_t; // #52 sp, r13
regs[14] = rust_bootstrap_green_task as libc::uintptr_t; // #56 pc, r14 --> lr
}
#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
type Registers = [libc::uintptr_t, ..32];
#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
fn new_regs() -> Box<Registers> { box {[0, .. 32]} }
#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
let sp = align_down(sp);
// sp of mips o32 is 8-byte aligned
let sp = mut_offset(sp, -2);
// The final return address. 0 indicates the bottom of the stack
unsafe { *sp = 0; }
regs[4] = arg as libc::uintptr_t;
regs[5] = procedure.code as libc::uintptr_t;
regs[6] = procedure.env as libc::uintptr_t;
regs[29] = sp as libc::uintptr_t;
regs[25] = fptr as libc::uintptr_t;
regs[31] = fptr as libc::uintptr_t;
}
fn align_down(sp: *mut uint) -> *mut uint {
let sp = (sp as uint) & !(16 - 1);
sp as *mut uint
}
// ptr::mut_offset is positive ints only
#[inline]
pub fn mut_offset<T>(ptr: *mut T, count: int) -> *mut T {
use std::mem::size_of;
(ptr as int + count * (size_of::<T>() as int)) as *mut T
}


@@ -1,44 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Coroutines represent nothing more than a context and a stack
// segment.
use context::Context;
use stack::{StackPool, Stack};
/// A coroutine is nothing more than a (register context, stack) pair.
pub struct Coroutine {
/// The segment of stack on which the task is currently running or
/// if the task is blocked, on which the task will resume
/// execution.
///
/// Servo needs this to be public in order to tell SpiderMonkey
/// about the stack bounds.
pub current_stack_segment: Stack,
/// Always valid if the task is alive and not running.
pub saved_context: Context
}
impl Coroutine {
pub fn empty() -> Coroutine {
Coroutine {
current_stack_segment: unsafe { Stack::dummy_stack() },
saved_context: Context::empty()
}
}
/// Destroy the coroutine and try to reuse its stack segment.
pub fn recycle(self, stack_pool: &mut StackPool) {
let Coroutine { current_stack_segment, .. } = self;
stack_pool.give_stack(current_stack_segment);
}
}


@@ -1,567 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The "green scheduling" library
//!
//! This library provides M:N threading for rust programs. Internally this has
//! the implementation of a green scheduler along with context switching and a
//! stack-allocation strategy. This can be optionally linked in to rust
//! programs in order to provide M:N functionality inside of 1:1 programs.
//!
//! # Architecture
//!
//! An M:N scheduling library implies that there are N OS threads upon which M
//! "green threads" are multiplexed. In other words, a set of green threads are
//! all run inside a pool of OS threads.
//!
//! With this design, you can achieve _concurrency_ by spawning many green
//! threads, and you can achieve _parallelism_ by running the green threads
//! simultaneously on multiple OS threads. Each OS thread is a candidate for
//! being scheduled on a different core (the source of parallelism), and then
//! all of the green threads cooperatively schedule amongst one another (the
//! source of concurrency).
//!
//! ## Schedulers
//!
//! In order to coordinate among green threads, each OS thread is primarily
//! running something which we call a Scheduler. Whenever a reference to a
//! Scheduler is made, it is synonymous to referencing one OS thread. Each
//! scheduler is bound to one and exactly one OS thread, and the thread that it
//! is bound to never changes.
//!
//! Each scheduler is connected to a pool of other schedulers (a `SchedPool`)
//! which is the thread pool term from above. A pool of schedulers all share the
//! work that they create. Furthermore, whenever a green thread is created (also
//! synonymously referred to as a green task), it is associated with a
//! `SchedPool` forevermore. A green thread cannot leave its scheduler pool.
//!
//! Schedulers can have at most one green thread running on them at a time. When
//! a scheduler is asleep on its event loop, there are no green tasks running on
//! the OS thread or the scheduler. The term "context switch" is used for when
//! the running green thread is swapped out, but this simply changes the one
//! green thread which is running on the scheduler.
//!
//! ## Green Threads
//!
//! A green thread can largely be summarized by a stack and a register context.
//! Whenever a green thread is spawned, it allocates a stack, and then prepares
//! a register context for execution. The green task may be executed across
//! multiple OS threads, but it will always use the same stack and it will carry
//! its register context across OS threads.
//!
//! Each green thread is cooperatively scheduled with other green threads.
//! Primarily, this means that there is no pre-emption of a green thread. The
//! major consequence of this design is that a green thread stuck in an infinite
//! loop will prevent all other green threads from running on that particular
//! scheduler.
//!
//! Scheduling events for green threads occur on communication and I/O
//! boundaries. For example, if a green task blocks waiting for a message on a
//! channel some other green thread can now run on the scheduler. This also has
//! the consequence that until a green thread performs any form of scheduling
//! event, it will be running on the same OS thread (unconditionally).
//!
//! ## Work Stealing
//!
//! With a pool of schedulers, a new green task has a number of options when
//! deciding where to run initially. The current implementation uses a concept
//! called work stealing in order to spread out work among schedulers.
//!
//! In a work-stealing model, each scheduler maintains a local queue of tasks to
//! run, and this queue is stolen from by other schedulers. Implementation-wise,
//! work stealing has some hairy parts, but from a user's perspective, work
//! stealing simply implies that with M green threads and N schedulers where
//! M > N, it is very likely that all schedulers will be busy executing work.
//!
//! # Considerations when using libgreen
//!
//! An M:N runtime has both pros and cons, and there is no one answer as to
//! whether M:N or 1:1 is appropriate to use. As always, there are many
//! advantages and disadvantages between the two. Regardless of the workload,
//! however, there are some aspects of using green threads which you should be
//! aware of:
//!
//! * The largest concern when using libgreen is interoperating with native
//! code. Care should be taken when calling native code that will block the OS
//! thread as it will prevent further green tasks from being scheduled on the
//! OS thread.
//!
//! * Native code using thread-local-storage should be approached
//! with care. Green threads may migrate among OS threads at any time, so
//! native libraries using thread-local state may not always work.
//!
//! * Native synchronization primitives (e.g. pthread mutexes) will also not
//! work for green threads. The reason is that native primitives often
//! operate at an _OS thread_ granularity, whereas green threads operate on a
//! more granular unit of work.
//!
//! * A green threading runtime is not fork-safe. If the process forks(), it
//! cannot expect to make reasonable progress by continuing to use green
//! threads.
//!
//! Note that these concerns do not mean that operating with native code is a
//! lost cause. These are simply concerns which should be considered when
//! invoking native code.
//!
//! # Starting with libgreen
//!
//! ```rust
//! extern crate green;
//!
//! #[start]
//! fn start(argc: int, argv: *const *const u8) -> int {
//! green::start(argc, argv, green::basic::event_loop, main)
//! }
//!
//! fn main() {
//! // this code is running in a pool of schedulers
//! }
//! ```
//!
//! > **Note**: The `main` function in this example does *not* have I/O
//! > support. The basic event loop does not provide any support
//!
//! # Using a scheduler pool
//!
//! This library adds a `GreenTaskBuilder` trait that extends the methods
//! available on `std::task::TaskBuilder` to allow spawning a green task,
//! possibly pinned to a particular scheduler thread:
//!
//! ```rust
//! extern crate green;
//!
//! # fn main() {
//! use std::task::TaskBuilder;
//! use green::{SchedPool, PoolConfig, GreenTaskBuilder};
//!
//! let mut config = PoolConfig::new();
//!
//! let mut pool = SchedPool::new(config);
//!
//! // Spawn tasks into the pool of schedulers
//! TaskBuilder::new().green(&mut pool).spawn(proc() {
//! // this code is running inside the pool of schedulers
//!
//! spawn(proc() {
//! // this code is also running inside the same scheduler pool
//! });
//! });
//!
//! // Dynamically add a new scheduler to the scheduler pool. This adds another
//! // OS thread that green threads can be multiplexed on to.
//! let mut handle = pool.spawn_sched();
//!
//! // Pin a task to the spawned scheduler
//! TaskBuilder::new().green_pinned(&mut pool, &mut handle).spawn(proc() {
//! /* ... */
//! });
//!
//! // Handles keep schedulers alive, so be sure to drop all handles before
//! // destroying the sched pool
//! drop(handle);
//!
//! // Required to shut down this scheduler pool.
//! // The task will panic if `shutdown` is not called.
//! pool.shutdown();
//! # }
//! ```
#![crate_name = "green"]
#![experimental]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/",
html_playground_url = "http://play.rust-lang.org/")]
#![feature(macro_rules, phase, default_type_params, globs)]
#![allow(deprecated)]
#[cfg(test)] #[phase(plugin, link)] extern crate log;
extern crate libc;
extern crate alloc;
use alloc::arc::Arc;
use std::mem::replace;
use std::os;
use std::rt::rtio;
use std::rt::thread::Thread;
use std::rt::task::TaskOpts;
use std::rt;
use std::sync::atomic::{SeqCst, AtomicUint, INIT_ATOMIC_UINT};
use std::sync::deque;
use std::task::{TaskBuilder, Spawner};
use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, PinnedTask, NewNeighbor};
use sleeper_list::SleeperList;
use stack::StackPool;
use task::GreenTask;
mod macros;
mod simple;
mod message_queue;
pub mod basic;
pub mod context;
pub mod coroutine;
pub mod sched;
pub mod sleeper_list;
pub mod stack;
pub mod task;
/// Set up a default runtime configuration, given compiler-supplied arguments.
///
/// This function will block until the entire pool of M:N schedulers have
/// exited. This function also requires a local task to be available.
///
/// # Arguments
///
/// * `argc` & `argv` - The argument vector. On Unix this information is used
/// by os::args.
/// * `main` - The initial procedure to run inside of the M:N scheduling pool.
/// Once this procedure exits, the scheduling pool will begin to shut
/// down. The entire pool (and this function) will only return once
/// all child tasks have finished executing.
///
/// # Return value
///
/// The return value is used as the process return code. 0 on success, 101 on
/// error.
pub fn start(argc: int, argv: *const *const u8,
event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
main: proc():Send) -> int {
rt::init(argc, argv);
let mut main = Some(main);
let mut ret = None;
simple::task().run(|| {
ret = Some(run(event_loop_factory, main.take().unwrap()));
}).destroy();
// unsafe is ok b/c we're sure that the runtime is gone
unsafe { rt::cleanup() }
ret.unwrap()
}
/// Execute the main function in a pool of M:N schedulers.
///
/// Configures the runtime according to the environment, by default using a task
/// scheduler with the same number of threads as cores. Returns a process exit
/// code.
///
/// This function will not return until all schedulers in the associated pool
/// have returned.
pub fn run(event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
main: proc():Send) -> int {
// Create a scheduler pool and spawn the main task into this pool. We will
// get notified over a channel when the main task exits.
let mut cfg = PoolConfig::new();
cfg.event_loop_factory = event_loop_factory;
let mut pool = SchedPool::new(cfg);
let (tx, rx) = channel();
let mut opts = TaskOpts::new();
opts.on_exit = Some(proc(r) tx.send(r));
opts.name = Some("<main>".into_maybe_owned());
pool.spawn(opts, main);
// Wait for the main task to return, and set the process error code
// appropriately.
if rx.recv().is_err() {
os::set_exit_status(rt::DEFAULT_ERROR_CODE);
}
// Now that we're sure all tasks are dead, shut down the pool of schedulers,
// waiting for them all to return.
pool.shutdown();
os::get_exit_status()
}
/// Configuration of how an M:N pool of schedulers is spawned.
pub struct PoolConfig {
/// The number of schedulers (OS threads) to spawn into this M:N pool.
pub threads: uint,
/// A factory function used to create new event loops. If this is not
/// specified then the default event loop factory is used.
pub event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
}
impl PoolConfig {
/// Returns the default configuration, as determined by the environment
/// variables of this process.
pub fn new() -> PoolConfig {
PoolConfig {
threads: rt::default_sched_threads(),
event_loop_factory: basic::event_loop,
}
}
}
/// A structure representing a handle to a pool of schedulers. This handle is
/// used to keep the pool alive and also reap the status from the pool.
pub struct SchedPool {
id: uint,
threads: Vec<Thread<()>>,
handles: Vec<SchedHandle>,
stealers: Vec<deque::Stealer<Box<task::GreenTask>>>,
next_friend: uint,
stack_pool: StackPool,
deque_pool: deque::BufferPool<Box<task::GreenTask>>,
sleepers: SleeperList,
factory: fn() -> Box<rtio::EventLoop + Send>,
task_state: TaskState,
tasks_done: Receiver<()>,
}
/// This is an internal state shared among a pool of schedulers. This is used to
/// keep track of how many tasks are currently running in the pool, and to send
/// on a channel once the entire pool has been drained of all tasks.
#[deriving(Clone)]
pub struct TaskState {
cnt: Arc<AtomicUint>,
done: Sender<()>,
}
impl SchedPool {
/// Execute the main function in a pool of M:N schedulers.
///
/// This will configure the pool according to the `config` parameter, and
/// initially run `main` inside the pool of schedulers.
pub fn new(config: PoolConfig) -> SchedPool {
static POOL_ID: AtomicUint = INIT_ATOMIC_UINT;
let PoolConfig {
threads: nscheds,
event_loop_factory: factory
} = config;
assert!(nscheds > 0);
// The pool of schedulers that will be returned from this function
let (p, state) = TaskState::new();
let mut pool = SchedPool {
threads: vec![],
handles: vec![],
stealers: vec![],
id: POOL_ID.fetch_add(1, SeqCst),
sleepers: SleeperList::new(),
stack_pool: StackPool::new(),
deque_pool: deque::BufferPool::new(),
next_friend: 0,
factory: factory,
task_state: state,
tasks_done: p,
};
// Create a work queue for each scheduler, ntimes. Create an extra
// for the main thread if that flag is set. We won't steal from it.
let mut workers = Vec::with_capacity(nscheds);
let mut stealers = Vec::with_capacity(nscheds);
for _ in range(0, nscheds) {
let (w, s) = pool.deque_pool.deque();
workers.push(w);
stealers.push(s);
}
pool.stealers = stealers;
// Now that we've got all our work queues, create one scheduler per
// queue, spawn the scheduler into a thread, and be sure to keep a
// handle to the scheduler and the thread to keep them alive.
for worker in workers.into_iter() {
rtdebug!("inserting a regular scheduler");
let mut sched = box Scheduler::new(pool.id,
(pool.factory)(),
worker,
pool.stealers.clone(),
pool.sleepers.clone(),
pool.task_state.clone());
pool.handles.push(sched.make_handle());
pool.threads.push(Thread::start(proc() { sched.bootstrap(); }));
}
return pool;
}
/// Creates a new task configured to run inside of this pool of schedulers.
/// This is useful to create a task which can then be sent to a specific
/// scheduler created by `spawn_sched` (and possibly pin it to that
/// scheduler).
#[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"]
pub fn task(&mut self, opts: TaskOpts, f: proc():Send) -> Box<GreenTask> {
GreenTask::configure(&mut self.stack_pool, opts, f)
}
/// Spawns a new task into this pool of schedulers, using the specified
/// options to configure the new task which is spawned.
///
/// New tasks are spawned in a round-robin fashion to the schedulers in this
/// pool, but tasks can certainly migrate among schedulers once they're in
/// the pool.
#[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"]
pub fn spawn(&mut self, opts: TaskOpts, f: proc():Send) {
let task = self.task(opts, f);
// Figure out someone to send this task to
let idx = self.next_friend;
self.next_friend += 1;
if self.next_friend >= self.handles.len() {
self.next_friend = 0;
}
// Jettison the task away!
self.handles[idx].send(TaskFromFriend(task));
}
/// Spawns a new scheduler into this M:N pool. A handle is returned to the
/// scheduler for use. The scheduler will not exit as long as this handle is
/// active.
///
/// The scheduler spawned will participate in work stealing with all of the
/// other schedulers currently in the scheduler pool.
pub fn spawn_sched(&mut self) -> SchedHandle {
let (worker, stealer) = self.deque_pool.deque();
self.stealers.push(stealer.clone());
// Tell all existing schedulers about this new scheduler so they can all
// steal work from it
for handle in self.handles.iter_mut() {
handle.send(NewNeighbor(stealer.clone()));
}
// Create the new scheduler, using the same sleeper list as all the
// other schedulers as well as having a stealer handle to all other
// schedulers.
let mut sched = box Scheduler::new(self.id,
(self.factory)(),
worker,
self.stealers.clone(),
self.sleepers.clone(),
self.task_state.clone());
let ret = sched.make_handle();
self.handles.push(sched.make_handle());
self.threads.push(Thread::start(proc() { sched.bootstrap() }));
return ret;
}
/// Consumes the pool of schedulers, waiting for all tasks to exit and all
/// schedulers to shut down.
///
/// This function is required to be called in order to drop a pool of
/// schedulers; it is considered an error to drop a pool without calling
/// this method.
///
/// This only waits for all tasks in *this pool* of schedulers to exit; any
/// native tasks or extern pools will not be waited on.
pub fn shutdown(mut self) {
self.stealers = vec![];
// Wait for everyone to exit. We may have reached a 0-task count
// multiple times in the past, meaning there could be several buffered
// messages on the `tasks_done` port. We're guaranteed that after *some*
// message the current task count will be 0, so we just receive in a
// loop until everything is totally dead.
while self.task_state.active() {
self.tasks_done.recv();
}
// Now that everyone's gone, tell everything to shut down.
for mut handle in replace(&mut self.handles, vec![]).into_iter() {
handle.send(Shutdown);
}
for thread in replace(&mut self.threads, vec![]).into_iter() {
thread.join();
}
}
}
impl TaskState {
fn new() -> (Receiver<()>, TaskState) {
let (tx, rx) = channel();
(rx, TaskState {
cnt: Arc::new(AtomicUint::new(0)),
done: tx,
})
}
fn increment(&mut self) {
self.cnt.fetch_add(1, SeqCst);
}
fn active(&self) -> bool {
self.cnt.load(SeqCst) != 0
}
fn decrement(&mut self) {
let prev = self.cnt.fetch_sub(1, SeqCst);
if prev == 1 {
self.done.send(());
}
}
}
impl Drop for SchedPool {
fn drop(&mut self) {
if self.threads.len() > 0 {
panic!("dropping a M:N scheduler pool that wasn't shut down");
}
}
}
/// A spawner for green tasks
pub struct GreenSpawner<'a>{
pool: &'a mut SchedPool,
handle: Option<&'a mut SchedHandle>
}
impl<'a> Spawner for GreenSpawner<'a> {
#[inline]
fn spawn(self, opts: TaskOpts, f: proc():Send) {
let GreenSpawner { pool, handle } = self;
match handle {
None => pool.spawn(opts, f),
Some(h) => h.send(PinnedTask(pool.task(opts, f)))
}
}
}
/// An extension trait adding `green` configuration methods to `TaskBuilder`.
pub trait GreenTaskBuilder {
fn green<'a>(self, &'a mut SchedPool) -> TaskBuilder<GreenSpawner<'a>>;
fn green_pinned<'a>(self, &'a mut SchedPool, &'a mut SchedHandle)
-> TaskBuilder<GreenSpawner<'a>>;
}
impl<S: Spawner> GreenTaskBuilder for TaskBuilder<S> {
fn green<'a>(self, pool: &'a mut SchedPool) -> TaskBuilder<GreenSpawner<'a>> {
self.spawner(GreenSpawner {pool: pool, handle: None})
}
fn green_pinned<'a>(self, pool: &'a mut SchedPool, handle: &'a mut SchedHandle)
-> TaskBuilder<GreenSpawner<'a>> {
self.spawner(GreenSpawner {pool: pool, handle: Some(handle)})
}
}
#[cfg(test)]
mod test {
use std::task::TaskBuilder;
use super::{SchedPool, PoolConfig, GreenTaskBuilder};
#[test]
fn test_green_builder() {
let mut pool = SchedPool::new(PoolConfig::new());
let res = TaskBuilder::new().green(&mut pool).try(proc() {
"Success!".to_string()
});
assert_eq!(res.ok().unwrap(), "Success!".to_string());
pool.shutdown();
}
}


@@ -1,118 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: this file probably shouldn't exist
// ignore-lexer-test FIXME #15677
#![macro_escape]
use std::fmt;
// Indicates whether we should perform expensive sanity checks, including rtassert!
// FIXME: Once the runtime matures remove the `true` below to turn off rtassert, etc.
pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert);
macro_rules! rterrln (
($($arg:tt)*) => ( {
format_args!(::macros::dumb_println, $($arg)*)
} )
)
// Some basic logging. Enabled by passing `--cfg rtdebug` to the libstd build.
macro_rules! rtdebug (
($($arg:tt)*) => ( {
if cfg!(rtdebug) {
rterrln!($($arg)*)
}
})
)
macro_rules! rtassert (
( $arg:expr ) => ( {
if ::macros::ENFORCE_SANITY {
if !$arg {
rtabort!(" assertion failed: {}", stringify!($arg));
}
}
} )
)
macro_rules! rtabort (
($($arg:tt)*) => ( {
::macros::abort(format!($($arg)*).as_slice());
} )
)
pub fn dumb_println(args: &fmt::Arguments) {
use std::rt;
let mut w = rt::Stderr;
let _ = writeln!(&mut w, "{}", args);
}
pub fn abort(msg: &str) -> ! {
let msg = if !msg.is_empty() { msg } else { "aborted" };
let hash = msg.chars().fold(0, |accum, val| accum + (val as uint) );
let quote = match hash % 10 {
0 => "
It was from the artists and poets that the pertinent answers came, and I
know that panic would have broken loose had they been able to compare notes.
As it was, lacking their original letters, I half suspected the compiler of
having asked leading questions, or of having edited the correspondence in
corroboration of what he had latently resolved to see.",
1 => "
There are not many persons who know what wonders are opened to them in the
stories and visions of their youth; for when as children we listen and dream,
we think but half-formed thoughts, and when as men we try to remember, we are
dulled and prosaic with the poison of life. But some of us awake in the night
with strange phantasms of enchanted hills and gardens, of fountains that sing
in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
down to sleeping cities of bronze and stone, and of shadowy companies of heroes
that ride caparisoned white horses along the edges of thick forests; and then
we know that we have looked back through the ivory gates into that world of
wonder which was ours before we were wise and unhappy.",
2 => "
Instead of the poems I had hoped for, there came only a shuddering blackness
and ineffable loneliness; and I saw at last a fearful truth which no one had
ever dared to breathe before the unwhisperable secret of secrets The fact
that this city of stone and stridor is not a sentient perpetuation of Old New
York as London is of Old London and Paris of Old Paris, but that it is in fact
quite dead, its sprawling body imperfectly embalmed and infested with queer
animate things which have nothing to do with it as it was in life.",
3 => "
The ocean ate the last of the land and poured into the smoking gulf, thereby
giving up all it had ever conquered. From the new-flooded lands it flowed
again, uncovering death and decay; and from its ancient and immemorial bed it
trickled loathsomely, uncovering nighted secrets of the years when Time was
young and the gods unborn. Above the waves rose weedy remembered spires. The
moon laid pale lilies of light on dead London, and Paris stood up from its damp
grave to be sanctified with star-dust. Then rose spires and monoliths that were
weedy but not remembered; terrible spires and monoliths of lands that men never
knew were lands...",
4 => "
There was a night when winds from unknown spaces whirled us irresistibly into
limitless vacuum beyond all thought and entity. Perceptions of the most
maddeningly untransmissible sort thronged upon us; perceptions of infinity
which at the time convulsed us with joy, yet which are now partly lost to my
memory and partly incapable of presentation to others.",
_ => "You've met with a terrible fate, haven't you?"
};
rterrln!("{}", "");
rterrln!("{}", quote);
rterrln!("{}", "");
rterrln!("fatal runtime error: {}", msg);
abort();
fn abort() -> ! {
use std::intrinsics;
unsafe { intrinsics::abort() }
}
}


@@ -1,67 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::PopResult::*;
use alloc::arc::Arc;
use std::sync::mpsc_queue as mpsc;
use std::kinds::marker;
pub enum PopResult<T> {
Inconsistent,
Empty,
Data(T),
}
pub fn queue<T: Send>() -> (Consumer<T>, Producer<T>) {
let a = Arc::new(mpsc::Queue::new());
(Consumer { inner: a.clone(), noshare: marker::NoSync },
Producer { inner: a, noshare: marker::NoSync })
}
pub struct Producer<T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
pub struct Consumer<T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
impl<T: Send> Consumer<T> {
pub fn pop(&self) -> PopResult<T> {
match self.inner.pop() {
mpsc::Inconsistent => Inconsistent,
mpsc::Empty => Empty,
mpsc::Data(t) => Data(t),
}
}
pub fn casual_pop(&self) -> Option<T> {
match self.inner.pop() {
mpsc::Inconsistent => None,
mpsc::Empty => None,
mpsc::Data(t) => Some(t),
}
}
}
impl<T: Send> Producer<T> {
pub fn push(&self, t: T) {
self.inner.push(t);
}
}
impl<T: Send> Clone for Producer<T> {
fn clone(&self) -> Producer<T> {
Producer { inner: self.inner.clone(), noshare: marker::NoSync }
}
}

File diff suppressed because it is too large.


@@ -1,96 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A small module implementing a simple "runtime" used for bootstrapping a rust
//! scheduler pool and then interacting with it.
use std::any::Any;
use std::mem;
use std::rt::Runtime;
use std::rt::local::Local;
use std::rt::mutex::NativeMutex;
use std::rt::task::{Task, BlockedTask, TaskOpts};
struct SimpleTask {
lock: NativeMutex,
awoken: bool,
}
impl Runtime for SimpleTask {
// Implement the simple tasks of descheduling and rescheduling, but only in
// a small number of cases.
fn deschedule(mut self: Box<SimpleTask>,
times: uint,
mut cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>) {
assert!(times == 1);
let me = &mut *self as *mut SimpleTask;
let cur_dupe = &mut *cur_task as *mut Task;
cur_task.put_runtime(self);
let task = BlockedTask::block(cur_task);
// See libnative/task.rs for what's going on here with the `awoken`
// field and the while loop around wait()
unsafe {
let guard = (*me).lock.lock();
(*me).awoken = false;
match f(task) {
Ok(()) => {
while !(*me).awoken {
guard.wait();
}
}
Err(task) => { mem::forget(task.wake()); }
}
drop(guard);
cur_task = mem::transmute(cur_dupe);
}
Local::put(cur_task);
}
fn reawaken(mut self: Box<SimpleTask>, mut to_wake: Box<Task>) {
let me = &mut *self as *mut SimpleTask;
to_wake.put_runtime(self);
unsafe {
mem::forget(to_wake);
let guard = (*me).lock.lock();
(*me).awoken = true;
guard.signal();
}
}
// These functions are all unimplemented and panic as a result. This is on
// purpose. A "simple task" is just that, a very simple task that can't
// really do a whole lot. The only purpose of the task is to get us off our
// feet and running.
fn yield_now(self: Box<SimpleTask>, _cur_task: Box<Task>) { panic!() }
fn maybe_yield(self: Box<SimpleTask>, _cur_task: Box<Task>) { panic!() }
fn spawn_sibling(self: Box<SimpleTask>,
_cur_task: Box<Task>,
_opts: TaskOpts,
_f: proc():Send) {
panic!()
}
fn stack_bounds(&self) -> (uint, uint) { panic!() }
fn stack_guard(&self) -> Option<uint> { panic!() }
fn can_block(&self) -> bool { true }
fn wrap(self: Box<SimpleTask>) -> Box<Any+'static> { panic!() }
}
pub fn task() -> Box<Task> {
let mut task = box Task::new();
task.put_runtime(box SimpleTask {
lock: unsafe {NativeMutex::new()},
awoken: false,
});
return task;
}


@@ -1,46 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Maintains a shared list of sleeping schedulers. Schedulers
//! use this to wake each other up.
use std::sync::mpmc_bounded_queue::Queue;
use sched::SchedHandle;
pub struct SleeperList {
q: Queue<SchedHandle>,
}
impl SleeperList {
pub fn new() -> SleeperList {
SleeperList{q: Queue::with_capacity(8*1024)}
}
pub fn push(&mut self, value: SchedHandle) {
assert!(self.q.push(value))
}
pub fn pop(&mut self) -> Option<SchedHandle> {
self.q.pop()
}
pub fn casual_pop(&mut self) -> Option<SchedHandle> {
self.q.pop()
}
}
impl Clone for SleeperList {
fn clone(&self) -> SleeperList {
SleeperList {
q: self.q.clone()
}
}
}


@@ -1,215 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ptr;
use std::sync::atomic;
use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable,
MapNonStandardFlags, getenv};
use libc;
/// A task's stack. The name "Stack" is a vestige of segmented stacks.
pub struct Stack {
buf: Option<MemoryMap>,
min_size: uint,
valgrind_id: libc::c_uint,
}
// Try to use MAP_STACK on platforms that support it (it's what we're doing
// anyway), but some platforms don't support it at all. For example, it appears
// that there's a bug in freebsd that MAP_STACK implies MAP_FIXED (so it always
// panics): http://lists.freebsd.org/pipermail/freebsd-bugs/2011-July/044840.html
//
// DragonFly BSD also seems to suffer from the same problem. When MAP_STACK is
// used, it returns the same `ptr` multiple times.
#[cfg(not(any(windows, target_os = "freebsd", target_os = "dragonfly")))]
static STACK_FLAGS: libc::c_int = libc::MAP_STACK | libc::MAP_PRIVATE |
libc::MAP_ANON;
#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
static STACK_FLAGS: libc::c_int = libc::MAP_PRIVATE | libc::MAP_ANON;
#[cfg(windows)]
static STACK_FLAGS: libc::c_int = 0;
impl Stack {
/// Allocate a new stack of `size`. If size = 0, this will panic. Use
/// `dummy_stack` if you want a zero-sized stack.
pub fn new(size: uint) -> Stack {
// Map in a stack. Eventually we might be able to handle stack
// allocation failure, which would fail to spawn the task. But there's
// not many sensible things to do on OOM. Panic seems fine (and is
// what the old stack allocation did).
let stack = match MemoryMap::new(size, &[MapReadable, MapWritable,
MapNonStandardFlags(STACK_FLAGS)]) {
Ok(map) => map,
Err(e) => panic!("mmap for stack of size {} failed: {}", size, e)
};
// Change the last page to be inaccessible. This is to provide safety;
// when an FFI function overflows it will (hopefully) hit this guard
// page. It isn't guaranteed, but that's why FFI is unsafe. buf.data is
// guaranteed to be aligned properly.
if !protect_last_page(&stack) {
panic!("Could not memory-protect guard page. stack={}, errno={}",
stack.data(), errno());
}
let mut stk = Stack {
buf: Some(stack),
min_size: size,
valgrind_id: 0
};
// FIXME: Using the FFI to call a C macro. Slow
stk.valgrind_id = unsafe {
rust_valgrind_stack_register(stk.start() as *const libc::uintptr_t,
stk.end() as *const libc::uintptr_t)
};
return stk;
}
/// Create a 0-length stack which starts (and ends) at 0.
pub unsafe fn dummy_stack() -> Stack {
Stack {
buf: None,
min_size: 0,
valgrind_id: 0
}
}
/// Point to the last writable byte of the stack
pub fn guard(&self) -> *const uint {
(self.start() as uint + page_size()) as *const uint
}
/// Point to the low end of the allocated stack
pub fn start(&self) -> *const uint {
self.buf.as_ref().map(|m| m.data() as *const uint)
.unwrap_or(ptr::null())
}
/// Point one uint beyond the high end of the allocated stack
pub fn end(&self) -> *const uint {
self.buf.as_ref().map(|buf| unsafe {
buf.data().offset(buf.len() as int) as *const uint
}).unwrap_or(ptr::null())
}
}
#[cfg(unix)]
fn protect_last_page(stack: &MemoryMap) -> bool {
unsafe {
// This may seem backwards: the start of the segment is the last page?
// Yes! The stack grows from higher addresses (the end of the allocated
// block) to lower addresses (the start of the allocated block).
let last_page = stack.data() as *mut libc::c_void;
libc::mprotect(last_page, page_size() as libc::size_t,
libc::PROT_NONE) != -1
}
}
#[cfg(windows)]
fn protect_last_page(stack: &MemoryMap) -> bool {
unsafe {
// see above
let last_page = stack.data() as *mut libc::c_void;
let mut old_prot: libc::DWORD = 0;
libc::VirtualProtect(last_page, page_size() as libc::SIZE_T,
libc::PAGE_NOACCESS,
&mut old_prot as libc::LPDWORD) != 0
}
}
impl Drop for Stack {
fn drop(&mut self) {
unsafe {
// FIXME: Using the FFI to call a C macro. Slow
rust_valgrind_stack_deregister(self.valgrind_id);
}
}
}
pub struct StackPool {
// Ideally this would be some data structure that preserved ordering on
// Stack.min_size.
stacks: Vec<Stack>,
}
impl StackPool {
pub fn new() -> StackPool {
StackPool {
stacks: vec![],
}
}
pub fn take_stack(&mut self, min_size: uint) -> Stack {
// Ideally this would be a binary search
match self.stacks.iter().position(|s| min_size <= s.min_size) {
Some(idx) => self.stacks.swap_remove(idx).unwrap(),
None => Stack::new(min_size)
}
}
pub fn give_stack(&mut self, stack: Stack) {
if self.stacks.len() <= max_cached_stacks() {
self.stacks.push(stack)
}
}
}
fn max_cached_stacks() -> uint {
static AMT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
match AMT.load(atomic::SeqCst) {
0 => {}
n => return n - 1,
}
let amt = getenv("RUST_MAX_CACHED_STACKS").and_then(|s| from_str(s.as_slice()));
// This default corresponds to 20M of cache per scheduler (at the
// default size).
let amt = amt.unwrap_or(10);
// 0 is our sentinel value, so ensure that we'll never see 0 after
// initialization has run
AMT.store(amt + 1, atomic::SeqCst);
return amt;
}
extern {
fn rust_valgrind_stack_register(start: *const libc::uintptr_t,
end: *const libc::uintptr_t) -> libc::c_uint;
fn rust_valgrind_stack_deregister(id: libc::c_uint);
}
#[cfg(test)]
mod tests {
use super::StackPool;
#[test]
fn stack_pool_caches() {
let mut p = StackPool::new();
let s = p.take_stack(10);
p.give_stack(s);
let s = p.take_stack(4);
assert_eq!(s.min_size, 10);
p.give_stack(s);
let s = p.take_stack(14);
assert_eq!(s.min_size, 14);
p.give_stack(s);
}
#[test]
fn stack_pool_caches_exact() {
let mut p = StackPool::new();
let mut s = p.take_stack(10);
s.valgrind_id = 100;
p.give_stack(s);
let s = p.take_stack(10);
assert_eq!(s.min_size, 10);
assert_eq!(s.valgrind_id, 100);
}
}


@@ -1,602 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Green Task implementation
//!
//! This module contains the glue to the libstd runtime necessary to integrate
//! M:N scheduling. This GreenTask structure is hidden as a trait object in all
//! rust tasks and virtual calls are made in order to interface with it.
//!
//! Each green task contains a scheduler if it is currently running, and it also
//! contains the rust task itself in order to juggle around ownership of the
//! values.
pub use self::TaskType::*;
pub use self::Home::*;
use std::any::Any;
use std::mem;
use std::raw;
use std::rt::Runtime;
use std::rt::local::Local;
use std::rt::mutex::NativeMutex;
use std::rt::stack;
use std::rt::task::{Task, BlockedTask, TaskOpts};
use std::rt;
use context::Context;
use coroutine::Coroutine;
use sched::{Scheduler, SchedHandle, RunOnce};
use stack::StackPool;
/// The necessary fields needed to keep track of a green task (as opposed to a
/// 1:1 task).
pub struct GreenTask {
/// Coroutine that this task is running on, otherwise known as the register
/// context and the stack that this task owns. This field is optional to
/// relinquish ownership back to a scheduler to recycle stacks at a later
/// date.
pub coroutine: Option<Coroutine>,
/// Optional handle back into the home sched pool of this task. This field
/// is lazily initialized.
pub handle: Option<SchedHandle>,
/// Slot for maintaining ownership of a scheduler. If a task is running,
/// this value will be Some(sched) where the task is running on "sched".
pub sched: Option<Box<Scheduler>>,
/// Temporary ownership slot of a std::rt::task::Task object. This is used
/// to squirrel that libstd task away while we're performing green task
/// operations.
pub task: Option<Box<Task>>,
/// Dictates whether this is a sched task or a normal green task
pub task_type: TaskType,
/// Home pool that this task was spawned into. This field is lazily
/// initialized until when the task is initially scheduled, and is used to
/// make sure that tasks are always woken up in the correct pool of
/// schedulers.
pub pool_id: uint,
// See the comments in the scheduler about why this is necessary
pub nasty_deschedule_lock: NativeMutex,
}
pub enum TaskType {
TypeGreen(Option<Home>),
TypeSched,
}
pub enum Home {
AnySched,
HomeSched(SchedHandle),
}
/// Trampoline code for all new green tasks which are running around. This
/// function is passed through to Context::new as the initial rust landing pad
/// for all green tasks. This code is actually called after the initial context
/// switch onto a green thread.
///
/// The first argument to this function is the `Box<GreenTask>` pointer, and
/// the next two arguments are the user-provided procedure for running code.
///
/// The goal of this weird-looking function is to reduce, as much as possible,
/// the number of allocations performed on green-task startup.
extern fn bootstrap_green_task(task: uint, code: *mut (), env: *mut ()) -> ! {
// Acquire ownership of the `proc()`
let start: proc() = unsafe {
mem::transmute(raw::Procedure { code: code, env: env })
};
// Acquire ownership of the `Box<GreenTask>`
let mut task: Box<GreenTask> = unsafe { mem::transmute(task) };
// First code after swap to this new context. Run our cleanup job
task.pool_id = {
let sched = task.sched.as_mut().unwrap();
sched.run_cleanup_job();
sched.task_state.increment();
sched.pool_id
};
// Convert our green task to a libstd task and then execute the code
// requested. This is the "try/catch" block for this green task and
// is the wrapper for *all* code run in the task.
let mut start = Some(start);
let task = task.swap().run(|| start.take().unwrap()()).destroy();
// Once the function has exited, it's time to run the termination
// routine. This means we need to context switch one more time but
// clean ourselves up on the other end. Since we have no way of
// preserving a handle to the GreenTask down to this point, this
// unfortunately must call `GreenTask::convert`. In order to avoid
// this we could add a `terminate` function to the `Runtime` trait
// in libstd, but that seems less appropriate since the conversion
// method exists.
GreenTask::convert(task).terminate();
}
impl GreenTask {
/// Creates a new green task which is not homed to any particular scheduler
/// and will not have any contained Task structure.
pub fn new(stack_pool: &mut StackPool,
stack_size: Option<uint>,
start: proc():Send) -> Box<GreenTask> {
GreenTask::new_homed(stack_pool, stack_size, AnySched, start)
}
/// Creates a new task (like `new`), but specifies the home for the new task.
pub fn new_homed(stack_pool: &mut StackPool,
stack_size: Option<uint>,
home: Home,
start: proc():Send) -> Box<GreenTask> {
// Allocate ourselves a GreenTask structure
let mut ops = GreenTask::new_typed(None, TypeGreen(Some(home)));
// Allocate a stack for us to run on
let stack_size = stack_size.unwrap_or_else(|| rt::min_stack());
let mut stack = stack_pool.take_stack(stack_size);
let context = Context::new(bootstrap_green_task, ops.as_uint(), start,
&mut stack);
// Package everything up in a coroutine and return
ops.coroutine = Some(Coroutine {
current_stack_segment: stack,
saved_context: context,
});
return ops;
}
/// Creates a new green task with the specified coroutine and type; this is
/// useful when creating scheduler tasks.
pub fn new_typed(coroutine: Option<Coroutine>,
task_type: TaskType) -> Box<GreenTask> {
box GreenTask {
pool_id: 0,
coroutine: coroutine,
task_type: task_type,
sched: None,
handle: None,
nasty_deschedule_lock: unsafe { NativeMutex::new() },
task: Some(box Task::new()),
}
}
/// Creates a new green task with the given configuration options for the
/// contained Task object. The given stack pool is also used to allocate a
/// new stack for this task.
pub fn configure(pool: &mut StackPool,
opts: TaskOpts,
f: proc():Send) -> Box<GreenTask> {
let TaskOpts { name, stack_size, on_exit } = opts;
let mut green = GreenTask::new(pool, stack_size, f);
{
let task = green.task.as_mut().unwrap();
task.name = name;
task.death.on_exit = on_exit;
}
return green;
}
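// Editor's sketch (not part of the original source): typical use of
// `configure` from scheduler glue, mirroring `spawn_sibling` below. The
// `sched`, `me`, and "worker" values here are hypothetical stand-ins.
//
//     let mut opts = TaskOpts::new();
//     opts.name = Some("worker".into_maybe_owned());
//     opts.stack_size = Some(4 * 4096);
//     let sibling = GreenTask::configure(&mut sched.stack_pool, opts, proc() {
//         println!("hello from a green task");
//     });
//     sched.run_task(me, sibling);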
/// Just like the `maybe_take_runtime` function, this function should *not*
/// exist. Usage of this function is _strongly_ discouraged. This is an
/// absolute last resort necessary for converting a libstd task to a green
/// task.
///
/// This function will assert that the task is indeed a green task before
/// returning (and will kill the entire process if this is wrong).
pub fn convert(mut task: Box<Task>) -> Box<GreenTask> {
match task.maybe_take_runtime::<GreenTask>() {
Some(mut green) => {
green.put_task(task);
green
}
None => rtabort!("not a green task any more?"),
}
}
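// Editor's sketch (not part of the original source): `convert` is effectively
// the inverse of `swap` below. Given the libstd task currently living in
// thread-local storage, it recovers the GreenTask that owns it, aborting the
// process if that task is not actually green:
//
//     let task: Box<Task> = Local::take();
//     let green: Box<GreenTask> = GreenTask::convert(task);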
pub fn give_home(&mut self, new_home: Home) {
match self.task_type {
TypeGreen(ref mut home) => { *home = Some(new_home); }
TypeSched => rtabort!("type error: used SchedTask as GreenTask"),
}
}
pub fn take_unwrap_home(&mut self) -> Home {
match self.task_type {
TypeGreen(ref mut home) => home.take().unwrap(),
TypeSched => rtabort!("type error: used SchedTask as GreenTask"),
}
}
// New utility functions for homes.
pub fn is_home_no_tls(&self, sched: &Scheduler) -> bool {
match self.task_type {
TypeGreen(Some(AnySched)) => { false }
TypeGreen(Some(HomeSched(SchedHandle { sched_id: ref id, .. }))) => {
*id == sched.sched_id()
}
TypeGreen(None) => { rtabort!("task without home"); }
TypeSched => {
// Awe yea
rtabort!("type error: expected: TypeGreen, found: TaskSched");
}
}
}
pub fn homed(&self) -> bool {
match self.task_type {
TypeGreen(Some(AnySched)) => { false }
TypeGreen(Some(HomeSched(SchedHandle { .. }))) => { true }
TypeGreen(None) => {
rtabort!("task without home");
}
TypeSched => {
rtabort!("type error: expected: TypeGreen, found: TaskSched");
}
}
}
pub fn is_sched(&self) -> bool {
match self.task_type {
TypeGreen(..) => false, TypeSched => true,
}
}
// Unsafe functions for transferring ownership of this GreenTask across
// context switches
pub fn as_uint(&self) -> uint {
self as *const GreenTask as uint
}
pub unsafe fn from_uint(val: uint) -> Box<GreenTask> {
mem::transmute(val)
}
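// Editor's sketch (not part of the original source): how a task is smuggled
// across a context switch as a plain integer. `ops` is the Box<GreenTask>
// handed to Context::new in `new_homed` above, and the trampoline on the far
// side of the switch reclaims ownership:
//
//     let raw: uint = ops.as_uint();  // address of the GreenTask
//     // ... context switch; `raw` arrives as the trampoline's first argument ...
//     let task: Box<GreenTask> = unsafe { GreenTask::from_uint(raw) };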
// Runtime glue functions and helpers
pub fn put_with_sched(mut self: Box<GreenTask>, sched: Box<Scheduler>) {
assert!(self.sched.is_none());
self.sched = Some(sched);
self.put();
}
pub fn put_task(&mut self, task: Box<Task>) {
assert!(self.task.is_none());
self.task = Some(task);
}
pub fn swap(mut self: Box<GreenTask>) -> Box<Task> {
let mut task = self.task.take().unwrap();
task.put_runtime(self);
return task;
}
pub fn put(self: Box<GreenTask>) {
assert!(self.sched.is_some());
Local::put(self.swap());
}
fn terminate(mut self: Box<GreenTask>) -> ! {
let sched = self.sched.take().unwrap();
sched.terminate_current_task(self)
}
// This function is used to remotely wake this green task back up on its
// original pool of schedulers. In order to do so, each task arranges for a
// SchedHandle to be available upon descheduling, which is used to send the
// task back to the original pool.
//
// Note that there is an interesting transfer of ownership going on here. We
// must relinquish ownership of the green task, but then also send the task
// over the handle back to the original scheduler. In order to safely do
// this, we leverage the already-present "nasty descheduling lock". The
// reason for doing this is that each task will bounce on this lock after
// resuming from a context switch. By holding the lock over the enqueueing
// of the task, we're guaranteed that the SchedHandle's memory will be valid
// for this entire function.
//
// An alternative would be to have very cheaply cloneable handles, but right
// now a SchedHandle requires something like 6 allocations, so cloning a
// handle is *not* a cheap operation. Until the day comes that we
// need to optimize this, a lock should do just fine (it's completely
// uncontended except for when the task is rescheduled).
fn reawaken_remotely(mut self: Box<GreenTask>) {
unsafe {
let mtx = &mut self.nasty_deschedule_lock as *mut NativeMutex;
let handle = self.handle.as_mut().unwrap() as *mut SchedHandle;
let _guard = (*mtx).lock();
(*handle).send(RunOnce(self));
}
}
}
impl Runtime for GreenTask {
fn yield_now(mut self: Box<GreenTask>, cur_task: Box<Task>) {
self.put_task(cur_task);
let sched = self.sched.take().unwrap();
sched.yield_now(self);
}
fn maybe_yield(mut self: Box<GreenTask>, cur_task: Box<Task>) {
self.put_task(cur_task);
let sched = self.sched.take().unwrap();
sched.maybe_yield(self);
}
fn deschedule(mut self: Box<GreenTask>,
times: uint,
cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>) {
self.put_task(cur_task);
let mut sched = self.sched.take().unwrap();
// In order for this task to be reawoken in all possible contexts, we
// may need a handle back into the current scheduler. When we're woken
// up in anything other than the local scheduler pool, this handle is
// used to send this task back into the scheduler pool.
if self.handle.is_none() {
self.handle = Some(sched.make_handle());
self.pool_id = sched.pool_id;
}
// This code is pretty standard, except for the usage of
// `GreenTask::convert`. Right now if we use `reawaken` directly it will
// expect there to be a task in local TLS, but that is not true for
// this deschedule block (because the scheduler must retain ownership of
// the task while the cleanup job is running). In order to get around
// this for now, we invoke the scheduler directly with the converted
// Task => GreenTask structure.
if times == 1 {
sched.deschedule_running_task_and_then(self, |sched, task| {
match f(task) {
Ok(()) => {}
Err(t) => {
t.wake().map(|t| {
sched.enqueue_task(GreenTask::convert(t))
});
}
}
});
} else {
sched.deschedule_running_task_and_then(self, |sched, task| {
for task in task.make_selectable(times) {
match f(task) {
Ok(()) => {},
Err(task) => {
task.wake().map(|t| {
sched.enqueue_task(GreenTask::convert(t))
});
break
}
}
}
});
}
}
fn reawaken(mut self: Box<GreenTask>, to_wake: Box<Task>) {
self.put_task(to_wake);
assert!(self.sched.is_none());
// Optimistically look for a local task, but if one's not available to
// inspect (in order to see if it's in the same sched pool as we are),
// then just use our remote wakeup routine and carry on!
let mut running_task: Box<Task> = match Local::try_take() {
Some(task) => task,
None => return self.reawaken_remotely()
};
// Waking up a green thread is a bit of a tricky situation. We have no
// guarantee about where the current task is running. The options we
// have for where this current task is running are:
//
// 1. Our original scheduler pool
// 2. Some other scheduler pool
// 3. Something that isn't a scheduler pool
//
// Figuring out which case we're in is the reason that the
// `maybe_take_runtime` function exists. Using this function we can
// dynamically check to see which of these cases is the current
// situation and then dispatch accordingly.
//
// In case 1, we just use the local scheduler to resume ourselves
// immediately (if a rescheduling is possible).
//
// In case 2 and 3, we need to remotely reawaken ourself in order to be
// transplanted back to the correct scheduler pool.
match running_task.maybe_take_runtime::<GreenTask>() {
Some(mut running_green_task) => {
running_green_task.put_task(running_task);
let sched = running_green_task.sched.take().unwrap();
if sched.pool_id == self.pool_id {
sched.run_task(running_green_task, self);
} else {
self.reawaken_remotely();
// put that thing back where it came from!
running_green_task.put_with_sched(sched);
}
}
None => {
self.reawaken_remotely();
Local::put(running_task);
}
}
}
fn spawn_sibling(mut self: Box<GreenTask>,
cur_task: Box<Task>,
opts: TaskOpts,
f: proc():Send) {
self.put_task(cur_task);
// First, set up a bomb which, when it goes off, will restore the local
// task unless it's disarmed. This allows us to gracefully panic from
// inside of `configure`, which allocates a new task.
struct Bomb { inner: Option<Box<GreenTask>> }
impl Drop for Bomb {
fn drop(&mut self) {
let _ = self.inner.take().map(|task| task.put());
}
}
let mut bomb = Bomb { inner: Some(self) };
// Spawns a task into the current scheduler. We allocate the new task's
// stack from the scheduler's stack pool, and then configure it
// according to `opts`. Afterwards we bootstrap it immediately by
// switching to it.
//
// Upon returning, our task is back in TLS and we're good to return.
let sibling = {
let sched = bomb.inner.as_mut().unwrap().sched.as_mut().unwrap();
GreenTask::configure(&mut sched.stack_pool, opts, f)
};
let mut me = bomb.inner.take().unwrap();
let sched = me.sched.take().unwrap();
sched.run_task(me, sibling)
}
fn stack_bounds(&self) -> (uint, uint) {
let c = self.coroutine.as_ref()
.expect("GreenTask.stack_bounds called without a coroutine");
// Don't return the red zone as part of the usable stack of this task;
// it's essentially an implementation detail.
(c.current_stack_segment.start() as uint + stack::RED_ZONE,
c.current_stack_segment.end() as uint)
}
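// Editor's note (not part of the original source): with a hypothetical stack
// segment spanning 0x1000..0x9000 and a RED_ZONE of 0x1000 bytes, the bounds
// reported to libstd would be (0x2000, 0x9000).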
fn stack_guard(&self) -> Option<uint> {
let c = self.coroutine.as_ref()
.expect("GreenTask.stack_guard called without a coroutine");
Some(c.current_stack_segment.guard() as uint)
}
fn can_block(&self) -> bool { false }
fn wrap(self: Box<GreenTask>) -> Box<Any+'static> {
self as Box<Any+'static>
}
}
#[cfg(test)]
mod tests {
use std::rt::local::Local;
use std::rt::task::Task;
use std::task;
use std::rt::task::TaskOpts;
use super::super::{PoolConfig, SchedPool};
use super::GreenTask;
fn spawn_opts(opts: TaskOpts, f: proc():Send) {
let mut pool = SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: super::super::basic::event_loop,
});
pool.spawn(opts, f);
pool.shutdown();
}
#[test]
fn smoke() {
let (tx, rx) = channel();
spawn_opts(TaskOpts::new(), proc() {
tx.send(());
});
rx.recv();
}
#[test]
fn smoke_panic() {
let (tx, rx) = channel::<int>();
spawn_opts(TaskOpts::new(), proc() {
let _tx = tx;
panic!()
});
assert_eq!(rx.recv_opt(), Err(()));
}
#[test]
fn smoke_opts() {
let mut opts = TaskOpts::new();
opts.name = Some("test".into_maybe_owned());
opts.stack_size = Some(20 * 4096);
let (tx, rx) = channel();
opts.on_exit = Some(proc(r) tx.send(r));
spawn_opts(opts, proc() {});
assert!(rx.recv().is_ok());
}
#[test]
fn smoke_opts_panic() {
let mut opts = TaskOpts::new();
let (tx, rx) = channel();
opts.on_exit = Some(proc(r) tx.send(r));
spawn_opts(opts, proc() { panic!() });
assert!(rx.recv().is_err());
}
#[test]
fn yield_test() {
let (tx, rx) = channel();
spawn_opts(TaskOpts::new(), proc() {
for _ in range(0u, 10) { task::deschedule(); }
tx.send(());
});
rx.recv();
}
#[test]
fn spawn_children() {
let (tx1, rx) = channel();
spawn_opts(TaskOpts::new(), proc() {
let (tx2, rx) = channel();
spawn(proc() {
let (tx3, rx) = channel();
spawn(proc() {
tx3.send(());
});
rx.recv();
tx2.send(());
});
rx.recv();
tx1.send(());
});
rx.recv();
}
#[test]
fn spawn_inherits() {
let (tx, rx) = channel();
spawn_opts(TaskOpts::new(), proc() {
spawn(proc() {
let mut task: Box<Task> = Local::take();
match task.maybe_take_runtime::<GreenTask>() {
Some(ops) => {
task.put_runtime(ops);
}
None => panic!(),
}
Local::put(task);
tx.send(());
});
});
rx.recv();
}
}