auto merge of #18967 : aturon/rust/remove-runtime, r=alexcrichton

This PR completes the removal of the runtime system and green-threaded abstractions as part of implementing [RFC 230](https://github.com/rust-lang/rfcs/pull/230).

Specifically:

* It removes the `Runtime` trait, welding the scheduling infrastructure directly to native threads.

* It removes `libgreen` and `libnative` entirely.

* It rewrites `sync::mutex` as a trivial layer on top of native mutexes (see the sketch after this list). Eventually, the two modules will be merged.

* It hides the vast majority of `std::rt`.
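
For illustration, here is a minimal sketch (not the code from this PR) of what such a trivial layer can look like, written against the era's `std::rt::mutex::NativeMutex` primitive that also appears in the removed code below; the `Mutex` wrapper and its `with_lock` helper are hypothetical names:

```rust
use std::rt::mutex::NativeMutex;

/// Hypothetical wrapper: a thin safe layer over one native OS mutex.
pub struct Mutex { inner: NativeMutex }

impl Mutex {
    pub fn new() -> Mutex {
        // `NativeMutex::new` is unsafe because the caller must ensure the
        // mutex is initialized and destroyed exactly once; owning it inside
        // a struct with a normal lifetime upholds that.
        Mutex { inner: unsafe { NativeMutex::new() } }
    }

    /// Runs `f` with the lock held; the guard releases the OS mutex when
    /// it goes out of scope.
    pub fn with_lock<T>(&self, f: || -> T) -> T {
        let _guard = unsafe { self.inner.lock() };
        f()
    }
}
```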

This completes the basic task of removing the runtime system (I/O and scheduling) and components that depend on it. 

After this lands, a follow-up PR will pull the `rustrt` crate back into `std`, turn `std::task` into `std::thread` (with API changes to go along with it), and completely cut out the remaining startup/teardown sequence. Other changes, including new [TLS](https://github.com/rust-lang/rfcs/pull/461) and synchronization APIs, are in the RFC or pre-RFC phase.
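
For orientation: once this lands, every spawned task is backed 1:1 by a native OS thread, with no scheduler pool to multiplex onto. A minimal sketch using the task API of the time (which this PR leaves in place):

```rust
fn main() {
    let (tx, rx) = channel();
    // With libgreen gone, this spawn is serviced by a native OS thread.
    spawn(proc() {
        tx.send("hello from a 1:1 thread".to_string());
    });
    println!("{}", rx.recv());
}
```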

Closes #17325
Closes #18687

[breaking-change]

r? @alexcrichton
bors 2014-11-21 03:41:45 +00:00
commit c9f6d69642
71 changed files with 378 additions and 7001 deletions


@@ -37,7 +37,7 @@
#
# DEPS_<crate>
# These lists are the dependencies of the <crate> that is to be built.
# Rust dependencies are listed bare (i.e. std, green) and native
# Rust dependencies are listed bare (i.e. std) and native
# dependencies have a "native:" prefix (i.e. native:hoedown). All deps
# will be built before the crate itself is built.
#
@@ -49,7 +49,7 @@
# automatically generated for all stage/host/target combinations.
################################################################################
TARGET_CRATES := libc std green native flate arena term \
TARGET_CRATES := libc std flate arena term \
serialize sync getopts collections test time rand \
log regex graphviz core rbml alloc rustrt \
unicode
@@ -66,8 +66,6 @@ DEPS_rustrt := alloc core libc collections native:rustrt_native
DEPS_std := core libc rand alloc collections rustrt sync unicode \
native:rust_builtin native:backtrace
DEPS_graphviz := std
DEPS_green := std native:context_switch
DEPS_native := std
DEPS_syntax := std term serialize log fmt_macros arena libc
DEPS_rustc_trans := rustc rustc_back rustc_llvm libc
DEPS_rustc := syntax flate arena serialize getopts rbml \
@@ -95,9 +93,9 @@ DEPS_regex := std
DEPS_regex_macros = rustc syntax std regex
DEPS_fmt_macros = std
TOOL_DEPS_compiletest := test getopts native
TOOL_DEPS_rustdoc := rustdoc native
TOOL_DEPS_rustc := rustc_trans native
TOOL_DEPS_compiletest := test getopts
TOOL_DEPS_rustdoc := rustdoc
TOOL_DEPS_rustc := rustc_trans
TOOL_SOURCE_compiletest := $(S)src/compiletest/compiletest.rs
TOOL_SOURCE_rustdoc := $(S)src/driver/driver.rs
TOOL_SOURCE_rustc := $(S)src/driver/driver.rs


@@ -9,8 +9,6 @@ Source layout:
| `libcore/` | The Rust core library |
| `libdebug/` | Debugging utilities |
| `libstd/` | The standard library (imported and linked by default) |
| `libgreen/` | The M:N runtime library |
| `libnative/` | The 1:1 runtime library |
| `libsyntax/` | The Rust parser and pretty-printer |
| `libtest/` | Rust's test-runner code |
| ------------------- | --------------------------------------------------------- |


@@ -1059,14 +1059,14 @@ An example of what will and will not work for `use` items:
```
# #![allow(unused_imports)]
use foo::native::start; // good: foo is at the root of the crate
use foo::core::iter; // good: foo is at the root of the crate
use foo::baz::foobaz; // good: foo is at the root of the crate
mod foo {
extern crate native;
extern crate core;
use foo::native::start; // good: foo is at crate root
// use native::start; // bad: native is not at the crate root
use foo::core::iter; // good: foo is at crate root
// use core::iter; // bad: core is not at the crate root
use self::baz::foobaz; // good: self refers to module 'foo'
use foo::bar::foobar; // good: foo is at crate root


@@ -8,6 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![no_start]
#[cfg(rustdoc)]
extern crate "rustdoc" as this;


@@ -73,7 +73,6 @@ extern crate libc;
// Allow testing this library
#[cfg(test)] extern crate native;
#[cfg(test)] #[phase(plugin, link)] extern crate std;
#[cfg(test)] #[phase(plugin, link)] extern crate log;


@@ -31,7 +31,6 @@
extern crate unicode;
extern crate alloc;
#[cfg(test)] extern crate native;
#[cfg(test)] extern crate test;
#[cfg(test)] #[phase(plugin, link)] extern crate std;


@@ -666,6 +666,8 @@ pub mod raw {
#[cfg(test)]
mod tests {
extern crate rustrt;
use std::cell::Cell;
use std::default::Default;
use std::mem;
@@ -949,9 +951,9 @@ mod tests {
#[test]
fn test_swap_remove_noncopyable() {
// Tests that we don't accidentally run destructors twice.
let mut v = vec![rt::exclusive::Exclusive::new(()),
rt::exclusive::Exclusive::new(()),
rt::exclusive::Exclusive::new(())];
let mut v = vec![rustrt::exclusive::Exclusive::new(()),
rustrt::exclusive::Exclusive::new(()),
rustrt::exclusive::Exclusive::new(())];
let mut _e = v.swap_remove(0);
assert_eq!(v.len(), 2);
_e = v.swap_remove(1);


@@ -1,259 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This is a basic event loop implementation not meant for any "real purposes"
//! other than testing the scheduler and proving that it's possible to have a
//! pluggable event loop.
//!
//! This implementation is also used as the fallback implementation of an event
//! loop if no other one is provided (and M:N scheduling is desired).
use self::Message::*;
use alloc::arc::Arc;
use std::sync::atomic;
use std::mem;
use std::rt::rtio::{EventLoop, RemoteCallback};
use std::rt::rtio::{PausableIdleCallback, Callback};
use std::rt::exclusive::Exclusive;
/// This is the only exported function from this module.
pub fn event_loop() -> Box<EventLoop + Send> {
box BasicLoop::new() as Box<EventLoop + Send>
}
struct BasicLoop {
work: Vec<proc(): Send>, // pending work
remotes: Vec<(uint, Box<Callback + Send>)>,
next_remote: uint,
messages: Arc<Exclusive<Vec<Message>>>,
idle: Option<Box<Callback + Send>>,
idle_active: Option<Arc<atomic::AtomicBool>>,
}
enum Message { RunRemote(uint), RemoveRemote(uint) }
impl BasicLoop {
fn new() -> BasicLoop {
BasicLoop {
work: vec![],
idle: None,
idle_active: None,
next_remote: 0,
remotes: vec![],
messages: Arc::new(Exclusive::new(Vec::new())),
}
}
/// Process everything in the work queue (continually)
fn work(&mut self) {
while self.work.len() > 0 {
for work in mem::replace(&mut self.work, vec![]).into_iter() {
work();
}
}
}
fn remote_work(&mut self) {
let messages = unsafe {
mem::replace(&mut *self.messages.lock(), Vec::new())
};
for message in messages.into_iter() {
self.message(message);
}
}
fn message(&mut self, message: Message) {
match message {
RunRemote(i) => {
match self.remotes.iter_mut().find(|& &(id, _)| id == i) {
Some(&(_, ref mut f)) => f.call(),
None => panic!("bad remote: {}", i),
}
}
RemoveRemote(i) => {
match self.remotes.iter().position(|&(id, _)| id == i) {
Some(i) => { self.remotes.remove(i).unwrap(); }
None => panic!("bad remote: {}", i),
}
}
}
}
/// Run the idle callback if one is registered
fn idle(&mut self) {
match self.idle {
Some(ref mut idle) => {
if self.idle_active.as_ref().unwrap().load(atomic::SeqCst) {
idle.call();
}
}
None => {}
}
}
fn has_idle(&self) -> bool {
self.idle.is_some() && self.idle_active.as_ref().unwrap().load(atomic::SeqCst)
}
}
impl EventLoop for BasicLoop {
fn run(&mut self) {
// Not exactly efficient, but it gets the job done.
while self.remotes.len() > 0 || self.work.len() > 0 || self.has_idle() {
self.work();
self.remote_work();
if self.has_idle() {
self.idle();
continue
}
unsafe {
let messages = self.messages.lock();
// We block here if we have no messages to process and we may
// receive a message at a later date
if self.remotes.len() > 0 && messages.len() == 0 &&
self.work.len() == 0 {
messages.wait()
}
}
}
}
fn callback(&mut self, f: proc():Send) {
self.work.push(f);
}
// FIXME: Seems like a really weird requirement to have an event loop provide.
fn pausable_idle_callback(&mut self, cb: Box<Callback + Send>)
-> Box<PausableIdleCallback + Send> {
rtassert!(self.idle.is_none());
self.idle = Some(cb);
let a = Arc::new(atomic::AtomicBool::new(true));
self.idle_active = Some(a.clone());
box BasicPausable { active: a } as Box<PausableIdleCallback + Send>
}
fn remote_callback(&mut self, f: Box<Callback + Send>)
-> Box<RemoteCallback + Send> {
let id = self.next_remote;
self.next_remote += 1;
self.remotes.push((id, f));
box BasicRemote::new(self.messages.clone(), id) as
Box<RemoteCallback + Send>
}
fn has_active_io(&self) -> bool { false }
}
struct BasicRemote {
queue: Arc<Exclusive<Vec<Message>>>,
id: uint,
}
impl BasicRemote {
fn new(queue: Arc<Exclusive<Vec<Message>>>, id: uint) -> BasicRemote {
BasicRemote { queue: queue, id: id }
}
}
impl RemoteCallback for BasicRemote {
fn fire(&mut self) {
let mut queue = unsafe { self.queue.lock() };
queue.push(RunRemote(self.id));
queue.signal();
}
}
impl Drop for BasicRemote {
fn drop(&mut self) {
let mut queue = unsafe { self.queue.lock() };
queue.push(RemoveRemote(self.id));
queue.signal();
}
}
struct BasicPausable {
active: Arc<atomic::AtomicBool>,
}
impl PausableIdleCallback for BasicPausable {
fn pause(&mut self) {
self.active.store(false, atomic::SeqCst);
}
fn resume(&mut self) {
self.active.store(true, atomic::SeqCst);
}
}
impl Drop for BasicPausable {
fn drop(&mut self) {
self.active.store(false, atomic::SeqCst);
}
}
#[cfg(test)]
mod test {
use std::rt::task::TaskOpts;
use basic;
use PoolConfig;
use SchedPool;
fn pool() -> SchedPool {
SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: basic::event_loop,
})
}
fn run(f: proc():Send) {
let mut pool = pool();
pool.spawn(TaskOpts::new(), f);
pool.shutdown();
}
#[test]
fn smoke() {
run(proc() {});
}
#[test]
fn some_channels() {
run(proc() {
let (tx, rx) = channel();
spawn(proc() {
tx.send(());
});
rx.recv();
});
}
#[test]
fn multi_thread() {
let mut pool = SchedPool::new(PoolConfig {
threads: 2,
event_loop_factory: basic::event_loop,
});
for _ in range(0u, 20) {
pool.spawn(TaskOpts::new(), proc() {
let (tx, rx) = channel();
spawn(proc() {
tx.send(());
});
rx.recv();
});
}
pool.shutdown();
}
}


@@ -1,325 +0,0 @@
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use stack::Stack;
use std::uint;
use std::mem::transmute;
use std::rt::stack;
use std::raw;
#[cfg(target_arch = "x86_64")]
use std::simd;
use libc;
// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing
// SSE regs. It would be marginally better not to do this. In C++ we
// use an attribute on a struct.
// FIXME #7761: It would be nice to define regs as `Box<Option<Registers>>`
// since the registers are sometimes empty, but the discriminant would
// then misalign the regs again.
pub struct Context {
/// Hold the registers while the task or scheduler is suspended
regs: Box<Registers>,
/// Lower bound and upper bound for the stack
stack_bounds: Option<(uint, uint)>,
}
pub type InitFn = extern "C" fn(uint, *mut (), *mut ()) -> !;
impl Context {
pub fn empty() -> Context {
Context {
regs: new_regs(),
stack_bounds: None,
}
}
/// Create a new context that will resume execution by running proc()
///
/// The `init` function will be run with `arg` and the `start` procedure
/// split up into code and env pointers. It is required that the `init`
/// function never return.
///
/// FIXME: this is basically an awful interface. The main reason for
/// this is to reduce, as much as possible, the number of allocations
/// made when a green task is spawned.
pub fn new(init: InitFn, arg: uint, start: proc():Send,
stack: &mut Stack) -> Context {
let sp: *const uint = stack.end();
let sp: *mut uint = sp as *mut uint;
// Save and then immediately load the current context,
// which we will then modify to call the given function when restored
let mut regs = new_regs();
initialize_call_frame(&mut *regs,
init,
arg,
unsafe { transmute(start) },
sp);
// Scheduler tasks don't have a stack in the "we allocated it" sense,
// but rather they run on pthreads stacks. We have complete control over
// them in terms of the code running on them (and hopefully they don't
// overflow). Additionally, their coroutine stacks are listed as being
// zero-length, so that's how we detect what's what here.
let stack_base: *const uint = stack.start();
let bounds = if sp as libc::uintptr_t == stack_base as libc::uintptr_t {
None
} else {
Some((stack_base as uint, sp as uint))
};
return Context {
regs: regs,
stack_bounds: bounds,
}
}
/* Switch contexts
Suspend the current execution context and resume another by
saving the register values of the executing thread to a Context
then loading the registers from a previously saved Context.
*/
pub fn swap(out_context: &mut Context, in_context: &Context) {
rtdebug!("swapping contexts");
let out_regs: &mut Registers = match out_context {
&Context { regs: box ref mut r, .. } => r
};
let in_regs: &Registers = match in_context {
&Context { regs: box ref r, .. } => r
};
rtdebug!("noting the stack limit and doing raw swap");
unsafe {
// Right before we switch to the new context, set the new context's
// stack limit in the OS-specified TLS slot. This also means that
// we cannot call any more rust functions after record_stack_bounds
// returns because they would all likely panic due to the limit being
// invalid for the current task. Lucky for us `rust_swap_registers`
// is a C function so we don't have to worry about that!
match in_context.stack_bounds {
Some((lo, hi)) => stack::record_rust_managed_stack_bounds(lo, hi),
// If we're going back to one of the original contexts or
// something that's possibly not a "normal task", then reset
// the stack limit to 0 to make morestack never panic
None => stack::record_rust_managed_stack_bounds(0, uint::MAX),
}
rust_swap_registers(out_regs, in_regs);
}
}
}
#[link(name = "context_switch", kind = "static")]
extern {
fn rust_swap_registers(out_regs: *mut Registers, in_regs: *const Registers);
}
// Register contexts used in various architectures
//
// These structures all represent a context of one task throughout its
// execution. Each struct is a representation of the architecture's register
// set. When swapping between tasks, these register sets are used to save off
// the current registers into one struct, and load them all from another.
//
// Note that this is only used for context switching, which means that some of
// the registers may go unused. For example, for architectures with
// callee/caller saved registers, the context will only reflect the callee-saved
// registers. This is because the caller saved registers are already stored
// elsewhere on the stack (if it was necessary anyway).
//
// Additionally, there may be fields on various architectures which are unused
// entirely because they only reflect what is theoretically possible for a
// "complete register set" to show, but user-space cannot alter these registers.
// An example of this would be the segment selectors for x86.
//
// These structures/functions are roughly in-sync with the source files inside
// of src/rt/arch/$arch. The only currently used function from those folders is
// the `rust_swap_registers` function, but that's only because for now segmented
// stacks are disabled.
#[cfg(target_arch = "x86")]
#[repr(C)]
struct Registers {
eax: u32, ebx: u32, ecx: u32, edx: u32,
ebp: u32, esi: u32, edi: u32, esp: u32,
cs: u16, ds: u16, ss: u16, es: u16, fs: u16, gs: u16,
eflags: u32, eip: u32
}
#[cfg(target_arch = "x86")]
fn new_regs() -> Box<Registers> {
box Registers {
eax: 0, ebx: 0, ecx: 0, edx: 0,
ebp: 0, esi: 0, edi: 0, esp: 0,
cs: 0, ds: 0, ss: 0, es: 0, fs: 0, gs: 0,
eflags: 0, eip: 0
}
}
#[cfg(target_arch = "x86")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
let sp = sp as *mut uint;
// x86 has interesting stack alignment requirements, so do some alignment
// plus some offsetting to figure out what the actual stack should be.
let sp = align_down(sp);
let sp = mut_offset(sp, -4);
unsafe { *mut_offset(sp, 2) = procedure.env as uint };
unsafe { *mut_offset(sp, 1) = procedure.code as uint };
unsafe { *mut_offset(sp, 0) = arg as uint };
let sp = mut_offset(sp, -1);
unsafe { *sp = 0 }; // The final return address
regs.esp = sp as u32;
regs.eip = fptr as u32;
// Last base pointer on the stack is 0
regs.ebp = 0;
}
// windows requires saving more registers (both general and XMM), so the windows
// register context must be larger.
#[cfg(all(windows, target_arch = "x86_64"))]
#[repr(C)]
struct Registers {
gpr:[libc::uintptr_t, ..14],
_xmm:[simd::u32x4, ..10]
}
#[cfg(all(not(windows), target_arch = "x86_64"))]
#[repr(C)]
struct Registers {
gpr:[libc::uintptr_t, ..10],
_xmm:[simd::u32x4, ..6]
}
#[cfg(all(windows, target_arch = "x86_64"))]
fn new_regs() -> Box<Registers> {
box() Registers {
gpr:[0,..14],
_xmm:[simd::u32x4(0,0,0,0),..10]
}
}
#[cfg(all(not(windows), target_arch = "x86_64"))]
fn new_regs() -> Box<Registers> {
box() Registers {
gpr:[0,..10],
_xmm:[simd::u32x4(0,0,0,0),..6]
}
}
#[cfg(target_arch = "x86_64")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
extern { fn rust_bootstrap_green_task(); }
// Redefinitions from rt/arch/x86_64/regs.h
static RUSTRT_RSP: uint = 1;
static RUSTRT_IP: uint = 8;
static RUSTRT_RBP: uint = 2;
static RUSTRT_R12: uint = 4;
static RUSTRT_R13: uint = 5;
static RUSTRT_R14: uint = 6;
static RUSTRT_R15: uint = 7;
let sp = align_down(sp);
let sp = mut_offset(sp, -1);
// The final return address. 0 indicates the bottom of the stack
unsafe { *sp = 0; }
rtdebug!("creating call frame");
rtdebug!("fptr {:#x}", fptr as libc::uintptr_t);
rtdebug!("arg {:#x}", arg);
rtdebug!("sp {}", sp);
// These registers are frobbed by rust_bootstrap_green_task into the right
// location so we can invoke the "real init function", `fptr`.
regs.gpr[RUSTRT_R12] = arg as libc::uintptr_t;
regs.gpr[RUSTRT_R13] = procedure.code as libc::uintptr_t;
regs.gpr[RUSTRT_R14] = procedure.env as libc::uintptr_t;
regs.gpr[RUSTRT_R15] = fptr as libc::uintptr_t;
// These registers are picked up by the regular context switch paths. These
// will put us in "mostly the right context" except for frobbing all the
// arguments to the right place. We have the small trampoline code inside of
// rust_bootstrap_green_task to do that.
regs.gpr[RUSTRT_RSP] = sp as libc::uintptr_t;
regs.gpr[RUSTRT_IP] = rust_bootstrap_green_task as libc::uintptr_t;
// Last base pointer on the stack should be 0
regs.gpr[RUSTRT_RBP] = 0;
}
#[cfg(target_arch = "arm")]
type Registers = [libc::uintptr_t, ..32];
#[cfg(target_arch = "arm")]
fn new_regs() -> Box<Registers> { box {[0, .. 32]} }
#[cfg(target_arch = "arm")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
extern { fn rust_bootstrap_green_task(); }
let sp = align_down(sp);
// sp of arm eabi is 8-byte aligned
let sp = mut_offset(sp, -2);
// The final return address. 0 indicates the bottom of the stack
unsafe { *sp = 0; }
// ARM uses the same technique as x86_64 to have a landing pad for the start
// of all new green tasks. Neither r1/r2 are saved on a context switch, so
// the shim will copy r3/r4 into r1/r2 and then execute the function in r5
regs[0] = arg as libc::uintptr_t; // r0
regs[3] = procedure.code as libc::uintptr_t; // r3
regs[4] = procedure.env as libc::uintptr_t; // r4
regs[5] = fptr as libc::uintptr_t; // r5
regs[13] = sp as libc::uintptr_t; // #52 sp, r13
regs[14] = rust_bootstrap_green_task as libc::uintptr_t; // #56 pc, r14 --> lr
}
#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
type Registers = [libc::uintptr_t, ..32];
#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
fn new_regs() -> Box<Registers> { box {[0, .. 32]} }
#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
procedure: raw::Procedure, sp: *mut uint) {
let sp = align_down(sp);
// sp of mips o32 is 8-byte aligned
let sp = mut_offset(sp, -2);
// The final return address. 0 indicates the bottom of the stack
unsafe { *sp = 0; }
regs[4] = arg as libc::uintptr_t;
regs[5] = procedure.code as libc::uintptr_t;
regs[6] = procedure.env as libc::uintptr_t;
regs[29] = sp as libc::uintptr_t;
regs[25] = fptr as libc::uintptr_t;
regs[31] = fptr as libc::uintptr_t;
}
fn align_down(sp: *mut uint) -> *mut uint {
let sp = (sp as uint) & !(16 - 1);
sp as *mut uint
}
// ptr::mut_offset is positive ints only
#[inline]
pub fn mut_offset<T>(ptr: *mut T, count: int) -> *mut T {
use std::mem::size_of;
(ptr as int + count * (size_of::<T>() as int)) as *mut T
}


@@ -1,44 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Coroutines represent nothing more than a context and a stack
// segment.
use context::Context;
use stack::{StackPool, Stack};
/// A coroutine is nothing more than a (register context, stack) pair.
pub struct Coroutine {
/// The segment of stack on which the task is currently running or
/// if the task is blocked, on which the task will resume
/// execution.
///
/// Servo needs this to be public in order to tell SpiderMonkey
/// about the stack bounds.
pub current_stack_segment: Stack,
/// Always valid if the task is alive and not running.
pub saved_context: Context
}
impl Coroutine {
pub fn empty() -> Coroutine {
Coroutine {
current_stack_segment: unsafe { Stack::dummy_stack() },
saved_context: Context::empty()
}
}
/// Destroy the coroutine and try to reuse its stack segment.
pub fn recycle(self, stack_pool: &mut StackPool) {
let Coroutine { current_stack_segment, .. } = self;
stack_pool.give_stack(current_stack_segment);
}
}


@@ -1,567 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The "green scheduling" library
//!
//! This library provides M:N threading for rust programs. Internally this has
//! the implementation of a green scheduler along with context switching and a
//! stack-allocation strategy. This can be optionally linked into Rust
//! programs in order to provide M:N functionality inside of 1:1 programs.
//!
//! # Architecture
//!
//! An M:N scheduling library implies that there are N OS threads upon which M
//! "green threads" are multiplexed. In other words, a set of green threads are
//! all run inside a pool of OS threads.
//!
//! With this design, you can achieve _concurrency_ by spawning many green
//! threads, and you can achieve _parallelism_ by running the green threads
//! simultaneously on multiple OS threads. Each OS thread is a candidate for
//! being scheduled on a different core (the source of parallelism), and then
//! all of the green threads cooperatively schedule amongst one another (the
//! source of concurrency).
//!
//! ## Schedulers
//!
//! In order to coordinate among green threads, each OS thread is primarily
//! running something which we call a Scheduler. Whenever a reference to a
//! Scheduler is made, it is synonymous with referencing one OS thread. Each
//! scheduler is bound to one and exactly one OS thread, and the thread that it
//! is bound to never changes.
//!
//! Each scheduler is connected to a pool of other schedulers (a `SchedPool`)
//! which is the thread pool term from above. A pool of schedulers all share the
//! work that they create. Furthermore, whenever a green thread is created (also
//! synonymously referred to as a green task), it is associated with a
//! `SchedPool` forevermore. A green thread cannot leave its scheduler pool.
//!
//! Schedulers can have at most one green thread running on them at a time. When
//! a scheduler is asleep on its event loop, there are no green tasks running on
//! the OS thread or the scheduler. The term "context switch" is used for when
//! the running green thread is swapped out, but this simply changes the one
//! green thread which is running on the scheduler.
//!
//! ## Green Threads
//!
//! A green thread can largely be summarized by a stack and a register context.
//! Whenever a green thread is spawned, it allocates a stack, and then prepares
//! a register context for execution. The green task may be executed across
//! multiple OS threads, but it will always use the same stack and it will carry
//! its register context across OS threads.
//!
//! Each green thread is cooperatively scheduled with other green threads.
//! Primarily, this means that there is no pre-emption of a green thread. The
//! major consequence of this design is that a green thread stuck in an infinite
//! loop will prevent all other green threads from running on that particular
//! scheduler.
//!
//! Scheduling events for green threads occur on communication and I/O
//! boundaries. For example, if a green task blocks waiting for a message on a
//! channel some other green thread can now run on the scheduler. This also has
//! the consequence that until a green thread performs any form of scheduling
//! event, it will be running on the same OS thread (unconditionally).
//!
//! ## Work Stealing
//!
//! With a pool of schedulers, a new green task has a number of options when
//! deciding where to run initially. The current implementation uses a concept
//! called work stealing in order to spread out work among schedulers.
//!
//! In a work-stealing model, each scheduler maintains a local queue of tasks to
//! run, and this queue is stolen from by other schedulers. Implementation-wise,
//! work stealing has some hairy parts, but from a user-perspective, work
//! stealing simply implies that, with M green threads and N schedulers where
//! M > N, it is very likely that all schedulers will be busy executing work.
//!
//! # Considerations when using libgreen
//!
//! An M:N runtime has both pros and cons, and there is no one answer as to
//! whether M:N or 1:1 is appropriate to use. As always, there are many
//! advantages and disadvantages between the two. Regardless of the workload,
//! however, there are some aspects of using green threads which you should be
//! aware of:
//!
//! * The largest concern when using libgreen is interoperating with native
//! code. Care should be taken when calling native code that will block the OS
//! thread as it will prevent further green tasks from being scheduled on the
//! OS thread.
//!
//! * Native code using thread-local-storage should be approached
//! with care. Green threads may migrate among OS threads at any time, so
//! native libraries using thread-local state may not always work.
//!
//! * Native synchronization primitives (e.g. pthread mutexes) will also not
//! work for green threads. The reason is that native primitives
//! often operate at an _os thread_ granularity, whereas green threads
//! operate on a more granular unit of work.
//!
//! * A green threading runtime is not fork-safe. If the process forks(), it
//! cannot expect to make reasonable progress by continuing to use green
//! threads.
//!
//! Note that these concerns do not mean that operating with native code is a
//! lost cause. These are simply concerns which should be considered when
//! invoking native code.
//!
//! # Starting with libgreen
//!
//! ```rust
//! extern crate green;
//!
//! #[start]
//! fn start(argc: int, argv: *const *const u8) -> int {
//! green::start(argc, argv, green::basic::event_loop, main)
//! }
//!
//! fn main() {
//! // this code is running in a pool of schedulers
//! }
//! ```
//!
//! > **Note**: The `main` function in this example does *not* have I/O
//! > support. The basic event loop does not provide any I/O support.
//!
//! # Using a scheduler pool
//!
//! This library adds a `GreenTaskBuilder` trait that extends the methods
//! available on `std::task::TaskBuilder` to allow spawning a green task,
//! possibly pinned to a particular scheduler thread:
//!
//! ```rust
//! extern crate green;
//!
//! # fn main() {
//! use std::task::TaskBuilder;
//! use green::{SchedPool, PoolConfig, GreenTaskBuilder};
//!
//! let mut config = PoolConfig::new();
//!
//! let mut pool = SchedPool::new(config);
//!
//! // Spawn tasks into the pool of schedulers
//! TaskBuilder::new().green(&mut pool).spawn(proc() {
//! // this code is running inside the pool of schedulers
//!
//! spawn(proc() {
//! // this code is also running inside the same scheduler pool
//! });
//! });
//!
//! // Dynamically add a new scheduler to the scheduler pool. This adds another
//! // OS thread that green threads can be multiplexed on to.
//! let mut handle = pool.spawn_sched();
//!
//! // Pin a task to the spawned scheduler
//! TaskBuilder::new().green_pinned(&mut pool, &mut handle).spawn(proc() {
//! /* ... */
//! });
//!
//! // Handles keep schedulers alive, so be sure to drop all handles before
//! // destroying the sched pool
//! drop(handle);
//!
//! // Required to shut down this scheduler pool.
//! // The task will panic if `shutdown` is not called.
//! pool.shutdown();
//! # }
//! ```
#![crate_name = "green"]
#![experimental]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/",
html_playground_url = "http://play.rust-lang.org/")]
#![feature(macro_rules, phase, default_type_params, globs)]
#![allow(deprecated)]
#[cfg(test)] #[phase(plugin, link)] extern crate log;
extern crate libc;
extern crate alloc;
use alloc::arc::Arc;
use std::mem::replace;
use std::os;
use std::rt::rtio;
use std::rt::thread::Thread;
use std::rt::task::TaskOpts;
use std::rt;
use std::sync::atomic::{SeqCst, AtomicUint, INIT_ATOMIC_UINT};
use std::sync::deque;
use std::task::{TaskBuilder, Spawner};
use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, PinnedTask, NewNeighbor};
use sleeper_list::SleeperList;
use stack::StackPool;
use task::GreenTask;
mod macros;
mod simple;
mod message_queue;
pub mod basic;
pub mod context;
pub mod coroutine;
pub mod sched;
pub mod sleeper_list;
pub mod stack;
pub mod task;
/// Set up a default runtime configuration, given compiler-supplied arguments.
///
/// This function will block until the entire pool of M:N schedulers has
/// exited. This function also requires a local task to be available.
///
/// # Arguments
///
/// * `argc` & `argv` - The argument vector. On Unix this information is used
/// by os::args.
/// * `main` - The initial procedure to run inside of the M:N scheduling pool.
/// Once this procedure exits, the scheduling pool will begin to shut
/// down. The entire pool (and this function) will only return once
/// all child tasks have finished executing.
///
/// # Return value
///
/// The return value is used as the process return code. 0 on success, 101 on
/// error.
pub fn start(argc: int, argv: *const *const u8,
event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
main: proc():Send) -> int {
rt::init(argc, argv);
let mut main = Some(main);
let mut ret = None;
simple::task().run(|| {
ret = Some(run(event_loop_factory, main.take().unwrap()));
}).destroy();
// unsafe is ok b/c we're sure that the runtime is gone
unsafe { rt::cleanup() }
ret.unwrap()
}
/// Execute the main function in a pool of M:N schedulers.
///
/// Configures the runtime according to the environment, by default using a task
/// scheduler with the same number of threads as cores. Returns a process exit
/// code.
///
/// This function will not return until all schedulers in the associated pool
/// have returned.
pub fn run(event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
main: proc():Send) -> int {
// Create a scheduler pool and spawn the main task into this pool. We will
// get notified over a channel when the main task exits.
let mut cfg = PoolConfig::new();
cfg.event_loop_factory = event_loop_factory;
let mut pool = SchedPool::new(cfg);
let (tx, rx) = channel();
let mut opts = TaskOpts::new();
opts.on_exit = Some(proc(r) tx.send(r));
opts.name = Some("<main>".into_maybe_owned());
pool.spawn(opts, main);
// Wait for the main task to return, and set the process error code
// appropriately.
if rx.recv().is_err() {
os::set_exit_status(rt::DEFAULT_ERROR_CODE);
}
// Now that we're sure all tasks are dead, shut down the pool of schedulers,
// waiting for them all to return.
pool.shutdown();
os::get_exit_status()
}
/// Configuration of how an M:N pool of schedulers is spawned.
pub struct PoolConfig {
/// The number of schedulers (OS threads) to spawn into this M:N pool.
pub threads: uint,
/// A factory function used to create new event loops. If this is not
/// specified then the default event loop factory is used.
pub event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
}
impl PoolConfig {
/// Returns the default configuration, as determined by the environment
/// variables of this process.
pub fn new() -> PoolConfig {
PoolConfig {
threads: rt::default_sched_threads(),
event_loop_factory: basic::event_loop,
}
}
}
/// A structure representing a handle to a pool of schedulers. This handle is
/// used to keep the pool alive and also reap the status from the pool.
pub struct SchedPool {
id: uint,
threads: Vec<Thread<()>>,
handles: Vec<SchedHandle>,
stealers: Vec<deque::Stealer<Box<task::GreenTask>>>,
next_friend: uint,
stack_pool: StackPool,
deque_pool: deque::BufferPool<Box<task::GreenTask>>,
sleepers: SleeperList,
factory: fn() -> Box<rtio::EventLoop + Send>,
task_state: TaskState,
tasks_done: Receiver<()>,
}
/// This is an internal state shared among a pool of schedulers. It is used to
/// keep track of how many tasks are currently running in the pool and to
/// send on a channel once the entire pool has been drained of all tasks.
#[deriving(Clone)]
pub struct TaskState {
cnt: Arc<AtomicUint>,
done: Sender<()>,
}
impl SchedPool {
/// Execute the main function in a pool of M:N schedulers.
///
/// This will configure the pool according to the `config` parameter, and
/// initially run `main` inside the pool of schedulers.
pub fn new(config: PoolConfig) -> SchedPool {
static POOL_ID: AtomicUint = INIT_ATOMIC_UINT;
let PoolConfig {
threads: nscheds,
event_loop_factory: factory
} = config;
assert!(nscheds > 0);
// The pool of schedulers that will be returned from this function
let (p, state) = TaskState::new();
let mut pool = SchedPool {
threads: vec![],
handles: vec![],
stealers: vec![],
id: POOL_ID.fetch_add(1, SeqCst),
sleepers: SleeperList::new(),
stack_pool: StackPool::new(),
deque_pool: deque::BufferPool::new(),
next_friend: 0,
factory: factory,
task_state: state,
tasks_done: p,
};
// Create a work queue for each scheduler, ntimes. Create an extra
// for the main thread if that flag is set. We won't steal from it.
let mut workers = Vec::with_capacity(nscheds);
let mut stealers = Vec::with_capacity(nscheds);
for _ in range(0, nscheds) {
let (w, s) = pool.deque_pool.deque();
workers.push(w);
stealers.push(s);
}
pool.stealers = stealers;
// Now that we've got all our work queues, create one scheduler per
// queue, spawn the scheduler into a thread, and be sure to keep a
// handle to the scheduler and the thread to keep them alive.
for worker in workers.into_iter() {
rtdebug!("inserting a regular scheduler");
let mut sched = box Scheduler::new(pool.id,
(pool.factory)(),
worker,
pool.stealers.clone(),
pool.sleepers.clone(),
pool.task_state.clone());
pool.handles.push(sched.make_handle());
pool.threads.push(Thread::start(proc() { sched.bootstrap(); }));
}
return pool;
}
/// Creates a new task configured to run inside of this pool of schedulers.
/// This is useful to create a task which can then be sent to a specific
/// scheduler created by `spawn_sched` (and possibly pin it to that
/// scheduler).
#[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"]
pub fn task(&mut self, opts: TaskOpts, f: proc():Send) -> Box<GreenTask> {
GreenTask::configure(&mut self.stack_pool, opts, f)
}
/// Spawns a new task into this pool of schedulers, using the specified
/// options to configure the new task which is spawned.
///
/// New tasks are spawned in a round-robin fashion to the schedulers in this
/// pool, but tasks can certainly migrate among schedulers once they're in
/// the pool.
#[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"]
pub fn spawn(&mut self, opts: TaskOpts, f: proc():Send) {
let task = self.task(opts, f);
// Figure out someone to send this task to
let idx = self.next_friend;
self.next_friend += 1;
if self.next_friend >= self.handles.len() {
self.next_friend = 0;
}
// Jettison the task away!
self.handles[idx].send(TaskFromFriend(task));
}
/// Spawns a new scheduler into this M:N pool. A handle is returned to the
/// scheduler for use. The scheduler will not exit as long as this handle is
/// active.
///
/// The scheduler spawned will participate in work stealing with all of the
/// other schedulers currently in the scheduler pool.
pub fn spawn_sched(&mut self) -> SchedHandle {
let (worker, stealer) = self.deque_pool.deque();
self.stealers.push(stealer.clone());
// Tell all existing schedulers about this new scheduler so they can all
// steal work from it
for handle in self.handles.iter_mut() {
handle.send(NewNeighbor(stealer.clone()));
}
// Create the new scheduler, using the same sleeper list as all the
// other schedulers as well as having a stealer handle to all other
// schedulers.
let mut sched = box Scheduler::new(self.id,
(self.factory)(),
worker,
self.stealers.clone(),
self.sleepers.clone(),
self.task_state.clone());
let ret = sched.make_handle();
self.handles.push(sched.make_handle());
self.threads.push(Thread::start(proc() { sched.bootstrap() }));
return ret;
}
/// Consumes the pool of schedulers, waiting for all tasks to exit and all
/// schedulers to shut down.
///
/// This function is required to be called in order to drop a pool of
/// schedulers; it is considered an error to drop a pool without calling
/// this method.
///
/// This only waits for all tasks in *this pool* of schedulers to exit; any
/// native tasks or extern pools will not be waited on.
pub fn shutdown(mut self) {
self.stealers = vec![];
// Wait for everyone to exit. We may have reached a 0-task count
// multiple times in the past, meaning there could be several buffered
// messages on the `tasks_done` port. We're guaranteed that after *some*
// message the current task count will be 0, so we just receive in a
// loop until everything is totally dead.
while self.task_state.active() {
self.tasks_done.recv();
}
// Now that everyone's gone, tell everything to shut down.
for mut handle in replace(&mut self.handles, vec![]).into_iter() {
handle.send(Shutdown);
}
for thread in replace(&mut self.threads, vec![]).into_iter() {
thread.join();
}
}
}
impl TaskState {
fn new() -> (Receiver<()>, TaskState) {
let (tx, rx) = channel();
(rx, TaskState {
cnt: Arc::new(AtomicUint::new(0)),
done: tx,
})
}
fn increment(&mut self) {
self.cnt.fetch_add(1, SeqCst);
}
fn active(&self) -> bool {
self.cnt.load(SeqCst) != 0
}
fn decrement(&mut self) {
let prev = self.cnt.fetch_sub(1, SeqCst);
if prev == 1 {
self.done.send(());
}
}
}
impl Drop for SchedPool {
fn drop(&mut self) {
if self.threads.len() > 0 {
panic!("dropping a M:N scheduler pool that wasn't shut down");
}
}
}
/// A spawner for green tasks
pub struct GreenSpawner<'a>{
pool: &'a mut SchedPool,
handle: Option<&'a mut SchedHandle>
}
impl<'a> Spawner for GreenSpawner<'a> {
#[inline]
fn spawn(self, opts: TaskOpts, f: proc():Send) {
let GreenSpawner { pool, handle } = self;
match handle {
None => pool.spawn(opts, f),
Some(h) => h.send(PinnedTask(pool.task(opts, f)))
}
}
}
/// An extension trait adding `green` configuration methods to `TaskBuilder`.
pub trait GreenTaskBuilder {
fn green<'a>(self, &'a mut SchedPool) -> TaskBuilder<GreenSpawner<'a>>;
fn green_pinned<'a>(self, &'a mut SchedPool, &'a mut SchedHandle)
-> TaskBuilder<GreenSpawner<'a>>;
}
impl<S: Spawner> GreenTaskBuilder for TaskBuilder<S> {
fn green<'a>(self, pool: &'a mut SchedPool) -> TaskBuilder<GreenSpawner<'a>> {
self.spawner(GreenSpawner {pool: pool, handle: None})
}
fn green_pinned<'a>(self, pool: &'a mut SchedPool, handle: &'a mut SchedHandle)
-> TaskBuilder<GreenSpawner<'a>> {
self.spawner(GreenSpawner {pool: pool, handle: Some(handle)})
}
}
#[cfg(test)]
mod test {
use std::task::TaskBuilder;
use super::{SchedPool, PoolConfig, GreenTaskBuilder};
#[test]
fn test_green_builder() {
let mut pool = SchedPool::new(PoolConfig::new());
let res = TaskBuilder::new().green(&mut pool).try(proc() {
"Success!".to_string()
});
assert_eq!(res.ok().unwrap(), "Success!".to_string());
pool.shutdown();
}
}


@@ -1,118 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: this file probably shouldn't exist
// ignore-lexer-test FIXME #15677
#![macro_escape]
use std::fmt;
// Indicates whether we should perform expensive sanity checks, including rtassert!
// FIXME: Once the runtime matures, remove the `true` below to turn off rtassert, etc.
pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert);
macro_rules! rterrln (
($($arg:tt)*) => ( {
format_args!(::macros::dumb_println, $($arg)*)
} )
)
// Some basic logging. Enabled by passing `--cfg rtdebug` to the libstd build.
macro_rules! rtdebug (
($($arg:tt)*) => ( {
if cfg!(rtdebug) {
rterrln!($($arg)*)
}
})
)
macro_rules! rtassert (
( $arg:expr ) => ( {
if ::macros::ENFORCE_SANITY {
if !$arg {
rtabort!(" assertion failed: {}", stringify!($arg));
}
}
} )
)
macro_rules! rtabort (
($($arg:tt)*) => ( {
::macros::abort(format!($($arg)*).as_slice());
} )
)
pub fn dumb_println(args: &fmt::Arguments) {
use std::rt;
let mut w = rt::Stderr;
let _ = writeln!(&mut w, "{}", args);
}
pub fn abort(msg: &str) -> ! {
let msg = if !msg.is_empty() { msg } else { "aborted" };
let hash = msg.chars().fold(0, |accum, val| accum + (val as uint) );
let quote = match hash % 10 {
0 => "
It was from the artists and poets that the pertinent answers came, and I
know that panic would have broken loose had they been able to compare notes.
As it was, lacking their original letters, I half suspected the compiler of
having asked leading questions, or of having edited the correspondence in
corroboration of what he had latently resolved to see.",
1 => "
There are not many persons who know what wonders are opened to them in the
stories and visions of their youth; for when as children we listen and dream,
we think but half-formed thoughts, and when as men we try to remember, we are
dulled and prosaic with the poison of life. But some of us awake in the night
with strange phantasms of enchanted hills and gardens, of fountains that sing
in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
down to sleeping cities of bronze and stone, and of shadowy companies of heroes
that ride caparisoned white horses along the edges of thick forests; and then
we know that we have looked back through the ivory gates into that world of
wonder which was ours before we were wise and unhappy.",
2 => "
Instead of the poems I had hoped for, there came only a shuddering blackness
and ineffable loneliness; and I saw at last a fearful truth which no one had
ever dared to breathe before -- the unwhisperable secret of secrets -- the fact
that this city of stone and stridor is not a sentient perpetuation of Old New
York as London is of Old London and Paris of Old Paris, but that it is in fact
quite dead, its sprawling body imperfectly embalmed and infested with queer
animate things which have nothing to do with it as it was in life.",
3 => "
The ocean ate the last of the land and poured into the smoking gulf, thereby
giving up all it had ever conquered. From the new-flooded lands it flowed
again, uncovering death and decay; and from its ancient and immemorial bed it
trickled loathsomely, uncovering nighted secrets of the years when Time was
young and the gods unborn. Above the waves rose weedy remembered spires. The
moon laid pale lilies of light on dead London, and Paris stood up from its damp
grave to be sanctified with star-dust. Then rose spires and monoliths that were
weedy but not remembered; terrible spires and monoliths of lands that men never
knew were lands...",
4 => "
There was a night when winds from unknown spaces whirled us irresistibly into
limitless vacuum beyond all thought and entity. Perceptions of the most
maddeningly untransmissible sort thronged upon us; perceptions of infinity
which at the time convulsed us with joy, yet which are now partly lost to my
memory and partly incapable of presentation to others.",
_ => "You've met with a terrible fate, haven't you?"
};
rterrln!("{}", "");
rterrln!("{}", quote);
rterrln!("{}", "");
rterrln!("fatal runtime error: {}", msg);
abort();
fn abort() -> ! {
use std::intrinsics;
unsafe { intrinsics::abort() }
}
}


@@ -1,67 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::PopResult::*;
use alloc::arc::Arc;
use std::sync::mpsc_queue as mpsc;
use std::kinds::marker;
pub enum PopResult<T> {
Inconsistent,
Empty,
Data(T),
}
pub fn queue<T: Send>() -> (Consumer<T>, Producer<T>) {
let a = Arc::new(mpsc::Queue::new());
(Consumer { inner: a.clone(), noshare: marker::NoSync },
Producer { inner: a, noshare: marker::NoSync })
}
pub struct Producer<T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
pub struct Consumer<T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
impl<T: Send> Consumer<T> {
pub fn pop(&self) -> PopResult<T> {
match self.inner.pop() {
mpsc::Inconsistent => Inconsistent,
mpsc::Empty => Empty,
mpsc::Data(t) => Data(t),
}
}
pub fn casual_pop(&self) -> Option<T> {
match self.inner.pop() {
mpsc::Inconsistent => None,
mpsc::Empty => None,
mpsc::Data(t) => Some(t),
}
}
}
impl<T: Send> Producer<T> {
pub fn push(&self, t: T) {
self.inner.push(t);
}
}
impl<T: Send> Clone for Producer<T> {
fn clone(&self) -> Producer<T> {
Producer { inner: self.inner.clone(), noshare: marker::NoSync }
}
}

File diff suppressed because it is too large.


@@ -1,96 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A small module implementing a simple "runtime" used for bootstrapping a rust
//! scheduler pool and then interacting with it.
use std::any::Any;
use std::mem;
use std::rt::Runtime;
use std::rt::local::Local;
use std::rt::mutex::NativeMutex;
use std::rt::task::{Task, BlockedTask, TaskOpts};
struct SimpleTask {
lock: NativeMutex,
awoken: bool,
}
impl Runtime for SimpleTask {
// Implement the simple tasks of descheduling and rescheduling, but only in
// a small number of cases.
fn deschedule(mut self: Box<SimpleTask>,
times: uint,
mut cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>) {
assert!(times == 1);
let me = &mut *self as *mut SimpleTask;
let cur_dupe = &mut *cur_task as *mut Task;
cur_task.put_runtime(self);
let task = BlockedTask::block(cur_task);
// See libnative/task.rs for what's going on here with the `awoken`
// field and the while loop around wait()
unsafe {
let guard = (*me).lock.lock();
(*me).awoken = false;
match f(task) {
Ok(()) => {
while !(*me).awoken {
guard.wait();
}
}
Err(task) => { mem::forget(task.wake()); }
}
drop(guard);
cur_task = mem::transmute(cur_dupe);
}
Local::put(cur_task);
}
fn reawaken(mut self: Box<SimpleTask>, mut to_wake: Box<Task>) {
let me = &mut *self as *mut SimpleTask;
to_wake.put_runtime(self);
unsafe {
mem::forget(to_wake);
let guard = (*me).lock.lock();
(*me).awoken = true;
guard.signal();
}
}
// These functions are all unimplemented and panic as a result. This is on
// purpose. A "simple task" is just that, a very simple task that can't
// really do a whole lot. The only purpose of the task is to get us off our
// feet and running.
fn yield_now(self: Box<SimpleTask>, _cur_task: Box<Task>) { panic!() }
fn maybe_yield(self: Box<SimpleTask>, _cur_task: Box<Task>) { panic!() }
fn spawn_sibling(self: Box<SimpleTask>,
_cur_task: Box<Task>,
_opts: TaskOpts,
_f: proc():Send) {
panic!()
}
fn stack_bounds(&self) -> (uint, uint) { panic!() }
fn stack_guard(&self) -> Option<uint> { panic!() }
fn can_block(&self) -> bool { true }
fn wrap(self: Box<SimpleTask>) -> Box<Any+'static> { panic!() }
}
pub fn task() -> Box<Task> {
let mut task = box Task::new();
task.put_runtime(box SimpleTask {
lock: unsafe {NativeMutex::new()},
awoken: false,
});
return task;
}


@@ -1,46 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Maintains a shared list of sleeping schedulers. Schedulers
//! use this to wake each other up.
use std::sync::mpmc_bounded_queue::Queue;
use sched::SchedHandle;
pub struct SleeperList {
q: Queue<SchedHandle>,
}
impl SleeperList {
pub fn new() -> SleeperList {
SleeperList{q: Queue::with_capacity(8*1024)}
}
pub fn push(&mut self, value: SchedHandle) {
assert!(self.q.push(value))
}
pub fn pop(&mut self) -> Option<SchedHandle> {
self.q.pop()
}
pub fn casual_pop(&mut self) -> Option<SchedHandle> {
self.q.pop()
}
}
impl Clone for SleeperList {
fn clone(&self) -> SleeperList {
SleeperList {
q: self.q.clone()
}
}
}


@@ -1,215 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ptr;
use std::sync::atomic;
use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable,
MapNonStandardFlags, getenv};
use libc;
/// A task's stack. The name "Stack" is a vestige of segmented stacks.
pub struct Stack {
buf: Option<MemoryMap>,
min_size: uint,
valgrind_id: libc::c_uint,
}
// Try to use MAP_STACK on platforms that support it (it's what we're doing
// anyway), but some platforms don't support it at all. For example, it appears
// that there's a bug in FreeBSD where MAP_STACK implies MAP_FIXED (so it always
// panics): http://lists.freebsd.org/pipermail/freebsd-bugs/2011-July/044840.html
//
// DragonFly BSD also seems to suffer from the same problem. When MAP_STACK is
// used, it returns the same `ptr` multiple times.
#[cfg(not(any(windows, target_os = "freebsd", target_os = "dragonfly")))]
static STACK_FLAGS: libc::c_int = libc::MAP_STACK | libc::MAP_PRIVATE |
libc::MAP_ANON;
#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
static STACK_FLAGS: libc::c_int = libc::MAP_PRIVATE | libc::MAP_ANON;
#[cfg(windows)]
static STACK_FLAGS: libc::c_int = 0;
impl Stack {
/// Allocate a new stack of `size`. If size = 0, this will panic. Use
/// `dummy_stack` if you want a zero-sized stack.
pub fn new(size: uint) -> Stack {
// Map in a stack. Eventually we might be able to handle stack
// allocation failure, which would fail to spawn the task. But there's
// not many sensible things to do on OOM. Panic seems fine (and is
// what the old stack allocation did).
let stack = match MemoryMap::new(size, &[MapReadable, MapWritable,
MapNonStandardFlags(STACK_FLAGS)]) {
Ok(map) => map,
Err(e) => panic!("mmap for stack of size {} failed: {}", size, e)
};
// Change the last page to be inaccessible. This is to provide safety;
// when an FFI function overflows it will (hopefully) hit this guard
// page. It isn't guaranteed, but that's why FFI is unsafe. buf.data is
// guaranteed to be aligned properly.
if !protect_last_page(&stack) {
panic!("Could not memory-protect guard page. stack={}, errno={}",
stack.data(), errno());
}
let mut stk = Stack {
buf: Some(stack),
min_size: size,
valgrind_id: 0
};
// FIXME: Using the FFI to call a C macro. Slow
stk.valgrind_id = unsafe {
rust_valgrind_stack_register(stk.start() as *const libc::uintptr_t,
stk.end() as *const libc::uintptr_t)
};
return stk;
}
/// Create a 0-length stack which starts (and ends) at 0.
pub unsafe fn dummy_stack() -> Stack {
Stack {
buf: None,
min_size: 0,
valgrind_id: 0
}
}
/// Point to the last writable byte of the stack
pub fn guard(&self) -> *const uint {
(self.start() as uint + page_size()) as *const uint
}
/// Point to the low end of the allocated stack
pub fn start(&self) -> *const uint {
self.buf.as_ref().map(|m| m.data() as *const uint)
.unwrap_or(ptr::null())
}
/// Point one uint beyond the high end of the allocated stack
pub fn end(&self) -> *const uint {
self.buf.as_ref().map(|buf| unsafe {
buf.data().offset(buf.len() as int) as *const uint
}).unwrap_or(ptr::null())
}
}
#[cfg(unix)]
fn protect_last_page(stack: &MemoryMap) -> bool {
unsafe {
// This may seem backwards: the start of the segment is the last page?
// Yes! The stack grows from higher addresses (the end of the allocated
// block) to lower addresses (the start of the allocated block).
let last_page = stack.data() as *mut libc::c_void;
libc::mprotect(last_page, page_size() as libc::size_t,
libc::PROT_NONE) != -1
}
}
#[cfg(windows)]
fn protect_last_page(stack: &MemoryMap) -> bool {
unsafe {
// see above
let last_page = stack.data() as *mut libc::c_void;
let mut old_prot: libc::DWORD = 0;
libc::VirtualProtect(last_page, page_size() as libc::SIZE_T,
libc::PAGE_NOACCESS,
&mut old_prot as libc::LPDWORD) != 0
}
}
impl Drop for Stack {
fn drop(&mut self) {
unsafe {
// FIXME: Using the FFI to call a C macro. Slow
rust_valgrind_stack_deregister(self.valgrind_id);
}
}
}
pub struct StackPool {
// Ideally this would be some data structure that preserved ordering on
// Stack.min_size.
stacks: Vec<Stack>,
}
impl StackPool {
pub fn new() -> StackPool {
StackPool {
stacks: vec![],
}
}
pub fn take_stack(&mut self, min_size: uint) -> Stack {
// Ideally this would be a binary search
match self.stacks.iter().position(|s| min_size <= s.min_size) {
Some(idx) => self.stacks.swap_remove(idx).unwrap(),
None => Stack::new(min_size)
}
}
pub fn give_stack(&mut self, stack: Stack) {
if self.stacks.len() <= max_cached_stacks() {
self.stacks.push(stack)
}
}
}
fn max_cached_stacks() -> uint {
static AMT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
match AMT.load(atomic::SeqCst) {
0 => {}
n => return n - 1,
}
let amt = getenv("RUST_MAX_CACHED_STACKS").and_then(|s| from_str(s.as_slice()));
// This default corresponds to 20M of cache per scheduler (at the
// default size).
let amt = amt.unwrap_or(10);
// 0 is our sentinel value, so ensure that we'll never see 0 after
// initialization has run
AMT.store(amt + 1, atomic::SeqCst);
return amt;
}
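// A worked example of the sentinel encoding above (illustrative): with
// RUST_MAX_CACHED_STACKS=4 we store 5 into AMT, and every later call loads 5
// and returns 4. A requested limit of 0 also round-trips (store 1, return 0),
// so a loaded 0 can only ever mean "not yet initialized".

// The functions below are assumed (judging by their names) to be thin FFI
// shims around Valgrind's VALGRIND_STACK_REGISTER/DEREGISTER client requests.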
extern {
fn rust_valgrind_stack_register(start: *const libc::uintptr_t,
end: *const libc::uintptr_t) -> libc::c_uint;
fn rust_valgrind_stack_deregister(id: libc::c_uint);
}
#[cfg(test)]
mod tests {
use super::StackPool;
#[test]
fn stack_pool_caches() {
let mut p = StackPool::new();
let s = p.take_stack(10);
p.give_stack(s);
let s = p.take_stack(4);
assert_eq!(s.min_size, 10);
p.give_stack(s);
let s = p.take_stack(14);
assert_eq!(s.min_size, 14);
p.give_stack(s);
}
#[test]
fn stack_pool_caches_exact() {
let mut p = StackPool::new();
let mut s = p.take_stack(10);
s.valgrind_id = 100;
p.give_stack(s);
let s = p.take_stack(10);
assert_eq!(s.min_size, 10);
assert_eq!(s.valgrind_id, 100);
}
}

View File

@ -1,602 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Green Task implementation
//!
//! This module contains the glue to the libstd runtime necessary to integrate
//! M:N scheduling. This GreenTask structure is hidden as a trait object in all
//! rust tasks and virtual calls are made in order to interface with it.
//!
//! Each green task contains a scheduler if it is currently running, and it also
//! contains the rust task itself in order to juggle around ownership of the
//! values.
pub use self::TaskType::*;
pub use self::Home::*;
use std::any::Any;
use std::mem;
use std::raw;
use std::rt::Runtime;
use std::rt::local::Local;
use std::rt::mutex::NativeMutex;
use std::rt::stack;
use std::rt::task::{Task, BlockedTask, TaskOpts};
use std::rt;
use context::Context;
use coroutine::Coroutine;
use sched::{Scheduler, SchedHandle, RunOnce};
use stack::StackPool;
/// The necessary fields needed to keep track of a green task (as opposed to a
/// 1:1 task).
pub struct GreenTask {
/// Coroutine that this task is running on, otherwise known as the register
/// context and the stack that this task owns. This field is optional to
/// relinquish ownership back to a scheduler to recycle stacks at a later
/// date.
pub coroutine: Option<Coroutine>,
/// Optional handle back into the home sched pool of this task. This field
/// is lazily initialized.
pub handle: Option<SchedHandle>,
/// Slot for maintaining ownership of a scheduler. If a task is running,
/// this value will be Some(sched) where the task is running on "sched".
pub sched: Option<Box<Scheduler>>,
/// Temporary ownership slot of a std::rt::task::Task object. This is used
/// to squirrel that libstd task away while we're performing green task
/// operations.
pub task: Option<Box<Task>>,
/// Dictates whether this is a sched task or a normal green task
pub task_type: TaskType,
/// Home pool that this task was spawned into. This field is lazily
/// initialized when the task is first scheduled, and is used to
/// make sure that tasks are always woken up in the correct pool of
/// schedulers.
pub pool_id: uint,
// See the comments in the scheduler about why this is necessary
pub nasty_deschedule_lock: NativeMutex,
}
pub enum TaskType {
TypeGreen(Option<Home>),
TypeSched,
}
pub enum Home {
AnySched,
HomeSched(SchedHandle),
}
/// Trampoline code for all new green tasks which are running around. This
/// function is passed through to Context::new as the initial rust landing pad
/// for all green tasks. This code is actually called after the initial context
/// switch onto a green thread.
///
/// The first argument to this function is the `Box<GreenTask>` pointer, and
/// the next two arguments are the user-provided procedure for running code.
///
/// The goal for having this weird-looking function is to reduce the number of
/// allocations done on a green-task startup as much as possible.
extern fn bootstrap_green_task(task: uint, code: *mut (), env: *mut ()) -> ! {
// Acquire ownership of the `proc()`
let start: proc() = unsafe {
mem::transmute(raw::Procedure { code: code, env: env })
};
// Acquire ownership of the `Box<GreenTask>`
let mut task: Box<GreenTask> = unsafe { mem::transmute(task) };
// First code after swap to this new context. Run our cleanup job
task.pool_id = {
let sched = task.sched.as_mut().unwrap();
sched.run_cleanup_job();
sched.task_state.increment();
sched.pool_id
};
// Convert our green task to a libstd task and then execute the code
// requested. This is the "try/catch" block for this green task and
// is the wrapper for *all* code run in the task.
let mut start = Some(start);
let task = task.swap().run(|| start.take().unwrap()()).destroy();
// Once the function has exited, it's time to run the termination
// routine. This means we need to context switch one more time but
// clean ourselves up on the other end. Since we have no way of
// preserving a handle to the GreenTask down to this point, this
// unfortunately must call `GreenTask::convert`. In order to avoid
// this we could add a `terminate` function to the `Runtime` trait
// in libstd, but that seems less appropriate since the conversion
// method exists.
GreenTask::convert(task).terminate();
}
impl GreenTask {
/// Creates a new green task which is not homed to any particular scheduler
/// and will not have any contained Task structure.
pub fn new(stack_pool: &mut StackPool,
stack_size: Option<uint>,
start: proc():Send) -> Box<GreenTask> {
GreenTask::new_homed(stack_pool, stack_size, AnySched, start)
}
/// Creates a new task (like `new`), but specifies the home for the new task.
pub fn new_homed(stack_pool: &mut StackPool,
stack_size: Option<uint>,
home: Home,
start: proc():Send) -> Box<GreenTask> {
// Allocate ourselves a GreenTask structure
let mut ops = GreenTask::new_typed(None, TypeGreen(Some(home)));
// Allocate a stack for us to run on
let stack_size = stack_size.unwrap_or_else(|| rt::min_stack());
let mut stack = stack_pool.take_stack(stack_size);
let context = Context::new(bootstrap_green_task, ops.as_uint(), start,
&mut stack);
// Package everything up in a coroutine and return
ops.coroutine = Some(Coroutine {
current_stack_segment: stack,
saved_context: context,
});
return ops;
}
/// Creates a new green task with the specified coroutine and type, this is
/// useful when creating scheduler tasks.
pub fn new_typed(coroutine: Option<Coroutine>,
task_type: TaskType) -> Box<GreenTask> {
box GreenTask {
pool_id: 0,
coroutine: coroutine,
task_type: task_type,
sched: None,
handle: None,
nasty_deschedule_lock: unsafe { NativeMutex::new() },
task: Some(box Task::new()),
}
}
/// Creates a new green task with the given configuration options for the
/// contained Task object. The given stack pool is also used to allocate a
/// new stack for this task.
pub fn configure(pool: &mut StackPool,
opts: TaskOpts,
f: proc():Send) -> Box<GreenTask> {
let TaskOpts { name, stack_size, on_exit } = opts;
let mut green = GreenTask::new(pool, stack_size, f);
{
let task = green.task.as_mut().unwrap();
task.name = name;
task.death.on_exit = on_exit;
}
return green;
}
/// Just like the `maybe_take_runtime` function, this function should *not*
/// exist. Usage of this function is _strongly_ discouraged. This is an
/// absolute last resort necessary for converting a libstd task to a green
/// task.
///
/// This function will assert that the task is indeed a green task before
/// returning (and will kill the entire process if this is wrong).
pub fn convert(mut task: Box<Task>) -> Box<GreenTask> {
match task.maybe_take_runtime::<GreenTask>() {
Some(mut green) => {
green.put_task(task);
green
}
None => rtabort!("not a green task any more?"),
}
}
pub fn give_home(&mut self, new_home: Home) {
match self.task_type {
TypeGreen(ref mut home) => { *home = Some(new_home); }
TypeSched => rtabort!("type error: used SchedTask as GreenTask"),
}
}
pub fn take_unwrap_home(&mut self) -> Home {
match self.task_type {
TypeGreen(ref mut home) => home.take().unwrap(),
TypeSched => rtabort!("type error: used SchedTask as GreenTask"),
}
}
// New utility functions for homes.
pub fn is_home_no_tls(&self, sched: &Scheduler) -> bool {
match self.task_type {
TypeGreen(Some(AnySched)) => { false }
TypeGreen(Some(HomeSched(SchedHandle { sched_id: ref id, .. }))) => {
*id == sched.sched_id()
}
TypeGreen(None) => { rtabort!("task without home"); }
TypeSched => {
// Awe yea
rtabort!("type error: expected: TypeGreen, found: TaskSched");
}
}
}
pub fn homed(&self) -> bool {
match self.task_type {
TypeGreen(Some(AnySched)) => { false }
TypeGreen(Some(HomeSched(SchedHandle { .. }))) => { true }
TypeGreen(None) => {
rtabort!("task without home");
}
TypeSched => {
rtabort!("type error: expected: TypeGreen, found: TaskSched");
}
}
}
pub fn is_sched(&self) -> bool {
match self.task_type {
TypeGreen(..) => false, TypeSched => true,
}
}
// Unsafe functions for transferring ownership of this GreenTask across
// context switches
pub fn as_uint(&self) -> uint {
self as *const GreenTask as uint
}
pub unsafe fn from_uint(val: uint) -> Box<GreenTask> {
mem::transmute(val)
}
// Runtime glue functions and helpers
pub fn put_with_sched(mut self: Box<GreenTask>, sched: Box<Scheduler>) {
assert!(self.sched.is_none());
self.sched = Some(sched);
self.put();
}
pub fn put_task(&mut self, task: Box<Task>) {
assert!(self.task.is_none());
self.task = Some(task);
}
pub fn swap(mut self: Box<GreenTask>) -> Box<Task> {
let mut task = self.task.take().unwrap();
task.put_runtime(self);
return task;
}
pub fn put(self: Box<GreenTask>) {
assert!(self.sched.is_some());
Local::put(self.swap());
}
fn terminate(mut self: Box<GreenTask>) -> ! {
let sched = self.sched.take().unwrap();
sched.terminate_current_task(self)
}
// This function is used to remotely wakeup this green task back on to its
// original pool of schedulers. In order to do so, each task arranges a
// SchedHandle upon descheduling to be available for sending itself back to
// the original pool.
//
// Note that there is an interesting transfer of ownership going on here. We
// must relinquish ownership of the green task, but then also send the task
// over the handle back to the original scheduler. In order to safely do
// this, we leverage the already-present "nasty descheduling lock". The
// reason for doing this is that each task will bounce on this lock after
// resuming from a context switch. By holding the lock over the enqueueing
// of the task, we're guaranteed that the SchedHandle's memory will be valid
// for this entire function.
//
// An alternative would include having incredibly cheaply cloneable handles,
// but right now a SchedHandle is something like 6 allocations, so it is
// *not* a cheap operation to clone a handle. Until the day comes that we
// need to optimize this, a lock should do just fine (it's completely
// uncontended except for when the task is rescheduled).
fn reawaken_remotely(mut self: Box<GreenTask>) {
unsafe {
let mtx = &mut self.nasty_deschedule_lock as *mut NativeMutex;
let handle = self.handle.as_mut().unwrap() as *mut SchedHandle;
let _guard = (*mtx).lock();
(*handle).send(RunOnce(self));
}
}
}
impl Runtime for GreenTask {
fn yield_now(mut self: Box<GreenTask>, cur_task: Box<Task>) {
self.put_task(cur_task);
let sched = self.sched.take().unwrap();
sched.yield_now(self);
}
fn maybe_yield(mut self: Box<GreenTask>, cur_task: Box<Task>) {
self.put_task(cur_task);
let sched = self.sched.take().unwrap();
sched.maybe_yield(self);
}
fn deschedule(mut self: Box<GreenTask>,
times: uint,
cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>) {
self.put_task(cur_task);
let mut sched = self.sched.take().unwrap();
// In order for this task to be reawoken in all possible contexts, we
// may need a handle back in to the current scheduler. When we're woken
// up in anything other than the local scheduler pool, this handle is
// used to send this task back into the scheduler pool.
if self.handle.is_none() {
self.handle = Some(sched.make_handle());
self.pool_id = sched.pool_id;
}
// This code is pretty standard, except for the usage of
// `GreenTask::convert`. Right now if we use `reawaken` directly it will
// expect for there to be a task in local TLS, but that is not true for
// this deschedule block (because the scheduler must retain ownership of
// the task while the cleanup job is running). In order to get around
// this for now, we invoke the scheduler directly with the converted
// Task => GreenTask structure.
if times == 1 {
sched.deschedule_running_task_and_then(self, |sched, task| {
match f(task) {
Ok(()) => {}
Err(t) => {
t.wake().map(|t| {
sched.enqueue_task(GreenTask::convert(t))
});
}
}
});
} else {
sched.deschedule_running_task_and_then(self, |sched, task| {
for task in task.make_selectable(times) {
match f(task) {
Ok(()) => {},
Err(task) => {
task.wake().map(|t| {
sched.enqueue_task(GreenTask::convert(t))
});
break
}
}
}
});
}
}
fn reawaken(mut self: Box<GreenTask>, to_wake: Box<Task>) {
self.put_task(to_wake);
assert!(self.sched.is_none());
// Optimistically look for a local task, but if one's not available to
// inspect (in order to see if it's in the same sched pool as we are),
// then just use our remote wakeup routine and carry on!
let mut running_task: Box<Task> = match Local::try_take() {
Some(task) => task,
None => return self.reawaken_remotely()
};
// Waking up a green thread is a bit of a tricky situation. We have no
// guarantee about where the current task is running. The options we
// have for where this current task is running are:
//
// 1. Our original scheduler pool
// 2. Some other scheduler pool
// 3. Something that isn't a scheduler pool
//
// In order to figure out what case we're in, this is the reason that
// the `maybe_take_runtime` function exists. Using this function we can
// dynamically check to see which of these cases is the current
// situation and then dispatch accordingly.
//
// In case 1, we just use the local scheduler to resume ourselves
// immediately (if a rescheduling is possible).
//
// In case 2 and 3, we need to remotely reawaken ourself in order to be
// transplanted back to the correct scheduler pool.
match running_task.maybe_take_runtime::<GreenTask>() {
Some(mut running_green_task) => {
running_green_task.put_task(running_task);
let sched = running_green_task.sched.take().unwrap();
if sched.pool_id == self.pool_id {
sched.run_task(running_green_task, self);
} else {
self.reawaken_remotely();
// put that thing back where it came from!
running_green_task.put_with_sched(sched);
}
}
None => {
self.reawaken_remotely();
Local::put(running_task);
}
}
}
fn spawn_sibling(mut self: Box<GreenTask>,
cur_task: Box<Task>,
opts: TaskOpts,
f: proc():Send) {
self.put_task(cur_task);
// First, set up a bomb which when it goes off will restore the local
// task unless it's disarmed. This will allow us to gracefully panic from
// inside of `configure` which allocates a new task.
struct Bomb { inner: Option<Box<GreenTask>> }
impl Drop for Bomb {
fn drop(&mut self) {
let _ = self.inner.take().map(|task| task.put());
}
}
let mut bomb = Bomb { inner: Some(self) };
// Spawns a task into the current scheduler. We allocate the new task's
// stack from the scheduler's stack pool, and then configure it
// according to `opts`. Afterwards we bootstrap it immediately by
// switching to it.
//
// Upon returning, our task is back in TLS and we're good to return.
let sibling = {
let sched = bomb.inner.as_mut().unwrap().sched.as_mut().unwrap();
GreenTask::configure(&mut sched.stack_pool, opts, f)
};
let mut me = bomb.inner.take().unwrap();
let sched = me.sched.take().unwrap();
sched.run_task(me, sibling)
}
fn stack_bounds(&self) -> (uint, uint) {
let c = self.coroutine.as_ref()
.expect("GreenTask.stack_bounds called without a coroutine");
// Don't return the red zone as part of the usable stack of this task,
// it's essentially an implementation detail.
(c.current_stack_segment.start() as uint + stack::RED_ZONE,
c.current_stack_segment.end() as uint)
}
fn stack_guard(&self) -> Option<uint> {
let c = self.coroutine.as_ref()
.expect("GreenTask.stack_guard called without a coroutine");
Some(c.current_stack_segment.guard() as uint)
}
fn can_block(&self) -> bool { false }
fn wrap(self: Box<GreenTask>) -> Box<Any+'static> {
self as Box<Any+'static>
}
}
#[cfg(test)]
mod tests {
use std::rt::local::Local;
use std::rt::task::Task;
use std::task;
use std::rt::task::TaskOpts;
use super::super::{PoolConfig, SchedPool};
use super::GreenTask;
fn spawn_opts(opts: TaskOpts, f: proc():Send) {
let mut pool = SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: super::super::basic::event_loop,
});
pool.spawn(opts, f);
pool.shutdown();
}
#[test]
fn smoke() {
let (tx, rx) = channel();
spawn_opts(TaskOpts::new(), proc() {
tx.send(());
});
rx.recv();
}
#[test]
fn smoke_panic() {
let (tx, rx) = channel::<int>();
spawn_opts(TaskOpts::new(), proc() {
let _tx = tx;
panic!()
});
assert_eq!(rx.recv_opt(), Err(()));
}
#[test]
fn smoke_opts() {
let mut opts = TaskOpts::new();
opts.name = Some("test".into_maybe_owned());
opts.stack_size = Some(20 * 4096);
let (tx, rx) = channel();
opts.on_exit = Some(proc(r) tx.send(r));
spawn_opts(opts, proc() {});
assert!(rx.recv().is_ok());
}
#[test]
fn smoke_opts_panic() {
let mut opts = TaskOpts::new();
let (tx, rx) = channel();
opts.on_exit = Some(proc(r) tx.send(r));
spawn_opts(opts, proc() { panic!() });
assert!(rx.recv().is_err());
}
#[test]
fn yield_test() {
let (tx, rx) = channel();
spawn_opts(TaskOpts::new(), proc() {
for _ in range(0u, 10) { task::deschedule(); }
tx.send(());
});
rx.recv();
}
#[test]
fn spawn_children() {
let (tx1, rx) = channel();
spawn_opts(TaskOpts::new(), proc() {
let (tx2, rx) = channel();
spawn(proc() {
let (tx3, rx) = channel();
spawn(proc() {
tx3.send(());
});
rx.recv();
tx2.send(());
});
rx.recv();
tx1.send(());
});
rx.recv();
}
#[test]
fn spawn_inherits() {
let (tx, rx) = channel();
spawn_opts(TaskOpts::new(), proc() {
spawn(proc() {
let mut task: Box<Task> = Local::take();
match task.maybe_take_runtime::<GreenTask>() {
Some(ops) => {
task.put_runtime(ops);
}
None => panic!(),
}
Local::put(task);
tx.send(());
});
});
rx.recv();
}
}

View File

@ -83,7 +83,6 @@ extern crate core;
#[cfg(test)] extern crate std;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate native;
pub use self::Nullable::*;

View File

@ -1,116 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::{c_char, c_int};
use libc;
use std::mem;
use std::ptr::{null, null_mut};
use std::rt::rtio;
use std::rt::rtio::IoError;
use super::net;
pub struct GetAddrInfoRequest;
impl GetAddrInfoRequest {
pub fn run(host: Option<&str>, servname: Option<&str>,
hint: Option<rtio::AddrinfoHint>)
-> Result<Vec<rtio::AddrinfoInfo>, IoError>
{
assert!(host.is_some() || servname.is_some());
let c_host = host.map(|x| x.to_c_str());
let c_host = c_host.as_ref().map(|x| x.as_ptr()).unwrap_or(null());
let c_serv = servname.map(|x| x.to_c_str());
let c_serv = c_serv.as_ref().map(|x| x.as_ptr()).unwrap_or(null());
let hint = hint.map(|hint| {
libc::addrinfo {
ai_flags: hint.flags as c_int,
ai_family: hint.family as c_int,
ai_socktype: 0,
ai_protocol: 0,
ai_addrlen: 0,
ai_canonname: null_mut(),
ai_addr: null_mut(),
ai_next: null_mut()
}
});
let hint_ptr = hint.as_ref().map_or(null(), |x| {
x as *const libc::addrinfo
});
let mut res = null_mut();
// Make the call
let s = unsafe {
getaddrinfo(c_host, c_serv, hint_ptr, &mut res)
};
// Error?
if s != 0 {
return Err(get_error(s));
}
// Collect all the results we found
let mut addrs = Vec::new();
let mut rp = res;
while rp.is_not_null() {
unsafe {
let addr = match net::sockaddr_to_addr(mem::transmute((*rp).ai_addr),
(*rp).ai_addrlen as uint) {
Ok(a) => a,
Err(e) => return Err(e)
};
addrs.push(rtio::AddrinfoInfo {
address: addr,
family: (*rp).ai_family as uint,
socktype: 0,
protocol: 0,
flags: (*rp).ai_flags as uint
});
rp = (*rp).ai_next as *mut libc::addrinfo;
}
}
unsafe { freeaddrinfo(res); }
Ok(addrs)
}
}
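// Illustrative sketch (not part of the original file): a minimal caller for
// the request above, using only the signature shown. "localhost" is an
// arbitrary example host.
#[allow(dead_code)]
fn count_localhost_addrs() -> uint {
    match GetAddrInfoRequest::run(Some("localhost"), None, None) {
        Ok(addrs) => addrs.len(),
        Err(..) => 0,
    }
}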
extern "system" {
fn getaddrinfo(node: *const c_char, service: *const c_char,
hints: *const libc::addrinfo,
res: *mut *mut libc::addrinfo) -> c_int;
fn freeaddrinfo(res: *mut libc::addrinfo);
#[cfg(not(windows))]
fn gai_strerror(errcode: c_int) -> *const c_char;
}
#[cfg(windows)]
fn get_error(_: c_int) -> IoError {
net::last_error()
}
#[cfg(not(windows))]
fn get_error(s: c_int) -> IoError {
use std::c_str::CString;
let err_str = unsafe {
CString::new(gai_strerror(s), false).as_str().unwrap().to_string()
};
IoError {
code: s as uint,
extra: 0,
detail: Some(err_str),
}
}

File diff suppressed because it is too large

View File

@ -1,155 +0,0 @@
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The native I/O and threading crate
//!
//! This crate contains an implementation of 1:1 scheduling for a "native"
//! runtime. In addition, all I/O provided by this crate is the thread-blocking
//! version of I/O.
//!
//! # Starting with libnative
//!
//! ```rust
//! extern crate native;
//!
//! #[start]
//! fn start(argc: int, argv: *const *const u8) -> int {
//! native::start(argc, argv, main)
//! }
//!
//! fn main() {
//! // this code is running on the main OS thread
//! }
//! ```
//!
//! # Force spawning a native task
//!
//! ```rust
//! extern crate native;
//!
//! use std::task::TaskBuilder;
//! use native::NativeTaskBuilder;
//!
//! fn main() {
//! // We're not sure whether this main function is run in 1:1 or M:N mode.
//!
//! TaskBuilder::new().native().spawn(proc() {
//! // this code is guaranteed to be run on a native thread
//! });
//! }
//! ```
#![crate_name = "native"]
#![experimental]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![deny(unused_results, unused_must_use)]
#![allow(non_camel_case_types)]
#![allow(unknown_features)]
#![feature(default_type_params, lang_items, slicing_syntax, globs)]
// NB this crate explicitly does *not* allow glob imports, please seriously
// consider whether they're needed before adding that feature here (the
// answer is that you don't need them)
#![feature(macro_rules, unsafe_destructor, default_type_params)]
extern crate alloc;
extern crate libc;
use std::os;
use std::rt;
use std::str;
pub use task::NativeTaskBuilder;
pub mod task;
#[cfg(any(windows, android))]
static OS_DEFAULT_STACK_ESTIMATE: uint = 1 << 20;
#[cfg(all(unix, not(android)))]
static OS_DEFAULT_STACK_ESTIMATE: uint = 2 * (1 << 20);
#[lang = "start"]
#[cfg(not(test))]
pub fn lang_start(main: *const u8, argc: int, argv: *const *const u8) -> int {
use std::mem;
start(argc, argv, proc() {
let main: extern "Rust" fn() = unsafe { mem::transmute(main) };
main();
})
}
/// Executes the given procedure after initializing the runtime with the given
/// argc/argv.
///
/// This procedure is guaranteed to run on the thread calling this function, but
/// the stack bounds for this rust task will *not* be set. Care must be taken
/// for this function to not overflow its stack.
///
/// This function will only return once *all* native threads in the system have
/// exited.
pub fn start(argc: int, argv: *const *const u8, main: proc()) -> int {
let something_around_the_top_of_the_stack = 1;
let addr = &something_around_the_top_of_the_stack as *const int;
let my_stack_top = addr as uint;
// FIXME #11359 we just assume that this thread has a stack of a
// certain size, and estimate that there's at most 20KB of stack
// frames above our current position.
let my_stack_bottom = my_stack_top + 20000 - OS_DEFAULT_STACK_ESTIMATE;
// When using libgreen, one of the first things that we do is to turn off
// the SIGPIPE signal (set it to ignore). By default, some platforms will
// send a *signal* when an EPIPE error would otherwise be delivered. This
// runtime doesn't install a SIGPIPE handler, so the default disposition
// would kill the program, which isn't exactly what we want!
//
// Hence, we set SIGPIPE to ignore when the program starts up in order to
// prevent this problem.
#[cfg(windows)] fn ignore_sigpipe() {}
#[cfg(unix)] fn ignore_sigpipe() {
use libc;
use libc::funcs::posix01::signal::signal;
unsafe {
assert!(signal(libc::SIGPIPE, libc::SIG_IGN) != -1);
}
}
ignore_sigpipe();
rt::init(argc, argv);
let mut exit_code = None;
let mut main = Some(main);
let mut task = task::new((my_stack_bottom, my_stack_top),
rt::thread::main_guard_page());
task.name = Some(str::Slice("<main>"));
drop(task.run(|| {
unsafe {
rt::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top);
}
exit_code = Some(run(main.take().unwrap()));
}).destroy());
unsafe { rt::cleanup(); }
// If the exit code wasn't set, then the task block must have panicked.
return exit_code.unwrap_or(rt::DEFAULT_ERROR_CODE);
}
/// Executes a procedure on the current thread in a Rust task context.
///
/// This function has all of the same details as `start` except for a different
/// number of arguments.
pub fn run(main: proc()) -> int {
main();
os::get_exit_status()
}

View File

@ -1,376 +0,0 @@
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Tasks implemented on top of OS threads
//!
//! This module contains the implementation of the 1:1 threading module required
//! by rust tasks. This implements the necessary API traits laid out by std::rt
//! in order to spawn new tasks and deschedule the current task.
use std::any::Any;
use std::mem;
use std::rt::bookkeeping;
use std::rt::local::Local;
use std::rt::mutex::NativeMutex;
use std::rt::stack;
use std::rt::task::{Task, BlockedTask, TaskOpts};
use std::rt::thread::Thread;
use std::rt;
use std::task::{TaskBuilder, Spawner};
/// Creates a new Task which is ready to execute as a 1:1 task.
pub fn new(stack_bounds: (uint, uint), stack_guard: uint) -> Box<Task> {
let mut task = box Task::new();
let mut ops = ops();
ops.stack_bounds = stack_bounds;
ops.stack_guard = stack_guard;
task.put_runtime(ops);
return task;
}
fn ops() -> Box<Ops> {
box Ops {
lock: unsafe { NativeMutex::new() },
awoken: false,
// these *should* get overwritten
stack_bounds: (0, 0),
stack_guard: 0
}
}
/// A spawner for native tasks
pub struct NativeSpawner;
impl Spawner for NativeSpawner {
fn spawn(self, opts: TaskOpts, f: proc():Send) {
let TaskOpts { name, stack_size, on_exit } = opts;
let mut task = box Task::new();
task.name = name;
task.death.on_exit = on_exit;
let stack = stack_size.unwrap_or(rt::min_stack());
let task = task;
let ops = ops();
// Note that this increment must happen *before* the spawn in order to
// guarantee that if this task exits it will always end up waiting for
// the spawned task to exit.
let token = bookkeeping::increment();
// Spawning a new OS thread guarantees that __morestack will never get
// triggered, but we must manually set up the actual stack bounds once
// this function starts executing. This raises the lower limit by a bit
// because by the time that this function is executing we've already
// consumed at least a little bit of stack (we don't know the exact byte
// address at which our stack started).
Thread::spawn_stack(stack, proc() {
let something_around_the_top_of_the_stack = 1;
let addr = &something_around_the_top_of_the_stack as *const int;
let my_stack = addr as uint;
unsafe {
stack::record_os_managed_stack_bounds(my_stack - stack + 1024,
my_stack);
}
let mut ops = ops;
ops.stack_guard = rt::thread::current_guard_page();
ops.stack_bounds = (my_stack - stack + 1024, my_stack);
let mut f = Some(f);
let mut task = task;
task.put_runtime(ops);
drop(task.run(|| { f.take().unwrap()() }).destroy());
drop(token);
})
}
}
/// An extension trait adding a `native` configuration method to `TaskBuilder`.
pub trait NativeTaskBuilder {
fn native(self) -> TaskBuilder<NativeSpawner>;
}
impl<S: Spawner> NativeTaskBuilder for TaskBuilder<S> {
fn native(self) -> TaskBuilder<NativeSpawner> {
self.spawner(NativeSpawner)
}
}
// This structure is the glue between channels and the 1:1 scheduling mode. This
// structure is allocated once per task.
struct Ops {
lock: NativeMutex, // native synchronization
awoken: bool, // used to prevent spurious wakeups
// This field holds the known bounds of the stack in (lo, hi) form. Not all
// native tasks necessarily know their precise bounds, hence this is
// optional.
stack_bounds: (uint, uint),
stack_guard: uint
}
impl rt::Runtime for Ops {
fn yield_now(self: Box<Ops>, mut cur_task: Box<Task>) {
// put the task back in TLS and then invoke the OS thread yield
cur_task.put_runtime(self);
Local::put(cur_task);
Thread::yield_now();
}
fn maybe_yield(self: Box<Ops>, mut cur_task: Box<Task>) {
// just put the task back in TLS, on OS threads we never need to
// opportunistically yield b/c the OS will do that for us (preemption)
cur_task.put_runtime(self);
Local::put(cur_task);
}
fn wrap(self: Box<Ops>) -> Box<Any+'static> {
self as Box<Any+'static>
}
fn stack_bounds(&self) -> (uint, uint) { self.stack_bounds }
fn stack_guard(&self) -> Option<uint> {
if self.stack_guard != 0 {
Some(self.stack_guard)
} else {
None
}
}
fn can_block(&self) -> bool { true }
// This function gets a little interesting. There are a few safety and
// ownership violations going on here, but this is all done in the name of
// shared state. Additionally, all of the violations are protected with a
// mutex, so in theory there are no races.
//
// The first thing we need to do is to get a pointer to the task's internal
// mutex. This address will not be changing (because the task is allocated
// on the heap). We must have this handle separately because the task will
// have its ownership transferred to the given closure. We're guaranteed,
// however, that this memory will remain valid because *this* is the current
// task's execution thread.
//
// The next weird part is where ownership of the task actually goes. We
// relinquish it to the `f` blocking function, but upon returning this
// function needs to replace the task back in TLS. There is no communication
// from the wakeup thread back to this thread about the task pointer, and
// there's really no need to. In order to get around this, we cast the task
// to a `uint` which is then used at the end of this function to cast back
// to a `Box<Task>` object. Naturally, this looks like it violates
// ownership semantics in that there may be two `Box<Task>` objects.
//
// The fun part is that the wakeup half of this implementation knows to
// "forget" the task on the other end. This means that the awakening half of
// things silently relinquishes ownership back to this thread, but not in a
// way that the compiler can understand. The task's memory is always valid
// for both tasks because these operations are all done inside of a mutex.
//
// You'll also find that if blocking fails (the `f` function hands the
// BlockedTask back to us), we will `mem::forget` the handles. The
// reasoning for this is the same logic as above in that the task silently
// transfers ownership via the `uint`, not through normal compiler
// semantics.
//
// On a mildly unrelated note, it should also be pointed out that OS
// condition variables are susceptible to spurious wakeups, which we need to
// be ready for. In order to accommodate this fact, we have an extra
// `awoken` field which indicates whether we were actually woken up via some
// invocation of `reawaken`. This flag is only ever accessed inside the
// lock, so there's no need to make it atomic.
fn deschedule(mut self: Box<Ops>,
times: uint,
mut cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>) {
let me = &mut *self as *mut Ops;
cur_task.put_runtime(self);
unsafe {
let cur_task_dupe = &mut *cur_task as *mut Task;
let task = BlockedTask::block(cur_task);
if times == 1 {
let guard = (*me).lock.lock();
(*me).awoken = false;
match f(task) {
Ok(()) => {
while !(*me).awoken {
guard.wait();
}
}
Err(task) => { mem::forget(task.wake()); }
}
} else {
let iter = task.make_selectable(times);
let guard = (*me).lock.lock();
(*me).awoken = false;
// Apply the given closure to all of the "selectable tasks",
// bailing on the first one that produces an error. Note that
// care must be taken because when an error occurs, we
// may not own the task, so we may still have to wait for the
// task to become available. In other words, if task.wake()
// returns `None`, then someone else has ownership and we must
// wait for their signal.
match iter.map(f).filter_map(|a| a.err()).next() {
None => {}
Some(task) => {
match task.wake() {
Some(task) => {
mem::forget(task);
(*me).awoken = true;
}
None => {}
}
}
}
while !(*me).awoken {
guard.wait();
}
}
// re-acquire ownership of the task
cur_task = mem::transmute(cur_task_dupe);
}
// put the task back in TLS, and everything is as it once was.
Local::put(cur_task);
}
// See the comments on `deschedule` for why the task is forgotten here, and
// why it's valid to do so.
fn reawaken(mut self: Box<Ops>, mut to_wake: Box<Task>) {
unsafe {
let me = &mut *self as *mut Ops;
to_wake.put_runtime(self);
mem::forget(to_wake);
let guard = (*me).lock.lock();
(*me).awoken = true;
guard.signal();
}
}
fn spawn_sibling(self: Box<Ops>,
mut cur_task: Box<Task>,
opts: TaskOpts,
f: proc():Send) {
cur_task.put_runtime(self);
Local::put(cur_task);
NativeSpawner.spawn(opts, f);
}
}
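// Illustrative timeline (not part of the original file) of one blocked/woken
// round-trip through the `deschedule` and `reawaken` implementations above:
//
//   blocking thread                        waking thread
//   ---------------                        -------------
//   deschedule(1, task, f)
//     lock; awoken = false
//     f(blocked_task) returns Ok(())
//     while !awoken { guard.wait() }       blocked_task.wake() -> reawaken
//                                            lock; awoken = true; signal()
//     reclaim task; Local::put(task)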
#[cfg(test)]
mod tests {
use std::rt::local::Local;
use std::rt::task::{Task, TaskOpts};
use std::task;
use std::task::{TaskBuilder, Spawner};
use super::{Ops, NativeTaskBuilder, NativeSpawner};
#[test]
fn smoke() {
let (tx, rx) = channel();
spawn(proc() {
tx.send(());
});
rx.recv();
}
#[test]
fn smoke_panic() {
let (tx, rx) = channel::<()>();
spawn(proc() {
let _tx = tx;
panic!()
});
assert_eq!(rx.recv_opt(), Err(()));
}
#[test]
fn smoke_opts() {
let mut opts = TaskOpts::new();
opts.name = Some("test".into_maybe_owned());
opts.stack_size = Some(20 * 4096);
let (tx, rx) = channel();
opts.on_exit = Some(proc(r) tx.send(r));
NativeSpawner.spawn(opts, proc() {});
assert!(rx.recv().is_ok());
}
#[test]
fn smoke_opts_panic() {
let mut opts = TaskOpts::new();
let (tx, rx) = channel();
opts.on_exit = Some(proc(r) tx.send(r));
NativeSpawner.spawn(opts, proc() { panic!() });
assert!(rx.recv().is_err());
}
#[test]
fn yield_test() {
let (tx, rx) = channel();
spawn(proc() {
for _ in range(0u, 10) { task::deschedule(); }
tx.send(());
});
rx.recv();
}
#[test]
fn spawn_children() {
let (tx1, rx) = channel();
spawn(proc() {
let (tx2, rx) = channel();
spawn(proc() {
let (tx3, rx) = channel();
spawn(proc() {
tx3.send(());
});
rx.recv();
tx2.send(());
});
rx.recv();
tx1.send(());
});
rx.recv();
}
#[test]
fn spawn_inherits() {
let (tx, rx) = channel();
TaskBuilder::new().spawner(NativeSpawner).spawn(proc() {
spawn(proc() {
let mut task: Box<Task> = Local::take();
match task.maybe_take_runtime::<Ops>() {
Some(ops) => {
task.put_runtime(ops);
}
None => panic!(),
}
Local::put(task);
tx.send(());
});
});
rx.recv();
}
#[test]
fn test_native_builder() {
let res = TaskBuilder::new().native().try(proc() {
"Success!".to_string()
});
assert_eq!(res.ok().unwrap(), "Success!".to_string());
}
}

View File

@ -33,7 +33,6 @@ extern crate core;
#[cfg(test)] #[phase(plugin, link)] extern crate std;
#[cfg(test)] #[phase(plugin, link)] extern crate log;
#[cfg(test)] extern crate native;
use core::prelude::*;

View File

@ -198,10 +198,6 @@ pub fn phase_2_configure_and_expand(sess: &Session,
*sess.features.borrow_mut() = features;
});
let any_exe = sess.crate_types.borrow().iter().any(|ty| {
*ty == config::CrateTypeExecutable
});
// strip before expansion to allow macros to depend on
// configuration variables, e.g. in
//
@ -215,8 +211,7 @@ pub fn phase_2_configure_and_expand(sess: &Session,
krate = time(time_passes, "crate injection", krate, |krate|
syntax::std_inject::maybe_inject_crates_ref(krate,
sess.opts.alt_std_name.clone(),
any_exe));
sess.opts.alt_std_name.clone()));
let mut addl_plugins = Some(addl_plugins);
let Plugins { macros, registrars }

View File

@ -30,7 +30,6 @@ extern crate collections;
#[cfg(test)] extern crate "rustrt" as realrustrt;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate native;
#[cfg(test)] #[phase(plugin, link)] extern crate std;
@ -39,11 +38,6 @@ pub use self::unwind::{begin_unwind, begin_unwind_fmt};
use core::prelude::*;
use alloc::boxed::Box;
use core::any::Any;
use task::{Task, BlockedTask, TaskOpts};
mod macros;
mod at_exit_imp;
@ -60,46 +54,11 @@ pub mod exclusive;
pub mod local;
pub mod local_data;
pub mod mutex;
pub mod rtio;
pub mod stack;
pub mod task;
pub mod thread;
pub mod unwind;
/// The interface to the current runtime.
///
/// This trait is used as the abstraction between 1:1 and M:N scheduling. The
/// two independent crates, libnative and libgreen, both have objects which
/// implement this trait. The goal of this trait is to encompass all the
/// fundamental differences in functionality between the 1:1 and M:N runtime
/// modes.
pub trait Runtime {
// Necessary scheduling functions, used for channels and blocking I/O
// (sometimes).
fn yield_now(self: Box<Self>, cur_task: Box<Task>);
fn maybe_yield(self: Box<Self>, cur_task: Box<Task>);
fn deschedule(self: Box<Self>,
times: uint,
cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>);
fn reawaken(self: Box<Self>, to_wake: Box<Task>);
// Miscellaneous calls which are very different depending on what context
// you're in.
fn spawn_sibling(self: Box<Self>,
cur_task: Box<Task>,
opts: TaskOpts,
f: proc():Send);
/// The (low, high) edges of the current stack.
fn stack_bounds(&self) -> (uint, uint); // (lo, hi)
/// The last writable byte of the stack next to the guard page
fn stack_guard(&self) -> Option<uint>;
fn can_block(&self) -> bool;
// FIXME: This is a serious code smell and this should not exist at all.
fn wrap(self: Box<Self>) -> Box<Any+'static>;
}
/// The default error code of the rust runtime if the main task panics instead
/// of exiting cleanly.
pub const DEFAULT_ERROR_CODE: int = 101;

View File

@ -53,14 +53,14 @@ impl Local<local_ptr::Borrowed<Task>> for Task {
#[cfg(test)]
mod test {
use std::prelude::*;
use std::rt::thread::Thread;
use thread::Thread;
use super::*;
use task::Task;
#[test]
fn thread_local_task_smoke_test() {
Thread::start(proc() {
let task = box Task::new();
let task = box Task::new(None, None);
Local::put(task);
let task: Box<Task> = Local::take();
cleanup_task(task);
@ -70,11 +70,11 @@ mod test {
#[test]
fn thread_local_task_two_instances() {
Thread::start(proc() {
let task = box Task::new();
let task = box Task::new(None, None);
Local::put(task);
let task: Box<Task> = Local::take();
cleanup_task(task);
let task = box Task::new();
let task = box Task::new(None, None);
Local::put(task);
let task: Box<Task> = Local::take();
cleanup_task(task);
@ -84,7 +84,7 @@ mod test {
#[test]
fn borrow_smoke_test() {
Thread::start(proc() {
let task = box Task::new();
let task = box Task::new(None, None);
Local::put(task);
unsafe {
@ -98,7 +98,7 @@ mod test {
#[test]
fn borrow_with_return() {
Thread::start(proc() {
let task = box Task::new();
let task = box Task::new(None, None);
Local::put(task);
{
@ -113,7 +113,7 @@ mod test {
#[test]
fn try_take() {
Thread::start(proc() {
let task = box Task::new();
let task = box Task::new(None, None);
Local::put(task);
let t: Box<Task> = Local::try_take().unwrap();

View File

@ -33,7 +33,7 @@
//! # Example
//!
//! ```rust
//! use std::rt::mutex::{NativeMutex, StaticNativeMutex, NATIVE_MUTEX_INIT};
//! use rustrt::mutex::{NativeMutex, StaticNativeMutex, NATIVE_MUTEX_INIT};
//!
//! // Use a statically initialized mutex
//! static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
@ -108,7 +108,7 @@ impl StaticNativeMutex {
/// # Example
///
/// ```rust
/// use std::rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
/// use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
/// static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
/// unsafe {
/// let _guard = LOCK.lock();
@ -225,7 +225,7 @@ impl NativeMutex {
/// # Example
///
/// ```rust
/// use std::rt::mutex::NativeMutex;
/// use rustrt::mutex::NativeMutex;
/// unsafe {
/// let mut lock = NativeMutex::new();
///
@ -653,7 +653,7 @@ mod test {
use std::mem::drop;
use super::{StaticNativeMutex, NATIVE_MUTEX_INIT};
use std::rt::thread::Thread;
use thread::Thread;
#[test]
fn smoke_lock() {

View File

@ -1,45 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The EventLoop and internal synchronous I/O interface.
use core::prelude::*;
use alloc::boxed::Box;
pub trait EventLoop {
fn run(&mut self);
fn callback(&mut self, arg: proc(): Send);
fn pausable_idle_callback(&mut self, Box<Callback + Send>)
-> Box<PausableIdleCallback + Send>;
fn remote_callback(&mut self, Box<Callback + Send>)
-> Box<RemoteCallback + Send>;
// last vestige of IoFactory
fn has_active_io(&self) -> bool;
}
pub trait Callback {
fn call(&mut self);
}
pub trait RemoteCallback {
/// Trigger the remote callback. Note that the number of times the
/// callback is run is not guaranteed. All that is guaranteed is
/// that, after calling 'fire', the callback will be called at
/// least once, but multiple callbacks may be coalesced and
/// callbacks may be called more often than requested. Destruction also
/// triggers the callback.
fn fire(&mut self);
}
pub trait PausableIdleCallback {
fn pause(&mut self);
fn resume(&mut self);
}
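// Illustrative sketch (not part of the original file): the smallest possible
// `Callback` implementor. An event loop would invoke `call` from its own
// thread, e.g. on idle or in response to `RemoteCallback::fire`.
#[allow(dead_code)]
struct NopCallback;
impl Callback for NopCallback {
    fn call(&mut self) { /* nothing to do */ }
}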

View File

@ -65,14 +65,7 @@ pub unsafe fn report() {
#[cfg(any(windows, target_os = "linux", target_os = "macos"))]
unsafe fn get_task_guard_page() -> Option<uint> {
let task: Option<*mut Task> = Local::try_unsafe_borrow();
task.map(|task| {
let runtime = (*task).take_runtime();
let guard = runtime.stack_guard();
(*task).put_runtime(runtime);
guard.unwrap_or(0)
})
task.map(|task| (&*task).stack_guard().unwrap_or(0))
}
#[cfg(windows)]

View File

@ -16,7 +16,7 @@ pub use self::BlockedTask::*;
use self::TaskState::*;
use alloc::arc::Arc;
use alloc::boxed::{BoxAny, Box};
use alloc::boxed::Box;
use core::any::Any;
use core::atomic::{AtomicUint, SeqCst};
use core::iter::Take;
@ -24,76 +24,21 @@ use core::kinds::marker;
use core::mem;
use core::prelude::{Clone, Drop, Err, Iterator, None, Ok, Option, Send, Some};
use core::prelude::{drop};
use core::raw;
use bookkeeping;
use mutex::NativeMutex;
use local_data;
use Runtime;
use local::Local;
use thread::{mod, Thread};
use stack;
use unwind;
use unwind::Unwinder;
use collections::str::SendStr;
/// State associated with Rust tasks.
///
/// Rust tasks are primarily built with two separate components. One is this
/// structure which handles standard services such as TLD, unwinding support,
/// naming of a task, etc. The second component is the runtime of this task, a
/// `Runtime` trait object.
///
/// The `Runtime` object instructs this task how it can perform critical
/// operations such as blocking, rescheduling, I/O constructors, etc. The two
/// halves are separately owned, but one is often found contained in the other.
/// A task's runtime can be reflected upon with the `maybe_take_runtime` method,
/// and otherwise its ownership is managed with `take_runtime` and
/// `put_runtime`.
///
/// In general, this structure should not be used. This is meant to be an
/// unstable internal detail of the runtime itself. From time-to-time, however,
/// it is useful to manage tasks directly. An example of this would be
/// interoperating with the Rust runtime from FFI callbacks or such. For this
/// reason, there are two methods of note with the `Task` structure.
///
/// * `run` - This function will execute a closure inside the context of a task.
/// Failure is caught and handled via the task's on_exit callback. If
/// this panics, the task is still returned, but it can no longer be
/// used; it is poisoned.
///
/// * `destroy` - This is a required function to call to destroy a task. If a
/// task falls out of scope without calling `destroy`, its
/// destructor bomb will go off, aborting the process.
///
/// With these two methods, a task can be re-used to execute code inside its
/// context while having a point in the future where destruction is allowed.
/// More information can be found on these specific methods.
///
/// # Example
///
/// ```no_run
/// extern crate native;
/// use std::uint;
/// # fn main() {
///
/// // Create a task using a native runtime
/// let task = native::task::new((0, uint::MAX), 0);
///
/// // Run some code, catching any possible panic
/// let task = task.run(|| {
/// // Run some code inside this task
/// println!("Hello with a native runtime!");
/// });
///
/// // Run some code again, catching the panic
/// let task = task.run(|| {
/// panic!("oh no, what to do!");
/// });
///
/// // Now that the task has panicked, it can never be used again
/// assert!(task.is_destroyed());
///
/// // Deallocate the resources associated with this task
/// task.destroy();
/// # }
/// ```
/// This structure is currently undergoing major changes, and is
/// likely to move into, or be merged with, a `Thread` structure.
pub struct Task {
pub storage: LocalStorage,
pub unwinder: Unwinder,
@ -101,7 +46,15 @@ pub struct Task {
pub name: Option<SendStr>,
state: TaskState,
imp: Option<Box<Runtime + Send + 'static>>,
lock: NativeMutex, // native synchronization
awoken: bool, // used to prevent spurious wakeups
// This field holds the known bounds of the stack in (lo, hi) form. Not all
// native tasks necessarily know their precise bounds, hence this is
// optional.
stack_bounds: (uint, uint),
stack_guard: uint
}
// Once a task has entered the `Armed` state it must be destroyed via `drop`,
@ -152,23 +105,60 @@ pub struct BlockedTasks {
impl Task {
/// Creates a new uninitialized task.
///
/// This method cannot be used to immediately invoke `run` because the task
/// itself will likely require a runtime to be inserted via `put_runtime`.
///
/// Note that you likely don't want to call this function, but rather the
/// task creation functions through libnative or libgreen.
pub fn new() -> Task {
pub fn new(stack_bounds: Option<(uint, uint)>, stack_guard: Option<uint>) -> Task {
Task {
storage: LocalStorage(None),
unwinder: Unwinder::new(),
death: Death::new(),
state: New,
name: None,
imp: None,
lock: unsafe { NativeMutex::new() },
awoken: false,
// these *should* get overwritten
stack_bounds: stack_bounds.unwrap_or((0, 0)),
stack_guard: stack_guard.unwrap_or(0)
}
}
pub fn spawn(opts: TaskOpts, f: proc():Send) {
let TaskOpts { name, stack_size, on_exit } = opts;
let mut task = box Task::new(None, None);
task.name = name;
task.death.on_exit = on_exit;
// FIXME: change this back after moving rustrt into std
// let stack = stack_size.unwrap_or(rt::min_stack());
let stack = stack_size.unwrap_or(2 * 1024 * 1024);
// Note that this increment must happen *before* the spawn in order to
// guarantee that if this task exits it will always end up waiting for
// the spawned task to exit.
let token = bookkeeping::increment();
// Spawning a new OS thread guarantees that __morestack will never get
// triggered, but we must manually set up the actual stack bounds once
// this function starts executing. This raises the lower limit by a bit
// because by the time that this function is executing we've already
// consumed at least a little bit of stack (we don't know the exact byte
// address at which our stack started).
Thread::spawn_stack(stack, proc() {
let something_around_the_top_of_the_stack = 1;
let addr = &something_around_the_top_of_the_stack as *const int;
let my_stack = addr as uint;
unsafe {
stack::record_os_managed_stack_bounds(my_stack - stack + 1024,
my_stack);
}
task.stack_guard = thread::current_guard_page();
task.stack_bounds = (my_stack - stack + 1024, my_stack);
let mut f = Some(f);
drop(task.run(|| { f.take().unwrap()() }).destroy());
drop(token);
})
}
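// An illustrative call of `spawn` (not part of the original), written as a
// comment since we are inside `impl Task`; `TaskOpts::new` and
// `into_maybe_owned` are used exactly as in the tests elsewhere in this patch:
//
//     let mut opts = TaskOpts::new();
//     opts.name = Some("worker".into_maybe_owned());
//     Task::spawn(opts, proc() { /* runs on a fresh OS thread */ });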
/// Consumes ownership of a task, runs some code, and returns the task back.
///
/// This function can be used as an emulated "try/catch" to interoperate
@ -190,23 +180,6 @@ impl Task {
///
/// It is invalid to call this function with a task that has been previously
/// destroyed via a failed call to `run`.
///
/// # Example
///
/// ```no_run
/// extern crate native;
/// use std::uint;
/// # fn main() {
///
/// // Create a new native task
/// let task = native::task::new((0, uint::MAX), 0);
///
/// // Run some code once and then destroy this task
/// task.run(|| {
/// println!("Hello with a native runtime!");
/// }).destroy();
/// # }
/// ```
pub fn run(mut self: Box<Task>, f: ||) -> Box<Task> {
assert!(!self.is_destroyed(), "cannot re-use a destroyed task");
@ -329,111 +302,136 @@ impl Task {
/// Queries whether this can be destroyed or not.
pub fn is_destroyed(&self) -> bool { self.state == Destroyed }
/// Inserts a runtime object into this task, transferring ownership to the
/// task. It is illegal to replace a previous runtime object in this task
/// with this argument.
pub fn put_runtime(&mut self, ops: Box<Runtime + Send + 'static>) {
assert!(self.imp.is_none());
self.imp = Some(ops);
}
/// Removes the runtime from this task, transferring ownership to the
/// caller.
pub fn take_runtime(&mut self) -> Box<Runtime + Send + 'static> {
assert!(self.imp.is_some());
self.imp.take().unwrap()
}
/// Attempts to extract the runtime as a specific type. If the runtime does
/// not have the provided type, then the runtime is not removed. If the
/// runtime does have the specified type, then it is removed and returned
/// (transfer of ownership).
///
/// It is recommended to only use this method when *absolutely necessary*.
/// This function may not be available in the future.
pub fn maybe_take_runtime<T: 'static>(&mut self) -> Option<Box<T>> {
// This is a terrible, terrible function. The general idea here is to
// take the runtime, cast it to Box<Any>, check if it has the right
// type, and then re-cast it back if necessary. The method of doing
// this is pretty sketchy and involves shuffling vtables of trait
// objects around, but it gets the job done.
//
// FIXME: This function is a serious code smell and should be avoided at
// all costs. I have yet to think of a method to avoid this
// function, and I would be saddened if more usage of the function
// crops up.
unsafe {
let imp = self.imp.take().unwrap();
let vtable = mem::transmute::<_, &raw::TraitObject>(&imp).vtable;
match imp.wrap().downcast::<T>() {
Ok(t) => Some(t),
Err(t) => {
let data = mem::transmute::<_, raw::TraitObject>(t).data;
let obj: Box<Runtime + Send + 'static> =
mem::transmute(raw::TraitObject {
vtable: vtable,
data: data,
});
self.put_runtime(obj);
None
}
}
}
}
/// Spawns a sibling to this task. The newly spawned task is configured with
/// the `opts` structure and will run `f` as the body of its code.
pub fn spawn_sibling(mut self: Box<Task>,
opts: TaskOpts,
f: proc(): Send) {
let ops = self.imp.take().unwrap();
ops.spawn_sibling(self, opts, f)
}
/// Deschedules the current task, invoking `f` `amt` times. It is not
/// recommended to use this function directly, but rather communication
/// primitives in `std::comm` should be used.
//
// This function gets a little interesting. There are a few safety and
// ownership violations going on here, but this is all done in the name of
// shared state. Additionally, all of the violations are protected with a
// mutex, so in theory there are no races.
//
// The first thing we need to do is to get a pointer to the task's internal
// mutex. This address will not be changing (because the task is allocated
// on the heap). We must have this handle separately because the task will
// have its ownership transferred to the given closure. We're guaranteed,
// however, that this memory will remain valid because *this* is the current
// task's execution thread.
//
// The next weird part is where ownership of the task actually goes. We
// relinquish it to the `f` blocking function, but upon returning this
// function needs to replace the task back in TLS. There is no communication
// from the wakeup thread back to this thread about the task pointer, and
// there's really no need to. In order to get around this, we cast the task
// to a `uint` which is then used at the end of this function to cast back
// to a `Box<Task>` object. Naturally, this looks like it violates
// ownership semantics in that there may be two `Box<Task>` objects.
//
// The fun part is that the wakeup half of this implementation knows to
// "forget" the task on the other end. This means that the awakening half of
// things silently relinquishes ownership back to this thread, but not in a
// way that the compiler can understand. The task's memory is always valid
// for both tasks because these operations are all done inside of a mutex.
//
// You'll also find that if blocking fails (the `f` function hands the
// BlockedTask back to us), we will `mem::forget` the handles. The
// reasoning for this is the same logic as above in that the task silently
// transfers ownership via the `uint`, not through normal compiler
// semantics.
//
// On a mildly unrelated note, it should also be pointed out that OS
// condition variables are susceptible to spurious wakeups, which we need to
// be ready for. In order to accommodate this fact, we have an extra
// `awoken` field which indicates whether we were actually woken up via some
// invocation of `reawaken`. This flag is only ever accessed inside the
// lock, so there's no need to make it atomic.
pub fn deschedule(mut self: Box<Task>,
amt: uint,
times: uint,
f: |BlockedTask| -> ::core::result::Result<(), BlockedTask>) {
let ops = self.imp.take().unwrap();
ops.deschedule(amt, self, f)
unsafe {
let me = &mut *self as *mut Task;
let task = BlockedTask::block(self);
if times == 1 {
let guard = (*me).lock.lock();
(*me).awoken = false;
match f(task) {
Ok(()) => {
while !(*me).awoken {
guard.wait();
}
}
Err(task) => { mem::forget(task.wake()); }
}
} else {
let iter = task.make_selectable(times);
let guard = (*me).lock.lock();
(*me).awoken = false;
// Apply the given closure to all of the "selectable tasks",
// bailing on the first one that produces an error. Note that when
// an error occurs we may not own the task, so we may still have to
// wait for the task to become available. In other words, if
// task.wake() returns `None`, then someone else has ownership and we
// must wait for their signal.
match iter.map(f).filter_map(|a| a.err()).next() {
None => {}
Some(task) => {
match task.wake() {
Some(task) => {
mem::forget(task);
(*me).awoken = true;
}
None => {}
}
}
}
while !(*me).awoken {
guard.wait();
}
}
// put the task back in TLS, and everything is as it once was.
Local::put(mem::transmute(me));
}
}
/// Wakes up a previously blocked task, optionally specifying whether the
/// current task can accept a change in scheduling. This function can only
/// be called on tasks that were previously blocked in `deschedule`.
/// Wakes up a previously blocked task. This function can only be
/// called on tasks that were previously blocked in `deschedule`.
//
// See the comments on `deschedule` for why the task is forgotten here, and
// why it's valid to do so.
pub fn reawaken(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.reawaken(self);
unsafe {
let me = &mut *self as *mut Task;
mem::forget(self);
let guard = (*me).lock.lock();
(*me).awoken = true;
guard.signal();
}
}
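Taken together, `deschedule` and `reawaken` form the block/wake protocol that the channel and mutex code later in this diff builds on. A minimal sketch of the pairing, assuming a hypothetical `AtomicUint` slot shared by both threads (the `cast_to_uint`/`cast_from_uint` round-trip is the one described in the comment above):

```rust
use rustrt::local::Local;
use rustrt::task::{Task, BlockedTask};
use std::sync::atomic;

// Blocking side: trade the current task in for a BlockedTask and
// publish it through the shared slot as a uint.
fn block_current(slot: &atomic::AtomicUint) {
    let task: Box<Task> = Local::take();
    task.deschedule(1, |blocked| {
        slot.store(unsafe { blocked.cast_to_uint() }, atomic::SeqCst);
        Ok(())
    });
    // By the time deschedule returns, another thread has reawakened us.
}

// Waking side (another thread): reconstitute the BlockedTask and wake it.
fn wake_other(slot: &atomic::AtomicUint) {
    let blocked = unsafe {
        BlockedTask::cast_from_uint(slot.load(atomic::SeqCst))
    };
    blocked.wake().map(|t| t.reawaken());
}
```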
/// Yields control of this task to another task. This function will
/// eventually return, but possibly not immediately. This is used as an
/// opportunity to allow other tasks a chance to run.
pub fn yield_now(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.yield_now(self);
}
/// Similar to `yield_now`, except that this function may immediately return
/// without yielding (depending on what the runtime decides to do).
pub fn maybe_yield(mut self: Box<Task>) {
let ops = self.imp.take().unwrap();
ops.maybe_yield(self);
pub fn yield_now() {
Thread::yield_now();
}
/// Returns the stack bounds for this task in (lo, hi) format. The stack
/// bounds may not be known for all tasks, so the return value may be
/// `None`.
pub fn stack_bounds(&self) -> (uint, uint) {
self.imp.as_ref().unwrap().stack_bounds()
self.stack_bounds
}
/// Returns whether it is legal for this task to block the OS thread that it
/// is running on.
pub fn can_block(&self) -> bool {
self.imp.as_ref().unwrap().can_block()
/// Returns the stack guard for this task, if known.
pub fn stack_guard(&self) -> Option<uint> {
if self.stack_guard != 0 {
Some(self.stack_guard)
} else {
None
}
}
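Since the task now carries its bounds and guard directly (rather than asking a `Runtime` object), inspecting them is just a couple of field reads. A minimal sketch using the `Local::borrow` pattern that appears elsewhere in this diff:

```rust
use rustrt::local::Local;
use rustrt::task::Task;

fn print_stack_info() {
    let task = Local::borrow(None::<Task>);
    let (lo, hi) = task.stack_bounds();
    println!("stack bounds: {:x}..{:x}", lo, hi);
    match task.stack_guard() {
        Some(guard) => println!("guard page at {:x}", guard),
        None => println!("no guard page known"),
    }
}
```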
/// Consume this task, flagging it as a candidate for destruction.
@ -549,6 +547,7 @@ mod test {
use super::*;
use std::prelude::*;
use std::task;
use unwind;
#[test]
fn tls() {
@ -594,20 +593,20 @@ mod test {
#[test]
#[should_fail]
fn test_begin_unwind() {
use std::rt::unwind::begin_unwind;
use unwind::begin_unwind;
begin_unwind("cause", &(file!(), line!()))
}
#[test]
fn drop_new_task_ok() {
drop(Task::new());
drop(Task::new(None, None));
}
// Task blocking tests
#[test]
fn block_and_wake() {
let task = box Task::new();
let task = box Task::new(None, None);
let task = BlockedTask::block(task).wake().unwrap();
task.drop();
}

View File

@ -229,7 +229,7 @@ pub mod dl {
}
pub fn check_for_errors_in<T>(f: || -> T) -> Result<T, String> {
use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
unsafe {
// dlerror isn't thread safe, so we need to lock around this entire

View File

@ -18,7 +18,7 @@ use kinds::Send;
use option::{Some, None};
use result::Ok;
use rt::backtrace;
use rt::{Stderr, Stdio};
use rustrt::{Stderr, Stdio};
use rustrt::local::Local;
use rustrt::task::Task;
use str::Str;

View File

@ -45,7 +45,7 @@ impl PipeStream {
///
/// # Example
///
/// ```rust
/// ```{rust,no_run}
/// # #![allow(unused_must_use)]
/// extern crate libc;
///

View File

@ -740,8 +740,6 @@ impl Drop for Process {
mod tests {
#![allow(unused_imports)]
extern crate native;
use super::*;
use prelude::*;
use io::timer::*;

View File

@ -40,9 +40,9 @@ use option::{Option, Some, None};
use boxed::Box;
use sys::{fs, tty};
use result::{Ok, Err};
use rt;
use rt::local::Local;
use rt::task::Task;
use rustrt;
use rustrt::local::Local;
use rustrt::task::Task;
use slice::SlicePrelude;
use str::StrPrelude;
use uint;
@ -207,7 +207,7 @@ fn with_task_stdout(f: |&mut Writer| -> IoResult<()>) {
local_stdout.replace(Some(my_stdout));
result
} else {
let mut io = rt::Stdout;
let mut io = rustrt::Stdout;
f(&mut io as &mut Writer)
};
match result {

View File

@ -117,7 +117,6 @@
#![reexport_test_harness_main = "test_main"]
#[cfg(test)] extern crate green;
#[cfg(test)] #[phase(plugin, link)] extern crate log;
extern crate alloc;
@ -163,7 +162,6 @@ pub use core::result;
pub use core::option;
pub use alloc::boxed;
pub use alloc::rc;
pub use core_collections::slice;
@ -248,8 +246,6 @@ pub mod fmt;
#[path = "sys/common/mod.rs"] mod sys_common;
// FIXME #7809: This shouldn't be pub, and it should be reexported under 'unstable'
// but name resolution doesn't work without it being pub.
pub mod rt;
mod failure;

View File

@ -208,7 +208,7 @@ Accessing environment variables is not generally threadsafe.
Serialize access through a global lock.
*/
fn with_env_lock<T>(f: || -> T) -> T {
use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
@ -1039,9 +1039,9 @@ fn real_args_as_bytes() -> Vec<Vec<u8>> {
target_os = "freebsd",
target_os = "dragonfly"))]
fn real_args_as_bytes() -> Vec<Vec<u8>> {
use rt;
use rustrt;
match rt::args::clone() {
match rustrt::args::clone() {
Some(args) => args,
None => panic!("process arguments not initialized")
}

View File

@ -238,7 +238,7 @@ mod imp {
use mem;
use option::{Some, None, Option};
use result::{Ok, Err};
use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
/// As always - iOS on arm uses SjLj exceptions and
/// _Unwind_Backtrace is even not available there. Still,
@ -667,7 +667,7 @@ mod imp {
use option::{Some, None};
use path::Path;
use result::{Ok, Err};
use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
use slice::SlicePrelude;
use str::StrPrelude;
use dynamic_lib::DynamicLibrary;

View File

@ -54,8 +54,11 @@ Several modules in `core` are clients of `rt`:
// FIXME: this should not be here.
#![allow(missing_docs)]
#![allow(dead_code)]
use failure;
use rustrt;
use os;
// Reexport some of our utilities which are expected by other crates.
pub use self::util::{default_sched_threads, min_stack, running_on_valgrind};
@ -63,9 +66,7 @@ pub use self::util::{default_sched_threads, min_stack, running_on_valgrind};
// Reexport functionality from librustrt and other crates underneath the
// standard library which work together to create the entire runtime.
pub use alloc::heap;
pub use rustrt::{task, local, mutex, exclusive, stack, args, rtio, thread};
pub use rustrt::{Stdio, Stdout, Stderr, begin_unwind, begin_unwind_fmt};
pub use rustrt::{bookkeeping, at_exit, unwind, DEFAULT_ERROR_CODE, Runtime};
pub use rustrt::{begin_unwind, begin_unwind_fmt, at_exit};
// Simple backtrace functionality (to print on panic)
pub mod backtrace;
@ -81,7 +82,82 @@ mod util;
#[allow(experimental)]
pub fn init(argc: int, argv: *const *const u8) {
rustrt::init(argc, argv);
unsafe { unwind::register(failure::on_fail); }
unsafe { rustrt::unwind::register(failure::on_fail); }
}
#[cfg(any(windows, android))]
static OS_DEFAULT_STACK_ESTIMATE: uint = 1 << 20;
#[cfg(all(unix, not(android)))]
static OS_DEFAULT_STACK_ESTIMATE: uint = 2 * (1 << 20);
#[cfg(not(test))]
#[lang = "start"]
fn lang_start(main: *const u8, argc: int, argv: *const *const u8) -> int {
use mem;
start(argc, argv, proc() {
let main: extern "Rust" fn() = unsafe { mem::transmute(main) };
main();
})
}
/// Executes the given procedure after initializing the runtime with the given
/// argc/argv.
///
/// This procedure is guaranteed to run on the thread calling this function, but
/// the stack bounds for this Rust task will *not* be set. Care must be taken
/// for this function to not overflow its stack.
///
/// This function will only return once *all* native threads in the system have
/// exited.
pub fn start(argc: int, argv: *const *const u8, main: proc()) -> int {
use prelude::*;
use rt;
use rustrt::task::Task;
use str;
let something_around_the_top_of_the_stack = 1;
let addr = &something_around_the_top_of_the_stack as *const int;
let my_stack_top = addr as uint;
// FIXME #11359 we just assume that this thread has a stack of a
// certain size, and estimate that there's at most 20KB of stack
// frames above our current position.
let my_stack_bottom = my_stack_top + 20000 - OS_DEFAULT_STACK_ESTIMATE;
// One of the first things we do at startup is turn off the SIGPIPE signal
// (set it to ignore). By default, some platforms will send a *signal* when
// an EPIPE error would otherwise be delivered. This runtime doesn't install
// a SIGPIPE handler, so the signal's default action would kill the
// program, which isn't exactly what we want!
//
// Hence, we set SIGPIPE to ignore when the program starts up in order to
// prevent this problem.
#[cfg(windows)] fn ignore_sigpipe() {}
#[cfg(unix)] fn ignore_sigpipe() {
use libc;
use libc::funcs::posix01::signal::signal;
unsafe {
assert!(signal(libc::SIGPIPE, libc::SIG_IGN) != -1);
}
}
ignore_sigpipe();
init(argc, argv);
let mut exit_code = None;
let mut main = Some(main);
let mut task = box Task::new(Some((my_stack_bottom, my_stack_top)),
Some(rustrt::thread::main_guard_page()));
task.name = Some(str::Slice("<main>"));
drop(task.run(|| {
unsafe {
rustrt::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top);
}
(main.take().unwrap())();
exit_code = Some(os::get_exit_status());
}).destroy());
unsafe { rt::cleanup(); }
// If the exit code wasn't set, then the task block must have panicked.
return exit_code.unwrap_or(rustrt::DEFAULT_ERROR_CODE);
}
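For programs that opt out of the default entry point, the new `rt::start` is how the runtime is brought up by hand; a minimal sketch, mirroring the pattern the in-tree tests further down this diff use:

```rust
use std::rt;

#[start]
fn start(argc: int, argv: *const *const u8) -> int {
    rt::start(argc, argv, proc() {
        println!("running with the runtime initialized");
    })
}
```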
/// One-time runtime cleanup.

View File

@ -21,9 +21,9 @@
//! time.
use mem;
use rt::bookkeeping;
use rt::mutex::StaticNativeMutex;
use rt;
use rustrt::bookkeeping;
use rustrt::mutex::StaticNativeMutex;
use rustrt;
use cell::UnsafeCell;
use sys::helper_signal;
use prelude::*;
@ -83,7 +83,7 @@ impl<M: Send> Helper<M> {
self.lock.lock().signal()
});
rt::at_exit(proc() { self.shutdown() });
rustrt::at_exit(proc() { self.shutdown() });
*self.initialized.get() = true;
}
}
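`at_exit` is the hook the helper thread uses here to tear itself down once all tasks have finished; user code can register cleanup the same way through the `std::rt` re-export. A minimal sketch:

```rust
use std::rt;

fn install_shutdown_hook() {
    rt::at_exit(proc() {
        println!("all tasks done; runtime is shutting down");
    });
}
```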

View File

@ -16,7 +16,7 @@ use libc::{mod, c_char, c_int};
use mem;
use num::Int;
use ptr::{mod, null, null_mut};
use rt::mutex;
use rustrt::mutex;
use io::net::ip::{SocketAddr, IpAddr, Ipv4Addr, Ipv6Addr};
use io::net::addrinfo;
use io::{IoResult, IoError};

View File

@ -25,7 +25,7 @@ use sys_common::mkerr_libc;
macro_rules! helper_init( (static $name:ident: Helper<$m:ty>) => (
static $name: Helper<$m> = Helper {
lock: ::rt::mutex::NATIVE_MUTEX_INIT,
lock: ::rustrt::mutex::NATIVE_MUTEX_INIT,
chan: ::cell::UnsafeCell { value: 0 as *mut Sender<$m> },
signal: ::cell::UnsafeCell { value: 0 },
initialized: ::cell::UnsafeCell { value: false },

View File

@ -12,7 +12,7 @@ use alloc::arc::Arc;
use libc;
use c_str::CString;
use mem;
use rt::mutex;
use rustrt::mutex;
use sync::atomic;
use io::{mod, IoResult, IoError};
use prelude::*;

View File

@ -26,7 +26,7 @@ use sync::{Once, ONCE_INIT};
macro_rules! helper_init( (static $name:ident: Helper<$m:ty>) => (
static $name: Helper<$m> = Helper {
lock: ::rt::mutex::NATIVE_MUTEX_INIT,
lock: ::rustrt::mutex::NATIVE_MUTEX_INIT,
chan: ::cell::UnsafeCell { value: 0 as *mut Sender<$m> },
signal: ::cell::UnsafeCell { value: 0 },
initialized: ::cell::UnsafeCell { value: false },

View File

@ -90,7 +90,7 @@ use c_str::CString;
use mem;
use ptr;
use sync::atomic;
use rt::mutex;
use rustrt::mutex;
use io::{mod, IoError, IoResult};
use prelude::*;

View File

@ -11,11 +11,7 @@
//! Task creation
//!
//! An executing Rust program consists of a collection of tasks, each
//! with their own stack and local state. A Rust task is typically
//! backed by an operating system thread, making tasks 'just threads',
//! but they may also be implemented via other strategies
//! (e.g. Rust comes with the [`green`](../../green/index.html)
//! scheduling crate for creating tasks backed by green threads).
//! with their own stack and local state.
//!
//! Tasks generally have their memory *isolated* from each other by
//! virtue of Rust's owned types (which of course may only be owned by
@ -36,13 +32,6 @@
//! the main task panics the application will exit with a non-zero
//! exit code.
//!
//! # Basic task scheduling
//!
//! By default, every task is created with the same "flavor" as the calling task.
//! This flavor refers to the scheduling mode, with two possibilities currently
//! being 1:1 and M:N modes. Green (M:N) tasks are cooperatively scheduled and
//! native (1:1) tasks are scheduled by the OS kernel.
//!
//! ## Example
//!
//! ```rust
@ -50,46 +39,6 @@
//! println!("Hello, World!");
//! })
//! ```
//!
//! # Advanced task scheduling
//!
//! Task spawning can also be configured to use a particular scheduler, to
//! redirect the new task's output, or to yield a `future` representing the
//! task's final result. The configuration is established using the
//! `TaskBuilder` API:
//!
//! ## Example
//!
//! ```rust
//! extern crate green;
//! extern crate native;
//!
//! use std::task::TaskBuilder;
//! use green::{SchedPool, PoolConfig, GreenTaskBuilder};
//! use native::NativeTaskBuilder;
//!
//! # fn main() {
//! // Create a green scheduler pool with the default configuration
//! let mut pool = SchedPool::new(PoolConfig::new());
//!
//! // Spawn a task in the green pool
//! let mut fut_green = TaskBuilder::new().green(&mut pool).try_future(proc() {
//! /* ... */
//! });
//!
//! // Spawn a native task
//! let mut fut_native = TaskBuilder::new().native().try_future(proc() {
//! /* ... */
//! });
//!
//! // Wait for both tasks to finish, recording their outcome
//! let res_green = fut_green.unwrap();
//! let res_native = fut_native.unwrap();
//!
//! // Shut down the green scheduler pool
//! pool.shutdown();
//! # }
//! ```
#![unstable = "The task spawning model will be changed as part of runtime reform, and the module \
will likely be renamed from `task` to `thread`."]
@ -101,33 +50,13 @@ use kinds::{Send, marker};
use option::{None, Some, Option};
use boxed::Box;
use result::Result;
use rt::local::Local;
use rt::task;
use rt::task::Task;
use rustrt::local::Local;
use rustrt::task;
use rustrt::task::Task;
use str::{Str, SendStr, IntoMaybeOwned};
use string::{String, ToString};
use sync::Future;
/// A means of spawning a task
pub trait Spawner {
/// Spawn a task, given low-level task options.
fn spawn(self, opts: task::TaskOpts, f: proc():Send);
}
/// The default task spawner, which spawns siblings to the current task.
pub struct SiblingSpawner;
impl Spawner for SiblingSpawner {
fn spawn(self, opts: task::TaskOpts, f: proc():Send) {
// bind tb to provide type annotation
let tb: Option<Box<Task>> = Local::try_take();
match tb {
Some(t) => t.spawn_sibling(opts, f),
None => panic!("need a local task to spawn a sibling task"),
};
}
}
/// The task builder type.
///
/// Provides detailed control over the properties and behavior of new tasks.
@ -139,7 +68,7 @@ impl Spawner for SiblingSpawner {
// when you try to reuse the builder to spawn a new task. We'll just
// sidestep that whole issue by making builders uncopyable and making
// the run function move them in.
pub struct TaskBuilder<S = SiblingSpawner> {
pub struct TaskBuilder {
// A name for the task-to-be, for identification in panic messages
name: Option<SendStr>,
// The size of the stack for the spawned task
@ -148,88 +77,60 @@ pub struct TaskBuilder<S = SiblingSpawner> {
stdout: Option<Box<Writer + Send>>,
// Task-local stderr
stderr: Option<Box<Writer + Send>>,
// The mechanics of actually spawning the task (i.e.: green or native)
spawner: S,
// Optionally wrap the eventual task body
gen_body: Option<proc(v: proc():Send):Send -> proc():Send>,
nocopy: marker::NoCopy,
}
impl TaskBuilder<SiblingSpawner> {
impl TaskBuilder {
/// Generate the base configuration for spawning a task, off of which more
/// configuration methods can be chained.
pub fn new() -> TaskBuilder<SiblingSpawner> {
pub fn new() -> TaskBuilder {
TaskBuilder {
name: None,
stack_size: None,
stdout: None,
stderr: None,
spawner: SiblingSpawner,
gen_body: None,
nocopy: marker::NoCopy,
}
}
}
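With the `Spawner` type parameter gone, a builder is simply configured and run; every task is a native thread. A minimal sketch (assuming the builder's `try` method, which this excerpt doesn't show):

```rust
use std::task::TaskBuilder;

fn demo() {
    let result = TaskBuilder::new()
        .named("worker")
        .stack_size(512 * 1024)
        .try(proc() { 1u + 2 });
    // `try` reports a panic in the child task as Err(..).
    assert_eq!(result.ok(), Some(3u));
}
```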
impl<S: Spawner> TaskBuilder<S> {
impl TaskBuilder {
/// Name the task-to-be. Currently the name is used for identification
/// only in panic messages.
#[unstable = "IntoMaybeOwned will probably change."]
pub fn named<T: IntoMaybeOwned<'static>>(mut self, name: T) -> TaskBuilder<S> {
pub fn named<T: IntoMaybeOwned<'static>>(mut self, name: T) -> TaskBuilder {
self.name = Some(name.into_maybe_owned());
self
}
/// Set the size of the stack for the new task.
pub fn stack_size(mut self, size: uint) -> TaskBuilder<S> {
pub fn stack_size(mut self, size: uint) -> TaskBuilder {
self.stack_size = Some(size);
self
}
/// Redirect task-local stdout.
#[experimental = "May not want to make stdio overridable here."]
pub fn stdout(mut self, stdout: Box<Writer + Send>) -> TaskBuilder<S> {
pub fn stdout(mut self, stdout: Box<Writer + Send>) -> TaskBuilder {
self.stdout = Some(stdout);
self
}
/// Redirect task-local stderr.
#[experimental = "May not want to make stdio overridable here."]
pub fn stderr(mut self, stderr: Box<Writer + Send>) -> TaskBuilder<S> {
pub fn stderr(mut self, stderr: Box<Writer + Send>) -> TaskBuilder {
self.stderr = Some(stderr);
self
}
/// Set the spawning mechanism for the task.
///
/// The `TaskBuilder` API configures a task to be spawned, but defers to the
/// "spawner" to actually create and spawn the task. The `spawner` method
/// should not be called directly by `TaskBuiler` clients. It is intended
/// for use by downstream crates (like `native` and `green`) that implement
/// tasks. These downstream crates then add extension methods to the
/// builder, like `.native()` and `.green(pool)`, that actually set the
/// spawner.
pub fn spawner<T: Spawner>(self, spawner: T) -> TaskBuilder<T> {
// repackage the entire TaskBuilder since its type is changing.
let TaskBuilder {
name, stack_size, stdout, stderr, spawner: _, gen_body, nocopy
} = self;
TaskBuilder {
name: name,
stack_size: stack_size,
stdout: stdout,
stderr: stderr,
spawner: spawner,
gen_body: gen_body,
nocopy: nocopy,
}
}
// Where spawning actually happens (whether yielding a future or not)
fn spawn_internal(self, f: proc():Send,
on_exit: Option<proc(Result<(), Box<Any + Send>>):Send>) {
let TaskBuilder {
name, stack_size, stdout, stderr, spawner, mut gen_body, nocopy: _
name, stack_size, stdout, stderr, mut gen_body, nocopy: _
} = self;
let f = match gen_body.take() {
Some(gen) => gen(f),
@ -241,13 +142,13 @@ impl<S: Spawner> TaskBuilder<S> {
stack_size: stack_size,
};
if stdout.is_some() || stderr.is_some() {
spawner.spawn(opts, proc() {
Task::spawn(opts, proc() {
let _ = stdout.map(stdio::set_stdout);
let _ = stderr.map(stdio::set_stderr);
f();
})
} else {
spawner.spawn(opts, f)
Task::spawn(opts, f)
}
}
@ -336,7 +237,7 @@ pub fn try_future<T:Send>(f: proc():Send -> T) -> Future<Result<T, Box<Any + Sen
/// Read the name of the current task.
#[stable]
pub fn name() -> Option<String> {
use rt::task::Task;
use rustrt::task::Task;
let task = Local::borrow(None::<Task>);
match task.name {
@ -348,18 +249,15 @@ pub fn name() -> Option<String> {
/// Yield control to the task scheduler.
#[unstable = "Name will change."]
pub fn deschedule() {
use rt::local::Local;
// FIXME(#7544): Optimize this, since we know we won't block.
let task: Box<Task> = Local::take();
task.yield_now();
use rustrt::task::Task;
Task::yield_now();
}
/// True if the running task is currently panicking (e.g. will return `true` inside a
/// destructor that is run while unwinding the stack after a call to `panic!()`).
#[unstable = "May move to a different module."]
pub fn failing() -> bool {
use rt::task::Task;
use rustrt::task::Task;
Local::borrow(None::<Task>).unwinder.unwinding()
}

View File

@ -42,7 +42,6 @@
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUint, SeqCst};
//! use std::task::deschedule;
//!
//! fn main() {
//! let spinlock = Arc::new(AtomicUint::new(1));
@ -53,13 +52,7 @@
//! });
//!
//! // Wait for the other task to release the lock
//! while spinlock.load(SeqCst) != 0 {
//! // Since tasks may not be preemptive (if they are green threads)
//! // yield to the scheduler to let the other task run. Low level
//! // concurrent code needs to take into account Rust's two threading
//! // models.
//! deschedule();
//! }
//! while spinlock.load(SeqCst) != 0 {}
//! }
//! ```
//!

View File

@ -65,10 +65,6 @@
//! the `try_send` method on a `SyncSender`, but no other operations are
//! guaranteed to be safe.
//!
//! Additionally, channels can interoperate between runtimes. If one task in a
//! program is running on libnative and another is running on libgreen, they can
//! still communicate with one another using channels.
//!
//! # Example
//!
//! Simple usage:
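A minimal sketch of the basic pattern, using the prelude's `channel` and `spawn`:

```rust
fn demo() {
    let (tx, rx) = channel();
    spawn(proc() {
        tx.send(42i);
    });
    assert_eq!(rx.recv(), 42i);
}
```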
@ -328,13 +324,10 @@ pub use self::TrySendError::*;
use self::Flavor::*;
use alloc::arc::Arc;
use alloc::boxed::Box;
use core::cell::Cell;
use core::kinds::marker;
use core::mem;
use core::cell::UnsafeCell;
use rustrt::local::Local;
use rustrt::task::{Task, BlockedTask};
use rustrt::task::BlockedTask;
pub use comm::select::{Select, Handle};
@ -343,23 +336,16 @@ macro_rules! test (
mod $name {
#![allow(unused_imports)]
extern crate rustrt;
use std::prelude::*;
use native;
use comm::*;
use super::*;
use super::super::*;
use std::task;
fn f() $b
$(#[$a])* #[test] fn uv() { f() }
$(#[$a])* #[test] fn native() {
use native;
let (tx, rx) = channel();
spawn(proc() { tx.send(f()) });
rx.recv();
}
$(#[$a])* #[test] fn f() { $b }
}
)
)
@ -370,16 +356,11 @@ mod shared;
mod stream;
mod sync;
// Use a power of 2 to allow LLVM to optimize this to something that's not
// a division; this check is hit pretty regularly.
static RESCHED_FREQ: int = 256;
/// The receiving-half of Rust's channel type. This half can only be owned by
/// one task
#[unstable]
pub struct Receiver<T> {
inner: UnsafeCell<Flavor<T>>,
receives: Cell<uint>,
// can't share in an arc
_marker: marker::NoSync,
}
@ -397,7 +378,6 @@ pub struct Messages<'a, T:'a> {
#[unstable]
pub struct Sender<T> {
inner: UnsafeCell<Flavor<T>>,
sends: Cell<uint>,
// can't share in an arc
_marker: marker::NoSync,
}
@ -544,7 +524,6 @@ impl<T: Send> Sender<T> {
fn new(inner: Flavor<T>) -> Sender<T> {
Sender {
inner: UnsafeCell::new(inner),
sends: Cell::new(0),
_marker: marker::NoSync,
}
}
@ -608,21 +587,6 @@ impl<T: Send> Sender<T> {
/// ```
#[unstable = "this function may be renamed to send() in the future"]
pub fn send_opt(&self, t: T) -> Result<(), T> {
// In order to prevent starvation of other tasks in situations where
// a task sends repeatedly without ever receiving, we occasionally
// yield instead of doing a send immediately.
//
// Don't unconditionally attempt to yield because the TLS overhead can
// be a bit much, and also use `try_take` instead of `take` because
// there's no reason that this send shouldn't be usable off the
// runtime.
let cnt = self.sends.get() + 1;
self.sends.set(cnt);
if cnt % (RESCHED_FREQ as uint) == 0 {
let task: Option<Box<Task>> = Local::try_take();
task.map(|t| t.maybe_yield());
}
let (new_inner, ret) = match *unsafe { self.inner() } {
Oneshot(ref p) => {
unsafe {
@ -809,7 +773,7 @@ impl<T: Send> Drop for SyncSender<T> {
impl<T: Send> Receiver<T> {
fn new(inner: Flavor<T>) -> Receiver<T> {
Receiver { inner: UnsafeCell::new(inner), receives: Cell::new(0), _marker: marker::NoSync }
Receiver { inner: UnsafeCell::new(inner), _marker: marker::NoSync }
}
/// Blocks waiting for a value on this receiver
@ -854,17 +818,6 @@ impl<T: Send> Receiver<T> {
/// This function cannot panic.
#[unstable = "the return type of this function may be altered"]
pub fn try_recv(&self) -> Result<T, TryRecvError> {
// If a thread is spinning in try_recv, we should take the opportunity
// to reschedule things occasionally. See notes above in scheduling on
// sends for why this doesn't always hit TLS, and also for why this uses
// `try_take` instead of `take`.
let cnt = self.receives.get() + 1;
self.receives.set(cnt);
if cnt % (RESCHED_FREQ as uint) == 0 {
let task: Option<Box<Task>> = Local::try_take();
task.map(|t| t.maybe_yield());
}
loop {
let new_port = match *unsafe { self.inner() } {
Oneshot(ref p) => {
@ -1561,7 +1514,7 @@ mod test {
})
test!(fn sends_off_the_runtime() {
use std::rt::thread::Thread;
use rustrt::thread::Thread;
let (tx, rx) = channel();
let t = Thread::start(proc() {
@ -1576,7 +1529,7 @@ mod test {
})
test!(fn try_recvs_off_the_runtime() {
use std::rt::thread::Thread;
use rustrt::thread::Thread;
let (tx, rx) = channel();
let (cdone, pdone) = channel();
@ -2026,7 +1979,7 @@ mod sync_tests {
})
test!(fn try_recvs_off_the_runtime() {
use std::rt::thread::Thread;
use rustrt::thread::Thread;
let (tx, rx) = sync_channel::<()>(0);
let (cdone, pdone) = channel();

View File

@ -279,17 +279,6 @@ impl<T: Send> Packet<T> {
// because the remote sender should finish their enqueue
// operation "very quickly".
//
// Note that this yield loop does *not* attempt to do a green
// yield (regardless of the context), but *always* performs an
// OS-thread yield. The reasoning for this is that the pusher in
// question which is causing the inconsistent state is
// guaranteed to *not* be a blocked task (green tasks can't get
// pre-empted), so it must be on a different OS thread. Also,
// `try_recv` is normally a "guaranteed no rescheduling" context
// in a green-thread situation. By yielding control of the
// thread, we will hopefully allow time for the remote task on
// the other OS thread to make progress.
//
// Avoiding this yield loop would require a different queue
// abstraction which provides the guarantee that after M
// pushes have succeeded, at least M pops will succeed. The

View File

@ -414,7 +414,7 @@ mod tests {
use super::{Data, BufferPool, Abort, Empty, Worker, Stealer};
use std::mem;
use std::rt::thread::Thread;
use rustrt::thread::Thread;
use std::rand;
use std::rand::Rng;
use atomic::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,

View File

@ -38,7 +38,6 @@ extern crate collections;
extern crate rustrt;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate native;
#[cfg(test)] #[phase(plugin, link)] extern crate std;
pub use alloc::arc::{Arc, Weak};
@ -54,7 +53,6 @@ pub mod atomic;
// Concurrent data structures
mod mpsc_intrusive;
pub mod spsc_queue;
pub mod mpsc_queue;
pub mod mpmc_bounded_queue;

View File

@ -1,144 +0,0 @@
/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
//! A mostly lock-free multi-producer, single consumer queue.
//!
//! This module implements an intrusive MPSC queue. This queue is incredibly
//! unsafe (due to use of unsafe pointers for nodes), and hence is not public.
#![experimental]
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/intrusive-mpsc-node-based-queue
use core::prelude::*;
use core::atomic;
use core::mem;
use core::cell::UnsafeCell;
// NB: all links are done as AtomicUint instead of AtomicPtr to allow for static
// initialization.
pub struct Node<T> {
pub next: atomic::AtomicUint,
pub data: T,
}
pub struct DummyNode {
pub next: atomic::AtomicUint,
}
pub struct Queue<T> {
pub head: atomic::AtomicUint,
pub tail: UnsafeCell<*mut Node<T>>,
pub stub: DummyNode,
}
impl<T: Send> Queue<T> {
pub fn new() -> Queue<T> {
Queue {
head: atomic::AtomicUint::new(0),
tail: UnsafeCell::new(0 as *mut Node<T>),
stub: DummyNode {
next: atomic::AtomicUint::new(0),
},
}
}
pub unsafe fn push(&self, node: *mut Node<T>) {
(*node).next.store(0, atomic::Release);
let prev = self.head.swap(node as uint, atomic::AcqRel);
// Note that this code is slightly modified to allow static
// initialization of these queues with rust's flavor of static
// initialization.
if prev == 0 {
self.stub.next.store(node as uint, atomic::Release);
} else {
let prev = prev as *mut Node<T>;
(*prev).next.store(node as uint, atomic::Release);
}
}
/// You'll note that the other MPSC queue in std::sync is non-intrusive and
/// returns a `PopResult` here to indicate when the queue is inconsistent.
/// An "inconsistent state" in the other queue means that a pusher has
/// pushed, but it hasn't finished linking the rest of the chain.
///
/// This queue also suffers from this problem, but I currently haven't been
/// able to detangle when this actually happens. This code is translated
/// verbatim from the website above, and is more complicated than the
/// non-intrusive version.
///
/// Right now consumers of this queue must be ready for this fact. Just
/// because `pop` returns `None` does not mean that the queue is empty.
pub unsafe fn pop(&self) -> Option<*mut Node<T>> {
let tail = *self.tail.get();
let mut tail = if !tail.is_null() {tail} else {
mem::transmute(&self.stub)
};
let mut next = (*tail).next(atomic::Relaxed);
if tail as uint == &self.stub as *const DummyNode as uint {
if next.is_null() {
return None;
}
*self.tail.get() = next;
tail = next;
next = (*next).next(atomic::Relaxed);
}
if !next.is_null() {
*self.tail.get() = next;
return Some(tail);
}
let head = self.head.load(atomic::Acquire) as *mut Node<T>;
if tail != head {
return None;
}
let stub = mem::transmute(&self.stub);
self.push(stub);
next = (*tail).next(atomic::Relaxed);
if !next.is_null() {
*self.tail.get() = next;
return Some(tail);
}
return None
}
}
impl<T: Send> Node<T> {
pub fn new(t: T) -> Node<T> {
Node {
data: t,
next: atomic::AtomicUint::new(0),
}
}
pub unsafe fn next(&self, ord: atomic::Ordering) -> *mut Node<T> {
mem::transmute::<uint, *mut Node<T>>(self.next.load(ord))
}
}
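Given the caveat on `pop`, a consumer that knows an element was pushed must treat `None` as possibly spurious and retry; this is exactly the spin that `green_unlock` performs in the old mutex below. A minimal sketch:

```rust
use rustrt::thread::Thread;

unsafe fn pop_one<T: Send>(q: &Queue<T>) -> *mut Node<T> {
    loop {
        match q.pop() {
            Some(node) => return node,
            // `None` may be spurious while a pusher is mid-link:
            // yield the OS thread and retry.
            None => Thread::yield_now(),
        }
    }
}
```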

View File

@ -8,80 +8,20 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A proper mutex implementation regardless of the "flavor of task" which is
//! acquiring the lock.
//! A simple native mutex implementation. Warning: this API is likely
//! to change soon.
// # Implementation of Rust mutexes
//
// Most answers to the question of "how do I use a mutex" are "use pthreads",
// but for Rust this isn't quite sufficient. Green threads cannot acquire an OS
// mutex because they can context switch among many OS threads, leading to
// deadlocks with other green threads.
//
// Another problem for green threads grabbing an OS mutex is that POSIX dictates
// that unlocking a mutex on a different thread from where it was locked is
// undefined behavior. Remember that green threads can migrate among OS threads,
// so this would mean that we would have to pin green threads to OS threads,
// which is less than ideal.
//
// ## Using deschedule/reawaken
//
// We already have primitives for descheduling/reawakening tasks, so they're the
// first obvious choice when implementing a mutex. The idea would be to have a
// concurrent queue that everyone is pushed on to, and then the owner of the
// mutex is the one popping from the queue.
//
// Unfortunately, this is not very performant for native tasks. The suspected
// reason for this is that each native thread is suspended on its own condition
// variable, unique from all the other threads. In this situation, the kernel
// has no idea what the scheduling semantics are of the user program, so all of
// the threads are distributed among all cores on the system. This ends up
// having very expensive wakeups of remote cores high up in the profile when
// handing off the mutex among native tasks. On the other hand, when using an OS
// mutex, the kernel knows that all native threads are contended on the same
// mutex, so they're in theory all migrated to a single core (fast context
// switching).
//
// ## Mixing implementations
//
// From that above information, we have two constraints. The first is that
// green threads can't touch os mutexes, and the second is that native tasks
// pretty much *must* touch an os mutex.
//
// As a compromise, the queueing implementation is used for green threads and
// the os mutex is used for native threads (why not have both?). This ends up
// leading to fairly decent performance for both native threads and green
// threads on various workloads (uncontended and contended).
//
// The crux of this implementation is an atomic word which is CAS'd many
// times in order to manage a few flags about who's blocking where and
// whether it's locked or not.
#![allow(dead_code)]
use core::prelude::*;
use self::Flavor::*;
use alloc::boxed::Box;
use core::atomic;
use core::mem;
use core::cell::UnsafeCell;
use rustrt::local::Local;
use rustrt::mutex;
use rustrt::task::{BlockedTask, Task};
use rustrt::thread::Thread;
use mpsc_intrusive as q;
pub const LOCKED: uint = 1 << 0;
pub const GREEN_BLOCKED: uint = 1 << 1;
pub const NATIVE_BLOCKED: uint = 1 << 2;
pub const BLOCKED: uint = 1 << 1;
/// A mutual exclusion primitive useful for protecting shared data
///
/// This mutex is an implementation of a lock for all flavors of tasks which
/// may be grabbing it. A common problem with green threads is that they
/// cannot grab locks (if they reschedule while holding the lock, a contender
/// could deadlock the system), but this mutex does *not* suffer this problem.
///
/// This mutex will properly block tasks waiting for the lock to become
/// available. The mutex can also be statically initialized or created via a
/// `new` constructor.
@ -107,14 +47,6 @@ pub struct Mutex {
lock: Box<StaticMutex>,
}
#[deriving(PartialEq, Show)]
enum Flavor {
Unlocked,
TryLockAcquisition,
GreenAcquisition,
NativeAcquisition,
}
/// The static mutex type is provided to allow for static allocation of mutexes.
///
/// Note that this is a separate type because using a Mutex correctly means that
@ -137,310 +69,35 @@ enum Flavor {
/// // lock is unlocked here.
/// ```
pub struct StaticMutex {
/// Current set of flags on this mutex
state: atomic::AtomicUint,
/// an OS mutex used by native threads
lock: mutex::StaticNativeMutex,
/// Type of locking operation currently on this mutex
flavor: UnsafeCell<Flavor>,
/// uint-cast of the green thread waiting for this mutex
green_blocker: UnsafeCell<uint>,
/// uint-cast of the native thread waiting for this mutex
native_blocker: UnsafeCell<uint>,
/// A concurrent mpsc queue used by green threads, along with a count used
/// to figure out when to dequeue and enqueue.
q: q::Queue<uint>,
green_cnt: atomic::AtomicUint,
}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
#[must_use]
pub struct Guard<'a> {
lock: &'a StaticMutex,
guard: mutex::LockGuard<'a>,
}
fn lift_guard(guard: mutex::LockGuard) -> Guard {
Guard { guard: guard }
}
/// Static initialization of a mutex. This constant can be used to initialize
/// other mutex constants.
pub const MUTEX_INIT: StaticMutex = StaticMutex {
lock: mutex::NATIVE_MUTEX_INIT,
state: atomic::INIT_ATOMIC_UINT,
flavor: UnsafeCell { value: Unlocked },
green_blocker: UnsafeCell { value: 0 },
native_blocker: UnsafeCell { value: 0 },
green_cnt: atomic::INIT_ATOMIC_UINT,
q: q::Queue {
head: atomic::INIT_ATOMIC_UINT,
tail: UnsafeCell { value: 0 as *mut q::Node<uint> },
stub: q::DummyNode {
next: atomic::INIT_ATOMIC_UINT,
}
}
lock: mutex::NATIVE_MUTEX_INIT
};
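Statically initialized mutexes now delegate straight to the OS primitive, and usage matches the docstring above. A minimal sketch (assuming the `std::sync::mutex` re-export path):

```rust
use std::sync::mutex::{StaticMutex, MUTEX_INIT};

static LOCK: StaticMutex = MUTEX_INIT;

fn with_lock() {
    let _g = LOCK.lock();
    // exclusive access here
} // _g dropped: lock released
```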
impl StaticMutex {
/// Attempts to grab this lock, see `Mutex::try_lock`
pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> {
// Attempt to steal the mutex from an unlocked state.
//
// FIXME: this can mess up the fairness of the mutex, seems bad
match self.state.compare_and_swap(0, LOCKED, atomic::SeqCst) {
0 => {
// After acquiring the mutex, we can safely access the inner
// fields.
let prev = unsafe {
mem::replace(&mut *self.flavor.get(), TryLockAcquisition)
};
assert_eq!(prev, Unlocked);
Some(Guard::new(self))
}
_ => None
}
unsafe { self.lock.trylock().map(lift_guard) }
}
/// Acquires this lock, see `Mutex::lock`
pub fn lock<'a>(&'a self) -> Guard<'a> {
// First, attempt to steal the mutex from an unlocked state. The "fast
// path" needs to have as few atomic instructions as possible, and this
// one cmpxchg is already pretty expensive.
//
// FIXME: this can mess up the fairness of the mutex, seems bad
match self.try_lock() {
Some(guard) => return guard,
None => {}
}
// After we've failed the fast path, then we delegate to the different
// locking protocols for green/native tasks. This will select two tasks
// to continue further (one native, one green).
let t: Box<Task> = Local::take();
let can_block = t.can_block();
let native_bit;
if can_block {
self.native_lock(t);
native_bit = NATIVE_BLOCKED;
} else {
self.green_lock(t);
native_bit = GREEN_BLOCKED;
}
// After we've arbitrated among task types, attempt to re-acquire the
// lock (avoids a deschedule). This is very important to do in order to
// allow threads coming out of the native_lock function to try their
// best to not hit a cvar in deschedule.
let mut old = match self.state.compare_and_swap(0, LOCKED,
atomic::SeqCst) {
0 => {
let flavor = if can_block {
NativeAcquisition
} else {
GreenAcquisition
};
// We've acquired the lock, so this unsafe access to flavor is
// allowed.
unsafe { *self.flavor.get() = flavor; }
return Guard::new(self)
}
old => old,
};
// Alright, everything else failed. We need to deschedule ourselves and
// flag ourselves as waiting. Note that this case should only happen
// regularly in native/green contention. Due to try_lock and the header
// of lock stealing the lock, it's also possible for native/native
// contention to reach this location, but that is less common.
let t: Box<Task> = Local::take();
t.deschedule(1, |task| {
let task = unsafe { task.cast_to_uint() };
// These accesses are protected by the respective native/green
// mutexes which were acquired above.
let prev = if can_block {
unsafe { mem::replace(&mut *self.native_blocker.get(), task) }
} else {
unsafe { mem::replace(&mut *self.green_blocker.get(), task) }
};
assert_eq!(prev, 0);
loop {
assert_eq!(old & native_bit, 0);
// If the old state was locked, then we need to flag ourselves
// as blocking in the state. If the old state was unlocked, then
// we attempt to acquire the mutex. Everything here is a CAS
// loop that'll eventually make progress.
if old & LOCKED != 0 {
old = match self.state.compare_and_swap(old,
old | native_bit,
atomic::SeqCst) {
n if n == old => return Ok(()),
n => n
};
} else {
assert_eq!(old, 0);
old = match self.state.compare_and_swap(old,
old | LOCKED,
atomic::SeqCst) {
n if n == old => {
// After acquiring the lock, we have access to the
// flavor field, and we've regained access to our
// respective native/green blocker field.
let prev = if can_block {
unsafe {
*self.native_blocker.get() = 0;
mem::replace(&mut *self.flavor.get(),
NativeAcquisition)
}
} else {
unsafe {
*self.green_blocker.get() = 0;
mem::replace(&mut *self.flavor.get(),
GreenAcquisition)
}
};
assert_eq!(prev, Unlocked);
return Err(unsafe {
BlockedTask::cast_from_uint(task)
})
}
n => n,
};
}
}
});
Guard::new(self)
}
// Tasks which can block are super easy. These tasks just call the blocking
// `lock()` function on an OS mutex
fn native_lock(&self, t: Box<Task>) {
Local::put(t);
unsafe { self.lock.lock_noguard(); }
}
fn native_unlock(&self) {
unsafe { self.lock.unlock_noguard(); }
}
fn green_lock(&self, t: Box<Task>) {
// Green threads flag their presence with an atomic counter, and if they
// fail to be the first to the mutex, they enqueue themselves on a
// concurrent internal queue with a stack-allocated node.
//
// FIXME: There is currently no way to cancel an enqueue, which forces
// the unlocker to spin for a bit.
if self.green_cnt.fetch_add(1, atomic::SeqCst) == 0 {
Local::put(t);
return
}
let mut node = q::Node::new(0);
t.deschedule(1, |task| {
unsafe {
node.data = task.cast_to_uint();
self.q.push(&mut node);
}
Ok(())
});
}
fn green_unlock(&self) {
// If we're the only green thread, then no need to check the queue,
// otherwise the fixme above forces us to spin for a bit.
if self.green_cnt.fetch_sub(1, atomic::SeqCst) == 1 { return }
let node;
loop {
match unsafe { self.q.pop() } {
Some(t) => { node = t; break; }
None => Thread::yield_now(),
}
}
let task = unsafe { BlockedTask::cast_from_uint((*node).data) };
task.wake().map(|t| t.reawaken());
}
fn unlock(&self) {
// Unlocking this mutex is a little tricky. We favor any task that is
// manually blocked (not in each of the separate locks) in order to help
// provide a little fairness (green threads will wake up the pending
// native thread and native threads will wake up the pending green
// thread).
//
// There's also the question of when we unlock the actual green/native
// locking halves. If we're waking someone up, then we can wait to unlock
// until we've acquired the task to wake (we're guaranteed the mutex memory
// is still valid while there are contenders), but as soon as we don't find
// any contenders we must unlock the mutex, and *then* flag the mutex as
// unlocked.
//
// This flagging can fail, leading to another round of figuring out if a
// task needs to be woken, and in this case it's ok that the "mutex
// halves" are unlocked, we're just mainly dealing with the atomic state
// of the outer mutex.
let flavor = unsafe { mem::replace(&mut *self.flavor.get(), Unlocked) };
let mut state = self.state.load(atomic::SeqCst);
let mut unlocked = false;
let task;
loop {
assert!(state & LOCKED != 0);
if state & GREEN_BLOCKED != 0 {
self.unset(state, GREEN_BLOCKED);
task = unsafe {
*self.flavor.get() = GreenAcquisition;
let task = mem::replace(&mut *self.green_blocker.get(), 0);
BlockedTask::cast_from_uint(task)
};
break;
} else if state & NATIVE_BLOCKED != 0 {
self.unset(state, NATIVE_BLOCKED);
task = unsafe {
*self.flavor.get() = NativeAcquisition;
let task = mem::replace(&mut *self.native_blocker.get(), 0);
BlockedTask::cast_from_uint(task)
};
break;
} else {
assert_eq!(state, LOCKED);
if !unlocked {
match flavor {
GreenAcquisition => { self.green_unlock(); }
NativeAcquisition => { self.native_unlock(); }
TryLockAcquisition => {}
Unlocked => unreachable!(),
}
unlocked = true;
}
match self.state.compare_and_swap(LOCKED, 0, atomic::SeqCst) {
LOCKED => return,
n => { state = n; }
}
}
}
if !unlocked {
match flavor {
GreenAcquisition => { self.green_unlock(); }
NativeAcquisition => { self.native_unlock(); }
TryLockAcquisition => {}
Unlocked => unreachable!(),
}
}
task.wake().map(|t| t.reawaken());
}
/// Loops around a CAS to unset the `bit` in `state`
fn unset(&self, mut state: uint, bit: uint) {
loop {
assert!(state & bit != 0);
let new = state ^ bit;
match self.state.compare_and_swap(state, new, atomic::SeqCst) {
n if n == state => break,
n => { state = n; }
}
}
lift_guard(unsafe { self.lock.lock() })
}
/// Deallocates resources associated with this static mutex.
@ -463,12 +120,6 @@ impl Mutex {
pub fn new() -> Mutex {
Mutex {
lock: box StaticMutex {
state: atomic::AtomicUint::new(0),
flavor: UnsafeCell::new(Unlocked),
green_blocker: UnsafeCell::new(0),
native_blocker: UnsafeCell::new(0),
green_cnt: atomic::AtomicUint::new(0),
q: q::Queue::new(),
lock: unsafe { mutex::StaticNativeMutex::new() },
}
}
@ -494,25 +145,6 @@ impl Mutex {
pub fn lock<'a>(&'a self) -> Guard<'a> { self.lock.lock() }
}
impl<'a> Guard<'a> {
fn new<'b>(lock: &'b StaticMutex) -> Guard<'b> {
if cfg!(debug) {
// once we've acquired a lock, it's ok to access the flavor
assert!(unsafe { *lock.flavor.get() != Unlocked });
assert!(lock.state.load(atomic::SeqCst) & LOCKED != 0);
}
Guard { lock: lock }
}
}
#[unsafe_destructor]
impl<'a> Drop for Guard<'a> {
#[inline]
fn drop(&mut self) {
self.lock.unlock();
}
}
impl Drop for Mutex {
fn drop(&mut self) {
// This is actually safe b/c we know that there is no further usage of

View File

@ -22,10 +22,10 @@ use util::small_vector::SmallVector;
use std::mem;
pub fn maybe_inject_crates_ref(krate: ast::Crate, alt_std_name: Option<String>, any_exe: bool)
pub fn maybe_inject_crates_ref(krate: ast::Crate, alt_std_name: Option<String>)
-> ast::Crate {
if use_std(&krate) {
inject_crates_ref(krate, alt_std_name, any_exe)
inject_crates_ref(krate, alt_std_name)
} else {
krate
}
@ -43,17 +43,12 @@ fn use_std(krate: &ast::Crate) -> bool {
!attr::contains_name(krate.attrs.as_slice(), "no_std")
}
fn use_start(krate: &ast::Crate) -> bool {
!attr::contains_name(krate.attrs.as_slice(), "no_start")
}
fn no_prelude(attrs: &[ast::Attribute]) -> bool {
attr::contains_name(attrs, "no_implicit_prelude")
}
struct StandardLibraryInjector<'a> {
alt_std_name: Option<String>,
any_exe: bool,
}
impl<'a> fold::Folder for StandardLibraryInjector<'a> {
@ -80,23 +75,6 @@ impl<'a> fold::Folder for StandardLibraryInjector<'a> {
span: DUMMY_SP
});
if use_start(&krate) && self.any_exe {
let visible_rt_name = "rt";
let actual_rt_name = "native";
// Gensym the ident so it can't be named
let visible_rt_name = token::gensym_ident(visible_rt_name);
let actual_rt_name = token::intern_and_get_ident(actual_rt_name);
vis.push(ast::ViewItem {
node: ast::ViewItemExternCrate(visible_rt_name,
Some((actual_rt_name, ast::CookedStr)),
ast::DUMMY_NODE_ID),
attrs: Vec::new(),
vis: ast::Inherited,
span: DUMMY_SP
});
}
// `extern crate` must precede `use` items
mem::swap(&mut vis, &mut krate.module.view_items);
krate.module.view_items.extend(vis.into_iter());
@ -118,12 +96,9 @@ impl<'a> fold::Folder for StandardLibraryInjector<'a> {
}
}
fn inject_crates_ref(krate: ast::Crate,
alt_std_name: Option<String>,
any_exe: bool) -> ast::Crate {
fn inject_crates_ref(krate: ast::Crate, alt_std_name: Option<String>) -> ast::Crate {
let mut fold = StandardLibraryInjector {
alt_std_name: alt_std_name,
any_exe: any_exe,
};
fold.fold_crate(krate)
}

View File

@ -1,41 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![no_start]
extern crate green;
use std::task::spawn;
use std::os;
use std::uint;
// Very simple spawn rate test. Spawn N tasks that do nothing and
// return.
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, green::basic::event_loop, main)
}
fn main() {
let args = os::args();
let args = args.as_slice();
let n = if args.len() == 2 {
from_str::<uint>(args[1].as_slice()).unwrap()
} else {
100000
};
for _ in range(0, n) {
spawn(proc() {});
}
}

View File

@ -1,25 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This is (hopefully) a quick test to get a good idea about spawning
// performance in libgreen.
extern crate green;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, green::basic::event_loop, main)
}
fn main() {
for _ in range(1u32, 100_000) {
spawn(proc() {})
}
}

View File

@ -3,7 +3,6 @@
#![feature(globs)]
#[phase(plugin, link)]
extern crate "std" as std;
extern crate "native" as rt;
#[prelude_import]
use std::prelude::*;
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT

View File

@ -1,21 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android (FIXME #11419)
// error-pattern:explicit panic
extern crate native;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
native::start(argc, argv, proc() {
panic!();
})
}

View File

@ -11,11 +11,11 @@
#![crate_name="boot"]
#![crate_type="dylib"]
extern crate native;
use std::rt;
#[no_mangle] // this needs to get called from C
pub extern "C" fn foo(argc: int, argv: *const *const u8) -> int {
native::start(argc, argv, proc() {
rt::start(argc, argv, proc() {
spawn(proc() {
println!("hello");
});

View File

@ -10,18 +10,11 @@
// no-pretty-expanded FIXME #15189
// ignore-windows FIXME #13259
extern crate native;
use std::os;
use std::io::process::Command;
use std::finally::Finally;
use std::str;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
native::start(argc, argv, main)
}
#[inline(never)]
fn foo() {
let _v = vec![1i, 2, 3];
@ -64,7 +57,9 @@ fn runtest(me: &str) {
let out = p.wait_with_output().unwrap();
assert!(!out.status.success());
let s = str::from_utf8(out.error.as_slice()).unwrap();
assert!(s.contains("stack backtrace") && s.contains("double::h"),
// loosened the following from double::h to double:: due to
// spurious failures on Mac (32-bit, optimized)
assert!(s.contains("stack backtrace") && s.contains("double::"),
"bad output3: {}", s);
// Make sure a stack trace isn't printed too many times

View File

@ -15,7 +15,6 @@
#[phase(plugin, link)]
extern crate log;
extern crate native;
use log::{set_logger, Logger, LogRecord};
use std::fmt;
@ -30,13 +29,6 @@ impl Logger for MyWriter {
}
}
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
native::start(argc, argv, proc() {
main();
})
}
fn main() {
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));

View File

@ -9,9 +9,10 @@
// except according to those terms.
extern crate libc;
extern crate rustrt;
use std::mem;
use std::rt::thread::Thread;
use rustrt::thread::Thread;
#[link(name = "rust_test_helpers")]
extern {

View File

@ -8,17 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate native;
use std::io::timer;
use std::time::Duration;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
native::start(argc, argv, main)
}
fn main() {
timer::sleep(Duration::milliseconds(250));
}

View File

@ -8,24 +8,10 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate green;
static mut DROP: int = 0i;
static mut DROP_S: int = 0i;
static mut DROP_T: int = 0i;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
let ret = green::start(argc, argv, green::basic::event_loop, main);
unsafe {
assert_eq!(2, DROP);
assert_eq!(1, DROP_S);
assert_eq!(1, DROP_T);
}
ret
}
struct S;
impl Drop for S {
fn drop(&mut self) {
@ -48,7 +34,7 @@ impl Drop for T {
}
fn g(ref _t: T) {}
fn main() {
fn do_test() {
let s = S;
f(s);
unsafe {
@ -59,3 +45,12 @@ fn main() {
g(t);
unsafe { assert_eq!(1, DROP_T); }
}
fn main() {
do_test();
unsafe {
assert_eq!(2, DROP);
assert_eq!(1, DROP_S);
assert_eq!(1, DROP_T);
}
}

View File

@ -8,9 +8,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate rustrt;
pub fn main() {
unsafe {
let x = Some(::std::rt::exclusive::Exclusive::new(true));
let x = Some(::rustrt::exclusive::Exclusive::new(true));
match x {
Some(ref z) if *z.lock() => {
assert!(*z.lock());

View File

@ -1,28 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android (FIXME #11419)
extern crate native;
static mut set: bool = false;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
// make sure that native::start always waits for all children to finish
native::start(argc, argv, proc() {
spawn(proc() {
unsafe { set = true; }
});
});
// if we didn't set the global, then return a nonzero code
if unsafe {set} {0} else {1}
}

View File

@ -36,9 +36,12 @@ fn main() {
let args = os::args();
let args = args.as_slice();
if args.len() > 1 && args[1].as_slice() == "recurse" {
let (tx, rx) = channel();
spawn(proc() {
recurse();
tx.send(());
});
rx.recv();
} else {
let recurse = Command::new(args[0].as_slice()).arg("recurse").output().unwrap();
assert!(!recurse.status.success());

View File

@ -16,8 +16,6 @@
// non-ASCII characters. The child process ensures all the strings are
// intact.
extern crate native;
use std::io;
use std::io::fs;
use std::io::Command;

View File

@ -8,12 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate native;
extern crate rustrt;
use std::io::process::{Command, ProcessOutput};
use std::os;
use std::str;
use std::rt::unwind::try;
use std::rt;
use rustrt::unwind::try;
local_data_key!(foo: int)
@ -36,7 +38,7 @@ fn start(argc: int, argv: *const *const u8) -> int {
return 0
}
native::start(argc, argv, main)
rt::start(argc, argv, main)
}
fn main() {

View File

@ -8,6 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate rustrt;
struct Point {x: int, y: int, z: int}
@ -15,7 +16,7 @@ fn f(p: &mut Point) { p.z = 13; }
pub fn main() {
unsafe {
let x = Some(::std::rt::exclusive::Exclusive::new(true));
let x = Some(::rustrt::exclusive::Exclusive::new(true));
match x {
Some(ref z) if *z.lock() => {
assert!(*z.lock());