auto merge of #8387 : brson/rust/nooldrt, r=brson

commit e81e81f234

mk/rt.mk
@@ -66,30 +66,21 @@ RUNTIME_CXXS_$(1)_$(2) := \
rt/sync/timer.cpp \
rt/sync/lock_and_signal.cpp \
rt/sync/rust_thread.cpp \
rt/rust.cpp \
rt/rust_builtin.cpp \
rt/rust_run_program.cpp \
rt/rust_env.cpp \
rt/rust_rng.cpp \
rt/rust_sched_loop.cpp \
rt/rust_sched_launcher.cpp \
rt/rust_sched_driver.cpp \
rt/rust_scheduler.cpp \
rt/rust_sched_reaper.cpp \
rt/rust_task.cpp \
rt/rust_stack.cpp \
rt/rust_upcall.cpp \
rt/rust_uv.cpp \
rt/rust_crate_map.cpp \
rt/rust_log.cpp \
rt/rust_gc_metadata.cpp \
rt/rust_util.cpp \
rt/rust_log.cpp \
rt/rust_exchange_alloc.cpp \
rt/isaac/randport.cpp \
rt/miniz.cpp \
rt/rust_kernel.cpp \
rt/rust_abi.cpp \
rt/rust_debug.cpp \
rt/memory_region.cpp \
rt/boxed_region.cpp \
rt/arch/$$(HOST_$(1))/context.cpp \

@@ -596,14 +596,14 @@ mod tests {
        let (c,p) = (Cell::new(c), Cell::new(p));
        do task::spawn || {
            // wait until parent gets in
            comm::recv_one(p.take());
            p.take().recv();
            do arc2.access_cond |state, cond| {
                *state = true;
                cond.signal();
            }
        }
        do arc.access_cond |state, cond| {
            comm::send_one(c.take(), ());
            c.take().send(());
            assert!(!*state);
            while !*state {
                cond.wait();

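The hunk above shows the migration this commit applies throughout the tree: the free functions `comm::send_one`/`comm::recv_one` become `send`/`recv` methods on the endpoints themselves, while `Cell` is still used to move each one-shot endpoint into the closure that consumes it. A minimal standalone sketch of the pattern in the period's syntax (an illustration, not part of the patch):

    use std::cell::Cell;
    use std::comm::oneshot;
    use std::task;

    fn main() {
        let (port, chan) = oneshot();
        // Cell lets the once-usable ChanOne be captured by the spawned closure.
        let chan = Cell::new(chan);
        do task::spawn {
            // take() moves the endpoint out of the Cell; send(self) consumes it.
            chan.take().send(42);
        }
        assert_eq!(port.recv(), 42);
    }
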
@@ -28,7 +28,7 @@

use std::cast;
use std::cell::Cell;
use std::comm::{PortOne, oneshot, send_one, recv_one};
use std::comm::{PortOne, oneshot};
use std::task;
use std::util::replace;

@@ -123,7 +123,7 @@ pub fn from_port<A:Send>(port: PortOne<A>) -> Future<A> {

    let port = Cell::new(port);
    do from_fn {
        recv_one(port.take())
        port.take().recv()
    }
}

@@ -152,7 +152,7 @@ pub fn spawn<A:Send>(blk: ~fn() -> A) -> Future<A> {
    let chan = Cell::new(chan);
    do task::spawn {
        let chan = chan.take();
        send_one(chan, blk());
        chan.send(blk());
    }

    return from_port(port);

@@ -163,7 +163,7 @@ mod test {
    use future::*;

    use std::cell::Cell;
    use std::comm::{oneshot, send_one};
    use std::comm::oneshot;
    use std::task;

    #[test]

@@ -175,7 +175,7 @@ mod test {
    #[test]
    fn test_from_port() {
        let (po, ch) = oneshot();
        send_one(ch, ~"whale");
        ch.send(~"whale");
        let mut f = from_port(po);
        assert_eq!(f.get(), ~"whale");
    }

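For reference, the API change driving these test updates, distilled into one function (a sketch built from the hunks above, not code taken verbatim from the tree):

    use std::comm::oneshot;

    // Old std::comm consumed endpoints through free functions:
    //     send_one(ch, ~"whale");
    //     let s = recv_one(po);
    // The new std::comm exposes the same operations as methods:
    fn demo() -> ~str {
        let (po, ch) = oneshot();
        ch.send(~"whale");
        po.recv()
    }
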
@@ -19,6 +19,7 @@
use std::borrow;
use std::comm;
use std::comm::SendDeferred;
use std::comm::{GenericPort, Peekable};
use std::task;
use std::unstable::sync::{Exclusive, UnsafeAtomicRcBox};
use std::unstable::atomics;

@@ -111,7 +112,7 @@ impl<Q:Send> Sem<Q> {
            /* do 1000.times { task::yield(); } */
            // Need to wait outside the exclusive.
            if waiter_nobe.is_some() {
                let _ = comm::recv_one(waiter_nobe.unwrap());
                let _ = waiter_nobe.unwrap().recv();
            }
        }
    }

@@ -235,7 +236,7 @@ impl<'self> Condvar<'self> {
        do (|| {
            unsafe {
                do task::rekillable {
                    let _ = comm::recv_one(WaitEnd.take_unwrap());
                    let _ = WaitEnd.take_unwrap().recv();
                }
            }
        }).finally {

@@ -14,7 +14,7 @@
/// parallelism.

use std::comm::Chan;
use std::comm::{Chan, GenericChan, GenericPort};
use std::comm;
use std::task::SchedMode;
use std::task;

@@ -29,7 +29,7 @@ use time::precise_time_ns;
use treemap::TreeMap;

use std::clone::Clone;
use std::comm::{stream, SharedChan};
use std::comm::{stream, SharedChan, GenericPort, GenericChan};
use std::libc;
use std::either;
use std::io;

@@ -18,7 +18,7 @@ use arc::{Arc,RWArc};
use treemap::TreeMap;

use std::cell::Cell;
use std::comm::{PortOne, oneshot, send_one, recv_one};
use std::comm::{PortOne, oneshot};
use std::either::{Either, Left, Right};
use std::io;
use std::run;

@@ -331,7 +331,7 @@ impl<'self> Prep<'self> {
            };
            let chan = chan.take();
            let v = blk(&exe);
            send_one(chan, (exe, v));
            chan.send((exe, v));
        }
        Right(port)
    }

@@ -355,7 +355,7 @@ impl<'self, T:Send +
            None => fail!(),
            Some(Left(v)) => v,
            Some(Right(port)) => {
                let (exe, v) = recv_one(port);
                let (exe, v) = port.recv();
                let s = json_encode(&v);
                do prep.ctxt.db.write |db| {
                    db.cache(prep.fn_name,

@@ -275,24 +275,11 @@ pub mod raw {
    }

    fn local_realloc(ptr: *(), size: uint) -> *() {
        use rt;
        use rt::OldTaskContext;
        use rt::local::Local;
        use rt::task::Task;

        if rt::context() == OldTaskContext {
            unsafe {
                return rust_local_realloc(ptr, size as libc::size_t);
            }

            extern {
                #[fast_ffi]
                fn rust_local_realloc(ptr: *(), size: libc::size_t) -> *();
            }
        } else {
            do Local::borrow::<Task, *()> |task| {
                task.heap.realloc(ptr as *libc::c_void, size) as *()
            }
        do Local::borrow::<Task, *()> |task| {
            task.heap.realloc(ptr as *libc::c_void, size) as *()
        }
        }
    }

@@ -56,13 +56,8 @@ unsafe fn each_live_alloc(read_next_before: bool,

#[cfg(unix)]
fn debug_mem() -> bool {
    use rt;
    use rt::OldTaskContext;
    // XXX: Need to port the environment struct to newsched
    match rt::context() {
        OldTaskContext => ::rt::env::get().debug_mem,
        _ => false
    }
    false
}

#[cfg(windows)]

@@ -147,15 +142,3 @@ pub unsafe fn annihilate() {
        dbg.write_str("\n");
    }
}

/// Bindings to the runtime
pub mod rustrt {
    use libc::c_void;

    #[link_name = "rustrt"]
    extern {
        #[rust_stack]
        // FIXME (#4386): Unable to make following method private.
        pub fn rust_get_task() -> *c_void;
    }
}

@@ -14,13 +14,11 @@ Message passing

#[allow(missing_doc)];

use either::{Either, Left, Right};
use clone::Clone;
use kinds::Send;
use option::{Option, Some};
use unstable::sync::Exclusive;
use option::Option;
pub use rt::comm::SendDeferred;
use rtcomm = rt::comm;
use rt;

/// A trait for things that can send multiple messages.
pub trait GenericChan<T> {

@@ -52,614 +50,146 @@ pub trait Peekable<T> {
    fn peek(&self) -> bool;
}

/// An endpoint that can send many messages.
pub struct Chan<T> {
    inner: Either<pipesy::Chan<T>, rtcomm::Chan<T>>
pub struct PortOne<T> { x: rtcomm::PortOne<T> }
pub struct ChanOne<T> { x: rtcomm::ChanOne<T> }

pub fn oneshot<T: Send>() -> (PortOne<T>, ChanOne<T>) {
    let (p, c) = rtcomm::oneshot();
    (PortOne { x: p }, ChanOne { x: c })
}

/// An endpoint that can receive many messages.
pub struct Port<T> {
    inner: Either<pipesy::Port<T>, rtcomm::Port<T>>
pub struct Port<T> { x: rtcomm::Port<T> }
pub struct Chan<T> { x: rtcomm::Chan<T> }

pub fn stream<T: Send>() -> (Port<T>, Chan<T>) {
    let (p, c) = rtcomm::stream();
    (Port { x: p }, Chan { x: c })
}

/** Creates a `(Port, Chan)` pair.
pub struct SharedChan<T> { x: rtcomm::SharedChan<T> }

These allow sending or receiving an unlimited number of messages.
impl<T: Send> SharedChan<T> {
    pub fn new(c: Chan<T>) -> SharedChan<T> {
        let Chan { x: c } = c;
        SharedChan { x: rtcomm::SharedChan::new(c) }
    }
}

*/
pub fn stream<T:Send>() -> (Port<T>, Chan<T>) {
    let (port, chan) = match rt::context() {
        rt::OldTaskContext => match pipesy::stream() {
            (p, c) => (Left(p), Left(c))
        },
        _ => match rtcomm::stream() {
            (p, c) => (Right(p), Right(c))
        }
    };
    let port = Port { inner: port };
    let chan = Chan { inner: chan };
    return (port, chan);
impl<T: Send> ChanOne<T> {
    pub fn send(self, val: T) {
        let ChanOne { x: c } = self;
        c.send(val)
    }

    pub fn try_send(self, val: T) -> bool {
        let ChanOne { x: c } = self;
        c.try_send(val)
    }

    pub fn send_deferred(self, val: T) {
        let ChanOne { x: c } = self;
        c.send_deferred(val)
    }

    pub fn try_send_deferred(self, val: T) -> bool {
        let ChanOne{ x: c } = self;
        c.try_send_deferred(val)
    }
}

impl<T: Send> PortOne<T> {
    pub fn recv(self) -> T {
        let PortOne { x: p } = self;
        p.recv()
    }

    pub fn try_recv(self) -> Option<T> {
        let PortOne { x: p } = self;
        p.try_recv()
    }
}

impl<T: Send> Peekable<T> for PortOne<T> {
    fn peek(&self) -> bool {
        let &PortOne { x: ref p } = self;
        p.peek()
    }
}

impl<T: Send> GenericChan<T> for Chan<T> {
    fn send(&self, x: T) {
        match self.inner {
            Left(ref chan) => chan.send(x),
            Right(ref chan) => chan.send(x)
        }
    fn send(&self, val: T) {
        let &Chan { x: ref c } = self;
        c.send(val)
    }
}

impl<T: Send> GenericSmartChan<T> for Chan<T> {
    fn try_send(&self, x: T) -> bool {
        match self.inner {
            Left(ref chan) => chan.try_send(x),
            Right(ref chan) => chan.try_send(x)
        }
    fn try_send(&self, val: T) -> bool {
        let &Chan { x: ref c } = self;
        c.try_send(val)
    }
}

impl<T: Send> SendDeferred<T> for Chan<T> {
    fn send_deferred(&self, x: T) {
        match self.inner {
            Left(ref chan) => chan.send(x),
            Right(ref chan) => chan.send_deferred(x)
        }
    fn send_deferred(&self, val: T) {
        let &Chan { x: ref c } = self;
        c.send_deferred(val)
    }
    fn try_send_deferred(&self, x: T) -> bool {
        match self.inner {
            Left(ref chan) => chan.try_send(x),
            Right(ref chan) => chan.try_send_deferred(x)
        }

    fn try_send_deferred(&self, val: T) -> bool {
        let &Chan { x: ref c } = self;
        c.try_send_deferred(val)
    }
}

impl<T: Send> GenericPort<T> for Port<T> {
    fn recv(&self) -> T {
        match self.inner {
            Left(ref port) => port.recv(),
            Right(ref port) => port.recv()
        }
        let &Port { x: ref p } = self;
        p.recv()
    }

    fn try_recv(&self) -> Option<T> {
        match self.inner {
            Left(ref port) => port.try_recv(),
            Right(ref port) => port.try_recv()
        }
        let &Port { x: ref p } = self;
        p.try_recv()
    }
}

impl<T: Send> Peekable<T> for Port<T> {
    fn peek(&self) -> bool {
        match self.inner {
            Left(ref port) => port.peek(),
            Right(ref port) => port.peek()
        }
    }
}

/// A channel that can be shared between many senders.
pub struct SharedChan<T> {
    inner: Either<Exclusive<pipesy::Chan<T>>, rtcomm::SharedChan<T>>
}

impl<T: Send> SharedChan<T> {
    /// Converts a `chan` into a `shared_chan`.
    pub fn new(c: Chan<T>) -> SharedChan<T> {
        let Chan { inner } = c;
        let c = match inner {
            Left(c) => Left(Exclusive::new(c)),
            Right(c) => Right(rtcomm::SharedChan::new(c))
        };
        SharedChan { inner: c }
        let &Port { x: ref p } = self;
        p.peek()
    }
}

impl<T: Send> GenericChan<T> for SharedChan<T> {
    fn send(&self, x: T) {
        match self.inner {
            Left(ref chan) => {
                unsafe {
                    let mut xx = Some(x);
                    do chan.with_imm |chan| {
                        chan.send(xx.take_unwrap())
                    }
                }
            }
            Right(ref chan) => chan.send(x)
        }
    fn send(&self, val: T) {
        let &SharedChan { x: ref c } = self;
        c.send(val)
    }
}

impl<T: Send> GenericSmartChan<T> for SharedChan<T> {
    fn try_send(&self, x: T) -> bool {
        match self.inner {
            Left(ref chan) => {
                unsafe {
                    let mut xx = Some(x);
                    do chan.with_imm |chan| {
                        chan.try_send(xx.take_unwrap())
                    }
                }
            }
            Right(ref chan) => chan.try_send(x)
        }
    fn try_send(&self, val: T) -> bool {
        let &SharedChan { x: ref c } = self;
        c.try_send(val)
    }
}

impl<T: Send> ::clone::Clone for SharedChan<T> {
impl<T: Send> SendDeferred<T> for SharedChan<T> {
    fn send_deferred(&self, val: T) {
        let &SharedChan { x: ref c } = self;
        c.send_deferred(val)
    }

    fn try_send_deferred(&self, val: T) -> bool {
        let &SharedChan { x: ref c } = self;
        c.try_send_deferred(val)
    }
}

impl<T> Clone for SharedChan<T> {
    fn clone(&self) -> SharedChan<T> {
        SharedChan { inner: self.inner.clone() }
    }
}

pub struct PortOne<T> {
    inner: Either<pipesy::PortOne<T>, rtcomm::PortOne<T>>
}

pub struct ChanOne<T> {
    inner: Either<pipesy::ChanOne<T>, rtcomm::ChanOne<T>>
}

pub fn oneshot<T: Send>() -> (PortOne<T>, ChanOne<T>) {
    let (port, chan) = match rt::context() {
        rt::OldTaskContext => match pipesy::oneshot() {
            (p, c) => (Left(p), Left(c)),
        },
        _ => match rtcomm::oneshot() {
            (p, c) => (Right(p), Right(c))
        }
    };
    let port = PortOne { inner: port };
    let chan = ChanOne { inner: chan };
    return (port, chan);
}

impl<T: Send> PortOne<T> {
    pub fn recv(self) -> T {
        let PortOne { inner } = self;
        match inner {
            Left(p) => p.recv(),
            Right(p) => p.recv()
        }
    }

    pub fn try_recv(self) -> Option<T> {
        let PortOne { inner } = self;
        match inner {
            Left(p) => p.try_recv(),
            Right(p) => p.try_recv()
        }
    }
}

impl<T: Send> ChanOne<T> {
    pub fn send(self, data: T) {
        let ChanOne { inner } = self;
        match inner {
            Left(p) => p.send(data),
            Right(p) => p.send(data)
        }
    }

    pub fn try_send(self, data: T) -> bool {
        let ChanOne { inner } = self;
        match inner {
            Left(p) => p.try_send(data),
            Right(p) => p.try_send(data)
        }
    }
    pub fn send_deferred(self, data: T) {
        let ChanOne { inner } = self;
        match inner {
            Left(p) => p.send(data),
            Right(p) => p.send_deferred(data)
        }
    }
    pub fn try_send_deferred(self, data: T) -> bool {
        let ChanOne { inner } = self;
        match inner {
            Left(p) => p.try_send(data),
            Right(p) => p.try_send_deferred(data)
        }
    }
}

pub fn recv_one<T: Send>(port: PortOne<T>) -> T {
    let PortOne { inner } = port;
    match inner {
        Left(p) => pipesy::recv_one(p),
        Right(p) => p.recv()
    }
}

pub fn try_recv_one<T: Send>(port: PortOne<T>) -> Option<T> {
    let PortOne { inner } = port;
    match inner {
        Left(p) => pipesy::try_recv_one(p),
        Right(p) => p.try_recv()
    }
}

pub fn send_one<T: Send>(chan: ChanOne<T>, data: T) {
    let ChanOne { inner } = chan;
    match inner {
        Left(c) => pipesy::send_one(c, data),
        Right(c) => c.send(data)
    }
}

pub fn try_send_one<T: Send>(chan: ChanOne<T>, data: T) -> bool {
    let ChanOne { inner } = chan;
    match inner {
        Left(c) => pipesy::try_send_one(c, data),
        Right(c) => c.try_send(data)
    }
}

mod pipesy {

    use kinds::Send;
    use option::{Option, Some, None};
    use pipes::{recv, try_recv, peek};
    use super::{GenericChan, GenericSmartChan, GenericPort, Peekable};
    use cast::transmute_mut;

    /*proto! oneshot (
        Oneshot:send<T:Send> {
            send(T) -> !
        }
    )*/

    #[allow(non_camel_case_types)]
    pub mod oneshot {
        use std::kinds::Send;
        use ptr::to_mut_unsafe_ptr;

        pub fn init<T: Send>() -> (server::Oneshot<T>, client::Oneshot<T>) {
            pub use std::pipes::HasBuffer;

            let buffer = ~::std::pipes::Buffer {
                header: ::std::pipes::BufferHeader(),
                data: __Buffer {
                    Oneshot: ::std::pipes::mk_packet::<Oneshot<T>>()
                },
            };
            do ::std::pipes::entangle_buffer(buffer) |buffer, data| {
                data.Oneshot.set_buffer(buffer);
                to_mut_unsafe_ptr(&mut data.Oneshot)
            }
        }
        #[allow(non_camel_case_types)]
        pub enum Oneshot<T> { pub send(T), }
        #[allow(non_camel_case_types)]
        pub struct __Buffer<T> {
            Oneshot: ::std::pipes::Packet<Oneshot<T>>,
        }

        #[allow(non_camel_case_types)]
        pub mod client {

            use std::kinds::Send;

            #[allow(non_camel_case_types)]
            pub fn try_send<T: Send>(pipe: Oneshot<T>, x_0: T) ->
                ::std::option::Option<()> {
                {
                    use super::send;
                    let message = send(x_0);
                    if ::std::pipes::send(pipe, message) {
                        ::std::pipes::rt::make_some(())
                    } else { ::std::pipes::rt::make_none() }
                }
            }

            #[allow(non_camel_case_types)]
            pub fn send<T: Send>(pipe: Oneshot<T>, x_0: T) {
                {
                    use super::send;
                    let message = send(x_0);
                    ::std::pipes::send(pipe, message);
                }
            }

            #[allow(non_camel_case_types)]
            pub type Oneshot<T> =
                ::std::pipes::SendPacketBuffered<super::Oneshot<T>,
                                                 super::__Buffer<T>>;
        }

        #[allow(non_camel_case_types)]
        pub mod server {
            #[allow(non_camel_case_types)]
            pub type Oneshot<T> =
                ::std::pipes::RecvPacketBuffered<super::Oneshot<T>,
                                                 super::__Buffer<T>>;
        }
    }

    /// The send end of a oneshot pipe.
    pub struct ChanOne<T> {
        contents: oneshot::client::Oneshot<T>
    }

    impl<T> ChanOne<T> {
        pub fn new(contents: oneshot::client::Oneshot<T>) -> ChanOne<T> {
            ChanOne {
                contents: contents
            }
        }
    }

    /// The receive end of a oneshot pipe.
    pub struct PortOne<T> {
        contents: oneshot::server::Oneshot<T>
    }

    impl<T> PortOne<T> {
        pub fn new(contents: oneshot::server::Oneshot<T>) -> PortOne<T> {
            PortOne {
                contents: contents
            }
        }
    }

    /// Initialiase a (send-endpoint, recv-endpoint) oneshot pipe pair.
    pub fn oneshot<T: Send>() -> (PortOne<T>, ChanOne<T>) {
        let (port, chan) = oneshot::init();
        (PortOne::new(port), ChanOne::new(chan))
    }

    impl<T: Send> PortOne<T> {
        pub fn recv(self) -> T { recv_one(self) }
        pub fn try_recv(self) -> Option<T> { try_recv_one(self) }
        pub fn unwrap(self) -> oneshot::server::Oneshot<T> {
            match self {
                PortOne { contents: s } => s
            }
        }
    }

    impl<T: Send> ChanOne<T> {
        pub fn send(self, data: T) { send_one(self, data) }
        pub fn try_send(self, data: T) -> bool { try_send_one(self, data) }
        pub fn unwrap(self) -> oneshot::client::Oneshot<T> {
            match self {
                ChanOne { contents: s } => s
            }
        }
    }

    /**
     * Receive a message from a oneshot pipe, failing if the connection was
     * closed.
     */
    pub fn recv_one<T: Send>(port: PortOne<T>) -> T {
        match port {
            PortOne { contents: port } => {
                let oneshot::send(message) = recv(port);
                message
            }
        }
    }

    /// Receive a message from a oneshot pipe unless the connection was closed.
    pub fn try_recv_one<T: Send> (port: PortOne<T>) -> Option<T> {
        match port {
            PortOne { contents: port } => {
                let message = try_recv(port);

                if message.is_none() {
                    None
                } else {
                    let oneshot::send(message) = message.unwrap();
                    Some(message)
                }
            }
        }
    }

    /// Send a message on a oneshot pipe, failing if the connection was closed.
    pub fn send_one<T: Send>(chan: ChanOne<T>, data: T) {
        match chan {
            ChanOne { contents: chan } => oneshot::client::send(chan, data),
        }
    }

    /**
     * Send a message on a oneshot pipe, or return false if the connection was
     * closed.
     */
    pub fn try_send_one<T: Send>(chan: ChanOne<T>, data: T) -> bool {
        match chan {
            ChanOne { contents: chan } => {
                oneshot::client::try_send(chan, data).is_some()
            }
        }
    }

    // Streams - Make pipes a little easier in general.

    /*proto! streamp (
        Open:send<T: Send> {
            data(T) -> Open<T>
        }
    )*/

    #[allow(non_camel_case_types)]
    pub mod streamp {
        use std::kinds::Send;

        pub fn init<T: Send>() -> (server::Open<T>, client::Open<T>) {
            pub use std::pipes::HasBuffer;
            ::std::pipes::entangle()
        }

        #[allow(non_camel_case_types)]
        pub enum Open<T> { pub data(T, server::Open<T>), }

        #[allow(non_camel_case_types)]
        pub mod client {
            use std::kinds::Send;

            #[allow(non_camel_case_types)]
            pub fn try_data<T: Send>(pipe: Open<T>, x_0: T) ->
                ::std::option::Option<Open<T>> {
                {
                    use super::data;
                    let (s, c) = ::std::pipes::entangle();
                    let message = data(x_0, s);
                    if ::std::pipes::send(pipe, message) {
                        ::std::pipes::rt::make_some(c)
                    } else { ::std::pipes::rt::make_none() }
                }
            }

            #[allow(non_camel_case_types)]
            pub fn data<T: Send>(pipe: Open<T>, x_0: T) -> Open<T> {
                {
                    use super::data;
                    let (s, c) = ::std::pipes::entangle();
                    let message = data(x_0, s);
                    ::std::pipes::send(pipe, message);
                    c
                }
            }

            #[allow(non_camel_case_types)]
            pub type Open<T> = ::std::pipes::SendPacket<super::Open<T>>;
        }

        #[allow(non_camel_case_types)]
        pub mod server {
            #[allow(non_camel_case_types)]
            pub type Open<T> = ::std::pipes::RecvPacket<super::Open<T>>;
        }
    }

    /// An endpoint that can send many messages.
    #[unsafe_mut_field(endp)]
    pub struct Chan<T> {
        endp: Option<streamp::client::Open<T>>
    }

    /// An endpoint that can receive many messages.
    #[unsafe_mut_field(endp)]
    pub struct Port<T> {
        endp: Option<streamp::server::Open<T>>,
    }

    /** Creates a `(Port, Chan)` pair.

    These allow sending or receiving an unlimited number of messages.

    */
    pub fn stream<T:Send>() -> (Port<T>, Chan<T>) {
        let (s, c) = streamp::init();

        (Port {
            endp: Some(s)
        }, Chan {
            endp: Some(c)
        })
    }

    impl<T: Send> GenericChan<T> for Chan<T> {
        #[inline]
        fn send(&self, x: T) {
            unsafe {
                let self_endp = transmute_mut(&self.endp);
                *self_endp = Some(streamp::client::data(self_endp.take_unwrap(), x))
            }
        }
    }

    impl<T: Send> GenericSmartChan<T> for Chan<T> {
        #[inline]
        fn try_send(&self, x: T) -> bool {
            unsafe {
                let self_endp = transmute_mut(&self.endp);
                match streamp::client::try_data(self_endp.take_unwrap(), x) {
                    Some(next) => {
                        *self_endp = Some(next);
                        true
                    }
                    None => false
                }
            }
        }
    }

    impl<T: Send> GenericPort<T> for Port<T> {
        #[inline]
        fn recv(&self) -> T {
            unsafe {
                let self_endp = transmute_mut(&self.endp);
                let endp = self_endp.take();
                let streamp::data(x, endp) = recv(endp.unwrap());
                *self_endp = Some(endp);
                x
            }
        }

        #[inline]
        fn try_recv(&self) -> Option<T> {
            unsafe {
                let self_endp = transmute_mut(&self.endp);
                let endp = self_endp.take();
                match try_recv(endp.unwrap()) {
                    Some(streamp::data(x, endp)) => {
                        *self_endp = Some(endp);
                        Some(x)
                    }
                    None => None
                }
            }
        }
    }

    impl<T: Send> Peekable<T> for Port<T> {
        #[inline]
        fn peek(&self) -> bool {
            unsafe {
                let self_endp = transmute_mut(&self.endp);
                let mut endp = self_endp.take();
                let peek = match endp {
                    Some(ref mut endp) => peek(endp),
                    None => fail!("peeking empty stream")
                };
                *self_endp = endp;
                peek
            }
        }
    }

}

#[cfg(test)]
mod test {
    use either::Right;
    use super::{Chan, Port, oneshot, stream};

    #[test]
    fn test_oneshot() {
        let (p, c) = oneshot();

        c.send(());

        p.recv()
    }

    #[test]
    fn test_peek_terminated() {
        let (port, chan): (Port<int>, Chan<int>) = stream();

        {
            // Destroy the channel
            let _chan = chan;
        }

        assert!(!port.peek());
        let &SharedChan { x: ref c } = self;
        SharedChan { x: c.clone() }
    }
}

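The rewritten comm.rs above replaces the `Either<pipesy, rtcomm>` dispatch with single-field wrapper structs that forward every call to the new runtime's endpoints. The shape of that delegation, distilled (`InnerChan` is a hypothetical stand-in for `rtcomm::Chan`):

    // Hypothetical inner endpoint standing in for rtcomm::Chan<T>.
    struct InnerChan<T> { payload: T }

    impl<T> InnerChan<T> {
        fn send(&self, _val: T) { /* hand the value to the runtime */ }
    }

    // The public type is a one-field wrapper; each method destructures
    // `self` and forwards, exactly as the new comm.rs does above.
    pub struct Chan<T> { x: InnerChan<T> }

    impl<T> Chan<T> {
        pub fn send(&self, val: T) {
            let &Chan { x: ref c } = self;
            c.send(val)
        }
    }
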
@@ -14,18 +14,11 @@ use option::*;
use os;
use either::*;
use rt;
use rt::OldTaskContext;
use rt::logging::{Logger, StdErrLogger};

/// Turns on logging to stdout globally
pub fn console_on() {
    if rt::context() == OldTaskContext {
        unsafe {
            rustrt::rust_log_console_on();
        }
    } else {
        rt::logging::console_on();
    }
    rt::logging::console_on();
}

/**

@@ -41,45 +34,24 @@ pub fn console_off() {
        return;
    }

    if rt::context() == OldTaskContext {
        unsafe {
            rustrt::rust_log_console_off();
        }
    } else {
        rt::logging::console_off();
    }
    rt::logging::console_off();
}

#[cfg(not(test))]
#[lang="log_type"]
#[allow(missing_doc)]
pub fn log_type<T>(level: u32, object: &T) {
    use cast;
    use container::Container;
pub fn log_type<T>(_level: u32, object: &T) {
    use io;
    use libc;
    use repr;
    use rt;
    use str;
    use vec;

    let bytes = do io::with_bytes_writer |writer| {
        repr::write_repr(writer, object);
    };

    match rt::context() {
        rt::OldTaskContext => {
            unsafe {
                let len = bytes.len() as libc::size_t;
                rustrt::rust_log_str(level, cast::transmute(vec::raw::to_ptr(bytes)), len);
            }
        }
        _ => {
            // XXX: Bad allocation
            let msg = str::from_bytes(bytes);
            newsched_log_str(msg);
        }
    }
    // XXX: Bad allocation
    let msg = str::from_bytes(bytes);
    newsched_log_str(msg);
}

fn newsched_log_str(msg: ~str) {

@@ -100,15 +72,3 @@ fn newsched_log_str(msg: ~str) {
        }
    }
}

pub mod rustrt {
    use libc;

    extern {
        pub fn rust_log_console_on();
        pub fn rust_log_console_off();
        pub fn rust_log_str(level: u32,
                            string: *libc::c_char,
                            size: libc::size_t);
    }
}

@@ -61,11 +61,8 @@ pub mod rustrt {
    use libc;

    extern {
        pub fn rust_get_argc() -> c_int;
        pub fn rust_get_argv() -> **c_char;
        pub fn rust_path_is_dir(path: *libc::c_char) -> c_int;
        pub fn rust_path_exists(path: *libc::c_char) -> c_int;
        pub fn rust_set_exit_status(code: libc::intptr_t);
    }
}

@@ -1104,15 +1101,7 @@ pub fn last_os_error() -> ~str {
 */
pub fn set_exit_status(code: int) {
    use rt;
    use rt::OldTaskContext;

    if rt::context() == OldTaskContext {
        unsafe {
            rustrt::rust_set_exit_status(code as libc::intptr_t);
        }
    } else {
        rt::util::set_exit_status(code);
    }
    rt::util::set_exit_status(code);
}

unsafe fn load_argc_and_argv(argc: c_int, argv: **c_char) -> ~[~str] {

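Several hunks in this commit share one shape: a `rt::context()` test that chose between the old C++ runtime and the new scheduler loses its old-runtime arm. Schematically, with stub functions standing in for the real runtime calls (an illustration, not the patched code):

    #[deriving(Eq)]
    enum Context { OldTaskContext, NewRtContext }

    // Stub for rt::context(); the real one inspected task-local state.
    fn context() -> Context { NewRtContext }

    fn old_rt_set_exit_status(_code: int) { /* was an unsafe FFI call */ }
    fn new_rt_set_exit_status(_code: int) { /* rt::util::set_exit_status */ }

    // Before: dispatch on the active runtime.
    fn set_exit_status_before(code: int) {
        match context() {
            OldTaskContext => old_rt_set_exit_status(code),
            NewRtContext => new_rt_set_exit_status(code)
        }
    }

    // After: the old-runtime arm is deleted and the call goes straight through.
    fn set_exit_status_after(code: int) {
        new_rt_set_exit_status(code);
    }
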
@@ -1142,19 +1131,10 @@ pub fn real_args() -> ~[~str] {
#[cfg(target_os = "freebsd")]
pub fn real_args() -> ~[~str] {
    use rt;
    use rt::NewRtContext;

    if rt::context() == NewRtContext {
        match rt::args::clone() {
            Some(args) => args,
            None => fail!("process arguments not initialized")
        }
    } else {
        unsafe {
            let argc = rustrt::rust_get_argc();
            let argv = rustrt::rust_get_argv();
            load_argc_and_argv(argc, argv)
        }
    match rt::args::clone() {
        Some(args) => args,
        None => fail!("process arguments not initialized")
    }
}

@@ -1,870 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*! Runtime support for message passing with protocol enforcement.

Pipes consist of two endpoints. One endpoint can send messages and
the other can receive messages. The set of legal messages and which
directions they can flow at any given point are determined by a
protocol. Below is an example protocol.

~~~ {.rust}
proto! pingpong (
    ping: send {
        ping -> pong
    }
    pong: recv {
        pong -> ping
    }
)
~~~

The `proto!` syntax extension will convert this into a module called
`pingpong`, which includes a set of types and functions that can be
used to write programs that follow the pingpong protocol.

*/

/* IMPLEMENTATION NOTES

The initial design for this feature is available at:

https://github.com/eholk/rust/wiki/Proposal-for-channel-contracts

Much of the design in that document is still accurate. There are
several components for the pipe implementation. First of all is the
syntax extension. To see how that works, it is best see comments in
libsyntax/ext/pipes.rs.

This module includes two related pieces of the runtime
implementation: support for unbounded and bounded
protocols. The main difference between the two is the type of the
buffer that is carried along in the endpoint data structures.

The heart of the implementation is the packet type. It contains a
header and a payload field. Much of the code in this module deals with
the header field. This is where the synchronization information is
stored. In the case of a bounded protocol, the header also includes a
pointer to the buffer the packet is contained in.

Packets represent a single message in a protocol. The payload field
gets instatiated at the type of the message, which is usually an enum
generated by the pipe compiler. Packets are conceptually single use,
although in bounded protocols they are reused each time around the
loop.

Packets are usually handled through a send_packet_buffered or
recv_packet_buffered object. Each packet is referenced by one
send_packet and one recv_packet, and these wrappers enforce that only
one end can send and only one end can receive. The structs also
include a destructor that marks packets are terminated if the sender
or receiver destroys the object before sending or receiving a value.

The *_packet_buffered structs take two type parameters. The first is
the message type for the current packet (or state). The second
represents the type of the whole buffer. For bounded protocols, the
protocol compiler generates a struct with a field for each protocol
state. This generated struct is used as the buffer type parameter. For
unbounded protocols, the buffer is simply one packet, so there is a
shorthand struct called send_packet and recv_packet, where the buffer
type is just `packet<T>`. Using the same underlying structure for both
bounded and unbounded protocols allows for less code duplication.

*/

#[allow(missing_doc)];

use container::Container;
use cast::{forget, transmute, transmute_copy, transmute_mut};
use either::{Either, Left, Right};
use iterator::{Iterator, IteratorUtil};
use kinds::Send;
use libc;
use ops::Drop;
use option::{None, Option, Some};
use unstable::finally::Finally;
use unstable::intrinsics;
use ptr;
use ptr::RawPtr;
use task;
use vec::{OwnedVector, MutableVector};
use util::replace;

static SPIN_COUNT: uint = 0;

#[deriving(Eq)]
enum State {
    Empty,
    Full,
    Blocked,
    Terminated
}

pub struct BufferHeader {
    // Tracks whether this buffer needs to be freed. We can probably
    // get away with restricting it to 0 or 1, if we're careful.
    ref_count: int,

    // We may want a drop, and to be careful about stringing this
    // thing along.
}

pub fn BufferHeader() -> BufferHeader {
    BufferHeader {
        ref_count: 0
    }
}

// This is for protocols to associate extra data to thread around.
pub struct Buffer<T> {
    header: BufferHeader,
    data: T,
}

pub struct PacketHeader {
    state: State,
    blocked_task: *rust_task,

    // This is a transmute_copy of a ~buffer, that can also be cast
    // to a buffer_header if need be.
    buffer: *libc::c_void,
}

pub fn PacketHeader() -> PacketHeader {
    PacketHeader {
        state: Empty,
        blocked_task: ptr::null(),
        buffer: ptr::null()
    }
}

impl PacketHeader {
    // Returns the old state.
    pub unsafe fn mark_blocked(&mut self, this: *rust_task) -> State {
        rustrt::rust_task_ref(this);
        let old_task = swap_task(&mut self.blocked_task, this);
        assert!(old_task.is_null());
        swap_state_acq(&mut self.state, Blocked)
    }

    pub unsafe fn unblock(&mut self) {
        let old_task = swap_task(&mut self.blocked_task, ptr::null());
        if !old_task.is_null() {
            rustrt::rust_task_deref(old_task)
        }
        match swap_state_acq(&mut self.state, Empty) {
            Empty | Blocked => (),
            Terminated => self.state = Terminated,
            Full => self.state = Full
        }
    }

    // unsafe because this can do weird things to the space/time
    // continuum. It ends making multiple unique pointers to the same
    // thing. You'll probably want to forget them when you're done.
    pub unsafe fn buf_header(&mut self) -> ~BufferHeader {
        assert!(self.buffer.is_not_null());
        transmute_copy(&self.buffer)
    }

    pub fn set_buffer<T:Send>(&mut self, b: ~Buffer<T>) {
        unsafe {
            self.buffer = transmute_copy(&b);
        }
    }
}

pub struct Packet<T> {
    header: PacketHeader,
    payload: Option<T>,
}

pub trait HasBuffer {
    fn set_buffer(&mut self, b: *libc::c_void);
}

impl<T:Send> HasBuffer for Packet<T> {
    fn set_buffer(&mut self, b: *libc::c_void) {
        self.header.buffer = b;
    }
}

pub fn mk_packet<T:Send>() -> Packet<T> {
    Packet {
        header: PacketHeader(),
        payload: None,
    }
}
fn unibuffer<T>() -> ~Buffer<Packet<T>> {
    let mut b = ~Buffer {
        header: BufferHeader(),
        data: Packet {
            header: PacketHeader(),
            payload: None,
        }
    };

    unsafe {
        b.data.header.buffer = transmute_copy(&b);
    }
    b
}

pub fn packet<T>() -> *mut Packet<T> {
    let mut b = unibuffer();
    let p = ptr::to_mut_unsafe_ptr(&mut b.data);
    // We'll take over memory management from here.
    unsafe {
        forget(b);
    }
    p
}

pub fn entangle_buffer<T:Send,Tstart:Send>(
    mut buffer: ~Buffer<T>,
    init: &fn(*libc::c_void, x: &mut T) -> *mut Packet<Tstart>)
    -> (RecvPacketBuffered<Tstart, T>, SendPacketBuffered<Tstart, T>) {
    unsafe {
        let p = init(transmute_copy(&buffer), &mut buffer.data);
        forget(buffer);
        (RecvPacketBuffered(p), SendPacketBuffered(p))
    }
}

pub fn swap_task(dst: &mut *rust_task, src: *rust_task) -> *rust_task {
    // It might be worth making both acquire and release versions of
    // this.
    unsafe {
        transmute(intrinsics::atomic_xchg(transmute(dst), src as int))
    }
}

#[allow(non_camel_case_types)]
pub type rust_task = libc::c_void;

pub mod rustrt {
    use libc;
    use super::rust_task;

    extern {
        #[rust_stack]
        pub fn rust_get_task() -> *rust_task;
        #[rust_stack]
        pub fn rust_task_ref(task: *rust_task);
        pub fn rust_task_deref(task: *rust_task);

        #[rust_stack]
        pub fn task_clear_event_reject(task: *rust_task);

        pub fn task_wait_event(this: *rust_task, killed: &mut *libc::c_void)
                               -> bool;
        pub fn task_signal_event(target: *rust_task, event: *libc::c_void);
    }
}

fn wait_event(this: *rust_task) -> *libc::c_void {
    unsafe {
        let mut event = ptr::null();

        let killed = rustrt::task_wait_event(this, &mut event);
        if killed && !task::failing() {
            fail!("killed")
        }
        event
    }
}

fn swap_state_acq(dst: &mut State, src: State) -> State {
    unsafe {
        transmute(intrinsics::atomic_xchg_acq(transmute(dst), src as int))
    }
}

fn swap_state_rel(dst: &mut State, src: State) -> State {
    unsafe {
        transmute(intrinsics::atomic_xchg_rel(transmute(dst), src as int))
    }
}

pub unsafe fn get_buffer<T>(p: *mut PacketHeader) -> ~Buffer<T> {
    transmute((*p).buf_header())
}

// This could probably be done with SharedMutableState to avoid move_it!().
struct BufferResource<T> {
    buffer: ~Buffer<T>,

}

#[unsafe_destructor]
impl<T> Drop for BufferResource<T> {
    fn drop(&self) {
        unsafe {
            // FIXME(#4330) Need self by value to get mutability.
            let this: &mut BufferResource<T> = transmute_mut(self);

            let null_buffer: ~Buffer<T> = transmute(ptr::null::<Buffer<T>>());
            let mut b = replace(&mut this.buffer, null_buffer);

            //let p = ptr::to_unsafe_ptr(*b);
            //error!("drop %?", p);
            let old_count = intrinsics::atomic_xsub_rel(
                &mut b.header.ref_count,
                1);
            //let old_count = atomic_xchng_rel(b.header.ref_count, 0);
            if old_count == 1 {
                // The new count is 0.

                // go go gadget drop glue
            }
            else {
                forget(b)
            }
        }
    }
}

fn BufferResource<T>(mut b: ~Buffer<T>) -> BufferResource<T> {
    //let p = ptr::to_unsafe_ptr(*b);
    //error!("take %?", p);
    unsafe {
        intrinsics::atomic_xadd_acq(&mut b.header.ref_count, 1);
    }

    BufferResource {
        // tjc: ????
        buffer: b
    }
}

pub fn send<T,Tbuffer>(mut p: SendPacketBuffered<T,Tbuffer>,
                       payload: T)
                       -> bool {
    let header = p.header();
    let p_ = p.unwrap();
    let p = unsafe { &mut *p_ };
    assert_eq!(ptr::to_unsafe_ptr(&(p.header)), header);
    assert!(p.payload.is_none());
    p.payload = Some(payload);
    let old_state = swap_state_rel(&mut p.header.state, Full);
    match old_state {
        Empty => {
            // Yay, fastpath.

            // The receiver will eventually clean this up.
            //unsafe { forget(p); }
            return true;
        }
        Full => fail!("duplicate send"),
        Blocked => {
            debug!("waking up task for %?", p_);
            let old_task = swap_task(&mut p.header.blocked_task, ptr::null());
            if !old_task.is_null() {
                unsafe {
                    rustrt::task_signal_event(
                        old_task,
                        ptr::to_unsafe_ptr(&(p.header)) as *libc::c_void);
                    rustrt::rust_task_deref(old_task);
                }
            }

            // The receiver will eventually clean this up.
            //unsafe { forget(p); }
            return true;
        }
        Terminated => {
            // The receiver will never receive this. Rely on drop_glue
            // to clean everything up.
            return false;
        }
    }
}

/** Receives a message from a pipe.

Fails if the sender closes the connection.

*/
pub fn recv<T:Send,Tbuffer:Send>(
    p: RecvPacketBuffered<T, Tbuffer>) -> T {
    try_recv(p).expect("connection closed")
}

/** Attempts to receive a message from a pipe.

Returns `None` if the sender has closed the connection without sending
a message, or `Some(T)` if a message was received.

*/
pub fn try_recv<T:Send,Tbuffer:Send>(mut p: RecvPacketBuffered<T, Tbuffer>)
                                     -> Option<T> {
    let p_ = p.unwrap();
    let p = unsafe { &mut *p_ };

    do (|| {
        try_recv_(p)
    }).finally {
        unsafe {
            if task::failing() {
                p.header.state = Terminated;
                let old_task = swap_task(&mut p.header.blocked_task, ptr::null());
                if !old_task.is_null() {
                    rustrt::rust_task_deref(old_task);
                }
            }
        }
    }
}

fn try_recv_<T:Send>(p: &mut Packet<T>) -> Option<T> {
    // optimistic path
    match p.header.state {
        Full => {
            let payload = p.payload.take();
            p.header.state = Empty;
            return Some(payload.unwrap())
        },
        Terminated => return None,
        _ => {}
    }

    // regular path
    let this = unsafe { rustrt::rust_get_task() };
    unsafe {
        rustrt::task_clear_event_reject(this);
        rustrt::rust_task_ref(this);
    };
    debug!("blocked = %x this = %x", p.header.blocked_task as uint,
           this as uint);
    let old_task = swap_task(&mut p.header.blocked_task, this);
    debug!("blocked = %x this = %x old_task = %x",
           p.header.blocked_task as uint,
           this as uint, old_task as uint);
    assert!(old_task.is_null());
    let mut first = true;
    let mut count = SPIN_COUNT;
    loop {
        unsafe {
            rustrt::task_clear_event_reject(this);
        }

        let old_state = swap_state_acq(&mut p.header.state,
                                       Blocked);
        match old_state {
            Empty => {
                debug!("no data available on %?, going to sleep.", p);
                if count == 0 {
                    wait_event(this);
                }
                else {
                    count -= 1;
                    // FIXME (#524): Putting the yield here destroys a lot
                    // of the benefit of spinning, since we still go into
                    // the scheduler at every iteration. However, without
                    // this everything spins too much because we end up
                    // sometimes blocking the thing we are waiting on.
                    task::yield();
                }
                debug!("woke up, p.state = %?", p.header.state);
            }
            Blocked => if first {
                fail!("blocking on already blocked packet")
            },
            Full => {
                let payload = p.payload.take();
                let old_task = swap_task(&mut p.header.blocked_task, ptr::null());
                if !old_task.is_null() {
                    unsafe {
                        rustrt::rust_task_deref(old_task);
                    }
                }
                p.header.state = Empty;
                return Some(payload.unwrap())
            }
            Terminated => {
                // This assert detects when we've accidentally unsafely
                // casted too big of a number to a state.
                assert_eq!(old_state, Terminated);

                let old_task = swap_task(&mut p.header.blocked_task, ptr::null());
                if !old_task.is_null() {
                    unsafe {
                        rustrt::rust_task_deref(old_task);
                    }
                }
                return None;
            }
        }
        first = false;
    }
}

/// Returns true if messages are available.
pub fn peek<T:Send,Tb:Send>(p: &mut RecvPacketBuffered<T, Tb>) -> bool {
    unsafe {
        match (*p.header()).state {
            Empty | Terminated => false,
            Blocked => fail!("peeking on blocked packet"),
            Full => true
        }
    }
}

fn sender_terminate<T:Send>(p: *mut Packet<T>) {
    let p = unsafe {
        &mut *p
    };
    match swap_state_rel(&mut p.header.state, Terminated) {
        Empty => {
            // The receiver will eventually clean up.
        }
        Blocked => {
            // wake up the target
            let old_task = swap_task(&mut p.header.blocked_task, ptr::null());
            if !old_task.is_null() {
                unsafe {
                    rustrt::task_signal_event(
                        old_task,
                        ptr::to_unsafe_ptr(&(p.header)) as *libc::c_void);
                    rustrt::rust_task_deref(old_task);
                }
            }
            // The receiver will eventually clean up.
        }
        Full => {
            // This is impossible
            fail!("you dun goofed")
        }
        Terminated => {
            assert!(p.header.blocked_task.is_null());
            // I have to clean up, use drop_glue
        }
    }
}

fn receiver_terminate<T:Send>(p: *mut Packet<T>) {
    let p = unsafe {
        &mut *p
    };
    match swap_state_rel(&mut p.header.state, Terminated) {
        Empty => {
            assert!(p.header.blocked_task.is_null());
            // the sender will clean up
        }
        Blocked => {
            let old_task = swap_task(&mut p.header.blocked_task, ptr::null());
            if !old_task.is_null() {
                unsafe {
                    rustrt::rust_task_deref(old_task);
                    assert_eq!(old_task, rustrt::rust_get_task());
                }
            }
        }
        Terminated | Full => {
            assert!(p.header.blocked_task.is_null());
            // I have to clean up, use drop_glue
        }
    }
}

/** Returns when one of the packet headers reports data is available.

This function is primarily intended for building higher level waiting
functions, such as `select`, `select2`, etc.

It takes a vector slice of packet_headers and returns an index into
that vector. The index points to an endpoint that has either been
closed by the sender or has a message waiting to be received.

*/
pub fn wait_many<T: Selectable>(pkts: &mut [T]) -> uint {
    let this = unsafe {
        rustrt::rust_get_task()
    };

    unsafe {
        rustrt::task_clear_event_reject(this);
    }

    let mut data_avail = false;
    let mut ready_packet = pkts.len();
    for (i, p) in pkts.mut_iter().enumerate() {
        unsafe {
            let p = &mut *p.header();
            let old = p.mark_blocked(this);
            match old {
                Full | Terminated => {
                    data_avail = true;
                    ready_packet = i;
                    (*p).state = old;
                    break;
                }
                Blocked => fail!("blocking on blocked packet"),
                Empty => ()
            }
        }
    }

    while !data_avail {
        debug!("sleeping on %? packets", pkts.len());
        let event = wait_event(this) as *PacketHeader;

        let mut pos = None;
        for (i, p) in pkts.mut_iter().enumerate() {
            if p.header() == event {
                pos = Some(i);
                break;
            }
        };

        match pos {
            Some(i) => {
                ready_packet = i;
                data_avail = true;
            }
            None => debug!("ignoring spurious event, %?", event)
        }
    }

    debug!("%?", &mut pkts[ready_packet]);

    for p in pkts.mut_iter() {
        unsafe {
            (*p.header()).unblock()
        }
    }

    debug!("%?, %?", ready_packet, &mut pkts[ready_packet]);

    unsafe {
        assert!((*pkts[ready_packet].header()).state == Full
                || (*pkts[ready_packet].header()).state == Terminated);
    }

    ready_packet
}

/** The sending end of a pipe. It can be used to send exactly one
message.

*/
pub type SendPacket<T> = SendPacketBuffered<T, Packet<T>>;

pub fn SendPacket<T>(p: *mut Packet<T>) -> SendPacket<T> {
    SendPacketBuffered(p)
}

pub struct SendPacketBuffered<T, Tbuffer> {
    p: Option<*mut Packet<T>>,
    buffer: Option<BufferResource<Tbuffer>>,
}

#[unsafe_destructor]
impl<T:Send,Tbuffer:Send> Drop for SendPacketBuffered<T,Tbuffer> {
    fn drop(&self) {
        unsafe {
            let this: &mut SendPacketBuffered<T,Tbuffer> = transmute(self);
            if this.p != None {
                sender_terminate(this.p.take_unwrap());
            }
        }
    }
}

pub fn SendPacketBuffered<T,Tbuffer>(p: *mut Packet<T>)
                                     -> SendPacketBuffered<T,Tbuffer> {
    SendPacketBuffered {
        p: Some(p),
        buffer: unsafe {
            Some(BufferResource(get_buffer(&mut (*p).header)))
        }
    }
}

impl<T,Tbuffer> SendPacketBuffered<T,Tbuffer> {
    pub fn unwrap(&mut self) -> *mut Packet<T> {
        self.p.take_unwrap()
    }

    pub fn header(&mut self) -> *mut PacketHeader {
        match self.p {
            Some(packet) => unsafe {
                let packet = &mut *packet;
                let header = ptr::to_mut_unsafe_ptr(&mut packet.header);
                header
            },
            None => fail!("packet already consumed")
        }
    }

    pub fn reuse_buffer(&mut self) -> BufferResource<Tbuffer> {
        //error!("send reuse_buffer");
        self.buffer.take_unwrap()
    }
}

/// Represents the receive end of a pipe. It can receive exactly one
/// message.
pub type RecvPacket<T> = RecvPacketBuffered<T, Packet<T>>;

pub fn RecvPacket<T>(p: *mut Packet<T>) -> RecvPacket<T> {
    RecvPacketBuffered(p)
}

pub struct RecvPacketBuffered<T, Tbuffer> {
    p: Option<*mut Packet<T>>,
    buffer: Option<BufferResource<Tbuffer>>,
}

#[unsafe_destructor]
impl<T:Send,Tbuffer:Send> Drop for RecvPacketBuffered<T,Tbuffer> {
    fn drop(&self) {
        unsafe {
            let this: &mut RecvPacketBuffered<T,Tbuffer> = transmute(self);
            if this.p != None {
                receiver_terminate(this.p.take_unwrap())
            }
        }
    }
}

impl<T:Send,Tbuffer:Send> RecvPacketBuffered<T, Tbuffer> {
    pub fn unwrap(&mut self) -> *mut Packet<T> {
        self.p.take_unwrap()
    }

    pub fn reuse_buffer(&mut self) -> BufferResource<Tbuffer> {
        self.buffer.take_unwrap()
    }
}

impl<T:Send,Tbuffer:Send> Selectable for RecvPacketBuffered<T, Tbuffer> {
    fn header(&mut self) -> *mut PacketHeader {
        match self.p {
            Some(packet) => unsafe {
                let packet = &mut *packet;
                let header = ptr::to_mut_unsafe_ptr(&mut packet.header);
                header
            },
            None => fail!("packet already consumed")
        }
    }
}

pub fn RecvPacketBuffered<T,Tbuffer>(p: *mut Packet<T>)
                                     -> RecvPacketBuffered<T,Tbuffer> {
    RecvPacketBuffered {
        p: Some(p),
        buffer: unsafe {
            Some(BufferResource(get_buffer(&mut (*p).header)))
        }
    }
}

pub fn entangle<T>() -> (RecvPacket<T>, SendPacket<T>) {
    let p = packet();
    (RecvPacket(p), SendPacket(p))
}

/** Receives a message from one of two endpoints.

The return value is `left` if the first endpoint received something,
or `right` if the second endpoint receives something. In each case,
the result includes the other endpoint as well so it can be used
again. Below is an example of using `select2`.

~~~ {.rust}
match select2(a, b) {
    left((none, b)) {
        // endpoint a was closed.
    }
    right((a, none)) {
        // endpoint b was closed.
    }
    left((Some(_), b)) {
        // endpoint a received a message
    }
    right(a, Some(_)) {
        // endpoint b received a message.
    }
}
~~~

Sometimes messages will be available on both endpoints at once. In
this case, `select2` may return either `left` or `right`.

*/
pub fn select2<A:Send,Ab:Send,B:Send,Bb:Send>(
    mut a: RecvPacketBuffered<A, Ab>,
    mut b: RecvPacketBuffered<B, Bb>)
    -> Either<(Option<A>, RecvPacketBuffered<B, Bb>),
              (RecvPacketBuffered<A, Ab>, Option<B>)> {
    let mut endpoints = [ a.header(), b.header() ];
    let i = wait_many(endpoints);
    match i {
        0 => Left((try_recv(a), b)),
        1 => Right((a, try_recv(b))),
        _ => fail!("select2 return an invalid packet")
    }
}

pub trait Selectable {
    fn header(&mut self) -> *mut PacketHeader;
}

impl Selectable for *mut PacketHeader {
    fn header(&mut self) -> *mut PacketHeader { *self }
}

/// Returns the index of an endpoint that is ready to receive.
pub fn selecti<T:Selectable>(endpoints: &mut [T]) -> uint {
    wait_many(endpoints)
}

/// Returns 0 or 1 depending on which endpoint is ready to receive
pub fn select2i<A:Selectable,B:Selectable>(a: &mut A, b: &mut B)
                                           -> Either<(), ()> {
    let mut endpoints = [ a.header(), b.header() ];
    match wait_many(endpoints) {
        0 => Left(()),
        1 => Right(()),
        _ => fail!("wait returned unexpected index")
    }
}

/// Waits on a set of endpoints. Returns a message, its index, and a
/// list of the remaining endpoints.
pub fn select<T:Send,Tb:Send>(mut endpoints: ~[RecvPacketBuffered<T, Tb>])
                              -> (uint,
                                  Option<T>,
                                  ~[RecvPacketBuffered<T, Tb>]) {
    let mut endpoint_headers = ~[];
    for endpoint in endpoints.mut_iter() {
        endpoint_headers.push(endpoint.header());
    }

    let ready = wait_many(endpoint_headers);
    let mut remaining = endpoints;
    let port = remaining.swap_remove(ready);
    let result = try_recv(port);
    (ready, result, remaining)
}

pub mod rt {
    use option::{None, Option, Some};

    // These are used to hide the option constructors from the
    // compiler because their names are changing
    pub fn make_some<T>(val: T) -> Option<T> { Some(val) }
    pub fn make_none<T>() -> Option<T> { None }
}

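The deleted pipes.rs coordinated every handoff through an atomically swapped four-state packet header. A compressed sketch of the sender's transition logic, with a plain enum and `util::replace` standing in for the packet header and the atomic `swap_state_rel` (an illustration, not the original code):

    use std::util::replace;

    #[deriving(Eq)]
    enum State { Empty, Full, Blocked, Terminated }

    // Mirrors the match in the deleted send(): publish Full, then act on
    // whatever state the receiver had left behind.
    fn send_transition(header: &mut State) -> bool {
        match replace(header, Full) {  // non-atomic stand-in for swap_state_rel
            Empty => true,             // fast path: receiver not waiting yet
            Blocked => true,           // receiver parked: signal its task
            Full => fail!("duplicate send"),
            Terminated => false        // receiver gone; sender cleans up
        }
    }

    fn main() {
        let mut st = Empty;
        assert!(send_transition(&mut st));
    }
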
@@ -9,7 +9,7 @@
// except according to those terms.

use cast::transmute;
use libc::{c_char, c_void, size_t, STDERR_FILENO};
use libc::{c_char, size_t, STDERR_FILENO};
use io;
use io::{Writer, WriterUtil};
use option::{Option, None, Some};

@ -20,9 +20,6 @@ use sys;
|
|||
use unstable::raw;
|
||||
use vec::ImmutableVector;
|
||||
|
||||
#[allow(non_camel_case_types)]
|
||||
type rust_task = c_void;
|
||||
|
||||
pub static FROZEN_BIT: uint = 1 << (uint::bits - 1);
|
||||
pub static MUT_BIT: uint = 1 << (uint::bits - 2);
|
||||
static ALL_BITS: uint = FROZEN_BIT | MUT_BIT;
|
||||
|
@ -35,34 +32,12 @@ struct BorrowRecord {
|
|||
}
|
||||
|
||||
fn try_take_task_borrow_list() -> Option<~[BorrowRecord]> {
|
||||
unsafe {
|
||||
let cur_task: *rust_task = rust_try_get_task();
|
||||
if cur_task.is_not_null() {
|
||||
let ptr = rust_take_task_borrow_list(cur_task);
|
||||
if ptr.is_null() {
|
||||
None
|
||||
} else {
|
||||
let v: ~[BorrowRecord] = transmute(ptr);
|
||||
Some(v)
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
// XXX
|
||||
None
|
||||
}
|
||||
|
||||
fn swap_task_borrow_list(f: &fn(~[BorrowRecord]) -> ~[BorrowRecord]) {
|
||||
unsafe {
|
||||
let cur_task: *rust_task = rust_try_get_task();
|
||||
if cur_task.is_not_null() {
|
||||
let mut borrow_list: ~[BorrowRecord] = {
|
||||
let ptr = rust_take_task_borrow_list(cur_task);
|
||||
if ptr.is_null() { ~[] } else { transmute(ptr) }
|
||||
};
|
||||
borrow_list = f(borrow_list);
|
||||
rust_set_task_borrow_list(cur_task, transmute(borrow_list));
|
||||
}
|
||||
}
|
||||
fn swap_task_borrow_list(_f: &fn(~[BorrowRecord]) -> ~[BorrowRecord]) {
|
||||
// XXX
|
||||
}
|
||||
|
||||
pub unsafe fn clear_task_borrow_list() {
|
||||
|
@ -113,7 +88,8 @@ unsafe fn debug_borrow<T>(tag: &'static str,
|
|||
//! A useful debugging function that prints a pointer + tag + newline
|
||||
//! without allocating memory.
|
||||
|
||||
if ENABLE_DEBUG && ::rt::env::get().debug_borrow {
|
||||
// XXX
|
||||
if false {
|
||||
debug_borrow_slow(tag, p, old_bits, new_bits, filename, line);
|
||||
}
|
||||
|
||||
|
@ -269,15 +245,3 @@ pub unsafe fn check_not_borrowed(a: *u8,
|
|||
fail_borrowed(a, file, line);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
extern {
|
||||
#[rust_stack]
|
||||
pub fn rust_take_task_borrow_list(task: *rust_task) -> *c_void;
|
||||
|
||||
#[rust_stack]
|
||||
pub fn rust_set_task_borrow_list(task: *rust_task, map: *c_void);
|
||||
|
||||
#[rust_stack]
|
||||
pub fn rust_try_get_task() -> *rust_task;
|
||||
}

@ -11,50 +11,9 @@
//! Runtime environment settings

use from_str::FromStr;
use libc::{size_t, c_char, c_int};
use option::{Some, None};
use os;

// OLD RT stuff

pub struct Environment {
    /// The number of threads to use by default
    num_sched_threads: size_t,
    /// The minimum size of a stack segment
    min_stack_size: size_t,
    /// The maximum amount of total stack per task before aborting
    max_stack_size: size_t,
    /// The default logging configuration
    logspec: *c_char,
    /// Record and report detailed information about memory leaks
    detailed_leaks: bool,
    /// Seed the random number generator
    rust_seed: *c_char,
    /// Poison allocations on free
    poison_on_free: bool,
    /// The argc value passed to main
    argc: c_int,
    /// The argv value passed to main
    argv: **c_char,
    /// Print GC debugging info (true if env var RUST_DEBUG_MEM is set)
    debug_mem: bool,
    /// Print borrow-check debugging info (true if env var RUST_DEBUG_BORROW is set)
    debug_borrow: bool,
}

/// Get the global environment settings
/// # Safety Note
/// This will abort the process if run outside of task context
pub fn get() -> &Environment {
    unsafe { rust_get_rt_env() }
}

extern {
    fn rust_get_rt_env() -> &Environment;
}
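
For illustration only (a sketch, not part of the commit): callers in the
old runtime read these settings through `get()`, e.g.

~~~ {.rust}
// Inspect the old runtime's configuration from task context.
let env = ::rt::env::get();
if env.debug_borrow {
    // RUST_DEBUG_BORROW was set; emit extra borrow diagnostics.
}
let threads = env.num_sched_threads;
~~~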

// NEW RT stuff

// Note that these are all accessed without any synchronization.
// They are expected to be initialized once then left alone.

@ -8,7 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use prelude::*;
use option::Option;
use comm::{GenericPort, GenericChan};
use super::{Reader, Writer};

struct PortReader<P>;
@ -13,9 +13,6 @@
use libc;
use libc::{c_void, uintptr_t, size_t};
use ops::Drop;
use option::{Some, None};
use rt;
use rt::OldTaskContext;
use rt::local::Local;
use rt::task::Task;
use unstable::raw;

@ -87,32 +84,14 @@ impl Drop for LocalHeap {

// A little compatibility function
pub unsafe fn local_free(ptr: *libc::c_char) {
    // XXX: Unsafe borrow for speed. Lame.
    match Local::try_unsafe_borrow::<Task>() {
        Some(task) => {
            (*task).heap.free(ptr as *libc::c_void);
        }
        None => {
            rust_upcall_free_noswitch(ptr);

            extern {
                #[fast_ffi]
                fn rust_upcall_free_noswitch(ptr: *libc::c_char);
            }
        }
    do Local::borrow::<Task,()> |task| {
        task.heap.free(ptr as *libc::c_void);
    }
}

pub fn live_allocs() -> *raw::Box<()> {
    let region = match rt::context() {
        OldTaskContext => {
            unsafe { rust_current_boxed_region() }
        }
        _ => {
            do Local::borrow::<Task, *BoxedRegion> |task| {
                task.heap.boxed_region
            }
        }
    let region = do Local::borrow::<Task, *BoxedRegion> |task| {
        task.heap.boxed_region
    };

    return unsafe { (*region).live_allocs };

@ -140,8 +119,6 @@ extern {
                          size: size_t) -> *OpaqueBox;
    #[fast_ffi]
    fn rust_boxed_region_free(region: *BoxedRegion, box: *OpaqueBox);
    #[fast_ffi]
    fn rust_current_boxed_region() -> *BoxedRegion;
}

#[cfg(test)]
@ -120,7 +120,7 @@ mod context;
/// Bindings to system threading libraries.
mod thread;

/// The runtime configuration, read from environment variables
/// The runtime configuration, read from environment variables.
pub mod env;

/// The local, managed heap

@ -401,35 +401,6 @@ fn run_(main: ~fn(), use_main_sched: bool) -> int {
    }
}

/// Possible contexts in which Rust code may be executing.
/// Different runtime services are available depending on context.
/// Mostly used for determining if we're using the new scheduler
/// or the old scheduler.
#[deriving(Eq)]
pub enum RuntimeContext {
    // Running in an old-style task
    OldTaskContext,
    // Not old task context
    NewRtContext
}

/// Determine the current RuntimeContext
pub fn context() -> RuntimeContext {

    use task::rt::rust_task;

    if unsafe { rust_try_get_task().is_not_null() } {
        return OldTaskContext;
    } else {
        return NewRtContext;
    }

    extern {
        #[rust_stack]
        pub fn rust_try_get_task() -> *rust_task;
    }
}
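
A sketch of the dispatch pattern this enum supports, which is the shape
used throughout the transition code in this commit (the two helper
functions are hypothetical):

~~~ {.rust}
match context() {
    OldTaskContext => {
        // Still on the C++ runtime; go through upcalls.
        do_it_the_old_way()
    }
    NewRtContext => {
        // Running on the new Rust scheduler.
        do_it_the_new_way()
    }
}
~~~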

pub fn in_sched_context() -> bool {
    unsafe {
        match Local::try_unsafe_borrow::<Task>() {

@ -456,4 +427,4 @@ pub fn in_green_task_context() -> bool {
            None => false
        }
    }
}
}
@ -515,8 +515,8 @@ mod test {

    do run_in_newsched_task {
        let (port, chan) = oneshot();
        send_one(chan, 10);
        assert!(recv_one(port) == 10);
        chan.send(10);
        assert!(port.recv() == 10);
    }
}
@ -448,13 +448,6 @@ pub unsafe fn get_base_from_buf(buf: uv_buf_t) -> *u8 {
pub unsafe fn get_len_from_buf(buf: uv_buf_t) -> size_t {
    return rust_uv_get_len_from_buf(buf);
}
pub unsafe fn malloc_buf_base_of(suggested_size: size_t) -> *u8 {
    return rust_uv_malloc_buf_base_of(suggested_size);
}
pub unsafe fn free_base_of_buf(buf: uv_buf_t) {
    rust_uv_free_base_of_buf(buf);
}

pub unsafe fn get_last_err_info(uv_loop: *c_void) -> ~str {
    let err = last_error(uv_loop);
    let err_ptr = ptr::to_unsafe_ptr(&err);

@ -558,8 +551,6 @@ extern {
                      repeat: libc::uint64_t) -> c_int;
    fn rust_uv_timer_stop(handle: *uv_timer_t) -> c_int;

    fn rust_uv_malloc_buf_base_of(sug_size: size_t) -> *u8;
    fn rust_uv_free_base_of_buf(buf: uv_buf_t);
    fn rust_uv_get_stream_handle_from_connect_req(connect_req: *uv_connect_t) -> *uv_stream_t;
    fn rust_uv_get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t;
    fn rust_uv_get_loop_for_uv_handle(handle: *c_void) -> *c_void;

@ -164,7 +164,6 @@ pub mod trie;

pub mod task;
pub mod comm;
pub mod pipes;
pub mod local_data;

@ -213,7 +212,6 @@ mod std {
    pub use kinds;
    pub use local_data;
    pub use sys;
    pub use pipes;
    pub use unstable;
    pub use str;
    pub use os;

@ -21,15 +21,6 @@ use str::StrSlice;
use str;
use unstable::intrinsics;

pub mod rustrt {
    use libc::{c_char, size_t};

    extern {
        #[rust_stack]
        pub fn rust_upcall_fail(expr: *c_char, file: *c_char, line: size_t);
    }
}

/// Returns the size of a type
#[inline]
pub fn size_of<T>() -> uint {

@ -136,55 +127,44 @@ impl FailWithCause for &'static str {
pub fn begin_unwind_(msg: *c_char, file: *c_char, line: size_t) -> ! {
    use either::Left;
    use option::{Some, None};
    use rt::{context, OldTaskContext, in_green_task_context};
    use rt::in_green_task_context;
    use rt::task::Task;
    use rt::local::Local;
    use rt::logging::Logger;
    use str::Str;

    let context = context();
    match context {
        OldTaskContext => {
            unsafe {
                rustrt::rust_upcall_fail(msg, file, line);
                cast::transmute(())
    unsafe {
        // XXX: Bad re-allocations. fail! needs some refactoring
        let msg = str::raw::from_c_str(msg);
        let file = str::raw::from_c_str(file);

        // XXX: Logging doesn't work correctly in non-task context because it
        // invokes the local heap
        if in_green_task_context() {
            // XXX: Logging doesn't work here - the check to call the log
            // function never passes - so calling the log function directly.
            do Local::borrow::<Task, ()> |task| {
                let msg = match task.name {
                    Some(ref name) =>
                        fmt!("task '%s' failed at '%s', %s:%i",
                             name.as_slice(), msg, file, line as int),
                    None =>
                        fmt!("task <unnamed> failed at '%s', %s:%i",
                             msg, file, line as int)
                };

                task.logger.log(Left(msg));
            }
        } else {
            rterrln!("failed in non-task context at '%s', %s:%i",
                     msg, file, line as int);
        }
        _ => {
            unsafe {
                // XXX: Bad re-allocations. fail! needs some refactoring
                let msg = str::raw::from_c_str(msg);
                let file = str::raw::from_c_str(file);

                // XXX: Logging doesn't work correctly in non-task context because it
                // invokes the local heap
                if in_green_task_context() {
                    // XXX: Logging doesn't work here - the check to call the log
                    // function never passes - so calling the log function directly.
                    do Local::borrow::<Task, ()> |task| {
                        let msg = match task.name {
                            Some(ref name) =>
                                fmt!("task '%s' failed at '%s', %s:%i",
                                     name.as_slice(), msg, file, line as int),
                            None =>
                                fmt!("task <unnamed> failed at '%s', %s:%i",
                                     msg, file, line as int)
                        };

                        task.logger.log(Left(msg));
                    }
                } else {
                    rterrln!("failed in non-task context at '%s', %s:%i",
                             msg, file, line as int);
                }

                let task = Local::unsafe_borrow::<Task>();
                if (*task).unwinder.unwinding {
                    rtabort!("unwinding again");
                }
                (*task).unwinder.begin_unwind();
            }
        let task = Local::unsafe_borrow::<Task>();
        if (*task).unwinder.unwinding {
            rtabort!("unwinding again");
        }
        (*task).unwinder.begin_unwind();
    }
}

@ -15,32 +15,21 @@ use libc;
use local_data;
use prelude::*;
use ptr;
use task::rt;
use unstable::raw;
use util;

use super::rt::rust_task;
use rt::task::{Task, LocalStorage};

pub enum Handle {
    OldHandle(*rust_task),
    NewHandle(*mut LocalStorage)
}

impl Handle {
    pub fn new() -> Handle {
        use rt::{context, OldTaskContext};
        use rt::local::Local;
        unsafe {
            match context() {
                OldTaskContext => {
                    OldHandle(rt::rust_get_task())
                }
                _ => {
                    let task = Local::unsafe_borrow::<Task>();
                    NewHandle(&mut (*task).storage)
                }
            }
            let task = Local::unsafe_borrow::<Task>();
            NewHandle(&mut (*task).storage)
        }
    }
}

@ -109,26 +98,6 @@ fn cleanup_task_local_map(map_ptr: *libc::c_void) {
// Gets the map from the runtime. Lazily initialises if not done so already.
unsafe fn get_local_map(handle: Handle) -> &mut TaskLocalMap {

    unsafe fn oldsched_map(task: *rust_task) -> &mut TaskLocalMap {
        extern fn cleanup_extern_cb(map_ptr: *libc::c_void) {
            cleanup_task_local_map(map_ptr);
        }

        // Relies on the runtime initialising the pointer to null.
        // Note: the map is an owned pointer and is "owned" by TLS. It is moved
        // into the tls slot for this task, and then mutable loans are taken
        // from this slot to modify the map.
        let map_ptr = rt::rust_get_task_local_data(task);
        if (*map_ptr).is_null() {
            // First time TLS is used, create a new map and set up the necessary
            // TLS information for its safe destruction
            let map: TaskLocalMap = ~[];
            *map_ptr = cast::transmute(map);
            rt::rust_task_local_data_atexit(task, cleanup_extern_cb);
        }
        return cast::transmute(map_ptr);
    }

    unsafe fn newsched_map(local: *mut LocalStorage) -> &mut TaskLocalMap {
        // This is based on the same idea as the oldsched code above.
        match &mut *local {

@ -152,7 +121,6 @@ unsafe fn get_local_map(handle: Handle) -> &mut TaskLocalMap {
    }

    match handle {
        OldHandle(task) => oldsched_map(task),
        NewHandle(local_storage) => newsched_map(local_storage)
    }
}
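
The map itself is private; user code reaches it through the public
`local_data` API. A minimal sketch, assuming the era's key/set/get
interface:

~~~ {.rust}
static greeting_key: local_data::Key<~str> = &local_data::Key;

local_data::set(greeting_key, ~"hello");
do local_data::get(greeting_key) |value| {
    // `value` is Some in this task, None in any other task.
    assert!(value.is_some());
}
~~~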

@ -42,7 +42,7 @@ use cmp::Eq;
use comm::{stream, Chan, GenericChan, GenericPort, Port};
use result::Result;
use result;
use rt::{context, OldTaskContext, in_green_task_context};
use rt::in_green_task_context;
use rt::local::Local;
use unstable::finally::Finally;
use util;

@ -54,7 +54,6 @@ use util;
#[cfg(test)] use task;

mod local_data_priv;
pub mod rt;
pub mod spawn;

/**

@ -535,35 +534,21 @@ pub fn with_task_name<U>(blk: &fn(Option<&str>) -> U) -> U {
        }
    }
    } else {
        fail!("no task name exists in %?", context())
        fail!("no task name exists in non-green task context")
    }
}

pub fn yield() {
    //! Yield control to the task scheduler

    use rt::{context, OldTaskContext};
    use rt::local::Local;
    use rt::sched::Scheduler;

    unsafe {
        match context() {
            OldTaskContext => {
                let task_ = rt::rust_get_task();
                let killed = rt::rust_task_yield(task_);
                if killed && !failing() {
                    fail!("killed");
                }
            }
            _ => {
                // XXX: What does yield really mean in newsched?
                // FIXME(#7544): Optimize this, since we know we won't block.
                let sched = Local::take::<Scheduler>();
                do sched.deschedule_running_task_and_then |sched, task| {
                    sched.enqueue_blocked_task(task);
                }
            }
        }
        // XXX: What does yield really mean in newsched?
        // FIXME(#7544): Optimize this, since we know we won't block.
        let sched = Local::take::<Scheduler>();
        do sched.deschedule_running_task_and_then |sched, task| {
            sched.enqueue_blocked_task(task);
        }
    }
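
A usage sketch (illustrative; `done` and `poll_once` are hypothetical):
cooperative de-scheduling inside a polling loop.

~~~ {.rust}
// Let other green tasks run between iterations.
while !done() {
    poll_once();
    task::yield();
}
~~~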

@ -572,17 +557,8 @@ pub fn failing() -> bool {

    use rt::task::Task;

    match context() {
        OldTaskContext => {
            unsafe {
                rt::rust_task_is_unwinding(rt::rust_get_task())
            }
        }
        _ => {
            do Local::borrow::<Task, bool> |local| {
                local.unwinder.unwinding
            }
        }
    do Local::borrow::<Task, bool> |local| {
        local.unwinder.unwinding
    }
}

@ -605,29 +581,19 @@ pub fn unkillable<U>(f: &fn() -> U) -> U {
    use rt::task::Task;

    unsafe {
        match context() {
            OldTaskContext => {
                let t = rt::rust_get_task();
                do (|| {
                    rt::rust_task_inhibit_kill(t);
                    f()
                }).finally {
                    rt::rust_task_allow_kill(t);
                }
            }
            _ if in_green_task_context() => {
                // The inhibits/allows might fail and need to borrow the task.
                let t = Local::unsafe_borrow::<Task>();
                do (|| {
                    (*t).death.inhibit_kill((*t).unwinder.unwinding);
                    f()
                }).finally {
                    (*t).death.allow_kill((*t).unwinder.unwinding);
                }
        if in_green_task_context() {
            // The inhibits/allows might fail and need to borrow the task.
            let t = Local::unsafe_borrow::<Task>();
            do (|| {
                (*t).death.inhibit_kill((*t).unwinder.unwinding);
                f()
            }).finally {
                (*t).death.allow_kill((*t).unwinder.unwinding);
            }
        } else {
            // FIXME(#3095): This should be an rtabort as soon as the scheduler
            // no longer uses a workqueue implemented with an Exclusive.
            _ => f()
            f()
        }
    }
}
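
A sketch of the intended usage (the resource functions are hypothetical):
kill signals are deferred for the duration of the block, and the previous
state is restored by the finally clause even if `f` fails.

~~~ {.rust}
do task::unkillable {
    // A kill signal delivered here is deferred until the block
    // exits, so this pair of operations cannot be torn apart by
    // linked failure.
    acquire_resource();
    release_resource();
}
~~~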

@ -636,27 +602,17 @@ pub fn unkillable<U>(f: &fn() -> U) -> U {
pub unsafe fn rekillable<U>(f: &fn() -> U) -> U {
    use rt::task::Task;

    match context() {
        OldTaskContext => {
            let t = rt::rust_get_task();
            do (|| {
                rt::rust_task_allow_kill(t);
                f()
            }).finally {
                rt::rust_task_inhibit_kill(t);
            }
        }
        _ if in_green_task_context() => {
            let t = Local::unsafe_borrow::<Task>();
            do (|| {
                (*t).death.allow_kill((*t).unwinder.unwinding);
                f()
            }).finally {
                (*t).death.inhibit_kill((*t).unwinder.unwinding);
            }
    if in_green_task_context() {
        let t = Local::unsafe_borrow::<Task>();
        do (|| {
            (*t).death.allow_kill((*t).unwinder.unwinding);
            f()
        }).finally {
            (*t).death.inhibit_kill((*t).unwinder.unwinding);
        }
    } else {
        // FIXME(#3095): As in unkillable().
        _ => f()
        f()
    }
}

@ -1034,14 +990,8 @@ fn test_try_fail() {

#[cfg(test)]
fn get_sched_id() -> int {
    if context() == OldTaskContext {
        unsafe {
            rt::rust_get_sched_id() as int
        }
    } else {
        do Local::borrow::<::rt::sched::Scheduler, int> |sched| {
            sched.sched_id() as int
        }
    do Local::borrow::<::rt::sched::Scheduler, int> |sched| {
        sched.sched_id() as int
    }
}

@ -1,66 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*!

The task interface to the runtime

*/

#[doc(hidden)];

use libc;

#[allow(non_camel_case_types)] // runtime type
pub type sched_id = int;
#[allow(non_camel_case_types)] // runtime type
pub type task_id = int;

// These are both opaque runtime/compiler types that we don't know the
// structure of and should only deal with via unsafe pointer
#[allow(non_camel_case_types)] // runtime type
pub type rust_task = libc::c_void;
#[allow(non_camel_case_types)] // runtime type
pub type rust_closure = libc::c_void;

extern {
    #[rust_stack]
    pub fn rust_task_yield(task: *rust_task) -> bool;

    pub fn rust_get_sched_id() -> sched_id;
    pub fn rust_new_sched(num_threads: libc::uintptr_t) -> sched_id;

    pub fn get_task_id() -> task_id;
    #[rust_stack]
    pub fn rust_get_task() -> *rust_task;

    pub fn new_task() -> *rust_task;
    pub fn rust_new_task_in_sched(id: sched_id) -> *rust_task;

    pub fn start_task(task: *rust_task, closure: *rust_closure);

    pub fn rust_task_is_unwinding(task: *rust_task) -> bool;
    pub fn rust_osmain_sched_id() -> sched_id;
    #[rust_stack]
    pub fn rust_task_inhibit_kill(t: *rust_task);
    #[rust_stack]
    pub fn rust_task_allow_kill(t: *rust_task);
    #[rust_stack]
    pub fn rust_task_inhibit_yield(t: *rust_task);
    #[rust_stack]
    pub fn rust_task_allow_yield(t: *rust_task);
    pub fn rust_task_kill_other(task: *rust_task);
    pub fn rust_task_kill_all(task: *rust_task);

    #[rust_stack]
    pub fn rust_get_task_local_data(task: *rust_task) -> *mut *libc::c_void;
    #[rust_stack]
    pub fn rust_task_local_data_atexit(task: *rust_task, cleanup_fn: *u8);
}
@ -81,9 +81,6 @@ use container::MutableMap;
use comm::{Chan, GenericChan, oneshot};
use hashmap::{HashSet, HashSetConsumeIterator};
use local_data;
use task::local_data_priv::{local_get, local_set, OldHandle};
use task::rt::rust_task;
use task::rt;
use task::{Failure, SingleThreaded};
use task::{Success, TaskOpts, TaskResult};
use task::unkillable;

@ -91,7 +88,7 @@ use to_bytes::IterBytes;
use uint;
use util;
use unstable::sync::Exclusive;
use rt::{OldTaskContext, NewRtContext, context, in_green_task_context};
use rt::in_green_task_context;
use rt::local::Local;
use rt::task::{Task, Sched};
use rt::kill::KillHandle;

@ -107,14 +104,12 @@ use rt::work_queue::WorkQueue;
// Transitionary.
#[deriving(Eq)]
enum TaskHandle {
    OldTask(*rust_task),
    NewTask(KillHandle),
}

impl Clone for TaskHandle {
    fn clone(&self) -> TaskHandle {
        match *self {
            OldTask(x) => OldTask(x),
            NewTask(ref x) => NewTask(x.clone()),
        }
    }

@ -123,7 +118,6 @@ impl Clone for TaskHandle {
impl IterBytes for TaskHandle {
    fn iter_bytes(&self, lsb0: bool, f: &fn(buf: &[u8]) -> bool) -> bool {
        match *self {
            OldTask(ref x) => x.iter_bytes(lsb0, f),
            NewTask(ref x) => x.iter_bytes(lsb0, f),
        }
    }

@ -498,7 +492,6 @@ struct RuntimeGlue;
impl RuntimeGlue {
    unsafe fn kill_task(task: TaskHandle) {
        match task {
            OldTask(ptr) => rt::rust_task_kill_other(ptr),
            NewTask(handle) => {
                let mut handle = handle;
                do handle.kill().map_move |killed_task| {

@ -513,7 +506,6 @@ impl RuntimeGlue {

    unsafe fn kill_all_tasks(task: &TaskHandle) {
        match *task {
            OldTask(ptr) => rt::rust_task_kill_all(ptr),
            // FIXME(#7544): Remove the kill_all feature entirely once the
            // oldsched goes away.
            NewTask(ref _handle) => rtabort!("can't kill_all in newsched"),

@ -521,12 +513,8 @@ impl RuntimeGlue {
    }

    fn with_task_handle_and_failing(blk: &fn(TaskHandle, bool)) {
        match context() {
            OldTaskContext => unsafe {
                let me = rt::rust_get_task();
                blk(OldTask(me), rt::rust_task_is_unwinding(me))
            },
            NewRtContext if in_green_task_context() => unsafe {
        if in_green_task_context() {
            unsafe {
                // Can't use safe borrow, because the taskgroup destructor needs to
                // access the scheduler again to send kill signals to other tasks.
                let me = Local::unsafe_borrow::<Task>();

@ -534,36 +522,15 @@ impl RuntimeGlue {
                // Will probably have to wait until the old rt is gone.
                blk(NewTask((*me).death.kill_handle.get_ref().clone()),
                    (*me).unwinder.unwinding)
            },
            NewRtContext => rtabort!("task dying in bad context"),
        }
            }
        } else {
            rtabort!("task dying in bad context")
        }
    }

    fn with_my_taskgroup<U>(blk: &fn(&Taskgroup) -> U) -> U {
        match context() {
            OldTaskContext => unsafe {
                let me = rt::rust_get_task();
                do local_get(OldHandle(me), taskgroup_key()) |g| {
                    match g {
                        None => {
                            // Main task, doing first spawn ever. Lazily initialise here.
                            let mut members = TaskSet::new();
                            members.insert(OldTask(me));
                            let tasks = Exclusive::new(Some(TaskGroupData {
                                members: members,
                                descendants: TaskSet::new(),
                            }));
                            // Main task/group has no ancestors, no notifier, etc.
                            let group = @@mut Taskgroup(tasks, AncestorList(None),
                                                        true, None);
                            local_set(OldHandle(me), taskgroup_key(), group);
                            blk(&**group)
                        }
                        Some(&group) => blk(&**group)
                    }
                }
            },
            NewRtContext if in_green_task_context() => unsafe {
        if in_green_task_context() {
            unsafe {
                // Can't use safe borrow, because creating new hashmaps for the
                // tasksets requires an rng, which needs to borrow the sched.
                let me = Local::unsafe_borrow::<Task>();

@ -587,8 +554,9 @@ impl RuntimeGlue {
                    }
                    Some(ref group) => group,
                })
            },
            NewRtContext => rtabort!("spawning in bad context"),
        }
            }
        } else {
            rtabort!("spawning in bad context")
        }
    }
}

@ -598,7 +566,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool)
    -> Option<(TaskGroupArc, AncestorList, bool)> {
    // FIXME(#7544): Not safe to lazily initialize in the old runtime. Remove
    // this context check once 'spawn_raw_oldsched' is gone.
    if context() == OldTaskContext || linked || supervised {
    if linked || supervised {
        // with_my_taskgroup will lazily initialize the parent's taskgroup if
        // it doesn't yet exist. We don't want to call it in the unlinked case.
        do RuntimeGlue::with_my_taskgroup |spawner_group| {

@ -665,10 +633,10 @@ fn enlist_many(child: TaskHandle, child_arc: &TaskGroupArc,
}

pub fn spawn_raw(opts: TaskOpts, f: ~fn()) {
    match context() {
        OldTaskContext => spawn_raw_oldsched(opts, f),
        _ if in_green_task_context() => spawn_raw_newsched(opts, f),
        _ => fail!("can't spawn from this context")
    if in_green_task_context() {
        spawn_raw_newsched(opts, f)
    } else {
        fail!("can't spawn from this context")
    }
}
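
`spawn_raw` is the internal entry point; ordinary code goes through the
`task` module, which now always bottoms out in `spawn_raw_newsched`. An
illustrative sketch:

~~~ {.rust}
do task::spawn {
    // Runs as a new green task on the new scheduler.
}
~~~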

@ -810,85 +778,6 @@ fn spawn_raw_newsched(mut opts: TaskOpts, f: ~fn()) {

}

fn spawn_raw_oldsched(mut opts: TaskOpts, f: ~fn()) {

    let (child_tg, ancestors, is_main) =
        gen_child_taskgroup(opts.linked, opts.supervised).expect("old runtime needs TG");

    unsafe {
        let child_data = Cell::new((child_tg, ancestors, f));
        // Being killed with the unsafe task/closure pointers would leak them.
        do unkillable {
            let (child_tg, ancestors, f) = child_data.take(); // :(
            // Create child task.
            let new_task = match opts.sched.mode {
                DefaultScheduler => rt::new_task(),
                _ => new_task_in_sched()
            };
            assert!(!new_task.is_null());
            // Getting killed after here would leak the task.
            let child_wrapper = make_child_wrapper(new_task, child_tg,
                  ancestors, is_main, opts.notify_chan.take(), f);

            let closure = cast::transmute(&child_wrapper);

            // Getting killed between these two calls would free the child's
            // closure. (Reordering them wouldn't help - then getting killed
            // between them would leak.)
            rt::start_task(new_task, closure);
            cast::forget(child_wrapper);
        }
    }

    // This function returns a closure-wrapper that we pass to the child task.
    // (1) It sets up the notification channel.
    // (2) It attempts to enlist in the child's group and all ancestor groups.
    // (3a) If any of those fails, it leaves all groups, and does nothing.
    // (3b) Otherwise it builds a task control structure and puts it in TLS,
    // (4) ...and runs the provided body function.
    fn make_child_wrapper(child: *rust_task, child_arc: TaskGroupArc,
                          ancestors: AncestorList, is_main: bool,
                          notify_chan: Option<Chan<TaskResult>>,
                          f: ~fn())
                          -> ~fn() {
        let child_data = Cell::new((notify_chan, child_arc, ancestors));
        let result: ~fn() = || {
            let (notify_chan, child_arc, ancestors) = child_data.take(); // :(
            let mut ancestors = ancestors;
            // Child task runs this code.

            // Even if the below code fails to kick the child off, we must
            // send Something on the notify channel.

            let notifier = notify_chan.map_move(|c| AutoNotify(c));

            if enlist_many(OldTask(child), &child_arc, &mut ancestors) {
                let group = @@mut Taskgroup(child_arc, ancestors, is_main, notifier);
                unsafe {
                    local_set(OldHandle(child), taskgroup_key(), group);
                }

                // Run the child's body.
                f();

                // TLS cleanup code will exit the taskgroup.
            }

            // Run the box annihilator.
            // FIXME #4428: Crashy.
            // unsafe { cleanup::annihilate(); }
        };
        return result;
    }

    fn new_task_in_sched() -> *rust_task {
        unsafe {
            let sched_id = rt::rust_new_sched(1);
            rt::rust_new_task_in_sched(sched_id)
        }
    }
}

#[test]
fn test_spawn_raw_simple() {
    let (po, ch) = stream();
@ -11,34 +11,13 @@
//! Runtime calls emitted by the compiler.

use cast::transmute;
use libc::{c_char, c_uchar, c_void, size_t, uintptr_t, c_int};
use option::{Some, None};
use libc::{c_char, c_uchar, c_void, size_t, uintptr_t};
use str;
use sys;
use rt::task::Task;
use rt::local::Local;
use rt::borrowck;

#[allow(non_camel_case_types)]
pub type rust_task = c_void;

pub mod rustrt {
    use unstable::lang::rust_task;
    use libc::{c_char, uintptr_t};

    extern {
        #[rust_stack]
        pub fn rust_upcall_malloc(td: *c_char, size: uintptr_t) -> *c_char;
        #[rust_stack]
        pub fn rust_upcall_free(ptr: *c_char);
        #[fast_ffi]
        pub fn rust_upcall_malloc_noswitch(td: *c_char, size: uintptr_t)
                                           -> *c_char;
        #[rust_stack]
        pub fn rust_try_get_task() -> *rust_task;
    }
}

#[lang="fail_"]
pub fn fail_(expr: *c_char, file: *c_char, line: size_t) -> ! {
    sys::begin_unwind_(expr, file, line);

@ -56,15 +35,14 @@ pub fn fail_bounds_check(file: *c_char, line: size_t,

#[lang="malloc"]
pub unsafe fn local_malloc(td: *c_char, size: uintptr_t) -> *c_char {
    // XXX: Unsafe borrow for speed. Lame.
    match Local::try_unsafe_borrow::<Task>() {
        Some(task) => {
            (*task).heap.alloc(td as *c_void, size as uint) as *c_char
        }
        None => {
            rustrt::rust_upcall_malloc_noswitch(td, size)
        }
    let mut alloc = ::ptr::null();
    do Local::borrow::<Task,()> |task| {
        rtdebug!("task pointer: %x, heap pointer: %x",
                 ::borrow::to_uint(task),
                 ::borrow::to_uint(&task.heap));
        alloc = task.heap.alloc(td as *c_void, size as uint) as *c_char;
    }
    return alloc;
}

// NB: Calls to free CANNOT be allowed to fail, as throwing an exception from

@ -129,23 +107,11 @@ pub unsafe fn annihilate() {
pub fn start(main: *u8, argc: int, argv: **c_char,
             crate_map: *u8) -> int {
    use rt;
    use os;

    unsafe {
        let use_old_rt = os::getenv("RUST_OLDRT").is_some();
        if use_old_rt {
            return rust_start(main as *c_void, argc as c_int, argv,
                              crate_map as *c_void) as int;
        } else {
            return do rt::start(argc, argv as **u8, crate_map) {
                let main: extern "Rust" fn() = transmute(main);
                main();
            };
        }
    }

    extern {
        fn rust_start(main: *c_void, argc: c_int, argv: **c_char,
                      crate_map: *c_void) -> c_int;
        return do rt::start(argc, argv as **u8, crate_map) {
            let main: extern "Rust" fn() = transmute(main);
            main();
        };
    }
}
@ -280,39 +280,19 @@ impl<T> Drop for UnsafeAtomicRcBox<T>{
// FIXME(#8140) should not be pub
pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
    use rt::task::Task;
    use task::rt;
    use rt::local::Local;
    use rt::{context, OldTaskContext};
    use rt::in_green_task_context;

    match context() {
        OldTaskContext => {
            let t = rt::rust_get_task();
            do (|| {
                rt::rust_task_inhibit_kill(t);
                rt::rust_task_inhibit_yield(t);
                f()
            }).finally {
                rt::rust_task_allow_yield(t);
                rt::rust_task_allow_kill(t);
            }
        }
        _ => {
            let t = Local::try_unsafe_borrow::<Task>();
            match t {
                Some(t) => {
                    do (|| {
                        (*t).death.inhibit_yield();
                        f()
                    }).finally {
                        (*t).death.allow_yield();
                    }
                }
                None => {
                    // FIXME(#3095): As in unkillable().
                    f()
                }
            }
    if in_green_task_context() {
        let t = Local::unsafe_borrow::<Task>();
        do (|| {
            (*t).death.inhibit_yield();
            f()
        }).finally {
            (*t).death.allow_yield();
        }
    } else {
        f()
    }
}
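
A usage sketch (illustrative; `shared` is some hypothetical
unsafely-shared state): the closure must not block, since the task
cannot yield while inside.

~~~ {.rust}
let snapshot = unsafe {
    do atomically {
        // No yield can occur in here, so both fields are observed
        // from the same scheduling quantum.
        ((*shared).a, (*shared).b)
    }
};
~~~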

@ -8,9 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#include "memory_region.h"
#include "boxed_region.h"
#include "rust_globals.h"
#include "rust_task.h"
#include "rust_env.h"
#include "rust_util.h"
@ -1,76 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/**
 * Main entry point into the Rust runtime. Here we initialize the kernel,
 * create the initial scheduler and run the main task.
 */

#include "rust_globals.h"
#include "rust_kernel.h"
#include "rust_util.h"
#include "rust_scheduler.h"
#include "rust_gc_metadata.h"

void* global_crate_map = NULL;

/**
The runtime entrypoint. The (C ABI) main function generated by rustc calls
`rust_start`, providing the address of the Rust ABI main function, the
platform argument vector, and a `crate_map` that provides some logging
metadata.
*/
extern "C" CDECL int
rust_start(uintptr_t main_fn, int argc, char **argv, void* crate_map) {

    // Load runtime configuration options from the environment.
    // FIXME #1497: Should provide a way to get these from the command
    // line as well.
    rust_env *env = load_env(argc, argv);

    global_crate_map = crate_map;

    update_gc_metadata(crate_map);

    update_log_settings(crate_map, env->logspec);

    rust_kernel *kernel = new rust_kernel(env);

    // Create the main task
    rust_sched_id sched_id = kernel->main_sched_id();
    rust_scheduler *sched = kernel->get_scheduler_by_id(sched_id);
    assert(sched != NULL);
    rust_task *root_task = sched->create_task(NULL, "main");

    // Schedule the main Rust task
    root_task->start((spawn_fn)main_fn, NULL, NULL);

    // At this point the task lifecycle is responsible for it
    // and our pointer may not be valid
    root_task = NULL;

    // Run the kernel until all schedulers exit
    int ret = kernel->run();

    delete kernel;
    free_env(env);

    return ret;
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
@ -10,14 +10,16 @@

/* Foreign builtins. */

#include "rust_sched_loop.h"
#include "rust_task.h"
#include "rust_util.h"
#include "rust_scheduler.h"
#include "sync/timer.h"
#include "sync/rust_thread.h"
#include "sync/lock_and_signal.h"
#include "memory_region.h"
#include "boxed_region.h"
#include "rust_abi.h"
#include "rust_rng.h"
#include "vg/valgrind.h"
#include "sp.h"

#include <time.h>

@ -68,12 +70,6 @@ rust_env_pairs() {
}
#endif

extern "C" CDECL void *
rust_local_realloc(rust_opaque_box *ptr, size_t size) {
    rust_task *task = rust_get_current_task();
    return task->boxed.realloc(ptr, size);
}

extern "C" CDECL size_t
rand_seed_size() {
    return rng_seed_size();

@ -150,12 +146,6 @@ debug_static_mut_check_four() {
    assert(debug_static_mut == 4);
}

extern "C" CDECL void *
debug_get_stk_seg() {
    rust_task *task = rust_get_current_task();
    return task->stk;
}

extern "C" CDECL char*
#if defined(__WIN32__)
rust_list_dir_val(WIN32_FIND_DATA* entry_ptr) {

@ -383,162 +373,25 @@ rust_mktime(rust_tm* timeptr) {
    return mktime(&t);
}

extern "C" CDECL rust_sched_id
rust_get_sched_id() {
    rust_task *task = rust_get_current_task();
    return task->sched->get_id();
}

extern "C" CDECL int
rust_get_argc() {
    rust_task *task = rust_get_current_task();
    return task->kernel->env->argc;
}

extern "C" CDECL char**
rust_get_argv() {
    rust_task *task = rust_get_current_task();
    return task->kernel->env->argv;
}

extern "C" CDECL rust_sched_id
rust_new_sched(uintptr_t threads) {
    rust_task *task = rust_get_current_task();
    assert(threads > 0 && "Can't create a scheduler with no threads, silly!");
    return task->kernel->create_scheduler(threads);
}

extern "C" CDECL rust_task_id
get_task_id() {
    rust_task *task = rust_get_current_task();
    return task->id;
}

static rust_task*
new_task_common(rust_scheduler *sched, rust_task *parent) {
    return sched->create_task(parent, NULL);
}

extern "C" CDECL rust_task*
new_task() {
    rust_task *task = rust_get_current_task();
    rust_sched_id sched_id = task->kernel->main_sched_id();
    rust_scheduler *sched = task->kernel->get_scheduler_by_id(sched_id);
    assert(sched != NULL && "should always have a main scheduler");
    return new_task_common(sched, task);
}

extern "C" CDECL rust_task*
rust_new_task_in_sched(rust_sched_id id) {
    rust_task *task = rust_get_current_task();
    rust_scheduler *sched = task->kernel->get_scheduler_by_id(id);
    if (sched == NULL)
        return NULL;
    return new_task_common(sched, task);
}

extern "C" rust_task *
rust_get_task() {
    return rust_get_current_task();
}

extern "C" rust_task *
rust_try_get_task() {
    return rust_try_get_current_task();
}

extern "C" CDECL stk_seg *
rust_get_stack_segment() {
    return rust_get_current_task()->stk;
}

extern "C" CDECL stk_seg *
rust_get_c_stack() {
    return rust_get_current_task()->get_c_stack();
}

extern "C" CDECL void
start_task(rust_task *target, fn_env_pair *f) {
    target->start(f->f, f->env, NULL);
}

// This is called by an intrinsic on the Rust stack and must run
// entirely in the red zone. Do not call on the C stack.
extern "C" CDECL MUST_CHECK bool
rust_task_yield(rust_task *task, bool *killed) {
    return task->yield();
}

extern "C" CDECL void
rust_set_exit_status(intptr_t code) {
    rust_task *task = rust_get_current_task();
    task->kernel->set_exit_status((int)code);
}

extern void log_console_on();
static lock_and_signal log_lock;
static bool log_to_console = true;

extern "C" CDECL void
rust_log_console_on() {
    log_console_on();
    scoped_lock with(log_lock);
    log_to_console = true;
}

extern void log_console_off();

extern "C" CDECL void
rust_log_console_off() {
    log_console_off();
    scoped_lock with(log_lock);
    log_to_console = false;
}

extern bool should_log_console();

extern "C" CDECL uintptr_t
rust_should_log_console() {
    return (uintptr_t)should_log_console();
}

extern "C" CDECL rust_sched_id
rust_osmain_sched_id() {
    rust_task *task = rust_get_current_task();
    return task->kernel->osmain_sched_id();
}

extern "C" void
rust_task_inhibit_kill(rust_task *task) {
    task->inhibit_kill();
}

extern "C" void
rust_task_allow_kill(rust_task *task) {
    task->allow_kill();
}

extern "C" void
rust_task_inhibit_yield(rust_task *task) {
    task->inhibit_yield();
}

extern "C" void
rust_task_allow_yield(rust_task *task) {
    task->allow_yield();
}

extern "C" void
rust_task_kill_other(rust_task *task) { /* Used for linked failure */
    task->kill();
}

extern "C" void
rust_task_kill_all(rust_task *task) { /* Used for linked failure */
    task->fail_sched_loop();
    // This must not happen twice.
    static bool main_taskgroup_failed = false;
    assert(!main_taskgroup_failed);
    main_taskgroup_failed = true;
}

extern "C" CDECL
bool rust_task_is_unwinding(rust_task *rt) {
    return rt->unwinding;
    scoped_lock with(log_lock);
    return log_to_console;
}

extern "C" lock_and_signal*

@ -561,71 +414,6 @@ rust_unlock_little_lock(lock_and_signal *lock) {
    lock->unlock();
}

// get/atexit task_local_data can run on the rust stack for speed.
extern "C" void **
rust_get_task_local_data(rust_task *task) {
    return &task->task_local_data;
}
extern "C" void
rust_task_local_data_atexit(rust_task *task, void (*cleanup_fn)(void *data)) {
    task->task_local_data_cleanup = cleanup_fn;
}

// set/get/atexit task_borrow_list can run on the rust stack for speed.
extern "C" void *
rust_take_task_borrow_list(rust_task *task) {
    void *r = task->borrow_list;
    task->borrow_list = NULL;
    return r;
}
extern "C" void
rust_set_task_borrow_list(rust_task *task, void *data) {
    assert(task->borrow_list == NULL);
    assert(data != NULL);
    task->borrow_list = data;
}

extern "C" void
task_clear_event_reject(rust_task *task) {
    task->clear_event_reject();
}

// Waits on an event, returning the pointer to the event that unblocked this
// task.
extern "C" MUST_CHECK bool
task_wait_event(rust_task *task, void **result) {
    // Maybe (if not too slow) assert that the passed in task is the currently
    // running task. We wouldn't want to wait some other task.

    return task->wait_event(result);
}

extern "C" void
task_signal_event(rust_task *target, void *event) {
    target->signal_event(event);
}

// Can safely run on the rust stack.
extern "C" void
rust_task_ref(rust_task *task) {
    task->ref();
}

// Don't run on the rust stack!
extern "C" void
rust_task_deref(rust_task *task) {
    task->deref();
}

// Don't run on the Rust stack!
extern "C" void
rust_log_str(uint32_t level, const char *str, size_t size) {
    rust_task *task = rust_get_current_task();
    task->sched_loop->get_log().log(task, level, "%.*s", (int)size, str);
}

extern "C" CDECL void record_sp_limit(void *limit);

class raw_thread: public rust_thread {
public:
    fn_env_pair fn;

@ -684,12 +472,6 @@ rust_readdir() {

#endif

extern "C" rust_env*
rust_get_rt_env() {
    rust_task *task = rust_get_current_task();
    return task->kernel->env;
}

#ifndef _WIN32
pthread_key_t rt_key = -1;
#else

@ -737,12 +519,6 @@ rust_delete_memory_region(memory_region *region) {
    delete region;
}

extern "C" CDECL boxed_region*
rust_current_boxed_region() {
    rust_task *task = rust_get_current_task();
    return &task->boxed;
}

extern "C" CDECL boxed_region*
rust_new_boxed_region(memory_region *region,
                      uintptr_t poison_on_free) {

@ -848,6 +624,12 @@ rust_drop_change_dir_lock() {
    change_dir_lock.unlock();
}

// Used by i386 __morestack
extern "C" CDECL uintptr_t
rust_get_task() {
    return 0;
}

//
// Local Variables:
// mode: C++

@ -11,7 +11,7 @@
#ifndef RUST_CRATE_MAP_H
#define RUST_CRATE_MAP_H

#include "rust_log.h"
#include "rust_globals.h"
#include <stdint.h>

struct mod_entry {
@ -1,60 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Routines useful when debugging the Rust runtime.

#include "rust_globals.h"
#include "rust_abi.h"
#include "rust_debug.h"
#include "rust_task.h"

#include <iostream>
#include <string>
#include <sstream>

namespace {

debug::flag track_origins("RUST_TRACK_ORIGINS");

} // end anonymous namespace

namespace debug {

void
maybe_track_origin(rust_task *task, void *ptr) {
    if (!*track_origins)
        return;
    task->debug.origins[ptr] =
        stack_walk::symbolicate(stack_walk::backtrace());
}

void
maybe_untrack_origin(rust_task *task, void *ptr) {
    if (!*track_origins)
        return;
    task->debug.origins.erase(ptr);
}

// This function is intended to be called by the debugger.
void
dump_origin(rust_task *task, void *ptr) {
    if (!*track_origins) {
        std::cerr << "Try again with RUST_TRACK_ORIGINS=1." << std::endl;
    } else if (task->debug.origins.find(ptr) == task->debug.origins.end()) {
        std::cerr << "Pointer " << std::hex << (uintptr_t)ptr <<
            " does not have a tracked origin." << std::endl;
    } else {
        std::cerr << "Origin of pointer " << std::hex << (uintptr_t)ptr <<
            ":" << std::endl << task->debug.origins[ptr] <<
            std::endl;
    }
}

} // end namespace debug
@ -1,59 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Routines useful when debugging the Rust runtime.

#ifndef RUST_DEBUG_H
#define RUST_DEBUG_H

#include <map>
#include <string>
#include <cstdlib>

struct rust_task;

namespace debug {

class flag {
  private:
    const char *name;
    bool valid;
    bool value;

  public:
    flag(const char *in_name) : name(in_name), valid(false) {}

    bool operator*() {
        // FIXME (#2689): We ought to lock this.
        if (!valid) {
            char *ev = getenv(name);
            value = ev && ev[0] != '\0' && ev[0] != '0';
            valid = true;
        }
        return value;
    }
};

class task_debug_info {
  public:
    std::map<void *,std::string> origins;
};

std::string backtrace();

void maybe_track_origin(rust_task *task, void *ptr);
void maybe_untrack_origin(rust_task *task, void *ptr);

// This function is intended to be called by the debugger.
void dump_origin(rust_task *task, void *ptr);

} // end namespace debug

#endif
@ -1,322 +0,0 @@
|
|||
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
|
||||
// file at the top-level directory of this distribution and at
|
||||
// http://rust-lang.org/COPYRIGHT.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
|
||||
|
||||
#include "rust_kernel.h"
|
||||
#include "rust_util.h"
|
||||
#include "rust_scheduler.h"
|
||||
#include "rust_sched_launcher.h"
|
||||
#include <algorithm>
|
||||
|
||||
#define KLOG_(...) \
|
||||
KLOG(this, kern, __VA_ARGS__)
|
||||
#define KLOG_ERR_(field, ...) \
|
||||
KLOG_LVL(this, field, log_err, __VA_ARGS__)
|
||||
|
||||
rust_kernel::rust_kernel(rust_env *env) :
|
||||
_log(NULL),
|
||||
max_task_id(INIT_TASK_ID-1), // sync_add_and_fetch increments first
|
||||
rval(0),
|
||||
max_sched_id(1),
|
||||
killed(false),
|
||||
already_exiting(false),
|
||||
sched_reaper(this),
|
||||
osmain_driver(NULL),
|
||||
non_weak_tasks(0),
|
||||
env(env)
|
||||
{
|
||||
// Create the single threaded scheduler that will run on the platform's
|
||||
// main thread
|
||||
rust_manual_sched_launcher_factory *osmain_launchfac =
|
||||
new rust_manual_sched_launcher_factory();
|
||||
osmain_scheduler = create_scheduler(osmain_launchfac, 1, false);
|
||||
osmain_driver = osmain_launchfac->get_driver();
|
||||
|
||||
// Create the primary scheduler
|
||||
rust_thread_sched_launcher_factory *main_launchfac =
|
||||
new rust_thread_sched_launcher_factory();
|
||||
main_scheduler = create_scheduler(main_launchfac,
|
||||
env->num_sched_threads,
|
||||
false);
|
||||
|
||||
sched_reaper.start();
|
||||
}
|
||||
|
||||
void
|
||||
rust_kernel::log(uint32_t level, char const *fmt, ...) {
|
||||
char buf[BUF_BYTES];
|
||||
va_list args;
|
||||
va_start(args, fmt);
|
||||
vsnprintf(buf, sizeof(buf), fmt, args);
|
||||
_log.trace_ln(NULL, level, buf);
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
void
|
||||
rust_kernel::fatal(char const *fmt, ...) {
|
||||
char buf[BUF_BYTES];
|
||||
va_list args;
|
||||
va_start(args, fmt);
|
||||
vsnprintf(buf, sizeof(buf), fmt, args);
|
||||
_log.trace_ln(NULL, (uint32_t)0, buf);
|
||||
exit(1);
|
||||
va_end(args);
|
||||
}
|
||||
void *
rust_kernel::malloc(size_t size, const char *tag) {
    return exchange_alloc.malloc(size);
}

void *
rust_kernel::realloc(void *mem, size_t size) {
    return exchange_alloc.realloc(mem, size);
}

void rust_kernel::free(void *mem) {
    exchange_alloc.free(mem);
}

rust_sched_id
rust_kernel::create_scheduler(size_t num_threads) {
    rust_thread_sched_launcher_factory *launchfac =
        new rust_thread_sched_launcher_factory();
    return create_scheduler(launchfac, num_threads, true);
}

rust_sched_id
rust_kernel::create_scheduler(rust_sched_launcher_factory *launchfac,
                              size_t num_threads, bool allow_exit) {
    rust_sched_id id;
    rust_scheduler *sched;
    {
        scoped_lock with(sched_lock);

        /*if (sched_table.size() == 2) {
            // The main and OS main schedulers may not exit while there are
            // other schedulers
            KLOG_("Disallowing main scheduler to exit");
            rust_scheduler *main_sched =
                get_scheduler_by_id_nolock(main_scheduler);
            assert(main_sched != NULL);
            main_sched->disallow_exit();
        }
        if (sched_table.size() == 1) {
            KLOG_("Disallowing osmain scheduler to exit");
            rust_scheduler *osmain_sched =
                get_scheduler_by_id_nolock(osmain_scheduler);
            assert(osmain_sched != NULL);
            osmain_sched->disallow_exit();
        }*/

        id = max_sched_id++;
        assert(id != INTPTR_MAX && "Hit the maximum scheduler id");
        sched = new (this, "rust_scheduler")
            rust_scheduler(this, num_threads, id, allow_exit, killed,
                           launchfac);
        bool is_new = sched_table
            .insert(std::pair<rust_sched_id,
                              rust_scheduler*>(id, sched)).second;
        assert(is_new && "Reusing a sched id?");
    }
    sched->start_task_threads();
    return id;
}

rust_scheduler *
rust_kernel::get_scheduler_by_id(rust_sched_id id) {
    scoped_lock with(sched_lock);
    return get_scheduler_by_id_nolock(id);
}

rust_scheduler *
rust_kernel::get_scheduler_by_id_nolock(rust_sched_id id) {
    if (id == 0) {
        return NULL;
    }
    sched_lock.must_have_lock();
    sched_map::iterator iter = sched_table.find(id);
    if (iter != sched_table.end()) {
        return iter->second;
    } else {
        return NULL;
    }
}

void
rust_kernel::release_scheduler_id(rust_sched_id id) {
    scoped_lock with(sched_lock);
    join_list.push_back(id);
    sched_lock.signal();
}

/*
Called by rust_sched_reaper to join every terminating scheduler thread,
so that we can be sure they have completely exited before the process exits.
If we don't join them then we can see valgrind errors due to un-freed pthread
memory.
 */
void
rust_kernel::wait_for_schedulers()
{
    scoped_lock with(sched_lock);
    while (!sched_table.empty()) {
        while (!join_list.empty()) {
            rust_sched_id id = join_list.back();
            KLOG_("Deleting scheduler %d", id);
            join_list.pop_back();
            sched_map::iterator iter = sched_table.find(id);
            assert(iter != sched_table.end());
            rust_scheduler *sched = iter->second;
            sched_table.erase(iter);
            sched->join_task_threads();
            sched->deref();
            /*if (sched_table.size() == 2) {
                KLOG_("Allowing main scheduler to exit");
                // It's only the main schedulers left. Tell them to exit
                rust_scheduler *main_sched =
                    get_scheduler_by_id_nolock(main_scheduler);
                assert(main_sched != NULL);
                main_sched->allow_exit();
            }
            if (sched_table.size() == 1) {
                KLOG_("Allowing osmain scheduler to exit");
                rust_scheduler *osmain_sched =
                    get_scheduler_by_id_nolock(osmain_scheduler);
                assert(osmain_sched != NULL);
                osmain_sched->allow_exit();
            }*/
        }
        if (!sched_table.empty()) {
            sched_lock.wait();
        }
    }
}

/* Called on the main thread to run the osmain scheduler to completion,
   then wait for schedulers to exit */
int
rust_kernel::run() {
    assert(osmain_driver != NULL);
    osmain_driver->start_main_loop();
    sched_reaper.join();
    return rval;
}

void
rust_kernel::fail() {
    // FIXME (#908): On windows we're getting "Application has
    // requested the Runtime to terminate it in an unusual way" when
    // trying to shutdown cleanly.
    set_exit_status(PROC_FAIL_CODE);
#if defined(__WIN32__)
    exit(rval);
#endif
    // I think this only needs to be done by one task ever; as it is,
    // multiple tasks invoking kill_all might get here. Currently libcore
    // ensures only one task will ever invoke it, but this would really be
    // fine either way, so I'm leaving it as it is. -- bblum

    // Copy the list of schedulers so that we don't hold the lock while
    // running kill_all_tasks. Refcount to ensure they stay alive.
    std::vector<rust_scheduler*> scheds;
    {
        scoped_lock with(sched_lock);
        // All schedulers created after this flag is set will be doomed.
        killed = true;
        for (sched_map::iterator iter = sched_table.begin();
             iter != sched_table.end(); iter++) {
            iter->second->ref();
            scheds.push_back(iter->second);
        }
    }

    for (std::vector<rust_scheduler*>::iterator iter = scheds.begin();
         iter != scheds.end(); iter++) {
        (*iter)->kill_all_tasks();
        (*iter)->deref();
    }
}

rust_task_id
rust_kernel::generate_task_id() {
    rust_task_id id = sync::increment(max_task_id);
    assert(id != INTPTR_MAX && "Hit the maximum task id");
    return id;
}

void
rust_kernel::set_exit_status(int code) {
    scoped_lock with(rval_lock);
    // If we've already failed then that's the code we're going to use
    if (rval != PROC_FAIL_CODE) {
        rval = code;
    }
}

void
rust_kernel::inc_live_count() {
    uintptr_t new_non_weak_tasks = sync::increment(non_weak_tasks);
    KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
}

void
rust_kernel::dec_live_count() {
    uintptr_t new_non_weak_tasks = sync::decrement(non_weak_tasks);
    KLOG_("New non-weak tasks %" PRIdPTR, new_non_weak_tasks);
    if (new_non_weak_tasks == 0) {
        begin_shutdown();
    }
}

void
rust_kernel::allow_scheduler_exit() {
    scoped_lock with(sched_lock);

    KLOG_("Allowing main scheduler to exit");
    // It's only the main schedulers left. Tell them to exit
    rust_scheduler *main_sched =
        get_scheduler_by_id_nolock(main_scheduler);
    assert(main_sched != NULL);
    main_sched->allow_exit();

    KLOG_("Allowing osmain scheduler to exit");
    rust_scheduler *osmain_sched =
        get_scheduler_by_id_nolock(osmain_scheduler);
    assert(osmain_sched != NULL);
    osmain_sched->allow_exit();
}

void
rust_kernel::begin_shutdown() {
    {
        scoped_lock with(sched_lock);
        // FIXME #4410: This shouldn't be necessary, but because of
        // unweaken_task this may end up getting called multiple times.
        if (already_exiting) {
            return;
        } else {
            already_exiting = true;
        }
    }

    allow_scheduler_exit();
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
@ -1,167 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.


/**
   A single runtime instance.

   The kernel is primarily responsible for managing the lifetime of
   schedulers, which in turn run rust tasks. It provides a memory
   allocator and logging service for use by other runtime components,
   and it creates unique task ids.

   The kernel runs until there are no live schedulers.

   The kernel internally runs an additional, special scheduler called
   the 'osmain' (or platform) scheduler, which schedules tasks on the
   thread that is running the kernel (normally the thread on which the
   C main function was called). This scheduler may be used by Rust
   code for interacting with platform APIs that insist on being called
   from the main thread.

   The requirements of the osmain scheduler have resulted in a complex
   process for creating and running scheduler loops that involves
   a thing called a 'rust_sched_launcher_factory' whose function I've
   already forgotten. rust_scheduler is the main scheduler class,
   and tasks are scheduled on individual threads by rust_sched_loop.

   Ideally all the in-memory Rust state is encapsulated by a kernel
   instance, but there is still some truly global data in the runtime
   (like the check claims flag).
 */
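
// Illustrative sketch only (not part of this header): the lifecycle the
// comment above describes, roughly as a runtime entry point might drive it.
// `load_env` is a hypothetical name for environment setup; the real wiring
// lives in the runtime's startup code.
//
//     rust_env *env = load_env();                 // hypothetical helper
//     rust_kernel *kernel = new rust_kernel(env);
//     // ... spawn the root task on the main scheduler ...
//     int ret = kernel->run();  // runs the osmain scheduler, then waits
//                               // for every scheduler to exit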

#ifndef RUST_KERNEL_H
#define RUST_KERNEL_H

#include "rust_globals.h"

#include <map>
#include <vector>

#include "rust_exchange_alloc.h"
#include "rust_log.h"
#include "rust_sched_reaper.h"
#include "rust_type.h"
#include "sync/lock_and_signal.h"

class rust_scheduler;
class rust_sched_driver;
class rust_sched_launcher_factory;
struct rust_task_thread;

// Scheduler, task handles. These uniquely identify within a
// single kernel instance the objects they represent.
typedef intptr_t rust_sched_id;
typedef intptr_t rust_task_id;

typedef std::map<rust_sched_id, rust_scheduler*> sched_map;

class rust_kernel {
    rust_exchange_alloc exchange_alloc;
    rust_log _log;

    // The next task id
    rust_task_id max_task_id;

    lock_and_signal rval_lock;
    int rval;

    // Protects max_sched_id and sched_table, join_list, killed,
    // already_exiting
    lock_and_signal sched_lock;
    // The next scheduler id
    rust_sched_id max_sched_id;
    // A map from scheduler ids to schedulers. When this is empty
    // the kernel terminates
    sched_map sched_table;
    // A list of scheduler ids that are ready to exit
    std::vector<rust_sched_id> join_list;
    // Whether or not the runtime has to die (triggered when the root/main
    // task group fails). This propagates to all new schedulers and tasks
    // created after it is set.
    bool killed;
    bool already_exiting;


    rust_sched_reaper sched_reaper;

    // The primary scheduler
    rust_sched_id main_scheduler;
    // The single-threaded scheduler that uses the main thread
    rust_sched_id osmain_scheduler;
    // Runs the single-threaded scheduler that executes tasks
    // on the main thread
    rust_sched_driver *osmain_driver;

    // An atomically updated count of the live, 'non-weak' tasks
    uintptr_t non_weak_tasks;

    rust_scheduler* get_scheduler_by_id_nolock(rust_sched_id id);
    void allow_scheduler_exit();
    void begin_shutdown();

public:
    struct rust_env *env;

    rust_kernel(rust_env *env);

    void log(uint32_t level, char const *fmt, ...);
    void fatal(char const *fmt, ...);

    void *malloc(size_t size, const char *tag);
    void *realloc(void *mem, size_t size);
    void free(void *mem);
    rust_exchange_alloc *region() { return &exchange_alloc; }

    void fail();

    rust_sched_id create_scheduler(size_t num_threads);
    rust_sched_id create_scheduler(rust_sched_launcher_factory *launchfac,
                                   size_t num_threads, bool allow_exit);
    rust_scheduler* get_scheduler_by_id(rust_sched_id id);
    // Called by a scheduler to indicate that it is terminating
    void release_scheduler_id(rust_sched_id id);
    void wait_for_schedulers();
    int run();

    rust_task_id generate_task_id();

    void set_exit_status(int code);

    rust_sched_id main_sched_id() { return main_scheduler; }
    rust_sched_id osmain_sched_id() { return osmain_scheduler; }

    void inc_live_count();
    void dec_live_count();

};

template <typename T> struct kernel_owned {
    inline void *operator new(size_t size, rust_kernel *kernel,
                              const char *tag) {
        return kernel->malloc(size, tag);
    }

    void operator delete(void *ptr) {
        ((T *)ptr)->kernel->free(ptr);
    }
};
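
// Usage sketch: kernel_owned gives subclasses a placement-new form that
// routes allocation through the kernel's exchange allocator, as in
// rust_kernel::create_scheduler:
//
//     sched = new (this, "rust_scheduler")
//         rust_scheduler(this, num_threads, id, allow_exit, killed,
//                        launchfac);
//
// operator delete recovers the kernel through the object's own `kernel`
// field, so a plain `delete` returns the memory to the same allocator.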

#endif /* RUST_KERNEL_H */

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
@ -17,161 +17,6 @@
#include "rust_crate_map.h"
#include "util/array_list.h"
#include "rust_util.h"
#include "rust_task.h"

/**
 * Synchronizes access to the underlying logging mechanism.
 */
static lock_and_signal _log_lock;
/**
 * Indicates whether we are outputting to the console.
 * Protected by _log_lock.
 */
static bool _log_to_console = true;

/*
 * Request that console logging be turned on.
 */
void
log_console_on() {
    scoped_lock with(_log_lock);
    _log_to_console = true;
}

/*
 * Request that console logging be turned off. Can be
 * overridden by the environment.
 */
void
log_console_off() {
    scoped_lock with(_log_lock);
    _log_to_console = false;
}

bool
should_log_console() {
    scoped_lock with(_log_lock);
    return _log_to_console;
}

rust_log::rust_log(rust_sched_loop *sched_loop) :
    _sched_loop(sched_loop) {
}

rust_log::~rust_log() {

}

const uint16_t
hash(uintptr_t ptr) {
# if(ULONG_MAX == 0xFFFFFFFF)
    // Robert Jenkins' 32 bit integer hash function
    ptr = (ptr + 0x7ed55d16) + (ptr << 12);
    ptr = (ptr ^ 0xc761c23c) ^ (ptr >> 19);
    ptr = (ptr + 0x165667b1) + (ptr << 5);
    ptr = (ptr + 0xd3a2646c) ^ (ptr << 9);
    ptr = (ptr + 0xfd7046c5) + (ptr << 3);
    ptr = (ptr ^ 0xb55a4f09) ^ (ptr >> 16);
# elif(ULONG_MAX == 0xFFFFFFFFFFFFFFFF)
    // "hash64shift()" from http://www.concentric.net/~Ttwang/tech/inthash.htm
    ptr = (~ptr) + (ptr << 21); // ptr = (ptr << 21) - ptr - 1;
    ptr = ptr ^ (ptr >> 24);
    ptr = (ptr + (ptr << 3)) + (ptr << 8); // ptr * 265
    ptr = ptr ^ (ptr >> 14);
    ptr = (ptr + (ptr << 2)) + (ptr << 4); // ptr * 21
    ptr = ptr ^ (ptr >> 28);
    ptr = ptr + (ptr << 31);
# else
# error "hash() not defined for this pointer size"
# endif
    return (uint16_t) ptr;
}
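
// Usage note: the (currently #if 0'd) labeled tracing further below
// compresses a pthread handle into a short id for log prefixes with this
// function:
//
//     uint32_t thread_id = hash((uintptr_t) pthread_self());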

char *
copy_string(char *dst, const char *src, size_t length) {
    return strncpy(dst, src, length) + length;
}

char *
append_string(char *buffer, const char *format, ...) {
    if (buffer != NULL && format) {
        va_list args;
        va_start(args, format);
        size_t off = strlen(buffer);
        vsnprintf(buffer + off, BUF_BYTES - off, format, args);
        va_end(args);
    }
    return buffer;
}

void
rust_log::log(rust_task* task, uint32_t level, char const *fmt, ...) {
    char buf[BUF_BYTES];
    va_list args;
    va_start(args, fmt);
    int formattedbytes = vsnprintf(buf, sizeof(buf), fmt, args);
    // vsnprintf returns the number of bytes the full message would have
    // needed; the output was truncated if that is sizeof(buf) or more.
    if (formattedbytes > 0 && (size_t)formattedbytes >= BUF_BYTES) {
        const char truncatedstr[] = "[...]";
        memcpy(&buf[BUF_BYTES - sizeof(truncatedstr)],
               truncatedstr,
               sizeof(truncatedstr));
    }
    trace_ln(task, level, buf);
    va_end(args);
}

void
rust_log::trace_ln(char *prefix, char *message) {
    char buffer[BUF_BYTES] = "";
    _log_lock.lock();
    append_string(buffer, "%s", prefix);
    append_string(buffer, "%s", message);
    if (_log_to_console) {
        fprintf(stderr, "rust: %s\n", buffer);
        fflush(stderr);
    }
    _log_lock.unlock();
}

void
rust_log::trace_ln(rust_task *task, uint32_t level, char *message) {

    if (task) {
        // There is not enough room to be logging on the rust stack
        assert(!task->on_rust_stack() && "logging on rust stack");
    }

    // FIXME (#2672): The scheduler and task names used to have meaning,
    // but they are always equal to 'main' currently
#if 0

#if defined(__WIN32__)
    uint32_t thread_id = 0;
#else
    uint32_t thread_id = hash((uintptr_t) pthread_self());
#endif

    char prefix[BUF_BYTES] = "";
    if (_sched_loop && _sched_loop->name) {
        append_string(prefix, "%04" PRIxPTR ":%.10s:",
                      thread_id, _sched_loop->name);
    } else {
        append_string(prefix, "%04" PRIxPTR ":0x%08" PRIxPTR ":",
                      thread_id, (uintptr_t) _sched_loop);
    }
    if (task) {
        if (task->name) {
            append_string(prefix, "%.10s:", task->name);
        } else {
            append_string(prefix, "0x%08" PRIxPTR ":", (uintptr_t) task);
        }
    }
#else
    char prefix[BUF_BYTES] = "";
#endif

    trace_ln(prefix, message);
}

// Reading log directives and setting log level vars

@ -18,53 +18,6 @@ const uint32_t log_warn = 2;
const uint32_t log_info = 3;
const uint32_t log_debug = 4;

#define LOG(task, field, ...) \
    DLOG_LVL(log_debug, task, task->sched_loop, field, __VA_ARGS__)
#define LOG_ERR(task, field, ...) \
    DLOG_LVL(log_err, task, task->sched_loop, field, __VA_ARGS__)
#define DLOG(sched_loop, field, ...) \
    DLOG_LVL(log_debug, NULL, sched_loop, field, __VA_ARGS__)
#define DLOG_ERR(sched_loop, field, ...) \
    DLOG_LVL(log_err, NULL, sched_loop, field, __VA_ARGS__)
#define LOGPTR(sched_loop, msg, ptrval) \
    DLOG_LVL(log_debug, NULL, sched_loop, mem, "%s 0x%" PRIxPTR, msg, ptrval)
#define DLOG_LVL(lvl, task, sched_loop, field, ...) \
    do { \
        rust_sched_loop* _d_ = sched_loop; \
        if (log_rt_##field >= lvl && _d_->log_lvl >= lvl) { \
            _d_->get_log().log(task, lvl, __VA_ARGS__); \
        } \
    } while (0)

#define KLOG(k, field, ...) \
    KLOG_LVL(k, field, log_debug, __VA_ARGS__)
#define KLOG_LVL(k, field, lvl, ...) \
    do { \
        if (log_rt_##field >= lvl) { \
            (k)->log(lvl, __VA_ARGS__); \
        } \
    } while (0)
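
// Usage sketch, as seen elsewhere in the runtime: DLOG consults both the
// per-category level (log_rt_<field>) and the scheduler loop's own log_lvl,
// while KLOG has only the category level to check:
//
//     DLOG(sched_loop, dom, "pumping scheduler");
//     KLOG(kernel, kern, "deleting task thread: " PTR, thread);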

struct rust_sched_loop;
struct rust_task;

class rust_log {

public:
    rust_log(rust_sched_loop *sched_loop);
    virtual ~rust_log();

    void log(rust_task* task, uint32_t level, char const *fmt, ...);
    void trace_ln(rust_task *task, uint32_t level, char *message);
    void trace_ln(char *prefix, char *message);
    bool is_tracing(uint32_t type_bits);

private:
    rust_sched_loop *_sched_loop;
    bool _use_labels;
    void trace_ln(rust_task *task, char *message);
};

void update_log_settings(void* crate_map, char* settings);

extern uint32_t log_rt_mem;

@ -9,7 +9,7 @@
// except according to those terms.


#include "rust_kernel.h"
#include "rust_globals.h"

#ifdef __APPLE__
#include <crt_externs.h>
@ -1,68 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.


#include "rust_globals.h"
#include "rust_sched_driver.h"
#include "rust_sched_loop.h"

rust_sched_driver::rust_sched_driver(rust_sched_loop *sched_loop)
    : sched_loop(sched_loop),
      signalled(false) {

    assert(sched_loop != NULL);
    sched_loop->on_pump_loop(this);
}

/**
 * Starts the main scheduler loop which performs task scheduling for this
 * domain.
 *
 * Returns once no more tasks can be scheduled and all task ref_counts
 * drop to zero.
 */
void
rust_sched_driver::start_main_loop() {
    assert(sched_loop != NULL);

#ifdef __APPLE__
    {
        char buf[64];
        snprintf(buf, sizeof(buf), "scheduler loop %d", sched_loop->get_id());
        // pthread_setname_np seems to have a different signature and
        // different behavior on different platforms. Thus, this is
        // only for Mac at the moment. There are equivalent versions
        // for Linux that we can add if needed.
        pthread_setname_np(buf);
    }
#endif

    rust_sched_loop_state state = sched_loop_state_keep_going;
    while (state != sched_loop_state_exit) {
        DLOG(sched_loop, dom, "pumping scheduler");
        state = sched_loop->run_single_turn();

        if (state == sched_loop_state_block) {
            scoped_lock with(lock);
            if (!signalled) {
                DLOG(sched_loop, dom, "blocking scheduler");
                lock.wait();
            }
            signalled = false;
        }
    }
}

void
rust_sched_driver::signal() {
    scoped_lock with(lock);
    signalled = true;
    lock.signal();
}
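
// Note on the handshake above: the `signalled` flag is what prevents a lost
// wakeup. If signal() fires between run_single_turn() returning
// sched_loop_state_block and the driver re-taking the lock, the driver sees
// signalled == true, skips lock.wait(), and consumes the wakeup by resetting
// the flag:
//
//     scoped_lock with(lock);
//     if (!signalled) {   // did signal() already happen? then don't sleep
//         lock.wait();
//     }
//     signalled = false;  // consume the wakeup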
@ -1,33 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#ifndef RUST_SCHED_DRIVER_H
#define RUST_SCHED_DRIVER_H

#include "sync/lock_and_signal.h"
#include "rust_signal.h"

struct rust_sched_loop;

class rust_sched_driver : public rust_signal {
private:
    rust_sched_loop *sched_loop;
    lock_and_signal lock;
    bool signalled;

public:
    rust_sched_driver(rust_sched_loop *sched_loop);

    void start_main_loop();

    virtual void signal();
};

#endif /* RUST_SCHED_DRIVER_H */
@ -1,49 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.


#include "rust_sched_launcher.h"
#include "rust_scheduler.h"

const size_t SCHED_STACK_SIZE = 1024*100;

rust_sched_launcher::rust_sched_launcher(rust_scheduler *sched, int id,
                                         bool killed)
    : kernel(sched->kernel),
      sched_loop(sched, id, killed),
      driver(&sched_loop) {
}

rust_thread_sched_launcher::rust_thread_sched_launcher(rust_scheduler *sched,
                                                       int id, bool killed)
    : rust_sched_launcher(sched, id, killed),
      rust_thread(SCHED_STACK_SIZE) {
}

rust_manual_sched_launcher::rust_manual_sched_launcher(rust_scheduler *sched,
                                                       int id, bool killed)
    : rust_sched_launcher(sched, id, killed) {
}

rust_sched_launcher *
rust_thread_sched_launcher_factory::create(rust_scheduler *sched, int id,
                                           bool killed) {
    return new(sched->kernel, "rust_thread_sched_launcher")
        rust_thread_sched_launcher(sched, id, killed);
}

rust_sched_launcher *
rust_manual_sched_launcher_factory::create(rust_scheduler *sched, int id,
                                           bool killed) {
    assert(launcher == NULL && "I can only track one sched_launcher");
    launcher = new(sched->kernel, "rust_manual_sched_launcher")
        rust_manual_sched_launcher(sched, id, killed);
    return launcher;
}
@ -1,89 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#ifndef RUST_SCHED_LAUNCHER_H
#define RUST_SCHED_LAUNCHER_H

#include "sync/rust_thread.h"
#include "rust_sched_driver.h"
#include "rust_kernel.h"
#include "rust_sched_loop.h"

class rust_sched_launcher : public kernel_owned<rust_sched_launcher> {
public:
    rust_kernel *kernel;

private:
    rust_sched_loop sched_loop;

private:
    // private and undefined to disable copying
    rust_sched_launcher(const rust_sched_launcher& rhs);
    rust_sched_launcher& operator=(const rust_sched_launcher& rhs);

protected:
    rust_sched_driver driver;

public:
    rust_sched_launcher(rust_scheduler *sched, int id, bool killed);
    virtual ~rust_sched_launcher() { }

    virtual void start() = 0;
    virtual void join() = 0;
    rust_sched_loop *get_loop() { return &sched_loop; }
};

class rust_thread_sched_launcher
    : public rust_sched_launcher,
      private rust_thread {
public:
    rust_thread_sched_launcher(rust_scheduler *sched, int id, bool killed);
    virtual void start() { rust_thread::start(); }
    virtual void join() { rust_thread::join(); }
    virtual void run() { driver.start_main_loop(); }
};

class rust_manual_sched_launcher : public rust_sched_launcher {
public:
    rust_manual_sched_launcher(rust_scheduler *sched, int id, bool killed);
    virtual void start() { }
    virtual void join() { }
    rust_sched_driver *get_driver() { return &driver; }
};

class rust_sched_launcher_factory {
public:
    virtual ~rust_sched_launcher_factory() { }
    virtual rust_sched_launcher *
    create(rust_scheduler *sched, int id, bool killed) = 0;
};

class rust_thread_sched_launcher_factory
    : public rust_sched_launcher_factory {
public:
    virtual rust_sched_launcher *create(rust_scheduler *sched, int id,
                                        bool killed);
};

class rust_manual_sched_launcher_factory
    : public rust_sched_launcher_factory {
private:
    rust_manual_sched_launcher *launcher;
public:
    rust_manual_sched_launcher_factory() : launcher(NULL) { }
    virtual rust_sched_launcher *create(rust_scheduler *sched, int id,
                                        bool killed);
    rust_sched_driver *get_driver() {
        assert(launcher != NULL);
        return launcher->get_driver();
    }
};

#endif // RUST_SCHED_LAUNCHER_H
@ -1,431 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.


#include "rust_sched_loop.h"
#include "rust_util.h"
#include "rust_scheduler.h"

#ifndef _WIN32
pthread_key_t rust_sched_loop::task_key;
#else
DWORD rust_sched_loop::task_key;
#endif

const size_t C_STACK_SIZE = 2*1024*1024;

bool rust_sched_loop::tls_initialized = false;

rust_sched_loop::rust_sched_loop(rust_scheduler *sched, int id, bool killed) :
    _log(this),
    id(id),
    should_exit(false),
    cached_c_stack(NULL),
    extra_c_stack(NULL),
    cached_big_stack(NULL),
    extra_big_stack(NULL),
    dead_task(NULL),
    killed(killed),
    pump_signal(NULL),
    kernel(sched->kernel),
    sched(sched),
    log_lvl(log_debug),
    min_stack_size(kernel->env->min_stack_size),
    local_region(false, kernel->env->detailed_leaks, kernel->env->poison_on_free),
    // FIXME #2891: calculate a per-scheduler name.
    name("main")
{
    LOGPTR(this, "new dom", (uintptr_t)this);
    rng_init(&rng, kernel->env->rust_seed, NULL, 0);

    if (!tls_initialized)
        init_tls();
}

void
rust_sched_loop::activate(rust_task *task) {
    lock.must_have_lock();
    task->ctx.next = &c_context;
    DLOG(this, task, "descheduling...");
    lock.unlock();
    prepare_c_stack(task);
    task->ctx.swap(c_context);
    task->cleanup_after_turn();
    unprepare_c_stack();
    lock.lock();
    DLOG(this, task, "task has returned");
}


void
rust_sched_loop::fail() {
    _log.log(NULL, log_err, "domain %s @0x%" PRIxPTR " root task failed",
             name, this);
    kernel->fail();
}

void
rust_sched_loop::kill_all_tasks() {
    std::vector<rust_task*> all_tasks;

    {
        scoped_lock with(lock);
        // Any task created after this will be killed. See transition, below.
        killed = true;

        for (size_t i = 0; i < running_tasks.length(); i++) {
            rust_task *t = running_tasks[i];
            t->ref();
            all_tasks.push_back(t);
        }

        for (size_t i = 0; i < blocked_tasks.length(); i++) {
            rust_task *t = blocked_tasks[i];
            t->ref();
            all_tasks.push_back(t);
        }
    }

    while (!all_tasks.empty()) {
        rust_task *task = all_tasks.back();
        all_tasks.pop_back();
        task->kill();
        task->deref();
    }
}

size_t
rust_sched_loop::number_of_live_tasks() {
    lock.must_have_lock();
    return running_tasks.length() + blocked_tasks.length();
}

/**
 * Delete any dead tasks.
 */
void
rust_sched_loop::reap_dead_tasks() {
    lock.must_have_lock();

    if (dead_task == NULL) {
        return;
    }

    // Dereferencing the task will probably cause it to be released
    // from the scheduler, which may end up trying to take this lock
    lock.unlock();

    dead_task->delete_all_stacks();
    // Deref the task, which may cause it to request us to release it
    dead_task->deref();
    dead_task = NULL;

    lock.lock();
}

void
rust_sched_loop::release_task(rust_task *task) {
    // Nobody should have a ref to the task at this point
    assert(task->get_ref_count() == 0);
    // Now delete the task, which will require using this thread's
    // memory region.
    delete task;
    // Now release the task from the scheduler, which may trigger this
    // thread to exit
    sched->release_task();
}

/**
 * Schedules a running task for execution. Only running tasks can be
 * activated. Blocked tasks have to be unblocked before they can be
 * activated.
 *
 * Returns NULL if no tasks can be scheduled.
 */
rust_task *
rust_sched_loop::schedule_task() {
    lock.must_have_lock();
    size_t tasks = running_tasks.length();
    if (tasks > 0) {
        size_t i = (tasks > 1) ? (rng_gen_u32(&rng) % tasks) : 0;
        return running_tasks[i];
    }
    return NULL;
}

void
rust_sched_loop::log_state() {
    if (log_rt_task < log_debug) return;

    if (!running_tasks.is_empty()) {
        _log.log(NULL, log_debug, "running tasks:");
        for (size_t i = 0; i < running_tasks.length(); i++) {
            _log.log(NULL, log_debug, "\t task: %s @0x%" PRIxPTR,
                     running_tasks[i]->name,
                     running_tasks[i]);
        }
    }

    if (!blocked_tasks.is_empty()) {
        _log.log(NULL, log_debug, "blocked tasks:");
        for (size_t i = 0; i < blocked_tasks.length(); i++) {
            _log.log(NULL, log_debug, "\t task: %s @0x%" PRIxPTR
                     ", blocked on: 0x%" PRIxPTR " '%s'",
                     blocked_tasks[i]->name, blocked_tasks[i],
                     blocked_tasks[i]->get_cond(),
                     blocked_tasks[i]->get_cond_name());
        }
    }
}

void
rust_sched_loop::on_pump_loop(rust_signal *signal) {
    assert(pump_signal == NULL);
    assert(signal != NULL);
    pump_signal = signal;
}

void
rust_sched_loop::pump_loop() {
    assert(pump_signal != NULL);
    pump_signal->signal();
}

rust_sched_loop_state
rust_sched_loop::run_single_turn() {
    DLOG(this, task,
         "scheduler %d resuming ...", id);

    lock.lock();

    if (!should_exit) {
        assert(dead_task == NULL && "Tasks should only die after running");

        DLOG(this, dom, "worker %d, number_of_live_tasks = %d",
             id, number_of_live_tasks());

        rust_task *scheduled_task = schedule_task();

        if (scheduled_task == NULL) {
            log_state();
            DLOG(this, task,
                 "all tasks are blocked, scheduler id %d yielding ...",
                 id);

            lock.unlock();
            return sched_loop_state_block;
        }

        scheduled_task->assert_is_running();

        DLOG(this, task,
             "activating task %s 0x%" PRIxPTR
             ", state: %s",
             scheduled_task->name,
             (uintptr_t)scheduled_task,
             state_name(scheduled_task->get_state()));

        place_task_in_tls(scheduled_task);

        DLOG(this, task,
             "Running task %p on worker %d",
             scheduled_task, id);
        activate(scheduled_task);

        DLOG(this, task,
             "returned from task %s @0x%" PRIxPTR
             " in state '%s', worker id=%d",
             scheduled_task->name,
             (uintptr_t)scheduled_task,
             state_name(scheduled_task->get_state()),
             id);

        reap_dead_tasks();

        lock.unlock();
        return sched_loop_state_keep_going;
    } else {
        assert(running_tasks.is_empty() && "Should have no running tasks");
        assert(blocked_tasks.is_empty() && "Should have no blocked tasks");
        assert(dead_task == NULL && "Should have no dead tasks");

        DLOG(this, dom, "finished main-loop %d", id);

        lock.unlock();

        assert(!extra_c_stack);
        if (cached_c_stack) {
            destroy_exchange_stack(kernel->region(), cached_c_stack);
            cached_c_stack = NULL;
        }
        assert(!extra_big_stack);
        if (cached_big_stack) {
            destroy_exchange_stack(kernel->region(), cached_big_stack);
            cached_big_stack = NULL;
        }

        sched->release_task_thread();
        return sched_loop_state_exit;
    }
}

rust_task *
rust_sched_loop::create_task(rust_task *spawner, const char *name) {
    rust_task *task =
        new (this->kernel, "rust_task")
        rust_task(this, task_state_newborn,
                  name, kernel->env->min_stack_size);
    DLOG(this, task, "created task: " PTR ", spawner: %s, name: %s",
         task, spawner ? spawner->name : "(none)", name);

    task->id = kernel->generate_task_id();
    return task;
}

rust_task_list *
rust_sched_loop::state_list(rust_task_state state) {
    switch (state) {
    case task_state_running:
        return &running_tasks;
    case task_state_blocked:
        return &blocked_tasks;
    default:
        return NULL;
    }
}

const char *
rust_sched_loop::state_name(rust_task_state state) {
    switch (state) {
    case task_state_newborn:
        return "newborn";
    case task_state_running:
        return "running";
    case task_state_blocked:
        return "blocked";
    case task_state_dead:
        return "dead";
    default:
        assert(false);
        return "";
    }
}

void
rust_sched_loop::transition(rust_task *task,
                            rust_task_state src, rust_task_state dst,
                            rust_cond *cond, const char* cond_name) {
    scoped_lock with(lock);
    DLOG(this, task,
         "task %s " PTR " state change '%s' -> '%s' while in '%s'",
         name, (uintptr_t)this, state_name(src), state_name(dst),
         state_name(task->get_state()));
    assert(task->get_state() == src);
    rust_task_list *src_list = state_list(src);
    if (src_list) {
        src_list->remove(task);
    }
    rust_task_list *dst_list = state_list(dst);
    if (dst_list) {
        dst_list->append(task);
    }
    if (dst == task_state_dead) {
        assert(dead_task == NULL);
        dead_task = task;
    }
    task->set_state(dst, cond, cond_name);

    // If the entire runtime is failing, newborn tasks must be doomed.
    if (src == task_state_newborn && killed) {
        task->kill_inner();
    }

    pump_loop();
}

#ifndef _WIN32
void
rust_sched_loop::init_tls() {
    int result = pthread_key_create(&task_key, NULL);
    assert(!result && "Couldn't create the TLS key!");
    tls_initialized = true;
}

void
rust_sched_loop::place_task_in_tls(rust_task *task) {
    int result = pthread_setspecific(task_key, task);
    assert(!result && "Couldn't place the task in TLS!");
    task->record_stack_limit();
}
#else
void
rust_sched_loop::init_tls() {
    task_key = TlsAlloc();
    assert(task_key != TLS_OUT_OF_INDEXES && "Couldn't create the TLS key!");
    tls_initialized = true;
}

void
rust_sched_loop::place_task_in_tls(rust_task *task) {
    BOOL result = TlsSetValue(task_key, task);
    assert(result && "Couldn't place the task in TLS!");
    task->record_stack_limit();
}
#endif

void
rust_sched_loop::exit() {
    scoped_lock with(lock);
    DLOG(this, dom, "Requesting exit for thread %d", id);
    should_exit = true;
    pump_loop();
}

// Before activating each task, make sure we have a C stack available.
// It needs to be allocated ahead of time (while we're on our own
// stack), because once we're on the Rust stack we won't have enough
// room to do the allocation
void
rust_sched_loop::prepare_c_stack(rust_task *task) {
    assert(!extra_c_stack);
    if (!cached_c_stack && !task->have_c_stack()) {
        cached_c_stack = create_exchange_stack(kernel->region(),
                                               C_STACK_SIZE);
    }
    assert(!extra_big_stack);
    if (!cached_big_stack) {
        cached_big_stack = create_exchange_stack(kernel->region(),
                                                 C_STACK_SIZE +
                                                 (C_STACK_SIZE * 2));
        cached_big_stack->is_big = 1;
    }
}

void
rust_sched_loop::unprepare_c_stack() {
    if (extra_c_stack) {
        destroy_exchange_stack(kernel->region(), extra_c_stack);
        extra_c_stack = NULL;
    }
    if (extra_big_stack) {
        destroy_exchange_stack(kernel->region(), extra_big_stack);
        extra_big_stack = NULL;
    }
}

//
// Local Variables:
// mode: C++
// fill-column: 70;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
@ -1,252 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#ifndef RUST_SCHED_LOOP_H
#define RUST_SCHED_LOOP_H

#include "rust_globals.h"
#include "rust_log.h"
#include "rust_rng.h"
#include "rust_stack.h"
#include "rust_signal.h"
#include "context.h"
#include "util/indexed_list.h"

enum rust_task_state {
    task_state_newborn,
    task_state_running,
    task_state_blocked,
    task_state_dead
};

/*
The result of every turn of the scheduler loop. Instructs the loop
driver how to proceed.
 */
enum rust_sched_loop_state {
    sched_loop_state_keep_going,
    sched_loop_state_block,
    sched_loop_state_exit
};

class rust_kernel;
class rust_scheduler;
struct rust_task;

typedef indexed_list<rust_task> rust_task_list;

struct rust_sched_loop
{
private:

    lock_and_signal lock;

    // Fields known only by the runtime:
    rust_log _log;

    const int id;

    static bool tls_initialized;

#ifndef __WIN32__
    static pthread_key_t task_key;
#else
    static DWORD task_key;
#endif

    context c_context;
    rust_rng rng;
    bool should_exit;

    stk_seg *cached_c_stack;
    stk_seg *extra_c_stack;
    stk_seg *cached_big_stack;
    stk_seg *extra_big_stack;

    rust_task_list running_tasks;
    rust_task_list blocked_tasks;
    rust_task *dead_task;
    bool killed;

    rust_signal *pump_signal;

    void prepare_c_stack(rust_task *task);
    void unprepare_c_stack();

    rust_task_list *state_list(rust_task_state state);
    const char *state_name(rust_task_state state);

    void pump_loop();

private:
    // private and undefined to disable copying
    rust_sched_loop(const rust_sched_loop& rhs);
    rust_sched_loop& operator=(const rust_sched_loop& rhs);

public:
    rust_kernel *kernel;
    rust_scheduler *sched;

    // NB: this is used to filter *runtime-originating* debug
    // logging, on a per-scheduler basis. It's not likely what
    // you want to expose to the user in terms of per-task
    // or per-module logging control. By default all schedulers
    // are set to debug-level logging here, and filtered by
    // runtime category using the pseudo-modules ::rt::foo.
    uint32_t log_lvl;

    size_t min_stack_size;
    memory_region local_region;

    const char *const name; // Used for debugging

    // Only a pointer to 'name' is kept, so it must live as long as this
    // domain.
    rust_sched_loop(rust_scheduler *sched, int id, bool killed);
    void activate(rust_task *task);
    rust_log & get_log();
    void fail();

    size_t number_of_live_tasks();

    void reap_dead_tasks();
    rust_task *schedule_task();

    void on_pump_loop(rust_signal *signal);
    rust_sched_loop_state run_single_turn();

    void log_state();

    void kill_all_tasks();
    bool doomed();

    rust_task *create_task(rust_task *spawner, const char *name);

    void transition(rust_task *task,
                    rust_task_state src, rust_task_state dst,
                    rust_cond *cond, const char* cond_name);

    void init_tls();
    void place_task_in_tls(rust_task *task);

    static rust_task *get_task_tls();
    static rust_task *try_get_task_tls();

    // Called by each task when they are ready to be destroyed
    void release_task(rust_task *task);

    // Tells the scheduler to exit its scheduling loop and thread
    void exit();

    // Called by tasks when they need a stack on which to run C code
    stk_seg *borrow_c_stack();
    void return_c_stack(stk_seg *stack);

    // Called by tasks when they need a big stack
    stk_seg *borrow_big_stack();
    void return_big_stack(stk_seg *stack);

    int get_id() { return this->id; }
};

inline rust_log &
rust_sched_loop::get_log() {
    return _log;
}

inline rust_task* rust_sched_loop::try_get_task_tls()
{
    if (!tls_initialized)
        return NULL;
#ifdef __WIN32__
    rust_task *task = reinterpret_cast<rust_task *>
        (TlsGetValue(task_key));
#else
    rust_task *task = reinterpret_cast<rust_task *>
        (pthread_getspecific(task_key));
#endif
    return task;
}

inline rust_task* rust_sched_loop::get_task_tls()
{
    rust_task *task = try_get_task_tls();
    assert(task && "Couldn't get the task from TLS!");
    return task;
}

// NB: Runs on the Rust stack
inline stk_seg *
rust_sched_loop::borrow_c_stack() {
    assert(cached_c_stack);
    stk_seg *your_stack;
    if (extra_c_stack) {
        your_stack = extra_c_stack;
        extra_c_stack = NULL;
    } else {
        your_stack = cached_c_stack;
        cached_c_stack = NULL;
    }
    return your_stack;
}

// NB: Runs on the Rust stack
inline void
rust_sched_loop::return_c_stack(stk_seg *stack) {
    assert(!extra_c_stack);
    if (!cached_c_stack) {
        cached_c_stack = stack;
    } else {
        extra_c_stack = stack;
    }
}
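
// Borrow/return sketch (hypothetical caller, for illustration): a task that
// needs to run C code pairs these calls around the foreign call:
//
//     stk_seg *seg = sched_loop->borrow_c_stack();  // never NULL
//     // ... switch onto seg, run the C code, switch back ...
//     sched_loop->return_c_stack(seg);
//
// The loop holds at most two segments (cached + extra); returning a third
// would trip the assert above.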

// NB: Runs on the Rust stack. Might return NULL!
inline stk_seg *
rust_sched_loop::borrow_big_stack() {
    stk_seg *your_stack;
    if (extra_big_stack) {
        your_stack = extra_big_stack;
        extra_big_stack = NULL;
    } else {
        // NB: This may be null if we're asking for a *second*
        // big stack, in which case the caller will fall back to a slow path
        your_stack = cached_big_stack;
        cached_big_stack = NULL;
    }
    return your_stack;
}

// NB: Runs on the Rust stack
inline void
rust_sched_loop::return_big_stack(stk_seg *stack) {
    assert(!extra_big_stack);
    assert(stack);
    if (!cached_big_stack)
        cached_big_stack = stack;
    else
        extra_big_stack = stack;
}

// this is needed to appease the circular dependency gods
#include "rust_task.h"

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//

#endif /* RUST_SCHED_LOOP_H */
@ -1,25 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.


#include "rust_kernel.h"
#include "rust_sched_reaper.h"

// NB: We're using a very small stack here
const size_t STACK_SIZE = 1024*20;

rust_sched_reaper::rust_sched_reaper(rust_kernel *kernel)
    : rust_thread(STACK_SIZE), kernel(kernel) {
}

void
rust_sched_reaper::run() {
    kernel->wait_for_schedulers();
}
@ -1,27 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#ifndef RUST_SCHED_REAPER_H
#define RUST_SCHED_REAPER_H

#include "sync/rust_thread.h"

class rust_kernel;

/* Responsible for joining with rust_schedulers */
class rust_sched_reaper : public rust_thread {
private:
    rust_kernel *kernel;
public:
    rust_sched_reaper(rust_kernel *kernel);
    virtual void run();
};

#endif /* RUST_SCHED_REAPER_H */
@ -1,203 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.


#include "rust_globals.h"
#include "rust_scheduler.h"
#include "rust_task.h"
#include "rust_util.h"
#include "rust_sched_launcher.h"

rust_scheduler::rust_scheduler(rust_kernel *kernel,
                               size_t max_num_threads,
                               rust_sched_id id,
                               bool allow_exit,
                               bool killed,
                               rust_sched_launcher_factory *launchfac) :
    ref_count(1),
    kernel(kernel),
    live_threads(0),
    live_tasks(0),
    cur_thread(0),
    may_exit(allow_exit),
    killed(killed),
    launchfac(launchfac),
    max_num_threads(max_num_threads),
    id(id)
{
    // Create the first thread
    scoped_lock with(lock);
    threads.push(create_task_thread(0));
}

void rust_scheduler::delete_this() {
    destroy_task_threads();
    delete launchfac;
    delete this;
}

rust_sched_launcher *
rust_scheduler::create_task_thread(int id) {
    lock.must_have_lock();
    live_threads++;
    rust_sched_launcher *thread = launchfac->create(this, id, killed);
    KLOG(kernel, kern, "created task thread: " PTR
         ", id: %d, live_threads: %d",
         thread, id, live_threads);
    return thread;
}

void
rust_scheduler::destroy_task_thread(rust_sched_launcher *thread) {
    KLOG(kernel, kern, "deleting task thread: " PTR, thread);
    delete thread;
}

void
rust_scheduler::destroy_task_threads() {
    scoped_lock with(lock);
    for(size_t i = 0; i < threads.size(); ++i) {
        destroy_task_thread(threads[i]);
    }
}

void
rust_scheduler::start_task_threads()
{
    scoped_lock with(lock);
    for(size_t i = 0; i < threads.size(); ++i) {
        rust_sched_launcher *thread = threads[i];
        thread->start();
    }
}

void
rust_scheduler::join_task_threads()
{
    scoped_lock with(lock);
    for(size_t i = 0; i < threads.size(); ++i) {
        rust_sched_launcher *thread = threads[i];
        thread->join();
    }
}

void
rust_scheduler::kill_all_tasks() {
    array_list<rust_sched_launcher *> copied_threads;
    {
        scoped_lock with(lock);
        killed = true;
        for (size_t i = 0; i < threads.size(); ++i) {
            copied_threads.push(threads[i]);
        }
    }
    for(size_t i = 0; i < copied_threads.size(); ++i) {
        rust_sched_launcher *thread = copied_threads[i];
        thread->get_loop()->kill_all_tasks();
    }
}

rust_task *
rust_scheduler::create_task(rust_task *spawner, const char *name) {
    size_t thread_no;
    {
        scoped_lock with(lock);
        live_tasks++;

        if (cur_thread < threads.size()) {
            thread_no = cur_thread;
        } else {
            assert(threads.size() < max_num_threads);
            thread_no = threads.size();
            rust_sched_launcher *thread = create_task_thread(thread_no);
            thread->start();
            threads.push(thread);
        }
        cur_thread = (thread_no + 1) % max_num_threads;
    }
    KLOG(kernel, kern, "Creating task %s, on thread %d.", name, thread_no);
    kernel->inc_live_count();
    rust_sched_launcher *thread = threads[thread_no];
    return thread->get_loop()->create_task(spawner, name);
}

void
rust_scheduler::release_task() {
    bool need_exit = false;
    {
        scoped_lock with(lock);
        live_tasks--;
        if (live_tasks == 0 && may_exit) {
            need_exit = true;
        }
    }
    kernel->dec_live_count();
    if (need_exit) {
        exit();
    }
}

void
rust_scheduler::exit() {
    // Take a copy of the number of threads. After the last thread exits this
    // scheduler will get destroyed, and our fields will cease to exist.
    //
    // This is also the reason we can't use the lock here (as in the other
    // cases when accessing `threads`), after the loop the lock won't exist
    // anymore. This is safe because this method is only called when all the
    // tasks are dead, so there is no chance of a task trying to create new
    // threads.
    size_t current_num_threads = threads.size();
    for(size_t i = 0; i < current_num_threads; ++i) {
        threads[i]->get_loop()->exit();
    }
}

size_t
rust_scheduler::max_number_of_threads() {
    return max_num_threads;
}

size_t
rust_scheduler::number_of_threads() {
    scoped_lock with(lock);
    return threads.size();
}

void
rust_scheduler::release_task_thread() {
    uintptr_t new_live_threads;
    {
        scoped_lock with(lock);
        new_live_threads = --live_threads;
    }
    if (new_live_threads == 0) {
        kernel->release_scheduler_id(id);
    }
}

void
rust_scheduler::allow_exit() {
    bool need_exit = false;
    {
        scoped_lock with(lock);
        may_exit = true;
        need_exit = live_tasks == 0;
    }
    if (need_exit) {
        exit();
    }
}

void
rust_scheduler::disallow_exit() {
    scoped_lock with(lock);
    may_exit = false;
}
@ -1,91 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/**
   The rust scheduler. Schedulers may be added to the kernel
   dynamically and they run until there are no more tasks to
   schedule. Most of the scheduler work is carried out in worker
   threads by rust_sched_loop.
 */
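
// Illustrative sketch (not part of this header): schedulers are created and
// looked up through the kernel rather than constructed directly, e.g.:
//
//     rust_sched_id id = kernel->create_scheduler(num_threads);
//     rust_scheduler *sched = kernel->get_scheduler_by_id(id);
//     rust_task *task = sched->create_task(NULL, "worker");
//
// Each scheduler then runs until its live task count reaches zero with
// may_exit set.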
|
||||
|
||||
#ifndef RUST_SCHEDULER_H
|
||||
#define RUST_SCHEDULER_H
|
||||
|
||||
#include "rust_globals.h"
|
||||
#include "util/array_list.h"
|
||||
#include "rust_kernel.h"
|
||||
#include "rust_refcount.h"
|
||||
|
||||
class rust_sched_launcher;
|
||||
class rust_sched_launcher_factory;
|
||||
|
||||
class rust_scheduler : public kernel_owned<rust_scheduler> {
|
||||
RUST_ATOMIC_REFCOUNT();
|
||||
// FIXME (#2693): Make these private
|
||||
public:
|
||||
rust_kernel *kernel;
|
||||
private:
|
||||
// Protects live_threads, live_tasks, cur_thread, may_exit
|
||||
lock_and_signal lock;
|
||||
// When this hits zero we'll tell the kernel to release us
|
||||
uintptr_t live_threads;
|
||||
// When this hits zero we'll tell the threads to exit
|
||||
uintptr_t live_tasks;
|
||||
size_t cur_thread;
|
||||
bool may_exit;
|
||||
bool killed;
|
||||
|
||||
rust_sched_launcher_factory *launchfac;
|
||||
array_list<rust_sched_launcher *> threads;
|
||||
const size_t max_num_threads;
|
||||
|
||||
rust_sched_id id;
|
||||
|
||||
void destroy_task_threads();
|
||||
|
||||
rust_sched_launcher *create_task_thread(int id);
|
||||
void destroy_task_thread(rust_sched_launcher *thread);
|
||||
|
||||
void exit();
|
||||
|
||||
// Called when refcount reaches zero
|
||||
void delete_this();
|
||||
|
||||
private:
|
||||
// private and undefined to disable copying
|
||||
rust_scheduler(const rust_scheduler& rhs);
|
||||
rust_scheduler& operator=(const rust_scheduler& rhs);
|
||||
|
||||
public:
|
||||
rust_scheduler(rust_kernel *kernel, size_t max_num_threads,
|
||||
rust_sched_id id, bool allow_exit, bool killed,
|
||||
rust_sched_launcher_factory *launchfac);
|
||||
|
||||
void start_task_threads();
|
||||
void join_task_threads();
|
||||
void kill_all_tasks();
|
||||
rust_task* create_task(rust_task *spawner, const char *name);
|
||||
|
||||
void release_task();
|
||||
|
||||
size_t max_number_of_threads();
|
||||
size_t number_of_threads();
|
||||
// Called by each thread when it terminates. When all threads
|
||||
// terminate the scheduler does as well.
|
||||
void release_task_thread();
|
||||
|
||||
rust_sched_id get_id() { return id; }
|
||||
// Tells the scheduler that as soon as it runs out of tasks
|
||||
// to run it should exit
|
||||
void allow_exit();
|
||||
void disallow_exit();
|
||||
};
|
||||
|
||||
#endif /* RUST_SCHEDULER_H */
@@ -1,742 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#ifndef __WIN32__
#ifdef __ANDROID__
#include "rust_android_dummy.h"
#else
#include <execinfo.h>
#endif
#endif
#include <iostream>
#include <algorithm>

#include "rust_task.h"
#include "rust_env.h"
#include "rust_globals.h"
#include "rust_crate_map.h"

// Tasks
rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
                     const char *name, size_t init_stack_sz) :
    ref_count(1),
    id(0),
    stk(NULL),
    runtime_sp(0),
    sched(sched_loop->sched),
    sched_loop(sched_loop),
    kernel(sched_loop->kernel),
    name(name),
    list_index(-1),
    boxed(&local_region, sched_loop->kernel->env->poison_on_free),
    local_region(&sched_loop->local_region),
    unwinding(false),
    total_stack_sz(0),
    task_local_data(NULL),
    task_local_data_cleanup(NULL),
    borrow_list(NULL),
    state(state),
    cond(NULL),
    cond_name("none"),
    event_reject(false),
    event(NULL),
    killed(false),
    reentered_rust_stack(false),
    disallow_kill(0),
    disallow_yield(0),
    c_stack(NULL),
    next_c_sp(0),
    next_rust_sp(0)
{
    LOGPTR(sched_loop, "new task", (uintptr_t)this);
    DLOG(sched_loop, task, "sizeof(task) = %d (0x%x)",
         sizeof *this, sizeof *this);

    new_stack(init_stack_sz);
}

// NB: This does not always run on the task's scheduler thread
void
rust_task::delete_this()
{
    DLOG(sched_loop, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
         name, (uintptr_t)this, ref_count);

    /* FIXME (#2677): tighten this up, there are some more
       assertions that hold at task-lifecycle events. */
    assert(ref_count == 0); // ||
    //     (ref_count == 1 && this == sched->root_task));

    // The borrow list should be freed in the task annihilator
    assert(!borrow_list);

    sched_loop->release_task(this);
}

// All failure goes through me. Put your breakpoints here!
extern "C" void
rust_task_fail(rust_task *task,
               char const *expr,
               char const *file,
               size_t line) {
    assert(task != NULL);
    task->begin_failure(expr, file, line);
}

struct spawn_args {
    rust_task *task;
    spawn_fn f;
    rust_opaque_box *envptr;
    void *argptr;
};

struct cleanup_args {
    spawn_args *spargs;
    bool threw_exception;
};

void
annihilate_boxes(rust_task *task);

void
cleanup_task(cleanup_args *args) {
    spawn_args *a = args->spargs;
    bool threw_exception = args->threw_exception;
    rust_task *task = a->task;

    {
        scoped_lock with(task->lifecycle_lock);
        if (task->killed && !threw_exception) {
            LOG(task, task, "Task killed during termination");
            threw_exception = true;
        }
    }

    // Clean up TLS. This will only be set if TLS was used to begin with.
    // Because this is a crust function, it must be called from the C stack.
    if (task->task_local_data_cleanup != NULL) {
        // This assert should hold but it's not our job to ensure it (and
        // the condition might change). Handled in libcore/task.rs.
        // assert(task->task_local_data != NULL);
        task->task_local_data_cleanup(task->task_local_data);
        task->task_local_data = NULL;
    } else if (threw_exception && task->id == INIT_TASK_ID) {
        // Edge case: If main never spawns any tasks, but fails anyway, TLS
        // won't be around to take down the kernel (task.rs:kill_taskgroup,
        // rust_task_kill_all). Do it here instead.
        // (Note that child tasks cannot init their TLS if they were
        // killed too early, so we need to check main's task id too.)
        task->fail_sched_loop();
        // This must not happen twice.
        static bool main_task_failed_without_spawning = false;
        assert(!main_task_failed_without_spawning);
        main_task_failed_without_spawning = true;
    }

    // Call the box annihilator.
    cratemap* map = reinterpret_cast<cratemap*>(global_crate_map);
    task->call_on_rust_stack(NULL, const_cast<void*>(map->annihilate_fn()));

    task->die();

#ifdef __WIN32__
    assert(!threw_exception && "No exception-handling yet on windows builds");
#endif
}

// This runs on the Rust stack
void task_start_wrapper(spawn_args *a)
{
    rust_task *task = a->task;

    bool threw_exception = false;
    try {
        a->f(a->envptr, a->argptr);
    } catch (rust_task *ex) {
        assert(ex == task && "Expected this task to be thrown for unwinding");
        threw_exception = true;

        if (task->c_stack) {
            task->return_c_stack();
        }

        // Since we call glue code below we need to make sure we
        // have the stack limit set up correctly
        task->reset_stack_limit();
    }

    // We should have returned any C stack by now
    assert(task->c_stack == NULL);

    rust_opaque_box* env = a->envptr;
    if (env) {
        // free the environment (which should be a unique closure).
        const type_desc *td = env->td;
        td->drop_glue(NULL,
                      box_body(env));
        task->kernel->region()->free(env);
    }

    // The cleanup work needs lots of stack
    cleanup_args ca = {a, threw_exception};
    task->call_on_c_stack(&ca, (void*)cleanup_task);

    task->ctx.next->swap(task->ctx);
}

void
rust_task::start(spawn_fn spawnee_fn,
                 rust_opaque_box *envptr,
                 void *argptr)
{
    LOG(this, task, "starting task from fn 0x%" PRIxPTR
        " with env 0x%" PRIxPTR " and arg 0x%" PRIxPTR,
        spawnee_fn, envptr, argptr);

    assert(stk->data != NULL);

    char *sp = (char *)stk->end;

    sp -= sizeof(spawn_args);

    spawn_args *a = (spawn_args *)sp;

    a->task = this;
    a->envptr = envptr;
    a->argptr = argptr;
    a->f = spawnee_fn;

    ctx.call((void *)task_start_wrapper, a, sp);

    this->start();
}

void rust_task::start()
{
    transition(task_state_newborn, task_state_running, NULL, "none");
}

bool
rust_task::must_fail_from_being_killed() {
    scoped_lock with(lifecycle_lock);
    return must_fail_from_being_killed_inner();
}

bool
rust_task::must_fail_from_being_killed_inner() {
    lifecycle_lock.must_have_lock();
    return killed && !reentered_rust_stack && disallow_kill == 0;
}

void rust_task_yield_fail(rust_task *task) {
    LOG_ERR(task, task, "task %" PRIxPTR " yielded in an atomic section",
            task);
    task->fail();
}

// Only run this on the rust stack
MUST_CHECK bool rust_task::yield() {
    bool killed = false;

    if (disallow_yield > 0) {
        call_on_c_stack(this, (void *)rust_task_yield_fail);
    }

    // This check is largely superfluous; it's the one after the context swap
    // that really matters. This one allows us to assert a useful invariant.

    // NB: This takes lifecycle_lock three times, and I believe that none of
    // them are actually necessary, as per #3213. Removing the locks here may
    // cause *harmless* races with a killer... but I didn't observe any
    // substantial performance improvement from removing them, even with
    // msgsend-ring-pipes, and also it's my last day, so I'm not about to
    // remove them. -- bblum
    if (must_fail_from_being_killed()) {
        {
            scoped_lock with(lifecycle_lock);
            assert(!(state == task_state_blocked));
        }
        killed = true;
    }

    // Return to the scheduler.
    ctx.next->swap(ctx);

    if (must_fail_from_being_killed()) {
        killed = true;
    }
    return killed;
}

void
rust_task::kill() {
    scoped_lock with(lifecycle_lock);
    kill_inner();
}

void rust_task::kill_inner() {
    lifecycle_lock.must_have_lock();

    // Multiple kills should be able to safely race, but check anyway.
    if (killed) {
        LOG(this, task, "task %s @0x%" PRIxPTR " already killed", name, this);
        return;
    }

    // Note the distinction here: kill() is when you're in an upcall
    // from task A and want to force-fail task B, you do B->kill().
    // If you want to fail yourself you do self->fail().
    LOG(this, task, "killing task %s @0x%" PRIxPTR, name, this);
    // When the task next goes to yield or resume it will fail
    killed = true;
    // Unblock the task so it can unwind.

    if (state == task_state_blocked &&
        must_fail_from_being_killed_inner()) {
        wakeup_inner(cond);
    }

    LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
}

void
rust_task::fail() {
    // See note in ::kill() regarding who should call this.
    fail(NULL, NULL, 0);
}

void
rust_task::fail(char const *expr, char const *file, size_t line) {
    rust_task_fail(this, expr, file, line);
}

// Called only by rust_task_fail
void
rust_task::begin_failure(char const *expr, char const *file, size_t line) {

    if (expr) {
        LOG_ERR(this, task, "task failed at '%s', %s:%" PRIdPTR,
                expr, file, line);
    }

    DLOG(sched_loop, task, "task %s @0x%" PRIxPTR " failing", name, this);
    backtrace();
    unwinding = true;
#ifndef __WIN32__
    throw this;
#else
    die();
    // FIXME (#908): Need unwinding on windows. This will end up aborting
    fail_sched_loop();
#endif
}

void rust_task::fail_sched_loop() {
    sched_loop->fail();
}

void rust_task::assert_is_running()
{
    scoped_lock with(lifecycle_lock);
    assert(state == task_state_running);
}

// FIXME (#2851) Remove this code when rust_port goes away?
bool
rust_task::blocked_on(rust_cond *on)
{
    lifecycle_lock.must_have_lock();
    return cond == on;
}

void *
rust_task::malloc(size_t sz, const char *tag, type_desc *td)
{
    return local_region.malloc(sz, tag);
}

void *
rust_task::realloc(void *data, size_t sz)
{
    return local_region.realloc(data, sz);
}

void
rust_task::free(void *p)
{
    local_region.free(p);
}

void
rust_task::transition(rust_task_state src, rust_task_state dst,
                      rust_cond *cond, const char* cond_name) {
    scoped_lock with(lifecycle_lock);
    transition_inner(src, dst, cond, cond_name);
}

void rust_task::transition_inner(rust_task_state src, rust_task_state dst,
                                 rust_cond *cond, const char* cond_name) {
    lifecycle_lock.must_have_lock();
    sched_loop->transition(this, src, dst, cond, cond_name);
}

void
rust_task::set_state(rust_task_state state,
                     rust_cond *cond, const char* cond_name) {
    lifecycle_lock.must_have_lock();
    this->state = state;
    this->cond = cond;
    this->cond_name = cond_name;
}

bool
rust_task::block(rust_cond *on, const char* name) {
    scoped_lock with(lifecycle_lock);
    return block_inner(on, name);
}

bool
rust_task::block_inner(rust_cond *on, const char* name) {
    if (must_fail_from_being_killed_inner()) {
        // We're already going to die. Don't block. Tell the task to fail.
        return false;
    }

    LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
        (uintptr_t) on, (uintptr_t) cond);
    assert(cond == NULL && "Cannot block an already blocked task.");
    assert(on != NULL && "Cannot block on a NULL object.");

    transition_inner(task_state_running, task_state_blocked, on, name);

    return true;
}

void
rust_task::wakeup(rust_cond *from) {
    scoped_lock with(lifecycle_lock);
    wakeup_inner(from);
}

void
rust_task::wakeup_inner(rust_cond *from) {
    assert(cond != NULL && "Cannot wake up unblocked task.");
    LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
        (uintptr_t) cond, (uintptr_t) from);
    assert(cond == from && "Cannot wake up blocked task on wrong condition.");

    transition_inner(task_state_blocked, task_state_running, NULL, "none");
}

void
rust_task::die() {
    transition(task_state_running, task_state_dead, NULL, "none");
}

void
rust_task::backtrace() {
    if (log_rt_backtrace <= log_err) return;
#ifndef __WIN32__
    void *call_stack[256];
    int nframes = ::backtrace(call_stack, 256);
    backtrace_symbols_fd(call_stack + 1, nframes - 1, 2);
#endif
}

size_t
rust_task::get_next_stack_size(size_t min, size_t current, size_t requested) {
    LOG(this, mem, "calculating new stack size for 0x%" PRIxPTR, this);
    LOG(this, mem,
        "min: %" PRIdPTR " current: %" PRIdPTR " requested: %" PRIdPTR,
        min, current, requested);

    // Allocate at least enough to accommodate the next frame, plus a little
    // slack to avoid thrashing
    size_t sz = std::max(min, requested + (requested / 2));

    // And double the stack size each allocation
    const size_t max = 1024 * 1024;
    size_t next = std::min(max, current * 2);

    sz = std::max(sz, next);

    LOG(this, mem, "next stack size: %" PRIdPTR, sz);
    assert(requested <= sz);
    return sz;
}
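To make the sizing policy concrete, here is a self-contained restatement of the arithmetic above with two worked cases (illustrative values; the 1 MiB doubling cap is the constant from the function):

#include <algorithm>
#include <cassert>
#include <cstddef>

static size_t next_stack_size(size_t min, size_t current, size_t requested) {
    size_t sz = std::max(min, requested + (requested / 2)); // 1.5x slack
    const size_t max = 1024 * 1024;
    size_t next = std::min(max, current * 2);               // capped doubling
    return std::max(sz, next);
}

int main() {
    // Doubling dominates while requests are small...
    assert(next_stack_size(0x1000, 0x2000, 0x1000) == 0x4000);
    // ...and the 1.5x slack term dominates once a request outgrows it.
    assert(next_stack_size(0x1000, 0x2000, 0x10000) == 0x18000);
    return 0;
}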

void
rust_task::free_stack(stk_seg *stk) {
    LOGPTR(sched_loop, "freeing stk segment", (uintptr_t)stk);
    total_stack_sz -= user_stack_size(stk);
    destroy_stack(&local_region, stk);
}

void
new_stack_slow(new_stack_args *args) {
    args->task->new_stack(args->requested_sz);
}

void
rust_task::new_stack(size_t requested_sz) {
    LOG(this, mem, "creating new stack for task %" PRIxPTR, this);
    if (stk) {
        ::check_stack_canary(stk);
    }

    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = sched_loop->min_stack_size;

    // Try to reuse an existing stack segment
    while (stk != NULL && stk->next != NULL) {
        size_t next_sz = user_stack_size(stk->next);
        if (min_sz <= next_sz && requested_sz <= next_sz) {
            LOG(this, mem, "reusing existing stack");
            stk = stk->next;
            return;
        } else {
            LOG(this, mem, "existing stack is not big enough");
            stk_seg *new_next = stk->next->next;
            free_stack(stk->next);
            stk->next = new_next;
            if (new_next) {
                new_next->prev = stk;
            }
        }
    }

    // The size of the current stack segment, excluding red zone
    size_t current_sz = 0;
    if (stk != NULL) {
        current_sz = user_stack_size(stk);
    }
    // The calculated size of the new stack, excluding red zone
    size_t rust_stk_sz = get_next_stack_size(min_sz,
                                             current_sz, requested_sz);

    size_t max_stack = kernel->env->max_stack_size;
    size_t used_stack = total_stack_sz + rust_stk_sz;

    // Don't allow stacks to grow forever. During unwinding we have to allow
    // for more stack than normal in order to allow destructors room to run,
    // arbitrarily selected as 2x the maximum stack size.
    if (!unwinding && used_stack > max_stack) {
        LOG_ERR(this, task, "task %" PRIxPTR " ran out of stack", this);
        abort();
    } else if (unwinding && used_stack > max_stack * 2) {
        LOG_ERR(this, task,
                "task %" PRIxPTR " ran out of stack during unwinding", this);
        abort();
    }

    size_t sz = rust_stk_sz + RED_ZONE_SIZE;
    stk_seg *new_stk = create_stack(&local_region, sz);
    LOGPTR(sched_loop, "new stk", (uintptr_t)new_stk);
    new_stk->task = this;
    new_stk->next = NULL;
    new_stk->prev = stk;
    if (stk) {
        stk->next = new_stk;
    }
    LOGPTR(sched_loop, "stk end", new_stk->end);

    stk = new_stk;
    total_stack_sz += user_stack_size(new_stk);
}

void
rust_task::cleanup_after_turn() {
    // Delete any spare stack segments that were left
    // behind by calls to prev_stack
    assert(stk);

    while (stk->next) {
        stk_seg *new_next = stk->next->next;
        assert(!stk->next->is_big);
        free_stack(stk->next);

        stk->next = new_next;
    }
}

// NB: Runs on the Rust stack. Returns true if we successfully allocated the
// big stack and false otherwise.
bool
rust_task::new_big_stack() {
    assert(stk);

    stk_seg *borrowed_big_stack = sched_loop->borrow_big_stack();
    if (!borrowed_big_stack) {
        return false;
    }

    borrowed_big_stack->task = this;
    borrowed_big_stack->next = stk->next;
    if (borrowed_big_stack->next)
        borrowed_big_stack->next->prev = borrowed_big_stack;
    borrowed_big_stack->prev = stk;
    stk->next = borrowed_big_stack;

    stk = borrowed_big_stack;

    return true;
}

static bool
sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
    // Not positive these bounds for sp are correct. I think that the first
    // possible value for esp on a new stack is stk->end, which points to the
    // address before the first value to be pushed onto a new stack. The last
    // possible address we can push data to is stk->data. Regardless, there's
    // so much slop at either end that we should never hit one of these
    // boundaries.
    return (uintptr_t)stk->data <= sp && sp <= stk->end;
}

/*
Called by landing pads during unwinding to figure out which stack segment we
are currently running on and record the stack limit (which was not restored
when unwinding through __morestack).
 */
void
rust_task::reset_stack_limit() {
    uintptr_t sp = get_sp();
    bool reset = false;
    while (!sp_in_stk_seg(sp, stk)) {
        reset = true;
        prev_stack();
        assert(stk != NULL && "Failed to find the current stack");
    }

    // Each call to prev_stack will record the stack limit. If we *didn't*
    // call prev_stack then we still need to record it now to catch a corner
    // case: the throw to initiate unwinding starts on the C stack while the
    // sp limit is 0. If we don't set the limit here then the rust code run
    // subsequently will veer into the red zone. Lame!
    if (!reset) {
        record_stack_limit();
    }
}

void
rust_task::check_stack_canary() {
    ::check_stack_canary(stk);
}

void
rust_task::delete_all_stacks() {
    assert(!on_rust_stack());
    // Delete all the stacks. There may be more than one if the task failed
    // and no landing pads stopped to clean up.
    assert(stk->next == NULL);
    while (stk != NULL) {
        stk_seg *prev = stk->prev;

        if (stk->is_big)
            sched_loop->return_big_stack(stk);
        else
            free_stack(stk);

        stk = prev;
    }
}

/*
Returns true if we're currently running on the Rust stack
 */
bool
rust_task::on_rust_stack() {
    if (stk == NULL) {
        // This only happens during construction
        return false;
    }

    uintptr_t sp = get_sp();
    bool in_first_segment = sp_in_stk_seg(sp, stk);
    if (in_first_segment) {
        return true;
    } else if (stk->prev != NULL) {
        // This happens only when calling the upcall to delete
        // a stack segment
        bool in_second_segment = sp_in_stk_seg(sp, stk->prev);
        return in_second_segment;
    } else {
        return false;
    }
}

// NB: In inhibit_kill and allow_kill, helgrind would complain that we need to
// hold lifecycle_lock while accessing disallow_kill. Even though another
// killing task may access disallow_kill concurrently, this is not racy
// because the killer only cares if this task is blocking, and block() already
// uses proper locking. See https://github.com/mozilla/rust/issues/3213 .

void
rust_task::inhibit_kill() {
    // Here might be good, though not mandatory, to check if we have to die.
    disallow_kill++;
}

void
rust_task::allow_kill() {
    assert(disallow_kill > 0 && "Illegal allow_kill(): already killable!");
    disallow_kill--;
}

void rust_task::inhibit_yield() {
    disallow_yield++;
}

void rust_task::allow_yield() {
    assert(disallow_yield > 0 && "Illegal allow_yield(): already yieldable!");
    disallow_yield--;
}

MUST_CHECK bool rust_task::wait_event(void **result) {
    bool killed = false;
    scoped_lock with(lifecycle_lock);

    if (!event_reject) {
        block_inner(&event_cond, "waiting on event");
        lifecycle_lock.unlock();
        killed = yield();
        lifecycle_lock.lock();
    } else if (must_fail_from_being_killed_inner()) {
        // If the deschedule was rejected, yield won't do our killed check for
        // us. For thoroughness, do it here. FIXME (#524)
        killed = true;
    }

    event_reject = false;
    *result = event;
    return killed;
}

void
rust_task::signal_event(void *event) {
    scoped_lock with(lifecycle_lock);

    this->event = event;
    event_reject = true;
    if (task_state_blocked == state) {
        wakeup_inner(&event_cond);
    }
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//

@@ -1,681 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/**
The rust task is a cooperatively-scheduled green thread that executes
Rust code on a segmented stack.

This class has too many responsibilities:

* Working with the scheduler loop to signal and respond to state changes,
  and dealing with all the thread synchronization issues involved

* Managing the dynamically resizing list of Rust stack segments

* Switching between running Rust code on the Rust segmented stack and
  foreign C code on large stacks owned by the scheduler

# Lifetime

The lifetime of a rust_task object closely mirrors that of a running Rust
task object, but they are not identical. In particular, the rust_task is an
atomically reference counted object that might be accessed from arbitrary
threads at any time. This may keep the task from being destroyed even after
the task is dead from a Rust task lifecycle perspective. The rust_tasks are
reference counted in the following places:

* By the task's lifetime (i.e., running tasks hold a reference to themselves)

* In the rust_task_kill_all -> rust_kernel::fail ->
  rust_sched_loop::kill_all_tasks path. When a task brings down the whole
  runtime, each sched_loop must use refcounts to take a 'snapshot' of all
  existing tasks so it can be sure to kill all of them.

* In core::pipes, tasks that use select() use reference counts to avoid
  use-after-free races with multiple different signallers.

# Death

All task death goes through a single central path: The task invokes
rust_task::die(), which invokes transition(task_state_dead), which pumps
the scheduler loop, which switches to rust_sched_loop::run_single_turn(),
which calls reap_dead_tasks(), which cleans up the task's stack segments
and drops the reference count.

When a task's reference count hits zero, rust_sched_loop::release_task()
is called. This frees the memory and deregisters the task from the kernel,
which may trigger the sched_loop, the scheduler, and/or the kernel to exit
completely if it was the last task alive.

die() is called from two places: the successful exit path, in cleanup_task,
and on failure (on linux, this is also in cleanup_task, after unwinding
completes; on windows, it is in begin_failure).

Tasks do not force-quit other tasks; a task die()s only itself. However...

# Killing

Tasks may kill each other. This happens when propagating failure between
tasks (see the task::spawn options interface). The code path for this is
rust_task_kill_other() -> rust_task::kill().

It also happens when the main ("root") task (or any task in that task's
linked-failure-group) fails: this brings down the whole runtime, and kills
all tasks in all groups. The code path for this is rust_task_kill_all() ->
rust_kernel::fail() -> rust_scheduler::kill_all_tasks() ->
rust_sched_loop::kill_all_tasks() -> rust_task::kill().

In either case, killing a task involves, under the protection of its
lifecycle_lock, (a) setting the 'killed' flag, and (b) checking if it is
'blocked'* and if so punting it awake.
(* and also isn't unkillable, which may happen via task::unkillable()
   or via calling an extern rust function from C.)

The killed task will then (wake up if it was asleep, and) eventually call
yield() (or wait_event()), which will check the killed flag, see that it is
true, and then invoke 'fail', which begins the death process described
above.

Three things guarantee concurrency safety in this whole affair:

* The lifecycle_lock protects tasks accessing each other's state: it makes
  killing-and-waking-up atomic with respect to a task in block() deciding
  whether it's allowed to go to sleep, so tasks can't 'escape' being woken.

* In the case of linked failure propagation, we ensure (in task.rs) that
  tasks can only see another task's rust_task pointer if that task is
  already alive. Even before entering the runtime failure path, a task will
  access (locked) the linked-failure data structures to remove its task
  pointer so that no subsequently-failing tasks will do a use-after-free.

* In the case of bringing down the whole runtime, each sched_loop takes an
  "atomic snapshot" of all its tasks, protected by the sched_loop's lock,
  and also sets a 'failing' flag so that any subsequently-failing task will
  know that it must fail immediately upon creation (which is also checked
  under the same lock). A similar process exists at the one-step-higher
  level of the kernel killing all the schedulers (the kernel snapshots all
  the schedulers and sets a 'failing' flag in the scheduler table).
 */
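A minimal sketch of the "snapshot under the lock, kill outside it" idea described above, using std::shared_ptr in place of the runtime's intrusive atomic refcounts (all names illustrative):

#include <memory>
#include <mutex>
#include <vector>

struct task {
    void kill() { /* hypothetical stand-in for rust_task::kill() */ }
};

struct loop_state {
    std::mutex lock;
    bool failing;
    std::vector<std::shared_ptr<task> > running;

    void kill_all_tasks() {
        std::vector<std::shared_ptr<task> > snapshot;
        {
            std::lock_guard<std::mutex> with(lock);
            failing = true;      // later-created tasks fail on creation
            snapshot = running;  // refcount bump pins every task
        }
        // No lock held here: each task stays valid for the duration of
        // kill() even if it dies and is released concurrently.
        for (size_t i = 0; i < snapshot.size(); i++)
            snapshot[i]->kill();
    }
};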

#ifndef RUST_TASK_H
#define RUST_TASK_H

#include <map>

#include "rust_globals.h"
#include "util/array_list.h"
#include "context.h"
#include "rust_debug.h"
#include "rust_kernel.h"
#include "boxed_region.h"
#include "rust_stack.h"
#include "rust_type.h"
#include "rust_sched_loop.h"
#include "sp.h"

// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
// FIXME (#1509): We want this to be 128 but need to slim the red zone calls
// down, disable lazy symbol relocation, and other things we haven't
// discovered yet
#define RZ_LINUX_32 (1024*2)
#define RZ_LINUX_64 (1024*2)
#define RZ_MAC_32   (1024*20)
#define RZ_MAC_64   (1024*20)
#define RZ_WIN_32   (1024*20)
#define RZ_BSD_32   (1024*20)
#define RZ_BSD_64   (1024*20)

// The threshold beyond which we switch to the C stack.
#define STACK_THRESHOLD (1024 * 1024)

#ifdef __linux__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_LINUX_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_LINUX_64
#endif
#ifdef __mips__
#define RED_ZONE_SIZE RZ_MAC_32
#endif
#ifdef __arm__
#define RED_ZONE_SIZE RZ_LINUX_32
#endif
#endif
#ifdef __APPLE__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_MAC_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_MAC_64
#endif
#endif
#ifdef __WIN32__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_WIN_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_WIN_64
#endif
#endif
#ifdef __FreeBSD__
#ifdef __i386__
#define RED_ZONE_SIZE RZ_BSD_32
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE RZ_BSD_64
#endif
#endif
#ifdef __ANDROID__
#define RED_ZONE_SIZE RZ_MAC_32
#endif

#ifndef RED_ZONE_SIZE
# error "Red zone not defined for this platform"
#endif
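One quirk worth noting in the block above: the __WIN32__/__x86_64__ branch expands RED_ZONE_SIZE to RZ_WIN_64, which is never defined, so a 64-bit Windows build would slip past the #ifndef guard (the macro is defined, just to a nonexistent token) and only fail at the first use site. A compile-time check such as the following (illustrative, not part of the original header) would surface the misconfiguration up front:

// C++11 sketch: fails the build immediately if the selected red zone
// is unusable, rather than erroring at scattered use sites.
static_assert(RED_ZONE_SIZE >= 1024 * 2,
              "red zone smaller than the smallest supported value");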

struct frame_glue_fns {
    uintptr_t mark_glue_off;
    uintptr_t drop_glue_off;
    uintptr_t reloc_glue_off;
};

// std::lib::task::task_result
typedef unsigned long task_result;
#define tr_success 0
#define tr_failure 1

struct spawn_args;
struct cleanup_args;
struct reset_args;
struct new_stack_args;

// std::lib::task::task_notification
//
// since it's currently a unary tag, we only add the fields.
struct task_notification {
    rust_task_id id;
    task_result result; // task_result
};

extern "C" void
rust_task_fail(rust_task *task,
               char const *expr,
               char const *file,
               size_t line);

struct
rust_task : public kernel_owned<rust_task>
{
    RUST_ATOMIC_REFCOUNT();

    rust_task_id id;

    context ctx;
    stk_seg *stk;
    uintptr_t runtime_sp;      // Runtime sp while task running.
    rust_scheduler *sched;
    rust_sched_loop *sched_loop;

    // Fields known only to the runtime.
    rust_kernel *kernel;
    const char *const name;
    int32_t list_index;

    boxed_region boxed;
    memory_region local_region;

    // Indicates that fail() has been called and we are cleaning up.
    // We use this to suppress the "killed" flag during calls to yield.
    bool unwinding;

    bool propagate_failure;

    debug::task_debug_info debug;

    // The amount of stack we're using, excluding red zones
    size_t total_stack_sz;

    // Used by rust task management routines in libcore/task.rs.
    void *task_local_data;
    void (*task_local_data_cleanup)(void *data);

    // Contains a ~[BorrowRecord] pointer, or NULL.
    //
    // Used by borrow management code in libcore/unstable/lang.rs.
    void *borrow_list;

private:

    // Protects state, cond, cond_name
    // Protects the killed flag, disallow_kill flag, reentered_rust_stack
    lock_and_signal lifecycle_lock;
    rust_task_state state;
    rust_cond *cond;
    const char *cond_name;

    bool event_reject;
    rust_cond event_cond;
    void *event;

    // Indicates that the task was killed and needs to unwind
    bool killed;
    // Indicates that we've called back into Rust from C
    bool reentered_rust_stack;
    unsigned long disallow_kill;
    unsigned long disallow_yield;

    // The stack used for running C code, borrowed from the scheduler thread
    stk_seg *c_stack;
    uintptr_t next_c_sp;
    uintptr_t next_rust_sp;

    // Called when the atomic refcount reaches zero
    void delete_this();

    bool new_big_stack();
    void new_stack_fast(size_t requested_sz);
    void new_stack(size_t requested_sz);
    void free_stack(stk_seg *stk);
    size_t get_next_stack_size(size_t min, size_t current, size_t requested);

    void return_c_stack();

    void transition(rust_task_state src, rust_task_state dst,
                    rust_cond *cond, const char* cond_name);
    void transition_inner(rust_task_state src, rust_task_state dst,
                          rust_cond *cond, const char* cond_name);

    bool must_fail_from_being_killed_inner();
    // Called by rust_task_fail to unwind on failure
    void begin_failure(char const *expr,
                       char const *file,
                       size_t line);

    friend void task_start_wrapper(spawn_args *a);
    friend void cleanup_task(cleanup_args *a);
    friend void reset_stack_limit_on_c_stack(reset_args *a);
    friend void new_stack_slow(new_stack_args *a);
    friend void rust_task_fail(rust_task *task,
                               char const *expr,
                               char const *file,
                               size_t line);

    bool block_inner(rust_cond *on, const char* name);
    void wakeup_inner(rust_cond *from);
    bool blocked_on(rust_cond *cond);

private:
    // private and undefined to disable copying
    rust_task(const rust_task& rhs);
    rust_task& operator=(const rust_task& rhs);

public:

    // Only a pointer to 'name' is kept, so it must live as long as this task.
    rust_task(rust_sched_loop *sched_loop,
              rust_task_state state,
              const char *name,
              size_t init_stack_sz);

    void start(spawn_fn spawnee_fn,
               rust_opaque_box *env,
               void *args);
    void start();
    void assert_is_running();

    void *malloc(size_t sz, const char *tag, type_desc *td=0);
    void *realloc(void *data, size_t sz);
    void free(void *p);

    void set_state(rust_task_state state,
                   rust_cond *cond, const char* cond_name);

    bool block(rust_cond *on, const char* name);
    void wakeup(rust_cond *from);
    void die();

    // Print a backtrace, if the "bt" logging option is on.
    void backtrace();

    // Yields control to the scheduler. Called from the Rust stack
    // Returns TRUE if the task was killed and needs to fail.
    MUST_CHECK bool yield();

    // Fail this task (assuming caller-on-stack is different task).
    void kill();
    void kill_inner();

    // Indicates that we've been killed and now is an appropriate
    // time to fail as a result
    bool must_fail_from_being_killed();

    // Fail self, assuming caller-on-stack is this task.
    void fail();
    void fail(char const *expr, char const *file, size_t line);

    // Propagate failure to the entire rust runtime.
    void fail_sched_loop();

    void *calloc(size_t size, const char *tag);

    // Use this function sparingly. Depending on the ref count is generally
    // not at all safe.
    intptr_t get_ref_count() const { return ref_count; }

    void *next_stack(size_t stk_sz, void *args_addr, size_t args_sz);
    void prev_stack();
    void record_stack_limit();
    void reset_stack_limit();

    bool on_rust_stack();
    void check_stack_canary();
    void delete_all_stacks();

    void call_on_c_stack(void *args, void *fn_ptr);
    void call_on_rust_stack(void *args, void *fn_ptr);
    bool have_c_stack() { return c_stack != NULL; }
    stk_seg *get_c_stack() { return c_stack; }

    rust_task_state get_state() { return state; }
    rust_cond *get_cond() { return cond; }
    const char *get_cond_name() { return cond_name; }

    void clear_event_reject() {
        this->event_reject = false;
    }

    // Returns TRUE if the task was killed and needs to fail.
    MUST_CHECK bool wait_event(void **result);
    void signal_event(void *event);

    void cleanup_after_turn();

    void inhibit_kill();
    void allow_kill();
    void inhibit_yield();
    void allow_yield();
};

template <typename T> struct task_owned {
    inline void *operator new(size_t size, rust_task *task,
                              const char *tag) {
        return task->malloc(size, tag);
    }

    inline void *operator new[](size_t size, rust_task *task,
                                const char *tag) {
        return task->malloc(size, tag);
    }

    inline void *operator new(size_t size, rust_task &task,
                              const char *tag) {
        return task.malloc(size, tag);
    }

    inline void *operator new[](size_t size, rust_task &task,
                                const char *tag) {
        return task.malloc(size, tag);
    }

    void operator delete(void *ptr) {
        ((T *)ptr)->task->free(ptr);
    }
};

// This is the function that switches between the C and the Rust stack by
// calling another function with a single void* argument while changing the
// stack pointer. It has a funny name because gdb doesn't normally like to
// backtrace through split stacks (thinks it indicates a bug), but has a
// special case to allow functions named __morestack to move the stack pointer
// around.
extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);

inline static uintptr_t
sanitize_next_sp(uintptr_t next_sp) {

    // Since I'm not precisely sure where the next stack pointer sits in
    // relation to where the context switch actually happened, nor in relation
    // to the amount of stack needed for calling __morestack, I've added some
    // extra bytes here.

    // FIXME (#2698): On the rust stack this potentially puts us quite far
    // into the red zone. Might want to just allocate a new rust stack every
    // time we switch back to rust.
    const uintptr_t padding = 16;

    return align_down(next_sp - padding);
}
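Assuming align_down rounds down to a 16-byte boundary (its definition lives elsewhere in the runtime, so treat this as an assumption), the arithmetic works out as:

// Illustrative only: with next_sp = 0x7fff1238,
//   next_sp - padding      = 0x7fff1228
//   align_down(0x7fff1228) = 0x7fff1220
// so the resumed frame starts at least 16 bytes below the recorded sp,
// clear of whatever __morestack itself pushed there.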

inline void
rust_task::call_on_c_stack(void *args, void *fn_ptr) {
    // Too expensive to check
    // assert(on_rust_stack());

    // The shim functions generated by rustc contain the morestack prologue,
    // so we need to let them know they have enough stack.
    record_sp_limit(0);

    uintptr_t prev_rust_sp = next_rust_sp;
    next_rust_sp = get_sp();

    bool borrowed_a_c_stack = false;
    uintptr_t sp;
    if (c_stack == NULL) {
        c_stack = sched_loop->borrow_c_stack();
        next_c_sp = align_down(c_stack->end);
        sp = next_c_sp;
        borrowed_a_c_stack = true;
    } else {
        sp = sanitize_next_sp(next_c_sp);
    }

    __morestack(args, fn_ptr, sp);

    // Note that we may not actually get here if we threw an exception,
    // in which case we will return the c stack when the exception is caught.
    if (borrowed_a_c_stack) {
        return_c_stack();
    }

    next_rust_sp = prev_rust_sp;

    record_stack_limit();
}

inline void
rust_task::call_on_rust_stack(void *args, void *fn_ptr) {
    // Too expensive to check
    // assert(!on_rust_stack());

    // Because of the hack in the other function that disables the stack limit
    // when entering the C stack, here we restore the stack limit again.
    record_stack_limit();

    assert(get_sp_limit() != 0 && "Stack must be configured");
    assert(next_rust_sp);

    // Unlocked access. Might "race" a killer, but harmlessly. This code is
    // only run by the task itself, so cannot race itself. See the comment
    // above inhibit_kill (or #3213) in rust_task.cpp for justification.
    bool had_reentered_rust_stack = reentered_rust_stack;
    reentered_rust_stack = true;

    uintptr_t prev_c_sp = next_c_sp;
    next_c_sp = get_sp();

    uintptr_t sp = sanitize_next_sp(next_rust_sp);

    // FIXME (#2047): There are times when this is called and needs
    // to be able to throw, and we don't account for that.
    __morestack(args, fn_ptr, sp);

    next_c_sp = prev_c_sp;
    reentered_rust_stack = had_reentered_rust_stack;

    record_sp_limit(0);
}

inline void
rust_task::return_c_stack() {
    // Too expensive to check
    // assert(on_rust_stack());
    assert(c_stack != NULL);
    sched_loop->return_c_stack(c_stack);
    c_stack = NULL;
    next_c_sp = 0;
}

// NB: This runs on the Rust stack
inline void *
rust_task::next_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
    new_stack_fast(stk_sz + args_sz);
    assert(stk->end - (uintptr_t)stk->data >= stk_sz + args_sz
           && "Did not receive enough stack");
    uint8_t *new_sp = (uint8_t*)stk->end;
    // Push the function arguments to the new stack
    new_sp = align_down(new_sp - args_sz);

    // I don't know exactly where the region ends that valgrind needs us
    // to mark accessible. On x86_64 these extra bytes aren't needed, but
    // on i386 we get errors without them.
    const int fudge_bytes = 16;
    reuse_valgrind_stack(stk, new_sp - fudge_bytes);

    memcpy(new_sp, args_addr, args_sz);
    record_stack_limit();
    return new_sp;
}

// The amount of stack in a segment available to Rust code
inline size_t
user_stack_size(stk_seg *stk) {
    return (size_t)(stk->end
                    - (uintptr_t)&stk->data[0]
                    - RED_ZONE_SIZE);
}

struct new_stack_args {
    rust_task *task;
    size_t requested_sz;
};

void
new_stack_slow(new_stack_args *args);

// NB: This runs on the Rust stack
// This is the new stack fast path, in which we
// reuse the next cached stack segment
inline void
rust_task::new_stack_fast(size_t requested_sz) {
    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = sched_loop->min_stack_size;

    if (requested_sz > STACK_THRESHOLD) {
        if (new_big_stack())
            return;
    }

    // Try to reuse an existing stack segment
    if (stk != NULL && stk->next != NULL) {
        size_t next_sz = user_stack_size(stk->next);
        if (min_sz <= next_sz && requested_sz <= next_sz) {
            stk = stk->next;
            return;
        }
    }

    new_stack_args args = {this, requested_sz};
    call_on_c_stack(&args, (void*)new_stack_slow);
}

// NB: This runs on the Rust stack
inline void
rust_task::prev_stack() {
    // We're not going to actually delete anything now because that would
    // require switching to the C stack and be costly. Instead we'll just move
    // up the linked list and clean up later, either in new_stack or after our
    // turn ends on the scheduler.
    if (stk->is_big) {
        stk_seg *ss = stk;
        stk = stk->prev;

        // Unlink the big stack.
        if (ss->next)
            ss->next->prev = ss->prev;
        if (ss->prev)
            ss->prev->next = ss->next;

        sched_loop->return_big_stack(ss);
    } else {
        stk = stk->prev;
    }

    record_stack_limit();
}

// The LLVM-generated segmented-stack function prolog compares the amount of
// stack needed for each frame to the end-of-stack pointer stored in the
// TCB. As an optimization, when the frame size is less than 256 bytes, it
// will simply compare %esp to the stack limit instead of subtracting the
// frame size. As a result we need our stack limit to account for those 256
// bytes.
const unsigned LIMIT_OFFSET = 256;

inline void
rust_task::record_stack_limit() {
    assert(stk);
    assert((uintptr_t)stk->end - RED_ZONE_SIZE
           - (uintptr_t)stk->data >= LIMIT_OFFSET
           && "Stack size must be greater than LIMIT_OFFSET");
    record_sp_limit(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
}

inline rust_task* rust_try_get_current_task() {
    uintptr_t sp_limit = get_sp_limit();

    // FIXME (#1226) - Because of a hack in upcall_call_shim_on_c_stack this
    // value is sometimes inconveniently set to 0, so we can't use this
    // method of retrieving the task pointer and need to fall back to TLS.
    if (sp_limit == 0)
        return rust_sched_loop::try_get_task_tls();

    // The stack pointer boundary is stored in a quickly-accessible location
    // in the TCB. From that we can calculate the address of the stack segment
    // structure it belongs to, and in that structure is a pointer to the task
    // that owns it.
    uintptr_t seg_addr =
        sp_limit - RED_ZONE_SIZE - LIMIT_OFFSET - sizeof(stk_seg);
    stk_seg *stk = (stk_seg*) seg_addr;

    // Make sure we've calculated the right address
    ::check_stack_canary(stk);
    assert(stk->task != NULL && "task pointer not in stack structure");
    return stk->task;
}
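The segment lookup above is just the inverse of record_stack_limit. Spelling out the algebra (the last step assumes `data` begins immediately after the stk_seg header, which is what the user_stack_size arithmetic in this file also relies on):

//   record:  sp_limit = (uintptr_t)stk->data + LIMIT_OFFSET + RED_ZONE_SIZE
//   lookup:  seg_addr = sp_limit - RED_ZONE_SIZE - LIMIT_OFFSET
//                                - sizeof(stk_seg)
// Substituting the first into the second:
//   seg_addr = (uintptr_t)stk->data - sizeof(stk_seg) == (uintptr_t)stk
// The check_stack_canary() call guards exactly this layout assumption.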

inline rust_task* rust_get_current_task() {
    rust_task* task = rust_try_get_current_task();
    assert(task != NULL && "no current task");
    return task;
}

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//

#endif /* RUST_TASK_H */

@@ -10,12 +10,10 @@

// Helper functions used only in tests

#include "rust_sched_loop.h"
#include "rust_task.h"
#include "rust_util.h"
#include "rust_scheduler.h"
#include "sync/timer.h"
#include "sync/rust_thread.h"
#include "sync/lock_and_signal.h"
#include "rust_abi.h"

// These functions are used in the unit tests for C ABI calls.

@@ -17,8 +17,6 @@
 */

#include "rust_globals.h"
#include "rust_task.h"
#include "rust_sched_loop.h"
#include "rust_upcall.h"
#include "rust_util.h"

@@ -29,28 +27,6 @@ typedef int _Unwind_Action;
struct _Unwind_Context;
struct _Unwind_Exception;

#ifdef __GNUC__
#define LOG_UPCALL_ENTRY(task)                            \
    LOG(task, upcall,                                     \
        "> UPCALL %s - task: %s 0x%" PRIxPTR              \
        " retpc: x%" PRIxPTR,                             \
        __FUNCTION__,                                     \
        (task)->name, (task),                             \
        __builtin_return_address(0));
#else
#define LOG_UPCALL_ENTRY(task)                            \
    LOG(task, upcall, "> UPCALL task: %s @x%" PRIxPTR,    \
        (task)->name, (task));
#endif

#define UPCALL_SWITCH_STACK(T, A, F) \
    call_upcall_on_c_stack(T, (void*)A, (void*)F)

inline void
call_upcall_on_c_stack(rust_task *task, void *args, void *fn_ptr) {
    task->call_on_c_stack(args, fn_ptr);
}

typedef void (*CDECL stack_switch_shim)(void*);
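Every upcall below follows the same recipe: pack the arguments into an s_*_args struct, run the worker function on the scheduler's big C stack via UPCALL_SWITCH_STACK, and read any result back out of the struct. A hypothetical new upcall written in that style (example only, never part of this file):

struct s_example_args {
    rust_task *task;
    int input;
    int retval;
};

extern "C" CDECL void
upcall_s_example(s_example_args *args) {
    rust_task *task = args->task;
    LOG_UPCALL_ENTRY(task);
    args->retval = args->input * 2;   // body runs on the C stack
}

extern "C" CDECL int
upcall_example(int input) {
    rust_task *task = rust_get_current_task();
    s_example_args args = {task, input, 0};
    UPCALL_SWITCH_STACK(task, &args, upcall_s_example);
    return args.retval;
}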

/**********************************************************************
@@ -62,21 +38,8 @@ typedef void (*CDECL stack_switch_shim)(void*);
 */
extern "C" CDECL void
upcall_call_shim_on_c_stack(void *args, void *fn_ptr) {
    rust_task *task = rust_try_get_current_task();

    if (task) {
        // We're running in task context, do a stack switch
        try {
            task->call_on_c_stack(args, fn_ptr);
        } catch (...) {
            // Logging here is not reliable
            assert(false && "Foreign code threw an exception");
        }
    } else {
        // There's no task. Call the function and hope for the best
        stack_switch_shim f = (stack_switch_shim)fn_ptr;
        f(args);
    }
}

/*
@@ -85,171 +48,9 @@ upcall_call_shim_on_c_stack(void *args, void *fn_ptr) {
 */
extern "C" CDECL void
upcall_call_shim_on_rust_stack(void *args, void *fn_ptr) {
    rust_task *task = rust_try_get_current_task();

    if (task) {
        try {
            task->call_on_rust_stack(args, fn_ptr);
        } catch (...) {
            // We can't count on being able to unwind through arbitrary
            // code. Our best option is to just fail hard.
            // Logging here is not reliable
            assert(false
                   && "Rust task failed after reentering the Rust stack");
        }
    } else {
        // There's no task. Call the function and hope for the best
        stack_switch_shim f = (stack_switch_shim)fn_ptr;
        f(args);
    }
}

/**********************************************************************/

struct s_fail_args {
    rust_task *task;
    char const *expr;
    char const *file;
    size_t line;
};

extern "C" CDECL void
upcall_s_fail(s_fail_args *args) {
    rust_task *task = args->task;
    LOG_UPCALL_ENTRY(task);
    task->fail(args->expr, args->file, args->line);
}

extern "C" CDECL void
upcall_fail(char const *expr,
            char const *file,
            size_t line) {
    rust_task *task = rust_try_get_current_task();
    if (task == NULL) {
        // FIXME #5161: Need to think about what to do here
        printf("failure outside of a task\n");
        abort();
    }
    s_fail_args args = {task, expr, file, line};
    UPCALL_SWITCH_STACK(task, &args, upcall_s_fail);
}

// FIXME (#2861): Alias used by libcore/rt.rs to avoid naming conflicts with
// autogenerated wrappers for upcall_fail. Remove this when we fully move
// away from the C upcall path.
extern "C" CDECL void
rust_upcall_fail(char const *expr,
                 char const *file,
                 size_t line) {
    upcall_fail(expr, file, line);
}

struct s_trace_args {
    rust_task *task;
    char const *msg;
    char const *file;
    size_t line;
};

/**********************************************************************
 * Allocate an object in the task-local heap.
 */

struct s_malloc_args {
    rust_task *task;
    uintptr_t retval;
    type_desc *td;
    uintptr_t size;
};

extern "C" CDECL void
upcall_s_malloc(s_malloc_args *args) {
    rust_task *task = args->task;
    LOG_UPCALL_ENTRY(task);
    LOG(task, mem, "upcall malloc(0x%" PRIxPTR ")", args->td);

    rust_opaque_box *box = task->boxed.malloc(args->td, args->size);
    void *body = box_body(box);

    debug::maybe_track_origin(task, box);

    LOG(task, mem,
        "upcall malloc(0x%" PRIxPTR ") = box 0x%" PRIxPTR
        " with body 0x%" PRIxPTR,
        args->td, (uintptr_t)box, (uintptr_t)body);

    args->retval = (uintptr_t)box;
}

extern "C" CDECL uintptr_t
upcall_malloc(type_desc *td, uintptr_t size) {
    rust_task *task = rust_get_current_task();
    s_malloc_args args = {task, 0, td, size};
    UPCALL_SWITCH_STACK(task, &args, upcall_s_malloc);
    return args.retval;
}

// FIXME (#2861): Alias used by libcore/rt.rs to avoid naming conflicts with
// autogenerated wrappers for upcall_malloc. Remove this when we fully move
// away from the C upcall path.
extern "C" CDECL uintptr_t
rust_upcall_malloc(type_desc *td, uintptr_t size) {
    return upcall_malloc(td, size);
}

extern "C" CDECL uintptr_t
rust_upcall_malloc_noswitch(type_desc *td, uintptr_t size) {
    rust_task *task = rust_get_current_task();
    s_malloc_args args = {task, 0, td, size};
    upcall_s_malloc(&args);
    return args.retval;
}

/**********************************************************************
 * Called whenever an object in the task-local heap is freed.
 */

struct s_free_args {
    rust_task *task;
    void *ptr;
};

extern "C" CDECL void
upcall_s_free(s_free_args *args) {
    rust_task *task = args->task;
    LOG_UPCALL_ENTRY(task);

    rust_sched_loop *sched_loop = task->sched_loop;
    DLOG(sched_loop, mem,
         "upcall free(0x%" PRIxPTR ")",
         (uintptr_t)args->ptr);

    debug::maybe_untrack_origin(task, args->ptr);

    rust_opaque_box *box = (rust_opaque_box*) args->ptr;
    task->boxed.free(box);
}

extern "C" CDECL void
upcall_free(void* ptr) {
    rust_task *task = rust_get_current_task();
    s_free_args args = {task, ptr};
    UPCALL_SWITCH_STACK(task, &args, upcall_s_free);
}

// FIXME (#2861): Alias used by libcore/rt.rs to avoid naming conflicts with
// autogenerated wrappers for upcall_free. Remove this when we fully move
// away from the C upcall path.
extern "C" CDECL void
rust_upcall_free(void* ptr) {
    upcall_free(ptr);
}

extern "C" CDECL void
rust_upcall_free_noswitch(void* ptr) {
    rust_task *task = rust_get_current_task();
    s_free_args args = {task, ptr};
    upcall_s_free(&args);
}

/**********************************************************************/
@@ -293,41 +94,21 @@ upcall_rust_personality(int version,
     s_rust_personality_args args = {(_Unwind_Reason_Code)0,
                                     version, actions, exception_class,
                                     ue_header, context};
-    rust_task *task = rust_try_get_current_task();
-
-    if (task == NULL) {
-        // Assuming we're running with the new scheduler
-        upcall_s_rust_personality(&args);
-        return args.retval;
-    }
-
-    // The personality function is run on the stack of the
-    // last function that threw or landed, which is going
-    // to sometimes be the C stack. If we're on the Rust stack
-    // then switch to the C stack.
-
-    if (task->on_rust_stack()) {
-        UPCALL_SWITCH_STACK(task, &args, upcall_s_rust_personality);
-    } else {
-        upcall_s_rust_personality(&args);
-    }
+    upcall_s_rust_personality(&args);
     return args.retval;
 }
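
[Editor's note] The deleted branch above decided whether to switch stacks by asking task->on_rust_stack(). One plausible way to implement such a check, sketched here under invented names (the real runtime tracks segment bounds in rust_stack.cpp and rust_task), is to compare an approximate stack pointer against the recorded bounds of the Rust stack segment.

#include <cstdint>
#include <cstdio>

struct demo_stack_seg {
    uintptr_t begin; // lowest address in the segment
    uintptr_t end;   // one past the highest address
};

// Take an approximate stack pointer by reading the address of a local.
static uintptr_t approx_sp() {
    char marker;
    return reinterpret_cast<uintptr_t>(&marker);
}

// The on_rust_stack()-style test: is the current sp inside this segment?
static bool sp_in_segment(const demo_stack_seg &seg) {
    uintptr_t sp = approx_sp();
    return sp >= seg.begin && sp < seg.end;
}

int main() {
    // A fake one-page "segment" centered on the current sp, so the check
    // reports true; a real runtime records the bounds when it allocates
    // each stack segment.
    uintptr_t sp = approx_sp();
    demo_stack_seg seg = { sp - 2048, sp + 2048 };
    std::printf("on segment: %d\n", sp_in_segment(seg) ? 1 : 0);
    return 0;
}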

 // NB: This needs to be blazing fast. Don't switch stacks
 extern "C" CDECL void *
 upcall_new_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
-    rust_task *task = rust_get_current_task();
-    return task->next_stack(stk_sz,
-                            args_addr,
-                            args_sz);
+    assert(false && "newsched shouldn't be growing the stack");
+    return NULL;
 }

 // NB: This needs to be blazing fast. Don't switch stacks
 extern "C" CDECL void
 upcall_del_stack() {
-    rust_task *task = rust_get_current_task();
-    task->prev_stack();
+    assert(false && "newsched shouldn't be growing the stack");
 }

 // Landing pads need to call this to insert the

@@ -336,12 +117,6 @@ upcall_del_stack() {
 // needs to acquire the value of the stack pointer
 extern "C" CDECL void
 upcall_reset_stack_limit() {
-    rust_task *task = rust_try_get_current_task();
-    if (task != NULL) {
-        task->reset_stack_limit();
-    } else {
-        // We must be in a newsched task
-    }
 }

 //

@@ -12,7 +12,8 @@
#define RUST_UTIL_H

#include <limits.h>
#include "rust_task.h"
#include "rust_exchange_alloc.h"
#include "rust_type.h"
#include "rust_env.h"

extern struct type_desc str_body_tydesc;

@@ -16,8 +16,6 @@
#include "uv.h"

#include "rust_globals.h"
#include "rust_task.h"
#include "rust_log.h"

// extern fn pointers
typedef void (*extern_async_op_cb)(uv_loop_t* loop, void* data,

@@ -35,43 +33,6 @@ struct handle_data {
    extern_close_cb close_cb;
};

// helpers
static void*
current_kernel_malloc(size_t size, const char* tag) {
    void* ptr = rust_get_current_task()->kernel->malloc(size, tag);
    return ptr;
}

static void
current_kernel_free(void* ptr) {
    rust_get_current_task()->kernel->free(ptr);
}

static handle_data*
new_handle_data_from(uint8_t* buf, extern_simple_cb cb) {
    handle_data* data = (handle_data*)current_kernel_malloc(
        sizeof(handle_data),
        "handle_data");
    memcpy(data->id_buf, buf, RUST_UV_HANDLE_LEN);
    data->cb = cb;
    return data;
}

// libuv callback impls
static void
foreign_extern_async_op_cb(uv_async_t* handle, int status) {
    extern_async_op_cb cb = (extern_async_op_cb)handle->data;
    void* loop_data = handle->loop->data;
    cb(handle->loop, loop_data, handle);
}

static void
foreign_async_cb(uv_async_t* handle, int status) {
    handle_data* handle_d = (handle_data*)handle->data;
    void* loop_data = handle->loop->data;
    handle_d->cb(handle_d->id_buf, loop_data);
}
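
[Editor's note] handle_data is the glue that lets libuv, which only knows fixed C callback signatures, dispatch back to per-watcher state: the pointer is stashed in handle->data and unpacked by a static trampoline such as foreign_async_cb above. A self-contained sketch of the same pattern follows, with no libuv dependency; all demo_* names are invented.

#include <cstring>
#include <cstdio>

typedef void (*user_cb)(const char *id, void *loop_data);

struct demo_handle {
    void *data;      // opaque per-handle slot, like uv_handle_t.data
    void *loop_data; // stand-in for handle->loop->data
};

struct demo_handle_data {
    char id_buf[16]; // identifies the caller-side watcher
    user_cb cb;      // the callback registered by the caller
};

// The fixed-signature trampoline the C library actually invokes.
static void demo_trampoline(demo_handle *handle) {
    demo_handle_data *d = static_cast<demo_handle_data *>(handle->data);
    d->cb(d->id_buf, handle->loop_data); // unpack and forward
}

static void my_cb(const char *id, void *) {
    std::printf("callback for %s\n", id);
}

int main() {
    demo_handle_data d;
    std::memcpy(d.id_buf, "watcher-1", sizeof "watcher-1");
    d.cb = my_cb;
    demo_handle h = { &d, 0 };
    demo_trampoline(&h); // the library would do this on an event
    return 0;
}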

static void
foreign_timer_cb(uv_timer_t* handle, int status) {
    handle_data* handle_d = (handle_data*)handle->data;

@@ -84,18 +45,6 @@ foreign_close_cb(uv_handle_t* handle) {
    handle_data* data = (handle_data*)handle->data;
    data->close_cb(data->id_buf, handle, handle->loop->data);
}

static void
foreign_close_op_cb(uv_handle_t* op_handle) {
    current_kernel_free(op_handle);
    // uv_run() should return after this..
}

// foreign fns bound in rust
extern "C" void
rust_uv_free(void* ptr) {
    current_kernel_free(ptr);
}
extern "C" void*
rust_uv_loop_new() {
    return (void*)uv_loop_new();

@@ -127,24 +76,6 @@ rust_uv_loop_set_data(uv_loop_t* loop, void* data) {
    loop->data = data;
}

extern "C" void*
rust_uv_bind_op_cb(uv_loop_t* loop, extern_async_op_cb cb) {
    uv_async_t* async = (uv_async_t*)current_kernel_malloc(
        sizeof(uv_async_t),
        "uv_async_t");
    uv_async_init(loop, async, foreign_extern_async_op_cb);
    async->data = (void*)cb;
    // decrement the ref count, so that our async bind
    // doesn't count towards keeping the loop alive
    //uv_unref(loop);
    return async;
}

extern "C" void
rust_uv_stop_op_cb(uv_handle_t* op_handle) {
    uv_close(op_handle, foreign_close_op_cb);
}

extern "C" void
rust_uv_run(uv_loop_t* loop) {
    uv_run(loop, UV_RUN_DEFAULT);

@@ -167,18 +98,6 @@ rust_uv_hilvl_close(uv_handle_t* handle, extern_close_cb cb) {
    uv_close(handle, foreign_close_cb);
}

extern "C" void
rust_uv_hilvl_close_async(uv_async_t* handle) {
    current_kernel_free(handle->data);
    current_kernel_free(handle);
}

extern "C" void
rust_uv_hilvl_close_timer(uv_async_t* handle) {
    current_kernel_free(handle->data);
    current_kernel_free(handle);
}

extern "C" void
rust_uv_async_send(uv_async_t* handle) {
    uv_async_send(handle);

@@ -191,32 +110,6 @@ rust_uv_async_init(uv_loop_t* loop_handle,
    return uv_async_init(loop_handle, async_handle, cb);
}

extern "C" void*
rust_uv_hilvl_async_init(uv_loop_t* loop, extern_simple_cb cb,
                         uint8_t* buf) {
    uv_async_t* async = (uv_async_t*)current_kernel_malloc(
        sizeof(uv_async_t),
        "uv_async_t");
    uv_async_init(loop, async, foreign_async_cb);
    handle_data* data = new_handle_data_from(buf, cb);
    async->data = data;

    return async;
}
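
[Editor's note] Note the ownership convention: rust_uv_hilvl_async_init makes two allocations (the uv handle and its handle_data), and the matching rust_uv_hilvl_close_async above frees both, the data first while it is still reachable through the handle. A trivial sketch of that pairing, with invented demo_* names:

#include <cstdlib>

struct demo_data   { int id; };
struct demo_handle { void *data; };

static demo_handle *demo_init() {
    demo_handle *h = static_cast<demo_handle *>(std::malloc(sizeof *h));
    h->data = std::malloc(sizeof(demo_data)); // second allocation it owns
    return h;
}

static void demo_close(demo_handle *h) {
    std::free(h->data); // free the owned data first, while still reachable
    std::free(h);       // then the handle itself
}

int main() { demo_close(demo_init()); return 0; }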

extern "C" void*
rust_uv_hilvl_timer_init(uv_loop_t* loop, extern_simple_cb cb,
                         uint8_t* buf) {
    uv_timer_t* new_timer = (uv_timer_t*)current_kernel_malloc(
        sizeof(uv_timer_t),
        "uv_timer_t");
    uv_timer_init(loop, new_timer);
    handle_data* data = new_handle_data_from(buf, cb);
    new_timer->data = data;

    return new_timer;
}

extern "C" void
rust_uv_hilvl_timer_start(uv_timer_t* the_timer, uint32_t timeout,
                          uint32_t repeat) {

@@ -469,15 +362,6 @@ rust_uv_get_stream_handle_from_write_req(uv_write_t* write_req) {
    return write_req->handle;
}

extern "C" uv_buf_t
current_kernel_malloc_alloc_cb(uv_handle_t* handle,
                               size_t suggested_size) {
    char* base_ptr = (char*)current_kernel_malloc(sizeof(char)
                                                  * suggested_size,
                                                  "uv_buf_t_base_val");
    return uv_buf_init(base_ptr, suggested_size);
}
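
[Editor's note] current_kernel_malloc_alloc_cb follows the old libuv 0.x allocation-callback contract: before each read the loop asks for a buffer of a suggested size and the callback returns a uv_buf_t by value (modern libuv instead fills an out-parameter). A stand-alone sketch of that contract, faking uv_buf_t as demo_buf so it compiles without libuv headers:

#include <cstdlib>

struct demo_buf { char *base; size_t len; };

static demo_buf demo_buf_init(char *base, size_t len) {
    demo_buf b = { base, len };
    return b;
}

// Called by the event loop before each read: hand back scratch space of
// the suggested size; the read callback is responsible for freeing base.
static demo_buf demo_alloc_cb(size_t suggested_size) {
    return demo_buf_init(static_cast<char *>(std::malloc(suggested_size)),
                         suggested_size);
}

int main() {
    demo_buf b = demo_alloc_cb(4096);
    std::free(b.base); // normally the read callback would do this
    return 0;
}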

extern "C" void
rust_uv_buf_init(uv_buf_t* out_buf, char* base, size_t len) {
    *out_buf = uv_buf_init(base, len);

@@ -563,16 +447,6 @@ rust_uv_read_stop(uv_stream_t* stream) {
    return uv_read_stop(stream);
}

extern "C" char*
rust_uv_malloc_buf_base_of(size_t suggested_size) {
    return (char*) current_kernel_malloc(sizeof(char)*suggested_size,
                                         "uv_buf_t base");
}
extern "C" void
rust_uv_free_base_of_buf(uv_buf_t buf) {
    current_kernel_free(buf.base);
}

extern "C" struct sockaddr_in
rust_uv_ip4_addr(const char* ip, int port) {
    struct sockaddr_in addr = uv_ip4_addr(ip, port);

@@ -639,16 +513,6 @@ rust_uv_ip6_port(struct sockaddr_in6* src) {
    return ntohs(src->sin6_port);
}

extern "C" void*
rust_uv_current_kernel_malloc(size_t size) {
    return current_kernel_malloc(size, "rust_uv_current_kernel_malloc");
}

extern "C" void
rust_uv_current_kernel_free(void* mem) {
    current_kernel_free(mem);
}

extern "C" int
rust_uv_getaddrinfo(uv_loop_t* loop, uv_getaddrinfo_t* handle,
                    uv_getaddrinfo_cb cb,

@@ -1,27 +1,19 @@
debug_get_stk_seg
debug_abi_1
debug_abi_2
debug_static_mut
debug_static_mut_check_four
get_task_id
get_time
rust_tzset
rust_gmtime
rust_localtime
rust_timegm
rust_mktime
new_task
precise_time_ns
rand_free
rand_new_seeded
rand_seed_size
rand_gen_seed
rand_next
rust_get_sched_id
rust_get_argc
rust_get_argv
rust_new_sched
rust_new_task_in_sched
rust_path_is_dir
rust_path_exists
rust_get_stdin

@@ -35,55 +27,26 @@ rust_log_console_off
rust_should_log_console
rust_set_environ
rust_unset_sigprocmask
rust_set_exit_status
rust_start
rust_env_pairs
rust_task_yield
rust_task_is_unwinding
rust_get_task
rust_try_get_task
rust_get_stack_segment
rust_get_c_stack
rust_log_str
start_task
rust_local_realloc
task_clear_event_reject
task_wait_event
task_signal_event
upcall_fail
upcall_free
upcall_malloc
upcall_rust_personality
upcall_call_shim_on_c_stack
upcall_call_shim_on_rust_stack
upcall_new_stack
upcall_del_stack
upcall_reset_stack_limit
rust_upcall_fail
rust_upcall_free
rust_upcall_free_noswitch
rust_upcall_malloc
rust_upcall_malloc_noswitch
rust_uv_loop_new
rust_uv_loop_delete
rust_uv_walk
rust_uv_loop_set_data
rust_uv_bind_op_cb
rust_uv_stop_op_cb
rust_uv_run
rust_uv_close
rust_uv_hilvl_close
rust_uv_hilvl_close_async
rust_uv_hilvl_close_timer
rust_uv_async_send
rust_uv_async_init
rust_uv_hilvl_async_init
rust_uv_hilvl_timer_init
rust_uv_hilvl_timer_start
rust_uv_timer_init
rust_uv_timer_start
rust_uv_timer_stop
rust_uv_free
rust_uv_tcp_init
rust_uv_buf_init
rust_uv_last_error

@@ -124,8 +87,6 @@ rust_uv_accept
rust_uv_write
rust_uv_read_start
rust_uv_read_stop
rust_uv_malloc_buf_base_of
rust_uv_free_base_of_buf
rust_uv_is_ipv4_addrinfo
rust_uv_is_ipv6_addrinfo
rust_uv_get_next_addrinfo

@@ -155,8 +116,6 @@ rust_uv_get_data_for_req
rust_uv_set_data_for_req
rust_uv_get_base_from_buf
rust_uv_get_len_from_buf
rust_uv_current_kernel_malloc
rust_uv_current_kernel_free
rust_uv_getaddrinfo
rust_uv_freeaddrinfo
rust_uv_idle_new

@@ -172,21 +131,10 @@ rust_dbg_lock_wait
rust_dbg_lock_signal
rust_dbg_call
rust_dbg_do_nothing
rust_osmain_sched_id
rust_task_inhibit_kill
rust_task_allow_kill
rust_task_inhibit_yield
rust_task_allow_yield
rust_task_kill_other
rust_task_kill_all
rust_create_little_lock
rust_destroy_little_lock
rust_lock_little_lock
rust_unlock_little_lock
rust_get_task_local_data
rust_task_local_data_atexit
rust_task_ref
rust_task_deref
tdefl_compress_mem_to_heap
tinfl_decompress_mem_to_heap
rust_gc_metadata

@@ -221,7 +169,6 @@ rust_dbg_extern_return_TwoU32s
rust_dbg_extern_return_TwoU64s
rust_dbg_extern_identity_double
rust_dbg_extern_identity_u8
rust_get_rt_env
rust_uv_handle_size
rust_uv_req_size
rust_uv_handle_type_max

@@ -241,8 +188,6 @@ rust_boxed_region_realloc
rust_boxed_region_free
rust_try
rust_begin_unwind
rust_take_task_borrow_list
rust_set_task_borrow_list
rust_valgrind_stack_register
rust_valgrind_stack_deregister
rust_take_env_lock

@@ -251,7 +196,6 @@ rust_update_log_settings
rust_running_on_valgrind
rust_get_num_cpus
rust_get_global_args_ptr
rust_current_boxed_region
rust_take_global_args_lock
rust_drop_global_args_lock
rust_set_exit_status_newrt

@@ -259,3 +203,4 @@ rust_get_exit_status_newrt
rust_take_change_dir_lock
rust_drop_change_dir_lock
rust_get_test_int
rust_get_task
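
[Editor's note] The list above is the runtime's exported-symbol table; every name must resolve to an unmangled C symbol in the runtime library so that Rust extern blocks can link against it. As a hedged sketch, a definition for one of these symbols would look like the following on the C++ side; the body and return value here are invented for illustration.

#include <stdint.h>

// extern "C" suppresses C++ name mangling so the symbol name matches the
// export list and the Rust-side `extern` declaration byte-for-byte.
extern "C" intptr_t rust_get_test_int() {
    return 1; // some fixed value the test suite can check for
}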

@@ -1,35 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

pub mod stream {
    pub enum Stream<T:Send> { send(T, ::stream::server::Stream<T>), }
    pub mod server {
        use std::option;
        use std::pipes;

        impl<T:Send> Stream<T> {
            pub fn recv() -> extern fn(v: Stream<T>) -> ::stream::Stream<T> {
                // resolve really should report just one error here.
                // Change the test case when it changes.
                pub fn recv(pipe: Stream<T>) -> ::stream::Stream<T> { //~ ERROR attempt to use a type argument out of scope
                    //~^ ERROR use of undeclared type name
                    //~^^ ERROR attempt to use a type argument out of scope
                    //~^^^ ERROR use of undeclared type name
                    pipes::recv(pipe).unwrap()
                }
                recv
            }
        }

        pub type Stream<T:Send> = pipes::RecvPacket<::stream::Stream<T>>;
    }
}

fn main() {}

@@ -81,17 +81,6 @@ fn test_ptr() {
     }
 }

-mod test {
-    use std::libc;
-
-    #[abi = "cdecl"]
-    #[nolink]
-    extern {
-        pub fn rust_get_sched_id() -> libc::intptr_t;
-        pub fn get_task_id() -> libc::intptr_t;
-    }
-}
-
 #[deriving(Eq)]
 struct p {
     x: int,

@@ -15,12 +15,12 @@ mod rustrt {

     #[abi = "cdecl"]
     extern {
-        pub fn get_task_id() -> libc::intptr_t;
+        pub fn rust_get_test_int() -> libc::intptr_t;
     }
 }

 pub fn main() {
     unsafe {
-        let _foo = rustrt::get_task_id;
+        let _foo = rustrt::rust_get_test_int;
     }
 }

@@ -8,13 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-#[link(name = "get_task_id")];
+#[link(name = "rust_get_test_int")];

 mod rustrt {
     use std::libc;

     extern {
-        pub fn get_task_id() -> libc::intptr_t;
+        pub fn rust_get_test_int() -> libc::intptr_t;
     }
 }
@@ -8,13 +8,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-use std::{pipes, io, task, comm};
-
 fn main() {
-    let (port, chan) = comm::stream();
+    let (port, chan) = stream();

-    do task::spawn {
-        io::println(port.recv());
+    do spawn {
+        println(port.recv());
     }

     chan.send("hello, world");
@@ -174,7 +174,7 @@ mod test_foreign_items {
         #[attr];

         #[attr]
-        fn get_task_id() -> libc::intptr_t;
+        fn rust_get_test_int() -> libc::intptr_t;
     }
 }