green: Switch field privacy as necessary
parent eb08e8fec2
commit b9b0ed521d
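
Context for the diff below: this commit tracks the language's switch to private-by-default struct fields. Under the old rules a struct field was public unless marked priv; under the new rules it is private unless marked pub. The hunks therefore delete priv from fields that should stay module-private and add pub where outside access is still required (for example, Servo reads Coroutine's stack bounds). A minimal sketch of the new model, written in current Rust syntax rather than the pre-1.0 dialect (~ pointers, uint) used in this diff; the pool module and the event_loop_name field are invented for illustration:

    mod pool {
        /// Fields are private by default; only `threads` is exposed with `pub`.
        pub struct PoolConfig {
            pub threads: usize,      // visible outside the `pool` module
            event_loop_name: String, // private: only code inside `pool` can touch it
        }

        impl PoolConfig {
            pub fn new(threads: usize) -> PoolConfig {
                PoolConfig { threads, event_loop_name: String::from("default") }
            }
        }
    }

    fn main() {
        let config = pool::PoolConfig::new(4);
        println!("threads = {}", config.threads);
        // Accessing `config.event_loop_name` here would fail to compile,
        // because the field is private to the `pool` module.
    }

This is the pattern in the hunks that follow: fields that other modules or crates still need (such as Coroutine::current_stack_segment for Servo) gain an explicit pub, and the now-redundant priv qualifiers are dropped everywhere else.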
@@ -22,9 +22,9 @@ use std::raw;
 // then misalign the regs again.
 pub struct Context {
     /// Hold the registers while the task or scheduler is suspended
-    priv regs: ~Registers,
+    regs: ~Registers,
     /// Lower bound and upper bound for the stack
-    priv stack_bounds: Option<(uint, uint)>,
+    stack_bounds: Option<(uint, uint)>,
 }
 
 pub type InitFn = extern "C" fn(uint, *(), *()) -> !;
@@ -22,10 +22,10 @@ pub struct Coroutine {
     ///
     /// Servo needs this to be public in order to tell SpiderMonkey
    /// about the stack bounds.
-    current_stack_segment: Stack,
+    pub current_stack_segment: Stack,
 
     /// Always valid if the task is alive and not running.
-    saved_context: Context
+    pub saved_context: Context
 }
 
 impl Coroutine {
@@ -296,10 +296,10 @@ pub fn run(event_loop_factory: fn() -> ~rtio::EventLoop:Send,
 /// Configuration of how an M:N pool of schedulers is spawned.
 pub struct PoolConfig {
     /// The number of schedulers (OS threads) to spawn into this M:N pool.
-    threads: uint,
+    pub threads: uint,
     /// A factory function used to create new event loops. If this is not
     /// specified then the default event loop factory is used.
-    event_loop_factory: fn() -> ~rtio::EventLoop:Send,
+    pub event_loop_factory: fn() -> ~rtio::EventLoop:Send,
 }
 
 impl PoolConfig {
@@ -316,17 +316,17 @@ impl PoolConfig {
 /// A structure representing a handle to a pool of schedulers. This handle is
 /// used to keep the pool alive and also reap the status from the pool.
 pub struct SchedPool {
-    priv id: uint,
-    priv threads: ~[Thread<()>],
-    priv handles: ~[SchedHandle],
-    priv stealers: ~[deque::Stealer<~task::GreenTask>],
-    priv next_friend: uint,
-    priv stack_pool: StackPool,
-    priv deque_pool: deque::BufferPool<~task::GreenTask>,
-    priv sleepers: SleeperList,
-    priv factory: fn() -> ~rtio::EventLoop:Send,
-    priv task_state: TaskState,
-    priv tasks_done: Receiver<()>,
+    id: uint,
+    threads: ~[Thread<()>],
+    handles: ~[SchedHandle],
+    stealers: ~[deque::Stealer<~task::GreenTask>],
+    next_friend: uint,
+    stack_pool: StackPool,
+    deque_pool: deque::BufferPool<~task::GreenTask>,
+    sleepers: SleeperList,
+    factory: fn() -> ~rtio::EventLoop:Send,
+    task_state: TaskState,
+    tasks_done: Receiver<()>,
 }
 
 /// This is an internal state shared among a pool of schedulers. This is used to
@@ -23,11 +23,11 @@ pub fn queue<T: Send>() -> (Consumer<T>, Producer<T>) {
 }
 
 pub struct Producer<T> {
-    priv inner: UnsafeArc<mpsc::Queue<T>>,
+    inner: UnsafeArc<mpsc::Queue<T>>,
 }
 
 pub struct Consumer<T> {
-    priv inner: UnsafeArc<mpsc::Queue<T>>,
+    inner: UnsafeArc<mpsc::Queue<T>>,
 }
 
 impl<T: Send> Consumer<T> {
@@ -39,7 +39,12 @@ pub struct Scheduler {
     /// ID number of the pool that this scheduler is a member of. When
     /// reawakening green tasks, this is used to ensure that tasks aren't
     /// reawoken on the wrong pool of schedulers.
-    pool_id: uint,
+    pub pool_id: uint,
+    /// The pool of stacks that this scheduler has cached
+    pub stack_pool: StackPool,
+    /// Bookkeeping for the number of tasks which are currently running around
+    /// inside this pool of schedulers
+    pub task_state: TaskState,
     /// There are N work queues, one per scheduler.
     work_queue: deque::Worker<~GreenTask>,
     /// Work queues for the other schedulers. These are created by
@@ -64,7 +69,6 @@ pub struct Scheduler {
     /// A flag to indicate we've received the shutdown message and should
     /// no longer try to go to sleep, but exit instead.
     no_sleep: bool,
-    stack_pool: StackPool,
     /// The scheduler runs on a special task. When it is not running
     /// it is stored here instead of the work queue.
     sched_task: Option<~GreenTask>,
@@ -87,9 +91,6 @@ pub struct Scheduler {
     /// A flag to tell the scheduler loop it needs to do some stealing
     /// in order to introduce randomness as part of a yield
     steal_for_yield: bool,
-    /// Bookkeeping for the number of tasks which are currently running around
-    /// inside this pool of schedulers
-    task_state: TaskState,
 
     // n.b. currently destructors of an object are run in top-to-bottom in order
     // of field declaration. Due to its nature, the pausable idle callback
@@ -99,7 +100,7 @@ pub struct Scheduler {
     // destroyed before it's actually destroyed.
 
     /// The event loop used to drive the scheduler and perform I/O
-    event_loop: ~EventLoop:Send,
+    pub event_loop: ~EventLoop:Send,
 }
 
 /// An indication of how hard to work on a given operation, the difference
@@ -893,9 +894,9 @@ pub enum SchedMessage {
 }
 
 pub struct SchedHandle {
-    priv remote: ~RemoteCallback:Send,
-    priv queue: msgq::Producer<SchedMessage>,
-    sched_id: uint
+    remote: ~RemoteCallback:Send,
+    queue: msgq::Producer<SchedMessage>,
+    pub sched_id: uint
 }
 
 impl SchedHandle {
@@ -16,7 +16,7 @@ use std::sync::mpmc_bounded_queue::Queue;
 use sched::SchedHandle;
 
 pub struct SleeperList {
-    priv q: Queue<SchedHandle>,
+    q: Queue<SchedHandle>,
 }
 
 impl SleeperList {
@@ -15,9 +15,9 @@ use std::libc;
 
 /// A task's stack. The name "Stack" is a vestige of segmented stacks.
 pub struct Stack {
-    priv buf: MemoryMap,
-    priv min_size: uint,
-    priv valgrind_id: libc::c_uint,
+    buf: MemoryMap,
+    min_size: uint,
+    valgrind_id: libc::c_uint,
 }
 
 // Try to use MAP_STACK on platforms that support it (it's what we're doing
@@ -126,7 +126,7 @@ impl Drop for Stack {
 pub struct StackPool {
     // Ideally this would be some datastructure that preserved ordering on
     // Stack.min_size.
-    priv stacks: ~[Stack],
+    stacks: ~[Stack],
 }
 
 impl StackPool {
@@ -42,32 +42,32 @@ pub struct GreenTask {
     /// context and the stack that this task owns. This field is optional to
     /// relinquish ownership back to a scheduler to recycle stacks at a later
     /// date.
-    coroutine: Option<Coroutine>,
+    pub coroutine: Option<Coroutine>,
 
     /// Optional handle back into the home sched pool of this task. This field
     /// is lazily initialized.
-    handle: Option<SchedHandle>,
+    pub handle: Option<SchedHandle>,
 
     /// Slot for maintaining ownership of a scheduler. If a task is running,
     /// this value will be Some(sched) where the task is running on "sched".
-    sched: Option<~Scheduler>,
+    pub sched: Option<~Scheduler>,
 
     /// Temporary ownership slot of a std::rt::task::Task object. This is used
     /// to squirrel that libstd task away while we're performing green task
     /// operations.
-    task: Option<~Task>,
+    pub task: Option<~Task>,
 
     /// Dictates whether this is a sched task or a normal green task
-    task_type: TaskType,
+    pub task_type: TaskType,
 
     /// Home pool that this task was spawned into. This field is lazily
     /// initialized until when the task is initially scheduled, and is used to
     /// make sure that tasks are always woken up in the correct pool of
     /// schedulers.
-    pool_id: uint,
+    pub pool_id: uint,
 
     // See the comments in the scheduler about why this is necessary
-    nasty_deschedule_lock: NativeMutex,
+    pub nasty_deschedule_lock: NativeMutex,
 }
 
 pub enum TaskType {