auto merge of #10080 : brson/rust/sched_queue, r=brson

Rebase and update of #9710
This commit is contained in:
bors 2013-10-27 20:21:29 -07:00
commit d664ca2635
6 changed files with 446 additions and 105 deletions

View File

@ -76,6 +76,8 @@ exceptions = [
"rt/isaac/randport.cpp", # public domain
"rt/isaac/rand.h", # public domain
"rt/isaac/standard.h", # public domain
"libstd/rt/mpsc_queue.rs", # BSD
"libstd/rt/mpmc_bounded_queue.rs", # BSD
]
def check_license(name, contents):

View File

@ -11,83 +11,45 @@
//! A concurrent queue that supports multiple producers and a
//! single consumer.
use container::Container;
use kinds::Send;
use vec::OwnedVector;
use cell::Cell;
use option::*;
use unstable::sync::{UnsafeArc, LittleLock};
use option::Option;
use clone::Clone;
use rt::mpsc_queue::Queue;
pub struct MessageQueue<T> {
priv state: UnsafeArc<State<T>>
}
struct State<T> {
count: uint,
queue: ~[T],
lock: LittleLock
priv queue: Queue<T>
}
impl<T: Send> MessageQueue<T> {
pub fn new() -> MessageQueue<T> {
MessageQueue {
state: UnsafeArc::new(State {
count: 0,
queue: ~[],
lock: LittleLock::new()
})
queue: Queue::new()
}
}
#[inline]
pub fn push(&mut self, value: T) {
unsafe {
let value = Cell::new(value);
let state = self.state.get();
do (*state).lock.lock {
(*state).count += 1;
(*state).queue.push(value.take());
}
}
self.queue.push(value)
}
#[inline]
pub fn pop(&mut self) -> Option<T> {
unsafe {
let state = self.state.get();
do (*state).lock.lock {
if !(*state).queue.is_empty() {
(*state).count += 1;
Some((*state).queue.shift())
} else {
None
}
}
}
self.queue.pop()
}
/// A pop that may sometimes miss enqueued elements, but is much faster
/// to give up without doing any synchronization
#[inline]
pub fn casual_pop(&mut self) -> Option<T> {
unsafe {
let state = self.state.get();
// NB: Unsynchronized check
if (*state).count == 0 { return None; }
do (*state).lock.lock {
if !(*state).queue.is_empty() {
(*state).count += 1;
Some((*state).queue.shift())
} else {
None
}
}
}
self.queue.pop()
}
}
impl<T: Send> Clone for MessageQueue<T> {
fn clone(&self) -> MessageQueue<T> {
MessageQueue {
state: self.state.clone()
queue: self.queue.clone()
}
}
}

View File

@ -136,6 +136,12 @@ mod work_queue;
/// A parallel queue.
mod message_queue;
/// A mostly lock-free multi-producer, single consumer queue.
mod mpsc_queue;
/// A lock-free multi-producer, multi-consumer bounded queue.
mod mpmc_bounded_queue;
/// A parallel data structure for tracking sleeping schedulers.
mod sleeper_list;

View File

@ -0,0 +1,213 @@
/* Multi-producer/multi-consumer bounded queue
* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
use unstable::sync::UnsafeArc;
use unstable::atomics::{AtomicUint,Relaxed,Release,Acquire};
use option::*;
use vec;
use clone::Clone;
use kinds::Send;
use num::{Exponential,Algebraic,Round};
/// One slot of the bounded ring buffer (Vyukov's bounded MPMC design).
struct Node<T> {
// Ticket used to coordinate producers and consumers: a slot at
// position `pos` is free for a push when sequence == pos, and holds a
// value ready to pop when sequence == pos + 1 (see State::push/pop).
sequence: AtomicUint,
// The element stored in this slot; None while the slot is empty.
value: Option<T>,
}
/// Shared state of the bounded MPMC queue.
struct State<T> {
// The padN fields separate the hot atomics onto different cache
// lines, presumably to avoid false sharing between producers and
// consumers (64-byte lines assumed — TODO confirm for target).
pad0: [u8, ..64],
// Ring buffer of slots; with_capacity guarantees a power-of-two length.
buffer: ~[Node<T>],
// buffer.len() - 1; maps a running position onto a buffer index.
mask: uint,
pad1: [u8, ..64],
// Next position a producer will try to claim.
enqueue_pos: AtomicUint,
pad2: [u8, ..64],
// Next position a consumer will try to claim.
dequeue_pos: AtomicUint,
pad3: [u8, ..64],
}
/// Handle to a lock-free multi-producer, multi-consumer bounded queue.
/// Cloned handles share the same underlying State.
struct Queue<T> {
priv state: UnsafeArc<State<T>>,
}
impl<T: Send> State<T> {
/// Builds queue state with at least `capacity` slots. The capacity is
/// rounded up to a power of two (minimum 2) so positions can be mapped
/// to slots with a cheap `pos & mask`.
fn with_capacity(capacity: uint) -> State<T> {
let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 {
if capacity < 2 {
2u
} else {
// use next power of 2 as capacity
2f64.pow(&((capacity as f64).log2().ceil())) as uint
}
} else {
capacity
};
// Slot i starts with sequence == i, i.e. "free for position i".
let buffer = do vec::from_fn(capacity) |i:uint| {
Node{sequence:AtomicUint::new(i),value:None}
};
State{
pad0: [0, ..64],
buffer: buffer,
mask: capacity-1,
pad1: [0, ..64],
enqueue_pos: AtomicUint::new(0),
pad2: [0, ..64],
dequeue_pos: AtomicUint::new(0),
pad3: [0, ..64],
}
}
/// Tries to enqueue `value`; returns false when the queue is full.
/// Producers claim a position via compare_and_swap, then publish the
/// value by bumping the slot's sequence with Release ordering.
fn push(&mut self, value: T) -> bool {
let mask = self.mask;
let mut pos = self.enqueue_pos.load(Relaxed);
loop {
let node = &mut self.buffer[pos & mask];
let seq = node.sequence.load(Acquire);
let diff: int = seq as int - pos as int;
if diff == 0 {
// Slot is free for this position; race to claim it.
let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed);
if enqueue_pos == pos {
// We own the slot. Store the value first, then mark it
// readable (sequence == pos+1); the Release store makes the
// value visible to the consumer's Acquire load above.
node.value = Some(value);
node.sequence.store(pos+1, Release);
break
} else {
// Another producer won the race; retry from its position.
pos = enqueue_pos;
}
} else if (diff < 0) {
// Slot still holds an unconsumed value from the previous lap
// around the ring: the queue is full.
return false
} else {
// We fell behind other producers; reload and retry.
pos = self.enqueue_pos.load(Relaxed);
}
}
true
}
/// Tries to dequeue; returns None when the queue is empty.
fn pop(&mut self) -> Option<T> {
let mask = self.mask;
let mut pos = self.dequeue_pos.load(Relaxed);
loop {
let node = &mut self.buffer[pos & mask];
let seq = node.sequence.load(Acquire);
let diff: int = seq as int - (pos + 1) as int;
if diff == 0 {
// Slot holds a value for this position; race to claim it.
let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed);
if dequeue_pos == pos {
let value = node.value.take();
// Free the slot for position pos + capacity (the next lap).
node.sequence.store(pos + mask + 1, Release);
return value
} else {
// Another consumer won the race; retry from its position.
pos = dequeue_pos;
}
} else if diff < 0 {
// Slot not yet filled for this lap: the queue is empty.
return None
} else {
// We fell behind other consumers; reload and retry.
pos = self.dequeue_pos.load(Relaxed);
}
}
}
}
impl<T: Send> Queue<T> {
/// Creates a queue with at least `capacity` slots (rounded up to a
/// power of two, minimum 2 — see State::with_capacity).
pub fn with_capacity(capacity: uint) -> Queue<T> {
Queue{
state: UnsafeArc::new(State::with_capacity(capacity))
}
}
/// Attempts to enqueue `value`; returns false if the queue is full.
pub fn push(&mut self, value: T) -> bool {
// Raw deref of the shared state; State::push synchronizes internally
// via the per-slot sequence atomics.
unsafe { (*self.state.get()).push(value) }
}
/// Attempts to dequeue; returns None if the queue is empty.
pub fn pop(&mut self) -> Option<T> {
unsafe { (*self.state.get()).pop() }
}
}
impl<T: Send> Clone for Queue<T> {
    /// Produce another handle to the same shared queue; only the
    /// reference-counted state pointer is duplicated, never the elements.
    fn clone(&self) -> Queue<T> {
        let shared = self.state.clone();
        Queue { state: shared }
    }
}
#[cfg(test)]
mod tests {
use prelude::*;
use option::*;
use task;
use comm;
use super::Queue;
// Stress test: 8 producer tasks and 8 consumer tasks hammer one queue.
#[test]
fn test() {
let nthreads = 8u;
let nmsgs = 1000u;
// Capacity covers every message, so no push should ever fail.
let mut q = Queue::with_capacity(nthreads*nmsgs);
assert_eq!(None, q.pop());
// Producers: each task pushes nmsgs values.
for _ in range(0, nthreads) {
let (port, chan) = comm::stream();
chan.send(q.clone());
do task::spawn_sched(task::SingleThreaded) {
let mut q = port.recv();
for i in range(0, nmsgs) {
assert!(q.push(i));
}
}
}
// Consumers: each task spins popping until it has received nmsgs
// values, then reports its count on a completion channel.
let mut completion_ports = ~[];
for _ in range(0, nthreads) {
let (completion_port, completion_chan) = comm::stream();
completion_ports.push(completion_port);
let (port, chan) = comm::stream();
chan.send(q.clone());
do task::spawn_sched(task::SingleThreaded) {
let mut q = port.recv();
let mut i = 0u;
loop {
match q.pop() {
// Queue momentarily empty; busy-wait and retry.
None => {},
Some(_) => {
i += 1;
if i == nmsgs { break }
}
}
}
completion_chan.send(i);
}
}
// Every consumer must have received exactly nmsgs messages.
for completion_port in completion_ports.iter() {
assert_eq!(nmsgs, completion_port.recv());
}
}
}

205
src/libstd/rt/mpsc_queue.rs Normal file
View File

@ -0,0 +1,205 @@
/* Multi-producer/single-consumer queue
* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
//! A mostly lock-free multi-producer, single consumer queue.
// http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue
use unstable::sync::UnsafeArc;
use unstable::atomics::{AtomicPtr,Relaxed,Release,Acquire};
use ptr::{mut_null, to_mut_unsafe_ptr};
use cast;
use option::*;
use clone::Clone;
use kinds::Send;
/// A link in the singly-linked MPSC list.
struct Node<T> {
// Pointer to the next (newer) node; mut_null() while this node is the
// most recently pushed one.
next: AtomicPtr<Node<T>>,
// Payload; None only for the stub node embedded in State.
value: Option<T>,
}
impl<T> Node<T> {
/// A node with no payload; used for State's embedded stub node.
fn empty() -> Node<T> {
Node{next: AtomicPtr::new(mut_null()), value: None}
}
/// A node carrying `value`, not yet linked into any list.
fn with_value(value: T) -> Node<T> {
Node{next: AtomicPtr::new(mut_null()), value: Some(value)}
}
}
/// Shared state of the MPSC queue (Vyukov's node-based MPSC design).
struct State<T> {
// The padN fields keep producer-side and consumer-side data on
// separate cache lines, presumably to avoid false sharing
// (64-byte lines assumed — TODO confirm for target).
pad0: [u8, ..64],
// Most recently pushed node; swapped atomically by producers.
head: AtomicPtr<Node<T>>,
pad1: [u8, ..64],
// Permanent dummy node so the list is never empty; see init()/pop().
stub: Node<T>,
pad2: [u8, ..64],
// Oldest node; read and written only by the single consumer, so it
// needs no atomic.
tail: *mut Node<T>,
pad3: [u8, ..64],
}
/// Handle to a mostly lock-free multi-producer, single-consumer queue.
/// Cloned handles share the same underlying State.
struct Queue<T> {
priv state: UnsafeArc<State<T>>,
}
impl<T: Send> Clone for Queue<T> {
    /// Creates an additional handle that shares the underlying queue
    /// state with `self` (only the ref-counted pointer is cloned).
    fn clone(&self) -> Queue<T> {
        Queue { state: self.state.clone() }
    }
}
impl<T: Send> State<T> {
pub fn new() -> State<T> {
// head and tail start null; linking them to the stub is deferred to
// init(), which must run after the State has reached its final
// address (see Queue::new), since it stores a pointer into `self`.
State{
pad0: [0, ..64],
head: AtomicPtr::new(mut_null()),
pad1: [0, ..64],
stub: Node::<T>::empty(),
pad2: [0, ..64],
tail: mut_null(),
pad3: [0, ..64],
}
}
/// Points head and tail at the embedded stub node, establishing the
/// "list is never empty" invariant the algorithm relies on.
fn init(&mut self) {
let stub = self.get_stub_unsafe();
self.head.store(stub, Relaxed);
self.tail = stub;
}
// Raw pointer to the stub node embedded in this State.
fn get_stub_unsafe(&mut self) -> *mut Node<T> {
to_mut_unsafe_ptr(&mut self.stub)
}
/// Enqueues `value` in a freshly allocated node. The ~ box is leaked
/// into a raw pointer here and reclaimed by transmute in pop().
fn push(&mut self, value: T) {
unsafe {
let node = cast::transmute(~Node::with_value(value));
self.push_node(node);
}
}
/// Links `node` in as the new head. Publishing is a two-step process
/// (swap head, then store prev.next); between the two steps the list
/// is briefly disconnected, which is why pop() may transiently report
/// empty — the "mostly" in mostly lock-free.
fn push_node(&mut self, node: *mut Node<T>) {
unsafe {
(*node).next.store(mut_null(), Release);
let prev = self.head.swap(node, Relaxed);
(*prev).next.store(node, Release);
}
}
/// Dequeues the oldest element, or returns None if the queue is empty
/// (or merely appears empty because a push is mid-flight). Single
/// consumer only: self.tail is mutated without synchronization.
fn pop(&mut self) -> Option<T> {
unsafe {
let mut tail = self.tail;
let mut next = (*tail).next.load(Acquire);
let stub = self.get_stub_unsafe();
if tail == stub {
// Tail is the stub: skip past it to the first real node.
if mut_null() == next {
return None
}
self.tail = next;
tail = next;
next = (*next).next.load(Acquire);
}
if next != mut_null() {
// Common case: reclaim the tail node's box and take its value.
let tail: ~Node<T> = cast::transmute(tail);
self.tail = next;
return tail.value
}
let head = self.head.load(Relaxed);
if tail != head {
// A producer has swapped head but not yet linked prev.next;
// report empty instead of spinning on the producer.
return None
}
// tail == head and it has no successor: re-insert the stub behind
// the last element so that node can be unlinked, then retry once.
self.push_node(stub);
next = (*tail).next.load(Acquire);
if next != mut_null() {
let tail: ~Node<T> = cast::transmute(tail);
self.tail = next;
return tail.value
}
}
None
}
}
impl<T: Send> Queue<T> {
/// Creates a new, empty queue.
pub fn new() -> Queue<T> {
unsafe {
let q = Queue{state: UnsafeArc::new(State::new())};
// init() runs only after the State is behind the UnsafeArc, so the
// stub pointer it stores into head/tail stays valid.
(*q.state.get()).init();
q
}
}
/// Enqueues `value`; callable from multiple producer tasks.
pub fn push(&mut self, value: T) {
unsafe { (*self.state.get()).push(value) }
}
/// Dequeues the oldest element, or None if the queue is (or transiently
/// appears) empty. Intended for a single consumer task only.
pub fn pop(&mut self) -> Option<T> {
unsafe{ (*self.state.get()).pop() }
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use option::*;
use task;
use comm;
use super::Queue;
// Stress test: 8 producer tasks push into one queue while the test
// task acts as the single consumer.
#[test]
fn test() {
let nthreads = 8u;
let nmsgs = 1000u;
let mut q = Queue::new();
assert_eq!(None, q.pop());
// Producers: each task pushes nmsgs values.
for _ in range(0, nthreads) {
let (port, chan) = comm::stream();
chan.send(q.clone());
do task::spawn_sched(task::SingleThreaded) {
let mut q = port.recv();
for i in range(0, nmsgs) {
q.push(i);
}
}
}
// Single consumer: spin-pop until every message has been received.
// None is expected transiently (queue empty or a push mid-flight).
let mut i = 0u;
loop {
match q.pop() {
None => {},
Some(_) => {
i += 1;
if i == nthreads*nmsgs { break }
}
}
}
}
}

View File

@ -11,84 +11,37 @@
//! Maintains a shared list of sleeping schedulers. Schedulers
//! use this to wake each other up.
use container::Container;
use vec::OwnedVector;
use option::{Option, Some, None};
use cell::Cell;
use unstable::sync::{UnsafeArc, LittleLock};
use rt::sched::SchedHandle;
use rt::mpmc_bounded_queue::Queue;
use option::*;
use clone::Clone;
pub struct SleeperList {
priv state: UnsafeArc<State>
}
struct State {
count: uint,
stack: ~[SchedHandle],
lock: LittleLock
priv q: Queue<SchedHandle>,
}
impl SleeperList {
pub fn new() -> SleeperList {
SleeperList {
state: UnsafeArc::new(State {
count: 0,
stack: ~[],
lock: LittleLock::new()
})
}
SleeperList{q: Queue::with_capacity(8*1024)}
}
pub fn push(&mut self, handle: SchedHandle) {
let handle = Cell::new(handle);
unsafe {
let state = self.state.get();
do (*state).lock.lock {
(*state).count += 1;
(*state).stack.push(handle.take());
}
}
pub fn push(&mut self, value: SchedHandle) {
assert!(self.q.push(value))
}
pub fn pop(&mut self) -> Option<SchedHandle> {
unsafe {
let state = self.state.get();
do (*state).lock.lock {
if !(*state).stack.is_empty() {
(*state).count -= 1;
Some((*state).stack.pop())
} else {
None
}
}
}
self.q.pop()
}
/// A pop that may sometimes miss enqueued elements, but is much faster
/// to give up without doing any synchronization
pub fn casual_pop(&mut self) -> Option<SchedHandle> {
unsafe {
let state = self.state.get();
// NB: Unsynchronized check
if (*state).count == 0 { return None; }
do (*state).lock.lock {
if !(*state).stack.is_empty() {
// NB: count is also protected by the lock
(*state).count -= 1;
Some((*state).stack.pop())
} else {
None
}
}
}
self.q.pop()
}
}
impl Clone for SleeperList {
fn clone(&self) -> SleeperList {
SleeperList {
state: self.state.clone()
q: self.q.clone()
}
}
}