sync: Move underneath libstd

This commit is the final step in the libstd facade, #13851. The purpose of this
commit is to move libsync underneath the standard library, behind the facade.
This will allow core primitives like channels, queues, and atomics to all live
in the same location.

There were a few notable changes and a few breaking changes as part of this
movement:

* The `Vec` and `String` types are reexported at the top level of libcollections
* The `unreachable!()` macro was copied to libcore
* The `std::rt::thread` module was moved to librustrt, but it is still
  reexported at the same location.
* The `std::comm` module was moved to libsync
* The old `sync::comm` module (duplex streams) now lives as a private `duplex`
  submodule of the new `comm` module in libsync, with its types and functions
  reexported under `std::comm`. This is a breaking change for any existing
  users of duplex streams.
* All concurrent queues/deques were moved directly under libsync. They are also
  all marked with #![experimental] for now if they are public.
* The `task_pool` and `future` modules no longer live in libsync, but rather
  live under `std::sync`. That public location is permanent, but the code
  itself may move down into libsync if the `std::task` module moves as well.

[breaking-change]
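
For downstream users, the breaking changes mostly amount to new import paths.
Below is a minimal before/after sketch in the pre-1.0 dialect this commit
targets; the import paths come from the diffs below, while the surrounding
program and its values are purely illustrative:

```rust
// Before this commit, downstream code linked the external crate directly:
//
//     extern crate sync;
//     use sync::{Arc, Future};
//     use sync::DuplexStream;
//
// Afterwards the same functionality is reached through the std facade,
// and the `extern crate sync;` line simply goes away.

use std::sync::{Arc, Future};
use std::comm::duplex;
use std::task::spawn;

fn main() {
    // Atomically reference-counted data shared with a child task.
    let numbers = Arc::new(vec![1i, 2, 3]);
    let child_numbers = numbers.clone();
    spawn(proc() {
        println!("child sees {} numbers", child_numbers.len());
    });

    // A background computation, formerly `sync::Future::spawn`.
    let mut delayed = Future::spawn(proc() 21i * 2);
    println!("the answer is {}", delayed.get());

    // A bidirectional stream, formerly `sync::duplex()`.
    let (parent_end, child_end) = duplex::<int, uint>();
    parent_end.send(1);
    assert_eq!(child_end.recv(), 1);
}
```

Old paths such as `sync::one::{Once, ONCE_INIT}` likewise become
`std::sync::{Once, ONCE_INIT}`, as the liblog and librustc diffs below show.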
Alex Crichton 2014-06-07 11:13:26 -07:00
parent c690191a84
commit b1c9ce9c6f
61 changed files with 383 additions and 362 deletions

View File

@ -61,16 +61,16 @@ DEPS_rlibc :=
DEPS_alloc := core libc native:jemalloc
DEPS_debug := std
DEPS_rustrt := alloc core libc collections native:rustrt_native
DEPS_std := core libc rand alloc collections rustrt \
DEPS_std := core libc rand alloc collections rustrt sync \
native:rust_builtin native:backtrace
DEPS_graphviz := std
DEPS_green := std native:context_switch
DEPS_rustuv := std native:uv native:uv_support
DEPS_native := std
DEPS_syntax := std term serialize log fmt_macros debug
DEPS_rustc := syntax native:rustllvm flate arena serialize sync getopts \
DEPS_rustc := syntax native:rustllvm flate arena serialize getopts \
time log graphviz debug
DEPS_rustdoc := rustc native:hoedown serialize sync getopts \
DEPS_rustdoc := rustc native:hoedown serialize getopts \
test time debug
DEPS_flate := std native:miniz
DEPS_arena := std
@ -80,17 +80,17 @@ DEPS_serialize := std log
DEPS_term := std log
DEPS_semver := std
DEPS_uuid := std serialize
DEPS_sync := std alloc
DEPS_sync := core alloc rustrt collections
DEPS_getopts := std
DEPS_collections := core alloc
DEPS_fourcc := rustc syntax std
DEPS_hexfloat := rustc syntax std
DEPS_num := std
DEPS_test := std getopts serialize term time regex native:rust_test_helpers
DEPS_time := std serialize sync
DEPS_time := std serialize
DEPS_rand := core
DEPS_url := std
DEPS_log := std sync
DEPS_log := std
DEPS_regex := std
DEPS_regex_macros = rustc syntax std regex
DEPS_fmt_macros = std

View File

@ -269,7 +269,7 @@ later.
The basic example below illustrates this.
~~~
extern crate sync;
use std::sync::Future;
# fn main() {
# fn make_a_sandwich() {};
@ -278,7 +278,7 @@ fn fib(n: u64) -> u64 {
12586269025
}
let mut delayed_fib = sync::Future::spawn(proc() fib(50));
let mut delayed_fib = Future::spawn(proc() fib(50));
make_a_sandwich();
println!("fib(50) = {}", delayed_fib.get())
# }
@ -294,7 +294,7 @@ Here is another example showing how futures allow you to background computations
be distributed on the available cores.
~~~
# extern crate sync;
# use std::sync::Future;
fn partial_sum(start: uint) -> f64 {
let mut local_sum = 0f64;
for num in range(start*100000, (start+1)*100000) {
@ -304,7 +304,7 @@ fn partial_sum(start: uint) -> f64 {
}
fn main() {
let mut futures = Vec::from_fn(1000, |ind| sync::Future::spawn( proc() { partial_sum(ind) }));
let mut futures = Vec::from_fn(1000, |ind| Future::spawn( proc() { partial_sum(ind) }));
let mut final_res = 0f64;
for ft in futures.mut_iter() {
@ -329,10 +329,8 @@ Here is a small example showing how to use Arcs. We wish to run concurrently sev
a single large vector of floats. Each task needs the full vector to perform its duty.
~~~
extern crate sync;
use sync::Arc;
use std::rand;
use std::sync::Arc;
fn pnorm(nums: &[f64], p: uint) -> f64 {
nums.iter().fold(0.0, |a, b| a + b.powf(p as f64)).powf(1.0 / (p as f64))
@ -357,9 +355,8 @@ at the power given as argument and takes the inverse power of this value). The A
created by the line
~~~
# extern crate sync;
# use std::rand;
# use sync::Arc;
# use std::sync::Arc;
# fn main() {
# let numbers = Vec::from_fn(1000000, |_| rand::random::<f64>());
let numbers_arc=Arc::new(numbers);
@ -371,9 +368,8 @@ it's contents. Within the task's procedure, the captured Arc reference can be us
reference to the underlying vector as if it were local.
~~~
# extern crate sync;
# use std::rand;
# use sync::Arc;
# use std::sync::Arc;
# fn pnorm(nums: &[f64], p: uint) -> f64 { 4.0 }
# fn main() {
# let numbers=Vec::from_fn(1000000, |_| rand::random::<f64>());
@ -461,9 +457,9 @@ the string in response. The child terminates when it receives `0`.
Here is the function that implements the child task:
~~~
extern crate sync;
use std::comm::DuplexStream;
# fn main() {
fn stringifier(channel: &sync::DuplexStream<String, uint>) {
fn stringifier(channel: &DuplexStream<String, uint>) {
let mut value: uint;
loop {
value = channel.recv();
@ -485,10 +481,10 @@ response itself is simply the stringified version of the received value,
Here is the code for the parent task:
~~~
extern crate sync;
use std::comm::duplex;
# use std::task::spawn;
# use sync::DuplexStream;
# fn stringifier(channel: &sync::DuplexStream<String, uint>) {
# use std::comm::DuplexStream;
# fn stringifier(channel: &DuplexStream<String, uint>) {
# let mut value: uint;
# loop {
# value = channel.recv();
@ -498,7 +494,7 @@ extern crate sync;
# }
# fn main() {
let (from_child, to_child) = sync::duplex();
let (from_child, to_child) = duplex();
spawn(proc() {
stringifier(&to_child);

View File

@ -297,8 +297,7 @@ an atomically reference counted box ("A.R.C." == "atomically reference counted")
Here's some code:
```
extern crate sync;
use sync::Arc;
use std::sync::Arc;
fn main() {
let numbers = vec![1,2,3];
@ -344,8 +343,7 @@ Let's take the same example yet again,
and modify it to mutate the shared state:
```
extern crate sync;
use sync::{Arc, Mutex};
use std::sync::{Arc, Mutex};
fn main() {
let numbers = vec![1,2,3];

View File

@ -38,9 +38,9 @@ exceptions = [
"rt/isaac/randport.cpp", # public domain
"rt/isaac/rand.h", # public domain
"rt/isaac/standard.h", # public domain
"libstd/sync/mpsc_queue.rs", # BSD
"libstd/sync/spsc_queue.rs", # BSD
"libstd/sync/mpmc_bounded_queue.rs", # BSD
"libsync/mpsc_queue.rs", # BSD
"libsync/spsc_queue.rs", # BSD
"libsync/mpmc_bounded_queue.rs", # BSD
"libsync/mpsc_intrusive.rs", # BSD
"test/bench/shootout-fannkuch-redux.rs", # BSD
"test/bench/shootout-meteor.rs", # BSD

View File

@ -33,9 +33,7 @@ use heap::deallocate;
/// task.
///
/// ```rust
/// extern crate sync;
///
/// use sync::Arc;
/// use std::sync::Arc;
///
/// fn main() {
/// let numbers = Vec::from_fn(100, |i| i as f32);
@ -276,7 +274,7 @@ mod tests {
use std::task;
use std::vec::Vec;
use super::{Arc, Weak};
use sync::Mutex;
use std::sync::Mutex;
struct Canary(*mut atomics::AtomicUint);

View File

@ -84,7 +84,6 @@ extern crate libc;
// Allow testing this library
#[cfg(test)] extern crate debug;
#[cfg(test)] extern crate sync;
#[cfg(test)] extern crate native;
#[cfg(test, stage0)] #[phase(syntax, link)] extern crate std;
#[cfg(test, stage0)] #[phase(syntax, link)] extern crate log;

View File

@ -52,8 +52,10 @@ pub use enum_set::EnumSet;
pub use priority_queue::PriorityQueue;
pub use ringbuf::RingBuf;
pub use smallintmap::SmallIntMap;
pub use string::String;
pub use treemap::{TreeMap, TreeSet};
pub use trie::{TrieMap, TrieSet};
pub use vec::Vec;
mod macros;

View File

@ -121,16 +121,14 @@ impl AtomicBool {
///
/// # Examples
///
/// ```ignore
/// # // FIXME: Needs PR #12430
/// extern crate sync;
///
/// use sync::Arc;
/// ```rust
/// use std::sync::Arc;
/// use std::sync::atomics::{AtomicBool, SeqCst};
/// use std::task::deschedule;
///
/// fn main() {
/// let spinlock = Arc::new(AtomicBool::new(false));
/// let spinlock_clone = spin_lock.clone();
/// let spinlock_clone = spinlock.clone();
///
/// spawn(proc() {
/// with_lock(&spinlock, || println!("task 1 in lock"));
@ -155,7 +153,7 @@ impl AtomicBool {
/// f();
///
/// // Release the lock
/// spinlock.store(false);
/// spinlock.store(false, SeqCst);
/// }
/// ```
#[inline]

View File

@ -131,3 +131,6 @@ macro_rules! write(
format_args_method!($dst, write_fmt, $($arg)*)
})
)
#[macro_export]
macro_rules! unreachable( () => (fail!("unreachable code")) )

View File

@ -117,8 +117,6 @@ if logging is disabled, none of the components of the log will be executed.
#![feature(macro_rules)]
#![deny(missing_doc, deprecated_owned_vector)]
extern crate sync;
use std::fmt;
use std::io::LineBufferedWriter;
use std::io;
@ -126,8 +124,7 @@ use std::mem;
use std::os;
use std::rt;
use std::slice;
use sync::one::{Once, ONCE_INIT};
use std::sync::{Once, ONCE_INIT};
use directive::LOG_LEVEL_NAMES;

View File

@ -386,7 +386,7 @@ pub mod write {
}
unsafe fn configure_llvm(sess: &Session) {
use sync::one::{Once, ONCE_INIT};
use std::sync::{Once, ONCE_INIT};
static mut INIT: Once = ONCE_INIT;
// Copy what clang does by turning on loop vectorization at O2 and

View File

@ -38,7 +38,6 @@ extern crate getopts;
extern crate graphviz;
extern crate libc;
extern crate serialize;
extern crate sync;
extern crate syntax;
extern crate time;

View File

@ -2296,7 +2296,7 @@ pub fn trans_crate(krate: ast::Crate,
// Before we touch LLVM, make sure that multithreading is enabled.
unsafe {
use sync::one::{Once, ONCE_INIT};
use std::sync::{Once, ONCE_INIT};
static mut INIT: Once = ONCE_INIT;
static mut POISONED: bool = false;
INIT.doit(|| {

View File

@ -39,8 +39,8 @@ use std::io::{fs, File, BufferedWriter, MemWriter, BufferedReader};
use std::io;
use std::str;
use std::string::String;
use std::sync::Arc;
use sync::Arc;
use serialize::json::ToJson;
use syntax::ast;
use syntax::ast_util;

View File

@ -21,7 +21,6 @@ extern crate getopts;
extern crate libc;
extern crate rustc;
extern crate serialize;
extern crate sync;
extern crate syntax;
extern crate testing = "test";
extern crate time;

View File

@ -15,7 +15,8 @@
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/")]
#![feature(macro_rules, phase, globs, thread_local, managed_boxes, asm)]
#![feature(macro_rules, phase, globs, thread_local, managed_boxes, asm,
linkage)]
#![no_std]
#![experimental]
@ -58,6 +59,7 @@ mod libunwind;
pub mod args;
pub mod bookkeeping;
pub mod c_str;
pub mod exclusive;
pub mod local;
pub mod local_data;
@ -66,8 +68,8 @@ pub mod mutex;
pub mod rtio;
pub mod stack;
pub mod task;
pub mod thread;
pub mod unwind;
pub mod c_str;
/// The interface to the current runtime.
///

View File

@ -15,15 +15,15 @@
//! which are not used for scheduling in any way.
#![allow(non_camel_case_types)]
#![allow(unsigned_negate)]
use kinds::Send;
use core::prelude::*;
use alloc::owned::Box;
use core::mem;
use core::uint;
use libc;
use mem;
use ops::Drop;
use option::{Option, Some, None};
use owned::Box;
use uint;
use stack;
type StartFn = extern "C" fn(*libc::c_void) -> imp::rust_thread_return;
@ -43,7 +43,6 @@ static DEFAULT_STACK_SIZE: uint = 1024 * 1024;
// and invoke it.
#[no_split_stack]
extern fn thread_start(main: *libc::c_void) -> imp::rust_thread_return {
use rt::stack;
unsafe {
stack::record_stack_bounds(0, uint::MAX);
let f: Box<proc()> = mem::transmute(main);
@ -146,16 +145,16 @@ impl<T: Send> Drop for Thread<T> {
#[cfg(windows)]
mod imp {
use mem;
use cmp;
use kinds::Send;
use core::prelude::*;
use alloc::owned::Box;
use core::cmp;
use core::mem;
use core::ptr;
use libc;
use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL,
LPVOID, DWORD, LPDWORD, HANDLE};
use os;
use owned::Box;
use ptr;
use rt::stack::RED_ZONE;
use stack::RED_ZONE;
pub type rust_thread = HANDLE;
pub type rust_thread_return = DWORD;
@ -178,7 +177,7 @@ mod imp {
if ret as uint == 0 {
// be sure to not leak the closure
let _p: Box<proc():Send> = mem::transmute(arg);
fail!("failed to spawn native thread: {}", os::last_os_error());
fail!("failed to spawn native thread: {}", ret);
}
return ret;
}
@ -214,15 +213,16 @@ mod imp {
#[cfg(unix)]
mod imp {
use cmp;
use kinds::Send;
use core::prelude::*;
use alloc::owned::Box;
use core::cmp;
use core::mem;
use core::ptr;
use libc::consts::os::posix01::{PTHREAD_CREATE_JOINABLE, PTHREAD_STACK_MIN};
use libc;
use mem;
use os;
use owned::Box;
use ptr;
use rt::stack::RED_ZONE;
use stack::RED_ZONE;
pub type rust_thread = libc::pthread_t;
pub type rust_thread_return = *u8;
@ -243,14 +243,15 @@ mod imp {
// EINVAL means |stack_size| is either too small or not a
// multiple of the system page size. Because it's definitely
// >= PTHREAD_STACK_MIN, it must be an alignment issue.
// Round up to the nearest page and try again.
let page_size = os::page_size();
let stack_size = (stack_size + page_size - 1) & (-(page_size - 1) - 1);
// Round up to the nearest page and try again.
let page_size = libc::sysconf(libc::_SC_PAGESIZE) as uint;
let stack_size = (stack_size + page_size - 1) &
(-(page_size as int - 1) as uint - 1);
assert_eq!(pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t), 0);
},
errno => {
// This cannot really happen.
fail!("pthread_attr_setstacksize() error: {} ({})", os::last_os_error(), errno);
fail!("pthread_attr_setstacksize() error: {}", errno);
},
};
@ -261,7 +262,7 @@ mod imp {
if ret != 0 {
// be sure to not leak the closure
let _p: Box<proc():Send> = mem::transmute(arg);
fail!("failed to spawn native thread: {}", os::last_os_error());
fail!("failed to spawn native thread: {}", ret);
}
native
}
@ -288,7 +289,6 @@ mod imp {
// is non-null before calling it!
#[cfg(target_os = "linux")]
fn min_stack_size(attr: *libc::pthread_attr_t) -> libc::size_t {
use ptr::RawPtr;
type F = unsafe extern "C" fn(*libc::pthread_attr_t) -> libc::size_t;
extern {
#[linkage = "extern_weak"]

View File

@ -126,6 +126,7 @@ extern crate alloc;
extern crate core;
extern crate core_collections = "collections";
extern crate core_rand = "rand";
extern crate core_sync = "sync";
extern crate libc;
extern crate rustrt;
@ -172,6 +173,8 @@ pub use core_collections::vec;
pub use rustrt::c_str;
pub use rustrt::local_data;
pub use core_sync::comm;
// Run tests with libgreen instead of libnative.
//
// FIXME: This egregiously hacks around starting the test runner in a different
@ -234,10 +237,8 @@ pub mod collections;
/* Tasks and communication */
pub mod task;
pub mod comm;
pub mod sync;
/* Runtime and platform support */
pub mod c_vec;

View File

@ -63,13 +63,10 @@ pub use self::util::{default_sched_threads, min_stack, running_on_valgrind};
// Reexport functionality from librustrt and other crates underneath the
// standard library which work together to create the entire runtime.
pub use alloc::{heap, libc_heap};
pub use rustrt::{task, local, mutex, exclusive, stack, args, rtio};
pub use rustrt::{task, local, mutex, exclusive, stack, args, rtio, thread};
pub use rustrt::{Stdio, Stdout, Stderr, begin_unwind, begin_unwind_fmt};
pub use rustrt::{bookkeeping, at_exit, unwind, DEFAULT_ERROR_CODE, Runtime};
// Bindings to system threading libraries.
pub mod thread;
// Simple backtrace functionality (to print on failure)
pub mod backtrace;

View File

@ -15,7 +15,7 @@
* # Example
*
* ```rust
* use sync::Future;
* use std::sync::Future;
* # fn fib(n: uint) -> uint {42};
* # fn make_a_sandwich() {};
* let mut delayed_fib = Future::spawn(proc() { fib(5000) });
@ -26,7 +26,11 @@
#![allow(missing_doc)]
use std::mem::replace;
use core::prelude::*;
use core::mem::replace;
use comm::{Receiver, channel};
use task::spawn;
/// A type encapsulating the result of a computation which may not be complete
pub struct Future<A> {
@ -137,9 +141,9 @@ impl<A:Send> Future<A> {
#[cfg(test)]
mod test {
use future::Future;
use std::task;
use prelude::*;
use sync::Future;
use task;
#[test]
fn test_from_value() {

View File

@ -15,8 +15,14 @@
//! and/or blocking at all, but rather provide the necessary tools to build
//! other types of concurrent primitives.
pub mod atomics;
pub mod deque;
pub mod mpmc_bounded_queue;
pub mod mpsc_queue;
pub mod spsc_queue;
pub use core_sync::{atomics, deque, mpmc_bounded_queue, mpsc_queue, spsc_queue};
pub use core_sync::{Arc, Weak, Mutex, MutexGuard, Condvar, Barrier};
pub use core_sync::{RWLock, RWLockReadGuard, RWLockWriteGuard};
pub use core_sync::{Semaphore, SemaphoreGuard};
pub use core_sync::one::{Once, ONCE_INIT};
pub use self::future::Future;
pub use self::task_pool::TaskPool;
mod future;
mod task_pool;

View File

@ -13,7 +13,12 @@
/// A task pool abstraction. Useful for achieving predictable CPU
/// parallelism.
use std::task;
use core::prelude::*;
use task;
use task::spawn;
use vec::Vec;
use comm::{channel, Sender};
enum Msg<T> {
Execute(proc(&T):Send),

View File

@ -40,9 +40,7 @@
//! A simple spinlock:
//!
//! ```
//! extern crate sync;
//!
//! use sync::Arc;
//! use std::sync::Arc;
//! use std::sync::atomics::{AtomicUint, SeqCst};
//! use std::task::deschedule;
//!
@ -68,9 +66,7 @@
//! Transferring a heap object with `AtomicOption`:
//!
//! ```
//! extern crate sync;
//!
//! use sync::Arc;
//! use std::sync::Arc;
//! use std::sync::atomics::{AtomicOption, SeqCst};
//!
//! fn main() {
@ -105,10 +101,10 @@
//! }
//! ```
use mem;
use ops::Drop;
use option::{Option,Some,None};
use owned::Box;
use core::prelude::*;
use alloc::owned::Box;
use core::mem;
pub use core::atomics::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
pub use core::atomics::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
@ -188,7 +184,7 @@ impl<T> Drop for AtomicOption<T> {
#[cfg(test)]
mod test {
use option::*;
use std::prelude::*;
use super::*;
#[test]

View File

@ -16,7 +16,10 @@ Higher level communication abstractions.
#![allow(missing_doc)]
use std::comm;
use core::prelude::*;
use comm;
use comm::{Sender, Receiver, channel};
/// An extension of `pipes::stream` that allows both sending and receiving.
pub struct DuplexStream<S, R> {
@ -53,11 +56,11 @@ impl<S:Send,R:Send> DuplexStream<S, R> {
#[cfg(test)]
mod test {
use std::prelude::*;
use comm::{duplex};
#[test]
pub fn DuplexStream1() {
pub fn duplex_stream_1() {
let (left, right) = duplex();
left.send("abc".to_string());

View File

@ -271,36 +271,32 @@
// And now that you've seen all the races that I found and attempted to fix,
// here's the code for you to find some more!
use alloc::arc::Arc;
use core::prelude::*;
use cell::Cell;
use clone::Clone;
use iter::Iterator;
use kinds::Send;
use kinds::marker;
use mem;
use ops::Drop;
use option::{Some, None, Option};
use owned::Box;
use result::{Ok, Err, Result};
use rt::local::Local;
use rt::task::{Task, BlockedTask};
use ty::Unsafe;
use alloc::arc::Arc;
use alloc::owned::Box;
use core::cell::Cell;
use core::kinds::marker;
use core::mem;
use core::ty::Unsafe;
use rustrt::local::Local;
use rustrt::task::{Task, BlockedTask};
pub use comm::select::{Select, Handle};
pub use comm::duplex::{DuplexStream, duplex};
macro_rules! test (
{ fn $name:ident() $b:block $(#[$a:meta])*} => (
mod $name {
#![allow(unused_imports)]
use std::prelude::*;
use native;
use comm::*;
use prelude::*;
use super::*;
use super::super::*;
use owned::Box;
use task;
use std::task;
fn f() $b
@ -315,10 +311,11 @@ macro_rules! test (
)
)
mod select;
mod duplex;
mod oneshot;
mod stream;
mod select;
mod shared;
mod stream;
mod sync;
// Use a power of 2 to allow LLVM to optimize to something that's not a
@ -984,10 +981,10 @@ impl<T: Send> Drop for Receiver<T> {
#[cfg(test)]
mod test {
use prelude::*;
use std::prelude::*;
use native;
use os;
use std::os;
use super::*;
pub fn stress_factor() -> uint {
@ -1480,7 +1477,7 @@ mod test {
})
test!(fn sends_off_the_runtime() {
use rt::thread::Thread;
use std::rt::thread::Thread;
let (tx, rx) = channel();
let t = Thread::start(proc() {
@ -1495,7 +1492,7 @@ mod test {
})
test!(fn try_recvs_off_the_runtime() {
use rt::thread::Thread;
use std::rt::thread::Thread;
let (tx, rx) = channel();
let (cdone, pdone) = channel();
@ -1520,8 +1517,8 @@ mod test {
#[cfg(test)]
mod sync_tests {
use prelude::*;
use os;
use std::prelude::*;
use std::os;
pub fn stress_factor() -> uint {
match os::getenv("RUST_TEST_STRESS") {

View File

@ -32,16 +32,15 @@
/// The one caveat to consider is that when a port sees a disconnected channel
/// it must check for data because there is no "data plus upgrade" state.
use core::prelude::*;
use alloc::owned::Box;
use core::mem;
use rustrt::local::Local;
use rustrt::task::{Task, BlockedTask};
use atomics;
use comm::Receiver;
use kinds::Send;
use mem;
use ops::Drop;
use option::{Some, None, Option};
use owned::Box;
use result::{Result, Ok, Err};
use rt::local::Local;
use rt::task::{Task, BlockedTask};
use sync::atomics;
// Various states you can find a port in.
static EMPTY: uint = 0;

View File

@ -45,20 +45,17 @@
#![allow(dead_code)]
use cell::Cell;
use iter::Iterator;
use kinds::Send;
use kinds::marker;
use mem;
use ops::Drop;
use option::{Some, None, Option};
use owned::Box;
use ptr::RawPtr;
use result::{Ok, Err, Result};
use rt::local::Local;
use rt::task::{Task, BlockedTask};
use super::Receiver;
use uint;
use core::prelude::*;
use alloc::owned::Box;
use core::cell::Cell;
use core::kinds::marker;
use core::mem;
use core::uint;
use rustrt::local::Local;
use rustrt::task::{Task, BlockedTask};
use comm::Receiver;
/// The "receiver set" of the select interface. This structure is used to manage
/// a set of receivers which are being selected over.
@ -321,8 +318,27 @@ impl Iterator<*mut Handle<'static, ()>> for Packets {
#[cfg(test)]
#[allow(unused_imports)]
mod test {
use std::prelude::*;
use super::super::*;
use prelude::*;
// Don't use the libstd version so we can pull in the right Select structure
// (std::comm points at the wrong one)
macro_rules! select {
(
$($name:pat = $rx:ident.$meth:ident() => $code:expr),+
) => ({
use comm::Select;
let sel = Select::new();
$( let mut $rx = sel.handle(&$rx); )+
unsafe {
$( $rx.add(); )+
}
let ret = sel.wait();
$( if ret == $rx.id() { let $name = $rx.$meth(); $code } else )+
{ unreachable!() }
})
}
test!(fn smoke() {
let (tx1, rx1) = channel::<int>();

View File

@ -18,21 +18,18 @@
/// module. You'll also note that the implementation of the shared and stream
/// channels are quite similar, and this is no coincidence!
use cmp;
use int;
use iter::Iterator;
use kinds::Send;
use ops::Drop;
use option::{Some, None, Option};
use owned::Box;
use result::{Ok, Err, Result};
use rt::local::Local;
use rt::mutex::NativeMutex;
use rt::task::{Task, BlockedTask};
use rt::thread::Thread;
use sync::atomics;
use core::prelude::*;
use mpsc = sync::mpsc_queue;
use alloc::owned::Box;
use core::cmp;
use core::int;
use rustrt::local::Local;
use rustrt::mutex::NativeMutex;
use rustrt::task::{Task, BlockedTask};
use rustrt::thread::Thread;
use atomics;
use mpsc = mpsc_queue;
static DISCONNECTED: int = int::MIN;
static FUDGE: int = 1024;

View File

@ -17,20 +17,18 @@
/// High level implementation details can be found in the comment of the parent
/// module.
use cmp;
use core::prelude::*;
use alloc::owned::Box;
use core::cmp;
use core::int;
use rustrt::local::Local;
use rustrt::task::{Task, BlockedTask};
use rustrt::thread::Thread;
use atomics;
use comm::Receiver;
use int;
use iter::Iterator;
use kinds::Send;
use ops::Drop;
use option::{Some, None};
use owned::Box;
use result::{Ok, Err, Result};
use rt::local::Local;
use rt::task::{Task, BlockedTask};
use rt::thread::Thread;
use spsc = sync::spsc_queue;
use sync::atomics;
use spsc = spsc_queue;
static DISCONNECTED: int = int::MIN;
#[cfg(test)]

View File

@ -33,21 +33,18 @@
/// of a synchronous channel. There are a few branches for the unbuffered case,
/// but they're mostly just relevant to blocking senders.
use core::prelude::*;
use alloc::owned::Box;
use collections::Vec;
use collections::Collection;
use iter::Iterator;
use kinds::Send;
use mem;
use ops::Drop;
use option::{Some, None, Option};
use owned::Box;
use ptr::RawPtr;
use result::{Result, Ok, Err};
use rt::local::Local;
use rt::mutex::{NativeMutex, LockGuard};
use rt::task::{Task, BlockedTask};
use sync::atomics;
use ty::Unsafe;
use vec::Vec;
use core::mem;
use core::ty::Unsafe;
use rustrt::local::Local;
use rustrt::mutex::{NativeMutex, LockGuard};
use rustrt::task::{Task, BlockedTask};
use atomics;
pub struct Packet<T> {
/// Only field outside of the mutex. Just done for kicks, but mainly because

View File

@ -42,29 +42,26 @@
//! let mut stealer2 = stealer.clone();
//! stealer2.steal();
#![experimental]
// NB: the "buffer pool" strategy is not done for speed, but rather for
// correctness. For more info, see the comment on `swap_buffer`
// FIXME: all atomic operations in this module use a SeqCst ordering. That is
// probably overkill
use alloc::arc::Arc;
use core::prelude::*;
use clone::Clone;
use iter::{range, Iterator};
use kinds::Send;
use kinds::marker;
use mem::{forget, min_align_of, size_of, transmute, overwrite};
use ops::Drop;
use option::{Option, Some, None};
use owned::Box;
use ptr::RawPtr;
use ptr;
use rt::heap::{allocate, deallocate};
use slice::ImmutableVector;
use sync::atomics::{AtomicInt, AtomicPtr, SeqCst};
use rt::exclusive::Exclusive;
use vec::Vec;
use alloc::arc::Arc;
use alloc::heap::{allocate, deallocate};
use alloc::owned::Box;
use collections::Vec;
use core::kinds::marker;
use core::mem::{forget, min_align_of, size_of, transmute};
use core::ptr;
use rustrt::exclusive::Exclusive;
use atomics::{AtomicInt, AtomicPtr, SeqCst};
// Once the queue is less than 1/K full, then it will be downsized. Note that
// the deque requires that this number be less than 2.
@ -148,7 +145,7 @@ impl<T: Send> BufferPool<T> {
/// Allocates a new buffer pool which in turn can be used to allocate new
/// deques.
pub fn new() -> BufferPool<T> {
BufferPool { pool: Arc::new(Exclusive::new(vec!())) }
BufferPool { pool: Arc::new(Exclusive::new(Vec::new())) }
}
/// Allocates a new work-stealing deque which will send/receiving memory to
@ -380,7 +377,7 @@ impl<T: Send> Buffer<T> {
// Unsafe because this unsafely overwrites possibly uninitialized or
// initialized data.
unsafe fn put(&self, i: int, t: T) {
overwrite(self.elem(i) as *mut T, t);
ptr::write(self.elem(i) as *mut T, t);
}
// Again, unsafe because this has incredibly dubious ownership violations.
@ -405,17 +402,16 @@ impl<T: Send> Drop for Buffer<T> {
#[cfg(test)]
mod tests {
use prelude::*;
use std::prelude::*;
use super::{Data, BufferPool, Abort, Empty, Worker, Stealer};
use mem;
use owned::Box;
use rt::thread::Thread;
use rand;
use rand::Rng;
use sync::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
AtomicUint, INIT_ATOMIC_UINT};
use vec;
use std::mem;
use std::rt::thread::Thread;
use std::rand;
use std::rand::Rng;
use atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
AtomicUint, INIT_ATOMIC_UINT};
use std::vec;
#[test]
fn smoke() {
@ -631,7 +627,6 @@ mod tests {
let mut rng = rand::task_rng();
let mut myhit = false;
let mut iter = 0;
'outer: loop {
for _ in range(0, rng.gen_range(0, AMT)) {
if !myhit && rng.gen_range(0, 3) == 2 {
@ -644,12 +639,9 @@ mod tests {
w.push((1, 2));
}
}
iter += 1;
debug!("loop iteration {}", iter);
for (i, slot) in hits.iter().enumerate() {
for slot in hits.iter() {
let amt = slot.load(SeqCst);
debug!("thread {}: {}", i, amt);
if amt == 0 { continue 'outer; }
}
if myhit {

View File

@ -8,9 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Concurrency-enabled mechanisms and primitives.
*/
//! Core concurrency-enabled mechanisms and primitives.
//!
//! This crate contains the implementations of Rust's core synchronization
//! primitives. This includes channels, mutexes, condition variables, etc.
//!
//! The interface of this crate is experimental, and it is not recommended to
//! use this crate specifically. Instead, its functionality is reexported
//! through `std::sync`.
#![crate_id = "sync#0.11.0-pre"]
#![crate_type = "rlib"]
@ -20,22 +25,24 @@
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/",
html_playground_url = "http://play.rust-lang.org/")]
#![feature(phase)]
#![feature(phase, globs, macro_rules)]
#![deny(deprecated_owned_vector)]
#![deny(missing_doc)]
#![no_std]
#[cfg(test, stage0)]
#[phase(syntax, link)] extern crate log;
#[cfg(test, not(stage0))]
#[phase(plugin, link)] extern crate log;
#[cfg(stage0)]
#[phase(syntax, link)] extern crate core;
#[cfg(not(stage0))]
#[phase(plugin, link)] extern crate core;
extern crate alloc;
extern crate collections;
extern crate rustrt;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate native;
#[cfg(test, stage0)] #[phase(syntax, link)] extern crate std;
#[cfg(test, not(stage0))] #[phase(plugin, link)] extern crate std;
pub use comm::{DuplexStream, duplex};
pub use task_pool::TaskPool;
pub use future::Future;
pub use alloc::arc::{Arc, Weak};
pub use lock::{Mutex, MutexGuard, Condvar, Barrier,
RWLock, RWLockReadGuard, RWLockWriteGuard};
@ -43,12 +50,33 @@ pub use lock::{Mutex, MutexGuard, Condvar, Barrier,
// The mutex/rwlock in this module are not meant for reexport
pub use raw::{Semaphore, SemaphoreGuard};
mod comm;
mod future;
mod lock;
// Core building blocks for all primitives in this crate
pub mod atomics;
// Concurrent data structures
mod mpsc_intrusive;
mod task_pool;
pub mod spsc_queue;
pub mod mpsc_queue;
pub mod mpmc_bounded_queue;
pub mod deque;
// Low-level concurrency primitives
pub mod raw;
pub mod mutex;
pub mod one;
// Message-passing based communication
pub mod comm;
// Higher level primitives based on those above
mod lock;
#[cfg(not(test))]
mod std {
pub use core::{fmt, option, cmp, clone};
}

View File

@ -19,8 +19,11 @@
//! after grabbing the lock, the second task will immediately fail because the
//! lock is now poisoned.
use std::task;
use std::ty::Unsafe;
use core::prelude::*;
use core::ty::Unsafe;
use rustrt::local::Local;
use rustrt::task::Task;
use raw;
@ -33,6 +36,10 @@ struct PoisonOnFail<'a> {
failed: bool,
}
fn failing() -> bool {
Local::borrow(None::<Task>).unwinder.unwinding()
}
impl<'a> PoisonOnFail<'a> {
fn check(flag: bool, name: &str) {
if flag {
@ -44,7 +51,7 @@ impl<'a> PoisonOnFail<'a> {
PoisonOnFail::check(*flag, name);
PoisonOnFail {
flag: flag,
failed: task::failing()
failed: failing()
}
}
}
@ -52,7 +59,7 @@ impl<'a> PoisonOnFail<'a> {
#[unsafe_destructor]
impl<'a> Drop for PoisonOnFail<'a> {
fn drop(&mut self) {
if !self.failed && task::failing() {
if !self.failed && failing() {
*self.flag = true;
}
}
@ -449,6 +456,7 @@ impl Barrier {
#[cfg(test)]
mod tests {
use std::prelude::*;
use std::comm::Empty;
use std::task;
use std::task::TaskBuilder;

View File

@ -25,19 +25,19 @@
* policies, either expressed or implied, of Dmitry Vyukov.
*/
#![experimental]
#![allow(missing_doc, dead_code)]
// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
use alloc::arc::Arc;
use core::prelude::*;
use clone::Clone;
use kinds::Send;
use num::next_power_of_two;
use option::{Option, Some, None};
use sync::atomics::{AtomicUint,Relaxed,Release,Acquire};
use vec::Vec;
use ty::Unsafe;
use alloc::arc::Arc;
use collections::Vec;
use core::num::next_power_of_two;
use core::ty::Unsafe;
use atomics::{AtomicUint,Relaxed,Release,Acquire};
struct Node<T> {
sequence: AtomicUint,
@ -165,7 +165,7 @@ impl<T: Send> Clone for Queue<T> {
#[cfg(test)]
mod tests {
use prelude::*;
use std::prelude::*;
use super::Queue;
use native;

View File

@ -30,12 +30,16 @@
//! This module implements an intrusive MPSC queue. This queue is incredibly
//! unsafe (due to use of unsafe pointers for nodes), and hence is not public.
#![experimental]
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/intrusive-mpsc-node-based-queue
use std::mem;
use std::sync::atomics;
use std::ty::Unsafe;
use core::prelude::*;
use core::atomics;
use core::mem;
use core::ty::Unsafe;
// NB: all links are done as AtomicUint instead of AtomicPtr to allow for static
// initialization.

View File

@ -35,17 +35,18 @@
//! method, and see the method for more information about it. Due to this
//! caveat, this queue may not be appropriate for all use-cases.
#![experimental]
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/non-intrusive-mpsc-node-based-queue
use kinds::Send;
use mem;
use ops::Drop;
use option::{Option, None, Some};
use owned::Box;
use ptr::RawPtr;
use sync::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
use ty::Unsafe;
use core::prelude::*;
use alloc::owned::Box;
use core::mem;
use core::ty::Unsafe;
use atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
/// A result of the `pop` function.
pub enum PopResult<T> {
@ -156,7 +157,7 @@ impl<T: Send> Drop for Queue<T> {
#[cfg(test)]
mod tests {
use prelude::*;
use std::prelude::*;
use alloc::arc::Arc;

View File

@ -57,14 +57,17 @@
// times in order to manage a few flags about who's blocking where and whether
// it's locked or not.
use std::kinds::marker;
use std::mem;
use std::rt::local::Local;
use std::rt::task::{BlockedTask, Task};
use std::rt::thread::Thread;
use std::sync::atomics;
use std::ty::Unsafe;
use std::rt::mutex;
use core::prelude::*;
use alloc::owned::Box;
use core::atomics;
use core::kinds::marker;
use core::mem;
use core::ty::Unsafe;
use rustrt::local::Local;
use rustrt::mutex;
use rustrt::task::{BlockedTask, Task};
use rustrt::thread::Thread;
use q = mpsc_intrusive;
@ -402,7 +405,7 @@ impl StaticMutex {
GreenAcquisition => { self.green_unlock(); }
NativeAcquisition => { self.native_unlock(); }
TryLockAcquisition => {}
Unlocked => unreachable!()
Unlocked => unreachable!(),
}
unlocked = true;
}
@ -417,7 +420,7 @@ impl StaticMutex {
GreenAcquisition => { self.green_unlock(); }
NativeAcquisition => { self.native_unlock(); }
TryLockAcquisition => {}
Unlocked => unreachable!()
Unlocked => unreachable!(),
}
}
@ -517,8 +520,9 @@ impl Drop for Mutex {
#[cfg(test)]
mod test {
extern crate native;
use std::prelude::*;
use super::{Mutex, StaticMutex, MUTEX_INIT};
use native;
#[test]
fn smoke() {

View File

@ -13,8 +13,10 @@
//! This primitive is meant to be used to run one-time initialization. An
//! example use case would be for initializing an FFI library.
use std::int;
use std::sync::atomics;
use core::prelude::*;
use core::int;
use core::atomics;
use mutex::{StaticMutex, MUTEX_INIT};
@ -124,8 +126,9 @@ impl Once {
#[cfg(test)]
mod test {
use super::{ONCE_INIT, Once};
use std::prelude::*;
use std::task;
use super::{ONCE_INIT, Once};
#[test]
fn smoke_once() {

View File

@ -15,13 +15,17 @@
//! `sync` crate which wrap values directly and provide safer abstractions for
//! containing data.
use std::kinds::marker;
use std::mem;
use std::sync::atomics;
use std::ty::Unsafe;
use std::finally::Finally;
use core::prelude::*;
use core::atomics;
use core::finally::Finally;
use core::kinds::marker;
use core::mem;
use core::ty::Unsafe;
use collections::Vec;
use mutex;
use comm::{Receiver, Sender, channel};
/****************************************************************************
* Internals
@ -608,6 +612,8 @@ impl<'a> Drop for RWLockReadGuard<'a> {
#[cfg(test)]
mod tests {
use std::prelude::*;
use Arc;
use super::{Semaphore, Mutex, RWLock, Condvar};

View File

@ -33,14 +33,15 @@
//! concurrently between two tasks. This data structure is safe to use and
//! enforces the semantics that there is one pusher and one popper.
use kinds::Send;
use mem;
use ops::Drop;
use option::{Some, None, Option};
use owned::Box;
use ptr::RawPtr;
use sync::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
use ty::Unsafe;
#![experimental]
use core::prelude::*;
use alloc::owned::Box;
use core::mem;
use core::ty::Unsafe;
use atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
// Node within the linked list queue of messages to send
struct Node<T> {
@ -226,7 +227,7 @@ impl<T: Send> Drop for Queue<T> {
#[cfg(test)]
mod test {
use prelude::*;
use std::prelude::*;
use alloc::arc::Arc;
use native;

View File

@ -29,8 +29,6 @@
extern crate serialize;
extern crate libc;
#[cfg(target_os = "macos")]
extern crate sync;
use std::io::BufReader;
use std::num;
@ -168,7 +166,7 @@ pub fn precise_time_ns() -> u64 {
fn os_precise_time_ns() -> u64 {
static mut TIMEBASE: libc::mach_timebase_info = libc::mach_timebase_info { numer: 0,
denom: 0 };
static mut ONCE: sync::one::Once = sync::one::ONCE_INIT;
static mut ONCE: std::sync::Once = std::sync::ONCE_INIT;
unsafe {
ONCE.doit(|| {
imp::mach_timebase_info(&mut TIMEBASE);

View File

@ -15,10 +15,9 @@
// This also serves as a pipes test, because Arcs are implemented with pipes.
extern crate sync;
extern crate time;
use sync::{Arc, Future, Mutex};
use std::sync::{Arc, Future, Mutex};
use std::os;
use std::uint;

View File

@ -15,11 +15,9 @@
// This also serves as a pipes test, because Arcs are implemented with pipes.
extern crate sync;
extern crate time;
use sync::{RWLock, Arc};
use sync::Future;
use std::sync::{RWLock, Arc, Future};
use std::os;
use std::uint;

View File

@ -8,11 +8,10 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate sync;
extern crate arena;
use std::iter::range_step;
use sync::Future;
use std::sync::Future;
use arena::TypedArena;
enum Tree<'a> {

View File

@ -10,12 +10,9 @@
// ignore-android see #10393 #13206
extern crate sync;
use std::string::String;
use std::slice;
use sync::Arc;
use sync::Future;
use std::sync::{Arc, Future};
static TABLE: [u8, ..4] = [ 'A' as u8, 'C' as u8, 'G' as u8, 'T' as u8 ];
static TABLE_SIZE: uint = 2 << 16;

View File

@ -13,13 +13,10 @@
// ignore-pretty very bad with line comments
extern crate sync;
use std::io;
use std::os;
use std::simd::f64x2;
use sync::Future;
use sync::Arc;
use std::sync::{Arc, Future};
static ITER: int = 50;
static LIMIT: f64 = 2.0;

View File

@ -40,9 +40,8 @@
#![feature(phase)]
#[phase(plugin)] extern crate green;
extern crate sync;
use sync::Arc;
use std::sync::Arc;
green_start!(main)

View File

@ -47,11 +47,10 @@
extern crate regex;
#[phase(plugin)]extern crate regex_macros;
extern crate sync;
use std::io;
use regex::{NoExpand, Regex};
use sync::Arc;
use std::sync::{Arc, Future};
fn count_matches(seq: &str, variant: &Regex) -> int {
let mut n = 0;
@ -75,7 +74,7 @@ fn main() {
let seq_arc = Arc::new(seq.clone()); // copy before it moves
let clen = seq.len();
let mut seqlen = sync::Future::spawn(proc() {
let mut seqlen = Future::spawn(proc() {
let substs = ~[
(regex!("B"), "(c|g|t)"),
(regex!("D"), "(a|g|t)"),
@ -111,7 +110,7 @@ fn main() {
for variant in variants.move_iter() {
let seq_arc_copy = seq_arc.clone();
variant_strs.push(variant.to_str().to_owned());
counts.push(sync::Future::spawn(proc() {
counts.push(Future::spawn(proc() {
count_matches(seq_arc_copy.as_slice(), &variant)
}));
}

View File

@ -11,13 +11,12 @@
#![feature(phase)]
#![allow(non_snake_case_functions)]
#[phase(plugin)] extern crate green;
extern crate sync;
use std::from_str::FromStr;
use std::iter::count;
use std::cmp::min;
use std::os;
use sync::{Arc, RWLock};
use std::sync::{Arc, RWLock};
green_start!(main)

View File

@ -10,9 +10,7 @@
// issue 7327
extern crate sync;
use sync::Arc;
use std::sync::Arc;
struct A { y: Arc<int>, x: Arc<int> }

View File

@ -8,9 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate sync;
use sync::Future;
use std::sync::Future;
fn main() {
let f = Future::from_value(());

View File

@ -10,9 +10,7 @@
// error-pattern: use of moved value
extern crate sync;
use sync::Arc;
use std::sync::Arc;
use std::task;
fn main() {

View File

@ -8,9 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate sync;
use sync::Arc;
use std::sync::Arc;
use std::task;
fn main() {

View File

@ -12,8 +12,7 @@
// This program would segfault if it were legal.
#![feature(once_fns)]
extern crate sync;
use sync::Arc;
use std::sync::Arc;
fn foo(blk: proc()) {
blk();

View File

@ -12,8 +12,7 @@
// This program would segfault if it were legal.
#![feature(once_fns)]
extern crate sync;
use sync::Arc;
use std::sync::Arc;
fn foo(blk: once ||) {
blk();

View File

@ -11,8 +11,7 @@
// Testing guarantees provided by once functions.
// This program would segfault if it were legal.
extern crate sync;
use sync::Arc;
use std::sync::Arc;
fn foo(blk: ||) {
blk();

View File

@ -10,8 +10,7 @@
// error-pattern:explicit failure
extern crate sync;
use sync::Arc;
use std::sync::Arc;
enum e<T> { e(Arc<T>) }

View File

@ -8,9 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate sync;
use sync::Arc;
use std::sync::Arc;
fn dispose(_x: Arc<bool>) { }
pub fn main() {

View File

@ -12,8 +12,7 @@
#![feature(once_fns)]
extern crate sync;
use sync::Arc;
use std::sync::Arc;
fn foo(blk: proc()) {
blk();

View File

@ -12,8 +12,7 @@
#![feature(once_fns)]
extern crate sync;
use sync::Arc;
use std::sync::Arc;
fn foo(blk: once ||) {
blk();

View File

@ -12,9 +12,7 @@
// and shared between tasks as long as all types fulfill Send.
extern crate sync;
use sync::Arc;
use std::sync::Arc;
use std::task;
trait Pet {