Use `const fn` to abstract away the contents of UnsafeCell & friends.

This commit is contained in:
Eduard Burtescu 2015-05-27 11:18:36 +03:00
parent 6e8e4f847c
commit 377b0900ae
76 changed files with 417 additions and 525 deletions

View File

@ -399,7 +399,7 @@ fn test_map_in_place_zero_sized() {
#[test]
fn test_map_in_place_zero_drop_count() {
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::sync::atomic::{AtomicUsize, Ordering};
#[derive(Clone, PartialEq, Debug)]
struct Nothing;
@ -413,7 +413,7 @@ fn test_map_in_place_zero_drop_count() {
}
}
const NUM_ELEMENTS: usize = 2;
static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0);
let v = repeat(Nothing).take(NUM_ELEMENTS).collect::<Vec<_>>();

View File

@ -76,7 +76,6 @@ use marker::Sync;
use intrinsics;
use cell::UnsafeCell;
use marker::PhantomData;
use default::Default;
@ -87,8 +86,8 @@ pub struct AtomicBool {
}
impl Default for AtomicBool {
fn default() -> AtomicBool {
ATOMIC_BOOL_INIT
fn default() -> Self {
Self::new(Default::default())
}
}
@ -101,8 +100,8 @@ pub struct AtomicIsize {
}
impl Default for AtomicIsize {
fn default() -> AtomicIsize {
ATOMIC_ISIZE_INIT
fn default() -> Self {
Self::new(Default::default())
}
}
@ -115,8 +114,8 @@ pub struct AtomicUsize {
}
impl Default for AtomicUsize {
fn default() -> AtomicUsize {
ATOMIC_USIZE_INIT
fn default() -> Self {
Self::new(Default::default())
}
}
@ -125,8 +124,7 @@ unsafe impl Sync for AtomicUsize {}
/// A raw pointer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
p: UnsafeCell<usize>,
_marker: PhantomData<*mut T>,
p: UnsafeCell<*mut T>,
}
impl<T> Default for AtomicPtr<T> {
@ -175,16 +173,13 @@ pub enum Ordering {
/// An `AtomicBool` initialized to `false`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool =
AtomicBool { v: UnsafeCell { value: 0 } };
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
/// An `AtomicIsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_ISIZE_INIT: AtomicIsize =
AtomicIsize { v: UnsafeCell { value: 0 } };
pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
/// An `AtomicUsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_USIZE_INIT: AtomicUsize =
AtomicUsize { v: UnsafeCell { value: 0, } };
pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
const UINT_TRUE: usize = !0;
@ -202,9 +197,8 @@ impl AtomicBool {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(v: bool) -> AtomicBool {
let val = if v { UINT_TRUE } else { 0 };
AtomicBool { v: UnsafeCell::new(val) }
pub const fn new(v: bool) -> AtomicBool {
AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
}
/// Loads a value from the bool.
@ -445,7 +439,7 @@ impl AtomicIsize {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(v: isize) -> AtomicIsize {
pub const fn new(v: isize) -> AtomicIsize {
AtomicIsize {v: UnsafeCell::new(v)}
}
@ -633,7 +627,7 @@ impl AtomicUsize {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(v: usize) -> AtomicUsize {
pub const fn new(v: usize) -> AtomicUsize {
AtomicUsize { v: UnsafeCell::new(v) }
}
@ -821,9 +815,8 @@ impl<T> AtomicPtr<T> {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(p: *mut T) -> AtomicPtr<T> {
AtomicPtr { p: UnsafeCell::new(p as usize),
_marker: PhantomData }
pub const fn new(p: *mut T) -> AtomicPtr<T> {
AtomicPtr { p: UnsafeCell::new(p) }
}
/// Loads a value from the pointer.
@ -848,7 +841,7 @@ impl<T> AtomicPtr<T> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn load(&self, order: Ordering) -> *mut T {
unsafe {
atomic_load(self.p.get(), order) as *mut T
atomic_load(self.p.get() as *mut usize, order) as *mut T
}
}
@ -875,7 +868,7 @@ impl<T> AtomicPtr<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn store(&self, ptr: *mut T, order: Ordering) {
unsafe { atomic_store(self.p.get(), ptr as usize, order); }
unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
}
/// Stores a value into the pointer, returning the old value.
@ -897,7 +890,7 @@ impl<T> AtomicPtr<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
unsafe { atomic_swap(self.p.get(), ptr as usize, order) as *mut T }
unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
}
/// Stores a value into the pointer if the current value is the same as the expected value.
@ -925,7 +918,7 @@ impl<T> AtomicPtr<T> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
unsafe {
atomic_compare_and_swap(self.p.get(), old as usize,
atomic_compare_and_swap(self.p.get() as *mut usize, old as usize,
new as usize, order) as *mut T
}
}

View File

@ -170,7 +170,7 @@ impl<T:Copy> Cell<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn new(value: T) -> Cell<T> {
pub const fn new(value: T) -> Cell<T> {
Cell {
value: UnsafeCell::new(value),
}
@ -302,7 +302,7 @@ impl<T> RefCell<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn new(value: T) -> RefCell<T> {
pub const fn new(value: T) -> RefCell<T> {
RefCell {
value: UnsafeCell::new(value),
borrow: Cell::new(UNUSED),
@ -663,7 +663,7 @@ impl<T> UnsafeCell<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn new(value: T) -> UnsafeCell<T> {
pub const fn new(value: T) -> UnsafeCell<T> {
UnsafeCell { value: value }
}

View File

@ -74,6 +74,7 @@
#![feature(concat_idents)]
#![feature(reflect)]
#![feature(custom_attribute)]
#![feature(const_fn)]
#[macro_use]
mod macros;

View File

@ -70,13 +70,15 @@ fn int_xor() {
assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
}
static S_BOOL : AtomicBool = ATOMIC_BOOL_INIT;
static S_INT : AtomicIsize = ATOMIC_ISIZE_INIT;
static S_UINT : AtomicUsize = ATOMIC_USIZE_INIT;
static S_FALSE: AtomicBool = AtomicBool::new(false);
static S_TRUE: AtomicBool = AtomicBool::new(true);
static S_INT: AtomicIsize = AtomicIsize::new(0);
static S_UINT: AtomicUsize = AtomicUsize::new(0);
#[test]
fn static_init() {
assert!(!S_BOOL.load(SeqCst));
assert!(!S_FALSE.load(SeqCst));
assert!(S_TRUE.load(SeqCst));
assert!(S_INT.load(SeqCst) == 0);
assert!(S_UINT.load(SeqCst) == 0);
}

View File

@ -184,7 +184,7 @@ use std::mem;
use std::env;
use std::rt;
use std::slice;
use std::sync::{Once, ONCE_INIT, StaticMutex, MUTEX_INIT};
use std::sync::{Once, StaticMutex};
use directive::LOG_LEVEL_NAMES;
@ -200,7 +200,7 @@ pub const MAX_LOG_LEVEL: u32 = 255;
/// The default logging level of a crate if no other is specified.
const DEFAULT_LOG_LEVEL: u32 = 1;
static LOCK: StaticMutex = MUTEX_INIT;
static LOCK: StaticMutex = StaticMutex::new();
/// An unsafe constant that is the maximum logging level of any module
/// specified. This is the first line of defense to determining whether a
@ -367,7 +367,7 @@ pub struct LogLocation {
/// module's log statement should be emitted or not.
#[doc(hidden)]
pub fn mod_enabled(level: u32, module: &str) -> bool {
static INIT: Once = ONCE_INIT;
static INIT: Once = Once::new();
INIT.call_once(init);
// It's possible for many threads are in this function, only one of them

View File

@ -32,7 +32,7 @@ use std::env;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
use std::sync::atomic::{AtomicBool, Ordering};
use syntax::ast;
fn print_help_message() {
@ -76,7 +76,7 @@ pub fn maybe_print_constraints_for<'a, 'tcx>(region_vars: &RegionVarBindings<'a,
let output_path = {
let output_template = match requested_output {
Ok(ref s) if &**s == "help" => {
static PRINTED_YET: AtomicBool = ATOMIC_BOOL_INIT;
static PRINTED_YET: AtomicBool = AtomicBool::new(false);
if !PRINTED_YET.load(Ordering::SeqCst) {
print_help_message();
PRINTED_YET.store(true, Ordering::SeqCst);

View File

@ -1005,8 +1005,8 @@ pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) {
}
unsafe fn configure_llvm(sess: &Session) {
use std::sync::{Once, ONCE_INIT};
static INIT: Once = ONCE_INIT;
use std::sync::Once;
static INIT: Once = Once::new();
// Copy what clang does by turning on loop vectorization at O2 and
// slp vectorization at O3

View File

@ -39,6 +39,7 @@
#![feature(path_ext)]
#![feature(fs)]
#![feature(path_relative_from)]
#![feature(std_misc)]
#![allow(trivial_casts)]

View File

@ -2653,8 +2653,8 @@ pub fn trans_crate<'tcx>(analysis: ty::CrateAnalysis<'tcx>)
// Before we touch LLVM, make sure that multithreading is enabled.
unsafe {
use std::sync::{Once, ONCE_INIT};
static INIT: Once = ONCE_INIT;
use std::sync::Once;
static INIT: Once = Once::new();
static mut POISONED: bool = false;
INIT.call_once(|| {
if llvm::LLVMStartMultithreaded() != 1 {

View File

@ -211,8 +211,8 @@ mod dl {
pub fn check_for_errors_in<T, F>(f: F) -> Result<T, String> where
F: FnOnce() -> T,
{
use sync::{StaticMutex, MUTEX_INIT};
static LOCK: StaticMutex = MUTEX_INIT;
use sync::StaticMutex;
static LOCK: StaticMutex = StaticMutex::new();
unsafe {
// dlerror isn't thread safe, so we need to lock around this entire
// sequence

View File

@ -23,8 +23,8 @@ use ffi::{OsStr, OsString};
use fmt;
use io;
use path::{Path, PathBuf};
use sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT, Ordering};
use sync::{StaticMutex, MUTEX_INIT};
use sync::atomic::{AtomicIsize, Ordering};
use sync::StaticMutex;
use sys::os as os_imp;
/// Returns the current working directory as a `PathBuf`.
@ -70,7 +70,7 @@ pub fn set_current_dir<P: AsRef<Path>>(p: P) -> io::Result<()> {
os_imp::chdir(p.as_ref())
}
static ENV_LOCK: StaticMutex = MUTEX_INIT;
static ENV_LOCK: StaticMutex = StaticMutex::new();
/// An iterator over a snapshot of the environment variables of this process.
///
@ -475,7 +475,7 @@ pub fn current_exe() -> io::Result<PathBuf> {
os_imp::current_exe()
}
static EXIT_STATUS: AtomicIsize = ATOMIC_ISIZE_INIT;
static EXIT_STATUS: AtomicIsize = AtomicIsize::new(0);
/// Sets the process exit code
///

View File

@ -11,31 +11,31 @@
use prelude::v1::*;
use boxed;
use cell::UnsafeCell;
use cell::Cell;
use rt;
use sync::{StaticMutex, Arc};
pub struct Lazy<T> {
pub lock: StaticMutex,
pub ptr: UnsafeCell<*mut Arc<T>>,
pub init: fn() -> Arc<T>,
lock: StaticMutex,
ptr: Cell<*mut Arc<T>>,
init: fn() -> Arc<T>,
}
unsafe impl<T> Sync for Lazy<T> {}
macro_rules! lazy_init {
($init:expr) => (::io::lazy::Lazy {
lock: ::sync::MUTEX_INIT,
ptr: ::cell::UnsafeCell { value: 0 as *mut _ },
init: $init,
})
}
impl<T: Send + Sync + 'static> Lazy<T> {
pub const fn new(init: fn() -> Arc<T>) -> Lazy<T> {
Lazy {
lock: StaticMutex::new(),
ptr: Cell::new(0 as *mut _),
init: init
}
}
pub fn get(&'static self) -> Option<Arc<T>> {
let _g = self.lock.lock();
let ptr = self.ptr.get();
unsafe {
let ptr = *self.ptr.get();
if ptr.is_null() {
Some(self.init())
} else if ptr as usize == 1 {
@ -53,14 +53,14 @@ impl<T: Send + Sync + 'static> Lazy<T> {
// `Arc`.
let registered = rt::at_exit(move || {
let g = self.lock.lock();
let ptr = *self.ptr.get();
*self.ptr.get() = 1 as *mut _;
let ptr = self.ptr.get();
self.ptr.set(1 as *mut _);
drop(g);
drop(Box::from_raw(ptr))
});
let ret = (self.init)();
if registered.is_ok() {
*self.ptr.get() = boxed::into_raw(Box::new(ret.clone()));
self.ptr.set(boxed::into_raw(Box::new(ret.clone())));
}
return ret
}

View File

@ -36,13 +36,12 @@ pub use self::stdio::{StdoutLock, StderrLock, StdinLock};
#[doc(no_inline, hidden)]
pub use self::stdio::{set_panic, set_print};
#[macro_use] mod lazy;
pub mod prelude;
mod buffered;
mod cursor;
mod error;
mod impls;
mod lazy;
mod util;
mod stdio;

View File

@ -122,7 +122,7 @@ pub struct StdinLock<'a> {
/// locked version, `StdinLock`, implements both `Read` and `BufRead`, however.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdin() -> Stdin {
static INSTANCE: Lazy<Mutex<BufReader<StdinRaw>>> = lazy_init!(stdin_init);
static INSTANCE: Lazy<Mutex<BufReader<StdinRaw>>> = Lazy::new(stdin_init);
return Stdin {
inner: INSTANCE.get().expect("cannot access stdin during shutdown"),
};
@ -236,7 +236,7 @@ pub struct StdoutLock<'a> {
/// The returned handle implements the `Write` trait.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdout() -> Stdout {
static INSTANCE: Lazy<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = lazy_init!(stdout_init);
static INSTANCE: Lazy<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = Lazy::new(stdout_init);
return Stdout {
inner: INSTANCE.get().expect("cannot access stdout during shutdown"),
};
@ -308,7 +308,7 @@ pub struct StderrLock<'a> {
/// The returned handle implements the `Write` trait.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stderr() -> Stderr {
static INSTANCE: Lazy<ReentrantMutex<RefCell<StderrRaw>>> = lazy_init!(stderr_init);
static INSTANCE: Lazy<ReentrantMutex<RefCell<StderrRaw>>> = Lazy::new(stderr_init);
return Stderr {
inner: INSTANCE.get().expect("cannot access stderr during shutdown"),
};

View File

@ -109,6 +109,7 @@
#![feature(box_syntax)]
#![feature(collections)]
#![feature(core)]
#![feature(const_fn)]
#![feature(into_cow)]
#![feature(lang_items)]
#![feature(libc)]

View File

@ -12,9 +12,9 @@ use prelude::v1::*;
use env;
use net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr, ToSocketAddrs};
use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use sync::atomic::{AtomicUsize, Ordering};
static PORT: AtomicUsize = ATOMIC_USIZE_INIT;
static PORT: AtomicUsize = AtomicUsize::new(0);
pub fn next_test_ip4() -> SocketAddr {
let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port();

View File

@ -96,11 +96,11 @@ mod imp {
target_arch = "aarch64",
target_arch = "powerpc")))]
fn is_getrandom_available() -> bool {
use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
use sync::{Once, ONCE_INIT};
use sync::atomic::{AtomicBool, Ordering};
use sync::Once;
static CHECKER: Once = ONCE_INIT;
static AVAILABLE: AtomicBool = ATOMIC_BOOL_INIT;
static CHECKER: Once = Once::new();
static AVAILABLE: AtomicBool = AtomicBool::new(false);
CHECKER.call_once(|| {
let mut buf: [u8; 0] = [];

View File

@ -52,10 +52,10 @@ mod imp {
use mem;
use ffi::CStr;
use sync::{StaticMutex, MUTEX_INIT};
use sync::StaticMutex;
static mut GLOBAL_ARGS_PTR: usize = 0;
static LOCK: StaticMutex = MUTEX_INIT;
static LOCK: StaticMutex = StaticMutex::new();
pub unsafe fn init(argc: isize, argv: *const *const u8) {
let args = load_argc_and_argv(argc, argv);

View File

@ -20,7 +20,7 @@ use boxed;
use boxed::Box;
use vec::Vec;
use thunk::Thunk;
use sys_common::mutex::{Mutex, MUTEX_INIT};
use sys_common::mutex::Mutex;
type Queue = Vec<Thunk<'static>>;
@ -28,7 +28,7 @@ type Queue = Vec<Thunk<'static>>;
// on poisoning and this module needs to operate at a lower level than requiring
// the thread infrastructure to be in place (useful on the borders of
// initialization/destruction).
static LOCK: Mutex = MUTEX_INIT;
static LOCK: Mutex = Mutex::new();
static mut QUEUE: *mut Queue = 0 as *mut Queue;
// The maximum number of times the cleanup routines will be run. While running

View File

@ -22,7 +22,7 @@ pub use sys::backtrace::write;
// For now logging is turned off by default, and this function checks to see
// whether the magical environment variable is present to see if it's turned on.
pub fn log_enabled() -> bool {
static ENABLED: atomic::AtomicIsize = atomic::ATOMIC_ISIZE_INIT;
static ENABLED: atomic::AtomicIsize = atomic::AtomicIsize::new(0);
match ENABLED.load(Ordering::SeqCst) {
1 => return false,
2 => return true,

View File

@ -72,7 +72,7 @@ use intrinsics;
use libc::c_void;
use mem;
use sync::atomic::{self, Ordering};
use sys_common::mutex::{Mutex, MUTEX_INIT};
use sys_common::mutex::Mutex;
// The actual unwinding implementation is cfg'd here, and we've got two current
// implementations. One goes through SEH on Windows and the other goes through
@ -89,15 +89,15 @@ pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: u32);
// For more information, see below.
const MAX_CALLBACKS: usize = 16;
static CALLBACKS: [atomic::AtomicUsize; MAX_CALLBACKS] =
[atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT];
static CALLBACK_CNT: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
[atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0)];
static CALLBACK_CNT: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
thread_local! { static PANICKING: Cell<bool> = Cell::new(false) }
@ -243,7 +243,7 @@ fn begin_unwind_inner(msg: Box<Any + Send>,
// `std::sync` one as accessing TLS can cause weird recursive problems (and
// we don't need poison checking).
unsafe {
static LOCK: Mutex = MUTEX_INIT;
static LOCK: Mutex = Mutex::new();
static mut INIT: bool = false;
LOCK.lock();
if !INIT {

View File

@ -42,7 +42,7 @@ pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
}
pub fn min_stack() -> usize {
static MIN: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
static MIN: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
match MIN.load(Ordering::SeqCst) {
0 => {}
n => return n - 1,

View File

@ -10,7 +10,7 @@
use prelude::v1::*;
use sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use sync::atomic::{AtomicUsize, Ordering};
use sync::{mutex, MutexGuard, PoisonError};
use sys_common::condvar as sys;
use sys_common::mutex as sys_mutex;
@ -84,10 +84,7 @@ pub struct StaticCondvar {
/// Constant initializer for a statically allocated condition variable.
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub const CONDVAR_INIT: StaticCondvar = StaticCondvar {
inner: sys::CONDVAR_INIT,
mutex: ATOMIC_USIZE_INIT,
};
pub const CONDVAR_INIT: StaticCondvar = StaticCondvar::new();
impl Condvar {
/// Creates a new condition variable which is ready to be waited on and
@ -96,7 +93,7 @@ impl Condvar {
pub fn new() -> Condvar {
Condvar {
inner: box StaticCondvar {
inner: unsafe { sys::Condvar::new() },
inner: sys::Condvar::new(),
mutex: AtomicUsize::new(0),
}
}
@ -234,6 +231,16 @@ impl Drop for Condvar {
}
impl StaticCondvar {
/// Creates a new condition variable
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
pub const fn new() -> StaticCondvar {
StaticCondvar {
inner: sys::Condvar::new(),
mutex: AtomicUsize::new(0),
}
}
/// Blocks the current thread until this condition variable receives a
/// notification.
///
@ -388,10 +395,10 @@ impl StaticCondvar {
mod tests {
use prelude::v1::*;
use super::{StaticCondvar, CONDVAR_INIT};
use super::StaticCondvar;
use sync::mpsc::channel;
use sync::{StaticMutex, MUTEX_INIT, Condvar, Mutex, Arc};
use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use sync::{StaticMutex, Condvar, Mutex, Arc};
use sync::atomic::{AtomicUsize, Ordering};
use thread;
use time::Duration;
use u32;
@ -405,7 +412,7 @@ mod tests {
#[test]
fn static_smoke() {
static C: StaticCondvar = CONDVAR_INIT;
static C: StaticCondvar = StaticCondvar::new();
C.notify_one();
C.notify_all();
unsafe { C.destroy(); }
@ -413,8 +420,8 @@ mod tests {
#[test]
fn notify_one() {
static C: StaticCondvar = CONDVAR_INIT;
static M: StaticMutex = MUTEX_INIT;
static C: StaticCondvar = StaticCondvar::new();
static M: StaticMutex = StaticMutex::new();
let g = M.lock().unwrap();
let _t = thread::spawn(move|| {
@ -464,8 +471,8 @@ mod tests {
#[test]
fn wait_timeout_ms() {
static C: StaticCondvar = CONDVAR_INIT;
static M: StaticMutex = MUTEX_INIT;
static C: StaticCondvar = StaticCondvar::new();
static M: StaticMutex = StaticMutex::new();
let g = M.lock().unwrap();
let (g, _no_timeout) = C.wait_timeout_ms(g, 1).unwrap();
@ -483,9 +490,9 @@ mod tests {
#[test]
fn wait_timeout_with() {
static C: StaticCondvar = CONDVAR_INIT;
static M: StaticMutex = MUTEX_INIT;
static S: AtomicUsize = ATOMIC_USIZE_INIT;
static C: StaticCondvar = StaticCondvar::new();
static M: StaticMutex = StaticMutex::new();
static S: AtomicUsize = AtomicUsize::new(0);
let g = M.lock().unwrap();
let (g, success) = C.wait_timeout_with(g, Duration::new(0, 1000), |_| {
@ -530,9 +537,9 @@ mod tests {
#[test]
#[should_panic]
fn two_mutexes() {
static M1: StaticMutex = MUTEX_INIT;
static M2: StaticMutex = MUTEX_INIT;
static C: StaticCondvar = CONDVAR_INIT;
static M1: StaticMutex = StaticMutex::new();
static M2: StaticMutex = StaticMutex::new();
static C: StaticCondvar = StaticCondvar::new();
let mut g = M1.lock().unwrap();
let _t = thread::spawn(move|| {

View File

@ -11,7 +11,7 @@
//! Generic support for building blocking abstractions.
use thread::{self, Thread};
use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
use sync::atomic::{AtomicBool, Ordering};
use sync::Arc;
use marker::{Sync, Send};
use mem;
@ -41,7 +41,7 @@ impl !Sync for WaitToken {}
pub fn tokens() -> (WaitToken, SignalToken) {
let inner = Arc::new(Inner {
thread: thread::current(),
woken: ATOMIC_BOOL_INIT,
woken: AtomicBool::new(false),
});
let wait_token = WaitToken {
inner: inner.clone(),

View File

@ -178,17 +178,14 @@ impl<'a, T: ?Sized> !marker::Send for MutexGuard<'a, T> {}
/// other mutex constants.
#[unstable(feature = "std_misc",
reason = "may be merged with Mutex in the future")]
pub const MUTEX_INIT: StaticMutex = StaticMutex {
lock: sys::MUTEX_INIT,
poison: poison::FLAG_INIT,
};
pub const MUTEX_INIT: StaticMutex = StaticMutex::new();
impl<T> Mutex<T> {
/// Creates a new mutex in an unlocked state ready for use.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(t: T) -> Mutex<T> {
Mutex {
inner: box MUTEX_INIT,
inner: box StaticMutex::new(),
data: UnsafeCell::new(t),
}
}
@ -271,9 +268,19 @@ impl<T: ?Sized + fmt::Debug + 'static> fmt::Debug for Mutex<T> {
struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
static DUMMY: Dummy = Dummy(UnsafeCell { value: () });
static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
impl StaticMutex {
/// Creates a new mutex in an unlocked state ready for use.
#[unstable(feature = "std_misc",
reason = "may be merged with Mutex in the future")]
pub const fn new() -> StaticMutex {
StaticMutex {
lock: sys::Mutex::new(),
poison: poison::Flag::new(),
}
}
/// Acquires this lock, see `Mutex::lock`
#[inline]
#[unstable(feature = "std_misc",
@ -365,7 +372,7 @@ mod tests {
use prelude::v1::*;
use sync::mpsc::channel;
use sync::{Arc, Mutex, StaticMutex, MUTEX_INIT, Condvar};
use sync::{Arc, Mutex, StaticMutex, Condvar};
use thread;
struct Packet<T: Send>(Arc<(Mutex<T>, Condvar)>);
@ -382,7 +389,7 @@ mod tests {
#[test]
fn smoke_static() {
static M: StaticMutex = MUTEX_INIT;
static M: StaticMutex = StaticMutex::new();
unsafe {
drop(M.lock().unwrap());
drop(M.lock().unwrap());
@ -392,7 +399,7 @@ mod tests {
#[test]
fn lots_and_lots() {
static M: StaticMutex = MUTEX_INIT;
static M: StaticMutex = StaticMutex::new();
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;

View File

@ -16,8 +16,8 @@
use prelude::v1::*;
use isize;
use sync::atomic::{AtomicIsize, Ordering, ATOMIC_ISIZE_INIT};
use sync::{StaticMutex, MUTEX_INIT};
use sync::atomic::{AtomicIsize, Ordering};
use sync::StaticMutex;
/// A synchronization primitive which can be used to run a one-time global
/// initialization. Useful for one-time initialization for FFI or related
@ -44,13 +44,19 @@ pub struct Once {
/// Initialization value for static `Once` values.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ONCE_INIT: Once = Once {
mutex: MUTEX_INIT,
cnt: ATOMIC_ISIZE_INIT,
lock_cnt: ATOMIC_ISIZE_INIT,
};
pub const ONCE_INIT: Once = Once::new();
impl Once {
/// Creates a new `Once` value.
#[unstable(feature = "std_misc")]
pub const fn new() -> Once {
Once {
mutex: StaticMutex::new(),
cnt: AtomicIsize::new(0),
lock_cnt: AtomicIsize::new(0),
}
}
/// Performs an initialization routine once and only once. The given closure
/// will be executed if this is the first time `call_once` has been called,
/// and otherwise the routine will *not* be invoked.
@ -129,12 +135,12 @@ mod tests {
use prelude::v1::*;
use thread;
use super::{ONCE_INIT, Once};
use super::Once;
use sync::mpsc::channel;
#[test]
fn smoke_once() {
static O: Once = ONCE_INIT;
static O: Once = Once::new();
let mut a = 0;
O.call_once(|| a += 1);
assert_eq!(a, 1);
@ -144,7 +150,7 @@ mod tests {
#[test]
fn stampede_once() {
static O: Once = ONCE_INIT;
static O: Once = Once::new();
static mut run: bool = false;
let (tx, rx) = channel();

View File

@ -102,10 +102,7 @@ pub struct StaticRwLock {
/// Constant initialization for a statically-initialized rwlock.
#[unstable(feature = "std_misc",
reason = "may be merged with RwLock in the future")]
pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock {
lock: sys::RWLOCK_INIT,
poison: poison::FLAG_INIT,
};
pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock::new();
/// RAII structure used to release the shared read access of a lock when
/// dropped.
@ -142,7 +139,7 @@ impl<T> RwLock<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(t: T) -> RwLock<T> {
RwLock { inner: box RW_LOCK_INIT, data: UnsafeCell::new(t) }
RwLock { inner: box StaticRwLock::new(), data: UnsafeCell::new(t) }
}
}
@ -280,9 +277,19 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
static DUMMY: Dummy = Dummy(UnsafeCell { value: () });
static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
impl StaticRwLock {
/// Creates a new rwlock.
#[unstable(feature = "std_misc",
reason = "may be merged with RwLock in the future")]
pub const fn new() -> StaticRwLock {
StaticRwLock {
lock: sys::RWLock::new(),
poison: poison::Flag::new(),
}
}
/// Locks this rwlock with shared read access, blocking the current thread
/// until it can be acquired.
///
@ -420,7 +427,7 @@ mod tests {
use rand::{self, Rng};
use sync::mpsc::channel;
use thread;
use sync::{Arc, RwLock, StaticRwLock, TryLockError, RW_LOCK_INIT};
use sync::{Arc, RwLock, StaticRwLock, TryLockError};
#[test]
fn smoke() {
@ -433,7 +440,7 @@ mod tests {
#[test]
fn static_smoke() {
static R: StaticRwLock = RW_LOCK_INIT;
static R: StaticRwLock = StaticRwLock::new();
drop(R.read().unwrap());
drop(R.write().unwrap());
drop((R.read().unwrap(), R.read().unwrap()));
@ -443,7 +450,7 @@ mod tests {
#[test]
fn frob() {
static R: StaticRwLock = RW_LOCK_INIT;
static R: StaticRwLock = StaticRwLock::new();
const N: usize = 10;
const M: usize = 1000;

View File

@ -20,16 +20,12 @@ use sys::condvar as imp;
/// this type.
pub struct Condvar(imp::Condvar);
/// Static initializer for condition variables.
pub const CONDVAR_INIT: Condvar = Condvar(imp::CONDVAR_INIT);
impl Condvar {
/// Creates a new condition variable for use.
///
/// Behavior is undefined if the condition variable is moved after it is
/// first used with any of the functions below.
#[inline]
pub unsafe fn new() -> Condvar { Condvar(imp::Condvar::new()) }
pub const fn new() -> Condvar { Condvar(imp::Condvar::new()) }
/// Signals one waiter on this condition variable to wake up.
#[inline]

View File

@ -20,10 +20,13 @@ pub struct Mutex(imp::Mutex);
unsafe impl Sync for Mutex {}
/// Constant initializer for statically allocated mutexes.
pub const MUTEX_INIT: Mutex = Mutex(imp::MUTEX_INIT);
impl Mutex {
/// Creates a new mutex for use.
///
/// Behavior is undefined if the mutex is moved after it is
/// first used with any of the functions below.
pub const fn new() -> Mutex { Mutex(imp::Mutex::new()) }
/// Locks the mutex blocking the current thread until it is available.
///
/// Behavior is undefined if the mutex has been moved between this and any

View File

@ -10,26 +10,28 @@
use prelude::v1::*;
use marker::Reflect;
use cell::UnsafeCell;
use cell::Cell;
use error::{Error};
use fmt;
use marker::Reflect;
use thread;
pub struct Flag { failed: UnsafeCell<bool> }
pub struct Flag { failed: Cell<bool> }
// This flag is only ever accessed with a lock previously held. Note that this
// a totally private structure.
unsafe impl Send for Flag {}
unsafe impl Sync for Flag {}
pub const FLAG_INIT: Flag = Flag { failed: UnsafeCell { value: false } };
impl Flag {
pub const fn new() -> Flag {
Flag { failed: Cell::new(false) }
}
#[inline]
pub fn borrow(&self) -> LockResult<Guard> {
let ret = Guard { panicking: thread::panicking() };
if unsafe { *self.failed.get() } {
if self.get() {
Err(PoisonError::new(ret))
} else {
Ok(ret)
@ -39,13 +41,13 @@ impl Flag {
#[inline]
pub fn done(&self, guard: &Guard) {
if !guard.panicking && thread::panicking() {
unsafe { *self.failed.get() = true; }
self.failed.set(true);
}
}
#[inline]
pub fn get(&self) -> bool {
unsafe { *self.failed.get() }
self.failed.get()
}
}

View File

@ -54,7 +54,7 @@ impl<T> ReentrantMutex<T> {
unsafe {
let mut mutex = ReentrantMutex {
inner: box sys::ReentrantMutex::uninitialized(),
poison: poison::FLAG_INIT,
poison: poison::Flag::new(),
data: t,
};
mutex.inner.init();

View File

@ -17,10 +17,13 @@ use sys::rwlock as imp;
/// safer types at the top level of this crate instead of this type.
pub struct RWLock(imp::RWLock);
/// Constant initializer for static RWLocks.
pub const RWLOCK_INIT: RWLock = RWLock(imp::RWLOCK_INIT);
impl RWLock {
/// Creates a new reader-writer lock for use.
///
/// Behavior is undefined if the reader-writer lock is moved after it is
/// first used with any of the functions below.
pub const fn new() -> RWLock { RWLock(imp::RWLock::new()) }
/// Acquires shared access to the underlying lock, blocking the current
/// thread to do so.
///

View File

@ -86,19 +86,13 @@ use sys::thread_local as imp;
/// }
/// ```
pub struct StaticKey {
/// Inner static TLS key (internals), created with by `INIT_INNER` in this
/// module.
pub inner: StaticKeyInner,
/// Inner static TLS key (internals).
key: AtomicUsize,
/// Destructor for the TLS value.
///
/// See `Key::new` for information about when the destructor runs and how
/// it runs.
pub dtor: Option<unsafe extern fn(*mut u8)>,
}
/// Inner contents of `StaticKey`, created by the `INIT_INNER` constant.
pub struct StaticKeyInner {
key: AtomicUsize,
dtor: Option<unsafe extern fn(*mut u8)>,
}
/// A type for a safely managed OS-based TLS slot.
@ -129,19 +123,16 @@ pub struct Key {
/// Constant initialization value for static TLS keys.
///
/// This value specifies no destructor by default.
pub const INIT: StaticKey = StaticKey {
inner: INIT_INNER,
dtor: None,
};
/// Constant initialization value for the inner part of static TLS keys.
///
/// This value allows specific configuration of the destructor for a TLS key.
pub const INIT_INNER: StaticKeyInner = StaticKeyInner {
key: atomic::ATOMIC_USIZE_INIT,
};
pub const INIT: StaticKey = StaticKey::new(None);
impl StaticKey {
pub const fn new(dtor: Option<unsafe extern fn(*mut u8)>) -> StaticKey {
StaticKey {
key: atomic::AtomicUsize::new(0),
dtor: dtor
}
}
/// Gets the value associated with this TLS key
///
/// This will lazily allocate a TLS key from the OS if one has not already
@ -164,7 +155,7 @@ impl StaticKey {
/// Note that this does *not* run the user-provided destructor if one was
/// specified at definition time. Doing so must be done manually.
pub unsafe fn destroy(&self) {
match self.inner.key.swap(0, Ordering::SeqCst) {
match self.key.swap(0, Ordering::SeqCst) {
0 => {}
n => { imp::destroy(n as imp::Key) }
}
@ -172,7 +163,7 @@ impl StaticKey {
#[inline]
unsafe fn key(&self) -> imp::Key {
match self.inner.key.load(Ordering::Relaxed) {
match self.key.load(Ordering::Relaxed) {
0 => self.lazy_init() as imp::Key,
n => n as imp::Key
}
@ -197,7 +188,7 @@ impl StaticKey {
key2
};
assert!(key != 0);
match self.inner.key.compare_and_swap(0, key as usize, Ordering::SeqCst) {
match self.key.compare_and_swap(0, key as usize, Ordering::SeqCst) {
// The CAS succeeded, so we've created the actual key
0 => key as usize,
// If someone beat us to the punch, use their key instead
@ -245,7 +236,7 @@ impl Drop for Key {
#[cfg(test)]
mod tests {
use prelude::v1::*;
use super::{Key, StaticKey, INIT_INNER};
use super::{Key, StaticKey};
fn assert_sync<T: Sync>() {}
fn assert_send<T: Send>() {}
@ -267,8 +258,8 @@ mod tests {
#[test]
fn statik() {
static K1: StaticKey = StaticKey { inner: INIT_INNER, dtor: None };
static K2: StaticKey = StaticKey { inner: INIT_INNER, dtor: None };
static K1: StaticKey = StaticKey::new(None);
static K2: StaticKey = StaticKey::new(None);
unsafe {
assert!(K1.get().is_null());

View File

@ -91,7 +91,7 @@ use io;
use libc;
use mem;
use str;
use sync::{StaticMutex, MUTEX_INIT};
use sync::StaticMutex;
use sys_common::backtrace::*;
@ -117,7 +117,7 @@ pub fn write(w: &mut Write) -> io::Result<()> {
// while it doesn't requires lock for work as everything is
// local, it still displays much nicer backtraces when a
// couple of threads panic simultaneously
static LOCK: StaticMutex = MUTEX_INIT;
static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
try!(writeln!(w, "stack backtrace:"));
@ -148,7 +148,7 @@ pub fn write(w: &mut Write) -> io::Result<()> {
// is semi-reasonable in terms of printing anyway, and we know that all
// I/O done here is blocking I/O, not green I/O, so we don't have to
// worry about this being a native vs green mutex.
static LOCK: StaticMutex = MUTEX_INIT;
static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
try!(writeln!(w, "stack backtrace:"));

View File

@ -23,13 +23,8 @@ pub struct Condvar { inner: UnsafeCell<ffi::pthread_cond_t> }
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
pub const CONDVAR_INIT: Condvar = Condvar {
inner: UnsafeCell { value: ffi::PTHREAD_COND_INITIALIZER },
};
impl Condvar {
#[inline]
pub unsafe fn new() -> Condvar {
pub const fn new() -> Condvar {
// Might be moved and address is changing it is better to avoid
// initialization of potentially opaque OS data before it landed
Condvar { inner: UnsafeCell::new(ffi::PTHREAD_COND_INITIALIZER) }

View File

@ -21,20 +21,15 @@ pub unsafe fn raw(m: &Mutex) -> *mut ffi::pthread_mutex_t {
m.inner.get()
}
pub const MUTEX_INIT: Mutex = Mutex {
inner: UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER },
};
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
#[allow(dead_code)] // sys isn't exported yet
impl Mutex {
#[inline]
pub unsafe fn new() -> Mutex {
pub const fn new() -> Mutex {
// Might be moved and address is changing it is better to avoid
// initialization of potentially opaque OS data before it landed
MUTEX_INIT
Mutex { inner: UnsafeCell::new(ffi::PTHREAD_MUTEX_INITIALIZER) }
}
#[inline]
pub unsafe fn lock(&self) {

View File

@ -216,8 +216,8 @@ pub fn current_exe() -> io::Result<PathBuf> {
#[cfg(any(target_os = "bitrig", target_os = "openbsd"))]
pub fn current_exe() -> io::Result<PathBuf> {
use sync::{StaticMutex, MUTEX_INIT};
static LOCK: StaticMutex = MUTEX_INIT;
use sync::StaticMutex;
static LOCK: StaticMutex = StaticMutex::new();
extern {
fn rust_current_exe() -> *const c_char;

View File

@ -16,14 +16,13 @@ use sys::sync as ffi;
pub struct RWLock { inner: UnsafeCell<ffi::pthread_rwlock_t> }
pub const RWLOCK_INIT: RWLock = RWLock {
inner: UnsafeCell { value: ffi::PTHREAD_RWLOCK_INITIALIZER },
};
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}
impl RWLock {
/// Creates a new reader-writer lock from the pthread static initializer.
///
/// `const fn` (replacing the removed `RWLOCK_INIT` constant) so statics
/// can be initialized without running code.
pub const fn new() -> RWLock {
RWLock { inner: UnsafeCell::new(ffi::PTHREAD_RWLOCK_INITIALIZER) }
}
#[inline]
pub unsafe fn read(&self) {
let r = ffi::pthread_rwlock_rdlock(self.inner.get());

View File

@ -330,10 +330,10 @@ pub mod guard {
#[cfg(target_os = "linux")]
fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
use dynamic_lib::DynamicLibrary;
use sync::{Once, ONCE_INIT};
use sync::Once;
type F = unsafe extern "C" fn(*const libc::pthread_attr_t) -> libc::size_t;
static INIT: Once = ONCE_INIT;
static INIT: Once = Once::new();
static mut __pthread_get_minstack: Option<F> = None;
INIT.call_once(|| {

View File

@ -8,6 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)] // sys isn't exported yet
use prelude::v1::*;
use libc::c_int;

View File

@ -17,7 +17,7 @@ mod inner {
use libc;
use time::Duration;
use ops::Sub;
use sync::{Once, ONCE_INIT};
use sync::Once;
use super::NSEC_PER_SEC;
pub struct SteadyTime {
@ -42,7 +42,7 @@ mod inner {
numer: 0,
denom: 0,
};
static ONCE: Once = ONCE_INIT;
static ONCE: Once = Once::new();
unsafe {
ONCE.call_once(|| {

View File

@ -36,7 +36,7 @@ use mem;
use path::Path;
use ptr;
use str;
use sync::{StaticMutex, MUTEX_INIT};
use sync::StaticMutex;
use sys_common::backtrace::*;
@ -295,7 +295,7 @@ impl Drop for Cleanup {
pub fn write(w: &mut Write) -> io::Result<()> {
// According to windows documentation, all dbghelp functions are
// single-threaded.
static LOCK: StaticMutex = MUTEX_INIT;
static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
// Open up dbghelp.dll, we don't link to it explicitly because it can't

View File

@ -340,10 +340,10 @@ pub mod compat {
-> $rettype:ty { $fallback:expr }) => (
#[inline(always)]
pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use sync::atomic::{AtomicUsize, Ordering};
use mem;
static PTR: AtomicUsize = ATOMIC_USIZE_INIT;
static PTR: AtomicUsize = AtomicUsize::new(0);
fn load() -> usize {
::sys::c::compat::store_func(&PTR,

View File

@ -22,13 +22,10 @@ pub struct Condvar { inner: UnsafeCell<ffi::CONDITION_VARIABLE> }
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
pub const CONDVAR_INIT: Condvar = Condvar {
inner: UnsafeCell { value: ffi::CONDITION_VARIABLE_INIT }
};
impl Condvar {
#[inline]
pub unsafe fn new() -> Condvar { CONDVAR_INIT }
pub const fn new() -> Condvar {
Condvar { inner: UnsafeCell::new(ffi::CONDITION_VARIABLE_INIT) }
}
#[inline]
pub unsafe fn wait(&self, mutex: &Mutex) {

View File

@ -16,10 +16,6 @@ use mem;
pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
pub const MUTEX_INIT: Mutex = Mutex {
inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
};
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
@ -41,6 +37,9 @@ pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
// is there there are no guarantees of fairness.
impl Mutex {
/// Creates a mutex wrapping a statically-initializable SRWLOCK.
///
/// `const fn` (replacing the removed `MUTEX_INIT` constant) so it can be
/// used in `static` initializers.
pub const fn new() -> Mutex {
Mutex { inner: UnsafeCell::new(ffi::SRWLOCK_INIT) }
}
#[inline]
pub unsafe fn lock(&self) {
ffi::AcquireSRWLockExclusive(self.inner.get())

View File

@ -18,7 +18,7 @@ use net::SocketAddr;
use num::One;
use ops::Neg;
use rt;
use sync::{Once, ONCE_INIT};
use sync::Once;
use sys::c;
use sys_common::{AsInner, FromInner};
@ -29,7 +29,7 @@ pub struct Socket(libc::SOCKET);
/// Checks whether the Windows socket interface has been started already, and
/// if not, starts it.
pub fn init() {
static START: Once = ONCE_INIT;
static START: Once = Once::new();
START.call_once(|| unsafe {
let mut data: c::WSADATA = mem::zeroed();

View File

@ -24,7 +24,7 @@ use mem;
use os::windows::ffi::OsStrExt;
use path::Path;
use ptr;
use sync::{StaticMutex, MUTEX_INIT};
use sync::StaticMutex;
use sys::c;
use sys::fs::{OpenOptions, File};
use sys::handle::Handle;
@ -169,7 +169,7 @@ impl Process {
try!(unsafe {
// `CreateProcess` is racy!
// http://support.microsoft.com/kb/315939
static CREATE_PROCESS_LOCK: StaticMutex = MUTEX_INIT;
static CREATE_PROCESS_LOCK: StaticMutex = StaticMutex::new();
let _lock = CREATE_PROCESS_LOCK.lock();
cvt(CreateProcessW(ptr::null(),

View File

@ -15,14 +15,13 @@ use sys::sync as ffi;
pub struct RWLock { inner: UnsafeCell<ffi::SRWLOCK> }
pub const RWLOCK_INIT: RWLock = RWLock {
inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
};
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}
impl RWLock {
/// Creates a reader-writer lock wrapping a statically-initializable
/// SRWLOCK; `const fn` (replacing the removed `RWLOCK_INIT` constant) so
/// statics can use it directly.
pub const fn new() -> RWLock {
RWLock { inner: UnsafeCell::new(ffi::SRWLOCK_INIT) }
}
#[inline]
pub unsafe fn read(&self) {
ffi::AcquireSRWLockShared(self.inner.get())

View File

@ -15,7 +15,7 @@ use libc::types::os::arch::extra::{DWORD, LPVOID, BOOL};
use boxed;
use ptr;
use rt;
use sys_common::mutex::{MUTEX_INIT, Mutex};
use sys_common::mutex::Mutex;
pub type Key = DWORD;
pub type Dtor = unsafe extern fn(*mut u8);
@ -58,7 +58,7 @@ pub type Dtor = unsafe extern fn(*mut u8);
// on poisoning and this module needs to operate at a lower level than requiring
// the thread infrastructure to be in place (useful on the borders of
// initialization/destruction).
static DTOR_LOCK: Mutex = MUTEX_INIT;
static DTOR_LOCK: Mutex = Mutex::new();
static mut DTORS: *mut Vec<(Key, Dtor)> = 0 as *mut _;
// -------------------------------------------------------------------------

View File

@ -10,7 +10,7 @@
use libc;
use ops::Sub;
use time::Duration;
use sync::{Once, ONCE_INIT};
use sync::Once;
const NANOS_PER_SEC: u64 = 1_000_000_000;
@ -28,7 +28,7 @@ impl SteadyTime {
fn frequency() -> libc::LARGE_INTEGER {
static mut FREQUENCY: libc::LARGE_INTEGER = 0;
static ONCE: Once = ONCE_INIT;
static ONCE: Once = Once::new();
unsafe {
ONCE.call_once(|| {

View File

@ -18,12 +18,7 @@ use cell::UnsafeCell;
// Sure wish we had macro hygiene, no?
#[doc(hidden)]
pub mod __impl {
pub use super::imp::Key as KeyInner;
pub use super::imp::destroy_value;
pub use sys_common::thread_local::INIT_INNER as OS_INIT_INNER;
pub use sys_common::thread_local::StaticKey as OsStaticKey;
}
pub use self::imp::Key as __KeyInner;
/// A thread local storage key which owns its contents.
///
@ -76,55 +71,10 @@ pub struct LocalKey<T> {
//
// This is trivially devirtualizable by LLVM because we never store anything
// to this field and rustc can declare the `static` as constant as well.
#[doc(hidden)]
pub inner: fn() -> &'static __impl::KeyInner<UnsafeCell<Option<T>>>,
inner: fn() -> &'static __KeyInner<T>,
// initialization routine to invoke to create a value
#[doc(hidden)]
pub init: fn() -> T,
}
/// Declare a new thread local storage key of type `std::thread::LocalKey`.
///
/// See [LocalKey documentation](thread/struct.LocalKey.html) for more information.
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[allow_internal_unstable]
macro_rules! thread_local {
(static $name:ident: $t:ty = $init:expr) => (
static $name: ::std::thread::LocalKey<$t> = {
use std::cell::UnsafeCell as __UnsafeCell;
use std::thread::__local::KeyInner as __KeyInner;
use std::option::Option as __Option;
use std::option::Option::None as __None;
__thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = {
__UnsafeCell { value: __None }
});
fn __init() -> $t { $init }
fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> {
&__KEY
}
::std::thread::LocalKey { inner: __getit, init: __init }
};
);
(pub static $name:ident: $t:ty = $init:expr) => (
pub static $name: ::std::thread::LocalKey<$t> = {
use std::cell::UnsafeCell as __UnsafeCell;
use std::thread::__local::KeyInner as __KeyInner;
use std::option::Option as __Option;
use std::option::Option::None as __None;
__thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = {
__UnsafeCell { value: __None }
});
fn __init() -> $t { $init }
fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> {
&__KEY
}
::std::thread::LocalKey { inner: __getit, init: __init }
};
);
init: fn() -> T,
}
// Macro pain #4586:
@ -147,50 +97,37 @@ macro_rules! thread_local {
// To get around this, we're forced to inject the #[cfg] logic into the macro
// itself. Woohoo.
/// Declare a new thread local storage key of type `std::thread::LocalKey`.
///
/// See [LocalKey documentation](thread/struct.LocalKey.html) for more information.
#[macro_export]
#[doc(hidden)]
#[stable(feature = "rust1", since = "1.0.0")]
#[allow_internal_unstable]
macro_rules! __thread_local_inner {
macro_rules! thread_local {
(static $name:ident: $t:ty = $init:expr) => (
#[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
not(target_arch = "aarch64")),
thread_local)]
static $name: ::std::thread::__local::KeyInner<$t> =
__thread_local_inner!($init, $t);
static $name: ::std::thread::LocalKey<$t> = {
#[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
not(target_arch = "aarch64")),
thread_local)]
static __KEY: ::std::thread::__LocalKeyInner<$t> =
::std::thread::__LocalKeyInner::new();
fn __init() -> $t { $init }
fn __getit() -> &'static ::std::thread::__LocalKeyInner<$t> { &__KEY }
::std::thread::LocalKey::new(__getit, __init)
};
);
(pub static $name:ident: $t:ty = $init:expr) => (
#[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
not(target_arch = "aarch64")),
thread_local)]
pub static $name: ::std::thread::__local::KeyInner<$t> =
__thread_local_inner!($init, $t);
pub static $name: ::std::thread::LocalKey<$t> = {
#[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
not(target_arch = "aarch64")),
thread_local)]
static __KEY: ::std::thread::__LocalKeyInner<$t> =
::std::thread::__LocalKeyInner::new();
fn __init() -> $t { $init }
fn __getit() -> &'static ::std::thread::__LocalKeyInner<$t> { &__KEY }
::std::thread::LocalKey::new(__getit, __init)
};
);
($init:expr, $t:ty) => ({
#[cfg(all(any(target_os = "macos", target_os = "linux"), not(target_arch = "aarch64")))]
const _INIT: ::std::thread::__local::KeyInner<$t> = {
::std::thread::__local::KeyInner {
inner: ::std::cell::UnsafeCell { value: $init },
dtor_registered: ::std::cell::UnsafeCell { value: false },
dtor_running: ::std::cell::UnsafeCell { value: false },
}
};
#[allow(trivial_casts)]
#[cfg(any(not(any(target_os = "macos", target_os = "linux")), target_arch = "aarch64"))]
const _INIT: ::std::thread::__local::KeyInner<$t> = {
::std::thread::__local::KeyInner {
inner: ::std::cell::UnsafeCell { value: $init },
os: ::std::thread::__local::OsStaticKey {
inner: ::std::thread::__local::OS_INIT_INNER,
dtor: ::std::option::Option::Some(
::std::thread::__local::destroy_value::<$t>
),
},
}
};
_INIT
});
}
/// Indicator of the state of a thread local storage key.
@ -225,6 +162,14 @@ pub enum LocalKeyState {
}
impl<T: 'static> LocalKey<T> {
#[doc(hidden)]
/// Constructs a `LocalKey` from its two function pointers: `inner`
/// returns the per-thread storage slot and `init` produces the initial
/// value. `const fn` so the `thread_local!` macro can build the key in a
/// `static` initializer. Hidden: only intended for macro expansion.
pub const fn new(inner: fn() -> &'static __KeyInner<T>, init: fn() -> T) -> LocalKey<T> {
LocalKey {
inner: inner,
init: init
}
}
/// Acquires a reference to the value in this TLS key.
///
/// This will lazily initialize the value if this thread has not referenced
@ -300,44 +245,45 @@ impl<T: 'static> LocalKey<T> {
mod imp {
use prelude::v1::*;
use cell::UnsafeCell;
use cell::{Cell, UnsafeCell};
use intrinsics;
use ptr;
pub struct Key<T> {
// Place the inner bits in an `UnsafeCell` to currently get around the
// "only Sync statics" restriction. This allows any type to be placed in
// the cell.
//
// Note that all access requires `T: 'static` so it can't be a type with
// any borrowed pointers still.
pub inner: UnsafeCell<T>,
inner: UnsafeCell<Option<T>>,
// Metadata to keep track of the state of the destructor. Remember that
// these variables are thread-local, not global.
pub dtor_registered: UnsafeCell<bool>, // should be Cell
pub dtor_running: UnsafeCell<bool>, // should be Cell
dtor_registered: Cell<bool>,
dtor_running: Cell<bool>,
}
unsafe impl<T> ::marker::Sync for Key<T> { }
impl<T> Key<T> {
pub unsafe fn get(&'static self) -> Option<&'static T> {
if intrinsics::needs_drop::<T>() && *self.dtor_running.get() {
/// Creates an empty, destructor-unregistered TLS key.
///
/// `const fn` so `thread_local!` can place it in a (`#[thread_local]`)
/// static. The value starts as `None`; both destructor flags start false
/// and are flipped by `register_dtor` / `destroy_value` respectively.
pub const fn new() -> Key<T> {
Key {
inner: UnsafeCell::new(None),
dtor_registered: Cell::new(false),
dtor_running: Cell::new(false)
}
}
pub unsafe fn get(&'static self) -> Option<&'static UnsafeCell<Option<T>>> {
if intrinsics::needs_drop::<T>() && self.dtor_running.get() {
return None
}
self.register_dtor();
Some(&*self.inner.get())
Some(&self.inner)
}
unsafe fn register_dtor(&self) {
if !intrinsics::needs_drop::<T>() || *self.dtor_registered.get() {
if !intrinsics::needs_drop::<T>() || self.dtor_registered.get() {
return
}
register_dtor(self as *const _ as *mut u8,
destroy_value::<T>);
*self.dtor_registered.get() = true;
self.dtor_registered.set(true);
}
}
@ -354,6 +300,7 @@ mod imp {
unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
use boxed;
use mem;
use ptr;
use libc;
use sys_common::thread_local as os;
@ -381,10 +328,7 @@ mod imp {
// *should* be the case that this loop always terminates because we
// provide the guarantee that a TLS key cannot be set after it is
// flagged for destruction.
static DTORS: os::StaticKey = os::StaticKey {
inner: os::INIT_INNER,
dtor: Some(run_dtors as unsafe extern "C" fn(*mut u8)),
};
static DTORS: os::StaticKey = os::StaticKey::new(Some(run_dtors));
type List = Vec<(*mut u8, unsafe extern fn(*mut u8))>;
if DTORS.get().is_null() {
let v: Box<List> = box Vec::new();
@ -422,8 +366,8 @@ mod imp {
// Right before we run the user destructor be sure to flag the
// destructor as running for this thread so calls to `get` will return
// `None`.
*(*ptr).dtor_running.get() = true;
ptr::read((*ptr).inner.get());
(*ptr).dtor_running.set(true);
intrinsics::drop_in_place((*ptr).inner.get());
}
}
@ -433,54 +377,50 @@ mod imp {
use prelude::v1::*;
use alloc::boxed;
use cell::UnsafeCell;
use mem;
use cell::{Cell, UnsafeCell};
use marker;
use ptr;
use sys_common::thread_local::StaticKey as OsStaticKey;
pub struct Key<T> {
// Statically allocated initialization expression, using an `UnsafeCell`
// for the same reasons as above.
pub inner: UnsafeCell<T>,
// OS-TLS key that we'll use to key off.
pub os: OsStaticKey,
os: OsStaticKey,
marker: marker::PhantomData<Cell<T>>,
}
unsafe impl<T> ::marker::Sync for Key<T> { }
struct Value<T: 'static> {
key: &'static Key<T>,
value: T,
value: UnsafeCell<Option<T>>,
}
impl<T> Key<T> {
pub unsafe fn get(&'static self) -> Option<&'static T> {
self.ptr().map(|p| &*p)
impl<T: 'static> Key<T> {
/// Creates an OS-keyed TLS slot whose destructor is `destroy_value::<T>`,
/// run by the OS TLS machinery when the thread exits.
///
/// `const fn` so it can live in a `static`; the actual per-thread
/// `Value<T>` box is allocated lazily in `get`.
pub const fn new() -> Key<T> {
Key {
os: OsStaticKey::new(Some(destroy_value::<T>)),
marker: marker::PhantomData
}
}
unsafe fn ptr(&'static self) -> Option<*mut T> {
pub unsafe fn get(&'static self) -> Option<&'static UnsafeCell<Option<T>>> {
let ptr = self.os.get() as *mut Value<T>;
if !ptr.is_null() {
if ptr as usize == 1 {
return None
}
return Some(&mut (*ptr).value as *mut T);
return Some(&(*ptr).value);
}
// If the lookup returned null, we haven't initialized our own local
// copy, so do that now.
//
// Also note that this transmute_copy should be ok because the value
// `inner` is already validated to be a valid `static` value, so we
// should be able to freely copy the bits.
let ptr: Box<Value<T>> = box Value {
key: self,
value: mem::transmute_copy(&self.inner),
value: UnsafeCell::new(None),
};
let ptr = boxed::into_raw(ptr);
self.os.set(ptr as *mut u8);
Some(&mut (*ptr).value as *mut T)
Some(&(*ptr).value)
}
}
@ -505,7 +445,7 @@ mod tests {
use prelude::v1::*;
use sync::mpsc::{channel, Sender};
use cell::UnsafeCell;
use cell::{Cell, UnsafeCell};
use super::LocalKeyState;
use thread;
@ -520,23 +460,23 @@ mod tests {
#[test]
fn smoke_no_dtor() {
thread_local!(static FOO: UnsafeCell<i32> = UnsafeCell { value: 1 });
thread_local!(static FOO: Cell<i32> = Cell::new(1));
FOO.with(|f| unsafe {
assert_eq!(*f.get(), 1);
*f.get() = 2;
FOO.with(|f| {
assert_eq!(f.get(), 1);
f.set(2);
});
let (tx, rx) = channel();
let _t = thread::spawn(move|| {
FOO.with(|f| unsafe {
assert_eq!(*f.get(), 1);
FOO.with(|f| {
assert_eq!(f.get(), 1);
});
tx.send(()).unwrap();
});
rx.recv().unwrap();
FOO.with(|f| unsafe {
assert_eq!(*f.get(), 2);
FOO.with(|f| {
assert_eq!(f.get(), 2);
});
}
@ -565,9 +505,7 @@ mod tests {
#[test]
fn smoke_dtor() {
thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell {
value: None
});
thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
let (tx, rx) = channel();
let _t = thread::spawn(move|| unsafe {
@ -583,12 +521,8 @@ mod tests {
fn circular() {
struct S1;
struct S2;
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
value: None
});
thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell {
value: None
});
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell::new(None));
static mut HITS: u32 = 0;
impl Drop for S1 {
@ -626,9 +560,7 @@ mod tests {
#[test]
fn self_referential() {
struct S1;
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
value: None
});
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
impl Drop for S1 {
fn drop(&mut self) {
@ -644,12 +576,8 @@ mod tests {
#[test]
fn dtors_in_dtors_in_dtors() {
struct S1(Sender<()>);
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
value: None
});
thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell {
value: None
});
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
impl Drop for S1 {
fn drop(&mut self) {

View File

@ -216,8 +216,7 @@ pub use self::local::{LocalKey, LocalKeyState};
consider stabilizing its interface")]
pub use self::scoped_tls::ScopedKey;
#[doc(hidden)] pub use self::local::__impl as __local;
#[doc(hidden)] pub use self::scoped_tls::__impl as __scoped;
#[doc(hidden)] pub use self::local::__KeyInner as __LocalKeyInner;
////////////////////////////////////////////////////////////////////////////////
// Builder

View File

@ -43,13 +43,6 @@
use prelude::v1::*;
// macro hygiene sure would be nice, wouldn't it?
#[doc(hidden)]
pub mod __impl {
pub use super::imp::KeyInner;
pub use sys_common::thread_local::INIT as OS_INIT;
}
/// Type representing a thread local storage key corresponding to a reference
/// to the type parameter `T`.
///
@ -60,7 +53,7 @@ pub mod __impl {
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
pub struct ScopedKey<T> { #[doc(hidden)] pub inner: __impl::KeyInner<T> }
pub struct ScopedKey<T> { inner: imp::KeyInner<T> }
/// Declare a new scoped thread local storage key.
///
@ -71,18 +64,6 @@ pub struct ScopedKey<T> { #[doc(hidden)] pub inner: __impl::KeyInner<T> }
#[macro_export]
#[allow_internal_unstable]
macro_rules! scoped_thread_local {
(static $name:ident: $t:ty) => (
__scoped_thread_local_inner!(static $name: $t);
);
(pub static $name:ident: $t:ty) => (
__scoped_thread_local_inner!(pub static $name: $t);
);
}
#[macro_export]
#[doc(hidden)]
#[allow_internal_unstable]
macro_rules! __scoped_thread_local_inner {
(static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
target_os = "android",
@ -91,7 +72,7 @@ macro_rules! __scoped_thread_local_inner {
target_arch = "aarch64")),
thread_local)]
static $name: ::std::thread::ScopedKey<$t> =
__scoped_thread_local_inner!($t);
::std::thread::ScopedKey::new();
);
(pub static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
@ -101,42 +82,19 @@ macro_rules! __scoped_thread_local_inner {
target_arch = "aarch64")),
thread_local)]
pub static $name: ::std::thread::ScopedKey<$t> =
__scoped_thread_local_inner!($t);
::std::thread::ScopedKey::new();
);
($t:ty) => ({
use std::thread::ScopedKey as __Key;
#[cfg(not(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64")))]
const _INIT: __Key<$t> = __Key {
inner: ::std::thread::__scoped::KeyInner {
inner: ::std::cell::UnsafeCell { value: 0 as *mut _ },
}
};
#[cfg(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64"))]
const _INIT: __Key<$t> = __Key {
inner: ::std::thread::__scoped::KeyInner {
inner: ::std::thread::__scoped::OS_INIT,
marker: ::std::marker::PhantomData::<::std::cell::Cell<$t>>,
}
};
_INIT
})
}
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
impl<T> ScopedKey<T> {
#[doc(hidden)]
/// Creates an empty scoped-TLS key (no value installed); `const fn` so
/// the `scoped_thread_local!` macro can initialize a `static` with it.
/// Hidden: only intended for macro expansion.
pub const fn new() -> ScopedKey<T> {
ScopedKey { inner: imp::KeyInner::new() }
}
/// Inserts a value into this scoped thread local storage slot for a
/// duration of a closure.
///
@ -170,7 +128,7 @@ impl<T> ScopedKey<T> {
F: FnOnce() -> R,
{
struct Reset<'a, T: 'a> {
key: &'a __impl::KeyInner<T>,
key: &'a imp::KeyInner<T>,
val: *mut T,
}
impl<'a, T> Drop for Reset<'a, T> {
@ -231,19 +189,18 @@ impl<T> ScopedKey<T> {
target_os = "openbsd",
target_arch = "aarch64")))]
mod imp {
use std::cell::UnsafeCell;
use std::cell::Cell;
#[doc(hidden)]
pub struct KeyInner<T> { pub inner: UnsafeCell<*mut T> }
pub struct KeyInner<T> { inner: Cell<*mut T> }
unsafe impl<T> ::marker::Sync for KeyInner<T> { }
#[doc(hidden)]
impl<T> KeyInner<T> {
#[doc(hidden)]
pub unsafe fn set(&self, ptr: *mut T) { *self.inner.get() = ptr; }
#[doc(hidden)]
pub unsafe fn get(&self) -> *mut T { *self.inner.get() }
/// Creates a key whose thread-local cell starts as a null pointer
/// (meaning "no value installed"); `const fn` for use in statics.
pub const fn new() -> KeyInner<T> {
KeyInner { inner: Cell::new(0 as *mut _) }
}
// Stores `ptr` in the thread-local cell. Unsafe: callers must uphold the
// scoped-TLS lifetime discipline for the pointed-to value.
pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr); }
// Reads the currently installed pointer (null when no value is set).
pub unsafe fn get(&self) -> *mut T { self.inner.get() }
}
}
@ -253,23 +210,27 @@ mod imp {
target_os = "openbsd",
target_arch = "aarch64"))]
mod imp {
use prelude::v1::*;
use cell::Cell;
use marker;
use std::cell::Cell;
use sys_common::thread_local::StaticKey as OsStaticKey;
#[doc(hidden)]
pub struct KeyInner<T> {
pub inner: OsStaticKey,
pub marker: marker::PhantomData<Cell<T>>,
}
unsafe impl<T> ::marker::Sync for KeyInner<T> { }
unsafe impl<T> marker::Sync for KeyInner<T> { }
#[doc(hidden)]
impl<T> KeyInner<T> {
#[doc(hidden)]
/// Creates a scoped-TLS key backed by an OS TLS slot with no destructor
/// (scoped TLS never owns its value, so none is needed). `PhantomData`
/// records the logical `Cell<T>` contents. `const fn` for statics.
pub const fn new() -> KeyInner<T> {
KeyInner {
inner: OsStaticKey::new(None),
marker: marker::PhantomData
}
}
pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr as *mut _) }
#[doc(hidden)]
pub unsafe fn get(&self) -> *mut T { self.inner.get() as *mut _ }
}
}

View File

@ -11,12 +11,12 @@
use std::sync::atomic;
pub const C1: usize = 1;
pub const C2: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
pub const C2: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
pub const C3: fn() = foo;
pub const C4: usize = C1 * C1 + C1 / C1;
pub const C5: &'static usize = &C4;
pub static S1: usize = 3;
pub static S2: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
pub static S2: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
fn foo() {}

View File

@ -18,9 +18,9 @@ use id::Id;
mod s {
#![allow(unstable)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1

View File

@ -26,9 +26,9 @@ use id::Id;
mod s {
#![allow(unstable)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1

View File

@ -17,9 +17,9 @@ use std::cell::Cell;
use id::Id;
mod s {
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1

View File

@ -17,9 +17,9 @@ use id::Id;
mod s {
#![allow(unstable)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1

View File

@ -16,7 +16,7 @@ use self::foo::S;
mod foo {
use std::cell::{UnsafeCell};
static mut count : UnsafeCell<u64> = UnsafeCell { value: 1 };
static mut count : UnsafeCell<u64> = UnsafeCell::new(1);
pub struct S { pub a: u8, pub b: String, secret_uid: u64 }

View File

@ -10,12 +10,12 @@
use std::cell::UnsafeCell;
const A: UnsafeCell<usize> = UnsafeCell { value: 1 };
const A: UnsafeCell<usize> = UnsafeCell::new(1);
const B: &'static UnsafeCell<usize> = &A;
//~^ ERROR: cannot borrow a constant which contains interior mutability
struct C { a: UnsafeCell<usize> }
const D: C = C { a: UnsafeCell { value: 1 } };
const D: C = C { a: UnsafeCell::new(1) };
const E: &'static UnsafeCell<usize> = &D.a;
//~^ ERROR: cannot borrow a constant which contains interior mutability
const F: &'static C = &D;

View File

@ -17,6 +17,5 @@ static boxed: Box<RefCell<isize>> = box RefCell::new(0);
//~^ ERROR allocations are not allowed in statics
//~| ERROR the trait `core::marker::Sync` is not implemented for the type
//~| ERROR the trait `core::marker::Sync` is not implemented for the type
//~| ERROR E0015
fn main() { }

View File

@ -15,11 +15,11 @@ use std::sync::atomic::*;
use std::ptr;
fn main() {
let x = ATOMIC_BOOL_INIT;
let x = AtomicBool::new(false);
let x = *&x; //~ ERROR: cannot move out of borrowed content
let x = ATOMIC_ISIZE_INIT;
let x = AtomicIsize::new(0);
let x = *&x; //~ ERROR: cannot move out of borrowed content
let x = ATOMIC_USIZE_INIT;
let x = AtomicUsize::new(0);
let x = *&x; //~ ERROR: cannot move out of borrowed content
let x: AtomicPtr<usize> = AtomicPtr::new(ptr::null_mut());
let x = *&x; //~ ERROR: cannot move out of borrowed content

View File

@ -28,9 +28,9 @@ use id::Id;
mod s {
#![allow(unstable)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
static S_COUNT: AtomicUsize = AtomicUsize::new(0);
/// generates globally unique count (global across the current
/// process, that is)

View File

@ -19,7 +19,7 @@
// This test makes sure that the compiler doesn't crash when trying to assign
// debug locations to const-expressions.
use std::sync::MUTEX_INIT;
use std::sync::StaticMutex;
use std::cell::UnsafeCell;
const CONSTANT: u64 = 3 + 4;
@ -49,7 +49,7 @@ const VEC: [u32; 8] = [0; 8];
const NESTED: (Struct, TupleStruct) = (STRUCT, TUPLE_STRUCT);
const UNSAFE_CELL: UnsafeCell<bool> = UnsafeCell { value: false };
const UNSAFE_CELL: UnsafeCell<bool> = UnsafeCell::new(false);
fn main() {
let mut _constant = CONSTANT;
@ -61,6 +61,6 @@ fn main() {
let mut _string = STRING;
let mut _vec = VEC;
let mut _nested = NESTED;
let mut _extern = MUTEX_INIT;
let mut _extern = StaticMutex::new();
let mut _unsafe_cell = UNSAFE_CELL;
}

View File

@ -22,7 +22,7 @@ enum E {
C = 2
}
static FLAG: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
static FLAG: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
impl Drop for E {
fn drop(&mut self) {

View File

@ -13,10 +13,10 @@
// `T`. Issue #20300.
use std::marker::{PhantomData};
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::{AtomicUsize};
use std::sync::atomic::Ordering::SeqCst;
static COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
static COUNTER: AtomicUsize = AtomicUsize::new(0);
// Preamble.
trait Trait { type Item; }

View File

@ -12,9 +12,9 @@
// destructor.
use std::thread;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
struct D(u8);

View File

@ -12,9 +12,9 @@
// destructor.
use std::thread;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
struct D(u8);

View File

@ -38,8 +38,8 @@ unsafe impl<T: Send> Sync for UnsafeEnum<T> {}
static STATIC1: UnsafeEnum<isize> = UnsafeEnum::VariantSafe;
static STATIC2: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell { value: 1 });
const CONST: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell { value: 1 });
static STATIC2: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(1));
const CONST: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(1));
static STATIC3: MyUnsafe<isize> = MyUnsafe{value: CONST};
static STATIC4: &'static MyUnsafePack<isize> = &STATIC2;
@ -50,7 +50,7 @@ struct Wrap<T> {
unsafe impl<T: Send> Sync for Wrap<T> {}
static UNSAFE: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell{value: 2});
static UNSAFE: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(2));
static WRAPPED_UNSAFE: Wrap<&'static MyUnsafePack<isize>> = Wrap { value: &UNSAFE };
fn main() {

View File

@ -15,10 +15,10 @@
extern crate issue_17718 as other;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
const C1: usize = 1;
const C2: AtomicUsize = ATOMIC_USIZE_INIT;
const C2: AtomicUsize = AtomicUsize::new(0);
const C3: fn() = foo;
const C4: usize = C1 * C1 + C1 / C1;
const C5: &'static usize = &C4;
@ -28,7 +28,7 @@ const C6: usize = {
};
static S1: usize = 3;
static S2: AtomicUsize = ATOMIC_USIZE_INIT;
static S2: AtomicUsize = AtomicUsize::new(0);
mod test {
static A: usize = 4;

View File

@ -13,7 +13,7 @@
// construction.
use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::{Ordering, AtomicUsize};
#[derive(Debug)]
struct Noisy(u8);
@ -69,7 +69,7 @@ pub fn main() {
assert_eq!(0x03_04, event_log());
}
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
fn reset_log() {
LOG.store(0, Ordering::SeqCst);

View File

@ -14,9 +14,9 @@
use std::thread;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
struct D(u8);

View File

@ -12,7 +12,7 @@
// even when no Drop-implementations are involved.
use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::{Ordering, AtomicUsize};
struct W { wrapped: u32 }
struct S { f0: W, _f1: i32 }
@ -34,7 +34,7 @@ pub fn main() {
"expect: 0x{:x} actual: 0x{:x}", expect, actual);
}
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
fn event_log() -> usize {
LOG.load(Ordering::SeqCst)

View File

@ -12,7 +12,7 @@
// even when no Drop-implementations are involved.
use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::{Ordering, AtomicUsize};
struct W { wrapped: u32 }
struct S { f0: W, _f1: i32 }
@ -31,7 +31,7 @@ pub fn main() {
"expect: 0x{:x} actual: 0x{:x}", expect, actual);
}
static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG: AtomicUsize = AtomicUsize::new(0);
fn event_log() -> usize {
LOG.load(Ordering::SeqCst)

View File

@ -11,7 +11,7 @@
#![feature(rand, core)]
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::__rand::{thread_rng, Rng};
use std::thread;
@ -20,20 +20,20 @@ const MAX_LEN: usize = 32;
static drop_counts: [AtomicUsize; MAX_LEN] =
// FIXME #5244: AtomicUsize is not Copy.
[
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
AtomicUsize::new(0), AtomicUsize::new(0),
];
static creation_count: AtomicUsize = ATOMIC_USIZE_INIT;
static creation_count: AtomicUsize = AtomicUsize::new(0);
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord)]
struct DropCounter { x: u32, creation_id: usize }