From 8b6cda3ce681d4d95c3097d12ed754975b4a07f6 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 10 Jan 2015 13:42:48 -0800 Subject: [PATCH] Rename AtomicInt and AtomicUint Change any use of AtomicInt to AtomicIsize and AtomicUint to AtomicUsize Closes #20893 [breaking-change] --- src/liballoc/arc.rs | 18 +- src/libcollections/vec.rs | 4 +- src/libcore/atomic.rs | 385 ++++++++++++------ src/libcoretest/atomic.rs | 16 +- src/libstd/io/test.rs | 6 +- src/libstd/os.rs | 4 +- src/libstd/rt/backtrace.rs | 2 +- src/libstd/rt/unwind.rs | 20 +- src/libstd/rt/util.rs | 2 +- src/libstd/sync/condvar.rs | 8 +- src/libstd/sync/mpsc/oneshot.rs | 6 +- src/libstd/sync/mpsc/shared.rs | 18 +- src/libstd/sync/mpsc/spsc_queue.rs | 10 +- src/libstd/sync/mpsc/stream.rs | 10 +- src/libstd/sync/mpsc/sync.rs | 6 +- src/libstd/sync/once.rs | 10 +- src/libstd/sys/common/thread_local.rs | 6 +- src/libstd/sys/unix/timer.rs | 2 +- src/libstd/sys/windows/mutex.rs | 8 +- src/test/auxiliary/issue-17718.rs | 4 +- .../compile-fail/std-uncopyable-atomics.rs | 4 +- src/test/run-pass/issue-17718.rs | 6 +- src/test/run-pass/tcp-accept-stress.rs | 4 +- src/test/run-pass/vector-sort-panic-safe.rs | 28 +- 24 files changed, 360 insertions(+), 227 deletions(-) diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs index 290dd21d666..c0cd034abfa 100644 --- a/src/liballoc/arc.rs +++ b/src/liballoc/arc.rs @@ -137,8 +137,8 @@ unsafe impl Send for Weak { } unsafe impl Sync for Weak { } struct ArcInner { - strong: atomic::AtomicUint, - weak: atomic::AtomicUint, + strong: atomic::AtomicUsize, + weak: atomic::AtomicUsize, data: T, } @@ -161,8 +161,8 @@ impl Arc { // Start the weak pointer count as 1 which is the weak pointer that's // held by all the strong pointers (kinda), see std/rc.rs for more info let x = box ArcInner { - strong: atomic::AtomicUint::new(1), - weak: atomic::AtomicUint::new(1), + strong: atomic::AtomicUsize::new(1), + weak: atomic::AtomicUsize::new(1), data: data, }; Arc { _ptr: unsafe { NonZero::new(mem::transmute(x)) } } @@ -619,7 +619,7 @@ mod tests { use super::{Arc, Weak, weak_count, strong_count}; use std::sync::Mutex; - struct Canary(*mut atomic::AtomicUint); + struct Canary(*mut atomic::AtomicUsize); impl Drop for Canary { @@ -743,16 +743,16 @@ mod tests { #[test] fn drop_arc() { - let mut canary = atomic::AtomicUint::new(0); - let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUint)); + let mut canary = atomic::AtomicUsize::new(0); + let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize)); drop(x); assert!(canary.load(Acquire) == 1); } #[test] fn drop_arc_weak() { - let mut canary = atomic::AtomicUint::new(0); - let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUint)); + let mut canary = atomic::AtomicUsize::new(0); + let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize)); let arc_weak = arc.downgrade(); assert!(canary.load(Acquire) == 0); drop(arc); diff --git a/src/libcollections/vec.rs b/src/libcollections/vec.rs index 47afc78bc12..e5f9b2513e2 100644 --- a/src/libcollections/vec.rs +++ b/src/libcollections/vec.rs @@ -2199,7 +2199,7 @@ mod tests { #[test] fn test_map_in_place_zero_drop_count() { - use std::sync::atomic::{AtomicUint, Ordering, ATOMIC_UINT_INIT}; + use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; #[derive(Clone, PartialEq, Show)] struct Nothing; @@ -2213,7 +2213,7 @@ mod tests { } } const NUM_ELEMENTS: uint = 2; - static DROP_COUNTER: AtomicUint = ATOMIC_UINT_INIT; + static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; let v = 
repeat(Nothing).take(NUM_ELEMENTS).collect::>(); diff --git a/src/libcore/atomic.rs b/src/libcore/atomic.rs index e740a929252..aa93d9ed837 100644 --- a/src/libcore/atomic.rs +++ b/src/libcore/atomic.rs @@ -15,7 +15,7 @@ //! types. //! //! This module defines atomic versions of a select number of primitive -//! types, including `AtomicBool`, `AtomicInt`, `AtomicUint`, and `AtomicOption`. +//! types, including `AtomicBool`, `AtomicIsize`, `AtomicUsize`, and `AtomicOption`. //! Atomic types present operations that, when used correctly, synchronize //! updates between threads. //! @@ -41,11 +41,11 @@ //! //! ``` //! use std::sync::Arc; -//! use std::sync::atomic::{AtomicUint, Ordering}; +//! use std::sync::atomic::{AtomicUsize, Ordering}; //! use std::thread::Thread; //! //! fn main() { -//! let spinlock = Arc::new(AtomicUint::new(1)); +//! let spinlock = Arc::new(AtomicUsize::new(1)); //! //! let spinlock_clone = spinlock.clone(); //! Thread::spawn(move|| { @@ -60,9 +60,9 @@ //! Keep a global count of live tasks: //! //! ``` -//! use std::sync::atomic::{AtomicUint, Ordering, ATOMIC_UINT_INIT}; +//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; //! -//! static GLOBAL_TASK_COUNT: AtomicUint = ATOMIC_UINT_INIT; +//! static GLOBAL_TASK_COUNT: AtomicUsize = ATOMIC_USIZE_INIT; //! //! let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, Ordering::SeqCst); //! println!("live tasks: {}", old_task_count + 1); @@ -80,31 +80,31 @@ use cell::UnsafeCell; /// A boolean type which can be safely shared between threads. #[stable] pub struct AtomicBool { - v: UnsafeCell, + v: UnsafeCell, } unsafe impl Sync for AtomicBool {} /// A signed integer type which can be safely shared between threads. -#[unstable = "awaiting int/uint conventions, may be renamed"] -pub struct AtomicInt { - v: UnsafeCell, +#[stable] +pub struct AtomicIsize { + v: UnsafeCell, } -unsafe impl Sync for AtomicInt {} +unsafe impl Sync for AtomicIsize {} /// An unsigned integer type which can be safely shared between threads. -#[unstable = "awaiting int/uint conventions, may be renamed"] -pub struct AtomicUint { - v: UnsafeCell, +#[stable] +pub struct AtomicUsize { + v: UnsafeCell, } -unsafe impl Sync for AtomicUint {} +unsafe impl Sync for AtomicUsize {} /// A raw pointer type which can be safely shared between threads. #[stable] pub struct AtomicPtr { - p: UnsafeCell, + p: UnsafeCell, } unsafe impl Sync for AtomicPtr {} @@ -149,17 +149,17 @@ pub enum Ordering { #[stable] pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool { v: UnsafeCell { value: 0 } }; -/// An `AtomicInt` initialized to `0`. -#[unstable = "awaiting int/uint conventions, may be renamed"] -pub const ATOMIC_INT_INIT: AtomicInt = - AtomicInt { v: UnsafeCell { value: 0 } }; -/// An `AtomicUint` initialized to `0`. -#[unstable = "awaiting int/uint conventions, may be renamed"] -pub const ATOMIC_UINT_INIT: AtomicUint = - AtomicUint { v: UnsafeCell { value: 0, } }; +/// An `AtomicIsize` initialized to `0`. +#[stable] +pub const ATOMIC_ISIZE_INIT: AtomicIsize = + AtomicIsize { v: UnsafeCell { value: 0 } }; +/// An `AtomicUsize` initialized to `0`. +#[stable] +pub const ATOMIC_USIZE_INIT: AtomicUsize = + AtomicUsize { v: UnsafeCell { value: 0, } }; // NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly -const UINT_TRUE: uint = -1; +const UINT_TRUE: usize = -1; impl AtomicBool { /// Creates a new `AtomicBool`. 
@@ -199,7 +199,7 @@ impl AtomicBool { #[inline] #[stable] pub fn load(&self, order: Ordering) -> bool { - unsafe { atomic_load(self.v.get() as *const uint, order) > 0 } + unsafe { atomic_load(self.v.get() as *const usize, order) > 0 } } /// Stores a value into the bool. @@ -323,7 +323,7 @@ impl AtomicBool { /// /// let foo = AtomicBool::new(true); /// assert_eq!(true, foo.fetch_nand(true, Ordering::SeqCst)); - /// assert_eq!(0, foo.load(Ordering::SeqCst) as int); + /// assert_eq!(0, foo.load(Ordering::SeqCst) as usize); /// assert_eq!(false, foo.load(Ordering::SeqCst)); /// /// let foo = AtomicBool::new(false); @@ -403,23 +403,23 @@ impl AtomicBool { } } -#[unstable = "awaiting int/uint conventions, types may change"] -impl AtomicInt { - /// Creates a new `AtomicInt`. +#[stable] +impl AtomicIsize { + /// Creates a new `AtomicIsize`. /// /// # Examples /// /// ``` - /// use std::sync::atomic::AtomicInt; + /// use std::sync::atomic::AtomicIsize; /// - /// let atomic_forty_two = AtomicInt::new(42); + /// let atomic_forty_two = AtomicIsize::new(42); /// ``` #[inline] - pub fn new(v: int) -> AtomicInt { - AtomicInt {v: UnsafeCell::new(v)} + pub fn new(v: isize) -> AtomicIsize { + AtomicIsize {v: UnsafeCell::new(v)} } - /// Loads a value from the int. + /// Loads a value from the isize. /// /// `load` takes an `Ordering` argument which describes the memory ordering of this operation. /// @@ -430,58 +430,58 @@ impl AtomicInt { /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicInt, Ordering}; + /// use std::sync::atomic::{AtomicIsize, Ordering}; /// - /// let some_int = AtomicInt::new(5); + /// let some_isize = AtomicIsize::new(5); /// - /// let value = some_int.load(Ordering::Relaxed); + /// let value = some_isize.load(Ordering::Relaxed); /// ``` #[inline] - pub fn load(&self, order: Ordering) -> int { - unsafe { atomic_load(self.v.get() as *const int, order) } + pub fn load(&self, order: Ordering) -> isize { + unsafe { atomic_load(self.v.get() as *const isize, order) } } - /// Stores a value into the int. + /// Stores a value into the isize. /// /// `store` takes an `Ordering` argument which describes the memory ordering of this operation. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicInt, Ordering}; + /// use std::sync::atomic::{AtomicIsize, Ordering}; /// - /// let some_int = AtomicInt::new(5); + /// let some_isize = AtomicIsize::new(5); /// - /// some_int.store(10, Ordering::Relaxed); + /// some_isize.store(10, Ordering::Relaxed); /// ``` /// /// # Panics /// /// Panics if `order` is `Acquire` or `AcqRel`. #[inline] - pub fn store(&self, val: int, order: Ordering) { + pub fn store(&self, val: isize, order: Ordering) { unsafe { atomic_store(self.v.get(), val, order); } } - /// Stores a value into the int, returning the old value. + /// Stores a value into the isize, returning the old value. /// /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation. 
/// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicInt, Ordering}; + /// use std::sync::atomic::{AtomicIsize, Ordering}; /// - /// let some_int = AtomicInt::new(5); + /// let some_isize = AtomicIsize::new(5); /// - /// let value = some_int.swap(10, Ordering::Relaxed); + /// let value = some_isize.swap(10, Ordering::Relaxed); /// ``` #[inline] - pub fn swap(&self, val: int, order: Ordering) -> int { + pub fn swap(&self, val: isize, order: Ordering) -> isize { unsafe { atomic_swap(self.v.get(), val, order) } } - /// Stores a value into the int if the current value is the same as the expected value. + /// Stores a value into the isize if the current value is the same as the expected value. /// /// If the return value is equal to `old` then the value was updated. /// @@ -491,112 +491,112 @@ impl AtomicInt { /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicInt, Ordering}; + /// use std::sync::atomic::{AtomicIsize, Ordering}; /// - /// let some_int = AtomicInt::new(5); + /// let some_isize = AtomicIsize::new(5); /// - /// let value = some_int.compare_and_swap(5, 10, Ordering::Relaxed); + /// let value = some_isize.compare_and_swap(5, 10, Ordering::Relaxed); /// ``` #[inline] - pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int { + pub fn compare_and_swap(&self, old: isize, new: isize, order: Ordering) -> isize { unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) } } - /// Add an int to the current value, returning the previous value. + /// Add an isize to the current value, returning the previous value. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicInt, Ordering}; + /// use std::sync::atomic::{AtomicIsize, Ordering}; /// - /// let foo = AtomicInt::new(0); + /// let foo = AtomicIsize::new(0); /// assert_eq!(0, foo.fetch_add(10, Ordering::SeqCst)); /// assert_eq!(10, foo.load(Ordering::SeqCst)); /// ``` #[inline] - pub fn fetch_add(&self, val: int, order: Ordering) -> int { + pub fn fetch_add(&self, val: isize, order: Ordering) -> isize { unsafe { atomic_add(self.v.get(), val, order) } } - /// Subtract an int from the current value, returning the previous value. + /// Subtract an isize from the current value, returning the previous value. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicInt, Ordering}; + /// use std::sync::atomic::{AtomicIsize, Ordering}; /// - /// let foo = AtomicInt::new(0); + /// let foo = AtomicIsize::new(0); /// assert_eq!(0, foo.fetch_sub(10, Ordering::SeqCst)); /// assert_eq!(-10, foo.load(Ordering::SeqCst)); /// ``` #[inline] - pub fn fetch_sub(&self, val: int, order: Ordering) -> int { + pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize { unsafe { atomic_sub(self.v.get(), val, order) } } - /// Bitwise and with the current int, returning the previous value. + /// Bitwise and with the current isize, returning the previous value. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicInt, Ordering}; + /// use std::sync::atomic::{AtomicIsize, Ordering}; /// - /// let foo = AtomicInt::new(0b101101); + /// let foo = AtomicIsize::new(0b101101); /// assert_eq!(0b101101, foo.fetch_and(0b110011, Ordering::SeqCst)); /// assert_eq!(0b100001, foo.load(Ordering::SeqCst)); #[inline] - pub fn fetch_and(&self, val: int, order: Ordering) -> int { + pub fn fetch_and(&self, val: isize, order: Ordering) -> isize { unsafe { atomic_and(self.v.get(), val, order) } } - /// Bitwise or with the current int, returning the previous value. 
+ /// Bitwise or with the current isize, returning the previous value. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicInt, Ordering}; + /// use std::sync::atomic::{AtomicIsize, Ordering}; /// - /// let foo = AtomicInt::new(0b101101); + /// let foo = AtomicIsize::new(0b101101); /// assert_eq!(0b101101, foo.fetch_or(0b110011, Ordering::SeqCst)); /// assert_eq!(0b111111, foo.load(Ordering::SeqCst)); #[inline] - pub fn fetch_or(&self, val: int, order: Ordering) -> int { + pub fn fetch_or(&self, val: isize, order: Ordering) -> isize { unsafe { atomic_or(self.v.get(), val, order) } } - /// Bitwise xor with the current int, returning the previous value. + /// Bitwise xor with the current isize, returning the previous value. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicInt, Ordering}; + /// use std::sync::atomic::{AtomicIsize, Ordering}; /// - /// let foo = AtomicInt::new(0b101101); + /// let foo = AtomicIsize::new(0b101101); /// assert_eq!(0b101101, foo.fetch_xor(0b110011, Ordering::SeqCst)); /// assert_eq!(0b011110, foo.load(Ordering::SeqCst)); #[inline] - pub fn fetch_xor(&self, val: int, order: Ordering) -> int { + pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize { unsafe { atomic_xor(self.v.get(), val, order) } } } -#[unstable = "awaiting int/uint conventions, types may change"] -impl AtomicUint { - /// Creates a new `AtomicUint`. +#[stable] +impl AtomicUsize { + /// Creates a new `AtomicUsize`. /// /// # Examples /// /// ``` - /// use std::sync::atomic::AtomicUint; + /// use std::sync::atomic::AtomicUsize; /// - /// let atomic_forty_two = AtomicUint::new(42u); + /// let atomic_forty_two = AtomicUsize::new(42u); /// ``` #[inline] - pub fn new(v: uint) -> AtomicUint { - AtomicUint { v: UnsafeCell::new(v) } + pub fn new(v: usize) -> AtomicUsize { + AtomicUsize { v: UnsafeCell::new(v) } } - /// Loads a value from the uint. + /// Loads a value from the usize. /// /// `load` takes an `Ordering` argument which describes the memory ordering of this operation. /// @@ -607,58 +607,58 @@ impl AtomicUint { /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUint, Ordering}; + /// use std::sync::atomic::{AtomicUsize, Ordering}; /// - /// let some_uint = AtomicUint::new(5); + /// let some_usize = AtomicUsize::new(5); /// - /// let value = some_uint.load(Ordering::Relaxed); + /// let value = some_usize.load(Ordering::Relaxed); /// ``` #[inline] - pub fn load(&self, order: Ordering) -> uint { - unsafe { atomic_load(self.v.get() as *const uint, order) } + pub fn load(&self, order: Ordering) -> usize { + unsafe { atomic_load(self.v.get() as *const usize, order) } } - /// Stores a value into the uint. + /// Stores a value into the usize. /// /// `store` takes an `Ordering` argument which describes the memory ordering of this operation. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUint, Ordering}; + /// use std::sync::atomic::{AtomicUsize, Ordering}; /// - /// let some_uint = AtomicUint::new(5); + /// let some_usize = AtomicUsize::new(5); /// - /// some_uint.store(10, Ordering::Relaxed); + /// some_usize.store(10, Ordering::Relaxed); /// ``` /// /// # Panics /// /// Panics if `order` is `Acquire` or `AcqRel`. #[inline] - pub fn store(&self, val: uint, order: Ordering) { + pub fn store(&self, val: usize, order: Ordering) { unsafe { atomic_store(self.v.get(), val, order); } } - /// Stores a value into the uint, returning the old value. + /// Stores a value into the usize, returning the old value. 
/// /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUint, Ordering}; + /// use std::sync::atomic::{AtomicUsize, Ordering}; /// - /// let some_uint = AtomicUint::new(5); + /// let some_usize= AtomicUsize::new(5); /// - /// let value = some_uint.swap(10, Ordering::Relaxed); + /// let value = some_usize.swap(10, Ordering::Relaxed); /// ``` #[inline] - pub fn swap(&self, val: uint, order: Ordering) -> uint { + pub fn swap(&self, val: usize, order: Ordering) -> usize { unsafe { atomic_swap(self.v.get(), val, order) } } - /// Stores a value into the uint if the current value is the same as the expected value. + /// Stores a value into the usize if the current value is the same as the expected value. /// /// If the return value is equal to `old` then the value was updated. /// @@ -668,91 +668,91 @@ impl AtomicUint { /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUint, Ordering}; + /// use std::sync::atomic::{AtomicUsize, Ordering}; /// - /// let some_uint = AtomicUint::new(5); + /// let some_usize = AtomicUsize::new(5); /// - /// let value = some_uint.compare_and_swap(5, 10, Ordering::Relaxed); + /// let value = some_usize.compare_and_swap(5, 10, Ordering::Relaxed); /// ``` #[inline] - pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint { + pub fn compare_and_swap(&self, old: usize, new: usize, order: Ordering) -> usize { unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) } } - /// Add to the current uint, returning the previous value. + /// Add to the current usize, returning the previous value. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUint, Ordering}; + /// use std::sync::atomic::{AtomicUsize, Ordering}; /// - /// let foo = AtomicUint::new(0); + /// let foo = AtomicUsize::new(0); /// assert_eq!(0, foo.fetch_add(10, Ordering::SeqCst)); /// assert_eq!(10, foo.load(Ordering::SeqCst)); /// ``` #[inline] - pub fn fetch_add(&self, val: uint, order: Ordering) -> uint { + pub fn fetch_add(&self, val: usize, order: Ordering) -> usize { unsafe { atomic_add(self.v.get(), val, order) } } - /// Subtract from the current uint, returning the previous value. + /// Subtract from the current usize, returning the previous value. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUint, Ordering}; + /// use std::sync::atomic::{AtomicUsize, Ordering}; /// - /// let foo = AtomicUint::new(10); + /// let foo = AtomicUsize::new(10); /// assert_eq!(10, foo.fetch_sub(10, Ordering::SeqCst)); /// assert_eq!(0, foo.load(Ordering::SeqCst)); /// ``` #[inline] - pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint { + pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize { unsafe { atomic_sub(self.v.get(), val, order) } } - /// Bitwise and with the current uint, returning the previous value. + /// Bitwise and with the current usize, returning the previous value. 
/// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUint, Ordering}; + /// use std::sync::atomic::{AtomicUsize, Ordering}; /// - /// let foo = AtomicUint::new(0b101101); + /// let foo = AtomicUsize::new(0b101101); /// assert_eq!(0b101101, foo.fetch_and(0b110011, Ordering::SeqCst)); /// assert_eq!(0b100001, foo.load(Ordering::SeqCst)); #[inline] - pub fn fetch_and(&self, val: uint, order: Ordering) -> uint { + pub fn fetch_and(&self, val: usize, order: Ordering) -> usize { unsafe { atomic_and(self.v.get(), val, order) } } - /// Bitwise or with the current uint, returning the previous value. + /// Bitwise or with the current usize, returning the previous value. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUint, Ordering}; + /// use std::sync::atomic::{AtomicUsize, Ordering}; /// - /// let foo = AtomicUint::new(0b101101); + /// let foo = AtomicUsize::new(0b101101); /// assert_eq!(0b101101, foo.fetch_or(0b110011, Ordering::SeqCst)); /// assert_eq!(0b111111, foo.load(Ordering::SeqCst)); #[inline] - pub fn fetch_or(&self, val: uint, order: Ordering) -> uint { + pub fn fetch_or(&self, val: usize, order: Ordering) -> usize { unsafe { atomic_or(self.v.get(), val, order) } } - /// Bitwise xor with the current uint, returning the previous value. + /// Bitwise xor with the current usize, returning the previous value. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUint, Ordering}; + /// use std::sync::atomic::{AtomicUsize, Ordering}; /// - /// let foo = AtomicUint::new(0b101101); + /// let foo = AtomicUsize::new(0b101101); /// assert_eq!(0b101101, foo.fetch_xor(0b110011, Ordering::SeqCst)); /// assert_eq!(0b011110, foo.load(Ordering::SeqCst)); #[inline] - pub fn fetch_xor(&self, val: uint, order: Ordering) -> uint { + pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize { unsafe { atomic_xor(self.v.get(), val, order) } } } @@ -771,7 +771,7 @@ impl AtomicPtr { #[inline] #[stable] pub fn new(p: *mut T) -> AtomicPtr { - AtomicPtr { p: UnsafeCell::new(p as uint) } + AtomicPtr { p: UnsafeCell::new(p as usize) } } /// Loads a value from the pointer. @@ -823,7 +823,7 @@ impl AtomicPtr { #[inline] #[stable] pub fn store(&self, ptr: *mut T, order: Ordering) { - unsafe { atomic_store(self.p.get(), ptr as uint, order); } + unsafe { atomic_store(self.p.get(), ptr as usize, order); } } /// Stores a value into the pointer, returning the old value. @@ -845,7 +845,7 @@ impl AtomicPtr { #[inline] #[stable] pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { - unsafe { atomic_swap(self.p.get(), ptr as uint, order) as *mut T } + unsafe { atomic_swap(self.p.get(), ptr as usize, order) as *mut T } } /// Stores a value into the pointer if the current value is the same as the expected value. 
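The module-level spinlock example above compiles unchanged against the renamed type; for reference, a self-contained version of it follows (written against plain `std::thread::spawn` rather than the era's `Thread::spawn`, so treat it as a sketch of the same idea rather than the exact doc example):

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    // 1 = locked, 0 = unlocked, as in the module-level doc example.
    let spinlock = Arc::new(AtomicUsize::new(1));

    let spinlock_clone = spinlock.clone();
    let handle = thread::spawn(move || {
        // Release the "lock" from the other thread.
        spinlock_clone.store(0, Ordering::SeqCst);
    });

    // Spin until the store above becomes visible.
    while spinlock.load(Ordering::SeqCst) != 0 {}

    handle.join().unwrap();
}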
@@ -872,8 +872,8 @@ impl AtomicPtr { #[stable] pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { unsafe { - atomic_compare_and_swap(self.p.get(), old as uint, - new as uint, order) as *mut T + atomic_compare_and_swap(self.p.get(), old as usize, + new as usize, order) as *mut T } } } @@ -1035,3 +1035,134 @@ pub fn fence(order: Ordering) { } } } + +#[deprecated="renamed to AtomicIsize"] +#[allow(missing_docs)] +pub struct AtomicInt { + v: UnsafeCell, +} + +unsafe impl Sync for AtomicInt {} + +#[deprecated="renamed to AtomicUsize"] +#[allow(missing_docs)] +pub struct AtomicUint { + v: UnsafeCell, +} + +unsafe impl Sync for AtomicUint {} + +#[deprecated="use ATOMIC_ISIZE_INIT instead"] +#[allow(missing_docs, deprecated)] +pub const ATOMIC_INT_INIT: AtomicInt = + AtomicInt { v: UnsafeCell { value: 0 } }; +#[deprecated="use ATOMIC_USIZE_INIT instead"] +#[allow(missing_docs, deprecated)] +pub const ATOMIC_UINT_INIT: AtomicUint = + AtomicUint { v: UnsafeCell { value: 0, } }; + +#[allow(missing_docs, deprecated)] +impl AtomicInt { + #[inline] + pub fn new(v: int) -> AtomicInt { + AtomicInt {v: UnsafeCell::new(v)} + } + + #[inline] + pub fn load(&self, order: Ordering) -> int { + unsafe { atomic_load(self.v.get() as *const int, order) } + } + + #[inline] + pub fn store(&self, val: int, order: Ordering) { + unsafe { atomic_store(self.v.get(), val, order); } + } + + #[inline] + pub fn swap(&self, val: int, order: Ordering) -> int { + unsafe { atomic_swap(self.v.get(), val, order) } + } + + #[inline] + pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int { + unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) } + } + + #[inline] + pub fn fetch_add(&self, val: int, order: Ordering) -> int { + unsafe { atomic_add(self.v.get(), val, order) } + } + + #[inline] + pub fn fetch_sub(&self, val: int, order: Ordering) -> int { + unsafe { atomic_sub(self.v.get(), val, order) } + } + + #[inline] + pub fn fetch_and(&self, val: int, order: Ordering) -> int { + unsafe { atomic_and(self.v.get(), val, order) } + } + + #[inline] + pub fn fetch_or(&self, val: int, order: Ordering) -> int { + unsafe { atomic_or(self.v.get(), val, order) } + } + + #[inline] + pub fn fetch_xor(&self, val: int, order: Ordering) -> int { + unsafe { atomic_xor(self.v.get(), val, order) } + } +} + +#[allow(missing_docs, deprecated)] +impl AtomicUint { + #[inline] + pub fn new(v: uint) -> AtomicUint { + AtomicUint { v: UnsafeCell::new(v) } + } + + #[inline] + pub fn load(&self, order: Ordering) -> uint { + unsafe { atomic_load(self.v.get() as *const uint, order) } + } + + #[inline] + pub fn store(&self, val: uint, order: Ordering) { + unsafe { atomic_store(self.v.get(), val, order); } + } + + #[inline] + pub fn swap(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_swap(self.v.get(), val, order) } + } + + #[inline] + pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint { + unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) } + } + + #[inline] + pub fn fetch_add(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_add(self.v.get(), val, order) } + } + + #[inline] + pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_sub(self.v.get(), val, order) } + } + + #[inline] + pub fn fetch_and(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_and(self.v.get(), val, order) } + } + + #[inline] + pub fn fetch_or(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_or(self.v.get(), 
val, order) } + } + + #[inline] + pub fn fetch_xor(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_xor(self.v.get(), val, order) } + } +} diff --git a/src/libcoretest/atomic.rs b/src/libcoretest/atomic.rs index f8e943ec9f6..8e3c7f4595a 100644 --- a/src/libcoretest/atomic.rs +++ b/src/libcoretest/atomic.rs @@ -30,49 +30,49 @@ fn bool_and() { #[test] fn uint_and() { - let x = AtomicUint::new(0xf731); + let x = AtomicUsize::new(0xf731); assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731); assert_eq!(x.load(SeqCst), 0xf731 & 0x137f); } #[test] fn uint_or() { - let x = AtomicUint::new(0xf731); + let x = AtomicUsize::new(0xf731); assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731); assert_eq!(x.load(SeqCst), 0xf731 | 0x137f); } #[test] fn uint_xor() { - let x = AtomicUint::new(0xf731); + let x = AtomicUsize::new(0xf731); assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731); assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f); } #[test] fn int_and() { - let x = AtomicInt::new(0xf731); + let x = AtomicIsize::new(0xf731); assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731); assert_eq!(x.load(SeqCst), 0xf731 & 0x137f); } #[test] fn int_or() { - let x = AtomicInt::new(0xf731); + let x = AtomicIsize::new(0xf731); assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731); assert_eq!(x.load(SeqCst), 0xf731 | 0x137f); } #[test] fn int_xor() { - let x = AtomicInt::new(0xf731); + let x = AtomicIsize::new(0xf731); assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731); assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f); } static S_BOOL : AtomicBool = ATOMIC_BOOL_INIT; -static S_INT : AtomicInt = ATOMIC_INT_INIT; -static S_UINT : AtomicUint = ATOMIC_UINT_INIT; +static S_INT : AtomicIsize = ATOMIC_ISIZE_INIT; +static S_UINT : AtomicUsize = ATOMIC_USIZE_INIT; #[test] fn static_init() { diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs index 67c14dc2dc1..6de466eb20b 100644 --- a/src/libstd/io/test.rs +++ b/src/libstd/io/test.rs @@ -15,18 +15,18 @@ use prelude::v1::*; use libc; use os; use std::io::net::ip::*; -use sync::atomic::{AtomicUint, ATOMIC_UINT_INIT, Ordering}; +use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; /// Get a port number, starting at 9600, for use in tests pub fn next_test_port() -> u16 { - static NEXT_OFFSET: AtomicUint = ATOMIC_UINT_INIT; + static NEXT_OFFSET: AtomicUsize = ATOMIC_USIZE_INIT; base_port() + NEXT_OFFSET.fetch_add(1, Ordering::Relaxed) as u16 } // iOS has a pretty long tmpdir path which causes pipe creation // to like: invalid argument: path must be smaller than SUN_LEN fn next_test_unix_socket() -> String { - static COUNT: AtomicUint = ATOMIC_UINT_INIT; + static COUNT: AtomicUsize = ATOMIC_USIZE_INIT; // base port and pid are an attempt to be unique between multiple // test-runners of different configurations running on one // buildbot, the count is to be unique within this executable. 
diff --git a/src/libstd/os.rs b/src/libstd/os.rs index 6e3949b9e22..588f729d800 100644 --- a/src/libstd/os.rs +++ b/src/libstd/os.rs @@ -54,7 +54,7 @@ use result::Result::{Err, Ok}; use slice::{AsSlice, SliceExt}; use str::{Str, StrExt}; use string::{String, ToString}; -use sync::atomic::{AtomicInt, ATOMIC_INT_INIT, Ordering}; +use sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT, Ordering}; use vec::Vec; #[cfg(unix)] use ffi::{self, CString}; @@ -590,7 +590,7 @@ pub fn last_os_error() -> String { error_string(errno() as uint) } -static EXIT_STATUS: AtomicInt = ATOMIC_INT_INIT; +static EXIT_STATUS: AtomicIsize = ATOMIC_ISIZE_INIT; /// Sets the process exit code /// diff --git a/src/libstd/rt/backtrace.rs b/src/libstd/rt/backtrace.rs index bb0b6fe804b..f2d66e1a4d7 100644 --- a/src/libstd/rt/backtrace.rs +++ b/src/libstd/rt/backtrace.rs @@ -22,7 +22,7 @@ pub use sys::backtrace::write; // For now logging is turned off by default, and this function checks to see // whether the magical environment variable is present to see if it's turned on. pub fn log_enabled() -> bool { - static ENABLED: atomic::AtomicInt = atomic::ATOMIC_INT_INIT; + static ENABLED: atomic::AtomicIsize = atomic::ATOMIC_ISIZE_INIT; match ENABLED.load(Ordering::SeqCst) { 1 => return false, 2 => return true, diff --git a/src/libstd/rt/unwind.rs b/src/libstd/rt/unwind.rs index 4cd0b29688a..6326e4c08f1 100644 --- a/src/libstd/rt/unwind.rs +++ b/src/libstd/rt/unwind.rs @@ -83,16 +83,16 @@ pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: uint); // // For more information, see below. const MAX_CALLBACKS: uint = 16; -static CALLBACKS: [atomic::AtomicUint; MAX_CALLBACKS] = - [atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT, - atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT, - atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT, - atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT, - atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT, - atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT, - atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT, - atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT]; -static CALLBACK_CNT: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT; +static CALLBACKS: [atomic::AtomicUsize; MAX_CALLBACKS] = + [atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, + atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, + atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, + atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, + atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, + atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, + atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, + atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT]; +static CALLBACK_CNT: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT; thread_local! 
{ static PANICKING: Cell = Cell::new(false) } diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs index c076f0a7c6c..be293e8d4cb 100644 --- a/src/libstd/rt/util.rs +++ b/src/libstd/rt/util.rs @@ -46,7 +46,7 @@ pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool { } pub fn min_stack() -> uint { - static MIN: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT; + static MIN: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT; match MIN.load(Ordering::SeqCst) { 0 => {} n => return n - 1, diff --git a/src/libstd/sync/condvar.rs b/src/libstd/sync/condvar.rs index 3c0ae71255d..bcd5f56a353 100644 --- a/src/libstd/sync/condvar.rs +++ b/src/libstd/sync/condvar.rs @@ -10,7 +10,7 @@ use prelude::v1::*; -use sync::atomic::{AtomicUint, Ordering, ATOMIC_UINT_INIT}; +use sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; use sync::poison::{self, LockResult}; use sys_common::condvar as sys; use sys_common::mutex as sys_mutex; @@ -78,7 +78,7 @@ unsafe impl Sync for Condvar {} #[unstable = "may be merged with Condvar in the future"] pub struct StaticCondvar { inner: sys::Condvar, - mutex: AtomicUint, + mutex: AtomicUsize, } unsafe impl Send for StaticCondvar {} @@ -88,7 +88,7 @@ unsafe impl Sync for StaticCondvar {} #[unstable = "may be merged with Condvar in the future"] pub const CONDVAR_INIT: StaticCondvar = StaticCondvar { inner: sys::CONDVAR_INIT, - mutex: ATOMIC_UINT_INIT, + mutex: ATOMIC_USIZE_INIT, }; impl Condvar { @@ -99,7 +99,7 @@ impl Condvar { Condvar { inner: box StaticCondvar { inner: unsafe { sys::Condvar::new() }, - mutex: AtomicUint::new(0), + mutex: AtomicUsize::new(0), } } } diff --git a/src/libstd/sync/mpsc/oneshot.rs b/src/libstd/sync/mpsc/oneshot.rs index 5c2331d0f2e..ca667e65e30 100644 --- a/src/libstd/sync/mpsc/oneshot.rs +++ b/src/libstd/sync/mpsc/oneshot.rs @@ -42,7 +42,7 @@ use core::prelude::*; use sync::mpsc::Receiver; use sync::mpsc::blocking::{self, SignalToken}; use core::mem; -use sync::atomic::{AtomicUint, Ordering}; +use sync::atomic::{AtomicUsize, Ordering}; // Various states you can find a port in. const EMPTY: uint = 0; // initial state: no data, no blocked reciever @@ -56,7 +56,7 @@ const DISCONNECTED: uint = 2; // channel is disconnected OR upgraded pub struct Packet { // Internal state of the chan/port pair (stores the blocked task as well) - state: AtomicUint, + state: AtomicUsize, // One-shot data slot location data: Option, // when used for the second time, a oneshot channel must be upgraded, and @@ -93,7 +93,7 @@ impl Packet { Packet { data: None, upgrade: NothingSent, - state: AtomicUint::new(EMPTY), + state: AtomicUsize::new(EMPTY), } } diff --git a/src/libstd/sync/mpsc/shared.rs b/src/libstd/sync/mpsc/shared.rs index 4295d116aed..c97af4c6bca 100644 --- a/src/libstd/sync/mpsc/shared.rs +++ b/src/libstd/sync/mpsc/shared.rs @@ -25,7 +25,7 @@ use core::prelude::*; use core::cmp; use core::int; -use sync::atomic::{AtomicUint, AtomicInt, AtomicBool, Ordering}; +use sync::atomic::{AtomicUsize, AtomicIsize, AtomicBool, Ordering}; use sync::mpsc::blocking::{self, SignalToken}; use sync::mpsc::mpsc_queue as mpsc; use sync::mpsc::select::StartResult::*; @@ -42,17 +42,17 @@ const MAX_STEALS: int = 1 << 20; pub struct Packet { queue: mpsc::Queue, - cnt: AtomicInt, // How many items are on this channel + cnt: AtomicIsize, // How many items are on this channel steals: int, // How many times has a port received without blocking? 
- to_wake: AtomicUint, // SignalToken for wake up + to_wake: AtomicUsize, // SignalToken for wake up // The number of channels which are currently using this packet. - channels: AtomicInt, + channels: AtomicIsize, // See the discussion in Port::drop and the channel send methods for what // these are used for port_dropped: AtomicBool, - sender_drain: AtomicInt, + sender_drain: AtomicIsize, // this lock protects various portions of this implementation during // select() @@ -70,12 +70,12 @@ impl Packet { pub fn new() -> Packet { let p = Packet { queue: mpsc::Queue::new(), - cnt: AtomicInt::new(0), + cnt: AtomicIsize::new(0), steals: 0, - to_wake: AtomicUint::new(0), - channels: AtomicInt::new(2), + to_wake: AtomicUsize::new(0), + channels: AtomicIsize::new(2), port_dropped: AtomicBool::new(false), - sender_drain: AtomicInt::new(0), + sender_drain: AtomicIsize::new(0), select_lock: Mutex::new(()), }; return p; diff --git a/src/libstd/sync/mpsc/spsc_queue.rs b/src/libstd/sync/mpsc/spsc_queue.rs index 46c69f6f547..34fd6bb70dc 100644 --- a/src/libstd/sync/mpsc/spsc_queue.rs +++ b/src/libstd/sync/mpsc/spsc_queue.rs @@ -41,7 +41,7 @@ use alloc::boxed::Box; use core::mem; use core::cell::UnsafeCell; -use sync::atomic::{AtomicPtr, AtomicUint, Ordering}; +use sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; // Node within the linked list queue of messages to send struct Node { @@ -69,8 +69,8 @@ pub struct Queue { // Cache maintenance fields. Additions and subtractions are stored // separately in order to allow them to use nonatomic addition/subtraction. cache_bound: uint, - cache_additions: AtomicUint, - cache_subtractions: AtomicUint, + cache_additions: AtomicUsize, + cache_subtractions: AtomicUsize, } unsafe impl Send for Queue { } @@ -117,8 +117,8 @@ impl Queue { first: UnsafeCell::new(n1), tail_copy: UnsafeCell::new(n1), cache_bound: bound, - cache_additions: AtomicUint::new(0), - cache_subtractions: AtomicUint::new(0), + cache_additions: AtomicUsize::new(0), + cache_subtractions: AtomicUsize::new(0), } } diff --git a/src/libstd/sync/mpsc/stream.rs b/src/libstd/sync/mpsc/stream.rs index f4b20c7b742..a03add8c532 100644 --- a/src/libstd/sync/mpsc/stream.rs +++ b/src/libstd/sync/mpsc/stream.rs @@ -28,7 +28,7 @@ use core::cmp; use core::int; use thread::Thread; -use sync::atomic::{AtomicInt, AtomicUint, Ordering, AtomicBool}; +use sync::atomic::{AtomicIsize, AtomicUsize, Ordering, AtomicBool}; use sync::mpsc::Receiver; use sync::mpsc::blocking::{self, SignalToken}; use sync::mpsc::spsc_queue as spsc; @@ -42,9 +42,9 @@ const MAX_STEALS: int = 1 << 20; pub struct Packet { queue: spsc::Queue>, // internal queue for all message - cnt: AtomicInt, // How many items are on this channel + cnt: AtomicIsize, // How many items are on this channel steals: int, // How many times has a port received without blocking? - to_wake: AtomicUint, // SignalToken for the blocked thread to wake up + to_wake: AtomicUsize, // SignalToken for the blocked thread to wake up port_dropped: AtomicBool, // flag if the channel has been destroyed. 
} @@ -79,9 +79,9 @@ impl Packet { Packet { queue: unsafe { spsc::Queue::new(128) }, - cnt: AtomicInt::new(0), + cnt: AtomicIsize::new(0), steals: 0, - to_wake: AtomicUint::new(0), + to_wake: AtomicUsize::new(0), port_dropped: AtomicBool::new(false), } diff --git a/src/libstd/sync/mpsc/sync.rs b/src/libstd/sync/mpsc/sync.rs index b2cc807eb11..30304dffb75 100644 --- a/src/libstd/sync/mpsc/sync.rs +++ b/src/libstd/sync/mpsc/sync.rs @@ -41,7 +41,7 @@ use self::Blocker::*; use vec::Vec; use core::mem; -use sync::atomic::{Ordering, AtomicUint}; +use sync::atomic::{Ordering, AtomicUsize}; use sync::mpsc::blocking::{self, WaitToken, SignalToken}; use sync::mpsc::select::StartResult::{self, Installed, Abort}; use sync::{Mutex, MutexGuard}; @@ -49,7 +49,7 @@ use sync::{Mutex, MutexGuard}; pub struct Packet { /// Only field outside of the mutex. Just done for kicks, but mainly because /// the other shared channel already had the code implemented - channels: AtomicUint, + channels: AtomicUsize, lock: Mutex>, } @@ -138,7 +138,7 @@ fn wakeup(token: SignalToken, guard: MutexGuard>) { impl Packet { pub fn new(cap: uint) -> Packet { Packet { - channels: AtomicUint::new(1), + channels: AtomicUsize::new(1), lock: Mutex::new(State { disconnected: false, blocker: NoneBlocked, diff --git a/src/libstd/sync/once.rs b/src/libstd/sync/once.rs index 3bf2ae277e0..6231a91833d 100644 --- a/src/libstd/sync/once.rs +++ b/src/libstd/sync/once.rs @@ -17,7 +17,7 @@ use int; use marker::Sync; use mem::drop; use ops::FnOnce; -use sync::atomic::{AtomicInt, Ordering, ATOMIC_INT_INIT}; +use sync::atomic::{AtomicIsize, Ordering, ATOMIC_ISIZE_INIT}; use sync::{StaticMutex, MUTEX_INIT}; /// A synchronization primitive which can be used to run a one-time global @@ -39,8 +39,8 @@ use sync::{StaticMutex, MUTEX_INIT}; #[stable] pub struct Once { mutex: StaticMutex, - cnt: AtomicInt, - lock_cnt: AtomicInt, + cnt: AtomicIsize, + lock_cnt: AtomicIsize, } unsafe impl Sync for Once {} @@ -49,8 +49,8 @@ unsafe impl Sync for Once {} #[stable] pub const ONCE_INIT: Once = Once { mutex: MUTEX_INIT, - cnt: ATOMIC_INT_INIT, - lock_cnt: ATOMIC_INT_INIT, + cnt: ATOMIC_ISIZE_INIT, + lock_cnt: ATOMIC_ISIZE_INIT, }; impl Once { diff --git a/src/libstd/sys/common/thread_local.rs b/src/libstd/sys/common/thread_local.rs index e9af796c674..edd16e0c062 100644 --- a/src/libstd/sys/common/thread_local.rs +++ b/src/libstd/sys/common/thread_local.rs @@ -58,7 +58,7 @@ use prelude::v1::*; -use sync::atomic::{self, AtomicUint, Ordering}; +use sync::atomic::{self, AtomicUsize, Ordering}; use sync::{Mutex, Once, ONCE_INIT}; use sys::thread_local as imp; @@ -97,7 +97,7 @@ pub struct StaticKey { /// Inner contents of `StaticKey`, created by the `INIT_INNER` constant. pub struct StaticKeyInner { - key: AtomicUint, + key: AtomicUsize, } /// A type for a safely managed OS-based TLS slot. @@ -137,7 +137,7 @@ pub const INIT: StaticKey = StaticKey { /// /// This value allows specific configuration of the destructor for a TLS key. 
pub const INIT_INNER: StaticKeyInner = StaticKeyInner { - key: atomic::ATOMIC_UINT_INIT, + key: atomic::ATOMIC_USIZE_INIT, }; static INIT_KEYS: Once = ONCE_INIT; diff --git a/src/libstd/sys/unix/timer.rs b/src/libstd/sys/unix/timer.rs index 62f3242a206..c0c231a9e73 100644 --- a/src/libstd/sys/unix/timer.rs +++ b/src/libstd/sys/unix/timer.rs @@ -211,7 +211,7 @@ impl Timer { // instead of () HELPER.boot(|| {}, helper); - static ID: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT; + static ID: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT; let id = ID.fetch_add(1, Ordering::Relaxed); Ok(Timer { id: id, diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs index 1def99a3741..fcdd4ff7c54 100644 --- a/src/libstd/sys/windows/mutex.rs +++ b/src/libstd/sys/windows/mutex.rs @@ -10,7 +10,7 @@ use prelude::v1::*; -use sync::atomic::{AtomicUint, ATOMIC_UINT_INIT, Ordering}; +use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; use alloc::{self, heap}; use libc::DWORD; @@ -18,9 +18,9 @@ use sys::sync as ffi; const SPIN_COUNT: DWORD = 4000; -pub struct Mutex { inner: AtomicUint } +pub struct Mutex { inner: AtomicUsize } -pub const MUTEX_INIT: Mutex = Mutex { inner: ATOMIC_UINT_INIT }; +pub const MUTEX_INIT: Mutex = Mutex { inner: ATOMIC_USIZE_INIT }; unsafe impl Sync for Mutex {} @@ -32,7 +32,7 @@ pub unsafe fn raw(m: &Mutex) -> ffi::LPCRITICAL_SECTION { impl Mutex { #[inline] pub unsafe fn new() -> Mutex { - Mutex { inner: AtomicUint::new(init_lock() as uint) } + Mutex { inner: AtomicUsize::new(init_lock() as uint) } } #[inline] pub unsafe fn lock(&self) { diff --git a/src/test/auxiliary/issue-17718.rs b/src/test/auxiliary/issue-17718.rs index 689610d799e..cbe56b00c13 100644 --- a/src/test/auxiliary/issue-17718.rs +++ b/src/test/auxiliary/issue-17718.rs @@ -11,12 +11,12 @@ use std::sync::atomic; pub const C1: uint = 1; -pub const C2: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT; +pub const C2: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT; pub const C3: fn() = foo; pub const C4: uint = C1 * C1 + C1 / C1; pub const C5: &'static uint = &C4; pub static S1: uint = 3; -pub static S2: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT; +pub static S2: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT; fn foo() {} diff --git a/src/test/compile-fail/std-uncopyable-atomics.rs b/src/test/compile-fail/std-uncopyable-atomics.rs index f27fa6470a6..9807fc43140 100644 --- a/src/test/compile-fail/std-uncopyable-atomics.rs +++ b/src/test/compile-fail/std-uncopyable-atomics.rs @@ -17,9 +17,9 @@ use std::ptr; fn main() { let x = ATOMIC_BOOL_INIT; let x = *&x; //~ ERROR: cannot move out of borrowed content - let x = ATOMIC_INT_INIT; + let x = ATOMIC_ISIZE_INIT; let x = *&x; //~ ERROR: cannot move out of borrowed content - let x = ATOMIC_UINT_INIT; + let x = ATOMIC_USIZE_INIT; let x = *&x; //~ ERROR: cannot move out of borrowed content let x: AtomicPtr = AtomicPtr::new(ptr::null_mut()); let x = *&x; //~ ERROR: cannot move out of borrowed content diff --git a/src/test/run-pass/issue-17718.rs b/src/test/run-pass/issue-17718.rs index 44cf0dd8b8e..e4782e28928 100644 --- a/src/test/run-pass/issue-17718.rs +++ b/src/test/run-pass/issue-17718.rs @@ -12,10 +12,10 @@ extern crate "issue-17718" as other; -use std::sync::atomic::{AtomicUint, ATOMIC_UINT_INIT, Ordering}; +use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; const C1: uint = 1; -const C2: AtomicUint = ATOMIC_UINT_INIT; +const C2: AtomicUsize = ATOMIC_USIZE_INIT; const C3: fn() = foo; const C4: uint = C1 * C1 + C1 / C1; const 
C5: &'static uint = &C4; @@ -25,7 +25,7 @@ const C6: uint = { }; static S1: uint = 3; -static S2: AtomicUint = ATOMIC_UINT_INIT; +static S2: AtomicUsize = ATOMIC_USIZE_INIT; mod test { static A: uint = 4; diff --git a/src/test/run-pass/tcp-accept-stress.rs b/src/test/run-pass/tcp-accept-stress.rs index cad71732034..c7149fa503b 100644 --- a/src/test/run-pass/tcp-accept-stress.rs +++ b/src/test/run-pass/tcp-accept-stress.rs @@ -15,7 +15,7 @@ use std::io::{TcpListener, Listener, Acceptor, EndOfFile, TcpStream}; use std::sync::Arc; -use std::sync::atomic::{AtomicUint, Ordering}; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::mpsc::channel; use std::thread::Thread; @@ -30,7 +30,7 @@ fn test() { let mut l = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = l.socket_name().unwrap(); let mut a = l.listen().unwrap(); - let cnt = Arc::new(AtomicUint::new(0)); + let cnt = Arc::new(AtomicUsize::new(0)); let (srv_tx, srv_rx) = channel(); let (cli_tx, cli_rx) = channel(); diff --git a/src/test/run-pass/vector-sort-panic-safe.rs b/src/test/run-pass/vector-sort-panic-safe.rs index 29bf82a81d6..9e74c6da548 100644 --- a/src/test/run-pass/vector-sort-panic-safe.rs +++ b/src/test/run-pass/vector-sort-panic-safe.rs @@ -8,27 +8,29 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::sync::atomic::{AtomicUint, ATOMIC_UINT_INIT, Ordering}; +use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; use std::rand::{thread_rng, Rng, Rand}; use std::thread::Thread; const REPEATS: uint = 5; const MAX_LEN: uint = 32; -static drop_counts: [AtomicUint; MAX_LEN] = - // FIXME #5244: AtomicUint is not Copy. +static drop_counts: [AtomicUsize; MAX_LEN] = + // FIXME #5244: AtomicUsize is not Copy. [ - ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, - ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, - ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, - ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, - - ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, - ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, - ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, - ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, + ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, + ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, + ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, + ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, + ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, + ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, + ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, + ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, + ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, + ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, + ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ]; -static creation_count: AtomicUint = ATOMIC_UINT_INIT; +static creation_count: AtomicUsize = ATOMIC_USIZE_INIT; #[derive(Clone, PartialEq, PartialOrd, Eq, Ord)] struct DropCounter { x: uint, creation_id: uint }
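The `drop_counts` array above has to be written out element by element because `AtomicUsize` is not `Copy` (the FIXME #5244 note), so the `[INIT; N]` repeat form is rejected. The pattern the test builds on is a per-element drop counter bumped from `Drop`; a minimal sketch with hypothetical names and a shortened array (using `AtomicUsize::new`, which later became usable in statics):

use std::sync::atomic::{AtomicUsize, Ordering};

// One counter per element id; a simplified stand-in for the test's `drop_counts`.
static DROP_COUNTS: [AtomicUsize; 4] = [
    AtomicUsize::new(0), AtomicUsize::new(0),
    AtomicUsize::new(0), AtomicUsize::new(0),
];

struct DropCounter { id: usize }

impl Drop for DropCounter {
    fn drop(&mut self) {
        // Record every drop; the real test checks that each element is dropped
        // exactly once even when a comparison closure panics mid-sort.
        DROP_COUNTS[self.id].fetch_add(1, Ordering::SeqCst);
    }
}

fn main() {
    {
        let _v = vec![DropCounter { id: 0 }, DropCounter { id: 1 }];
    } // `_v` goes out of scope here and both elements are dropped.
    assert_eq!(DROP_COUNTS[0].load(Ordering::SeqCst), 1);
    assert_eq!(DROP_COUNTS[1].load(Ordering::SeqCst), 1);
}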