From 71448d7c37b84a0d4713441bc4c9ef6d851df62b Mon Sep 17 00:00:00 2001
From: Huon Wilson
Date: Tue, 27 Aug 2013 20:00:57 +1000
Subject: [PATCH] Rename UnsafeAtomicRcBox to UnsafeArc. Fixes #7674.

---
 src/libextra/arc.rs            | 14 +++---
 src/libextra/sync.rs           |  6 +--
 src/libstd/rt/comm.rs          | 10 ++--
 src/libstd/rt/kill.rs          | 14 +++---
 src/libstd/rt/message_queue.rs |  6 +--
 src/libstd/rt/mod.rs           |  4 +-
 src/libstd/rt/sleeper_list.rs  |  6 +--
 src/libstd/unstable/sync.rs    | 84 +++++++++++++++++-----------------
 8 files changed, 72 insertions(+), 72 deletions(-)

diff --git a/src/libextra/arc.rs b/src/libextra/arc.rs
index 1df69945a60..792fb7f9ca7 100644
--- a/src/libextra/arc.rs
+++ b/src/libextra/arc.rs
@@ -44,7 +44,7 @@ use sync;
 use sync::{Mutex, RWLock};
 
 use std::cast;
-use std::unstable::sync::UnsafeAtomicRcBox;
+use std::unstable::sync::UnsafeArc;
 use std::task;
 use std::borrow;
 
@@ -108,7 +108,7 @@ impl<'self> Condvar<'self> {
 ****************************************************************************/
 
 /// An atomically reference counted wrapper for shared immutable state.
-pub struct Arc<T> { priv x: UnsafeAtomicRcBox<T> }
+pub struct Arc<T> { priv x: UnsafeArc<T> }
 
 
 /**
@@ -118,7 +118,7 @@ pub struct Arc<T> { priv x: UnsafeAtomicRcBox<T> }
 impl<T:Freeze + Send> Arc<T> {
     /// Create an atomically reference counted wrapper.
     pub fn new(data: T) -> Arc<T> {
-        Arc { x: UnsafeAtomicRcBox::new(data) }
+        Arc { x: UnsafeArc::new(data) }
     }
 
     pub fn get<'a>(&'a self) -> &'a T {
@@ -160,7 +160,7 @@ impl<T:Freeze + Send> Clone for Arc<T> {
 #[doc(hidden)]
 struct MutexArcInner<T> { priv lock: Mutex, priv failed: bool, priv data: T }
 /// An Arc with mutable data protected by a blocking mutex.
-struct MutexArc<T> { priv x: UnsafeAtomicRcBox<MutexArcInner<T>> }
+struct MutexArc<T> { priv x: UnsafeArc<MutexArcInner<T>> }
 
 
 impl<T:Send> Clone for MutexArc<T> {
@@ -187,7 +187,7 @@ impl<T:Send> MutexArc<T> {
             lock: Mutex::new_with_condvars(num_condvars),
             failed: false, data: user_data
         };
-        MutexArc { x: UnsafeAtomicRcBox::new(data) }
+        MutexArc { x: UnsafeArc::new(data) }
     }
 
     /**
@@ -309,7 +309,7 @@ struct RWArcInner<T> { priv lock: RWLock, priv failed: bool, priv data: T }
  */
 #[no_freeze]
 struct RWArc<T> {
-    priv x: UnsafeAtomicRcBox<RWArcInner<T>>,
+    priv x: UnsafeArc<RWArcInner<T>>,
 }
 
 impl<T:Freeze + Send> Clone for RWArc<T> {
@@ -335,7 +335,7 @@ impl<T:Freeze + Send> RWArc<T> {
             lock: RWLock::new_with_condvars(num_condvars),
             failed: false, data: user_data
        };
-        RWArc { x: UnsafeAtomicRcBox::new(data), }
+        RWArc { x: UnsafeArc::new(data), }
     }
 
     /**
diff --git a/src/libextra/sync.rs b/src/libextra/sync.rs
index afb4cf3943a..66406351305 100644
--- a/src/libextra/sync.rs
+++ b/src/libextra/sync.rs
@@ -21,7 +21,7 @@ use std::comm;
 use std::comm::SendDeferred;
 use std::comm::{GenericPort, Peekable};
 use std::task;
-use std::unstable::sync::{Exclusive, UnsafeAtomicRcBox};
+use std::unstable::sync::{Exclusive, UnsafeArc};
 use std::unstable::atomics;
 use std::unstable::finally::Finally;
 use std::util;
@@ -448,7 +448,7 @@ struct RWLockInner {
 pub struct RWLock {
     priv order_lock: Semaphore,
     priv access_lock: Sem<~[WaitQueue]>,
-    priv state: UnsafeAtomicRcBox<RWLockInner>,
+    priv state: UnsafeArc<RWLockInner>,
 }
 
 impl RWLock {
@@ -460,7 +460,7 @@ impl RWLock {
      * Similar to mutex_with_condvars.
      */
     pub fn new_with_condvars(num_condvars: uint) -> RWLock {
-        let state = UnsafeAtomicRcBox::new(RWLockInner {
+        let state = UnsafeArc::new(RWLockInner {
             read_mode: false,
             read_count: atomics::AtomicUint::new(0),
         });
diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs
index bd83e286156..b547d3c9c30 100644
--- a/src/libstd/rt/comm.rs
+++ b/src/libstd/rt/comm.rs
@@ -21,7 +21,7 @@ use rt::local::Local;
 use rt::select::{SelectInner, SelectPortInner};
 use select::{Select, SelectPort};
 use unstable::atomics::{AtomicUint, AtomicOption, Acquire, Relaxed, SeqCst};
-use unstable::sync::UnsafeAtomicRcBox;
+use unstable::sync::UnsafeArc;
 use util::Void;
 use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable};
 use cell::Cell;
@@ -567,14 +567,14 @@ impl<'self, T> SelectPort<T> for &'self Port<T> { }
 
 pub struct SharedChan<T> {
     // Just like Chan, but a shared AtomicOption instead of Cell
-    priv next: UnsafeAtomicRcBox<AtomicOption<StreamChanOne<T>>>
+    priv next: UnsafeArc<AtomicOption<StreamChanOne<T>>>
 }
 
 impl<T: Send> SharedChan<T> {
     pub fn new(chan: Chan<T>) -> SharedChan<T> {
         let next = chan.next.take();
         let next = AtomicOption::new(~next);
-        SharedChan { next: UnsafeAtomicRcBox::new(next) }
+        SharedChan { next: UnsafeArc::new(next) }
     }
 }
 
@@ -620,7 +620,7 @@ impl<T> Clone for SharedChan<T> {
 
 pub struct SharedPort<T> {
     // The next port on which we will receive the next port on which we will receive T
-    priv next_link: UnsafeAtomicRcBox<AtomicOption<PortOne<StreamPortOne<T>>>>
+    priv next_link: UnsafeArc<AtomicOption<PortOne<StreamPortOne<T>>>>
 }
 
 impl<T> SharedPort<T> {
@@ -630,7 +630,7 @@ impl<T> SharedPort<T> {
         let (next_link_port, next_link_chan) = oneshot();
         next_link_chan.send(next_data_port);
         let next_link = AtomicOption::new(~next_link_port);
-        SharedPort { next_link: UnsafeAtomicRcBox::new(next_link) }
+        SharedPort { next_link: UnsafeArc::new(next_link) }
     }
 }
 
diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
index b0b425e3aee..e31a98a9a7a 100644
--- a/src/libstd/rt/kill.rs
+++ b/src/libstd/rt/kill.rs
@@ -159,7 +159,7 @@ use rt::task::Task;
 use task::spawn::Taskgroup;
 use to_bytes::IterBytes;
 use unstable::atomics::{AtomicUint, Relaxed};
-use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
+use unstable::sync::{UnsafeArc, LittleLock};
 use util;
 
 static KILLED_MSG: &'static str = "killed by linked failure";
@@ -170,7 +170,7 @@ static KILL_KILLED: uint = 1;
 static KILL_UNKILLABLE: uint = 2;
 
 struct KillFlag(AtomicUint);
-type KillFlagHandle = UnsafeAtomicRcBox<KillFlag>;
+type KillFlagHandle = UnsafeArc<KillFlag>;
 
 /// A handle to a blocked task. Usually this means having the ~Task pointer by
 /// ownership, but if the task is killable, a killer can steal it at any time.
@@ -211,7 +211,7 @@ struct KillHandleInner {
 
 /// State shared between tasks used for task killing during linked failure.
 #[deriving(Clone)]
-pub struct KillHandle(UnsafeAtomicRcBox<KillHandleInner>);
+pub struct KillHandle(UnsafeArc<KillHandleInner>);
 
 /// Per-task state related to task death, killing, failure, etc.
 pub struct Death {
@@ -317,7 +317,7 @@ impl BlockedTask {
         let handles = match self {
             Unkillable(task) => {
                 let flag = unsafe { KillFlag(AtomicUint::new(cast::transmute(task))) };
-                UnsafeAtomicRcBox::newN(flag, num_handles)
+                UnsafeArc::newN(flag, num_handles)
             }
             Killable(flag_arc) => flag_arc.cloneN(num_handles),
         };
@@ -380,8 +380,8 @@ impl Eq for KillHandle {
 impl KillHandle {
     pub fn new() -> (KillHandle, KillFlagHandle) {
         let (flag, flag_clone) =
-            UnsafeAtomicRcBox::new2(KillFlag(AtomicUint::new(KILL_RUNNING)));
-        let handle = KillHandle(UnsafeAtomicRcBox::new(KillHandleInner {
+            UnsafeArc::new2(KillFlag(AtomicUint::new(KILL_RUNNING)));
+        let handle = KillHandle(UnsafeArc::new(KillHandleInner {
             // Linked failure fields
             killed: flag,
             unkillable: AtomicUint::new(KILL_RUNNING),
@@ -460,7 +460,7 @@ impl KillHandle {
     pub fn notify_immediate_failure(&mut self) {
         // A benign data race may happen here if there are failing sibling
         // tasks that were also spawned-watched. The refcount's write barriers
-        // in UnsafeAtomicRcBox ensure that this write will be seen by the
+        // in UnsafeArc ensure that this write will be seen by the
         // unwrapper/destructor, whichever task may unwrap it.
         unsafe { (*self.get()).any_child_failed = true; }
     }
diff --git a/src/libstd/rt/message_queue.rs b/src/libstd/rt/message_queue.rs
index 2bbcaff6d28..99b5156b319 100644
--- a/src/libstd/rt/message_queue.rs
+++ b/src/libstd/rt/message_queue.rs
@@ -16,11 +16,11 @@ use kinds::Send;
 use vec::OwnedVector;
 use cell::Cell;
 use option::*;
-use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
+use unstable::sync::{UnsafeArc, LittleLock};
 use clone::Clone;
 
 pub struct MessageQueue<T> {
-    priv state: UnsafeAtomicRcBox<State<T>>
+    priv state: UnsafeArc<State<T>>
 }
 
 struct State<T> {
@@ -32,7 +32,7 @@ struct State<T> {
 impl<T: Send> MessageQueue<T> {
     pub fn new() -> MessageQueue<T> {
         MessageQueue {
-            state: UnsafeAtomicRcBox::new(State {
+            state: UnsafeArc::new(State {
                 count: 0,
                 queue: ~[],
                 lock: LittleLock::new()
diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs
index 0d59d5780cc..7436efb5bf5 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -74,7 +74,7 @@ use rt::thread::Thread;
 use rt::work_queue::WorkQueue;
 use rt::uv::uvio::UvEventLoop;
 use unstable::atomics::{AtomicInt, SeqCst};
-use unstable::sync::UnsafeAtomicRcBox;
+use unstable::sync::UnsafeArc;
 use vec::{OwnedVector, MutableVector};
 
 /// The global (exchange) heap.
@@ -311,7 +311,7 @@ fn run_(main: ~fn(), use_main_sched: bool) -> int {
 
     // Create a shared cell for transmitting the process exit
     // code from the main task to this function.
-    let exit_code = UnsafeAtomicRcBox::new(AtomicInt::new(0));
+    let exit_code = UnsafeArc::new(AtomicInt::new(0));
     let exit_code_clone = exit_code.clone();
 
     // When the main task exits, after all the tasks in the main
diff --git a/src/libstd/rt/sleeper_list.rs b/src/libstd/rt/sleeper_list.rs
index 7232afd6594..f4fdf15cda6 100644
--- a/src/libstd/rt/sleeper_list.rs
+++ b/src/libstd/rt/sleeper_list.rs
@@ -15,12 +15,12 @@ use container::Container;
 use vec::OwnedVector;
 use option::{Option, Some, None};
 use cell::Cell;
-use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
+use unstable::sync::{UnsafeArc, LittleLock};
 use rt::sched::SchedHandle;
 use clone::Clone;
 
 pub struct SleeperList {
-    priv state: UnsafeAtomicRcBox<State>
+    priv state: UnsafeArc<State>
 }
 
 struct State {
@@ -32,7 +32,7 @@ struct State {
 impl SleeperList {
     pub fn new() -> SleeperList {
         SleeperList {
-            state: UnsafeAtomicRcBox::new(State {
+            state: UnsafeArc::new(State {
                 count: 0,
                 stack: ~[],
                 lock: LittleLock::new()
diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs
index 6fa0e0eb8c1..67d7ee99616 100644
--- a/src/libstd/unstable/sync.rs
+++ b/src/libstd/unstable/sync.rs
@@ -26,11 +26,11 @@ use vec;
 /// An atomically reference counted pointer.
 ///
 /// Enforces no shared-memory safety.
-pub struct UnsafeAtomicRcBox<T> {
+pub struct UnsafeArc<T> {
     data: *mut libc::c_void,
 }
 
-struct AtomicRcBoxData<T> {
+struct ArcData<T> {
     count: AtomicUint,
     // An unwrapper uses this protocol to communicate with the "other" task that
     // drops the last refcount on an arc. Unfortunately this can't be a proper
@@ -42,51 +42,51 @@ struct ArcData<T> {
 }
 
 unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut libc::c_void {
-    let data = ~AtomicRcBoxData { count: AtomicUint::new(refcount),
-                                  unwrapper: AtomicOption::empty(),
-                                  data: Some(data) };
+    let data = ~ArcData { count: AtomicUint::new(refcount),
+                          unwrapper: AtomicOption::empty(),
+                          data: Some(data) };
     cast::transmute(data)
 }
 
-impl<T: Send> UnsafeAtomicRcBox<T> {
-    pub fn new(data: T) -> UnsafeAtomicRcBox<T> {
-        unsafe { UnsafeAtomicRcBox { data: new_inner(data, 1) } }
+impl<T: Send> UnsafeArc<T> {
+    pub fn new(data: T) -> UnsafeArc<T> {
+        unsafe { UnsafeArc { data: new_inner(data, 1) } }
     }
 
     /// As new(), but returns an extra pre-cloned handle.
-    pub fn new2(data: T) -> (UnsafeAtomicRcBox<T>, UnsafeAtomicRcBox<T>) {
+    pub fn new2(data: T) -> (UnsafeArc<T>, UnsafeArc<T>) {
         unsafe {
             let ptr = new_inner(data, 2);
-            (UnsafeAtomicRcBox { data: ptr }, UnsafeAtomicRcBox { data: ptr })
+            (UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
         }
     }
 
     /// As new(), but returns a vector of as many pre-cloned handles as requested.
-    pub fn newN(data: T, num_handles: uint) -> ~[UnsafeAtomicRcBox<T>] {
+    pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc<T>] {
         unsafe {
             if num_handles == 0 {
                 ~[] // need to free data here
             } else {
                 let ptr = new_inner(data, num_handles);
-                vec::from_fn(num_handles, |_| UnsafeAtomicRcBox { data: ptr })
+                vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
             }
         }
     }
 
     /// As newN(), but from an already-existing handle. Uses one xadd.
-    pub fn cloneN(self, num_handles: uint) -> ~[UnsafeAtomicRcBox<T>] {
+    pub fn cloneN(self, num_handles: uint) -> ~[UnsafeArc<T>] {
         if num_handles == 0 {
             ~[] // The "num_handles - 1" trick (below) fails in the 0 case.
         } else {
             unsafe {
-                let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+                let mut data: ~ArcData<T> = cast::transmute(self.data);
                 // Minus one because we are recycling the given handle's refcount.
                 let old_count = data.count.fetch_add(num_handles - 1, Acquire);
                 assert!(old_count >= 1);
                 let ptr = cast::transmute(data);
                 cast::forget(self); // Don't run the destructor on this handle.
-                vec::from_fn(num_handles, |_| UnsafeAtomicRcBox { data: ptr })
+                vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
             }
         }
     }
@@ -94,7 +94,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
     #[inline]
     pub fn get(&self) -> *mut T {
         unsafe {
-            let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+            let mut data: ~ArcData<T> = cast::transmute(self.data);
             assert!(data.count.load(Relaxed) > 0);
             let r: *mut T = data.data.get_mut_ref();
             cast::forget(data);
@@ -105,7 +105,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
     #[inline]
     pub fn get_immut(&self) -> *T {
         unsafe {
-            let data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+            let data: ~ArcData<T> = cast::transmute(self.data);
             assert!(data.count.load(Relaxed) > 0);
             let r: *T = data.data.get_ref();
             cast::forget(data);
@@ -122,7 +122,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
         do task::unkillable {
             unsafe {
                 let mut this = this.take();
-                let mut data: ~AtomicRcBoxData<T> = cast::transmute(this.data);
+                let mut data: ~ArcData<T> = cast::transmute(this.data);
                 // Set up the unwrap protocol.
                 let (p1,c1) = comm::oneshot(); // ()
                 let (p2,c2) = comm::oneshot(); // bool
@@ -139,7 +139,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
                     // We were the last owner. Can unwrap immediately.
                     // AtomicOption's destructor will free the server endpoint.
                     // FIXME(#3224): it should be like this
-                    // let ~AtomicRcBoxData { data: user_data, _ } = data;
+                    // let ~ArcData { data: user_data, _ } = data;
                     // user_data
                     data.data.take_unwrap()
                 } else {
@@ -154,7 +154,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
                         let (c2, data) = c2_and_data.take();
                         c2.send(true);
                         // FIXME(#3224): it should be like this
-                        // let ~AtomicRcBoxData { data: user_data, _ } = data;
+                        // let ~ArcData { data: user_data, _ } = data;
                         // user_data
                         let mut data = data;
                         data.data.take_unwrap()
@@ -183,10 +183,10 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
 
     /// As unwrap above, but without blocking. Returns 'Left(self)' if this is
     /// not the last reference; 'Right(unwrapped_data)' if so.
-    pub fn try_unwrap(self) -> Either<UnsafeAtomicRcBox<T>, T> {
+    pub fn try_unwrap(self) -> Either<UnsafeArc<T>, T> {
         unsafe {
             let mut this = self; // FIXME(#4330) mutable self
-            let mut data: ~AtomicRcBoxData<T> = cast::transmute(this.data);
+            let mut data: ~ArcData<T> = cast::transmute(this.data);
             // This can of course race with anybody else who has a handle, but in
             // such a case, the returned count will always be at least 2. If we
             // see 1, no race was possible. All that matters is 1 or not-1.
@@ -209,27 +209,27 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
     }
 }
 
-impl<T: Send> Clone for UnsafeAtomicRcBox<T> {
-    fn clone(&self) -> UnsafeAtomicRcBox<T> {
+impl<T: Send> Clone for UnsafeArc<T> {
+    fn clone(&self) -> UnsafeArc<T> {
         unsafe {
-            let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+            let mut data: ~ArcData<T> = cast::transmute(self.data);
             // This barrier might be unnecessary, but I'm not sure...
             let old_count = data.count.fetch_add(1, Acquire);
             assert!(old_count >= 1);
             cast::forget(data);
-            return UnsafeAtomicRcBox { data: self.data };
+            return UnsafeArc { data: self.data };
         }
     }
 }
 
 #[unsafe_destructor]
-impl<T> Drop for UnsafeAtomicRcBox<T>{
+impl<T> Drop for UnsafeArc<T>{
     fn drop(&self) {
         unsafe {
             if self.data.is_null() {
                 return; // Happens when destructing an unwrapper's handle.
             }
-            let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+            let mut data: ~ArcData<T> = cast::transmute(self.data);
             // Must be acquire+release, not just release, to make sure this
             // doesn't get reordered to after the unwrapper pointer load.
             let old_count = data.count.fetch_sub(1, SeqCst);
@@ -355,7 +355,7 @@ struct ExData<T> {
  * need to block or deschedule while accessing shared state, use extra::sync::RWArc.
 */
 pub struct Exclusive<T> {
-    x: UnsafeAtomicRcBox<ExData<T>>
+    x: UnsafeArc<ExData<T>>
 }
 
 impl<T:Send> Clone for Exclusive<T> {
@@ -373,7 +373,7 @@ impl<T:Send> Exclusive<T> {
             data: user_data
         };
         Exclusive {
-            x: UnsafeAtomicRcBox::new(data)
+            x: UnsafeArc::new(data)
         }
     }
 
@@ -441,7 +441,7 @@ mod tests {
     use comm;
     use option::*;
     use prelude::*;
-    use super::{Exclusive, UnsafeAtomicRcBox, atomically};
+    use super::{Exclusive, UnsafeArc, atomically};
     use task;
     use util;
 
@@ -506,44 +506,44 @@ mod tests {
     #[test]
     fn arclike_newN() {
         // Tests that the many-refcounts-at-once constructors don't leak.
-        let _ = UnsafeAtomicRcBox::new2(~~"hello");
-        let x = UnsafeAtomicRcBox::newN(~~"hello", 0);
+        let _ = UnsafeArc::new2(~~"hello");
+        let x = UnsafeArc::newN(~~"hello", 0);
         assert_eq!(x.len(), 0)
-        let x = UnsafeAtomicRcBox::newN(~~"hello", 1);
+        let x = UnsafeArc::newN(~~"hello", 1);
         assert_eq!(x.len(), 1)
-        let x = UnsafeAtomicRcBox::newN(~~"hello", 10);
+        let x = UnsafeArc::newN(~~"hello", 10);
         assert_eq!(x.len(), 10)
     }
 
     #[test]
     fn arclike_cloneN() {
         // Tests that the many-refcounts-at-once special-clone doesn't leak.
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         let x = x.cloneN(0);
         assert_eq!(x.len(), 0);
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         let x = x.cloneN(1);
         assert_eq!(x.len(), 1);
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         let x = x.cloneN(10);
         assert_eq!(x.len(), 10);
     }
 
     #[test]
     fn arclike_unwrap_basic() {
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         assert!(x.unwrap() == ~~"hello");
     }
 
     #[test]
     fn arclike_try_unwrap() {
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         assert!(x.try_unwrap().expect_right("try_unwrap failed") == ~~"hello");
     }
 
     #[test]
     fn arclike_try_unwrap_fail() {
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         let x2 = x.clone();
         let left_x = x.try_unwrap();
         assert!(left_x.is_left());
@@ -554,7 +554,7 @@ mod tests {
     #[test]
     fn arclike_try_unwrap_unwrap_race() {
         // When an unwrap and a try_unwrap race, the unwrapper should always win.
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         let x2 = Cell::new(x.clone());
         let (p,c) = comm::stream();
         do task::spawn {
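
For context, a minimal usage sketch of the renamed type. This is not part of the commit; it uses only operations the patch itself exercises (new2(), get(), try_unwrap(), unwrap(), util::ignore()) in the 0.8-era Rust of the time, and the names x1, x2, and attempt are illustrative:

    // Illustrative sketch, not from the patch: exercising UnsafeArc's API.
    use std::unstable::sync::UnsafeArc;
    use std::util;

    fn main() {
        // Two pre-cloned handles to a single reference-counted allocation.
        let (x1, x2) = UnsafeArc::new2(~"hello");
        unsafe {
            // get() hands out a raw *mut T; UnsafeArc enforces no
            // shared-memory safety, so the caller must synchronize.
            assert!(*x1.get() == ~"hello");
        }
        // While x2 is still alive, try_unwrap() returns Left(self).
        let attempt = x1.try_unwrap();
        assert!(attempt.is_left());
        util::ignore(attempt); // drop the handle held in the Left
        // x2 is now the last handle, so the blocking unwrap() yields the data.
        assert!(x2.unwrap() == ~"hello");
    }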