Rename UnsafeAtomicRcBox to UnsafeArc. Fixes #7674.

Huon Wilson 2013-08-27 20:00:57 +10:00
parent 604ab9477a
commit 71448d7c37
8 changed files with 72 additions and 72 deletions

View File

@@ -44,7 +44,7 @@ use sync;
 use sync::{Mutex, RWLock};
 use std::cast;
-use std::unstable::sync::UnsafeAtomicRcBox;
+use std::unstable::sync::UnsafeArc;
 use std::task;
 use std::borrow;
@@ -108,7 +108,7 @@ impl<'self> Condvar<'self> {
 ****************************************************************************/
 /// An atomically reference counted wrapper for shared immutable state.
-pub struct Arc<T> { priv x: UnsafeAtomicRcBox<T> }
+pub struct Arc<T> { priv x: UnsafeArc<T> }
 /**
@@ -118,7 +118,7 @@ pub struct Arc<T> { priv x: UnsafeAtomicRcBox<T> }
 impl<T:Freeze+Send> Arc<T> {
     /// Create an atomically reference counted wrapper.
     pub fn new(data: T) -> Arc<T> {
-        Arc { x: UnsafeAtomicRcBox::new(data) }
+        Arc { x: UnsafeArc::new(data) }
     }
     pub fn get<'a>(&'a self) -> &'a T {
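For context, a minimal usage sketch of this Arc, in the era's Rust. Only new(), clone(), and get() appear in this diff; the task-spawning details are illustrative:

    // Share an immutable vector across tasks; clone() only bumps the refcount.
    let numbers = Arc::new(~[1, 2, 3]);
    let numbers2 = numbers.clone();
    do task::spawn {
        // get() borrows the shared data without copying it.
        assert_eq!(numbers2.get().len(), 3);
    }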
@@ -160,7 +160,7 @@ impl<T:Freeze + Send> Clone for Arc<T> {
 #[doc(hidden)]
 struct MutexArcInner<T> { priv lock: Mutex, priv failed: bool, priv data: T }
 /// An Arc with mutable data protected by a blocking mutex.
-struct MutexArc<T> { priv x: UnsafeAtomicRcBox<MutexArcInner<T>> }
+struct MutexArc<T> { priv x: UnsafeArc<MutexArcInner<T>> }
 impl<T:Send> Clone for MutexArc<T> {
@@ -187,7 +187,7 @@ impl<T:Send> MutexArc<T> {
             lock: Mutex::new_with_condvars(num_condvars),
             failed: false, data: user_data
         };
-        MutexArc { x: UnsafeAtomicRcBox::new(data) }
+        MutexArc { x: UnsafeArc::new(data) }
     }
 /**
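A hedged sketch of how this MutexArc is used. The period API exposes a closure-based access method marked unsafe; its exact signature is an assumption here, not part of this diff:

    let count = MutexArc::new(0);
    let count2 = count.clone();
    do task::spawn {
        // access() holds the blocking mutex for the closure's duration.
        unsafe { count2.access(|n| *n += 1); }
    }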
@@ -309,7 +309,7 @@ struct RWArcInner<T> { priv lock: RWLock, priv failed: bool, priv data: T }
 */
 #[no_freeze]
 struct RWArc<T> {
-    priv x: UnsafeAtomicRcBox<RWArcInner<T>>,
+    priv x: UnsafeArc<RWArcInner<T>>,
 }
 impl<T:Freeze + Send> Clone for RWArc<T> {
@@ -335,7 +335,7 @@ impl<T:Freeze + Send> RWArc<T> {
             lock: RWLock::new_with_condvars(num_condvars),
             failed: false, data: user_data
         };
-        RWArc { x: UnsafeAtomicRcBox::new(data), }
+        RWArc { x: UnsafeArc::new(data), }
     }
 /**
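Likewise, a minimal RWArc sketch, assuming the closure-based read/write API of extra::arc at the time:

    let table = RWArc::new(~[1, 2, 3]);
    let table2 = table.clone();
    do task::spawn {
        do table2.write |v| { v.push(4); }   // exclusive access
    }
    do table.read |v| {
        assert!(v.len() >= 3);               // shared, read-only access
    }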

View File

@@ -21,7 +21,7 @@ use std::comm;
 use std::comm::SendDeferred;
 use std::comm::{GenericPort, Peekable};
 use std::task;
-use std::unstable::sync::{Exclusive, UnsafeAtomicRcBox};
+use std::unstable::sync::{Exclusive, UnsafeArc};
 use std::unstable::atomics;
 use std::unstable::finally::Finally;
 use std::util;
@@ -448,7 +448,7 @@ struct RWLockInner {
 pub struct RWLock {
     priv order_lock: Semaphore,
     priv access_lock: Sem<~[WaitQueue]>,
-    priv state: UnsafeAtomicRcBox<RWLockInner>,
+    priv state: UnsafeArc<RWLockInner>,
 }
 impl RWLock {
@@ -460,7 +460,7 @@ impl RWLock {
      * Similar to mutex_with_condvars.
      */
     pub fn new_with_condvars(num_condvars: uint) -> RWLock {
-        let state = UnsafeAtomicRcBox::new(RWLockInner {
+        let state = UnsafeArc::new(RWLockInner {
             read_mode: false,
             read_count: atomics::AtomicUint::new(0),
         });
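For reference, a minimal sketch of this RWLock in use, assuming extra::sync's closure-based API; the closure bodies are hypothetical:

    let lock = RWLock::new();
    do lock.read {
        // Any number of readers may hold the lock concurrently...
    }
    do lock.write {
        // ...while a writer excludes both readers and other writers.
    }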

View File

@@ -21,7 +21,7 @@ use rt::local::Local;
 use rt::select::{SelectInner, SelectPortInner};
 use select::{Select, SelectPort};
 use unstable::atomics::{AtomicUint, AtomicOption, Acquire, Relaxed, SeqCst};
-use unstable::sync::UnsafeAtomicRcBox;
+use unstable::sync::UnsafeArc;
 use util::Void;
 use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable};
 use cell::Cell;
@@ -567,14 +567,14 @@ impl<'self, T> SelectPort<T> for &'self Port<T> { }
 pub struct SharedChan<T> {
     // Just like Chan, but a shared AtomicOption instead of Cell
-    priv next: UnsafeAtomicRcBox<AtomicOption<StreamChanOne<T>>>
+    priv next: UnsafeArc<AtomicOption<StreamChanOne<T>>>
 }
 impl<T> SharedChan<T> {
     pub fn new(chan: Chan<T>) -> SharedChan<T> {
         let next = chan.next.take();
         let next = AtomicOption::new(~next);
-        SharedChan { next: UnsafeAtomicRcBox::new(next) }
+        SharedChan { next: UnsafeArc::new(next) }
     }
 }
@@ -620,7 +620,7 @@ impl<T> Clone for SharedChan<T> {
 pub struct SharedPort<T> {
     // The next port on which we will receive the next port on which we will receive T
-    priv next_link: UnsafeAtomicRcBox<AtomicOption<PortOne<StreamPortOne<T>>>>
+    priv next_link: UnsafeArc<AtomicOption<PortOne<StreamPortOne<T>>>>
 }
 impl<T> SharedPort<T> {
@@ -630,7 +630,7 @@ impl<T> SharedPort<T> {
         let (next_link_port, next_link_chan) = oneshot();
         next_link_chan.send(next_data_port);
         let next_link = AtomicOption::new(~next_link_port);
-        SharedPort { next_link: UnsafeAtomicRcBox::new(next_link) }
+        SharedPort { next_link: UnsafeArc::new(next_link) }
     }
 }
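A brief sketch of the many-senders pattern SharedChan enables; stream() and task::spawn are from the period's std, and the values sent are illustrative:

    let (port, chan) = stream();
    let chan = SharedChan::new(chan);
    let chan2 = chan.clone();          // each sender task gets its own clone
    do task::spawn { chan.send(1); }
    do task::spawn { chan2.send(2); }
    assert_eq!(port.recv() + port.recv(), 3);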

View File

@@ -159,7 +159,7 @@ use rt::task::Task;
 use task::spawn::Taskgroup;
 use to_bytes::IterBytes;
 use unstable::atomics::{AtomicUint, Relaxed};
-use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
+use unstable::sync::{UnsafeArc, LittleLock};
 use util;
 static KILLED_MSG: &'static str = "killed by linked failure";
@@ -170,7 +170,7 @@ static KILL_KILLED: uint = 1;
 static KILL_UNKILLABLE: uint = 2;
 struct KillFlag(AtomicUint);
-type KillFlagHandle = UnsafeAtomicRcBox<KillFlag>;
+type KillFlagHandle = UnsafeArc<KillFlag>;
 /// A handle to a blocked task. Usually this means having the ~Task pointer by
 /// ownership, but if the task is killable, a killer can steal it at any time.
@@ -211,7 +211,7 @@ struct KillHandleInner {
 /// State shared between tasks used for task killing during linked failure.
 #[deriving(Clone)]
-pub struct KillHandle(UnsafeAtomicRcBox<KillHandleInner>);
+pub struct KillHandle(UnsafeArc<KillHandleInner>);
 /// Per-task state related to task death, killing, failure, etc.
 pub struct Death {
@@ -317,7 +317,7 @@ impl BlockedTask {
         let handles = match self {
             Unkillable(task) => {
                 let flag = unsafe { KillFlag(AtomicUint::new(cast::transmute(task))) };
-                UnsafeAtomicRcBox::newN(flag, num_handles)
+                UnsafeArc::newN(flag, num_handles)
             }
             Killable(flag_arc) => flag_arc.cloneN(num_handles),
         };
@@ -380,8 +380,8 @@ impl Eq for KillHandle {
 impl KillHandle {
     pub fn new() -> (KillHandle, KillFlagHandle) {
         let (flag, flag_clone) =
-            UnsafeAtomicRcBox::new2(KillFlag(AtomicUint::new(KILL_RUNNING)));
-        let handle = KillHandle(UnsafeAtomicRcBox::new(KillHandleInner {
+            UnsafeArc::new2(KillFlag(AtomicUint::new(KILL_RUNNING)));
+        let handle = KillHandle(UnsafeArc::new(KillHandleInner {
             // Linked failure fields
             killed: flag,
             unkillable: AtomicUint::new(KILL_RUNNING),
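Worth noting: new2, defined later in this commit, allocates once with the refcount already at 2, so the pattern above obtains both handles without a separate clone() and its atomic fetch_add. A reduced sketch with placeholder data:

    // One allocation, two handles; the count starts at 2.
    let (left, right) = UnsafeArc::new2(~"shared");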
@@ -460,7 +460,7 @@ impl KillHandle {
     pub fn notify_immediate_failure(&mut self) {
         // A benign data race may happen here if there are failing sibling
         // tasks that were also spawned-watched. The refcount's write barriers
-        // in UnsafeAtomicRcBox ensure that this write will be seen by the
+        // in UnsafeArc ensure that this write will be seen by the
         // unwrapper/destructor, whichever task may unwrap it.
         unsafe { (*self.get()).any_child_failed = true; }
     }

View File

@@ -16,11 +16,11 @@ use kinds::Send;
 use vec::OwnedVector;
 use cell::Cell;
 use option::*;
-use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
+use unstable::sync::{UnsafeArc, LittleLock};
 use clone::Clone;
 pub struct MessageQueue<T> {
-    priv state: UnsafeAtomicRcBox<State<T>>
+    priv state: UnsafeArc<State<T>>
 }
 struct State<T> {
@@ -32,7 +32,7 @@ struct State<T> {
 impl<T: Send> MessageQueue<T> {
     pub fn new() -> MessageQueue<T> {
         MessageQueue {
-            state: UnsafeAtomicRcBox::new(State {
+            state: UnsafeArc::new(State {
                 count: 0,
                 queue: ~[],
                 lock: LittleLock::new()

View File

@@ -74,7 +74,7 @@ use rt::thread::Thread;
 use rt::work_queue::WorkQueue;
 use rt::uv::uvio::UvEventLoop;
 use unstable::atomics::{AtomicInt, SeqCst};
-use unstable::sync::UnsafeAtomicRcBox;
+use unstable::sync::UnsafeArc;
 use vec::{OwnedVector, MutableVector};
 /// The global (exchange) heap.
@@ -311,7 +311,7 @@ fn run_(main: ~fn(), use_main_sched: bool) -> int {
     // Create a shared cell for transmitting the process exit
     // code from the main task to this function.
-    let exit_code = UnsafeAtomicRcBox::new(AtomicInt::new(0));
+    let exit_code = UnsafeArc::new(AtomicInt::new(0));
     let exit_code_clone = exit_code.clone();
     // When the main task exits, after all the tasks in the main
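A reduced sketch of the exit-code handoff this hunk sets up; the store shown here stands in for what the main task's death callback does, and the orderings follow the surrounding imports:

    let exit_code = UnsafeArc::new(AtomicInt::new(0));
    let exit_code_clone = exit_code.clone();
    // The main task records its exit status through one handle...
    unsafe { (*exit_code_clone.get()).store(1, SeqCst); }
    // ...and run_() reads it back through the other.
    let code = unsafe { (*exit_code.get()).load(SeqCst) };
    assert_eq!(code, 1);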

View File

@@ -15,12 +15,12 @@ use container::Container;
 use vec::OwnedVector;
 use option::{Option, Some, None};
 use cell::Cell;
-use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
+use unstable::sync::{UnsafeArc, LittleLock};
 use rt::sched::SchedHandle;
 use clone::Clone;
 pub struct SleeperList {
-    priv state: UnsafeAtomicRcBox<State>
+    priv state: UnsafeArc<State>
 }
 struct State {
@@ -32,7 +32,7 @@ struct State {
 impl SleeperList {
     pub fn new() -> SleeperList {
         SleeperList {
-            state: UnsafeAtomicRcBox::new(State {
+            state: UnsafeArc::new(State {
                 count: 0,
                 stack: ~[],
                 lock: LittleLock::new()

View File

@@ -26,11 +26,11 @@ use vec;
 /// An atomically reference counted pointer.
 ///
 /// Enforces no shared-memory safety.
-pub struct UnsafeAtomicRcBox<T> {
+pub struct UnsafeArc<T> {
     data: *mut libc::c_void,
 }
-struct AtomicRcBoxData<T> {
+struct ArcData<T> {
     count: AtomicUint,
     // An unwrapper uses this protocol to communicate with the "other" task that
     // drops the last refcount on an arc. Unfortunately this can't be a proper
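As the doc comment says, UnsafeArc only manages the allocation's lifetime; the pointers it hands out carry no safety guarantees. A minimal single-task sketch of what that means, using the get()/get_immut() accessors shown below:

    let one = UnsafeArc::new(5);
    let two = one.clone();                  // same allocation, count == 2
    unsafe {
        *one.get() = 6;                     // unsynchronized mutation...
        assert!(*two.get_immut() == 6);     // ...visible through any handle
    }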
@@ -42,51 +42,51 @@ struct AtomicRcBoxData<T> {
 }
 unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut libc::c_void {
-    let data = ~AtomicRcBoxData { count: AtomicUint::new(refcount),
-                                  unwrapper: AtomicOption::empty(),
-                                  data: Some(data) };
+    let data = ~ArcData { count: AtomicUint::new(refcount),
+                          unwrapper: AtomicOption::empty(),
+                          data: Some(data) };
     cast::transmute(data)
 }
-impl<T: Send> UnsafeAtomicRcBox<T> {
-    pub fn new(data: T) -> UnsafeAtomicRcBox<T> {
-        unsafe { UnsafeAtomicRcBox { data: new_inner(data, 1) } }
+impl<T: Send> UnsafeArc<T> {
+    pub fn new(data: T) -> UnsafeArc<T> {
+        unsafe { UnsafeArc { data: new_inner(data, 1) } }
     }
     /// As new(), but returns an extra pre-cloned handle.
-    pub fn new2(data: T) -> (UnsafeAtomicRcBox<T>, UnsafeAtomicRcBox<T>) {
+    pub fn new2(data: T) -> (UnsafeArc<T>, UnsafeArc<T>) {
         unsafe {
             let ptr = new_inner(data, 2);
-            (UnsafeAtomicRcBox { data: ptr }, UnsafeAtomicRcBox { data: ptr })
+            (UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
         }
     }
     /// As new(), but returns a vector of as many pre-cloned handles as requested.
-    pub fn newN(data: T, num_handles: uint) -> ~[UnsafeAtomicRcBox<T>] {
+    pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc<T>] {
         unsafe {
             if num_handles == 0 {
                 ~[] // need to free data here
             } else {
                 let ptr = new_inner(data, num_handles);
-                vec::from_fn(num_handles, |_| UnsafeAtomicRcBox { data: ptr })
+                vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
             }
         }
     }
     /// As newN(), but from an already-existing handle. Uses one xadd.
-    pub fn cloneN(self, num_handles: uint) -> ~[UnsafeAtomicRcBox<T>] {
+    pub fn cloneN(self, num_handles: uint) -> ~[UnsafeArc<T>] {
         if num_handles == 0 {
             ~[] // The "num_handles - 1" trick (below) fails in the 0 case.
         } else {
             unsafe {
-                let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+                let mut data: ~ArcData<T> = cast::transmute(self.data);
                 // Minus one because we are recycling the given handle's refcount.
                 let old_count = data.count.fetch_add(num_handles - 1, Acquire);
                 // let old_count = data.count.fetch_add(num_handles, Acquire);
                 assert!(old_count >= 1);
                 let ptr = cast::transmute(data);
                 cast::forget(self); // Don't run the destructor on this handle.
-                vec::from_fn(num_handles, |_| UnsafeAtomicRcBox { data: ptr })
+                vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
             }
         }
     }
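The "minus one" trick above is easiest to see with concrete (hypothetical) numbers: a handle whose count is 1 calls cloneN(10); fetch_add(10 - 1) raises the count to 10, which exactly matches the ten returned handles, because the consumed self's refcount is recycled rather than released:

    let x = UnsafeArc::new(~"hello");   // count == 1
    let handles = x.cloneN(10);         // fetch_add(9): count == 10
    assert_eq!(handles.len(), 10);      // one refcount per handle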
@@ -94,7 +94,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
     #[inline]
     pub fn get(&self) -> *mut T {
         unsafe {
-            let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+            let mut data: ~ArcData<T> = cast::transmute(self.data);
             assert!(data.count.load(Relaxed) > 0);
             let r: *mut T = data.data.get_mut_ref();
             cast::forget(data);
@@ -105,7 +105,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
     #[inline]
     pub fn get_immut(&self) -> *T {
         unsafe {
-            let data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+            let data: ~ArcData<T> = cast::transmute(self.data);
             assert!(data.count.load(Relaxed) > 0);
             let r: *T = data.data.get_ref();
             cast::forget(data);
@@ -122,7 +122,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
         do task::unkillable {
             unsafe {
                 let mut this = this.take();
-                let mut data: ~AtomicRcBoxData<T> = cast::transmute(this.data);
+                let mut data: ~ArcData<T> = cast::transmute(this.data);
                 // Set up the unwrap protocol.
                 let (p1,c1) = comm::oneshot(); // ()
                 let (p2,c2) = comm::oneshot(); // bool
@@ -139,7 +139,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
                     // We were the last owner. Can unwrap immediately.
                     // AtomicOption's destructor will free the server endpoint.
                     // FIXME(#3224): it should be like this
-                    // let ~AtomicRcBoxData { data: user_data, _ } = data;
+                    // let ~ArcData { data: user_data, _ } = data;
                     // user_data
                     data.data.take_unwrap()
                 } else {
@@ -154,7 +154,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
                         let (c2, data) = c2_and_data.take();
                         c2.send(true);
                         // FIXME(#3224): it should be like this
-                        // let ~AtomicRcBoxData { data: user_data, _ } = data;
+                        // let ~ArcData { data: user_data, _ } = data;
                         // user_data
                         let mut data = data;
                         data.data.take_unwrap()
@@ -183,10 +183,10 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
     /// As unwrap above, but without blocking. Returns 'Left(self)' if this is
     /// not the last reference; 'Right(unwrapped_data)' if so.
-    pub fn try_unwrap(self) -> Either<UnsafeAtomicRcBox<T>, T> {
+    pub fn try_unwrap(self) -> Either<UnsafeArc<T>, T> {
         unsafe {
             let mut this = self; // FIXME(#4330) mutable self
-            let mut data: ~AtomicRcBoxData<T> = cast::transmute(this.data);
+            let mut data: ~ArcData<T> = cast::transmute(this.data);
             // This can of course race with anybody else who has a handle, but in
             // such a case, the returned count will always be at least 2. If we
             // see 1, no race was possible. All that matters is 1 or not-1.
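A small sketch of the Either-based contract described in the doc comment; Left and Right come from std::either, and the fail! branch is illustrative:

    let x = UnsafeArc::new(~"hello");
    match x.try_unwrap() {
        // Sole owner: the data comes back out.
        Right(inner) => assert!(inner == ~"hello"),
        // Another handle still exists; we would get self back instead.
        Left(_still_shared) => fail!("unreachable: no other handle"),
    }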
@@ -209,27 +209,27 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
     }
 }
-impl<T: Send> Clone for UnsafeAtomicRcBox<T> {
-    fn clone(&self) -> UnsafeAtomicRcBox<T> {
+impl<T: Send> Clone for UnsafeArc<T> {
+    fn clone(&self) -> UnsafeArc<T> {
         unsafe {
-            let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+            let mut data: ~ArcData<T> = cast::transmute(self.data);
             // This barrier might be unnecessary, but I'm not sure...
             let old_count = data.count.fetch_add(1, Acquire);
             assert!(old_count >= 1);
             cast::forget(data);
-            return UnsafeAtomicRcBox { data: self.data };
+            return UnsafeArc { data: self.data };
         }
     }
 }
 #[unsafe_destructor]
-impl<T> Drop for UnsafeAtomicRcBox<T>{
+impl<T> Drop for UnsafeArc<T>{
     fn drop(&self) {
         unsafe {
             if self.data.is_null() {
                 return; // Happens when destructing an unwrapper's handle.
             }
-            let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
+            let mut data: ~ArcData<T> = cast::transmute(self.data);
             // Must be acquire+release, not just release, to make sure this
             // doesn't get reordered to after the unwrapper pointer load.
             let old_count = data.count.fetch_sub(1, SeqCst);
@@ -355,7 +355,7 @@ struct ExData<T> {
  * need to block or deschedule while accessing shared state, use extra::sync::RWArc.
  */
 pub struct Exclusive<T> {
-    x: UnsafeAtomicRcBox<ExData<T>>
+    x: UnsafeArc<ExData<T>>
 }
 impl<T:Send> Clone for Exclusive<T> {
@@ -373,7 +373,7 @@ impl<T:Send> Exclusive<T> {
             data: user_data
         };
         Exclusive {
-            x: UnsafeAtomicRcBox::new(data)
+            x: UnsafeArc::new(data)
         }
     }
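Finally, a hedged sketch of Exclusive, the little lock built on UnsafeArc here, assuming the unsafe closure-based with method of the period:

    let total = Exclusive::new(0);
    let total2 = total.clone();
    do task::spawn {
        // with() holds the LittleLock for the closure's duration.
        unsafe { do total2.with |n| { *n += 1; } }
    }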
@@ -441,7 +441,7 @@ mod tests {
     use comm;
     use option::*;
     use prelude::*;
-    use super::{Exclusive, UnsafeAtomicRcBox, atomically};
+    use super::{Exclusive, UnsafeArc, atomically};
     use task;
     use util;
@@ -506,44 +506,44 @@ mod tests {
     #[test]
     fn arclike_newN() {
         // Tests that the many-refcounts-at-once constructors don't leak.
-        let _ = UnsafeAtomicRcBox::new2(~~"hello");
-        let x = UnsafeAtomicRcBox::newN(~~"hello", 0);
+        let _ = UnsafeArc::new2(~~"hello");
+        let x = UnsafeArc::newN(~~"hello", 0);
         assert_eq!(x.len(), 0)
-        let x = UnsafeAtomicRcBox::newN(~~"hello", 1);
+        let x = UnsafeArc::newN(~~"hello", 1);
         assert_eq!(x.len(), 1)
-        let x = UnsafeAtomicRcBox::newN(~~"hello", 10);
+        let x = UnsafeArc::newN(~~"hello", 10);
         assert_eq!(x.len(), 10)
     }
     #[test]
     fn arclike_cloneN() {
         // Tests that the many-refcounts-at-once special-clone doesn't leak.
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         let x = x.cloneN(0);
         assert_eq!(x.len(), 0);
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
        let x = x.cloneN(1);
         assert_eq!(x.len(), 1);
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         let x = x.cloneN(10);
         assert_eq!(x.len(), 10);
     }
     #[test]
     fn arclike_unwrap_basic() {
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         assert!(x.unwrap() == ~~"hello");
     }
     #[test]
     fn arclike_try_unwrap() {
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         assert!(x.try_unwrap().expect_right("try_unwrap failed") == ~~"hello");
     }
     #[test]
     fn arclike_try_unwrap_fail() {
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         let x2 = x.clone();
         let left_x = x.try_unwrap();
         assert!(left_x.is_left());
@@ -554,7 +554,7 @@ mod tests {
     #[test]
     fn arclike_try_unwrap_unwrap_race() {
         // When an unwrap and a try_unwrap race, the unwrapper should always win.
-        let x = UnsafeAtomicRcBox::new(~~"hello");
+        let x = UnsafeArc::new(~~"hello");
         let x2 = Cell::new(x.clone());
         let (p,c) = comm::stream();
         do task::spawn {