Paper over privacy issues with Deref by changing field names.

Types that implement `Deref` can cause confusing error messages when one of
their private fields shares a name with a field of the type they deref to,
e.g., previously

    struct Foo { x: int }

    let a: Arc<Foo> = ...;
    println!("{}", a.x);

would complain that the `x` field of `Arc` was private (since `Arc` has a
private field called `x`) rather than just ignoring it and dereferencing
through to `Foo`.

This patch doesn't fix that issue, but it does mean one would have to write
`a._ptr` to hit the same error message, which seems far less likely in
practice. (This patch `_`-prefixes all private fields of `Deref`-implementing
types.)
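
To illustrate the intended effect, here is a rough sketch in the same
commit-era syntax as the example above (the `demo` function and its body
are hypothetical, not part of this patch):

    struct Foo { x: int }

    fn demo(a: Arc<Foo>) {
        // `Arc` no longer has a field named `x`, so this access is
        // expected to fall through to `Foo::x` via `Deref` rather than
        // reporting that a private `Arc` field is inaccessible.
        println!("{}", a.x);

        // Only the renamed internal field still triggers the confusing
        // privacy error:
        // println!("{}", a._ptr); // error: field `_ptr` is private
    }

The same renaming is applied below to `Rc`, `RefCell`'s `Ref`/`RefMut`, the
task-local-data `Ref`, and the `Mutex`/`RWLock` guards.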

cc #12808
Author: Huon Wilson
Date: 2014-05-24 21:35:53 +10:00
Commit: 9698221f91 (parent: 9e244d7084)

5 changed files with 93 additions and 74 deletions


@ -54,7 +54,9 @@ use heap::deallocate;
/// ```
#[unsafe_no_drop_flag]
pub struct Arc<T> {
x: *mut ArcInner<T>,
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut ArcInner<T>,
}
/// A weak pointer to an `Arc`.
@ -63,7 +65,9 @@ pub struct Arc<T> {
/// used to break cycles between `Arc` pointers.
#[unsafe_no_drop_flag]
pub struct Weak<T> {
x: *mut ArcInner<T>,
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut ArcInner<T>,
}
struct ArcInner<T> {
@ -83,7 +87,7 @@ impl<T: Share + Send> Arc<T> {
weak: atomics::AtomicUint::new(1),
data: data,
};
Arc { x: unsafe { mem::transmute(x) } }
Arc { _ptr: unsafe { mem::transmute(x) } }
}
#[inline]
@ -93,7 +97,7 @@ impl<T: Share + Send> Arc<T> {
// `ArcInner` structure itself is `Share` because the inner data is
// `Share` as well, so we're ok loaning out an immutable pointer to
// these contents.
unsafe { &*self.x }
unsafe { &*self._ptr }
}
/// Downgrades a strong pointer to a weak pointer
@ -104,7 +108,7 @@ impl<T: Share + Send> Arc<T> {
pub fn downgrade(&self) -> Weak<T> {
// See the clone() impl for why this is relaxed
self.inner().weak.fetch_add(1, atomics::Relaxed);
Weak { x: self.x }
Weak { _ptr: self._ptr }
}
}
@ -128,7 +132,7 @@ impl<T: Share + Send> Clone for Arc<T> {
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
self.inner().strong.fetch_add(1, atomics::Relaxed);
Arc { x: self.x }
Arc { _ptr: self._ptr }
}
}
@ -166,7 +170,7 @@ impl<T: Share + Send> Drop for Arc<T> {
// This structure has #[unsafe_no_drop_flag], so this drop glue may run
// more than once (but it is guaranteed to be zeroed after the first if
// it's run more than once)
if self.x.is_null() { return }
if self._ptr.is_null() { return }
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
@ -198,7 +202,7 @@ impl<T: Share + Send> Drop for Arc<T> {
if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
atomics::fence(atomics::Acquire);
unsafe { deallocate(self.x as *mut u8, size_of::<ArcInner<T>>(),
unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
min_align_of::<ArcInner<T>>()) }
}
}
@ -218,14 +222,14 @@ impl<T: Share + Send> Weak<T> {
let n = inner.strong.load(atomics::SeqCst);
if n == 0 { return None }
let old = inner.strong.compare_and_swap(n, n + 1, atomics::SeqCst);
if old == n { return Some(Arc { x: self.x }) }
if old == n { return Some(Arc { _ptr: self._ptr }) }
}
}
#[inline]
fn inner<'a>(&'a self) -> &'a ArcInner<T> {
// See comments above for why this is "safe"
unsafe { &*self.x }
unsafe { &*self._ptr }
}
}
@ -234,7 +238,7 @@ impl<T: Share + Send> Clone for Weak<T> {
fn clone(&self) -> Weak<T> {
// See comments in Arc::clone() for why this is relaxed
self.inner().weak.fetch_add(1, atomics::Relaxed);
Weak { x: self.x }
Weak { _ptr: self._ptr }
}
}
@ -242,14 +246,14 @@ impl<T: Share + Send> Clone for Weak<T> {
impl<T: Share + Send> Drop for Weak<T> {
fn drop(&mut self) {
// see comments above for why this check is here
if self.x.is_null() { return }
if self._ptr.is_null() { return }
// If we find out that we were the last weak pointer, then its time to
// deallocate the data entirely. See the discussion in Arc::drop() about
// the memory orderings
if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
atomics::fence(atomics::Acquire);
unsafe { deallocate(self.x as *mut u8, size_of::<ArcInner<T>>(),
unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
min_align_of::<ArcInner<T>>()) }
}
}
@ -261,7 +265,7 @@ mod tests {
use std::clone::Clone;
use std::comm::channel;
use std::mem::drop;
use std::ops::{Drop, Deref, DerefMut};
use std::ops::Drop;
use std::option::{Option, Some, None};
use std::sync::atomics;
use std::task;
@ -374,7 +378,7 @@ mod tests {
let a = Arc::new(Cycle { x: Mutex::new(None) });
let b = a.clone().downgrade();
*a.deref().x.lock().deref_mut() = Some(b);
*a.x.lock() = Some(b);
// hopefully we don't double-free (or leak)...
}


@ -45,9 +45,11 @@ struct RcBox<T> {
/// Immutable reference counted pointer type
#[unsafe_no_drop_flag]
pub struct Rc<T> {
ptr: *mut RcBox<T>,
nosend: marker::NoSend,
noshare: marker::NoShare
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut RcBox<T>,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
impl<T> Rc<T> {
@ -60,13 +62,13 @@ impl<T> Rc<T> {
// destructor never frees the allocation while the
// strong destructor is running, even if the weak
// pointer is stored inside the strong one.
ptr: transmute(box RcBox {
_ptr: transmute(box RcBox {
value: value,
strong: Cell::new(1),
weak: Cell::new(1)
}),
nosend: marker::NoSend,
noshare: marker::NoShare
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
}
}
@ -77,9 +79,9 @@ impl<T> Rc<T> {
pub fn downgrade(&self) -> Weak<T> {
self.inc_weak();
Weak {
ptr: self.ptr,
nosend: marker::NoSend,
noshare: marker::NoShare
_ptr: self._ptr,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
}
}
@ -96,7 +98,7 @@ impl<T> Deref<T> for Rc<T> {
impl<T> Drop for Rc<T> {
fn drop(&mut self) {
unsafe {
if !self.ptr.is_null() {
if !self._ptr.is_null() {
self.dec_strong();
if self.strong() == 0 {
ptr::read(self.deref()); // destroy the contained object
@ -106,7 +108,7 @@ impl<T> Drop for Rc<T> {
self.dec_weak();
if self.weak() == 0 {
deallocate(self.ptr as *mut u8, size_of::<RcBox<T>>(),
deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
@ -119,7 +121,7 @@ impl<T> Clone for Rc<T> {
#[inline]
fn clone(&self) -> Rc<T> {
self.inc_strong();
Rc { ptr: self.ptr, nosend: marker::NoSend, noshare: marker::NoShare }
Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare }
}
}
@ -154,9 +156,11 @@ impl<T: TotalOrd> TotalOrd for Rc<T> {
/// Weak reference to a reference-counted box
#[unsafe_no_drop_flag]
pub struct Weak<T> {
ptr: *mut RcBox<T>,
nosend: marker::NoSend,
noshare: marker::NoShare
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut RcBox<T>,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
impl<T> Weak<T> {
@ -166,7 +170,7 @@ impl<T> Weak<T> {
None
} else {
self.inc_strong();
Some(Rc { ptr: self.ptr, nosend: marker::NoSend, noshare: marker::NoShare })
Some(Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare })
}
}
}
@ -175,12 +179,12 @@ impl<T> Weak<T> {
impl<T> Drop for Weak<T> {
fn drop(&mut self) {
unsafe {
if !self.ptr.is_null() {
if !self._ptr.is_null() {
self.dec_weak();
// the weak count starts at 1, and will only go to
// zero if all the strong pointers have disappeared.
if self.weak() == 0 {
deallocate(self.ptr as *mut u8, size_of::<RcBox<T>>(),
deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
@ -192,7 +196,7 @@ impl<T> Clone for Weak<T> {
#[inline]
fn clone(&self) -> Weak<T> {
self.inc_weak();
Weak { ptr: self.ptr, nosend: marker::NoSend, noshare: marker::NoShare }
Weak { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare }
}
}
@ -221,12 +225,12 @@ trait RcBoxPtr<T> {
impl<T> RcBoxPtr<T> for Rc<T> {
#[inline(always)]
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self.ptr) } }
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self._ptr) } }
}
impl<T> RcBoxPtr<T> for Weak<T> {
#[inline(always)]
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self.ptr) } }
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self._ptr) } }
}
#[cfg(test)]


@ -251,7 +251,7 @@ impl<T> RefCell<T> {
WRITING => None,
borrow => {
self.borrow.set(borrow + 1);
Some(Ref { parent: self })
Some(Ref { _parent: self })
}
}
}
@ -281,7 +281,7 @@ impl<T> RefCell<T> {
match self.borrow.get() {
UNUSED => {
self.borrow.set(WRITING);
Some(RefMut { parent: self })
Some(RefMut { _parent: self })
},
_ => None
}
@ -317,22 +317,24 @@ impl<T: Eq> Eq for RefCell<T> {
/// Wraps a borrowed reference to a value in a `RefCell` box.
pub struct Ref<'b, T> {
parent: &'b RefCell<T>
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_parent: &'b RefCell<T>
}
#[unsafe_destructor]
impl<'b, T> Drop for Ref<'b, T> {
fn drop(&mut self) {
let borrow = self.parent.borrow.get();
let borrow = self._parent.borrow.get();
debug_assert!(borrow != WRITING && borrow != UNUSED);
self.parent.borrow.set(borrow - 1);
self._parent.borrow.set(borrow - 1);
}
}
impl<'b, T> Deref<T> for Ref<'b, T> {
#[inline]
fn deref<'a>(&'a self) -> &'a T {
unsafe { &*self.parent.value.get() }
unsafe { &*self._parent.value.get() }
}
}
@ -346,40 +348,42 @@ impl<'b, T> Deref<T> for Ref<'b, T> {
pub fn clone_ref<'b, T>(orig: &Ref<'b, T>) -> Ref<'b, T> {
// Since this Ref exists, we know the borrow flag
// is not set to WRITING.
let borrow = orig.parent.borrow.get();
let borrow = orig._parent.borrow.get();
debug_assert!(borrow != WRITING && borrow != UNUSED);
orig.parent.borrow.set(borrow + 1);
orig._parent.borrow.set(borrow + 1);
Ref {
parent: orig.parent,
_parent: orig._parent,
}
}
/// Wraps a mutable borrowed reference to a value in a `RefCell` box.
pub struct RefMut<'b, T> {
parent: &'b RefCell<T>
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_parent: &'b RefCell<T>
}
#[unsafe_destructor]
impl<'b, T> Drop for RefMut<'b, T> {
fn drop(&mut self) {
let borrow = self.parent.borrow.get();
let borrow = self._parent.borrow.get();
debug_assert!(borrow == WRITING);
self.parent.borrow.set(UNUSED);
self._parent.borrow.set(UNUSED);
}
}
impl<'b, T> Deref<T> for RefMut<'b, T> {
#[inline]
fn deref<'a>(&'a self) -> &'a T {
unsafe { &*self.parent.value.get() }
unsafe { &*self._parent.value.get() }
}
}
impl<'b, T> DerefMut<T> for RefMut<'b, T> {
#[inline]
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
unsafe { &mut *self.parent.value.get() }
unsafe { &mut *self._parent.value.get() }
}
}


@ -127,10 +127,12 @@ fn key_to_key_value<T: 'static>(key: Key<T>) -> *u8 {
/// The task-local data can be accessed through this value, and when this
/// structure is dropped it will return the borrow on the data.
pub struct Ref<T> {
ptr: &'static T,
key: Key<T>,
index: uint,
nosend: marker::NoSend,
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: &'static T,
_key: Key<T>,
_index: uint,
_nosend: marker::NoSend,
}
impl<T: 'static> KeyValue<T> {
@ -233,7 +235,7 @@ impl<T: 'static> KeyValue<T> {
let data = data as *Box<LocalData:Send> as *raw::TraitObject;
&mut *((*data).data as *mut T)
};
Ref { ptr: ptr, index: pos, nosend: marker::NoSend, key: self }
Ref { _ptr: ptr, _index: pos, _nosend: marker::NoSend, _key: self }
})
}
@ -252,7 +254,7 @@ impl<T: 'static> KeyValue<T> {
}
impl<T: 'static> Deref<T> for Ref<T> {
fn deref<'a>(&'a self) -> &'a T { self.ptr }
fn deref<'a>(&'a self) -> &'a T { self._ptr }
}
#[unsafe_destructor]
@ -260,7 +262,7 @@ impl<T: 'static> Drop for Ref<T> {
fn drop(&mut self) {
let map = unsafe { get_local_map() };
let (_, _, ref mut loan) = *map.get_mut(self.index).get_mut_ref();
let (_, _, ref mut loan) = *map.get_mut(self._index).get_mut_ref();
*loan -= 1;
}
}


@ -174,7 +174,9 @@ pub struct Mutex<T> {
/// An guard which is created by locking a mutex. Through this guard the
/// underlying data can be accessed.
pub struct MutexGuard<'a, T> {
data: &'a mut T,
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_data: &'a mut T,
/// Inner condition variable connected to the locked mutex that this guard
/// was created from. This can be used for atomic-unlock-and-deschedule.
pub cond: Condvar<'a>,
@ -221,7 +223,7 @@ impl<T: Send> Mutex<T> {
let data = unsafe { &mut *self.data.get() };
MutexGuard {
data: data,
_data: data,
cond: Condvar {
name: "Mutex",
poison: PoisonOnFail::new(poison, "Mutex"),
@ -232,10 +234,10 @@ impl<T: Send> Mutex<T> {
}
impl<'a, T: Send> Deref<T> for MutexGuard<'a, T> {
fn deref<'a>(&'a self) -> &'a T { &*self.data }
fn deref<'a>(&'a self) -> &'a T { &*self._data }
}
impl<'a, T: Send> DerefMut<T> for MutexGuard<'a, T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self.data }
fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data }
}
/****************************************************************************
@ -272,7 +274,9 @@ pub struct RWLock<T> {
/// A guard which is created by locking an rwlock in write mode. Through this
/// guard the underlying data can be accessed.
pub struct RWLockWriteGuard<'a, T> {
data: &'a mut T,
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_data: &'a mut T,
/// Inner condition variable that can be used to sleep on the write mode of
/// this rwlock.
pub cond: Condvar<'a>,
@ -281,8 +285,10 @@ pub struct RWLockWriteGuard<'a, T> {
/// A guard which is created by locking an rwlock in read mode. Through this
/// guard the underlying data can be accessed.
pub struct RWLockReadGuard<'a, T> {
data: &'a T,
guard: raw::RWLockReadGuard<'a>,
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_data: &'a T,
_guard: raw::RWLockReadGuard<'a>,
}
impl<T: Send + Share> RWLock<T> {
@ -320,7 +326,7 @@ impl<T: Send + Share> RWLock<T> {
let data = unsafe { &mut *self.data.get() };
RWLockWriteGuard {
data: data,
_data: data,
cond: Condvar {
name: "RWLock",
poison: PoisonOnFail::new(poison, "RWLock"),
@ -340,8 +346,8 @@ impl<T: Send + Share> RWLock<T> {
let guard = self.lock.read();
PoisonOnFail::check(unsafe { *self.failed.get() }, "RWLock");
RWLockReadGuard {
guard: guard,
data: unsafe { &*self.data.get() },
_guard: guard,
_data: unsafe { &*self.data.get() },
}
}
}
@ -351,25 +357,25 @@ impl<'a, T: Send + Share> RWLockWriteGuard<'a, T> {
///
/// This will allow pending readers to come into the lock.
pub fn downgrade(self) -> RWLockReadGuard<'a, T> {
let RWLockWriteGuard { data, cond } = self;
let RWLockWriteGuard { _data, cond } = self;
// convert the data to read-only explicitly
let data = &*data;
let data = &*_data;
let guard = match cond.inner {
InnerMutex(..) => unreachable!(),
InnerRWLock(guard) => guard.downgrade()
};
RWLockReadGuard { guard: guard, data: data }
RWLockReadGuard { _guard: guard, _data: data }
}
}
impl<'a, T: Send + Share> Deref<T> for RWLockReadGuard<'a, T> {
fn deref<'a>(&'a self) -> &'a T { self.data }
fn deref<'a>(&'a self) -> &'a T { self._data }
}
impl<'a, T: Send + Share> Deref<T> for RWLockWriteGuard<'a, T> {
fn deref<'a>(&'a self) -> &'a T { &*self.data }
fn deref<'a>(&'a self) -> &'a T { &*self._data }
}
impl<'a, T: Send + Share> DerefMut<T> for RWLockWriteGuard<'a, T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self.data }
fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data }
}
/****************************************************************************
@ -812,4 +818,3 @@ mod tests {
}
}
}