Rollup merge of #77657 - fusion-engineering-forks:cleanup-cloudabi-sync, r=dtolnay

Cleanup cloudabi mutexes and condvars

This gets rid of lots of unnecessary unsafety.

All the AtomicU32s were wrapped in UnsafeCell or UnsafeCell<MaybeUninit>, and raw pointers were used to get to the AtomicU32 inside. This change cleans that up by using AtomicU32 directly.
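To illustrate the pattern (a simplified sketch, not the actual CloudABI code; the type and method names here are made up):

```rust
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicU32, Ordering};

// Before: the atomic is wrapped, so every access goes through a raw
// pointer and an unsafe dereference, even though AtomicU32 can already
// be mutated through a shared reference.
struct OldCondvar {
    condvar: UnsafeCell<AtomicU32>,
}

impl OldCondvar {
    unsafe fn has_waiters(&self) -> bool {
        let ptr = self.condvar.get(); // *mut AtomicU32
        (*ptr).load(Ordering::Relaxed) != 0
    }
}

// After: AtomicU32 provides interior mutability itself, so the wrapper
// and the pointer juggling are unnecessary.
struct NewCondvar {
    condvar: AtomicU32,
}

impl NewCondvar {
    fn has_waiters(&self) -> bool {
        self.condvar.load(Ordering::Relaxed) != 0
    }
}

fn main() {
    let c = NewCondvar { condvar: AtomicU32::new(0) };
    assert!(!c.has_waiters());
}
```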

It also replaces an UnsafeCell<MaybeUninit<u32>> recursion counter with a safer Cell<u32>.
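A similar sketch for the recursion counter, using a stripped-down stand-in type (again not the real ReentrantMutex):

```rust
use std::cell::Cell;

// The recursion counter is only touched while the lock is already held,
// so plain single-threaded interior mutability via Cell<u32> is enough
// and no raw pointers are involved.
struct Recursion {
    count: Cell<u32>,
}

impl Recursion {
    fn new() -> Self {
        Recursion { count: Cell::new(0) }
    }

    fn enter(&self) {
        self.count.set(self.count.get() + 1);
    }

    fn leave(&self) {
        let c = self.count.get();
        assert!(c > 0, "unbalanced leave");
        self.count.set(c - 1);
    }
}

fn main() {
    let r = Recursion::new();
    r.enter();
    r.leave();
    assert_eq!(r.count.get(), 0);
}
```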

@rustbot modify labels: +C-cleanup
Dylan DPC 2020-10-16 02:10:17 +02:00 committed by GitHub
commit 9b8c0eb107
3 changed files with 65 additions and 77 deletions

library/std/src/sys/cloudabi/condvar.rs

@@ -1,4 +1,3 @@
-use crate::cell::UnsafeCell;
 use crate::mem;
 use crate::sync::atomic::{AtomicU32, Ordering};
 use crate::sys::cloudabi::abi;
@@ -12,7 +11,7 @@ extern "C" {
 }
 pub struct Condvar {
-    condvar: UnsafeCell<AtomicU32>,
+    condvar: AtomicU32,
 }
 pub type MovableCondvar = Condvar;
@@ -20,29 +19,28 @@ pub type MovableCondvar = Condvar;
 unsafe impl Send for Condvar {}
 unsafe impl Sync for Condvar {}
-const NEW: Condvar =
-    Condvar { condvar: UnsafeCell::new(AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0)) };
 impl Condvar {
     pub const fn new() -> Condvar {
-        NEW
+        Condvar { condvar: AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0) }
     }
     pub unsafe fn init(&mut self) {}
     pub unsafe fn notify_one(&self) {
-        let condvar = self.condvar.get();
-        if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
-            let ret = abi::condvar_signal(condvar as *mut abi::condvar, abi::scope::PRIVATE, 1);
+        if self.condvar.load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
+            let ret = abi::condvar_signal(
+                &self.condvar as *const AtomicU32 as *mut abi::condvar,
+                abi::scope::PRIVATE,
+                1,
+            );
             assert_eq!(ret, abi::errno::SUCCESS, "Failed to signal on condition variable");
         }
     }
     pub unsafe fn notify_all(&self) {
-        let condvar = self.condvar.get();
-        if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
+        if self.condvar.load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
             let ret = abi::condvar_signal(
-                condvar as *mut abi::condvar,
+                &self.condvar as *const AtomicU32 as *mut abi::condvar,
                 abi::scope::PRIVATE,
                 abi::nthreads::MAX,
             );
@@ -53,20 +51,19 @@ impl Condvar {
     pub unsafe fn wait(&self, mutex: &Mutex) {
         let mutex = mutex::raw(mutex);
         assert_eq!(
-            (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+            mutex.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
             __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
             "This lock is not write-locked by this thread"
         );
         // Call into the kernel to wait on the condition variable.
-        let condvar = self.condvar.get();
         let subscription = abi::subscription {
             type_: abi::eventtype::CONDVAR,
             union: abi::subscription_union {
                 condvar: abi::subscription_condvar {
-                    condvar: condvar as *mut abi::condvar,
+                    condvar: &self.condvar as *const AtomicU32 as *mut abi::condvar,
                     condvar_scope: abi::scope::PRIVATE,
-                    lock: mutex as *mut abi::lock,
+                    lock: mutex as *const AtomicU32 as *mut abi::lock,
                     lock_scope: abi::scope::PRIVATE,
                 },
             },
@@ -86,13 +83,12 @@ impl Condvar {
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
         let mutex = mutex::raw(mutex);
         assert_eq!(
-            (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+            mutex.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
             __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
             "This lock is not write-locked by this thread"
         );
         // Call into the kernel to wait on the condition variable.
-        let condvar = self.condvar.get();
         let timeout =
             checked_dur2intervals(&dur).expect("overflow converting duration to nanoseconds");
         let subscriptions = [
@@ -100,9 +96,9 @@ impl Condvar {
                 type_: abi::eventtype::CONDVAR,
                 union: abi::subscription_union {
                     condvar: abi::subscription_condvar {
-                        condvar: condvar as *mut abi::condvar,
+                        condvar: &self.condvar as *const AtomicU32 as *mut abi::condvar,
                         condvar_scope: abi::scope::PRIVATE,
-                        lock: mutex as *mut abi::lock,
+                        lock: mutex as *const AtomicU32 as *mut abi::lock,
                         lock_scope: abi::scope::PRIVATE,
                     },
                 },
@@ -124,7 +120,7 @@ impl Condvar {
         let mut nevents: mem::MaybeUninit<usize> = mem::MaybeUninit::uninit();
         let ret = abi::poll(
             subscriptions.as_ptr(),
-            mem::MaybeUninit::first_ptr_mut(&mut events),
+            mem::MaybeUninit::slice_as_mut_ptr(&mut events),
             2,
             nevents.as_mut_ptr(),
         );
@@ -144,9 +140,8 @@ impl Condvar {
     }
     pub unsafe fn destroy(&self) {
-        let condvar = self.condvar.get();
         assert_eq!(
-            (*condvar).load(Ordering::Relaxed),
+            self.condvar.load(Ordering::Relaxed),
             abi::CONDVAR_HAS_NO_WAITERS.0,
             "Attempted to destroy a condition variable with blocked threads"
         );

library/std/src/sys/cloudabi/mutex.rs

@@ -1,4 +1,4 @@
-use crate::cell::UnsafeCell;
+use crate::cell::Cell;
 use crate::mem;
 use crate::mem::MaybeUninit;
 use crate::sync::atomic::{AtomicU32, Ordering};
@@ -17,7 +17,7 @@ pub struct Mutex(RWLock);
 pub type MovableMutex = Mutex;
-pub unsafe fn raw(m: &Mutex) -> *mut AtomicU32 {
+pub unsafe fn raw(m: &Mutex) -> &AtomicU32 {
     rwlock::raw(&m.0)
 }
@@ -50,28 +50,23 @@ impl Mutex {
 }
 pub struct ReentrantMutex {
-    lock: UnsafeCell<MaybeUninit<AtomicU32>>,
-    recursion: UnsafeCell<MaybeUninit<u32>>,
+    lock: AtomicU32,
+    recursion: Cell<u32>,
 }
 unsafe impl Send for ReentrantMutex {}
 unsafe impl Sync for ReentrantMutex {}
 impl ReentrantMutex {
     pub const unsafe fn uninitialized() -> ReentrantMutex {
-        ReentrantMutex {
-            lock: UnsafeCell::new(MaybeUninit::uninit()),
-            recursion: UnsafeCell::new(MaybeUninit::uninit()),
-        }
+        ReentrantMutex { lock: AtomicU32::new(abi::LOCK_UNLOCKED.0), recursion: Cell::new(0) }
     }
-    pub unsafe fn init(&self) {
-        *self.lock.get() = MaybeUninit::new(AtomicU32::new(abi::LOCK_UNLOCKED.0));
-        *self.recursion.get() = MaybeUninit::new(0);
-    }
+    pub unsafe fn init(&self) {}
     pub unsafe fn try_lock(&self) -> bool {
         // Attempt to acquire the lock.
-        let lock = (*self.lock.get()).as_mut_ptr();
-        let recursion = (*self.recursion.get()).as_mut_ptr();
-        if let Err(old) = (*lock).compare_exchange(
+        if let Err(old) = self.lock.compare_exchange(
             abi::LOCK_UNLOCKED.0,
             __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
             Ordering::Acquire,
@@ -80,14 +75,14 @@ impl ReentrantMutex {
             // If we fail to acquire the lock, it may be the case
             // that we've already acquired it and may need to recurse.
             if old & !abi::LOCK_KERNEL_MANAGED.0 == __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 {
-                *recursion += 1;
+                self.recursion.set(self.recursion.get() + 1);
                 true
             } else {
                 false
             }
         } else {
             // Success.
-            assert_eq!(*recursion, 0, "Mutex has invalid recursion count");
+            assert_eq!(self.recursion.get(), 0, "Mutex has invalid recursion count");
             true
         }
     }
@@ -95,7 +90,7 @@ impl ReentrantMutex {
     pub unsafe fn lock(&self) {
         if !self.try_lock() {
             // Call into the kernel to acquire a write lock.
-            let lock = self.lock.get();
+            let lock = &self.lock as *const AtomicU32;
             let subscription = abi::subscription {
                 type_: abi::eventtype::LOCK_WRLOCK,
                 union: abi::subscription_union {
@@ -116,17 +111,17 @@ impl ReentrantMutex {
     }
     pub unsafe fn unlock(&self) {
-        let lock = (*self.lock.get()).as_mut_ptr();
-        let recursion = (*self.recursion.get()).as_mut_ptr();
         assert_eq!(
-            (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+            self.lock.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
             __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
             "This mutex is locked by a different thread"
         );
-        if *recursion > 0 {
-            *recursion -= 1;
-        } else if !(*lock)
+        let r = self.recursion.get();
+        if r > 0 {
+            self.recursion.set(r - 1);
+        } else if !self
+            .lock
             .compare_exchange(
                 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
                 abi::LOCK_UNLOCKED.0,
@@ -137,19 +132,20 @@ impl ReentrantMutex {
         {
             // Lock is managed by kernelspace. Call into the kernel
             // to unblock waiting threads.
-            let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE);
+            let ret = abi::lock_unlock(
+                &self.lock as *const AtomicU32 as *mut abi::lock,
+                abi::scope::PRIVATE,
+            );
             assert_eq!(ret, abi::errno::SUCCESS, "Failed to unlock a mutex");
         }
     }
     pub unsafe fn destroy(&self) {
-        let lock = (*self.lock.get()).as_mut_ptr();
-        let recursion = (*self.recursion.get()).as_mut_ptr();
         assert_eq!(
-            (*lock).load(Ordering::Relaxed),
+            self.lock.load(Ordering::Relaxed),
             abi::LOCK_UNLOCKED.0,
             "Attempted to destroy locked mutex"
         );
-        assert_eq!(*recursion, 0, "Recursion counter invalid");
+        assert_eq!(self.recursion.get(), 0, "Recursion counter invalid");
     }
 }

library/std/src/sys/cloudabi/rwlock.rs

@@ -1,4 +1,3 @@
-use crate::cell::UnsafeCell;
 use crate::mem;
 use crate::mem::MaybeUninit;
 use crate::sync::atomic::{AtomicU32, Ordering};
@@ -13,28 +12,25 @@ extern "C" {
 static mut RDLOCKS_ACQUIRED: u32 = 0;
 pub struct RWLock {
-    lock: UnsafeCell<AtomicU32>,
+    lock: AtomicU32,
 }
-pub unsafe fn raw(r: &RWLock) -> *mut AtomicU32 {
-    r.lock.get()
+pub unsafe fn raw(r: &RWLock) -> &AtomicU32 {
+    &r.lock
 }
 unsafe impl Send for RWLock {}
 unsafe impl Sync for RWLock {}
-const NEW: RWLock = RWLock { lock: UnsafeCell::new(AtomicU32::new(abi::LOCK_UNLOCKED.0)) };
 impl RWLock {
     pub const fn new() -> RWLock {
-        NEW
+        RWLock { lock: AtomicU32::new(abi::LOCK_UNLOCKED.0) }
     }
     pub unsafe fn try_read(&self) -> bool {
-        let lock = self.lock.get();
         let mut old = abi::LOCK_UNLOCKED.0;
         while let Err(cur) =
-            (*lock).compare_exchange_weak(old, old + 1, Ordering::Acquire, Ordering::Relaxed)
+            self.lock.compare_exchange_weak(old, old + 1, Ordering::Acquire, Ordering::Relaxed)
         {
             if (cur & abi::LOCK_WRLOCKED.0) != 0 {
                 // Another thread already has a write lock.
@@ -61,12 +57,11 @@ impl RWLock {
     pub unsafe fn read(&self) {
         if !self.try_read() {
             // Call into the kernel to acquire a read lock.
-            let lock = self.lock.get();
             let subscription = abi::subscription {
                 type_: abi::eventtype::LOCK_RDLOCK,
                 union: abi::subscription_union {
                     lock: abi::subscription_lock {
-                        lock: lock as *mut abi::lock,
+                        lock: &self.lock as *const AtomicU32 as *mut abi::lock,
                         lock_scope: abi::scope::PRIVATE,
                     },
                 },
@@ -96,11 +91,10 @@ impl RWLock {
         assert!(RDLOCKS_ACQUIRED > 0, "Bad lock count");
         let mut old = 1;
         loop {
-            let lock = self.lock.get();
             if old == 1 | abi::LOCK_KERNEL_MANAGED.0 {
                 // Last read lock while threads are waiting. Attempt to upgrade
                 // to a write lock before calling into the kernel to unlock.
-                if let Err(cur) = (*lock).compare_exchange_weak(
+                if let Err(cur) = self.lock.compare_exchange_weak(
                     old,
                     __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 | abi::LOCK_KERNEL_MANAGED.0,
                     Ordering::Acquire,
@@ -109,7 +103,10 @@ impl RWLock {
                     old = cur;
                 } else {
                     // Call into the kernel to unlock.
-                    let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE);
+                    let ret = abi::lock_unlock(
+                        &self.lock as *const AtomicU32 as *mut abi::lock,
+                        abi::scope::PRIVATE,
+                    );
                     assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock");
                     break;
                 }
@@ -122,7 +119,7 @@ impl RWLock {
                     0,
                     "Attempted to read-unlock a write-locked rwlock"
                 );
-                if let Err(cur) = (*lock).compare_exchange_weak(
+                if let Err(cur) = self.lock.compare_exchange_weak(
                     old,
                     old - 1,
                     Ordering::Acquire,
@@ -140,8 +137,7 @@ impl RWLock {
     pub unsafe fn try_write(&self) -> bool {
         // Attempt to acquire the lock.
-        let lock = self.lock.get();
-        if let Err(old) = (*lock).compare_exchange(
+        if let Err(old) = self.lock.compare_exchange(
            abi::LOCK_UNLOCKED.0,
            __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
            Ordering::Acquire,
@@ -163,12 +159,11 @@ impl RWLock {
     pub unsafe fn write(&self) {
         if !self.try_write() {
             // Call into the kernel to acquire a write lock.
-            let lock = self.lock.get();
             let subscription = abi::subscription {
                 type_: abi::eventtype::LOCK_WRLOCK,
                 union: abi::subscription_union {
                     lock: abi::subscription_lock {
-                        lock: lock as *mut abi::lock,
+                        lock: &self.lock as *const AtomicU32 as *mut abi::lock,
                         lock_scope: abi::scope::PRIVATE,
                     },
                 },
@@ -184,14 +179,14 @@ impl RWLock {
     }
     pub unsafe fn write_unlock(&self) {
-        let lock = self.lock.get();
         assert_eq!(
-            (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+            self.lock.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
             __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
             "This rwlock is not write-locked by this thread"
        );
-        if !(*lock)
+        if !self
+            .lock
            .compare_exchange(
                __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
                abi::LOCK_UNLOCKED.0,
@@ -202,15 +197,17 @@ impl RWLock {
        {
            // Lock is managed by kernelspace. Call into the kernel
            // to unblock waiting threads.
-            let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE);
+            let ret = abi::lock_unlock(
+                &self.lock as *const AtomicU32 as *mut abi::lock,
+                abi::scope::PRIVATE,
+            );
            assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock");
        }
    }
     pub unsafe fn destroy(&self) {
-        let lock = self.lock.get();
        assert_eq!(
-            (*lock).load(Ordering::Relaxed),
+            self.lock.load(Ordering::Relaxed),
            abi::LOCK_UNLOCKED.0,
            "Attempted to destroy locked rwlock"
        );