Small cleanups in Windows Mutex.
- Move `held` into the boxed part, since the SRW lock implementation does not use it. This makes the Mutex 50% smaller.
- Use `Cell` instead of `UnsafeCell` for `held`, so that `.replace()` can be used.
- Add some comments.
parent 2d6cbd21b2
commit 1016deb592
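
The "50% smaller" claim is plain layout arithmetic: on a 64-bit target the old struct is a usize plus a bool padded out to a second word (16 bytes), while the new one is the usize alone (8 bytes). A minimal sketch with mock types, assuming a 64-bit target; `OldMutex`, `NewMutex`, and the pared-down `Inner` are illustrative stand-ins, not std's real definitions:

    use std::cell::{Cell, UnsafeCell};
    use std::mem::size_of;
    use std::sync::atomic::AtomicUsize;

    // Old layout: the `held` flag forces a second, mostly-padding word.
    #[allow(dead_code)]
    struct OldMutex {
        lock: AtomicUsize,
        held: UnsafeCell<bool>,
    }

    // New layout: only the atomic word; `held` lives behind the pointer.
    #[allow(dead_code)]
    struct NewMutex {
        lock: AtomicUsize,
    }

    #[allow(dead_code)]
    struct Inner {
        held: Cell<bool>,
    }

    fn main() {
        println!("old: {} bytes", size_of::<OldMutex>()); // 16 on x86_64
        println!("new: {} bytes", size_of::<NewMutex>()); // 8 on x86_64
    }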
@@ -19,20 +19,25 @@
 //! CriticalSection is used and we keep track of who's holding the mutex to
 //! detect recursive locks.
 
-use crate::cell::UnsafeCell;
+use crate::cell::{Cell, UnsafeCell};
 use crate::mem::{self, MaybeUninit};
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sys::c;
 use crate::sys::compat;
 
 pub struct Mutex {
+    // This is either directly an SRWLOCK (if supported), or a Box<Inner> otherwise.
     lock: AtomicUsize,
-    held: UnsafeCell<bool>,
 }
 
 unsafe impl Send for Mutex {}
 unsafe impl Sync for Mutex {}
 
+struct Inner {
+    remutex: ReentrantMutex,
+    held: Cell<bool>,
+}
+
 #[derive(Clone, Copy)]
 enum Kind {
     SRWLock = 1,
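
The comment added above is the heart of the trick: an SRWLOCK is a single pointer-sized word whose initializer, SRWLOCK_INIT, is all zeroes (the diff's own comment notes it is "0 (wrapped in a struct)"), so a zero-initialized AtomicUsize doubles as a ready-to-use SRWLOCK on the fast path and as a "not yet allocated" marker on the fallback path. A portable sketch of the reinterpretation; `SRWLOCK` here is a local mock so the snippet compiles off Windows, while the real type and the `raw()` helper live in std's Windows sys layer:

    use std::sync::atomic::AtomicUsize;

    // Mock of Windows' SRWLOCK: one pointer-sized word, zero-initialized.
    #[repr(C)]
    #[allow(dead_code)]
    struct SRWLOCK {
        ptr: *mut std::ffi::c_void,
    }

    struct Mutex {
        // 0 = SRWLOCK_INIT on the SRW path, "unallocated" on the fallback path.
        lock: AtomicUsize,
    }

    // Reinterpret the atomic word as an SRWLOCK in place, as std's raw() does.
    fn raw(m: &Mutex) -> *mut SRWLOCK {
        &m.lock as *const AtomicUsize as *mut SRWLOCK
    }

    fn main() {
        let m = Mutex { lock: AtomicUsize::new(0) };
        assert!(!raw(&m).is_null());
    }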
@@ -51,7 +56,6 @@ impl Mutex {
             // This works because SRWLOCK_INIT is 0 (wrapped in a struct), so we are also properly
             // initializing an SRWLOCK here.
             lock: AtomicUsize::new(0),
-            held: UnsafeCell::new(false),
         }
     }
     #[inline]
@@ -60,10 +64,11 @@ impl Mutex {
         match kind() {
             Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
             Kind::CriticalSection => {
-                let re = self.remutex();
-                (*re).lock();
-                if !self.flag_locked() {
-                    (*re).unlock();
+                let inner = &mut *self.inner();
+                inner.remutex.lock();
+                if inner.held.replace(true) {
+                    // It was already locked, so we got a recursive lock which we do not want.
+                    inner.remutex.unlock();
                     panic!("cannot recursively lock a mutex");
                 }
             }
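
This hunk is the pay-off of switching `held` to a `Cell`: `held.replace(true)` marks the mutex as held and returns the previous value in a single call, so "was it already held?" no longer needs the separate `flag_locked` helper. A standalone sketch of the idiom:

    use std::cell::Cell;

    fn main() {
        let held = Cell::new(false);

        // First acquisition: replace() stores true and hands back the
        // old value (false), so no recursion is detected.
        assert!(!held.replace(true));

        // A second acquisition by the same thread sees true and can bail
        // out (std panics with "cannot recursively lock a mutex" here).
        assert!(held.replace(true));

        // Unlock clears the flag again, re-arming the check.
        held.set(false);
        assert!(!held.replace(true));
    }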
@@ -73,23 +78,27 @@ impl Mutex {
         match kind() {
             Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0,
             Kind::CriticalSection => {
-                let re = self.remutex();
-                if !(*re).try_lock() {
+                let inner = &mut *self.inner();
+                if !inner.remutex.try_lock() {
+                    false
+                } else if inner.held.replace(true) {
+                    // It was already locked, so we got a recursive lock which we do not want.
+                    inner.remutex.unlock();
                     false
-                } else if self.flag_locked() {
-                    true
                 } else {
-                    (*re).unlock();
-                    false
+                    true
                 }
             }
         }
     }
     pub unsafe fn unlock(&self) {
-        *self.held.get() = false;
         match kind() {
             Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)),
-            Kind::CriticalSection => (*self.remutex()).unlock(),
+            Kind::CriticalSection => {
+                let inner = &mut *(self.lock.load(Ordering::SeqCst) as *mut Inner);
+                inner.held.set(false);
+                inner.remutex.unlock();
+            }
         }
     }
     pub unsafe fn destroy(&self) {
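
The rewritten try_lock now reads as a three-way decision: the underlying reentrant lock is contended (return false), it was acquired but this thread already held the mutex (undo the acquisition, return false), or this is a genuine first acquisition (return true); unlock then clears the same flag inside the boxed Inner before releasing the reentrant lock. A sketch modeling that decision; `FakeReMutex` is a hypothetical stand-in for the Win32 critical-section wrapper, reduced to a recursion counter:

    use std::cell::Cell;

    // Hypothetical stand-in for the reentrant fallback mutex: a plain
    // single-threaded recursion counter instead of a critical section.
    struct FakeReMutex {
        depth: Cell<u32>,
    }

    impl FakeReMutex {
        fn try_lock(&self) -> bool {
            // A reentrant lock always succeeds on the owning thread.
            self.depth.set(self.depth.get() + 1);
            true
        }
        fn unlock(&self) {
            self.depth.set(self.depth.get() - 1);
        }
    }

    // The same three-way decision as the patched try_lock().
    fn try_lock(remutex: &FakeReMutex, held: &Cell<bool>) -> bool {
        if !remutex.try_lock() {
            false // contended: another thread owns it
        } else if held.replace(true) {
            // Reacquired recursively: undo and report failure.
            remutex.unlock();
            false
        } else {
            true // genuine first acquisition
        }
    }

    fn main() {
        let re = FakeReMutex { depth: Cell::new(0) };
        let held = Cell::new(false);
        assert!(try_lock(&re, &held)); // succeeds
        assert!(!try_lock(&re, &held)); // recursive attempt is refused
    }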
@@ -98,37 +107,28 @@ impl Mutex {
             Kind::CriticalSection => match self.lock.load(Ordering::SeqCst) {
                 0 => {}
                 n => {
-                    Box::from_raw(n as *mut ReentrantMutex).destroy();
+                    Box::from_raw(n as *mut Inner).remutex.destroy();
                 }
             },
         }
     }
 
-    unsafe fn remutex(&self) -> *mut ReentrantMutex {
+    unsafe fn inner(&self) -> *mut Inner {
         match self.lock.load(Ordering::SeqCst) {
             0 => {}
             n => return n as *mut _,
         }
-        let re = box ReentrantMutex::uninitialized();
-        re.init();
-        let re = Box::into_raw(re);
-        match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) {
-            0 => re,
+        let inner = box Inner { remutex: ReentrantMutex::uninitialized(), held: Cell::new(false) };
+        inner.remutex.init();
+        let inner = Box::into_raw(inner);
+        match self.lock.compare_and_swap(0, inner as usize, Ordering::SeqCst) {
+            0 => inner,
             n => {
-                Box::from_raw(re).destroy();
+                Box::from_raw(inner).remutex.destroy();
                 n as *mut _
             }
         }
     }
-
-    unsafe fn flag_locked(&self) -> bool {
-        if *self.held.get() {
-            false
-        } else {
-            *self.held.get() = true;
-            true
-        }
-    }
 }
 
 fn kind() -> Kind {
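
The renamed inner() keeps the pre-existing lazy allocation pattern, just with Inner in place of a bare ReentrantMutex: allocate eagerly, publish the pointer with a compare-and-swap, and if another thread won the race, free the fresh allocation and adopt the winner's. A sketch of that pattern with a plain payload; `Inner` here is a hypothetical stand-in, and compare_exchange is the modern replacement for the now-deprecated compare_and_swap used in the diff:

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Hypothetical payload standing in for std's Inner.
    #[allow(dead_code)]
    struct Inner {
        value: u32,
    }

    fn inner(slot: &AtomicUsize) -> *mut Inner {
        // Fast path: someone already initialized the slot.
        match slot.load(Ordering::SeqCst) {
            0 => {}
            n => return n as *mut Inner,
        }
        // Slow path: allocate, then race to publish the pointer.
        let fresh = Box::into_raw(Box::new(Inner { value: 42 }));
        match slot.compare_exchange(0, fresh as usize, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(_) => fresh,
            Err(n) => {
                // Another thread won the race: drop ours, use theirs.
                unsafe { drop(Box::from_raw(fresh)) };
                n as *mut Inner
            }
        }
    }

    fn main() {
        let slot = AtomicUsize::new(0);
        let a = inner(&slot);
        let b = inner(&slot);
        assert_eq!(a, b); // every caller sees the same boxed Inner
        unsafe { drop(Box::from_raw(a)) }; // cleanup, as destroy() does
    }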