auto merge of #20367 : retep998/rust/master, r=alexcrichton

Also adjusted some of the FFI definitions: the Windows headers declare these pointer types with the plain `P` prefix (`PSRWLOCK`, `PCONDITION_VARIABLE`) rather than the `LP` long-pointer prefix.
Gives a free performance boost, since `SRWLock` is several times faster than `CriticalSection` on every Windows system tested.
Fixes #19962
bors 2015-01-13 19:11:47 +00:00
commit 4fd1e6235d
3 changed files with 49 additions and 83 deletions

src/libstd/sys/windows/condvar.rs

@@ -27,16 +27,18 @@ impl Condvar {
     #[inline]
     pub unsafe fn wait(&self, mutex: &Mutex) {
-        let r = ffi::SleepConditionVariableCS(self.inner.get(),
-                                              mutex::raw(mutex),
-                                              libc::INFINITE);
+        let r = ffi::SleepConditionVariableSRW(self.inner.get(),
+                                               mutex::raw(mutex),
+                                               libc::INFINITE,
+                                               0);
         debug_assert!(r != 0);
     }
 
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
-        let r = ffi::SleepConditionVariableCS(self.inner.get(),
-                                              mutex::raw(mutex),
-                                              dur.num_milliseconds() as DWORD);
+        let r = ffi::SleepConditionVariableSRW(self.inner.get(),
+                                               mutex::raw(mutex),
+                                               dur.num_milliseconds() as DWORD,
+                                               0);
         if r == 0 {
             const ERROR_TIMEOUT: DWORD = 0x5B4;
             debug_assert_eq!(os::errno() as uint, ERROR_TIMEOUT as uint);
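The functional change here is the extra fourth argument: SleepConditionVariableSRW takes a Flags parameter, and 0 means the SRW lock is held in exclusive mode (CONDITION_VARIABLE_LOCKMODE_SHARED would indicate shared mode). Like any condition variable, this one is subject to spurious wakeups, so code built on these wrappers still needs a predicate loop; a minimal sketch of that pattern against this module's Condvar/Mutex (illustrative only, not part of the commit):

// Hypothetical helper; assumes the Condvar and Mutex wrappers defined in
// these files, and that the caller already holds `m`.
unsafe fn wait_while<F: FnMut() -> bool>(cv: &Condvar, m: &Mutex, mut pred: F) {
    // wait() releases `m`, blocks, and reacquires `m` before returning, so
    // the predicate is always re-checked under the lock.
    while pred() {
        cv.wait(m);
    }
}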

src/libstd/sys/windows/mutex.rs

@@ -8,73 +8,51 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
 use prelude::v1::*;
 
-use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
-use alloc::{self, heap};
-
-use libc::DWORD;
+use marker::Sync;
+use cell::UnsafeCell;
 use sys::sync as ffi;
 
-const SPIN_COUNT: DWORD = 4000;
-
-pub struct Mutex { inner: AtomicUsize }
+pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
 
-pub const MUTEX_INIT: Mutex = Mutex { inner: ATOMIC_USIZE_INIT };
+pub const MUTEX_INIT: Mutex = Mutex {
+    inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
+};
+
+unsafe impl Sync for Mutex {}
 
 #[inline]
-pub unsafe fn raw(m: &Mutex) -> ffi::LPCRITICAL_SECTION {
-    m.get()
+pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
+    m.inner.get()
 }
 
+// So you might be asking why we're using SRWLock instead of CriticalSection?
+//
+// 1. SRWLock is several times faster than CriticalSection according to
+//    benchmarks performed on both Windows 8 and Windows 7.
+//
+// 2. CriticalSection allows recursive locking while SRWLock deadlocks. The
+//    Unix implementation deadlocks, so consistency is preferred. See #19962
+//    for more details.
+//
+// 3. While CriticalSection is fair and SRWLock is not, the current Rust
+//    policy is that there are no guarantees of fairness.
+
 impl Mutex {
     #[inline]
-    pub unsafe fn new() -> Mutex {
-        Mutex { inner: AtomicUsize::new(init_lock() as uint) }
-    }
+    pub unsafe fn new() -> Mutex { MUTEX_INIT }
     #[inline]
     pub unsafe fn lock(&self) {
-        ffi::EnterCriticalSection(self.get())
+        ffi::AcquireSRWLockExclusive(self.inner.get())
     }
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        ffi::TryEnterCriticalSection(self.get()) != 0
+        ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0
     }
     #[inline]
     pub unsafe fn unlock(&self) {
-        ffi::LeaveCriticalSection(self.get())
+        ffi::ReleaseSRWLockExclusive(self.inner.get())
     }
     #[inline]
     pub unsafe fn destroy(&self) {
-        let lock = self.inner.swap(0, Ordering::SeqCst);
-        if lock != 0 { free_lock(lock as ffi::LPCRITICAL_SECTION) }
-    }
-
-    unsafe fn get(&self) -> ffi::LPCRITICAL_SECTION {
-        match self.inner.load(Ordering::SeqCst) {
-            0 => {}
-            n => return n as ffi::LPCRITICAL_SECTION
-        }
-        let lock = init_lock();
-        match self.inner.compare_and_swap(0, lock as uint, Ordering::SeqCst) {
-            0 => return lock as ffi::LPCRITICAL_SECTION,
-            _ => {}
-        }
-        free_lock(lock);
-        return self.inner.load(Ordering::SeqCst) as ffi::LPCRITICAL_SECTION;
+        // ...
     }
 }
-
-unsafe fn init_lock() -> ffi::LPCRITICAL_SECTION {
-    let block = heap::allocate(ffi::CRITICAL_SECTION_SIZE, 8)
-                    as ffi::LPCRITICAL_SECTION;
-    if block.is_null() { alloc::oom() }
-    ffi::InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT);
-    return block;
-}
-
-unsafe fn free_lock(h: ffi::LPCRITICAL_SECTION) {
-    ffi::DeleteCriticalSection(h);
-    heap::deallocate(h as *mut _, ffi::CRITICAL_SECTION_SIZE, 8);
-}
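Point 2 in the comment above is easy to demonstrate: EnterCriticalSection counts recursive acquisitions per owning thread, but an SRW lock keeps no owner bookkeeping, so a thread that already holds it exclusively cannot acquire it again. A standalone, Windows-only sketch in modern Rust (assumed demonstration code, not from this commit) against the same kernel32 calls:

use std::ptr;

#[repr(C)]
struct SRWLOCK { ptr: *mut std::ffi::c_void }

#[link(name = "kernel32")]
extern "system" {
    fn AcquireSRWLockExclusive(lock: *mut SRWLOCK);
    fn TryAcquireSRWLockExclusive(lock: *mut SRWLOCK) -> u8; // BOOLEAN
    fn ReleaseSRWLockExclusive(lock: *mut SRWLOCK);
}

fn main() {
    // SRWLOCK_INIT is all zeroes, so a zeroed value is a fully initialized
    // lock: no init call, and no cleanup either, which is why destroy()
    // above shrank to a no-op.
    let mut lock = SRWLOCK { ptr: ptr::null_mut() };
    unsafe {
        AcquireSRWLockExclusive(&mut lock);
        // A CRITICAL_SECTION would let the owning thread re-enter here; the
        // SRW lock refuses, and a second *blocking* acquire would deadlock,
        // matching the Unix behavior referenced in #19962.
        assert_eq!(TryAcquireSRWLockExclusive(&mut lock), 0);
        ReleaseSRWLockExclusive(&mut lock);
    }
}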

src/libstd/sys/windows/sync.rs

@@ -8,17 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use libc::{BOOL, DWORD, c_void, LPVOID};
+use libc::{BOOL, DWORD, c_void, LPVOID, c_ulong};
 use libc::types::os::arch::extra::BOOLEAN;
 
-pub type LPCRITICAL_SECTION = *mut c_void;
-pub type LPCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
-pub type LPSRWLOCK = *mut SRWLOCK;
-
-#[cfg(target_arch = "x86")]
-pub const CRITICAL_SECTION_SIZE: uint = 24;
-#[cfg(target_arch = "x86_64")]
-pub const CRITICAL_SECTION_SIZE: uint = 40;
+pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
+pub type PSRWLOCK = *mut SRWLOCK;
+pub type ULONG = c_ulong;
 
 #[repr(C)]
 pub struct CONDITION_VARIABLE { pub ptr: LPVOID }
@@ -31,28 +26,19 @@ pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE {
 pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: 0 as *mut _ };
 
 extern "system" {
-    // critical sections
-    pub fn InitializeCriticalSectionAndSpinCount(
-                    lpCriticalSection: LPCRITICAL_SECTION,
-                    dwSpinCount: DWORD) -> BOOL;
-    pub fn DeleteCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
-    pub fn EnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
-    pub fn LeaveCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
-    pub fn TryEnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION) -> BOOL;
-
     // condition variables
-    pub fn SleepConditionVariableCS(ConditionVariable: LPCONDITION_VARIABLE,
-                                    CriticalSection: LPCRITICAL_SECTION,
-                                    dwMilliseconds: DWORD) -> BOOL;
-    pub fn WakeConditionVariable(ConditionVariable: LPCONDITION_VARIABLE);
-    pub fn WakeAllConditionVariable(ConditionVariable: LPCONDITION_VARIABLE);
+    pub fn SleepConditionVariableSRW(ConditionVariable: PCONDITION_VARIABLE,
+                                     SRWLock: PSRWLOCK,
+                                     dwMilliseconds: DWORD,
+                                     Flags: ULONG) -> BOOL;
+    pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
+    pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
 
     // slim rwlocks
-    pub fn AcquireSRWLockExclusive(SRWLock: LPSRWLOCK);
-    pub fn AcquireSRWLockShared(SRWLock: LPSRWLOCK);
-    pub fn ReleaseSRWLockExclusive(SRWLock: LPSRWLOCK);
-    pub fn ReleaseSRWLockShared(SRWLock: LPSRWLOCK);
-    pub fn TryAcquireSRWLockExclusive(SRWLock: LPSRWLOCK) -> BOOLEAN;
-    pub fn TryAcquireSRWLockShared(SRWLock: LPSRWLOCK) -> BOOLEAN;
+    pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK);
+    pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK);
+    pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK);
+    pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK);
+    pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN;
+    pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN;
 }
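For reference, a self-contained sketch in modern Rust (Windows-only; assumed example code, not part of the commit) that exercises the declarations above end to end: the main thread sleeps on the condition variable while holding the SRW lock exclusively (Flags = 0), and a second thread sets the predicate under the lock and wakes it:

use std::sync::atomic::{AtomicBool, Ordering};
use std::{ptr, thread};

#[repr(C)]
struct SRWLOCK { ptr: *mut std::ffi::c_void }
#[repr(C)]
struct CONDITION_VARIABLE { ptr: *mut std::ffi::c_void }

#[link(name = "kernel32")]
extern "system" {
    fn AcquireSRWLockExclusive(lock: *mut SRWLOCK);
    fn ReleaseSRWLockExclusive(lock: *mut SRWLOCK);
    fn SleepConditionVariableSRW(cv: *mut CONDITION_VARIABLE,
                                 lock: *mut SRWLOCK,
                                 dwMilliseconds: u32,
                                 flags: u32) -> i32;
    fn WakeConditionVariable(cv: *mut CONDITION_VARIABLE);
}

static READY: AtomicBool = AtomicBool::new(false);

fn main() {
    // Zeroed values are the documented static initializers (SRWLOCK_INIT and
    // CONDITION_VARIABLE_INIT); the boxes are leaked so the raw pointers
    // remain valid for the program's lifetime.
    let lock = Box::into_raw(Box::new(SRWLOCK { ptr: ptr::null_mut() }));
    let cv = Box::into_raw(Box::new(CONDITION_VARIABLE { ptr: ptr::null_mut() }));
    let (lock_n, cv_n) = (lock as usize, cv as usize);

    let waker = thread::spawn(move || unsafe {
        let (lock, cv) = (lock_n as *mut SRWLOCK, cv_n as *mut CONDITION_VARIABLE);
        AcquireSRWLockExclusive(lock);
        READY.store(true, Ordering::SeqCst); // set the predicate under the lock
        ReleaseSRWLockExclusive(lock);
        WakeConditionVariable(cv);
    });

    unsafe {
        AcquireSRWLockExclusive(lock);
        while !READY.load(Ordering::SeqCst) {
            // Atomically releases the lock, sleeps, and reacquires it before
            // returning; Flags = 0 says the lock is held exclusively, the same
            // value condvar.rs passes. 0xFFFF_FFFF is INFINITE.
            let r = SleepConditionVariableSRW(cv, lock, 0xFFFF_FFFF, 0);
            assert!(r != 0);
        }
        ReleaseSRWLockExclusive(lock);
    }
    waker.join().unwrap();
}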