diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index f440442ca30..559c4dc9c7c 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -1032,7 +1032,7 @@ extern "system" {
 // Functions that aren't available on every version of Windows that we support,
 // but we still use them and just provide some form of a fallback implementation.
 compat_fn! {
-    kernel32:
+    "kernel32":
 
     pub fn CreateSymbolicLinkW(_lpSymlinkFileName: LPCWSTR,
                               _lpTargetFileName: LPCWSTR,
diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs
index d6d433f9d08..3f25f05e1b9 100644
--- a/library/std/src/sys/windows/compat.rs
+++ b/library/std/src/sys/windows/compat.rs
@@ -12,7 +12,6 @@
 //! function is available but afterwards it's just a load and a jump.
 
 use crate::ffi::CString;
-use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sys::c;
 
 pub fn lookup(module: &str, symbol: &str) -> Option<usize> {
@@ -28,45 +27,69 @@ pub fn lookup(module: &str, symbol: &str) -> Option<usize> {
     }
 }
 
-pub fn store_func(ptr: &AtomicUsize, module: &str, symbol: &str, fallback: usize) -> usize {
-    let value = lookup(module, symbol).unwrap_or(fallback);
-    ptr.store(value, Ordering::SeqCst);
-    value
-}
-
 macro_rules! compat_fn {
-    ($module:ident: $(
+    ($module:literal: $(
         $(#[$meta:meta])*
-        pub fn $symbol:ident($($argname:ident: $argtype:ty),*)
-                                  -> $rettype:ty {
-            $($body:expr);*
-        }
+        pub fn $symbol:ident($($argname:ident: $argtype:ty),*) -> $rettype:ty $body:block
     )*) => ($(
-        #[allow(unused_variables)]
         $(#[$meta])*
-        pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
+        pub mod $symbol {
+            use super::*;
             use crate::sync::atomic::{AtomicUsize, Ordering};
             use crate::mem;
+
             type F = unsafe extern "system" fn($($argtype),*) -> $rettype;
 
             static PTR: AtomicUsize = AtomicUsize::new(0);
 
+            #[allow(unused_variables)]
+            unsafe extern "system" fn fallback($($argname: $argtype),*) -> $rettype $body
+
+            /// This address is stored in `PTR` to indicate an unavailable API.
+            ///
+            /// This way, call() will end up calling fallback() if it is unavailable.
+            ///
+            /// This is a `static` to avoid rustc duplicating `fn fallback()`
+            /// into both load() and is_available(), which would break
+            /// is_available()'s comparison. By using the same static variable
+            /// in both places, they'll refer to the same (copy of the)
+            /// function.
+            ///
+            /// LLVM merging the address of fallback with other functions
+            /// (because of unnamed_addr) is fine, since it's only compared to
+            /// an address from GetProcAddress from an external dll.
+            static FALLBACK: F = fallback;
+
+            #[cold]
             fn load() -> usize {
-                crate::sys::compat::store_func(&PTR,
-                                               stringify!($module),
-                                               stringify!($symbol),
-                                               fallback as usize)
-            }
-            unsafe extern "system" fn fallback($($argname: $argtype),*)
-                                               -> $rettype {
-                $($body);*
+                // There is no locking here. It's okay if this is executed by multiple threads in
+                // parallel. `lookup` will result in the same value, and it's okay if they overwrite
+                // each other's result as long as they do so atomically. We don't need any guarantees
+                // about memory ordering, as this involves just a single atomic variable which is
+                // not used to protect or order anything else.
+                let addr = crate::sys::compat::lookup($module, stringify!($symbol))
+                    .unwrap_or(FALLBACK as usize);
+                PTR.store(addr, Ordering::Relaxed);
+                addr
             }
 
-            let addr = match PTR.load(Ordering::SeqCst) {
-                0 => load(),
-                n => n,
-            };
-            mem::transmute::<usize, F>(addr)($($argname),*)
+            fn addr() -> usize {
+                match PTR.load(Ordering::Relaxed) {
+                    0 => load(),
+                    addr => addr,
+                }
+            }
+
+            #[allow(dead_code)]
+            pub fn is_available() -> bool {
+                addr() != FALLBACK as usize
+            }
+
+            pub unsafe fn call($($argname: $argtype),*) -> $rettype {
+                mem::transmute::<usize, F>(addr())($($argname),*)
+            }
         }
+
+        pub use $symbol::call as $symbol;
     )*)
 }
diff --git a/library/std/src/sys/windows/mutex.rs b/library/std/src/sys/windows/mutex.rs
index 1e09b95c872..e2aaca59fe2 100644
--- a/library/std/src/sys/windows/mutex.rs
+++ b/library/std/src/sys/windows/mutex.rs
@@ -23,7 +23,6 @@ use crate::cell::{Cell, UnsafeCell};
 use crate::mem::{self, MaybeUninit};
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sys::c;
-use crate::sys::compat;
 
 pub struct Mutex {
     // This is either directly an SRWLOCK (if supported), or a Box<Inner> otherwise.
@@ -40,8 +39,8 @@ struct Inner {
 
 #[derive(Clone, Copy)]
 enum Kind {
-    SRWLock = 1,
-    CriticalSection = 2,
+    SRWLock,
+    CriticalSection,
 }
 
 #[inline]
@@ -130,21 +129,7 @@ impl Mutex {
 }
 
 fn kind() -> Kind {
-    static KIND: AtomicUsize = AtomicUsize::new(0);
-
-    let val = KIND.load(Ordering::SeqCst);
-    if val == Kind::SRWLock as usize {
-        return Kind::SRWLock;
-    } else if val == Kind::CriticalSection as usize {
-        return Kind::CriticalSection;
-    }
-
-    let ret = match compat::lookup("kernel32", "AcquireSRWLockExclusive") {
-        None => Kind::CriticalSection,
-        Some(..) => Kind::SRWLock,
-    };
-    KIND.store(ret as usize, Ordering::SeqCst);
-    ret
+    if c::AcquireSRWLockExclusive::is_available() { Kind::SRWLock } else { Kind::CriticalSection }
 }
 
 pub struct ReentrantMutex {
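
For reference, here is a hand-expanded, standalone sketch of the pattern the new `compat_fn!` generates for one symbol. The symbol name (`GetTickCount64`), the `0` returned by the fallback, and the inline `GetModuleHandleA`/`GetProcAddress` bindings (with `LPCSTR` declared as `*const u8` for brevity) are illustrative choices, not part of this diff; the real macro resolves whatever module and symbol names it is given and goes through `crate::sys::compat::lookup`.

    // Windows-only sketch of the lazy GetProcAddress pattern that compat_fn! expands to.
    #![allow(non_snake_case)]
    use std::ffi::c_void;
    use std::mem;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Minimal raw bindings for the lookup itself.
    #[link(name = "kernel32")]
    extern "system" {
        fn GetModuleHandleA(lpModuleName: *const u8) -> *mut c_void;
        fn GetProcAddress(hModule: *mut c_void, lpProcName: *const u8) -> *mut c_void;
    }

    pub mod GetTickCount64 {
        use super::*;

        type F = unsafe extern "system" fn() -> u64;

        // 0 = not resolved yet; otherwise the resolved address (possibly FALLBACK).
        static PTR: AtomicUsize = AtomicUsize::new(0);

        // Runs when the OS does not export the symbol.
        unsafe extern "system" fn fallback() -> u64 {
            0
        }

        // Stored in PTR to mark the API as unavailable; compared against in is_available().
        static FALLBACK: F = fallback;

        #[cold]
        fn load() -> usize {
            // Racy but benign: every thread that gets here stores the same value.
            let addr = unsafe {
                let module = GetModuleHandleA(b"kernel32\0".as_ptr());
                match GetProcAddress(module, b"GetTickCount64\0".as_ptr()) as usize {
                    0 => FALLBACK as usize,
                    n => n,
                }
            };
            PTR.store(addr, Ordering::Relaxed);
            addr
        }

        fn addr() -> usize {
            match PTR.load(Ordering::Relaxed) {
                0 => load(),
                addr => addr,
            }
        }

        pub fn is_available() -> bool {
            addr() != FALLBACK as usize
        }

        pub unsafe fn call() -> u64 {
            mem::transmute::<usize, F>(addr())()
        }
    }

    // Same trick as `pub use $symbol::call as $symbol;`: the function and the helper
    // module live in different namespaces, so call sites keep their original shape.
    pub use GetTickCount64::call as GetTickCount64;

    fn main() {
        if GetTickCount64::is_available() {
            // Goes through the value-namespace re-export, like c::CreateSymbolicLinkW(..) does.
            println!("uptime: {} ms", unsafe { GetTickCount64() });
        } else {
            println!("GetTickCount64 is not exported by this kernel32.dll");
        }
    }

The re-export at the end mirrors the diff's `pub use $symbol::call as $symbol;`, so existing callers such as `c::CreateSymbolicLinkW(...)` keep compiling unchanged, while callers that only need a feature probe can branch on `is_available()`, which is exactly what the rewritten `kind()` in mutex.rs does with `c::AcquireSRWLockExclusive::is_available()`.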