Auto merge of #74667 - Manishearth:rollup-s6k59sw, r=Manishearth (commit 2bbfa02b1b)

Rollup of 8 pull requests

Successful merges:

 - #74141 (libstd/libcore: fix various typos)
 - #74490 (add a Backtrace::disabled function)
 - #74548 (one more Path::with_extension example, to demonstrate behavior)
 - #74587 (Prefer constant over function)
 - #74606 (Remove Linux workarounds for missing CLOEXEC support)
 - #74637 (Make str point to primitive page)
 - #74654 (require type defaults to be after const generic parameters)
 - #74659 (Improve codegen for unchecked float casts on wasm)

Failed merges:

r? @ghost
@@ -65,7 +65,7 @@ use crate::vec::Vec;
 ///
 /// # Examples
 ///
-/// You can create a `String` from [a literal string][str] with [`String::from`]:
+/// You can create a `String` from [a literal string][`str`] with [`String::from`]:
 ///
 /// [`String::from`]: From::from
 ///

@@ -268,7 +268,8 @@ use crate::vec::Vec;
 ///
 /// Here, there's no need to allocate more memory inside the loop.
 ///
-/// [`&str`]: str
+/// [`str`]: type@str
+/// [`&str`]: type@str
 /// [`Deref`]: core::ops::Deref
 /// [`as_str()`]: String::as_str
 #[derive(PartialOrd, Eq, Ord)]

@@ -677,7 +677,7 @@ impl AsRef<str> for str {
 ///
 ///
 /// However there is one case where `!` syntax can be used
-/// before `!` is stabilized as a full-fleged type: in the position of a function’s return type.
+/// before `!` is stabilized as a full-fledged type: in the position of a function’s return type.
 /// Specifically, it is possible implementations for two different function pointer types:
 ///
 /// ```

@@ -43,7 +43,7 @@ struct SipHasher24 {
 ///
 /// SipHash is a general-purpose hashing function: it runs at a good
 /// speed (competitive with Spooky and City) and permits strong _keyed_
-/// hashing. This lets you key your hashtables from a strong RNG, such as
+/// hashing. This lets you key your hash tables from a strong RNG, such as
 /// [`rand::os::OsRng`](https://doc.rust-lang.org/rand/rand/os/struct.OsRng.html).
 ///
 /// Although the SipHash algorithm is considered to be generally strong,

@@ -15,7 +15,7 @@
 //!
 //! If an intrinsic is supposed to be used from a `const fn` with a `rustc_const_stable` attribute,
 //! the intrinsic's attribute must be `rustc_const_stable`, too. Such a change should not be done
-//! without T-lang consulation, because it bakes a feature into the language that cannot be
+//! without T-lang consultation, because it bakes a feature into the language that cannot be
 //! replicated in user code without compiler support.
 //!
 //! # Volatiles

@@ -994,7 +994,7 @@ extern "rust-intrinsic" {
 /// [`std::mem::align_of`](../../std/mem/fn.align_of.html).
 #[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")]
 pub fn min_align_of<T>() -> usize;
-/// The prefered alignment of a type.
+/// The preferred alignment of a type.
 ///
 /// This intrinsic does not have a stable counterpart.
 #[rustc_const_unstable(feature = "const_pref_align_of", issue = "none")]

@@ -1246,14 +1246,14 @@ extern "rust-intrinsic" {
 /// assert!(mid <= len);
 /// unsafe {
 /// let slice2 = mem::transmute::<&mut [T], &mut [T]>(slice);
-/// // first: transmute is not typesafe; all it checks is that T and
+/// // first: transmute is not type safe; all it checks is that T and
 /// // U are of the same size. Second, right here, you have two
 /// // mutable references pointing to the same memory.
 /// (&mut slice[0..mid], &mut slice2[mid..len])
 /// }
 /// }
 ///
-/// // This gets rid of the typesafety problems; `&mut *` will *only* give
+/// // This gets rid of the type safety problems; `&mut *` will *only* give
 /// // you an `&mut T` from an `&mut T` or `*mut T`.
 /// fn split_at_mut_casts<T>(slice: &mut [T], mid: usize)
 /// -> (&mut [T], &mut [T]) {

@@ -1069,7 +1069,7 @@ pub trait Iterator {
 /// let vec = iter.collect::<Vec<_>>();
 ///
 /// // We have more elements which could fit in u32 (4, 5), but `map_while` returned `None` for `-3`
-/// // (as the `predicate` returned `None`) and `collect` stops at the first `None` entcountered.
+/// // (as the `predicate` returned `None`) and `collect` stops at the first `None` encountered.
 /// assert_eq!(vec, vec![0, 1, 2]);
 /// ```
 ///
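For illustration, a runnable sketch (not from the patch) of the behavior the corrected comment describes: `map_while` ends the iteration at the first `None` from the predicate, so `collect` never sees later items. It assumes a toolchain where `Iterator::map_while` is available (it was still unstable behind `iter_map_while` when this rollup landed).

```rust
use std::convert::TryFrom;

fn main() {
    let a = [0i32, 1, 2, -3, 4, 5, -6];
    // The predicate yields `None` at `-3`, which stops iteration there.
    let vec: Vec<u32> = a.iter().map_while(|&x| u32::try_from(x).ok()).collect();
    // 4 and 5 would also fit in `u32`, but `collect` never sees them.
    assert_eq!(vec, vec![0, 1, 2]);
}
```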
@@ -1047,7 +1047,7 @@ pub(crate) mod builtin {
 };
 }

-/// Includes a utf8-encoded file as a string.
+/// Includes a UTF-8 encoded file as a string.
 ///
 /// The file is located relative to the current file (similarly to how
 /// modules are found). The provided path is interpreted in a platform-specific
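For illustration, a small usage sketch (not from the patch) of the macro documented above; the file name `hello.txt` is hypothetical and must exist next to the source file at compile time.

```rust
fn main() {
    // Embeds the UTF-8 contents of a neighboring file into the binary at compile time.
    let greeting: &'static str = include_str!("hello.txt");
    println!("{}", greeting);
}
```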
@@ -348,11 +348,11 @@ pub fn size_of_val<T: ?Sized>(val: &T) -> usize {
 ///
 /// - If `T` is `Sized`, this function is always safe to call.
 /// - If the unsized tail of `T` is:
-/// - a [slice], then the length of the slice tail must be an intialized
+/// - a [slice], then the length of the slice tail must be an initialized
 /// integer, and the size of the *entire value*
 /// (dynamic tail length + statically sized prefix) must fit in `isize`.
 /// - a [trait object], then the vtable part of the pointer must point
-/// to a valid vtable acquired by an unsizing coersion, and the size
+/// to a valid vtable acquired by an unsizing coercion, and the size
 /// of the *entire value* (dynamic tail length + statically sized prefix)
 /// must fit in `isize`.
 /// - an (unstable) [extern type], then this function is always safe to

@@ -483,11 +483,11 @@ pub fn align_of_val<T: ?Sized>(val: &T) -> usize {
 ///
 /// - If `T` is `Sized`, this function is always safe to call.
 /// - If the unsized tail of `T` is:
-/// - a [slice], then the length of the slice tail must be an intialized
+/// - a [slice], then the length of the slice tail must be an initialized
 /// integer, and the size of the *entire value*
 /// (dynamic tail length + statically sized prefix) must fit in `isize`.
 /// - a [trait object], then the vtable part of the pointer must point
-/// to a valid vtable acquired by an unsizing coersion, and the size
+/// to a valid vtable acquired by an unsizing coercion, and the size
 /// of the *entire value* (dynamic tail length + statically sized prefix)
 /// must fit in `isize`.
 /// - an (unstable) [extern type], then this function is always safe to
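For illustration, a quick sketch (not from the patch) using the stable `size_of_val`/`align_of_val` counterparts of the `_raw` functions whose safety docs are fixed above.

```rust
use std::mem;

fn main() {
    let bytes: &[u8] = &[1, 2, 3, 4, 5];
    // For a slice, the dynamically sized part is the element data itself.
    assert_eq!(mem::size_of_val(bytes), 5);
    assert_eq!(mem::align_of_val(bytes), 1);
}
```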
@@ -687,7 +687,7 @@ impl f64 {
    /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
    ///
    /// Rather than trying to preserve signaling-ness cross-platform, this
-   /// implementation favours preserving the exact bits. This means that
+   /// implementation favors preserving the exact bits. This means that
    /// any payloads encoded in NaNs will be preserved even if the result of
    /// this method is sent over the network from an x86 machine to a MIPS one.
    ///

@@ -696,7 +696,7 @@ impl f64 {
    ///
    /// If the input isn't NaN, then there is no portability concern.
    ///
-   /// If you don't care about signalingness (very likely), then there is no
+   /// If you don't care about signaling-ness (very likely), then there is no
    /// portability concern.
    ///
    /// Note that this function is distinct from `as` casting, which attempts to

@@ -128,7 +128,7 @@
 //!
 //! Crucially, we have to be able to rely on [`drop`] being called. If an element
 //! could be deallocated or otherwise invalidated without calling [`drop`], the pointers into it
-//! from its neighbouring elements would become invalid, which would break the data structure.
+//! from its neighboring elements would become invalid, which would break the data structure.
 //!
 //! Therefore, pinning also comes with a [`drop`]-related guarantee.
 //!

@@ -331,13 +331,13 @@ impl<T: ?Sized> *const T {
         intrinsics::ptr_guaranteed_eq(self, other)
     }

-    /// Returns whether two pointers are guaranteed to be inequal.
+    /// Returns whether two pointers are guaranteed to be unequal.
     ///
     /// At runtime this function behaves like `self != other`.
     /// However, in some contexts (e.g., compile-time evaluation),
     /// it is not always possible to determine the inequality of two pointers, so this function may
-    /// spuriously return `false` for pointers that later actually turn out to be inequal.
-    /// But when it returns `true`, the pointers are guaranteed to be inequal.
+    /// spuriously return `false` for pointers that later actually turn out to be unequal.
+    /// But when it returns `true`, the pointers are guaranteed to be unequal.
     ///
     /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer
     /// comparisons for which both functions return `false`.

@@ -317,13 +317,13 @@ impl<T: ?Sized> *mut T {
         intrinsics::ptr_guaranteed_eq(self as *const _, other as *const _)
     }

-    /// Returns whether two pointers are guaranteed to be inequal.
+    /// Returns whether two pointers are guaranteed to be unequal.
     ///
     /// At runtime this function behaves like `self != other`.
     /// However, in some contexts (e.g., compile-time evaluation),
     /// it is not always possible to determine the inequality of two pointers, so this function may
-    /// spuriously return `false` for pointers that later actually turn out to be inequal.
-    /// But when it returns `true`, the pointers are guaranteed to be inequal.
+    /// spuriously return `false` for pointers that later actually turn out to be unequal.
+    /// But when it returns `true`, the pointers are guaranteed to be unequal.
     ///
     /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer
     /// comparisons for which both functions return `false`.
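For illustration, a small sketch (not from the patch) of the runtime comparison that `guaranteed_ne` mirrors; the method itself was unstable (`const_raw_ptr_comparison`) at the time, so only the plain `!=`/`ptr::eq` behavior is shown.

```rust
fn main() {
    let a = 1u8;
    let b = 2u8;
    let p: *const u8 = &a;
    let q: *const u8 = &b;
    // At runtime, pointer (in)equality is an address comparison; the unstable
    // `guaranteed_ne` may additionally return `false` at compile time when the
    // answer cannot be determined.
    assert!(p != q);
    assert!(!std::ptr::eq(p, q));
}
```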
@@ -172,7 +172,7 @@ impl<T> NonNull<[T]> {
    /// assert_eq!(unsafe { slice.as_ref()[2] }, 7);
    /// ```
    ///
-   /// (Note that this example artifically demonstrates a use of this method,
+   /// (Note that this example artificially demonstrates a use of this method,
    /// but `let slice = NonNull::from(&x[..]);` would be a better way to write code like this.)
    #[unstable(feature = "nonnull_slice_from_raw_parts", issue = "71941")]
    #[rustc_const_unstable(feature = "const_nonnull_slice_from_raw_parts", issue = "71941")]

@@ -1118,13 +1118,26 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
     fn visit_generics(&mut self, generics: &'a Generics) {
         let mut prev_ty_default = None;
         for param in &generics.params {
-            if let GenericParamKind::Type { ref default, .. } = param.kind {
-                if default.is_some() {
-                    prev_ty_default = Some(param.ident.span);
-                } else if let Some(span) = prev_ty_default {
-                    self.err_handler()
-                        .span_err(span, "type parameters with a default must be trailing");
-                    break;
-                }
+            match param.kind {
+                GenericParamKind::Lifetime => (),
+                GenericParamKind::Type { default: Some(_), .. } => {
+                    prev_ty_default = Some(param.ident.span);
+                }
+                GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
+                    if let Some(span) = prev_ty_default {
+                        let mut err = self.err_handler().struct_span_err(
+                            span,
+                            "type parameters with a default must be trailing",
+                        );
+                        if matches!(param.kind, GenericParamKind::Const { .. }) {
+                            err.note(
+                                "using type defaults and const parameters \
+                                 in the same parameter list is currently not permitted",
+                            );
+                        }
+                        err.emit();
+                        break;
+                    }
+                }
             }
         }
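For illustration, a sketch (not from the patch) of the ordering rule the new check enforces, written against later stable Rust where const generics have been stabilized; at the time of this rollup the `const_generics` feature gate was still required, as the new `wrong-order.rs` test at the end of this diff shows.

```rust
// Rejected by the check above: a defaulted type parameter placed before a
// const parameter ("type parameters with a default must be trailing").
// struct Wrong<T = u32, const N: usize> { arg: T }

// Accepted: no default, or keep defaulted type parameters trailing.
struct Right<T, const N: usize> {
    arg: T,
}

fn main() {
    let _ = Right::<u32, 3> { arg: 7 };
}
```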
@@ -510,6 +510,14 @@ impl CodegenCx<'b, 'tcx> {
         ifn!("llvm.wasm.trunc.saturate.signed.i32.f64", fn(t_f64) -> t_i32);
         ifn!("llvm.wasm.trunc.saturate.signed.i64.f32", fn(t_f32) -> t_i64);
         ifn!("llvm.wasm.trunc.saturate.signed.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);

         ifn!("llvm.trap", fn() -> void);
         ifn!("llvm.debugtrap", fn() -> void);
@@ -629,27 +629,24 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             }

             sym::float_to_int_unchecked => {
-                if float_type_width(arg_tys[0]).is_none() {
-                    span_invalid_monomorphization_error(
-                        tcx.sess,
-                        span,
-                        &format!(
-                            "invalid monomorphization of `float_to_int_unchecked` \
-                                  intrinsic: expected basic float type, \
-                                  found `{}`",
-                            arg_tys[0]
-                        ),
-                    );
-                    return;
-                }
-                match int_type_width_signed(ret_ty, self.cx) {
-                    Some((width, signed)) => {
-                        if signed {
-                            self.fptosi(args[0].immediate(), self.cx.type_ix(width))
-                        } else {
-                            self.fptoui(args[0].immediate(), self.cx.type_ix(width))
-                        }
-                    }
+                let float_width = match float_type_width(arg_tys[0]) {
+                    Some(width) => width,
+                    None => {
+                        span_invalid_monomorphization_error(
+                            tcx.sess,
+                            span,
+                            &format!(
+                                "invalid monomorphization of `float_to_int_unchecked` \
+                                  intrinsic: expected basic float type, \
+                                  found `{}`",
+                                arg_tys[0]
+                            ),
+                        );
+                        return;
+                    }
+                };
+                let (width, signed) = match int_type_width_signed(ret_ty, self.cx) {
+                    Some(pair) => pair,
                     None => {
                         span_invalid_monomorphization_error(
                             tcx.sess,

@@ -663,7 +660,49 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                         );
                         return;
                     }
-                }
+                };
+
+                // The LLVM backend can reorder and speculate `fptosi` and
+                // `fptoui`, so on WebAssembly the codegen for this instruction
+                // is quite heavyweight. To avoid this heavyweight codegen we
+                // instead use the raw wasm intrinsics which will lower to one
+                // instruction in WebAssembly (`iNN.trunc_fMM_{s,u}`). This one
+                // instruction will trap if the operand is out of bounds, but
+                // that's ok since this intrinsic is UB if the operands are out
+                // of bounds, so the behavior can be different on WebAssembly
+                // than other targets.
+                //
+                // Note, however, that when the `nontrapping-fptoint` feature is
+                // enabled in LLVM then LLVM will lower `fptosi` to
+                // `iNN.trunc_sat_fMM_{s,u}`, so if that's the case we don't
+                // bother with intrinsics.
+                let mut result = None;
+                if self.sess().target.target.arch == "wasm32"
+                    && !self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
+                {
+                    let name = match (width, float_width, signed) {
+                        (32, 32, true) => Some("llvm.wasm.trunc.signed.i32.f32"),
+                        (32, 64, true) => Some("llvm.wasm.trunc.signed.i32.f64"),
+                        (64, 32, true) => Some("llvm.wasm.trunc.signed.i64.f32"),
+                        (64, 64, true) => Some("llvm.wasm.trunc.signed.i64.f64"),
+                        (32, 32, false) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
+                        (32, 64, false) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
+                        (64, 32, false) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
+                        (64, 64, false) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
+                        _ => None,
+                    };
+                    if let Some(name) = name {
+                        let intrinsic = self.get_intrinsic(name);
+                        result = Some(self.call(intrinsic, &[args[0].immediate()], None));
+                    }
+                }
+                result.unwrap_or_else(|| {
+                    if signed {
+                        self.fptosi(args[0].immediate(), self.cx.type_ix(width))
+                    } else {
+                        self.fptoui(args[0].immediate(), self.cx.type_ix(width))
+                    }
+                })
             }

             sym::discriminant_value => {
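For illustration, a minimal sketch (not from the patch) of the surface API this codegen change affects: `to_int_unchecked` is undefined behavior when the value is out of range, which is why lowering to a trapping wasm instruction is acceptable.

```rust
fn main() {
    let x = 1337.7_f64;
    // Safety: the value is finite and in range for i32, as the
    // `float_to_int_unchecked` intrinsic requires.
    let y: i32 = unsafe { x.to_int_unchecked() };
    assert_eq!(y, 1337);
}
```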
@@ -291,6 +291,12 @@ impl Backtrace {
         Backtrace::create(Backtrace::force_capture as usize)
     }

+    /// Forcibly captures a disabled backtrace, regardless of environment
+    /// variable configuration.
+    pub const fn disabled() -> Backtrace {
+        Backtrace { inner: Inner::Disabled }
+    }
+
     // Capture a backtrace which start just before the function addressed by
     // `ip`
     fn create(ip: usize) -> Backtrace {
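For illustration, a hedged usage sketch (not from the patch) of the new constructor; when this rollup landed, `std::backtrace` was still behind the unstable `backtrace` feature, though it has since been stabilized.

```rust
use std::backtrace::Backtrace;

fn main() {
    // A placeholder backtrace that never captures frames, useful as a cheap
    // default before deciding whether capture should be enabled.
    let bt = Backtrace::disabled();
    println!("{}", bt);
}
```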
@@ -1500,7 +1500,7 @@ mod tests {
        assert_approx_eq!(f32::from_bits(0x44a72000), 1337.0);
        assert_approx_eq!(f32::from_bits(0xc1640000), -14.25);

-       // Check that NaNs roundtrip their bits regardless of signalingness
+       // Check that NaNs roundtrip their bits regardless of signaling-ness
        // 0xA is 0b1010; 0x5 is 0b0101 -- so these two together clobbers all the mantissa bits
        let masked_nan1 = f32::NAN.to_bits() ^ 0x002A_AAAA;
        let masked_nan2 = f32::NAN.to_bits() ^ 0x0055_5555;

@@ -1523,7 +1523,7 @@ mod tests {
        assert_approx_eq!(f64::from_bits(0x4094e40000000000), 1337.0);
        assert_approx_eq!(f64::from_bits(0xc02c800000000000), -14.25);

-       // Check that NaNs roundtrip their bits regardless of signalingness
+       // Check that NaNs roundtrip their bits regardless of signaling-ness
        // 0xA is 0b1010; 0x5 is 0b0101 -- so these two together clobbers all the mantissa bits
        let masked_nan1 = f64::NAN.to_bits() ^ 0x000A_AAAA_AAAA_AAAA;
        let masked_nan2 = f64::NAN.to_bits() ^ 0x0005_5555_5555_5555;
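For illustration, a runnable sketch (not from the patch) of the property these tests assert: `to_bits`/`from_bits` preserve NaN payload bits exactly.

```rust
fn main() {
    // Flip some mantissa bits of NaN; the result is still a NaN with a payload.
    let masked_nan = f32::NAN.to_bits() ^ 0x002A_AAAA;
    assert!(f32::from_bits(masked_nan).is_nan());
    // The exact bit pattern survives the round trip.
    assert_eq!(f32::from_bits(masked_nan).to_bits(), masked_nan);
}
```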
@@ -285,7 +285,7 @@ pub trait MetadataExt {
    /// ```
    #[stable(feature = "metadata_ext2", since = "1.8.0")]
    fn st_ctime_nsec(&self) -> i64;
-   /// Returns the "preferred" blocksize for efficient filesystem I/O.
+   /// Returns the "preferred" block size for efficient filesystem I/O.
    ///
    /// # Examples
    ///

@@ -289,7 +289,7 @@ pub trait MetadataExt {
    /// ```
    #[stable(feature = "metadata_ext2", since = "1.8.0")]
    fn st_ctime_nsec(&self) -> i64;
-   /// Returns the "preferred" blocksize for efficient filesystem I/O.
+   /// Returns the "preferred" block size for efficient filesystem I/O.
    ///
    /// # Examples
    ///

@@ -2244,6 +2244,9 @@ impl Path {
    ///
    /// let path = Path::new("foo.rs");
    /// assert_eq!(path.with_extension("txt"), PathBuf::from("foo.txt"));
+   ///
+   /// let path = Path::new("foo.tar.gz");
+   /// assert_eq!(path.with_extension(""), PathBuf::from("foo.tar"));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_extension<S: AsRef<OsStr>>(&self, extension: S) -> PathBuf {

@@ -19,7 +19,7 @@ impl FileDesc {
        self.fd
    }

-   /// Extracts the actual filedescriptor without closing it.
+   /// Extracts the actual file descriptor without closing it.
    pub fn into_raw(self) -> Fd {
        let fd = self.fd;
        mem::forget(self);

@@ -624,7 +624,7 @@ pub trait MetadataExt {
    /// ```
    #[stable(feature = "metadata_ext", since = "1.1.0")]
    fn ctime_nsec(&self) -> i64;
-   /// Returns the blocksize for filesystem I/O.
+   /// Returns the block size for filesystem I/O.
    ///
    /// # Examples
    ///

@@ -635,7 +635,7 @@ pub trait MetadataExt {
    ///
    /// fn main() -> io::Result<()> {
    ///     let meta = fs::metadata("some_file")?;
-   ///     let blocksize = meta.blksize();
+   ///     let block_size = meta.blksize();
    ///     Ok(())
    /// }
    /// ```
@@ -3,28 +3,28 @@
 use crate::cmp;
 use crate::io::{self, Initializer, IoSlice, IoSliceMut, Read};
 use crate::mem;
-use crate::sync::atomic::{AtomicBool, Ordering};
 use crate::sys::cvt;
 use crate::sys_common::AsInner;

-use libc::{c_int, c_void, ssize_t};
+use libc::{c_int, c_void};

 #[derive(Debug)]
 pub struct FileDesc {
     fd: c_int,
 }

-fn max_len() -> usize {
-    // The maximum read limit on most posix-like systems is `SSIZE_MAX`,
-    // with the man page quoting that if the count of bytes to read is
-    // greater than `SSIZE_MAX` the result is "unspecified".
-    //
-    // On macOS, however, apparently the 64-bit libc is either buggy or
-    // intentionally showing odd behavior by rejecting any read with a size
-    // larger than or equal to INT_MAX. To handle both of these the read
-    // size is capped on both platforms.
-    if cfg!(target_os = "macos") { <c_int>::MAX as usize - 1 } else { <ssize_t>::MAX as usize }
-}
+// The maximum read limit on most POSIX-like systems is `SSIZE_MAX`,
+// with the man page quoting that if the count of bytes to read is
+// greater than `SSIZE_MAX` the result is "unspecified".
+//
+// On macOS, however, apparently the 64-bit libc is either buggy or
+// intentionally showing odd behavior by rejecting any read with a size
+// larger than or equal to INT_MAX. To handle both of these the read
+// size is capped on both platforms.
+#[cfg(target_os = "macos")]
+const READ_LIMIT: usize = c_int::MAX as usize - 1;
+#[cfg(not(target_os = "macos"))]
+const READ_LIMIT: usize = libc::ssize_t::MAX as usize;

 impl FileDesc {
     pub fn new(fd: c_int) -> FileDesc {
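For illustration, a standalone sketch (not from the patch) of the pattern used above: a `#[cfg]`-selected constant instead of a runtime `cfg!` branch, used to cap I/O lengths. The values mirror the diff (INT_MAX - 1 on macOS, SSIZE_MAX elsewhere) but are written with plain integer types, and `capped_len` is a hypothetical helper.

```rust
use std::cmp;

#[cfg(target_os = "macos")]
const READ_LIMIT: usize = i32::MAX as usize - 1;
#[cfg(not(target_os = "macos"))]
const READ_LIMIT: usize = isize::MAX as usize;

// Hypothetical helper: never ask the OS for more than READ_LIMIT bytes at once.
fn capped_len(buf: &[u8]) -> usize {
    cmp::min(buf.len(), READ_LIMIT)
}

fn main() {
    let buf = vec![0u8; 4096];
    assert_eq!(capped_len(&buf), 4096);
}
```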
@@ -44,7 +44,7 @@ impl FileDesc {

     pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
         let ret = cvt(unsafe {
-            libc::read(self.fd, buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), max_len()))
+            libc::read(self.fd, buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), READ_LIMIT))
         })?;
         Ok(ret as usize)
     }

@@ -92,7 +92,7 @@ impl FileDesc {
             cvt_pread64(
                 self.fd,
                 buf.as_mut_ptr() as *mut c_void,
-                cmp::min(buf.len(), max_len()),
+                cmp::min(buf.len(), READ_LIMIT),
                 offset as i64,
             )
             .map(|n| n as usize)

@@ -101,7 +101,7 @@ impl FileDesc {

     pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
         let ret = cvt(unsafe {
-            libc::write(self.fd, buf.as_ptr() as *const c_void, cmp::min(buf.len(), max_len()))
+            libc::write(self.fd, buf.as_ptr() as *const c_void, cmp::min(buf.len(), READ_LIMIT))
         })?;
         Ok(ret as usize)
     }

@@ -144,7 +144,7 @@ impl FileDesc {
             cvt_pwrite64(
                 self.fd,
                 buf.as_ptr() as *const c_void,
-                cmp::min(buf.len(), max_len()),
+                cmp::min(buf.len(), READ_LIMIT),
                 offset as i64,
             )
             .map(|n| n as usize)
@@ -223,50 +223,9 @@ impl FileDesc {
     pub fn duplicate(&self) -> io::Result<FileDesc> {
         // We want to atomically duplicate this file descriptor and set the
         // CLOEXEC flag, and currently that's done via F_DUPFD_CLOEXEC. This
-        // flag, however, isn't supported on older Linux kernels (earlier than
-        // 2.6.24).
-        //
-        // To detect this and ensure that CLOEXEC is still set, we
-        // follow a strategy similar to musl [1] where if passing
-        // F_DUPFD_CLOEXEC causes `fcntl` to return EINVAL it means it's not
-        // supported (the third parameter, 0, is always valid), so we stop
-        // trying that.
-        //
-        // Also note that Android doesn't have F_DUPFD_CLOEXEC, but get it to
-        // resolve so we at least compile this.
-        //
-        // [1]: http://comments.gmane.org/gmane.linux.lib.musl.general/2963
-        #[cfg(any(target_os = "android", target_os = "haiku"))]
-        use libc::F_DUPFD as F_DUPFD_CLOEXEC;
-        #[cfg(not(any(target_os = "android", target_os = "haiku")))]
-        use libc::F_DUPFD_CLOEXEC;
-
-        let make_filedesc = |fd| {
-            let fd = FileDesc::new(fd);
-            fd.set_cloexec()?;
-            Ok(fd)
-        };
-        static TRY_CLOEXEC: AtomicBool = AtomicBool::new(!cfg!(target_os = "android"));
-        let fd = self.raw();
-        if TRY_CLOEXEC.load(Ordering::Relaxed) {
-            match cvt(unsafe { libc::fcntl(fd, F_DUPFD_CLOEXEC, 0) }) {
-                // We *still* call the `set_cloexec` method as apparently some
-                // linux kernel at some point stopped setting CLOEXEC even
-                // though it reported doing so on F_DUPFD_CLOEXEC.
-                Ok(fd) => {
-                    return Ok(if cfg!(target_os = "linux") {
-                        make_filedesc(fd)?
-                    } else {
-                        FileDesc::new(fd)
-                    });
-                }
-                Err(ref e) if e.raw_os_error() == Some(libc::EINVAL) => {
-                    TRY_CLOEXEC.store(false, Ordering::Relaxed);
-                }
-                Err(e) => return Err(e),
-            }
-        }
-        cvt(unsafe { libc::fcntl(fd, libc::F_DUPFD, 0) }).and_then(make_filedesc)
+        // is a POSIX flag that was added to Linux in 2.6.24.
+        let fd = cvt(unsafe { libc::fcntl(self.raw(), libc::F_DUPFD_CLOEXEC, 0) })?;
+        Ok(FileDesc::new(fd))
     }
 }

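For illustration, a minimal standalone sketch (not from the patch) of the simplified approach: duplicating a descriptor and setting close-on-exec atomically with a single `fcntl(F_DUPFD_CLOEXEC)` call. It assumes the `libc` crate on a Unix target; `dup_cloexec` is a hypothetical helper.

```rust
use std::io;

fn dup_cloexec(fd: libc::c_int) -> io::Result<libc::c_int> {
    // F_DUPFD_CLOEXEC duplicates `fd` and sets FD_CLOEXEC on the copy in one step.
    let new_fd = unsafe { libc::fcntl(fd, libc::F_DUPFD_CLOEXEC, 0) };
    if new_fd < 0 { Err(io::Error::last_os_error()) } else { Ok(new_fd) }
}

fn main() -> io::Result<()> {
    // Duplicate stdin (fd 0) purely as a demonstration, then close the copy.
    let copy = dup_cloexec(0)?;
    unsafe { libc::close(copy) };
    Ok(())
}
```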
@@ -708,56 +708,7 @@ impl File {
         // However, since this is a variadic function, C integer promotion rules mean that on
         // the ABI level, this still gets passed as `c_int` (aka `u32` on Unix platforms).
         let fd = cvt_r(|| unsafe { open64(path.as_ptr(), flags, opts.mode as c_int) })?;
-        let fd = FileDesc::new(fd);
-
-        // Currently the standard library supports Linux 2.6.18 which did not
-        // have the O_CLOEXEC flag (passed above). If we're running on an older
-        // Linux kernel then the flag is just ignored by the OS. After we open
-        // the first file, we check whether it has CLOEXEC set. If it doesn't,
-        // we will explicitly ask for a CLOEXEC fd for every further file we
-        // open, if it does, we will skip that step.
-        //
-        // The CLOEXEC flag, however, is supported on versions of macOS/BSD/etc
-        // that we support, so we only do this on Linux currently.
-        #[cfg(target_os = "linux")]
-        fn ensure_cloexec(fd: &FileDesc) -> io::Result<()> {
-            use crate::sync::atomic::{AtomicUsize, Ordering};
-
-            const OPEN_CLOEXEC_UNKNOWN: usize = 0;
-            const OPEN_CLOEXEC_SUPPORTED: usize = 1;
-            const OPEN_CLOEXEC_NOTSUPPORTED: usize = 2;
-            static OPEN_CLOEXEC: AtomicUsize = AtomicUsize::new(OPEN_CLOEXEC_UNKNOWN);
-
-            let need_to_set;
-            match OPEN_CLOEXEC.load(Ordering::Relaxed) {
-                OPEN_CLOEXEC_UNKNOWN => {
-                    need_to_set = !fd.get_cloexec()?;
-                    OPEN_CLOEXEC.store(
-                        if need_to_set {
-                            OPEN_CLOEXEC_NOTSUPPORTED
-                        } else {
-                            OPEN_CLOEXEC_SUPPORTED
-                        },
-                        Ordering::Relaxed,
-                    );
-                }
-                OPEN_CLOEXEC_SUPPORTED => need_to_set = false,
-                OPEN_CLOEXEC_NOTSUPPORTED => need_to_set = true,
-                _ => unreachable!(),
-            }
-            if need_to_set {
-                fd.set_cloexec()?;
-            }
-            Ok(())
-        }
-
-        #[cfg(not(target_os = "linux"))]
-        fn ensure_cloexec(_: &FileDesc) -> io::Result<()> {
-            Ok(())
-        }
-
-        ensure_cloexec(&fd)?;
-        Ok(File(fd))
+        Ok(File(FileDesc::new(fd)))
     }

     pub fn file_attr(&self) -> io::Result<FileAttr> {
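For illustration, a user-level sketch (not from the patch) of relying on `O_CLOEXEC` at open time rather than fixing the flag up afterwards. It uses `OpenOptionsExt::custom_flags` with the `libc` crate on Unix; the path is a placeholder, and std already passes `O_CLOEXEC` itself, so this only demonstrates the idea from outside the standard library.

```rust
use std::fs::OpenOptions;
use std::io;
use std::os::unix::fs::OpenOptionsExt;

fn main() -> io::Result<()> {
    // Ask the kernel to set close-on-exec atomically at open time, so a forked
    // child can never observe a window where the descriptor is inheritable.
    let file = OpenOptions::new()
        .read(true)
        .custom_flags(libc::O_CLOEXEC)
        .open("/etc/hostname")?;
    drop(file);
    Ok(())
}
```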
@@ -54,31 +54,26 @@ impl Socket {

     pub fn new_raw(fam: c_int, ty: c_int) -> io::Result<Socket> {
         unsafe {
-            // On linux we first attempt to pass the SOCK_CLOEXEC flag to
-            // atomically create the socket and set it as CLOEXEC. Support for
-            // this option, however, was added in 2.6.27, and we still support
-            // 2.6.18 as a kernel, so if the returned error is EINVAL we
-            // fallthrough to the fallback.
-            #[cfg(target_os = "linux")]
-            {
-                match cvt(libc::socket(fam, ty | libc::SOCK_CLOEXEC, 0)) {
-                    Ok(fd) => return Ok(Socket(FileDesc::new(fd))),
-                    Err(ref e) if e.raw_os_error() == Some(libc::EINVAL) => {}
-                    Err(e) => return Err(e),
-                }
-            }
-
-            let fd = cvt(libc::socket(fam, ty, 0))?;
-            let fd = FileDesc::new(fd);
-            fd.set_cloexec()?;
-            let socket = Socket(fd);
-
-            // macOS and iOS use `SO_NOSIGPIPE` as a `setsockopt`
-            // flag to disable `SIGPIPE` emission on socket.
-            #[cfg(target_vendor = "apple")]
-            setsockopt(&socket, libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1)?;
-
-            Ok(socket)
+            cfg_if::cfg_if! {
+                if #[cfg(target_os = "linux")] {
+                    // On Linux we pass the SOCK_CLOEXEC flag to atomically create
+                    // the socket and set it as CLOEXEC, added in 2.6.27.
+                    let fd = cvt(libc::socket(fam, ty | libc::SOCK_CLOEXEC, 0))?;
+                    Ok(Socket(FileDesc::new(fd)))
+                } else {
+                    let fd = cvt(libc::socket(fam, ty, 0))?;
+                    let fd = FileDesc::new(fd);
+                    fd.set_cloexec()?;
+                    let socket = Socket(fd);
+
+                    // macOS and iOS use `SO_NOSIGPIPE` as a `setsockopt`
+                    // flag to disable `SIGPIPE` emission on socket.
+                    #[cfg(target_vendor = "apple")]
+                    setsockopt(&socket, libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1)?;
+
+                    Ok(socket)
+                }
+            }
         }
     }

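For illustration, a minimal standalone sketch (not from the patch) of creating a socket with `SOCK_CLOEXEC` in a single call, the pattern the Linux branch above now uses unconditionally. It assumes the `libc` crate on a Linux target; `cloexec_socket` is a hypothetical helper.

```rust
use std::io;

fn cloexec_socket() -> io::Result<libc::c_int> {
    // The kernel sets close-on-exec atomically, so no separate fcntl call is needed.
    let fd = unsafe { libc::socket(libc::AF_INET, libc::SOCK_STREAM | libc::SOCK_CLOEXEC, 0) };
    if fd < 0 { Err(io::Error::last_os_error()) } else { Ok(fd) }
}

fn main() -> io::Result<()> {
    let fd = cloexec_socket()?;
    unsafe { libc::close(fd) };
    Ok(())
}
```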
@@ -86,24 +81,20 @@ impl Socket {
         unsafe {
             let mut fds = [0, 0];

-            // Like above, see if we can set cloexec atomically
-            #[cfg(target_os = "linux")]
-            {
-                match cvt(libc::socketpair(fam, ty | libc::SOCK_CLOEXEC, 0, fds.as_mut_ptr())) {
-                    Ok(_) => {
-                        return Ok((Socket(FileDesc::new(fds[0])), Socket(FileDesc::new(fds[1]))));
-                    }
-                    Err(ref e) if e.raw_os_error() == Some(libc::EINVAL) => {}
-                    Err(e) => return Err(e),
-                }
-            }
-
-            cvt(libc::socketpair(fam, ty, 0, fds.as_mut_ptr()))?;
-            let a = FileDesc::new(fds[0]);
-            let b = FileDesc::new(fds[1]);
-            a.set_cloexec()?;
-            b.set_cloexec()?;
-            Ok((Socket(a), Socket(b)))
+            cfg_if::cfg_if! {
+                if #[cfg(target_os = "linux")] {
+                    // Like above, set cloexec atomically
+                    cvt(libc::socketpair(fam, ty | libc::SOCK_CLOEXEC, 0, fds.as_mut_ptr()))?;
+                    Ok((Socket(FileDesc::new(fds[0])), Socket(FileDesc::new(fds[1]))))
+                } else {
+                    cvt(libc::socketpair(fam, ty, 0, fds.as_mut_ptr()))?;
+                    let a = FileDesc::new(fds[0]);
+                    let b = FileDesc::new(fds[1]);
+                    a.set_cloexec()?;
+                    b.set_cloexec()?;
+                    Ok((Socket(a), Socket(b)))
+                }
+            }
         }
     }

@@ -177,30 +168,20 @@ impl Socket {
     pub fn accept(&self, storage: *mut sockaddr, len: *mut socklen_t) -> io::Result<Socket> {
         // Unfortunately the only known way right now to accept a socket and
         // atomically set the CLOEXEC flag is to use the `accept4` syscall on
-        // Linux. This was added in 2.6.28, however, and because we support
-        // 2.6.18 we must detect this support dynamically.
-        #[cfg(target_os = "linux")]
-        {
-            syscall! {
-                fn accept4(
-                    fd: c_int,
-                    addr: *mut sockaddr,
-                    addr_len: *mut socklen_t,
-                    flags: c_int
-                ) -> c_int
-            }
-            let res = cvt_r(|| unsafe { accept4(self.0.raw(), storage, len, libc::SOCK_CLOEXEC) });
-            match res {
-                Ok(fd) => return Ok(Socket(FileDesc::new(fd))),
-                Err(ref e) if e.raw_os_error() == Some(libc::ENOSYS) => {}
-                Err(e) => return Err(e),
-            }
-        }
-
-        let fd = cvt_r(|| unsafe { libc::accept(self.0.raw(), storage, len) })?;
-        let fd = FileDesc::new(fd);
-        fd.set_cloexec()?;
-        Ok(Socket(fd))
+        // Linux. This was added in 2.6.28, glibc 2.10 and musl 0.9.5.
+        cfg_if::cfg_if! {
+            if #[cfg(target_os = "linux")] {
+                let fd = cvt_r(|| unsafe {
+                    libc::accept4(self.0.raw(), storage, len, libc::SOCK_CLOEXEC)
+                })?;
+                Ok(Socket(FileDesc::new(fd)))
+            } else {
+                let fd = cvt_r(|| unsafe { libc::accept(self.0.raw(), storage, len) })?;
+                let fd = FileDesc::new(fd);
+                fd.set_cloexec()?;
+                Ok(Socket(fd))
+            }
+        }
     }

     pub fn duplicate(&self) -> io::Result<Socket> {
@@ -71,6 +71,7 @@ pub fn errno() -> i32 {

 /// Sets the platform-specific value of errno
 #[cfg(all(not(target_os = "linux"), not(target_os = "dragonfly")))] // needed for readdir and syscall!
+#[allow(dead_code)] // but not all target cfgs actually end up using it
 pub fn set_errno(e: i32) {
     unsafe { *errno_location() = e as c_int }
 }
@@ -1,11 +1,8 @@
 use crate::io::{self, IoSlice, IoSliceMut};
 use crate::mem;
-use crate::sync::atomic::{AtomicBool, Ordering};
 use crate::sys::fd::FileDesc;
 use crate::sys::{cvt, cvt_r};

-use libc::c_int;
-
 ////////////////////////////////////////////////////////////////////////////////
 // Anonymous pipes
 ////////////////////////////////////////////////////////////////////////////////
@@ -13,46 +10,32 @@ use libc::c_int;
 pub struct AnonPipe(FileDesc);

 pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> {
-    syscall! { fn pipe2(fds: *mut c_int, flags: c_int) -> c_int }
-    static INVALID: AtomicBool = AtomicBool::new(false);
-
     let mut fds = [0; 2];

-    // Unfortunately the only known way right now to create atomically set the
-    // CLOEXEC flag is to use the `pipe2` syscall on Linux. This was added in
-    // 2.6.27, however, and because we support 2.6.18 we must detect this
-    // support dynamically.
-    if cfg!(any(
-        target_os = "dragonfly",
-        target_os = "freebsd",
-        target_os = "linux",
-        target_os = "netbsd",
-        target_os = "openbsd",
-        target_os = "redox"
-    )) && !INVALID.load(Ordering::SeqCst)
-    {
-        // Note that despite calling a glibc function here we may still
-        // get ENOSYS. Glibc has `pipe2` since 2.9 and doesn't try to
-        // emulate on older kernels, so if you happen to be running on
-        // an older kernel you may see `pipe2` as a symbol but still not
-        // see the syscall.
-        match cvt(unsafe { pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC) }) {
-            Ok(_) => {
-                return Ok((AnonPipe(FileDesc::new(fds[0])), AnonPipe(FileDesc::new(fds[1]))));
-            }
-            Err(ref e) if e.raw_os_error() == Some(libc::ENOSYS) => {
-                INVALID.store(true, Ordering::SeqCst);
-            }
-            Err(e) => return Err(e),
-        }
-    }
-    cvt(unsafe { libc::pipe(fds.as_mut_ptr()) })?;
-
-    let fd0 = FileDesc::new(fds[0]);
-    let fd1 = FileDesc::new(fds[1]);
-    fd0.set_cloexec()?;
-    fd1.set_cloexec()?;
-    Ok((AnonPipe(fd0), AnonPipe(fd1)))
+    // The only known way right now to create atomically set the CLOEXEC flag is
+    // to use the `pipe2` syscall. This was added to Linux in 2.6.27, glibc 2.9
+    // and musl 0.9.3, and some other targets also have it.
+    cfg_if::cfg_if! {
+        if #[cfg(any(
+            target_os = "dragonfly",
+            target_os = "freebsd",
+            target_os = "linux",
+            target_os = "netbsd",
+            target_os = "openbsd",
+            target_os = "redox"
+        ))] {
+            cvt(unsafe { libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC) })?;
+            Ok((AnonPipe(FileDesc::new(fds[0])), AnonPipe(FileDesc::new(fds[1]))))
+        } else {
+            cvt(unsafe { libc::pipe(fds.as_mut_ptr()) })?;

+            let fd0 = FileDesc::new(fds[0]);
+            let fd1 = FileDesc::new(fds[1]);
+            fd0.set_cloexec()?;
+            fd1.set_cloexec()?;
+            Ok((AnonPipe(fd0), AnonPipe(fd1)))
+        }
+    }
 }

 impl AnonPipe {
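For illustration, a standalone sketch (not from the patch) of the `pipe2(O_CLOEXEC)` call that the rewritten `anon_pipe` now uses unconditionally on the listed targets. It assumes the `libc` crate on Linux or another OS that provides `pipe2`; `cloexec_pipe` is a hypothetical helper.

```rust
use std::io;

fn cloexec_pipe() -> io::Result<(libc::c_int, libc::c_int)> {
    // Both ends of the pipe get close-on-exec set atomically by the kernel.
    let mut fds = [0 as libc::c_int; 2];
    let rc = unsafe { libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC) };
    if rc < 0 { Err(io::Error::last_os_error()) } else { Ok((fds[0], fds[1])) }
}

fn main() -> io::Result<()> {
    let (read_end, write_end) = cloexec_pipe()?;
    unsafe {
        libc::close(read_end);
        libc::close(write_end);
    }
    Ok(())
}
```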
@@ -16,6 +16,11 @@
 //! symbol, but that caused Debian to detect an unnecessarily strict versioned
 //! dependency on libc6 (#23628).

+// There are a variety of `#[cfg]`s controlling which targets are involved in
+// each instance of `weak!` and `syscall!`. Rather than trying to unify all of
+// that, we'll just allow that some unix targets don't use this module at all.
+#![allow(dead_code, unused_macros)]
+
 use crate::ffi::CStr;
 use crate::marker;
 use crate::mem;
@@ -13,12 +13,10 @@ pub struct FileDesc {
     fd: c_int,
 }

-fn max_len() -> usize {
-    // The maximum read limit on most posix-like systems is `SSIZE_MAX`,
-    // with the man page quoting that if the count of bytes to read is
-    // greater than `SSIZE_MAX` the result is "unspecified".
-    <ssize_t>::MAX as usize
-}
+// The maximum read limit on most POSIX-like systems is `SSIZE_MAX`,
+// with the man page quoting that if the count of bytes to read is
+// greater than `SSIZE_MAX` the result is "unspecified".
+const READ_LIMIT: usize = ssize_t::MAX as usize;

 impl FileDesc {
     pub fn new(fd: c_int) -> FileDesc {
@@ -29,7 +27,7 @@ impl FileDesc {
         self.fd
     }

-    /// Extracts the actual filedescriptor without closing it.
+    /// Extracts the actual file descriptor without closing it.
     pub fn into_raw(self) -> c_int {
         let fd = self.fd;
         mem::forget(self);

@@ -38,7 +36,7 @@ impl FileDesc {

     pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
         let ret = cvt(unsafe {
-            libc::read(self.fd, buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), max_len()))
+            libc::read(self.fd, buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), READ_LIMIT))
         })?;
         Ok(ret as usize)
     }

@@ -79,7 +77,7 @@ impl FileDesc {
             cvt_pread(
                 self.fd,
                 buf.as_mut_ptr() as *mut c_void,
-                cmp::min(buf.len(), max_len()),
+                cmp::min(buf.len(), READ_LIMIT),
                 offset as i64,
             )
             .map(|n| n as usize)

@@ -88,7 +86,7 @@ impl FileDesc {

     pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
         let ret = cvt(unsafe {
-            libc::write(self.fd, buf.as_ptr() as *const c_void, cmp::min(buf.len(), max_len()))
+            libc::write(self.fd, buf.as_ptr() as *const c_void, cmp::min(buf.len(), READ_LIMIT))
         })?;
         Ok(ret as usize)
     }

@@ -124,7 +122,7 @@ impl FileDesc {
             cvt_pwrite(
                 self.fd,
                 buf.as_ptr() as *const c_void,
-                cmp::min(buf.len(), max_len()),
+                cmp::min(buf.len(), READ_LIMIT),
                 offset as i64,
             )
             .map(|n| n as usize)
@@ -1,6 +1,6 @@
 use crate::cmp::Ordering;
 use crate::time::Duration;
-use ::core::hash::{Hash, Hasher};
+use core::hash::{Hash, Hasher};

 pub use self::inner::{Instant, SystemTime, UNIX_EPOCH};
 use crate::convert::TryInto;
@@ -597,14 +597,14 @@ fn open_at(fd: &WasiFd, path: &Path, opts: &OpenOptions) -> io::Result<File> {
 ///
 /// WASI has no fundamental capability to do this. All syscalls and operations
 /// are relative to already-open file descriptors. The C library, however,
-/// manages a map of preopened file descriptors to their path, and then the C
+/// manages a map of pre-opened file descriptors to their path, and then the C
 /// library provides an API to look at this. In other words, when you want to
 /// open a path `p`, you have to find a previously opened file descriptor in a
 /// global table and then see if `p` is relative to that file descriptor.
 ///
 /// This function, if successful, will return two items:
 ///
-/// * The first is a `ManuallyDrop<WasiFd>`. This represents a preopened file
+/// * The first is a `ManuallyDrop<WasiFd>`. This represents a pre-opened file
 /// descriptor which we don't have ownership of, but we can use. You shouldn't
 /// actually drop the `fd`.
 ///

@@ -619,7 +619,7 @@ fn open_at(fd: &WasiFd, path: &Path, opts: &OpenOptions) -> io::Result<File> {
 /// appropriate rights for performing `rights` actions.
 ///
 /// Note that this can fail if `p` doesn't look like it can be opened relative
-/// to any preopened file descriptor.
+/// to any pre-opened file descriptor.
 fn open_parent(p: &Path) -> io::Result<(ManuallyDrop<WasiFd>, PathBuf)> {
     let p = CString::new(p.as_os_str().as_bytes())?;
     unsafe {

@@ -627,7 +627,7 @@ fn open_parent(p: &Path) -> io::Result<(ManuallyDrop<WasiFd>, PathBuf)> {
         let fd = __wasilibc_find_relpath(p.as_ptr(), &mut ret);
         if fd == -1 {
             let msg = format!(
-                "failed to find a preopened file descriptor \
+                "failed to find a pre-opened file descriptor \
                 through which {:?} could be opened",
                 p
             );
@@ -2,6 +2,7 @@
 // unchecked intrinsics.

 // compile-flags: -C opt-level=3
+// ignore-wasm32 the wasm target is tested in `wasm_casts_*`

 #![crate_type = "lib"]

@@ -38,7 +38,6 @@ pub fn cast_f32_i32(a: f32) -> i32 {
     a as _
 }

-
 // CHECK-LABEL: @cast_f64_u64
 #[no_mangle]
 pub fn cast_f64_u64(a: f64) -> u64 {

@@ -84,13 +83,10 @@ pub fn cast_f32_u8(a: f32) -> u8 {
     a as _
 }

-
-
 // CHECK-LABEL: @cast_unchecked_f64_i64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_i64(a: f64) -> i64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi double {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }

@@ -98,8 +94,7 @@ pub unsafe fn cast_unchecked_f64_i64(a: f64) -> i64 {
 // CHECK-LABEL: @cast_unchecked_f64_i32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_i32(a: f64) -> i32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi double {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }

@@ -107,8 +102,7 @@ pub unsafe fn cast_unchecked_f64_i32(a: f64) -> i32 {
 // CHECK-LABEL: @cast_unchecked_f32_i64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_i64(a: f32) -> i64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi float {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }

@@ -116,18 +110,15 @@ pub unsafe fn cast_unchecked_f32_i64(a: f32) -> i64 {
 // CHECK-LABEL: @cast_unchecked_f32_i32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_i32(a: f32) -> i32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi float {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }

-
 // CHECK-LABEL: @cast_unchecked_f64_u64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_u64(a: f64) -> u64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui double {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }

@@ -135,8 +126,7 @@ pub unsafe fn cast_unchecked_f64_u64(a: f64) -> u64 {
 // CHECK-LABEL: @cast_unchecked_f64_u32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_u32(a: f64) -> u32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui double {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }

@@ -144,8 +134,7 @@ pub unsafe fn cast_unchecked_f64_u32(a: f64) -> u32 {
 // CHECK-LABEL: @cast_unchecked_f32_u64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_u64(a: f32) -> u64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui float {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }

@@ -153,8 +142,7 @@ pub unsafe fn cast_unchecked_f32_u64(a: f32) -> u64 {
 // CHECK-LABEL: @cast_unchecked_f32_u32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_u32(a: f32) -> u32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui float {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }

src/test/ui/const-generics/defaults/wrong-order.rs (new file, 8 lines)
@@ -0,0 +1,8 @@
+#![feature(const_generics)] //~ WARN the feature `const_generics` is incomplete
+
+struct A<T = u32, const N: usize> {
+    //~^ ERROR type parameters with a default must be trailing
+    arg: T,
+}
+
+fn main() {}

src/test/ui/const-generics/defaults/wrong-order.stderr (new file, 19 lines)
@@ -0,0 +1,19 @@
+error: type parameters with a default must be trailing
+  --> $DIR/wrong-order.rs:3:10
+   |
+LL | struct A<T = u32, const N: usize> {
+   |          ^
+   |
+   = note: using type defaults and const parameters in the same parameter list is currently not permitted
+
+warning: the feature `const_generics` is incomplete and may not be safe to use and/or cause compiler crashes
+  --> $DIR/wrong-order.rs:1:12
+   |
+LL | #![feature(const_generics)]
+   |            ^^^^^^^^^^^^^^
+   |
+   = note: `#[warn(incomplete_features)]` on by default
+   = note: see issue #44580 <https://github.com/rust-lang/rust/issues/44580> for more information
+
+error: aborting due to previous error; 1 warning emitted
+