Auto merge of #70884 - Dylan-DPC:rollup-r3raqdf, r=jonas-schievink

Rollup of 5 pull requests

Successful merges:

 - #70201 (Small tweaks in ToOwned::clone_into)
 - #70762 (Miri leak check: memory reachable through globals is not leaked)
 - #70846 (Keep codegen units unmerged when building compiler builtins)
 - #70854 (Use assoc int submodules)
 - #70857 (Don't import integer and float modules, use assoc consts 2)

Failed merges:

r? @ghost
bors 2020-04-07 13:43:30 +00:00
commit 42abbd8878
39 changed files with 255 additions and 156 deletions
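
Two of the PRs above (#70854, #70857) account for most of the churn below: a mechanical move away from the per-type numeric modules and onto the associated constants of the primitive types. A representative before/after, illustrative rather than a line taken from this diff:

fn main() {
    // Before: constants reached through the per-type numeric modules.
    let old_max = std::u32::MAX;
    let old_nan = std::f32::NAN;
    // After: associated constants directly on the primitives, no import needed.
    let new_max = u32::MAX;
    let new_nan = f32::NAN;
    assert_eq!(old_max, new_max);
    assert!(old_nan.is_nan() && new_nan.is_nan());
}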

@@ -1 +1 @@
Subproject commit 411197b0e77590c967e37e8f6ec681abd359afe8
Subproject commit 6eb24d6e9c0773d4aee68ed5fca121ce3cdf676a

@@ -1 +1 @@
Subproject commit edd2a7e687358712608896730c083cb76c7b401a
Subproject commit a6638463efc7631bc0e8dc67ccd256d4e1b61f1a

@@ -608,7 +608,7 @@ unsafe impl<#[may_dangle] T, A: AllocRef> Drop for RawVec<T, A> {
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
if mem::size_of::<usize>() < 8 && alloc_size > core::isize::MAX as usize {
if mem::size_of::<usize>() < 8 && alloc_size > isize::MAX as usize {
Err(CapacityOverflow)
} else {
Ok(())
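
The path change above is the whole diff; the guard's logic is untouched. As a standalone sketch of that logic (with a hypothetical string error instead of the real TryReserveError plumbing):

// On targets where usize is narrower than 64 bits, sizes above isize::MAX
// must be rejected; on 64-bit targets such an allocation fails anyway.
fn alloc_guard(alloc_size: usize) -> Result<(), &'static str> {
    if std::mem::size_of::<usize>() < 8 && alloc_size > isize::MAX as usize {
        Err("capacity overflow")
    } else {
        Ok(())
    }
}

fn main() {
    assert!(alloc_guard(1024).is_ok());
}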

@@ -733,14 +733,14 @@ impl<T: Clone> ToOwned for [T] {
fn clone_into(&self, target: &mut Vec<T>) {
// drop anything in target that will not be overwritten
target.truncate(self.len());
let len = target.len();
// reuse the contained values' allocations/resources.
target.clone_from_slice(&self[..len]);
// target.len <= self.len due to the truncate above, so the
// slice here is always in-bounds.
target.extend_from_slice(&self[len..]);
// slices here are always in-bounds.
let (init, tail) = self.split_at(target.len());
// reuse the contained values' allocations/resources.
target.clone_from_slice(init);
target.extend_from_slice(tail);
}
}
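
A usage sketch of the rewritten method (assumes a toolchain where ToOwned::clone_into is usable; it was unstable behind the toowned_clone_into feature when this landed): truncate drops surplus elements, clone_from_slice clones into the survivors in place, reusing their allocations, and extend_from_slice appends whatever remains.

#![feature(toowned_clone_into)]

fn main() {
    let src: &[String] = &["alpha".to_owned(), "beta".to_owned(), "gamma".to_owned()];
    // Five existing Strings: two get dropped, three get overwritten in place.
    let mut target = vec!["recycled".to_owned(); 5];
    src.clone_into(&mut target);
    assert_eq!(target, src);
}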

@@ -50,7 +50,7 @@ fn trait_object() {
#[test]
fn float_nan_ne() {
let x = Arc::new(std::f32::NAN);
let x = Arc::new(f32::NAN);
assert!(x != x);
assert!(!(x == x));
}

@@ -475,7 +475,7 @@ fn test_range_large() {
#[test]
fn test_range_inclusive_max_value() {
let max = std::usize::MAX;
let max = usize::MAX;
let map: BTreeMap<_, _> = vec![(max, 0)].into_iter().collect();
assert_eq!(map.range(max..=max).collect::<Vec<_>>(), &[(&max, &0)]);

@@ -50,7 +50,7 @@ fn trait_object() {
#[test]
fn float_nan_ne() {
let x = Rc::new(std::f32::NAN);
let x = Rc::new(f32::NAN);
assert!(x != x);
assert!(!(x == x));
}

@@ -1,4 +1,3 @@
use std::f64;
use test::Bencher;
#[bench]

@@ -5,7 +5,6 @@ mod strategy {
use core::num::flt2dec::MAX_SIG_DIGITS;
use core::num::flt2dec::{decode, DecodableFloat, Decoded, FullDecoded};
use std::f64;
use std::io::Write;
use std::vec::Vec;
use test::Bencher;

@@ -76,7 +76,6 @@ fn test_cmp_by() {
#[test]
fn test_partial_cmp_by() {
use core::cmp::Ordering;
use core::f64;
let f = |x: i32, y: i32| (x * x).partial_cmp(&y);
let xs = || [1, 2, 3, 4].iter().copied();
@@ -2894,7 +2893,7 @@ fn test_is_sorted() {
assert!(![1, 3, 2].iter().is_sorted());
assert!([0].iter().is_sorted());
assert!(std::iter::empty::<i32>().is_sorted());
assert!(![0.0, 1.0, std::f32::NAN].iter().is_sorted());
assert!(![0.0, 1.0, f32::NAN].iter().is_sorted());
assert!([-2, -1, 0, 3].iter().is_sorted());
assert!(![-2i32, -1, 0, 3].iter().is_sorted_by_key(|n| n.abs()));
assert!(!["c", "bb", "aaa"].iter().is_sorted());

@@ -1,8 +1,6 @@
use core::num::dec2flt::rawfp::RawFloat;
use core::num::dec2flt::rawfp::{fp_to_float, next_float, prev_float, round_normal};
use core::num::diy_float::Fp;
use std::f32;
use std::f64;
fn integer_decode(f: f64) -> (u64, i16, i8) {
RawFloat::integer_decode(f)

@@ -205,8 +205,6 @@ test_impl_from! { test_u32f64, u32, f64 }
// Float -> Float
#[test]
fn test_f32f64() {
use core::f32;
let max: f64 = f32::MAX.into();
assert_eq!(max as f32, f32::MAX);
assert!(max.is_normal());
@@ -704,5 +702,5 @@ macro_rules! test_float {
};
}
test_float!(f32, f32, ::core::f32::INFINITY, ::core::f32::NEG_INFINITY, ::core::f32::NAN);
test_float!(f64, f64, ::core::f64::INFINITY, ::core::f64::NEG_INFINITY, ::core::f64::NAN);
test_float!(f32, f32, f32::INFINITY, f32::NEG_INFINITY, f32::NAN);
test_float!(f64, f64, f64::INFINITY, f64::NEG_INFINITY, f64::NAN);

@@ -61,25 +61,23 @@ fn test_range_inclusive() {
#[test]
fn test_range_is_empty() {
use core::f32::*;
assert!(!(0.0..10.0).is_empty());
assert!((-0.0..0.0).is_empty());
assert!((10.0..0.0).is_empty());
assert!(!(NEG_INFINITY..INFINITY).is_empty());
assert!((EPSILON..NAN).is_empty());
assert!((NAN..EPSILON).is_empty());
assert!((NAN..NAN).is_empty());
assert!(!(f32::NEG_INFINITY..f32::INFINITY).is_empty());
assert!((f32::EPSILON..f32::NAN).is_empty());
assert!((f32::NAN..f32::EPSILON).is_empty());
assert!((f32::NAN..f32::NAN).is_empty());
assert!(!(0.0..=10.0).is_empty());
assert!(!(-0.0..=0.0).is_empty());
assert!((10.0..=0.0).is_empty());
assert!(!(NEG_INFINITY..=INFINITY).is_empty());
assert!((EPSILON..=NAN).is_empty());
assert!((NAN..=EPSILON).is_empty());
assert!((NAN..=NAN).is_empty());
assert!(!(f32::NEG_INFINITY..=f32::INFINITY).is_empty());
assert!((f32::EPSILON..=f32::NAN).is_empty());
assert!((f32::NAN..=f32::EPSILON).is_empty());
assert!((f32::NAN..=f32::NAN).is_empty());
}
#[test]
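
The NaN cases all follow from is_empty being !(start < end) combined with every comparison against NaN being false. A quick check, using Range::is_empty (still the unstable range_is_empty feature at the time; stabilized later):

fn main() {
    // NaN compares as unordered, so no range involving it is non-empty.
    assert_eq!(f32::NAN.partial_cmp(&f32::EPSILON), None);
    assert!((f32::EPSILON..f32::NAN).is_empty());
    assert!((f32::NAN..=f32::NAN).is_empty());
}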

@@ -1108,14 +1108,14 @@ mod slice_index {
// note: using 0 specifically ensures that the result of overflowing is 0..0,
// so that `get` doesn't simply return None for the wrong reason.
bad: data[0 ..= ::std::usize::MAX];
bad: data[0 ..= usize::MAX];
message: "maximum usize";
}
in mod rangetoinclusive_overflow {
data: [0, 1];
bad: data[..= ::std::usize::MAX];
bad: data[..= usize::MAX];
message: "maximum usize";
}
} // panic_cases!
@@ -1709,7 +1709,7 @@ fn test_is_sorted() {
assert!(![1, 3, 2].is_sorted());
assert!([0].is_sorted());
assert!(empty.is_sorted());
assert!(![0.0, 1.0, std::f32::NAN].is_sorted());
assert!(![0.0, 1.0, f32::NAN].is_sorted());
assert!([-2, -1, 0, 3].is_sorted());
assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
assert!(!["c", "bb", "aaa"].is_sorted());

@@ -14,7 +14,7 @@ fn creation() {
#[test]
#[should_panic]
fn new_overflow() {
let _ = Duration::new(::core::u64::MAX, 1_000_000_000);
let _ = Duration::new(u64::MAX, 1_000_000_000);
}
#[test]
@@ -86,7 +86,7 @@ fn checked_add() {
Duration::new(0, 500_000_000).checked_add(Duration::new(0, 500_000_001)),
Some(Duration::new(1, 1))
);
assert_eq!(Duration::new(1, 0).checked_add(Duration::new(::core::u64::MAX, 0)), None);
assert_eq!(Duration::new(1, 0).checked_add(Duration::new(u64::MAX, 0)), None);
}
#[test]
@@ -133,7 +133,7 @@ fn checked_mul() {
assert_eq!(Duration::new(1, 1).checked_mul(3), Some(Duration::new(3, 3)));
assert_eq!(Duration::new(0, 500_000_001).checked_mul(4), Some(Duration::new(2, 4)));
assert_eq!(Duration::new(0, 500_000_001).checked_mul(4000), Some(Duration::new(2000, 4000)));
assert_eq!(Duration::new(::core::u64::MAX - 1, 0).checked_mul(2), None);
assert_eq!(Duration::new(u64::MAX - 1, 0).checked_mul(2), None);
}
#[test]

@@ -1,5 +1,4 @@
use std::cmp::Ordering::{Equal, Greater, Less};
use std::f64::NAN;
#[test]
fn test_clone() {
@@ -34,12 +33,12 @@ fn test_partial_ord() {
assert!(big >= small);
assert!(big >= big);
assert!(!((1.0f64, 2.0f64) < (NAN, 3.0)));
assert!(!((1.0f64, 2.0f64) <= (NAN, 3.0)));
assert!(!((1.0f64, 2.0f64) > (NAN, 3.0)));
assert!(!((1.0f64, 2.0f64) >= (NAN, 3.0)));
assert!(((1.0f64, 2.0f64) < (2.0, NAN)));
assert!(!((2.0f64, 2.0f64) < (2.0, NAN)));
assert!(!((1.0f64, 2.0f64) < (f64::NAN, 3.0)));
assert!(!((1.0f64, 2.0f64) <= (f64::NAN, 3.0)));
assert!(!((1.0f64, 2.0f64) > (f64::NAN, 3.0)));
assert!(!((1.0f64, 2.0f64) >= (f64::NAN, 3.0)));
assert!(((1.0f64, 2.0f64) < (2.0, f64::NAN)));
assert!(!((2.0f64, 2.0f64) < (2.0, f64::NAN)));
}
#[test]

@@ -1528,7 +1528,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
}
}
pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX;
pub const CODEGEN_WORKER_ID: usize = usize::MAX;
/// `FatalError` is explicitly not `Send`.
#[must_use]

@@ -507,7 +507,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
}
pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX;
pub const CODEGEN_WORKER_ID: usize = usize::MAX;
pub fn codegen_crate<B: ExtraBackendMethods>(
backend: B,

@@ -1361,7 +1361,7 @@ impl EmitterWriter {
let mut multilines = FxHashMap::default();
// Get the left-side margin to remove it
let mut whitespace_margin = std::usize::MAX;
let mut whitespace_margin = usize::MAX;
for line_idx in 0..annotated_file.lines.len() {
let file = annotated_file.file.clone();
let line = &annotated_file.lines[line_idx];
@@ -1373,19 +1373,19 @@ impl EmitterWriter {
}
}
}
if whitespace_margin == std::usize::MAX {
if whitespace_margin == usize::MAX {
whitespace_margin = 0;
}
// Left-most column any visible span points at.
let mut span_left_margin = std::usize::MAX;
let mut span_left_margin = usize::MAX;
for line in &annotated_file.lines {
for ann in &line.annotations {
span_left_margin = min(span_left_margin, ann.start_col);
span_left_margin = min(span_left_margin, ann.end_col);
}
}
if span_left_margin == std::usize::MAX {
if span_left_margin == usize::MAX {
span_left_margin = 0;
}
@@ -1421,7 +1421,7 @@ impl EmitterWriter {
} else {
termize::dimensions()
.map(|(w, _)| w.saturating_sub(code_offset))
.unwrap_or(std::usize::MAX)
.unwrap_or(usize::MAX)
};
let margin = Margin::new(

@@ -307,7 +307,7 @@ impl<'a, T: Idx> BitIter<'a, T> {
// additional state about whether we have started.
BitIter {
word: 0,
offset: std::usize::MAX - (WORD_BITS - 1),
offset: usize::MAX - (WORD_BITS - 1),
iter: words.iter(),
marker: PhantomData,
}

@@ -51,7 +51,7 @@ pub trait AllocMap<K: Hash + Eq, V> {
where
K: Borrow<Q>;
/// Returns data based the keys and values in the map.
/// Returns data based on the keys and values in the map.
fn filter_map_collect<T>(&self, f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T>;
/// Returns a reference to entry `k`. If no such entry exists, call
@@ -79,7 +79,7 @@ pub trait AllocMap<K: Hash + Eq, V> {
/// and some use case dependent behaviour can instead be applied.
pub trait Machine<'mir, 'tcx>: Sized {
/// Additional memory kinds a machine wishes to distinguish from the builtin ones
type MemoryKind: ::std::fmt::Debug + MayLeak + Eq + 'static;
type MemoryKind: ::std::fmt::Debug + ::std::fmt::Display + MayLeak + Eq + 'static;
/// Tag tracked alongside every pointer. This is used to implement "Stacked Borrows"
/// <https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html>.

@@ -9,6 +9,7 @@
use std::borrow::Cow;
use std::collections::VecDeque;
use std::convert::TryFrom;
use std::fmt;
use std::ptr;
use rustc_ast::ast::Mutability;
@@ -20,6 +21,7 @@ use super::{
AllocId, AllocMap, Allocation, AllocationExtra, CheckInAllocMsg, ErrorHandled, GlobalAlloc,
GlobalId, InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Scalar,
};
use crate::util::pretty;
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
@@ -45,6 +47,17 @@ impl<T: MayLeak> MayLeak for MemoryKind<T> {
}
}
impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
MemoryKind::Stack => write!(f, "stack variable"),
MemoryKind::Vtable => write!(f, "vtable"),
MemoryKind::CallerLocation => write!(f, "caller location"),
MemoryKind::Machine(m) => write!(f, "{}", m),
}
}
}
/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
#[derive(Debug, Copy, Clone)]
pub enum AllocCheck {
@@ -258,7 +271,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
if alloc_kind != kind {
throw_ub_format!(
"deallocating `{:?}` memory using `{:?}` deallocation operation",
"deallocating {} memory using {} deallocation operation",
alloc_kind,
kind
);
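
The switch from `{:?}` to `{}` in this message is what the new Display impl above enables: the diagnostic can say "stack variable" rather than the Debug form "Stack". A self-contained sketch of the difference (a local stand-in enum, not the interpreter's own type):

use std::fmt;

#[derive(Debug)]
enum MemoryKind {
    Stack,
}

impl fmt::Display for MemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
        }
    }
}

fn main() {
    let kind = MemoryKind::Stack;
    println!("deallocating {:?} memory", kind); // old: deallocating Stack memory
    println!("deallocating {} memory", kind);   // new: deallocating stack variable memory
}
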
@@ -644,81 +657,90 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
self.dump_allocs(vec![id]);
}
fn dump_alloc_helper<Tag, Extra>(
&self,
allocs_seen: &mut FxHashSet<AllocId>,
allocs_to_print: &mut VecDeque<AllocId>,
alloc: &Allocation<Tag, Extra>,
) {
for &(_, (_, target_id)) in alloc.relocations().iter() {
if allocs_seen.insert(target_id) {
allocs_to_print.push_back(target_id);
}
}
crate::util::pretty::write_allocation(self.tcx.tcx, alloc, &mut std::io::stderr(), "")
.unwrap();
}
/// Print a list of allocations and all allocations they point to, recursively.
/// This prints directly to stderr, ignoring RUSTC_LOG! It is up to the caller to
/// control for this.
pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
// Cannot be a closure because it is generic in `Tag`, `Extra`.
fn write_allocation_track_relocs<'tcx, Tag, Extra>(
tcx: TyCtxtAt<'tcx>,
allocs_to_print: &mut VecDeque<AllocId>,
alloc: &Allocation<Tag, Extra>,
) {
for &(_, target_id) in alloc.relocations().values() {
allocs_to_print.push_back(target_id);
}
pretty::write_allocation(tcx.tcx, alloc, &mut std::io::stderr()).unwrap();
}
allocs.sort();
allocs.dedup();
let mut allocs_to_print = VecDeque::from(allocs);
let mut allocs_seen = FxHashSet::default();
// `allocs_printed` contains all allocations that we have already printed.
let mut allocs_printed = FxHashSet::default();
while let Some(id) = allocs_to_print.pop_front() {
eprint!("Alloc {:<5}: ", id);
fn msg<Tag, Extra>(alloc: &Allocation<Tag, Extra>, extra: &str) {
eprintln!(
"({} bytes, alignment {}){}",
alloc.size.bytes(),
alloc.align.bytes(),
extra
)
};
if !allocs_printed.insert(id) {
// Already printed, so skip this.
continue;
}
// normal alloc?
match self.alloc_map.get_or(id, || Err(())) {
Ok((kind, alloc)) => {
match kind {
MemoryKind::Stack => msg(alloc, " (stack)"),
MemoryKind::Vtable => msg(alloc, " (vtable)"),
MemoryKind::CallerLocation => msg(alloc, " (caller_location)"),
MemoryKind::Machine(m) => msg(alloc, &format!(" ({:?})", m)),
};
self.dump_alloc_helper(&mut allocs_seen, &mut allocs_to_print, alloc);
}
Err(()) => {
// global alloc?
match self.tcx.alloc_map.lock().get(id) {
Some(GlobalAlloc::Memory(alloc)) => {
msg(alloc, " (immutable)");
self.dump_alloc_helper(&mut allocs_seen, &mut allocs_to_print, alloc);
}
Some(GlobalAlloc::Function(func)) => {
eprintln!("{}", func);
}
Some(GlobalAlloc::Static(did)) => {
eprintln!("{:?}", did);
eprint!("{}", id);
match self.alloc_map.get(id) {
Some(&(kind, ref alloc)) => {
// normal alloc
eprint!(" ({}, ", kind);
write_allocation_track_relocs(self.tcx, &mut allocs_to_print, alloc);
}
None => {
eprintln!("(deallocated)");
// global alloc
match self.tcx.alloc_map.lock().get(id) {
Some(GlobalAlloc::Memory(alloc)) => {
eprint!(" (unchanged global, ");
write_allocation_track_relocs(self.tcx, &mut allocs_to_print, alloc);
}
Some(GlobalAlloc::Function(func)) => {
eprint!(" (fn: {})", func);
}
Some(GlobalAlloc::Static(did)) => {
eprint!(" (static: {})", self.tcx.def_path_str(did));
}
None => {
eprint!(" (deallocated)");
}
}
}
};
}
eprintln!();
}
}
pub fn leak_report(&self) -> usize {
let leaks: Vec<_> = self
.alloc_map
.filter_map_collect(|&id, &(kind, _)| if kind.may_leak() { None } else { Some(id) });
// Collect the set of allocations that are *reachable* from `Global` allocations.
let reachable = {
let mut reachable = FxHashSet::default();
let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
let mut todo: Vec<_> = self.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
if Some(kind) == global_kind { Some(id) } else { None }
});
while let Some(id) = todo.pop() {
if reachable.insert(id) {
// This is a new allocation, add its relocations to `todo`.
if let Some((_, alloc)) = self.alloc_map.get(id) {
todo.extend(alloc.relocations().values().map(|&(_, target_id)| target_id));
}
}
}
reachable
};
// All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
});
let n = leaks.len();
if n > 0 {
eprintln!("### LEAK REPORT ###");
eprintln!("The following memory was leaked:");
self.dump_allocs(leaks);
}
n
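
The reachability pass is a plain worklist walk over the relocation graph. A minimal sketch with stand-in types (AllocId reduced to u64, and each allocation carrying an "is global root" flag plus its outgoing relocations; these are not Miri's real data structures):

use std::collections::{HashMap, HashSet};

type AllocId = u64;

/// Everything not reachable from a global root (and not otherwise allowed
/// to leak) is reported as leaked.
fn leaked(allocs: &HashMap<AllocId, (bool, Vec<AllocId>)>) -> Vec<AllocId> {
    let mut reachable: HashSet<AllocId> = HashSet::new();
    let mut todo: Vec<AllocId> =
        allocs.iter().filter(|(_, (root, _))| *root).map(|(&id, _)| id).collect();
    while let Some(id) = todo.pop() {
        if reachable.insert(id) {
            // Newly reached: queue everything this allocation points to.
            if let Some((_, relocs)) = allocs.get(&id) {
                todo.extend(relocs.iter().copied());
            }
        }
    }
    allocs.keys().filter(|id| !reachable.contains(id)).copied().collect()
}

fn main() {
    let mut allocs = HashMap::new();
    allocs.insert(1, (true, vec![2]));  // global root pointing at 2
    allocs.insert(2, (false, vec![])); // reachable via the root
    allocs.insert(3, (false, vec![])); // unreachable: reported as a leak
    assert_eq!(leaked(&allocs), vec![3]);
}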

@@ -455,11 +455,18 @@ fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibilit
fn merge_codegen_units<'tcx>(
tcx: TyCtxt<'tcx>,
initial_partitioning: &mut PreInliningPartitioning<'tcx>,
target_cgu_count: usize,
mut target_cgu_count: usize,
) {
assert!(target_cgu_count >= 1);
let codegen_units = &mut initial_partitioning.codegen_units;
if tcx.is_compiler_builtins(LOCAL_CRATE) {
// Compiler builtins require some degree of control over how mono items
// are partitioned into compilation units. Provide it by keeping the
// original partitioning when compiling the compiler builtins crate.
target_cgu_count = codegen_units.len();
}
// Note that at this point in time the `codegen_units` here may not be in a
// deterministic order (but we know they're deterministically the same set).
// We want this merging to produce a deterministic ordering of codegen units

@@ -567,26 +567,21 @@ pub fn write_allocations<'tcx>(
}
let mut visitor = CollectAllocIds(Default::default());
body.visit_with(&mut visitor);
// `seen` contains all seen allocations, including the ones we have *not* printed yet.
// The protocol is to first `insert` into `seen`, and only if that returns `true`
// then push to `todo`.
let mut seen = visitor.0;
let mut todo: Vec<_> = seen.iter().copied().collect();
while let Some(id) = todo.pop() {
let mut write_header_and_allocation =
let mut write_allocation_track_relocs =
|w: &mut dyn Write, alloc: &Allocation| -> io::Result<()> {
write!(w, "size: {}, align: {})", alloc.size.bytes(), alloc.align.bytes())?;
if alloc.size == Size::ZERO {
write!(w, " {{}}")?;
} else {
writeln!(w, " {{")?;
write_allocation(tcx, alloc, w, " ")?;
write!(w, "}}")?;
// `.rev()` because we are popping them from the back of the `todo` vector.
for id in alloc_ids_from_alloc(alloc).rev() {
if seen.insert(id) {
todo.push(id);
}
}
}
Ok(())
write_allocation(tcx, alloc, w)
};
write!(w, "\n{}", id)?;
let alloc = tcx.alloc_map.lock().get(id);
@@ -599,7 +594,7 @@ pub fn write_allocations<'tcx>(
match tcx.const_eval_poly(did) {
Ok(ConstValue::ByRef { alloc, .. }) => {
write!(w, " (static: {}, ", tcx.def_path_str(did))?;
write_header_and_allocation(w, alloc)?;
write_allocation_track_relocs(w, alloc)?;
}
Ok(_) => {
span_bug!(tcx.def_span(did), " static item without `ByRef` initializer")
@@ -616,15 +611,46 @@
}
Some(GlobalAlloc::Memory(alloc)) => {
write!(w, " (")?;
write_header_and_allocation(w, alloc)?
write_allocation_track_relocs(w, alloc)?
}
}
writeln!(w)?;
}
Ok(())
}
/// Dumps the size and metadata and content of an allocation to the given writer.
/// The expectation is that the caller first prints other relevant metadata, so the exact
/// format of this function is (*without* leading or trailing newline):
/// ```
/// size: {}, align: {}) {
/// <bytes>
/// }
/// ```
///
/// The byte format is similar to how hex editors print bytes. Each line starts with the address of
/// the start of the line, followed by all bytes in hex format (space separated).
/// If the allocation is small enough to fit into a single line, no start address is given.
/// After the hex dump, an ascii dump follows, replacing all unprintable characters (control
/// characters or characters whose value is larger than 127) with a `.`
/// This also prints relocations adequately.
pub fn write_allocation<Tag, Extra>(
tcx: TyCtxt<'tcx>,
alloc: &Allocation<Tag, Extra>,
w: &mut dyn Write,
) -> io::Result<()> {
write!(w, "size: {}, align: {})", alloc.size.bytes(), alloc.align.bytes())?;
if alloc.size == Size::ZERO {
// We are done.
return write!(w, " {{}}");
}
// Write allocation bytes.
writeln!(w, " {{")?;
write_allocation_bytes(tcx, alloc, w, " ")?;
write!(w, "}}")?;
Ok(())
}
fn write_allocation_endline(w: &mut dyn Write, ascii: &str) -> io::Result<()> {
for _ in 0..(BYTES_PER_LINE - ascii.chars().count()) {
write!(w, " ")?;
@@ -649,18 +675,10 @@ fn write_allocation_newline(
Ok(line_start)
}
/// Dumps the bytes of an allocation to the given writer. This also prints relocations instead of
/// the raw bytes where applicable.
/// The byte format is similar to how hex editors print bytes. Each line starts with the address of
/// the start of the line, followed by all bytes in hex format (space separated).
/// If the allocation is small enough to fit into a single line, no start address is given.
/// After the hex dump, an ascii dump follows, replacing all unprintable characters (control
/// characters or characters whose value is larger than 127) with a `.`
///
/// The `prefix` argument allows callers to add an arbitrary prefix before each line (even if there
/// is only one line). Note that your prefix should contain a trailing space as the lines are
/// printed directly after it.
pub fn write_allocation<Tag, Extra>(
fn write_allocation_bytes<Tag, Extra>(
tcx: TyCtxt<'tcx>,
alloc: &Allocation<Tag, Extra>,
w: &mut dyn Write,
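
Illustrative only (rustc's version also renders relocations and undefined bytes): one line in the hex-editor style the doc comment above describes, hex bytes first, then an ascii column where control characters and bytes above 127 become '.'. The separators and address width are this sketch's own choices, not rustc's.

use std::fmt::Write;

fn dump_line(offset: usize, bytes: &[u8]) -> String {
    let mut line = String::new();
    write!(line, "0x{:04x} |", offset).unwrap();
    for b in bytes {
        write!(line, " {:02x}", b).unwrap();
    }
    write!(line, " | ").unwrap();
    for &b in bytes {
        // Printable ASCII passes through; everything else becomes '.'.
        line.push(if (0x20..0x7f).contains(&b) { b as char } else { '.' });
    }
    line
}

fn main() {
    println!("{}", dump_line(0, b"rollup\0\xff"));
}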

@@ -612,7 +612,7 @@ fn receiver_is_dispatchable<'tcx>(
// FIXME(mikeyhew) this is a total hack. Once object_safe_for_dispatch is stabilized, we can
// replace this with `dyn Trait`
let unsized_self_ty: Ty<'tcx> =
tcx.mk_ty_param(::std::u32::MAX, Symbol::intern("RustaceansAreAwesome"));
tcx.mk_ty_param(u32::MAX, Symbol::intern("RustaceansAreAwesome"));
// `Receiver[Self => U]`
let unsized_receiver_ty =

@@ -3650,11 +3650,7 @@ struct ProvisionalEvaluation {
impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> {
fn default() -> Self {
Self {
dfn: Cell::new(0),
reached_depth: Cell::new(std::usize::MAX),
map: Default::default(),
}
Self { dfn: Cell::new(0), reached_depth: Cell::new(usize::MAX), map: Default::default() }
}
}
@@ -3753,7 +3749,7 @@ impl<'tcx> ProvisionalEvaluationCache<'tcx> {
op(fresh_trait_ref, eval.result);
}
self.reached_depth.set(std::usize::MAX);
self.reached_depth.set(usize::MAX);
}
}

@@ -2620,13 +2620,13 @@ fn check_link_ordinal(tcx: TyCtxt<'_>, attr: &ast::Attribute) -> Option<usize> {
_ => None,
};
if let Some(Lit { kind: LitKind::Int(ordinal, LitIntType::Unsuffixed), .. }) = sole_meta_list {
if *ordinal <= std::usize::MAX as u128 {
if *ordinal <= usize::MAX as u128 {
Some(*ordinal as usize)
} else {
let msg = format!("ordinal value in `link_ordinal` is too large: `{}`", &ordinal);
tcx.sess
.struct_span_err(attr.span, &msg)
.note("the value may not exceed `std::usize::MAX`")
.note("the value may not exceed `usize::MAX`")
.emit();
None
}

@@ -198,7 +198,7 @@ use std::num::FpCategory as Fp;
use std::ops::Index;
use std::str::FromStr;
use std::string;
use std::{char, f64, fmt, str};
use std::{char, fmt, str};
use crate::Encodable;

@@ -53,7 +53,7 @@ fn test_unit() {
#[test]
fn test_u8() {
let mut vec = vec![];
for i in ::std::u8::MIN..::std::u8::MAX {
for i in u8::MIN..u8::MAX {
vec.push(i);
}
check_round_trip(vec);
@@ -61,30 +61,30 @@ fn test_u8() {
#[test]
fn test_u16() {
for i in ::std::u16::MIN..::std::u16::MAX {
for i in u16::MIN..u16::MAX {
check_round_trip(vec![1, 2, 3, i, i, i]);
}
}
#[test]
fn test_u32() {
check_round_trip(vec![1, 2, 3, ::std::u32::MIN, 0, 1, ::std::u32::MAX, 2, 1]);
check_round_trip(vec![1, 2, 3, u32::MIN, 0, 1, u32::MAX, 2, 1]);
}
#[test]
fn test_u64() {
check_round_trip(vec![1, 2, 3, ::std::u64::MIN, 0, 1, ::std::u64::MAX, 2, 1]);
check_round_trip(vec![1, 2, 3, u64::MIN, 0, 1, u64::MAX, 2, 1]);
}
#[test]
fn test_usize() {
check_round_trip(vec![1, 2, 3, ::std::usize::MIN, 0, 1, ::std::usize::MAX, 2, 1]);
check_round_trip(vec![1, 2, 3, usize::MIN, 0, 1, usize::MAX, 2, 1]);
}
#[test]
fn test_i8() {
let mut vec = vec![];
for i in ::std::i8::MIN..::std::i8::MAX {
for i in i8::MIN..i8::MAX {
vec.push(i);
}
check_round_trip(vec);
@@ -92,24 +92,24 @@ fn test_i8() {
#[test]
fn test_i16() {
for i in ::std::i16::MIN..::std::i16::MAX {
for i in i16::MIN..i16::MAX {
check_round_trip(vec![-1, 2, -3, i, i, i, 2]);
}
}
#[test]
fn test_i32() {
check_round_trip(vec![-1, 2, -3, ::std::i32::MIN, 0, 1, ::std::i32::MAX, 2, 1]);
check_round_trip(vec![-1, 2, -3, i32::MIN, 0, 1, i32::MAX, 2, 1]);
}
#[test]
fn test_i64() {
check_round_trip(vec![-1, 2, -3, ::std::i64::MIN, 0, 1, ::std::i64::MAX, 2, 1]);
check_round_trip(vec![-1, 2, -3, i64::MIN, 0, 1, i64::MAX, 2, 1]);
}
#[test]
fn test_isize() {
check_round_trip(vec![-1, 2, -3, ::std::isize::MIN, 0, 1, ::std::isize::MAX, 2, 1]);
check_round_trip(vec![-1, 2, -3, isize::MIN, 0, 1, isize::MAX, 2, 1]);
}
#[test]

@@ -2617,7 +2617,6 @@ mod test_map {
use crate::cell::RefCell;
use rand::{thread_rng, Rng};
use realstd::collections::TryReserveError::*;
use realstd::usize;
// https://github.com/rust-lang/rust/issues/62301
fn _assert_hashmap_is_unwind_safe() {

@@ -1329,6 +1329,12 @@ impl ToOwned for CStr {
fn to_owned(&self) -> CString {
CString { inner: self.to_bytes_with_nul().into() }
}
fn clone_into(&self, target: &mut CString) {
let mut b = Vec::from(mem::take(&mut target.inner));
self.to_bytes_with_nul().clone_into(&mut b);
target.inner = b.into_boxed_slice();
}
}
#[stable(feature = "cstring_asref", since = "1.7.0")]
@@ -1510,6 +1516,17 @@ mod tests {
assert_eq!(boxed.to_bytes_with_nul(), &[0]);
}
#[test]
fn test_c_str_clone_into() {
let mut c_string = CString::new("lorem").unwrap();
let c_ptr = c_string.as_ptr();
let c_str = CStr::from_bytes_with_nul(b"ipsum\0").unwrap();
c_str.clone_into(&mut c_string);
assert_eq!(c_str, c_string.as_c_str());
// The exact same size shouldn't have needed to move its allocation
assert_eq!(c_ptr, c_string.as_ptr());
}
#[test]
fn into_rc() {
let orig: &[u8] = b"Hello, world!\0";

@@ -1120,8 +1120,7 @@ impl ToOwned for OsStr {
self.to_os_string()
}
fn clone_into(&self, target: &mut OsString) {
target.clear();
target.push(self);
self.inner.clone_into(&mut target.inner)
}
}

@@ -159,6 +159,10 @@ impl Slice {
Buf { inner: buf }
}
pub fn clone_into(&self, buf: &mut Buf) {
self.inner.clone_into(&mut buf.inner)
}
#[inline]
pub fn into_box(&self) -> Box<Slice> {
unsafe { mem::transmute(self.inner.into_box()) }

@@ -173,6 +173,10 @@ impl Slice {
Buf { inner: self.inner.to_vec() }
}
pub fn clone_into(&self, buf: &mut Buf) {
self.inner.clone_into(&mut buf.inner)
}
#[inline]
pub fn into_box(&self) -> Box<Slice> {
let boxed: Box<[u8]> = self.inner.into();

@@ -613,6 +613,10 @@ impl Wtf8 {
}
}
pub fn clone_into(&self, buf: &mut Wtf8Buf) {
self.bytes.clone_into(&mut buf.bytes)
}
/// Boxes this `Wtf8`.
#[inline]
pub fn into_box(&self) -> Box<Wtf8> {
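
End-to-end effect of the delegation chain in this PR (OsStr to the platform Slice/Buf, down to Wtf8 or raw bytes): cloning into an existing OsString reuses its heap buffer instead of clearing and re-pushing. A usage sketch, again assuming a toolchain where ToOwned::clone_into is usable:

#![feature(toowned_clone_into)]

use std::ffi::{OsStr, OsString};

fn main() {
    let src: &OsStr = OsStr::new("rollup");
    let mut dst = OsString::from("previous contents");
    src.clone_into(&mut dst); // overwrites in place, reusing dst's allocation
    assert_eq!(dst, OsString::from("rollup"));
}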

@@ -2,7 +2,6 @@ use super::*;
extern crate test;
use self::test::test::Bencher;
use std::f64;
use std::io;
use std::io::prelude::*;

@@ -1 +1 @@
Subproject commit abe96ca3b87fcca6aa1dfcefd40d8c8d92d2e673
Subproject commit 1a577bd78e84e357e29c5336ff8beb432873046b

@@ -0,0 +1,40 @@
// Verifies that during compiler_builtins compilation the codegen units are kept
// unmerged. Even when only a single codegen unit is requested with -Ccodegen-units=1.
//
// compile-flags: -Zprint-mono-items=eager -Ccodegen-units=1
#![compiler_builtins]
#![crate_type="lib"]
#![feature(compiler_builtins)]
mod atomics {
//~ MONO_ITEM fn compiler_builtins::atomics[0]::sync_1[0] @@ compiler_builtins-cgu.0[External]
#[no_mangle]
pub extern "C" fn sync_1() {}
//~ MONO_ITEM fn compiler_builtins::atomics[0]::sync_2[0] @@ compiler_builtins-cgu.0[External]
#[no_mangle]
pub extern "C" fn sync_2() {}
//~ MONO_ITEM fn compiler_builtins::atomics[0]::sync_3[0] @@ compiler_builtins-cgu.0[External]
#[no_mangle]
pub extern "C" fn sync_3() {}
}
mod x {
//~ MONO_ITEM fn compiler_builtins::x[0]::x[0] @@ compiler_builtins-cgu.1[External]
#[no_mangle]
pub extern "C" fn x() {}
}
mod y {
//~ MONO_ITEM fn compiler_builtins::y[0]::y[0] @@ compiler_builtins-cgu.2[External]
#[no_mangle]
pub extern "C" fn y() {}
}
mod z {
//~ MONO_ITEM fn compiler_builtins::z[0]::z[0] @@ compiler_builtins-cgu.3[External]
#[no_mangle]
pub extern "C" fn z() {}
}

@@ -12,7 +12,7 @@ error: ordinal value in `link_ordinal` is too large: `18446744073709551616`
LL | #[link_ordinal(18446744073709551616)]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: the value may not exceed `std::usize::MAX`
= note: the value may not exceed `usize::MAX`
error: aborting due to previous error