Auto merge of #70362 - TimDiekmann:alloc-overhaul, r=Amanieu

Overhaul of the `AllocRef` trait to match the allocator-wg's latest consensus; Take 2

GitHub won't let me reopen #69889, so I made a new PR.

In addition to #69889, this fixes the unsoundness of `RawVec::into_box` when using allocators that support over-allocating. It also uses `MemoryBlock` in `AllocRef` to unify the `_in_place` methods by passing `&mut MemoryBlock`. Additionally, `RawVec` now checks `size_of::<T>()` again and ignores every ZST; the internal capacity of `RawVec` is no longer used for ZSTs, as `into_box` now requires a length to be specified.
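For orientation, a minimal sketch of the new `AllocRef` surface as used in this diff (nightly-only, `#![feature(allocator_api)]`); `grow` and `shrink` additionally take a `ReallocPlacement`, as shown in the hunks below:

```rust
#![feature(allocator_api)]

use std::alloc::{handle_alloc_error, AllocInit, AllocRef, Global, Layout};

fn main() {
    let layout = Layout::array::<u32>(8).unwrap();
    // `alloc` now takes an `AllocInit` and returns a `MemoryBlock { ptr, size }`;
    // `size` may exceed `layout.size()` if the allocator over-allocates.
    let memory = Global
        .alloc(layout, AllocInit::Zeroed)
        .unwrap_or_else(|_| handle_alloc_error(layout));
    assert!(memory.size >= layout.size());
    unsafe { Global.dealloc(memory.ptr, layout) };
}
```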

r? @Amanieu

fixes rust-lang/wg-allocators#38
fixes rust-lang/wg-allocators#41
fixes rust-lang/wg-allocators#44
fixes rust-lang/wg-allocators#51
Committed by bors on 2020-04-02 06:08:35 +00:00 · commit 127a11a344
21 changed files with 1453 additions and 1627 deletions


@ -2,7 +2,7 @@
#![stable(feature = "alloc_module", since = "1.28.0")]
use core::intrinsics::{min_align_of_val, size_of_val};
use core::intrinsics::{self, min_align_of_val, size_of_val};
use core::ptr::{NonNull, Unique};
use core::usize;
@ -165,11 +165,19 @@ pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl AllocRef for Global {
#[inline]
fn alloc(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> {
if layout.size() == 0 {
Ok((layout.dangling(), 0))
} else {
unsafe { NonNull::new(alloc(layout)).ok_or(AllocErr).map(|p| (p, layout.size())) }
fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<MemoryBlock, AllocErr> {
unsafe {
let size = layout.size();
if size == 0 {
Ok(MemoryBlock { ptr: layout.dangling(), size: 0 })
} else {
let raw_ptr = match init {
AllocInit::Uninitialized => alloc(layout),
AllocInit::Zeroed => alloc_zeroed(layout),
};
let ptr = NonNull::new(raw_ptr).ok_or(AllocErr)?;
Ok(MemoryBlock { ptr, size })
}
}
}
@ -181,32 +189,71 @@ unsafe impl AllocRef for Global {
}
#[inline]
unsafe fn realloc(
unsafe fn grow(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
) -> Result<(NonNull<u8>, usize), AllocErr> {
match (layout.size(), new_size) {
(0, 0) => Ok((layout.dangling(), 0)),
(0, _) => self.alloc(Layout::from_size_align_unchecked(new_size, layout.align())),
(_, 0) => {
self.dealloc(ptr, layout);
Ok((layout.dangling(), 0))
placement: ReallocPlacement,
init: AllocInit,
) -> Result<MemoryBlock, AllocErr> {
let size = layout.size();
debug_assert!(
new_size >= size,
"`new_size` must be greater than or equal to `memory.size()`"
);
if size == new_size {
return Ok(MemoryBlock { ptr, size });
}
match placement {
ReallocPlacement::InPlace => Err(AllocErr),
ReallocPlacement::MayMove if layout.size() == 0 => {
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
self.alloc(new_layout, init)
}
ReallocPlacement::MayMove => {
// `realloc` probably checks for `new_size > size` or something similar.
intrinsics::assume(new_size > size);
let ptr = realloc(ptr.as_ptr(), layout, new_size);
let memory =
MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size };
init.init_offset(memory, size);
Ok(memory)
}
(_, _) => NonNull::new(realloc(ptr.as_ptr(), layout, new_size))
.ok_or(AllocErr)
.map(|p| (p, new_size)),
}
}
#[inline]
fn alloc_zeroed(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> {
if layout.size() == 0 {
Ok((layout.dangling(), 0))
} else {
unsafe {
NonNull::new(alloc_zeroed(layout)).ok_or(AllocErr).map(|p| (p, layout.size()))
unsafe fn shrink(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
placement: ReallocPlacement,
) -> Result<MemoryBlock, AllocErr> {
let size = layout.size();
debug_assert!(
new_size <= size,
"`new_size` must be smaller than or equal to `memory.size()`"
);
if size == new_size {
return Ok(MemoryBlock { ptr, size });
}
match placement {
ReallocPlacement::InPlace => Err(AllocErr),
ReallocPlacement::MayMove if new_size == 0 => {
self.dealloc(ptr, layout);
Ok(MemoryBlock { ptr: layout.dangling(), size: 0 })
}
ReallocPlacement::MayMove => {
// `realloc` probably checks for `new_size < size` or something similar.
intrinsics::assume(new_size < size);
let ptr = realloc(ptr.as_ptr(), layout, new_size);
Ok(MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size })
}
}
}
@ -218,14 +265,10 @@ unsafe impl AllocRef for Global {
#[lang = "exchange_malloc"]
#[inline]
unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
if size == 0 {
align as *mut u8
} else {
let layout = Layout::from_size_align_unchecked(size, align);
match Global.alloc(layout) {
Ok((ptr, _)) => ptr.as_ptr(),
Err(_) => handle_alloc_error(layout),
}
let layout = Layout::from_size_align_unchecked(size, align);
match Global.alloc(layout, AllocInit::Uninitialized) {
Ok(memory) => memory.ptr.as_ptr(),
Err(_) => handle_alloc_error(layout),
}
}
@ -239,11 +282,8 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) {
let size = size_of_val(ptr.as_ref());
let align = min_align_of_val(ptr.as_ref());
// We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary.
if size != 0 {
let layout = Layout::from_size_align_unchecked(size, align);
Global.dealloc(ptr.cast().into(), layout);
}
let layout = Layout::from_size_align_unchecked(size, align);
Global.dealloc(ptr.cast().into(), layout)
}
/// Abort on memory allocation error or failure.


@ -8,16 +8,17 @@ use test::Bencher;
fn allocate_zeroed() {
unsafe {
let layout = Layout::from_size_align(1024, 1).unwrap();
let (ptr, _) =
Global.alloc_zeroed(layout.clone()).unwrap_or_else(|_| handle_alloc_error(layout));
let memory = Global
.alloc(layout.clone(), AllocInit::Zeroed)
.unwrap_or_else(|_| handle_alloc_error(layout));
let mut i = ptr.cast::<u8>().as_ptr();
let mut i = memory.ptr.cast::<u8>().as_ptr();
let end = i.add(layout.size());
while i < end {
assert_eq!(*i, 0);
i = i.offset(1);
}
Global.dealloc(ptr, layout);
Global.dealloc(memory.ptr, layout);
}
}


@ -143,10 +143,9 @@ use core::ops::{
};
use core::pin::Pin;
use core::ptr::{self, NonNull, Unique};
use core::slice;
use core::task::{Context, Poll};
use crate::alloc::{self, AllocRef, Global};
use crate::alloc::{self, AllocInit, AllocRef, Global};
use crate::raw_vec::RawVec;
use crate::str::from_boxed_utf8_unchecked;
use crate::vec::Vec;
@ -196,14 +195,12 @@ impl<T> Box<T> {
#[unstable(feature = "new_uninit", issue = "63291")]
pub fn new_uninit() -> Box<mem::MaybeUninit<T>> {
let layout = alloc::Layout::new::<mem::MaybeUninit<T>>();
unsafe {
let ptr = if layout.size() == 0 {
NonNull::dangling()
} else {
Global.alloc(layout).unwrap_or_else(|_| alloc::handle_alloc_error(layout)).0.cast()
};
Box::from_raw(ptr.as_ptr())
}
let ptr = Global
.alloc(layout, AllocInit::Uninitialized)
.unwrap_or_else(|_| alloc::handle_alloc_error(layout))
.ptr
.cast();
unsafe { Box::from_raw(ptr.as_ptr()) }
}
/// Constructs a new `Box` with uninitialized contents, with the memory
@ -226,11 +223,13 @@ impl<T> Box<T> {
/// [zeroed]: ../../std/mem/union.MaybeUninit.html#method.zeroed
#[unstable(feature = "new_uninit", issue = "63291")]
pub fn new_zeroed() -> Box<mem::MaybeUninit<T>> {
unsafe {
let mut uninit = Self::new_uninit();
ptr::write_bytes::<T>(uninit.as_mut_ptr(), 0, 1);
uninit
}
let layout = alloc::Layout::new::<mem::MaybeUninit<T>>();
let ptr = Global
.alloc(layout, AllocInit::Zeroed)
.unwrap_or_else(|_| alloc::handle_alloc_error(layout))
.ptr
.cast();
unsafe { Box::from_raw(ptr.as_ptr()) }
}
/// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
@ -265,15 +264,7 @@ impl<T> Box<[T]> {
/// ```
#[unstable(feature = "new_uninit", issue = "63291")]
pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
let layout = alloc::Layout::array::<mem::MaybeUninit<T>>(len).unwrap();
unsafe {
let ptr = if layout.size() == 0 {
NonNull::dangling()
} else {
Global.alloc(layout).unwrap_or_else(|_| alloc::handle_alloc_error(layout)).0.cast()
};
Box::from_raw(slice::from_raw_parts_mut(ptr.as_ptr(), len))
}
unsafe { RawVec::with_capacity(len).into_box(len) }
}
}
@ -778,7 +769,7 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
let buf = RawVec::with_capacity(len);
unsafe {
ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
buf.into_box()
buf.into_box(slice.len()).assume_init()
}
}
}


@ -1142,7 +1142,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
(*left_node.as_leaf_mut()).len += right_len as u16 + 1;
if self.node.height > 1 {
let layout = if self.node.height > 1 {
ptr::copy_nonoverlapping(
right_node.cast_unchecked().as_internal().edges.as_ptr(),
left_node
@ -1159,10 +1159,11 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
.correct_parent_link();
}
Global.dealloc(right_node.node.cast(), Layout::new::<InternalNode<K, V>>());
Layout::new::<InternalNode<K, V>>()
} else {
Global.dealloc(right_node.node.cast(), Layout::new::<LeafNode<K, V>>());
}
Layout::new::<LeafNode<K, V>>()
};
Global.dealloc(right_node.node.cast(), layout);
Handle::new_edge(self.node, self.idx)
}


@ -100,6 +100,7 @@
#![feature(lang_items)]
#![feature(libc)]
#![cfg_attr(not(bootstrap), feature(negative_impls))]
#![feature(new_uninit)]
#![feature(nll)]
#![feature(optin_builtin_traits)]
#![feature(pattern)]


@ -1,13 +1,19 @@
#![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")]
#![doc(hidden)]
use core::alloc::MemoryBlock;
use core::cmp;
use core::mem;
use core::mem::{self, MaybeUninit};
use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::ptr::{NonNull, Unique};
use core::slice;
use crate::alloc::{handle_alloc_error, AllocErr, AllocRef, Global, Layout};
use crate::alloc::{
handle_alloc_error, AllocErr,
AllocInit::{self, *},
AllocRef, Global, Layout,
ReallocPlacement::{self, *},
};
use crate::boxed::Box;
use crate::collections::TryReserveError::{self, *};
@ -21,81 +27,26 @@ mod tests;
///
/// * Produces `Unique::empty()` on zero-sized types.
/// * Produces `Unique::empty()` on zero-length allocations.
/// * Avoids freeing `Unique::empty()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
/// * Guards against overflowing your length.
/// * Aborts on OOM or calls `handle_alloc_error` as applicable.
/// * Avoids freeing `Unique::empty()`.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to use the largest available capacity.
///
/// This type does not in anyway inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that a `RawVec` always forces its capacity to be `usize::MAX` for zero-sized types.
/// This enables you to use capacity-growing logic catch the overflows in your length
/// that might occur with zero-sized types.
///
/// The above means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length. However, `with_capacity`,
/// `shrink_to_fit`, and `from_box` will actually set `RawVec`'s private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
/// Note that the excess of a zero-sized types is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub struct RawVec<T, A: AllocRef = Global> {
ptr: Unique<T>,
cap: usize,
a: A,
}
impl<T, A: AllocRef> RawVec<T, A> {
/// Like `new`, but parameterized over the choice of allocator for
/// the returned `RawVec`.
pub const fn new_in(a: A) -> Self {
let cap = if mem::size_of::<T>() == 0 { core::usize::MAX } else { 0 };
// `Unique::empty()` doubles as "unallocated" and "zero-sized allocation".
RawVec { ptr: Unique::empty(), cap, a }
}
/// Like `with_capacity`, but parameterized over the choice of
/// allocator for the returned `RawVec`.
#[inline]
pub fn with_capacity_in(capacity: usize, a: A) -> Self {
RawVec::allocate_in(capacity, false, a)
}
/// Like `with_capacity_zeroed`, but parameterized over the choice
/// of allocator for the returned `RawVec`.
#[inline]
pub fn with_capacity_zeroed_in(capacity: usize, a: A) -> Self {
RawVec::allocate_in(capacity, true, a)
}
fn allocate_in(mut capacity: usize, zeroed: bool, mut a: A) -> Self {
let elem_size = mem::size_of::<T>();
let alloc_size = capacity.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow());
alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());
// Handles ZSTs and `capacity == 0` alike.
let ptr = if alloc_size == 0 {
NonNull::<T>::dangling()
} else {
let align = mem::align_of::<T>();
let layout = Layout::from_size_align(alloc_size, align).unwrap();
let result = if zeroed { a.alloc_zeroed(layout) } else { a.alloc(layout) };
match result {
Ok((ptr, size)) => {
capacity = size / elem_size;
ptr.cast()
}
Err(_) => handle_alloc_error(layout),
}
};
RawVec { ptr: ptr.into(), cap: capacity, a }
}
alloc: A,
}
impl<T> RawVec<T, Global> {
@ -138,39 +89,26 @@ impl<T> RawVec<T, Global> {
/// Aborts on OOM.
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
RawVec::allocate_in(capacity, false, Global)
Self::with_capacity_in(capacity, Global)
}
/// Like `with_capacity`, but guarantees the buffer is zeroed.
#[inline]
pub fn with_capacity_zeroed(capacity: usize) -> Self {
RawVec::allocate_in(capacity, true, Global)
Self::with_capacity_zeroed_in(capacity, Global)
}
}
impl<T, A: AllocRef> RawVec<T, A> {
/// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
///
/// # Undefined Behavior
///
/// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`.
/// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems).
/// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed.
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self {
RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a }
}
}
impl<T> RawVec<T, Global> {
/// Reconstitutes a `RawVec` from a pointer and capacity.
///
/// # Undefined Behavior
/// # Safety
///
/// The `ptr` must be allocated (on the system heap), and with the given `capacity`.
/// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems).
/// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
/// systems). ZST vectors may have a capacity up to `usize::MAX`.
/// If the `ptr` and `capacity` come from a `RawVec`, then this is guaranteed.
#[inline]
pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self {
RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a: Global }
Self::from_raw_parts_in(ptr, capacity, Global)
}
/// Converts a `Box<[T]>` into a `RawVec<T>`.
@ -184,6 +122,56 @@ impl<T> RawVec<T, Global> {
}
impl<T, A: AllocRef> RawVec<T, A> {
/// Like `new`, but parameterized over the choice of allocator for
/// the returned `RawVec`.
pub const fn new_in(alloc: A) -> Self {
// `cap: 0` means "unallocated". zero-sized types are ignored.
Self { ptr: Unique::empty(), cap: 0, alloc }
}
/// Like `with_capacity`, but parameterized over the choice of
/// allocator for the returned `RawVec`.
#[inline]
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
Self::allocate_in(capacity, Uninitialized, alloc)
}
/// Like `with_capacity_zeroed`, but parameterized over the choice
/// of allocator for the returned `RawVec`.
#[inline]
pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
Self::allocate_in(capacity, Zeroed, alloc)
}
fn allocate_in(capacity: usize, init: AllocInit, mut alloc: A) -> Self {
if mem::size_of::<T>() == 0 {
Self::new_in(alloc)
} else {
let layout = Layout::array::<T>(capacity).unwrap_or_else(|_| capacity_overflow());
alloc_guard(layout.size()).unwrap_or_else(|_| capacity_overflow());
let memory = alloc.alloc(layout, init).unwrap_or_else(|_| handle_alloc_error(layout));
Self {
ptr: memory.ptr.cast().into(),
cap: Self::capacity_from_bytes(memory.size),
alloc,
}
}
}
/// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
///
/// # Safety
///
/// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`.
/// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
/// systems). ZST vectors may have a capacity up to `usize::MAX`.
/// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed.
#[inline]
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self {
Self { ptr: Unique::new_unchecked(ptr), cap: capacity, alloc: a }
}
/// Gets a raw pointer to the start of the allocation. Note that this is
/// `Unique::empty()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
/// be careful.
@ -196,21 +184,21 @@ impl<T, A: AllocRef> RawVec<T, A> {
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
if mem::size_of::<T>() == 0 { !0 } else { self.cap }
if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
}
/// Returns a shared reference to the allocator backing this `RawVec`.
pub fn alloc(&self) -> &A {
&self.a
&self.alloc
}
/// Returns a mutable reference to the allocator backing this `RawVec`.
pub fn alloc_mut(&mut self) -> &mut A {
&mut self.a
&mut self.alloc
}
fn current_layout(&self) -> Option<Layout> {
if self.cap == 0 {
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
if mem::size_of::<T>() == 0 || self.cap == 0 {
None
} else {
// We have an allocated chunk of memory, so we can bypass runtime
@ -218,7 +206,8 @@ impl<T, A: AllocRef> RawVec<T, A> {
unsafe {
let align = mem::align_of::<T>();
let size = mem::size_of::<T>() * self.cap;
Some(Layout::from_size_align_unchecked(size, align))
let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
}
}
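As an aside, the ZST behaviour documented above is observable through `Vec`, which sits on top of `RawVec`; a quick illustration (not part of the diff):

```rust
fn main() {
    // Zero-sized types never allocate, so `RawVec::capacity` (and therefore
    // `Vec::capacity`) reports `usize::MAX` for them.
    let zst: Vec<()> = Vec::new();
    assert_eq!(zst.capacity(), usize::MAX);

    // Sized types start out unallocated with capacity 0.
    let sized: Vec<u32> = Vec::new();
    assert_eq!(sized.capacity(), 0);
}
```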
@ -274,50 +263,10 @@ impl<T, A: AllocRef> RawVec<T, A> {
#[inline(never)]
#[cold]
pub fn double(&mut self) {
unsafe {
let elem_size = mem::size_of::<T>();
// Since we set the capacity to `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
assert!(elem_size != 0, "capacity overflow");
let (ptr, new_cap) = match self.current_layout() {
Some(cur) => {
// Since we guarantee that we never allocate more than
// `isize::MAX` bytes, `elem_size * self.cap <= isize::MAX` as
// a precondition, so this can't overflow. Additionally the
// alignment will never be too large as to "not be
// satisfiable", so `Layout::from_size_align` will always
// return `Some`.
//
// TL;DR, we bypass runtime checks due to dynamic assertions
// in this module, allowing us to use
// `from_size_align_unchecked`.
let new_cap = 2 * self.cap;
let new_size = new_cap * elem_size;
alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
let ptr_res = self.a.realloc(NonNull::from(self.ptr).cast(), cur, new_size);
match ptr_res {
Ok((ptr, new_size)) => (ptr, new_size / elem_size),
Err(_) => handle_alloc_error(Layout::from_size_align_unchecked(
new_size,
cur.align(),
)),
}
}
None => {
// Skip to 4 because tiny `Vec`'s are dumb; but not if that
// would cause overflow.
let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
let layout = Layout::array::<T>(new_cap).unwrap();
match self.a.alloc(layout) {
Ok((ptr, new_size)) => (ptr, new_size / elem_size),
Err(_) => handle_alloc_error(layout),
}
}
};
self.ptr = ptr.cast().into();
self.cap = new_cap;
match self.grow(Double, MayMove, Uninitialized) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocError { layout, .. }) => handle_alloc_error(layout),
Ok(()) => { /* yay */ }
}
}
@ -336,99 +285,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
#[inline(never)]
#[cold]
pub fn double_in_place(&mut self) -> bool {
unsafe {
let elem_size = mem::size_of::<T>();
let old_layout = match self.current_layout() {
Some(layout) => layout,
None => return false, // nothing to double
};
// Since we set the capacity to `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
assert!(elem_size != 0, "capacity overflow");
// Since we guarantee that we never allocate more than `isize::MAX`
// bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
// this can't overflow.
//
// Similarly to with `double` above, we can go straight to
// `Layout::from_size_align_unchecked` as we know this won't
// overflow and the alignment is sufficiently small.
let new_cap = 2 * self.cap;
let new_size = new_cap * elem_size;
alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
match self.a.grow_in_place(NonNull::from(self.ptr).cast(), old_layout, new_size) {
Ok(_) => {
// We can't directly divide `size`.
self.cap = new_cap;
true
}
Err(_) => false,
}
}
}
/// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
pub fn try_reserve_exact(
&mut self,
used_capacity: usize,
needed_extra_capacity: usize,
) -> Result<(), TryReserveError> {
self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Exact)
}
/// Ensures that the buffer contains at least enough space to hold
/// `used_capacity + needed_extra_capacity` elements. If it doesn't already,
/// will reallocate the minimum possible amount of memory necessary.
/// Generally this will be exactly the amount of memory necessary,
/// but in principle the allocator is free to give back more than
/// we asked for.
///
/// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behavior of this function may break.
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM.
pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Exact) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocError { .. }) => unreachable!(),
Ok(()) => { /* yay */ }
}
}
/// Calculates the buffer's new size given that it'll hold `used_capacity +
/// needed_extra_capacity` elements. This logic is used in amortized reserve methods.
/// Returns `(new_capacity, new_alloc_size)`.
fn amortized_new_size(
&self,
used_capacity: usize,
needed_extra_capacity: usize,
) -> Result<usize, TryReserveError> {
// Nothing we can really do about these checks, sadly.
let required_cap =
used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
// Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
let double_cap = self.cap * 2;
// `double_cap` guarantees exponential growth.
Ok(cmp::max(double_cap, required_cap))
}
/// The same as `reserve`, but returns on errors instead of panicking or aborting.
pub fn try_reserve(
&mut self,
used_capacity: usize,
needed_extra_capacity: usize,
) -> Result<(), TryReserveError> {
self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Amortized)
self.grow(Double, InPlace, Uninitialized).is_ok()
}
/// Ensures that the buffer contains at least enough space to hold
@ -484,12 +341,26 @@ impl<T, A: AllocRef> RawVec<T, A> {
/// # }
/// ```
pub fn reserve(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Amortized) {
match self.try_reserve(used_capacity, needed_extra_capacity) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocError { .. }) => unreachable!(),
Err(AllocError { layout, .. }) => handle_alloc_error(layout),
Ok(()) => { /* yay */ }
}
}
/// The same as `reserve`, but returns on errors instead of panicking or aborting.
pub fn try_reserve(
&mut self,
used_capacity: usize,
needed_extra_capacity: usize,
) -> Result<(), TryReserveError> {
if self.needs_to_grow(used_capacity, needed_extra_capacity) {
self.grow(Amortized { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
} else {
Ok(())
}
}
/// Attempts to ensure that the buffer contains at least enough space to hold
/// `used_capacity + needed_extra_capacity` elements. If it doesn't already have
/// enough capacity, will reallocate in place enough space plus comfortable slack
@ -508,45 +379,54 @@ impl<T, A: AllocRef> RawVec<T, A> {
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
pub fn reserve_in_place(&mut self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
unsafe {
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
// This is more readable than putting this in one line:
// `!self.needs_to_grow(...) || self.grow(...).is_ok()`
if self.needs_to_grow(used_capacity, needed_extra_capacity) {
self.grow(Amortized { used_capacity, needed_extra_capacity }, InPlace, Uninitialized)
.is_ok()
} else {
true
}
}
// Don't actually need any more capacity. If the current `cap` is 0, we can't
// reallocate in place.
// Wrapping in case they give a bad `used_capacity`
let old_layout = match self.current_layout() {
Some(layout) => layout,
None => return false,
};
if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity {
return false;
}
/// Ensures that the buffer contains at least enough space to hold
/// `used_capacity + needed_extra_capacity` elements. If it doesn't already,
/// will reallocate the minimum possible amount of memory necessary.
/// Generally this will be exactly the amount of memory necessary,
/// but in principle the allocator is free to give back more than
/// we asked for.
///
/// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behavior of this function may break.
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM.
pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
match self.try_reserve_exact(used_capacity, needed_extra_capacity) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocError { layout, .. }) => handle_alloc_error(layout),
Ok(()) => { /* yay */ }
}
}
let new_cap = self
.amortized_new_size(used_capacity, needed_extra_capacity)
.unwrap_or_else(|_| capacity_overflow());
// Here, `cap < used_capacity + needed_extra_capacity <= new_cap`
// (regardless of whether `self.cap - used_capacity` wrapped).
// Therefore, we can safely call `grow_in_place`.
let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
// FIXME: may crash and burn on over-reserve
alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow());
match self.a.grow_in_place(
NonNull::from(self.ptr).cast(),
old_layout,
new_layout.size(),
) {
Ok(_) => {
self.cap = new_cap;
true
}
Err(_) => false,
}
/// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
pub fn try_reserve_exact(
&mut self,
used_capacity: usize,
needed_extra_capacity: usize,
) -> Result<(), TryReserveError> {
if self.needs_to_grow(used_capacity, needed_extra_capacity) {
self.grow(Exact { used_capacity, needed_extra_capacity }, MayMove, Uninitialized)
} else {
Ok(())
}
}
@ -561,166 +441,157 @@ impl<T, A: AllocRef> RawVec<T, A> {
///
/// Aborts on OOM.
pub fn shrink_to_fit(&mut self, amount: usize) {
let elem_size = mem::size_of::<T>();
// Set the `cap` because they might be about to promote to a `Box<[T]>`
if elem_size == 0 {
self.cap = amount;
return;
}
// This check is my waterloo; it's the only thing `Vec` wouldn't have to do.
assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
if amount == 0 {
// We want to create a new zero-length vector within the
// same allocator. We use `ptr::write` to avoid an
// erroneous attempt to drop the contents, and we use
// `ptr::read` to sidestep condition against destructuring
// types that implement Drop.
unsafe {
let a = ptr::read(&self.a as *const A);
self.dealloc_buffer();
ptr::write(self, RawVec::new_in(a));
}
} else if self.cap != amount {
unsafe {
// We know here that our `amount` is greater than zero. This
// implies, via the assert above, that capacity is also greater
// than zero, which means that we've got a current layout that
// "fits"
//
// We also know that `self.cap` is greater than `amount`, and
// consequently we don't need runtime checks for creating either
// layout.
let old_size = elem_size * self.cap;
let new_size = elem_size * amount;
let align = mem::align_of::<T>();
let old_layout = Layout::from_size_align_unchecked(old_size, align);
match self.a.realloc(NonNull::from(self.ptr).cast(), old_layout, new_size) {
Ok((ptr, _)) => self.ptr = ptr.cast().into(),
Err(_) => {
handle_alloc_error(Layout::from_size_align_unchecked(new_size, align))
}
}
}
self.cap = amount;
match self.shrink(amount, MayMove) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocError { layout, .. }) => handle_alloc_error(layout),
Ok(()) => { /* yay */ }
}
}
}
enum Fallibility {
Fallible,
Infallible,
#[derive(Copy, Clone)]
enum Strategy {
Double,
Amortized { used_capacity: usize, needed_extra_capacity: usize },
Exact { used_capacity: usize, needed_extra_capacity: usize },
}
use Fallibility::*;
enum ReserveStrategy {
Exact,
Amortized,
}
use ReserveStrategy::*;
use Strategy::*;
impl<T, A: AllocRef> RawVec<T, A> {
fn reserve_internal(
/// Returns if the buffer needs to grow to fulfill the needed extra capacity.
/// Mainly used to make inlining reserve-calls possible without inlining `grow`.
fn needs_to_grow(&self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
needed_extra_capacity > self.capacity().wrapping_sub(used_capacity)
}
fn capacity_from_bytes(excess: usize) -> usize {
debug_assert_ne!(mem::size_of::<T>(), 0);
excess / mem::size_of::<T>()
}
fn set_memory(&mut self, memory: MemoryBlock) {
self.ptr = memory.ptr.cast().into();
self.cap = Self::capacity_from_bytes(memory.size);
}
/// Single method to handle all possibilities of growing the buffer.
fn grow(
&mut self,
used_capacity: usize,
needed_extra_capacity: usize,
fallibility: Fallibility,
strategy: ReserveStrategy,
strategy: Strategy,
placement: ReallocPlacement,
init: AllocInit,
) -> Result<(), TryReserveError> {
let elem_size = mem::size_of::<T>();
unsafe {
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
// Don't actually need any more capacity.
// Wrapping in case they gave a bad `used_capacity`.
if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity {
return Ok(());
}
// Nothing we can really do about these checks, sadly.
let new_cap = match strategy {
Exact => {
used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?
}
Amortized => self.amortized_new_size(used_capacity, needed_extra_capacity)?,
};
let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
alloc_guard(new_layout.size())?;
let res = match self.current_layout() {
Some(layout) => {
debug_assert!(new_layout.align() == layout.align());
self.a.realloc(NonNull::from(self.ptr).cast(), layout, new_layout.size())
}
None => self.a.alloc(new_layout),
};
let (ptr, new_cap) = match (res, fallibility) {
(Err(AllocErr), Infallible) => handle_alloc_error(new_layout),
(Err(AllocErr), Fallible) => {
return Err(TryReserveError::AllocError {
layout: new_layout,
non_exhaustive: (),
});
}
(Ok((ptr, new_size)), _) => (ptr, new_size / elem_size),
};
self.ptr = ptr.cast().into();
self.cap = new_cap;
Ok(())
if elem_size == 0 {
// Since we return a capacity of `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow);
}
let new_layout = match strategy {
Double => unsafe {
// Since we guarantee that we never allocate more than `isize::MAX` bytes,
// `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow.
// Additionally the alignment will never be too large as to "not be satisfiable",
// so `Layout::from_size_align` will always return `Some`.
//
// TL;DR, we bypass runtime checks due to dynamic assertions in this module,
// allowing us to use `from_size_align_unchecked`.
let cap = if self.cap == 0 {
// Skip to 4 because tiny `Vec`'s are dumb; but not if that would cause overflow.
if elem_size > usize::MAX / 8 { 1 } else { 4 }
} else {
self.cap * 2
};
Layout::from_size_align_unchecked(cap * elem_size, mem::align_of::<T>())
},
Amortized { used_capacity, needed_extra_capacity } => {
// Nothing we can really do about these checks, sadly.
let required_cap =
used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
// Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
let double_cap = self.cap * 2;
// `double_cap` guarantees exponential growth.
let cap = cmp::max(double_cap, required_cap);
Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
}
Exact { used_capacity, needed_extra_capacity } => {
let cap =
used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?
}
};
alloc_guard(new_layout.size())?;
let memory = if let Some((ptr, old_layout)) = self.current_memory() {
debug_assert_eq!(old_layout.align(), new_layout.align());
unsafe {
self.alloc
.grow(ptr, old_layout, new_layout.size(), placement, init)
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
}
} else {
match placement {
MayMove => self.alloc.alloc(new_layout, init),
InPlace => Err(AllocErr),
}
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
};
self.set_memory(memory);
Ok(())
}
fn shrink(
&mut self,
amount: usize,
placement: ReallocPlacement,
) -> Result<(), TryReserveError> {
assert!(amount <= self.capacity(), "Tried to shrink to a larger capacity");
let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
let new_size = amount * mem::size_of::<T>();
let memory = unsafe {
self.alloc.shrink(ptr, layout, new_size, placement).map_err(|_| {
TryReserveError::AllocError {
layout: Layout::from_size_align_unchecked(new_size, layout.align()),
non_exhaustive: (),
}
})?
};
self.set_memory(memory);
Ok(())
}
}
impl<T> RawVec<T, Global> {
/// Converts the entire buffer into `Box<[T]>`.
/// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
///
/// Note that this will correctly reconstitute any `cap` changes
/// that may have been performed. (See description of type for details.)
///
/// # Undefined Behavior
/// # Safety
///
/// All elements of `RawVec<T, Global>` must be initialized. Notice that
/// the rules around uninitialized boxed values are not finalized yet,
/// but until they are, it is advisable to avoid them.
pub unsafe fn into_box(self) -> Box<[T]> {
/// `shrink_to_fit(len)` must be called immediately prior to calling this function. This
/// implies, that `len` must be smaller than or equal to `self.capacity()`.
pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>]> {
debug_assert!(
len <= self.capacity(),
"`len` must be smaller than or equal to `self.capacity()`"
);
// NOTE: not calling `capacity()` here; actually using the real `cap` field!
let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
let output: Box<[T]> = Box::from_raw(slice);
let slice = slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit<T>, len);
let output = Box::from_raw(slice);
mem::forget(self);
output
}
}
impl<T, A: AllocRef> RawVec<T, A> {
/// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
pub unsafe fn dealloc_buffer(&mut self) {
let elem_size = mem::size_of::<T>();
if elem_size != 0 {
if let Some(layout) = self.current_layout() {
self.a.dealloc(NonNull::from(self.ptr).cast(), layout);
}
}
}
}
unsafe impl<#[may_dangle] T, A: AllocRef> Drop for RawVec<T, A> {
/// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
fn drop(&mut self) {
unsafe {
self.dealloc_buffer();
if let Some((ptr, layout)) = self.current_memory() {
unsafe { self.alloc.dealloc(ptr, layout) }
}
}
}
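Tying the `into_box` pieces together, a minimal sketch of the new contract (illustrative, not part of the diff; assumes a nightly toolchain where `alloc::raw_vec::RawVec` is reachable via the unstable `raw_vec_internals` feature, plus `new_uninit` for `assume_init`), mirroring the `Vec::into_boxed_slice` change further down:

```rust
#![feature(raw_vec_internals, new_uninit)]
extern crate alloc;

use alloc::raw_vec::RawVec;

fn first_n_zeros(len: usize) -> Box<[u32]> {
    // May over-allocate: `capacity()` can end up larger than `len`.
    let mut buf: RawVec<u32> = RawVec::with_capacity(len);
    unsafe {
        // Initialize exactly `len` elements.
        std::ptr::write_bytes(buf.ptr(), 0, len);
        // `shrink_to_fit(len)` must be called immediately before `into_box(len)`.
        buf.shrink_to_fit(len);
        // `into_box` now yields `Box<[MaybeUninit<u32>]>`; the caller asserts
        // initialization explicitly.
        buf.into_box(len).assume_init()
    }
}
```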


@ -12,6 +12,7 @@ fn allocator_param() {
//
// Instead, this just checks that the `RawVec` methods do at
// least go through the Allocator API when it reserves
// storage.
// A dumb allocator that consumes a fixed amount of fuel
@ -20,12 +21,12 @@ fn allocator_param() {
fuel: usize,
}
unsafe impl AllocRef for BoundedAlloc {
fn alloc(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> {
fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<MemoryBlock, AllocErr> {
let size = layout.size();
if size > self.fuel {
return Err(AllocErr);
}
match Global.alloc(layout) {
match Global.alloc(layout, init) {
ok @ Ok(_) => {
self.fuel -= size;
ok
@ -40,9 +41,9 @@ fn allocator_param() {
let a = BoundedAlloc { fuel: 500 };
let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
assert_eq!(v.a.fuel, 450);
assert_eq!(v.alloc.fuel, 450);
v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
assert_eq!(v.a.fuel, 250);
assert_eq!(v.alloc.fuel, 250);
}
#[test]


@ -252,7 +252,7 @@ use core::ptr::{self, NonNull};
use core::slice::{self, from_raw_parts_mut};
use core::usize;
use crate::alloc::{box_free, handle_alloc_error, AllocRef, Global, Layout};
use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
use crate::string::String;
use crate::vec::Vec;
@ -936,10 +936,12 @@ impl<T: ?Sized> Rc<T> {
let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
// Allocate for the layout.
let (mem, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout));
let mem = Global
.alloc(layout, AllocInit::Uninitialized)
.unwrap_or_else(|_| handle_alloc_error(layout));
// Initialize the RcBox
let inner = mem_to_rcbox(mem.as_ptr());
let inner = mem_to_rcbox(mem.ptr.as_ptr());
debug_assert_eq!(Layout::for_value(&*inner), layout);
ptr::write(&mut (*inner).strong, Cell::new(1));


@ -25,7 +25,7 @@ use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
use core::{isize, usize};
use crate::alloc::{box_free, handle_alloc_error, AllocRef, Global, Layout};
use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
use crate::boxed::Box;
use crate::rc::is_dangling;
use crate::string::String;
@ -814,10 +814,12 @@ impl<T: ?Sized> Arc<T> {
// reference (see #54908).
let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
let (mem, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout));
let mem = Global
.alloc(layout, AllocInit::Uninitialized)
.unwrap_or_else(|_| handle_alloc_error(layout));
// Initialize the ArcInner
let inner = mem_to_arcinner(mem.as_ptr());
let inner = mem_to_arcinner(mem.ptr.as_ptr());
debug_assert_eq!(Layout::for_value(&*inner), layout);
ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));


@ -1,4 +1,4 @@
use std::alloc::{AllocRef, Global, Layout, System};
use std::alloc::{AllocInit, AllocRef, Global, Layout, System};
/// Issue #45955 and #62251.
#[test]
@ -20,7 +20,13 @@ fn check_overalign_requests<T: AllocRef>(mut allocator: T) {
unsafe {
let pointers: Vec<_> = (0..iterations)
.map(|_| {
allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap().0
allocator
.alloc(
Layout::from_size_align(size, align).unwrap(),
AllocInit::Uninitialized,
)
.unwrap()
.ptr
})
.collect();
for &ptr in &pointers {


@ -679,8 +679,9 @@ impl<T> Vec<T> {
unsafe {
self.shrink_to_fit();
let buf = ptr::read(&self.buf);
let len = self.len();
mem::forget(self);
buf.into_box()
buf.into_box(len).assume_init()
}
}

File diff suppressed because it is too large.

src/libcore/alloc/global.rs (new file, 198 lines)

@ -0,0 +1,198 @@
use crate::alloc::Layout;
use crate::cmp;
use crate::ptr;
/// A memory allocator that can be registered as the standard library's default
/// through the `#[global_allocator]` attribute.
///
/// Some of the methods require that a memory block be *currently
/// allocated* via an allocator. This means that:
///
/// * the starting address for that memory block was previously
/// returned by a previous call to an allocation method
/// such as `alloc`, and
///
/// * the memory block has not been subsequently deallocated, where
/// blocks are deallocated either by being passed to a deallocation
/// method such as `dealloc` or by being
/// passed to a reallocation method that returns a non-null pointer.
///
///
/// # Example
///
/// ```no_run
/// use std::alloc::{GlobalAlloc, Layout, alloc};
/// use std::ptr::null_mut;
///
/// struct MyAllocator;
///
/// unsafe impl GlobalAlloc for MyAllocator {
/// unsafe fn alloc(&self, _layout: Layout) -> *mut u8 { null_mut() }
/// unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
/// }
///
/// #[global_allocator]
/// static A: MyAllocator = MyAllocator;
///
/// fn main() {
/// unsafe {
/// assert!(alloc(Layout::new::<u32>()).is_null())
/// }
/// }
/// ```
///
/// # Safety
///
/// The `GlobalAlloc` trait is an `unsafe` trait for a number of reasons, and
/// implementors must ensure that they adhere to these contracts:
///
/// * It's undefined behavior if global allocators unwind. This restriction may
/// be lifted in the future, but currently a panic from any of these
/// functions may lead to memory unsafety.
///
/// * `Layout` queries and calculations in general must be correct. Callers of
/// this trait are allowed to rely on the contracts defined on each method,
/// and implementors must ensure such contracts remain true.
#[stable(feature = "global_alloc", since = "1.28.0")]
pub unsafe trait GlobalAlloc {
/// Allocate memory as described by the given `layout`.
///
/// Returns a pointer to newly-allocated memory,
/// or null to indicate allocation failure.
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure that `layout` has non-zero size.
///
/// (Extension subtraits might provide more specific bounds on
/// behavior, e.g., guarantee a sentinel address or a null pointer
/// in response to a zero-size allocation request.)
///
/// The allocated block of memory may or may not be initialized.
///
/// # Errors
///
/// Returning a null pointer indicates that either memory is exhausted
/// or `layout` does not meet this allocator's size or alignment constraints.
///
/// Implementations are encouraged to return null on memory
/// exhaustion rather than aborting, but this is not
/// a strict requirement. (Specifically: it is *legal* to
/// implement this trait atop an underlying native allocation
/// library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an
/// allocation error are encouraged to call the [`handle_alloc_error`] function,
/// rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
#[stable(feature = "global_alloc", since = "1.28.0")]
unsafe fn alloc(&self, layout: Layout) -> *mut u8;
/// Deallocate the block of memory at the given `ptr` pointer with the given `layout`.
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure all of the following:
///
/// * `ptr` must denote a block of memory currently allocated via
/// this allocator,
///
/// * `layout` must be the same layout that was used
/// to allocate that block of memory,
#[stable(feature = "global_alloc", since = "1.28.0")]
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout);
/// Behaves like `alloc`, but also ensures that the contents
/// are set to zero before being returned.
///
/// # Safety
///
/// This function is unsafe for the same reasons that `alloc` is.
/// However the allocated block of memory is guaranteed to be initialized.
///
/// # Errors
///
/// Returning a null pointer indicates that either memory is exhausted
/// or `layout` does not meet allocator's size or alignment constraints,
/// just as in `alloc`.
///
/// Clients wishing to abort computation in response to an
/// allocation error are encouraged to call the [`handle_alloc_error`] function,
/// rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
#[stable(feature = "global_alloc", since = "1.28.0")]
unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
let size = layout.size();
let ptr = self.alloc(layout);
if !ptr.is_null() {
ptr::write_bytes(ptr, 0, size);
}
ptr
}
/// Shrink or grow a block of memory to the given `new_size`.
/// The block is described by the given `ptr` pointer and `layout`.
///
/// If this returns a non-null pointer, then ownership of the memory block
/// referenced by `ptr` has been transferred to this allocator.
/// The memory may or may not have been deallocated,
/// and should be considered unusable (unless of course it was
/// transferred back to the caller again via the return value of
/// this method). The new memory block is allocated with `layout`, but
/// with the `size` updated to `new_size`.
///
/// If this method returns null, then ownership of the memory
/// block has not been transferred to this allocator, and the
/// contents of the memory block are unaltered.
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure all of the following:
///
/// * `ptr` must be currently allocated via this allocator,
///
/// * `layout` must be the same layout that was used
/// to allocate that block of memory,
///
/// * `new_size` must be greater than zero.
///
/// * `new_size`, when rounded up to the nearest multiple of `layout.align()`,
/// must not overflow (i.e., the rounded value must be less than `usize::MAX`).
///
/// (Extension subtraits might provide more specific bounds on
/// behavior, e.g., guarantee a sentinel address or a null pointer
/// in response to a zero-size allocation request.)
///
/// # Errors
///
/// Returns null if the new layout does not meet the size
/// and alignment constraints of the allocator, or if reallocation
/// otherwise fails.
///
/// Implementations are encouraged to return null on memory
/// exhaustion rather than panicking or aborting, but this is not
/// a strict requirement. (Specifically: it is *legal* to
/// implement this trait atop an underlying native allocation
/// library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to a
/// reallocation error are encouraged to call the [`handle_alloc_error`] function,
/// rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
#[stable(feature = "global_alloc", since = "1.28.0")]
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
let new_ptr = self.alloc(new_layout);
if !new_ptr.is_null() {
ptr::copy_nonoverlapping(ptr, new_ptr, cmp::min(layout.size(), new_size));
self.dealloc(ptr, layout);
}
new_ptr
}
}
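For comparison with the doc example above (whose allocator always returns null), a working `GlobalAlloc` more commonly forwards to another allocator; a minimal sketch on stable Rust, delegating to `System`:

```rust
use std::alloc::{GlobalAlloc, Layout, System};

// A pass-through global allocator that forwards every request to the
// system allocator (`System` itself already implements `GlobalAlloc`).
struct Forwarding;

unsafe impl GlobalAlloc for Forwarding {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        System.alloc(layout)
    }
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        System.dealloc(ptr, layout)
    }
}

#[global_allocator]
static GLOBAL: Forwarding = Forwarding;

fn main() {
    // All heap allocations in the program (Vec, Box, String, ...) now go
    // through `Forwarding::alloc` / `Forwarding::dealloc`.
    let v = vec![1u8, 2, 3];
    assert_eq!(v.len(), 3);
}
```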

src/libcore/alloc/layout.rs (new file, 346 lines)

@ -0,0 +1,346 @@
// ignore-tidy-undocumented-unsafe
use crate::cmp;
use crate::fmt;
use crate::mem;
use crate::num::NonZeroUsize;
use crate::ptr::NonNull;
const fn size_align<T>() -> (usize, usize) {
(mem::size_of::<T>(), mem::align_of::<T>())
}
/// Layout of a block of memory.
///
/// An instance of `Layout` describes a particular layout of memory.
/// You build a `Layout` up as an input to give to an allocator.
///
/// All layouts have an associated size and a power-of-two alignment.
///
/// (Note that layouts are *not* required to have non-zero size,
/// even though `GlobalAlloc` requires that all memory requests
/// be non-zero in size. A caller must either ensure that conditions
/// like this are met, use specific allocators with looser
/// requirements, or use the more lenient `AllocRef` interface.)
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[lang = "alloc_layout"]
pub struct Layout {
// size of the requested block of memory, measured in bytes.
size_: usize,
// alignment of the requested block of memory, measured in bytes.
// we ensure that this is always a power-of-two, because API's
// like `posix_memalign` require it and it is a reasonable
// constraint to impose on Layout constructors.
//
// (However, we do not analogously require `align >= sizeof(void*)`,
// even though that is *also* a requirement of `posix_memalign`.)
align_: NonZeroUsize,
}
impl Layout {
/// Constructs a `Layout` from a given `size` and `align`,
/// or returns `LayoutErr` if any of the following conditions
/// are not met:
///
/// * `align` must not be zero,
///
/// * `align` must be a power of two,
///
/// * `size`, when rounded up to the nearest multiple of `align`,
/// must not overflow (i.e., the rounded value must be less than
/// or equal to `usize::MAX`).
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
pub const fn from_size_align(size: usize, align: usize) -> Result<Self, LayoutErr> {
if !align.is_power_of_two() {
return Err(LayoutErr { private: () });
}
// (power-of-two implies align != 0.)
// Rounded up size is:
// size_rounded_up = (size + align - 1) & !(align - 1);
//
// We know from above that align != 0. If adding (align - 1)
// does not overflow, then rounding up will be fine.
//
// Conversely, &-masking with !(align - 1) will subtract off
// only low-order-bits. Thus if overflow occurs with the sum,
// the &-mask cannot subtract enough to undo that overflow.
//
// Above implies that checking for summation overflow is both
// necessary and sufficient.
if size > usize::MAX - (align - 1) {
return Err(LayoutErr { private: () });
}
unsafe { Ok(Layout::from_size_align_unchecked(size, align)) }
}
/// Creates a layout, bypassing all checks.
///
/// # Safety
///
/// This function is unsafe as it does not verify the preconditions from
/// [`Layout::from_size_align`](#method.from_size_align).
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[rustc_const_stable(feature = "alloc_layout", since = "1.28.0")]
#[inline]
pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
Layout { size_: size, align_: NonZeroUsize::new_unchecked(align) }
}
/// The minimum size in bytes for a memory block of this layout.
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
pub const fn size(&self) -> usize {
self.size_
}
/// The minimum byte alignment for a memory block of this layout.
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
pub const fn align(&self) -> usize {
self.align_.get()
}
/// Constructs a `Layout` suitable for holding a value of type `T`.
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[rustc_const_stable(feature = "alloc_layout_const_new", since = "1.42.0")]
#[inline]
pub const fn new<T>() -> Self {
let (size, align) = size_align::<T>();
// Note that the align is guaranteed by rustc to be a power of two and
// the size+align combo is guaranteed to fit in our address space. As a
// result use the unchecked constructor here to avoid inserting code
// that panics if it isn't optimized well enough.
unsafe { Layout::from_size_align_unchecked(size, align) }
}
/// Produces layout describing a record that could be used to
/// allocate backing structure for `T` (which could be a trait
/// or other unsized type like a slice).
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[inline]
pub fn for_value<T: ?Sized>(t: &T) -> Self {
let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
// See rationale in `new` for why this is using an unsafe variant below
debug_assert!(Layout::from_size_align(size, align).is_ok());
unsafe { Layout::from_size_align_unchecked(size, align) }
}
/// Creates a `NonNull` that is dangling, but well-aligned for this Layout.
///
/// Note that the pointer value may potentially represent a valid pointer,
/// which means this must not be used as a "not yet initialized"
/// sentinel value. Types that lazily allocate must track initialization by
/// some other means.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
#[inline]
pub const fn dangling(&self) -> NonNull<u8> {
// align is non-zero and a power of two
unsafe { NonNull::new_unchecked(self.align() as *mut u8) }
}
/// Creates a layout describing the record that can hold a value
/// of the same layout as `self`, but that also is aligned to
/// alignment `align` (measured in bytes).
///
/// If `self` already meets the prescribed alignment, then returns
/// `self`.
///
/// Note that this method does not add any padding to the overall
/// size, regardless of whether the returned layout has a different
/// alignment. In other words, if `K` has size 16, `K.align_to(32)`
/// will *still* have size 16.
///
/// Returns an error if the combination of `self.size()` and the given
/// `align` violates the conditions listed in
/// [`Layout::from_size_align`](#method.from_size_align).
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
#[inline]
pub fn align_to(&self, align: usize) -> Result<Self, LayoutErr> {
Layout::from_size_align(self.size(), cmp::max(self.align(), align))
}
/// Returns the amount of padding we must insert after `self`
/// to ensure that the following address will satisfy `align`
/// (measured in bytes).
///
/// e.g., if `self.size()` is 9, then `self.padding_needed_for(4)`
/// returns 3, because that is the minimum number of bytes of
/// padding required to get a 4-aligned address (assuming that the
/// corresponding memory block starts at a 4-aligned address).
///
/// The return value of this function has no meaning if `align` is
/// not a power-of-two.
///
/// Note that the utility of the returned value requires `align`
/// to be less than or equal to the alignment of the starting
/// address for the whole allocated block of memory. One way to
/// satisfy this constraint is to ensure `align <= self.align()`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
pub const fn padding_needed_for(&self, align: usize) -> usize {
let len = self.size();
// Rounded up value is:
// len_rounded_up = (len + align - 1) & !(align - 1);
// and then we return the padding difference: `len_rounded_up - len`.
//
// We use modular arithmetic throughout:
//
// 1. align is guaranteed to be > 0, so align - 1 is always
// valid.
//
// 2. `len + align - 1` can overflow by at most `align - 1`,
// so the &-mask with `!(align - 1)` will ensure that in the
// case of overflow, `len_rounded_up` will itself be 0.
// Thus the returned padding, when added to `len`, yields 0,
// which trivially satisfies the alignment `align`.
//
// (Of course, attempts to allocate blocks of memory whose
// size and padding overflow in the above manner should cause
// the allocator to yield an error anyway.)
let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
len_rounded_up.wrapping_sub(len)
}
/// Creates a layout by rounding the size of this layout up to a multiple
/// of the layout's alignment.
///
/// This is equivalent to adding the result of `padding_needed_for`
/// to the layout's current size.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
#[inline]
pub fn pad_to_align(&self) -> Layout {
let pad = self.padding_needed_for(self.align());
// This cannot overflow. Quoting from the invariant of Layout:
// > `size`, when rounded up to the nearest multiple of `align`,
// > must not overflow (i.e., the rounded value must be less than
// > `usize::MAX`)
let new_size = self.size() + pad;
Layout::from_size_align(new_size, self.align()).unwrap()
}
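// Illustrative sketch, not part of the patch: `pad_to_align` rounds the size up
// to the next multiple of the alignment, here from 10 bytes to 12.
// `pad_to_align_demo` is a made-up name for this example.
fn pad_to_align_demo() {
    let layout = core::alloc::Layout::from_size_align(10, 4).unwrap();
    let padded = layout.pad_to_align();
    assert_eq!(padded.size(), 12);
    assert_eq!(padded.align(), 4);
}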
/// Creates a layout describing the record for `n` instances of
/// `self`, with a suitable amount of padding between each to
/// ensure that each instance is given its requested size and
/// alignment. On success, returns `(k, offs)` where `k` is the
/// layout of the array and `offs` is the distance between the starts
/// of consecutive elements in the array.
///
/// On arithmetic overflow, returns `LayoutErr`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
#[inline]
pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutErr> {
// This cannot overflow. Quoting from the invariant of Layout:
// > `size`, when rounded up to the nearest multiple of `align`,
// > must not overflow (i.e., the rounded value must be less than
// > `usize::MAX`)
let padded_size = self.size() + self.padding_needed_for(self.align());
let alloc_size = padded_size.checked_mul(n).ok_or(LayoutErr { private: () })?;
unsafe {
// self.align is already known to be valid and alloc_size has been
// padded already.
Ok((Layout::from_size_align_unchecked(alloc_size, self.align()), padded_size))
}
}
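// Illustrative sketch, not part of the patch: `repeat` yields the array layout
// together with the stride between elements, which already includes padding.
// Assumes the unstable `alloc_layout_extra` feature; `repeat_demo` is made up.
fn repeat_demo() {
    let elem = core::alloc::Layout::from_size_align(5, 4).unwrap();
    let (array, stride) = elem.repeat(3).unwrap();
    assert_eq!(stride, 8);        // 5 rounded up to the 4-byte alignment
    assert_eq!(array.size(), 24); // 3 * stride
    assert_eq!(array.align(), 4);
}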
/// Creates a layout describing the record for `self` followed by
/// `next`, including any necessary padding to ensure that `next`
/// will be properly aligned. Note that the resulting layout will
/// satisfy the alignment properties of both `self` and `next`.
///
/// The resulting layout will be the same as that of a C struct containing
/// two fields with the layouts of `self` and `next`, in that order.
///
/// Returns `Ok((k, offset))`, where `k` is the layout of the concatenated
/// record and `offset` is the relative location, in bytes, of the
/// start of the `next` embedded within the concatenated record
/// (assuming that the record itself starts at offset 0).
///
/// On arithmetic overflow, returns `LayoutErr`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
#[inline]
pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutErr> {
let new_align = cmp::max(self.align(), next.align());
let pad = self.padding_needed_for(next.align());
let offset = self.size().checked_add(pad).ok_or(LayoutErr { private: () })?;
let new_size = offset.checked_add(next.size()).ok_or(LayoutErr { private: () })?;
let layout = Layout::from_size_align(new_size, new_align)?;
Ok((layout, offset))
}
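// Illustrative sketch, not part of the patch: building the layout of a `repr(C)`
// struct `{ a: u8, b: u32 }` by hand with `extend`, then padding the tail with
// `pad_to_align`, as the docs above describe. `extend_demo` is a made-up name.
fn extend_demo() {
    use core::alloc::Layout;
    let (partial, offset_b) = Layout::new::<u8>().extend(Layout::new::<u32>()).unwrap();
    assert_eq!(offset_b, 4); // 3 bytes of padding are inserted before `b`
    let full = partial.pad_to_align();
    assert_eq!((full.size(), full.align()), (8, 4));
}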
/// Creates a layout describing the record for `n` instances of
/// `self`, with no padding between each instance.
///
/// Note that, unlike `repeat`, `repeat_packed` does not guarantee
/// that the repeated instances of `self` will be properly
/// aligned, even if a given instance of `self` is properly
/// aligned. In other words, if the layout returned by
/// `repeat_packed` is used to allocate an array, it is not
/// guaranteed that all elements in the array will be properly
/// aligned.
///
/// On arithmetic overflow, returns `LayoutErr`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
#[inline]
pub fn repeat_packed(&self, n: usize) -> Result<Self, LayoutErr> {
let size = self.size().checked_mul(n).ok_or(LayoutErr { private: () })?;
Layout::from_size_align(size, self.align())
}
/// Creates a layout describing the record for `self` followed by
/// `next` with no additional padding between the two. Since no
/// padding is inserted, the alignment of `next` is irrelevant,
/// and is not incorporated *at all* into the resulting layout.
///
/// On arithmetic overflow, returns `LayoutErr`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
#[inline]
pub fn extend_packed(&self, next: Self) -> Result<Self, LayoutErr> {
let new_size = self.size().checked_add(next.size()).ok_or(LayoutErr { private: () })?;
Layout::from_size_align(new_size, self.align())
}
/// Creates a layout describing the record for a `[T; n]`.
///
/// On arithmetic overflow, returns `LayoutErr`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
#[inline]
pub fn array<T>(n: usize) -> Result<Self, LayoutErr> {
Layout::new::<T>().repeat(n).map(|(k, offs)| {
debug_assert!(offs == mem::size_of::<T>());
k
})
}
}
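// Illustrative sketch, not part of the patch: `Layout::array` is the `repeat`
// helper specialized to `[T; n]`, so the element stride equals `size_of::<T>()`.
// Assumes the unstable `alloc_layout_extra` feature; `array_demo` is made up.
fn array_demo() {
    let layout = core::alloc::Layout::array::<u32>(8).unwrap();
    assert_eq!(layout.size(), 32);
    assert_eq!(layout.align(), core::mem::align_of::<u32>());
}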
/// The parameters given to `Layout::from_size_align`
/// or some other `Layout` constructor
/// do not satisfy its documented constraints.
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct LayoutErr {
private: (),
}
// (we need this for downstream impl of trait Error)
#[stable(feature = "alloc_layout", since = "1.28.0")]
impl fmt::Display for LayoutErr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("invalid parameters to Layout::from_size_align")
}
}

362
src/libcore/alloc/mod.rs Normal file
View File

@ -0,0 +1,362 @@
//! Memory allocation APIs
#![stable(feature = "alloc_module", since = "1.28.0")]
mod global;
mod layout;
#[stable(feature = "global_alloc", since = "1.28.0")]
pub use self::global::GlobalAlloc;
#[stable(feature = "alloc_layout", since = "1.28.0")]
pub use self::layout::{Layout, LayoutErr};
use crate::fmt;
use crate::ptr::{self, NonNull};
/// The `AllocErr` error indicates an allocation failure
/// that may be due to resource exhaustion or to
/// something wrong when combining the given input arguments with this
/// allocator.
#[unstable(feature = "allocator_api", issue = "32838")]
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct AllocErr;
// (we need this for downstream impl of trait Error)
#[unstable(feature = "allocator_api", issue = "32838")]
impl fmt::Display for AllocErr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("memory allocation failed")
}
}
/// A desired initial state for allocated memory.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[unstable(feature = "allocator_api", issue = "32838")]
pub enum AllocInit {
/// The contents of the new memory are undefined.
///
/// Reading uninitialized memory is Undefined Behavior; it must be initialized before use.
Uninitialized,
/// The new memory is guaranteed to be zeroed.
Zeroed,
}
impl AllocInit {
/// Initializes the specified memory block.
///
/// This behaves like calling [`AllocInit::init_offset(memory, 0)`][off].
///
/// [off]: AllocInit::init_offset
///
/// # Safety
///
/// * `memory.ptr` must be [valid] for writes of `memory.size` bytes.
///
/// [valid]: ../../core/ptr/index.html#safety
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
pub unsafe fn init(self, memory: MemoryBlock) {
self.init_offset(memory, 0)
}
/// Initializes the memory block as specified by `self` at the given `offset`.
///
/// This is a no-op for [`AllocInit::Uninitialized`][] and writes zeroes for
/// [`AllocInit::Zeroed`][] at `memory.ptr + offset` until `memory.ptr + memory.size`.
///
/// # Safety
///
/// * `memory.ptr` must be [valid] for writes of `memory.size` bytes.
/// * `offset` must be smaller than or equal to `memory.size`
///
/// [valid]: ../../core/ptr/index.html#safety
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
pub unsafe fn init_offset(self, memory: MemoryBlock, offset: usize) {
debug_assert!(
offset <= memory.size,
"`offset` must be smaller than or equal to `memory.size`"
);
match self {
AllocInit::Uninitialized => (),
AllocInit::Zeroed => {
memory.ptr.as_ptr().add(offset).write_bytes(0, memory.size - offset)
}
}
}
}
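// Illustrative sketch, not part of the patch: a `grow` implementation can use
// `init_offset` to initialize only the freshly added tail of a reallocated
// block. `old_size` names the size of the block before growing and, like the
// helper name `init_new_tail`, is an assumption of this example.
unsafe fn init_new_tail(init: AllocInit, memory: MemoryBlock, old_size: usize) {
    // No-op for `Uninitialized`; zeroes `old_size..memory.size` for `Zeroed`.
    init.init_offset(memory, old_size);
}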
/// Represents a block of allocated memory returned by an allocator.
#[derive(Debug, Copy, Clone)]
#[unstable(feature = "allocator_api", issue = "32838")]
pub struct MemoryBlock {
pub ptr: NonNull<u8>,
pub size: usize,
}
/// A placement constraint when growing or shrinking an existing allocation.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[unstable(feature = "allocator_api", issue = "32838")]
pub enum ReallocPlacement {
/// The allocator is allowed to move the allocation to a different memory address.
// FIXME(wg-allocators#46): Add a section to the module documentation "What is a legal
// allocator" and link it at "valid location".
///
/// If the allocation _does_ move, it's the responsibility of the allocator
/// to also move the data from the previous location to the new location.
MayMove,
/// The address of the new memory must not change.
///
/// If the allocation would have to be moved to a new location to fit, the
/// reallocation request will fail.
InPlace,
}
/// An implementation of `AllocRef` can allocate, grow, shrink, and deallocate arbitrary blocks of
/// data described via [`Layout`][].
///
/// `AllocRef` is designed to be implemented on ZSTs, references, or smart pointers because an
/// allocator like `MyAlloc([u8; N])` cannot be moved without updating the pointers to the
/// allocated memory.
///
/// Unlike [`GlobalAlloc`][], zero-sized allocations are allowed in `AllocRef`. If an underlying
/// allocator does not support this (like jemalloc) or returns a null pointer (such as
/// `libc::malloc`), this case must be caught by the implementation.
///
/// ### Currently allocated memory
///
/// Some of the methods require that a memory block be *currently allocated* via an allocator. This
/// means that:
///
/// * the starting address for that memory block was previously returned by [`alloc`], [`grow`], or
/// [`shrink`], and
///
/// * the memory block has not been subsequently deallocated. A block is deallocated either
/// directly by being passed to [`dealloc`], or indirectly by being passed to a call to
/// [`grow`] or [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the
/// passed pointer remains valid.
///
/// [`alloc`]: AllocRef::alloc
/// [`grow`]: AllocRef::grow
/// [`shrink`]: AllocRef::shrink
/// [`dealloc`]: AllocRef::dealloc
///
/// ### Memory fitting
///
/// Some of the methods require that a layout *fit* a memory block. What it means for a layout to
/// "fit" a memory block (or equivalently, for a memory block to "fit" a layout) is that the
/// following conditions must hold:
///
/// * The block must be allocated with the same alignment as [`layout.align()`], and
///
/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where:
/// - `min` is the size of the layout most recently used to allocate the block, and
/// - `max` is the latest actual size returned from [`alloc`], [`grow`], or [`shrink`].
///
/// [`layout.align()`]: Layout::align
/// [`layout.size()`]: Layout::size
///
/// # Safety
///
/// * Memory blocks returned from an allocator must point to valid memory and retain their validity
/// until the instance and all of its clones are dropped, and
///
/// * cloning or moving the allocator must not invalidate memory blocks returned from this
/// allocator. A cloned allocator must behave like the same allocator.
///
/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other
/// method of the allocator.
///
/// [*currently allocated*]: #currently-allocated-memory
#[unstable(feature = "allocator_api", issue = "32838")]
pub unsafe trait AllocRef {
/// On success, returns a memory block meeting the size and alignment guarantees of `layout`.
///
/// The returned block may have a larger size than specified by `layout.size()` and is
/// initialized as specified by [`init`], all the way up to the returned size of the block.
///
/// [`init`]: AllocInit
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or `layout` does not meet
/// the allocator's size or alignment constraints.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<MemoryBlock, AllocErr>;
/// Deallocates the memory denoted by `ptr`.
///
/// # Safety
///
/// * `ptr` must denote a memory block returned by this allocator, and
/// * `layout` must fit that memory block (see the "Memory fitting" section above).
unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout);
/// Attempts to extend the memory block.
///
/// Returns a new memory block containing a pointer and the actual size of the allocated
/// block. The pointer is suitable for holding data described by a new layout with `layout`'s
/// alignment and a size given by `new_size`. To accomplish this, the allocator may extend the
/// allocation referenced by `ptr` to fit the new layout. If the [`placement`] is
/// [`InPlace`], the returned pointer is guaranteed to be the same as the passed `ptr`.
///
/// If `ReallocPlacement::MayMove` is used then ownership of the memory block referenced by `ptr`
/// is transferred to this allocator. The memory may or may not be freed, and should be
/// considered unusable (unless of course it is transferred back to the caller again via the
/// return value of this method).
///
/// If this method returns `Err`, then ownership of the memory block has not been transferred to
/// this allocator, and the contents of the memory block are unaltered.
///
/// The memory block will contain the following contents after a successful call to `grow`:
/// * Bytes `0..layout.size()` are preserved from the original allocation.
/// * Bytes `layout.size()..old_size` will either be preserved or initialized according to
/// [`init`], depending on the allocator implementation. `old_size` refers to the size of
/// the `MemoryBlock` prior to the `grow` call, which may be larger than the size
/// that was originally requested when it was allocated.
/// * Bytes `old_size..new_size` are initialized according to [`init`]. `new_size` refers to
/// the size of the `MemoryBlock` returned by the `grow` call.
///
/// [`InPlace`]: ReallocPlacement::InPlace
/// [`placement`]: ReallocPlacement
/// [`init`]: AllocInit
///
/// # Safety
///
/// * `ptr` must be [*currently allocated*] via this allocator,
/// * `layout` must [*fit*] the `ptr`. (The `new_size` argument need not fit it.)
// We can't require that `new_size` is strictly greater than `memory.size` because of ZSTs.
// An alternative would be
// * `new_size` must be strictly greater than `memory.size`, or both are zero
/// * `new_size` must be greater than or equal to `layout.size()`
/// * `new_size`, when rounded up to the nearest multiple of `layout.align()`, must not overflow
/// (i.e., the rounded value must be less than `usize::MAX`).
///
/// [*currently allocated*]: #currently-allocated-memory
/// [*fit*]: #memory-fitting
///
/// # Errors
///
/// Returns `Err` if the new layout does not meet the allocator's size and alignment
/// constraints, or if growing otherwise fails.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
unsafe fn grow(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
placement: ReallocPlacement,
init: AllocInit,
) -> Result<MemoryBlock, AllocErr> {
match placement {
ReallocPlacement::InPlace => Err(AllocErr),
ReallocPlacement::MayMove => {
let size = layout.size();
debug_assert!(
new_size >= size,
"`new_size` must be greater than or equal to `layout.size()`"
);
if new_size == size {
return Ok(MemoryBlock { ptr, size });
}
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
let new_memory = self.alloc(new_layout, init)?;
ptr::copy_nonoverlapping(ptr.as_ptr(), new_memory.ptr.as_ptr(), size);
self.dealloc(ptr, layout);
Ok(new_memory)
}
}
}
/// Attempts to shrink the memory block.
///
/// Returns a new memory block containing a pointer and the actual size of the allocated
/// block. The pointer is suitable for holding data described by a new layout with `layout`'s
/// alignment and a size given by `new_size`. To accomplish this, the allocator may shrink the
/// allocation referenced by `ptr` to fit the new layout. If the [`placement`] is
/// [`InPlace`], the returned pointer is guaranteed to be the same as the passed `ptr`.
///
/// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
/// transferred to this allocator. The memory may or may not have been freed, and should be
/// considered unusable unless it was transferred back to the caller again via the
/// return value of this method.
///
/// If this method returns `Err`, then ownership of the memory block has not been transferred to
/// this allocator, and the contents of the memory block are unaltered.
///
/// The behavior of how the allocator tries to shrink the memory is specified by [`placement`].
///
/// [`InPlace`]: ReallocPlacement::InPlace
/// [`placement`]: ReallocPlacement
///
/// # Safety
///
/// * `ptr` must be [*currently allocated*] via this allocator,
/// * `layout` must [*fit*] the `ptr`. (The `new_size` argument need not fit it.)
// We can't require that `new_size` is strictly smaller than `memory.size` because of ZSTs.
// An alternative would be
// * `new_size` must be strictly smaller than `memory.size`, or both are zero
/// * `new_size` must be smaller than or equal to `layout.size()`
///
/// [*currently allocated*]: #currently-allocated-memory
/// [*fit*]: #memory-fitting
///
/// # Errors
///
/// Returns `Err` if the new layout does not meet the allocator's size and alignment
/// constraints, or if shrinking otherwise fails.
///
/// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
/// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
/// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an allocation error are encouraged to
/// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
unsafe fn shrink(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
placement: ReallocPlacement,
) -> Result<MemoryBlock, AllocErr> {
match placement {
ReallocPlacement::InPlace => Err(AllocErr),
ReallocPlacement::MayMove => {
let size = layout.size();
debug_assert!(
new_size <= size,
"`new_size` must be smaller than or equal to `layout.size()`"
);
if new_size == size {
return Ok(MemoryBlock { ptr, size });
}
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
let new_memory = self.alloc(new_layout, AllocInit::Uninitialized)?;
ptr::copy_nonoverlapping(ptr.as_ptr(), new_memory.ptr.as_ptr(), new_size);
self.dealloc(ptr, layout);
Ok(new_memory)
}
}
}
}
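// Illustrative caller sketch, not part of the patch: an alloc/grow/dealloc round
// trip through the `Global` allocator from liballoc, written against the new API.
// The names below come from `std::alloc`; `grow_demo` itself is made up and
// assumes the unstable `allocator_api` feature.
use std::alloc::{AllocErr, AllocInit, AllocRef, Global, Layout, ReallocPlacement};

unsafe fn grow_demo() -> Result<(), AllocErr> {
    let layout = Layout::from_size_align(4, 4).unwrap();
    let memory = Global.alloc(layout, AllocInit::Uninitialized)?;
    let grown = Global.grow(
        memory.ptr,
        layout,
        16,
        ReallocPlacement::MayMove,
        AllocInit::Zeroed,
    )?;
    // `grown.size` is at least 16; bytes beyond the original block are zeroed.
    Global.dealloc(grown.ptr, Layout::from_size_align(16, 4).unwrap());
    Ok(())
}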

View File

@ -61,6 +61,7 @@
#![stable(feature = "alloc_module", since = "1.28.0")]
use core::intrinsics;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicPtr, Ordering};
use core::{mem, ptr};
@ -138,27 +139,18 @@ pub struct System;
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl AllocRef for System {
#[inline]
fn alloc(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> {
if layout.size() == 0 {
Ok((layout.dangling(), 0))
} else {
unsafe {
NonNull::new(GlobalAlloc::alloc(self, layout))
.ok_or(AllocErr)
.map(|p| (p, layout.size()))
}
}
}
#[inline]
fn alloc_zeroed(&mut self, layout: Layout) -> Result<(NonNull<u8>, usize), AllocErr> {
if layout.size() == 0 {
Ok((layout.dangling(), 0))
} else {
unsafe {
NonNull::new(GlobalAlloc::alloc_zeroed(self, layout))
.ok_or(AllocErr)
.map(|p| (p, layout.size()))
fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<MemoryBlock, AllocErr> {
unsafe {
let size = layout.size();
if size == 0 {
Ok(MemoryBlock { ptr: layout.dangling(), size: 0 })
} else {
let raw_ptr = match init {
AllocInit::Uninitialized => GlobalAlloc::alloc(self, layout),
AllocInit::Zeroed => GlobalAlloc::alloc_zeroed(self, layout),
};
let ptr = NonNull::new(raw_ptr).ok_or(AllocErr)?;
Ok(MemoryBlock { ptr, size })
}
}
}
@ -171,26 +163,75 @@ unsafe impl AllocRef for System {
}
#[inline]
unsafe fn realloc(
unsafe fn grow(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
) -> Result<(NonNull<u8>, usize), AllocErr> {
match (layout.size(), new_size) {
(0, 0) => Ok((layout.dangling(), 0)),
(0, _) => self.alloc(Layout::from_size_align_unchecked(new_size, layout.align())),
(_, 0) => {
self.dealloc(ptr, layout);
Ok((layout.dangling(), 0))
placement: ReallocPlacement,
init: AllocInit,
) -> Result<MemoryBlock, AllocErr> {
let size = layout.size();
debug_assert!(
new_size >= size,
"`new_size` must be greater than or equal to `memory.size()`"
);
if size == new_size {
return Ok(MemoryBlock { ptr, size });
}
match placement {
ReallocPlacement::InPlace => Err(AllocErr),
ReallocPlacement::MayMove if layout.size() == 0 => {
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
self.alloc(new_layout, init)
}
ReallocPlacement::MayMove => {
// `realloc` probably checks for `new_size > size` or something similar.
intrinsics::assume(new_size > size);
let ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), layout, new_size);
let memory =
MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size };
init.init_offset(memory, size);
Ok(memory)
}
}
}
#[inline]
unsafe fn shrink(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
placement: ReallocPlacement,
) -> Result<MemoryBlock, AllocErr> {
let size = layout.size();
debug_assert!(
new_size <= size,
"`new_size` must be smaller than or equal to `memory.size()`"
);
if size == new_size {
return Ok(MemoryBlock { ptr, size });
}
match placement {
ReallocPlacement::InPlace => Err(AllocErr),
ReallocPlacement::MayMove if new_size == 0 => {
self.dealloc(ptr, layout);
Ok(MemoryBlock { ptr: layout.dangling(), size: 0 })
}
ReallocPlacement::MayMove => {
// `realloc` probably checks for `new_size < size` or something similar.
intrinsics::assume(new_size < size);
let ptr = GlobalAlloc::realloc(self, ptr.as_ptr(), layout, new_size);
Ok(MemoryBlock { ptr: NonNull::new(ptr).ok_or(AllocErr)?, size: new_size })
}
(_, _) => NonNull::new(GlobalAlloc::realloc(self, ptr.as_ptr(), layout, new_size))
.ok_or(AllocErr)
.map(|p| (p, new_size)),
}
}
}
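// Illustrative sketch, not part of the patch: a minimal user-defined `AllocRef`
// that delegates to `System` and counts live allocations. Only `alloc` and
// `dealloc` are overridden; `grow` and `shrink` fall back to the trait's default
// alloc-copy-dealloc implementations. The type name `Counting` is made up for
// this example.
struct Counting(core::sync::atomic::AtomicUsize);

unsafe impl AllocRef for &Counting {
    fn alloc(&mut self, layout: Layout, init: AllocInit) -> Result<MemoryBlock, AllocErr> {
        let memory = System.alloc(layout, init)?;
        self.0.fetch_add(1, Ordering::Relaxed);
        Ok(memory)
    }

    unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
        System.dealloc(ptr, layout);
        self.0.fetch_sub(1, Ordering::Relaxed);
    }
}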
static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
/// Registers a custom allocation error hook, replacing any that was previously registered.
@ -238,9 +279,7 @@ pub fn rust_oom(layout: Layout) -> ! {
let hook: fn(Layout) =
if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } };
hook(layout);
unsafe {
crate::sys::abort_internal();
}
unsafe { crate::sys::abort_internal() }
}
#[cfg(not(test))]

View File

@ -15,7 +15,7 @@
use core::array;
use crate::alloc::{AllocErr, CannotReallocInPlace, LayoutErr};
use crate::alloc::{AllocErr, LayoutErr};
use crate::any::TypeId;
use crate::backtrace::Backtrace;
use crate::borrow::Cow;
@ -409,13 +409,6 @@ impl Error for AllocErr {}
)]
impl Error for LayoutErr {}
#[unstable(
feature = "allocator_api",
reason = "the precise API and guarantees it provides may be tweaked.",
issue = "32838"
)]
impl Error for CannotReallocInPlace {}
#[stable(feature = "rust1", since = "1.0.0")]
impl Error for str::ParseBoolError {
#[allow(deprecated)]

View File

@ -7,7 +7,7 @@
extern crate helper;
use std::alloc::{self, Global, AllocRef, System, Layout};
use std::alloc::{self, AllocInit, AllocRef, Global, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};
static HITS: AtomicUsize = AtomicUsize::new(0);
@ -37,10 +37,10 @@ fn main() {
unsafe {
let layout = Layout::from_size_align(4, 2).unwrap();
let (ptr, _) = Global.alloc(layout.clone()).unwrap();
helper::work_with(&ptr);
let memory = Global.alloc(layout.clone(), AllocInit::Uninitialized).unwrap();
helper::work_with(&memory.ptr);
assert_eq!(HITS.load(Ordering::SeqCst), n + 1);
Global.dealloc(ptr, layout.clone());
Global.dealloc(memory.ptr, layout);
assert_eq!(HITS.load(Ordering::SeqCst), n + 2);
let s = String::with_capacity(10);
@ -49,10 +49,10 @@ fn main() {
drop(s);
assert_eq!(HITS.load(Ordering::SeqCst), n + 4);
let (ptr, _) = System.alloc(layout.clone()).unwrap();
let memory = System.alloc(layout.clone(), AllocInit::Uninitialized).unwrap();
assert_eq!(HITS.load(Ordering::SeqCst), n + 4);
helper::work_with(&ptr);
System.dealloc(ptr, layout);
helper::work_with(&memory.ptr);
System.dealloc(memory.ptr, layout);
assert_eq!(HITS.load(Ordering::SeqCst), n + 4);
}
}

View File

@ -9,8 +9,8 @@
extern crate custom;
extern crate helper;
use std::alloc::{Global, AllocRef, System, Layout};
use std::sync::atomic::{Ordering, AtomicUsize};
use std::alloc::{AllocInit, AllocRef, Global, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};
#[global_allocator]
static GLOBAL: custom::A = custom::A(AtomicUsize::new(0));
@ -20,16 +20,16 @@ fn main() {
let n = GLOBAL.0.load(Ordering::SeqCst);
let layout = Layout::from_size_align(4, 2).unwrap();
let (ptr, _) = Global.alloc(layout.clone()).unwrap();
helper::work_with(&ptr);
let memory = Global.alloc(layout.clone(), AllocInit::Uninitialized).unwrap();
helper::work_with(&memory.ptr);
assert_eq!(GLOBAL.0.load(Ordering::SeqCst), n + 1);
Global.dealloc(ptr, layout.clone());
Global.dealloc(memory.ptr, layout);
assert_eq!(GLOBAL.0.load(Ordering::SeqCst), n + 2);
let (ptr, _) = System.alloc(layout.clone()).unwrap();
let memory = System.alloc(layout.clone(), AllocInit::Uninitialized).unwrap();
assert_eq!(GLOBAL.0.load(Ordering::SeqCst), n + 2);
helper::work_with(&ptr);
System.dealloc(ptr, layout);
helper::work_with(&memory.ptr);
System.dealloc(memory.ptr, layout);
assert_eq!(GLOBAL.0.load(Ordering::SeqCst), n + 2);
}
}

View File

@ -6,7 +6,7 @@
#![feature(allocator_api)]
use std::alloc::{Global, AllocRef, Layout, handle_alloc_error};
use std::alloc::{handle_alloc_error, AllocInit, AllocRef, Global, Layout, ReallocPlacement};
use std::ptr::{self, NonNull};
fn main() {
@ -16,17 +16,17 @@ fn main() {
}
unsafe fn test_triangle() -> bool {
static COUNT : usize = 16;
static COUNT: usize = 16;
let mut ascend = vec![ptr::null_mut(); COUNT];
let ascend = &mut *ascend;
static ALIGN : usize = 1;
static ALIGN: usize = 1;
// Checks that `ascend` forms triangle of ascending size formed
// from pairs of rows (where each pair of rows is equally sized),
// and the elements of the triangle match their row-pair index.
unsafe fn sanity_check(ascend: &[*mut u8]) {
for i in 0..COUNT / 2 {
let (p0, p1, size) = (ascend[2*i], ascend[2*i+1], idx_to_size(i));
let (p0, p1, size) = (ascend[2 * i], ascend[2 * i + 1], idx_to_size(i));
for j in 0..size {
assert_eq!(*p0.add(j), i as u8);
assert_eq!(*p1.add(j), i as u8);
@ -34,20 +34,22 @@ unsafe fn test_triangle() -> bool {
}
}
static PRINT : bool = false;
static PRINT: bool = false;
unsafe fn allocate(layout: Layout) -> *mut u8 {
if PRINT {
println!("allocate({:?})", layout);
}
let (ptr, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout));
let memory = Global
.alloc(layout, AllocInit::Uninitialized)
.unwrap_or_else(|_| handle_alloc_error(layout));
if PRINT {
println!("allocate({:?}) = {:?}", layout, ptr);
println!("allocate({:?}) = {:?}", layout, memory.ptr);
}
ptr.cast().as_ptr()
memory.ptr.cast().as_ptr()
}
unsafe fn deallocate(ptr: *mut u8, layout: Layout) {
@ -63,19 +65,31 @@ unsafe fn test_triangle() -> bool {
println!("reallocate({:?}, old={:?}, new={:?})", ptr, old, new);
}
let (ptr, _) = Global.realloc(NonNull::new_unchecked(ptr), old, new.size())
.unwrap_or_else(|_| handle_alloc_error(
Layout::from_size_align_unchecked(new.size(), old.align())
));
let memory = if new.size() > old.size() {
Global.grow(
NonNull::new_unchecked(ptr),
old,
new.size(),
ReallocPlacement::MayMove,
AllocInit::Uninitialized,
)
} else {
Global.shrink(NonNull::new_unchecked(ptr), old, new.size(), ReallocPlacement::MayMove)
};
let memory = memory.unwrap_or_else(|_| {
handle_alloc_error(Layout::from_size_align_unchecked(new.size(), old.align()))
});
if PRINT {
println!("reallocate({:?}, old={:?}, new={:?}) = {:?}",
ptr, old, new, ptr);
println!("reallocate({:?}, old={:?}, new={:?}) = {:?}", ptr, old, new, memory.ptr);
}
ptr.cast().as_ptr()
memory.ptr.cast().as_ptr()
}
fn idx_to_size(i: usize) -> usize { (i+1) * 10 }
fn idx_to_size(i: usize) -> usize {
(i + 1) * 10
}
// Allocate pairs of rows that form a triangle shape. (Hope is
// that at least two rows will be allocated near each other, so
@ -83,13 +97,13 @@ unsafe fn test_triangle() -> bool {
// way.)
for i in 0..COUNT / 2 {
let size = idx_to_size(i);
ascend[2*i] = allocate(Layout::from_size_align(size, ALIGN).unwrap());
ascend[2*i+1] = allocate(Layout::from_size_align(size, ALIGN).unwrap());
ascend[2 * i] = allocate(Layout::from_size_align(size, ALIGN).unwrap());
ascend[2 * i + 1] = allocate(Layout::from_size_align(size, ALIGN).unwrap());
}
// Initialize each pair of rows to distinct value.
for i in 0..COUNT / 2 {
let (p0, p1, size) = (ascend[2*i], ascend[2*i+1], idx_to_size(i));
let (p0, p1, size) = (ascend[2 * i], ascend[2 * i + 1], idx_to_size(i));
for j in 0..size {
*p0.add(j) = i as u8;
*p1.add(j) = i as u8;
@ -104,8 +118,8 @@ unsafe fn test_triangle() -> bool {
for i in 0..COUNT / 2 {
let size = idx_to_size(i);
deallocate(ascend[2*i], Layout::from_size_align(size, ALIGN).unwrap());
deallocate(ascend[2*i+1], Layout::from_size_align(size, ALIGN).unwrap());
deallocate(ascend[2 * i], Layout::from_size_align(size, ALIGN).unwrap());
deallocate(ascend[2 * i + 1], Layout::from_size_align(size, ALIGN).unwrap());
}
return true;
@ -115,68 +129,68 @@ unsafe fn test_triangle() -> bool {
// realloc'ing each row from top to bottom, and checking all the
// rows as we go.
unsafe fn test_1(ascend: &mut [*mut u8]) {
let new_size = idx_to_size(COUNT-1);
let new_size = idx_to_size(COUNT - 1);
let new = Layout::from_size_align(new_size, ALIGN).unwrap();
for i in 0..COUNT / 2 {
let (p0, p1, old_size) = (ascend[2*i], ascend[2*i+1], idx_to_size(i));
let (p0, p1, old_size) = (ascend[2 * i], ascend[2 * i + 1], idx_to_size(i));
assert!(old_size < new_size);
let old = Layout::from_size_align(old_size, ALIGN).unwrap();
ascend[2*i] = reallocate(p0, old.clone(), new.clone());
ascend[2 * i] = reallocate(p0, old.clone(), new.clone());
sanity_check(&*ascend);
ascend[2*i+1] = reallocate(p1, old.clone(), new.clone());
ascend[2 * i + 1] = reallocate(p1, old.clone(), new.clone());
sanity_check(&*ascend);
}
}
// Test 2: turn the square back into a triangle, top to bottom.
unsafe fn test_2(ascend: &mut [*mut u8]) {
let old_size = idx_to_size(COUNT-1);
let old_size = idx_to_size(COUNT - 1);
let old = Layout::from_size_align(old_size, ALIGN).unwrap();
for i in 0..COUNT / 2 {
let (p0, p1, new_size) = (ascend[2*i], ascend[2*i+1], idx_to_size(i));
let (p0, p1, new_size) = (ascend[2 * i], ascend[2 * i + 1], idx_to_size(i));
assert!(new_size < old_size);
let new = Layout::from_size_align(new_size, ALIGN).unwrap();
ascend[2*i] = reallocate(p0, old.clone(), new.clone());
ascend[2 * i] = reallocate(p0, old.clone(), new.clone());
sanity_check(&*ascend);
ascend[2*i+1] = reallocate(p1, old.clone(), new.clone());
ascend[2 * i + 1] = reallocate(p1, old.clone(), new.clone());
sanity_check(&*ascend);
}
}
// Test 3: turn triangle into a square, bottom to top.
unsafe fn test_3(ascend: &mut [*mut u8]) {
let new_size = idx_to_size(COUNT-1);
let new_size = idx_to_size(COUNT - 1);
let new = Layout::from_size_align(new_size, ALIGN).unwrap();
for i in (0..COUNT / 2).rev() {
let (p0, p1, old_size) = (ascend[2*i], ascend[2*i+1], idx_to_size(i));
let (p0, p1, old_size) = (ascend[2 * i], ascend[2 * i + 1], idx_to_size(i));
assert!(old_size < new_size);
let old = Layout::from_size_align(old_size, ALIGN).unwrap();
ascend[2*i+1] = reallocate(p1, old.clone(), new.clone());
ascend[2 * i + 1] = reallocate(p1, old.clone(), new.clone());
sanity_check(&*ascend);
ascend[2*i] = reallocate(p0, old.clone(), new.clone());
ascend[2 * i] = reallocate(p0, old.clone(), new.clone());
sanity_check(&*ascend);
}
}
// Test 4: turn the square back into a triangle, bottom to top.
unsafe fn test_4(ascend: &mut [*mut u8]) {
let old_size = idx_to_size(COUNT-1);
let old_size = idx_to_size(COUNT - 1);
let old = Layout::from_size_align(old_size, ALIGN).unwrap();
for i in (0..COUNT / 2).rev() {
let (p0, p1, new_size) = (ascend[2*i], ascend[2*i+1], idx_to_size(i));
let (p0, p1, new_size) = (ascend[2 * i], ascend[2 * i + 1], idx_to_size(i));
assert!(new_size < old_size);
let new = Layout::from_size_align(new_size, ALIGN).unwrap();
ascend[2*i+1] = reallocate(p1, old.clone(), new.clone());
ascend[2 * i + 1] = reallocate(p1, old.clone(), new.clone());
sanity_check(&*ascend);
ascend[2*i] = reallocate(p0, old.clone(), new.clone());
ascend[2 * i] = reallocate(p0, old.clone(), new.clone());
sanity_check(&*ascend);
}
}

View File

@ -1,34 +1,34 @@
// run-pass
#![allow(dead_code)]
#![allow(non_camel_case_types)]
// pretty-expanded FIXME #23616
#![feature(allocator_api)]
use std::alloc::{AllocRef, Global, Layout, handle_alloc_error};
use std::alloc::{handle_alloc_error, AllocInit, AllocRef, Global, Layout};
use std::ptr::NonNull;
struct arena(());
struct Bcx<'a> {
fcx: &'a Fcx<'a>
fcx: &'a Fcx<'a>,
}
struct Fcx<'a> {
arena: &'a arena,
ccx: &'a Ccx
ccx: &'a Ccx,
}
struct Ccx {
x: isize
x: isize,
}
fn alloc(_bcx: &arena) -> &Bcx<'_> {
unsafe {
let layout = Layout::new::<Bcx>();
let (ptr, _) = Global.alloc(layout).unwrap_or_else(|_| handle_alloc_error(layout));
&*(ptr.as_ptr() as *const _)
let memory = Global
.alloc(layout, AllocInit::Uninitialized)
.unwrap_or_else(|_| handle_alloc_error(layout));
&*(memory.ptr.as_ptr() as *const _)
}
}