rustc: Implement the #[global_allocator] attribute

This PR is an implementation of [RFC 1974] which specifies a new method of
defining a global allocator for a program. This obsoletes the old
`#![allocator]` attribute and also removes support for it.

[RFC 1974]: https://github.com/rust-lang/rfcs/pull/1974

The new `#[global_allocator]` attribute solves many issues encountered with the
`#![allocator]` attribute such as composition and restrictions on the crate
graph itself. The compiler now has much more control over the ABI of the
allocator and how it's implemented, allowing much more freedom in terms of how
this feature is implemented.

cc #27389
This commit is contained in:
Alex Crichton 2017-06-03 14:54:08 -07:00
parent 4c225c4d17
commit 695dee063b
115 changed files with 2860 additions and 1201 deletions

18
src/Cargo.lock generated
View File

@ -43,6 +43,8 @@ dependencies = [
name = "alloc_jemalloc"
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
"alloc_system 0.0.0",
"build_helper 0.1.0",
"core 0.0.0",
"gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)",
@ -53,6 +55,7 @@ dependencies = [
name = "alloc_system"
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
"core 0.0.0",
"libc 0.0.0",
]
@ -1127,10 +1130,21 @@ name = "rustc-serialize"
version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rustc_allocator"
version = "0.0.0"
dependencies = [
"rustc 0.0.0",
"rustc_errors 0.0.0",
"syntax 0.0.0",
"syntax_pos 0.0.0",
]
[[package]]
name = "rustc_asan"
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
"alloc_system 0.0.0",
"build_helper 0.1.0",
"cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1205,6 +1219,7 @@ dependencies = [
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"proc_macro_plugin 0.0.0",
"rustc 0.0.0",
"rustc_allocator 0.0.0",
"rustc_back 0.0.0",
"rustc_borrowck 0.0.0",
"rustc_const_eval 0.0.0",
@ -1273,6 +1288,7 @@ dependencies = [
name = "rustc_lsan"
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
"alloc_system 0.0.0",
"build_helper 0.1.0",
"cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1317,6 +1333,7 @@ dependencies = [
name = "rustc_msan"
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
"alloc_system 0.0.0",
"build_helper 0.1.0",
"cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1399,6 +1416,7 @@ dependencies = [
"owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc-demangle 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_allocator 0.0.0",
"rustc_back 0.0.0",
"rustc_bitflags 0.0.0",
"rustc_const_math 0.0.0",

View File

@ -0,0 +1,7 @@
# `allocator_internals`
This feature does not have a tracking issue; it is an unstable implementation
detail of the `global_allocator` feature not intended for use outside the
compiler.
------------------------

View File

@ -1,119 +0,0 @@
# `allocator`
The tracking issue for this feature is: [#27389]
[#27389]: https://github.com/rust-lang/rust/issues/27389
------------------------
Sometimes even the choices of jemalloc vs the system allocator aren't enough and
an entirely new custom allocator is required. In this you'll write your own
crate which implements the allocator API (e.g. the same as `alloc_system` or
`alloc_jemalloc`). As an example, let's take a look at a simplified and
annotated version of `alloc_system`
```rust,no_run
# // Only needed for rustdoc --test down below.
# #![feature(lang_items)]
// The compiler needs to be instructed that this crate is an allocator in order
// to realize that when this is linked in another allocator like jemalloc should
// not be linked in.
#![feature(allocator)]
#![allocator]
// Allocators are not allowed to depend on the standard library which in turn
// requires an allocator in order to avoid circular dependencies. This crate,
// however, can use all of libcore.
#![no_std]
// Let's give a unique name to our custom allocator:
#![crate_name = "my_allocator"]
#![crate_type = "rlib"]
// Our system allocator will use the in-tree libc crate for FFI bindings. Note
// that currently the external (crates.io) libc cannot be used because it links
// to the standard library (e.g. `#![no_std]` isn't stable yet), so that's why
// this specifically requires the in-tree version.
#![feature(libc)]
extern crate libc;
// Listed below are the five allocation functions currently required by custom
// allocators. Their signatures and symbol names are not currently typechecked
// by the compiler, but this is a future extension and are required to match
// what is found below.
//
// Note that the standard `malloc` and `realloc` functions do not provide a way
// to communicate alignment so this implementation would need to be improved
// with respect to alignment in that aspect.
#[no_mangle]
pub extern fn __rust_allocate(size: usize, _align: usize) -> *mut u8 {
unsafe { libc::malloc(size as libc::size_t) as *mut u8 }
}
#[no_mangle]
pub extern fn __rust_allocate_zeroed(size: usize, _align: usize) -> *mut u8 {
unsafe { libc::calloc(size as libc::size_t, 1) as *mut u8 }
}
#[no_mangle]
pub extern fn __rust_deallocate(ptr: *mut u8, _old_size: usize, _align: usize) {
unsafe { libc::free(ptr as *mut libc::c_void) }
}
#[no_mangle]
pub extern fn __rust_reallocate(ptr: *mut u8, _old_size: usize, size: usize,
_align: usize) -> *mut u8 {
unsafe {
libc::realloc(ptr as *mut libc::c_void, size as libc::size_t) as *mut u8
}
}
#[no_mangle]
pub extern fn __rust_reallocate_inplace(_ptr: *mut u8, old_size: usize,
_size: usize, _align: usize) -> usize {
old_size // This api is not supported by libc.
}
#[no_mangle]
pub extern fn __rust_usable_size(size: usize, _align: usize) -> usize {
size
}
# // Only needed to get rustdoc to test this:
# fn main() {}
# #[lang = "panic_fmt"] fn panic_fmt() {}
# #[lang = "eh_personality"] fn eh_personality() {}
# #[lang = "eh_unwind_resume"] extern fn eh_unwind_resume() {}
# #[no_mangle] pub extern fn rust_eh_register_frames () {}
# #[no_mangle] pub extern fn rust_eh_unregister_frames () {}
```
After we compile this crate, it can be used as follows:
```rust,ignore
extern crate my_allocator;
fn main() {
let a = Box::new(8); // Allocates memory via our custom allocator crate.
println!("{}", a);
}
```
## Custom allocator limitations
There are a few restrictions when working with custom allocators which may cause
compiler errors:
* Any one artifact may only be linked to at most one allocator. Binaries,
dylibs, and staticlibs must link to exactly one allocator, and if none have
been explicitly chosen the compiler will choose one. On the other hand rlibs
do not need to link to an allocator (but still can).
* A consumer of an allocator is tagged with `#![needs_allocator]` (e.g. the
`liballoc` crate currently) and an `#[allocator]` crate cannot transitively
depend on a crate which needs an allocator (e.g. circular dependencies are not
allowed). This basically means that allocators must restrict themselves to
libcore currently.

View File

@ -0,0 +1,71 @@
# `global_allocator`
The tracking issue for this feature is: [#27389]
[#27389]: https://github.com/rust-lang/rust/issues/27389
------------------------
Rust programs may need to change the allocator that they're running with from
time to time. This use case is distinct from an allocator-per-collection (e.g. a
`Vec` with a custom allocator) and instead is more related to changing the
global default allocator, e.g. what `Vec<T>` uses by default.
Currently Rust programs don't have a specified global allocator. The compiler
may link to a version of [jemalloc] on some platforms, but this is not
guaranteed. Libraries, however, like cdylibs and staticlibs are guaranteed
to use the "system allocator" which means something like `malloc` on Unixes and
`HeapAlloc` on Windows.
[jemalloc]: https://github.com/jemalloc/jemalloc
The `#[global_allocator]` attribute, however, allows configuring this choice.
You can use this to implement a completely custom global allocator to route all
default allocation requests to a custom object. Defined in [RFC 1974], usage
looks like:
[RFC 1974]: https://github.com/rust-lang/rfcs/pull/1974
```rust
#![feature(global_allocator, heap_api)]
use std::heap::{Alloc, System, Layout, AllocErr};
struct MyAllocator;
unsafe impl<'a> Alloc for &'a MyAllocator {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
System.alloc(layout)
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
System.dealloc(ptr, layout)
}
}
#[global_allocator]
static GLOBAL: MyAllocator = MyAllocator;
fn main() {
// This `Vec` will allocate memory through `GLOBAL` above
let mut v = Vec::new();
v.push(1);
}
```
And that's it! The `#[global_allocator]` attribute is applied to a `static`
which implements the `Alloc` trait in the `std::heap` module. Note, though,
that the implementation is defined for `&MyAllocator`, not just `MyAllocator`.
You may wish, however, to also provide `Alloc for MyAllocator` for other use
cases.
A crate can only have one instance of `#[global_allocator]` and this instance
may be loaded through a dependency. For example `#[global_allocator]` above
could have been placed in one of the dependencies loaded through `extern crate`.
Note that `Alloc` itself is an `unsafe` trait, with much documentation on the
trait itself about usage and for implementors. Extra care should be taken when
implementing a global allocator as well, as the allocator may be called from
many portions of the standard library, such as the panicking routine. As a
result it is highly recommended not to panic during allocation and to work in
as many situations with as few dependencies as possible.

View File

@ -13,7 +13,7 @@
slightly, especially to possibly take into account the \
types being stored to make room for a future \
tracing garbage collector",
issue = "27700")]
issue = "32838")]
use core::cmp;
use core::fmt;
@ -73,6 +73,7 @@ impl Layout {
/// * `size`, when rounded up to the nearest multiple of `align`,
/// must not overflow (i.e. the rounded value must be less than
/// `usize::MAX`).
#[inline]
pub fn from_size_align(size: usize, align: usize) -> Option<Layout> {
if !align.is_power_of_two() {
return None;
@ -96,13 +97,28 @@ impl Layout {
return None;
}
Some(Layout { size: size, align: align })
unsafe {
Some(Layout::from_size_align_unchecked(size, align))
}
}
/// Creates a layout, bypassing all checks.
///
/// # Unsafety
///
/// This function is unsafe as it does not verify that `align` is a power of
/// two nor that `size` aligned to `align` fits within the address space.
#[inline]
pub unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Layout {
Layout { size: size, align: align }
}
/// The minimum size in bytes for a memory block of this layout.
#[inline]
pub fn size(&self) -> usize { self.size }
/// The minimum byte alignment for a memory block of this layout.
#[inline]
pub fn align(&self) -> usize { self.align }
/// Constructs a `Layout` suitable for holding a value of type `T`.
@ -135,6 +151,7 @@ impl Layout {
///
/// Panics if the combination of `self.size` and the given `align`
/// violates the conditions listed in `from_size_align`.
#[inline]
pub fn align_to(&self, align: usize) -> Self {
Layout::from_size_align(self.size, cmp::max(self.align, align)).unwrap()
}
@ -155,6 +172,7 @@ impl Layout {
/// to be less than or equal to the alignment of the starting
/// address for the whole allocated block of memory. One way to
/// satisfy this constraint is to ensure `align <= self.align`.
#[inline]
pub fn padding_needed_for(&self, align: usize) -> usize {
let len = self.size();
@ -556,6 +574,7 @@ pub unsafe trait Alloc {
/// However, for clients that do not wish to track the capacity
/// returned by `alloc_excess` locally, this method is likely to
/// produce useful results.
#[inline]
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
(layout.size(), layout.size())
}

View File

@ -23,7 +23,6 @@ use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
use core::borrow;
use core::fmt;
use core::cmp::Ordering;
use core::mem::{align_of_val, size_of_val};
use core::intrinsics::abort;
use core::mem;
use core::mem::uninitialized;
@ -34,7 +33,8 @@ use core::marker::Unsize;
use core::hash::{Hash, Hasher};
use core::{isize, usize};
use core::convert::From;
use heap::deallocate;
use heap::{Heap, Alloc, Layout};
/// A soft limit on the amount of references that may be made to an `Arc`.
///
@ -503,7 +503,7 @@ impl<T: ?Sized> Arc<T> {
if self.inner().weak.fetch_sub(1, Release) == 1 {
atomic::fence(Acquire);
deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr))
}
}
@ -1007,7 +1007,9 @@ impl<T: ?Sized> Drop for Weak<T> {
// ref, which can only happen after the lock is released.
if self.inner().weak.fetch_sub(1, Release) == 1 {
atomic::fence(Acquire);
unsafe { deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) }
unsafe {
Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr))
}
}
}
}

View File

@ -55,7 +55,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use heap;
use heap::{Heap, Layout, Alloc};
use raw_vec::RawVec;
use core::any::Any;
@ -135,8 +135,7 @@ pub struct Box<T: ?Sized>(Unique<T>);
#[allow(missing_debug_implementations)]
pub struct IntermediateBox<T: ?Sized> {
ptr: *mut u8,
size: usize,
align: usize,
layout: Layout,
marker: marker::PhantomData<*mut T>,
}
@ -156,23 +155,21 @@ unsafe fn finalize<T>(b: IntermediateBox<T>) -> Box<T> {
}
fn make_place<T>() -> IntermediateBox<T> {
let size = mem::size_of::<T>();
let align = mem::align_of::<T>();
let layout = Layout::new::<T>();
let p = if size == 0 {
let p = if layout.size() == 0 {
mem::align_of::<T>() as *mut u8
} else {
let p = unsafe { heap::allocate(size, align) };
if p.is_null() {
panic!("Box make_place allocation failure.");
unsafe {
Heap.alloc(layout.clone()).unwrap_or_else(|err| {
Heap.oom(err)
})
}
p
};
IntermediateBox {
ptr: p,
size: size,
align: align,
layout: layout,
marker: marker::PhantomData,
}
}
@ -221,8 +218,10 @@ impl<T> Placer<T> for ExchangeHeapSingleton {
issue = "27779")]
impl<T: ?Sized> Drop for IntermediateBox<T> {
fn drop(&mut self) {
if self.size > 0 {
unsafe { heap::deallocate(self.ptr, self.size, self.align) }
if self.layout.size() > 0 {
unsafe {
Heap.dealloc(self.ptr, self.layout.clone())
}
}
}
}

View File

@ -48,7 +48,7 @@ use core::ptr::{self, Unique};
use core::slice;
use boxed::Box;
use heap;
use heap::{Heap, Alloc, Layout};
const B: usize = 6;
pub const MIN_LEN: usize = B - 1;
@ -254,11 +254,7 @@ impl<K, V> Root<K, V> {
self.as_mut().as_leaf_mut().parent = ptr::null();
unsafe {
heap::deallocate(
top,
mem::size_of::<InternalNode<K, V>>(),
mem::align_of::<InternalNode<K, V>>()
);
Heap.dealloc(top, Layout::new::<InternalNode<K, V>>());
}
}
}
@ -445,7 +441,7 @@ impl<K, V> NodeRef<marker::Owned, K, V, marker::Leaf> {
> {
let ptr = self.as_leaf() as *const LeafNode<K, V> as *const u8 as *mut u8;
let ret = self.ascend().ok();
heap::deallocate(ptr, mem::size_of::<LeafNode<K, V>>(), mem::align_of::<LeafNode<K, V>>());
Heap.dealloc(ptr, Layout::new::<LeafNode<K, V>>());
ret
}
}
@ -466,11 +462,7 @@ impl<K, V> NodeRef<marker::Owned, K, V, marker::Internal> {
> {
let ptr = self.as_internal() as *const InternalNode<K, V> as *const u8 as *mut u8;
let ret = self.ascend().ok();
heap::deallocate(
ptr,
mem::size_of::<InternalNode<K, V>>(),
mem::align_of::<InternalNode<K, V>>()
);
Heap.dealloc(ptr, Layout::new::<InternalNode<K, V>>());
ret
}
}
@ -1252,16 +1244,14 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
).correct_parent_link();
}
heap::deallocate(
Heap.dealloc(
right_node.node.get() as *mut u8,
mem::size_of::<InternalNode<K, V>>(),
mem::align_of::<InternalNode<K, V>>()
Layout::new::<InternalNode<K, V>>(),
);
} else {
heap::deallocate(
Heap.dealloc(
right_node.node.get() as *mut u8,
mem::size_of::<LeafNode<K, V>>(),
mem::align_of::<LeafNode<K, V>>()
Layout::new::<LeafNode<K, V>>(),
);
}

View File

@ -8,207 +8,212 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![unstable(feature = "heap_api",
#![unstable(feature = "allocator_api",
reason = "the precise API and guarantees it provides may be tweaked \
slightly, especially to possibly take into account the \
types being stored to make room for a future \
tracing garbage collector",
issue = "27700")]
issue = "32838")]
use allocator::{Alloc, AllocErr, CannotReallocInPlace, Layout};
use core::{isize, usize, cmp, ptr};
use core::intrinsics::{min_align_of_val, size_of_val};
use core::mem::{self, ManuallyDrop};
use core::usize;
#[allow(improper_ctypes)]
extern "C" {
#[allocator]
fn __rust_allocate(size: usize, align: usize) -> *mut u8;
fn __rust_allocate_zeroed(size: usize, align: usize) -> *mut u8;
fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize);
fn __rust_reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8;
fn __rust_reallocate_inplace(ptr: *mut u8,
old_size: usize,
size: usize,
align: usize)
-> usize;
fn __rust_usable_size(size: usize, align: usize) -> usize;
pub use allocator::*;
#[doc(hidden)]
pub mod __core {
pub use core::*;
}
#[inline(always)]
fn check_size_and_alignment(size: usize, align: usize) {
debug_assert!(size != 0);
debug_assert!(size <= isize::MAX as usize,
"Tried to allocate too much: {} bytes",
size);
debug_assert!(usize::is_power_of_two(align),
"Invalid alignment of allocation: {}",
align);
extern "Rust" {
#[allocator]
fn __rust_alloc(size: usize, align: usize, err: *mut u8) -> *mut u8;
fn __rust_oom(err: *const u8) -> !;
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
fn __rust_usable_size(layout: *const u8,
min: *mut usize,
max: *mut usize);
fn __rust_realloc(ptr: *mut u8,
old_size: usize,
old_align: usize,
new_size: usize,
new_align: usize,
err: *mut u8) -> *mut u8;
fn __rust_alloc_zeroed(size: usize, align: usize, err: *mut u8) -> *mut u8;
fn __rust_alloc_excess(size: usize,
align: usize,
excess: *mut usize,
err: *mut u8) -> *mut u8;
fn __rust_realloc_excess(ptr: *mut u8,
old_size: usize,
old_align: usize,
new_size: usize,
new_align: usize,
excess: *mut usize,
err: *mut u8) -> *mut u8;
fn __rust_grow_in_place(ptr: *mut u8,
old_size: usize,
old_align: usize,
new_size: usize,
new_align: usize) -> u8;
fn __rust_shrink_in_place(ptr: *mut u8,
old_size: usize,
old_align: usize,
new_size: usize,
new_align: usize) -> u8;
}
#[derive(Copy, Clone, Default, Debug)]
pub struct HeapAlloc;
pub struct Heap;
unsafe impl Alloc for HeapAlloc {
unsafe impl Alloc for Heap {
#[inline]
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
let addr = allocate(layout.size(), layout.align());
if addr.is_null() {
Err(AllocErr::Exhausted { request: layout })
let mut err = ManuallyDrop::new(mem::uninitialized::<AllocErr>());
let ptr = __rust_alloc(layout.size(),
layout.align(),
&mut *err as *mut AllocErr as *mut u8);
if ptr.is_null() {
Err(ManuallyDrop::into_inner(err))
} else {
Ok(addr)
Ok(ptr)
}
}
unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
let addr = allocate_zeroed(layout.size(), layout.align());
if addr.is_null() {
Err(AllocErr::Exhausted { request: layout })
} else {
Ok(addr)
#[inline]
fn oom(&mut self, err: AllocErr) -> ! {
unsafe {
__rust_oom(&err as *const AllocErr as *const u8)
}
}
#[inline]
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
deallocate(ptr, layout.size(), layout.align());
__rust_dealloc(ptr, layout.size(), layout.align())
}
#[inline]
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
(layout.size(), usable_size(layout.size(), layout.align()))
let mut min = 0;
let mut max = 0;
unsafe {
__rust_usable_size(layout as *const Layout as *const u8,
&mut min,
&mut max);
}
(min, max)
}
#[inline]
unsafe fn realloc(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout)
-> Result<*mut u8, AllocErr>
{
let old_size = layout.size();
let new_size = new_layout.size();
if layout.align() == new_layout.align() {
let new_ptr = reallocate(ptr, old_size, new_size, layout.align());
if new_ptr.is_null() {
// We assume `reallocate` already tried alloc + copy +
// dealloc fallback; thus pointless to repeat effort
Err(AllocErr::Exhausted { request: new_layout })
} else {
Ok(new_ptr)
}
let mut err = ManuallyDrop::new(mem::uninitialized::<AllocErr>());
let ptr = __rust_realloc(ptr,
layout.size(),
layout.align(),
new_layout.size(),
new_layout.align(),
&mut *err as *mut AllocErr as *mut u8);
if ptr.is_null() {
Err(ManuallyDrop::into_inner(err))
} else {
// if alignments don't match, fall back on alloc + copy + dealloc
let result = self.alloc(new_layout);
if let Ok(new_ptr) = result {
ptr::copy_nonoverlapping(ptr as *const u8, new_ptr, cmp::min(old_size, new_size));
self.dealloc(ptr, layout);
}
result
mem::forget(err);
Ok(ptr)
}
}
#[inline]
unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
let mut err = ManuallyDrop::new(mem::uninitialized::<AllocErr>());
let ptr = __rust_alloc_zeroed(layout.size(),
layout.align(),
&mut *err as *mut AllocErr as *mut u8);
if ptr.is_null() {
Err(ManuallyDrop::into_inner(err))
} else {
Ok(ptr)
}
}
#[inline]
unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> {
let mut err = ManuallyDrop::new(mem::uninitialized::<AllocErr>());
let mut size = 0;
let ptr = __rust_alloc_excess(layout.size(),
layout.align(),
&mut size,
&mut *err as *mut AllocErr as *mut u8);
if ptr.is_null() {
Err(ManuallyDrop::into_inner(err))
} else {
Ok(Excess(ptr, size))
}
}
#[inline]
unsafe fn realloc_excess(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<Excess, AllocErr> {
let mut err = ManuallyDrop::new(mem::uninitialized::<AllocErr>());
let mut size = 0;
let ptr = __rust_realloc_excess(ptr,
layout.size(),
layout.align(),
new_layout.size(),
new_layout.align(),
&mut size,
&mut *err as *mut AllocErr as *mut u8);
if ptr.is_null() {
Err(ManuallyDrop::into_inner(err))
} else {
Ok(Excess(ptr, size))
}
}
#[inline]
unsafe fn grow_in_place(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout)
-> Result<(), CannotReallocInPlace>
{
// grow_in_place spec requires this, and the spec for reallocate_inplace
// makes it hard to detect failure if it does not hold.
debug_assert!(new_layout.size() >= layout.size());
if layout.align() != new_layout.align() { // reallocate_inplace requires this.
return Err(CannotReallocInPlace);
debug_assert!(new_layout.align() == layout.align());
let ret = __rust_grow_in_place(ptr,
layout.size(),
layout.align(),
new_layout.size(),
new_layout.align());
if ret != 0 {
Ok(())
} else {
Err(CannotReallocInPlace)
}
let usable = reallocate_inplace(ptr, layout.size(), new_layout.size(), layout.align());
if usable >= new_layout.size() { Ok(()) } else { Err(CannotReallocInPlace) }
}
}
// FIXME: #13996: mark the `allocate` and `reallocate` return value as `noalias`
/// Return a pointer to `size` bytes of memory aligned to `align`.
///
/// On failure, return a null pointer.
///
/// Behavior is undefined if the requested size is 0 or the alignment is not a
/// power of 2. The alignment must be no larger than the largest supported page
/// size on the platform.
#[inline]
pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 {
check_size_and_alignment(size, align);
__rust_allocate(size, align)
}
/// Return a pointer to `size` bytes of memory aligned to `align` and
/// initialized to zeroes.
///
/// On failure, return a null pointer.
///
/// Behavior is undefined if the requested size is 0 or the alignment is not a
/// power of 2. The alignment must be no larger than the largest supported page
/// size on the platform.
#[inline]
pub unsafe fn allocate_zeroed(size: usize, align: usize) -> *mut u8 {
check_size_and_alignment(size, align);
__rust_allocate_zeroed(size, align)
}
/// Resize the allocation referenced by `ptr` to `size` bytes.
///
/// On failure, return a null pointer and leave the original allocation intact.
///
/// If the allocation was relocated, the memory at the passed-in pointer is
/// undefined after the call.
///
/// Behavior is undefined if the requested size is 0 or the alignment is not a
/// power of 2. The alignment must be no larger than the largest supported page
/// size on the platform.
///
/// The `old_size` and `align` parameters are the parameters that were used to
/// create the allocation referenced by `ptr`. The `old_size` parameter may be
/// any value in range_inclusive(requested_size, usable_size).
#[inline]
pub unsafe fn reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
check_size_and_alignment(size, align);
__rust_reallocate(ptr, old_size, size, align)
}
/// Resize the allocation referenced by `ptr` to `size` bytes.
///
/// If the operation succeeds, it returns `usable_size(size, align)` and if it
/// fails (or is a no-op) it returns `usable_size(old_size, align)`.
///
/// Behavior is undefined if the requested size is 0 or the alignment is not a
/// power of 2. The alignment must be no larger than the largest supported page
/// size on the platform.
///
/// The `old_size` and `align` parameters are the parameters that were used to
/// create the allocation referenced by `ptr`. The `old_size` parameter may be
/// any value in range_inclusive(requested_size, usable_size).
#[inline]
pub unsafe fn reallocate_inplace(ptr: *mut u8,
old_size: usize,
size: usize,
align: usize)
-> usize {
check_size_and_alignment(size, align);
__rust_reallocate_inplace(ptr, old_size, size, align)
}
/// Deallocates the memory referenced by `ptr`.
///
/// The `ptr` parameter must not be null.
///
/// The `old_size` and `align` parameters are the parameters that were used to
/// create the allocation referenced by `ptr`. The `old_size` parameter may be
/// any value in range_inclusive(requested_size, usable_size).
#[inline]
pub unsafe fn deallocate(ptr: *mut u8, old_size: usize, align: usize) {
__rust_deallocate(ptr, old_size, align)
}
/// Returns the usable size of an allocation created with the specified the
/// `size` and `align`.
#[inline]
pub fn usable_size(size: usize, align: usize) -> usize {
unsafe { __rust_usable_size(size, align) }
#[inline]
unsafe fn shrink_in_place(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<(), CannotReallocInPlace> {
debug_assert!(new_layout.size() <= layout.size());
debug_assert!(new_layout.align() == layout.align());
let ret = __rust_shrink_in_place(ptr,
layout.size(),
layout.align(),
new_layout.size(),
new_layout.align());
if ret != 0 {
Ok(())
} else {
Err(CannotReallocInPlace)
}
}
}
/// An arbitrary non-null address to represent zero-size allocations.
@ -228,11 +233,10 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
if size == 0 {
align as *mut u8
} else {
let ptr = allocate(size, align);
if ptr.is_null() {
::oom()
}
ptr
let layout = Layout::from_size_align_unchecked(size, align);
Heap.alloc(layout).unwrap_or_else(|err| {
Heap.oom(err)
})
}
}
@ -243,7 +247,8 @@ pub(crate) unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
let align = min_align_of_val(&*ptr);
// We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary.
if size != 0 {
deallocate(ptr as *mut u8, size, align);
let layout = Layout::from_size_align_unchecked(size, align);
Heap.dealloc(ptr as *mut u8, layout);
}
}
@ -252,38 +257,22 @@ mod tests {
extern crate test;
use self::test::Bencher;
use boxed::Box;
use heap;
use heap::{Heap, Alloc, Layout};
#[test]
fn allocate_zeroed() {
unsafe {
let size = 1024;
let ptr = heap::allocate_zeroed(size, 1);
if ptr.is_null() {
::oom()
}
let layout = Layout::from_size_align(1024, 1).unwrap();
let ptr = Heap.alloc_zeroed(layout.clone())
.unwrap_or_else(|e| Heap.oom(e));
let end = ptr.offset(size as isize);
let end = ptr.offset(layout.size() as isize);
let mut i = ptr;
while i < end {
assert_eq!(*i, 0);
i = i.offset(1);
}
heap::deallocate(ptr, size, 1);
}
}
#[test]
fn basic_reallocate_inplace_noop() {
unsafe {
let size = 4000;
let ptr = heap::allocate(size, 8);
if ptr.is_null() {
::oom()
}
let ret = heap::reallocate_inplace(ptr, size, size, 8);
heap::deallocate(ptr, size, 8);
assert_eq!(ret, heap::usable_size(size, 8));
Heap.dealloc(ptr, layout);
}
}

View File

@ -85,7 +85,7 @@
#![cfg_attr(not(test), feature(slice_rotate))]
#![cfg_attr(not(test), feature(str_checked_slicing))]
#![cfg_attr(test, feature(rand, test))]
#![feature(allocator)]
#![cfg_attr(stage0, feature(allocator))]
#![feature(allow_internal_unstable)]
#![feature(box_patterns)]
#![feature(box_syntax)]
@ -124,6 +124,7 @@
#![feature(unicode)]
#![feature(unique)]
#![feature(unsize)]
#![cfg_attr(not(stage0), feature(allocator_internals))]
#![cfg_attr(not(test), feature(fused, fn_traits, placement_new_protocol))]
#![cfg_attr(test, feature(test, box_heap))]
@ -168,7 +169,6 @@ mod boxed_test;
pub mod arc;
pub mod rc;
pub mod raw_vec;
pub mod oom;
// collections modules
pub mod binary_heap;
@ -260,8 +260,6 @@ trait SpecExtend<I: IntoIterator> {
fn spec_extend(&mut self, iter: I);
}
pub use oom::oom;
#[doc(no_inline)]
pub use binary_heap::BinaryHeap;
#[doc(no_inline)]

View File

@ -1,61 +0,0 @@
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[cfg(target_has_atomic = "ptr")]
pub use self::imp::set_oom_handler;
use core::intrinsics;
fn default_oom_handler() -> ! {
// The default handler can't do much more since we can't assume the presence
// of libc or any way of printing an error message.
unsafe { intrinsics::abort() }
}
/// Common out-of-memory routine
#[cold]
#[inline(never)]
#[unstable(feature = "oom", reason = "not a scrutinized interface",
issue = "27700")]
pub fn oom() -> ! {
self::imp::oom()
}
#[cfg(target_has_atomic = "ptr")]
mod imp {
use core::mem;
use core::sync::atomic::{AtomicPtr, Ordering};
static OOM_HANDLER: AtomicPtr<()> = AtomicPtr::new(super::default_oom_handler as *mut ());
#[inline(always)]
pub fn oom() -> ! {
let value = OOM_HANDLER.load(Ordering::SeqCst);
let handler: fn() -> ! = unsafe { mem::transmute(value) };
handler();
}
/// Set a custom handler for out-of-memory conditions
///
/// To avoid recursive OOM failures, it is critical that the OOM handler does
/// not allocate any memory itself.
#[unstable(feature = "oom", reason = "not a scrutinized interface",
issue = "27700")]
pub fn set_oom_handler(handler: fn() -> !) {
OOM_HANDLER.store(handler as *mut (), Ordering::SeqCst);
}
}
#[cfg(not(target_has_atomic = "ptr"))]
mod imp {
#[inline(always)]
pub fn oom() -> ! {
super::default_oom_handler()
}
}

View File

@ -12,7 +12,7 @@ use allocator::{Alloc, Layout};
use core::ptr::{self, Unique};
use core::mem;
use core::slice;
use heap::{HeapAlloc};
use heap::Heap;
use super::boxed::Box;
use core::ops::Drop;
use core::cmp;
@ -45,7 +45,7 @@ use core::cmp;
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
#[allow(missing_debug_implementations)]
pub struct RawVec<T, A: Alloc = HeapAlloc> {
pub struct RawVec<T, A: Alloc = Heap> {
ptr: Unique<T>,
cap: usize,
a: A,
@ -112,14 +112,14 @@ impl<T, A: Alloc> RawVec<T, A> {
}
}
impl<T> RawVec<T, HeapAlloc> {
impl<T> RawVec<T, Heap> {
/// Creates the biggest possible RawVec (on the system heap)
/// without allocating. If T has positive size, then this makes a
/// RawVec with capacity 0. If T has 0 size, then it it makes a
/// RawVec with capacity `usize::MAX`. Useful for implementing
/// delayed allocation.
pub fn new() -> Self {
Self::new_in(HeapAlloc)
Self::new_in(Heap)
}
/// Creates a RawVec (on the system heap) with exactly the
@ -139,13 +139,13 @@ impl<T> RawVec<T, HeapAlloc> {
/// Aborts on OOM
#[inline]
pub fn with_capacity(cap: usize) -> Self {
RawVec::allocate_in(cap, false, HeapAlloc)
RawVec::allocate_in(cap, false, Heap)
}
/// Like `with_capacity` but guarantees the buffer is zeroed.
#[inline]
pub fn with_capacity_zeroed(cap: usize) -> Self {
RawVec::allocate_in(cap, true, HeapAlloc)
RawVec::allocate_in(cap, true, Heap)
}
}
@ -166,7 +166,7 @@ impl<T, A: Alloc> RawVec<T, A> {
}
}
impl<T> RawVec<T, HeapAlloc> {
impl<T> RawVec<T, Heap> {
/// Reconstitutes a RawVec from a pointer, capacity.
///
/// # Undefined Behavior
@ -178,7 +178,7 @@ impl<T> RawVec<T, HeapAlloc> {
RawVec {
ptr: Unique::new(ptr),
cap: cap,
a: HeapAlloc,
a: Heap,
}
}
@ -609,7 +609,7 @@ impl<T, A: Alloc> RawVec<T, A> {
}
}
impl<T> RawVec<T, HeapAlloc> {
impl<T> RawVec<T, Heap> {
/// Converts the entire buffer into `Box<[T]>`.
///
/// While it is not *strictly* Undefined Behavior to call
@ -693,13 +693,13 @@ mod tests {
if size > self.fuel {
return Err(AllocErr::Unsupported { details: "fuel exhausted" });
}
match HeapAlloc.alloc(layout) {
match Heap.alloc(layout) {
ok @ Ok(_) => { self.fuel -= size; ok }
err @ Err(_) => err,
}
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
HeapAlloc.dealloc(ptr, layout)
Heap.dealloc(ptr, layout)
}
}

View File

@ -252,13 +252,13 @@ use core::hash::{Hash, Hasher};
use core::intrinsics::abort;
use core::marker;
use core::marker::Unsize;
use core::mem::{self, align_of_val, forget, size_of, size_of_val, uninitialized};
use core::mem::{self, forget, size_of, size_of_val, uninitialized};
use core::ops::Deref;
use core::ops::CoerceUnsized;
use core::ptr::{self, Shared};
use core::convert::From;
use heap::{allocate, deallocate, box_free};
use heap::{Heap, Alloc, Layout, box_free};
use raw_vec::RawVec;
struct RcBox<T: ?Sized> {
@ -461,7 +461,8 @@ impl<T> Rc<[T]> {
// FIXME(custom-DST): creating this invalid &[T] is dubiously defined,
// we should have a better way of getting the size/align
// of a DST from its unsized part.
let ptr = allocate(size_of_val(&*ptr), align_of_val(&*ptr));
let ptr = Heap.alloc(Layout::for_value(&*ptr))
.unwrap_or_else(|e| Heap.oom(e));
let ptr: *mut RcBox<[T]> = mem::transmute([ptr as usize, value.len()]);
// Initialize the new RcBox.
@ -719,7 +720,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc<T> {
self.dec_weak();
if self.weak() == 0 {
deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr));
}
}
}
@ -1097,7 +1098,7 @@ impl<T: ?Sized> Drop for Weak<T> {
// the weak count starts at 1, and will only go to zero if all
// the strong pointers have disappeared.
if self.weak() == 0 {
deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr));
}
}
}

View File

@ -15,6 +15,10 @@ doc = false
core = { path = "../libcore" }
libc = { path = "../rustc/libc_shim" }
[target.'cfg(not(stage0))'.dependencies]
alloc = { path = "../liballoc" }
alloc_system = { path = "../liballoc_system" }
[build-dependencies]
build_helper = { path = "../build_helper" }
gcc = "0.3.50"

View File

@ -11,23 +11,36 @@
#![crate_name = "alloc_jemalloc"]
#![crate_type = "rlib"]
#![no_std]
#![allocator]
#![unstable(feature = "alloc_jemalloc",
reason = "this library is unlikely to be stabilized in its current \
form or name",
issue = "27783")]
#![deny(warnings)]
#![feature(allocator)]
#![feature(libc)]
#![feature(staged_api)]
#![feature(linkage)]
#![cfg_attr(stage0, allocator)]
#![cfg_attr(stage0, feature(allocator))]
#![cfg_attr(not(stage0), feature(global_allocator))]
#![cfg_attr(all(not(stage0), not(dummy_jemalloc)), feature(allocator_api))]
#![cfg_attr(not(stage0), feature(alloc))]
#![cfg_attr(not(stage0), feature(alloc_system))]
#![cfg_attr(dummy_jemalloc, allow(dead_code))]
#[cfg(not(stage0))]
extern crate alloc;
#[cfg(not(stage0))]
extern crate alloc_system;
extern crate libc;
pub use imp::*;
#[cfg(all(not(stage0), not(dummy_jemalloc)))]
pub use contents::*;
#[cfg(all(not(stage0), not(dummy_jemalloc)))]
mod contents {
use core::ptr;
// See comments in build.rs for why we sometimes build a crate that does nothing
#[cfg(not(dummy_jemalloc))]
mod imp {
use alloc::heap::{Alloc, AllocErr, Layout};
use alloc_system::System;
use libc::{c_int, c_void, size_t};
// Note that the symbols here are prefixed by default on macOS and Windows (we
@ -91,96 +104,152 @@ mod imp {
}
}
// for symbol names src/librustc/middle/allocator.rs
// for signatures src/librustc_allocator/lib.rs
// linkage directives are provided as part of the current compiler allocator
// ABI
#[no_mangle]
pub extern "C" fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
#[linkage = "external"]
pub unsafe extern fn __rde_alloc(size: usize,
align: usize,
err: *mut u8) -> *mut u8 {
let flags = align_to_flags(align);
unsafe { mallocx(size as size_t, flags) as *mut u8 }
let ptr = mallocx(size as size_t, flags) as *mut u8;
if ptr.is_null() {
let layout = Layout::from_size_align_unchecked(size, align);
ptr::write(err as *mut AllocErr,
AllocErr::Exhausted { request: layout });
}
ptr
}
#[no_mangle]
pub extern "C" fn __rust_allocate_zeroed(size: usize, align: usize) -> *mut u8 {
if align <= MIN_ALIGN {
unsafe { calloc(size as size_t, 1) as *mut u8 }
#[linkage = "external"]
pub unsafe extern fn __rde_oom(err: *const u8) -> ! {
System.oom((*(err as *const AllocErr)).clone())
}
#[no_mangle]
#[linkage = "external"]
pub unsafe extern fn __rde_dealloc(ptr: *mut u8,
size: usize,
align: usize) {
let flags = align_to_flags(align);
sdallocx(ptr as *mut c_void, size, flags);
}
#[no_mangle]
#[linkage = "external"]
pub unsafe extern fn __rde_usable_size(layout: *const u8,
min: *mut usize,
max: *mut usize) {
let layout = &*(layout as *const Layout);
let flags = align_to_flags(layout.align());
let size = nallocx(layout.size(), flags) as usize;
*min = layout.size();
if size > 0 {
*max = size;
} else {
let flags = align_to_flags(align) | MALLOCX_ZERO;
unsafe { mallocx(size as size_t, flags) as *mut u8 }
*max = layout.size();
}
}
#[no_mangle]
pub extern "C" fn __rust_reallocate(ptr: *mut u8,
_old_size: usize,
size: usize,
align: usize)
-> *mut u8 {
let flags = align_to_flags(align);
unsafe { rallocx(ptr as *mut c_void, size as size_t, flags) as *mut u8 }
#[linkage = "external"]
pub unsafe extern fn __rde_realloc(ptr: *mut u8,
_old_size: usize,
old_align: usize,
new_size: usize,
new_align: usize,
err: *mut u8) -> *mut u8 {
if new_align != old_align {
ptr::write(err as *mut AllocErr,
AllocErr::Unsupported { details: "can't change alignments" });
return 0 as *mut u8
}
let flags = align_to_flags(new_align);
let ptr = rallocx(ptr as *mut c_void, new_size, flags) as *mut u8;
if ptr.is_null() {
let layout = Layout::from_size_align_unchecked(new_size, new_align);
ptr::write(err as *mut AllocErr,
AllocErr::Exhausted { request: layout });
}
ptr
}
#[no_mangle]
pub extern "C" fn __rust_reallocate_inplace(ptr: *mut u8,
_old_size: usize,
size: usize,
align: usize)
-> usize {
let flags = align_to_flags(align);
unsafe { xallocx(ptr as *mut c_void, size as size_t, 0, flags) as usize }
#[linkage = "external"]
pub unsafe extern fn __rde_alloc_zeroed(size: usize,
align: usize,
err: *mut u8) -> *mut u8 {
let ptr = if align <= MIN_ALIGN {
calloc(size as size_t, 1) as *mut u8
} else {
let flags = align_to_flags(align) | MALLOCX_ZERO;
mallocx(size as size_t, flags) as *mut u8
};
if ptr.is_null() {
let layout = Layout::from_size_align_unchecked(size, align);
ptr::write(err as *mut AllocErr,
AllocErr::Exhausted { request: layout });
}
ptr
}
#[no_mangle]
pub extern "C" fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
let flags = align_to_flags(align);
unsafe { sdallocx(ptr as *mut c_void, old_size as size_t, flags) }
#[linkage = "external"]
pub unsafe extern fn __rde_alloc_excess(size: usize,
align: usize,
excess: *mut usize,
err: *mut u8) -> *mut u8 {
let p = __rde_alloc(size, align, err);
if !p.is_null() {
*excess = size;
}
return p
}
#[no_mangle]
pub extern "C" fn __rust_usable_size(size: usize, align: usize) -> usize {
let flags = align_to_flags(align);
unsafe { nallocx(size as size_t, flags) as usize }
}
}
#[cfg(dummy_jemalloc)]
mod imp {
fn bogus() -> ! {
panic!("jemalloc is not implemented for this platform");
}
#[no_mangle]
pub extern "C" fn __rust_allocate(_size: usize, _align: usize) -> *mut u8 {
bogus()
}
#[no_mangle]
pub extern "C" fn __rust_allocate_zeroed(_size: usize, _align: usize) -> *mut u8 {
bogus()
}
#[no_mangle]
pub extern "C" fn __rust_reallocate(_ptr: *mut u8,
_old_size: usize,
_size: usize,
_align: usize)
-> *mut u8 {
bogus()
}
#[no_mangle]
pub extern "C" fn __rust_reallocate_inplace(_ptr: *mut u8,
_old_size: usize,
_size: usize,
_align: usize)
-> usize {
bogus()
}
#[no_mangle]
pub extern "C" fn __rust_deallocate(_ptr: *mut u8, _old_size: usize, _align: usize) {
bogus()
}
#[no_mangle]
pub extern "C" fn __rust_usable_size(_size: usize, _align: usize) -> usize {
bogus()
#[linkage = "external"]
pub unsafe extern fn __rde_realloc_excess(ptr: *mut u8,
old_size: usize,
old_align: usize,
new_size: usize,
new_align: usize,
excess: *mut usize,
err: *mut u8) -> *mut u8 {
let p = __rde_realloc(ptr, old_size, old_align, new_size, new_align, err);
if !p.is_null() {
*excess = new_size;
}
return p
}
#[no_mangle]
#[linkage = "external"]
pub unsafe extern fn __rde_grow_in_place(ptr: *mut u8,
old_size: usize,
old_align: usize,
new_size: usize,
new_align: usize) -> u8 {
__rde_shrink_in_place(ptr, old_size, old_align, new_size, new_align)
}
#[no_mangle]
#[linkage = "external"]
pub unsafe extern fn __rde_shrink_in_place(ptr: *mut u8,
_old_size: usize,
old_align: usize,
new_size: usize,
new_align: usize) -> u8 {
if old_align == new_align {
let flags = align_to_flags(new_align);
(xallocx(ptr as *mut c_void, new_size, 0, flags) == new_size) as u8
} else {
0
}
}
}

View File

@ -12,3 +12,6 @@ doc = false
[dependencies]
core = { path = "../libcore" }
libc = { path = "../rustc/libc_shim" }
[target.'cfg(not(stage0))'.dependencies]
alloc = { path = "../liballoc" }

View File

@ -11,13 +11,18 @@
#![crate_name = "alloc_system"]
#![crate_type = "rlib"]
#![no_std]
#![allocator]
#![deny(warnings)]
#![unstable(feature = "alloc_system",
reason = "this library is unlikely to be stabilized in its current \
form or name",
issue = "27783")]
#![feature(allocator)]
#![cfg_attr(stage0, allocator)]
#![cfg_attr(stage0, feature(allocator))]
#![cfg_attr(stage0, feature(core_intrinsics))]
#![cfg_attr(not(stage0), feature(global_allocator))]
#![cfg_attr(not(stage0), feature(allocator_api))]
#![cfg_attr(not(stage0), feature(alloc))]
#![cfg_attr(not(stage0), feature(core_intrinsics))]
#![feature(staged_api)]
#![cfg_attr(any(unix, target_os = "redox"), feature(libc))]
@ -39,62 +44,201 @@ const MIN_ALIGN: usize = 8;
target_arch = "sparc64")))]
const MIN_ALIGN: usize = 16;
#[no_mangle]
pub extern "C" fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
unsafe { imp::allocate(size, align) }
#[cfg(stage0)]
pub use old::*;
#[cfg(stage0)]
mod old;
#[cfg(not(stage0))]
pub use new::System;
#[cfg(not(stage0))]
mod new {
pub extern crate alloc;
use self::alloc::heap::{Alloc, AllocErr, Layout, Excess, CannotReallocInPlace};
#[unstable(feature = "allocator_api", issue = "32838")]
pub struct System;
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl Alloc for System {
#[inline]
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
(&*self).alloc(layout)
}
#[inline]
unsafe fn alloc_zeroed(&mut self, layout: Layout)
-> Result<*mut u8, AllocErr>
{
(&*self).alloc_zeroed(layout)
}
#[inline]
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
(&*self).dealloc(ptr, layout)
}
#[inline]
unsafe fn realloc(&mut self,
ptr: *mut u8,
old_layout: Layout,
new_layout: Layout) -> Result<*mut u8, AllocErr> {
(&*self).realloc(ptr, old_layout, new_layout)
}
fn oom(&mut self, err: AllocErr) -> ! {
(&*self).oom(err)
}
#[inline]
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
(&self).usable_size(layout)
}
#[inline]
unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> {
(&*self).alloc_excess(layout)
}
#[inline]
unsafe fn realloc_excess(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<Excess, AllocErr> {
(&*self).realloc_excess(ptr, layout, new_layout)
}
#[inline]
unsafe fn grow_in_place(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<(), CannotReallocInPlace> {
(&*self).grow_in_place(ptr, layout, new_layout)
}
#[inline]
unsafe fn shrink_in_place(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<(), CannotReallocInPlace> {
(&*self).shrink_in_place(ptr, layout, new_layout)
}
}
}
#[no_mangle]
pub extern "C" fn __rust_allocate_zeroed(size: usize, align: usize) -> *mut u8 {
unsafe { imp::allocate_zeroed(size, align) }
}
#[no_mangle]
pub extern "C" fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
unsafe { imp::deallocate(ptr, old_size, align) }
}
#[no_mangle]
pub extern "C" fn __rust_reallocate(ptr: *mut u8,
old_size: usize,
size: usize,
align: usize)
-> *mut u8 {
unsafe { imp::reallocate(ptr, old_size, size, align) }
}
#[no_mangle]
pub extern "C" fn __rust_reallocate_inplace(ptr: *mut u8,
old_size: usize,
size: usize,
align: usize)
-> usize {
unsafe { imp::reallocate_inplace(ptr, old_size, size, align) }
}
#[no_mangle]
pub extern "C" fn __rust_usable_size(size: usize, align: usize) -> usize {
imp::usable_size(size, align)
}
#[cfg(any(unix, target_os = "redox"))]
mod imp {
#[cfg(all(not(stage0), any(unix, target_os = "redox")))]
mod platform {
extern crate libc;
use core::cmp;
use core::ptr;
use MIN_ALIGN;
pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 {
if align <= MIN_ALIGN {
libc::malloc(size as libc::size_t) as *mut u8
} else {
aligned_malloc(size, align)
use MIN_ALIGN;
use new::System;
use new::alloc::heap::{Alloc, AllocErr, Layout};
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl<'a> Alloc for &'a System {
#[inline]
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
let ptr = if layout.align() <= MIN_ALIGN {
libc::malloc(layout.size()) as *mut u8
} else {
aligned_malloc(&layout)
};
if !ptr.is_null() {
Ok(ptr)
} else {
Err(AllocErr::Exhausted { request: layout })
}
}
#[inline]
unsafe fn alloc_zeroed(&mut self, layout: Layout)
-> Result<*mut u8, AllocErr>
{
if layout.align() <= MIN_ALIGN {
let ptr = libc::calloc(layout.size(), 1) as *mut u8;
if !ptr.is_null() {
Ok(ptr)
} else {
Err(AllocErr::Exhausted { request: layout })
}
} else {
let ret = self.alloc(layout.clone());
if let Ok(ptr) = ret {
ptr::write_bytes(ptr, 0, layout.size());
}
ret
}
}
#[inline]
unsafe fn dealloc(&mut self, ptr: *mut u8, _layout: Layout) {
libc::free(ptr as *mut libc::c_void)
}
#[inline]
unsafe fn realloc(&mut self,
ptr: *mut u8,
old_layout: Layout,
new_layout: Layout) -> Result<*mut u8, AllocErr> {
if old_layout.align() != new_layout.align() {
return Err(AllocErr::Unsupported {
details: "cannot change alignment on `realloc`",
})
}
if new_layout.align() <= MIN_ALIGN {
let ptr = libc::realloc(ptr as *mut libc::c_void, new_layout.size());
if !ptr.is_null() {
Ok(ptr as *mut u8)
} else {
Err(AllocErr::Exhausted { request: new_layout })
}
} else {
let res = self.alloc(new_layout.clone());
if let Ok(new_ptr) = res {
let size = cmp::min(old_layout.size(), new_layout.size());
ptr::copy_nonoverlapping(ptr, new_ptr, size);
self.dealloc(ptr, old_layout);
}
res
}
}
fn oom(&mut self, err: AllocErr) -> ! {
use core::fmt::{self, Write};
// Print a message to stderr before aborting to assist with
// debugging. It is critical that this code does not allocate any
// memory since we are in an OOM situation. Any errors are ignored
// while printing since there's nothing we can do about them and we
// are about to exit anyways.
drop(writeln!(Stderr, "fatal runtime error: {}", err));
unsafe {
::core::intrinsics::abort();
}
struct Stderr;
impl Write for Stderr {
fn write_str(&mut self, s: &str) -> fmt::Result {
unsafe {
libc::write(libc::STDERR_FILENO,
s.as_ptr() as *const libc::c_void,
s.len());
}
Ok(())
}
}
}
}
#[cfg(any(target_os = "android", target_os = "redox"))]
unsafe fn aligned_malloc(size: usize, align: usize) -> *mut u8 {
#[inline]
unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
// On android we currently target API level 9 which unfortunately
// doesn't have the `posix_memalign` API used below. Instead we use
// `memalign`, but this unfortunately has the property on some systems
@ -112,74 +256,41 @@ mod imp {
// [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
// [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
// /memory/aligned_memory.cc
libc::memalign(align as libc::size_t, size as libc::size_t) as *mut u8
libc::memalign(layout.align(), layout.size()) as *mut u8
}
#[cfg(not(any(target_os = "android", target_os = "redox")))]
unsafe fn aligned_malloc(size: usize, align: usize) -> *mut u8 {
#[inline]
unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
let mut out = ptr::null_mut();
let ret = libc::posix_memalign(&mut out, align as libc::size_t, size as libc::size_t);
let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
if ret != 0 {
ptr::null_mut()
} else {
out as *mut u8
}
}
pub unsafe fn allocate_zeroed(size: usize, align: usize) -> *mut u8 {
if align <= MIN_ALIGN {
libc::calloc(size as libc::size_t, 1) as *mut u8
} else {
let ptr = aligned_malloc(size, align);
if !ptr.is_null() {
ptr::write_bytes(ptr, 0, size);
}
ptr
}
}
pub unsafe fn reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
if align <= MIN_ALIGN {
libc::realloc(ptr as *mut libc::c_void, size as libc::size_t) as *mut u8
} else {
let new_ptr = allocate(size, align);
if !new_ptr.is_null() {
ptr::copy(ptr, new_ptr, cmp::min(size, old_size));
deallocate(ptr, old_size, align);
}
new_ptr
}
}
pub unsafe fn reallocate_inplace(_ptr: *mut u8,
old_size: usize,
_size: usize,
_align: usize)
-> usize {
old_size
}
pub unsafe fn deallocate(ptr: *mut u8, _old_size: usize, _align: usize) {
libc::free(ptr as *mut libc::c_void)
}
pub fn usable_size(size: usize, _align: usize) -> usize {
size
}
}
#[cfg(windows)]
#[cfg(all(windows, not(stage0)))]
#[allow(bad_style)]
mod imp {
use core::cmp::min;
use core::ptr::copy_nonoverlapping;
mod platform {
use core::cmp;
use core::ptr;
use MIN_ALIGN;
use new::System;
use new::alloc::heap::{Alloc, AllocErr, Layout, CannotReallocInPlace};
type LPVOID = *mut u8;
type HANDLE = LPVOID;
type SIZE_T = usize;
type DWORD = u32;
type BOOL = i32;
type LPDWORD = *mut DWORD;
type LPOVERLAPPED = *mut u8;
const STD_ERROR_HANDLE: DWORD = -12i32 as DWORD;
extern "system" {
fn GetProcessHeap() -> HANDLE;
@ -187,12 +298,18 @@ mod imp {
fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
fn GetLastError() -> DWORD;
fn WriteFile(hFile: HANDLE,
lpBuffer: LPVOID,
nNumberOfBytesToWrite: DWORD,
lpNumberOfBytesWritten: LPDWORD,
lpOverlapped: LPOVERLAPPED)
-> BOOL;
fn GetStdHandle(which: DWORD) -> HANDLE;
}
#[repr(C)]
struct Header(*mut u8);
const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
const HEAP_REALLOC_IN_PLACE_ONLY: DWORD = 0x00000010;
@ -207,71 +324,149 @@ mod imp {
}
#[inline]
unsafe fn allocate_with_flags(size: usize, align: usize, flags: DWORD) -> *mut u8 {
if align <= MIN_ALIGN {
HeapAlloc(GetProcessHeap(), flags, size as SIZE_T) as *mut u8
unsafe fn allocate_with_flags(layout: Layout, flags: DWORD)
-> Result<*mut u8, AllocErr>
{
let ptr = if layout.align() <= MIN_ALIGN {
HeapAlloc(GetProcessHeap(), flags, layout.size())
} else {
let ptr = HeapAlloc(GetProcessHeap(), flags, (size + align) as SIZE_T) as *mut u8;
let size = layout.size() + layout.align();
let ptr = HeapAlloc(GetProcessHeap(), flags, size);
if ptr.is_null() {
return ptr;
ptr
} else {
align_ptr(ptr, layout.align())
}
align_ptr(ptr, align)
}
}
pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 {
allocate_with_flags(size, align, 0)
}
pub unsafe fn allocate_zeroed(size: usize, align: usize) -> *mut u8 {
allocate_with_flags(size, align, HEAP_ZERO_MEMORY)
}
pub unsafe fn reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
if align <= MIN_ALIGN {
HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, size as SIZE_T) as *mut u8
} else {
let new = allocate(size, align);
if !new.is_null() {
copy_nonoverlapping(ptr, new, min(size, old_size));
deallocate(ptr, old_size, align);
}
new
}
}
pub unsafe fn reallocate_inplace(ptr: *mut u8,
old_size: usize,
size: usize,
align: usize)
-> usize {
let new = if align <= MIN_ALIGN {
HeapReAlloc(GetProcessHeap(),
HEAP_REALLOC_IN_PLACE_ONLY,
ptr as LPVOID,
size as SIZE_T) as *mut u8
} else {
let header = get_header(ptr);
HeapReAlloc(GetProcessHeap(),
HEAP_REALLOC_IN_PLACE_ONLY,
header.0 as LPVOID,
size + align as SIZE_T) as *mut u8
};
if new.is_null() { old_size } else { size }
}
pub unsafe fn deallocate(ptr: *mut u8, _old_size: usize, align: usize) {
if align <= MIN_ALIGN {
let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
debug_assert!(err != 0, "Failed to free heap memory: {}", GetLastError());
if ptr.is_null() {
Err(AllocErr::Exhausted { request: layout })
} else {
let header = get_header(ptr);
let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
debug_assert!(err != 0, "Failed to free heap memory: {}", GetLastError());
Ok(ptr as *mut u8)
}
}
pub fn usable_size(size: usize, _align: usize) -> usize {
size
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl<'a> Alloc for &'a System {
#[inline]
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
allocate_with_flags(layout, 0)
}
#[inline]
unsafe fn alloc_zeroed(&mut self, layout: Layout)
-> Result<*mut u8, AllocErr>
{
allocate_with_flags(layout, HEAP_ZERO_MEMORY)
}
#[inline]
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
if layout.align() <= MIN_ALIGN {
let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
debug_assert!(err != 0, "Failed to free heap memory: {}",
GetLastError());
} else {
let header = get_header(ptr);
let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
debug_assert!(err != 0, "Failed to free heap memory: {}",
GetLastError());
}
}
#[inline]
unsafe fn realloc(&mut self,
ptr: *mut u8,
old_layout: Layout,
new_layout: Layout) -> Result<*mut u8, AllocErr> {
if old_layout.align() != new_layout.align() {
return Err(AllocErr::Unsupported {
details: "cannot change alignment on `realloc`",
})
}
if new_layout.align() <= MIN_ALIGN {
let ptr = HeapReAlloc(GetProcessHeap(),
0,
ptr as LPVOID,
new_layout.size());
if !ptr.is_null() {
Ok(ptr as *mut u8)
} else {
Err(AllocErr::Exhausted { request: new_layout })
}
} else {
let res = self.alloc(new_layout.clone());
if let Ok(new_ptr) = res {
let size = cmp::min(old_layout.size(), new_layout.size());
ptr::copy_nonoverlapping(ptr, new_ptr, size);
self.dealloc(ptr, old_layout);
}
res
}
}
#[inline]
unsafe fn grow_in_place(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<(), CannotReallocInPlace> {
self.shrink_in_place(ptr, layout, new_layout)
}
#[inline]
unsafe fn shrink_in_place(&mut self,
ptr: *mut u8,
old_layout: Layout,
new_layout: Layout) -> Result<(), CannotReallocInPlace> {
if old_layout.align() != new_layout.align() {
return Err(CannotReallocInPlace)
}
let new = if new_layout.align() <= MIN_ALIGN {
HeapReAlloc(GetProcessHeap(),
HEAP_REALLOC_IN_PLACE_ONLY,
ptr as LPVOID,
new_layout.size())
} else {
let header = get_header(ptr);
HeapReAlloc(GetProcessHeap(),
HEAP_REALLOC_IN_PLACE_ONLY,
header.0 as LPVOID,
new_layout.size() + new_layout.align())
};
if new.is_null() {
Err(CannotReallocInPlace)
} else {
Ok(())
}
}
fn oom(&mut self, err: AllocErr) -> ! {
use core::fmt::{self, Write};
// Same as with unix we ignore all errors here
drop(writeln!(Stderr, "fatal runtime error: {}", err));
unsafe {
::core::intrinsics::abort();
}
struct Stderr;
impl Write for Stderr {
fn write_str(&mut self, s: &str) -> fmt::Result {
unsafe {
// WriteFile silently fails if it is passed an invalid
// handle, so there is no need to check the result of
// GetStdHandle.
WriteFile(GetStdHandle(STD_ERROR_HANDLE),
s.as_ptr() as LPVOID,
s.len() as DWORD,
ptr::null_mut(),
ptr::null_mut());
}
Ok(())
}
}
}
}
}

268
src/liballoc_system/old.rs Normal file
View File

@ -0,0 +1,268 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Legacy (stage0) allocation entry point: forwards to the platform-specific
/// `imp` allocator and diverges through `__rust_oom` if allocation fails.
#[no_mangle]
pub unsafe extern fn __rust_alloc(size: usize,
                                  align: usize,
                                  err: *mut u8) -> *mut u8 {
    match imp::allocate(size, align) {
        p if p.is_null() => __rust_oom(err),
        p => p,
    }
}
/// Legacy OOM entry point: the error payload is ignored and the process is
/// aborted immediately.
#[no_mangle]
pub unsafe extern fn __rust_oom(_err: *const u8) -> ! {
    ::core::intrinsics::abort()
}
/// Legacy deallocation entry point: forwards directly to the
/// platform-specific `imp` allocator.
#[no_mangle]
pub unsafe extern fn __rust_dealloc(ptr: *mut u8,
                                    size: usize,
                                    align: usize) {
    imp::deallocate(ptr, size, align)
}
/// Legacy usable-size query: this shim makes no over-allocation guarantees,
/// so both the minimum and maximum usable sizes are exactly the requested
/// size. `min`/`max` are out-pointers written unconditionally.
#[no_mangle]
pub unsafe extern fn __rust_usable_size(size: usize,
                                        _align: usize,
                                        min: *mut usize,
                                        max: *mut usize) {
    *min = size;
    *max = size;
}
/// Legacy reallocation entry point. Changing alignment across a `realloc`
/// is unsupported by this shim; that case — like allocation failure —
/// diverges through `__rust_oom`.
#[no_mangle]
pub unsafe extern fn __rust_realloc(ptr: *mut u8,
                                    old_size: usize,
                                    old_align: usize,
                                    new_size: usize,
                                    new_align: usize,
                                    err: *mut u8) -> *mut u8 {
    if old_align != new_align {
        __rust_oom(err);
    }
    match imp::reallocate(ptr, old_size, new_size, new_align) {
        p if p.is_null() => __rust_oom(err),
        p => p,
    }
}
/// Legacy zeroed-allocation entry point: forwards to the platform-specific
/// `imp` allocator and diverges through `__rust_oom` if allocation fails.
#[no_mangle]
pub unsafe extern fn __rust_alloc_zeroed(size: usize,
                                         align: usize,
                                         err: *mut u8) -> *mut u8 {
    match imp::allocate_zeroed(size, align) {
        p if p.is_null() => __rust_oom(err),
        p => p,
    }
}
/// Legacy excess-allocation entry point: not supported by this shim, so it
/// unconditionally diverges through `__rust_oom`.
#[no_mangle]
pub unsafe extern fn __rust_alloc_excess(_size: usize,
                                         _align: usize,
                                         _excess: *mut usize,
                                         err: *mut u8) -> *mut u8 {
    __rust_oom(err);
}
/// Legacy excess-reallocation entry point: not supported by this shim, so
/// it unconditionally diverges through `__rust_oom`.
#[no_mangle]
pub unsafe extern fn __rust_realloc_excess(_ptr: *mut u8,
                                           _old_size: usize,
                                           _old_align: usize,
                                           _new_size: usize,
                                           _new_align: usize,
                                           _excess: *mut usize,
                                           err: *mut u8) -> *mut u8 {
    __rust_oom(err);
}
/// Legacy grow-in-place entry point. Always returns 0, i.e. the in-place
/// request is refused (the jemalloc implementation returns nonzero on
/// success), so callers fall back to a full reallocation.
#[no_mangle]
pub unsafe extern fn __rust_grow_in_place(_ptr: *mut u8,
                                          _old_size: usize,
                                          _old_align: usize,
                                          _new_size: usize,
                                          _new_align: usize) -> u8 {
    0
}
/// Legacy shrink-in-place entry point. Always returns 0, i.e. the in-place
/// request is refused, so callers fall back to a full reallocation.
#[no_mangle]
pub unsafe extern fn __rust_shrink_in_place(_ptr: *mut u8,
                                            _old_size: usize,
                                            _old_align: usize,
                                            _new_size: usize,
                                            _new_align: usize) -> u8 {
    0
}
// Platform backend for Unix-like systems (and Redox): small allocations go
// through plain malloc/calloc/realloc; allocations whose alignment exceeds
// MIN_ALIGN use memalign/posix_memalign instead.
#[cfg(any(unix, target_os = "redox"))]
mod imp {
    extern crate libc;

    use core::cmp;
    use core::ptr;
    use MIN_ALIGN;

    // malloc already guarantees MIN_ALIGN-alignment, so only larger
    // alignments need the aligned path. Returns null on failure.
    pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 {
        if align <= MIN_ALIGN {
            libc::malloc(size as libc::size_t) as *mut u8
        } else {
            aligned_malloc(size, align)
        }
    }

    #[cfg(any(target_os = "android", target_os = "redox"))]
    unsafe fn aligned_malloc(size: usize, align: usize) -> *mut u8 {
        // On android we currently target API level 9 which unfortunately
        // doesn't have the `posix_memalign` API used below. Instead we use
        // `memalign`, but this unfortunately has the property on some systems
        // where the memory returned cannot be deallocated by `free`!
        //
        // Upon closer inspection, however, this appears to work just fine with
        // Android, so for this platform we should be fine to call `memalign`
        // (which is present in API level 9). Some helpful references could
        // possibly be chromium using memalign [1], attempts at documenting that
        // memalign + free is ok [2] [3], or the current source of chromium
        // which still uses memalign on android [4].
        //
        // [1]: https://codereview.chromium.org/10796020/
        // [2]: https://code.google.com/p/android/issues/detail?id=35391
        // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
        // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
        //                                       /memory/aligned_memory.cc
        libc::memalign(align as libc::size_t, size as libc::size_t) as *mut u8
    }

    #[cfg(not(any(target_os = "android", target_os = "redox")))]
    unsafe fn aligned_malloc(size: usize, align: usize) -> *mut u8 {
        // posix_memalign reports failure via its return code rather than a
        // null pointer; normalize that to null for callers.
        let mut out = ptr::null_mut();
        let ret = libc::posix_memalign(&mut out, align as libc::size_t, size as libc::size_t);
        if ret != 0 {
            ptr::null_mut()
        } else {
            out as *mut u8
        }
    }

    pub unsafe fn allocate_zeroed(size: usize, align: usize) -> *mut u8 {
        if align <= MIN_ALIGN {
            libc::calloc(size as libc::size_t, 1) as *mut u8
        } else {
            // No zeroing variant of the aligned path exists, so allocate
            // and then zero by hand (only if allocation succeeded).
            let ptr = aligned_malloc(size, align);
            if !ptr.is_null() {
                ptr::write_bytes(ptr, 0, size);
            }
            ptr
        }
    }

    pub unsafe fn reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
        if align <= MIN_ALIGN {
            libc::realloc(ptr as *mut libc::c_void, size as libc::size_t) as *mut u8
        } else {
            // realloc cannot preserve over-alignment, so fall back to
            // allocate + copy + free; the original block is left intact if
            // the new allocation fails.
            let new_ptr = allocate(size, align);
            if !new_ptr.is_null() {
                ptr::copy(ptr, new_ptr, cmp::min(size, old_size));
                deallocate(ptr, old_size, align);
            }
            new_ptr
        }
    }

    // free() handles both malloc'd and memalign'd pointers here, so size
    // and alignment are not needed for deallocation.
    pub unsafe fn deallocate(ptr: *mut u8, _old_size: usize, _align: usize) {
        libc::free(ptr as *mut libc::c_void)
    }
}
// Platform backend for Windows, built on the process heap (HeapAlloc and
// friends). Over-aligned allocations are handled by over-allocating
// `size + align` bytes, aligning the returned pointer up, and stashing the
// original pointer in a `Header` word immediately before the aligned
// address so it can be recovered on free/realloc.
#[cfg(windows)]
#[allow(bad_style)]
mod imp {
    use core::cmp::min;
    use core::ptr::copy_nonoverlapping;

    use MIN_ALIGN;

    // Minimal hand-rolled Win32 typedefs/externs; this crate cannot depend
    // on the winapi crates.
    type LPVOID = *mut u8;
    type HANDLE = LPVOID;
    type SIZE_T = usize;
    type DWORD = u32;
    type BOOL = i32;

    extern "system" {
        fn GetProcessHeap() -> HANDLE;
        fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
        fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
        fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
        fn GetLastError() -> DWORD;
    }

    // Stores the pointer originally returned by HeapAlloc, placed in the
    // word just before the aligned pointer handed back to the caller.
    #[repr(C)]
    struct Header(*mut u8);

    const HEAP_ZERO_MEMORY: DWORD = 0x00000008;

    // Recovers the Header written by `align_ptr` from an aligned pointer.
    unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
        &mut *(ptr as *mut Header).offset(-1)
    }

    // Rounds `ptr` up to `align` (align must be a power of two) and records
    // the original pointer in the preceding Header slot.
    unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
        let aligned = ptr.offset((align - (ptr as usize & (align - 1))) as isize);
        *get_header(aligned) = Header(ptr);
        aligned
    }

    #[inline]
    unsafe fn allocate_with_flags(size: usize, align: usize, flags: DWORD) -> *mut u8 {
        if align <= MIN_ALIGN {
            HeapAlloc(GetProcessHeap(), flags, size as SIZE_T) as *mut u8
        } else {
            // Over-allocate by `align` so there is always room to shift the
            // pointer up to the requested alignment (plus the Header word).
            let ptr = HeapAlloc(GetProcessHeap(), flags, (size + align) as SIZE_T) as *mut u8;
            if ptr.is_null() {
                return ptr;
            }
            align_ptr(ptr, align)
        }
    }

    pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 {
        allocate_with_flags(size, align, 0)
    }

    pub unsafe fn allocate_zeroed(size: usize, align: usize) -> *mut u8 {
        allocate_with_flags(size, align, HEAP_ZERO_MEMORY)
    }

    pub unsafe fn reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
        if align <= MIN_ALIGN {
            HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, size as SIZE_T) as *mut u8
        } else {
            // HeapReAlloc would not preserve the alignment offset, so fall
            // back to allocate + copy + free; the original block is left
            // intact if the new allocation fails.
            let new = allocate(size, align);
            if !new.is_null() {
                copy_nonoverlapping(ptr, new, min(size, old_size));
                deallocate(ptr, old_size, align);
            }
            new
        }
    }

    pub unsafe fn deallocate(ptr: *mut u8, _old_size: usize, align: usize) {
        if align <= MIN_ALIGN {
            let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
            debug_assert!(err != 0, "Failed to free heap memory: {}", GetLastError());
        } else {
            // Over-aligned block: free the original pointer recorded in the
            // Header, not the aligned pointer the caller holds.
            let header = get_header(ptr);
            let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
            debug_assert!(err != 0, "Failed to free heap memory: {}", GetLastError());
        }
    }
}

View File

@ -23,13 +23,11 @@
issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
test(no_crate_inject, attr(allow(unused_variables), deny(warnings))))]
#![no_std]
#![needs_allocator]
#![deny(warnings)]
#![feature(alloc)]
#![feature(collections_range)]
#![feature(macro_reexport)]
#![feature(needs_allocator)]
#![feature(staged_api)]
//! Collection types

View File

@ -84,6 +84,7 @@ pub mod infer;
pub mod lint;
pub mod middle {
pub mod allocator;
pub mod expr_use_visitor;
pub mod const_val;
pub mod cstore;

View File

@ -0,0 +1,26 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Which flavor of allocator shims is being generated: a user-provided
/// `#[global_allocator]`, or the compiler-injected default for libs/exes.
#[derive(Clone, Copy)]
pub enum AllocatorKind {
    Global,
    DefaultLib,
    DefaultExe,
}
impl AllocatorKind {
    /// Returns the generated symbol name for allocator entry point `base`
    /// (e.g. "alloc"), with a prefix identifying the allocator flavor.
    pub fn fn_name(&self, base: &str) -> String {
        let prefix = match *self {
            AllocatorKind::Global => "__rg",
            AllocatorKind::DefaultLib => "__rdl",
            AllocatorKind::DefaultExe => "__rde",
        };
        format!("{}_{}", prefix, base)
    }
}

View File

@ -287,6 +287,11 @@ fn has_allow_dead_code_or_lang_attr(attrs: &[ast::Attribute]) -> bool {
return true;
}
// Don't lint about global allocators
if attr::contains_name(attrs, "global_allocator") {
return true;
}
let dead_code = lint::builtin::DEAD_CODE.name_lower();
for attr in lint::gather_attrs(attrs) {
match attr {

View File

@ -214,10 +214,9 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
//
// Things like allocators and panic runtimes may not have been activated
// quite yet, so do so here.
activate_injected_dep(sess.injected_allocator.get(), &mut ret,
&|cnum| tcx.is_allocator(cnum.as_def_id()));
activate_injected_dep(sess.injected_panic_runtime.get(), &mut ret,
&|cnum| tcx.is_panic_runtime(cnum.as_def_id()));
activate_injected_allocator(sess, &mut ret);
// When dylib B links to dylib A, then when using B we must also link to A.
// It could be the case, however, that the rlib for A is present (hence we
@ -295,10 +294,9 @@ fn attempt_static<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option<DependencyLis
// Our allocator/panic runtime may not have been linked above if it wasn't
// explicitly linked, which is the case for any injected dependency. Handle
// that here and activate them.
activate_injected_dep(sess.injected_allocator.get(), &mut ret,
&|cnum| tcx.is_allocator(cnum.as_def_id()));
activate_injected_dep(sess.injected_panic_runtime.get(), &mut ret,
&|cnum| tcx.is_panic_runtime(cnum.as_def_id()));
activate_injected_allocator(sess, &mut ret);
Some(ret)
}
@ -331,6 +329,18 @@ fn activate_injected_dep(injected: Option<CrateNum>,
}
}
// If the compiler injected an allocator crate into the crate graph, make
// sure it actually gets linked (statically) unless a linkage was already
// chosen for it.
fn activate_injected_allocator(sess: &session::Session,
                               list: &mut DependencyList) {
    if let Some(cnum) = sess.injected_allocator.get() {
        // `list` is offset by one relative to CrateNum (presumably because
        // crate number 0 is the local crate) — hence the `- 1`.
        let idx = cnum.as_usize() - 1;
        if list[idx] == Linkage::NotLinked {
            list[idx] = Linkage::Static;
        }
    }
}
// After the linkage for a crate has been determined we need to verify that
// there's only going to be one allocator in the output.
fn verify_ok<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, list: &[Linkage]) {
@ -338,23 +348,12 @@ fn verify_ok<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, list: &[Linkage]) {
if list.len() == 0 {
return
}
let mut allocator = None;
let mut panic_runtime = None;
for (i, linkage) in list.iter().enumerate() {
if let Linkage::NotLinked = *linkage {
continue
}
let cnum = CrateNum::new(i + 1);
if tcx.is_allocator(cnum.as_def_id()) {
if let Some(prev) = allocator {
let prev_name = sess.cstore.crate_name(prev);
let cur_name = sess.cstore.crate_name(cnum);
sess.err(&format!("cannot link together two \
allocators: {} and {}",
prev_name, cur_name));
}
allocator = Some(cnum);
}
if tcx.is_panic_runtime(cnum.as_def_id()) {
if let Some((prev, _)) = panic_runtime {

View File

@ -16,6 +16,7 @@ use hir::def_id::{CrateNum, DefIndex};
use lint;
use middle::cstore::CrateStore;
use middle::allocator::AllocatorKind;
use middle::dependency_format;
use session::search_paths::PathKind;
use session::config::DebugInfoLevel;
@ -106,6 +107,7 @@ pub struct Session {
/// dependency if it didn't already find one, and this tracks what was
/// injected.
pub injected_allocator: Cell<Option<CrateNum>>,
pub allocator_kind: Cell<Option<AllocatorKind>>,
pub injected_panic_runtime: Cell<Option<CrateNum>>,
/// Map from imported macro spans (which consist of
@ -140,6 +142,9 @@ pub struct Session {
/// Loaded up early on in the initialization of this `Session` to avoid
/// false positives about a job server in our environment.
pub jobserver_from_env: Option<Client>,
/// Metadata about the allocators for the current crate being compiled
pub has_global_allocator: Cell<bool>,
}
pub struct PerfStats {
@ -715,6 +720,7 @@ pub fn build_session_(sopts: config::Options,
type_length_limit: Cell::new(1048576),
next_node_id: Cell::new(NodeId::new(1)),
injected_allocator: Cell::new(None),
allocator_kind: Cell::new(None),
injected_panic_runtime: Cell::new(None),
imported_macro_spans: RefCell::new(HashMap::new()),
incr_comp_session: RefCell::new(IncrCompSession::NotInitialized),
@ -732,7 +738,6 @@ pub fn build_session_(sopts: config::Options,
print_fuel_crate: print_fuel_crate,
print_fuel: print_fuel,
out_of_fuel: Cell::new(false),
// Note that this is unsafe because it may misinterpret file descriptors
// on Unix as jobserver file descriptors. We hopefully execute this near
// the beginning of the process though to ensure we don't get false
@ -750,6 +755,7 @@ pub fn build_session_(sopts: config::Options,
});
(*GLOBAL_JOBSERVER).clone()
},
has_global_allocator: Cell::new(false),
};
sess

View File

@ -0,0 +1,15 @@
[package]
authors = ["The Rust Project Developers"]
name = "rustc_allocator"
version = "0.0.0"
[lib]
path = "lib.rs"
crate-type = ["dylib"]
test = false
[dependencies]
rustc = { path = "../librustc" }
rustc_errors = { path = "../librustc_errors" }
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }

View File

@ -0,0 +1,498 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::middle::allocator::AllocatorKind;
use rustc_errors;
use syntax::abi::Abi;
use syntax::ast::{Crate, Attribute, LitKind, StrStyle, ExprKind};
use syntax::ast::{Unsafety, Constness, Generics, Mutability, Ty, Mac, Arg};
use syntax::ast::{self, Ident, Item, ItemKind, TyKind, Visibility, Expr};
use syntax::attr;
use syntax::codemap::dummy_spanned;
use syntax::codemap::{ExpnInfo, NameAndSpan, MacroAttribute};
use syntax::ext::base::ExtCtxt;
use syntax::ext::base::Resolver;
use syntax::ext::build::AstBuilder;
use syntax::ext::expand::ExpansionConfig;
use syntax::ext::hygiene::{Mark, SyntaxContext};
use syntax::fold::{self, Folder};
use syntax::parse::ParseSess;
use syntax::ptr::P;
use syntax::symbol::Symbol;
use syntax::util::small_vector::SmallVector;
use syntax_pos::{Span, DUMMY_SP};
use {AllocatorMethod, AllocatorTy, ALLOCATOR_METHODS};
/// Entry point for `#[global_allocator]` expansion: folds the whole crate,
/// expanding the marked static (if any) into allocator shim functions.
/// Errors (non-static item, duplicate attribute) are reported via `handler`.
pub fn modify(sess: &ParseSess,
              resolver: &mut Resolver,
              krate: Crate,
              handler: &rustc_errors::Handler) -> ast::Crate {
    let mut fold = ExpandAllocatorDirectives {
        found: false,
        handler: handler,
        sess: sess,
        resolver: resolver,
    };
    fold.fold_crate(krate)
}
/// AST folder that rewrites `#[global_allocator]` statics (driven by
/// `modify` above).
struct ExpandAllocatorDirectives<'a> {
    // Whether a `#[global_allocator]` item has already been seen; used to
    // reject a second one.
    found: bool,
    handler: &'a rustc_errors::Handler,
    sess: &'a ParseSess,
    resolver: &'a mut Resolver,
}
impl<'a> Folder for ExpandAllocatorDirectives<'a> {
    // Looks for items tagged `#[global_allocator]`.  A tagged static is kept
    // as-is and a generated module of allocator shims is appended right after
    // it; untagged items are folded normally.
    fn fold_item(&mut self, item: P<Item>) -> SmallVector<P<Item>> {
        let name = if attr::contains_name(&item.attrs, "global_allocator") {
            "global_allocator"
        } else {
            return fold::noop_fold_item(item, self)
        };
        // The attribute is only meaningful on a `static` item.
        match item.node {
            ItemKind::Static(..) => {}
            _ => {
                self.handler.span_err(item.span, "allocators must be statics");
                return SmallVector::one(item)
            }
        }
        // At most one global allocator per crate.
        if self.found {
            self.handler.span_err(item.span, "cannot define more than one \
                                              #[global_allocator]");
            return SmallVector::one(item)
        }
        self.found = true;
        // Expand under a fresh hygiene mark whose expansion info sets
        // `allow_internal_unstable`, so the generated code may use unstable
        // APIs regardless of the user's crate.
        let mark = Mark::fresh(Mark::root());
        mark.set_expn_info(ExpnInfo {
            call_site: DUMMY_SP,
            callee: NameAndSpan {
                format: MacroAttribute(Symbol::intern(name)),
                span: None,
                allow_internal_unstable: true,
            }
        });
        // All generated code carries the item's span but the marked context.
        let span = Span {
            ctxt: SyntaxContext::empty().apply_mark(mark),
            ..item.span
        };
        let ecfg = ExpansionConfig::default(name.to_string());
        let mut f = AllocFnFactory {
            span: span,
            kind: AllocatorKind::Global,
            global: item.ident,
            alloc: Ident::from_str("alloc"),
            cx: ExtCtxt::new(self.sess, ecfg, self.resolver),
        };
        // The generated module starts with `extern crate alloc;` plus a
        // `use super::<the allocator static>;` so the shims can name both the
        // `alloc` crate and the user's static.
        let super_path = f.cx.path(f.span, vec![
            Ident::from_str("super"),
            f.global,
        ]);
        let mut items = vec![
            f.cx.item_extern_crate(f.span, f.alloc),
            f.cx.item_use_simple(f.span, Visibility::Inherited, super_path),
        ];
        // One shim function per method of the allocator ABI.
        for method in ALLOCATOR_METHODS {
            items.push(f.allocator_fn(method));
        }
        // Wrap the shims in a gensym'd module and run name resolution on it.
        let name = f.kind.fn_name("allocator_abi");
        let allocator_abi = Ident::with_empty_ctxt(Symbol::gensym(&name));
        let module = f.cx.item_mod(span, span, allocator_abi, Vec::new(), items);
        let module = f.cx.monotonic_expander().fold_item(module).pop().unwrap();
        // Emit the original static followed by the generated module.
        let mut ret = SmallVector::new();
        ret.push(item);
        ret.push(module);
        return ret
    }
    // Macros are left for later expansion; default folding only.
    fn fold_mac(&mut self, mac: Mac) -> Mac {
        fold::noop_fold_mac(mac, self)
    }
}
/// Shared state for generating the allocator shim functions.
struct AllocFnFactory<'a> {
    // Span (carrying the expansion mark) used for every generated node.
    span: Span,
    // Allocator flavor; determines the generated symbol prefix.
    kind: AllocatorKind,
    // Ident of the user's `#[global_allocator]` static.
    global: Ident,
    // Ident used to refer to the `alloc` crate in generated paths.
    alloc: Ident,
    cx: ExtCtxt<'a>,
}
// Factory for the per-method allocator shims; each `allocator_fn` call emits
// one exported function forwarding to the user's global allocator.
impl<'a> AllocFnFactory<'a> {
    // Builds one shim for `method`: the shim takes a flattened, primitive
    // parameter list, reconstructs the logical argument values, calls the
    // corresponding `Alloc` trait method on the user's static, and flattens
    // the result back into a primitive return value.
    fn allocator_fn(&self, method: &AllocatorMethod) -> P<Item> {
        let mut abi_args = Vec::new();
        // Generator of fresh parameter names: arg0, arg1, ...
        let mut i = 0;
        let ref mut mk = || {
            let name = Ident::from_str(&format!("arg{}", i));
            i += 1;
            name
        };
        // Each logical input may push several primitive parameters onto
        // `abi_args`; the collected exprs rebuild the logical values.
        let args = method.inputs.iter().map(|ty| {
            self.arg_ty(ty, &mut abi_args, mk)
        }).collect();
        let result = self.call_allocator(method.name, args);
        let (output_ty, output_expr) =
            self.ret_ty(&method.output, &mut abi_args, mk, result);
        // Every shim is an `unsafe fn` with the default Rust ABI.
        let kind = ItemKind::Fn(self.cx.fn_decl(abi_args, output_ty),
                                Unsafety::Unsafe,
                                dummy_spanned(Constness::NotConst),
                                Abi::Rust,
                                Generics::default(),
                                self.cx.block_expr(output_expr));
        self.cx.item(self.span,
                     Ident::from_str(&self.kind.fn_name(method.name)),
                     self.attrs(),
                     kind)
    }
    // Builds `alloc::heap::Alloc::<method>(&mut &GLOBAL, args...)`; note the
    // receiver is `&mut &GLOBAL`, i.e. the trait method is invoked through a
    // shared reference to the user's static.
    fn call_allocator(&self, method: &str, mut args: Vec<P<Expr>>) -> P<Expr> {
        let method = self.cx.path(self.span, vec![
            self.alloc,
            Ident::from_str("heap"),
            Ident::from_str("Alloc"),
            Ident::from_str(method),
        ]);
        let method = self.cx.expr_path(method);
        let allocator = self.cx.path_ident(self.span, self.global);
        let allocator = self.cx.expr_path(allocator);
        let allocator = self.cx.expr_addr_of(self.span, allocator);
        let allocator = self.cx.expr_mut_addr_of(self.span, allocator);
        args.insert(0, allocator);
        self.cx.expr_call(self.span, method, args)
    }
    // Attributes shared by every shim: `#[linkage = "external"]` and
    // `#[no_mangle]`, so the function is exported under its exact name.
    fn attrs(&self) -> Vec<Attribute> {
        let key = Symbol::intern("linkage");
        let value = LitKind::Str(Symbol::intern("external"), StrStyle::Cooked);
        let linkage = self.cx.meta_name_value(self.span, key, value);
        let no_mangle = Symbol::intern("no_mangle");
        let no_mangle = self.cx.meta_word(self.span, no_mangle);
        vec![
            self.cx.attribute(self.span, linkage),
            self.cx.attribute(self.span, no_mangle),
        ]
    }
    // Lowers one logical input type: pushes the primitive parameters it needs
    // onto `args` (named via `ident`) and returns an expression rebuilding
    // the logical value from them.
    fn arg_ty(&self,
              ty: &AllocatorTy,
              args: &mut Vec<Arg>,
              mut ident: &mut FnMut() -> Ident) -> P<Expr> {
        match *ty {
            AllocatorTy::Layout => {
                // A `Layout` travels as two `usize` parameters (size, align)
                // and is rebuilt with `Layout::from_size_align_unchecked`.
                let usize = self.cx.path_ident(self.span, Ident::from_str("usize"));
                let ty_usize = self.cx.ty_path(usize);
                let size = ident();
                let align = ident();
                args.push(self.cx.arg(self.span, size, ty_usize.clone()));
                args.push(self.cx.arg(self.span, align, ty_usize));
                let layout_new = self.cx.path(self.span, vec![
                    self.alloc,
                    Ident::from_str("heap"),
                    Ident::from_str("Layout"),
                    Ident::from_str("from_size_align_unchecked"),
                ]);
                let layout_new = self.cx.expr_path(layout_new);
                let size = self.cx.expr_ident(self.span, size);
                let align = self.cx.expr_ident(self.span, align);
                let layout = self.cx.expr_call(self.span,
                                               layout_new,
                                               vec![size, align]);
                layout
            }
            AllocatorTy::LayoutRef => {
                let ident = ident();
                args.push(self.cx.arg(self.span, ident, self.ptr_u8()));
                // Convert our `arg: *const u8` via:
                //
                // &*(arg as *const Layout)
                let expr = self.cx.expr_ident(self.span, ident);
                let expr = self.cx.expr_cast(self.span, expr, self.layout_ptr());
                let expr = self.cx.expr_deref(self.span, expr);
                self.cx.expr_addr_of(self.span, expr)
            }
            AllocatorTy::AllocErr => {
                // We're creating:
                //
                // (*(arg as *const AllocErr)).clone()
                let ident = ident();
                args.push(self.cx.arg(self.span, ident, self.ptr_u8()));
                let expr = self.cx.expr_ident(self.span, ident);
                let expr = self.cx.expr_cast(self.span, expr, self.alloc_err_ptr());
                let expr = self.cx.expr_deref(self.span, expr);
                self.cx.expr_method_call(
                    self.span,
                    expr,
                    Ident::from_str("clone"),
                    Vec::new()
                )
            }
            AllocatorTy::Ptr => {
                // A raw pointer is passed straight through.
                let ident = ident();
                args.push(self.cx.arg(self.span, ident, self.ptr_u8()));
                self.cx.expr_ident(self.span, ident)
            }
            AllocatorTy::ResultPtr |
            AllocatorTy::ResultExcess |
            AllocatorTy::ResultUnit |
            AllocatorTy::Bang |
            AllocatorTy::UsizePair |
            AllocatorTy::Unit => {
                panic!("can't convert AllocatorTy to an argument")
            }
        }
    }
    // Lowers the logical return type: may push out-pointer parameters onto
    // `args`, and wraps `expr` (the `Alloc` call) so the shim returns a
    // primitive value.  Returns the shim's return type and body expression.
    fn ret_ty(&self,
              ty: &AllocatorTy,
              args: &mut Vec<Arg>,
              mut ident: &mut FnMut() -> Ident,
              expr: P<Expr>) -> (P<Ty>, P<Expr>)
    {
        match *ty {
            AllocatorTy::UsizePair => {
                // We're creating:
                //
                // let arg = #expr;
                // *min = arg.0;
                // *max = arg.1;
                let min = ident();
                let max = ident();
                args.push(self.cx.arg(self.span, min, self.ptr_usize()));
                args.push(self.cx.arg(self.span, max, self.ptr_usize()));
                let ident = ident();
                let stmt = self.cx.stmt_let(self.span, false, ident, expr);
                let min = self.cx.expr_ident(self.span, min);
                let max = self.cx.expr_ident(self.span, max);
                let layout = self.cx.expr_ident(self.span, ident);
                let assign_min = self.cx.expr(self.span, ExprKind::Assign(
                    self.cx.expr_deref(self.span, min),
                    self.cx.expr_tup_field_access(self.span, layout.clone(), 0),
                ));
                let assign_min = self.cx.stmt_semi(assign_min);
                let assign_max = self.cx.expr(self.span, ExprKind::Assign(
                    self.cx.expr_deref(self.span, max),
                    self.cx.expr_tup_field_access(self.span, layout.clone(), 1),
                ));
                let assign_max = self.cx.stmt_semi(assign_max);
                let stmts = vec![stmt, assign_min, assign_max];
                let block = self.cx.block(self.span, stmts);
                let ty_unit = self.cx.ty(self.span, TyKind::Tup(Vec::new()));
                (ty_unit, self.cx.expr_block(block))
            }
            AllocatorTy::ResultExcess => {
                // We're creating:
                //
                // match #expr {
                //     Ok(ptr) => {
                //         *excess = ptr.1;
                //         ptr.0
                //     }
                //     Err(e) => {
                //         ptr::write(err_ptr, e);
                //         0 as *mut u8
                //     }
                // }
                let excess_ptr = ident();
                args.push(self.cx.arg(self.span, excess_ptr, self.ptr_usize()));
                let excess_ptr = self.cx.expr_ident(self.span, excess_ptr);
                let err_ptr = ident();
                args.push(self.cx.arg(self.span, err_ptr, self.ptr_u8()));
                let err_ptr = self.cx.expr_ident(self.span, err_ptr);
                let err_ptr = self.cx.expr_cast(self.span,
                                                err_ptr,
                                                self.alloc_err_ptr());
                let name = ident();
                let ok_expr = {
                    let ptr = self.cx.expr_ident(self.span, name);
                    let write = self.cx.expr(self.span, ExprKind::Assign(
                        self.cx.expr_deref(self.span, excess_ptr),
                        self.cx.expr_tup_field_access(self.span, ptr.clone(), 1),
                    ));
                    let write = self.cx.stmt_semi(write);
                    let ret = self.cx.expr_tup_field_access(self.span,
                                                            ptr.clone(),
                                                            0);
                    let ret = self.cx.stmt_expr(ret);
                    let block = self.cx.block(self.span, vec![write, ret]);
                    self.cx.expr_block(block)
                };
                let pat = self.cx.pat_ident(self.span, name);
                let ok = self.cx.path_ident(self.span, Ident::from_str("Ok"));
                let ok = self.cx.pat_tuple_struct(self.span, ok, vec![pat]);
                let ok = self.cx.arm(self.span, vec![ok], ok_expr);
                // Errors are reported via `ptr::write` into the caller-supplied
                // error slot; null signals failure.
                let name = ident();
                let err_expr = {
                    let err = self.cx.expr_ident(self.span, name);
                    let write = self.cx.path(self.span, vec![
                        self.alloc,
                        Ident::from_str("heap"),
                        Ident::from_str("__core"),
                        Ident::from_str("ptr"),
                        Ident::from_str("write"),
                    ]);
                    let write = self.cx.expr_path(write);
                    let write = self.cx.expr_call(self.span, write,
                                                  vec![err_ptr, err]);
                    let write = self.cx.stmt_semi(write);
                    let null = self.cx.expr_usize(self.span, 0);
                    let null = self.cx.expr_cast(self.span, null, self.ptr_u8());
                    let null = self.cx.stmt_expr(null);
                    let block = self.cx.block(self.span, vec![write, null]);
                    self.cx.expr_block(block)
                };
                let pat = self.cx.pat_ident(self.span, name);
                let err = self.cx.path_ident(self.span, Ident::from_str("Err"));
                let err = self.cx.pat_tuple_struct(self.span, err, vec![pat]);
                let err = self.cx.arm(self.span, vec![err], err_expr);
                let expr = self.cx.expr_match(self.span, expr, vec![ok, err]);
                (self.ptr_u8(), expr)
            }
            AllocatorTy::ResultPtr => {
                // We're creating:
                //
                // match #expr {
                //     Ok(ptr) => ptr,
                //     Err(e) => {
                //         ptr::write(err_ptr, e);
                //         0 as *mut u8
                //     }
                // }
                let err_ptr = ident();
                args.push(self.cx.arg(self.span, err_ptr, self.ptr_u8()));
                let err_ptr = self.cx.expr_ident(self.span, err_ptr);
                let err_ptr = self.cx.expr_cast(self.span,
                                                err_ptr,
                                                self.alloc_err_ptr());
                let name = ident();
                let ok_expr = self.cx.expr_ident(self.span, name);
                let pat = self.cx.pat_ident(self.span, name);
                let ok = self.cx.path_ident(self.span, Ident::from_str("Ok"));
                let ok = self.cx.pat_tuple_struct(self.span, ok, vec![pat]);
                let ok = self.cx.arm(self.span, vec![ok], ok_expr);
                // Same error convention as ResultExcess above.
                let name = ident();
                let err_expr = {
                    let err = self.cx.expr_ident(self.span, name);
                    let write = self.cx.path(self.span, vec![
                        self.alloc,
                        Ident::from_str("heap"),
                        Ident::from_str("__core"),
                        Ident::from_str("ptr"),
                        Ident::from_str("write"),
                    ]);
                    let write = self.cx.expr_path(write);
                    let write = self.cx.expr_call(self.span, write,
                                                  vec![err_ptr, err]);
                    let write = self.cx.stmt_semi(write);
                    let null = self.cx.expr_usize(self.span, 0);
                    let null = self.cx.expr_cast(self.span, null, self.ptr_u8());
                    let null = self.cx.stmt_expr(null);
                    let block = self.cx.block(self.span, vec![write, null]);
                    self.cx.expr_block(block)
                };
                let pat = self.cx.pat_ident(self.span, name);
                let err = self.cx.path_ident(self.span, Ident::from_str("Err"));
                let err = self.cx.pat_tuple_struct(self.span, err, vec![pat]);
                let err = self.cx.arm(self.span, vec![err], err_expr);
                let expr = self.cx.expr_match(self.span, expr, vec![ok, err]);
                (self.ptr_u8(), expr)
            }
            AllocatorTy::ResultUnit => {
                // We're creating:
                //
                // #expr.is_ok() as u8
                let cast = self.cx.expr_method_call(
                    self.span,
                    expr,
                    Ident::from_str("is_ok"),
                    Vec::new()
                );
                let u8 = self.cx.path_ident(self.span, Ident::from_str("u8"));
                let u8 = self.cx.ty_path(u8);
                let cast = self.cx.expr_cast(self.span, cast, u8.clone());
                (u8, cast)
            }
            AllocatorTy::Bang => {
                // Diverging methods (e.g. `oom`) return `!` directly.
                (self.cx.ty(self.span, TyKind::Never), expr)
            }
            AllocatorTy::Unit => {
                (self.cx.ty(self.span, TyKind::Tup(Vec::new())), expr)
            }
            AllocatorTy::AllocErr |
            AllocatorTy::Layout |
            AllocatorTy::LayoutRef |
            AllocatorTy::Ptr => {
                panic!("can't convert AllocatorTy to an output")
            }
        }
    }
    // Type `*mut u8`.
    fn ptr_u8(&self) -> P<Ty> {
        let u8 = self.cx.path_ident(self.span, Ident::from_str("u8"));
        let ty_u8 = self.cx.ty_path(u8);
        self.cx.ty_ptr(self.span, ty_u8, Mutability::Mutable)
    }
    // Type `*mut usize`.
    fn ptr_usize(&self) -> P<Ty> {
        let usize = self.cx.path_ident(self.span, Ident::from_str("usize"));
        let ty_usize = self.cx.ty_path(usize);
        self.cx.ty_ptr(self.span, ty_usize, Mutability::Mutable)
    }
    // Type `*mut alloc::heap::Layout`.
    fn layout_ptr(&self) -> P<Ty> {
        let layout = self.cx.path(self.span, vec![
            self.alloc,
            Ident::from_str("heap"),
            Ident::from_str("Layout"),
        ]);
        let layout = self.cx.ty_path(layout);
        self.cx.ty_ptr(self.span, layout, Mutability::Mutable)
    }
    // Type `*mut alloc::heap::AllocErr`.
    fn alloc_err_ptr(&self) -> P<Ty> {
        let err = self.cx.path(self.span, vec![
            self.alloc,
            Ident::from_str("heap"),
            Ident::from_str("AllocErr"),
        ]);
        let err = self.cx.ty_path(err);
        self.cx.ty_ptr(self.span, err, Mutability::Mutable)
    }
}

View File

@ -0,0 +1,101 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_private)]
extern crate rustc;
extern crate rustc_errors;
extern crate syntax;
extern crate syntax_pos;
pub mod expand;
/// The full set of entry points in the allocator ABI.  For each entry a shim
/// function is generated whose symbol name is produced by
/// `AllocatorKind::fn_name(name)` and which forwards to the matching method
/// of the `Alloc` trait (see `expand`).
pub static ALLOCATOR_METHODS: &[AllocatorMethod] = &[
    AllocatorMethod {
        name: "alloc",
        inputs: &[AllocatorTy::Layout],
        output: AllocatorTy::ResultPtr,
        is_unsafe: true,
    },
    AllocatorMethod {
        name: "oom",
        inputs: &[AllocatorTy::AllocErr],
        output: AllocatorTy::Bang,
        is_unsafe: false,
    },
    AllocatorMethod {
        name: "dealloc",
        inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout],
        output: AllocatorTy::Unit,
        is_unsafe: true,
    },
    AllocatorMethod {
        name: "usable_size",
        inputs: &[AllocatorTy::LayoutRef],
        output: AllocatorTy::UsizePair,
        is_unsafe: false,
    },
    AllocatorMethod {
        name: "realloc",
        inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Layout],
        output: AllocatorTy::ResultPtr,
        is_unsafe: true,
    },
    AllocatorMethod {
        name: "alloc_zeroed",
        inputs: &[AllocatorTy::Layout],
        output: AllocatorTy::ResultPtr,
        is_unsafe: true,
    },
    AllocatorMethod {
        name: "alloc_excess",
        inputs: &[AllocatorTy::Layout],
        output: AllocatorTy::ResultExcess,
        is_unsafe: true,
    },
    AllocatorMethod {
        name: "realloc_excess",
        inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Layout],
        output: AllocatorTy::ResultExcess,
        is_unsafe: true,
    },
    AllocatorMethod {
        name: "grow_in_place",
        inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Layout],
        output: AllocatorTy::ResultUnit,
        is_unsafe: true,
    },
    AllocatorMethod {
        name: "shrink_in_place",
        inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Layout],
        output: AllocatorTy::ResultUnit,
        is_unsafe: true,
    },
];
/// Description of one entry point in the allocator ABI.
pub struct AllocatorMethod {
    /// Name of the `Alloc` trait method; also the suffix of the generated
    /// symbol (e.g. `__rg_<name>`).
    pub name: &'static str,
    /// Logical argument types of the method.
    pub inputs: &'static [AllocatorTy],
    /// Logical return type of the method.
    pub output: AllocatorTy,
    /// Whether the underlying trait method is `unsafe`.
    pub is_unsafe: bool,
}
/// Logical types appearing in allocator method signatures; `expand` lowers
/// each one to primitive parameters / return values in the shim ABI.
pub enum AllocatorTy {
    /// An `AllocErr` value (passed behind a raw pointer in the shim ABI).
    AllocErr,
    /// The never type `!` (diverging methods such as `oom`).
    Bang,
    /// A `Layout` value (lowered to size + align `usize` parameters).
    Layout,
    /// A `&Layout` reference (lowered to a raw pointer parameter).
    LayoutRef,
    /// A raw `*mut u8` pointer, passed through unchanged.
    Ptr,
    /// `Result` of a pointer plus excess capacity (lowered to a returned
    /// pointer plus out-pointers for the excess and the error).
    ResultExcess,
    /// `Result` of a pointer (lowered to a returned pointer, null on error,
    /// with an out-pointer for the error value).
    ResultPtr,
    /// `Result<(), _>` (lowered to a `u8` success flag via `is_ok()`).
    ResultUnit,
    /// Unit return.
    Unit,
    /// A `(usize, usize)` pair (lowered to two `*mut usize` out-pointers).
    UsizePair,
}

View File

@ -14,5 +14,6 @@ build_helper = { path = "../build_helper" }
cmake = "0.1.18"
[dependencies]
alloc = { path = "../liballoc" }
alloc_system = { path = "../liballoc_system" }
core = { path = "../libcore" }

View File

@ -11,6 +11,8 @@
#![sanitizer_runtime]
#![feature(sanitizer_runtime)]
#![feature(alloc_system)]
#![cfg_attr(not(stage0), feature(allocator_api))]
#![cfg_attr(not(stage0), feature(global_allocator))]
#![feature(staged_api)]
#![no_std]
#![unstable(feature = "sanitizer_runtime_lib",
@ -18,3 +20,10 @@
issue = "0")]
extern crate alloc_system;
#[cfg(not(stage0))]
use alloc_system::System;
#[cfg(not(stage0))]
#[global_allocator]
static ALLOC: System = System;

View File

@ -16,7 +16,7 @@ pub fn target() -> TargetResult {
base.max_atomic_width = Some(128);
// see #36994
base.exe_allocation_crate = "alloc_system".to_string();
base.exe_allocation_crate = None;
Ok(Target {
llvm_target: "aarch64-unknown-freebsd".to_string(),

View File

@ -16,7 +16,7 @@ pub fn target() -> TargetResult {
base.max_atomic_width = Some(128);
// see #36994
base.exe_allocation_crate = "alloc_system".to_string();
base.exe_allocation_crate = None;
Ok(Target {
llvm_target: "aarch64-unknown-linux-gnu".to_string(),

View File

@ -19,7 +19,6 @@ pub fn opts() -> TargetOptions {
linker_is_gnu: true,
has_rpath: true,
position_independent_executables: true,
exe_allocation_crate: "alloc_system".to_string(),
.. Default::default()
}

View File

@ -37,7 +37,6 @@ pub fn opts() -> TargetOptions {
has_rpath: true,
pre_link_args: args,
position_independent_executables: true,
exe_allocation_crate: "alloc_system".to_string(),
has_elf_tls: true,
.. Default::default()
}

View File

@ -29,7 +29,7 @@ pub fn target() -> TargetResult {
max_atomic_width: Some(64),
// see #36994
exe_allocation_crate: "alloc_system".to_string(),
exe_allocation_crate: None,
..super::linux_base::opts()
},

View File

@ -29,7 +29,7 @@ pub fn target() -> TargetResult {
max_atomic_width: Some(64),
// see #36994
exe_allocation_crate: "alloc_system".to_string(),
exe_allocation_crate: None,
..super::linux_base::opts()
},

View File

@ -28,7 +28,7 @@ pub fn target() -> TargetResult {
max_atomic_width: Some(32),
// see #36994
exe_allocation_crate: "alloc_system".to_string(),
exe_allocation_crate: None,
..super::linux_base::opts()
},

View File

@ -28,7 +28,7 @@ pub fn target() -> TargetResult {
max_atomic_width: Some(32),
// see #36994
exe_allocation_crate: "alloc_system".to_string(),
exe_allocation_crate: None,
..super::linux_base::opts()
}

View File

@ -28,7 +28,7 @@ pub fn target() -> TargetResult {
max_atomic_width: Some(32),
// see #36994
exe_allocation_crate: "alloc_system".to_string(),
exe_allocation_crate: None,
..super::linux_base::opts()
},

View File

@ -29,7 +29,7 @@ pub fn target() -> TargetResult {
max_atomic_width: Some(32),
// see #36994
exe_allocation_crate: "alloc_system".to_string(),
exe_allocation_crate: None,
..super::linux_base::opts()
},

View File

@ -28,7 +28,7 @@ pub fn target() -> TargetResult {
max_atomic_width: Some(32),
// see #36994
exe_allocation_crate: "alloc_system".to_string(),
exe_allocation_crate: None,
..super::linux_base::opts()
}

View File

@ -29,7 +29,7 @@ pub fn target() -> TargetResult {
max_atomic_width: Some(32),
// see #36994
exe_allocation_crate: "alloc_system".to_string(),
exe_allocation_crate: None,
..super::linux_base::opts()
},

View File

@ -378,9 +378,8 @@ pub struct TargetOptions {
/// `eh_unwind_resume` lang item.
pub custom_unwind_resume: bool,
/// Default crate for allocation symbols to link against
pub lib_allocation_crate: String,
pub exe_allocation_crate: String,
/// If necessary, a different crate to link exe allocators by default
pub exe_allocation_crate: Option<String>,
/// Flag indicating whether ELF TLS (e.g. #[thread_local]) is available for
/// this target.
@ -457,8 +456,7 @@ impl Default for TargetOptions {
link_env: Vec::new(),
archive_format: "gnu".to_string(),
custom_unwind_resume: false,
lib_allocation_crate: "alloc_system".to_string(),
exe_allocation_crate: "alloc_system".to_string(),
exe_allocation_crate: None,
allow_asm: true,
has_elf_tls: false,
obj_is_bitcode: false,
@ -682,8 +680,7 @@ impl Target {
key!(archive_format);
key!(allow_asm, bool);
key!(custom_unwind_resume, bool);
key!(lib_allocation_crate);
key!(exe_allocation_crate);
key!(exe_allocation_crate, optional);
key!(has_elf_tls, bool);
key!(obj_is_bitcode, bool);
key!(no_integrated_as, bool);
@ -869,7 +866,6 @@ impl ToJson for Target {
target_option_val!(archive_format);
target_option_val!(allow_asm);
target_option_val!(custom_unwind_resume);
target_option_val!(lib_allocation_crate);
target_option_val!(exe_allocation_crate);
target_option_val!(has_elf_tls);
target_option_val!(obj_is_bitcode);
@ -889,10 +885,10 @@ impl ToJson for Target {
}
}
fn maybe_jemalloc() -> String {
fn maybe_jemalloc() -> Option<String> {
if cfg!(feature = "jemalloc") {
"alloc_jemalloc".to_string()
Some("alloc_jemalloc".to_string())
} else {
"alloc_system".to_string()
None
}
}

View File

@ -34,7 +34,6 @@ pub fn opts() -> TargetOptions {
is_like_openbsd: true,
pre_link_args: args,
position_independent_executables: true,
exe_allocation_crate: "alloc_system".to_string(),
.. Default::default()
}
}

View File

@ -18,7 +18,7 @@ pub fn target() -> TargetResult {
base.max_atomic_width = Some(64);
// see #36994
base.exe_allocation_crate = "alloc_system".to_string();
base.exe_allocation_crate = None;
Ok(Target {
llvm_target: "powerpc64-unknown-linux-gnu".to_string(),

View File

@ -18,7 +18,7 @@ pub fn target() -> TargetResult {
base.max_atomic_width = Some(64);
// see #36994
base.exe_allocation_crate = "alloc_system".to_string();
base.exe_allocation_crate = None;
Ok(Target {
llvm_target: "powerpc64le-unknown-linux-gnu".to_string(),

View File

@ -17,7 +17,7 @@ pub fn target() -> TargetResult {
base.max_atomic_width = Some(32);
// see #36994
base.exe_allocation_crate = "alloc_system".to_string();
base.exe_allocation_crate = None;
Ok(Target {
llvm_target: "powerpc-unknown-linux-gnu".to_string(),

View File

@ -36,8 +36,6 @@ pub fn opts() -> TargetOptions {
eliminate_frame_pointer: false,
target_family: None,
linker_is_gnu: true,
lib_allocation_crate: "alloc_system".to_string(),
exe_allocation_crate: "alloc_system".to_string(),
has_elf_tls: true,
panic_strategy: PanicStrategy::Abort,
.. Default::default()

View File

@ -21,7 +21,7 @@ pub fn target() -> TargetResult {
base.features = "-vector".to_string();
base.max_atomic_width = Some(64);
// see #36994
base.exe_allocation_crate = "alloc_system".to_string();
base.exe_allocation_crate = None;
Ok(Target {
llvm_target: "s390x-unknown-linux-gnu".to_string(),

View File

@ -15,7 +15,7 @@ pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.cpu = "v9".to_string();
base.max_atomic_width = Some(64);
base.exe_allocation_crate = "alloc_system".to_string();
base.exe_allocation_crate = None;
Ok(Target {
llvm_target: "sparc64-unknown-linux-gnu".to_string(),

View File

@ -63,7 +63,6 @@ pub fn opts() -> TargetOptions {
is_like_windows: true,
is_like_msvc: true,
pre_link_args: args,
exe_allocation_crate: "alloc_system".to_string(),
.. Default::default()
}

View File

@ -24,7 +24,7 @@ pub fn target() -> TargetResult {
base.position_independent_executables = false;
base.disable_redzone = true;
base.no_default_libraries = false;
base.exe_allocation_crate = "alloc_system".to_string();
base.exe_allocation_crate = None;
Ok(Target {
llvm_target: "x86_64-rumprun-netbsd".to_string(),

View File

@ -15,6 +15,7 @@ log = { version = "0.3", features = ["release_max_level_info"] }
env_logger = { version = "0.4", default-features = false }
proc_macro_plugin = { path = "../libproc_macro_plugin" }
rustc = { path = "../librustc" }
rustc_allocator = { path = "../librustc_allocator" }
rustc_back = { path = "../librustc_back" }
rustc_borrowck = { path = "../librustc_borrowck" }
rustc_const_eval = { path = "../librustc_const_eval" }

View File

@ -27,6 +27,7 @@ use rustc::traits;
use rustc::util::common::{ErrorReported, time};
use rustc::util::nodemap::NodeSet;
use rustc::util::fs::rename_or_copy_remove;
use rustc_allocator as allocator;
use rustc_borrowck as borrowck;
use rustc_incremental::{self, IncrementalHashesMap};
use rustc_resolve::{MakeGlobMap, Resolver};
@ -750,6 +751,13 @@ pub fn phase_2_configure_and_expand<F>(sess: &Session,
});
}
krate = time(time_passes, "creating allocators", || {
allocator::expand::modify(&sess.parse_sess,
&mut resolver,
krate,
sess.diagnostic())
});
after_expand(&krate)?;
if sess.opts.debugging_opts.input_stats {

View File

@ -34,6 +34,7 @@ extern crate graphviz;
extern crate env_logger;
extern crate libc;
extern crate rustc;
extern crate rustc_allocator;
extern crate rustc_back;
extern crate rustc_borrowck;
extern crate rustc_const_eval;

View File

@ -1071,7 +1071,8 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for InvalidNoMangleItems {
fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
match it.node {
hir::ItemFn(.., ref generics, _) => {
if attr::contains_name(&it.attrs, "no_mangle") {
if attr::contains_name(&it.attrs, "no_mangle") &&
!attr::contains_name(&it.attrs, "linkage") {
if !cx.access_levels.is_reachable(it.id) {
let msg = format!("function {} is marked #[no_mangle], but not exported",
it.name);

View File

@ -698,6 +698,7 @@ extern "C" {
pub fn LLVMIsGlobalConstant(GlobalVar: ValueRef) -> Bool;
pub fn LLVMSetGlobalConstant(GlobalVar: ValueRef, IsConstant: Bool);
pub fn LLVMRustGetNamedValue(M: ModuleRef, Name: *const c_char) -> ValueRef;
pub fn LLVMSetTailCall(CallInst: ValueRef, IsTailCall: Bool);
// Operations on functions
pub fn LLVMAddFunction(M: ModuleRef, Name: *const c_char, FunctionTy: TypeRef) -> ValueRef;

View File

@ -14,5 +14,6 @@ build_helper = { path = "../build_helper" }
cmake = "0.1.18"
[dependencies]
alloc = { path = "../liballoc" }
alloc_system = { path = "../liballoc_system" }
core = { path = "../libcore" }

View File

@ -11,6 +11,8 @@
#![sanitizer_runtime]
#![feature(sanitizer_runtime)]
#![feature(alloc_system)]
#![cfg_attr(not(stage0), feature(allocator_api))]
#![cfg_attr(not(stage0), feature(global_allocator))]
#![feature(staged_api)]
#![no_std]
#![unstable(feature = "sanitizer_runtime_lib",
@ -18,3 +20,10 @@
issue = "0")]
extern crate alloc_system;
#[cfg(not(stage0))]
use alloc_system::System;
#[cfg(not(stage0))]
#[global_allocator]
static ALLOC: System = System;

View File

@ -16,6 +16,7 @@ use schema::{CrateRoot, Tracked};
use rustc::hir::def_id::{CrateNum, DefIndex};
use rustc::hir::svh::Svh;
use rustc::middle::allocator::AllocatorKind;
use rustc::middle::cstore::DepKind;
use rustc::session::Session;
use rustc::session::config::{Sanitizer, self};
@ -40,6 +41,7 @@ use syntax::attr;
use syntax::ext::base::SyntaxExtension;
use syntax::feature_gate::{self, GateIssue};
use syntax::symbol::Symbol;
use syntax::visit;
use syntax_pos::{Span, DUMMY_SP};
use log;
@ -920,34 +922,28 @@ impl<'a> CrateLoader<'a> {
}
}
fn inject_allocator_crate(&mut self) {
// Make sure that we actually need an allocator, if none of our
// dependencies need one then we definitely don't!
//
// Also, if one of our dependencies has an explicit allocator, then we
// also bail out as we don't need to implicitly inject one.
let mut needs_allocator = false;
let mut found_required_allocator = false;
let dep_graph = &self.sess.dep_graph;
self.cstore.iter_crate_data(|cnum, data| {
needs_allocator = needs_allocator || data.needs_allocator(dep_graph);
if data.is_allocator(dep_graph) {
info!("{} required by rlib and is an allocator", data.name());
self.inject_dependency_if(cnum, "an allocator",
&|data| data.needs_allocator(dep_graph));
found_required_allocator = found_required_allocator ||
data.dep_kind.get() == DepKind::Explicit;
}
});
if !needs_allocator || found_required_allocator { return }
fn inject_allocator_crate(&mut self, krate: &ast::Crate) {
let has_global_allocator = has_global_allocator(krate);
if has_global_allocator {
self.sess.has_global_allocator.set(true);
}
// At this point we've determined that we need an allocator and no
// previous allocator has been activated. We look through our outputs of
// crate types to see what kind of allocator types we may need.
//
// The main special output type here is that rlibs do **not** need an
// allocator linked in (they're just object files), only final products
// (exes, dylibs, staticlibs) need allocators.
// Check to see if we actually need an allocator. This desire comes
// about through the `#![needs_allocator]` attribute and is typically
// written down in liballoc.
let mut needs_allocator = attr::contains_name(&krate.attrs,
"needs_allocator");
let dep_graph = &self.sess.dep_graph;
self.cstore.iter_crate_data(|_, data| {
needs_allocator = needs_allocator || data.needs_allocator(dep_graph);
});
if !needs_allocator {
return
}
// At this point we've determined that we need an allocator. Let's see
// if our compilation session actually needs an allocator based on what
// we're emitting.
let mut need_lib_alloc = false;
let mut need_exe_alloc = false;
for ct in self.sess.crate_types.borrow().iter() {
@ -960,44 +956,132 @@ impl<'a> CrateLoader<'a> {
config::CrateTypeRlib => {}
}
}
if !need_lib_alloc && !need_exe_alloc { return }
// The default allocator crate comes from the custom target spec, and we
// choose between the standard library allocator or exe allocator. This
// distinction exists because the default allocator for binaries (where
// the world is Rust) is different than library (where the world is
// likely *not* Rust).
//
// If a library is being produced, but we're also flagged with `-C
// prefer-dynamic`, then we interpret this as a *Rust* dynamic library
// is being produced so we use the exe allocator instead.
//
// What this boils down to is:
//
// * Binaries use jemalloc
// * Staticlibs and Rust dylibs use system malloc
// * Rust dylibs used as dependencies to rust use jemalloc
let name = if need_lib_alloc && !self.sess.opts.cg.prefer_dynamic {
Symbol::intern(&self.sess.target.target.options.lib_allocation_crate)
} else {
Symbol::intern(&self.sess.target.target.options.exe_allocation_crate)
};
let dep_kind = DepKind::Implicit;
let (cnum, data) =
self.resolve_crate(&None, name, name, None, DUMMY_SP, PathKind::Crate, dep_kind);
// Sanity check the crate we loaded to ensure that it is indeed an
// allocator.
if !data.is_allocator(dep_graph) {
self.sess.err(&format!("the allocator crate `{}` is not tagged \
with #![allocator]", data.name()));
if !need_lib_alloc && !need_exe_alloc {
return
}
self.sess.injected_allocator.set(Some(cnum));
self.inject_dependency_if(cnum, "an allocator",
&|data| data.needs_allocator(dep_graph));
// Ok, we need an allocator. Not only that but we're actually going to
// create an artifact that needs one linked in. Let's go find the one
// that we're going to link in.
//
// First up we check for global allocators. Look at the crate graph here
// and see what's a global allocator, including if we ourselves are a
// global allocator.
let dep_graph = &self.sess.dep_graph;
let mut global_allocator = if has_global_allocator {
Some(None)
} else {
None
};
self.cstore.iter_crate_data(|_, data| {
if !data.has_global_allocator(dep_graph) {
return
}
match global_allocator {
Some(Some(other_crate)) => {
self.sess.err(&format!("the #[global_allocator] in {} \
conflicts with this global \
allocator in: {}",
other_crate,
data.name()));
}
Some(None) => {
self.sess.err(&format!("the #[global_allocator] in this \
crate conflicts with global \
allocator in: {}", data.name()));
}
None => global_allocator = Some(Some(data.name())),
}
});
if global_allocator.is_some() {
self.sess.allocator_kind.set(Some(AllocatorKind::Global));
return
}
// Ok we haven't found a global allocator but we still need an
// allocator. At this point we'll either fall back to the "library
// allocator" or the "exe allocator" depending on a few variables. Let's
// figure out which one.
//
// Note that here we favor linking to the "library allocator" as much as
// possible. If we're not creating rustc's version of libstd
// (need_lib_alloc and prefer_dynamic) then we select `None`, and if the
// exe allocation crate doesn't exist for this target then we also
// select `None`.
let exe_allocation_crate =
if need_lib_alloc && !self.sess.opts.cg.prefer_dynamic {
None
} else {
self.sess.target.target.options.exe_allocation_crate.as_ref()
};
match exe_allocation_crate {
// We've determined that we're injecting an "exe allocator" which
// means that we're going to load up a whole new crate. An example
// of this is that we're producing a normal binary on Linux which
// means we need to load the `alloc_jemalloc` crate to link as an
// allocator.
Some(krate) => {
self.sess.allocator_kind.set(Some(AllocatorKind::DefaultExe));
let name = Symbol::intern(krate);
let dep_kind = DepKind::Implicit;
let (cnum, _data) =
self.resolve_crate(&None,
name,
name,
None,
DUMMY_SP,
PathKind::Crate, dep_kind);
self.sess.injected_allocator.set(Some(cnum));
// self.cstore.iter_crate_data(|_, data| {
// if !data.needs_allocator(dep_graph) {
// return
// }
// data.cnum_map.borrow_mut().push(cnum);
// });
}
// We're not actually going to inject an allocator, we're going to
// require that something in our crate graph is the default lib
// allocator. This is typically libstd, so this'll rarely be an
// error.
None => {
self.sess.allocator_kind.set(Some(AllocatorKind::DefaultLib));
let mut found_lib_allocator =
attr::contains_name(&krate.attrs, "default_lib_allocator");
self.cstore.iter_crate_data(|_, data| {
if !found_lib_allocator {
if data.has_default_lib_allocator(dep_graph) {
found_lib_allocator = true;
}
}
});
if found_lib_allocator {
return
}
self.sess.err("no #[default_lib_allocator] found but one is \
required; is libstd not linked?");
}
}
/// Reports whether any item in `krate` carries the `#[global_allocator]`
/// attribute, by walking the whole AST with a tiny visitor.
fn has_global_allocator(krate: &ast::Crate) -> bool {
    struct Finder(bool);

    impl<'ast> visit::Visitor<'ast> for Finder {
        fn visit_item(&mut self, item: &'ast ast::Item) {
            // Record a hit but keep walking; nested items may also be
            // annotated and the walk is cheap either way.
            if attr::contains_name(&item.attrs, "global_allocator") {
                self.0 = true;
            }
            visit::walk_item(self, item)
        }
    }

    let mut finder = Finder(false);
    visit::walk_crate(&mut finder, krate);
    finder.0
}
}
fn inject_dependency_if(&self,
krate: CrateNum,
what: &str,
@ -1123,7 +1207,7 @@ impl<'a> middle::cstore::CrateLoader for CrateLoader<'a> {
// sanitizers force the use of the `alloc_system` allocator
self.inject_sanitizer_runtime();
self.inject_profiler_runtime();
self.inject_allocator_crate();
self.inject_allocator_crate(krate);
self.inject_panic_runtime(krate);
if log_enabled!(log::LogLevel::Info) {

View File

@ -275,16 +275,27 @@ impl CrateMetadata {
self.root.disambiguator
}
pub fn is_allocator(&self, dep_graph: &DepGraph) -> bool {
let attrs = self.get_item_attrs(CRATE_DEF_INDEX, dep_graph);
attr::contains_name(&attrs, "allocator")
}
pub fn needs_allocator(&self, dep_graph: &DepGraph) -> bool {
let attrs = self.get_item_attrs(CRATE_DEF_INDEX, dep_graph);
attr::contains_name(&attrs, "needs_allocator")
}
pub fn has_global_allocator(&self, dep_graph: &DepGraph) -> bool {
let dep_node = self.metadata_dep_node(GlobalMetaDataKind::Krate);
self.root
.has_global_allocator
.get(dep_graph, dep_node)
.clone()
}
pub fn has_default_lib_allocator(&self, dep_graph: &DepGraph) -> bool {
let dep_node = self.metadata_dep_node(GlobalMetaDataKind::Krate);
self.root
.has_default_lib_allocator
.get(dep_graph, dep_node)
.clone()
}
pub fn is_panic_runtime(&self, dep_graph: &DepGraph) -> bool {
let attrs = self.get_item_attrs(CRATE_DEF_INDEX, dep_graph);
attr::contains_name(&attrs, "panic_runtime")

View File

@ -134,7 +134,6 @@ provide! { <'tcx> tcx, def_id, cdata,
is_mir_available => { cdata.is_item_mir_available(def_id.index) }
dylib_dependency_formats => { Rc::new(cdata.get_dylib_dependency_formats(&tcx.dep_graph)) }
is_allocator => { cdata.is_allocator(&tcx.dep_graph) }
is_panic_runtime => { cdata.is_panic_runtime(&tcx.dep_graph) }
extern_crate => { Rc::new(cdata.extern_crate.get()) }
}

View File

@ -400,12 +400,17 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
let tcx = self.tcx;
let link_meta = self.link_meta;
let is_proc_macro = tcx.sess.crate_types.borrow().contains(&CrateTypeProcMacro);
let has_default_lib_allocator =
attr::contains_name(tcx.hir.krate_attrs(), "default_lib_allocator");
let has_global_allocator = tcx.sess.has_global_allocator.get();
let root = self.lazy(&CrateRoot {
name: tcx.crate_name(LOCAL_CRATE),
triple: tcx.sess.opts.target_triple.clone(),
hash: link_meta.crate_hash,
disambiguator: tcx.sess.local_crate_disambiguator(),
panic_strategy: Tracked::new(tcx.sess.panic_strategy()),
has_global_allocator: Tracked::new(has_global_allocator),
has_default_lib_allocator: Tracked::new(has_default_lib_allocator),
plugin_registrar_fn: tcx.sess
.plugin_registrar_fn
.get()

View File

@ -243,6 +243,8 @@ pub struct CrateRoot {
pub hash: hir::svh::Svh,
pub disambiguator: Symbol,
pub panic_strategy: Tracked<PanicStrategy>,
pub has_global_allocator: Tracked<bool>,
pub has_default_lib_allocator: Tracked<bool>,
pub plugin_registrar_fn: Option<DefIndex>,
pub macro_derive_registrar: Option<DefIndex>,

View File

@ -14,5 +14,6 @@ build_helper = { path = "../build_helper" }
cmake = "0.1.18"
[dependencies]
alloc = { path = "../liballoc" }
alloc_system = { path = "../liballoc_system" }
core = { path = "../libcore" }

View File

@ -11,6 +11,8 @@
#![sanitizer_runtime]
#![feature(sanitizer_runtime)]
#![feature(alloc_system)]
#![cfg_attr(not(stage0), feature(allocator_api))]
#![cfg_attr(not(stage0), feature(global_allocator))]
#![feature(staged_api)]
#![no_std]
#![unstable(feature = "sanitizer_runtime_lib",
@ -18,3 +20,10 @@
issue = "0")]
extern crate alloc_system;
#[cfg(not(stage0))]
use alloc_system::System;
#[cfg(not(stage0))]
#[global_allocator]
static ALLOC: System = System;

View File

@ -17,6 +17,7 @@ log = "0.3"
owning_ref = "0.3.3"
rustc-demangle = "0.1.4"
rustc = { path = "../librustc" }
rustc_allocator = { path = "../librustc_allocator" }
rustc_back = { path = "../librustc_back" }
rustc_bitflags = { path = "../librustc_bitflags" }
rustc_const_math = { path = "../librustc_const_math" }

View File

@ -0,0 +1,117 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ffi::CString;
use std::ptr;
use libc::c_uint;
use rustc::middle::allocator::AllocatorKind;
use rustc::ty::TyCtxt;
use rustc_allocator::{ALLOCATOR_METHODS, AllocatorTy};
use ModuleLlvm;
use llvm::{self, False, True};
/// Emits the allocator shim into `mods`: for every entry in
/// `ALLOCATOR_METHODS` this generates a `__rust_<name>` function that
/// tail-calls the concrete allocator symbol chosen by `kind.fn_name(..)`
/// (the user's `#[global_allocator]` symbols, or the default exe/lib
/// allocator symbols).
///
/// Unsafe because it drives the raw LLVM-C API directly; the caller must
/// guarantee `mods` holds a live, valid LLVM context and module.
pub unsafe fn trans(tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) {
    let llcx = mods.llcx;
    let llmod = mods.llmod;

    // The target's `usize`, derived from the target spec's pointer width.
    let usize = match &tcx.sess.target.target.target_pointer_width[..] {
        "16" => llvm::LLVMInt16TypeInContext(llcx),
        "32" => llvm::LLVMInt32TypeInContext(llcx),
        "64" => llvm::LLVMInt64TypeInContext(llcx),
        tws => bug!("Unsupported target word size for int: {}", tws),
    };
    let i8 = llvm::LLVMInt8TypeInContext(llcx);
    let i8p = llvm::LLVMPointerType(i8, 0);
    let usizep = llvm::LLVMPointerType(usize, 0);
    let void = llvm::LLVMVoidTypeInContext(llcx);

    for method in ALLOCATOR_METHODS {
        // Lower each declared input to its scalar ABI form; a `Layout`
        // expands into two separate usize arguments.
        let mut args = Vec::new();
        for ty in method.inputs.iter() {
            match *ty {
                AllocatorTy::Layout => {
                    args.push(usize); // size
                    args.push(usize); // align
                }
                AllocatorTy::LayoutRef => args.push(i8p),
                AllocatorTy::Ptr => args.push(i8p),
                AllocatorTy::AllocErr => args.push(i8p),

                // These variants only make sense as return types.
                AllocatorTy::Bang |
                AllocatorTy::ResultExcess |
                AllocatorTy::ResultPtr |
                AllocatorTy::ResultUnit |
                AllocatorTy::UsizePair |
                AllocatorTy::Unit => panic!("invalid allocator arg"),
            }
        }
        // Lower the output. `Result`-shaped outputs pass their extra
        // payloads (error, excess size, min/max) through out-pointer
        // arguments appended after the declared inputs.
        let output = match method.output {
            AllocatorTy::UsizePair => {
                args.push(usizep); // min
                args.push(usizep); // max
                None
            }
            AllocatorTy::Bang => None,
            AllocatorTy::ResultExcess => {
                args.push(i8p); // excess_ptr
                args.push(i8p); // err_ptr
                Some(i8p)
            }
            AllocatorTy::ResultPtr => {
                args.push(i8p); // err_ptr
                Some(i8p)
            }
            AllocatorTy::ResultUnit => Some(i8),
            AllocatorTy::Unit => None,

            // These variants only make sense as argument types.
            AllocatorTy::AllocErr |
            AllocatorTy::Layout |
            AllocatorTy::LayoutRef |
            AllocatorTy::Ptr => panic!("invalid allocator output"),
        };

        // Declare both the `__rust_*` entry point and the callee it
        // forwards to; both share the exact same function type.
        let ty = llvm::LLVMFunctionType(output.unwrap_or(void),
                                        args.as_ptr(),
                                        args.len() as c_uint,
                                        False);
        let name = CString::new(format!("__rust_{}", method.name)).unwrap();
        let llfn = llvm::LLVMRustGetOrInsertFunction(llmod,
                                                     name.as_ptr(),
                                                     ty);
        let callee = CString::new(kind.fn_name(method.name)).unwrap();
        let callee = llvm::LLVMRustGetOrInsertFunction(llmod,
                                                       callee.as_ptr(),
                                                       ty);

        // Build a single-block body that forwards every parameter
        // verbatim and marks the call as a tail call.
        let llbb = llvm::LLVMAppendBasicBlockInContext(llcx,
                                                       llfn,
                                                       "entry\0".as_ptr() as *const _);
        let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
        llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
        let args = args.iter().enumerate().map(|(i, _)| {
            llvm::LLVMGetParam(llfn, i as c_uint)
        }).collect::<Vec<_>>();
        let ret = llvm::LLVMRustBuildCall(llbuilder,
                                          callee,
                                          args.as_ptr(),
                                          args.len() as c_uint,
                                          ptr::null_mut(),
                                          "\0".as_ptr() as *const _);
        llvm::LLVMSetTailCall(ret, True);
        if output.is_some() {
            llvm::LLVMBuildRet(llbuilder, ret);
        } else {
            llvm::LLVMBuildRetVoid(llbuilder);
        }
        llvm::LLVMDisposeBuilder(llbuilder);
    }
}

View File

@ -55,6 +55,10 @@ pub const METADATA_MODULE_NAME: &'static str = "crate.metadata";
/// match up with `METADATA_MODULE_NAME`.
pub const METADATA_OBJ_NAME: &'static str = "crate.metadata.o";
// same as for metadata above, but for allocator shim
pub const ALLOCATOR_MODULE_NAME: &'static str = "crate.allocator";
pub const ALLOCATOR_OBJ_NAME: &'static str = "crate.allocator.o";
// RLIB LLVM-BYTECODE OBJECT LAYOUT
// Version 1
// Bytes Data
@ -240,6 +244,9 @@ pub fn link_binary(sess: &Session,
}
}
remove(sess, &outputs.with_extension(METADATA_OBJ_NAME));
if trans.allocator_module.is_some() {
remove(sess, &outputs.with_extension(ALLOCATOR_OBJ_NAME));
}
}
out_filenames
@ -417,11 +424,21 @@ fn link_binary_output(sess: &Session,
let out_filename = out_filename(sess, crate_type, outputs, crate_name);
match crate_type {
config::CrateTypeRlib => {
link_rlib(sess, Some(trans), &objects, &out_filename,
link_rlib(sess,
trans,
RlibFlavor::Normal,
&objects,
outputs,
&out_filename,
tmpdir.path()).build();
}
config::CrateTypeStaticlib => {
link_staticlib(sess, &objects, &out_filename, tmpdir.path());
link_staticlib(sess,
trans,
outputs,
&objects,
&out_filename,
tmpdir.path());
}
_ => {
link_natively(sess, crate_type, &objects, &out_filename, trans,
@ -477,6 +494,11 @@ fn emit_metadata<'a>(sess: &'a Session, trans: &CrateTranslation, out_filename:
}
}
enum RlibFlavor {
Normal,
StaticlibBase,
}
// Create an 'rlib'
//
// An rlib in its current incarnation is essentially a renamed .a file. The
@ -484,8 +506,10 @@ fn emit_metadata<'a>(sess: &'a Session, trans: &CrateTranslation, out_filename:
// all of the object files from native libraries. This is done by unzipping
// native libraries and inserting all of the contents into this archive.
fn link_rlib<'a>(sess: &'a Session,
trans: Option<&CrateTranslation>, // None == no metadata/bytecode
trans: &CrateTranslation,
flavor: RlibFlavor,
objects: &[PathBuf],
outputs: &OutputFilenames,
out_filename: &Path,
tmpdir: &Path) -> ArchiveBuilder<'a> {
info!("preparing rlib from {:?} to {:?}", objects, out_filename);
@ -546,8 +570,8 @@ fn link_rlib<'a>(sess: &'a Session,
//
// Basically, all this means is that this code should not move above the
// code above.
match trans {
Some(trans) => {
match flavor {
RlibFlavor::Normal => {
// Instead of putting the metadata in an object file section, rlibs
// contain the metadata in a separate file. We use a temp directory
// here so concurrent builds in the same directory don't try to use
@ -620,7 +644,11 @@ fn link_rlib<'a>(sess: &'a Session,
}
}
None => {}
RlibFlavor::StaticlibBase => {
if trans.allocator_module.is_some() {
ab.add_file(&outputs.with_extension(ALLOCATOR_OBJ_NAME));
}
}
}
ab
@ -672,9 +700,19 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write,
// There's no need to include metadata in a static archive, so ensure to not
// link in the metadata object file (and also don't prepare the archive with a
// metadata file).
fn link_staticlib(sess: &Session, objects: &[PathBuf], out_filename: &Path,
fn link_staticlib(sess: &Session,
trans: &CrateTranslation,
outputs: &OutputFilenames,
objects: &[PathBuf],
out_filename: &Path,
tempdir: &Path) {
let mut ab = link_rlib(sess, None, objects, out_filename, tempdir);
let mut ab = link_rlib(sess,
trans,
RlibFlavor::StaticlibBase,
objects,
outputs,
out_filename,
tempdir);
let mut all_native_libs = vec![];
let res = each_linked_rlib(sess, &mut |cnum, path| {
@ -944,6 +982,10 @@ fn link_args(cmd: &mut Linker,
cmd.add_object(&outputs.with_extension(METADATA_OBJ_NAME));
}
if trans.allocator_module.is_some() {
cmd.add_object(&outputs.with_extension(ALLOCATOR_OBJ_NAME));
}
// Try to strip as much out of the generated object by removing unused
// sections if possible. See more comments in linker.rs
if !sess.opts.cg.link_dead_code {

View File

@ -92,7 +92,6 @@ impl ExportedSymbols {
// Down below we'll hardwire all of the symbols to the `Rust` export
// level instead.
let special_runtime_crate =
scx.tcx().is_allocator(cnum.as_def_id()) ||
scx.tcx().is_panic_runtime(cnum.as_def_id()) ||
scx.sess().cstore.is_compiler_builtins(cnum);

View File

@ -644,6 +644,7 @@ pub fn run_passes(sess: &Session,
let mut modules_config = ModuleConfig::new(tm, sess.opts.cg.passes.clone());
let mut metadata_config = ModuleConfig::new(tm, vec![]);
let mut allocator_config = ModuleConfig::new(tm, vec![]);
if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer {
match *sanitizer {
@ -674,6 +675,7 @@ pub fn run_passes(sess: &Session,
modules_config.emit_bc = true;
modules_config.emit_lto_bc = true;
metadata_config.emit_bc = true;
allocator_config.emit_bc = true;
}
// Emit bitcode files for the crate if we're emitting an rlib.
@ -699,6 +701,7 @@ pub fn run_passes(sess: &Session,
// in this case we still want the metadata object file.
if !sess.opts.output_types.contains_key(&OutputType::Assembly) {
metadata_config.emit_obj = true;
allocator_config.emit_obj = true;
}
}
OutputType::Object => { modules_config.emit_obj = true; }
@ -706,6 +709,7 @@ pub fn run_passes(sess: &Session,
OutputType::Exe => {
modules_config.emit_obj = true;
metadata_config.emit_obj = true;
allocator_config.emit_obj = true;
},
OutputType::Mir => {}
OutputType::DepInfo => {}
@ -714,6 +718,7 @@ pub fn run_passes(sess: &Session,
modules_config.set_flags(sess, trans);
metadata_config.set_flags(sess, trans);
allocator_config.set_flags(sess, trans);
// Populate a buffer with a list of codegen threads. Items are processed in
@ -729,6 +734,14 @@ pub fn run_passes(sess: &Session,
work_items.push(work);
}
if let Some(allocator) = trans.allocator_module.clone() {
let work = build_work_item(sess,
allocator,
allocator_config.clone(),
crate_output.clone());
work_items.push(work);
}
for mtrans in trans.modules.iter() {
let work = build_work_item(sess,
mtrans.clone(),
@ -905,6 +918,13 @@ pub fn run_passes(sess: &Session,
Some(&trans.metadata_module.name));
remove(sess, &path);
}
if allocator_config.emit_bc && !user_wants_bitcode {
if let Some(ref module) = trans.allocator_module {
let path = crate_output.temp_path(OutputType::Bitcode,
Some(&module.name));
remove(sess, &path);
}
}
}
// We leave the following files around by default:

View File

@ -47,6 +47,7 @@ use rustc::session::config::{self, NoDebugInfo, OutputFilenames};
use rustc::session::Session;
use rustc_incremental::IncrementalHashesMap;
use abi;
use allocator;
use mir::lvalue::LvalueRef;
use attributes;
use builder::Builder;
@ -1086,8 +1087,10 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
llmod: metadata_llmod,
}),
};
let no_builtins = attr::contains_name(&krate.attrs, "no_builtins");
// Skip crate items and just output metadata in -Z no-trans mode.
if tcx.sess.opts.debugging_opts.no_trans ||
!tcx.sess.opts.output_types.should_trans() {
@ -1097,6 +1100,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
crate_name: tcx.crate_name(LOCAL_CRATE),
modules: vec![],
metadata_module: metadata_module,
allocator_module: None,
link: link_meta,
metadata: metadata,
exported_symbols: empty_exported_symbols,
@ -1296,6 +1300,41 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
create_imps(sess, &llvm_modules);
}
// Translate an allocator shim, if any
//
// If LTO is enabled and we've got some previous LLVM module we translated
// above, then we can just translate directly into that LLVM module. If not,
// however, we need to create a separate module and trans into that. Note
// that the separate translation is critical for the standard library where
// the rlib's object file doesn't have allocator functions but the dylib
// links in an object file that has allocator functions. When we're
// compiling a final LTO artifact, though, there's no need to worry about
// this as we're not working with this dual "rlib/dylib" functionality.
let allocator_module = tcx.sess.allocator_kind.get().and_then(|kind| unsafe {
if sess.lto() && llvm_modules.len() > 0 {
time(tcx.sess.time_passes(), "write allocator module", || {
allocator::trans(tcx, &llvm_modules[0], kind)
});
None
} else {
let (llcx, llmod) =
context::create_context_and_module(tcx.sess, "allocator");
let modules = ModuleLlvm {
llmod: llmod,
llcx: llcx,
};
time(tcx.sess.time_passes(), "write allocator module", || {
allocator::trans(tcx, &modules, kind)
});
Some(ModuleTranslation {
name: link::ALLOCATOR_MODULE_NAME.to_string(),
symbol_name_hash: 0, // we always rebuild allocator shims
source: ModuleSource::Translated(modules),
})
}
});
let linker_info = LinkerInfo::new(&shared_ccx, &exported_symbols);
let subsystem = attr::first_attr_value_str_by_name(&krate.attrs,
@ -1313,6 +1352,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
crate_name: tcx.crate_name(LOCAL_CRATE),
modules: modules,
metadata_module: metadata_module,
allocator_module: allocator_module,
link: link_meta,
metadata: metadata,
exported_symbols: exported_symbols,

View File

@ -43,6 +43,7 @@ extern crate crossbeam;
extern crate libc;
extern crate owning_ref;
#[macro_use] extern crate rustc;
extern crate rustc_allocator;
extern crate rustc_back;
extern crate rustc_data_structures;
extern crate rustc_incremental;
@ -84,6 +85,7 @@ mod diagnostics;
mod abi;
mod adt;
mod allocator;
mod asm;
mod assert_module_sources;
mod attributes;
@ -163,6 +165,7 @@ pub struct CrateTranslation {
pub crate_name: Symbol,
pub modules: Vec<ModuleTranslation>,
pub metadata_module: ModuleTranslation,
pub allocator_module: Option<ModuleTranslation>,
pub link: rustc::middle::cstore::LinkMeta,
pub metadata: rustc::middle::cstore::EncodedMetadata,
pub exported_symbols: back::symbol_export::ExportedSymbols,

View File

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use alloc::heap::{allocate, deallocate};
use alloc::heap::{Heap, Alloc, Layout};
use cmp;
use hash::{BuildHasher, Hash, Hasher};
@ -781,10 +781,8 @@ impl<K, V> RawTable<K, V> {
.expect("capacity overflow"),
"capacity overflow");
let buffer = allocate(size, alignment);
if buffer.is_null() {
::alloc::oom()
}
let buffer = Heap.alloc(Layout::from_size_align(size, alignment).unwrap())
.unwrap_or_else(|e| Heap.oom(e));
let hashes = buffer.offset(hash_offset as isize) as *mut HashUint;
@ -1193,7 +1191,8 @@ unsafe impl<#[may_dangle] K, #[may_dangle] V> Drop for RawTable<K, V> {
debug_assert!(!oflo, "should be impossible");
unsafe {
deallocate(self.hashes.ptr() as *mut u8, size, align);
Heap.dealloc(self.hashes.ptr() as *mut u8,
Layout::from_size_align(size, align).unwrap());
// Remember how everything was allocated out of one buffer
// during initialization? We only need one call to free here.
}

View File

@ -224,7 +224,7 @@ impl Error for ! {
#[unstable(feature = "allocator_api",
reason = "the precise API and guarantees it provides may be tweaked.",
issue = "27700")]
issue = "32838")]
impl Error for allocator::AllocErr {
fn description(&self) -> &str {
allocator::AllocErr::description(self)
@ -233,7 +233,7 @@ impl Error for allocator::AllocErr {
#[unstable(feature = "allocator_api",
reason = "the precise API and guarantees it provides may be tweaked.",
issue = "27700")]
issue = "32838")]
impl Error for allocator::CannotReallocInPlace {
fn description(&self) -> &str {
allocator::CannotReallocInPlace::description(self)

165
src/libstd/heap.rs Normal file
View File

@ -0,0 +1,165 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! dox
#![unstable(issue = "32838", feature = "allocator_api")]
pub use alloc::heap::{Heap, Alloc, Layout, Excess, CannotReallocInPlace, AllocErr};
#[cfg(not(stage0))]
pub use alloc_system::System;
#[cfg(all(not(stage0), not(test)))]
#[doc(hidden)]
pub mod __default_lib_allocator {
    //! The default-library allocator shims: each `__rdl_*` symbol forwards
    //! to the `System` allocator. The compiler-generated allocator shim
    //! redirects `__rust_*` calls here when no `#[global_allocator]` is
    //! present in the crate graph.
    use super::{System, Layout, Alloc, AllocErr};
    use ptr;

    // for symbol names src/librustc/middle/allocator.rs
    // for signatures src/librustc_allocator/lib.rs

    // linkage directives are provided as part of the current compiler allocator
    // ABI

    // NOTE(review): fallible entry points report failure by writing the
    // `AllocErr` through the raw `err` out-pointer and returning null —
    // this must stay in sync with the compiler-emitted shim's convention.

    /// Allocate `size` bytes at `align`; null on failure (error in `err`).
    #[no_mangle]
    pub unsafe extern fn __rdl_alloc(size: usize,
                                     align: usize,
                                     err: *mut u8) -> *mut u8 {
        let layout = Layout::from_size_align_unchecked(size, align);
        match System.alloc(layout) {
            Ok(p) => p,
            Err(e) => {
                ptr::write(err as *mut AllocErr, e);
                0 as *mut u8
            }
        }
    }

    /// Abort via the system allocator's OOM handler; `err` points at an
    /// `AllocErr` previously written by one of the shims above.
    #[no_mangle]
    pub unsafe extern fn __rdl_oom(err: *const u8) -> ! {
        System.oom((*(err as *const AllocErr)).clone())
    }

    /// Deallocate `ptr`, which was allocated with the given size/align.
    #[no_mangle]
    pub unsafe extern fn __rdl_dealloc(ptr: *mut u8,
                                       size: usize,
                                       align: usize) {
        System.dealloc(ptr, Layout::from_size_align_unchecked(size, align))
    }

    /// Write the usable size range for `layout` through the `min`/`max`
    /// out-pointers.
    #[no_mangle]
    pub unsafe extern fn __rdl_usable_size(layout: *const u8,
                                           min: *mut usize,
                                           max: *mut usize) {
        let pair = System.usable_size(&*(layout as *const Layout));
        *min = pair.0;
        *max = pair.1;
    }

    /// Reallocate from the old layout to the new; null on failure (error
    /// in `err`).
    #[no_mangle]
    pub unsafe extern fn __rdl_realloc(ptr: *mut u8,
                                       old_size: usize,
                                       old_align: usize,
                                       new_size: usize,
                                       new_align: usize,
                                       err: *mut u8) -> *mut u8 {
        let old_layout = Layout::from_size_align_unchecked(old_size, old_align);
        let new_layout = Layout::from_size_align_unchecked(new_size, new_align);
        match System.realloc(ptr, old_layout, new_layout) {
            Ok(p) => p,
            Err(e) => {
                ptr::write(err as *mut AllocErr, e);
                0 as *mut u8
            }
        }
    }

    /// Allocate zeroed memory; null on failure (error in `err`).
    #[no_mangle]
    pub unsafe extern fn __rdl_alloc_zeroed(size: usize,
                                            align: usize,
                                            err: *mut u8) -> *mut u8 {
        let layout = Layout::from_size_align_unchecked(size, align);
        match System.alloc_zeroed(layout) {
            Ok(p) => p,
            Err(e) => {
                ptr::write(err as *mut AllocErr, e);
                0 as *mut u8
            }
        }
    }

    /// Allocate with excess capacity: the actual usable size is written
    /// through `excess`; null on failure (error in `err`).
    #[no_mangle]
    pub unsafe extern fn __rdl_alloc_excess(size: usize,
                                            align: usize,
                                            excess: *mut usize,
                                            err: *mut u8) -> *mut u8 {
        let layout = Layout::from_size_align_unchecked(size, align);
        match System.alloc_excess(layout) {
            Ok(p) => {
                *excess = p.1;
                p.0
            }
            Err(e) => {
                ptr::write(err as *mut AllocErr, e);
                0 as *mut u8
            }
        }
    }

    /// Reallocate with excess capacity (written through `excess`); null on
    /// failure (error in `err`).
    #[no_mangle]
    pub unsafe extern fn __rdl_realloc_excess(ptr: *mut u8,
                                              old_size: usize,
                                              old_align: usize,
                                              new_size: usize,
                                              new_align: usize,
                                              excess: *mut usize,
                                              err: *mut u8) -> *mut u8 {
        let old_layout = Layout::from_size_align_unchecked(old_size, old_align);
        let new_layout = Layout::from_size_align_unchecked(new_size, new_align);
        match System.realloc_excess(ptr, old_layout, new_layout) {
            Ok(p) => {
                *excess = p.1;
                p.0
            }
            Err(e) => {
                ptr::write(err as *mut AllocErr, e);
                0 as *mut u8
            }
        }
    }

    /// Try to grow the allocation in place; returns 1 on success, 0 on
    /// failure (boolean encoded as u8 for the C ABI).
    #[no_mangle]
    pub unsafe extern fn __rdl_grow_in_place(ptr: *mut u8,
                                             old_size: usize,
                                             old_align: usize,
                                             new_size: usize,
                                             new_align: usize) -> u8 {
        let old_layout = Layout::from_size_align_unchecked(old_size, old_align);
        let new_layout = Layout::from_size_align_unchecked(new_size, new_align);
        match System.grow_in_place(ptr, old_layout, new_layout) {
            Ok(()) => 1,
            Err(_) => 0,
        }
    }

    /// Try to shrink the allocation in place; returns 1 on success, 0 on
    /// failure (boolean encoded as u8 for the C ABI).
    #[no_mangle]
    pub unsafe extern fn __rdl_shrink_in_place(ptr: *mut u8,
                                               old_size: usize,
                                               old_align: usize,
                                               new_size: usize,
                                               new_align: usize) -> u8 {
        let old_layout = Layout::from_size_align_unchecked(old_size, old_align);
        let new_layout = Layout::from_size_align_unchecked(new_size, new_align);
        match System.shrink_in_place(ptr, old_layout, new_layout) {
            Ok(()) => 1,
            Err(_) => 0,
        }
    }
}

View File

@ -230,11 +230,6 @@
// Tell the compiler to link to either panic_abort or panic_unwind
#![needs_panic_runtime]
// Always use alloc_system during stage0 since we don't know if the alloc_*
// crate the stage0 compiler will pick by default is available (most
// obviously, if the user has disabled jemalloc in `./configure`).
#![cfg_attr(any(stage0, feature = "force_alloc_system"), feature(alloc_system))]
// Turn warnings into errors, but only after stage0, where it can be useful for
// code to emit warnings during language transitions
#![deny(warnings)]
@ -246,6 +241,8 @@
// compiler details that will never be stable
#![feature(alloc)]
#![feature(allocator_api)]
#![feature(alloc_system)]
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(asm)]
#![feature(associated_consts)]
@ -322,6 +319,8 @@
#![cfg_attr(test, feature(update_panic_count))]
#![cfg_attr(test, feature(float_bits_conv))]
#![cfg_attr(not(stage0), default_lib_allocator)]
// Explicitly import the prelude. The compiler uses this same unstable attribute
// to import the prelude implicitly when building crates that depend on std.
#[prelude_import]
@ -342,15 +341,13 @@ extern crate core as __core;
#[macro_use]
#[macro_reexport(vec, format)]
extern crate alloc;
extern crate alloc_system;
extern crate std_unicode;
extern crate libc;
// We always need an unwinder currently for backtraces
extern crate unwind;
#[cfg(any(stage0, feature = "force_alloc_system"))]
extern crate alloc_system;
// compiler-rt intrinsics
extern crate compiler_builtins;
@ -465,6 +462,7 @@ pub mod path;
pub mod process;
pub mod sync;
pub mod time;
pub mod heap;
// Platform-abstraction modules
#[macro_use]

View File

@ -59,8 +59,6 @@ pub mod stdio;
#[cfg(not(test))]
pub fn init() {
use alloc::oom;
// By default, some platforms will send a *signal* when an EPIPE error
// would otherwise be delivered. This runtime doesn't install a SIGPIPE
// handler, causing it to kill the program, which isn't exactly what we
@ -72,24 +70,6 @@ pub fn init() {
reset_sigpipe();
}
oom::set_oom_handler(oom_handler);
// A nicer handler for out-of-memory situations than the default one. This
// one prints a message to stderr before aborting. It is critical that this
// code does not allocate any memory since we are in an OOM situation. Any
// errors are ignored while printing since there's nothing we can do about
// them and we are about to exit anyways.
fn oom_handler() -> ! {
use intrinsics;
let msg = "fatal runtime error: out of memory\n";
unsafe {
libc::write(libc::STDERR_FILENO,
msg.as_ptr() as *const libc::c_void,
msg.len());
intrinsics::abort();
}
}
#[cfg(not(any(target_os = "nacl", target_os = "emscripten", target_os="fuchsia")))]
unsafe fn reset_sigpipe() {
assert!(signal(libc::SIGPIPE, libc::SIG_IGN) != libc::SIG_ERR);

View File

@ -47,24 +47,6 @@ pub mod stdio;
#[cfg(not(test))]
pub fn init() {
::alloc::oom::set_oom_handler(oom_handler);
// See comment in sys/unix/mod.rs
fn oom_handler() -> ! {
use intrinsics;
use ptr;
let msg = "fatal runtime error: out of memory\n";
unsafe {
// WriteFile silently fails if it is passed an invalid handle, so
// there is no need to check the result of GetStdHandle.
c::WriteFile(c::GetStdHandle(c::STD_ERROR_HANDLE),
msg.as_ptr() as c::LPVOID,
msg.len() as c::DWORD,
ptr::null_mut(),
ptr::null_mut());
intrinsics::abort();
}
}
}
pub fn decode_error_kind(errno: i32) -> ErrorKind {

View File

@ -249,6 +249,8 @@ pub trait AstBuilder {
name: Ident, attrs: Vec<ast::Attribute>,
items: Vec<P<ast::Item>>) -> P<ast::Item>;
fn item_extern_crate(&self, span: Span, name: Ident) -> P<ast::Item>;
fn item_static(&self,
span: Span,
name: Ident,
@ -1095,6 +1097,10 @@ impl<'a> AstBuilder for ExtCtxt<'a> {
)
}
fn item_extern_crate(&self, span: Span, name: Ident) -> P<ast::Item> {
self.item(span, name, Vec::new(), ast::ItemKind::ExternCrate(None))
}
fn item_static(&self,
span: Span,
name: Ident,

View File

@ -137,7 +137,6 @@ declare_features! (
(active, placement_in_syntax, "1.0.0", Some(27779)),
(active, unboxed_closures, "1.0.0", Some(29625)),
(active, allocator, "1.0.0", Some(27389)),
(active, fundamental, "1.0.0", Some(29635)),
(active, main, "1.0.0", Some(29634)),
(active, needs_allocator, "1.4.0", Some(27389)),
@ -360,6 +359,10 @@ declare_features! (
// Allows unsized tuple coercion.
(active, unsized_tuple_coercion, "1.20.0", Some(42877)),
// global allocators and their internals
(active, global_allocator, "1.20.0", None),
(active, allocator_internals, "1.20.0", None),
);
declare_features! (
@ -379,6 +382,7 @@ declare_features! (
// rustc internal
(removed, unmarked_api, "1.0.0", None),
(removed, pushpop_unsafe, "1.2.0", None),
(removed, allocator, "1.0.0", None),
);
declare_features! (
@ -585,16 +589,22 @@ pub const BUILTIN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeG
"the `#[rustc_on_unimplemented]` attribute \
is an experimental feature",
cfg_fn!(on_unimplemented))),
("allocator", Whitelisted, Gated(Stability::Unstable,
"allocator",
"the `#[allocator]` attribute is an experimental feature",
cfg_fn!(allocator))),
("global_allocator", Normal, Gated(Stability::Unstable,
"global_allocator",
"the `#[global_allocator]` attribute is \
an experimental feature",
cfg_fn!(global_allocator))),
("default_lib_allocator", Whitelisted, Gated(Stability::Unstable,
"allocator_internals",
"the `#[default_lib_allocator]` \
attribute is an experimental feature",
cfg_fn!(allocator_internals))),
("needs_allocator", Normal, Gated(Stability::Unstable,
"needs_allocator",
"allocator_internals",
"the `#[needs_allocator]` \
attribute is an experimental \
feature",
cfg_fn!(needs_allocator))),
cfg_fn!(allocator_internals))),
("panic_runtime", Whitelisted, Gated(Stability::Unstable,
"panic_runtime",
"the `#[panic_runtime]` attribute is \

View File

@ -1,4 +1,8 @@
# If this file is modified, then llvm will be (optionally) cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots then the contents should be changed so git updates the mtime.
2017-06-27

View File

@ -11,7 +11,7 @@
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
#![feature(allocator)]
#![feature(custom_attribute)]
pub struct S {
_field: [i64; 4],

View File

@ -1,21 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: `allocator3` cannot depend on a crate that needs an allocator
// aux-build:needs_allocator.rs
// aux-build:allocator3.rs
// The needs_allocator crate is a dependency of the allocator crate allocator3,
// which is not allowed
extern crate allocator3;
fn main() {
}

View File

@ -1,41 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-musl no dylibs
// aux-build:allocator-dylib.rs
// aux-build:allocator1.rs
// no-prefer-dynamic
// error-pattern: cannot link together two allocators
// Verify that the allocator for statically linked dynamic libraries is the
// system allocator. Do this by linking in jemalloc and making sure that we get
// an error.
// ignore-emscripten FIXME: What "other allocator" should we use for emcc?
#![feature(alloc_jemalloc)]
extern crate allocator_dylib;
// The main purpose of this test is to ensure that `alloc_jemalloc` **fails**
// here (specifically the jemalloc allocator), but currently jemalloc is
// disabled on quite a few platforms (bsds, emscripten, msvc, etc). To ensure
// that this just passes on those platforms we link in some other allocator to
// ensure we get the same error.
//
// So long as we CI linux/macOS we should be good.
#[cfg(any(target_os = "linux", target_os = "macos"))]
extern crate alloc_jemalloc;
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
extern crate allocator1;
fn main() {
allocator_dylib::foo();
}

View File

@ -1,41 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-musl no dylibs
// aux-build:allocator-dylib2.rs
// aux-build:allocator1.rs
// error-pattern: cannot link together two allocators
// Ensure that rust dynamic libraries use jemalloc as their allocator, verifying
// by linking in the system allocator here and ensuring that we get a complaint.
// ignore-emscripten FIXME: What "other allocator" is correct for emscripten?
#![feature(alloc_system)]
extern crate allocator_dylib2;
// The main purpose of this test is to ensure that `alloc_system` **fails**
// here (specifically the system allocator), but currently system is
// disabled on quite a few platforms (bsds, emscripten, msvc, etc). To ensure
// that this just passes on those platforms we link in some other allocator to
// ensure we get the same error.
//
// So long as we CI linux/macOS we should be good.
#[cfg(any(all(target_os = "linux", any(target_arch = "x86", target_arch = "x86_64")),
target_os = "macos"))]
extern crate alloc_system;
#[cfg(not(any(all(target_os = "linux", any(target_arch = "x86", target_arch = "x86_64")),
target_os = "macos")))]
extern crate allocator1;
fn main() {
allocator_dylib2::foo();
}

View File

@ -1,4 +1,4 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
@ -10,10 +10,10 @@
// no-prefer-dynamic
#![feature(allocator)]
#![no_std]
#![allocator]
#![feature(global_allocator, allocator_api)]
#![crate_type = "rlib"]
extern crate needs_allocator;
use std::heap::System;
#[global_allocator]
static A: System = System;

View File

@ -1,4 +1,4 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
@ -10,7 +10,10 @@
// no-prefer-dynamic
#![feature(allocator)]
#![allocator]
#![feature(global_allocator, allocator_api)]
#![crate_type = "rlib"]
#![no_std]
use std::heap::System;
#[global_allocator]
static A: System = System;

View File

@ -0,0 +1,16 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(global_allocator)]
#[global_allocator]
fn foo() {} //~ ERROR: allocators must be statics
fn main() {}

View File

@ -0,0 +1,26 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(global_allocator, heap_api)]
#[global_allocator]
static A: usize = 0;
//~^ the trait bound `&usize:
//~| the trait bound `&usize:
//~| the trait bound `&usize:
//~| the trait bound `&usize:
//~| the trait bound `&usize:
//~| the trait bound `&usize:
//~| the trait bound `&usize:
//~| the trait bound `&usize:
//~| the trait bound `&usize:
//~| the trait bound `&usize:
fn main() {}

View File

@ -1,4 +1,4 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
@ -8,12 +8,15 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: cannot link together two allocators
#![feature(global_allocator, allocator_api)]
// aux-build:allocator1.rs
// aux-build:allocator2.rs
use std::heap::System;
extern crate allocator1;
extern crate allocator2;
#[global_allocator]
static A: System = System;
#[global_allocator]
static B: System = System;
//~^ ERROR: cannot define more than one #[global_allocator]
fn main() {}

View File

@ -0,0 +1,25 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:system-allocator.rs
// no-prefer-dynamic
// error-pattern: the #[global_allocator] in
#![feature(global_allocator, allocator_api)]
extern crate system_allocator;
use std::heap::System;
#[global_allocator]
static A: System = System;
fn main() {}

View File

@ -1,4 +1,4 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
@ -8,12 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:system-allocator.rs
// aux-build:system-allocator2.rs
// no-prefer-dynamic
// error-pattern: the #[global_allocator] in
#![feature(alloc_system)]
#![feature(global_allocator)]
extern crate alloc_system;
extern crate system_allocator;
extern crate system_allocator2;
fn main() {
println!("{:?}", Box::new(3));
}
fn main() {}

View File

@ -1,4 +1,4 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
@ -8,8 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-prefer-dynamic
#![default_lib_allocator] //~ ERROR: attribute is an experimental feature
#![crate_type = "dylib"]
fn main() {}
pub fn foo() {}

View File

@ -1,4 +1,4 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
@ -8,9 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-prefer-dynamic
#[global_allocator] //~ ERROR: attribute is an experimental feature
static A: usize = 0;
#![feature(allocator)]
#![allocator]
#![crate_type = "rlib"]
#![no_std]
fn main() {}

View File

@ -1,21 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: cannot link together two allocators: allocator1 and allocator2
// aux-build:allocator1.rs
// aux-build:allocator2.rs
// Make sure we can't link together two explicit allocators.
extern crate allocator1;
extern crate allocator2;
fn main() {}

View File

@ -1,23 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:allocator1.rs
// error-pattern: cannot link together two allocators
// ignore-musl no dylibs on musl yet
// ignore-emscripten
// We're linking std dynamically (via -C prefer-dynamic for this test) which
// has an allocator and then we're also linking in a new allocator (allocator1)
// and this should be an error
extern crate allocator1;
fn main() {
}

View File

@ -4,9 +4,7 @@ ifdef IS_MSVC
# FIXME(#27979)
all:
else
all:
$(RUSTC) foo.rs
$(RUSTC) bar.rs
all: $(call STATICLIB,foo) $(call STATICLIB,bar)
$(RUSTC) main.rs
$(call RUN,main)
endif

View File

@ -1,4 +1,4 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
@ -8,6 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allocator] //~ ERROR: experimental feature
extern void foo();
fn main() {}
void bar() {
foo();
}

View File

@ -1,25 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(lang_items, alloc_system, compiler_builtins_lib)]
#![crate_type = "dylib"]
#![no_std]
extern crate alloc_system;
extern crate compiler_builtins;
#[no_mangle]
pub extern fn bar() {}
#[lang = "eh_personality"] fn eh_personality() {}
#[lang = "eh_unwind_resume"] fn eh_unwind_resume() {}
#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} }
#[no_mangle] pub extern fn rust_eh_register_frames () {}
#[no_mangle] pub extern fn rust_eh_unregister_frames () {}

View File

@ -1,4 +1,4 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
@ -8,5 +8,4 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn foo() {}
void foo() {}

Some files were not shown because too many files have changed in this diff Show More