auto merge of #17095 : thestinger/rust/alloc, r=alexcrichton

Previously, some parts of this optimization were impossible because the
alignment passed to the free function was not correct. That was fully
fixed by #17012.

Closes #17092
bors 2014-09-10 09:20:39 +00:00
commit 8eee1b44b0
2 changed files with 76 additions and 35 deletions
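
The optimization itself is a one-comparison fast path: every allocator entry point checks the requested alignment against a per-architecture MIN_ALIGN constant and, when the platform allocator already guarantees that alignment, skips straight to the default malloc/free path. A minimal standalone sketch of the pattern (2014-era Rust, names mirroring the patch below):

    // The C allocator already returns memory aligned to MIN_ALIGN, so such
    // requests need no MALLOCX_ALIGN flag (jemalloc) and no posix_memalign
    // or _aligned_malloc detour (the unix/windows fallbacks).
    static MIN_ALIGN: uint = 16; // x86/x86_64; 8 on arm/mips/mipsel

    fn align_to_flags(align: uint) -> c_int {
        // For a power of two, trailing_zeros() is its log2 -- the encoding
        // jemalloc expects in the flags word; 0 means "default alignment".
        if align <= MIN_ALIGN { 0 } else { align.trailing_zeros() as c_int }
    }

Because `align` is derived from a type's alignment (e.g. `mem::min_align_of::<T>()`) at nearly every call site, the comparison is against a compile-time constant and folds away, which is what "the branch will be optimized out" in the new comment refers to.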

src/jemalloc (submodule)
@@ -1 +1 @@
-Subproject commit 024c67ad651e1a3ca228936c4cfb13a37329baf2
+Subproject commit aae04170ccbfeea620502106b581c3c216cd132a

src/liballoc/heap.rs

@@ -149,12 +149,24 @@ unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint,
     alloc as *mut u8
 }
 
+// The minimum alignment guaranteed by the architecture. This value is used to
+// add fast paths for low alignment values. In practice, the alignment is a
+// constant at the call site and the branch will be optimized out.
+#[cfg(target_arch = "arm")]
+#[cfg(target_arch = "mips")]
+#[cfg(target_arch = "mipsel")]
+static MIN_ALIGN: uint = 8;
+#[cfg(target_arch = "x86")]
+#[cfg(target_arch = "x86_64")]
+static MIN_ALIGN: uint = 16;
+
 #[cfg(jemalloc)]
 mod imp {
     use core::option::{None, Option};
     use core::ptr::{RawPtr, mut_null, null};
     use core::num::Int;
     use libc::{c_char, c_int, c_void, size_t};
+    use super::MIN_ALIGN;
 
     #[link(name = "jemalloc", kind = "static")]
     #[cfg(not(test))]
@@ -166,7 +178,10 @@ mod imp {
                       flags: c_int) -> *mut c_void;
         fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t,
                       flags: c_int) -> size_t;
+        #[cfg(stage0)]
         fn je_dallocx(ptr: *mut c_void, flags: c_int);
+        #[cfg(not(stage0))]
+        fn je_sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);
         fn je_nallocx(size: size_t, flags: c_int) -> size_t;
         fn je_malloc_stats_print(write_cb: Option<extern "C" fn(cbopaque: *mut c_void,
                                                                 *const c_char)>,
@@ -183,9 +198,15 @@ mod imp {
     #[inline(always)]
     fn mallocx_align(a: uint) -> c_int { a.trailing_zeros() as c_int }
 
+    #[inline(always)]
+    fn align_to_flags(align: uint) -> c_int {
+        if align <= MIN_ALIGN { 0 } else { mallocx_align(align) }
+    }
+
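    // Aside (not part of the patch): jemalloc's MALLOCX_ALIGN(a) flag
    // encodes log2(a) in the low bits of the flags word, and for a power of
    // two trailing_zeros() is exactly log2, so:
    //     assert_eq!(mallocx_align(16), 4);  // 16 == 0b10000
    //     assert_eq!(align_to_flags(8), 0);  // 8 <= MIN_ALIGN: default path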
     #[inline]
     pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 {
-        let ptr = je_mallocx(size as size_t, mallocx_align(align)) as *mut u8;
+        let flags = align_to_flags(align);
+        let ptr = je_mallocx(size as size_t, flags) as *mut u8;
         if ptr.is_null() {
             ::oom()
         }
@@ -195,8 +216,8 @@ mod imp {
     #[inline]
     pub unsafe fn reallocate(ptr: *mut u8, size: uint, align: uint,
                              _old_size: uint) -> *mut u8 {
-        let ptr = je_rallocx(ptr as *mut c_void, size as size_t,
-                             mallocx_align(align)) as *mut u8;
+        let flags = align_to_flags(align);
+        let ptr = je_rallocx(ptr as *mut c_void, size as size_t, flags) as *mut u8;
         if ptr.is_null() {
             ::oom()
         }
@@ -206,18 +227,28 @@ mod imp {
     #[inline]
     pub unsafe fn reallocate_inplace(ptr: *mut u8, size: uint, align: uint,
                                      _old_size: uint) -> bool {
-        je_xallocx(ptr as *mut c_void, size as size_t, 0,
-                   mallocx_align(align)) == size as size_t
+        let flags = align_to_flags(align);
+        je_xallocx(ptr as *mut c_void, size as size_t, 0, flags) == size as size_t
     }
 
     #[inline]
+    #[cfg(stage0)]
     pub unsafe fn deallocate(ptr: *mut u8, _size: uint, align: uint) {
-        je_dallocx(ptr as *mut c_void, mallocx_align(align))
+        let flags = align_to_flags(align);
+        je_dallocx(ptr as *mut c_void, flags)
     }
 
     #[inline]
+    #[cfg(not(stage0))]
+    pub unsafe fn deallocate(ptr: *mut u8, size: uint, align: uint) {
+        let flags = align_to_flags(align);
+        je_sdallocx(ptr as *mut c_void, size as size_t, flags)
+    }
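    // Aside (not part of the patch): je_sdallocx is jemalloc's sized
    // deallocation entry point -- given the original allocation size it can
    // compute the size class directly instead of loading it from allocator
    // metadata. That is only sound now that callers pass deallocate() the
    // same size and alignment they gave allocate():
    //     let p = allocate(32, 8);
    //     /* ... use p ... */
    //     deallocate(p, 32, 8); // size/align must match the allocation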
+
+    #[inline]
     pub fn usable_size(size: uint, align: uint) -> uint {
-        unsafe { je_nallocx(size as size_t, mallocx_align(align)) as uint }
+        let flags = align_to_flags(align);
+        unsafe { je_nallocx(size as size_t, flags) as uint }
     }
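    // Aside (not part of the patch): je_nallocx reports the size class a
    // request would be rounded up to, without allocating, so callers like
    // Vec can use the whole block as capacity. With stock jemalloc size
    // classes (..., 96, 112, 128, ...), usable_size(100, 8) would be 112,
    // though the exact values depend on the jemalloc build.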
 
     pub fn stats_print() {
@@ -234,6 +265,7 @@ mod imp {
     use core::ptr;
     use libc;
     use libc_heap;
+    use super::MIN_ALIGN;
 
     extern {
         fn posix_memalign(memptr: *mut *mut libc::c_void,
@@ -243,16 +275,7 @@ mod imp {
 
     #[inline]
     pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 {
-        // The posix_memalign manpage states
-        //
-        //      alignment [...] must be a power of and a multiple of
-        //      sizeof(void *)
-        //
-        // The `align` parameter to this function is the *minimum* alignment for
-        // a block of memory, so we special case everything under `*uint` to
-        // just pass it to malloc, which is guaranteed to align to at least the
-        // size of `*uint`.
-        if align < mem::size_of::<uint>() {
+        if align <= MIN_ALIGN {
             libc_heap::malloc_raw(size)
         } else {
             let mut out = 0 as *mut libc::c_void;
@@ -269,10 +292,14 @@ mod imp {
     #[inline]
     pub unsafe fn reallocate(ptr: *mut u8, size: uint, align: uint,
                              old_size: uint) -> *mut u8 {
+        if align <= MIN_ALIGN {
+            libc_heap::realloc_raw(ptr, size)
+        } else {
             let new_ptr = allocate(size, align);
             ptr::copy_memory(new_ptr, ptr as *const u8, cmp::min(size, old_size));
             deallocate(ptr, old_size, align);
-            return new_ptr;
+            new_ptr
+        }
     }
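    // Aside (not part of the patch): POSIX has no aligned realloc, so the
    // over-aligned branch grows by hand -- allocate a new block, copy
    // min(size, old_size) bytes, and free the old block with its original
    // size and alignment; realloc_raw() serves only the MIN_ALIGN fast path.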
 
     #[inline]
@@ -291,14 +318,16 @@ mod imp {
         size
     }
 
-    pub fn stats_print() {
-    }
+    pub fn stats_print() {}
 }
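// Aside (not part of the patch): the unix fast path leans on C malloc
// returning memory aligned for any built-in type, which covers MIN_ALIGN on
// these targets; only larger alignments need the posix_memalign slow path
// elided above, roughly:
//     let mut out = 0 as *mut libc::c_void;
//     let ret = posix_memalign(&mut out, align as libc::size_t,
//                              size as libc::size_t);
//     if ret != 0 { ::oom() }
//     out as *mut u8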
 
 #[cfg(not(jemalloc), windows)]
 mod imp {
     use libc::{c_void, size_t};
+    use libc;
+    use libc_heap;
     use core::ptr::RawPtr;
+    use super::MIN_ALIGN;
 
     extern {
         fn _aligned_malloc(size: size_t, align: size_t) -> *mut c_void;
@@ -309,16 +338,23 @@ mod imp {
     #[inline]
     pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 {
+        if align <= MIN_ALIGN {
+            libc_heap::malloc_raw(size)
+        } else {
             let ptr = _aligned_malloc(size as size_t, align as size_t);
             if ptr.is_null() {
                 ::oom();
             }
             ptr as *mut u8
+        }
     }
 
     #[inline]
     pub unsafe fn reallocate(ptr: *mut u8, size: uint, align: uint,
                              _old_size: uint) -> *mut u8 {
+        if align <= MIN_ALIGN {
+            libc_heap::realloc_raw(ptr, size)
+        } else {
             let ptr = _aligned_realloc(ptr as *mut c_void, size as size_t,
                                        align as size_t);
             if ptr.is_null() {
@@ -326,6 +362,7 @@ mod imp {
             }
             ptr as *mut u8
+        }
     }
 
     #[inline]
     pub unsafe fn reallocate_inplace(_ptr: *mut u8, _size: uint, _align: uint,
@@ -334,9 +371,13 @@ mod imp {
     }
 
     #[inline]
-    pub unsafe fn deallocate(ptr: *mut u8, _size: uint, _align: uint) {
+    pub unsafe fn deallocate(ptr: *mut u8, _size: uint, align: uint) {
+        if align <= MIN_ALIGN {
+            libc::free(ptr as *mut libc::c_void)
+        } else {
             _aligned_free(ptr as *mut c_void)
+        }
     }
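    // Aside (not part of the patch): allocate() and deallocate() must take
    // the same branch here, because the CRT tracks _aligned_malloc blocks
    // separately and free()/_aligned_free() cannot be mixed:
    //     let p = allocate(64, 32); // 32 > MIN_ALIGN -> _aligned_malloc
    //     deallocate(p, 64, 32);    // same comparison -> _aligned_free
    // Freeing with a different alignment class would corrupt the heap.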
 
     #[inline]
     pub fn usable_size(size: uint, _align: uint) -> uint {