auto merge of #14108 : thestinger/rust/jemalloc, r=huonw

bors 2014-05-11 11:21:44 -07:00
commit 20356e4cc3
2 changed files with 32 additions and 32 deletions
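Both files make the same substitution: `libc::malloc`/`libc::free` give way to the jemalloc-backed `std::rt::heap::allocate`/`deallocate`, which take the alignment explicitly and require the caller to hand back the original size and alignment when freeing. A minimal sketch of the new API as it is used in this diff (pre-1.0 syntax of the period; the surrounding `main` is just scaffolding and not part of the patch):

use std::mem::{min_align_of, size_of};
use std::rt::heap::{allocate, deallocate};

fn main() {
    unsafe {
        let size = 8 * size_of::<u64>();
        let align = min_align_of::<u64>();

        // Alignment is requested up front, so the old "is malloc aligned
        // enough?" assertion (the FIXME removed below) becomes unnecessary.
        let buf = allocate(size, align) as *mut u64;
        *buf = 42;
        assert_eq!(*buf, 42);

        // Unlike free(), deallocate() must be told the original size and
        // alignment; that is why the Drop impls below recompute them.
        deallocate(buf as *mut u8, size, align);
    }
}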

View File

@@ -30,8 +30,6 @@ use std::result::{Ok, Err};
 use std::slice::ImmutableVector;
 
 mod table {
-    extern crate libc;
-
     use std::clone::Clone;
     use std::cmp;
     use std::cmp::Eq;
@@ -42,10 +40,10 @@ mod table {
     use std::prelude::Drop;
     use std::ptr;
     use std::ptr::RawPtr;
-    use std::rt::libc_heap;
-    use std::intrinsics::{size_of, min_align_of, transmute};
-    use std::intrinsics::{move_val_init, set_memory};
+    use std::mem::{min_align_of, size_of};
+    use std::intrinsics::{move_val_init, set_memory, transmute};
     use std::iter::{Iterator, range_step_inclusive};
+    use std::rt::heap::{allocate, deallocate};
 
     static EMPTY_BUCKET: u64 = 0u64;
@@ -185,10 +183,6 @@ mod table {
         assert_eq!(round_up_to_next(5, 4), 8);
     }
 
-    fn has_alignment(n: uint, alignment: uint) -> bool {
-        round_up_to_next(n, alignment) == n
-    }
-
     // Returns a tuple of (minimum required malloc alignment, hash_offset,
     // key_offset, val_offset, array_size), from the start of a mallocated array.
     fn calculate_offsets(
@@ -243,12 +237,7 @@ mod table {
                                   keys_size, min_align_of::<K>(),
                                   vals_size, min_align_of::<V>());
 
-            let buffer = libc_heap::malloc_raw(size) as *mut u8;
-
-            // FIXME #13094: If malloc was not at as aligned as we expected,
-            // our offset calculations are just plain wrong. We could support
-            // any alignment if we switched from `malloc` to `posix_memalign`.
-            assert!(has_alignment(buffer as uint, malloc_alignment));
+            let buffer = allocate(size, malloc_alignment);
 
             let hashes = buffer.offset(hash_offset as int) as *mut u64;
            let keys = buffer.offset(keys_offset as int) as *mut K;
@@ -418,7 +407,7 @@ mod table {
     // modified to no longer assume this.
     #[test]
     fn can_alias_safehash_as_u64() {
-        unsafe { assert_eq!(size_of::<SafeHash>(), size_of::<u64>()) };
+        assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
     }
 
     pub struct Entries<'a, K, V> {
@@ -560,8 +549,15 @@ mod table {
            assert_eq!(self.size, 0);
 
+            let hashes_size = self.capacity * size_of::<u64>();
+            let keys_size = self.capacity * size_of::<K>();
+            let vals_size = self.capacity * size_of::<V>();
+            let (align, _, _, _, size) = calculate_offsets(hashes_size, min_align_of::<u64>(),
+                                                           keys_size, min_align_of::<K>(),
+                                                           vals_size, min_align_of::<V>());
+
            unsafe {
-                libc::free(self.hashes as *mut libc::c_void);
+                deallocate(self.hashes as *mut u8, size, align);
                // Remember how everything was allocated out of one buffer
                // during initialization? We only need one call to free here.
            }
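The comment above is the crux of the new `Drop` code: hashes, keys, and values all live in a single allocation, so freeing it through `deallocate` means recomputing the buffer's total size and alignment from the three array sizes. A hedged sketch of that kind of layout arithmetic follows; `layout3` is a hypothetical helper that only loosely mirrors `calculate_offsets`, and this bit-twiddling `round_up_to_next` may differ from the file's own implementation (it does satisfy the `round_up_to_next(5, 4) == 8` test shown above):

use std::cmp;

fn round_up_to_next(unrounded: uint, alignment: uint) -> uint {
    // `alignment` is assumed to be a power of two.
    (unrounded + alignment - 1) & !(alignment - 1)
}

fn layout3(hashes_size: uint, hashes_align: uint,
           keys_size: uint, keys_align: uint,
           vals_size: uint, vals_align: uint)
           -> (uint, uint, uint, uint) {
    // Hashes sit at offset 0; each later array starts where the previous one
    // ends, rounded up to its own alignment.
    let keys_offset = round_up_to_next(hashes_size, keys_align);
    let vals_offset = round_up_to_next(keys_offset + keys_size, vals_align);
    let size = vals_offset + vals_size;
    // The single allocation must satisfy the strictest of the three alignments.
    let align = cmp::max(hashes_align, cmp::max(keys_align, vals_align));
    (align, keys_offset, vals_offset, size)
}

fn main() {
    // e.g. capacity 8: u64 hashes, u32 keys, u8 values
    let (align, keys_offset, vals_offset, size) = layout3(8 * 8, 8, 8 * 4, 4, 8 * 1, 1);
    assert_eq!(align, 8);
    assert_eq!(keys_offset, 64);
    assert_eq!(vals_offset, 96);
    assert_eq!(size, 104);
}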

View File

@@ -51,8 +51,7 @@
 use clone::Clone;
 use iter::{range, Iterator};
 use kinds::Send;
-use libc;
-use mem;
+use mem::{forget, min_align_of, size_of, transmute};
 use ops::Drop;
 use option::{Option, Some, None};
 use owned::Box;
@@ -62,6 +61,7 @@ use slice::ImmutableVector;
 use sync::arc::UnsafeArc;
 use sync::atomics::{AtomicInt, AtomicPtr, SeqCst};
 use unstable::sync::Exclusive;
+use rt::heap::{allocate, deallocate};
 use vec::Vec;
 
 // Once the queue is less than 1/K full, then it will be downsized. Note that
@@ -229,7 +229,7 @@ impl<T: Send> Deque<T> {
        Deque {
            bottom: AtomicInt::new(0),
            top: AtomicInt::new(0),
-            array: AtomicPtr::new(unsafe { mem::transmute(buf) }),
+            array: AtomicPtr::new(unsafe { transmute(buf) }),
            pool: pool,
        }
    }
@@ -271,7 +271,7 @@ impl<T: Send> Deque<T> {
            return Some(data);
        } else {
            self.bottom.store(t + 1, SeqCst);
-            mem::forget(data); // someone else stole this value
+            forget(data); // someone else stole this value
            return None;
        }
    }
@@ -293,7 +293,7 @@ impl<T: Send> Deque<T> {
        if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
            Data(data)
        } else {
-            mem::forget(data); // someone else stole this value
+            forget(data); // someone else stole this value
            Abort
        }
    }
@@ -314,7 +314,7 @@ impl<T: Send> Deque<T> {
    // continue to be read after we flag this buffer for reclamation.
    unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer<T>,
                          buf: Buffer<T>) -> *mut Buffer<T> {
-        let newbuf: *mut Buffer<T> = mem::transmute(box buf);
+        let newbuf: *mut Buffer<T> = transmute(box buf);
        self.array.store(newbuf, SeqCst);
        let ss = (*newbuf).size();
        self.bottom.store(b + ss, SeqCst);
@@ -322,7 +322,7 @@ impl<T: Send> Deque<T> {
        if self.top.compare_and_swap(t, t + ss, SeqCst) != t {
            self.bottom.store(b, SeqCst);
        }
-        self.pool.free(mem::transmute(old));
+        self.pool.free(transmute(old));
        return newbuf;
    }
}
@@ -339,15 +339,19 @@ impl<T: Send> Drop for Deque<T> {
        for i in range(t, b) {
            let _: T = unsafe { (*a).get(i) };
        }
-        self.pool.free(unsafe { mem::transmute(a) });
+        self.pool.free(unsafe { transmute(a) });
    }
}
 
+#[inline]
+fn buffer_alloc_size<T>(log_size: int) -> uint {
+    (1 << log_size) * size_of::<T>()
+}
+
impl<T: Send> Buffer<T> {
    unsafe fn new(log_size: int) -> Buffer<T> {
-        let size = (1 << log_size) * mem::size_of::<T>();
-        let buffer = libc::malloc(size as libc::size_t);
-        assert!(!buffer.is_null());
+        let size = buffer_alloc_size::<T>(log_size);
+        let buffer = allocate(size, min_align_of::<T>());
        Buffer {
            storage: buffer as *T,
            log_size: log_size,
@@ -372,7 +376,7 @@ impl<T: Send> Buffer<T> {
    unsafe fn put(&mut self, i: int, t: T) {
        let ptr = self.storage.offset(i & self.mask());
        ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1);
-        mem::forget(t);
+        forget(t);
    }
 
    // Again, unsafe because this has incredibly dubious ownership violations.
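`put` stores through `self.storage.offset(i & self.mask())`: because the buffer's capacity is a power of two (`1 << log_size`, as `buffer_alloc_size` above makes explicit), masking by `capacity - 1` wraps the deque's ever-growing indices into the ring, which is presumably what `mask()` computes. A small standalone illustration of that indexing trick, with hypothetical values not taken from this file:

fn main() {
    let log_size = 3;
    let capacity: int = 1 << log_size; // 8 slots
    let mask = capacity - 1;           // 0b111
    // Ever-growing positions wrap into the 8 slots without a division.
    for i in range(0, 20) {
        assert_eq!(i & mask, i % capacity);
    }
}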
@@ -390,7 +394,8 @@ impl<T: Send> Buffer<T> {
impl<T: Send> Drop for Buffer<T> {
    fn drop(&mut self) {
        // It is assumed that all buffers are empty on drop.
-        unsafe { libc::free(self.storage as *mut libc::c_void) }
+        let size = buffer_alloc_size::<T>(self.log_size);
+        unsafe { deallocate(self.storage as *mut u8, size, min_align_of::<T>()) }
    }
}
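Unlike `free`, `deallocate` is only correct when given the same size and alignment that were passed to `allocate`; this patch keeps the two sides in sync by deriving both from `log_size` through the shared `buffer_alloc_size` helper, in `Buffer::new` and in the `Drop` impl above. A hedged sketch of the same discipline with hypothetical wrapper functions (not part of this patch), sized by element count instead of a log:

use std::mem::{min_align_of, size_of};
use std::rt::heap::{allocate, deallocate};

// Both sides compute size and alignment from the same inputs, so an
// allocation can never be freed with mismatched parameters.
unsafe fn alloc_array<T>(len: uint) -> *mut T {
    allocate(len * size_of::<T>(), min_align_of::<T>()) as *mut T
}

unsafe fn free_array<T>(ptr: *mut T, len: uint) {
    deallocate(ptr as *mut u8, len * size_of::<T>(), min_align_of::<T>());
}

fn main() {
    unsafe {
        let p = alloc_array::<u32>(16);
        *p = 7;
        assert_eq!(*p, 7);
        free_array(p, 16);
    }
}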
@@ -606,8 +611,7 @@ mod tests {
            let s = s.clone();
            let unique_box = box AtomicUint::new(0);
            let thread_box = unsafe {
-                *mem::transmute::<&Box<AtomicUint>,
-                                  **mut AtomicUint>(&unique_box)
+                *mem::transmute::<&Box<AtomicUint>, **mut AtomicUint>(&unique_box)
            };
            (Thread::start(proc() {
                unsafe {