register snapshots

This commit is contained in:
Daniel Micay 2014-05-12 02:51:00 -04:00
parent 72fc4a5eb7
commit 8b912bc56b
5 changed files with 10 additions and 122 deletions

View File

@ -341,29 +341,7 @@ struct TypedArenaChunk<T> {
}
impl<T> TypedArenaChunk<T> {
// stage0-only constructor, kept while bootstrapping: allocates one chunk
// with inline storage for `capacity` elements of `T` laid out directly
// after the chunk header.
#[cfg(stage0)]
#[inline]
fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
-> Box<TypedArenaChunk<T>> {
// Header size, rounded up to T's alignment so the element array that
// follows the header is correctly aligned.
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, min_align_of::<T>());
let elem_size = mem::size_of::<T>();
// Checked arithmetic: fail loudly (unwrap) on overflow instead of
// under-allocating.
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = exchange_malloc(size);
let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
// Write `next` into the uninitialized field without dropping the
// garbage that is currently there.
mem::move_val_init(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
#[inline]
#[cfg(not(stage0))]
fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
-> Box<TypedArenaChunk<T>> {
let mut size = mem::size_of::<TypedArenaChunk<T>>();

View File

@ -28,26 +28,10 @@ use str::StrSlice;
// FFI declarations for the runtime allocator entry points.
// `#[allow(ctypes)]` silences the lint about `uint` appearing in an
// extern signature.
#[allow(ctypes)]
extern {
// stage0 signature: no alignment parameter yet.
#[cfg(stage0)]
fn rust_malloc(size: uint) -> *u8;
// post-stage0 signature: caller supplies the required alignment.
#[cfg(not(stage0))]
fn rust_malloc(size: uint, align: uint) -> *u8;
fn rust_free(ptr: *u8, size: uint, align: uint);
}
// stage0-only: allocate backing storage for a `Vec`, reserving room for
// the `Vec` header in front of `cap` bytes of element storage.
#[cfg(stage0)]
unsafe fn alloc(cap: uint) -> *mut Vec<()> {
// Add the header size with an overflow check; unwrap fails on overflow.
let cap = cap.checked_add(&mem::size_of::<Vec<()>>()).unwrap();
let ret = rust_malloc(cap) as *mut Vec<()>;
// Abort the process if the allocation came back null.
if ret.is_null() {
intrinsics::abort();
}
// Fresh vector: no elements yet, full capacity recorded.
(*ret).fill = 0;
(*ret).alloc = cap;
ret
}
#[cfg(not(stage0))]
unsafe fn alloc(cap: uint) -> *mut Vec<()> {
let cap = cap.checked_add(&mem::size_of::<Vec<()>>()).unwrap();
// this should use the real alignment, but the new representation will take care of that

View File

@ -114,15 +114,7 @@ pub fn stats_print() {
}
/// The allocator for unique pointers.
// stage0 lang-item shim: no alignment parameter; simply forwards to
// `exchange_malloc` (the post-stage0 variant below also takes `align`).
#[cfg(stage0)]
#[lang="exchange_malloc"]
#[inline(always)]
pub unsafe fn exchange_malloc_(size: uint) -> *mut u8 {
exchange_malloc(size)
}
/// The allocator for unique pointers.
#[cfg(not(test), not(stage0))]
#[cfg(not(test))]
#[lang="exchange_malloc"]
#[inline(always)]
pub unsafe fn exchange_malloc_(size: uint, align: uint) -> *mut u8 {
@ -130,23 +122,6 @@ pub unsafe fn exchange_malloc_(size: uint, align: uint) -> *mut u8 {
}
/// The allocator for unique pointers.
// stage0 variant: no alignment parameter, so non-zero-size allocations
// use a hard-coded alignment of 8.
#[cfg(stage0)]
#[inline]
pub unsafe fn exchange_malloc(size: uint) -> *mut u8 {
// The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size
// allocations can point to this `static`. It would be incorrect to use a null
// pointer, due to enums assuming types like unique pointers are never null.
static EMPTY: () = ();
if size == 0 {
&EMPTY as *() as *mut u8
} else {
allocate(size, 8)
}
}
/// The allocator for unique pointers.
#[cfg(not(stage0))]
#[inline]
pub unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 {
// The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size
@ -187,16 +162,7 @@ unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint, align: uin
// stage0 C-ABI shim: exported unmangled so it can be called across the
// crate boundary (libcore); forwards straight to `exchange_malloc`.
#[no_mangle]
#[doc(hidden)]
#[deprecated]
#[cfg(stage0, not(test))]
pub unsafe extern "C" fn rust_malloc(size: uint) -> *mut u8 {
exchange_malloc(size)
}
// hack for libcore
// post-stage0 C-ABI shim: forwards both the size and the requested
// alignment to `exchange_malloc`.
#[no_mangle]
#[doc(hidden)]
#[deprecated]
// NOTE(review): the next two cfg attributes look like the old and new
// sides of a diff with its +/- markers stripped; confirm against the
// full commit which one is current.
#[cfg(not(stage0), not(test))]
#[cfg(not(test))]
pub unsafe extern "C" fn rust_malloc(size: uint, align: uint) -> *mut u8 {
exchange_malloc(size, align)
}

View File

@ -291,54 +291,6 @@ pub trait CloneableVector<T> {
impl<'a, T: Clone> CloneableVector<T> for &'a [T] {
/// Returns a copy of `v`.
// stage0 variant: builds the `~[T]` by hand — one allocation holding the
// raw Vec header plus `len` elements — then clone-copies each element
// into place, cleaning up already-cloned elements if a clone fails.
#[inline]
#[cfg(stage0)]
fn to_owned(&self) -> ~[T] {
use RawVec = core::raw::Vec;
use num::{CheckedAdd, CheckedMul};
use option::Expect;
let len = self.len();
// Total byte size = header + len * size_of::<T>(), with overflow
// checks; `expect` fails with a message on overflow.
let data_size = len.checked_mul(&mem::size_of::<T>());
let data_size = data_size.expect("overflow in to_owned()");
let size = mem::size_of::<RawVec<()>>().checked_add(&data_size);
let size = size.expect("overflow in to_owned()");
unsafe {
// this should pass the real required alignment
let ret = exchange_malloc(size) as *mut RawVec<()>;
// fill == alloc: the new vector is exactly full.
(*ret).fill = len * mem::nonzero_size_of::<T>();
(*ret).alloc = len * mem::nonzero_size_of::<T>();
// Be careful with the following loop. We want it to be optimized
// to a memcpy (or something similarly fast) when T is Copy. LLVM
// is easily confused, so any extra operations during the loop can
// prevent this optimization.
let mut i = 0;
let p = &mut (*ret).data as *mut _ as *mut T;
// try_finally: the first closure does the copy; the second runs
// unconditionally and detects a partial copy (failure mid-loop).
try_finally(
&mut i, (),
|i, ()| while *i < len {
mem::move_val_init(
&mut(*p.offset(*i as int)),
self.unsafe_ref(*i).clone());
*i += 1;
},
|i| if *i < len {
// we must be failing, clean up after ourselves
for j in range(0, *i as int) {
ptr::read(&*p.offset(j));
}
// FIXME: #13994 (should pass align and size here)
deallocate(ret as *mut u8, 0, 8);
});
mem::transmute(ret)
}
}
/// Returns a copy of `v`.
#[inline]
#[cfg(not(stage0))]
fn to_owned(&self) -> ~[T] {
use RawVec = core::raw::Vec;
use num::{CheckedAdd, CheckedMul};

View File

@ -1,3 +1,11 @@
S 2014-05-11 72fc4a5
freebsd-x86_64 82db6355b0b7c8023c8845a74e2f224da2831b50
linux-i386 91901299d5f86f5b67377d940073908a1f0e4e82
linux-x86_64 2a80e40bb8d832dba307ad6a43bb63081627c22c
macos-i386 3d7ce9b9201f07cecddae6f1b8025e9c28b10bbf
macos-x86_64 4cfe69a0499d486a7bfdb9cd05c52845ad607dcb
winnt-i386 328d13aeb6c573125c57d7103a12bebd34fadd1f
S 2014-05-09 47ecc2e
freebsd-x86_64 5c085972690e1f9412c3c0c7ec64f6b148fe04fd
linux-i386 690d2e310c025f10c54b1f2b9f32c65ea34575ed