auto merge of #17012 : thestinger/rust/sized, r=nikomatsakis

This commit is contained in:
bors 2014-09-06 21:46:25 +00:00
commit 38eb0e5514
8 changed files with 186 additions and 87 deletions

View File

@ -14,7 +14,7 @@
use core::ptr::RawPtr; use core::ptr::RawPtr;
#[cfg(not(test))] use core::raw; #[cfg(not(test))] use core::raw;
#[cfg(not(test))] use util; #[cfg(stage0, not(test))] use util;
/// Returns a pointer to `size` bytes of memory. /// Returns a pointer to `size` bytes of memory.
/// ///
@ -119,7 +119,7 @@ unsafe fn exchange_free(ptr: *mut u8, size: uint, align: uint) {
} }
// FIXME: #7496 // FIXME: #7496
#[cfg(not(test))] #[cfg(stage0, not(test))]
#[lang="closure_exchange_malloc"] #[lang="closure_exchange_malloc"]
#[inline] #[inline]
#[allow(deprecated)] #[allow(deprecated)]
@ -134,6 +134,21 @@ unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint,
alloc as *mut u8 alloc as *mut u8
} }
// FIXME: #7496
// Non-stage0 twin of the `closure_exchange_malloc` lang item above: allocates
// `size` bytes (with the given `align`) for a boxed closure environment and
// stores `drop_glue` in the `raw::Box` header so the runtime can destroy the
// captured environment later.
// NOTE(review): assumes `size` already includes room for the `raw::Box<()>`
// header and that `align` satisfies the header's alignment — TODO confirm
// against the trans-side caller (`malloc_raw_dyn_proc`).
#[cfg(not(stage0), not(test))]
#[lang="closure_exchange_malloc"]
#[inline]
#[allow(deprecated)]
unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint,
align: uint) -> *mut u8 {
// Allocate the raw block, then view it as a Box header so the drop glue
// pointer can be written into the header slot before handing it out.
let p = allocate(size, align);
let alloc = p as *mut raw::Box<()>;
(*alloc).drop_glue = drop_glue;
alloc as *mut u8
}
#[cfg(jemalloc)] #[cfg(jemalloc)]
mod imp { mod imp {
use core::option::{None, Option}; use core::option::{None, Option};

View File

@ -39,7 +39,7 @@ use std::mem;
use std::num; use std::num;
use std::ptr; use std::ptr;
use std::rc::Rc; use std::rc::Rc;
use std::rt::heap::allocate; use std::rt::heap::{allocate, deallocate};
// The way arena uses arrays is really deeply awful. The arrays are // The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array // allocated, and have capacities reserved, but the fill for the array
@ -50,6 +50,7 @@ struct Chunk {
fill: Cell<uint>, fill: Cell<uint>,
is_copy: Cell<bool>, is_copy: Cell<bool>,
} }
impl Chunk { impl Chunk {
fn capacity(&self) -> uint { fn capacity(&self) -> uint {
self.data.borrow().capacity() self.data.borrow().capacity()
@ -357,13 +358,12 @@ pub struct TypedArena<T> {
end: Cell<*const T>, end: Cell<*const T>,
/// A pointer to the first arena segment. /// A pointer to the first arena segment.
first: RefCell<TypedArenaChunkRef<T>>, first: RefCell<*mut TypedArenaChunk<T>>,
} }
type TypedArenaChunkRef<T> = Option<Box<TypedArenaChunk<T>>>;
struct TypedArenaChunk<T> { struct TypedArenaChunk<T> {
/// Pointer to the next arena segment. /// Pointer to the next arena segment.
next: TypedArenaChunkRef<T>, next: *mut TypedArenaChunk<T>,
/// The number of elements that this chunk can hold. /// The number of elements that this chunk can hold.
capacity: uint, capacity: uint,
@ -371,24 +371,24 @@ struct TypedArenaChunk<T> {
// Objects follow here, suitably aligned. // Objects follow here, suitably aligned.
} }
impl<T> TypedArenaChunk<T> { fn calculate_size<T>(capacity: uint) -> uint {
#[inline]
fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
-> Box<TypedArenaChunk<T>> {
let mut size = mem::size_of::<TypedArenaChunk<T>>(); let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>()); size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>(); let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap(); let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap(); size = size.checked_add(&elems_size).unwrap();
size
}
let mut chunk = unsafe { impl<T> TypedArenaChunk<T> {
let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>()); #[inline]
let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk); unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: uint)
ptr::write(&mut chunk.next, next); -> *mut TypedArenaChunk<T> {
chunk let size = calculate_size::<T>(capacity);
}; let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>())
as *mut TypedArenaChunk<T>;
chunk.capacity = capacity; (*chunk).next = next;
(*chunk).capacity = capacity;
chunk chunk
} }
@ -406,14 +406,13 @@ impl<T> TypedArenaChunk<T> {
} }
// Destroy the next chunk. // Destroy the next chunk.
let next_opt = mem::replace(&mut self.next, None); let next = self.next;
match next_opt { let size = calculate_size::<T>(self.capacity);
None => {} deallocate(self as *mut TypedArenaChunk<T> as *mut u8, size,
Some(mut next) => { mem::min_align_of::<TypedArenaChunk<T>>());
// We assume that the next chunk is completely filled. if next.is_not_null() {
let capacity = next.capacity; let capacity = (*next).capacity;
next.destroy(capacity) (*next).destroy(capacity);
}
} }
} }
@ -448,11 +447,13 @@ impl<T> TypedArena<T> {
/// objects. /// objects.
#[inline] #[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> { pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::<T>::new(None, capacity); unsafe {
let chunk = TypedArenaChunk::<T>::new(ptr::mut_null(), capacity);
TypedArena { TypedArena {
ptr: Cell::new(chunk.start() as *const T), ptr: Cell::new((*chunk).start() as *const T),
end: Cell::new(chunk.end() as *const T), end: Cell::new((*chunk).end() as *const T),
first: RefCell::new(Some(chunk)), first: RefCell::new(chunk),
}
} }
} }
@ -476,26 +477,28 @@ impl<T> TypedArena<T> {
/// Grows the arena. /// Grows the arena.
#[inline(never)] #[inline(never)]
fn grow(&self) { fn grow(&self) {
let chunk = self.first.borrow_mut().take().unwrap(); unsafe {
let new_capacity = chunk.capacity.checked_mul(&2).unwrap(); let chunk = *self.first.borrow_mut();
let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity); let new_capacity = (*chunk).capacity.checked_mul(&2).unwrap();
self.ptr.set(chunk.start() as *const T); let chunk = TypedArenaChunk::<T>::new(chunk, new_capacity);
self.end.set(chunk.end() as *const T); self.ptr.set((*chunk).start() as *const T);
*self.first.borrow_mut() = Some(chunk) self.end.set((*chunk).end() as *const T);
*self.first.borrow_mut() = chunk
}
} }
} }
#[unsafe_destructor] #[unsafe_destructor]
impl<T> Drop for TypedArena<T> { impl<T> Drop for TypedArena<T> {
fn drop(&mut self) { fn drop(&mut self) {
unsafe {
// Determine how much was filled. // Determine how much was filled.
let start = self.first.borrow().as_ref().unwrap().start() as uint; let start = self.first.borrow().as_ref().unwrap().start() as uint;
let end = self.ptr.get() as uint; let end = self.ptr.get() as uint;
let diff = (end - start) / mem::size_of::<T>(); let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method. // Pass that to the `destroy` method.
unsafe { (**self.first.borrow_mut()).destroy(diff)
self.first.borrow_mut().as_mut().unwrap().destroy(diff)
} }
} }
} }

View File

@ -66,7 +66,7 @@ use middle::trans::glue;
use middle::trans::inline; use middle::trans::inline;
use middle::trans::intrinsic; use middle::trans::intrinsic;
use middle::trans::machine; use middle::trans::machine;
use middle::trans::machine::{llsize_of, llsize_of_real}; use middle::trans::machine::{llsize_of, llsize_of_real, llalign_of_min};
use middle::trans::meth; use middle::trans::meth;
use middle::trans::monomorphize; use middle::trans::monomorphize;
use middle::trans::tvec; use middle::trans::tvec;
@ -382,13 +382,44 @@ pub fn malloc_raw_dyn<'a>(bcx: &'a Block<'a>,
Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr)) Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
} }
pub fn malloc_raw_dyn_proc<'a>(
bcx: &'a Block<'a>,
t: ty::t, alloc_fn: LangItem) -> Result<'a> {
let _icx = push_ctxt("malloc_raw_dyn_proc");
let ccx = bcx.ccx();
let langcall = require_alloc_fn(bcx, t, alloc_fn);
// Grab the TypeRef type of ptr_ty.
let ptr_ty = ty::mk_uniq(bcx.tcx(), t);
let ptr_llty = type_of(ccx, ptr_ty);
let llty = type_of(bcx.ccx(), t);
let size = llsize_of(bcx.ccx(), llty);
let llalign = C_uint(ccx, llalign_of_min(bcx.ccx(), llty) as uint);
// Allocate space:
let drop_glue = glue::get_drop_glue(ccx, ty::mk_uniq(bcx.tcx(), t));
let r = callee::trans_lang_call(
bcx,
langcall,
[
PointerCast(bcx, drop_glue, Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to()),
size,
llalign
],
None);
Result::new(r.bcx, PointerCast(r.bcx, r.val, ptr_llty))
}
pub fn malloc_raw_dyn_managed<'a>( pub fn malloc_raw_dyn_managed<'a>(
bcx: &'a Block<'a>, bcx: &'a Block<'a>,
t: ty::t, t: ty::t,
alloc_fn: LangItem, alloc_fn: LangItem,
size: ValueRef) size: ValueRef)
-> Result<'a> { -> Result<'a> {
let _icx = push_ctxt("malloc_raw_managed"); let _icx = push_ctxt("malloc_raw_dyn_managed");
let ccx = bcx.ccx(); let ccx = bcx.ccx();
let langcall = require_alloc_fn(bcx, t, alloc_fn); let langcall = require_alloc_fn(bcx, t, alloc_fn);

View File

@ -340,6 +340,27 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
self.schedule_clean(cleanup_scope, drop as CleanupObj); self.schedule_clean(cleanup_scope, drop as CleanupObj);
} }
fn schedule_free_slice(&self,
cleanup_scope: ScopeId,
val: ValueRef,
size: ValueRef,
align: ValueRef,
heap: Heap) {
/*!
 * Schedules a shallow free of the slice allocation `val` (`size` bytes,
 * alignment `align`, in `heap`) when `cleanup_scope` is exited. Shallow:
 * only the buffer itself is freed; element contents are not dropped here.
 */
// Box the cleanup so it can live in the scope's heterogeneous cleanup list.
let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
debug!("schedule_free_slice({:?}, val={}, heap={:?})",
cleanup_scope,
self.ccx.tn().val_to_string(val),
heap);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
fn schedule_clean(&self, fn schedule_clean(&self,
cleanup_scope: ScopeId, cleanup_scope: ScopeId,
cleanup: CleanupObj) { cleanup: CleanupObj) {
@ -926,6 +947,34 @@ impl Cleanup for FreeValue {
} }
} }
// Cleanup record that frees a slice allocation: `ptr` is the data pointer,
// `size`/`align` are the LLVM values passed to the sized deallocator, and
// `heap` selects managed (`free`) vs exchange (sized `exchange_free`) paths.
pub struct FreeSlice {
ptr: ValueRef,
size: ValueRef,
align: ValueRef,
heap: Heap,
}
impl Cleanup for FreeSlice {
// The free must still run if the allocating code unwinds.
fn must_unwind(&self) -> bool {
true
}
// Run this cleanup on the unwind path as well as normal exit.
fn clean_on_unwind(&self) -> bool {
true
}
// Emits the actual free call: plain `free` for the managed heap, or the
// dynamically-sized `exchange_free` (with size/align) for the exchange heap.
fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
match self.heap {
HeapManaged => {
glue::trans_free(bcx, self.ptr)
}
HeapExchange => {
glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
}
}
}
}
pub struct LifetimeEnd { pub struct LifetimeEnd {
ptr: ValueRef, ptr: ValueRef,
} }
@ -1020,6 +1069,12 @@ pub trait CleanupMethods<'a> {
val: ValueRef, val: ValueRef,
heap: Heap, heap: Heap,
content_ty: ty::t); content_ty: ty::t);
// Schedule a shallow free of the slice buffer `val` (`size` bytes at
// alignment `align`, in `heap`) when `cleanup_scope` exits.
fn schedule_free_slice(&self,
cleanup_scope: ScopeId,
val: ValueRef,
size: ValueRef,
align: ValueRef,
heap: Heap);
fn schedule_clean(&self, fn schedule_clean(&self,
cleanup_scope: ScopeId, cleanup_scope: ScopeId,
cleanup: CleanupObj); cleanup: CleanupObj);

View File

@ -24,7 +24,6 @@ use middle::trans::common::*;
use middle::trans::datum::{Datum, DatumBlock, Expr, Lvalue, rvalue_scratch_datum}; use middle::trans::datum::{Datum, DatumBlock, Expr, Lvalue, rvalue_scratch_datum};
use middle::trans::debuginfo; use middle::trans::debuginfo;
use middle::trans::expr; use middle::trans::expr;
use middle::trans::machine::llsize_of;
use middle::trans::type_of::*; use middle::trans::type_of::*;
use middle::trans::type_::Type; use middle::trans::type_::Type;
use middle::ty; use middle::ty;
@ -144,15 +143,12 @@ fn allocate_cbox<'a>(bcx: &'a Block<'a>,
let tcx = bcx.tcx(); let tcx = bcx.tcx();
// Allocate and initialize the box: // Allocate and initialize the box:
let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
match store { match store {
ty::UniqTraitStore => { ty::UniqTraitStore => {
let ty = type_of(bcx.ccx(), cdata_ty); malloc_raw_dyn_proc(bcx, cbox_ty, ClosureExchangeMallocFnLangItem)
let size = llsize_of(bcx.ccx(), ty);
// we treat proc as @ here, which isn't ideal
malloc_raw_dyn_managed(bcx, cdata_ty, ClosureExchangeMallocFnLangItem, size)
} }
ty::RegionTraitStore(..) => { ty::RegionTraitStore(..) => {
let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
let llbox = alloc_ty(bcx, cbox_ty, "__closure"); let llbox = alloc_ty(bcx, cbox_ty, "__closure");
Result::new(bcx, llbox) Result::new(bcx, llbox)
} }

View File

@ -53,7 +53,7 @@ pub fn trans_free<'a>(cx: &'a Block<'a>, v: ValueRef) -> &'a Block<'a> {
Some(expr::Ignore)).bcx Some(expr::Ignore)).bcx
} }
fn trans_exchange_free_internal<'a>(cx: &'a Block<'a>, v: ValueRef, size: ValueRef, pub fn trans_exchange_free_dyn<'a>(cx: &'a Block<'a>, v: ValueRef, size: ValueRef,
align: ValueRef) -> &'a Block<'a> { align: ValueRef) -> &'a Block<'a> {
let _icx = push_ctxt("trans_exchange_free"); let _icx = push_ctxt("trans_exchange_free");
let ccx = cx.ccx(); let ccx = cx.ccx();
@ -65,9 +65,7 @@ fn trans_exchange_free_internal<'a>(cx: &'a Block<'a>, v: ValueRef, size: ValueR
pub fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef, size: u64, pub fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef, size: u64,
align: u64) -> &'a Block<'a> { align: u64) -> &'a Block<'a> {
trans_exchange_free_internal(cx, trans_exchange_free_dyn(cx, v, C_uint(cx.ccx(), size as uint),
v,
C_uint(cx.ccx(), size as uint),
C_uint(cx.ccx(), align as uint)) C_uint(cx.ccx(), align as uint))
} }
@ -111,9 +109,6 @@ pub fn get_drop_glue_type(ccx: &CrateContext, t: ty::t) -> ty::t {
return ty::mk_i8(); return ty::mk_i8();
} }
match ty::get(t).sty { match ty::get(t).sty {
ty::ty_box(typ) if !ty::type_needs_drop(tcx, typ) =>
ty::mk_box(tcx, ty::mk_i8()),
ty::ty_uniq(typ) if !ty::type_needs_drop(tcx, typ) ty::ty_uniq(typ) if !ty::type_needs_drop(tcx, typ)
&& ty::type_is_sized(tcx, typ) => { && ty::type_is_sized(tcx, typ) => {
let llty = sizing_type_of(ccx, typ); let llty = sizing_type_of(ccx, typ);
@ -121,7 +116,7 @@ pub fn get_drop_glue_type(ccx: &CrateContext, t: ty::t) -> ty::t {
if llsize_of_alloc(ccx, llty) == 0 { if llsize_of_alloc(ccx, llty) == 0 {
ty::mk_i8() ty::mk_i8()
} else { } else {
ty::mk_uniq(tcx, ty::mk_i8()) t
} }
} }
_ => t _ => t
@ -470,7 +465,7 @@ fn make_drop_glue<'a>(bcx: &'a Block<'a>, v0: ValueRef, t: ty::t) -> &'a Block<'
let info = GEPi(bcx, v0, [0, abi::slice_elt_len]); let info = GEPi(bcx, v0, [0, abi::slice_elt_len]);
let info = Load(bcx, info); let info = Load(bcx, info);
let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info); let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
trans_exchange_free_internal(bcx, llbox, llsize, llalign) trans_exchange_free_dyn(bcx, llbox, llsize, llalign)
}) })
} }
_ => { _ => {
@ -523,12 +518,8 @@ fn make_drop_glue<'a>(bcx: &'a Block<'a>, v0: ValueRef, t: ty::t) -> &'a Block<'
with_cond(bcx, IsNotNull(bcx, env), |bcx| { with_cond(bcx, IsNotNull(bcx, env), |bcx| {
let dtor_ptr = GEPi(bcx, env, [0u, abi::box_field_tydesc]); let dtor_ptr = GEPi(bcx, env, [0u, abi::box_field_tydesc]);
let dtor = Load(bcx, dtor_ptr); let dtor = Load(bcx, dtor_ptr);
let cdata = GEPi(bcx, env, [0u, abi::box_field_body]); Call(bcx, dtor, [PointerCast(bcx, box_cell_v, Type::i8p(bcx.ccx()))], None);
Call(bcx, dtor, [PointerCast(bcx, cdata, Type::i8p(bcx.ccx()))], None); bcx
// Free the environment itself
// FIXME: #13994: pass align and size here
trans_exchange_free(bcx, env, 0, 8)
}) })
} }
ty::ty_trait(..) => { ty::ty_trait(..) => {

View File

@ -25,6 +25,7 @@ use middle::trans::datum::*;
use middle::trans::expr::{Dest, Ignore, SaveIn}; use middle::trans::expr::{Dest, Ignore, SaveIn};
use middle::trans::expr; use middle::trans::expr;
use middle::trans::glue; use middle::trans::glue;
use middle::trans::machine;
use middle::trans::machine::{nonzero_llsize_of, llsize_of_alloc}; use middle::trans::machine::{nonzero_llsize_of, llsize_of_alloc};
use middle::trans::type_::Type; use middle::trans::type_::Type;
use middle::trans::type_of; use middle::trans::type_of;
@ -59,6 +60,7 @@ pub fn make_drop_glue_unboxed<'a>(
-> &'a Block<'a> { -> &'a Block<'a> {
let not_null = IsNotNull(bcx, vptr); let not_null = IsNotNull(bcx, vptr);
with_cond(bcx, not_null, |bcx| { with_cond(bcx, not_null, |bcx| {
let ccx = bcx.ccx();
let tcx = bcx.tcx(); let tcx = bcx.tcx();
let _icx = push_ctxt("tvec::make_drop_glue_unboxed"); let _icx = push_ctxt("tvec::make_drop_glue_unboxed");
@ -73,8 +75,11 @@ pub fn make_drop_glue_unboxed<'a>(
if should_deallocate { if should_deallocate {
let not_null = IsNotNull(bcx, dataptr); let not_null = IsNotNull(bcx, dataptr);
with_cond(bcx, not_null, |bcx| { with_cond(bcx, not_null, |bcx| {
// FIXME: #13994: the old `Box<[T]>` will not support sized deallocation let llty = type_of::type_of(ccx, unit_ty);
glue::trans_exchange_free(bcx, dataptr, 0, 8) let llsize = machine::llsize_of(ccx, llty);
let llalign = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
let size = Mul(bcx, llsize, get_len(bcx, vptr));
glue::trans_exchange_free_dyn(bcx, dataptr, size, llalign)
}) })
} else { } else {
bcx bcx
@ -281,15 +286,16 @@ pub fn trans_uniq_vec<'a>(bcx: &'a Block<'a>,
debug!(" vt={}, count={:?}", vt.to_string(ccx), count); debug!(" vt={}, count={:?}", vt.to_string(ccx), count);
let vec_ty = node_id_type(bcx, uniq_expr.id); let vec_ty = node_id_type(bcx, uniq_expr.id);
let unit_sz = nonzero_llsize_of(ccx, type_of::type_of(ccx, vt.unit_ty)); let llty = type_of::type_of(ccx, vt.unit_ty);
let unit_sz = nonzero_llsize_of(ccx, llty);
let llcount = if count < 4u { let llcount = if count < 4u {
C_int(ccx, 4) C_int(ccx, 4)
} else { } else {
C_uint(ccx, count) C_uint(ccx, count)
}; };
let alloc = Mul(bcx, llcount, unit_sz); let alloc = Mul(bcx, llcount, unit_sz);
let llty_ptr = type_of::type_of(ccx, vt.unit_ty).ptr_to(); let llty_ptr = llty.ptr_to();
let align = C_uint(ccx, 8); let align = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
let Result { bcx: bcx, val: dataptr } = malloc_raw_dyn(bcx, let Result { bcx: bcx, val: dataptr } = malloc_raw_dyn(bcx,
llty_ptr, llty_ptr,
vec_ty, vec_ty,
@ -299,10 +305,9 @@ pub fn trans_uniq_vec<'a>(bcx: &'a Block<'a>,
// Create a temporary scope lest execution should fail while // Create a temporary scope lest execution should fail while
// constructing the vector. // constructing the vector.
let temp_scope = fcx.push_custom_cleanup_scope(); let temp_scope = fcx.push_custom_cleanup_scope();
// FIXME: #13994: the old `Box<[T]> will not support sized deallocation,
// this is a placeholder fcx.schedule_free_slice(cleanup::CustomScope(temp_scope),
fcx.schedule_free_value(cleanup::CustomScope(temp_scope), dataptr, alloc, align, cleanup::HeapExchange);
dataptr, cleanup::HeapExchange, vt.unit_ty);
debug!(" alloc_uniq_vec() returned dataptr={}, len={}", debug!(" alloc_uniq_vec() returned dataptr={}, len={}",
bcx.val_to_string(dataptr), count); bcx.val_to_string(dataptr), count);

View File

@ -58,6 +58,9 @@ impl<T> OwnedSlice<T> {
if len == 0 { if len == 0 {
OwnedSlice::empty() OwnedSlice::empty()
} else { } else {
// drop excess capacity to avoid breaking sized deallocation
v.shrink_to_fit();
let p = v.as_mut_ptr(); let p = v.as_mut_ptr();
// we own the allocation now // we own the allocation now
unsafe { mem::forget(v) } unsafe { mem::forget(v) }