auto merge of #17012 : thestinger/rust/sized, r=nikomatsakis

bors 2014-09-06 21:46:25 +00:00
commit 38eb0e5514
8 changed files with 186 additions and 87 deletions


@ -14,7 +14,7 @@
use core::ptr::RawPtr;
#[cfg(not(test))] use core::raw;
#[cfg(not(test))] use util;
#[cfg(stage0, not(test))] use util;
/// Returns a pointer to `size` bytes of memory.
///
@ -119,7 +119,7 @@ unsafe fn exchange_free(ptr: *mut u8, size: uint, align: uint) {
}
// FIXME: #7496
#[cfg(not(test))]
#[cfg(stage0, not(test))]
#[lang="closure_exchange_malloc"]
#[inline]
#[allow(deprecated)]
@ -134,6 +134,21 @@ unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint,
alloc as *mut u8
}
// FIXME: #7496
#[cfg(not(stage0), not(test))]
#[lang="closure_exchange_malloc"]
#[inline]
#[allow(deprecated)]
unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint,
align: uint) -> *mut u8 {
let p = allocate(size, align);
let alloc = p as *mut raw::Box<()>;
(*alloc).drop_glue = drop_glue;
alloc as *mut u8
}
#[cfg(jemalloc)]
mod imp {
use core::option::{None, Option};
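
The duplicated `#[cfg(stage0, not(test))]` item keeps the old behaviour for the bootstrap compiler; the non-stage0 `closure_exchange_malloc` now allocates with exactly the size and alignment passed in by the caller, so the matching free can hand the same values back to the allocator. A minimal sketch of that pairing, assuming the `std::rt::heap` signatures the arena changes below rely on (the helper names here are hypothetical):

    use std::mem;
    use std::rt::heap::{allocate, deallocate};

    // hypothetical helpers illustrating the sized allocate/deallocate pairing
    unsafe fn alloc_one<T>() -> *mut T {
        // assumes T is not zero-sized
        allocate(mem::size_of::<T>(), mem::min_align_of::<T>()) as *mut T
    }

    unsafe fn free_one<T>(p: *mut T) {
        // sized deallocation: pass back exactly the size and alignment
        // that were requested at allocation time
        deallocate(p as *mut u8, mem::size_of::<T>(), mem::min_align_of::<T>())
    }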


@ -39,17 +39,18 @@ use std::mem;
use std::num;
use std::ptr;
use std::rc::Rc;
use std::rt::heap::allocate;
use std::rt::heap::{allocate, deallocate};
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone, PartialEq)]
struct Chunk {
data: Rc<RefCell<Vec<u8> >>,
data: Rc<RefCell<Vec<u8>>>,
fill: Cell<uint>,
is_copy: Cell<bool>,
}
impl Chunk {
fn capacity(&self) -> uint {
self.data.borrow().capacity()
@ -357,13 +358,12 @@ pub struct TypedArena<T> {
end: Cell<*const T>,
/// A pointer to the first arena segment.
first: RefCell<TypedArenaChunkRef<T>>,
first: RefCell<*mut TypedArenaChunk<T>>,
}
type TypedArenaChunkRef<T> = Option<Box<TypedArenaChunk<T>>>;
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
next: TypedArenaChunkRef<T>,
next: *mut TypedArenaChunk<T>,
/// The number of elements that this chunk can hold.
capacity: uint,
@ -371,24 +371,24 @@ struct TypedArenaChunk<T> {
// Objects follow here, suitably aligned.
}
fn calculate_size<T>(capacity: uint) -> uint {
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
size
}
impl<T> TypedArenaChunk<T> {
#[inline]
fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
-> Box<TypedArenaChunk<T>> {
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>());
let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
ptr::write(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: uint)
-> *mut TypedArenaChunk<T> {
let size = calculate_size::<T>(capacity);
let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>())
as *mut TypedArenaChunk<T>;
(*chunk).next = next;
(*chunk).capacity = capacity;
chunk
}
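
For context, `calculate_size` rounds the chunk header up to the element alignment through the crate's existing `round_up` helper, which is not part of this diff; a minimal sketch of it, assuming the usual power-of-two alignment rounding:

    #[inline]
    fn round_up(base: uint, align: uint) -> uint {
        // round `base` up to the next multiple of `align` (a power of two)
        base.checked_add(&(align - 1)).unwrap() & !(align - 1)
    }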
@ -406,14 +406,13 @@ impl<T> TypedArenaChunk<T> {
}
// Destroy the next chunk.
let next_opt = mem::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
let capacity = next.capacity;
next.destroy(capacity)
}
let next = self.next;
let size = calculate_size::<T>(self.capacity);
deallocate(self as *mut TypedArenaChunk<T> as *mut u8, size,
mem::min_align_of::<TypedArenaChunk<T>>());
if next.is_not_null() {
let capacity = (*next).capacity;
(*next).destroy(capacity);
}
}
@ -448,11 +447,13 @@ impl<T> TypedArena<T> {
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::<T>::new(None, capacity);
TypedArena {
ptr: Cell::new(chunk.start() as *const T),
end: Cell::new(chunk.end() as *const T),
first: RefCell::new(Some(chunk)),
unsafe {
let chunk = TypedArenaChunk::<T>::new(ptr::mut_null(), capacity);
TypedArena {
ptr: Cell::new((*chunk).start() as *const T),
end: Cell::new((*chunk).end() as *const T),
first: RefCell::new(chunk),
}
}
}
@ -476,26 +477,28 @@ impl<T> TypedArena<T> {
/// Grows the arena.
#[inline(never)]
fn grow(&self) {
let chunk = self.first.borrow_mut().take().unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity);
self.ptr.set(chunk.start() as *const T);
self.end.set(chunk.end() as *const T);
*self.first.borrow_mut() = Some(chunk)
unsafe {
let chunk = *self.first.borrow_mut();
let new_capacity = (*chunk).capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::<T>::new(chunk, new_capacity);
self.ptr.set((*chunk).start() as *const T);
self.end.set((*chunk).end() as *const T);
*self.first.borrow_mut() = chunk
}
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.borrow().as_ref().unwrap().start() as uint;
let end = self.ptr.get() as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
self.first.borrow_mut().as_mut().unwrap().destroy(diff)
// Determine how much was filled.
let start = self.first.borrow().as_ref().unwrap().start() as uint;
let end = self.ptr.get() as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
(**self.first.borrow_mut()).destroy(diff)
}
}
}
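
For orientation, a hedged usage sketch of the arena API these hunks rework (the public `alloc` entry point is unchanged and does not appear in the hunks above):

    extern crate arena;

    use arena::TypedArena;

    fn main() {
        let arena: TypedArena<int> = TypedArena::new();
        let x: &int = arena.alloc(5);
        assert_eq!(*x, 5);
        // when `arena` is dropped, each chunk is handed back to `deallocate`
        // with the exact size and alignment computed by `calculate_size`
    }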


@ -66,7 +66,7 @@ use middle::trans::glue;
use middle::trans::inline;
use middle::trans::intrinsic;
use middle::trans::machine;
use middle::trans::machine::{llsize_of, llsize_of_real};
use middle::trans::machine::{llsize_of, llsize_of_real, llalign_of_min};
use middle::trans::meth;
use middle::trans::monomorphize;
use middle::trans::tvec;
@ -382,13 +382,44 @@ pub fn malloc_raw_dyn<'a>(bcx: &'a Block<'a>,
Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
pub fn malloc_raw_dyn_proc<'a>(
bcx: &'a Block<'a>,
t: ty::t, alloc_fn: LangItem) -> Result<'a> {
let _icx = push_ctxt("malloc_raw_dyn_proc");
let ccx = bcx.ccx();
let langcall = require_alloc_fn(bcx, t, alloc_fn);
// Grab the TypeRef type of ptr_ty.
let ptr_ty = ty::mk_uniq(bcx.tcx(), t);
let ptr_llty = type_of(ccx, ptr_ty);
let llty = type_of(bcx.ccx(), t);
let size = llsize_of(bcx.ccx(), llty);
let llalign = C_uint(ccx, llalign_of_min(bcx.ccx(), llty) as uint);
// Allocate space:
let drop_glue = glue::get_drop_glue(ccx, ty::mk_uniq(bcx.tcx(), t));
let r = callee::trans_lang_call(
bcx,
langcall,
[
PointerCast(bcx, drop_glue, Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to()),
size,
llalign
],
None);
Result::new(r.bcx, PointerCast(r.bcx, r.val, ptr_llty))
}
pub fn malloc_raw_dyn_managed<'a>(
bcx: &'a Block<'a>,
t: ty::t,
alloc_fn: LangItem,
size: ValueRef)
-> Result<'a> {
let _icx = push_ctxt("malloc_raw_managed");
let _icx = push_ctxt("malloc_raw_dyn_managed");
let ccx = bcx.ccx();
let langcall = require_alloc_fn(bcx, t, alloc_fn);


@ -340,6 +340,27 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
fn schedule_free_slice(&self,
cleanup_scope: ScopeId,
val: ValueRef,
size: ValueRef,
align: ValueRef,
heap: Heap) {
/*!
* Schedules a call to `free(val)`. Note that this is a shallow
* operation.
*/
let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
debug!("schedule_free_slice({:?}, val={}, heap={:?})",
cleanup_scope,
self.ccx.tn().val_to_string(val),
heap);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: CleanupObj) {
@ -926,6 +947,34 @@ impl Cleanup for FreeValue {
}
}
pub struct FreeSlice {
ptr: ValueRef,
size: ValueRef,
align: ValueRef,
heap: Heap,
}
impl Cleanup for FreeSlice {
fn must_unwind(&self) -> bool {
true
}
fn clean_on_unwind(&self) -> bool {
true
}
fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
match self.heap {
HeapManaged => {
glue::trans_free(bcx, self.ptr)
}
HeapExchange => {
glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
}
}
}
}
pub struct LifetimeEnd {
ptr: ValueRef,
}
@ -1020,6 +1069,12 @@ pub trait CleanupMethods<'a> {
val: ValueRef,
heap: Heap,
content_ty: ty::t);
fn schedule_free_slice(&self,
cleanup_scope: ScopeId,
val: ValueRef,
size: ValueRef,
align: ValueRef,
heap: Heap);
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: CleanupObj);


@ -24,7 +24,6 @@ use middle::trans::common::*;
use middle::trans::datum::{Datum, DatumBlock, Expr, Lvalue, rvalue_scratch_datum};
use middle::trans::debuginfo;
use middle::trans::expr;
use middle::trans::machine::llsize_of;
use middle::trans::type_of::*;
use middle::trans::type_::Type;
use middle::ty;
@ -144,15 +143,12 @@ fn allocate_cbox<'a>(bcx: &'a Block<'a>,
let tcx = bcx.tcx();
// Allocate and initialize the box:
let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
match store {
ty::UniqTraitStore => {
let ty = type_of(bcx.ccx(), cdata_ty);
let size = llsize_of(bcx.ccx(), ty);
// we treat proc as @ here, which isn't ideal
malloc_raw_dyn_managed(bcx, cdata_ty, ClosureExchangeMallocFnLangItem, size)
malloc_raw_dyn_proc(bcx, cbox_ty, ClosureExchangeMallocFnLangItem)
}
ty::RegionTraitStore(..) => {
let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
let llbox = alloc_ty(bcx, cbox_ty, "__closure");
Result::new(bcx, llbox)
}


@ -53,7 +53,7 @@ pub fn trans_free<'a>(cx: &'a Block<'a>, v: ValueRef) -> &'a Block<'a> {
Some(expr::Ignore)).bcx
}
fn trans_exchange_free_internal<'a>(cx: &'a Block<'a>, v: ValueRef, size: ValueRef,
pub fn trans_exchange_free_dyn<'a>(cx: &'a Block<'a>, v: ValueRef, size: ValueRef,
align: ValueRef) -> &'a Block<'a> {
let _icx = push_ctxt("trans_exchange_free");
let ccx = cx.ccx();
@ -65,10 +65,8 @@ fn trans_exchange_free_internal<'a>(cx: &'a Block<'a>, v: ValueRef, size: ValueRef,
pub fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef, size: u64,
align: u64) -> &'a Block<'a> {
trans_exchange_free_internal(cx,
v,
C_uint(cx.ccx(), size as uint),
C_uint(cx.ccx(), align as uint))
trans_exchange_free_dyn(cx, v, C_uint(cx.ccx(), size as uint),
C_uint(cx.ccx(), align as uint))
}
pub fn trans_exchange_free_ty<'a>(bcx: &'a Block<'a>, ptr: ValueRef,
@ -111,9 +109,6 @@ pub fn get_drop_glue_type(ccx: &CrateContext, t: ty::t) -> ty::t {
return ty::mk_i8();
}
match ty::get(t).sty {
ty::ty_box(typ) if !ty::type_needs_drop(tcx, typ) =>
ty::mk_box(tcx, ty::mk_i8()),
ty::ty_uniq(typ) if !ty::type_needs_drop(tcx, typ)
&& ty::type_is_sized(tcx, typ) => {
let llty = sizing_type_of(ccx, typ);
@ -121,7 +116,7 @@ pub fn get_drop_glue_type(ccx: &CrateContext, t: ty::t) -> ty::t {
if llsize_of_alloc(ccx, llty) == 0 {
ty::mk_i8()
} else {
ty::mk_uniq(tcx, ty::mk_i8())
t
}
}
_ => t
@ -470,7 +465,7 @@ fn make_drop_glue<'a>(bcx: &'a Block<'a>, v0: ValueRef, t: ty::t) -> &'a Block<'a> {
let info = GEPi(bcx, v0, [0, abi::slice_elt_len]);
let info = Load(bcx, info);
let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
trans_exchange_free_internal(bcx, llbox, llsize, llalign)
trans_exchange_free_dyn(bcx, llbox, llsize, llalign)
})
}
_ => {
@ -523,12 +518,8 @@ fn make_drop_glue<'a>(bcx: &'a Block<'a>, v0: ValueRef, t: ty::t) -> &'a Block<'a> {
with_cond(bcx, IsNotNull(bcx, env), |bcx| {
let dtor_ptr = GEPi(bcx, env, [0u, abi::box_field_tydesc]);
let dtor = Load(bcx, dtor_ptr);
let cdata = GEPi(bcx, env, [0u, abi::box_field_body]);
Call(bcx, dtor, [PointerCast(bcx, cdata, Type::i8p(bcx.ccx()))], None);
// Free the environment itself
// FIXME: #13994: pass align and size here
trans_exchange_free(bcx, env, 0, 8)
Call(bcx, dtor, [PointerCast(bcx, box_cell_v, Type::i8p(bcx.ccx()))], None);
bcx
})
}
ty::ty_trait(..) => {


@ -25,6 +25,7 @@ use middle::trans::datum::*;
use middle::trans::expr::{Dest, Ignore, SaveIn};
use middle::trans::expr;
use middle::trans::glue;
use middle::trans::machine;
use middle::trans::machine::{nonzero_llsize_of, llsize_of_alloc};
use middle::trans::type_::Type;
use middle::trans::type_of;
@ -59,6 +60,7 @@ pub fn make_drop_glue_unboxed<'a>(
-> &'a Block<'a> {
let not_null = IsNotNull(bcx, vptr);
with_cond(bcx, not_null, |bcx| {
let ccx = bcx.ccx();
let tcx = bcx.tcx();
let _icx = push_ctxt("tvec::make_drop_glue_unboxed");
@ -73,8 +75,11 @@ pub fn make_drop_glue_unboxed<'a>(
if should_deallocate {
let not_null = IsNotNull(bcx, dataptr);
with_cond(bcx, not_null, |bcx| {
// FIXME: #13994: the old `Box<[T]>` will not support sized deallocation
glue::trans_exchange_free(bcx, dataptr, 0, 8)
let llty = type_of::type_of(ccx, unit_ty);
let llsize = machine::llsize_of(ccx, llty);
let llalign = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
let size = Mul(bcx, llsize, get_len(bcx, vptr));
glue::trans_exchange_free_dyn(bcx, dataptr, size, llalign)
})
} else {
bcx
@ -281,15 +286,16 @@ pub fn trans_uniq_vec<'a>(bcx: &'a Block<'a>,
debug!(" vt={}, count={:?}", vt.to_string(ccx), count);
let vec_ty = node_id_type(bcx, uniq_expr.id);
let unit_sz = nonzero_llsize_of(ccx, type_of::type_of(ccx, vt.unit_ty));
let llty = type_of::type_of(ccx, vt.unit_ty);
let unit_sz = nonzero_llsize_of(ccx, llty);
let llcount = if count < 4u {
C_int(ccx, 4)
} else {
C_uint(ccx, count)
};
let alloc = Mul(bcx, llcount, unit_sz);
let llty_ptr = type_of::type_of(ccx, vt.unit_ty).ptr_to();
let align = C_uint(ccx, 8);
let llty_ptr = llty.ptr_to();
let align = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
let Result { bcx: bcx, val: dataptr } = malloc_raw_dyn(bcx,
llty_ptr,
vec_ty,
@ -299,16 +305,15 @@ pub fn trans_uniq_vec<'a>(bcx: &'a Block<'a>,
// Create a temporary scope lest execution should fail while
// constructing the vector.
let temp_scope = fcx.push_custom_cleanup_scope();
// FIXME: #13994: the old `Box<[T]>` will not support sized deallocation,
// this is a placeholder
fcx.schedule_free_value(cleanup::CustomScope(temp_scope),
dataptr, cleanup::HeapExchange, vt.unit_ty);
debug!(" alloc_uniq_vec() returned dataptr={}, len={}",
bcx.val_to_string(dataptr), count);
fcx.schedule_free_slice(cleanup::CustomScope(temp_scope),
dataptr, alloc, align, cleanup::HeapExchange);
let bcx = write_content(bcx, &vt, uniq_expr,
content_expr, SaveIn(dataptr));
debug!(" alloc_uniq_vec() returned dataptr={}, len={}",
bcx.val_to_string(dataptr), count);
let bcx = write_content(bcx, &vt, uniq_expr,
content_expr, SaveIn(dataptr));
fcx.pop_custom_cleanup_scope(temp_scope);
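
Expressed in plain Rust rather than the LLVM value arithmetic above, the size/align pair recorded by the new slice cleanup corresponds to something like the following hypothetical helper (a sketch, not code from this commit), where `count` is the number of elements the allocation holds:

    use std::mem;

    // mirrors Mul(llsize, count) and llalign_of_min for an element type T
    fn slice_alloc_info<T>(count: uint) -> (uint, uint) {
        (count * mem::size_of::<T>(), mem::min_align_of::<T>())
    }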


@ -58,9 +58,12 @@ impl<T> OwnedSlice<T> {
if len == 0 {
OwnedSlice::empty()
} else {
// drop excess capacity to avoid breaking sized deallocation
v.shrink_to_fit();
let p = v.as_mut_ptr();
// we own the allocation now
unsafe {mem::forget(v)}
unsafe { mem::forget(v) }
OwnedSlice { data: p, len: len }
}
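
The added `shrink_to_fit` matters because an `OwnedSlice` only remembers `len`, not the vector's capacity, so under sized deallocation the eventual free must be able to reconstruct the allocation size from `len` alone. A hypothetical sketch of such a matching free (the real `Drop` impl is outside this hunk):

    use std::mem;
    use std::rt::heap::deallocate;

    // hypothetical: free a buffer of `len` elements of `T` that was allocated
    // with exactly `len * size_of::<T>()` bytes
    unsafe fn free_exact<T>(data: *mut T, len: uint) {
        if len != 0 && mem::size_of::<T>() != 0 {
            deallocate(data as *mut u8,
                       len * mem::size_of::<T>(),
                       mem::min_align_of::<T>());
        }
    }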