auto merge of #11565 : mozilla/rust/snapshot, r=huonw

Author: bors
Date:   2014-01-15 17:46:42 -08:00
Commit: a5ed0c58cb
9 changed files with 25 additions and 277 deletions

View File

@@ -39,30 +39,6 @@ static OS_DEFAULT_STACK_ESTIMATE: uint = 1 << 20;
 #[cfg(unix, not(android))]
 static OS_DEFAULT_STACK_ESTIMATE: uint = 2 * (1 << 20);
-// XXX: this should not exist here
-#[cfg(stage0, nativestart)]
-#[lang = "start"]
-pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int {
-    use std::cast;
-    use std::task;
-    do start(argc, argv) {
-        // Instead of invoking main directly on this thread, invoke it on
-        // another spawned thread that we are guaranteed to know the size of the
-        // stack of. Currently, we do not have a method of figuring out the size
-        // of the main thread's stack, so for stack overflow detection to work
-        // we must spawn the task in a subtask which we know the stack size of.
-        let main: extern "Rust" fn() = unsafe { cast::transmute(main) };
-        let mut task = task::task();
-        task.name("<main>");
-        match do task.try { main() } {
-            Ok(()) => { os::set_exit_status(0); }
-            Err(..) => { os::set_exit_status(rt::DEFAULT_ERROR_CODE); }
-        }
-    }
-}
 /// Executes the given procedure after initializing the runtime with the given
 /// argc/argv.
 ///

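The block removed above is one instance of the pattern this entire commit applies: once a new snapshot compiler is registered, the #[cfg(stage0)] compatibility copies kept for the previous snapshot are deleted. A minimal sketch of the dance, with an illustrative item name (not from the commit):

    // Before a snapshot lands, changed items exist in two copies:
    #[cfg(stage0)]        // seen only by the old snapshot compiler
    pub fn example() -> uint { 1 }

    #[cfg(not(stage0))]   // seen by every later build stage
    pub fn example() -> uint { 1 }

    // After the snapshot is registered, the stage0 copy is deleted and the
    // #[cfg(not(stage0))] attribute on the survivor becomes redundant,
    // which is exactly what the hunks below keep doing.
    pub fn example() -> uint { 1 }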
View File

@@ -17,6 +17,8 @@ use unstable::raw;
 type DropGlue<'a> = 'a |**TyDesc, *c_void|;
+static RC_IMMORTAL : uint = 0x77777777;
 /*
  * Box annihilation
  *
@@ -25,24 +27,21 @@ type DropGlue<'a> = 'a |**TyDesc, *c_void|;
 struct AnnihilateStats {
     n_total_boxes: uint,
-    n_unique_boxes: uint,
     n_bytes_freed: uint
 }
 unsafe fn each_live_alloc(read_next_before: bool,
-                          f: |alloc: *mut raw::Box<()>, uniq: bool| -> bool)
+                          f: |alloc: *mut raw::Box<()>| -> bool)
                           -> bool {
     //! Walks the internal list of allocations
-    use managed;
     use rt::local_heap;
     let mut alloc = local_heap::live_allocs();
     while alloc != ptr::mut_null() {
         let next_before = (*alloc).next;
-        let uniq = (*alloc).ref_count == managed::RC_MANAGED_UNIQUE;
-        if !f(alloc, uniq) {
+        if !f(alloc) {
             return false;
         }
@@ -70,11 +69,9 @@ fn debug_mem() -> bool {
 pub unsafe fn annihilate() {
     use rt::local_heap::local_free;
     use mem;
-    use managed;
     let mut stats = AnnihilateStats {
         n_total_boxes: 0,
-        n_unique_boxes: 0,
         n_bytes_freed: 0
     };
@@ -82,13 +79,9 @@ pub unsafe fn annihilate() {
     //
     // In this pass, nothing gets freed, so it does not matter whether
     // we read the next field before or after the callback.
-    each_live_alloc(true, |alloc, uniq| {
+    each_live_alloc(true, |alloc| {
         stats.n_total_boxes += 1;
-        if uniq {
-            stats.n_unique_boxes += 1;
-        } else {
-            (*alloc).ref_count = managed::RC_IMMORTAL;
-        }
+        (*alloc).ref_count = RC_IMMORTAL;
         true
     });
@@ -97,12 +90,10 @@ pub unsafe fn annihilate() {
     // In this pass, unique-managed boxes may get freed, but not
     // managed boxes, so we must read the `next` field *after* the
     // callback, as the original value may have been freed.
-    each_live_alloc(false, |alloc, uniq| {
-        if !uniq {
-            let tydesc = (*alloc).type_desc;
-            let data = &(*alloc).data as *();
-            ((*tydesc).drop_glue)(data as *i8);
-        }
+    each_live_alloc(false, |alloc| {
+        let tydesc = (*alloc).type_desc;
+        let data = &(*alloc).data as *();
+        ((*tydesc).drop_glue)(data as *i8);
         true
     });
@@ -112,13 +103,11 @@ pub unsafe fn annihilate() {
     // unique-managed boxes, though I think that none of those are
     // left), so we must read the `next` field before, since it will
     // not be valid after.
-    each_live_alloc(true, |alloc, uniq| {
-        if !uniq {
-            stats.n_bytes_freed +=
-                (*((*alloc).type_desc)).size
-                + mem::size_of::<raw::Box<()>>();
-            local_free(alloc as *i8);
-        }
+    each_live_alloc(true, |alloc| {
+        stats.n_bytes_freed +=
+            (*((*alloc).type_desc)).size
+            + mem::size_of::<raw::Box<()>>();
+        local_free(alloc as *i8);
         true
     });
@@ -126,8 +115,7 @@ pub unsafe fn annihilate() {
         // We do logging here w/o allocation.
         debug!("annihilator stats:\n  \
                 total boxes: {}\n  \
-                unique boxes: {}\n  \
                 bytes freed: {}",
-               stats.n_total_boxes, stats.n_unique_boxes, stats.n_bytes_freed);
+               stats.n_total_boxes, stats.n_bytes_freed);
     }
 }

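The read_next_before flag of each_live_alloc encodes the invariant the three pass comments spell out: when the callback may free the current box, its `next` pointer must be captured before the callback runs. A simplified sketch with a hypothetical node type (not the commit's code):

    struct Node { next: *mut Node }

    unsafe fn walk(read_next_before: bool, mut cur: *mut Node, f: |*mut Node|) {
        while cur != ptr::mut_null() {
            let next_before = (*cur).next;
            f(cur);
            // Reading (*cur).next here is only valid when `f` did not free
            // `cur`; a pass that frees must use read_next_before = true.
            cur = if read_next_before { next_before } else { (*cur).next };
        }
    }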
View File

@@ -14,9 +14,6 @@ use ptr::to_unsafe_ptr;
 #[cfg(not(test))] use cmp::*;
-pub static RC_MANAGED_UNIQUE : uint = (-2) as uint;
-pub static RC_IMMORTAL : uint = 0x77777777;
 /// Returns the refcount of a shared box (as just before calling this)
 #[inline]
 pub fn refcount<T>(t: @T) -> uint {

View File

@@ -227,14 +227,6 @@ impl<V:TyVisitor + MovePtr> TyVisitor for MovePtrAdaptor<V> {
         true
     }
-    #[cfg(stage0)]
-    fn visit_uniq_managed(&mut self, mtbl: uint, inner: *TyDesc) -> bool {
-        self.align_to::<~u8>();
-        if ! self.inner.visit_uniq_managed(mtbl, inner) { return false; }
-        self.bump_past::<~u8>();
-        true
-    }
     fn visit_ptr(&mut self, mtbl: uint, inner: *TyDesc) -> bool {
         self.align_to::<*u8>();
         if ! self.inner.visit_ptr(mtbl, inner) { return false; }
@@ -276,14 +268,6 @@ impl<V:TyVisitor + MovePtr> TyVisitor for MovePtrAdaptor<V> {
         true
     }
-    #[cfg(stage0)]
-    fn visit_evec_uniq_managed(&mut self, mtbl: uint, inner: *TyDesc) -> bool {
-        self.align_to::<~[@u8]>();
-        if ! self.inner.visit_evec_uniq_managed(mtbl, inner) { return false; }
-        self.bump_past::<~[@u8]>();
-        true
-    }
     fn visit_evec_slice(&mut self, mtbl: uint, inner: *TyDesc) -> bool {
         self.align_to::<&'static [u8]>();
         if ! self.inner.visit_evec_slice(mtbl, inner) { return false; }

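Every method of the MovePtrAdaptor follows the same two-step pointer walk: align the cursor up to the visited type's alignment, recurse, then bump past its size. As plain arithmetic (a sketch of the idea, not the adaptor's code):

    // Round `cursor` up to the next multiple of `alignment` (a power of two).
    fn align(cursor: uint, alignment: uint) -> uint {
        (cursor + alignment - 1) & !(alignment - 1)
    }

    // align_to::<T>() followed by bump_past::<T>() amounts to:
    fn step_over_field(cursor: uint, size: uint, alignment: uint) -> uint {
        align(cursor, alignment) + size
    }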
View File

@@ -310,15 +310,6 @@ impl<'a> TyVisitor for ReprVisitor<'a> {
         })
     }
-    #[cfg(stage0)]
-    fn visit_uniq_managed(&mut self, _mtbl: uint, inner: *TyDesc) -> bool {
-        self.writer.write(['~' as u8]);
-        self.get::<&raw::Box<()>>(|this, b| {
-            let p = ptr::to_unsafe_ptr(&b.data) as *c_void;
-            this.visit_ptr_inner(p, inner);
-        })
-    }
     fn visit_ptr(&mut self, mtbl: uint, _inner: *TyDesc) -> bool {
         self.get::<*c_void>(|this, p| {
             write!(this.writer, "({} as *", *p);
@@ -359,14 +350,6 @@ impl<'a> TyVisitor for ReprVisitor<'a> {
         })
     }
-    #[cfg(stage0)]
-    fn visit_evec_uniq_managed(&mut self, mtbl: uint, inner: *TyDesc) -> bool {
-        self.get::<&raw::Box<raw::Vec<()>>>(|this, b| {
-            this.writer.write(['~' as u8]);
-            this.write_unboxed_vec_repr(mtbl, &b.data, inner);
-        })
-    }
     fn visit_evec_slice(&mut self, mtbl: uint, inner: *TyDesc) -> bool {
         self.get::<raw::Slice<()>>(|this, s| {
             this.writer.write(['&' as u8]);

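The deleted methods were the managed-unique duplicates of a shape every surviving visitor shares: write a one-byte sigil, then recurse into the pointee. Schematically (a hypothetical helper, not part of ReprVisitor):

    fn write_sigil_then_inner(this: &mut ReprVisitor, sigil: u8) {
        this.writer.write([sigil]);   // e.g. '~' as u8 or '&' as u8
        // ...then visit_ptr_inner or write_unboxed_vec_repr on the payload,
        // so a ~-box renders as "~" followed by its contents' repr.
    }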
View File

@@ -47,41 +47,8 @@ pub use realstd::unstable::intrinsics::{TyDesc, Opaque, TyVisitor, TypeId};
 pub type GlueFn = extern "Rust" fn(*i8);
-// NOTE remove after next snapshot
-#[lang="ty_desc"]
-#[cfg(not(test), stage0)]
-pub struct TyDesc {
-    // sizeof(T)
-    size: uint,
-    // alignof(T)
-    align: uint,
-    // Called on a copy of a value of type `T` *after* memcpy
-    take_glue: GlueFn,
-    // Called when a value of type `T` is no longer needed
-    drop_glue: GlueFn,
-    // Called by drop glue when a value of type `T` can be freed
-    free_glue: GlueFn,
-    // Called by reflection visitor to visit a value of type `T`
-    visit_glue: GlueFn,
-    // If T represents a box pointer (`@U` or `~U`), then
-    // `borrow_offset` is the amount that the pointer must be adjusted
-    // to find the payload. This is always derivable from the type
-    // `U`, but in the case of `@Trait` or `~Trait` objects, the type
-    // `U` is unknown.
-    borrow_offset: uint,
-    // Name corresponding to the type
-    name: &'static str
-}
 #[lang="ty_desc"]
-#[cfg(not(test), not(stage0))]
+#[cfg(not(test))]
 pub struct TyDesc {
     // sizeof(T)
     size: uint,
@@ -139,8 +106,6 @@ pub trait TyVisitor {
     fn visit_box(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
     fn visit_uniq(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
-    #[cfg(stage0)]
-    fn visit_uniq_managed(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
     fn visit_ptr(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
     fn visit_rptr(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
@@ -148,8 +113,6 @@ pub trait TyVisitor {
     fn visit_unboxed_vec(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
     fn visit_evec_box(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
     fn visit_evec_uniq(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
-    #[cfg(stage0)]
-    fn visit_evec_uniq_managed(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
     fn visit_evec_slice(&mut self, mtbl: uint, inner: *TyDesc) -> bool;
     fn visit_evec_fixed(&mut self, n: uint, sz: uint, align: uint,
                         mtbl: uint, inner: *TyDesc) -> bool;

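TyDesc is the runtime's per-type record: the first two fields are what the compiler knows statically about T, and the glue fields are per-type function pointers. A sketch of what the size and align fields should agree with, using the era's mem API (an assumption, not code from the commit):

    fn size_and_align<T>() -> (uint, uint) {
        (mem::size_of::<T>(), mem::min_align_of::<T>())
    }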
View File

@@ -116,18 +116,12 @@ use ptr::to_unsafe_ptr;
 use ptr;
 use ptr::RawPtr;
 use rt::global_heap::{malloc_raw, realloc_raw, exchange_free};
-#[cfg(stage0)]
-use rt::local_heap::local_free;
 use mem;
 use mem::size_of;
 use uint;
 use unstable::finally::Finally;
 use unstable::intrinsics;
-#[cfg(stage0)]
-use unstable::intrinsics::{get_tydesc, owns_managed};
 use unstable::raw::{Repr, Slice, Vec};
-#[cfg(stage0)]
-use unstable::raw::Box;
 use util;
 /**
@@ -182,30 +176,6 @@ pub fn from_elem<T:Clone>(n_elts: uint, t: T) -> ~[T] {
 /// Creates a new vector with a capacity of `capacity`
 #[inline]
-#[cfg(stage0)]
-pub fn with_capacity<T>(capacity: uint) -> ~[T] {
-    unsafe {
-        if owns_managed::<T>() {
-            let mut vec = ~[];
-            vec.reserve(capacity);
-            vec
-        } else {
-            let alloc = capacity * mem::nonzero_size_of::<T>();
-            let size = alloc + mem::size_of::<Vec<()>>();
-            if alloc / mem::nonzero_size_of::<T>() != capacity || size < alloc {
-                fail!("vector size is too large: {}", capacity);
-            }
-            let ptr = malloc_raw(size) as *mut Vec<()>;
-            (*ptr).alloc = alloc;
-            (*ptr).fill = 0;
-            cast::transmute(ptr)
-        }
-    }
-}
-/// Creates a new vector with a capacity of `capacity`
-#[inline]
-#[cfg(not(stage0))]
 pub fn with_capacity<T>(capacity: uint) -> ~[T] {
     unsafe {
         let alloc = capacity * mem::nonzero_size_of::<T>();
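The surviving with_capacity keeps the two overflow checks visible in the deleted copy: the division detects a wrapped multiplication and the comparison detects a wrapped addition. As standalone arithmetic (a sketch; elem_size must be nonzero, which is why the real code uses mem::nonzero_size_of):

    fn checked_alloc_size(capacity: uint, elem_size: uint, header: uint) -> uint {
        let alloc = capacity * elem_size;   // may silently wrap
        let size = alloc + header;          // may wrap again
        if alloc / elem_size != capacity || size < alloc {
            fail!("vector size is too large: {}", capacity);
        }
        size
    }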
@@ -1503,31 +1473,6 @@ impl<T> OwnedVector<T> for ~[T] {
         self.move_iter().invert()
     }
-    #[cfg(stage0)]
-    fn reserve(&mut self, n: uint) {
-        // Only make the (slow) call into the runtime if we have to
-        if self.capacity() < n {
-            unsafe {
-                let td = get_tydesc::<T>();
-                if owns_managed::<T>() {
-                    let ptr: *mut *mut Box<Vec<()>> = cast::transmute(self);
-                    ::at_vec::raw::reserve_raw(td, ptr, n);
-                } else {
-                    let ptr: *mut *mut Vec<()> = cast::transmute(self);
-                    let alloc = n * mem::nonzero_size_of::<T>();
-                    let size = alloc + mem::size_of::<Vec<()>>();
-                    if alloc / mem::nonzero_size_of::<T>() != n || size < alloc {
-                        fail!("vector size is too large: {}", n);
-                    }
-                    *ptr = realloc_raw(*ptr as *mut c_void, size)
-                           as *mut Vec<()>;
-                    (**ptr).alloc = alloc;
-                }
-            }
-        }
-    }
-    #[cfg(not(stage0))]
     fn reserve(&mut self, n: uint) {
         // Only make the (slow) call into the runtime if we have to
         if self.capacity() < n {
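Both the deleted and the surviving reserve only touch the allocator when the current capacity is insufficient, so redundant calls are cheap. A usage sketch:

    let mut v: ~[int] = ~[];
    v.reserve(100);   // grows the allocation once, via realloc_raw
    v.reserve(50);    // no-op: capacity() is already >= 50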
@@ -1561,21 +1506,6 @@ impl<T> OwnedVector<T> for ~[T] {
     }
     #[inline]
-    #[cfg(stage0)]
-    fn capacity(&self) -> uint {
-        unsafe {
-            if owns_managed::<T>() {
-                let repr: **Box<Vec<()>> = cast::transmute(self);
-                (**repr).data.alloc / mem::nonzero_size_of::<T>()
-            } else {
-                let repr: **Vec<()> = cast::transmute(self);
-                (**repr).alloc / mem::nonzero_size_of::<T>()
-            }
-        }
-    }
-    #[inline]
-    #[cfg(not(stage0))]
     fn capacity(&self) -> uint {
         unsafe {
             let repr: **Vec<()> = cast::transmute(self);
@@ -1594,51 +1524,6 @@ impl<T> OwnedVector<T> for ~[T] {
     }
     #[inline]
-    #[cfg(stage0)]
-    fn push(&mut self, t: T) {
-        unsafe {
-            if owns_managed::<T>() {
-                let repr: **Box<Vec<()>> = cast::transmute(&mut *self);
-                let fill = (**repr).data.fill;
-                if (**repr).data.alloc <= fill {
-                    self.reserve_additional(1);
-                }
-                push_fast(self, t);
-            } else {
-                let repr: **Vec<()> = cast::transmute(&mut *self);
-                let fill = (**repr).fill;
-                if (**repr).alloc <= fill {
-                    self.reserve_additional(1);
-                }
-                push_fast(self, t);
-            }
-        }
-        // This doesn't bother to make sure we have space.
-        #[inline] // really pretty please
-        unsafe fn push_fast<T>(this: &mut ~[T], t: T) {
-            if owns_managed::<T>() {
-                let repr: **mut Box<Vec<u8>> = cast::transmute(this);
-                let fill = (**repr).data.fill;
-                (**repr).data.fill += mem::nonzero_size_of::<T>();
-                let p = to_unsafe_ptr(&((**repr).data.data));
-                let p = ptr::offset(p, fill as int) as *mut T;
-                intrinsics::move_val_init(&mut(*p), t);
-            } else {
-                let repr: **mut Vec<u8> = cast::transmute(this);
-                let fill = (**repr).fill;
-                (**repr).fill += mem::nonzero_size_of::<T>();
-                let p = to_unsafe_ptr(&((**repr).data));
-                let p = ptr::offset(p, fill as int) as *mut T;
-                intrinsics::move_val_init(&mut(*p), t);
-            }
-        }
-    }
-    #[inline]
-    #[cfg(not(stage0))]
     fn push(&mut self, t: T) {
         unsafe {
             let repr: **Vec<()> = cast::transmute(&mut *self);
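push splits into a slow path that calls reserve_additional and a fast path, push_fast, that assumes space already exists. Its protocol, step by step (a commented restatement of the code above, not new behavior):

    // 1. read `fill`, the byte offset one past the last element;
    // 2. bump `fill` by mem::nonzero_size_of::<T>() to claim the slot;
    // 3. compute the slot pointer as data-start plus the old `fill`;
    // 4. intrinsics::move_val_init moves `t` in without reading or
    //    dropping the uninitialized destination bytes.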
@@ -1821,20 +1706,8 @@ impl<T> OwnedVector<T> for ~[T] {
             i += 1u;
         }
     }
-    #[inline]
-    #[cfg(stage0)]
-    unsafe fn set_len(&mut self, new_len: uint) {
-        if owns_managed::<T>() {
-            let repr: **mut Box<Vec<()>> = cast::transmute(self);
-            (**repr).data.fill = new_len * mem::nonzero_size_of::<T>();
-        } else {
-            let repr: **mut Vec<()> = cast::transmute(self);
-            (**repr).fill = new_len * mem::nonzero_size_of::<T>();
-        }
-    }
     #[inline]
-    #[cfg(not(stage0))]
     unsafe fn set_len(&mut self, new_len: uint) {
         let repr: **mut Vec<()> = cast::transmute(self);
         (**repr).fill = new_len * mem::nonzero_size_of::<T>();
@@ -3010,23 +2883,6 @@ impl<T> DoubleEndedIterator<T> for MoveIterator<T> {
 }
 #[unsafe_destructor]
-#[cfg(stage0)]
-impl<T> Drop for MoveIterator<T> {
-    fn drop(&mut self) {
-        // destroy the remaining elements
-        for _x in *self {}
-        unsafe {
-            if owns_managed::<T>() {
-                local_free(self.allocation as *u8 as *c_char)
-            } else {
-                exchange_free(self.allocation as *u8 as *c_char)
-            }
-        }
-    }
-}
-#[unsafe_destructor]
-#[cfg(not(stage0))]
 impl<T> Drop for MoveIterator<T> {
     fn drop(&mut self) {
         // destroy the remaining elements

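The destructor drains the iterator before freeing the backing allocation: each element moved out by the loop is dropped at the end of its iteration, leaving only the buffer itself to release (exchange_free in the surviving version; the deleted stage0 one picked local_free when owns_managed::<T>() held). A usage-level sketch:

    let v = ~[~1, ~2, ~3];
    let mut it = v.move_iter();
    it.next();   // ~1 is moved out here and dropped with the Option
    // dropping `it` drops ~2 and ~3, then frees the vector's allocation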
View File

@@ -638,13 +638,6 @@ pub enum TokenTree {
     TTNonterminal(Span, Ident)
 }
-// NOTE remove after next snapshot
-// Required for ext::quote macros.
-#[cfg(stage0)]
-pub fn tt_tok(span: Span, tok: ::parse::token::Token) -> TokenTree {
-    TTTok(span, tok)
-}
 //
 // Matchers are nodes defined-by and recognized-by the main rust parser and
 // language, but they're only ever found inside syntax-extension invocations;
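The deleted tt_tok was a thin wrapper around the TTTok variant, kept only so code generated by the quote macros under the old snapshot could call it by path; newer generated code constructs the variant directly. The equivalence, schematically:

    // old generated code:  tt_tok(sp, tok)
    // new generated code:  TTTok(sp, tok)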

View File

@@ -1,3 +1,11 @@
+S 2014-01-14 29070c3
+  freebsd-x86_64 c2fb6e6313a9f1d41df810fcf1ae354858a8bf76
+  linux-i386 6437656b81cf9f3d1377523c1e36d5cf06b2d645
+  linux-x86_64 f3ca80c146f3a6495c19fc77dba13f9c0abece49
+  macos-i386 3f1f9925fe1ddca94f2727194bd5763b0705016e
+  macos-x86_64 0c10e160e3a754f2cdc89aea037c458fefe03d30
+  winnt-i386 5cb277524157a8a883a8641b829f8aa6f53cdcf8
 S 2014-01-08 f3a8baa
   freebsd-x86_64 9f2491ebe48ff77774c73c111acdd951973d7e47
   linux-i386 e2ba50e6a7d0cf6a7d65393f0c6416a2af58f8d4
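Each registry entry added above follows the same shape: an `S <date> <short-rev>` header naming the snapshot's source commit, then one `<platform> <sha1>` line per prebuilt snapshot tarball. Schematically:

    S 2014-01-14 29070c3
      <platform-name> <40-hex-digit hash of that platform's snapshot archive>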