Merge branch 'master' of github.com:graydon/rust

commit a3f48d3fe1
Author: Graydon Hoare
Date:   2011-12-07 11:52:38 -08:00

25 changed files with 533 additions and 176 deletions


@ -353,12 +353,24 @@ $(foreach build,$(CFG_TARGET_TRIPLES), \
# Builds a functional Rustc for the given host.
######################################################################
define DEF_RUSTC_STAGE_TARGET
# $(1) == architecture
# $(2) == stage
rustc-stage$(2)-H-$(1): \
$$(foreach target,$$(CFG_TARGET_TRIPLES), \
$$(SREQ$(2)_T_$$(target)_H_$(1)))
endef
$(foreach host,$(CFG_TARGET_TRIPLES), \
$(eval $(foreach stage,1 2 3, \
$(eval $(call DEF_RUSTC_STAGE_TARGET,$(host),$(stage))))))
define DEF_RUSTC_TARGET
# $(1) == architecture
rustc-H-$(1): \
$$(foreach target,$$(CFG_TARGET_TRIPLES), \
$$(SREQ3_T_$$(target)_H_$(1)))
rustc-H-$(1): rustc-stage3-H-$(1)
endef
$(foreach host,$(CFG_TARGET_TRIPLES), \


@ -27,7 +27,8 @@ type upcalls =
dynastack_free: ValueRef,
alloc_c_stack: ValueRef,
call_shim_on_c_stack: ValueRef,
rust_personality: ValueRef};
rust_personality: ValueRef,
reset_stack_limit: ValueRef};
fn declare_upcalls(targ_cfg: @session::config,
_tn: type_names,
@ -89,7 +90,8 @@ fn declare_upcalls(targ_cfg: @session::config,
// arguments: void *args, void *fn_ptr
[T_ptr(T_i8()), T_ptr(T_i8())],
int_t),
rust_personality: d("rust_personality", [], T_i32())
rust_personality: d("rust_personality", [], T_i32()),
reset_stack_limit: dv("reset_stack_limit", [])
};
}
//


@ -150,6 +150,7 @@ fn visit_decl(cx: @ctx, d: @decl, &&e: (), v: visit::vt<()>) {
fn visit_expr(cx: @ctx, ex: @expr, &&e: (), v: visit::vt<()>) {
alt ex.node {
expr_call(f, args, _) { check_call(cx, f, args); }
expr_bind(f, args) { check_bind(cx, f, args); }
expr_swap(lhs, rhs) {
check_lval(cx, lhs, msg_assign);
check_lval(cx, rhs, msg_assign);
@ -230,6 +231,30 @@ fn check_call(cx: @ctx, f: @expr, args: [@expr]) {
}
}
fn check_bind(cx: @ctx, f: @expr, args: [option::t<@expr>]) {
let arg_ts = ty::ty_fn_args(cx.tcx, ty::expr_ty(cx.tcx, f));
let i = 0u;
for arg in args {
alt arg {
some(expr) {
alt (alt arg_ts[i].mode {
by_mut_ref. { some("by mutable reference") }
by_move. { some("by move") }
_ { none }
}) {
some(name) {
cx.tcx.sess.span_err(
expr.span, "cannot bind an argument passed " + name);
}
none. {}
}
}
_ {}
}
i += 1u;
}
}
fn is_immutable_def(def: def) -> option::t<str> {
alt def {
def_fn(_, _) | def_mod(_) | def_native_mod(_) | def_const(_) |


@ -3462,6 +3462,12 @@ fn trans_bind_thunk(cx: @local_ctxt, sp: span, incoming_fty: ty::t,
bcx = bound_arg.bcx;
let val = bound_arg.val;
if out_arg.mode == ast::by_val { val = Load(bcx, val); }
if out_arg.mode == ast::by_copy {
let {bcx: cx, val: alloc} = alloc_ty(bcx, out_arg.ty);
bcx = memmove_ty(cx, alloc, val, out_arg.ty);
bcx = take_ty(bcx, alloc, out_arg.ty);
val = alloc;
}
// If the type is parameterized, then we need to cast the
// type we actually have to the parameterized out type.
if ty::type_contains_params(cx.ccx.tcx, out_arg.ty) {
@ -3904,6 +3910,11 @@ fn trans_landing_pad(bcx: @block_ctxt,
// The landing pad block is a cleanup
SetCleanup(bcx, llpad);
// Because we may have unwound across a stack boundary, we must call into
// the runtime to figure out which stack segment we are on and place the
// stack limit back into the TLS.
Call(bcx, bcx_ccx(bcx).upcalls.reset_stack_limit, []);
// FIXME: This seems like a very naive and redundant way to generate the
// landing pads, as we're re-generating all in-scope cleanups for each
// function call. Probably good optimization opportunities here.
@ -4531,7 +4542,9 @@ fn zero_alloca(cx: @block_ctxt, llptr: ValueRef, t: ty::t)
fn trans_stmt(cx: @block_ctxt, s: ast::stmt) -> @block_ctxt {
// FIXME Fill in cx.sp
add_span_comment(cx, s.span, stmt_to_str(s));
if (!bcx_ccx(cx).sess.get_opts().no_asm_comments) {
add_span_comment(cx, s.span, stmt_to_str(s));
}
let bcx = cx;
alt s.node {


@ -515,7 +515,9 @@ fn add_span_comment(bcx: @block_ctxt, sp: span, text: str) {
fn add_comment(bcx: @block_ctxt, text: str) {
let ccx = bcx_ccx(bcx);
if (!ccx.sess.get_opts().no_asm_comments) {
let comment_text = "; " + text;
check str::is_not_empty("$");
let sanitized = str::replace(text, "$", "");
let comment_text = "; " + sanitized;
let asm = str::as_buf(comment_text, { |c|
str::as_buf("", { |e|
llvm::LLVMConstInlineAsm(T_fn([], T_void()), c, e, 0, 0)})});


@ -452,6 +452,10 @@ fn find_pre_post_expr(fcx: fn_ctxt, e: @expr) {
expr_alt(ex, alts) {
find_pre_post_expr(fcx, ex);
fn do_an_alt(fcx: fn_ctxt, an_alt: arm) -> pre_and_post {
alt an_alt.guard {
some(e) { find_pre_post_expr(fcx, e); }
_ {}
}
find_pre_post_block(fcx, an_alt.body);
ret block_pp(fcx.ccx, an_alt.body);
}


@ -530,6 +530,12 @@ fn find_pre_post_state_expr(fcx: fn_ctxt, pres: prestate, e: @expr) -> bool {
if vec::len(alts) > 0u {
a_post = false_postcond(num_constrs);
for an_alt: arm in alts {
alt an_alt.guard {
some(e) {
changed |= find_pre_post_state_expr(fcx, e_post, e);
}
_ {}
}
changed |=
find_pre_post_state_block(fcx, e_post, an_alt.body);
intersect(a_post, block_poststate(fcx.ccx, an_alt.body));


@ -1299,23 +1299,23 @@ fn type_autoderef(cx: ctxt, t: ty::t) -> ty::t {
fn hash_type_structure(st: sty) -> uint {
fn hash_uint(id: uint, n: uint) -> uint {
let h = id;
h += h << 5u + n;
h += (h << 5u) + n;
ret h;
}
fn hash_def(id: uint, did: ast::def_id) -> uint {
let h = id;
h += h << 5u + (did.crate as uint);
h += h << 5u + (did.node as uint);
h += (h << 5u) + (did.crate as uint);
h += (h << 5u) + (did.node as uint);
ret h;
}
fn hash_subty(id: uint, subty: t) -> uint {
let h = id;
h += h << 5u + hash_ty(subty);
h += (h << 5u) + hash_ty(subty);
ret h;
}
fn hash_type_constr(id: uint, c: @type_constr) -> uint {
let h = id;
h += h << 5u + hash_def(h, c.node.id);
h += (h << 5u) + hash_def(h, c.node.id);
ret hash_type_constr_args(h, c.node.args);
}
fn hash_type_constr_args(id: uint, args: [@ty_constr_arg]) -> uint {
@ -1338,8 +1338,8 @@ fn hash_type_structure(st: sty) -> uint {
fn hash_fn(id: uint, args: [arg], rty: t) -> uint {
let h = id;
for a: arg in args { h += h << 5u + hash_ty(a.ty); }
h += h << 5u + hash_ty(rty);
for a: arg in args { h += (h << 5u) + hash_ty(a.ty); }
h += (h << 5u) + hash_ty(rty);
ret h;
}
alt st {
@ -1366,19 +1366,19 @@ fn hash_type_structure(st: sty) -> uint {
ty_str. { ret 17u; }
ty_tag(did, tys) {
let h = hash_def(18u, did);
for typ: t in tys { h += h << 5u + hash_ty(typ); }
for typ: t in tys { h += (h << 5u) + hash_ty(typ); }
ret h;
}
ty_box(mt) { ret hash_subty(19u, mt.ty); }
ty_vec(mt) { ret hash_subty(21u, mt.ty); }
ty_rec(fields) {
let h = 26u;
for f: field in fields { h += h << 5u + hash_ty(f.mt.ty); }
for f: field in fields { h += (h << 5u) + hash_ty(f.mt.ty); }
ret h;
}
ty_tup(ts) {
let h = 25u;
for tt in ts { h += h << 5u + hash_ty(tt); }
for tt in ts { h += (h << 5u) + hash_ty(tt); }
ret h;
}
@ -1389,7 +1389,7 @@ fn hash_type_structure(st: sty) -> uint {
ty_native_fn(args, rty) { ret hash_fn(28u, args, rty); }
ty_obj(methods) {
let h = 29u;
for m: method in methods { h += h << 5u + str::hash(m.ident); }
for m: method in methods { h += (h << 5u) + str::hash(m.ident); }
ret h;
}
ty_var(v) { ret hash_uint(30u, v as uint); }
@ -1400,15 +1400,15 @@ fn hash_type_structure(st: sty) -> uint {
ty_ptr(mt) { ret hash_subty(35u, mt.ty); }
ty_res(did, sub, tps) {
let h = hash_subty(hash_def(18u, did), sub);
for tp: t in tps { h += h << 5u + hash_ty(tp); }
for tp: t in tps { h += (h << 5u) + hash_ty(tp); }
ret h;
}
ty_constr(t, cs) {
let h = 36u;
for c: @type_constr in cs { h += h << 5u + hash_type_constr(h, c); }
for c: @type_constr in cs { h += (h << 5u) + hash_type_constr(h, c); }
ret h;
}
ty_uniq(mt) { let h = 37u; h += h << 5u + hash_ty(mt.ty); ret h; }
ty_uniq(mt) { let h = 37u; h += (h << 5u) + hash_ty(mt.ty); ret h; }
}
}
@ -1416,7 +1416,7 @@ fn hash_type_info(st: sty, cname_opt: option::t<str>) -> uint {
let h = hash_type_structure(st);
alt cname_opt {
none. {/* no-op */ }
some(s) { h += h << 5u + str::hash(s); }
some(s) { h += (h << 5u) + str::hash(s); }
}
ret h;
}
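
The changes in this file are all one operator-precedence fix: in C and in the Rust of this period, + binds tighter than <<, so h += h << 5u + n parses as h += h << (5u + n) and shifts by a data-dependent amount instead of adding n after the shift. The parenthesized form restores the intended h * 33 + n mixing step. A minimal C++ sketch of the two parses (function names are illustrative):

    #include <cassert>
    #include <cstdint>

    // Illustrative only: the two parses of the unparenthesized expression.
    uint32_t mix_wrong(uint32_t h, uint32_t n) {
        return h + (h << (5 + n));   // what "h << 5u + n" actually parses as
    }

    uint32_t mix_right(uint32_t h, uint32_t n) {
        return h + ((h << 5) + n);   // intended djb2-style step: h*33 + n
    }

    int main() {
        assert(mix_right(1, 2) == 35);   // 1*33 + 2
        assert(mix_wrong(1, 2) == 129);  // 1 + (1 << 7); n was never added
        return 0;
    }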


@ -193,10 +193,13 @@ fn is_constraint_arg(e: @expr) -> bool {
fn eq_ty(&&a: @ty, &&b: @ty) -> bool { ret std::box::ptr_eq(a, b); }
fn hash_ty(&&t: @ty) -> uint { ret t.span.lo << 16u + t.span.hi; }
fn hash_ty(&&t: @ty) -> uint {
let res = (t.span.lo << 16u) + t.span.hi;
ret res;
}
fn hash_def_id(&&id: def_id) -> uint {
id.crate as uint << 16u + (id.node as uint)
(id.crate as uint << 16u) + (id.node as uint)
}
fn eq_def_id(&&a: def_id, &&b: def_id) -> bool {


@ -17,10 +17,11 @@ do
-Isrc/rt/arch/$ARCH -fno-stack-protector \
-o src/rt/intrinsics/intrinsics.$ARCH.ll.in \
src/rt/intrinsics/intrinsics.cpp
sed -i \
sed -i .orig \
-e 's/^target datalayout =/; target datalayout =/' \
src/rt/intrinsics/intrinsics.$ARCH.ll.in
sed -i \
sed -i .orig \
-e 's/^target triple = "[^"]*"/target triple = "@CFG_TARGET_TRIPLE@"/' \
src/rt/intrinsics/intrinsics.$ARCH.ll.in
done
rm src/rt/intrinsics/intrinsics.$ARCH.ll.in.orig
done


@ -137,21 +137,25 @@ fn mk_hashmap<copy K, copy V>(hasher: hashfn<K>, eqer: eqfn<K>)
// is always a power of 2), so that all buckets are probed for a
// fixed key.
fn hashl(n: uint, _nbkts: uint) -> uint { ret (n >>> 16u) * 2u + 1u; }
fn hashr(n: uint, _nbkts: uint) -> uint { ret 0x0000_ffff_u & n; }
fn hash(h: uint, nbkts: uint, i: uint) -> uint {
ret (hashl(h, nbkts) * i + hashr(h, nbkts)) % nbkts;
fn hashl(n: u32) -> u32 { ret (n >>> 16u32) * 2u32 + 1u32; }
fn hashr(n: u32) -> u32 { ret 0x0000_ffff_u32 & n; }
fn hash(h: u32, nbkts: uint, i: uint) -> uint {
ret ((hashl(h) as uint) * i + (hashr(h) as uint)) % nbkts;
}
fn to_u32(h: uint) -> u32 {
ret (h as u32) ^ ((h >>> 16u) as u32);
}
/**
* We attempt to never call this with a full table. If we do, it
* will fail.
*/
fn insert_common<copy K, copy V>(hasher: hashfn<K>, eqer: eqfn<K>,
bkts: [mutable bucket<K, V>],
nbkts: uint, key: K, val: V) -> bool {
let i: uint = 0u;
let h: uint = hasher(key);
let h = to_u32(hasher(key));
while i < nbkts {
let j: uint = hash(h, nbkts, i);
alt bkts[j] {
@ -171,7 +175,7 @@ fn mk_hashmap<copy K, copy V>(hasher: hashfn<K>, eqer: eqfn<K>)
bkts: [mutable bucket<K, V>],
nbkts: uint, key: K) -> option::t<V> {
let i: uint = 0u;
let h: uint = hasher(key);
let h = to_u32(hasher(key));
while i < nbkts {
let j: uint = hash(h, nbkts, i);
alt bkts[j] {
@ -244,7 +248,7 @@ fn mk_hashmap<copy K, copy V>(hasher: hashfn<K>, eqer: eqfn<K>)
}
fn remove(key: K) -> option::t<V> {
let i: uint = 0u;
let h: uint = hasher(key);
let h = to_u32(hasher(key));
while i < nbkts {
let j: uint = hash(h, nbkts, i);
alt bkts[j] {
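
The move to u32 hashes keeps the probing scheme the comment above relies on: hashl always yields an odd stride, and because the bucket count is a power of two, an odd stride is coprime with it, so the sequence hashl * i + hashr (mod nbkts) reaches every bucket. A small C++ sketch verifying that property (constants are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
        const uint32_t h = 0xdeadbeefu;
        const uint32_t stride = (h >> 16) * 2 + 1;  // hashl: always odd
        const uint32_t offset = h & 0xffffu;        // hashr
        const uint32_t nbkts = 64;                  // power of two
        std::vector<bool> seen(nbkts, false);
        for (uint32_t i = 0; i < nbkts; i++)
            seen[(stride * i + offset) % nbkts] = true;
        for (bool b : seen) assert(b);              // every bucket is probed
        return 0;
    }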


@ -324,7 +324,7 @@ fn char_range_at(s: str, i: uint) -> {ch: char, next: uint} {
// Clunky way to get the right bits from the first byte. Uses two shifts,
// the first to clip off the marker bits at the left of the byte, and then
// a second (as uint) to get it to the right position.
val += (b0 << (w + 1u as u8) as uint) << (w - 1u) * 6u - w - 1u;
val += (b0 << (w + 1u as u8) as uint) << ((w - 1u) * 6u - w - 1u);
ret {ch: val as char, next: i};
}
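
The new parentheses matter for the precedence reason noted earlier: the second shift amount, (w - 1u) * 6u - w - 1u, must be evaluated as a whole. A hedged C++ sketch of the two-shift decode the comment describes, worked for a two-byte sequence:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint8_t b0 = 0xC3, b1 = 0xA9;  // U+00E9 ("é") as UTF-8
        const unsigned w = 2;                // sequence width in bytes
        uint32_t val = b1 & 0x3Fu;           // low 6 bits from the continuation byte
        // The first shift clips the w+1 marker bits off b0 (inside 8 bits);
        // the payload then sits at << (w+1), so the second shift supplies
        // the remaining (w-1)*6 - w - 1 positions.
        val += (uint32_t)((uint8_t)(b0 << (w + 1))) << ((w - 1) * 6 - w - 1);
        assert(val == 0xE9);
        return 0;
    }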


@ -288,7 +288,7 @@ fn spawn_inner<send T>(-data: T, f: fn(T),
notify: option<comm::chan<task_notification>>)
-> task unsafe {
fn wrapper<send T>(-data: *u8, f: fn(T)) unsafe {
fn wrapper<send T>(data: *u8, f: fn(T)) unsafe {
let data: ~T = unsafe::reinterpret_cast(data);
f(*data);
}


@ -2,11 +2,14 @@
#if defined(__APPLE__) || defined(_WIN32)
#define RECORD_SP _record_sp
#define GET_SP _get_sp
#else
#define RECORD_SP record_sp
#define GET_SP get_sp
#endif
.globl RECORD_SP
.globl GET_SP
#if defined(__linux__)
RECORD_SP:
@ -25,3 +28,7 @@ RECORD_SP:
ret
#endif
#endif
GET_SP:
movl %esp, %eax
ret


@ -132,6 +132,10 @@ MORESTACK:
addq $8, %rsp
popq %rbp
#ifdef __linux__
.cfi_restore %rbp
.cfi_def_cfa %rsp, 8
#endif
ret
#if defined(__ELF__)


@ -2,11 +2,14 @@
#if defined(__APPLE__) || defined(_WIN32)
#define RECORD_SP _record_sp
#define GET_SP _get_sp
#else
#define RECORD_SP record_sp
#define GET_SP get_sp
#endif
.globl RECORD_SP
.globl GET_SP
#if defined(__linux__)
RECORD_SP:
@ -23,3 +26,7 @@ RECORD_SP:
ret
#endif
#endif
GET_SP:
movq %rsp, %rax
ret


@ -286,7 +286,6 @@ rust_scheduler::start_main_loop() {
scheduled_task->state->name);
place_task_in_tls(scheduled_task);
//pthread_setspecific(89, (void *)scheduled_task->stk->limit);
interrupt_flag = 0;


@ -530,9 +530,9 @@ log::walk_res(const rust_fn *dtor, unsigned n_params,
} // end namespace shape
extern "C" void
upcall_cmp_type(int8_t *result, const type_desc *tydesc,
const type_desc **subtydescs, uint8_t *data_0,
uint8_t *data_1, uint8_t cmp_type) {
shape_cmp_type(int8_t *result, const type_desc *tydesc,
const type_desc **subtydescs, uint8_t *data_0,
uint8_t *data_1, uint8_t cmp_type) {
rust_task *task = rust_scheduler::get_task();
shape::arena arena;
@ -553,7 +553,7 @@ upcall_cmp_type(int8_t *result, const type_desc *tydesc,
}
extern "C" void
upcall_log_type(const type_desc *tydesc, uint8_t *data, uint32_t level) {
shape_log_type(const type_desc *tydesc, uint8_t *data, uint32_t level) {
rust_task *task = rust_scheduler::get_task();
if (task->sched->log_lvl < level)
return; // TODO: Don't evaluate at all?


@ -14,15 +14,22 @@
#include "globals.h"
// Each stack gets some guard bytes that valgrind will verify we don't touch
#ifndef NVALGRIND
#define STACK_NOACCESS_SIZE 16
#else
#define STACK_NOACCESS_SIZE 0
#endif
// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
// FIXME: We want this to be 128 but need to slim the red zone calls down
#ifdef __i386__
#define RED_ZONE_SIZE 2048
#define RED_ZONE_SIZE (65536 + STACK_NOACCESS_SIZE)
#endif
#ifdef __x86_64__
#define RED_ZONE_SIZE 2048
#define RED_ZONE_SIZE (65536 + STACK_NOACCESS_SIZE)
#endif
// Stack size
@ -51,11 +58,14 @@ new_stk(rust_scheduler *sched, rust_task *task, size_t minsz)
LOGPTR(task->sched, "new stk", (uintptr_t)stk);
memset(stk, 0, sizeof(stk_seg));
stk->next = task->stk;
stk->limit = (uintptr_t) &stk->data[minsz + RED_ZONE_SIZE];
LOGPTR(task->sched, "stk limit", stk->limit);
stk->end = (uintptr_t) &stk->data[minsz + RED_ZONE_SIZE];
LOGPTR(task->sched, "stk end", stk->end);
stk->valgrind_id =
VALGRIND_STACK_REGISTER(&stk->data[0],
&stk->data[minsz + RED_ZONE_SIZE]);
#ifndef NVALGRIND
VALGRIND_MAKE_MEM_NOACCESS(stk->data, STACK_NOACCESS_SIZE);
#endif
task->stk = stk;
return stk;
}
@ -67,6 +77,9 @@ del_stk(rust_task *task, stk_seg *stk)
task->stk = stk->next;
#ifndef NVALGRIND
VALGRIND_MAKE_MEM_DEFINED(stk->data, STACK_NOACCESS_SIZE);
#endif
VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
task->free(stk);
@ -106,7 +119,7 @@ rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
user.notify_enabled = 0;
stk = new_stk(sched, this, 0);
user.rust_sp = stk->limit;
user.rust_sp = stk->end;
if (supervisor) {
supervisor->ref();
}
@ -582,7 +595,7 @@ rust_task::new_stack(size_t stk_sz, void *args_addr, size_t args_sz) {
stk_seg *stk_seg = new_stk(sched, this, stk_sz + args_sz);
uint8_t *new_sp = (uint8_t*)stk_seg->limit;
uint8_t *new_sp = (uint8_t*)stk_seg->end;
size_t sizeof_retaddr = sizeof(void*);
// Make enough room on the new stack to hold the old stack pointer
// in addition to the function arguments
@ -608,11 +621,34 @@ rust_task::record_stack_limit() {
// account for those 256 bytes.
const unsigned LIMIT_OFFSET = 256;
A(sched,
(uintptr_t)stk->limit - RED_ZONE_SIZE
(uintptr_t)stk->end - RED_ZONE_SIZE
- (uintptr_t)stk->data >= LIMIT_OFFSET,
"Stack size must be greater than LIMIT_OFFSET");
record_sp(stk->data + LIMIT_OFFSET + RED_ZONE_SIZE);
}
extern "C" uintptr_t get_sp();
/*
Called by landing pads during unwinding to figure out which
stack segment we are currently running on, delete the others,
and record the stack limit (which was not restored when unwinding
through __morestack).
*/
void
rust_task::reset_stack_limit() {
uintptr_t sp = get_sp();
// Not positive these bounds for sp are correct.
// I think that the first possible value for esp on a new
// stack is stk->end, which points one word in front of
// the first word to be pushed onto a new stack.
while (sp <= (uintptr_t)stk->data || stk->end < sp) {
del_stk(this, stk);
A(sched, stk != NULL, "Failed to find the current stack");
}
record_stack_limit();
}
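
A condensed C++ sketch of the search loop above; the field names mirror stk_seg, but the layout and helper are illustrative only:

    #include <cassert>
    #include <cstdint>

    // Field names mirror stk_seg; the layout is illustrative.
    struct seg { seg *next; uintptr_t data_lo; uintptr_t end; };

    seg *find_current_segment(seg *stk, uintptr_t sp) {
        // Pop segments until sp falls inside (data_lo, end]; the real loop
        // also frees each popped segment with del_stk.
        while (sp <= stk->data_lo || stk->end < sp) {
            stk = stk->next;
            assert(stk != nullptr && "failed to find the current stack");
        }
        return stk;
    }

    int main() {
        seg bottom = {nullptr, 0x1000, 0x2000};
        seg top    = {&bottom, 0x3000, 0x4000};
        assert(find_current_segment(&top, 0x1800) == &bottom);  // sp in older seg
        return 0;
    }
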
//
// Local Variables:
// mode: C++


@ -25,7 +25,7 @@ struct rust_box;
struct stk_seg {
stk_seg *next;
uintptr_t limit;
uintptr_t end;
unsigned int valgrind_id;
#ifndef _LP64
uint32_t pad;
@ -200,6 +200,7 @@ rust_task : public kernel_owned<rust_task>, rust_cond
void *new_stack(size_t stk_sz, void *args_addr, size_t args_sz);
void del_stack();
void record_stack_limit();
void reset_stack_limit();
};
//


@ -6,7 +6,31 @@
#include "rust_upcall.h"
#include <stdint.h>
// Upcalls.
extern "C" void record_sp(void *limit);
/**
* Switches to the C stack and invokes |fn_ptr|, passing |args| as its argument.
*/
extern "C" CDECL void
upcall_call_shim_on_c_stack(void *args, void *fn_ptr) {
rust_task *task = rust_scheduler::get_task();
// FIXME (1226) - The shim functions generated by rustc contain the
// morestack prologue, so we need to let them know they have enough
// stack.
record_sp(0);
rust_scheduler *sched = task->sched;
try {
sched->c_context.call_shim_on_c_stack(args, fn_ptr);
} catch (...) {
task = rust_scheduler::get_task();
task->record_stack_limit();
throw;
}
task = rust_scheduler::get_task();
task->record_stack_limit();
}
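
The rest of this file applies one convention repeatedly: each upcall's arguments move into an s_*_args struct, and a matching upcall_s_* entry point takes a single pointer, so the stack-switching shim above only ever forwards one void* to one function pointer. A minimal C++ sketch of the shape of that convention (every name in it is a hypothetical stand-in):

    #include <cstdio>

    // All names here are hypothetical stand-ins for the pattern below.
    struct fail_args { const char *expr; const char *file; int line; };

    extern "C" void upcall_s_fail_demo(void *a) {
        fail_args *args = static_cast<fail_args *>(a);
        std::printf("fail '%s', %s:%d\n", args->expr, args->file, args->line);
    }

    // Stand-in for call_shim_on_c_stack; the real shim swaps stacks first.
    static void call_on_other_stack(void *args, void (*fn)(void *)) { fn(args); }

    int main() {
        fail_args args = {"x > 0", "demo.rs", 42};
        call_on_other_stack(&args, upcall_s_fail_demo);  // one pointer, one fn
        return 0;
    }
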
#if defined(__i386__) || defined(__x86_64__) || defined(_M_X64)
void
@ -47,24 +71,34 @@ copy_elements(rust_task *task, type_desc *elem_t,
}
}
struct s_fail_args {
char const *expr;
char const *file;
size_t line;
};
extern "C" CDECL void
upcall_fail(char const *expr,
char const *file,
size_t line) {
upcall_s_fail(s_fail_args *args) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
LOG_ERR(task, upcall, "upcall fail '%s', %s:%" PRIdPTR, expr, file, line);
LOG_ERR(task, upcall, "upcall fail '%s', %s:%" PRIdPTR,
args->expr, args->file, args->line);
task->fail();
}
struct s_malloc_args {
size_t nbytes;
type_desc *td;
};
extern "C" CDECL uintptr_t
upcall_malloc(size_t nbytes, type_desc *td) {
upcall_s_malloc(s_malloc_args *args) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
LOG(task, mem,
"upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
nbytes, td);
args->nbytes, args->td);
gc::maybe_gc(task);
cc::maybe_cc(task);
@ -72,52 +106,278 @@ upcall_malloc(size_t nbytes, type_desc *td) {
// TODO: Maybe use dladdr here to find a more useful name for the
// type_desc.
void *p = task->malloc(nbytes, "tdesc", td);
memset(p, '\0', nbytes);
void *p = task->malloc(args->nbytes, "tdesc", args->td);
memset(p, '\0', args->nbytes);
task->local_allocs[p] = td;
task->local_allocs[p] = args->td;
debug::maybe_track_origin(task, p);
LOG(task, mem,
"upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ") = 0x%" PRIxPTR,
nbytes, td, (uintptr_t)p);
args->nbytes, args->td, (uintptr_t)p);
return (uintptr_t) p;
}
struct s_free_args {
void *ptr;
uintptr_t is_gc;
};
/**
* Called whenever an object's ref count drops to zero.
*/
extern "C" CDECL void
upcall_s_free(s_free_args *args) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
rust_scheduler *sched = task->sched;
DLOG(sched, mem,
"upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")",
(uintptr_t)args->ptr, args->is_gc);
task->local_allocs.erase(args->ptr);
debug::maybe_untrack_origin(task, args->ptr);
task->free(args->ptr, (bool) args->is_gc);
}
struct s_shared_malloc_args {
size_t nbytes;
type_desc *td;
};
extern "C" CDECL uintptr_t
upcall_s_shared_malloc(s_shared_malloc_args *args) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
LOG(task, mem,
"upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
args->nbytes, args->td);
void *p = task->kernel->malloc(args->nbytes, "shared malloc");
memset(p, '\0', args->nbytes);
LOG(task, mem,
"upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR
") = 0x%" PRIxPTR,
args->nbytes, args->td, (uintptr_t)p);
return (uintptr_t) p;
}
struct s_shared_free_args {
void *ptr;
};
/**
* Called whenever an object's ref count drops to zero.
*/
extern "C" CDECL void
upcall_s_shared_free(s_shared_free_args *args) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
rust_scheduler *sched = task->sched;
DLOG(sched, mem,
"upcall shared_free(0x%" PRIxPTR")",
(uintptr_t)args->ptr);
task->kernel->free(args->ptr);
}
struct s_get_type_desc_args {
size_t size;
size_t align;
size_t n_descs;
type_desc const **descs;
uintptr_t n_obj_params;
};
extern "C" CDECL type_desc *
upcall_s_get_type_desc(s_get_type_desc_args *args) {
rust_task *task = rust_scheduler::get_task();
check_stack(task);
LOG_UPCALL_ENTRY(task);
LOG(task, cache, "upcall get_type_desc with size=%" PRIdPTR
", align=%" PRIdPTR ", %" PRIdPTR " descs", args->size, args->align,
args->n_descs);
rust_crate_cache *cache = task->get_crate_cache();
type_desc *td = cache->get_type_desc(args->size, args->align, args->n_descs,
args->descs, args->n_obj_params);
LOG(task, cache, "returning tydesc 0x%" PRIxPTR, td);
return td;
}
struct s_vec_grow_args {
rust_vec** vp;
size_t new_sz;
};
extern "C" CDECL void
upcall_s_vec_grow(s_vec_grow_args *args) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
reserve_vec(task, args->vp, args->new_sz);
(*args->vp)->fill = args->new_sz;
}
struct s_vec_push_args {
rust_vec** vp;
type_desc* elt_ty;
void* elt;
};
extern "C" CDECL void
upcall_s_vec_push(s_vec_push_args *args) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
size_t new_sz = (*args->vp)->fill + args->elt_ty->size;
reserve_vec(task, args->vp, new_sz);
rust_vec* v = *args->vp;
copy_elements(task, args->elt_ty, &v->data[0] + v->fill,
args->elt, args->elt_ty->size);
v->fill += args->elt_ty->size;
}
/**
* Returns a token that can be used to deallocate all of the space
* allocated in the dynamic stack.
*/
extern "C" CDECL void *
upcall_s_dynastack_mark() {
return rust_scheduler::get_task()->dynastack.mark();
}
struct s_dynastack_alloc_args {
size_t sz;
};
/**
* Allocates space in the dynamic stack and returns it.
*
* FIXME: Deprecated since dynamic stacks need to be self-describing for GC.
*/
extern "C" CDECL void *
upcall_s_dynastack_alloc(s_dynastack_alloc_args *args) {
size_t sz = args->sz;
return sz ? rust_scheduler::get_task()->dynastack.alloc(sz, NULL) : NULL;
}
struct s_dynastack_alloc_2_args {
size_t sz;
type_desc *ty;
};
/**
* Allocates space associated with a type descriptor in the dynamic stack and
* returns it.
*/
extern "C" CDECL void *
upcall_s_dynastack_alloc_2(s_dynastack_alloc_2_args *args) {
size_t sz = args->sz;
type_desc *ty = args->ty;
return sz ? rust_scheduler::get_task()->dynastack.alloc(sz, ty) : NULL;
}
struct s_dynastack_free_args {
void *ptr;
};
/** Frees space in the dynamic stack. */
extern "C" CDECL void
upcall_s_dynastack_free(s_dynastack_free_args *args) {
return rust_scheduler::get_task()->dynastack.free(args->ptr);
}
extern "C" _Unwind_Reason_Code
__gxx_personality_v0(int version,
_Unwind_Action actions,
uint64_t exception_class,
_Unwind_Exception *ue_header,
_Unwind_Context *context);
struct s_rust_personality_args {
int version;
_Unwind_Action actions;
uint64_t exception_class;
_Unwind_Exception *ue_header;
_Unwind_Context *context;
};
extern "C" _Unwind_Reason_Code
upcall_s_rust_personality(s_rust_personality_args *args) {
return __gxx_personality_v0(args->version,
args->actions,
args->exception_class,
args->ue_header,
args->context);
}
extern "C" void
shape_cmp_type(int8_t *result, const type_desc *tydesc,
const type_desc **subtydescs, uint8_t *data_0,
uint8_t *data_1, uint8_t cmp_type);
struct s_cmp_type_args {
int8_t *result;
const type_desc *tydesc;
const type_desc **subtydescs;
uint8_t *data_0;
uint8_t *data_1;
uint8_t cmp_type;
};
extern "C" void
upcall_s_cmp_type(s_cmp_type_args *args) {
shape_cmp_type(args->result, args->tydesc, args->subtydescs,
args->data_0, args->data_1, args->cmp_type);
}
extern "C" void
shape_log_type(const type_desc *tydesc, uint8_t *data, uint32_t level);
struct s_log_type_args {
const type_desc *tydesc;
uint8_t *data;
uint32_t level;
};
extern "C" void
upcall_s_log_type(s_log_type_args *args) {
shape_log_type(args->tydesc, args->data, args->level);
}
// ______________________________________________________________________________
// Upcalls in the original format: deprecated, to be removed once the
// snapshot transitions away from them.
extern "C" CDECL void
upcall_fail(char const *expr,
char const *file,
size_t line) {
s_fail_args args = {expr,file,line};
upcall_s_fail(&args);
}
extern "C" CDECL uintptr_t
upcall_malloc(size_t nbytes, type_desc *td) {
s_malloc_args args = {nbytes, td};
return upcall_s_malloc(&args);
}
/**
* Called whenever an object's ref count drops to zero.
*/
extern "C" CDECL void
upcall_free(void* ptr, uintptr_t is_gc) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
rust_scheduler *sched = task->sched;
DLOG(sched, mem,
"upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")",
(uintptr_t)ptr, is_gc);
task->local_allocs.erase(ptr);
debug::maybe_untrack_origin(task, ptr);
task->free(ptr, (bool) is_gc);
s_free_args args = {ptr, is_gc};
upcall_s_free(&args);
}
extern "C" CDECL uintptr_t
upcall_shared_malloc(size_t nbytes, type_desc *td) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
LOG(task, mem,
"upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
nbytes, td);
void *p = task->kernel->malloc(nbytes, "shared malloc");
memset(p, '\0', nbytes);
LOG(task, mem,
"upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR
") = 0x%" PRIxPTR,
nbytes, td, (uintptr_t)p);
return (uintptr_t) p;
s_shared_malloc_args args = {nbytes, td};
return upcall_s_shared_malloc(&args);
}
/**
@ -125,14 +385,8 @@ upcall_shared_malloc(size_t nbytes, type_desc *td) {
*/
extern "C" CDECL void
upcall_shared_free(void* ptr) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
rust_scheduler *sched = task->sched;
DLOG(sched, mem,
"upcall shared_free(0x%" PRIxPTR")",
(uintptr_t)ptr);
task->kernel->free(ptr);
s_shared_free_args args = {ptr};
upcall_s_shared_free(&args);
}
extern "C" CDECL type_desc *
@ -142,37 +396,20 @@ upcall_get_type_desc(void *curr_crate, // ignored, legacy compat.
size_t n_descs,
type_desc const **descs,
uintptr_t n_obj_params) {
rust_task *task = rust_scheduler::get_task();
check_stack(task);
LOG_UPCALL_ENTRY(task);
LOG(task, cache, "upcall get_type_desc with size=%" PRIdPTR
", align=%" PRIdPTR ", %" PRIdPTR " descs", size, align,
n_descs);
rust_crate_cache *cache = task->get_crate_cache();
type_desc *td = cache->get_type_desc(size, align, n_descs, descs,
n_obj_params);
LOG(task, cache, "returning tydesc 0x%" PRIxPTR, td);
return td;
s_get_type_desc_args args = {size,align,n_descs,descs,n_obj_params};
return upcall_s_get_type_desc(&args);
}
extern "C" CDECL void
upcall_vec_grow(rust_vec** vp, size_t new_sz) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
reserve_vec(task, vp, new_sz);
(*vp)->fill = new_sz;
s_vec_grow_args args = {vp, new_sz};
upcall_s_vec_grow(&args);
}
extern "C" CDECL void
upcall_vec_push(rust_vec** vp, type_desc* elt_ty, void* elt) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);
size_t new_sz = (*vp)->fill + elt_ty->size;
reserve_vec(task, vp, new_sz);
rust_vec* v = *vp;
copy_elements(task, elt_ty, &v->data[0] + v->fill, elt, elt_ty->size);
v->fill += elt_ty->size;
s_vec_push_args args = {vp, elt_ty, elt};
upcall_s_vec_push(&args);
}
/**
@ -181,7 +418,7 @@ upcall_vec_push(rust_vec** vp, type_desc* elt_ty, void* elt) {
*/
extern "C" CDECL void *
upcall_dynastack_mark() {
return rust_scheduler::get_task()->dynastack.mark();
return upcall_s_dynastack_mark();
}
/**
@ -191,7 +428,8 @@ upcall_dynastack_mark() {
*/
extern "C" CDECL void *
upcall_dynastack_alloc(size_t sz) {
return sz ? rust_scheduler::get_task()->dynastack.alloc(sz, NULL) : NULL;
s_dynastack_alloc_args args = {sz};
return upcall_s_dynastack_alloc(&args);
}
/**
@ -200,40 +438,40 @@ upcall_dynastack_alloc(size_t sz) {
*/
extern "C" CDECL void *
upcall_dynastack_alloc_2(size_t sz, type_desc *ty) {
return sz ? rust_scheduler::get_task()->dynastack.alloc(sz, ty) : NULL;
s_dynastack_alloc_2_args args = {sz, ty};
return upcall_s_dynastack_alloc_2(&args);
}
/** Frees space in the dynamic stack. */
extern "C" CDECL void
upcall_dynastack_free(void *ptr) {
return rust_scheduler::get_task()->dynastack.free(ptr);
s_dynastack_free_args args = {ptr};
return upcall_s_dynastack_free(&args);
}
extern "C" void record_sp(void *limit);
extern "C" _Unwind_Reason_Code
upcall_rust_personality(int version,
_Unwind_Action actions,
uint64_t exception_class,
_Unwind_Exception *ue_header,
_Unwind_Context *context) {
s_rust_personality_args args = {version, actions, exception_class, ue_header,
context};
return upcall_s_rust_personality(&args);
}
/**
* Switch to the C stack and call the given function, passing a single pointer
* argument.
*/
extern "C" CDECL void
upcall_call_shim_on_c_stack(void *args, void *fn_ptr) {
rust_task *task = rust_scheduler::get_task();
extern "C" void
upcall_cmp_type(int8_t *result, const type_desc *tydesc,
const type_desc **subtydescs, uint8_t *data_0,
uint8_t *data_1, uint8_t cmp_type) {
s_cmp_type_args args = {result, tydesc, subtydescs, data_0, data_1, cmp_type};
upcall_s_cmp_type(&args);
}
// FIXME (1226) - The shim functions generated by rustc contain the
// morestack prologue, so we need to let them know they have enough
// stack.
record_sp(0);
rust_scheduler *sched = task->sched;
try {
sched->c_context.call_shim_on_c_stack(args, fn_ptr);
} catch (...) {
task = rust_scheduler::get_task();
task->record_stack_limit();
throw;
}
task = rust_scheduler::get_task();
task->record_stack_limit();
extern "C" void
upcall_log_type(const type_desc *tydesc, uint8_t *data, uint32_t level) {
s_log_type_args args = {tydesc, data, level};
upcall_s_log_type(&args);
}
struct rust_new_stack2_args {
@ -245,6 +483,8 @@ struct rust_new_stack2_args {
// A new stack function suitable for calling through
// upcall_call_shim_on_c_stack
// FIXME: Convert this to the same arrangement as
// the other upcalls, simplify __morestack
extern "C" CDECL void
upcall_new_stack(struct rust_new_stack2_args *args) {
rust_task *task = rust_scheduler::get_task();
@ -253,30 +493,21 @@ upcall_new_stack(struct rust_new_stack2_args *args) {
args->args_sz);
}
// FIXME: As above
extern "C" CDECL void
upcall_del_stack() {
rust_task *task = rust_scheduler::get_task();
task->del_stack();
}
extern "C" _Unwind_Reason_Code
__gxx_personality_v0(int version,
_Unwind_Action actions,
uint64_t exception_class,
_Unwind_Exception *ue_header,
_Unwind_Context *context);
extern "C" _Unwind_Reason_Code
upcall_rust_personality(int version,
_Unwind_Action actions,
uint64_t exception_class,
_Unwind_Exception *ue_header,
_Unwind_Context *context) {
return __gxx_personality_v0(version,
actions,
exception_class,
ue_header,
context);
// Landing pads need to call this to insert the
// correct limit into TLS.
// NB: This must run on the Rust stack because it
// needs to acquire the value of the stack pointer
extern "C" CDECL void
upcall_reset_stack_limit() {
rust_task *task = rust_scheduler::get_task();
task->reset_stack_limit();
}
//
@ -286,6 +517,5 @@ upcall_rust_personality(int version,
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//


@ -69,6 +69,7 @@ upcall_vec_push
upcall_call_shim_on_c_stack
upcall_new_stack
upcall_del_stack
upcall_reset_stack_limit
asm_call_on_stack
rust_uv_default_loop
rust_uv_loop_new


@ -112,12 +112,12 @@ mod map_reduce {
send(out, chan(p));
let ref_count = 0;
let is_done = false;
let state = @{mutable ref_count: 0, mutable is_done: false};
fn get(p: port<reduce_proto>, &ref_count: int, &is_done: bool) ->
option<int> {
while !is_done || ref_count > 0 {
fn get(p: port<reduce_proto>, state: @{mutable ref_count: int,
mutable is_done: bool})
-> option<int> {
while !state.is_done || state.ref_count > 0 {
alt recv(p) {
emit_val(v) {
// log_err #fmt("received %d", v);
@ -125,16 +125,16 @@ mod map_reduce {
}
done. {
// log_err "all done";
is_done = true;
state.is_done = true;
}
ref. { ref_count += 1; }
release. { ref_count -= 1; }
ref. { state.ref_count += 1; }
release. { state.ref_count -= 1; }
}
}
ret none;
}
reduce(key, bind get(p, ref_count, is_done));
reduce(key, bind get(p, state));
}
fn map_reduce(-inputs: [str]) {
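
The test adapts to the new check_bind rule: instead of binding mutable references into the closure, the two counters move into one heap box that the closure captures by value. A rough C++ analogue, with std::shared_ptr standing in for the @-box:

    #include <cassert>
    #include <functional>
    #include <memory>

    // shared_ptr stands in for the @-box; names are illustrative.
    struct state { int ref_count = 0; bool is_done = false; };

    int main() {
        auto st = std::make_shared<state>();  // one box captured by value
        std::function<void()> on_ref  = [st] { st->ref_count += 1; };
        std::function<void()> on_done = [st] { st->is_done = true; };
        on_ref(); on_ref(); on_done();
        assert(st->ref_count == 2 && st->is_done);
        return 0;
    }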


@ -25,10 +25,10 @@ resource and_then_get_big_again(_i: @int) {
getbig(i - 1);
}
}
getbig(1000);
getbig(100);
}
fn main() {
rustrt::set_min_stack(256u);
std::task::spawn(1000, getbig_and_fail);
std::task::spawn(100, getbig_and_fail);
}


@ -39,8 +39,8 @@ fn traverse_in_order() {
insert(m, 2, ());
insert(m, 1, ());
let n = 0;
fn t(&n: int, &&k: int, &&_v: ()) { assert (n == k); n += 1; }
let n = @mutable 0;
fn t(n: @mutable int, &&k: int, &&_v: ()) { assert (*n == k); *n += 1; }
traverse(m, bind t(n, _, _));
}