make boxes self-describing (fixes #1493)

parent 49cb3fc7df
commit c36207bfb8

mk/rt.mk | 1 +
@@ -61,6 +61,7 @@ RUNTIME_CS_$(1) := \
 	rt/rust_cc.cpp \
 	rt/rust_debug.cpp \
 	rt/memory_region.cpp \
+	rt/boxed_region.cpp \
 	rt/test/rust_test_harness.cpp \
 	rt/test/rust_test_runtime.cpp \
 	rt/test/rust_test_util.cpp \
@@ -7,7 +7,7 @@
 
 # If you are making non-backwards compatible changes to the runtime,
 # set this flag to 1.  It will cause stage1 to use the snapshot
 # runtime rather than the runtime from the working directory.
-USE_SNAPSHOT_RUNTIME=0
+USE_SNAPSHOT_RUNTIME=1
 
 define TARGET_STAGE_N
@@ -26,9 +26,11 @@ const frame_glue_fns_field_drop: int = 1;
 
 const frame_glue_fns_field_reloc: int = 2;
 
-// n.b. must be same as cbox_elt_refcnt
-const box_rc_field_refcnt: int = 0;
-const box_rc_field_body: int = 1;
+const box_field_refcnt: int = 0;
+const box_field_tydesc: int = 1;
+const box_field_prev: int = 2;
+const box_field_next: int = 3;
+const box_field_body: int = 4;
 
 const general_code_alignment: int = 16;
 
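For orientation, the five new `box_field_*` offsets correspond one-to-one to the runtime box header this commit introduces. The header's real declaration lives in rust_internal.h, which this capture does not include, so the following C++ sketch infers the field names from the accesses in boxed_region.cpp further down (box->ref_count, box->td, box->prev, box->next):

```cpp
// Sketch, inferred from boxed_region.cpp -- not copied from rust_internal.h.
struct rust_opaque_box {
    intptr_t ref_count;          // box_field_refcnt == 0
    type_desc *td;               // box_field_tydesc == 1
    rust_opaque_box *prev;       // box_field_prev   == 2 (allocator's list)
    rust_opaque_box *next;       // box_field_next   == 3 (allocator's list)
    // the box body starts here:    box_field_body   == 4
};
```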
@@ -59,13 +61,9 @@ const cmp_glue_op_le: uint = 2u;
 const fn_field_code: int = 0;
 const fn_field_box: int = 1;
 
-// closure_box, see trans_closure.rs
-//
-// n.b. the refcnt must be compatible with a normal box
-const cbox_elt_refcnt: int = 0;
-const cbox_elt_tydesc: int = 1;
-const cbox_elt_ty_params: int = 2;
-const cbox_elt_bindings: int = 3;
+// closures, see trans_closure.rs
+const closure_body_ty_params: int = 0;
+const closure_body_bindings: int = 1;
 
 const vec_elt_fill: int = 0;
 
@@ -11,6 +11,7 @@ type upcalls =
     {_fail: ValueRef,
      malloc: ValueRef,
      free: ValueRef,
+     validate_box: ValueRef,
      shared_malloc: ValueRef,
      shared_free: ValueRef,
      mark: ValueRef,
@@ -52,10 +53,12 @@ fn declare_upcalls(targ_cfg: @session::config,
                  T_ptr(T_i8()),
                  size_t]),
       malloc:
-          d("malloc", [size_t, T_ptr(tydesc_type)],
+          d("malloc", [T_ptr(tydesc_type)],
             T_ptr(T_i8())),
       free:
           dv("free", [T_ptr(T_i8()), int_t]),
+      validate_box:
+          dv("validate_box", [T_ptr(T_i8())]),
       shared_malloc:
           d("shared_malloc", [size_t, T_ptr(tydesc_type)],
             T_ptr(T_i8())),
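The `malloc` upcall loses its explicit size argument because the runtime can now compute the total allocation size from the type descriptor itself (see boxed_region::malloc below). A hedged sketch of the matching runtime-side entry point; the upcall name and the `task->boxed` member are taken from elsewhere in the runtime, but this body is illustrative, not quoted from the commit:

```cpp
// Illustrative only: the compiler now calls malloc(tydesc); the runtime
// derives header + body size from tydesc and returns the box as an i8*.
extern "C" CDECL void *
upcall_malloc(type_desc *td) {
    rust_task *task = rust_scheduler::get_task();
    rust_opaque_box *box = task->boxed.malloc(td);  // ref_count starts at 1
    return reinterpret_cast<void *>(box);
}
```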
@@ -14,6 +14,7 @@ import syntax::ast_util::{dummy_sp};
 import syntax::ast::def_id;
 import syntax::codemap::span;
 import syntax::print::pprust::pat_to_str;
+import back::abi;
 
 import common::*;
 
@@ -465,7 +466,7 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
     // Unbox in case of a box field
     if any_box_pat(m, col) {
         let box = Load(bcx, val);
-        let unboxed = GEPi(bcx, box, [0, back::abi::box_rc_field_body]);
+        let unboxed = GEPi(bcx, box, [0, abi::box_field_body]);
         compile_submatch(bcx, enter_box(m, col, val), [unboxed] + vals_left,
                          f, exits);
         ret;
@@ -776,7 +777,7 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
       ast::pat_box(inner) {
         let box = Load(bcx, val);
         let unboxed =
-            GEPi(bcx, box, [0, back::abi::box_rc_field_body]);
+            GEPi(bcx, box, [0, abi::box_field_body]);
         bcx = bind_irrefutable_pat(bcx, inner, unboxed, true);
       }
       ast::pat_uniq(inner) {
@@ -91,7 +91,7 @@ fn type_of_fn(cx: @crate_ctxt, inputs: [ty::arg],
     atys += [out_ty];
 
     // Arg 1: Environment
-    atys += [T_opaque_cbox_ptr(cx)];
+    atys += [T_opaque_box_ptr(cx)];
 
     // Args >2: ty params, if not acquired via capture...
     for bounds in params {
@@ -193,7 +193,7 @@ fn type_of_inner(cx: @crate_ctxt, t: ty::t)
         T_struct(tys)
       }
       ty::ty_opaque_closure_ptr(_) {
-        T_opaque_cbox_ptr(cx)
+        T_opaque_box_ptr(cx)
       }
       ty::ty_constr(subt,_) {
         // FIXME: could be a constraint on ty_fn
@@ -764,54 +764,54 @@ fn trans_shared_malloc(cx: @block_ctxt, llptr_ty: TypeRef, llsize: ValueRef)
     ret rslt(cx, PointerCast(cx, rval, llptr_ty));
 }
 
+// Returns a pointer to the body for the box.  The box may be an opaque
+// box.  The result will be cast to the type of body_t, if it is statically
+// known.
+//
+// The runtime equivalent is box_body() in "rust_internal.h".
+fn opaque_box_body(bcx: @block_ctxt,
+                   body_t: ty::t,
+                   boxptr: ValueRef) -> ValueRef {
+    let ccx = bcx_ccx(bcx);
+    let boxptr = PointerCast(bcx, boxptr, T_ptr(T_box_header(ccx)));
+    let bodyptr = GEPi(bcx, boxptr, [1]);
+    if check type_has_static_size(ccx, body_t) {
+        PointerCast(bcx, bodyptr, T_ptr(type_of(ccx, body_t)))
+    } else {
+        PointerCast(bcx, bodyptr, T_ptr(T_i8()))
+    }
+}
+
 // trans_malloc_boxed_raw: expects an unboxed type and returns a pointer to
-// enough space for something of that type, along with space for a reference
-// count; in other words, it allocates a box for something of that type.
-fn trans_malloc_boxed_raw(cx: @block_ctxt, t: ty::t) -> result {
-    let bcx = cx;
-
-    // Synthesize a fake box type structurally so we have something
-    // to measure the size of.
-
-    // We synthesize two types here because we want both the type of the
-    // pointer and the pointee.  boxed_body is the type that we measure the
-    // size of; box_ptr is the type that's converted to a TypeRef and used as
-    // the pointer cast target in trans_raw_malloc.
-
-    // The mk_int here is the space being
-    // reserved for the refcount.
-    let boxed_body = ty::mk_tup(bcx_tcx(bcx), [ty::mk_int(bcx_tcx(cx)), t]);
-    let box_ptr = ty::mk_imm_box(bcx_tcx(bcx), t);
-    let r = size_of(cx, boxed_body);
-    let llsz = r.val; bcx = r.bcx;
+// enough space for a box of that type.  This includes a rust_opaque_box
+// header.
+fn trans_malloc_boxed_raw(bcx: @block_ctxt, t: ty::t,
+                          &static_ti: option<@tydesc_info>) -> result {
+    let bcx = bcx;
+    let ccx = bcx_ccx(bcx);
 
     // Grab the TypeRef type of box_ptr, because that's what trans_raw_malloc
     // wants.
     // FIXME: Could avoid this check with a postcondition on mk_imm_box?
     // (requires Issue #586)
-    let ccx = bcx_ccx(bcx);
+    let box_ptr = ty::mk_imm_box(bcx_tcx(bcx), t);
     check (type_has_static_size(ccx, box_ptr));
     let llty = type_of(ccx, box_ptr);
 
-    let ti = none;
-    let tydesc_result = get_tydesc(bcx, t, true, ti);
-    let lltydesc = tydesc_result.result.val; bcx = tydesc_result.result.bcx;
+    // Get the tydesc for the body:
+    let {bcx, val: lltydesc} = get_tydesc(bcx, t, true, static_ti).result;
 
-    let rval = Call(cx, ccx.upcalls.malloc,
-                    [llsz, lltydesc]);
-    ret rslt(cx, PointerCast(cx, rval, llty));
+    // Allocate space:
+    let rval = Call(bcx, ccx.upcalls.malloc, [lltydesc]);
+    ret rslt(bcx, PointerCast(bcx, rval, llty));
 }
 
 // trans_malloc_boxed: usefully wraps trans_malloc_box_raw; allocates a box,
 // initializes the reference count to 1, and pulls out the body and rc
-fn trans_malloc_boxed(cx: @block_ctxt, t: ty::t) ->
+fn trans_malloc_boxed(bcx: @block_ctxt, t: ty::t) ->
     {bcx: @block_ctxt, box: ValueRef, body: ValueRef} {
-    let res = trans_malloc_boxed_raw(cx, t);
-    let box = res.val;
-    let rc = GEPi(res.bcx, box, [0, abi::box_rc_field_refcnt]);
-    Store(res.bcx, C_int(bcx_ccx(cx), 1), rc);
-    let body = GEPi(res.bcx, box, [0, abi::box_rc_field_body]);
-    ret {bcx: res.bcx, box: res.val, body: body};
+    let ti = none;
+    let {bcx, val:box} = trans_malloc_boxed_raw(bcx, t, ti);
+    let body = GEPi(bcx, box, [0, abi::box_field_body]);
+    ret {bcx: bcx, box: box, body: body};
 }
 
 // Type descriptor and type glue stuff
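In opaque_box_body above, `GEPi(bcx, boxptr, [1])` steps one whole T_box_header past the pointer, i.e. it lands on the first byte after the header. The comment points at box_body() in rust_internal.h, which this capture does not show; under the layout described here it could plausibly be as small as:

```cpp
// Sketch of the runtime equivalent.  Valid because the header size is a
// multiple of the maximum body alignment (see the alignment discussion in
// trans_closure.rs below), so the body begins immediately after the header.
static inline void *box_body(rust_opaque_box *box) {
    return reinterpret_cast<void *>(box + 1);
}
```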
@@ -1231,8 +1231,8 @@ fn make_take_glue(cx: @block_ctxt, v: ValueRef, t: ty::t) {
 
 fn incr_refcnt_of_boxed(cx: @block_ctxt, box_ptr: ValueRef) -> @block_ctxt {
     let ccx = bcx_ccx(cx);
-    let rc_ptr =
-        GEPi(cx, box_ptr, [0, abi::box_rc_field_refcnt]);
+    maybe_validate_box(cx, box_ptr);
+    let rc_ptr = GEPi(cx, box_ptr, [0, abi::box_field_refcnt]);
     let rc = Load(cx, rc_ptr);
     rc = Add(cx, rc, C_int(ccx, 1));
     Store(cx, rc, rc_ptr);
@@ -1243,7 +1243,7 @@ fn free_box(bcx: @block_ctxt, v: ValueRef, t: ty::t) -> @block_ctxt {
     ret alt ty::struct(bcx_tcx(bcx), t) {
       ty::ty_box(body_mt) {
         let v = PointerCast(bcx, v, type_of_1(bcx, t));
-        let body = GEPi(bcx, v, [0, abi::box_rc_field_body]);
+        let body = GEPi(bcx, v, [0, abi::box_field_body]);
         let bcx = drop_ty(bcx, body, body_mt.ty);
         trans_free_if_not_gc(bcx, v)
       }
@@ -1274,7 +1274,7 @@ fn make_free_glue(bcx: @block_ctxt, v: ValueRef, t: ty::t) {
         let ccx = bcx_ccx(bcx);
         let llbox_ty = T_opaque_iface_ptr(ccx);
         let b = PointerCast(bcx, v, llbox_ty);
-        let body = GEPi(bcx, b, [0, abi::box_rc_field_body]);
+        let body = GEPi(bcx, b, [0, abi::box_field_body]);
         let tydescptr = GEPi(bcx, body, [0, 0]);
         let tydesc = Load(bcx, tydescptr);
         let ti = none;
@@ -1375,9 +1375,23 @@ fn trans_res_drop(cx: @block_ctxt, rs: ValueRef, did: ast::def_id,
     ret next_cx;
 }
 
+fn maybe_validate_box(_cx: @block_ctxt, _box_ptr: ValueRef) {
+    // Uncomment this when debugging annoying use-after-free
+    // bugs.  But do not commit with this uncommented!  Big performance hit.
+
+    // let cx = _cx, box_ptr = _box_ptr;
+    // let ccx = bcx_ccx(cx);
+    // warn_not_to_commit(ccx, "validate_box() is uncommented");
+    // let raw_box_ptr = PointerCast(cx, box_ptr, T_ptr(T_i8()));
+    // Call(cx, ccx.upcalls.validate_box, [raw_box_ptr]);
+}
+
 fn decr_refcnt_maybe_free(cx: @block_ctxt, box_ptr: ValueRef, t: ty::t)
     -> @block_ctxt {
     let ccx = bcx_ccx(cx);
+
+    maybe_validate_box(cx, box_ptr);
+
     let rc_adj_cx = new_sub_block_ctxt(cx, "rc--");
     let free_cx = new_sub_block_ctxt(cx, "free");
     let next_cx = new_sub_block_ctxt(cx, "next");
@@ -1385,8 +1399,7 @@ fn decr_refcnt_maybe_free(cx: @block_ctxt, box_ptr: ValueRef, t: ty::t)
     let box_ptr = PointerCast(cx, box_ptr, llbox_ty);
     let null_test = IsNull(cx, box_ptr);
     CondBr(cx, null_test, next_cx.llbb, rc_adj_cx.llbb);
-    let rc_ptr =
-        GEPi(rc_adj_cx, box_ptr, [0, abi::box_rc_field_refcnt]);
+    let rc_ptr = GEPi(rc_adj_cx, box_ptr, [0, abi::box_field_refcnt]);
     let rc = Load(rc_adj_cx, rc_ptr);
     rc = Sub(rc_adj_cx, rc, C_int(ccx, 1));
     Store(rc_adj_cx, rc, rc_ptr);
@@ -1397,7 +1410,6 @@ fn decr_refcnt_maybe_free(cx: @block_ctxt, box_ptr: ValueRef, t: ty::t)
     ret next_cx;
 }
 
-
 // Structural comparison: a rather involved form of glue.
 fn maybe_name_value(cx: @crate_ctxt, v: ValueRef, s: str) {
     if cx.sess.opts.save_temps {
@@ -2208,7 +2220,7 @@ fn autoderef(cx: @block_ctxt, v: ValueRef, t: ty::t) -> result_t {
     while true {
         alt ty::struct(ccx.tcx, t1) {
           ty::ty_box(mt) {
-            let body = GEPi(cx, v1, [0, abi::box_rc_field_body]);
+            let body = GEPi(cx, v1, [0, abi::box_field_body]);
             t1 = mt.ty;
 
             // Since we're changing levels of box indirection, we may have
@@ -2514,7 +2526,7 @@ type lval_maybe_callee = {bcx: @block_ctxt,
                           generic: option<generic_info>};
 
 fn null_env_ptr(bcx: @block_ctxt) -> ValueRef {
-    C_null(T_opaque_cbox_ptr(bcx_ccx(bcx)))
+    C_null(T_opaque_box_ptr(bcx_ccx(bcx)))
 }
 
 fn lval_from_local_var(bcx: @block_ctxt, r: local_var_result) -> lval_result {
@@ -2790,7 +2802,7 @@ fn trans_lval(cx: @block_ctxt, e: @ast::expr) -> lval_result {
         let val =
             alt ty::struct(ccx.tcx, t) {
               ty::ty_box(_) {
-                GEPi(sub.bcx, sub.val, [0, abi::box_rc_field_body])
+                GEPi(sub.bcx, sub.val, [0, abi::box_field_body])
               }
               ty::ty_res(_, _, _) {
                 GEPi(sub.bcx, sub.val, [0, 1])
@@ -3160,7 +3172,7 @@ fn trans_call_inner(in_cx: @block_ctxt, fn_expr_ty: ty::t,
     let llenv, dict_param = none;
     alt f_res.env {
       null_env {
-        llenv = llvm::LLVMGetUndef(T_opaque_cbox_ptr(bcx_ccx(cx)));
+        llenv = llvm::LLVMGetUndef(T_opaque_box_ptr(bcx_ccx(cx)));
       }
       self_env(e) { llenv = e; }
      dict_env(dict, e) { llenv = e; dict_param = some(dict); }
@@ -3465,6 +3477,8 @@ fn trans_expr(bcx: @block_ctxt, e: @ast::expr, dest: dest) -> @block_ctxt {
     let tcx = bcx_tcx(bcx);
     debuginfo::update_source_pos(bcx, e.span);
 
+    #debug["trans_expr(%s,%?)", expr_to_str(e), dest];
+
     if expr_is_lval(bcx, e) {
         ret lval_to_dps(bcx, e, dest);
     }
@@ -3998,6 +4012,8 @@ fn zero_alloca(cx: @block_ctxt, llptr: ValueRef, t: ty::t)
 }
 
 fn trans_stmt(cx: @block_ctxt, s: ast::stmt) -> @block_ctxt {
+    #debug["trans_expr(%s)", stmt_to_str(s)];
+
     if (!bcx_ccx(cx).sess.opts.no_asm_comments) {
         add_span_comment(cx, s.span, stmt_to_str(s));
     }
@@ -5122,8 +5138,7 @@ fn fill_fn_pair(bcx: @block_ctxt, pair: ValueRef, llfn: ValueRef,
     let code_cell = GEPi(bcx, pair, [0, abi::fn_field_code]);
     Store(bcx, llfn, code_cell);
     let env_cell = GEPi(bcx, pair, [0, abi::fn_field_box]);
-    let llenvblobptr =
-        PointerCast(bcx, llenvptr, T_opaque_cbox_ptr(ccx));
+    let llenvblobptr = PointerCast(bcx, llenvptr, T_opaque_box_ptr(ccx));
     Store(bcx, llenvblobptr, env_cell);
 }
 
@@ -5591,7 +5606,8 @@ fn trans_crate(sess: session::session, crate: @ast::crate, tcx: ty::ctxt,
           shape_cx: shape::mk_ctxt(llmod),
           gc_cx: gc::mk_ctxt(),
           crate_map: crate_map,
-          dbg_cx: dbg_cx};
+          dbg_cx: dbg_cx,
+          mutable do_not_commit_warning_issued: false};
     let cx = new_local_ctxt(ccx);
     collect_items(ccx, crate);
     trans_constants(ccx, crate);
@@ -8,7 +8,7 @@ import lib::llvm::{ValueRef, TypeRef, BasicBlockRef, BuilderRef, ModuleRef};
 import lib::llvm::{Opcode, IntPredicate, RealPredicate, True, False,
                    CallConv};
 import common::{block_ctxt, T_ptr, T_nil, T_i8, T_i1, T_void,
-                T_fn, val_ty, bcx_ccx, C_i32};
+                T_fn, val_ty, bcx_ccx, C_i32, val_str};
 
 fn B(cx: @block_ctxt) -> BuilderRef {
     let b = *cx.fcx.lcx.ccx.builder;
@@ -95,6 +95,10 @@ fn Invoke(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef],
     if cx.unreachable { ret; }
     assert (!cx.terminated);
     cx.terminated = true;
+    #debug["Invoke(%s with arguments (%s))",
+           val_str(bcx_ccx(cx).tn, Fn),
+           str::connect(vec::map(Args, {|a|val_str(bcx_ccx(cx).tn, a)}),
+                        ", ")];
     unsafe {
         llvm::LLVMBuildInvoke(B(cx), Fn, vec::to_ptr(Args),
                               vec::len(Args) as c_uint, Then, Catch,
@@ -15,17 +15,6 @@ import back::link::{
     mangle_internal_name_by_path,
     mangle_internal_name_by_path_and_seq};
 import util::ppaux::ty_to_str;
-import base::{
-    trans_shared_malloc,
-    type_of_inner,
-    node_id_type,
-    INIT,
-    trans_shared_free,
-    drop_ty,
-    new_sub_block_ctxt,
-    load_if_immediate,
-    dest
-};
 import shape::{size_of};
 
 // ___Good to know (tm)__________________________________________________
@@ -33,34 +22,31 @@ import shape::{size_of};
 // The layout of a closure environment in memory is
 // roughly as follows:
 //
-// struct closure_box {
-//   unsigned ref_count;          // only used for shared environments
-//   type_desc *tydesc;           // descriptor for the "struct closure_box" type
-//   type_desc *bound_tdescs[];   // bound descriptors
-//   struct {
-//     upvar1_t upvar1;
-//     ...
-//     upvarN_t upvarN;
-//   } bound_data;
+// struct rust_opaque_box {       // see rust_internal.h
+//   unsigned ref_count;          // only used for fn@()
+//   type_desc *tydesc;           // describes closure_data struct
+//   rust_opaque_box *prev;       // (used internally by memory alloc)
+//   rust_opaque_box *next;       // (used internally by memory alloc)
+//   struct closure_data {
+//     type_desc *bound_tdescs[]; // bound descriptors
+//     struct {
+//       upvar1_t upvar1;
+//       ...
+//       upvarN_t upvarN;
+//     } bound_data;
+//   }
 // };
 //
-// Note that the closure carries a type descriptor that describes the
-// closure itself.  Trippy.  This is needed because the precise types
-// of the closed over data are lost in the closure type (`fn(T)->U`),
-// so if we need to take/drop, we must know what data is in the upvars
-// and so forth.  This struct is defined in the code in mk_closure_tys()
-// below.
+// Note that the closure is itself a rust_opaque_box.  This is true
+// even for fn~ and fn&, because we wish to keep binary compatibility
+// between all kinds of closures.  The allocation strategy for this
+// closure depends on the closure type.  For a sendfn, the closure
+// (and the referenced type descriptors) will be allocated in the
+// exchange heap.  For a fn, the closure is allocated in the task heap
+// and is reference counted.  For a block, the closure is allocated on
+// the stack.
 //
-// The allocation strategy for this closure depends on the closure
-// type.  For a sendfn, the closure (and the referenced type
-// descriptors) will be allocated in the exchange heap.  For a fn, the
-// closure is allocated in the task heap and is reference counted.
-// For a block, the closure is allocated on the stack.  Note that in
-// all cases we allocate space for a ref count just to make our lives
-// easier when upcasting to fn(T)->U, in the shape code, and so
-// forth.
-//
-// ## Opaque Closures ##
+// ## Opaque closures and the embedded type descriptor ##
 //
 // One interesting part of closures is that they encapsulate the data
 // that they close over.  So when I have a ptr to a closure, I do not
@@ -69,10 +55,10 @@ import shape::{size_of};
 // nor where its fields are located.  This is called an "opaque
 // closure".
 //
-// Typically an opaque closure suffices because I only manipulate it
-// by ptr.  The routine common::T_opaque_cbox_ptr() returns an
-// appropriate type for such an opaque closure; it allows access to the
-// first two fields, but not the others.
+// Typically an opaque closure suffices because we only manipulate it
+// by ptr.  The routine common::T_opaque_box_ptr() returns an
+// appropriate type for such an opaque closure; it allows access to
+// the box fields, but not the closure_data itself.
 //
 // But sometimes, such as when cloning or freeing a closure, we need
 // to know the full information.  That is where the type descriptor
@@ -81,31 +67,22 @@ import shape::{size_of};
 //
 // ## Subtleties concerning alignment ##
 //
-// You'll note that the closure_box structure is a flat structure with
-// four fields.  In some ways, it would be more convenient to use a nested
-// structure like so:
+// It is important that we be able to locate the closure data *without
+// knowing the kind of data that is being bound*.  This can be tricky
+// because the alignment requirements of the bound data affect the
+// alignment requirements of the closure_data struct as a whole.  However,
+// right now this is a non-issue in any case, because the size of the
+// rust_opaque_box header is always a multiple of 16 bytes, which is
+// the maximum alignment requirement we ever have to worry about.
 //
-// struct {
-//   int;
-//   struct {
-//     type_desc*;
-//     type_desc*[];
-//     bound_data;
-// } }
-//
-// This would be more convenient because it would allow us to use more
-// of the existing infrastructure: we could treat the inner struct as
-// a type and then have a boxed variant (which would add the int) etc.
-// However, there is one subtle problem with this: grouping the latter
-// 3 fields into an inner struct causes the alignment of the entire
-// struct to be the max alignment of the bound_data.  This will
-// therefore vary from closure to closure.  That would mean that we
-// cannot reliably locate the initial type_desc* in an opaque closure!
-// That's definitely a bad thing.  Therefore, I have elected to create
-// a flat structure, even though it means some mild amount of code
-// duplication (however, we used to do it the other way, and we were
-// jumping through about as many hoops just trying to wedge a ref
-// count into a unique pointer, so it's kind of a wash in the end).
+// The only reason alignment matters is that, in order to learn what data
+// is bound, we would normally first load the type descriptors: but their
+// location ultimately depends on their content!  There is, however, a
+// workaround.  We can load the tydesc from the rust_opaque_box, which
+// describes the closure_data struct and has self-contained derived type
+// descriptors, and read the alignment from there.  It's just annoying to
+// do.  Hopefully should this ever become an issue we'll have monomorphized
+// and type descriptors will all be a bad dream.
 //
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
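A quick sanity check on the "multiple of 16" claim above: the header is one word-sized ref count plus three pointer-sized fields, i.e. 16 bytes on 32-bit targets and 32 bytes on 64-bit targets. In modern C++ spelling (not code from this commit):

```cpp
// Hedged check: 4 word-sized header fields => 16 or 32 bytes, so
// align_to(sizeof(rust_opaque_box), body_align) is a no-op for any
// body_align up to 16, and the body always starts right after the header.
static_assert(sizeof(rust_opaque_box) % 16 == 0,
              "box body must begin on a 16-byte boundary");
```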
@@ -142,16 +119,21 @@ fn mk_tydesc_ty(tcx: ty::ctxt, ck: ty::closure_kind) -> ty::t {
     };
 }
 
+fn mk_tuplified_uniq_cbox_ty(tcx: ty::ctxt, cdata_ty: ty::t) -> ty::t {
+    let tydesc_ty = mk_tydesc_ty(tcx, ty::ck_uniq);
+    let cbox_ty = tuplify_cbox_ty(tcx, cdata_ty, tydesc_ty);
+    ret ty::mk_imm_uniq(tcx, cbox_ty);
+}
+
 // Given a closure ty, emits a corresponding tuple ty
 fn mk_closure_tys(tcx: ty::ctxt,
                   ck: ty::closure_kind,
                   ty_params: [fn_ty_param],
                   bound_values: [environment_value])
-    -> (ty::t, ty::t, [ty::t]) {
+    -> (ty::t, [ty::t]) {
     let bound_tys = [];
 
-    let tydesc_ty =
-        mk_tydesc_ty(tcx, ck);
+    let tydesc_ty = mk_tydesc_ty(tcx, ck);
 
     // Compute the closed over tydescs
     let param_ptrs = [];
@@ -173,95 +155,76 @@ fn mk_closure_tys(tcx: ty::ctxt,
     }
     let bound_data_ty = ty::mk_tup(tcx, bound_tys);
 
-    let norc_tys = [tydesc_ty, ty::mk_tup(tcx, param_ptrs), bound_data_ty];
-
-    // closure_norc_ty == everything but ref count
-    //
-    // This is a hack to integrate with the cycle coll.  When you
-    // allocate memory in the task-local space, you are expected to
-    // provide a descriptor for that memory which excludes the ref
-    // count.  That's what this represents.  However, this really
-    // assumes a type setup like [uint, data] where data can be a
-    // struct.  We don't use that structure here because we don't want
-    // to alignment of the first few fields being bound up in the
-    // alignment of the bound data, as would happen if we laid out
-    // that way.  For now this should be fine but ultimately we need
-    // to modify CC code or else modify box allocation interface to be
-    // a bit more flexible, perhaps taking a vec of tys in the box
-    // (which for normal rust code is always of length 1).
-    let closure_norc_ty = ty::mk_tup(tcx, norc_tys);
-
-    #debug["closure_norc_ty=%s", ty_to_str(tcx, closure_norc_ty)];
-
-    // closure_ty == ref count, data tydesc, typarams, bound data
-    let closure_ty = ty::mk_tup(tcx, [ty::mk_int(tcx)] + norc_tys);
-
-    #debug["closure_ty=%s", ty_to_str(tcx, closure_norc_ty)];
-
-    ret (closure_ty, closure_norc_ty, bound_tys);
+    let cdata_ty = ty::mk_tup(tcx, [ty::mk_tup(tcx, param_ptrs),
+                                    bound_data_ty]);
+    #debug["cdata_ty=%s", ty_to_str(tcx, cdata_ty)];
+    ret (cdata_ty, bound_tys);
 }
 
 fn allocate_cbox(bcx: @block_ctxt,
                  ck: ty::closure_kind,
-                 cbox_ty: ty::t,
-                 cbox_norc_ty: ty::t)
+                 cdata_ty: ty::t)
     -> (@block_ctxt, ValueRef, [ValueRef]) {
 
-    let ccx = bcx_ccx(bcx);
+    // let ccx = bcx_ccx(bcx);
+    let tcx = bcx_tcx(bcx);
 
-    let alloc_in_heap = fn@(bcx: @block_ctxt,
-                            xchgheap: bool,
-                            &temp_cleanups: [ValueRef])
-        -> (@block_ctxt, ValueRef) {
-
-        // n.b. If you are wondering why we don't use
-        // trans_malloc_boxed() or alloc_uniq(), see the section about
-        // "Subtleties concerning alignment" in the big comment at the
-        // top of the file.
-
-        let {bcx, val:llsz} = size_of(bcx, cbox_ty);
-        let ti = none;
-        let tydesc_ty = if xchgheap { cbox_ty } else { cbox_norc_ty };
-        let {bcx, val:lltydesc} =
-            get_tydesc(bcx, tydesc_ty, true, ti).result;
-        let malloc = {
-            if xchgheap { ccx.upcalls.shared_malloc}
-            else { ccx.upcalls.malloc }
-        };
-        let box = Call(bcx, malloc, [llsz, lltydesc]);
-        add_clean_free(bcx, box, xchgheap);
-        temp_cleanups += [box];
-        (bcx, box)
-    };
+    fn nuke_ref_count(bcx: @block_ctxt, box: ValueRef) {
+        // Initialize ref count to arbitrary value for debugging:
+        let ccx = bcx_ccx(bcx);
+        let box = PointerCast(bcx, box, T_opaque_box_ptr(ccx));
+        let ref_cnt = GEPi(bcx, box, [0, abi::box_field_refcnt]);
+        let rc = C_int(ccx, 0x12345678);
+        Store(bcx, rc, ref_cnt);
+    }
+
+    fn store_uniq_tydesc(bcx: @block_ctxt,
+                         cdata_ty: ty::t,
+                         box: ValueRef,
+                         &ti: option::t<@tydesc_info>) -> @block_ctxt {
+        let ccx = bcx_ccx(bcx);
+        let bound_tydesc = GEPi(bcx, box, [0, abi::box_field_tydesc]);
+        let {bcx, val: td} =
+            base::get_tydesc(bcx, cdata_ty, true, ti).result;
+        let td = Call(bcx, ccx.upcalls.create_shared_type_desc, [td]);
+        Store(bcx, td, bound_tydesc);
+        bcx
+    }
 
-    // Allocate the box:
+    // Allocate and initialize the box:
     let ti = none;
     let temp_cleanups = [];
-    let (bcx, box, rc) = alt ck {
+    let (bcx, box) = alt ck {
       ty::ck_box {
-        let (bcx, box) = alloc_in_heap(bcx, false, temp_cleanups);
-        (bcx, box, 1)
+        let {bcx, val: box} = trans_malloc_boxed_raw(bcx, cdata_ty, ti);
+        (bcx, box)
       }
       ty::ck_uniq {
-        let (bcx, box) = alloc_in_heap(bcx, true, temp_cleanups);
-        (bcx, box, 0x12345678) // use arbitrary value for debugging
+        let uniq_cbox_ty = mk_tuplified_uniq_cbox_ty(tcx, cdata_ty);
+        check uniq::type_is_unique_box(bcx, uniq_cbox_ty);
+        let {bcx, val: box} = uniq::alloc_uniq(bcx, uniq_cbox_ty);
+        nuke_ref_count(bcx, box);
+        let bcx = store_uniq_tydesc(bcx, cdata_ty, box, ti);
+        (bcx, box)
      }
      ty::ck_block {
+        let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
        let {bcx, val: box} = base::alloc_ty(bcx, cbox_ty);
-        (bcx, box, 0x12345678) // use arbitrary value for debugging
+        nuke_ref_count(bcx, box);
+        (bcx, box)
      }
    };
 
-    // Initialize ref count
-    let box = PointerCast(bcx, box, T_opaque_cbox_ptr(ccx));
-    let ref_cnt = GEPi(bcx, box, [0, abi::box_rc_field_refcnt]);
-    Store(bcx, C_int(ccx, rc), ref_cnt);
+    base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_take_glue, ti);
+    base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
+    base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
 
     ret (bcx, box, temp_cleanups);
 }
 
 type closure_result = {
     llbox: ValueRef,     // llvalue of ptr to closure
-    cbox_ty: ty::t,      // type of the closure data
+    cdata_ty: ty::t,     // type of the closure data
     bcx: @block_ctxt     // final bcx
 };
@@ -302,34 +265,17 @@ fn store_environment(
     let tcx = bcx_tcx(bcx);
 
     // compute the shape of the closure
-    let (cbox_ty, cbox_norc_ty, bound_tys) =
+    let (cdata_ty, bound_tys) =
         mk_closure_tys(tcx, ck, lltyparams, bound_values);
 
     // allocate closure in the heap
     let (bcx, llbox, temp_cleanups) =
-        allocate_cbox(bcx, ck, cbox_ty, cbox_norc_ty);
-
-    // store data tydesc.
-    alt ck {
-      ty::ck_box | ty::ck_uniq {
-        let bound_tydesc = GEPi(bcx, llbox, [0, abi::cbox_elt_tydesc]);
-        let ti = none;
-
-        let {result:closure_td, _} =
-            base::get_tydesc(bcx, cbox_ty, true, ti);
-        base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_take_glue, ti);
-        base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
-        base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
-        bcx = closure_td.bcx;
-        let td = maybe_clone_tydesc(bcx, ck, closure_td.val);
-        Store(bcx, td, bound_tydesc);
-      }
-      ty::ck_block { /* skip this for blocks, not really relevant */ }
-    }
+        allocate_cbox(bcx, ck, cdata_ty);
+
+    // cbox_ty has the form of a tuple: (a, b, c) we want a ptr to a
+    // tuple.  This could be a ptr in uniq or a box or on stack,
+    // whatever.
+    let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
+    let cboxptr_ty = ty::mk_ptr(tcx, {ty:cbox_ty, mut:ast::imm});
+    let llbox = cast_if_we_can(bcx, llbox, cboxptr_ty);
     check type_is_tup_like(bcx, cbox_ty);
@@ -337,7 +283,8 @@ fn store_environment(
     // If necessary, copy tydescs describing type parameters into the
     // appropriate slot in the closure.
     let {bcx:bcx, val:ty_params_slot} =
-        GEP_tup_like(bcx, cbox_ty, llbox, [0, abi::cbox_elt_ty_params]);
+        GEP_tup_like(bcx, cbox_ty, llbox,
+                     [0, abi::box_field_body, abi::closure_body_ty_params]);
     let off = 0;
     for tp in lltyparams {
         let cloned_td = maybe_clone_tydesc(bcx, ck, tp.desc);
@@ -361,7 +308,9 @@ fn store_environment(
         }
 
         let bound_data = GEP_tup_like_1(bcx, cbox_ty, llbox,
-                                        [0, abi::cbox_elt_bindings,
+                                        [0,
+                                         abi::box_field_body,
+                                         abi::closure_body_bindings,
                                          i as int]);
         bcx = bound_data.bcx;
         let bound_data = bound_data.val;
@@ -399,7 +348,7 @@ fn store_environment(
     }
     for cleanup in temp_cleanups { revoke_clean(bcx, cleanup); }
 
-    ret {llbox: llbox, cbox_ty: cbox_ty, bcx: bcx};
+    ret {llbox: llbox, cdata_ty: cdata_ty, bcx: bcx};
 }
 
 // Given a context and a list of upvars, build a closure.  This just
@@ -443,22 +392,20 @@ fn build_closure(bcx0: @block_ctxt,
 // with the upvars and type descriptors.
 fn load_environment(enclosing_cx: @block_ctxt,
                     fcx: @fn_ctxt,
-                    cbox_ty: ty::t,
+                    cdata_ty: ty::t,
                     cap_vars: [capture::capture_var],
                     ck: ty::closure_kind) {
     let bcx = new_raw_block_ctxt(fcx, fcx.llloadenv);
-    let ccx = bcx_ccx(bcx);
-    let tcx = bcx_tcx(bcx);
 
-    let cboxptr_ty = ty::mk_ptr(tcx, {ty:cbox_ty, mut:ast::imm});
-    check (type_has_static_size(ccx, cboxptr_ty));
-    let llty = type_of(ccx, cboxptr_ty);
-    let llclosure = PointerCast(bcx, fcx.llenv, llty);
+    // Load a pointer to the closure data, skipping over the box header:
+    let llcdata = base::opaque_box_body(bcx, cdata_ty, fcx.llenv);
 
     // Populate the type parameters from the environment. We need to
     // do this first because the tydescs are needed to index into
     // the bindings if they are dynamically sized.
-    let lltydescs = GEPi(bcx, llclosure, [0, abi::cbox_elt_ty_params]);
+    check type_is_tup_like(bcx, cdata_ty);
+    let {bcx, val: lltydescs} = GEP_tup_like(bcx, cdata_ty, llcdata,
+                                             [0, abi::closure_body_ty_params]);
     let off = 0;
     for tp in copy enclosing_cx.fcx.lltyparams {
         let tydesc = Load(bcx, GEPi(bcx, lltydescs, [0, off]));
@@ -476,15 +423,15 @@ fn load_environment(enclosing_cx: @block_ctxt,
     }
 
     // Populate the upvars from the environment.
-    let path = [0, abi::cbox_elt_bindings];
     let i = 0u;
     vec::iter(cap_vars) { |cap_var|
         alt cap_var.mode {
           capture::cap_drop { /* ignore */ }
           _ {
-            check type_is_tup_like(bcx, cbox_ty);
-            let upvarptr = GEP_tup_like(
-                bcx, cbox_ty, llclosure, path + [i as int]);
+            check type_is_tup_like(bcx, cdata_ty);
+            let upvarptr =
+                GEP_tup_like(bcx, cdata_ty, llcdata,
+                             [0, abi::closure_body_bindings, i as int]);
             bcx = upvarptr.bcx;
             let llupvarptr = upvarptr.val;
             alt ck {
@@ -519,9 +466,9 @@ fn trans_expr_fn(bcx: @block_ctxt,
     let trans_closure_env = fn@(ck: ty::closure_kind) -> ValueRef {
         let cap_vars = capture::compute_capture_vars(
             ccx.tcx, id, proto, cap_clause);
-        let {llbox, cbox_ty, bcx} = build_closure(bcx, cap_vars, ck);
+        let {llbox, cdata_ty, bcx} = build_closure(bcx, cap_vars, ck);
         trans_closure(sub_cx, decl, body, llfn, no_self, [], id, {|fcx|
-            load_environment(bcx, fcx, cbox_ty, cap_vars, ck);
+            load_environment(bcx, fcx, cdata_ty, cap_vars, ck);
         });
         llbox
     };
@@ -531,9 +478,8 @@ fn trans_expr_fn(bcx: @block_ctxt,
       ast::proto_box { trans_closure_env(ty::ck_box) }
       ast::proto_uniq { trans_closure_env(ty::ck_uniq) }
       ast::proto_bare {
-        let closure = C_null(T_opaque_cbox_ptr(ccx));
-        trans_closure(sub_cx, decl, body, llfn, no_self, [],
-                      id, {|_fcx|});
+        let closure = C_null(T_opaque_box_ptr(ccx));
+        trans_closure(sub_cx, decl, body, llfn, no_self, [], id, {|_fcx|});
         closure
       }
     };
@@ -617,7 +563,7 @@ fn trans_bind_1(cx: @block_ctxt, outgoing_fty: ty::t,
     };
 
     // Actually construct the closure
-    let {llbox, cbox_ty, bcx} = store_environment(
+    let {llbox, cdata_ty, bcx} = store_environment(
         bcx, vec::map(lltydescs, {|d| {desc: d, dicts: none}}),
         env_vals + vec::map(bound, {|x| env_expr(x)}),
         ty::ck_box);
@@ -625,7 +571,7 @@ fn trans_bind_1(cx: @block_ctxt, outgoing_fty: ty::t,
     // Make thunk
     let llthunk =
         trans_bind_thunk(cx.fcx.lcx, pair_ty, outgoing_fty_real, args,
-                         cbox_ty, *param_bounds, target_res);
+                         cdata_ty, *param_bounds, target_res);
 
     // Fill the function pair
     fill_fn_pair(bcx, get_dest_addr(dest), llthunk.val, llbox);
@@ -688,16 +634,20 @@ fn make_opaque_cbox_take_glue(
 
     // Hard case, a deep copy:
     let ccx = bcx_ccx(bcx);
-    let llopaquecboxty = T_opaque_cbox_ptr(ccx);
+    let tcx = bcx_tcx(bcx);
+    let llopaquecboxty = T_opaque_box_ptr(ccx);
     let cbox_in = Load(bcx, cboxptr);
     make_null_test(bcx, cbox_in) {|bcx|
         // Load the size from the type descr found in the cbox
         let cbox_in = PointerCast(bcx, cbox_in, llopaquecboxty);
-        let tydescptr = GEPi(bcx, cbox_in, [0, abi::cbox_elt_tydesc]);
+        let tydescptr = GEPi(bcx, cbox_in, [0, abi::box_field_tydesc]);
         let tydesc = Load(bcx, tydescptr);
         let tydesc = PointerCast(bcx, tydesc, T_ptr(ccx.tydesc_type));
         let sz = Load(bcx, GEPi(bcx, tydesc, [0, abi::tydesc_field_size]));
 
+        // Adjust sz to account for the rust_opaque_box header fields
+        let sz = Add(bcx, sz, base::llsize_of(ccx, T_box_header(ccx)));
+
         // Allocate memory, update original ptr, and copy existing data
         let malloc = ccx.upcalls.shared_malloc;
         let cbox_out = Call(bcx, malloc, [sz, tydesc]);
@@ -705,9 +655,14 @@ fn make_opaque_cbox_take_glue(
         let {bcx, val: _} = call_memmove(bcx, cbox_out, cbox_in, sz);
         Store(bcx, cbox_out, cboxptr);
 
+        // Take the (deeply cloned) type descriptor
+        let tydesc_out = GEPi(bcx, cbox_out, [0, abi::box_field_tydesc]);
+        let bcx = take_ty(bcx, tydesc_out, mk_tydesc_ty(tcx, ty::ck_uniq));
+
         // Take the data in the tuple
         let ti = none;
-        call_tydesc_glue_full(bcx, cbox_out, tydesc,
+        let cdata_out = GEPi(bcx, cbox_out, [0, abi::box_field_body]);
+        call_tydesc_glue_full(bcx, cdata_out, tydesc,
                               abi::tydesc_field_take_glue, ti);
         bcx
     }
@@ -747,20 +702,14 @@ fn make_opaque_cbox_free_glue(
         // Load the type descr found in the cbox
         let lltydescty = T_ptr(ccx.tydesc_type);
-        let cbox = PointerCast(bcx, cbox, T_opaque_cbox_ptr(ccx));
-        let tydescptr = GEPi(bcx, cbox, [0, abi::cbox_elt_tydesc]);
+        let tydescptr = GEPi(bcx, cbox, [0, abi::box_field_tydesc]);
         let tydesc = Load(bcx, tydescptr);
         let tydesc = PointerCast(bcx, tydesc, lltydescty);
 
-        // Null out the type descr in the cbox.  This is subtle:
-        // we will be freeing the data in the cbox, and we may need the
-        // information in the type descr to guide the GEP_tup_like process
-        // etc if generic types are involved.  So we null it out at first
-        // then free it manually below.
-        Store(bcx, C_null(lltydescty), tydescptr);
-
         // Drop the tuple data then free the descriptor
         let ti = none;
-        call_tydesc_glue_full(bcx, cbox, tydesc,
+        let cdata = GEPi(bcx, cbox, [0, abi::box_field_body]);
+        call_tydesc_glue_full(bcx, cdata, tydesc,
                               abi::tydesc_field_drop_glue, ti);
 
         // Free the ty descr (if necc) and the box itself
@@ -782,10 +731,11 @@ fn trans_bind_thunk(cx: @local_ctxt,
                     incoming_fty: ty::t,
                     outgoing_fty: ty::t,
                     args: [option<@ast::expr>],
-                    cbox_ty: ty::t,
+                    cdata_ty: ty::t,
                     param_bounds: [ty::param_bounds],
                     target_fn: option<ValueRef>)
     -> {val: ValueRef, ty: TypeRef} {
+
     // If we supported constraints on record fields, we could make the
     // constraints for this function:
     /*
@@ -797,6 +747,13 @@ fn trans_bind_thunk(cx: @local_ctxt,
     let tcx = ccx_tcx(ccx);
     check type_has_static_size(ccx, incoming_fty);
 
+    #debug["trans_bind_thunk[incoming_fty=%s,outgoing_fty=%s,\
+            cdata_ty=%s,param_bounds=%?]",
+           ty_to_str(tcx, incoming_fty),
+           ty_to_str(tcx, outgoing_fty),
+           ty_to_str(tcx, cdata_ty),
+           param_bounds];
+
     // Here we're not necessarily constructing a thunk in the sense of
     // "function with no arguments".  The result of compiling 'bind f(foo,
     // bar, baz)' would be a thunk that, when called, applies f to those
@@ -835,15 +792,12 @@ fn trans_bind_thunk(cx: @local_ctxt,
     let l_bcx = new_raw_block_ctxt(fcx, fcx.llloadenv);
 
     // The 'llenv' that will arrive in the thunk we're creating is an
-    // environment that will contain the values of its arguments and a pointer
-    // to the original function.  So, let's create one of those:
-
-    // The llenv pointer needs to be the correct size.  That size is
-    // 'cbox_ty', which was determined by trans_bind.
-    let cboxptr_ty = ty::mk_ptr(tcx, {ty:cbox_ty, mut:ast::imm});
-    check type_has_static_size(ccx, cboxptr_ty);
-    let llclosure_ptr_ty = type_of(ccx, cboxptr_ty);
-    let llclosure = PointerCast(l_bcx, fcx.llenv, llclosure_ptr_ty);
+    // environment that will contain the values of its arguments and a
+    // pointer to the original function.  This environment is always
+    // stored like an opaque box (see big comment at the header of the
+    // file), so we load the box body, which contains the type descr
+    // and cached data.
+    let llcdata = base::opaque_box_body(l_bcx, cdata_ty, fcx.llenv);
 
     // "target", in this context, means the function that's having some of its
     // arguments bound and that will be called inside the thunk we're
@@ -856,10 +810,10 @@ fn trans_bind_thunk(cx: @local_ctxt,
       }
       none {
         // Silly check
-        check type_is_tup_like(bcx, cbox_ty);
+        check type_is_tup_like(bcx, cdata_ty);
         let {bcx: cx, val: pair} =
-            GEP_tup_like(bcx, cbox_ty, llclosure,
-                         [0, abi::cbox_elt_bindings, 0]);
+            GEP_tup_like(bcx, cdata_ty, llcdata,
+                         [0, abi::closure_body_bindings, 0]);
         let lltargetenv =
             Load(cx, GEPi(cx, pair, [0, abi::fn_field_box]));
         let lltargetfn = Load
@@ -893,10 +847,10 @@ fn trans_bind_thunk(cx: @local_ctxt,
     let llargs: [ValueRef] = [llretptr, lltargetenv];
 
     // Copy in the type parameters.
-    check type_is_tup_like(l_bcx, cbox_ty);
+    check type_is_tup_like(l_bcx, cdata_ty);
     let {bcx: l_bcx, val: param_record} =
-        GEP_tup_like(l_bcx, cbox_ty, llclosure,
-                     [0, abi::cbox_elt_ty_params]);
+        GEP_tup_like(l_bcx, cdata_ty, llcdata,
+                     [0, abi::closure_body_ty_params]);
     let off = 0;
     for param in param_bounds {
         let dsc = Load(l_bcx, GEPi(l_bcx, param_record, [0, off])),
@@ -934,10 +888,10 @@ fn trans_bind_thunk(cx: @local_ctxt,
         // closure.
         some(e) {
           // Silly check
-          check type_is_tup_like(bcx, cbox_ty);
+          check type_is_tup_like(bcx, cdata_ty);
           let bound_arg =
-              GEP_tup_like(bcx, cbox_ty, llclosure,
-                           [0, abi::cbox_elt_bindings, b]);
+              GEP_tup_like(bcx, cdata_ty, llcdata,
+                           [0, abi::closure_body_bindings, b]);
           bcx = bound_arg.bcx;
           let val = bound_arg.val;
           if out_arg.mode == ast::by_val { val = Load(bcx, val); }
@@ -122,7 +122,8 @@ type crate_ctxt =
      shape_cx: shape::ctxt,
      gc_cx: gc::ctxt,
      crate_map: ValueRef,
-     dbg_cx: option<@debuginfo::debug_ctxt>};
+     dbg_cx: option<@debuginfo::debug_ctxt>,
+     mutable do_not_commit_warning_issued: bool};
 
 type local_ctxt =
     {path: [str],
@@ -243,6 +244,13 @@ type fn_ctxt =
      span: option<span>,
      lcx: @local_ctxt};
 
+fn warn_not_to_commit(ccx: @crate_ctxt, msg: str) {
+    if !ccx.do_not_commit_warning_issued {
+        ccx.do_not_commit_warning_issued = true;
+        ccx.sess.warn(msg + " -- do not commit like this!");
+    }
+}
+
 enum cleanup {
     clean(fn@(@block_ctxt) -> @block_ctxt),
     clean_temp(ValueRef, fn@(@block_ctxt) -> @block_ctxt),
@@ -652,8 +660,42 @@ fn T_opaque_vec(targ_cfg: @session::config) -> TypeRef {
     ret T_vec2(targ_cfg, T_i8());
 }
 
+// Let T be the content of a box @T.  tuplify_box_ty(t) returns the
+// representation of @T as a tuple (i.e., the ty::t version of what T_box()
+// returns).
+fn tuplify_box_ty(tcx: ty::ctxt, t: ty::t) -> ty::t {
+    ret tuplify_cbox_ty(tcx, t, ty::mk_type(tcx));
+}
+
+// As tuplify_box_ty(), but allows the caller to specify what type of type
+// descr is embedded in the box (ty::type vs ty::send_type).  This is useful
+// for unique closure boxes, hence the name "cbox_ty" (closure box type).
+fn tuplify_cbox_ty(tcx: ty::ctxt, t: ty::t, tydesc_t: ty::t) -> ty::t {
+    let ptr = ty::mk_ptr(tcx, {ty: ty::mk_nil(tcx), mut: ast::imm});
+    ret ty::mk_tup(tcx, [ty::mk_uint(tcx), tydesc_t,
+                         ptr, ptr,
+                         t]);
+}
+
+fn T_box_header_fields(cx: @crate_ctxt) -> [TypeRef] {
+    let ptr = T_ptr(T_i8());
+    ret [cx.int_type, T_ptr(cx.tydesc_type), ptr, ptr];
+}
+
+fn T_box_header(cx: @crate_ctxt) -> TypeRef {
+    ret T_struct(T_box_header_fields(cx));
+}
+
 fn T_box(cx: @crate_ctxt, t: TypeRef) -> TypeRef {
-    ret T_struct([cx.int_type, t]);
+    ret T_struct(T_box_header_fields(cx) + [t]);
+}
+
+fn T_opaque_box(cx: @crate_ctxt) -> TypeRef {
+    ret T_box(cx, T_i8());
+}
+
+fn T_opaque_box_ptr(cx: @crate_ctxt) -> TypeRef {
+    ret T_ptr(T_opaque_box(cx));
 }
 
 fn T_port(cx: @crate_ctxt, _t: TypeRef) -> TypeRef {
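To make the correspondence concrete: for a boxed int, `tuplify_box_ty` yields the ty::t tuple `(uint, tydesc, *nil, *nil, int)`, and `T_box` yields the matching LLVM struct. In C terms, what `T_box` now describes for an int body is roughly:

```cpp
// Rough C rendering of T_box_header_fields() + body; illustration only.
struct box_of_int {
    intptr_t   ref_count;   // cx.int_type
    type_desc *tydesc;      // T_ptr(cx.tydesc_type)
    void      *prev;        // T_ptr(T_i8())
    void      *next;        // T_ptr(T_i8())
    intptr_t   body;        // the boxed int itself
};
```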
@@ -681,15 +723,9 @@ fn T_typaram(tn: type_names) -> TypeRef {
 fn T_typaram_ptr(tn: type_names) -> TypeRef { ret T_ptr(T_typaram(tn)); }
 
 fn T_opaque_cbox_ptr(cx: @crate_ctxt) -> TypeRef {
-    let s = "*cbox";
-    alt name_has_type(cx.tn, s) { some(t) { ret t; } _ {} }
-    let t = T_ptr(T_struct([cx.int_type,
-                            T_ptr(cx.tydesc_type),
-                            T_i8() /* represents closed over tydescs
-                                      and data go here; see trans_closure.rs*/
-                           ]));
-    associate_type(cx.tn, s, t);
-    ret t;
+    // closures look like boxes (even when they are fn~ or fn&)
+    // see trans_closure.rs
+    ret T_opaque_box_ptr(cx);
 }
 
 fn T_enum_variant(cx: @crate_ctxt) -> TypeRef {
@@ -145,7 +145,7 @@ fn trans_iface_callee(bcx: @block_ctxt, callee_id: ast::node_id,
     -> lval_maybe_callee {
     let tcx = bcx_tcx(bcx);
     let {bcx, val} = trans_temp_expr(bcx, base);
-    let box_body = GEPi(bcx, val, [0, abi::box_rc_field_body]);
+    let box_body = GEPi(bcx, val, [0, abi::box_field_body]);
     let dict = Load(bcx, PointerCast(bcx, GEPi(bcx, box_body, [0, 1]),
                                      T_ptr(T_ptr(T_dict()))));
     // FIXME[impl] I doubt this is alignment-safe
@@ -266,7 +266,7 @@ fn trans_iface_wrapper(ccx: @crate_ctxt, pt: [ast::ident], m: ty::method,
     let self = Load(bcx, PointerCast(bcx,
                                      LLVMGetParam(llfn, 2u as c_uint),
                                      T_ptr(T_opaque_iface_ptr(ccx))));
-    let boxed = GEPi(bcx, self, [0, abi::box_rc_field_body]);
+    let boxed = GEPi(bcx, self, [0, abi::box_field_body]);
     let dict = Load(bcx, PointerCast(bcx, GEPi(bcx, boxed, [0, 1]),
                                      T_ptr(T_ptr(T_dict()))));
     let vtable = PointerCast(bcx, Load(bcx, GEPi(bcx, dict, [0, 0])),
@@ -1928,6 +1928,7 @@ fn parse_mod_items(p: parser, term: token::token,
     while p.token != term {
         let attrs = initial_attrs + parse_outer_attributes(p);
         initial_attrs = [];
+        #debug["parse_mod_items: parse_item(attrs=%?)", attrs];
         alt parse_item(p, attrs) {
           some(i) { items += [i]; }
           _ {
@@ -1935,6 +1936,7 @@ fn parse_mod_items(p: parser, term: token::token,
                      token::to_str(p.reader, p.token) + "'");
           }
         }
+        #debug["parse_mod_items: attrs=%?", attrs];
     }
     ret {view_items: view_items, items: items};
 }
@@ -75,8 +75,6 @@ native mod rustrt {
     fn drop_task(task_id: *rust_task);
     fn get_task_pointer(id: task_id) -> *rust_task;
 
-    fn migrate_alloc(alloc: *u8, target: task_id);
-
     fn start_task(id: task, closure: *rust_closure);
 
     fn rust_task_is_unwinding(rt: *rust_task) -> bool;
src/rt/boxed_region.cpp | 59 (new file)
@@ -0,0 +1,59 @@
+#include <assert.h>
+#include "boxed_region.h"
+#include "rust_internal.h"
+
+// #define DUMP_BOXED_REGION
+
+rust_opaque_box *boxed_region::malloc(type_desc *td) {
+    size_t header_size = sizeof(rust_opaque_box);
+    size_t body_size = td->size;
+    size_t body_align = td->align;
+    size_t total_size = align_to(header_size, body_align) + body_size;
+    rust_opaque_box *box =
+        (rust_opaque_box*)backing_region->malloc(total_size, "@");
+    box->td = td;
+    box->ref_count = 1;
+    box->prev = NULL;
+    box->next = live_allocs;
+    if (live_allocs) live_allocs->prev = box;
+    live_allocs = box;
+
+#   ifdef DUMP_BOXED_REGION
+    fprintf(stderr, "Allocated box %p with td %p,"
+            " size %lu==%lu+%lu, align %lu, prev %p, next %p\n",
+            box, td, total_size, header_size, body_size, body_align,
+            box->prev, box->next);
+#   endif
+
+    return box;
+}
+
+rust_opaque_box *boxed_region::calloc(type_desc *td) {
+    rust_opaque_box *box = malloc(td);
+    memset(box_body(box), 0, td->size);
+    return box;
+}
+
+void boxed_region::free(rust_opaque_box *box) {
+    // This turns out to not be true in various situations,
+    // like when we are unwinding after a failure.
+    //
+    // assert(box->ref_count == 0);
+
+    // This however should always be true.  Helps to detect
+    // double frees (kind of).
+    assert(box->td != NULL);
+
+#   ifdef DUMP_BOXED_REGION
+    fprintf(stderr, "Freed box %p with td %p, prev %p, next %p\n",
+            box, box->td, box->prev, box->next);
+#   endif
+
+    if (box->prev) box->prev->next = box->next;
+    if (box->next) box->next->prev = box->prev;
+    if (live_allocs == box) live_allocs = box->next;
+    box->prev = NULL;
+    box->next = NULL;
+    box->td = NULL;
+    backing_region->free(box);
+}
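A worked example of the size computation in boxed_region::malloc, with hypothetical numbers (td->size = 24, td->align = 8, and a 32-byte header as on a 64-bit target):

```cpp
#include <cstddef>

// Same rounding helper as boxed_region::align_to in boxed_region.h.
static size_t align_to(size_t v, size_t align) {
    size_t alignm1 = align - 1;
    return (v + alignm1) & ~alignm1;
}

int main() {
    // align_to(32, 8) == 32, so total_size == 32 + 24 == 56 and the body
    // begins at offset 32, which already satisfies the 8-byte alignment.
    size_t total_size = align_to(32, 8) + 24;
    return total_size == 56 ? 0 : 1;
}
```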
src/rt/boxed_region.h | 39 (new file)
@@ -0,0 +1,39 @@
+#ifndef BOXED_REGION_H
+#define BOXED_REGION_H
+
+#include <stdlib.h>
+
+struct type_desc;
+class memory_region;
+struct rust_opaque_box;
+
+/* Tracks the data allocated by a particular task in the '@' region.
+ * Currently still relies on the standard malloc as a backing allocator, but
+ * this could be improved someday if necessary.  Every allocation must provide
+ * a type descr which describes the payload (what follows the header). */
+class boxed_region {
+private:
+    memory_region *backing_region;
+    rust_opaque_box *live_allocs;
+
+    size_t align_to(size_t v, size_t align) {
+        size_t alignm1 = align - 1;
+        v += alignm1;
+        v &= ~alignm1;
+        return v;
+    }
+
+public:
+    boxed_region(memory_region *br)
+        : backing_region(br)
+        , live_allocs(NULL)
+    {}
+
+    rust_opaque_box *first_live_alloc() { return live_allocs; }
+
+    rust_opaque_box *malloc(type_desc *td);
+    rust_opaque_box *calloc(type_desc *td);
+    void free(rust_opaque_box *box);
+};
+
+#endif /* BOXED_REGION_H */
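A minimal usage sketch of the class above. The backing memory_region and the type_desc are stand-ins; in the runtime the boxed_region instance lives on the task (see the `task->boxed` accesses in rust_cc.cpp below):

```cpp
// Sketch, assuming a memory_region `region` and a type_desc `td` exist:
boxed_region boxed(&region);            // empty; live_allocs == NULL
rust_opaque_box *b = boxed.malloc(td);  // rc == 1, linked into live_allocs
rust_opaque_box *z = boxed.calloc(td);  // same, but body zeroed
boxed.free(z);                          // unlinked; td nulled to catch reuse
boxed.free(b);                          // live_allocs is empty again
```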
@@ -50,6 +50,9 @@ private:
     void dec_alloc();
     void maybe_poison(void *mem);
 
+    void release_alloc(void *mem);
+    void claim_alloc(void *mem);
+
 public:
     memory_region(rust_srv *srv, bool synchronized);
     memory_region(memory_region *parent);
@@ -58,10 +61,7 @@ public:
     void *realloc(void *mem, size_t size);
     void free(void *mem);
     virtual ~memory_region();
-
-    void release_alloc(void *mem);
-    void claim_alloc(void *mem);
 };
 
 inline void *operator new(size_t size, memory_region &region,
                           const char *tag) {
|
@ -429,22 +429,6 @@ start_task(rust_task_id id, fn_env_pair *f) {
|
||||
target->deref();
|
||||
}
|
||||
|
||||
extern "C" CDECL void
|
||||
migrate_alloc(void *alloc, rust_task_id tid) {
|
||||
rust_task *task = rust_scheduler::get_task();
|
||||
if(!alloc) return;
|
||||
rust_task *target = task->kernel->get_task_by_id(tid);
|
||||
if(target) {
|
||||
const type_desc *tydesc = task->release_alloc(alloc);
|
||||
target->claim_alloc(alloc, tydesc);
|
||||
target->deref();
|
||||
}
|
||||
else {
|
||||
// We couldn't find the target. Maybe we should just free?
|
||||
task->fail();
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" CDECL int
|
||||
sched_threads() {
|
||||
rust_task *task = rust_scheduler::get_task();
|
||||
|
@@ -25,7 +25,7 @@ namespace cc {
 
 // Internal reference count computation
 
-typedef std::map<void *,uintptr_t> irc_map;
+typedef std::map<rust_opaque_box*,uintptr_t> irc_map;
 
 class irc : public shape::data<irc,shape::ptr> {
     friend class shape::data<irc,shape::ptr>;
@@ -118,13 +118,6 @@ class irc : public shape::data<irc,shape::ptr> {
         }
     }
 
-    void walk_obj2() {
-        dp += sizeof(void *); // skip vtable
-        uint8_t *box_ptr = shape::bump_dp<uint8_t *>(dp);
-        shape::ptr ref_count_dp(box_ptr);
-        maybe_record_irc(ref_count_dp);
-    }
-
     void walk_iface2() {
         walk_box2();
     }
@@ -145,30 +138,32 @@ class irc : public shape::data<irc,shape::ptr> {
 
     void walk_uniq_contents2(irc &sub) { sub.walk(); }
 
-    void walk_box_contents2(irc &sub, shape::ptr &ref_count_dp) {
-        maybe_record_irc(ref_count_dp);
+    void walk_box_contents2(irc &sub, shape::ptr &box_dp) {
+        maybe_record_irc(box_dp);
 
         // Do not traverse the contents of this box; it's in the allocation
         // somewhere, so we're guaranteed to come back to it (if we haven't
         // traversed it already).
     }
 
-    void maybe_record_irc(shape::ptr &ref_count_dp) {
-        if (!ref_count_dp)
+    void maybe_record_irc(shape::ptr &box_dp) {
+        if (!box_dp)
             return;
 
+        rust_opaque_box *box_ptr = (rust_opaque_box *) box_dp;
+
         // Bump the internal reference count of the box.
-        if (ircs.find((void *)ref_count_dp) == ircs.end()) {
+        if (ircs.find(box_ptr) == ircs.end()) {
             LOG(task, gc,
                 "setting internal reference count for %p to 1",
-                (void *)ref_count_dp);
-            ircs[(void *)ref_count_dp] = 1;
+                box_ptr);
+            ircs[box_ptr] = 1;
         } else {
-            uintptr_t newcount = ircs[(void *)ref_count_dp] + 1;
+            uintptr_t newcount = ircs[box_ptr] + 1;
             LOG(task, gc,
                 "bumping internal reference count for %p to %lu",
-                (void *)ref_count_dp, newcount);
-            ircs[(void *)ref_count_dp] = newcount;
+                box_ptr, newcount);
+            ircs[box_ptr] = newcount;
         }
     }
 
@@ -207,36 +202,25 @@ irc::walk_variant2(shape::tag_info &tinfo, uint32_t variant_id,
 
 void
 irc::compute_ircs(rust_task *task, irc_map &ircs) {
-    std::map<void *,const type_desc *>::iterator
-        begin(task->local_allocs.begin()), end(task->local_allocs.end());
-    while (begin != end) {
-        uint8_t *p = reinterpret_cast<uint8_t *>(begin->first);
-
-        const type_desc *tydesc = begin->second;
-
-        LOG(task, gc, "determining internal ref counts: %p, tydesc=%p", p,
-            tydesc);
+    boxed_region *boxed = &task->boxed;
+    for (rust_opaque_box *box = boxed->first_live_alloc();
+         box != NULL;
+         box = box->next) {
+        type_desc *tydesc = box->td;
+        uint8_t *body = (uint8_t*) box_body(box);
+
+        LOG(task, gc,
+            "determining internal ref counts: "
+            "box=%p tydesc=%p body=%p",
+            box, tydesc, body);
 
         shape::arena arena;
         shape::type_param *params =
-            shape::type_param::from_tydesc_and_data(tydesc, p, arena);
-
-#if 0
-        shape::print print(task, true, tydesc->shape, params,
-                           tydesc->shape_tables);
-        print.walk();
-
-        shape::log log(task, true, tydesc->shape, params,
-                       tydesc->shape_tables, p + sizeof(uintptr_t),
-                       std::cerr);
-        log.walk();
-#endif
+            shape::type_param::from_tydesc_and_data(tydesc, body, arena);
 
         irc irc(task, true, tydesc->shape, params, tydesc->shape_tables,
-                p + sizeof(uintptr_t), ircs);
+                body, ircs);
         irc.walk();
-
-        ++begin;
     }
 }
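The loop above is the new canonical traversal of a task's boxes: the cycle collector no longer walks a side map of allocations, it follows the doubly-linked list threaded through the box headers themselves:

```cpp
// The traversal idiom introduced by this commit (find_roots below uses it
// too): every live box is reachable from the task's boxed_region.
for (rust_opaque_box *box = task->boxed.first_live_alloc();
     box != NULL;
     box = box->next) {
    // box->td describes the payload; box_body(box) points at it.
}
```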
@@ -244,17 +228,17 @@ irc::compute_ircs(rust_task *task, irc_map &ircs) {
 // Root finding
 
 void
-find_roots(rust_task *task, irc_map &ircs, std::vector<void *> &roots) {
-    std::map<void *,const type_desc *>::iterator
-        begin(task->local_allocs.begin()), end(task->local_allocs.end());
-    while (begin != end) {
-        void *alloc = begin->first;
-        uintptr_t *ref_count_ptr = reinterpret_cast<uintptr_t *>(alloc);
-        uintptr_t ref_count = *ref_count_ptr;
+find_roots(rust_task *task, irc_map &ircs,
+           std::vector<rust_opaque_box *> &roots) {
+    boxed_region *boxed = &task->boxed;
+    for (rust_opaque_box *box = boxed->first_live_alloc();
+         box != NULL;
+         box = box->next) {
+        uintptr_t ref_count = box->ref_count;
 
         uintptr_t irc;
-        if (ircs.find(alloc) != ircs.end())
-            irc = ircs[alloc];
+        if (ircs.find(box) != ircs.end())
+            irc = ircs[box];
         else
             irc = 0;
 
@ -262,16 +246,14 @@ find_roots(rust_task *task, irc_map &ircs, std::vector<void *> &roots) {
|
||||
// This allocation must be a root, because the internal reference
|
||||
// count is smaller than the total reference count.
|
||||
LOG(task, gc,"root found: %p, irc %lu, ref count %lu",
|
||||
alloc, irc, ref_count);
|
||||
roots.push_back(alloc);
|
||||
box, irc, ref_count);
|
||||
roots.push_back(box);
|
||||
} else {
|
||||
LOG(task, gc, "nonroot found: %p, irc %lu, ref count %lu",
|
||||
alloc, irc, ref_count);
|
||||
box, irc, ref_count);
|
||||
assert(irc == ref_count && "Internal reference count must be "
|
||||
"less than or equal to the total reference count!");
|
||||
}
|
||||
|
||||
++begin;
|
||||
}
|
||||
}
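
The root test above hinges on one inequality: the box header's ref_count tallies every reference, while the irc map tallies only those found inside other boxes, so any shortfall must be held by a stack frame or register. A minimal standalone sketch of that decision, with hypothetical counts (not part of the commit):

#include <cstdio>
#include <stdint.h>

int main() {
    uintptr_t ref_count = 3; // total references, from the box header
    uintptr_t irc = 2;       // references found inside other boxes
    if (irc < ref_count)
        printf("root: %lu reference(s) held outside the box heap\n",
               (unsigned long)(ref_count - irc));
    else
        printf("non-root: every reference is internal\n");
    return 0;
}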

@ -281,7 +263,7 @@ find_roots(rust_task *task, irc_map &ircs, std::vector<void *> &roots) {
class mark : public shape::data<mark,shape::ptr> {
friend class shape::data<mark,shape::ptr>;

std::set<void *> &marked;
std::set<rust_opaque_box *> &marked;

mark(const mark &other, const shape::ptr &in_dp)
: shape::data<mark,shape::ptr>(other.task, other.align, other.sp,
@ -319,7 +301,7 @@ class mark : public shape::data<mark,shape::ptr> {
const shape::type_param *in_params,
const rust_shape_tables *in_tables,
uint8_t *in_data,
std::set<void *> &in_marked)
std::set<rust_opaque_box*> &in_marked)
: shape::data<mark,shape::ptr>(in_task, in_align, in_sp, in_params,
in_tables, in_data),
marked(in_marked) {}
@ -357,7 +339,7 @@ class mark : public shape::data<mark,shape::ptr> {
case shape::SHAPE_BOX_FN: {
// Record an irc for the environment box, but don't descend
// into it since it will be walked via the box's allocation
shape::data<mark,shape::ptr>::walk_fn_contents1(dp, false);
shape::data<mark,shape::ptr>::walk_fn_contents1();
break;
}
case shape::SHAPE_BARE_FN: // Does not close over data.
@ -368,10 +350,6 @@ class mark : public shape::data<mark,shape::ptr> {
}
}

void walk_obj2() {
shape::data<mark,shape::ptr>::walk_obj_contents1(dp);
}

void walk_res2(const shape::rust_fn *dtor, unsigned n_params,
const shape::type_param *params, const uint8_t *end_sp,
bool live) {
@ -392,14 +370,16 @@ class mark : public shape::data<mark,shape::ptr> {

void walk_uniq_contents2(mark &sub) { sub.walk(); }

void walk_box_contents2(mark &sub, shape::ptr &ref_count_dp) {
if (!ref_count_dp)
void walk_box_contents2(mark &sub, shape::ptr &box_dp) {
if (!box_dp)
return;

if (marked.find((void *)ref_count_dp) != marked.end())
rust_opaque_box *box_ptr = (rust_opaque_box *) box_dp;

if (marked.find(box_ptr) != marked.end())
return; // Skip to avoid chasing cycles.

marked.insert((void *)ref_count_dp);
marked.insert(box_ptr);
sub.walk();
}

@ -418,8 +398,9 @@ class mark : public shape::data<mark,shape::ptr> {
inline void walk_number2() { /* no-op */ }

public:
static void do_mark(rust_task *task, const std::vector<void *> &roots,
std::set<void *> &marked);
static void do_mark(rust_task *task,
const std::vector<rust_opaque_box *> &roots,
std::set<rust_opaque_box*> &marked);
};

void
@ -438,35 +419,28 @@ mark::walk_variant2(shape::tag_info &tinfo, uint32_t variant_id,
}

void
mark::do_mark(rust_task *task, const std::vector<void *> &roots,
std::set<void *> &marked) {
std::vector<void *>::const_iterator begin(roots.begin()),
end(roots.end());
mark::do_mark(rust_task *task,
const std::vector<rust_opaque_box *> &roots,
std::set<rust_opaque_box *> &marked) {
std::vector<rust_opaque_box *>::const_iterator
begin(roots.begin()),
end(roots.end());
while (begin != end) {
void *alloc = *begin;
if (marked.find(alloc) == marked.end()) {
marked.insert(alloc);
rust_opaque_box *box = *begin;
if (marked.find(box) == marked.end()) {
marked.insert(box);

const type_desc *tydesc = task->local_allocs[alloc];
const type_desc *tydesc = box->td;

LOG(task, gc, "marking: %p, tydesc=%p", alloc, tydesc);
LOG(task, gc, "marking: %p, tydesc=%p", box, tydesc);

uint8_t *p = reinterpret_cast<uint8_t *>(alloc);
uint8_t *p = (uint8_t*) box_body(box);
shape::arena arena;
shape::type_param *params =
shape::type_param::from_tydesc_and_data(tydesc, p, arena);

#if 0
// We skip over the reference count here.
shape::log log(task, true, tydesc->shape, params,
tydesc->shape_tables, p + sizeof(uintptr_t),
std::cerr);
log.walk();
#endif

// We skip over the reference count here.
mark mark(task, true, tydesc->shape, params, tydesc->shape_tables,
p + sizeof(uintptr_t), marked);
p, marked);
mark.walk();
}

@ -552,13 +526,9 @@ class sweep : public shape::data<sweep,shape::ptr> {
fn_env_pair pair = *(fn_env_pair*)dp;

// free closed over data:
shape::data<sweep,shape::ptr>::walk_fn_contents1(dp, true);
shape::data<sweep,shape::ptr>::walk_fn_contents1();

// now free the embedded type descr:
//
// see comment in walk_fn_contents1() concerning null_td
// to understand why this does not occur during the normal
// walk.
upcall_s_free_shared_type_desc((type_desc*)pair.env->td);

// now free the ptr:
@ -610,7 +580,7 @@ class sweep : public shape::data<sweep,shape::ptr> {

void walk_uniq_contents2(sweep &sub) { sub.walk(); }

void walk_box_contents2(sweep &sub, shape::ptr &ref_count_dp) {
void walk_box_contents2(sweep &sub, shape::ptr &box_dp) {
return;
}

@ -637,50 +607,50 @@ class sweep : public shape::data<sweep,shape::ptr> {
inline void walk_number2() { /* no-op */ }

public:
static void do_sweep(rust_task *task, const std::set<void *> &marked);
static void do_sweep(rust_task *task,
const std::set<rust_opaque_box*> &marked);
};

void
sweep::do_sweep(rust_task *task, const std::set<void *> &marked) {
std::map<void *,const type_desc *>::iterator
begin(task->local_allocs.begin()), end(task->local_allocs.end());
while (begin != end) {
void *alloc = begin->first;
sweep::do_sweep(rust_task *task,
const std::set<rust_opaque_box*> &marked) {
boxed_region *boxed = &task->boxed;
rust_opaque_box *box = boxed->first_live_alloc();
while (box != NULL) {
// save next ptr as we may be freeing box
rust_opaque_box *box_next = box->next;
if (marked.find(box) == marked.end()) {
LOG(task, gc, "object is part of a cycle: %p", box);

if (marked.find(alloc) == marked.end()) {
LOG(task, gc, "object is part of a cycle: %p", alloc);

const type_desc *tydesc = begin->second;
uint8_t *p = reinterpret_cast<uint8_t *>(alloc);
const type_desc *tydesc = box->td;
uint8_t *p = (uint8_t*) box_body(box);
shape::arena arena;
shape::type_param *params =
shape::type_param::from_tydesc_and_data(tydesc, p, arena);

sweep sweep(task, true, tydesc->shape,
params, tydesc->shape_tables,
p + sizeof(uintptr_t));
p);
sweep.walk();

// FIXME: Run the destructor, *if* it's a resource.
task->free(alloc);
boxed->free(box);
}
++begin;
box = box_next;
}
}


void
do_cc(rust_task *task) {
LOG(task, gc, "cc; n allocs = %lu",
(long unsigned int)task->local_allocs.size());
LOG(task, gc, "cc");

irc_map ircs;
irc::compute_ircs(task, ircs);

std::vector<void *> roots;
std::vector<rust_opaque_box*> roots;
find_roots(task, ircs, roots);

std::set<void *> marked;
std::set<rust_opaque_box*> marked;
mark::do_mark(task, roots, marked);

sweep::do_sweep(task, marked);
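
do_cc is the whole pipeline in four phases: count internal references, pick out roots, mark from the roots, sweep whatever stays unmarked. A toy trace (values assumed, not from the commit) of why a pure two-box cycle gets collected even though its reference counts never reach zero:

#include <cstdio>

int main() {
    // Boxes A and B hold the only references to each other.
    unsigned ref_count[2] = {1, 1}; // header counts
    unsigned irc[2]       = {1, 1}; // references found inside the heap
    bool have_root = false;
    for (int i = 0; i < 2; i++)
        if (irc[i] < ref_count[i])
            have_root = true;
    // No roots means mark() marks nothing, so sweep() frees both boxes.
    printf("%s\n", have_root ? "kept" : "cycle collected");
    return 0;
}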

@ -231,29 +231,36 @@ struct rust_shape_tables {
uint8_t *resources;
};

struct rust_opaque_closure;
typedef unsigned long ref_cnt_t;

// Corresponds to the boxed data in the @ region. The body follows the
// header; you can obtain a ptr via box_body() below.
struct rust_opaque_box {
ref_cnt_t ref_count;
type_desc *td;
rust_opaque_box *prev;
rust_opaque_box *next;
};

// The type of functions that we spawn, which fall into two categories:
// - the main function: has a NULL environment, but uses the void* arg
// - unique closures of type fn~(): have a non-NULL environment, but
// no arguments (and hence the final void*) is harmless
typedef void (*CDECL spawn_fn)(void*, rust_opaque_closure*, void *);
typedef void (*CDECL spawn_fn)(void*, rust_opaque_box*, void *);
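
To make the new signature concrete, here is a hypothetical spawnee in the "main function" category described above (my_main and the parameter names are invented, and CDECL is elided for the sketch):

struct rust_opaque_box; // opaque to the spawned function
typedef void (*spawn_fn)(void *, rust_opaque_box *, void *);

// env is NULL for the main function; args carries the startup data.
static void my_main(void *ret, rust_opaque_box *env, void *args) {
    (void)ret; (void)env; (void)args;
}

static spawn_fn entry = my_main; // the signatures line up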

// corresponds to the layout of a fn(), fn@(), fn~() etc
struct fn_env_pair {
spawn_fn f;
rust_opaque_closure *env;
rust_opaque_box *env;
};

// corresponds the closures generated in trans_closure.rs
struct rust_opaque_closure {
intptr_t ref_count;
const type_desc *td;
// The size/types of these will vary per closure, so they
// cannot be statically expressed. See trans_closure.rs:
const type_desc *captured_tds[0];
// struct bound_data;
};
static inline void *box_body(rust_opaque_box *box) {
// Here we take advantage of the fact that the size of a box in 32
// (resp. 64) bit is 16 (resp. 32) bytes, and thus always 16-byte aligned.
// If this were to change, we would have to update the method
// rustc::middle::trans::base::opaque_box_body() as well.
return (void*)(box + 1);
}
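
A quick check of the size claim in that comment, as a standalone sketch (the stub struct mirrors the four header fields; it is not the real declaration):

#include <cassert>
#include <cstdio>

struct header_stub {
    unsigned long ref_count; // one word
    void *td;                // one word
    header_stub *prev;       // one word
    header_stub *next;       // one word
};

int main() {
    // 4 words = 16 bytes on a 32-bit target, 32 bytes on a 64-bit one;
    // both are multiples of 16, so (box + 1) is already aligned.
    assert(sizeof(header_stub) % 16 == 0);
    printf("header is %lu bytes\n", (unsigned long)sizeof(header_stub));
    return 0;
}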

struct type_desc {
// First part of type_desc is known to compiler.

@ -264,7 +264,7 @@ private:
result = sub.result;
}

inline void walk_box_contents2(cmp &sub, ptr_pair &ref_count_dp) {
inline void walk_box_contents2(cmp &sub, ptr_pair &box_dp) {
sub.align = true;
sub.walk();
result = sub.result;

@ -28,7 +28,6 @@ namespace shape {

typedef unsigned long tag_variant_t;
typedef unsigned long tag_align_t;
typedef unsigned long ref_cnt_t;

// Constants

@ -376,7 +375,6 @@ ctxt<T>::walk() {
case SHAPE_TAG: walk_tag0(); break;
case SHAPE_BOX: walk_box0(); break;
case SHAPE_STRUCT: walk_struct0(); break;
case SHAPE_OBJ: WALK_SIMPLE(walk_obj1); break;
case SHAPE_RES: walk_res0(); break;
case SHAPE_VAR: walk_var0(); break;
case SHAPE_UNIQ: walk_uniq0(); break;
@ -591,7 +589,6 @@ public:
default: abort();
}
}
void walk_obj1() { DPRINT("obj"); }
void walk_iface1() { DPRINT("iface"); }

void walk_tydesc1(char kind) {
@ -645,7 +642,6 @@ public:
void walk_uniq1() { sa.set(sizeof(void *), sizeof(void *)); }
void walk_box1() { sa.set(sizeof(void *), sizeof(void *)); }
void walk_fn1(char) { sa.set(sizeof(void *)*2, sizeof(void *)); }
void walk_obj1() { sa.set(sizeof(void *)*2, sizeof(void *)); }
void walk_iface1() { sa.set(sizeof(void *), sizeof(void *)); }
void walk_tydesc1(char) { sa.set(sizeof(void *), sizeof(void *)); }
void walk_closure1();
@ -854,9 +850,8 @@ protected:

void walk_box_contents1();
void walk_uniq_contents1();
void walk_fn_contents1(ptr &dp, bool null_td);
void walk_obj_contents1(ptr &dp);
void walk_iface_contents1(ptr &dp);
void walk_fn_contents1();
void walk_iface_contents1();
void walk_variant1(tag_info &tinfo, tag_variant_t variant);

static std::pair<uint8_t *,uint8_t *> get_vec_data_range(ptr dp);
@ -894,13 +889,6 @@ public:
dp = next_dp;
}

void walk_obj1() {
ALIGN_TO(alignof<void *>());
U next_dp = dp + sizeof(void *) * 2;
static_cast<T *>(this)->walk_obj2();
dp = next_dp;
}

void walk_iface1() {
ALIGN_TO(alignof<void *>());
U next_dp = dp + sizeof(void *);
@ -946,9 +934,17 @@ template<typename T,typename U>
void
data<T,U>::walk_box_contents1() {
typename U::template data<uint8_t *>::t box_ptr = bump_dp<uint8_t *>(dp);
U ref_count_dp(box_ptr);
T sub(*static_cast<T *>(this), ref_count_dp + sizeof(ref_cnt_t));
static_cast<T *>(this)->walk_box_contents2(sub, ref_count_dp);
U box_dp(box_ptr);

// No need to worry about alignment so long as the box header is
// a multiple of 16 bytes. We can just find the body by adding
// the size of header to box_dp.
assert ((sizeof(rust_opaque_box) % 16) == 0 ||
!"Must align to find the box body");

U body_dp = box_dp + sizeof(rust_opaque_box);
T sub(*static_cast<T *>(this), body_dp);
static_cast<T *>(this)->walk_box_contents2(sub, box_dp);
}
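
The static_cast dance above is the curiously recurring template pattern: the shared walker computes the body pointer once, and each pass (irc, mark, sweep, cmp, log) supplies its own walk_box_contents2 without virtual dispatch. A stripped-down sketch of the same shape, with hypothetical names:

#include <cstdio>

template<typename T>
struct walker {
    void walk_box(char *box_dp, unsigned header_size) {
        char *body_dp = box_dp + header_size; // skip the box header
        static_cast<T *>(this)->walk_box_contents2(body_dp, box_dp);
    }
};

struct tracer : walker<tracer> {
    void walk_box_contents2(char *body, char *box) {
        printf("box %p, body %p\n", (void *)box, (void *)body);
    }
};

int main() {
    char fake_box[64] = {0};
    tracer t;
    t.walk_box(fake_box, 32); // pretend the header is 32 bytes
    return 0;
}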

template<typename T,typename U>
@ -1010,80 +1006,26 @@ data<T,U>::walk_tag1(tag_info &tinfo) {

template<typename T,typename U>
void
data<T,U>::walk_fn_contents1(ptr &dp, bool null_td) {
data<T,U>::walk_fn_contents1() {
fn_env_pair pair = bump_dp<fn_env_pair>(dp);
if (!pair.env)
return;

arena arena;
const type_desc *closure_td = pair.env->td;
type_param *params =
type_param::from_tydesc(closure_td, arena);
ptr closure_dp((uintptr_t)pair.env);
type_param *params = type_param::from_tydesc(closure_td, arena);
ptr closure_dp((uintptr_t)box_body(pair.env));
T sub(*static_cast<T *>(this), closure_td->shape, params,
closure_td->shape_tables, closure_dp);
sub.align = true;

if (null_td) {
// if null_td flag is true, null out the type descr from
// the data structure while we walk. This is used in cycle
// collector when we are sweeping up data. The idea is that
// we are using the information in the embedded type desc to
// walk the contents, so we do not want to free it during that
// walk. This is not *strictly* necessary today because
// type_param::from_tydesc() actually pulls out the "shape"
// string and other information and copies it into a new
// location that is unaffected by the free. But it seems
// safer, particularly as this pulling out of information will
// not cope with nested, derived type descriptors.
pair.env->td = NULL;
}

sub.walk();

if (null_td) {
pair.env->td = closure_td;
}
}

template<typename T,typename U>
void
data<T,U>::walk_obj_contents1(ptr &dp) {
dp += sizeof(void *); // Skip over the vtable.

uint8_t *box_ptr = bump_dp<uint8_t *>(dp);
type_desc *subtydesc =
*reinterpret_cast<type_desc **>(box_ptr + sizeof(void *));
ptr obj_closure_dp(box_ptr + sizeof(void *));
if (!box_ptr) // Null check.
return;

arena arena;
type_param *params = type_param::from_obj_shape(subtydesc->shape,
obj_closure_dp, arena);
T sub(*static_cast<T *>(this), subtydesc->shape, params,
subtydesc->shape_tables, obj_closure_dp);
sub.align = true;
sub.walk();
}

template<typename T,typename U>
void
data<T,U>::walk_iface_contents1(ptr &dp) {
uint8_t *box_ptr = bump_dp<uint8_t *>(dp);
if (!box_ptr) return;
U ref_count_dp(box_ptr);
uint8_t *body_ptr = box_ptr + sizeof(void*);
type_desc *valtydesc =
*reinterpret_cast<type_desc **>(body_ptr);
ptr value_dp(body_ptr + sizeof(void*) * 2);
// FIXME The 5 is a hard-coded way to skip over a struct shape
// header and the first two (number-typed) fields. This is too
// fragile, but I didn't see a good way to properly encode it.
T sub(*static_cast<T *>(this), valtydesc->shape + 5, NULL, NULL,
value_dp);
sub.align = true;
static_cast<T *>(this)->walk_box_contents2(sub, ref_count_dp);
data<T,U>::walk_iface_contents1() {
walk_box_contents1();
}

// Polymorphic logging, for convenience
@ -1161,19 +1103,13 @@ private:
void walk_fn2(char kind) {
out << prefix << "fn";
prefix = "";
data<log,ptr>::walk_fn_contents1(dp, false);
}

void walk_obj2() {
out << prefix << "obj";
prefix = "";
data<log,ptr>::walk_obj_contents1(dp);
data<log,ptr>::walk_fn_contents1();
}

void walk_iface2() {
out << prefix << "iface(";
prefix = "";
data<log,ptr>::walk_iface_contents1(dp);
data<log,ptr>::walk_iface_contents1();
out << prefix << ")";
}

@ -14,6 +14,7 @@
#include <algorithm>

#include "globals.h"
#include "rust_upcall.h"

// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
@ -246,6 +247,7 @@ rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
running_on(-1),
pinned_on(-1),
local_region(&sched->srv->local_region),
boxed(&local_region),
unwinding(false),
killed(false),
propagate_failure(true),
@ -295,7 +297,7 @@ rust_task::~rust_task()
struct spawn_args {
rust_task *task;
spawn_fn f;
rust_opaque_closure *envptr;
rust_opaque_box *envptr;
void *argptr;
};

@ -330,8 +332,6 @@ cleanup_task(cleanup_args *args) {
}
}

extern "C" void upcall_shared_free(void* ptr);

// This runs on the Rust stack
extern "C" CDECL
void task_start_wrapper(spawn_args *a)
@ -349,12 +349,13 @@ void task_start_wrapper(spawn_args *a)
threw_exception = true;
}

rust_opaque_closure* env = a->envptr;
rust_opaque_box* env = a->envptr;
if(env) {
// free the environment.
// free the environment (which should be a unique closure).
const type_desc *td = env->td;
LOG(task, task, "Freeing env %p with td %p", env, td);
td->drop_glue(NULL, NULL, td->first_param, env);
td->drop_glue(NULL, NULL, td->first_param, box_body(env));
upcall_free_shared_type_desc(env->td);
upcall_shared_free(env);
}

@ -367,7 +368,7 @@ void task_start_wrapper(spawn_args *a)

void
rust_task::start(spawn_fn spawnee_fn,
rust_opaque_closure *envptr,
rust_opaque_box *envptr,
void *argptr)
{
LOG(this, task, "starting task from fn 0x%" PRIxPTR
@ -678,38 +679,6 @@ rust_port *rust_task::get_port_by_id(rust_port_id id) {
return port;
}


// Temporary routine to allow boxes on one task's shared heap to be reparented
// to another.
const type_desc *
rust_task::release_alloc(void *alloc) {
I(sched, !lock.lock_held_by_current_thread());
lock.lock();

assert(local_allocs.find(alloc) != local_allocs.end());
const type_desc *tydesc = local_allocs[alloc];
local_allocs.erase(alloc);

local_region.release_alloc(alloc);

lock.unlock();
return tydesc;
}

// Temporary routine to allow boxes from one task's shared heap to be
// reparented to this one.
void
rust_task::claim_alloc(void *alloc, const type_desc *tydesc) {
I(sched, !lock.lock_held_by_current_thread());
lock.lock();

assert(local_allocs.find(alloc) == local_allocs.end());
local_allocs[alloc] = tydesc;
local_region.claim_alloc(alloc);

lock.unlock();
}

void
rust_task::notify(bool success) {
// FIXME (1078) Do this in rust code

@ -14,6 +14,7 @@
#include "rust_internal.h"
#include "rust_kernel.h"
#include "rust_obstack.h"
#include "boxed_region.h"

// Corresponds to the rust chan (currently _chan) type.
struct chan_handle {
@ -106,6 +107,7 @@ rust_task : public kernel_owned<rust_task>, rust_cond
int pinned_on;

memory_region local_region;
boxed_region boxed;

// Indicates that fail() has been called and we are cleaning up.
// We use this to suppress the "killed" flag during calls to yield.
@ -121,7 +123,6 @@ rust_task : public kernel_owned<rust_task>, rust_cond

rust_obstack dynastack;

std::map<void *,const type_desc *> local_allocs;
uint32_t cc_counter;

debug::task_debug_info debug;
@ -139,7 +140,7 @@ rust_task : public kernel_owned<rust_task>, rust_cond
~rust_task();

void start(spawn_fn spawnee_fn,
rust_opaque_closure *env,
rust_opaque_box *env,
void *args);
void start();
bool running();
@ -194,11 +195,6 @@ rust_task : public kernel_owned<rust_task>, rust_cond
// not at all safe.
intptr_t get_ref_count() const { return ref_count; }

// FIXME: These functions only exist to get the tasking system off the
// ground. We should never be migrating shared boxes between tasks.
const type_desc *release_alloc(void *alloc);
void claim_alloc(void *alloc, const type_desc *tydesc);

void notify(bool success);

void *new_stack(size_t stk_sz, void *args_addr, size_t args_sz);

@ -16,6 +16,20 @@
#include <stdint.h>


#ifdef __GNUC__
#define LOG_UPCALL_ENTRY(task) \
LOG(task, upcall, \
"> UPCALL %s - task: %s 0x%" PRIxPTR \
" retpc: x%" PRIxPTR, \
__FUNCTION__, \
(task)->name, (task), \
__builtin_return_address(0));
#else
#define LOG_UPCALL_ENTRY(task) \
LOG(task, upcall, "> UPCALL task: %s @x%" PRIxPTR, \
(task)->name, (task));
#endif

// This is called to ensure we've set up our rust stacks
// correctly. Strategically placed at entry to upcalls because they begin on
// the rust stack and happen frequently enough to catch most stack changes,
@ -98,7 +112,6 @@ upcall_fail(char const *expr,

struct s_malloc_args {
uintptr_t retval;
size_t nbytes;
type_desc *td;
};

@ -107,31 +120,27 @@ upcall_s_malloc(s_malloc_args *args) {
rust_task *task = rust_scheduler::get_task();
LOG_UPCALL_ENTRY(task);

LOG(task, mem,
"upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
args->nbytes, args->td);
LOG(task, mem, "upcall malloc(0x%" PRIxPTR ")", args->td);

gc::maybe_gc(task);
cc::maybe_cc(task);

// TODO: Maybe use dladdr here to find a more useful name for the
// type_desc.
// FIXME--does this have to be calloc?
rust_opaque_box *box = task->boxed.calloc(args->td);
void *body = box_body(box);

void *p = task->malloc(args->nbytes, "tdesc", args->td);
memset(p, '\0', args->nbytes);

task->local_allocs[p] = args->td;
debug::maybe_track_origin(task, p);
debug::maybe_track_origin(task, box);

LOG(task, mem,
"upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ") = 0x%" PRIxPTR,
args->nbytes, args->td, (uintptr_t)p);
args->retval = (uintptr_t) p;
"upcall malloc(0x%" PRIxPTR ") = box 0x%" PRIxPTR
" with body 0x%" PRIxPTR,
args->td, (uintptr_t)box, (uintptr_t)body);
args->retval = (uintptr_t) box;
}

extern "C" CDECL uintptr_t
upcall_malloc(size_t nbytes, type_desc *td) {
s_malloc_args args = {0, nbytes, td};
upcall_malloc(type_desc *td) {
s_malloc_args args = {0, td};
UPCALL_SWITCH_STACK(&args, upcall_s_malloc);
return args.retval;
}
@ -155,10 +164,10 @@ upcall_s_free(s_free_args *args) {
"upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")",
(uintptr_t)args->ptr, args->is_gc);

task->local_allocs.erase(args->ptr);
debug::maybe_untrack_origin(task, args->ptr);

task->free(args->ptr, (bool) args->is_gc);
rust_opaque_box *box = (rust_opaque_box*) args->ptr;
task->boxed.free(box);
}

extern "C" CDECL void
@ -167,6 +176,21 @@ upcall_free(void* ptr, uintptr_t is_gc) {
UPCALL_SWITCH_STACK(&args, upcall_s_free);
}

/**********************************************************************
* Sanity checks on boxes, insert when debugging possible
* use-after-free bugs. See maybe_validate_box() in trans.rs.
*/

extern "C" CDECL void
upcall_validate_box(rust_opaque_box* ptr) {
if (ptr) {
assert(ptr->ref_count > 0);
assert(ptr->td != NULL);
assert(ptr->td->align <= 8);
assert(ptr->td->size <= 4096); // might not really be true...
}
}
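
For reference, a call site of the kind the compiler can emit when these checks are enabled might look like the following (read_first_word is hypothetical, and the sketch assumes the rust_opaque_box and box_body declarations above; the real insertion logic is maybe_validate_box() in trans.rs):

extern "C" void upcall_validate_box(rust_opaque_box *ptr);

static int read_first_word(rust_opaque_box *b) {
    upcall_validate_box(b);          // fail fast on a freed or garbage box
    int *body = (int *) box_body(b); // then do the ordinary access
    return body[0];
}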

/**********************************************************************
* Allocate an object in the exchange heap.
*/

@ -1,17 +1,7 @@
#pragma once

#ifdef __GNUC__
#define LOG_UPCALL_ENTRY(task) \
LOG(task, upcall, \
"> UPCALL %s - task: %s 0x%" PRIxPTR \
" retpc: x%" PRIxPTR, \
__FUNCTION__, \
(task)->name, (task), \
__builtin_return_address(0));
#else
#define LOG_UPCALL_ENTRY(task) \
LOG(task, upcall, "> UPCALL task: %s @x%" PRIxPTR, \
(task)->name, (task));
#endif
// Upcalls used from C code on occasion:

extern "C" CDECL void upcall_shared_free(void* ptr);
extern "C" CDECL void upcall_free_shared_type_desc(type_desc *td);

@ -17,7 +17,6 @@ get_task_pointer
get_time
last_os_error
leak
migrate_alloc
nano_time
new_port
new_task
@ -63,6 +62,7 @@ upcall_dynastack_free
upcall_dynastack_mark
upcall_fail
upcall_free
upcall_validate_box
upcall_create_shared_type_desc
upcall_free_shared_type_desc
upcall_get_type_desc
@ -98,4 +98,3 @@ rust_uvtmp_read_start
rust_uvtmp_timer
rust_uvtmp_delete_buf
rust_uvtmp_get_req_id

@ -39,7 +39,7 @@ rust_domain_test::run() {
return true;
}

void task_entry(void *, rust_opaque_closure *, void *) {
void task_entry(void *, rust_opaque_box*, void *) {
printf("task entry\n");
}