Implement non-internal ivecs

Vectors are now similar to our old, pre-internal vectors, except that
they are uniquely owned, not refcounted.

Their name should probably change too, then. I've renamed them to vec
in the runtime, will do so throughout the compiler later.
This commit is contained in:
Marijn Haverbeke 2011-08-25 10:18:02 +02:00
parent 855e0a4713
commit c9c5ee252a
27 changed files with 493 additions and 1554 deletions

View File

@ -62,7 +62,7 @@ endif
# runtime used directly by the compiler -- the binaries built by the
# snapshot won't know about the changes yet. Don't leave this on. Turn
# it on, snapshot, and turn it off again.
# CFG_USE_SNAP_LIBS_FOR_STAGE1 = 1
CFG_USE_SNAP_LIBS_FOR_STAGE1 = 1
# version-string calculation
CFG_GIT_DIR := $(CFG_SRC_DIR).git

View File

@ -92,24 +92,12 @@ const closure_elt_bindings: int = 1;
const closure_elt_ty_params: int = 2;
const ivec_default_length: uint = 4u;
const ivec_elt_fill: uint = 0u;
const ivec_elt_len: uint = 0u;
const ivec_elt_alen: uint = 1u;
const ivec_elt_alloc: uint = 1u;
const ivec_elt_elems: uint = 2u;
const ivec_heap_stub_elt_zero: uint = 0u;
const ivec_heap_stub_elt_alen: uint = 1u;
const ivec_heap_stub_elt_ptr: uint = 2u;
const ivec_heap_elt_len: uint = 0u;
const ivec_heap_elt_elems: uint = 1u;
const worst_case_glue_call_args: int = 7;
const abi_version: uint = 1u;

View File

@ -40,10 +40,7 @@ type upcalls =
new_str: ValueRef,
evec_append: ValueRef,
get_type_desc: ValueRef,
ivec_resize: ValueRef,
ivec_spill: ValueRef,
ivec_resize_shared: ValueRef,
ivec_spill_shared: ValueRef,
ivec_grow: ValueRef,
ivec_push: ValueRef,
cmp_type: ValueRef,
log_type: ValueRef,
@ -95,19 +92,13 @@ fn declare_upcalls(_tn: type_names, tydesc_type: TypeRef,
d(~"get_type_desc",
[T_ptr(T_nil()), T_size_t(), T_size_t(), T_size_t(),
T_ptr(T_ptr(tydesc_type)), T_int()], T_ptr(tydesc_type)),
ivec_resize:
d(~"ivec_resize", [T_ptr(T_opaque_ivec()), T_int()], T_void()),
ivec_spill:
d(~"ivec_spill", [T_ptr(T_opaque_ivec()), T_int()], T_void()),
ivec_resize_shared:
d(~"ivec_resize_shared", [T_ptr(T_opaque_ivec()), T_int()],
T_void()),
ivec_spill_shared:
d(~"ivec_spill_shared", [T_ptr(T_opaque_ivec()), T_int()],
ivec_grow:
d(~"vec_grow", [T_ptr(T_ptr(T_opaque_ivec())), T_int()],
T_void()),
ivec_push:
d(~"ivec_push", [T_ptr(T_opaque_ivec()), T_ptr(tydesc_type),
T_ptr(T_i8())], T_void()),
d(~"vec_push",
[T_ptr(T_ptr(T_opaque_ivec())), T_ptr(tydesc_type),
T_ptr(T_i8())], T_void()),
cmp_type:
dr(~"cmp_type",
[T_ptr(T_i1()), taskptr_type, T_ptr(tydesc_type),

View File

@ -313,7 +313,6 @@ fn shape_of(ccx: &@crate_ctxt, t: ty::t) -> [u8] {
s += [shape_ivec];
add_bool(s, true); // type is POD
let unit_ty = ty::mk_mach(ccx.tcx, ast::ty_u8);
add_size_hint(ccx, s, unit_ty);
add_substr(s, shape_of(ccx, unit_ty));
}
@ -365,7 +364,6 @@ fn shape_of(ccx: &@crate_ctxt, t: ty::t) -> [u8] {
ty::ty_vec(mt) {
s += [shape_ivec];
add_bool(s, ty::type_is_pod(ccx.tcx, mt.ty));
add_size_hint(ccx, s, mt.ty);
add_substr(s, shape_of(ccx, mt.ty));
}
ty::ty_rec(fields) {
@ -416,14 +414,6 @@ fn shape_of(ccx: &@crate_ctxt, t: ty::t) -> [u8] {
ret s;
}
// Appends a static size/alignment hint for `typ` to the shape string `s`.
// For statically sized types this is the type's real size (via add_u16)
// followed by its alignment as a single byte; for dynamically sized types
// three zero bytes are appended instead — presumably the runtime treats
// 0,0,0 as "no hint available" (TODO confirm against the shape reader).
fn add_size_hint(ccx: &@crate_ctxt, s: &mutable [u8], typ: ty::t) {
// Dynamic size: emit placeholder bytes and bail out early.
if ty::type_has_dynamic_size(ccx.tcx, typ) { s += [0u8, 0u8, 0u8]; ret; }
let llty = trans::type_of(ccx, dummy_sp(), typ);
add_u16(s, trans::llsize_of_real(ccx, llty) as u16);
s += [trans::llalign_of_real(ccx, llty) as u8];
}
// FIXME: We might discover other variants as we traverse these. Handle this.
fn shape_of_variant(ccx: &@crate_ctxt, v: &ty::variant_info) -> [u8] {
let s = [];

View File

@ -205,14 +205,14 @@ fn type_of_inner(cx: &@crate_ctxt, sp: &span, t: ty::t) -> TypeRef {
}
ty::ty_char. { llty = T_char(); }
ty::ty_str. { llty = T_ptr(T_str()); }
ty::ty_istr. { llty = T_ivec(T_i8()); }
ty::ty_istr. { llty = T_ptr(T_ivec(T_i8())); }
ty::ty_tag(did, _) { llty = type_of_tag(cx, sp, did, t); }
ty::ty_box(mt) { llty = T_ptr(T_box(type_of_inner(cx, sp, mt.ty))); }
ty::ty_uniq(t) { llty = T_ptr(type_of_inner(cx, sp, t)); }
ty::ty_vec(mt) {
if ty::type_has_dynamic_size(cx.tcx, mt.ty) {
llty = T_opaque_ivec();
} else { llty = T_ivec(type_of_inner(cx, sp, mt.ty)); }
llty = T_ptr(T_opaque_ivec());
} else { llty = T_ptr(T_ivec(type_of_inner(cx, sp, mt.ty))); }
}
ty::ty_ptr(mt) { llty = T_ptr(type_of_inner(cx, sp, mt.ty)); }
ty::ty_rec(fields) {
@ -631,16 +631,6 @@ fn dynamic_size_of(cx: &@block_ctxt, t: ty::t) -> result {
} else { max_size_val };
ret rslt(bcx, total_size);
}
ty::ty_vec(mt) {
let rs = size_of(cx, mt.ty);
let bcx = rs.bcx;
let llunitsz = rs.val;
let llsz =
bld::Add(bcx, llsize_of(T_opaque_ivec()),
bld::Mul(bcx, llunitsz,
C_uint(abi::ivec_default_length)));
ret rslt(bcx, llsz);
}
}
}
@ -663,13 +653,6 @@ fn dynamic_align_of(cx: &@block_ctxt, t: ty::t) -> result {
ty::ty_tag(_, _) {
ret rslt(cx, C_int(1)); // FIXME: stub
}
ty::ty_vec(tm) {
let rs = align_of(cx, tm.ty);
let bcx = rs.bcx;
let llunitalign = rs.val;
let llalign = umax(bcx, llalign_of(T_int()), llunitalign);
ret rslt(bcx, llalign);
}
ty::ty_tup(elts) {
let a = C_int(1);
let bcx = cx;
@ -1326,16 +1309,16 @@ fn make_copy_glue(cx: &@block_ctxt, src: ValueRef, dst: ValueRef, t: ty::t) {
}
fn make_take_glue(cx: &@block_ctxt, v: ValueRef, t: ty::t) {
let bcx = cx;
// NB: v is an *alias* of type t here, not a direct value.
let bcx;
if ty::type_is_boxed(bcx_tcx(cx), t) {
bcx = incr_refcnt_of_boxed(cx, bld::Load(cx, v)).bcx;
} else if ty::type_is_structural(bcx_tcx(cx), t) {
bcx = duplicate_heap_parts_if_necessary(cx, v, t).bcx;
if ty::type_is_boxed(bcx_tcx(bcx), t) {
bcx = incr_refcnt_of_boxed(bcx, bld::Load(bcx, v)).bcx;
} else if ty::type_is_structural(bcx_tcx(bcx), t) {
bcx = iter_structural_ty(bcx, v, t, take_ty).bcx;
} else { bcx = cx; }
} else if ty::type_is_ivec(bcx_tcx(bcx), t) {
bcx = ivec::duplicate(bcx, v);
bcx = ivec::iter_ivec(bcx, v, t, take_ty).bcx;
}
build_return(bcx);
}
@ -1426,51 +1409,17 @@ fn make_free_glue(cx: &@block_ctxt, v0: ValueRef, t: ty::t) {
build_return(rs.bcx);
}
// Emits code that frees the heap part of an interior vector, if it has one.
// Layout convention (pre-change ivecs): a stack length of zero marks a
// vector whose data may have been spilled ("heapified"); in that case the
// value is reinterpreted as a heap stub whose third word points at the
// heap allocation. `v0` is a pointer to the ivec; returns in a fresh block.
fn maybe_free_ivec_heap_part(cx: &@block_ctxt, v0: ValueRef, unit_ty: ty::t)
-> result {
let llunitty = type_of_or_i8(cx, unit_ty);
// Load the on-stack length word (ivec_elt_len) of the vector.
let stack_len =
bld::Load(cx, bld::InBoundsGEP(cx, v0,
[C_int(0),
C_uint(abi::ivec_elt_len)]));
let maybe_on_heap_cx = new_sub_block_ctxt(cx, ~"maybe_on_heap");
let next_cx = new_sub_block_ctxt(cx, ~"next");
// stack_len == 0 means the payload *may* live on the heap.
let maybe_on_heap =
bld::ICmp(cx, lib::llvm::LLVMIntEQ, stack_len, C_int(0));
bld::CondBr(cx, maybe_on_heap, maybe_on_heap_cx.llbb, next_cx.llbb);
// Might be on the heap. Load the heap pointer and free it. (It's ok to
// free a null pointer.)
let stub_ptr =
bld::PointerCast(maybe_on_heap_cx, v0, T_ptr(T_ivec_heap(llunitty)));
let heap_ptr =
{
let v = [C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)];
let m = bld::InBoundsGEP(maybe_on_heap_cx, stub_ptr, v);
bld::Load(maybe_on_heap_cx, m)
};
let after_free_cx = trans_shared_free(maybe_on_heap_cx, heap_ptr).bcx;
bld::Br(after_free_cx, next_cx.llbb);
// Both paths rejoin in next_cx; no meaningful value is produced.
ret rslt(next_cx, C_nil());
}
fn make_drop_glue(cx: &@block_ctxt, v0: ValueRef, t: ty::t) {
// NB: v0 is an *alias* of type t here, not a direct value.
let ccx = bcx_ccx(cx);
let rs =
alt ty::struct(ccx.tcx, t) {
ty::ty_str. { decr_refcnt_maybe_free(cx, v0, v0, t) }
ty::ty_vec(tm) {
let v1;
if ty::type_has_dynamic_size(ccx.tcx, tm.ty) {
v1 = bld::PointerCast(cx, v0, T_ptr(T_opaque_ivec()));
} else { v1 = v0; }
let rslt = iter_structural_ty(cx, v1, t, drop_ty);
maybe_free_ivec_heap_part(rslt.bcx, v1, tm.ty)
ty::ty_vec(_) {
rslt(ivec::make_drop_glue(cx, v0, t), C_nil())
}
ty::ty_istr. {
maybe_free_ivec_heap_part(cx, v0,
ty::mk_mach(ccx.tcx, ast::ty_u8))
rslt(ivec::make_drop_glue(cx, v0, t), C_nil())
}
ty::ty_box(_) { decr_refcnt_maybe_free(cx, v0, v0, t) }
ty::ty_uniq(_) { trans_shared_free(cx, bld::Load(cx, v0)) }
@ -1489,7 +1438,7 @@ fn make_drop_glue(cx: &@block_ctxt, v0: ValueRef, t: ty::t) {
}
_ {
if ty::type_has_pointers(ccx.tcx, t) &&
ty::type_is_structural(ccx.tcx, t) {
ty::type_is_structural(ccx.tcx, t) {
iter_structural_ty(cx, v0, t, drop_ty)
} else { rslt(cx, C_nil()) }
}
@ -1721,7 +1670,8 @@ fn iter_structural_ty(cx: &@block_ctxt, v: ValueRef, t: ty::t,
-> result {
ret f(cx, av, t);
}
ret iter_structural_ty_full(cx, v, t, bind adaptor_fn(f, _, _, _));
let x = iter_structural_ty_full(cx, v, t, bind adaptor_fn(f, _, _, _));
ret x;
}
fn load_inbounds(cx: &@block_ctxt, p: ValueRef, idxs: &[ValueRef]) ->
@ -1756,61 +1706,6 @@ fn iter_structural_ty_full(cx: &@block_ctxt, av: ValueRef, t: ty::t,
ret rslt(next_cx, C_nil());
}
// Emits a loop that applies `f` to every element of the interior vector
// at `av`. Builds a header/body/next block structure: the header compares
// a running element pointer against a precomputed end pointer, the body
// invokes `f` and advances the pointer, and control falls through to the
// next block when the end is reached.
fn iter_ivec(bcx: @block_ctxt, av: ValueRef, unit_ty: ty::t,
f: &val_and_ty_fn) -> result {
// FIXME: "unimplemented rebinding existing function" workaround
fn adapter(bcx: &@block_ctxt, av: ValueRef, unit_ty: ty::t,
f: val_and_ty_fn) -> result {
ret f(bcx, av, unit_ty);
}
let llunitty = type_of_or_i8(bcx, unit_ty);
let rs = size_of(bcx, unit_ty);
let unit_sz = rs.val;
bcx = rs.bcx;
let a_len_and_data = ivec::get_len_and_data(bcx, av, unit_ty);
let len = a_len_and_data.len;
let a_elem = a_len_and_data.data;
bcx = a_len_and_data.bcx;
// Calculate the last pointer address we want to handle.
// NOTE(review): the end pointer is formed by an i8 GEP of `len`, which
// implies `len` is a byte count here — confirm against get_len_and_data.
// TODO: Optimize this when the size of the unit type is statically
// known to not use pointer casts, which tend to confuse LLVM.
let a_elem_i8 = bld::PointerCast(bcx, a_elem, T_ptr(T_i8()));
let a_end_i8 = bld::GEP(bcx, a_elem_i8, [len]);
let a_end = bld::PointerCast(bcx, a_end_i8, T_ptr(llunitty));
// The running element pointer lives in a stack slot so the loop body can
// update it across basic blocks.
let dest_elem_ptr = alloca(bcx, T_ptr(llunitty));
bld::Store(bcx, a_elem, dest_elem_ptr);
// Now perform the iteration.
let loop_header_cx = new_sub_block_ctxt(
bcx, ~"iter_ivec_loop_header");
bld::Br(bcx, loop_header_cx.llbb);
let dest_elem = bld::Load(loop_header_cx, dest_elem_ptr);
let not_yet_at_end =
bld::ICmp(loop_header_cx, lib::llvm::LLVMIntULT, dest_elem,
a_end);
let loop_body_cx = new_sub_block_ctxt(bcx, ~"iter_ivec_loop_body");
let next_cx = new_sub_block_ctxt(bcx, ~"iter_ivec_next");
bld::CondBr(loop_header_cx, not_yet_at_end, loop_body_cx.llbb,
next_cx.llbb);
rs = f(loop_body_cx, dest_elem, unit_ty);
loop_body_cx = rs.bcx;
// Dynamically sized units advance by their runtime byte size; statically
// sized units advance by one element of llunitty.
let increment;
if ty::type_has_dynamic_size(bcx_tcx(bcx), unit_ty) {
increment = unit_sz;
} else { increment = C_int(1); }
incr_ptr(loop_body_cx, dest_elem, increment, dest_elem_ptr);
bld::Br(loop_body_cx, loop_header_cx.llbb);
ret rslt(next_cx, C_nil());
}
fn iter_variant(cx: @block_ctxt, a_tup: ValueRef,
variant: &ty::variant_info, tps: &[ty::t],
tid: &ast::def_id, f: &val_and_ty_fn) -> result {
@ -1913,11 +1808,6 @@ fn iter_structural_ty_full(cx: &@block_ctxt, av: ValueRef, t: ty::t,
bld::GEP(cx, av, [C_int(0), C_int(abi::obj_field_box)]);
ret iter_boxpp(cx, box_cell_a, f);
}
ty::ty_vec(unit_tm) { ret iter_ivec(cx, av, unit_tm.ty, f); }
ty::ty_istr. {
let unit_ty = ty::mk_mach(bcx_tcx(cx), ast::ty_u8);
ret iter_ivec(cx, av, unit_ty, f);
}
_ { bcx_ccx(cx).sess.unimpl(~"type in iter_structural_ty_full"); }
}
ret r;
@ -1987,25 +1877,21 @@ fn iter_sequence_inner(cx: &@block_ctxt, src: ValueRef,
// Iterates through the elements of a vec or str.
fn iter_sequence(cx: @block_ctxt, v: ValueRef, t: ty::t, f: &val_and_ty_fn)
-> result {
fn iter_sequence_body(cx: @block_ctxt, v: ValueRef, elt_ty: ty::t,
fn iter_sequence_body(bcx: @block_ctxt, v: ValueRef, elt_ty: ty::t,
f: &val_and_ty_fn, trailing_null: bool,
interior: bool) -> result {
let p0;
let len;
let bcx;
let llunit_ty = type_of_or_i8(bcx, elt_ty);
if !interior {
p0 = bld::GEP(cx, v, [C_int(0), C_int(abi::vec_elt_data)]);
let lp = bld::GEP(cx, v, [C_int(0), C_int(abi::vec_elt_fill)]);
len = bld::Load(cx, lp);
bcx = cx;
p0 = bld::GEP(bcx, v, [C_int(0), C_int(abi::vec_elt_data)]);
let lp = bld::GEP(bcx, v, [C_int(0), C_int(abi::vec_elt_fill)]);
len = bld::Load(bcx, lp);
} else {
let len_and_data_rslt = ivec::get_len_and_data(cx, v, elt_ty);
len = len_and_data_rslt.len;
p0 = len_and_data_rslt.data;
bcx = len_and_data_rslt.bcx;
len = ivec::get_fill(bcx, v);
p0 = ivec::get_dataptr(bcx, v, llunit_ty);
}
let llunit_ty = type_of_or_i8(cx, elt_ty);
if trailing_null {
let unit_sz = size_of(bcx, elt_ty);
bcx = unit_sz.bcx;
@ -2277,8 +2163,7 @@ fn compare(cx: &@block_ctxt, lhs: ValueRef, rhs: ValueRef, t: ty::t,
}
fn take_ty(cx: &@block_ctxt, v: ValueRef, t: ty::t) -> result {
if ty::type_has_pointers(bcx_tcx(cx), t) ||
ty::type_owns_heap_mem(bcx_tcx(cx), t) {
if ty::type_has_pointers(bcx_tcx(cx), t) {
ret call_tydesc_glue(cx, v, t, abi::tydesc_field_take_glue);
}
ret rslt(cx, C_nil());
@ -2348,19 +2233,6 @@ fn memmove_ty(cx: &@block_ctxt, dst: ValueRef, src: ValueRef, t: ty::t) ->
} else { ret rslt(cx, bld::Store(cx, bld::Load(cx, src), dst)); }
}
// Duplicates any heap-owned memory owned by a value of the given type.
fn duplicate_heap_parts_if_necessary(cx: &@block_ctxt, vptr: ValueRef,
typ: ty::t) -> result {
alt ty::struct(bcx_tcx(cx), typ) {
ty::ty_vec(tm) { ret ivec::duplicate_heap_part(cx, vptr, tm.ty); }
ty::ty_istr. {
ret ivec::duplicate_heap_part(cx, vptr,
ty::mk_mach(bcx_tcx(cx), ast::ty_u8));
}
_ { ret rslt(cx, C_nil()); }
}
}
tag copy_action { INIT; DROP_EXISTING; }
// These are the types that are passed by pointer.
@ -2403,7 +2275,8 @@ fn copy_val_no_check(cx: &@block_ctxt, action: copy_action, dst: ValueRef,
ret cx;
} else if ty::type_is_nil(ccx.tcx, t) || ty::type_is_bot(ccx.tcx, t) {
ret cx;
} else if ty::type_is_boxed(ccx.tcx, t) {
} else if ty::type_is_boxed(ccx.tcx, t) ||
ty::type_is_ivec(ccx.tcx, t) {
let bcx = if action == DROP_EXISTING {
drop_ty(cx, dst, t).bcx
} else { cx };
@ -2414,12 +2287,8 @@ fn copy_val_no_check(cx: &@block_ctxt, action: copy_action, dst: ValueRef,
let bcx = if action == DROP_EXISTING {
drop_ty(cx, dst, t).bcx
} else { cx };
if ty::type_needs_copy_glue(ccx.tcx, t) {
ret call_copy_glue(bcx, dst, src, t, true);
} else {
bcx = memmove_ty(bcx, dst, src, t).bcx;
ret take_ty(bcx, dst, t).bcx;
}
bcx = memmove_ty(bcx, dst, src, t).bcx;
ret take_ty(bcx, dst, t).bcx;
}
ccx.sess.bug(~"unexpected type in trans::copy_val_no_check: " +
ty_to_str(ccx.tcx, t));
@ -2443,7 +2312,7 @@ fn move_val(cx: @block_ctxt, action: copy_action, dst: ValueRef,
} else if ty::type_is_nil(tcx, t) || ty::type_is_bot(tcx, t) {
ret cx;
} else if ty::type_is_unique(tcx, t) ||
ty::type_is_boxed(tcx, t) {
ty::type_is_boxed(tcx, t) {
if src.is_mem { src_val = bld::Load(cx, src_val); }
if action == DROP_EXISTING {
cx = drop_ty(cx, dst, t).bcx;
@ -2456,11 +2325,7 @@ fn move_val(cx: @block_ctxt, action: copy_action, dst: ValueRef,
ret cx;
} else if type_is_structural_or_param(tcx, t) {
if action == DROP_EXISTING { cx = drop_ty(cx, dst, t).bcx; }
if ty::type_needs_copy_glue(tcx, t) {
cx = call_copy_glue(cx, dst, src_val, t, false);
} else {
cx = memmove_ty(cx, dst, src_val, t).bcx;
}
cx = memmove_ty(cx, dst, src_val, t).bcx;
if src.is_mem {
ret zero_alloca(cx, src_val, t).bcx;
} else { // Temporary value
@ -2483,26 +2348,6 @@ fn move_val_if_temp(cx: @block_ctxt, action: copy_action, dst: ValueRef,
ret move_val(cx, action, dst, src, t);
}
// Translates an interior-string literal: allocates an immutable [u8]
// interior vector big enough for the bytes plus a trailing NUL, then
// copies the compile-time C string constant into it. Returns a pointer
// to the vector.
fn trans_lit_istr(cx: &@block_ctxt, s: &istr) -> result {
let vec_ty = ty::mk_vec(bcx_tcx(cx),
{ty: ty::mk_mach(bcx_tcx(cx), ast::ty_u8),
mut: ast::imm});
let strlen = istr::byte_len(s);
let veclen = strlen + 1u; // +1 for \0
let alloc_res = trans_ivec::alloc_with_heap(cx, vec_ty, veclen);
let bcx = alloc_res.bcx;
let llvecptr = alloc_res.llptr;
let llfirsteltptr = alloc_res.llfirsteltptr;
let llcstr = C_cstr(bcx_ccx(cx), s);
// FIXME: We need to avoid this memmove
bcx = call_memmove(bcx, llfirsteltptr, llcstr, C_uint(veclen)).bcx;
ret rslt(bcx, llvecptr);
}
fn trans_crate_lit(cx: &@crate_ctxt, lit: &ast::lit) -> ValueRef {
alt lit.node {
ast::lit_int(i) { ret C_int(i); }
@ -2544,9 +2389,7 @@ fn trans_crate_lit(cx: &@crate_ctxt, lit: &ast::lit) -> ValueRef {
fn trans_lit(cx: &@block_ctxt, lit: &ast::lit) -> result {
alt lit.node {
ast::lit_str(s, ast::sk_unique.) {
ret trans_lit_istr(cx, s);
}
ast::lit_str(s, ast::sk_unique.) { ret ivec::trans_istr(cx, s); }
_ { ret rslt(cx, trans_crate_lit(bcx_ccx(cx), lit)); }
}
}
@ -3492,9 +3335,9 @@ fn trans_index(cx: &@block_ctxt, sp: &span, base: &@ast::expr,
maybe_name_value(bcx_ccx(cx), scaled_ix, ~"scaled_ix");
let interior_len_and_data;
if is_interior {
let rslt = ivec::get_len_and_data(bcx, v, unit_ty);
interior_len_and_data = some({len: rslt.len, data: rslt.data});
bcx = rslt.bcx;
let len = ivec::get_fill(bcx, v);
let data = ivec::get_dataptr(bcx, v, type_of_or_i8(bcx, unit_ty));
interior_len_and_data = some({len: len, data: data});
} else { interior_len_and_data = none; }
let lim;
alt interior_len_and_data {
@ -3986,7 +3829,11 @@ fn trans_arg_expr(cx: &@block_ctxt, arg: &ty::arg, lldestty0: TypeRef,
val = dst.val;
add_clean_temp(bcx, val, e_ty);
} else {
if lv.is_mem {
if ty::type_is_ivec(ccx.tcx, e_ty) {
let arg_copy = do_spill(bcx, bld::Load(bcx, val));
bcx = take_ty(bcx, arg_copy, e_ty).bcx;
val = bld::Load(bcx, arg_copy);
} else if lv.is_mem {
bcx = take_ty(bcx, val, e_ty).bcx;
val = load_if_immediate(bcx, val, e_ty);
} else if is_ext_vec_plus {
@ -4291,7 +4138,7 @@ fn trans_expr(cx: &@block_ctxt, e: &@ast::expr) -> result {
fn trans_expr_out(cx: &@block_ctxt, e: &@ast::expr, output: out_method) ->
result {
// FIXME Fill in cx.sp
// Fixme Fill in cx.sp
alt e.node {
ast::expr_lit(lit) { ret trans_lit(cx, *lit); }
ast::expr_unary(op, x) {
@ -4534,7 +4381,7 @@ fn with_out_method(work: fn(&out_method) -> result, cx: @block_ctxt,
// immediate-ness of the type.
fn type_is_immediate(ccx: &@crate_ctxt, t: ty::t) -> bool {
ret ty::type_is_scalar(ccx.tcx, t) || ty::type_is_boxed(ccx.tcx, t) ||
ty::type_is_native(ccx.tcx, t);
ty::type_is_native(ccx.tcx, t) || ty::type_is_ivec(ccx.tcx, t);
}
fn do_spill(cx: &@block_ctxt, v: ValueRef) -> ValueRef {
@ -5812,9 +5659,6 @@ fn create_main_wrapper(ccx: &@crate_ctxt, sp: &span, main_llfn: ValueRef,
let args = [lloutputarg, lltaskarg, llenvarg];
if takes_ivec { args += [llargvarg]; }
bld::FastCall(bcx, main_llfn, args);
// We're responsible for freeing the arg vector
bcx = maybe_free_ivec_heap_part(bcx, llargvarg,
ty::mk_str(ccx.tcx)).bcx;
build_return(bcx);
finish_fn(fcx, lltop);

View File

@ -5,6 +5,7 @@
import std::int;
import std::vec;
import std::vec::to_ptr;
import std::str;
import std::istr;
import std::uint;
@ -454,7 +455,7 @@ fn struct_elt(llstructty: TypeRef, n: uint) -> TypeRef {
let elt_count = llvm::LLVMCountStructElementTypes(llstructty);
assert (n < elt_count);
let elt_tys = std::vec::init_elt(T_nil(), elt_count);
llvm::LLVMGetStructElementTypes(llstructty, std::vec::to_ptr(elt_tys));
llvm::LLVMGetStructElementTypes(llstructty, to_ptr(elt_tys));
ret llvm::LLVMGetElementType(elt_tys[n]);
}
@ -539,7 +540,7 @@ fn T_size_t() -> TypeRef {
}
fn T_fn(inputs: &[TypeRef], output: TypeRef) -> TypeRef {
ret llvm::LLVMFunctionType(output, std::vec::to_ptr(inputs),
ret llvm::LLVMFunctionType(output, to_ptr(inputs),
std::vec::len::<TypeRef>(inputs), False);
}
@ -550,8 +551,8 @@ fn T_fn_pair(cx: &crate_ctxt, tfn: TypeRef) -> TypeRef {
// Pointer to `t` in the default (0) address space.
fn T_ptr(t: TypeRef) -> TypeRef {
    ret llvm::LLVMPointerType(t, 0u);
}
fn T_struct(elts: &[TypeRef]) -> TypeRef {
ret llvm::LLVMStructType(std::vec::to_ptr(elts), std::vec::len(elts),
False);
ret llvm::LLVMStructType(to_ptr(elts),
std::vec::len(elts), False);
}
fn T_named_struct(name: &istr) -> TypeRef {
@ -562,8 +563,8 @@ fn T_named_struct(name: &istr) -> TypeRef {
}
fn set_struct_body(t: TypeRef, elts: &[TypeRef]) {
llvm::LLVMStructSetBody(t, std::vec::to_ptr(elts), std::vec::len(elts),
False);
llvm::LLVMStructSetBody(t, to_ptr(elts),
std::vec::len(elts), False);
}
// A struct type with no fields.
fn T_empty_struct() -> TypeRef {
    let no_elts: [TypeRef] = [];
    ret T_struct(no_elts);
}
@ -606,7 +607,7 @@ fn T_tydesc_field(cx: &crate_ctxt, field: int) -> TypeRef {
let tydesc_elts: [TypeRef] =
std::vec::init_elt::<TypeRef>(T_nil(), abi::n_tydesc_fields as uint);
llvm::LLVMGetStructElementTypes(cx.tydesc_type,
std::vec::to_ptr::<TypeRef>(tydesc_elts));
to_ptr::<TypeRef>(tydesc_elts));
let t = llvm::LLVMGetElementType(tydesc_elts[field]);
ret t;
}
@ -676,48 +677,14 @@ fn T_opaque_vec_ptr() -> TypeRef { ret T_ptr(T_evec(T_int())); }
//
// TODO: Support user-defined vector sizes.
fn T_ivec(t: TypeRef) -> TypeRef {
ret T_struct([T_int(), // Length ("fill"; if zero, heapified)
T_int(), // Alloc
T_array(t, abi::ivec_default_length)]); // Body elements
ret T_struct([T_int(), // fill
T_int(), // alloc
T_array(t, 0u)]); // elements
}
// Note that the size of this one is in bytes.
fn T_opaque_ivec() -> TypeRef {
ret T_struct([T_int(), // Length ("fill"; if zero, heapified)
T_int(), // Alloc
T_array(T_i8(), 0u)]); // Body elements
}
fn T_ivec_heap_part(t: TypeRef) -> TypeRef {
ret T_struct([T_int(), // Real length
T_array(t, 0u)]); // Body elements
}
// Interior vector on the heap, also known as the "stub". Cast to this when
// the allocated length (second element of T_ivec above) is zero.
fn T_ivec_heap(t: TypeRef) -> TypeRef {
ret T_struct([T_int(), // Length (zero)
T_int(), // Alloc
T_ptr(T_ivec_heap_part(t))]); // Pointer
}
fn T_opaque_ivec_heap_part() -> TypeRef {
ret T_struct([T_int(), // Real length
T_array(T_i8(), 0u)]); // Body elements
}
fn T_opaque_ivec_heap() -> TypeRef {
ret T_struct([T_int(), // Length (zero)
T_int(), // Alloc
T_ptr(T_opaque_ivec_heap_part())]); // Pointer
ret T_ivec(T_i8());
}
// A (refcounted, exterior) string is an exterior vector of bytes.
fn T_str() -> TypeRef {
    ret T_evec(T_i8());
}

View File

@ -8,768 +8,310 @@ import trans::{call_memmove, trans_shared_malloc, llsize_of,
alloca, array_alloca, size_of, llderivedtydescs_block_ctxt,
lazily_emit_tydesc_glue, get_tydesc, load_inbounds,
move_val_if_temp, trans_lval, node_id_type,
new_sub_block_ctxt, tps_normal};
import bld = trans_build;
new_sub_block_ctxt, tps_normal, do_spill};
import trans_build::*;
import trans_common::*;
fn alloc_with_heap(bcx: @block_ctxt, typ: &ty::t, vecsz: uint) ->
{bcx: @block_ctxt,
unit_ty: ty::t,
llunitsz: ValueRef,
llptr: ValueRef,
llfirsteltptr: ValueRef} {
let unit_ty;
alt ty::struct(bcx_tcx(bcx), typ) {
ty::ty_vec(mt) { unit_ty = mt.ty; }
_ { bcx_ccx(bcx).sess.bug(~"non-ivec type in trans_ivec"); }
}
let llunitty = type_of_or_i8(bcx, unit_ty);
let ares = alloc(bcx, unit_ty);
bcx = ares.bcx;
let llvecptr = ares.llptr;
let unit_sz = ares.llunitsz;
let llalen = ares.llalen;
add_clean_temp(bcx, llvecptr, typ);
let lllen = bld::Mul(bcx, C_uint(vecsz), unit_sz);
// Allocate the vector pieces and store length and allocated length.
let llfirsteltptr;
if vecsz > 0u && vecsz <= abi::ivec_default_length {
// Interior case.
bld::Store(bcx, lllen,
bld::InBoundsGEP(bcx, llvecptr,
[C_int(0),
C_uint(abi::ivec_elt_len)]));
bld::Store(bcx, llalen,
bld::InBoundsGEP(bcx, llvecptr,
[C_int(0),
C_uint(abi::ivec_elt_alen)]));
llfirsteltptr =
bld::InBoundsGEP(bcx, llvecptr,
[C_int(0), C_uint(abi::ivec_elt_elems),
C_int(0)]);
} else {
// Heap case.
let stub_z = [C_int(0), C_uint(abi::ivec_heap_stub_elt_zero)];
let stub_a = [C_int(0), C_uint(abi::ivec_heap_stub_elt_alen)];
let stub_p = [C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)];
let llstubty = T_ivec_heap(llunitty);
let llstubptr = bld::PointerCast(bcx, llvecptr, T_ptr(llstubty));
bld::Store(bcx, C_int(0), bld::InBoundsGEP(bcx, llstubptr, stub_z));
let llheapty = T_ivec_heap_part(llunitty);
if vecsz == 0u {
// Null heap pointer indicates a zero-length vector.
bld::Store(bcx, llalen, bld::InBoundsGEP(bcx, llstubptr, stub_a));
bld::Store(bcx, C_null(T_ptr(llheapty)),
bld::InBoundsGEP(bcx, llstubptr, stub_p));
llfirsteltptr = C_null(T_ptr(llunitty));
} else {
bld::Store(bcx, lllen, bld::InBoundsGEP(bcx, llstubptr, stub_a));
let llheapsz = bld::Add(bcx, llsize_of(llheapty), lllen);
let rslt = trans_shared_malloc(bcx, T_ptr(llheapty), llheapsz);
bcx = rslt.bcx;
let llheapptr = rslt.val;
bld::Store(bcx, llheapptr,
bld::InBoundsGEP(bcx, llstubptr, stub_p));
let heap_l = [C_int(0), C_uint(abi::ivec_heap_elt_len)];
bld::Store(bcx, lllen, bld::InBoundsGEP(bcx, llheapptr, heap_l));
llfirsteltptr =
bld::InBoundsGEP(bcx, llheapptr,
[C_int(0),
C_uint(abi::ivec_heap_elt_elems),
C_int(0)]);
}
}
ret {
bcx: bcx,
unit_ty: unit_ty,
llunitsz: unit_sz,
llptr: llvecptr,
llfirsteltptr: llfirsteltptr};
fn get_fill(bcx: &@block_ctxt, vptr: ValueRef) -> ValueRef {
Load(bcx, InBoundsGEP(bcx, vptr, [C_int(0), C_uint(abi::ivec_elt_fill)]))
}
fn get_alloc(bcx: &@block_ctxt, vptr: ValueRef) -> ValueRef {
Load(bcx, InBoundsGEP(bcx, vptr, [C_int(0), C_uint(abi::ivec_elt_alloc)]))
}
fn get_dataptr(bcx: &@block_ctxt, vpt: ValueRef,
unit_ty: TypeRef) -> ValueRef {
let ptr = InBoundsGEP(bcx, vpt, [C_int(0), C_uint(abi::ivec_elt_elems)]);
PointerCast(bcx, ptr, T_ptr(unit_ty))
}
fn trans_ivec(bcx: @block_ctxt, args: &[@ast::expr],
fn pointer_add(bcx: &@block_ctxt, ptr: ValueRef, bytes: ValueRef)
-> ValueRef {
let old_ty = val_ty(ptr);
let bptr = PointerCast(bcx, ptr, T_ptr(T_i8()));
ret PointerCast(bcx, InBoundsGEP(bcx, bptr, [bytes]), old_ty);
}
// FIXME factor out a scaling version wrapping a non-scaling version
// Allocates a fresh uniquely-owned vector of type `vec_ty` on the shared
// heap. `vecsz` is the element count when is_scaled is false, or an
// already-scaled byte count when true. Both the fill and alloc header
// words are initialized to the byte length, and the new pointer is
// registered as a cleanup temporary.
fn alloc(bcx: &@block_ctxt, vec_ty: &ty::t, vecsz: ValueRef, is_scaled: bool)
-> {bcx: @block_ctxt,
val: ValueRef,
unit_ty: ty::t,
llunitsz: ValueRef,
llunitty: TypeRef} {
let unit_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
let llunitty = type_of_or_i8(bcx, unit_ty);
let llvecty = T_ivec(llunitty);
// NB: each destructuring `let {bcx, ...}` shadows the previous bcx.
let {bcx, val: unit_sz} = size_of(bcx, unit_ty);
// fill = byte length of the element data.
let fill = if is_scaled { vecsz }
else { Mul(bcx, vecsz, unit_sz) };
// Total allocation = header size + data bytes.
let vecsize = Add(bcx, fill, llsize_of(llvecty));
let {bcx, val: vecptr} =
trans_shared_malloc(bcx, T_ptr(llvecty), vecsize);
add_clean_temp(bcx, vecptr, vec_ty);
Store(bcx, fill, InBoundsGEP
(bcx, vecptr, [C_int(0), C_uint(abi::ivec_elt_fill)]));
Store(bcx, fill, InBoundsGEP
(bcx, vecptr, [C_int(0), C_uint(abi::ivec_elt_alloc)]));
ret {bcx: bcx, val: vecptr,
unit_ty: unit_ty, llunitsz: unit_sz, llunitty: llunitty};
}
// Emits code that replaces the vector pointed to by `vptrptr` with a
// fresh, exactly-sized copy: malloc header+data, memmove the old contents
// over, set the copy's alloc word to its fill (no slack capacity), and
// store the new pointer back through `vptrptr`.
fn duplicate(bcx: &@block_ctxt, vptrptr: ValueRef) -> @block_ctxt {
let vptr = Load(bcx, vptrptr);
let fill = get_fill(bcx, vptr);
// Size of the copy = data bytes + header size.
let size = Add(bcx, fill, llsize_of(T_opaque_ivec()));
let {bcx, val: newptr} = trans_shared_malloc(bcx, val_ty(vptr), size);
let bcx = call_memmove(bcx, newptr, vptr, size).bcx;
// The memmove copied the old alloc word too; reset it to the exact size.
Store(bcx, fill,
InBoundsGEP(bcx, newptr, [C_int(0), C_uint(abi::ivec_elt_alloc)]));
Store(bcx, newptr, vptrptr);
ret bcx;
}
// Emits drop glue for a uniquely-owned vector: if the pointer is non-null,
// drop each element (only when the element type needs dropping) and then
// free the allocation. Null pointers skip straight to the join block.
fn make_drop_glue(bcx: &@block_ctxt, vptrptr: ValueRef, vec_ty: ty::t)
-> @block_ctxt {
let unit_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
let vptr = Load(bcx, vptrptr);
let drop_cx = new_sub_block_ctxt(bcx, ~"drop");
let next_cx = new_sub_block_ctxt(bcx, ~"next");
let null_test = IsNull(bcx, vptr);
CondBr(bcx, null_test, next_cx.llbb, drop_cx.llbb);
// Element drops are only emitted when the unit type actually needs them.
if ty::type_needs_drop(bcx_tcx(bcx), unit_ty) {
drop_cx = iter_ivec(drop_cx, vptrptr, vec_ty, trans::drop_ty).bcx;
}
drop_cx = trans::trans_shared_free(drop_cx, vptr).bcx;
Br(drop_cx, next_cx.llbb);
ret next_cx;
}
fn trans_ivec(bcx: &@block_ctxt, args: &[@ast::expr],
id: ast::node_id) -> result {
let typ = node_id_type(bcx_ccx(bcx), id);
let alloc_res = alloc_with_heap(bcx, typ, vec::len(args));
let bcx = alloc_res.bcx;
let unit_ty = alloc_res.unit_ty;
let llunitsz = alloc_res.llunitsz;
let llvecptr = alloc_res.llptr;
let llfirsteltptr = alloc_res.llfirsteltptr;
let vec_ty = node_id_type(bcx_ccx(bcx), id);
let {bcx, val: vptr, llunitsz, unit_ty, llunitty} =
alloc(bcx, vec_ty, C_uint(vec::len(args)), false);
// Store the individual elements.
let dataptr = get_dataptr(bcx, vptr, llunitty);
let i = 0u;
for e: @ast::expr in args {
for e in args {
let lv = trans_lval(bcx, e);
bcx = lv.res.bcx;
let lleltptr;
if ty::type_has_dynamic_size(bcx_tcx(bcx), unit_ty) {
lleltptr =
bld::InBoundsGEP(bcx, llfirsteltptr,
[bld::Mul(bcx, C_uint(i), llunitsz)]);
let lleltptr = if ty::type_has_dynamic_size(bcx_tcx(bcx), unit_ty) {
InBoundsGEP(bcx, dataptr, [Mul(bcx, C_uint(i), llunitsz)])
} else {
lleltptr = bld::InBoundsGEP(bcx, llfirsteltptr, [C_uint(i)]);
}
InBoundsGEP(bcx, dataptr, [C_uint(i)])
};
bcx = move_val_if_temp(bcx, INIT, lleltptr, lv, unit_ty);
i += 1u;
}
ret rslt(bcx, llvecptr);
ret rslt(bcx, vptr);
}
// Translates an interior-string literal: allocates a uniquely-owned istr
// sized for the bytes plus a trailing NUL and memmoves the compile-time
// C string constant into its data area. Returns a pointer to the string.
fn trans_istr(bcx: &@block_ctxt, s: istr) -> result {
let veclen = std::istr::byte_len(s) + 1u; // +1 for \0
let {bcx, val: sptr, _} =
alloc(bcx, ty::mk_istr(bcx_tcx(bcx)), C_uint(veclen), false);
let llcstr = C_cstr(bcx_ccx(bcx), s);
let bcx = call_memmove(bcx, get_dataptr(bcx, sptr, T_i8()),
llcstr, C_uint(veclen)).bcx;
ret rslt(bcx, sptr);
}
// Returns the length of an interior vector and a pointer to its first
// element, in that order.
// Handles the three possible states of a (pre-change) interior vector:
// on-stack data (stack_len != 0), heapified with data (non-null heap
// pointer), and heapified but truly empty (null heap pointer). The three
// paths merge through phi nodes in `next_cx`.
fn get_len_and_data(bcx: &@block_ctxt, orig_v: ValueRef, unit_ty: ty::t)
-> {len: ValueRef, data: ValueRef, bcx: @block_ctxt} {
// If this interior vector has dynamic size, we can't assume anything
// about the LLVM type of the value passed in, so we cast it to an
// opaque vector type.
let v;
if ty::type_has_dynamic_size(bcx_tcx(bcx), unit_ty) {
v = bld::PointerCast(bcx, orig_v, T_ptr(T_opaque_ivec()));
} else { v = orig_v; }
let llunitty = type_of_or_i8(bcx, unit_ty);
let stack_len =
load_inbounds(bcx, v, [C_int(0), C_uint(abi::ivec_elt_len)]);
let stack_elem =
bld::InBoundsGEP(bcx, v,
[C_int(0), C_uint(abi::ivec_elt_elems),
C_int(0)]);
// A zero stack length marks a (possibly) heapified vector.
let on_heap =
bld::ICmp(bcx, lib::llvm::LLVMIntEQ, stack_len, C_int(0));
let on_heap_cx = new_sub_block_ctxt(bcx, ~"on_heap");
let next_cx = new_sub_block_ctxt(bcx, ~"next");
bld::CondBr(bcx, on_heap, on_heap_cx.llbb, next_cx.llbb);
let heap_stub =
bld::PointerCast(on_heap_cx, v, T_ptr(T_ivec_heap(llunitty)));
let heap_ptr =
load_inbounds(on_heap_cx, heap_stub,
[C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)]);
// Check whether the heap pointer is null. If it is, the vector length
// is truly zero.
let llstubty = T_ivec_heap(llunitty);
let llheapptrty = struct_elt(llstubty, abi::ivec_heap_stub_elt_ptr);
let heap_ptr_is_null =
bld::ICmp(on_heap_cx, lib::llvm::LLVMIntEQ, heap_ptr,
C_null(T_ptr(llheapptrty)));
let zero_len_cx = new_sub_block_ctxt(bcx, ~"zero_len");
let nonzero_len_cx = new_sub_block_ctxt(bcx, ~"nonzero_len");
bld::CondBr(on_heap_cx, heap_ptr_is_null, zero_len_cx.llbb,
nonzero_len_cx.llbb);
// Technically this context is unnecessary, but it makes this function
// clearer.
let zero_len = C_int(0);
let zero_elem = C_null(T_ptr(llunitty));
bld::Br(zero_len_cx, next_cx.llbb);
// If we're here, then we actually have a heapified vector.
let heap_len =
load_inbounds(nonzero_len_cx, heap_ptr,
[C_int(0), C_uint(abi::ivec_heap_elt_len)]);
let heap_elem =
{
let v =
[C_int(0), C_uint(abi::ivec_heap_elt_elems), C_int(0)];
bld::InBoundsGEP(nonzero_len_cx, heap_ptr, v)
};
bld::Br(nonzero_len_cx, next_cx.llbb);
// Now we can figure out the length of `v` and get a pointer to its
// first element.
// Phi incoming blocks must match the predecessors of next_cx:
// bcx (stack case), zero_len_cx, nonzero_len_cx — in that order.
let len =
bld::Phi(next_cx, T_int(), [stack_len, zero_len, heap_len],
[bcx.llbb, zero_len_cx.llbb,
nonzero_len_cx.llbb]);
let elem =
bld::Phi(next_cx, T_ptr(llunitty),
[stack_elem, zero_elem, heap_elem],
[bcx.llbb, zero_len_cx.llbb,
nonzero_len_cx.llbb]);
ret {len: len, data: elem, bcx: next_cx};
}
// Returns a tuple consisting of a pointer to the newly-reserved space and
// a block context. Updates the length appropriately.
fn reserve_space(cx: &@block_ctxt, llunitty: TypeRef, v: ValueRef,
len_needed: ValueRef) -> result {
let stack_len_ptr =
bld::InBoundsGEP(cx, v, [C_int(0), C_uint(abi::ivec_elt_len)]);
let stack_len = bld::Load(cx, stack_len_ptr);
let alen =
load_inbounds(cx, v, [C_int(0), C_uint(abi::ivec_elt_alen)]);
// There are four cases we have to consider:
// (1) On heap, no resize necessary.
// (2) On heap, need to resize.
// (3) On stack, no resize necessary.
// (4) On stack, need to spill to heap.
let maybe_on_heap =
bld::ICmp(cx, lib::llvm::LLVMIntEQ, stack_len, C_int(0));
let maybe_on_heap_cx = new_sub_block_ctxt(cx, ~"maybe_on_heap");
let on_stack_cx = new_sub_block_ctxt(cx, ~"on_stack");
bld::CondBr(cx, maybe_on_heap, maybe_on_heap_cx.llbb,
on_stack_cx.llbb);
let next_cx = new_sub_block_ctxt(cx, ~"next");
// We're possibly on the heap, unless the vector is zero-length.
let stub_p = [C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)];
let stub_ptr =
bld::PointerCast(maybe_on_heap_cx, v,
T_ptr(T_ivec_heap(llunitty)));
let heap_ptr = load_inbounds(maybe_on_heap_cx, stub_ptr, stub_p);
let on_heap =
bld::ICmp(maybe_on_heap_cx, lib::llvm::LLVMIntNE, heap_ptr,
C_null(val_ty(heap_ptr)));
let on_heap_cx = new_sub_block_ctxt(cx, ~"on_heap");
bld::CondBr(maybe_on_heap_cx, on_heap, on_heap_cx.llbb,
on_stack_cx.llbb);
// We're definitely on the heap. Check whether we need to resize.
let heap_len_ptr =
bld::InBoundsGEP(on_heap_cx, heap_ptr,
[C_int(0),
C_uint(abi::ivec_heap_elt_len)]);
let heap_len = bld::Load(on_heap_cx, heap_len_ptr);
let new_heap_len = bld::Add(on_heap_cx, heap_len, len_needed);
let heap_len_unscaled =
bld::UDiv(on_heap_cx, heap_len, llsize_of(llunitty));
let heap_no_resize_needed =
bld::ICmp(on_heap_cx, lib::llvm::LLVMIntULE, new_heap_len, alen);
let heap_no_resize_cx = new_sub_block_ctxt(cx, ~"heap_no_resize");
let heap_resize_cx = new_sub_block_ctxt(cx, ~"heap_resize");
bld::CondBr(on_heap_cx, heap_no_resize_needed, heap_no_resize_cx.llbb,
heap_resize_cx.llbb);
// Case (1): We're on the heap and don't need to resize.
let heap_data_no_resize =
{
let v =
[C_int(0), C_uint(abi::ivec_heap_elt_elems),
heap_len_unscaled];
bld::InBoundsGEP(heap_no_resize_cx, heap_ptr, v)
};
bld::Store(heap_no_resize_cx, new_heap_len, heap_len_ptr);
bld::Br(heap_no_resize_cx, next_cx.llbb);
// Case (2): We're on the heap and need to resize. This path is rare,
// so we delegate to cold glue.
{
let p =
bld::PointerCast(heap_resize_cx, v, T_ptr(T_opaque_ivec()));
let upcall = bcx_ccx(cx).upcalls.ivec_resize_shared;
bld::Call(heap_resize_cx, upcall,
[cx.fcx.lltaskptr, p, new_heap_len]);
}
let heap_ptr_resize = load_inbounds(heap_resize_cx, stub_ptr, stub_p);
let heap_data_resize =
{
let v =
[C_int(0), C_uint(abi::ivec_heap_elt_elems),
heap_len_unscaled];
bld::InBoundsGEP(heap_resize_cx, heap_ptr_resize, v)
};
bld::Br(heap_resize_cx, next_cx.llbb);
// We're on the stack. Check whether we need to spill to the heap.
let new_stack_len = bld::Add(on_stack_cx, stack_len, len_needed);
let stack_no_spill_needed =
bld::ICmp(on_stack_cx, lib::llvm::LLVMIntULE, new_stack_len,
alen);
let stack_len_unscaled =
bld::UDiv(on_stack_cx, stack_len, llsize_of(llunitty));
let stack_no_spill_cx = new_sub_block_ctxt(cx, ~"stack_no_spill");
let stack_spill_cx = new_sub_block_ctxt(cx, ~"stack_spill");
bld::CondBr(on_stack_cx, stack_no_spill_needed,
stack_no_spill_cx.llbb, stack_spill_cx.llbb);
// Case (3): We're on the stack and don't need to spill.
let stack_data_no_spill =
bld::InBoundsGEP(stack_no_spill_cx, v,
[C_int(0),
C_uint(abi::ivec_elt_elems),
stack_len_unscaled]);
bld::Store(stack_no_spill_cx, new_stack_len, stack_len_ptr);
bld::Br(stack_no_spill_cx, next_cx.llbb);
// Case (4): We're on the stack and need to spill. Like case (2), this
// path is rare, so we delegate to cold glue.
{
let p =
bld::PointerCast(stack_spill_cx, v, T_ptr(T_opaque_ivec()));
let upcall = bcx_ccx(cx).upcalls.ivec_spill_shared;
bld::Call(stack_spill_cx, upcall,
[cx.fcx.lltaskptr, p, new_stack_len]);
}
let spill_stub =
bld::PointerCast(stack_spill_cx, v, T_ptr(T_ivec_heap(llunitty)));
let heap_ptr_spill =
load_inbounds(stack_spill_cx, spill_stub, stub_p);
let heap_data_spill =
{
let v =
[C_int(0), C_uint(abi::ivec_heap_elt_elems),
stack_len_unscaled];
bld::InBoundsGEP(stack_spill_cx, heap_ptr_spill, v)
};
bld::Br(stack_spill_cx, next_cx.llbb);
// Phi together the different data pointers to get the result.
let data_ptr =
bld::Phi(next_cx, T_ptr(llunitty),
[heap_data_no_resize, heap_data_resize,
stack_data_no_spill, heap_data_spill],
[heap_no_resize_cx.llbb, heap_resize_cx.llbb,
stack_no_spill_cx.llbb, stack_spill_cx.llbb]);
ret rslt(next_cx, data_ptr);
}
fn trans_append(cx: &@block_ctxt, t: ty::t, lhs: ValueRef,
fn trans_append(cx: &@block_ctxt, vec_ty: ty::t, lhsptr: ValueRef,
rhs: ValueRef) -> result {
// Cast to opaque interior vector types if necessary.
if ty::type_has_dynamic_size(bcx_tcx(cx), t) {
lhs = bld::PointerCast(cx, lhs, T_ptr(T_opaque_ivec()));
rhs = bld::PointerCast(cx, rhs, T_ptr(T_opaque_ivec()));
let unit_ty = ty::sequence_element_type(bcx_tcx(cx), vec_ty);
let dynamic = ty::type_has_dynamic_size(bcx_tcx(cx), unit_ty);
if dynamic {
lhsptr = PointerCast(cx, lhsptr, T_ptr(T_ptr(T_opaque_ivec())));
rhs = PointerCast(cx, rhs, T_ptr(T_opaque_ivec()));
}
let unit_ty = ty::sequence_element_type(bcx_tcx(cx), t);
let llunitty = type_of_or_i8(cx, unit_ty);
let rs = size_of(cx, unit_ty);
let bcx = rs.bcx;
let unit_sz = rs.val;
// Gather the various type descriptors we'll need.
// FIXME (issue #511): This is needed to prevent a leak.
let no_tydesc_info = none;
rs = get_tydesc(bcx, t, false, tps_normal, no_tydesc_info).result;
bcx = rs.bcx;
rs = get_tydesc(bcx, unit_ty, false, tps_normal, no_tydesc_info).result;
bcx = rs.bcx;
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_take_glue, none);
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, none);
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, none);
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_copy_glue, none);
let rhs_len_and_data = get_len_and_data(bcx, rhs, unit_ty);
let rhs_len = rhs_len_and_data.len;
let rhs_data = rhs_len_and_data.data;
bcx = rhs_len_and_data.bcx;
let have_istrs = alt ty::struct(bcx_tcx(cx), t) {
let strings = alt ty::struct(bcx_tcx(cx), vec_ty) {
ty::ty_istr. { true }
ty::ty_vec(_) { false }
_ { bcx_tcx(cx).sess.bug(~"non-istr/ivec in trans_append"); }
};
let extra_len = if have_istrs {
// Only need one of the nulls
bld::Sub(bcx, rhs_len, C_uint(1u))
} else { rhs_len };
let {bcx, val: unit_sz} = size_of(cx, unit_ty);
let llunitty = type_of_or_i8(cx, unit_ty);
rs = reserve_space(bcx, llunitty, lhs, extra_len);
bcx = rs.bcx;
let lhs = Load(bcx, lhsptr);
let self_append = ICmp(bcx, lib::llvm::LLVMIntEQ, lhs, rhs);
let lfill = get_fill(bcx, lhs);
let rfill = get_fill(bcx, rhs);
let new_fill = Add(bcx, lfill, rfill);
if strings { new_fill = Sub(bcx, new_fill, C_int(1)); }
let opaque_lhs = PointerCast(bcx, lhsptr, T_ptr(T_ptr(T_opaque_ivec())));
Call(bcx, bcx_ccx(cx).upcalls.ivec_grow,
[cx.fcx.lltaskptr, opaque_lhs, new_fill]);
// Was overwritten if we resized
let lhs = Load(bcx, lhsptr);
let rhs = Select(bcx, self_append, lhs, rhs);
let lhs_data = if have_istrs {
let lhs_data = rs.val;
let lhs_data_without_null_ptr = alloca(bcx, T_ptr(llunitty));
incr_ptr(bcx, lhs_data, C_int(-1),
lhs_data_without_null_ptr);
bld::Load(bcx, lhs_data_without_null_ptr)
} else {
rs.val
};
let lhs_data = get_dataptr(bcx, lhs, llunitty);
let lhs_off = lfill;
if strings { lhs_off = Sub(bcx, lfill, C_int(1)); }
let write_ptr = pointer_add(bcx, lhs_data, lhs_off);
let write_ptr_ptr = do_spill(bcx, write_ptr);
let end_ptr = pointer_add(bcx, write_ptr, rfill);
let read_ptr_ptr = do_spill(bcx, get_dataptr(bcx, rhs, llunitty));
// If rhs is lhs then our rhs pointer may have changed
rhs_len_and_data = get_len_and_data(bcx, rhs, unit_ty);
rhs_data = rhs_len_and_data.data;
bcx = rhs_len_and_data.bcx;
// Work out the end pointer.
let lhs_unscaled_idx = bld::UDiv(bcx, rhs_len, llsize_of(llunitty));
let lhs_end = bld::InBoundsGEP(bcx, lhs_data, [lhs_unscaled_idx]);
// Now emit the copy loop.
let dest_ptr = alloca(bcx, T_ptr(llunitty));
bld::Store(bcx, lhs_data, dest_ptr);
let src_ptr = alloca(bcx, T_ptr(llunitty));
bld::Store(bcx, rhs_data, src_ptr);
let copy_loop_header_cx = new_sub_block_ctxt(bcx, ~"copy_loop_header");
bld::Br(bcx, copy_loop_header_cx.llbb);
let copy_dest_ptr = bld::Load(copy_loop_header_cx, dest_ptr);
let not_yet_at_end =
bld::ICmp(copy_loop_header_cx, lib::llvm::LLVMIntNE,
copy_dest_ptr, lhs_end);
let copy_loop_body_cx = new_sub_block_ctxt(bcx, ~"copy_loop_body");
let header_cx = new_sub_block_ctxt(bcx, ~"copy_loop_header");
Br(bcx, header_cx.llbb);
let write_ptr = Load(header_cx, write_ptr_ptr);
let not_yet_at_end = ICmp(header_cx, lib::llvm::LLVMIntNE,
write_ptr, end_ptr);
let body_cx = new_sub_block_ctxt(bcx, ~"copy_loop_body");
let next_cx = new_sub_block_ctxt(bcx, ~"next");
bld::CondBr(copy_loop_header_cx, not_yet_at_end,
copy_loop_body_cx.llbb,
next_cx.llbb);
CondBr(header_cx, not_yet_at_end,
body_cx.llbb, next_cx.llbb);
let copy_src_ptr = bld::Load(copy_loop_body_cx, src_ptr);
let copy_src =
load_if_immediate(copy_loop_body_cx, copy_src_ptr, unit_ty);
let post_copy_cx = copy_val
(copy_loop_body_cx, INIT, copy_dest_ptr, copy_src, unit_ty);
let read_ptr = Load(body_cx, read_ptr_ptr);
let body_cx = copy_val(body_cx, INIT, write_ptr,
load_if_immediate(body_cx, read_ptr, unit_ty),
unit_ty);
// Increment both pointers.
if ty::type_has_dynamic_size(bcx_tcx(cx), t) {
if dynamic {
// We have to increment by the dynamically-computed size.
incr_ptr(post_copy_cx, copy_dest_ptr, unit_sz, dest_ptr);
incr_ptr(post_copy_cx, copy_src_ptr, unit_sz, src_ptr);
incr_ptr(body_cx, write_ptr, unit_sz, write_ptr_ptr);
incr_ptr(body_cx, read_ptr, unit_sz, read_ptr_ptr);
} else {
incr_ptr(post_copy_cx, copy_dest_ptr, C_int(1), dest_ptr);
incr_ptr(post_copy_cx, copy_src_ptr, C_int(1), src_ptr);
incr_ptr(body_cx, write_ptr, C_int(1), write_ptr_ptr);
incr_ptr(body_cx, read_ptr, C_int(1), read_ptr_ptr);
}
bld::Br(post_copy_cx, copy_loop_header_cx.llbb);
Br(body_cx, header_cx.llbb);
ret rslt(next_cx, C_nil());
}
fn trans_append_literal(bcx: &@block_ctxt, v: ValueRef, vec_ty: ty::t,
fn trans_append_literal(bcx: &@block_ctxt, vptrptr: ValueRef, vec_ty: ty::t,
vals: &[@ast::expr]) -> @block_ctxt {
let elt_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
let ti = none;
let {bcx, val: td} =
get_tydesc(bcx, elt_ty, false, tps_normal, ti).result;
trans::lazily_emit_all_tydesc_glue(bcx, ti);
let opaque_v = bld::PointerCast(bcx, v, T_ptr(T_opaque_ivec()));
trans::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_take_glue, ti);
let opaque_v = PointerCast(bcx, vptrptr, T_ptr(T_ptr(T_opaque_ivec())));
for val in vals {
let {bcx: e_bcx, val: elt} = trans::trans_expr(bcx, val);
bcx = e_bcx;
let spilled = trans::spill_if_immediate(bcx, elt, elt_ty);
bld::Call(bcx, bcx_ccx(bcx).upcalls.ivec_push,
[bcx.fcx.lltaskptr, opaque_v, td,
bld::PointerCast(bcx, spilled, T_ptr(T_i8()))]);
Call(bcx, bcx_ccx(bcx).upcalls.ivec_push,
[bcx.fcx.lltaskptr, opaque_v, td,
PointerCast(bcx, spilled, T_ptr(T_i8()))]);
}
ret bcx;
}
type alloc_result =
{bcx: @block_ctxt,
llptr: ValueRef,
llunitsz: ValueRef,
llalen: ValueRef};
fn alloc(cx: &@block_ctxt, unit_ty: ty::t) -> alloc_result {
let dynamic = ty::type_has_dynamic_size(bcx_tcx(cx), unit_ty);
let bcx;
if dynamic {
bcx = llderivedtydescs_block_ctxt(cx.fcx);
} else { bcx = cx; }
let llunitsz;
let rslt = size_of(bcx, unit_ty);
bcx = rslt.bcx;
llunitsz = rslt.val;
if dynamic { cx.fcx.llderivedtydescs = bcx.llbb; }
let llalen =
bld::Mul(bcx, llunitsz, C_uint(abi::ivec_default_length));
let llptr;
let llunitty = type_of_or_i8(bcx, unit_ty);
let bcx_result;
if dynamic {
let llarraysz = bld::Add(bcx, llsize_of(T_opaque_ivec()), llalen);
let llvecptr = array_alloca(bcx, T_i8(), llarraysz);
bcx_result = cx;
llptr =
bld::PointerCast(bcx_result, llvecptr,
T_ptr(T_opaque_ivec()));
} else { llptr = alloca(bcx, T_ivec(llunitty)); bcx_result = bcx; }
ret {bcx: bcx_result,
llptr: llptr,
llunitsz: llunitsz,
llalen: llalen};
}
fn trans_add(cx: &@block_ctxt, vec_ty: ty::t, lhs: ValueRef,
fn trans_add(bcx: &@block_ctxt, vec_ty: ty::t, lhs: ValueRef,
rhs: ValueRef) -> result {
let bcx = cx;
let unit_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
let ares = alloc(bcx, unit_ty);
bcx = ares.bcx;
let llvecptr = ares.llptr;
let unit_sz = ares.llunitsz;
let llalen = ares.llalen;
add_clean_temp(bcx, llvecptr, vec_ty);
let llunitty = type_of_or_i8(bcx, unit_ty);
let llheappartty = T_ivec_heap_part(llunitty);
let lhs_len_and_data = get_len_and_data(bcx, lhs, unit_ty);
let lhs_len = lhs_len_and_data.len;
let lhs_data = lhs_len_and_data.data;
bcx = lhs_len_and_data.bcx;
lhs_len = alt ty::struct(bcx_tcx(bcx), vec_ty) {
ty::ty_istr. {
// Forget about the trailing null on the left side
bld::Sub(bcx, lhs_len, C_uint(1u))
}
ty::ty_vec(_) { lhs_len }
_ { bcx_tcx(bcx).sess.bug(~"non-istr/ivec in trans_add") }
let strings = alt ty::struct(bcx_tcx(bcx), vec_ty) {
ty::ty_istr. { true }
ty::ty_vec(_) { false }
};
let lhs_fill = get_fill(bcx, lhs);
if strings { lhs_fill = Sub(bcx, lhs_fill, C_int(1)); }
let rhs_fill = get_fill(bcx, rhs);
let new_fill = Add(bcx, lhs_fill, rhs_fill);
let {bcx, val: new_vec, unit_ty, llunitsz, llunitty} =
alloc(bcx, vec_ty, new_fill, true);
let rhs_len_and_data = get_len_and_data(bcx, rhs, unit_ty);
let rhs_len = rhs_len_and_data.len;
let rhs_data = rhs_len_and_data.data;
bcx = rhs_len_and_data.bcx;
let lllen = bld::Add(bcx, lhs_len, rhs_len);
// We have three cases to handle here:
// (1) Length is zero ([] + []).
// (2) Copy onto stack.
// (3) Allocate on heap and copy there.
// Emit the copy loop
let write_ptr_ptr = do_spill(bcx, get_dataptr(bcx, new_vec, llunitty));
let lhs_ptr = get_dataptr(bcx, lhs, llunitty);
let lhs_ptr_ptr = do_spill(bcx, lhs_ptr);
let lhs_end_ptr = pointer_add(bcx, lhs_ptr, lhs_fill);
let rhs_ptr = get_dataptr(bcx, rhs, llunitty);
let rhs_ptr_ptr = do_spill(bcx, rhs_ptr);
let rhs_end_ptr = pointer_add(bcx, rhs_ptr, rhs_fill);
let len_is_zero =
bld::ICmp(bcx, lib::llvm::LLVMIntEQ, lllen, C_int(0));
let zero_len_cx = new_sub_block_ctxt(bcx, ~"zero_len");
let nonzero_len_cx = new_sub_block_ctxt(bcx, ~"nonzero_len");
bld::CondBr(bcx, len_is_zero, zero_len_cx.llbb, nonzero_len_cx.llbb);
// Case (1): Length is zero.
let stub_z = [C_int(0), C_uint(abi::ivec_heap_stub_elt_zero)];
let stub_a = [C_int(0), C_uint(abi::ivec_heap_stub_elt_alen)];
let stub_p = [C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)];
let vec_l = [C_int(0), C_uint(abi::ivec_elt_len)];
let vec_a = [C_int(0), C_uint(abi::ivec_elt_alen)];
let stub_ptr_zero =
bld::PointerCast(zero_len_cx, llvecptr,
T_ptr(T_ivec_heap(llunitty)));
bld::Store(zero_len_cx, C_int(0),
bld::InBoundsGEP(zero_len_cx, stub_ptr_zero,
stub_z));
bld::Store(zero_len_cx, llalen,
bld::InBoundsGEP(zero_len_cx, stub_ptr_zero,
stub_a));
bld::Store(zero_len_cx, C_null(T_ptr(llheappartty)),
bld::InBoundsGEP(zero_len_cx, stub_ptr_zero,
stub_p));
let next_cx = new_sub_block_ctxt(bcx, ~"next");
bld::Br(zero_len_cx, next_cx.llbb);
// Determine whether we need to spill to the heap.
let on_stack =
bld::ICmp(nonzero_len_cx, lib::llvm::LLVMIntULE, lllen, llalen);
let stack_cx = new_sub_block_ctxt(bcx, ~"stack");
let heap_cx = new_sub_block_ctxt(bcx, ~"heap");
bld::CondBr(nonzero_len_cx, on_stack, stack_cx.llbb, heap_cx.llbb);
// Case (2): Copy onto stack.
bld::Store(stack_cx, lllen,
bld::InBoundsGEP(stack_cx, llvecptr, vec_l));
bld::Store(stack_cx, llalen,
bld::InBoundsGEP(stack_cx, llvecptr, vec_a));
let dest_ptr_stack =
bld::InBoundsGEP(stack_cx, llvecptr,
[C_int(0), C_uint(abi::ivec_elt_elems),
C_int(0)]);
let copy_cx = new_sub_block_ctxt(bcx, ~"copy");
bld::Br(stack_cx, copy_cx.llbb);
// Case (3): Allocate on heap and copy there.
let stub_ptr_heap =
bld::PointerCast(heap_cx, llvecptr, T_ptr(T_ivec_heap(llunitty)));
bld::Store(heap_cx, C_int(0),
bld::InBoundsGEP(heap_cx, stub_ptr_heap, stub_z));
bld::Store(heap_cx, lllen,
bld::InBoundsGEP(heap_cx, stub_ptr_heap, stub_a));
let heap_sz = bld::Add(heap_cx, llsize_of(llheappartty), lllen);
let rs = trans_shared_malloc(heap_cx, T_ptr(llheappartty), heap_sz);
let heap_part = rs.val;
heap_cx = rs.bcx;
bld::Store(heap_cx, heap_part,
bld::InBoundsGEP(heap_cx, stub_ptr_heap, stub_p));
{
let v = [C_int(0), C_uint(abi::ivec_heap_elt_len)];
bld::Store(heap_cx, lllen,
bld::InBoundsGEP(heap_cx, heap_part, v));
}
let dest_ptr_heap =
bld::InBoundsGEP(heap_cx, heap_part,
[C_int(0),
C_uint(abi::ivec_heap_elt_elems),
C_int(0)]);
bld::Br(heap_cx, copy_cx.llbb);
// Emit the copy loop.
let first_dest_ptr =
bld::Phi(copy_cx, T_ptr(llunitty),
[dest_ptr_stack, dest_ptr_heap],
[stack_cx.llbb, heap_cx.llbb]);
let lhs_end_ptr;
let rhs_end_ptr;
if ty::type_has_dynamic_size(bcx_tcx(cx), unit_ty) {
lhs_end_ptr = bld::InBoundsGEP(copy_cx, lhs_data, [lhs_len]);
rhs_end_ptr = bld::InBoundsGEP(copy_cx, rhs_data, [rhs_len]);
} else {
let lhs_len_unscaled = bld::UDiv(copy_cx, lhs_len, unit_sz);
lhs_end_ptr =
bld::InBoundsGEP(copy_cx, lhs_data, [lhs_len_unscaled]);
let rhs_len_unscaled = bld::UDiv(copy_cx, rhs_len, unit_sz);
rhs_end_ptr =
bld::InBoundsGEP(copy_cx, rhs_data, [rhs_len_unscaled]);
}
let dest_ptr_ptr = alloca(copy_cx, T_ptr(llunitty));
bld::Store(copy_cx, first_dest_ptr, dest_ptr_ptr);
let lhs_ptr_ptr = alloca(copy_cx, T_ptr(llunitty));
bld::Store(copy_cx, lhs_data, lhs_ptr_ptr);
let rhs_ptr_ptr = alloca(copy_cx, T_ptr(llunitty));
bld::Store(copy_cx, rhs_data, rhs_ptr_ptr);
let lhs_copy_cx = new_sub_block_ctxt(bcx, ~"lhs_copy");
bld::Br(copy_cx, lhs_copy_cx.llbb);
// Copy in elements from the LHS.
let lhs_ptr = bld::Load(lhs_copy_cx, lhs_ptr_ptr);
let lhs_cx = new_sub_block_ctxt(bcx, ~"lhs_copy_header");
Br(bcx, lhs_cx.llbb);
let lhs_ptr = Load(lhs_cx, lhs_ptr_ptr);
let not_at_end_lhs =
bld::ICmp(lhs_copy_cx, lib::llvm::LLVMIntNE, lhs_ptr,
lhs_end_ptr);
let lhs_do_copy_cx = new_sub_block_ctxt(bcx, ~"lhs_do_copy");
let rhs_copy_cx = new_sub_block_ctxt(bcx, ~"rhs_copy");
bld::CondBr(lhs_copy_cx, not_at_end_lhs, lhs_do_copy_cx.llbb,
rhs_copy_cx.llbb);
let dest_ptr_lhs_copy = bld::Load(lhs_do_copy_cx, dest_ptr_ptr);
let lhs_val = load_if_immediate(lhs_do_copy_cx, lhs_ptr, unit_ty);
lhs_do_copy_cx = copy_val(lhs_do_copy_cx, INIT, dest_ptr_lhs_copy,
lhs_val, unit_ty);
ICmp(lhs_cx, lib::llvm::LLVMIntNE, lhs_ptr, lhs_end_ptr);
let lhs_copy_cx = new_sub_block_ctxt(bcx, ~"lhs_copy_body");
let rhs_cx = new_sub_block_ctxt(bcx, ~"rhs_copy_header");
CondBr(lhs_cx, not_at_end_lhs, lhs_copy_cx.llbb, rhs_cx.llbb);
let write_ptr = Load(lhs_copy_cx, write_ptr_ptr);
lhs_copy_cx =
copy_val(lhs_copy_cx, INIT, write_ptr,
load_if_immediate(lhs_copy_cx, lhs_ptr, unit_ty), unit_ty);
// Increment both pointers.
if ty::type_has_dynamic_size(bcx_tcx(cx), unit_ty) {
if ty::type_has_dynamic_size(bcx_tcx(bcx), unit_ty) {
// We have to increment by the dynamically-computed size.
incr_ptr(lhs_do_copy_cx, dest_ptr_lhs_copy, unit_sz,
dest_ptr_ptr);
incr_ptr(lhs_do_copy_cx, lhs_ptr, unit_sz, lhs_ptr_ptr);
incr_ptr(lhs_copy_cx, write_ptr, llunitsz, write_ptr_ptr);
incr_ptr(lhs_copy_cx, lhs_ptr, llunitsz, lhs_ptr_ptr);
} else {
incr_ptr(lhs_do_copy_cx, dest_ptr_lhs_copy, C_int(1),
dest_ptr_ptr);
incr_ptr(lhs_do_copy_cx, lhs_ptr, C_int(1), lhs_ptr_ptr);
incr_ptr(lhs_copy_cx, write_ptr, C_int(1), write_ptr_ptr);
incr_ptr(lhs_copy_cx, lhs_ptr, C_int(1), lhs_ptr_ptr);
}
Br(lhs_copy_cx, lhs_cx.llbb);
bld::Br(lhs_do_copy_cx, lhs_copy_cx.llbb);
// Copy in elements from the RHS.
let rhs_ptr = bld::Load(rhs_copy_cx, rhs_ptr_ptr);
let rhs_ptr = Load(rhs_cx, rhs_ptr_ptr);
let not_at_end_rhs =
bld::ICmp(rhs_copy_cx, lib::llvm::LLVMIntNE, rhs_ptr,
rhs_end_ptr);
let rhs_do_copy_cx = new_sub_block_ctxt(bcx, ~"rhs_do_copy");
bld::CondBr(rhs_copy_cx, not_at_end_rhs, rhs_do_copy_cx.llbb,
next_cx.llbb);
let dest_ptr_rhs_copy = bld::Load(rhs_do_copy_cx, dest_ptr_ptr);
let rhs_val = load_if_immediate(rhs_do_copy_cx, rhs_ptr, unit_ty);
rhs_do_copy_cx = copy_val(rhs_do_copy_cx, INIT, dest_ptr_rhs_copy,
rhs_val, unit_ty);
ICmp(rhs_cx, lib::llvm::LLVMIntNE, rhs_ptr, rhs_end_ptr);
let rhs_copy_cx = new_sub_block_ctxt(bcx, ~"rhs_copy_body");
let next_cx = new_sub_block_ctxt(bcx, ~"next");
CondBr(rhs_cx, not_at_end_rhs, rhs_copy_cx.llbb, next_cx.llbb);
let write_ptr = Load(rhs_copy_cx, write_ptr_ptr);
rhs_copy_cx =
copy_val(rhs_copy_cx, INIT, write_ptr,
load_if_immediate(rhs_copy_cx, rhs_ptr, unit_ty), unit_ty);
// Increment both pointers.
if ty::type_has_dynamic_size(bcx_tcx(cx), unit_ty) {
if ty::type_has_dynamic_size(bcx_tcx(bcx), unit_ty) {
// We have to increment by the dynamically-computed size.
incr_ptr(rhs_do_copy_cx, dest_ptr_rhs_copy, unit_sz,
dest_ptr_ptr);
incr_ptr(rhs_do_copy_cx, rhs_ptr, unit_sz, rhs_ptr_ptr);
incr_ptr(rhs_copy_cx, write_ptr, llunitsz, write_ptr_ptr);
incr_ptr(rhs_copy_cx, rhs_ptr, llunitsz, rhs_ptr_ptr);
} else {
incr_ptr(rhs_do_copy_cx, dest_ptr_rhs_copy, C_int(1),
dest_ptr_ptr);
incr_ptr(rhs_do_copy_cx, rhs_ptr, C_int(1), rhs_ptr_ptr);
incr_ptr(rhs_copy_cx, write_ptr, C_int(1), write_ptr_ptr);
incr_ptr(rhs_copy_cx, rhs_ptr, C_int(1), rhs_ptr_ptr);
}
Br(rhs_copy_cx, rhs_cx.llbb);
bld::Br(rhs_do_copy_cx, rhs_copy_cx.llbb);
// Finally done!
ret rslt(next_cx, llvecptr);
ret rslt(next_cx, new_vec);
}
// NB: This does *not* adjust reference counts. The caller must have done
// this via take_ty() beforehand.
fn duplicate_heap_part(cx: &@block_ctxt, orig_vptr: ValueRef,
unit_ty: ty::t) -> result {
// Cast to an opaque interior vector if we can't trust the pointer
// type.
let vptr;
if ty::type_has_dynamic_size(bcx_tcx(cx), unit_ty) {
vptr = bld::PointerCast(cx, orig_vptr, T_ptr(T_opaque_ivec()));
} else { vptr = orig_vptr; }
// FIXME factor out a utility that can be used to create the loops built
// above
fn iter_ivec(bcx: &@block_ctxt, vptrptr: ValueRef, vec_ty: ty::t,
f: &trans::val_and_ty_fn) -> result {
let unit_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
let llunitty = type_of_or_i8(bcx, unit_ty);
let {bcx, val: unit_sz} = size_of(bcx, unit_ty);
let llunitty = type_of_or_i8(cx, unit_ty);
let llheappartty = T_ivec_heap_part(llunitty);
let vptr = Load(bcx, PointerCast(bcx, vptrptr,
T_ptr(T_ptr(T_ivec(llunitty)))));
let fill = get_fill(bcx, vptr);
let data_ptr = get_dataptr(bcx, vptr, llunitty);
// Check to see if the vector is heapified.
let stack_len_ptr =
bld::InBoundsGEP(cx, vptr, [C_int(0), C_uint(abi::ivec_elt_len)]);
let stack_len = bld::Load(cx, stack_len_ptr);
let stack_len_is_zero =
bld::ICmp(cx, lib::llvm::LLVMIntEQ, stack_len, C_int(0));
let maybe_on_heap_cx = new_sub_block_ctxt(cx, ~"maybe_on_heap");
let next_cx = new_sub_block_ctxt(cx, ~"next");
bld::CondBr(cx, stack_len_is_zero, maybe_on_heap_cx.llbb,
next_cx.llbb);
// Calculate the last pointer address we want to handle.
// TODO: Optimize this when the size of the unit type is statically
// known to not use pointer casts, which tend to confuse LLVM.
let data_end_ptr = pointer_add(bcx, data_ptr, fill);
let data_ptr_ptr = do_spill(bcx, data_ptr);
let stub_ptr =
bld::PointerCast(maybe_on_heap_cx, vptr,
T_ptr(T_ivec_heap(llunitty)));
let heap_ptr_ptr =
bld::InBoundsGEP(maybe_on_heap_cx,
stub_ptr,
[C_int(0),
C_uint(abi::ivec_heap_stub_elt_ptr)]);
let heap_ptr = bld::Load(maybe_on_heap_cx, heap_ptr_ptr);
let heap_ptr_is_nonnull =
bld::ICmp(maybe_on_heap_cx, lib::llvm::LLVMIntNE, heap_ptr,
C_null(T_ptr(llheappartty)));
let on_heap_cx = new_sub_block_ctxt(cx, ~"on_heap");
bld::CondBr(maybe_on_heap_cx, heap_ptr_is_nonnull, on_heap_cx.llbb,
next_cx.llbb);
// Ok, the vector is on the heap. Copy the heap part.
let alen_ptr =
bld::InBoundsGEP(on_heap_cx, stub_ptr,
[C_int(0),
C_uint(abi::ivec_heap_stub_elt_alen)]);
let alen = bld::Load(on_heap_cx, alen_ptr);
let heap_part_sz =
bld::Add(on_heap_cx, alen, llsize_of(T_opaque_ivec_heap_part()));
let rs =
trans_shared_malloc(on_heap_cx, T_ptr(llheappartty),
heap_part_sz);
on_heap_cx = rs.bcx;
let new_heap_ptr = rs.val;
rs = call_memmove(on_heap_cx, new_heap_ptr, heap_ptr, heap_part_sz);
on_heap_cx = rs.bcx;
bld::Store(on_heap_cx, new_heap_ptr, heap_ptr_ptr);
bld::Br(on_heap_cx, next_cx.llbb);
// Now perform the iteration.
let header_cx = new_sub_block_ctxt(bcx, ~"iter_ivec_loop_header");
Br(bcx, header_cx.llbb);
let data_ptr = Load(header_cx, data_ptr_ptr);
let not_yet_at_end = ICmp(header_cx, lib::llvm::LLVMIntULT,
data_ptr, data_end_ptr);
let body_cx = new_sub_block_ctxt(bcx, ~"iter_ivec_loop_body");
let next_cx = new_sub_block_ctxt(bcx, ~"iter_ivec_next");
CondBr(header_cx, not_yet_at_end, body_cx.llbb, next_cx.llbb);
body_cx = f(body_cx, data_ptr, unit_ty).bcx;
let increment = if ty::type_has_dynamic_size(bcx_tcx(bcx), unit_ty) {
unit_sz
} else { C_int(1) };
incr_ptr(body_cx, data_ptr, increment, data_ptr_ptr);
Br(body_cx, header_cx.llbb);
ret rslt(next_cx, C_nil());
}
//
// Local Variables:
// mode: rust

View File

@ -158,13 +158,13 @@ export type_kind;
export type_err;
export type_err_to_str;
export type_has_dynamic_size;
export type_needs_copy_glue;
export type_has_pointers;
export type_needs_drop;
export type_is_bool;
export type_is_bot;
export type_is_box;
export type_is_boxed;
export type_is_ivec;
export type_is_fp;
export type_is_integral;
export type_is_native;
@ -178,7 +178,6 @@ export type_is_copyable;
export type_is_tup_like;
export type_is_str;
export type_is_unique;
export type_owns_heap_mem;
export type_autoderef;
export type_param;
export unify;
@ -226,7 +225,6 @@ type ctxt =
short_names_cache: hashmap<t, @istr>,
has_pointer_cache: hashmap<t, bool>,
kind_cache: hashmap<t, ast::kind>,
owns_heap_mem_cache: hashmap<t, bool>,
ast_ty_to_ty_cache: hashmap<@ast::ty, option::t<t>>};
type ty_ctxt = ctxt;
@ -418,7 +416,6 @@ fn mk_ctxt(s: session::session, dm: resolve::def_map,
short_names_cache: map::mk_hashmap(ty::hash_ty, ty::eq_ty),
has_pointer_cache: map::mk_hashmap(ty::hash_ty, ty::eq_ty),
kind_cache: map::mk_hashmap(ty::hash_ty, ty::eq_ty),
owns_heap_mem_cache: map::mk_hashmap(ty::hash_ty, ty::eq_ty),
ast_ty_to_ty_cache: map::mk_hashmap(ast_util::hash_ty,
ast_util::eq_ty)};
populate_type_store(cx);
@ -828,8 +825,6 @@ fn type_is_structural(cx: &ctxt, ty: t) -> bool {
ty_fn(_, _, _, _, _) { ret true; }
ty_obj(_) { ret true; }
ty_res(_, _, _) { ret true; }
ty_vec(_) { ret true; }
ty_istr. { ret true; }
_ { ret false; }
}
}
@ -861,8 +856,6 @@ fn type_is_str(cx: &ctxt, ty: t) -> bool {
fn sequence_is_interior(cx: &ctxt, ty: t) -> bool {
alt struct(cx, ty) {
ty::ty_str. {
ret false;
}
@ -919,8 +912,20 @@ fn type_is_boxed(cx: &ctxt, ty: t) -> bool {
}
}
fn type_is_ivec(cx: &ctxt, ty: t) -> bool {
ret alt struct(cx, ty) {
ty_vec(_) { true }
ty_istr. { true }
_ { false }
};
}
fn type_is_unique(cx: &ctxt, ty: t) -> bool {
alt struct(cx, ty) { ty_uniq(_) { ret true; } _ { ret false; } }
alt struct(cx, ty) {
ty_uniq(_) { ret true; }
ty_vec(_) { true }
ty_istr. { true }
_ { ret false; } }
}
fn type_is_scalar(cx: &ctxt, ty: t) -> bool {
@ -947,13 +952,8 @@ fn type_has_pointers(cx: &ctxt, ty: t) -> bool {
let result = false;
alt struct(cx, ty) {
// scalar types
ty_nil. {
/* no-op */
}
ty_nil. {/* no-op */ }
ty_bot. {/* no-op */ }
ty_bool. {/* no-op */ }
ty_int. {/* no-op */ }
@ -996,6 +996,7 @@ fn type_has_pointers(cx: &ctxt, ty: t) -> bool {
fn type_needs_drop(cx: &ctxt, ty: t) -> bool {
ret alt struct(cx, ty) {
ty_res(_, _, _) { true }
ty_param(_, _) { true }
_ { type_has_pointers(cx, ty) }
};
}
@ -1152,7 +1153,6 @@ fn type_structurally_contains(cx: &ctxt, ty: t,
}
ret false;
}
ty_vec(mt) { ret type_structurally_contains(cx, mt.ty, test); }
ty_rec(fields) {
for field in fields {
if type_structurally_contains(cx, field.mt.ty, test) { ret true; }
@ -1182,17 +1182,6 @@ fn type_has_dynamic_size(cx: &ctxt, ty: t) -> bool {
});
}
fn type_needs_copy_glue(cx: &ctxt, ty: t) -> bool {
ret type_structurally_contains(cx, ty, fn(sty: &sty) -> bool {
ret alt sty {
ty_param(_, _) { true }
ty_vec(_) { true }
ty_istr. { true }
_ { false }
};
});
}
fn type_is_integral(cx: &ctxt, ty: t) -> bool {
alt struct(cx, ty) {
ty_int. { ret true; }
@ -1246,82 +1235,6 @@ fn type_is_signed(cx: &ctxt, ty: t) -> bool {
}
}
fn type_owns_heap_mem(cx: &ctxt, ty: t) -> bool {
alt cx.owns_heap_mem_cache.find(ty) {
some(result) { ret result; }
none. {/* fall through */ }
}
let result = false;
alt struct(cx, ty) {
ty_vec(_) { result = true; }
ty_istr. { result = true; }
// scalar types
ty_nil. {
result = false;
}
ty_bot. { result = false; }
ty_bool. { result = false; }
ty_int. { result = false; }
ty_float. { result = false; }
ty_uint. { result = false; }
ty_machine(_) { result = false; }
ty_char. { result = false; }
ty_type. { result = false; }
ty_native(_) { result = false; }
// boxed types
ty_str. {
result = false;
}
ty_box(_) { result = false; }
ty_fn(_, _, _, _, _) { result = false; }
ty_native_fn(_, _, _) { result = false; }
ty_obj(_) { result = false; }
// structural types
ty_tag(did, tps) {
let variants = tag_variants(cx, did);
for variant: variant_info in variants {
for aty: t in variant.args {
// Perform any type parameter substitutions.
let arg_ty = substitute_type_params(cx, tps, aty);
if type_owns_heap_mem(cx, arg_ty) { result = true; }
}
}
}
ty_rec(flds) {
for f: field in flds {
if type_owns_heap_mem(cx, f.mt.ty) { result = true; }
}
}
ty_tup(elts) {
for m in elts { if type_owns_heap_mem(cx, m) { result = true; } }
}
ty_res(_, inner, tps) {
result =
type_owns_heap_mem(cx, substitute_type_params(cx, tps, inner));
}
ty_ptr(_) {
result = false;
}
ty_var(_) { fail "ty_var in type_owns_heap_mem"; }
ty_param(_, _) { result = false; }
}
cx.owns_heap_mem_cache.insert(ty, result);
ret result;
}
// Whether a type is Plain Old Data (i.e. can be safely memmoved).
fn type_is_pod(cx: &ctxt, ty: t) -> bool {
let result = true;

View File

@ -60,7 +60,7 @@ type parser =
fn get_sess() -> parse_sess;
};
fn new_parser_from_file(sess: parse_sess, cfg: ast::crate_cfg, path: &istr,
fn new_parser_from_file(sess: parse_sess, cfg: &ast::crate_cfg, path: &istr,
chpos: uint, byte_pos: uint, ftype: file_type) ->
parser {
let src = io::read_whole_file_str(path);
@ -69,11 +69,10 @@ fn new_parser_from_file(sess: parse_sess, cfg: ast::crate_cfg, path: &istr,
sess.cm.files += [filemap];
let itr = @interner::mk(istr::hash, istr::eq);
let rdr = lexer::new_reader(sess.cm, src, filemap, itr);
ret new_parser(sess, cfg, rdr, ftype);
}
fn new_parser(sess: parse_sess, cfg: ast::crate_cfg, rdr: lexer::reader,
fn new_parser(sess: parse_sess, cfg: &ast::crate_cfg, rdr: lexer::reader,
ftype: file_type) -> parser {
obj stdio_parser(sess: parse_sess,
cfg: ast::crate_cfg,
@ -141,7 +140,6 @@ fn new_parser(sess: parse_sess, cfg: ast::crate_cfg, rdr: lexer::reader,
fn get_id() -> node_id { ret next_node_id(sess); }
fn get_sess() -> parse_sess { ret sess; }
}
let tok0 = lexer::next_token(rdr);
let span0 = ast_util::mk_sp(tok0.chpos, rdr.get_chpos());
ret stdio_parser(sess, cfg, ftype, tok0.tok, span0, span0, [],

View File

@ -45,7 +45,7 @@ tag request {
type ctx = chan<request>;
fn ip_to_sbuf(ip: net::ip_addr) -> *u8 {
vec::to_ptr(str::bytes(net::format_addr(ip)))
vec::unsafe::to_ptr(str::bytes(net::format_addr(ip)))
}
fn connect_task(ip: net::ip_addr, portnum: int, evt: chan<socket_event>) {
@ -132,7 +132,7 @@ fn request_task(c: chan<ctx>) {
task::spawn(bind server_task(ip, portnum, events, server));
}
write(socket, v, status) {
rustrt::aio_writedata(socket, vec::to_ptr::<u8>(v),
rustrt::aio_writedata(socket, vec::unsafe::to_ptr::<u8>(v),
vec::len::<u8>(v), status);
}
close_server(server, status) {

View File

@ -59,7 +59,8 @@ obj FILE_buf_reader(f: os::libc::FILE, res: option::t<@FILE_res>) {
fn read(len: uint) -> [u8] {
let buf = [];
vec::reserve::<u8>(buf, len);
let read = os::libc::fread(vec::to_ptr::<u8>(buf), 1u, len, f);
let read = os::libc::fread(vec::unsafe::to_ptr::<u8>(buf),
1u, len, f);
vec::unsafe::set_len::<u8>(buf, read);
ret buf;
}
@ -237,7 +238,7 @@ type buf_writer =
obj FILE_writer(f: os::libc::FILE, res: option::t<@FILE_res>) {
fn write(v: &[u8]) {
let len = vec::len::<u8>(v);
let vbuf = vec::to_ptr::<u8>(v);
let vbuf = vec::unsafe::to_ptr::<u8>(v);
let nout = os::libc::fwrite(vbuf, len, 1u, f);
if nout < 1u { log_err "error dumping buffer"; }
}
@ -255,7 +256,7 @@ obj fd_buf_writer(fd: int, res: option::t<@fd_res>) {
let count = 0u;
let vbuf;
while count < len {
vbuf = ptr::offset(vec::to_ptr::<u8>(v), count);
vbuf = ptr::offset(vec::unsafe::to_ptr::<u8>(v), count);
let nout = os::libc::write(fd, vbuf, len);
if nout < 0 {
log_err "error dumping buffer";

View File

@ -27,7 +27,8 @@ fn spawn_process(prog: &istr, args: &[istr], in_fd: int, out_fd: int,
// pointer to its buffer
let argv = arg_vec(prog, args);
let pid =
rustrt::rust_run_program(vec::to_ptr(argv), in_fd, out_fd, err_fd);
rustrt::rust_run_program(vec::unsafe::to_ptr(argv),
in_fd, out_fd, err_fd);
ret pid;
}

View File

@ -60,7 +60,7 @@ native "rust" mod rustrt {
fn str_buf(s: str) -> sbuf;
fn str_byte_len(s: str) -> uint;
fn str_alloc(n_bytes: uint) -> str;
fn str_from_ivec(b: &[mutable? u8]) -> str;
fn str_from_vec(b: &[mutable? u8]) -> str;
fn str_from_cstr(cstr: sbuf) -> str;
fn str_from_buf(buf: sbuf, len: uint) -> str;
fn str_push_byte(s: str, byte: uint) -> str;
@ -187,10 +187,10 @@ fn bytes(s: str) -> [u8] {
}
fn unsafe_from_bytes(v: &[mutable? u8]) -> str {
ret rustrt::str_from_ivec(v);
ret rustrt::str_from_vec(v);
}
fn unsafe_from_byte(u: u8) -> str { ret rustrt::str_from_ivec([u]); }
fn unsafe_from_byte(u: u8) -> str { ret rustrt::str_from_vec([u]); }
fn str_from_cstr(cstr: sbuf) -> str { ret rustrt::str_from_cstr(cstr); }

View File

@ -6,27 +6,20 @@ import uint::next_power_of_two;
import ptr::addr_of;
native "rust-intrinsic" mod rusti {
fn ivec_len<T>(v: &[T]) -> uint;
fn vec_len<T>(v: &[T]) -> uint;
}
native "rust" mod rustrt {
fn ivec_reserve_shared<T>(v: &mutable [mutable? T], n: uint);
fn ivec_on_heap<T>(v: &[T]) -> uint;
fn ivec_to_ptr<T>(v: &[T]) -> *T;
fn ivec_copy_from_buf_shared<T>(v: &mutable [mutable? T], ptr: *T,
count: uint);
fn vec_reserve_shared<T>(v: &mutable [mutable? T], n: uint);
fn vec_from_buf_shared<T>(ptr: *T, count: uint) -> [T];
}
/// Reserves space for `n` elements in the given vector.
fn reserve<@T>(v: &mutable [mutable? T], n: uint) {
rustrt::ivec_reserve_shared(v, n);
rustrt::vec_reserve_shared(v, n);
}
fn on_heap<T>(v: &[T]) -> bool { ret rustrt::ivec_on_heap(v) != 0u; }
fn to_ptr<T>(v: &[T]) -> *T { ret rustrt::ivec_to_ptr(v); }
fn len<T>(v: &[mutable? T]) -> uint { ret rusti::ivec_len(v); }
fn len<T>(v: &[mutable? T]) -> uint { ret rusti::vec_len(v); }
type init_op<T> = fn(uint) -> T;
@ -310,32 +303,27 @@ iter iter2<@T>(v: &[T]) -> (uint, T) {
}
mod unsafe {
type ivec_repr =
{mutable fill: uint,
mutable alloc: uint,
heap_part: *mutable ivec_heap_part};
type ivec_heap_part = {mutable fill: uint};
type ivec_repr = {mutable fill: uint,
mutable alloc: uint,
data: u8};
fn copy_from_buf<T>(v: &mutable [T], ptr: *T, count: uint) {
ret rustrt::ivec_copy_from_buf_shared(v, ptr, count);
}
fn from_buf<T>(ptr: *T, bytes: uint) -> [T] {
let v = [];
copy_from_buf(v, ptr, bytes);
ret v;
fn from_buf<T>(ptr: *T, elts: uint) -> [T] {
ret rustrt::vec_from_buf_shared(ptr, elts);
}
fn set_len<T>(v: &mutable [T], new_len: uint) {
let new_fill = new_len * sys::size_of::<T>();
let stack_part: *mutable ivec_repr =
::unsafe::reinterpret_cast(addr_of(v));
if (*stack_part).fill == 0u {
(*(*stack_part).heap_part).fill = new_fill; // On heap.
} else {
(*stack_part).fill = new_fill; // On stack.
}
let repr: **ivec_repr = ::unsafe::reinterpret_cast(addr_of(v));
(**repr).fill = new_len * sys::size_of::<T>();
}
fn to_ptr<T>(v: &[T]) -> *T {
let repr: **ivec_repr = ::unsafe::reinterpret_cast(addr_of(v));
ret ::unsafe::reinterpret_cast(addr_of((**repr).data));
}
}
fn to_ptr<T>(v: &[T]) -> *T {
ret unsafe::to_ptr(v);
}
// Local Variables:

View File

@ -9,17 +9,10 @@ extern "C" CDECL void
upcall_fail(rust_task *task, char const *expr, char const *file, size_t line);
extern "C" void
rust_intrinsic_ivec_len(rust_task *task, size_t *retptr, type_desc *ty,
rust_ivec *v)
rust_intrinsic_vec_len(rust_task *task, size_t *retptr, type_desc *ty,
rust_vec **vp)
{
size_t fill;
if (v->fill)
fill = v->fill;
else if (v->payload.ptr)
fill = v->payload.ptr->fill;
else
fill = 0;
*retptr = fill / ty->size;
*retptr = (*vp)->fill / ty->size;
}
extern "C" void

View File

@ -40,9 +40,7 @@ target triple = "@CFG_LLVM_TRIPLE@"
%struct.registers_t = type { i32, i32, i32, i32, i32, i32, i32, i32, i16, i16, i16, i16, i16, i16, i32, i32 }
%"class.rust_task::wakeup_callback" = type { i32 (...)** }
%struct.rc_base.5 = type { i32 }
%struct.rust_ivec = type { i32, i32, %union.rust_ivec_payload }
%union.rust_ivec_payload = type { %struct.rust_ivec_heap* }
%struct.rust_ivec_heap = type { i32, [0 x i8] }
%struct.rust_vec = type { i32, i32, [ 0 x i8 ] }
%class.rust_port = type { i32, %class.rust_kernel*, %struct.rust_task*, i32, %class.ptr_vec, %class.ptr_vec.7, %class.rust_chan*, %class.lock_and_signal }
%class.ptr_vec = type { %struct.rust_task*, i32, i32, %struct.rust_token** }
%struct.rust_token = type opaque
@ -53,29 +51,14 @@ target triple = "@CFG_LLVM_TRIPLE@"
@.str = private unnamed_addr constant [42 x i8] c"attempt to cast values of differing sizes\00", align 1
@.str1 = private unnamed_addr constant [15 x i8] c"intrinsics.cpp\00", align 1
define linkonce_odr void @rust_intrinsic_ivec_len(%struct.rust_task* nocapture %task, i32* nocapture %retptr, %struct.type_desc* nocapture %ty, %struct.rust_ivec* nocapture %v) nounwind {
define linkonce_odr void @rust_intrinsic_vec_len(%struct.rust_task* nocapture %task, i32* nocapture %retptr, %struct.type_desc* nocapture %ty, %struct.rust_vec** nocapture %v) nounwind {
entry:
%fill1 = getelementptr inbounds %struct.rust_ivec* %v, i32 0, i32 0
%ptr1 = load %struct.rust_vec** %v, align 4, !tbaa !0
%fill1 = getelementptr inbounds %struct.rust_vec* %ptr1, i32 0, i32 0
%tmp2 = load i32* %fill1, align 4, !tbaa !0
%tobool = icmp eq i32 %tmp2, 0
br i1 %tobool, label %if.else, label %if.end17
if.else: ; preds = %entry
%ptr = getelementptr inbounds %struct.rust_ivec* %v, i32 0, i32 2, i32 0
%tmp7 = load %struct.rust_ivec_heap** %ptr, align 4, !tbaa !3
%tobool8 = icmp eq %struct.rust_ivec_heap* %tmp7, null
br i1 %tobool8, label %if.end17, label %if.then9
if.then9: ; preds = %if.else
%fill14 = getelementptr inbounds %struct.rust_ivec_heap* %tmp7, i32 0, i32 0
%tmp15 = load i32* %fill14, align 4, !tbaa !0
br label %if.end17
if.end17: ; preds = %if.else, %entry, %if.then9
%fill.0 = phi i32 [ %tmp15, %if.then9 ], [ %tmp2, %entry ], [ 0, %if.else ]
%size = getelementptr inbounds %struct.type_desc* %ty, i32 0, i32 1
%tmp20 = load i32* %size, align 4, !tbaa !0
%div = udiv i32 %fill.0, %tmp20
%div = udiv i32 %tmp2, %tmp20
store i32 %div, i32* %retptr, align 4, !tbaa !0
ret void
}

View File

@ -10,21 +10,21 @@
%task = type { i32, i32, i32, i32, i32, i32, i32, i32 }
%ivec = type { i32, i32, [4 x { i32, i32, i32, i32, [0 x i8] }*] }
%vec = type { i32, i32, [0 x i8] }
@_rust_crate_map_toplevel = external global %0
declare i32 @rust_start(i32, i32, i32, i32)
declare external fastcc void @_rust_main(i1* nocapture, %task*, %2* nocapture, %ivec*)
declare external fastcc void @_rust_main(i1* nocapture, %task*, %2* nocapture, %vec*)
define void @_rust_main_wrap(i1* nocapture, %task *, %2* nocapture, %ivec *)
define void @_rust_main_wrap(i1* nocapture, %task *, %2* nocapture, %vec *)
{
tail call fastcc void @_rust_main(i1* %0, %task *%1, %2* nocapture %2, %ivec* %3)
tail call fastcc void @_rust_main(i1* %0, %task *%1, %2* nocapture %2, %vec* %3)
ret void
}
define i32 @"MAIN"(i32, i32) {
%3 = tail call i32 @rust_start(i32 ptrtoint (void (i1*, %task*, %2*, %ivec*)* @_rust_main_wrap to i32), i32 %0, i32 %1, i32 ptrtoint (%0* @_rust_crate_map_toplevel to i32))
%3 = tail call i32 @rust_start(i32 ptrtoint (void (i1*, %task*, %2*, %vec*)* @_rust_main_wrap to i32), i32 %0, i32 %1, i32 ptrtoint (%0* @_rust_crate_map_toplevel to i32))
ret i32 %3
}

View File

@ -10,7 +10,7 @@ command_line_args : public kernel_owned<command_line_args>
rust_str **strs;
// [str] passed to rust_task::start.
rust_ivec *args;
rust_vec *args;
command_line_args(rust_task *task,
int sys_argc,
@ -51,21 +51,13 @@ command_line_args : public kernel_owned<command_line_args>
strs[i]->ref_count++;
}
size_t ivec_interior_sz =
sizeof(size_t) * 2 + sizeof(rust_str *) * 4;
args = (rust_ivec *)
kernel->malloc(ivec_interior_sz,
args = (rust_vec *)
kernel->malloc(vec_size<rust_str*>(argc),
"command line arg interior");
args->fill = 0;
size_t ivec_exterior_sz = sizeof(rust_str *) * argc;
args->alloc = ivec_exterior_sz;
// NB: _rust_main owns the ivec payload and will be responsible for
args->fill = args->alloc = sizeof(rust_str *) * argc;
// NB: _rust_main owns the vec and will be responsible for
// freeing it
args->payload.ptr = (rust_ivec_heap *)
kernel->malloc(ivec_exterior_sz + sizeof(size_t),
"command line arg exterior");
args->payload.ptr->fill = ivec_exterior_sz;
memcpy(&args->payload.ptr->data, strs, ivec_exterior_sz);
memcpy(&args->data[0], strs, args->fill);
}
~command_line_args() {

View File

@ -206,18 +206,14 @@ str_byte_len(rust_task *task, rust_str *s)
}
extern "C" CDECL rust_str *
str_from_ivec(rust_task *task, rust_ivec *v)
str_from_vec(rust_task *task, rust_vec **vp)
{
bool is_interior = v->fill || !v->payload.ptr;
uintptr_t fill = is_interior ? v->fill : v->payload.ptr->fill;
void *data = is_interior ? v->payload.data : v->payload.ptr->data;
rust_str *st =
vec_alloc_with_data(task,
fill + 1, // +1 to fit at least '\0'
fill,
1,
fill ? data : NULL);
rust_vec* v = *vp;
rust_str *st = vec_alloc_with_data(task,
v->fill + 1, // +1 for \0
v->fill,
1,
&v->data[0]);
if (!st) {
task->fail();
return NULL;
@ -226,6 +222,33 @@ str_from_ivec(rust_task *task, rust_ivec *v)
return st;
}
extern "C" CDECL void
vec_reserve_shared(rust_task* task, type_desc* ty, rust_vec** vp,
size_t n_elts) {
size_t new_sz = n_elts * ty->size;
if (new_sz > (*vp)->alloc) {
size_t new_alloc = next_power_of_two(new_sz);
*vp = (rust_vec*)task->kernel->realloc(*vp, new_alloc +
sizeof(rust_vec));
(*vp)->alloc = new_alloc;
}
}
/**
* Copies elements in an unsafe buffer to the given interior vector. The
* vector must have size zero.
*/
extern "C" CDECL rust_vec*
vec_from_buf_shared(rust_task *task, type_desc *ty,
void *ptr, size_t count) {
size_t fill = ty->size * count;
rust_vec* v = (rust_vec*)task->kernel->malloc(fill + sizeof(rust_vec),
"vec_from_buf");
v->fill = v->alloc = fill;
memmove(&v->data[0], ptr, fill);
return v;
}
extern "C" CDECL rust_str *
str_from_cstr(rust_task *task, char *sbuf)
{
@ -471,23 +494,19 @@ rust_list_files(rust_task *task, rust_str *path) {
closedir(dirp);
}
#endif
size_t str_ivec_sz =
sizeof(size_t) // fill
+ sizeof(size_t) // alloc
+ sizeof(rust_str *) * 4; // payload
rust_box *box = (rust_box *)task->malloc(sizeof(rust_box) + str_ivec_sz,
"rust_box(list_files_ivec)");
rust_box *box =
(rust_box *)task->malloc(sizeof(rust_box) + sizeof(rust_vec*),
"rust_box(list_files_vec)");
rust_vec* vec =
(rust_vec*)task->kernel->malloc(vec_size<rust_str*>(strings.size()),
"list_files_vec");
box->ref_count = 1;
rust_ivec *iv = (rust_ivec *)&box->data;
iv->fill = 0;
size_t alloc_sz = sizeof(rust_str *) * strings.size();
iv->alloc = alloc_sz;
iv->payload.ptr = (rust_ivec_heap *)
task->kernel->malloc(alloc_sz + sizeof(size_t), "files ivec");
iv->payload.ptr->fill = alloc_sz;
memcpy(&iv->payload.ptr->data, strings.data(), alloc_sz);
rust_vec** box_content = (rust_vec**)&box->data[0];
*box_content = vec;
size_t alloc_sz = sizeof(rust_str*) * strings.size();
vec->fill = vec->alloc = alloc_sz;
memcpy(&vec->data[0], strings.data(), alloc_sz);
return box;
}
@ -549,157 +568,6 @@ nano_time(rust_task *task, uint64_t *ns) {
*ns = t.time_ns();
}
/**
* Preallocates the exact number of bytes in the given interior vector.
*/
extern "C" CDECL void
ivec_reserve(rust_task *task, type_desc *ty, rust_ivec *v, size_t n_elems)
{
size_t new_alloc = n_elems * ty->size;
if (new_alloc <= v->alloc)
return; // Already big enough.
rust_ivec_heap *heap_part;
if (v->fill || !v->payload.ptr) {
// On stack; spill to heap.
heap_part = (rust_ivec_heap *)task->malloc(new_alloc +
sizeof(size_t),
"ivec reserve heap part");
heap_part->fill = v->fill;
memcpy(&heap_part->data, v->payload.data, v->fill);
v->fill = 0;
v->payload.ptr = heap_part;
} else {
// On heap; resize.
heap_part = (rust_ivec_heap *)
task->realloc(v->payload.ptr,
new_alloc + sizeof(size_t));
v->payload.ptr = heap_part;
}
v->alloc = new_alloc;
}
/**
* Preallocates the exact number of bytes in the given interior vector.
*/
extern "C" CDECL void
ivec_reserve_shared(rust_task *task, type_desc *ty, rust_ivec *v,
size_t n_elems)
{
size_t new_alloc = n_elems * ty->size;
if (new_alloc <= v->alloc)
return; // Already big enough.
rust_ivec_heap *heap_part;
if (v->fill || !v->payload.ptr) {
// On stack; spill to heap.
heap_part = (rust_ivec_heap *)
task->kernel->malloc(new_alloc + sizeof(size_t),
"ivec reserve shared");
heap_part->fill = v->fill;
memcpy(&heap_part->data, v->payload.data, v->fill);
v->fill = 0;
v->payload.ptr = heap_part;
} else {
// On heap; resize.
heap_part = (rust_ivec_heap *)task->kernel->realloc(v->payload.ptr,
new_alloc + sizeof(size_t));
v->payload.ptr = heap_part;
}
v->alloc = new_alloc;
}
/**
* Returns true if the given vector is on the heap and false if it's on the
* stack.
*/
extern "C" CDECL bool
ivec_on_heap(rust_task *task, type_desc *ty, rust_ivec *v)
{
return !v->fill && v->payload.ptr;
}
/**
* Returns an unsafe pointer to the data part of an interior vector.
*/
extern "C" CDECL void *
ivec_to_ptr(rust_task *task, type_desc *ty, rust_ivec *v)
{
return v->fill ? v->payload.data : v->payload.ptr->data;
}
static size_t
get_ivec_size(rust_ivec *v)
{
if (v->fill)
return v->fill;
if (v->payload.ptr)
return v->payload.ptr->fill;
return 0;
}
/**
* Copies elements in an unsafe buffer to the given interior vector. The
* vector must have size zero.
*/
extern "C" CDECL void
ivec_copy_from_buf(rust_task *task, type_desc *ty, rust_ivec *v, void *ptr,
size_t count)
{
size_t old_size = get_ivec_size(v);
if (old_size) {
task->fail();
return;
}
ivec_reserve(task, ty, v, count);
size_t new_size = count * ty->size;
if (v->fill || !v->payload.ptr) {
// On stack.
memmove(v->payload.data, ptr, new_size);
v->fill = new_size;
return;
}
// On heap.
memmove(v->payload.ptr->data, ptr, new_size);
v->payload.ptr->fill = new_size;
}
/**
* Copies elements in an unsafe buffer to the given interior vector. The
* vector must have size zero.
*/
extern "C" CDECL void
ivec_copy_from_buf_shared(rust_task *task, type_desc *ty, rust_ivec *v,
void *ptr, size_t count)
{
size_t old_size = get_ivec_size(v);
if (old_size) {
task->fail();
return;
}
ivec_reserve_shared(task, ty, v, count);
size_t new_size = count * ty->size;
if (v->fill || !v->payload.ptr) {
// On stack.
memmove(v->payload.data, ptr, new_size);
v->fill = new_size;
return;
}
// On heap.
memmove(v->payload.ptr->data, ptr, new_size);
v->payload.ptr->fill = new_size;
}
extern "C" CDECL void
pin_task(rust_task *task) {
task->pin();

View File

@ -226,17 +226,6 @@ size_of::walk_struct(bool align, const uint8_t *end_sp) {
sa = struct_sa;
}
void
size_of::walk_ivec(bool align, bool is_pod, size_align &elem_sa) {
if (!elem_sa.is_set())
walk(align); // Determine the size the slow way.
else
sa = elem_sa; // Use the size hint.
sa.set(sizeof(rust_ivec) - sizeof(uintptr_t) + sa.size * 4,
max(sa.alignment, sizeof(uintptr_t)));
}
// Copy constructors
@ -321,8 +310,8 @@ public:
walk_vec(align, is_pod, get_evec_data_range(dp));
}
void walk_ivec(bool align, bool is_pod, size_align &elem_sa) {
walk_vec(align, is_pod, get_ivec_data_range(dp));
void walk_vec(bool align, bool is_pod, uint16_t sp_size) {
walk_vec(align, is_pod, get_vec_data_range(dp));
}
void walk_box(bool align) {

View File

@ -37,7 +37,7 @@ const uint8_t SHAPE_I64 = 7u;
const uint8_t SHAPE_F32 = 8u;
const uint8_t SHAPE_F64 = 9u;
const uint8_t SHAPE_EVEC = 10u;
const uint8_t SHAPE_IVEC = 11u;
const uint8_t SHAPE_VEC = 11u;
const uint8_t SHAPE_TAG = 12u;
const uint8_t SHAPE_BOX = 13u;
const uint8_t SHAPE_STRUCT = 17u;
@ -192,7 +192,7 @@ protected:
private:
void walk_evec(bool align);
void walk_ivec(bool align);
void walk_vec(bool align);
void walk_tag(bool align);
void walk_box(bool align);
void walk_struct(bool align);
@ -278,6 +278,7 @@ public:
template<typename T>
void
ctxt<T>::walk(bool align) {
switch (*sp++) {
case SHAPE_U8: WALK_NUMBER(uint8_t); break;
case SHAPE_U16: WALK_NUMBER(uint16_t); break;
@ -290,7 +291,7 @@ ctxt<T>::walk(bool align) {
case SHAPE_F32: WALK_NUMBER(float); break;
case SHAPE_F64: WALK_NUMBER(double); break;
case SHAPE_EVEC: walk_evec(align); break;
case SHAPE_IVEC: walk_ivec(align); break;
case SHAPE_VEC: walk_vec(align); break;
case SHAPE_TAG: walk_tag(align); break;
case SHAPE_BOX: walk_box(align); break;
case SHAPE_STRUCT: walk_struct(align); break;
@ -347,18 +348,13 @@ ctxt<T>::walk_evec(bool align) {
template<typename T>
void
ctxt<T>::walk_ivec(bool align) {
ctxt<T>::walk_vec(bool align) {
bool is_pod = *sp++;
size_align elem_sa = get_size_align(sp);
uint16_t sp_size = get_u16_bump(sp);
const uint8_t *end_sp = sp + sp_size;
// FIXME: Hack to work around our incorrect alignment in some cases.
if (elem_sa.alignment == 8)
elem_sa.alignment = 4;
static_cast<T *>(this)->walk_ivec(align, is_pod, elem_sa);
static_cast<T *>(this)->walk_vec(align, is_pod, sp_size);
sp = end_sp;
}
@ -471,8 +467,8 @@ public:
void walk_evec(bool align, bool is_pod, uint16_t sp_size) {
DPRINT("evec<"); walk(align); DPRINT(">");
}
void walk_ivec(bool align, bool is_pod, size_align &elem_sa) {
DPRINT("ivec<"); walk(align); DPRINT(">");
void walk_vec(bool align, bool is_pod, uint16_t sp_size) {
DPRINT("vec<"); walk(align); DPRINT(">");
}
void walk_box(bool align) {
DPRINT("box<"); walk(align); DPRINT(">");
@ -522,7 +518,6 @@ public:
void walk_tag(bool align, tag_info &tinfo);
void walk_struct(bool align, const uint8_t *end_sp);
void walk_ivec(bool align, bool is_pod, size_align &elem_sa);
void walk_box(bool align) { sa.set(sizeof(void *), sizeof(void *)); }
void walk_port(bool align) { sa.set(sizeof(void *), sizeof(void *)); }
@ -534,6 +529,9 @@ public:
void walk_evec(bool align, bool is_pod, uint16_t sp_size) {
sa.set(sizeof(void *), sizeof(void *));
}
void walk_vec(bool align, bool is_pod, uint16_t sp_size) {
sa.set(sizeof(void*), sizeof(void*));
}
void walk_var(bool align, uint8_t param_index) {
const type_param *param = &params[param_index];
@ -725,9 +723,9 @@ protected:
void walk_variant(bool align, tag_info &tinfo, uint32_t variant);
static std::pair<uint8_t *,uint8_t *> get_evec_data_range(ptr dp);
static std::pair<uint8_t *,uint8_t *> get_ivec_data_range(ptr dp);
static std::pair<uint8_t *,uint8_t *> get_vec_data_range(ptr dp);
static std::pair<ptr_pair,ptr_pair> get_evec_data_range(ptr_pair &dp);
static std::pair<ptr_pair,ptr_pair> get_ivec_data_range(ptr_pair &dp);
static std::pair<ptr_pair,ptr_pair> get_vec_data_range(ptr_pair &dp);
public:
U dp;
@ -740,7 +738,6 @@ public:
: ctxt< data<T,U> >(in_task, in_sp, in_params, in_tables), dp(in_dp) {}
void walk_tag(bool align, tag_info &tinfo);
void walk_ivec(bool align, bool is_pod, size_align &elem_sa);
void walk_struct(bool align, const uint8_t *end_sp) {
static_cast<T *>(this)->walk_struct(align, end_sp);
@ -749,6 +746,9 @@ public:
void walk_evec(bool align, bool is_pod, uint16_t sp_size) {
DATA_SIMPLE(void *, walk_evec(align, is_pod, sp_size));
}
void walk_vec(bool align, bool is_pod, uint16_t sp_size) {
DATA_SIMPLE(void *, walk_vec(align, is_pod, sp_size));
}
void walk_box(bool align) { DATA_SIMPLE(void *, walk_box(align)); }
void walk_port(bool align) { DATA_SIMPLE(void *, walk_port(align)); }
@ -815,27 +815,10 @@ data<T,U>::get_evec_data_range(ptr dp) {
template<typename T,typename U>
std::pair<uint8_t *,uint8_t *>
data<T,U>::get_ivec_data_range(ptr dp) {
size_t fill = bump_dp<size_t>(dp);
bump_dp<size_t>(dp); // Skip over alloc.
uint8_t *payload_dp = dp;
rust_ivec_payload payload = bump_dp<rust_ivec_payload>(dp);
uint8_t *start, *end;
if (!fill) {
if (!payload.ptr) { // Zero length.
start = end = NULL;
} else { // On heap.
fill = payload.ptr->fill;
start = payload.ptr->data;
end = start + fill;
}
} else { // On stack.
start = payload_dp;
end = start + fill;
}
return std::make_pair(start, end);
data<T,U>::get_vec_data_range(ptr dp) {
rust_vec* ptr = bump_dp<rust_vec*>(dp);
uint8_t* data = &ptr->data[0];
return std::make_pair(data, data + ptr->fill);
}
template<typename T,typename U>
@ -850,32 +833,14 @@ data<T,U>::get_evec_data_range(ptr_pair &dp) {
template<typename T,typename U>
std::pair<ptr_pair,ptr_pair>
data<T,U>::get_ivec_data_range(ptr_pair &dp) {
std::pair<uint8_t *,uint8_t *> fst = get_ivec_data_range(dp.fst);
std::pair<uint8_t *,uint8_t *> snd = get_ivec_data_range(dp.snd);
data<T,U>::get_vec_data_range(ptr_pair &dp) {
std::pair<uint8_t *,uint8_t *> fst = get_vec_data_range(dp.fst);
std::pair<uint8_t *,uint8_t *> snd = get_vec_data_range(dp.snd);
ptr_pair start(fst.first, snd.first);
ptr_pair end(fst.second, snd.second);
return std::make_pair(start, end);
}
template<typename T,typename U>
void
data<T,U>::walk_ivec(bool align, bool is_pod, size_align &elem_sa) {
if (!elem_sa.is_set())
elem_sa = size_of::get(*this);
else if (elem_sa.alignment == 8)
elem_sa.alignment = 4; // FIXME: This is an awful hack.
// Get a pointer to the interior vector, and determine its size.
if (align) dp = align_to(dp, ALIGNOF(rust_ivec *));
U end_dp = dp + sizeof(rust_ivec) - sizeof(uintptr_t) + elem_sa.size * 4;
// Call to the implementation.
static_cast<T *>(this)->walk_ivec(align, is_pod, elem_sa);
dp = end_dp;
}
template<typename T,typename U>
void
data<T,U>::walk_tag(bool align, tag_info &tinfo) {
@ -978,8 +943,8 @@ private:
walk_vec(align, is_pod, get_evec_data_range(dp));
}
void walk_ivec(bool align, bool is_pod, size_align &elem_sa) {
walk_vec(align, is_pod, get_ivec_data_range(dp));
void walk_vec(bool align, bool is_pod, uint16_t sp_size) {
walk_vec(align, is_pod, get_vec_data_range(dp));
}
void walk_tag(bool align, tag_info &tinfo, uint32_t tag_variant) {

View File

@ -61,16 +61,6 @@ upcall_log_str(rust_task *task, uint32_t level, rust_str *str) {
}
}
extern "C" CDECL void
upcall_log_istr(rust_task *task, uint32_t level, rust_ivec *str) {
LOG_UPCALL_ENTRY(task);
if (task->sched->log_lvl < level)
return;
const char *buf = (const char *)
(str->fill ? str->payload.data : str->payload.ptr->data);
task->sched->log(task, level, "rust: %s", buf);
}
extern "C" CDECL void
upcall_yield(rust_task *task) {
LOG_UPCALL_ENTRY(task);
@ -354,69 +344,33 @@ upcall_get_type_desc(rust_task *task,
return td;
}
/**
* Resizes an interior vector that has been spilled to the heap.
*/
extern "C" CDECL void
upcall_ivec_resize_shared(rust_task *task,
rust_ivec *v,
size_t newsz) {
upcall_vec_grow(rust_task* task, rust_vec** vp, size_t new_sz) {
LOG_UPCALL_ENTRY(task);
scoped_lock with(task->sched->lock);
I(task->sched, !v->fill);
size_t new_alloc = next_power_of_two(newsz);
rust_ivec_heap *new_heap_part = (rust_ivec_heap *)
task->kernel->realloc(v->payload.ptr, new_alloc + sizeof(size_t));
new_heap_part->fill = newsz;
v->alloc = new_alloc;
v->payload.ptr = new_heap_part;
}
/**
* Spills an interior vector to the heap.
*/
extern "C" CDECL void
upcall_ivec_spill_shared(rust_task *task,
rust_ivec *v,
size_t newsz) {
LOG_UPCALL_ENTRY(task);
scoped_lock with(task->sched->lock);
size_t new_alloc = next_power_of_two(newsz);
rust_ivec_heap *heap_part = (rust_ivec_heap *)
task->kernel->malloc(new_alloc + sizeof(size_t),
"ivec spill shared");
heap_part->fill = newsz;
memcpy(&heap_part->data, v->payload.data, v->fill);
v->fill = 0;
v->alloc = new_alloc;
v->payload.ptr = heap_part;
}
extern "C" CDECL void
upcall_ivec_push(rust_task* task, rust_ivec* v, type_desc* elt_ty, void* x) {
LOG_UPCALL_ENTRY(task);
bool is_interior = v->fill || !v->payload.ptr;
size_t sz = elt_ty->size;
size_t old_fill = is_interior ? v->fill : v->payload.ptr->fill;
size_t new_sz = sz + old_fill;
if (new_sz > v->alloc) {
if (is_interior) {
upcall_ivec_spill_shared(task, v, new_sz);
is_interior = false;
} else {
upcall_ivec_resize_shared(task, v, new_sz);
}
} else {
if (is_interior) v->fill = new_sz;
else v->payload.ptr->fill = new_sz;
// FIXME factor this into a utility function
if (new_sz > (*vp)->alloc) {
size_t new_alloc = next_power_of_two(new_sz);
*vp = (rust_vec*)task->kernel->realloc(*vp, new_alloc +
sizeof(rust_vec));
(*vp)->alloc = new_alloc;
}
uint8_t* dataptr = is_interior ? &v->payload.data[0]
: &v->payload.ptr->data[0];
copy_elements(task, elt_ty, dataptr + old_fill, x, sz);
(*vp)->fill = new_sz;
}
extern "C" CDECL void
upcall_vec_push(rust_task* task, rust_vec** vp, type_desc* elt_ty,
void* elt) {
LOG_UPCALL_ENTRY(task);
rust_vec* v = *vp;
size_t new_sz = v->fill + elt_ty->size;
if (new_sz > v->alloc) {
size_t new_alloc = next_power_of_two(new_sz);
*vp = v = (rust_vec*)task->kernel->realloc(v, new_alloc +
sizeof(rust_vec));
v->alloc = new_alloc;
}
copy_elements(task, elt_ty, &v->data[0] + v->fill, elt, elt_ty->size);
v->fill += elt_ty->size;
}

View File

@ -197,28 +197,18 @@ typedef rust_evec rust_str;
// Interior vectors (rust-user-code level).
struct
rust_ivec_heap
{
size_t fill;
uint8_t data[];
};
// Note that the payload is actually size 4*sizeof(elem), even when heapified
union
rust_ivec_payload
{
rust_ivec_heap *ptr; // if on heap
uint8_t data[]; // if on stack
};
struct
rust_ivec
rust_vec
{
size_t fill; // in bytes; if zero, heapified
size_t alloc; // in bytes
rust_ivec_payload payload;
uint8_t data[0];
};
template <typename T>
inline size_t vec_size(size_t elems) {
return sizeof(rust_vec) + sizeof(T) * elems;
}
//
// Local Variables:
// mode: C++

View File

@ -113,12 +113,12 @@ static socket_data *make_socket(rust_task *task, rust_chan *chan) {
static uv_buf_t alloc_buffer(uv_stream_t *socket, size_t suggested_size) {
LOG_CALLBACK_ENTRY(socket);
uv_buf_t buf;
size_t actual_size = suggested_size + sizeof (rust_ivec_heap);
size_t actual_size = suggested_size + sizeof (rust_vec);
socket_data *data = (socket_data*)socket->data;
char *base =
reinterpret_cast<char*>(data->task->kernel->malloc(actual_size,
"read buffer"));
buf.base = base + sizeof (rust_ivec_heap);
buf.base = base + sizeof (rust_vec);
buf.len = suggested_size;
return buf;
}
@ -129,26 +129,23 @@ static void read_progress(uv_stream_t *socket, ssize_t nread, uv_buf_t buf) {
I(data->task->sched, data->reader != NULL);
I(data->task->sched, nread <= ssize_t(buf.len));
rust_ivec_heap *base = reinterpret_cast<rust_ivec_heap*>(
reinterpret_cast<char*>(buf.base) - sizeof (rust_ivec_heap));
rust_ivec v;
v.fill = 0;
v.alloc = buf.len;
v.payload.ptr = base;
rust_vec *v = reinterpret_cast<rust_vec*>(
reinterpret_cast<char*>(buf.base) - sizeof (rust_vec));
v->alloc = buf.len;
switch (nread) {
case -1: // End of stream
base->fill = 0;
v->fill = 0;
uv_read_stop(socket);
break;
case 0: // Nothing read
data->task->kernel->free(base);
data->task->kernel->free(v);
return;
default: // Got nread bytes
base->fill = nread;
v->fill = nread;
break;
}
data->reader->send(&v);
data->reader->send(v);
}
static void new_connection(uv_handle_t *socket, int status) {

View File

@ -30,12 +30,6 @@ get_task_pointer
get_task_trampoline
get_time
hack_allow_leaks
ivec_copy_from_buf
ivec_copy_from_buf_shared
ivec_on_heap
ivec_reserve
ivec_reserve_shared
ivec_to_ptr
last_os_error
leak
migrate_alloc
@ -70,7 +64,9 @@ str_buf
str_byte_len
str_from_buf
str_from_cstr
str_from_ivec
str_from_vec
vec_reserve_shared
vec_from_buf_shared
str_push_byte
str_slice
task_sleep
@ -86,14 +82,13 @@ upcall_fail
upcall_free
upcall_get_type_desc
upcall_grow_task
upcall_ivec_resize_shared
upcall_ivec_spill_shared
upcall_ivec_push
upcall_vec_grow
upcall_vec_push
upcall_kill
upcall_log_double
upcall_log_float
upcall_log_int
upcall_log_istr
upcall_log_str
upcall_log_str
upcall_log_type
upcall_malloc

View File

@ -1,28 +1,28 @@
import rusti::ivec_len;
import rusti::vec_len;
native "rust-intrinsic" mod rusti {
fn ivec_len<T>(v: &[T]) -> uint;
fn vec_len<T>(v: &[T]) -> uint;
}
fn main() {
let v: [int] = [];
assert (ivec_len(v) == 0u); // zero-length
assert (vec_len(v) == 0u); // zero-length
let x = [1, 2];
assert (ivec_len(x) == 2u); // on stack
assert (vec_len(x) == 2u); // on stack
let y = [1, 2, 3, 4, 5];
assert (ivec_len(y) == 5u); // on heap
assert (vec_len(y) == 5u); // on heap
v += [];
assert (ivec_len(v) == 0u); // zero-length append
assert (vec_len(v) == 0u); // zero-length append
x += [3];
assert (ivec_len(x) == 3u); // on-stack append
assert (vec_len(x) == 3u); // on-stack append
y += [6, 7, 8, 9];
assert (ivec_len(y) == 9u); // on-heap append
assert (vec_len(y) == 9u); // on-heap append
let vv = v + v;
assert (ivec_len(vv) == 0u); // zero-length add
assert (vec_len(vv) == 0u); // zero-length add
let xx = x + [4];
assert (ivec_len(xx) == 4u); // on-stack add
assert (vec_len(xx) == 4u); // on-stack add
let yy = y + [10, 11];
assert (ivec_len(yy) == 11u); // on-heap add
assert (vec_len(yy) == 11u); // on-heap add
}

View File

@ -18,21 +18,12 @@ fn square_if_odd(n: &uint) -> option::t<uint> {
fn add(x: &uint, y: &uint) -> uint { ret x + y; }
#[test]
fn test_reserve_and_on_heap() {
let v: [int] = [1, 2];
assert (!vec::on_heap(v));
vec::reserve(v, 8u);
assert (vec::on_heap(v));
}
#[test]
fn test_unsafe_ptrs() {
// Test on-stack copy-from-buf.
let a = [1, 2, 3];
let ptr = vec::to_ptr(a);
let b = [];
vec::unsafe::copy_from_buf(b, ptr, 3u);
let b = vec::unsafe::from_buf(ptr, 3u);
assert (vec::len(b) == 3u);
assert (b[0] == 1);
assert (b[1] == 2);
@ -41,8 +32,7 @@ fn test_unsafe_ptrs() {
// Test on-heap copy-from-buf.
let c = [1, 2, 3, 4, 5];
ptr = vec::to_ptr(c);
let d = [];
vec::unsafe::copy_from_buf(d, ptr, 5u);
let d = vec::unsafe::from_buf(ptr, 5u);
assert (vec::len(d) == 5u);
assert (d[0] == 1);
assert (d[1] == 2);