Use the correct LLVM integer sizes

Use the integer sizes LLVM uses, rather than having random projections
lying around. Sizes are u64, alignments are u32, and C_*int is
target-dependent, but 64-bit is fine (the int -> C_int conversion is not
precision-losing, though it can be preceded by `as int` conversions that
are, so it is somewhat ugly. However, being able to suffix a `u` to
properly infer integer types is nice).
Ariel Ben-Yehuda 2014-10-14 23:36:11 +03:00
parent 71dfa5befe
commit 01d693b1cd
17 changed files with 90 additions and 74 deletions
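
For context, a minimal standalone sketch of the AsI64/AsU64 pattern this
commit introduces for C_int/C_uint (see the hunks below). This is not the
rustc code itself: const_int/const_uint are hypothetical stand-ins for
C_int/C_uint, and isize/usize replace the old `int`/`uint` types so the
sketch compiles on current Rust.

pub trait AsI64 { fn as_i64(self) -> i64; }
pub trait AsU64 { fn as_u64(self) -> u64; }

impl AsI64 for i64 { fn as_i64(self) -> i64 { self } }
impl AsI64 for i32 { fn as_i64(self) -> i64 { self as i64 } }
impl AsI64 for isize { fn as_i64(self) -> i64 { self as i64 } }

impl AsU64 for u64 { fn as_u64(self) -> u64 { self } }
impl AsU64 for u32 { fn as_u64(self) -> u64 { self as u64 } }
impl AsU64 for usize { fn as_u64(self) -> u64 { self as u64 } }

// Stand-ins for C_int/C_uint: the generic bound lets callers pass any
// supported width without an `as` cast, widening internally instead.
fn const_int<I: AsI64>(i: I) -> i64 { i.as_i64() }
fn const_uint<I: AsU64>(i: I) -> u64 { i.as_u64() }

fn main() {
    // A literal suffix is enough to pick the width -- the "suffix a `u`
    // to properly infer integer types" point from the message above.
    assert_eq!(const_int(0i32), 0);
    assert_eq!(const_uint(8u32), 8);
    assert_eq!(const_uint(1u64 << 40), 1 << 40);
}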


@@ -1088,7 +1088,7 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let sw = if kind == Switch {
build::Switch(bcx, test_val, else_cx.llbb, opts.len())
} else {
-C_int(ccx, 0) // Placeholder for when not using a switch
+C_int(ccx, 0i) // Placeholder for when not using a switch
};
let defaults = enter_default(else_cx, dm, m, col, val);


@@ -45,7 +45,6 @@
#![allow(unsigned_negate)]
-use libc::c_ulonglong;
use std::collections::Map;
use std::num::Int;
use std::rc::Rc;
@@ -132,7 +131,7 @@ pub struct Struct {
// If the struct is DST, then the size and alignment do not take into
// account the unsized fields of the struct.
pub size: u64,
-pub align: u64,
+pub align: u32,
pub sized: bool,
pub packed: bool,
pub fields: Vec<ty::t>
@@ -652,9 +651,7 @@ fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
} else {
// llvm::ConstantRange can deal with ranges that wrap around,
// so an overflow on (max + 1) is fine.
-LoadRangeAssert(bcx, ptr, min as c_ulonglong,
-(max + 1) as c_ulonglong,
-/* signed: */ True)
+LoadRangeAssert(bcx, ptr, min, (max+1), /* signed: */ True)
}
}
@@ -973,11 +970,11 @@ fn compute_struct_field_offsets(ccx: &CrateContext, st: &Struct) -> Vec<u64> {
for &ty in st.fields.iter() {
let llty = type_of::sizing_type_of(ccx, ty);
if !st.packed {
-let type_align = type_of::align_of(ccx, ty) as u64;
+let type_align = type_of::align_of(ccx, ty);
offset = roundup(offset, type_align);
}
offsets.push(offset);
-offset += machine::llsize_of_alloc(ccx, llty) as u64;
+offset += machine::llsize_of_alloc(ccx, llty);
}
assert_eq!(st.fields.len(), offsets.len());
offsets
@@ -1004,8 +1001,7 @@ fn build_const_struct(ccx: &CrateContext, st: &Struct, vals: &[ValueRef])
let mut cfields = Vec::new();
for (&val, &target_offset) in vals.iter().zip(target_offsets.iter()) {
if !st.packed {
-let val_align = machine::llalign_of_min(ccx, val_ty(val))
-/*bad*/as u64;
+let val_align = machine::llalign_of_min(ccx, val_ty(val));
offset = roundup(offset, val_align);
}
if offset != target_offset {
@@ -1014,7 +1010,7 @@ fn build_const_struct(ccx: &CrateContext, st: &Struct, vals: &[ValueRef])
}
assert!(!is_undef(val));
cfields.push(val);
-offset += machine::llsize_of_alloc(ccx, val_ty(val)) as u64;
+offset += machine::llsize_of_alloc(ccx, val_ty(val));
}
assert!(st.sized && offset <= st.size);
@@ -1031,7 +1027,7 @@ fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
// FIXME this utility routine should be somewhere more general
#[inline]
-fn roundup(x: u64, a: u64) -> u64 { ((x + (a - 1)) / a) * a }
+fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
/// Get the discriminant of a constant value. (Not currently used.)
pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef)
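
An aside on the roundup() change above: the function rounds an offset up to
the next multiple of an alignment, and the new signature takes the alignment
as u32 (per this commit's convention) and widens it exactly once inside. A
standalone sketch of the same arithmetic, outside rustc:

fn roundup(x: u64, a: u32) -> u64 {
    let a = a as u64; // widen the alignment once, at the boundary
    ((x + (a - 1)) / a) * a
}

fn main() {
    assert_eq!(roundup(10, 8), 16); // 10-byte offset, 8-byte alignment
    assert_eq!(roundup(16, 8), 16); // already aligned: unchanged
    assert_eq!(roundup(0, 4), 0);
}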


@@ -398,7 +398,7 @@ pub fn malloc_raw_dyn_proc<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: ty::t) -> Resu
let llty = type_of(bcx.ccx(), t);
let size = llsize_of(bcx.ccx(), llty);
-let llalign = C_uint(ccx, llalign_of_min(bcx.ccx(), llty) as uint);
+let llalign = C_uint(ccx, llalign_of_min(bcx.ccx(), llty));
// Allocate space and store the destructor pointer:
let Result {bcx: bcx, val: llbox} = malloc_raw_dyn(bcx, ptr_llty, t, size, llalign);


@@ -21,7 +21,7 @@ use syntax::codemap::Span;
use middle::trans::builder::Builder;
use middle::trans::type_::Type;
-use libc::{c_uint, c_ulonglong, c_char};
+use libc::{c_uint, c_char};
pub fn terminate(cx: Block, _: &str) {
debug!("terminate({})", cx.to_str());
@@ -380,8 +380,8 @@ pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> Va
}
-pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: c_ulonglong,
-hi: c_ulonglong, signed: llvm::Bool) -> ValueRef {
+pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: u64,
+hi: u64, signed: llvm::Bool) -> ValueRef {
if cx.unreachable.get() {
let ccx = cx.fcx.ccx;
let ty = val_ty(pointer_val);


@@ -19,7 +19,7 @@ use middle::trans::common::*;
use middle::trans::machine::llalign_of_pref;
use middle::trans::type_::Type;
use std::collections::HashMap;
-use libc::{c_uint, c_ulonglong, c_char};
+use libc::{c_uint, c_char};
use std::string::String;
use syntax::codemap::Span;
@@ -477,8 +477,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
-pub fn load_range_assert(&self, ptr: ValueRef, lo: c_ulonglong,
-hi: c_ulonglong, signed: llvm::Bool) -> ValueRef {
+pub fn load_range_assert(&self, ptr: ValueRef, lo: u64,
+hi: u64, signed: llvm::Bool) -> ValueRef {
let value = self.load(ptr);
unsafe {
@@ -490,7 +490,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint,
llvm::LLVMMDNodeInContext(self.ccx.llcx(),
-v.as_ptr(), v.len() as c_uint));
+v.as_ptr(),
+v.len() as c_uint));
}
value


@@ -39,7 +39,7 @@ use util::nodemap::{DefIdMap, NodeMap};
use arena::TypedArena;
use std::collections::HashMap;
-use libc::{c_uint, c_longlong, c_ulonglong, c_char};
+use libc::{c_uint, c_char};
use std::c_str::ToCStr;
use std::cell::{Cell, RefCell};
use std::rc::Rc;
@@ -595,14 +595,26 @@ pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef {
C_integral(Type::i64(ccx), i, false)
}
-pub fn C_int(ccx: &CrateContext, i: int) -> ValueRef {
-C_integral(ccx.int_type(), i as u64, true)
+pub fn C_int<I: AsI64>(ccx: &CrateContext, i: I) -> ValueRef {
+C_integral(ccx.int_type(), i.as_i64() as u64, true)
}
-pub fn C_uint(ccx: &CrateContext, i: uint) -> ValueRef {
-C_integral(ccx.int_type(), i as u64, false)
+pub fn C_uint<I: AsU64>(ccx: &CrateContext, i: I) -> ValueRef {
+C_integral(ccx.int_type(), i.as_u64(), false)
}
+pub trait AsI64 { fn as_i64(self) -> i64; }
+pub trait AsU64 { fn as_u64(self) -> u64; }
+// FIXME: remove the intptr conversions
+impl AsI64 for i64 { fn as_i64(self) -> i64 { self as i64 }}
+impl AsI64 for i32 { fn as_i64(self) -> i64 { self as i64 }}
+impl AsI64 for int { fn as_i64(self) -> i64 { self as i64 }}
+impl AsU64 for u64 { fn as_u64(self) -> u64 { self as u64 }}
+impl AsU64 for u32 { fn as_u64(self) -> u64 { self as u64 }}
+impl AsU64 for uint { fn as_u64(self) -> u64 { self as u64 }}
pub fn C_u8(ccx: &CrateContext, i: uint) -> ValueRef {
C_integral(Type::i8(ccx), i as u64, false)
}
@@ -717,13 +729,13 @@ pub fn is_const(v: ValueRef) -> bool {
}
}
-pub fn const_to_int(v: ValueRef) -> c_longlong {
+pub fn const_to_int(v: ValueRef) -> i64 {
unsafe {
llvm::LLVMConstIntGetSExtValue(v)
}
}
-pub fn const_to_uint(v: ValueRef) -> c_ulonglong {
+pub fn const_to_uint(v: ValueRef) -> u64 {
unsafe {
llvm::LLVMConstIntGetZExtValue(v)
}


@@ -499,7 +499,7 @@ pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let loc = bcx.sess().codemap().lookup_char_pos(sp.lo);
let filename = token::intern_and_get_ident(loc.file.name.as_slice());
let filename = C_str_slice(ccx, filename);
-let line = C_int(ccx, loc.line as int);
+let line = C_int(ccx, loc.line as i64);
let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false);
let expr_file_line = consts::const_addr_of(ccx, expr_file_line_const, ast::MutImmutable);
let args = vec!(expr_file_line);
@@ -526,7 +526,7 @@ pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// Invoke the lang item
let filename = C_str_slice(ccx, filename);
-let line = C_int(ccx, loc.line as int);
+let line = C_int(ccx, loc.line as i64);
let file_line_const = C_struct(ccx, &[filename, line], false);
let file_line = consts::const_addr_of(ccx, file_line_const, ast::MutImmutable);
let args = vec!(file_line, index, len);


@@ -202,7 +202,7 @@ use middle::ty;
use middle::pat_util;
use util::ppaux;
-use libc::{c_uint, c_ulonglong, c_longlong};
+use libc::c_uint;
use std::c_str::{CString, ToCStr};
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
@@ -2384,7 +2384,7 @@ fn prepare_enum_metadata(cx: &CrateContext,
llvm::LLVMDIBuilderCreateEnumerator(
DIB(cx),
name,
-v.disr_val as c_ulonglong)
+v.disr_val as u64)
}
})
})
@@ -2665,7 +2665,7 @@ fn fixed_vec_metadata(cx: &CrateContext,
llvm::LLVMDIBuilderGetOrCreateSubrange(
DIB(cx),
0,
-len as c_longlong)
+len as i64)
};
let subscripts = create_DIArray(DIB(cx), [subrange]);
@@ -3072,11 +3072,11 @@ fn span_start(cx: &CrateContext, span: Span) -> codemap::Loc {
}
fn size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u64) {
-(machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type))
+(machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type) as u64)
}
-fn bytes_to_bits(bytes: u64) -> c_ulonglong {
-(bytes * 8) as c_ulonglong
+fn bytes_to_bits(bytes: u64) -> u64 {
+bytes * 8
}
#[inline]


@@ -1548,7 +1548,7 @@ fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
assert!(ty::type_is_sized(bcx.tcx(), contents_ty));
let llty = type_of::type_of(bcx.ccx(), contents_ty);
let size = llsize_of(bcx.ccx(), llty);
-let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty) as uint);
+let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
let llty_ptr = llty.ptr_to();
let Result { bcx, val } = malloc_raw_dyn(bcx, llty_ptr, box_ty, size, align);
// Unique boxes do not allocate for zero-size types. The standard library


@@ -476,7 +476,7 @@ pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let llalign = cmp::min(llforeign_align, llrust_align);
debug!("llrust_size={:?}", llrust_size);
base::call_memcpy(bcx, llretptr_i8, llscratch_i8,
-C_uint(ccx, llrust_size as uint), llalign as u32);
+C_uint(ccx, llrust_size), llalign as u32);
}
}


@@ -58,9 +58,9 @@ pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef,
}
pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef,
-size: u64, align: u64) -> Block<'blk, 'tcx> {
-trans_exchange_free_dyn(cx, v, C_uint(cx.ccx(), size as uint),
-C_uint(cx.ccx(), align as uint))
+size: u64, align: u32) -> Block<'blk, 'tcx> {
+trans_exchange_free_dyn(cx, v, C_uint(cx.ccx(), size),
+C_uint(cx.ccx(), align))
}
pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef,
@@ -368,8 +368,8 @@ fn size_and_align_of_dst(bcx: Block, t :ty::t, info: ValueRef) -> (ValueRef, Val
bcx.ty_to_string(t), bcx.val_to_string(info));
if ty::type_is_sized(bcx.tcx(), t) {
let sizing_type = sizing_type_of(bcx.ccx(), t);
-let size = C_uint(bcx.ccx(), llsize_of_alloc(bcx.ccx(), sizing_type) as uint);
-let align = C_uint(bcx.ccx(), align_of(bcx.ccx(), t) as uint);
+let size = C_uint(bcx.ccx(), llsize_of_alloc(bcx.ccx(), sizing_type));
+let align = C_uint(bcx.ccx(), align_of(bcx.ccx(), t));
return (size, align);
}
match ty::get(t).sty {
@@ -380,8 +380,8 @@ fn size_and_align_of_dst(bcx: Block, t :ty::t, info: ValueRef) -> (ValueRef, Val
assert!(!ty::type_is_simd(bcx.tcx(), t));
let repr = adt::represent_type(ccx, t);
let sizing_type = adt::sizing_type_of(ccx, &*repr, true);
-let sized_size = C_uint(ccx, llsize_of_alloc(ccx, sizing_type) as uint);
-let sized_align = C_uint(ccx, llalign_of_min(ccx, sizing_type) as uint);
+let sized_size = C_uint(ccx, llsize_of_alloc(ccx, sizing_type));
+let sized_align = C_uint(ccx, llalign_of_min(ccx, sizing_type));
// Recurse to get the size of the dynamically sized field (must be
// the last field).
@@ -411,7 +411,7 @@ fn size_and_align_of_dst(bcx: Block, t :ty::t, info: ValueRef) -> (ValueRef, Val
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
-(Mul(bcx, info, C_uint(bcx.ccx(), unit_size as uint)), C_uint(bcx.ccx(), 8))
+(Mul(bcx, info, C_uint(bcx.ccx(), unit_size)), C_uint(bcx.ccx(), 8u))
}
_ => bcx.sess().bug(format!("Unexpected unsized type, found {}",
bcx.ty_to_string(t)).as_slice())


@@ -239,16 +239,16 @@ pub fn trans_intrinsic_call<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, node: ast::N
(_, "size_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
let lltp_ty = type_of::type_of(ccx, tp_ty);
-C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty) as uint)
+C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty))
}
(_, "min_align_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
-C_uint(ccx, type_of::align_of(ccx, tp_ty) as uint)
+C_uint(ccx, type_of::align_of(ccx, tp_ty))
}
(_, "pref_align_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
let lltp_ty = type_of::type_of(ccx, tp_ty);
-C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty) as uint)
+C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
}
(_, "move_val_init") => {
// Create a datum reflecting the value being moved.


@@ -10,6 +10,8 @@
// Information concerning the machine representation of various types.
+#![allow(non_camel_case_types)]
use llvm;
use llvm::{ValueRef};
use llvm::False;
@@ -17,21 +19,25 @@ use middle::trans::common::*;
use middle::trans::type_::Type;
+pub type llbits = u64;
+pub type llsize = u64;
+pub type llalign = u32;
// ______________________________________________________________________
// compute sizeof / alignof
// Returns the number of bytes clobbered by a Store to this type.
-pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> u64 {
+pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> llsize {
unsafe {
-return llvm::LLVMStoreSizeOfType(cx.td().lltd, ty.to_ref()) as u64;
+return llvm::LLVMStoreSizeOfType(cx.td().lltd, ty.to_ref());
}
}
// Returns the number of bytes between successive elements of type T in an
// array of T. This is the "ABI" size. It includes any ABI-mandated padding.
-pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> u64 {
+pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize {
unsafe {
-return llvm::LLVMABISizeOfType(cx.td().lltd, ty.to_ref()) as u64;
+return llvm::LLVMABISizeOfType(cx.td().lltd, ty.to_ref());
}
}
@@ -43,9 +49,9 @@ pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> u64 {
// that LLVM *does* distinguish between e.g. a 1-bit value and an 8-bit value
// at the codegen level! In general you should prefer `llbitsize_of_real`
// below.
-pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
+pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> llsize {
unsafe {
-let nbits = llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64;
+let nbits = llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref());
if nbits & 7 != 0 {
// Not an even number of bytes, spills into "next" byte.
1 + (nbits >> 3)
@@ -56,9 +62,9 @@ pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
}
/// Returns the "real" size of the type in bits.
-pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
+pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits {
unsafe {
-llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64
+llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref())
}
}
@@ -71,7 +77,7 @@ pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
// there's no need for that contrivance. The instruction
// selection DAG generator would flatten that GEP(1) node into a
// constant of the type's alloc size, so let's save it some work.
-return C_uint(cx, llsize_of_alloc(cx, ty) as uint);
+return C_uint(cx, llsize_of_alloc(cx, ty));
}
// Returns the "default" size of t (see above), or 1 if the size would
@@ -89,18 +95,18 @@ pub fn nonzero_llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
// The preferred alignment may be larger than the alignment used when
// packing the type into structs. This will be used for things like
// allocations inside a stack frame, which LLVM has a free hand in.
-pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> u64 {
+pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> llalign {
unsafe {
-return llvm::LLVMPreferredAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
+return llvm::LLVMPreferredAlignmentOfType(cx.td().lltd, ty.to_ref());
}
}
// Returns the minimum alignment of a type required by the platform.
// This is the alignment that will be used for struct fields, arrays,
// and similar ABI-mandated things.
-pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> u64 {
+pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> llalign {
unsafe {
-return llvm::LLVMABIAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
+return llvm::LLVMABIAlignmentOfType(cx.td().lltd, ty.to_ref());
}
}
@@ -116,6 +122,7 @@ pub fn llalign_of(cx: &CrateContext, ty: Type) -> ValueRef {
pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: uint) -> u64 {
unsafe {
-return llvm::LLVMOffsetOfElement(cx.td().lltd, struct_ty.to_ref(), element as u32) as u64;
+return llvm::LLVMOffsetOfElement(cx.td().lltd, struct_ty.to_ref(),
+element as u32);
}
}
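
A note on the llsize_of_real logic above: LLVM reports type sizes in bits,
and a type that is not a whole number of bytes spills into the next byte.
A standalone sketch of just that rounding step (size_in_bytes is a
hypothetical name; plain Rust, independent of the rustc/LLVM types in the
diff):

fn size_in_bytes(nbits: u64) -> u64 {
    if nbits & 7 != 0 {
        1 + (nbits >> 3) // partial byte: round up
    } else {
        nbits >> 3
    }
}

fn main() {
    assert_eq!(size_in_bytes(1), 1);  // an i1 still occupies a byte
    assert_eq!(size_in_bytes(36), 5); // 36 bits -> 5 bytes
    assert_eq!(size_in_bytes(64), 8);
}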


@@ -648,9 +648,9 @@ pub fn get_vtable(bcx: Block,
let size_ty = sizing_type_of(ccx, trait_ref.self_ty());
let size = machine::llsize_of_alloc(ccx, size_ty);
-let ll_size = C_uint(ccx, size as uint);
+let ll_size = C_uint(ccx, size);
let align = align_of(ccx, trait_ref.self_ty());
-let ll_align = C_uint(ccx, align as uint);
+let ll_align = C_uint(ccx, align);
// Generate a destructor for the vtable.
let drop_glue = glue::get_drop_glue(ccx, box_ty);


@@ -74,10 +74,10 @@ pub fn make_drop_glue_unboxed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let unit_size = llsize_of_alloc(ccx, llty);
if unit_size != 0 {
let len = get_len(bcx, vptr);
-let not_empty = ICmp(bcx, llvm::IntNE, len, C_uint(ccx, 0));
+let not_empty = ICmp(bcx, llvm::IntNE, len, C_uint(ccx, 0u));
with_cond(bcx, not_empty, |bcx| {
-let llalign = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
-let size = Mul(bcx, C_uint(ccx, unit_size as uint), len);
+let llalign = C_uint(ccx, machine::llalign_of_min(ccx, llty));
+let size = Mul(bcx, C_uint(ccx, unit_size), len);
glue::trans_exchange_free_dyn(bcx, dataptr, size, llalign)
})
} else {
@@ -461,7 +461,7 @@ pub fn iter_vec_loop<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let loop_counter = {
// i = 0
let i = alloca(loop_bcx, bcx.ccx().int_type(), "__i");
-Store(loop_bcx, C_uint(bcx.ccx(), 0), i);
+Store(loop_bcx, C_uint(bcx.ccx(), 0u), i);
Br(loop_bcx, cond_bcx.llbb);
i
@@ -489,7 +489,7 @@ pub fn iter_vec_loop<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
{ // i += 1
let i = Load(inc_bcx, loop_counter);
-let plusone = Add(inc_bcx, i, C_uint(bcx.ccx(), 1));
+let plusone = Add(inc_bcx, i, C_uint(bcx.ccx(), 1u));
Store(inc_bcx, plusone, loop_counter);
Br(inc_bcx, cond_bcx.llbb);
@@ -532,7 +532,7 @@ pub fn iter_vec_raw<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb);
let body_bcx = f(body_bcx, data_ptr, vt.unit_ty);
AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr,
-[C_int(bcx.ccx(), 1)]),
+[C_int(bcx.ccx(), 1i)]),
body_bcx.llbb);
Br(body_bcx, header_bcx.llbb);
next_bcx


@@ -398,7 +398,7 @@ pub fn type_of(cx: &CrateContext, t: ty::t) -> Type {
return llty;
}
-pub fn align_of(cx: &CrateContext, t: ty::t) -> u64 {
+pub fn align_of(cx: &CrateContext, t: ty::t) -> machine::llalign {
let llty = sizing_type_of(cx, t);
machine::llalign_of_min(cx, llty)
}


@@ -1458,7 +1458,7 @@ extern {
/** Distance between successive elements in an array of T.
Includes ABI padding. */
-pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint;
+pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong;
/** Returns the preferred alignment of a type. */
pub fn LLVMPreferredAlignmentOfType(TD: TargetDataRef, Ty: TypeRef)