auto merge of #18041 : arielb1/rust/no-size-overflow, r=pnkfelix

Should fix #17913.

Also clean up the u64/u32 handling (sizes stay u64, alignments become u32). I really should split this commit and add tests (I have no idea how to add them).
bors 2014-10-18 17:02:13 +00:00
commit ce342f522c
24 changed files with 388 additions and 142 deletions
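
For context, the shape of the fix: everywhere translation computes an LLVM-level object size (fixed-length arrays, SIMD vectors, struct layouts, enum layouts), the result is now compared against a per-target maximum object size, and a fatal error is reported instead of letting the arithmetic wrap or handing LLVM a type it cannot represent. A rough standalone sketch of that pattern; MAX_OBJ_SIZE, array_size and the error text are illustrative stand-ins, the real cap lives in CrateContext::max_obj_size further down in the diff:

const MAX_OBJ_SIZE: u64 = 1 << 31; // placeholder cap, mirroring the FIXME #18069 below

fn array_size(elem_size: u64, len: u64) -> Result<u64, String> {
    // checked_mul returns None if the product overflows u64, so a wrapped
    // value can never sneak past the cap comparison below.
    match elem_size.checked_mul(len) {
        Some(total) if total < MAX_OBJ_SIZE => Ok(total),
        _ => Err("the type is too big for the current architecture".to_string()),
    }
}

fn main() {
    assert!(array_size(1, 4096).is_ok());
    assert!(array_size(1, 1 << 32).is_err());      // over the cap
    assert!(array_size(8, u64::MAX / 4).is_err()); // the multiply itself would overflow
}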

View File

@ -1090,7 +1090,7 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let sw = if kind == Switch {
build::Switch(bcx, test_val, else_cx.llbb, opts.len())
} else {
C_int(ccx, 0) // Placeholder for when not using a switch
C_int(ccx, 0i) // Placeholder for when not using a switch
};
let defaults = enter_default(else_cx, dm, m, col, val);

View File

@ -45,7 +45,6 @@
#![allow(unsigned_negate)]
use libc::c_ulonglong;
use std::collections::Map;
use std::num::Int;
use std::rc::Rc;
@ -132,7 +131,7 @@ pub struct Struct {
// If the struct is DST, then the size and alignment do not take into
// account the unsized fields of the struct.
pub size: u64,
pub align: u64,
pub align: u32,
pub sized: bool,
pub packed: bool,
pub fields: Vec<ty::t>
@ -164,7 +163,7 @@ pub fn represent_type(cx: &CrateContext, t: ty::t) -> Rc<Repr> {
fn represent_type_uncached(cx: &CrateContext, t: ty::t) -> Repr {
match ty::get(t).sty {
ty::ty_tup(ref elems) => {
return Univariant(mk_struct(cx, elems.as_slice(), false), false)
return Univariant(mk_struct(cx, elems.as_slice(), false, t), false)
}
ty::ty_struct(def_id, ref substs) => {
let fields = ty::lookup_struct_fields(cx.tcx(), def_id);
@ -175,12 +174,12 @@ fn represent_type_uncached(cx: &CrateContext, t: ty::t) -> Repr {
let dtor = ty::ty_dtor(cx.tcx(), def_id).has_drop_flag();
if dtor { ftys.push(ty::mk_bool()); }
return Univariant(mk_struct(cx, ftys.as_slice(), packed), dtor)
return Univariant(mk_struct(cx, ftys.as_slice(), packed, t), dtor)
}
ty::ty_unboxed_closure(def_id, _) => {
let upvars = ty::unboxed_closure_upvars(cx.tcx(), def_id);
let upvar_types = upvars.iter().map(|u| u.ty).collect::<Vec<_>>();
return Univariant(mk_struct(cx, upvar_types.as_slice(), false),
return Univariant(mk_struct(cx, upvar_types.as_slice(), false, t),
false)
}
ty::ty_enum(def_id, ref substs) => {
@ -195,7 +194,8 @@ fn represent_type_uncached(cx: &CrateContext, t: ty::t) -> Repr {
// (Typechecking will reject discriminant-sizing attrs.)
assert_eq!(hint, attr::ReprAny);
let ftys = if dtor { vec!(ty::mk_bool()) } else { vec!() };
return Univariant(mk_struct(cx, ftys.as_slice(), false), dtor);
return Univariant(mk_struct(cx, ftys.as_slice(), false, t),
dtor);
}
if !dtor && cases.iter().all(|c| c.tys.len() == 0) {
@ -226,15 +226,17 @@ fn represent_type_uncached(cx: &CrateContext, t: ty::t) -> Repr {
assert_eq!(hint, attr::ReprAny);
let mut ftys = cases.get(0).tys.clone();
if dtor { ftys.push(ty::mk_bool()); }
return Univariant(mk_struct(cx, ftys.as_slice(), false), dtor);
return Univariant(mk_struct(cx, ftys.as_slice(), false, t),
dtor);
}
if !dtor && cases.len() == 2 && hint == attr::ReprAny {
// Nullable pointer optimization
let mut discr = 0;
while discr < 2 {
if cases.get(1 - discr).is_zerolen(cx) {
let st = mk_struct(cx, cases.get(discr).tys.as_slice(), false);
if cases.get(1 - discr).is_zerolen(cx, t) {
let st = mk_struct(cx, cases.get(discr).tys.as_slice(),
false, t);
match cases.get(discr).find_ptr() {
Some(ThinPointer(_)) if st.fields.len() == 1 => {
return RawNullablePointer {
@ -264,11 +266,15 @@ fn represent_type_uncached(cx: &CrateContext, t: ty::t) -> Repr {
slo: 0, shi: (cases.len() - 1) as i64 };
let ity = range_to_inttype(cx, hint, &bounds);
return General(ity, cases.iter().map(|c| {
let fields : Vec<_> = cases.iter().map(|c| {
let mut ftys = vec!(ty_of_inttype(ity)).append(c.tys.as_slice());
if dtor { ftys.push(ty::mk_bool()); }
mk_struct(cx, ftys.as_slice(), false)
}).collect(), dtor);
mk_struct(cx, ftys.as_slice(), false, t)
}).collect();
ensure_enum_fits_in_address_space(cx, ity, fields.as_slice(), t);
General(ity, fields, dtor)
}
_ => cx.sess().bug(format!("adt::represent_type called on non-ADT type: {}",
ty_to_string(cx.tcx(), t)).as_slice())
@ -289,8 +295,8 @@ pub enum PointerField {
}
impl Case {
fn is_zerolen(&self, cx: &CrateContext) -> bool {
mk_struct(cx, self.tys.as_slice(), false).size == 0
fn is_zerolen(&self, cx: &CrateContext, scapegoat: ty::t) -> bool {
mk_struct(cx, self.tys.as_slice(), false, scapegoat).size == 0
}
fn find_ptr(&self) -> Option<PointerField> {
@ -345,29 +351,25 @@ fn get_cases(tcx: &ty::ctxt, def_id: ast::DefId, substs: &subst::Substs) -> Vec<
}).collect()
}
fn mk_struct(cx: &CrateContext, tys: &[ty::t], packed: bool) -> Struct {
if tys.iter().all(|&ty| ty::type_is_sized(cx.tcx(), ty)) {
let lltys = tys.iter().map(|&ty| type_of::sizing_type_of(cx, ty)).collect::<Vec<_>>();
let llty_rec = Type::struct_(cx, lltys.as_slice(), packed);
Struct {
size: machine::llsize_of_alloc(cx, llty_rec),
align: machine::llalign_of_min(cx, llty_rec),
sized: true,
packed: packed,
fields: Vec::from_slice(tys),
}
fn mk_struct(cx: &CrateContext, tys: &[ty::t], packed: bool, scapegoat: ty::t) -> Struct {
let sized = tys.iter().all(|&ty| ty::type_is_sized(cx.tcx(), ty));
let lltys : Vec<Type> = if sized {
tys.iter()
.map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
} else {
// Ignore any dynamically sized fields.
let lltys = tys.iter().filter(|&ty| ty::type_is_sized(cx.tcx(), *ty))
.map(|&ty| type_of::sizing_type_of(cx, ty)).collect::<Vec<_>>();
let llty_rec = Type::struct_(cx, lltys.as_slice(), packed);
Struct {
size: machine::llsize_of_alloc(cx, llty_rec),
align: machine::llalign_of_min(cx, llty_rec),
sized: false,
packed: packed,
fields: Vec::from_slice(tys),
}
tys.iter().filter(|&ty| ty::type_is_sized(cx.tcx(), *ty))
.map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
};
ensure_struct_fits_in_address_space(cx, lltys.as_slice(), packed, scapegoat);
let llty_rec = Type::struct_(cx, lltys.as_slice(), packed);
Struct {
size: machine::llsize_of_alloc(cx, llty_rec),
align: machine::llalign_of_min(cx, llty_rec),
sized: sized,
packed: packed,
fields: Vec::from_slice(tys),
}
}
@ -463,6 +465,51 @@ pub fn ty_of_inttype(ity: IntType) -> ty::t {
}
}
// LLVM doesn't like types that don't fit in the address space
fn ensure_struct_fits_in_address_space(ccx: &CrateContext,
fields: &[Type],
packed: bool,
scapegoat: ty::t) {
let mut offset = 0;
for &llty in fields.iter() {
// Invariant: offset < ccx.max_obj_size() <= 1<<61
if !packed {
let type_align = machine::llalign_of_min(ccx, llty);
offset = roundup(offset, type_align);
}
// type_align is a power-of-2, so still offset < ccx.max_obj_size()
// llsize_of_alloc(ccx, llty) is also less than ccx.max_obj_size()
// so the sum is less than 1<<62 (and therefore can't overflow).
offset += machine::llsize_of_alloc(ccx, llty);
if offset >= ccx.max_obj_size() {
ccx.report_overbig_object(scapegoat);
}
}
}
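
The invariant comments above rest on a small argument: the running offset stays below the cap, rounding up to a power-of-two alignment keeps it at or below the cap (the cap is itself a power of two), and each field's allocation size is also below the cap, so the sum stays under 1<<62 and the u64 additions cannot wrap. A self-contained sketch of that accumulation with plain (size, align) pairs standing in for LLVM types; roundup and the placement of the check mirror the patch, the rest is illustrative:

const MAX_OBJ_SIZE: u64 = 1 << 61; // upper bound used by the invariant comments above

fn roundup(x: u64, a: u32) -> u64 {
    let a = a as u64;
    ((x + (a - 1)) / a) * a
}

// Walks (size, align) pairs the way ensure_struct_fits_in_address_space walks
// LLVM field types, bailing out as soon as the running offset reaches the cap.
fn struct_fits(fields: &[(u64, u32)], packed: bool) -> Result<u64, ()> {
    let mut offset = 0u64;
    for &(size, align) in fields {
        if !packed {
            offset = roundup(offset, align);
        }
        offset += size;
        if offset >= MAX_OBJ_SIZE {
            return Err(());
        }
    }
    Ok(offset)
}

fn main() {
    // a u8 followed by a u32 needs 3 bytes of padding: 1 -> 4 -> 8
    assert_eq!(struct_fits(&[(1, 1), (4, 4)], false), Ok(8));
    // two fields just under the cap trip the guard instead of overflowing
    assert!(struct_fits(&[(MAX_OBJ_SIZE - 1, 1), (MAX_OBJ_SIZE - 1, 1)], false).is_err());
}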
fn union_size_and_align(sts: &[Struct]) -> (machine::llsize, machine::llalign) {
let size = sts.iter().map(|st| st.size).max().unwrap();
let most_aligned = sts.iter().max_by(|st| st.align).unwrap();
(size, most_aligned.align)
}
fn ensure_enum_fits_in_address_space(ccx: &CrateContext,
discr: IntType,
fields: &[Struct],
scapegoat: ty::t) {
let discr_size = machine::llsize_of_alloc(ccx, ll_inttype(ccx, discr));
let (field_size, field_align) = union_size_and_align(fields);
// field_align < 1<<32, discr_size <= 8, field_size < MAX_OBJ_SIZE <= 1<<61
// so the sum is less than 1<<62 (and can't overflow).
let total_size = roundup(discr_size, field_align) + field_size;
if total_size >= ccx.max_obj_size() {
ccx.report_overbig_object(scapegoat);
}
}
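
For enums the same cap is applied to the discriminant plus the largest variant. A quick numeric sanity check of the formula above (made-up sizes, not from a real enum):

fn main() {
    // 1-byte discriminant, largest variant: 12 bytes with 4-byte alignment.
    let (discr_size, field_size, field_align) = (1u64, 12u64, 4u64);
    let rounded_discr = ((discr_size + field_align - 1) / field_align) * field_align;
    // roundup(1, 4) = 4, so the total compared against max_obj_size is 16.
    assert_eq!(rounded_discr + field_size, 16);
}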
/**
* LLVM-level types are a little complicated.
@ -525,13 +572,12 @@ fn generic_type_of(cx: &CrateContext,
// of the size.
//
// FIXME #10604: this breaks when vector types are present.
let size = sts.iter().map(|st| st.size).max().unwrap();
let most_aligned = sts.iter().max_by(|st| st.align).unwrap();
let align = most_aligned.align;
let (size, align) = union_size_and_align(sts.as_slice());
let align_s = align as u64;
let discr_ty = ll_inttype(cx, ity);
let discr_size = machine::llsize_of_alloc(cx, discr_ty) as u64;
let align_units = (size + align - 1) / align - 1;
let pad_ty = match align {
let discr_size = machine::llsize_of_alloc(cx, discr_ty);
let align_units = (size + align_s - 1) / align_s - 1;
let pad_ty = match align_s {
1 => Type::array(&Type::i8(cx), align_units),
2 => Type::array(&Type::i16(cx), align_units),
4 => Type::array(&Type::i32(cx), align_units),
@ -541,10 +587,10 @@ fn generic_type_of(cx: &CrateContext,
align_units),
_ => fail!("unsupported enum alignment: {}", align)
};
assert_eq!(machine::llalign_of_min(cx, pad_ty) as u64, align);
assert_eq!(align % discr_size, 0);
assert_eq!(machine::llalign_of_min(cx, pad_ty), align);
assert_eq!(align_s % discr_size, 0);
let fields = vec!(discr_ty,
Type::array(&discr_ty, align / discr_size - 1),
Type::array(&discr_ty, align_s / discr_size - 1),
pad_ty);
match name {
None => Type::struct_(cx, fields.as_slice(), false),
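
To make the padding arithmetic above concrete: for a union-wide size of 10 bytes, alignment 4 and a 4-byte discriminant, align_units = (10 + 4 - 1) / 4 - 1 = 2, so the generic type becomes { i32, [i32; 0], [i32; 2] } and occupies roundup(10, 4) = 12 bytes. A standalone re-run of those numbers (purely illustrative):

fn main() {
    let (size, align, discr_size) = (10u64, 4u64, 4u64);
    let align_units = (size + align - 1) / align - 1;
    // fields: discriminant (4 bytes) + [discr; align/discr_size - 1] (0 bytes) + pad array
    let total = discr_size + (align / discr_size - 1) * discr_size + align_units * align;
    assert_eq!(align_units, 2);
    assert_eq!(total, 12); // == roundup(size, align)
}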
@ -653,9 +699,7 @@ fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
} else {
// llvm::ConstantRange can deal with ranges that wrap around,
// so an overflow on (max + 1) is fine.
LoadRangeAssert(bcx, ptr, min as c_ulonglong,
(max + 1) as c_ulonglong,
/* signed: */ True)
LoadRangeAssert(bcx, ptr, min, (max+1), /* signed: */ True)
}
}
@ -974,11 +1018,11 @@ fn compute_struct_field_offsets(ccx: &CrateContext, st: &Struct) -> Vec<u64> {
for &ty in st.fields.iter() {
let llty = type_of::sizing_type_of(ccx, ty);
if !st.packed {
let type_align = type_of::align_of(ccx, ty) as u64;
let type_align = type_of::align_of(ccx, ty);
offset = roundup(offset, type_align);
}
offsets.push(offset);
offset += machine::llsize_of_alloc(ccx, llty) as u64;
offset += machine::llsize_of_alloc(ccx, llty);
}
assert_eq!(st.fields.len(), offsets.len());
offsets
@ -1005,8 +1049,7 @@ fn build_const_struct(ccx: &CrateContext, st: &Struct, vals: &[ValueRef])
let mut cfields = Vec::new();
for (&val, &target_offset) in vals.iter().zip(target_offsets.iter()) {
if !st.packed {
let val_align = machine::llalign_of_min(ccx, val_ty(val))
/*bad*/as u64;
let val_align = machine::llalign_of_min(ccx, val_ty(val));
offset = roundup(offset, val_align);
}
if offset != target_offset {
@ -1015,7 +1058,7 @@ fn build_const_struct(ccx: &CrateContext, st: &Struct, vals: &[ValueRef])
}
assert!(!is_undef(val));
cfields.push(val);
offset += machine::llsize_of_alloc(ccx, val_ty(val)) as u64;
offset += machine::llsize_of_alloc(ccx, val_ty(val));
}
assert!(st.sized && offset <= st.size);
@ -1032,7 +1075,7 @@ fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
// FIXME this utility routine should be somewhere more general
#[inline]
fn roundup(x: u64, a: u64) -> u64 { ((x + (a - 1)) / a) * a }
fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
/// Get the discriminant of a constant value. (Not currently used.)
pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef)

View File

@ -398,7 +398,7 @@ pub fn malloc_raw_dyn_proc<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: ty::t) -> Resu
let llty = type_of(bcx.ccx(), t);
let size = llsize_of(bcx.ccx(), llty);
let llalign = C_uint(ccx, llalign_of_min(bcx.ccx(), llty) as uint);
let llalign = C_uint(ccx, llalign_of_min(bcx.ccx(), llty));
// Allocate space and store the destructor pointer:
let Result {bcx: bcx, val: llbox} = malloc_raw_dyn(bcx, ptr_llty, t, size, llalign);

View File

@ -21,7 +21,7 @@ use syntax::codemap::Span;
use middle::trans::builder::Builder;
use middle::trans::type_::Type;
use libc::{c_uint, c_ulonglong, c_char};
use libc::{c_uint, c_char};
pub fn terminate(cx: Block, _: &str) {
debug!("terminate({})", cx.to_str());
@ -380,8 +380,8 @@ pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> Va
}
pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: c_ulonglong,
hi: c_ulonglong, signed: llvm::Bool) -> ValueRef {
pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: u64,
hi: u64, signed: llvm::Bool) -> ValueRef {
if cx.unreachable.get() {
let ccx = cx.fcx.ccx;
let ty = val_ty(pointer_val);

View File

@ -19,7 +19,7 @@ use middle::trans::common::*;
use middle::trans::machine::llalign_of_pref;
use middle::trans::type_::Type;
use std::collections::HashMap;
use libc::{c_uint, c_ulonglong, c_char};
use libc::{c_uint, c_char};
use std::string::String;
use syntax::codemap::Span;
@ -477,8 +477,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
pub fn load_range_assert(&self, ptr: ValueRef, lo: c_ulonglong,
hi: c_ulonglong, signed: llvm::Bool) -> ValueRef {
pub fn load_range_assert(&self, ptr: ValueRef, lo: u64,
hi: u64, signed: llvm::Bool) -> ValueRef {
let value = self.load(ptr);
unsafe {
@ -490,7 +490,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint,
llvm::LLVMMDNodeInContext(self.ccx.llcx(),
v.as_ptr(), v.len() as c_uint));
v.as_ptr(),
v.len() as c_uint));
}
value

View File

@ -26,6 +26,7 @@ use middle::trans::build;
use middle::trans::cleanup;
use middle::trans::datum;
use middle::trans::debuginfo;
use middle::trans::machine;
use middle::trans::type_::Type;
use middle::trans::type_of;
use middle::traits;
@ -39,7 +40,7 @@ use util::nodemap::{DefIdMap, NodeMap};
use arena::TypedArena;
use std::collections::HashMap;
use libc::{c_uint, c_longlong, c_ulonglong, c_char};
use libc::{c_uint, c_char};
use std::c_str::ToCStr;
use std::cell::{Cell, RefCell};
use std::rc::Rc;
@ -594,14 +595,43 @@ pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef {
C_integral(Type::i64(ccx), i, false)
}
pub fn C_int(ccx: &CrateContext, i: int) -> ValueRef {
C_integral(ccx.int_type(), i as u64, true)
pub fn C_int<I: AsI64>(ccx: &CrateContext, i: I) -> ValueRef {
let v = i.as_i64();
match machine::llbitsize_of_real(ccx, ccx.int_type()) {
32 => assert!(v < (1<<31) && v >= -(1<<31)),
64 => {},
n => fail!("unsupported target size: {}", n)
}
C_integral(ccx.int_type(), v as u64, true)
}
pub fn C_uint(ccx: &CrateContext, i: uint) -> ValueRef {
C_integral(ccx.int_type(), i as u64, false)
pub fn C_uint<I: AsU64>(ccx: &CrateContext, i: I) -> ValueRef {
let v = i.as_u64();
match machine::llbitsize_of_real(ccx, ccx.int_type()) {
32 => assert!(v < (1<<32)),
64 => {},
n => fail!("unsupported target size: {}", n)
}
C_integral(ccx.int_type(), v, false)
}
pub trait AsI64 { fn as_i64(self) -> i64; }
pub trait AsU64 { fn as_u64(self) -> u64; }
// FIXME: remove the intptr conversions, because they
// are host-architecture-dependent
impl AsI64 for i64 { fn as_i64(self) -> i64 { self as i64 }}
impl AsI64 for i32 { fn as_i64(self) -> i64 { self as i64 }}
impl AsI64 for int { fn as_i64(self) -> i64 { self as i64 }}
impl AsU64 for u64 { fn as_u64(self) -> u64 { self as u64 }}
impl AsU64 for u32 { fn as_u64(self) -> u64 { self as u64 }}
impl AsU64 for uint { fn as_u64(self) -> u64 { self as u64 }}
pub fn C_u8(ccx: &CrateContext, i: uint) -> ValueRef {
C_integral(Type::i8(ccx), i as u64, false)
}
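
The generic C_int/C_uint now assert that the value actually fits the target's int type instead of silently truncating it. A minimal standalone illustration of the same range checks, with the target word size passed in directly where the patch consults llbitsize_of_real(ccx, ccx.int_type()); the function names are illustrative:

fn uint_fits(v: u64, int_bits: u32) -> bool {
    match int_bits {
        32 => v < (1 << 32),
        64 => true,
        n => panic!("unsupported target size: {}", n),
    }
}

fn int_fits(v: i64, int_bits: u32) -> bool {
    match int_bits {
        32 => v < (1 << 31) && v >= -(1 << 31),
        64 => true,
        n => panic!("unsupported target size: {}", n),
    }
}

fn main() {
    assert!(uint_fits(0xFFFF_FFFF, 32)); // largest value accepted for a 32-bit int type
    assert!(!uint_fits(1 << 32, 32));    // would previously have wrapped when emitted
    assert!(int_fits(-(1 << 31), 32));
    assert!(!int_fits(1 << 31, 32));
    assert!(uint_fits(u64::MAX, 64));
}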
@ -716,13 +746,13 @@ pub fn is_const(v: ValueRef) -> bool {
}
}
pub fn const_to_int(v: ValueRef) -> c_longlong {
pub fn const_to_int(v: ValueRef) -> i64 {
unsafe {
llvm::LLVMConstIntGetSExtValue(v)
}
}
pub fn const_to_uint(v: ValueRef) -> c_ulonglong {
pub fn const_to_uint(v: ValueRef) -> u64 {
unsafe {
llvm::LLVMConstIntGetZExtValue(v)
}

View File

@ -25,6 +25,7 @@ use middle::trans::debuginfo;
use middle::trans::monomorphize::MonoId;
use middle::trans::type_::{Type, TypeNames};
use middle::ty;
use util::ppaux::Repr;
use util::sha2::Sha256;
use util::nodemap::{NodeMap, NodeSet, DefIdMap};
@ -711,6 +712,16 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
pub fn trait_cache(&self) -> &RefCell<HashMap<Rc<ty::TraitRef>, traits::Vtable<()>>> {
&self.local.trait_cache
}
pub fn max_obj_size(&self) -> u64 {
1<<31 /* FIXME #18069: select based on architecture */
}
pub fn report_overbig_object(&self, obj: ty::t) -> ! {
self.sess().fatal(
format!("the type `{}` is too big for the current architecture",
obj.repr(self.tcx())).as_slice())
}
}
fn declare_intrinsic(ccx: &CrateContext, key: & &'static str) -> Option<ValueRef> {

View File

@ -499,7 +499,7 @@ pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let loc = bcx.sess().codemap().lookup_char_pos(sp.lo);
let filename = token::intern_and_get_ident(loc.file.name.as_slice());
let filename = C_str_slice(ccx, filename);
let line = C_int(ccx, loc.line as int);
let line = C_uint(ccx, loc.line);
let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false);
let expr_file_line = consts::const_addr_of(ccx, expr_file_line_const, ast::MutImmutable);
let args = vec!(expr_file_line);
@ -526,7 +526,7 @@ pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// Invoke the lang item
let filename = C_str_slice(ccx, filename);
let line = C_int(ccx, loc.line as int);
let line = C_uint(ccx, loc.line);
let file_line_const = C_struct(ccx, &[filename, line], false);
let file_line = consts::const_addr_of(ccx, file_line_const, ast::MutImmutable);
let args = vec!(file_line, index, len);

View File

@ -202,7 +202,7 @@ use middle::ty;
use middle::pat_util;
use util::ppaux;
use libc::{c_uint, c_ulonglong, c_longlong};
use libc::c_uint;
use std::c_str::{CString, ToCStr};
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
@ -2384,7 +2384,7 @@ fn prepare_enum_metadata(cx: &CrateContext,
llvm::LLVMDIBuilderCreateEnumerator(
DIB(cx),
name,
v.disr_val as c_ulonglong)
v.disr_val as u64)
}
})
})
@ -2663,9 +2663,9 @@ fn fixed_vec_metadata(cx: &CrateContext,
let subrange = unsafe {
llvm::LLVMDIBuilderGetOrCreateSubrange(
DIB(cx),
0,
len as c_longlong)
DIB(cx),
0,
len as i64)
};
let subscripts = create_DIArray(DIB(cx), [subrange]);
@ -3072,11 +3072,11 @@ fn span_start(cx: &CrateContext, span: Span) -> codemap::Loc {
}
fn size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u64) {
(machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type))
(machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type) as u64)
}
fn bytes_to_bits(bytes: u64) -> c_ulonglong {
(bytes * 8) as c_ulonglong
fn bytes_to_bits(bytes: u64) -> u64 {
bytes * 8
}
#[inline]

View File

@ -1548,7 +1548,7 @@ fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
assert!(ty::type_is_sized(bcx.tcx(), contents_ty));
let llty = type_of::type_of(bcx.ccx(), contents_ty);
let size = llsize_of(bcx.ccx(), llty);
let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty) as uint);
let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
let llty_ptr = llty.ptr_to();
let Result { bcx, val } = malloc_raw_dyn(bcx, llty_ptr, box_ty, size, align);
// Unique boxes do not allocate for zero-size types. The standard library

View File

@ -476,7 +476,7 @@ pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let llalign = cmp::min(llforeign_align, llrust_align);
debug!("llrust_size={}", llrust_size);
base::call_memcpy(bcx, llretptr_i8, llscratch_i8,
C_uint(ccx, llrust_size as uint), llalign as u32);
C_uint(ccx, llrust_size), llalign as u32);
}
}

View File

@ -56,9 +56,9 @@ pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef,
}
pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef,
size: u64, align: u64) -> Block<'blk, 'tcx> {
trans_exchange_free_dyn(cx, v, C_uint(cx.ccx(), size as uint),
C_uint(cx.ccx(), align as uint))
size: u64, align: u32) -> Block<'blk, 'tcx> {
trans_exchange_free_dyn(cx, v, C_uint(cx.ccx(), size),
C_uint(cx.ccx(), align))
}
pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef,
@ -301,8 +301,8 @@ fn size_and_align_of_dst(bcx: Block, t :ty::t, info: ValueRef) -> (ValueRef, Val
bcx.ty_to_string(t), bcx.val_to_string(info));
if ty::type_is_sized(bcx.tcx(), t) {
let sizing_type = sizing_type_of(bcx.ccx(), t);
let size = C_uint(bcx.ccx(), llsize_of_alloc(bcx.ccx(), sizing_type) as uint);
let align = C_uint(bcx.ccx(), align_of(bcx.ccx(), t) as uint);
let size = C_uint(bcx.ccx(), llsize_of_alloc(bcx.ccx(), sizing_type));
let align = C_uint(bcx.ccx(), align_of(bcx.ccx(), t));
return (size, align);
}
match ty::get(t).sty {
@ -313,8 +313,8 @@ fn size_and_align_of_dst(bcx: Block, t :ty::t, info: ValueRef) -> (ValueRef, Val
assert!(!ty::type_is_simd(bcx.tcx(), t));
let repr = adt::represent_type(ccx, t);
let sizing_type = adt::sizing_type_of(ccx, &*repr, true);
let sized_size = C_uint(ccx, llsize_of_alloc(ccx, sizing_type) as uint);
let sized_align = C_uint(ccx, llalign_of_min(ccx, sizing_type) as uint);
let sized_size = C_uint(ccx, llsize_of_alloc(ccx, sizing_type));
let sized_align = C_uint(ccx, llalign_of_min(ccx, sizing_type));
// Recurse to get the size of the dynamically sized field (must be
// the last field).
@ -344,7 +344,7 @@ fn size_and_align_of_dst(bcx: Block, t :ty::t, info: ValueRef) -> (ValueRef, Val
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
(Mul(bcx, info, C_uint(bcx.ccx(), unit_size as uint)), C_uint(bcx.ccx(), 8))
(Mul(bcx, info, C_uint(bcx.ccx(), unit_size)), C_uint(bcx.ccx(), 8u))
}
_ => bcx.sess().bug(format!("Unexpected unsized type, found {}",
bcx.ty_to_string(t)).as_slice())

View File

@ -239,16 +239,16 @@ pub fn trans_intrinsic_call<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, node: ast::N
(_, "size_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
let lltp_ty = type_of::type_of(ccx, tp_ty);
C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty) as uint)
C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty))
}
(_, "min_align_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
C_uint(ccx, type_of::align_of(ccx, tp_ty) as uint)
C_uint(ccx, type_of::align_of(ccx, tp_ty))
}
(_, "pref_align_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
let lltp_ty = type_of::type_of(ccx, tp_ty);
C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty) as uint)
C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
}
(_, "move_val_init") => {
// Create a datum reflecting the value being moved.

View File

@ -10,6 +10,8 @@
// Information concerning the machine representation of various types.
#![allow(non_camel_case_types)]
use llvm;
use llvm::{ValueRef};
use llvm::False;
@ -17,21 +19,25 @@ use middle::trans::common::*;
use middle::trans::type_::Type;
pub type llbits = u64;
pub type llsize = u64;
pub type llalign = u32;
// ______________________________________________________________________
// compute sizeof / alignof
// Returns the number of bytes clobbered by a Store to this type.
pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> u64 {
pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> llsize {
unsafe {
return llvm::LLVMStoreSizeOfType(cx.td().lltd, ty.to_ref()) as u64;
return llvm::LLVMStoreSizeOfType(cx.td().lltd, ty.to_ref());
}
}
// Returns the number of bytes between successive elements of type T in an
// array of T. This is the "ABI" size. It includes any ABI-mandated padding.
pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> u64 {
pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize {
unsafe {
return llvm::LLVMABISizeOfType(cx.td().lltd, ty.to_ref()) as u64;
return llvm::LLVMABISizeOfType(cx.td().lltd, ty.to_ref());
}
}
@ -43,9 +49,9 @@ pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> u64 {
// that LLVM *does* distinguish between e.g. a 1-bit value and an 8-bit value
// at the codegen level! In general you should prefer `llbitsize_of_real`
// below.
pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> llsize {
unsafe {
let nbits = llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64;
let nbits = llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref());
if nbits & 7 != 0 {
// Not an even number of bytes, spills into "next" byte.
1 + (nbits >> 3)
@ -56,9 +62,9 @@ pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
}
/// Returns the "real" size of the type in bits.
pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits {
unsafe {
llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64
llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref())
}
}
@ -71,7 +77,7 @@ pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
// there's no need for that contrivance. The instruction
// selection DAG generator would flatten that GEP(1) node into a
// constant of the type's alloc size, so let's save it some work.
return C_uint(cx, llsize_of_alloc(cx, ty) as uint);
return C_uint(cx, llsize_of_alloc(cx, ty));
}
// Returns the "default" size of t (see above), or 1 if the size would
@ -89,18 +95,18 @@ pub fn nonzero_llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
// The preferred alignment may be larger than the alignment used when
// packing the type into structs. This will be used for things like
// allocations inside a stack frame, which LLVM has a free hand in.
pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> u64 {
pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> llalign {
unsafe {
return llvm::LLVMPreferredAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
return llvm::LLVMPreferredAlignmentOfType(cx.td().lltd, ty.to_ref());
}
}
// Returns the minimum alignment of a type required by the platform.
// This is the alignment that will be used for struct fields, arrays,
// and similar ABI-mandated things.
pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> u64 {
pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> llalign {
unsafe {
return llvm::LLVMABIAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
return llvm::LLVMABIAlignmentOfType(cx.td().lltd, ty.to_ref());
}
}
@ -116,6 +122,7 @@ pub fn llalign_of(cx: &CrateContext, ty: Type) -> ValueRef {
pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: uint) -> u64 {
unsafe {
return llvm::LLVMOffsetOfElement(cx.td().lltd, struct_ty.to_ref(), element as u32) as u64;
return llvm::LLVMOffsetOfElement(cx.td().lltd, struct_ty.to_ref(),
element as u32);
}
}

View File

@ -648,9 +648,9 @@ pub fn get_vtable(bcx: Block,
let size_ty = sizing_type_of(ccx, trait_ref.self_ty());
let size = machine::llsize_of_alloc(ccx, size_ty);
let ll_size = C_uint(ccx, size as uint);
let ll_size = C_uint(ccx, size);
let align = align_of(ccx, trait_ref.self_ty());
let ll_align = C_uint(ccx, align as uint);
let ll_align = C_uint(ccx, align);
// Generate a destructor for the vtable.
let drop_glue = glue::get_drop_glue(ccx, box_ty);

View File

@ -74,10 +74,10 @@ pub fn make_drop_glue_unboxed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let unit_size = llsize_of_alloc(ccx, llty);
if unit_size != 0 {
let len = get_len(bcx, vptr);
let not_empty = ICmp(bcx, llvm::IntNE, len, C_uint(ccx, 0));
let not_empty = ICmp(bcx, llvm::IntNE, len, C_uint(ccx, 0u));
with_cond(bcx, not_empty, |bcx| {
let llalign = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
let size = Mul(bcx, C_uint(ccx, unit_size as uint), len);
let llalign = C_uint(ccx, machine::llalign_of_min(ccx, llty));
let size = Mul(bcx, C_uint(ccx, unit_size), len);
glue::trans_exchange_free_dyn(bcx, dataptr, size, llalign)
})
} else {
@ -461,7 +461,7 @@ pub fn iter_vec_loop<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let loop_counter = {
// i = 0
let i = alloca(loop_bcx, bcx.ccx().int_type(), "__i");
Store(loop_bcx, C_uint(bcx.ccx(), 0), i);
Store(loop_bcx, C_uint(bcx.ccx(), 0u), i);
Br(loop_bcx, cond_bcx.llbb);
i
@ -489,7 +489,7 @@ pub fn iter_vec_loop<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
{ // i += 1
let i = Load(inc_bcx, loop_counter);
let plusone = Add(inc_bcx, i, C_uint(bcx.ccx(), 1));
let plusone = Add(inc_bcx, i, C_uint(bcx.ccx(), 1u));
Store(inc_bcx, plusone, loop_counter);
Br(inc_bcx, cond_bcx.llbb);
@ -532,7 +532,7 @@ pub fn iter_vec_raw<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb);
let body_bcx = f(body_bcx, data_ptr, vt.unit_ty);
AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr,
[C_int(bcx.ccx(), 1)]),
[C_int(bcx.ccx(), 1i)]),
body_bcx.llbb);
Br(body_bcx, header_bcx.llbb);
next_bcx

View File

@ -24,6 +24,20 @@ use middle::trans::type_::Type;
use syntax::abi;
use syntax::ast;
use std::num::CheckedMul;
// LLVM doesn't like objects that are too big. Issue #17913
fn ensure_array_fits_in_address_space(ccx: &CrateContext,
llet: Type,
size: machine::llsize,
scapegoat: ty::t) {
let esz = machine::llsize_of_alloc(ccx, llet);
match esz.checked_mul(&size) {
Some(n) if n < ccx.max_obj_size() => {}
_ => { ccx.report_overbig_object(scapegoat) }
}
}
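
The multiply has to be checked rather than plain because with a large element type the product can wrap all the way back under the cap and look harmless. A tiny standalone demonstration (arbitrary numbers):

fn main() {
    let elem_size: u64 = 16;
    let len: u64 = 1 << 62;
    // An unchecked product wraps to 0, which would sail past a `< max_obj_size` test.
    assert_eq!(elem_size.wrapping_mul(len), 0);
    // checked_mul reports the overflow instead, which is what the helper above relies on.
    assert_eq!(elem_size.checked_mul(len), None);
}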
pub fn arg_is_indirect(ccx: &CrateContext, arg_ty: ty::t) -> bool {
!type_is_immediate(ccx, arg_ty)
}
@ -186,7 +200,10 @@ pub fn sizing_type_of(cx: &CrateContext, t: ty::t) -> Type {
ty::ty_closure(..) => Type::struct_(cx, [Type::i8p(cx), Type::i8p(cx)], false),
ty::ty_vec(ty, Some(size)) => {
Type::array(&sizing_type_of(cx, ty), size as u64)
let llty = sizing_type_of(cx, ty);
let size = size as u64;
ensure_array_fits_in_address_space(cx, llty, size, t);
Type::array(&llty, size)
}
ty::ty_tup(..) | ty::ty_enum(..) | ty::ty_unboxed_closure(..) => {
@ -196,9 +213,10 @@ pub fn sizing_type_of(cx: &CrateContext, t: ty::t) -> Type {
ty::ty_struct(..) => {
if ty::type_is_simd(cx.tcx(), t) {
let et = ty::simd_type(cx.tcx(), t);
let n = ty::simd_size(cx.tcx(), t);
Type::vector(&type_of(cx, et), n as u64)
let llet = type_of(cx, ty::simd_type(cx.tcx(), t));
let n = ty::simd_size(cx.tcx(), t) as u64;
ensure_array_fits_in_address_space(cx, llet, n, t);
Type::vector(&llet, n)
} else {
let repr = adt::represent_type(cx, t);
adt::sizing_type_of(cx, &*repr, false)
@ -282,21 +300,21 @@ pub fn type_of(cx: &CrateContext, t: ty::t) -> Type {
ty::ty_uint(t) => Type::uint_from_ty(cx, t),
ty::ty_float(t) => Type::float_from_ty(cx, t),
ty::ty_enum(did, ref substs) => {
// Only create the named struct, but don't fill it in. We
// fill it in *after* placing it into the type cache. This
// avoids creating more than one copy of the enum when one
// of the enum's variants refers to the enum itself.
let repr = adt::represent_type(cx, t);
let tps = substs.types.get_slice(subst::TypeSpace);
let name = llvm_type_name(cx, an_enum, did, tps);
adt::incomplete_type_of(cx, &*repr, name.as_slice())
// Only create the named struct, but don't fill it in. We
// fill it in *after* placing it into the type cache. This
// avoids creating more than one copy of the enum when one
// of the enum's variants refers to the enum itself.
let repr = adt::represent_type(cx, t);
let tps = substs.types.get_slice(subst::TypeSpace);
let name = llvm_type_name(cx, an_enum, did, tps);
adt::incomplete_type_of(cx, &*repr, name.as_slice())
}
ty::ty_unboxed_closure(did, _) => {
// Only create the named struct, but don't fill it in. We
// fill it in *after* placing it into the type cache.
let repr = adt::represent_type(cx, t);
let name = llvm_type_name(cx, an_unboxed_closure, did, []);
adt::incomplete_type_of(cx, &*repr, name.as_slice())
// Only create the named struct, but don't fill it in. We
// fill it in *after* placing it into the type cache.
let repr = adt::represent_type(cx, t);
let name = llvm_type_name(cx, an_unboxed_closure, did, []);
adt::incomplete_type_of(cx, &*repr, name.as_slice())
}
ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ty, ..}) | ty::ty_ptr(ty::mt{ty, ..}) => {
@ -315,8 +333,11 @@ pub fn type_of(cx: &CrateContext, t: ty::t) -> Type {
}
}
ty::ty_vec(ty, Some(n)) => {
Type::array(&type_of(cx, ty), n as u64)
ty::ty_vec(ty, Some(size)) => {
let size = size as u64;
let llty = type_of(cx, ty);
ensure_array_fits_in_address_space(cx, llty, size, t);
Type::array(&llty, size)
}
ty::ty_vec(ty, None) => {
type_of(cx, ty)
@ -341,9 +362,10 @@ pub fn type_of(cx: &CrateContext, t: ty::t) -> Type {
}
ty::ty_struct(did, ref substs) => {
if ty::type_is_simd(cx.tcx(), t) {
let et = ty::simd_type(cx.tcx(), t);
let n = ty::simd_size(cx.tcx(), t);
Type::vector(&type_of(cx, et), n as u64)
let llet = type_of(cx, ty::simd_type(cx.tcx(), t));
let n = ty::simd_size(cx.tcx(), t) as u64;
ensure_array_fits_in_address_space(cx, llet, n, t);
Type::vector(&llet, n)
} else {
// Only create the named struct, but don't fill it in. We fill it
// in *after* placing it into the type cache. This prevents
@ -398,7 +420,7 @@ pub fn type_of(cx: &CrateContext, t: ty::t) -> Type {
return llty;
}
pub fn align_of(cx: &CrateContext, t: ty::t) -> u64 {
pub fn align_of(cx: &CrateContext, t: ty::t) -> machine::llalign {
let llty = sizing_type_of(cx, t);
machine::llalign_of_min(cx, llty)
}

View File

@ -1458,7 +1458,7 @@ extern {
/** Distance between successive elements in an array of T.
Includes ABI padding. */
pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint;
pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong;
/** Returns the preferred alignment of a type. */
pub fn LLVMPreferredAlignmentOfType(TD: TargetDataRef, Ty: TypeRef)

View File

@ -0,0 +1,15 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: too big for the current
fn main() {
let fat : [u8, ..(1<<61)+(1<<31)] = [0, ..(1<<61)+(1<<31)];
}

View File

@ -0,0 +1,20 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: ..1518599999
fn generic<T: Copy>(t: T) {
let s: [T, ..1518600000] = [t, ..1518600000];
}
fn main() {
let x: [u8, ..1518599999] = [0, ..1518599999];
generic::<[u8, ..1518599999]>(x);
}

View File

@ -0,0 +1,17 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: Option
// FIXME: work properly with higher limits
fn main() {
let big: Option<[u32, ..(1<<29)-1]> = None;
}

View File

@ -0,0 +1,54 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: too big for the current
struct S32<T> {
v0: T,
v1: T,
v2: T,
v3: T,
v4: T,
v5: T,
v6: T,
v7: T,
v8: T,
u9: T,
v10: T,
v11: T,
v12: T,
v13: T,
v14: T,
v15: T,
v16: T,
v17: T,
v18: T,
v19: T,
v20: T,
v21: T,
v22: T,
v23: T,
v24: T,
u25: T,
v26: T,
v27: T,
v28: T,
v29: T,
v30: T,
v31: T,
}
struct S1k<T> { val: S32<S32<T>> }
struct S1M<T> { val: S1k<S1k<T>> }
fn main() {
let fat: Option<S1M<S1M<S1M<u32>>>> = None;
}

View File

@ -0,0 +1,25 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: too big for the current architecture
#[cfg(target_word_size = "64")]
fn main() {
let n = 0u;
let a = box [&n,..0xF000000000000000u];
println!("{}", a[0xFFFFFFu]);
}
#[cfg(target_word_size = "32")]
fn main() {
let n = 0u;
let a = box [&n,..0xFFFFFFFFu];
println!("{}", a[0xFFFFFFu]);
}

View File

@ -20,7 +20,8 @@ pub fn main() {
assert_eq!(size_of::<[u8, ..4]>(), 4u);
// FIXME #10183
if cfg!(target_word_size = "64") {
assert_eq!(size_of::<[u8, ..(1 << 32)]>(), (1u << 32));
}
// FIXME #18069
//if cfg!(target_word_size = "64") {
// assert_eq!(size_of::<[u8, ..(1 << 32)]>(), (1u << 32));
//}
}