Auto merge of #29781 - arielb1:mir-casts, r=nikomatsakis

I am also planning to implement casts, but I will probably do it in a separate PR.

r? @nikomatsakis
bors 2015-11-14 00:10:28 +00:00
commit bdfb13518e
16 changed files with 998 additions and 289 deletions


@ -386,14 +386,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
};
}
if let Some(target) = adj.unsize {
expr = Expr {
temp_lifetime: temp_lifetime,
ty: target,
span: self.span,
kind: ExprKind::Unsize { source: expr.to_ref() },
};
} else if let Some(autoref) = adj.autoref {
if let Some(autoref) = adj.autoref {
let adjusted_ty = expr.ty.adjust_for_autoref(cx.tcx, Some(autoref));
match autoref {
ty::adjustment::AutoPtr(r, m) => {
@ -433,6 +426,15 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
}
}
}
if let Some(target) = adj.unsize {
expr = Expr {
temp_lifetime: temp_lifetime,
ty: target,
span: self.span,
kind: ExprKind::Unsize { source: expr.to_ref() },
};
}
}
}
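
This hunk moves the unsize step so it is applied after any autoref, matching the order in which a source-level coercion proceeds. A minimal standalone illustration of that order (the function name is mine, not from the commit):

fn takes_slice(_: &[u8]) {}

fn main() {
    let arr = [1u8, 2, 3];
    // `&arr` first autorefs to `&[u8; 3]`, and only then unsizes to `&[u8]`.
    takes_slice(&arr);
}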


@ -16,6 +16,7 @@
use repr::*;
use rustc::middle::subst::Substs;
use rustc::middle::ty::{self, AdtDef, Ty};
use rustc_front::hir;
#[derive(Copy, Clone, Debug)]
pub enum LvalueTy<'tcx> {
@ -102,6 +103,31 @@ impl<'tcx> Mir<'tcx> {
}
}
pub fn binop_ty(&self,
tcx: &ty::ctxt<'tcx>,
op: BinOp,
lhs_ty: Ty<'tcx>,
rhs_ty: Ty<'tcx>)
-> Ty<'tcx>
{
// FIXME: handle SIMD correctly
match op {
BinOp::Add | BinOp::Sub | BinOp::Mul | BinOp::Div | BinOp::Rem |
BinOp::BitXor | BinOp::BitAnd | BinOp::BitOr => {
// these should be integers or floats of the same size.
assert_eq!(lhs_ty, rhs_ty);
lhs_ty
}
BinOp::Shl | BinOp::Shr => {
lhs_ty // lhs_ty can be != rhs_ty
}
BinOp::Eq | BinOp::Lt | BinOp::Le |
BinOp::Ne | BinOp::Ge | BinOp::Gt => {
tcx.types.bool
}
}
}
pub fn lvalue_ty(&self,
tcx: &ty::ctxt<'tcx>,
lvalue: &Lvalue<'tcx>)
@ -123,3 +149,40 @@ impl<'tcx> Mir<'tcx> {
}
}
}
impl BorrowKind {
pub fn to_mutbl_lossy(self) -> hir::Mutability {
match self {
BorrowKind::Mut => hir::MutMutable,
BorrowKind::Shared => hir::MutImmutable,
// We have no type corresponding to a unique imm borrow, so
// use `&mut`. It gives all the capabilities of an `&uniq`
// and hence is a safe "over approximation".
BorrowKind::Unique => hir::MutMutable,
}
}
}
impl BinOp {
pub fn to_hir_binop(self) -> hir::BinOp_ {
match self {
BinOp::Add => hir::BinOp_::BiAdd,
BinOp::Sub => hir::BinOp_::BiSub,
BinOp::Mul => hir::BinOp_::BiMul,
BinOp::Div => hir::BinOp_::BiDiv,
BinOp::Rem => hir::BinOp_::BiRem,
BinOp::BitXor => hir::BinOp_::BiBitXor,
BinOp::BitAnd => hir::BinOp_::BiBitAnd,
BinOp::BitOr => hir::BinOp_::BiBitOr,
BinOp::Shl => hir::BinOp_::BiShl,
BinOp::Shr => hir::BinOp_::BiShr,
BinOp::Eq => hir::BinOp_::BiEq,
BinOp::Ne => hir::BinOp_::BiNe,
BinOp::Lt => hir::BinOp_::BiLt,
BinOp::Gt => hir::BinOp_::BiGt,
BinOp::Le => hir::BinOp_::BiLe,
BinOp::Ge => hir::BinOp_::BiGe
}
}
}
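
`binop_ty` encodes the typing rules MIR assumes for built-in binary operators: arithmetic and bitwise operators require equal operand types and return that type, shifts keep the left operand's type (the right may differ), and comparisons always produce `bool`. A small sketch of the same rules at the source level:

fn main() {
    let x: u64 = 1;
    let amount: u8 = 3;
    let shifted: u64 = x << amount; // Shl/Shr: result keeps lhs type; rhs type may differ
    let cmp: bool = 1i32 < 2i32;    // Eq/Lt/Le/Ne/Ge/Gt: always bool
    assert_eq!(shifted, 8);
    assert!(cmp);
}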


@ -55,7 +55,7 @@ use trans::builder::{Builder, noname};
use trans::callee;
use trans::cleanup::{self, CleanupMethods, DropHint};
use trans::closure;
use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_integral};
use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral};
use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
use trans::common::{CrateContext, DropFlagHintsMap, Field, FunctionContext};
use trans::common::{Result, NodeIdAndSpan, VariantInfo};
@ -312,6 +312,49 @@ pub fn bin_op_to_fcmp_predicate(ccx: &CrateContext, op: hir::BinOp_)
}
}
pub fn compare_fat_ptrs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs_addr: ValueRef,
lhs_extra: ValueRef,
rhs_addr: ValueRef,
rhs_extra: ValueRef,
_t: Ty<'tcx>,
op: hir::BinOp_,
debug_loc: DebugLoc)
-> ValueRef {
match op {
hir::BiEq => {
let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
let extra_eq = ICmp(bcx, llvm::IntEQ, lhs_extra, rhs_extra, debug_loc);
And(bcx, addr_eq, extra_eq, debug_loc)
}
hir::BiNe => {
let addr_eq = ICmp(bcx, llvm::IntNE, lhs_addr, rhs_addr, debug_loc);
let extra_eq = ICmp(bcx, llvm::IntNE, lhs_extra, rhs_extra, debug_loc);
Or(bcx, addr_eq, extra_eq, debug_loc)
}
hir::BiLe | hir::BiLt | hir::BiGe | hir::BiGt => {
// a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
let (op, strict_op) = match op {
hir::BiLt => (llvm::IntULT, llvm::IntULT),
hir::BiLe => (llvm::IntULE, llvm::IntULT),
hir::BiGt => (llvm::IntUGT, llvm::IntUGT),
hir::BiGe => (llvm::IntUGE, llvm::IntUGT),
_ => unreachable!()
};
let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
let extra_op = ICmp(bcx, op, lhs_extra, rhs_extra, debug_loc);
let addr_eq_extra_op = And(bcx, addr_eq, extra_op, debug_loc);
let addr_strict = ICmp(bcx, strict_op, lhs_addr, rhs_addr, debug_loc);
Or(bcx, addr_strict, addr_eq_extra_op, debug_loc)
}
_ => {
bcx.tcx().sess.bug("unexpected fat ptr binop");
}
}
}
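
`compare_fat_ptrs` lowers the ordering operators as a lexicographic comparison of the `(addr, extra)` pair. A plain-Rust model of the `<` case under the same unsigned ordering (a sketch, not code from the commit):

fn fat_lt(lhs: (usize, usize), rhs: (usize, usize)) -> bool {
    // a < b  ~  a.0 < b.0 || (a.0 == b.0 && a.1 < b.1)
    lhs.0 < rhs.0 || (lhs.0 == rhs.0 && lhs.1 < rhs.1)
}

fn main() {
    assert!(fat_lt((1, 9), (2, 0))); // the address decides first
    assert!(fat_lt((1, 0), (1, 1))); // the extra word breaks ties
}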
pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs: ValueRef,
rhs: ValueRef,
@ -336,6 +379,17 @@ pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ty::TyRawPtr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => {
ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
}
ty::TyRawPtr(_) => {
let lhs_addr = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_ADDR]));
let lhs_extra = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_EXTRA]));
let rhs_addr = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_ADDR]));
let rhs_extra = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_EXTRA]));
compare_fat_ptrs(bcx,
lhs_addr, lhs_extra,
rhs_addr, rhs_extra,
t, op, debug_loc)
}
ty::TyInt(_) => {
ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, true), lhs, rhs, debug_loc)
}
@ -523,6 +577,129 @@ pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
return cx;
}
/// Retrieve the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
source: Ty<'tcx>,
target: Ty<'tcx>,
old_info: Option<ValueRef>,
param_substs: &'tcx Substs<'tcx>)
-> ValueRef {
let (source, target) = ccx.tcx().struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
(&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len),
(&ty::TyTrait(_), &ty::TyTrait(_)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never actually require a
// change to the vtable.
old_info.expect("unsized_info: missing old info for trait upcast")
}
(_, &ty::TyTrait(box ty::TraitTy { ref principal, .. })) => {
// Note that we preserve binding levels here:
let substs = principal.0.substs.with_self_ty(source).erase_regions();
let substs = ccx.tcx().mk_substs(substs);
let trait_ref = ty::Binder(ty::TraitRef { def_id: principal.def_id(),
substs: substs });
consts::ptrcast(meth::get_vtable(ccx, trait_ref, param_substs),
Type::vtable_ptr(ccx))
}
_ => ccx.sess().bug(&format!("unsized_info: invalid unsizing {:?} -> {:?}",
source,
target))
}
}
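
The value `unsized_info` computes becomes the fat pointer's extra word: a constant length for array-to-slice unsizing (the `C_uint` case), a vtable pointer for a trait object. At the source level (written with modern `dyn` syntax for clarity):

use std::fmt::Debug;

fn main() {
    let xs = [0u8; 3];
    let s: &[u8] = &xs;      // extra word = the length 3
    assert_eq!(s.len(), 3);
    let d: &dyn Debug = &xs; // extra word = pointer to the Debug vtable
    println!("{:?}", d);
}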
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
src: ValueRef,
src_ty: Ty<'tcx>,
dst_ty: Ty<'tcx>)
-> (ValueRef, ValueRef) {
debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
match (&src_ty.sty, &dst_ty.sty) {
(&ty::TyBox(a), &ty::TyBox(b)) |
(&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
&ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
(&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
&ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
(&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
&ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert!(common::type_is_sized(bcx.tcx(), a));
let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to();
(PointerCast(bcx, src, ptr_ty),
unsized_info(bcx.ccx(), a, b, None, bcx.fcx.param_substs))
}
_ => bcx.sess().bug("unsize_thin_ptr: called on bad types")
}
}
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
src: ValueRef,
src_ty: Ty<'tcx>,
dst: ValueRef,
dst_ty: Ty<'tcx>) {
match (&src_ty.sty, &dst_ty.sty) {
(&ty::TyBox(..), &ty::TyBox(..)) |
(&ty::TyRef(..), &ty::TyRef(..)) |
(&ty::TyRef(..), &ty::TyRawPtr(..)) |
(&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => {
let (base, info) = if common::type_is_fat_ptr(bcx.tcx(), src_ty) {
// fat-ptr to fat-ptr unsize preserves the vtable
load_fat_ptr(bcx, src, src_ty)
} else {
let base = load_ty(bcx, src, src_ty);
unsize_thin_ptr(bcx, base, src_ty, dst_ty)
};
store_fat_ptr(bcx, base, info, dst, dst_ty);
}
// This can be extended to enums and tuples in the future.
// (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
(&ty::TyStruct(def_a, _), &ty::TyStruct(def_b, _)) => {
assert_eq!(def_a, def_b);
let src_repr = adt::represent_type(bcx.ccx(), src_ty);
let src_fields = match &*src_repr {
&adt::Repr::Univariant(ref s, _) => &s.fields,
_ => bcx.sess().bug("struct has non-univariant repr")
};
let dst_repr = adt::represent_type(bcx.ccx(), dst_ty);
let dst_fields = match &*dst_repr {
&adt::Repr::Univariant(ref s, _) => &s.fields,
_ => bcx.sess().bug("struct has non-univariant repr")
};
let iter = src_fields.iter().zip(dst_fields).enumerate();
for (i, (src_fty, dst_fty)) in iter {
if type_is_zero_size(bcx.ccx(), dst_fty) { continue; }
let src_f = adt::trans_field_ptr(bcx, &src_repr, src, 0, i);
let dst_f = adt::trans_field_ptr(bcx, &dst_repr, dst, 0, i);
if src_fty == dst_fty {
memcpy_ty(bcx, dst_f, src_f, src_fty);
} else {
coerce_unsized_into(
bcx,
src_f, src_fty,
dst_f, dst_fty
);
}
}
}
_ => bcx.sess().bug(&format!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
src_ty,
dst_ty))
}
}
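
In the struct case, fields are copied pairwise and only the field whose type actually changes is coerced recursively; the `coerce_ptr_wrapper` run-pass test added later in this commit exercises that path. A condensed nightly-only version of the same shape:

#![feature(coerce_unsized, unsize)]
use std::marker::Unsize;
use std::ops::CoerceUnsized;

struct Ptr<'a, T: 'a + ?Sized>(u32, &'a T);
impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Ptr<'a, U>> for Ptr<'a, T> {}

fn main() {
    let arr = [1u8, 2, 3];
    // The u32 field is copied as-is; the &[u8; 3] field unsizes to &[u8].
    let p: Ptr<[u8]> = Ptr(7, &arr);
    assert_eq!(p.0, 7);
    assert_eq!(p.1.len(), 3);
}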
pub fn cast_shift_expr_rhs(cx: Block,
op: hir::BinOp_,
lhs: ValueRef,
@ -828,6 +1005,10 @@ pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t
return;
}
debug!("store_ty: {} : {:?} <- {}",
cx.val_to_string(dst), t,
cx.val_to_string(v));
if common::type_is_fat_ptr(cx.tcx(), t) {
Store(cx, ExtractValue(cx, v, abi::FAT_PTR_ADDR), expr::get_dataptr(cx, dst));
Store(cx, ExtractValue(cx, v, abi::FAT_PTR_EXTRA), expr::get_meta(cx, dst));
@ -839,6 +1020,25 @@ pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t
}
}
pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
data: ValueRef,
extra: ValueRef,
dst: ValueRef,
_ty: Ty<'tcx>) {
// FIXME: emit metadata
Store(cx, data, expr::get_dataptr(cx, dst));
Store(cx, extra, expr::get_meta(cx, dst));
}
pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
src: ValueRef,
_ty: Ty<'tcx>) -> (ValueRef, ValueRef)
{
// FIXME: emit metadata
(Load(cx, expr::get_dataptr(cx, src)),
Load(cx, expr::get_meta(cx, src)))
}
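
`store_fat_ptr` and `load_fat_ptr` treat a fat pointer as two machine words at `FAT_PTR_ADDR` and `FAT_PTR_EXTRA`. The layout they rely on is easy to check directly:

use std::mem::size_of;

fn main() {
    // A fat pointer is (data, extra): twice the size of a thin pointer.
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<&u8>());
    assert_eq!(size_of::<&u8>(), size_of::<usize>());
}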
pub fn from_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
if ty.is_bool() {
ZExt(bcx, val, Type::i8(bcx.ccx()))


@ -1223,4 +1223,4 @@ pub fn get_static_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
} else {
base::get_extern_const(ccx, did, ty)
}
}
}


@ -410,7 +410,7 @@ pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
.expect("consts: unsizing got non-pointer target type").ty;
let ptr_ty = type_of::in_memory_type_of(cx, unsized_ty).ptr_to();
let base = ptrcast(base, ptr_ty);
let info = expr::unsized_info(cx, pointee_ty, unsized_ty,
let info = base::unsized_info(cx, pointee_ty, unsized_ty,
old_info, param_substs);
if old_info.is_none() {


@ -326,42 +326,6 @@ pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
}
/// Retrieve the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
source: Ty<'tcx>,
target: Ty<'tcx>,
old_info: Option<ValueRef>,
param_substs: &'tcx Substs<'tcx>)
-> ValueRef {
let (source, target) = ccx.tcx().struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
(&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len),
(&ty::TyTrait(_), &ty::TyTrait(_)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never actually require a
// change to the vtable.
old_info.expect("unsized_info: missing old info for trait upcast")
}
(_, &ty::TyTrait(box ty::TraitTy { ref principal, .. })) => {
// Note that we preserve binding levels here:
let substs = principal.0.substs.with_self_ty(source).erase_regions();
let substs = ccx.tcx().mk_substs(substs);
let trait_ref = ty::Binder(ty::TraitRef { def_id: principal.def_id(),
substs: substs });
consts::ptrcast(meth::get_vtable(ccx, trait_ref, param_substs),
Type::vtable_ptr(ccx))
}
_ => ccx.sess().bug(&format!("unsized_info: invalid unsizing {:?} -> {:?}",
source,
target))
}
}
fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &hir::Expr) -> bool {
let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
@ -1725,58 +1689,6 @@ fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
}
}
fn trans_fat_ptr_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
binop_expr: &hir::Expr,
binop_ty: Ty<'tcx>,
op: hir::BinOp,
lhs: Datum<'tcx, Rvalue>,
rhs: Datum<'tcx, Rvalue>)
-> DatumBlock<'blk, 'tcx, Expr>
{
let debug_loc = binop_expr.debug_loc();
let lhs_addr = Load(bcx, GEPi(bcx, lhs.val, &[0, abi::FAT_PTR_ADDR]));
let lhs_extra = Load(bcx, GEPi(bcx, lhs.val, &[0, abi::FAT_PTR_EXTRA]));
let rhs_addr = Load(bcx, GEPi(bcx, rhs.val, &[0, abi::FAT_PTR_ADDR]));
let rhs_extra = Load(bcx, GEPi(bcx, rhs.val, &[0, abi::FAT_PTR_EXTRA]));
let val = match op.node {
hir::BiEq => {
let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
let extra_eq = ICmp(bcx, llvm::IntEQ, lhs_extra, rhs_extra, debug_loc);
And(bcx, addr_eq, extra_eq, debug_loc)
}
hir::BiNe => {
let addr_eq = ICmp(bcx, llvm::IntNE, lhs_addr, rhs_addr, debug_loc);
let extra_eq = ICmp(bcx, llvm::IntNE, lhs_extra, rhs_extra, debug_loc);
Or(bcx, addr_eq, extra_eq, debug_loc)
}
hir::BiLe | hir::BiLt | hir::BiGe | hir::BiGt => {
// a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
let (op, strict_op) = match op.node {
hir::BiLt => (llvm::IntULT, llvm::IntULT),
hir::BiLe => (llvm::IntULE, llvm::IntULT),
hir::BiGt => (llvm::IntUGT, llvm::IntUGT),
hir::BiGe => (llvm::IntUGE, llvm::IntUGT),
_ => unreachable!()
};
let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
let extra_op = ICmp(bcx, op, lhs_extra, rhs_extra, debug_loc);
let addr_eq_extra_op = And(bcx, addr_eq, extra_op, debug_loc);
let addr_strict = ICmp(bcx, strict_op, lhs_addr, rhs_addr, debug_loc);
Or(bcx, addr_strict, addr_eq_extra_op, debug_loc)
}
_ => {
bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
}
};
immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
}
fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
binop_expr: &hir::Expr,
binop_ty: Ty<'tcx>,
@ -2005,7 +1917,15 @@ fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
"built-in binary operators on fat pointers are homogeneous");
trans_fat_ptr_binop(bcx, expr, binop_ty, op, lhs, rhs)
assert_eq!(binop_ty, bcx.tcx().types.bool);
let val = base::compare_scalar_types(
bcx,
lhs.val,
rhs.val,
lhs.ty,
op.node,
expr.debug_loc());
immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
} else {
assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
"built-in binary operators on fat pointers are homogeneous");


@ -27,15 +27,15 @@ pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>,
for (index, temp_decl) in mir.temp_decls.iter().enumerate() {
let ty = bcx.monomorphize(&temp_decl.ty);
debug!("temp {:?} has type {:?}", index, ty);
if
ty.is_scalar() ||
if ty.is_scalar() ||
ty.is_unique() ||
(ty.is_region_ptr() && !common::type_is_fat_ptr(bcx.tcx(), ty)) ||
ty.is_region_ptr() ||
ty.is_simd()
{
// These sorts of types are immediates that we can store
// in a ValueRef without an alloca.
assert!(common::type_is_immediate(bcx.ccx(), ty));
assert!(common::type_is_immediate(bcx.ccx(), ty) ||
common::type_is_fat_ptr(bcx.tcx(), ty));
} else {
// These sorts of types require an alloca. Note that
// type_is_immediate() may *still* be true, particularly


@ -43,7 +43,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let cond = self.trans_operand(bcx, cond);
let lltrue = self.llblock(true_bb);
let llfalse = self.llblock(false_bb);
build::CondBr(bcx, cond.llval, lltrue, llfalse, DebugLoc::None);
build::CondBr(bcx, cond.immediate(), lltrue, llfalse, DebugLoc::None);
}
mir::Terminator::Switch { .. } => {
@ -55,7 +55,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let discr = build::Load(bcx, self.trans_lvalue(bcx, discr).llval);
let switch = build::Switch(bcx, discr, self.llblock(*otherwise), values.len());
for (value, target) in values.iter().zip(targets) {
let llval = self.trans_constval(bcx, value, switch_ty);
let llval = self.trans_constval(bcx, value, switch_ty).immediate();
let llbb = self.llblock(*target);
build::AddCase(switch, llval, llbb)
}


@ -8,14 +8,15 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::ValueRef;
use middle::ty::Ty;
use rustc::middle::const_eval::ConstVal;
use rustc_mir::repr as mir;
use trans::consts::{self, TrueConst};
use trans::common::{self, Block};
use trans::common::{C_bool, C_bytes, C_floating_f64, C_integral, C_str_slice};
use trans::type_of;
use super::operand::OperandRef;
use super::MirContext;
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
@ -23,20 +24,25 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
bcx: Block<'bcx, 'tcx>,
cv: &ConstVal,
ty: Ty<'tcx>)
-> ValueRef
-> OperandRef<'tcx>
{
use super::operand::OperandValue::{Ref, Immediate};
let ccx = bcx.ccx();
let llty = type_of::type_of(ccx, ty);
match *cv {
ConstVal::Float(v) => common::C_floating_f64(v, llty),
ConstVal::Bool(v) => common::C_bool(ccx, v),
ConstVal::Int(v) => common::C_integral(llty, v as u64, true),
ConstVal::Uint(v) => common::C_integral(llty, v, false),
ConstVal::Str(ref v) => common::C_str_slice(ccx, v.clone()),
ConstVal::ByteStr(ref v) => consts::addr_of(ccx,
common::C_bytes(ccx, v),
1,
"byte_str"),
let val = match *cv {
ConstVal::Float(v) => Immediate(C_floating_f64(v, llty)),
ConstVal::Bool(v) => Immediate(C_bool(ccx, v)),
ConstVal::Int(v) => Immediate(C_integral(llty, v as u64, true)),
ConstVal::Uint(v) => Immediate(C_integral(llty, v, false)),
ConstVal::Str(ref v) => Immediate(C_str_slice(ccx, v.clone())),
ConstVal::ByteStr(ref v) => {
Immediate(consts::addr_of(ccx,
C_bytes(ccx, v),
1,
"byte_str"))
}
ConstVal::Struct(id) | ConstVal::Tuple(id) => {
let expr = bcx.tcx().map.expect_expr(id);
let (llval, _) = match consts::const_expr(ccx,
@ -47,18 +53,26 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
Ok(v) => v,
Err(_) => panic!("constant eval failure"),
};
llval
if common::type_is_immediate(bcx.ccx(), ty) {
Immediate(llval)
} else {
Ref(llval)
}
}
ConstVal::Function(_) => {
unimplemented!()
}
};
OperandRef {
ty: ty,
val: val
}
}
pub fn trans_constant(&mut self,
bcx: Block<'bcx, 'tcx>,
constant: &mir::Constant<'tcx>)
-> ValueRef
-> OperandRef<'tcx>
{
let constant_ty = bcx.monomorphize(&constant.ty);
match constant.literal {


@ -9,7 +9,7 @@
// except according to those terms.
use llvm::ValueRef;
use rustc::middle::ty::Ty;
use rustc::middle::ty::{self, Ty};
use rustc_mir::repr as mir;
use rustc_mir::tcx::LvalueTy;
use trans::adt;
@ -18,7 +18,8 @@ use trans::build;
use trans::common::{self, Block};
use trans::debuginfo::DebugLoc;
use trans::machine;
use trans::tvec;
use std::ptr;
use super::{MirContext, TempRef};
@ -27,13 +28,16 @@ pub struct LvalueRef<'tcx> {
/// Pointer to the contents of the lvalue
pub llval: ValueRef,
/// This lvalue's extra data if it is unsized, or null
pub llextra: ValueRef,
/// Monomorphized type of this lvalue, including variant information
pub ty: LvalueTy<'tcx>,
}
impl<'tcx> LvalueRef<'tcx> {
pub fn new(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> {
LvalueRef { llval: llval, ty: lvalue_ty }
pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> {
LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
}
pub fn alloca<'bcx>(bcx: Block<'bcx, 'tcx>,
@ -42,11 +46,25 @@ impl<'tcx> LvalueRef<'tcx> {
-> LvalueRef<'tcx>
{
let lltemp = base::alloc_ty(bcx, ty, name);
LvalueRef::new(lltemp, LvalueTy::from_ty(ty))
LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
}
}
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn lvalue_len(&mut self,
bcx: Block<'bcx, 'tcx>,
lvalue: LvalueRef<'tcx>)
-> ValueRef {
match lvalue.ty.to_ty(bcx.tcx()).sty {
ty::TyArray(_, n) => common::C_uint(bcx.ccx(), n),
ty::TySlice(_) | ty::TyStr => {
assert!(lvalue.llextra != ptr::null_mut());
lvalue.llextra
}
_ => bcx.sess().bug("unexpected type in lvalue_len"),
}
}
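
`lvalue_len` reflects the two ways a length is known: statically for arrays, and from the fat pointer's extra word for slices. The same distinction at the source level:

fn main() {
    let a = [0u8; 4];
    let s: &[u8] = &a[..2];
    assert_eq!(a.len(), 4); // TyArray: a compile-time constant, no llextra needed
    assert_eq!(s.len(), 2); // TySlice: read from the pointer's extra word at runtime
}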
pub fn trans_lvalue(&mut self,
bcx: Block<'bcx, 'tcx>,
lvalue: &mir::Lvalue<'tcx>)
@ -67,20 +85,27 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Lvalue::Arg(index) => self.args[index as usize],
mir::Lvalue::Static(def_id) => {
let const_ty = self.mir.lvalue_ty(tcx, lvalue);
LvalueRef::new(common::get_static_val(ccx, def_id, const_ty.to_ty(tcx)), const_ty)
LvalueRef::new_sized(
common::get_static_val(ccx, def_id, const_ty.to_ty(tcx)),
const_ty)
},
mir::Lvalue::ReturnPointer => {
let return_ty = bcx.monomorphize(&self.mir.return_ty);
let llval = fcx.get_ret_slot(bcx, return_ty, "return");
LvalueRef::new(llval, LvalueTy::from_ty(return_ty.unwrap()))
LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty.unwrap()))
}
mir::Lvalue::Projection(ref projection) => {
let tr_base = self.trans_lvalue(bcx, &projection.base);
let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
let llprojected = match projection.elem {
let (llprojected, llextra) = match projection.elem {
mir::ProjectionElem::Deref => {
let base_ty = tr_base.ty.to_ty(tcx);
base::load_ty(bcx, tr_base.llval, base_ty)
if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
(base::load_ty(bcx, tr_base.llval, base_ty),
ptr::null_mut())
} else {
base::load_fat_ptr(bcx, tr_base.llval, base_ty)
}
}
mir::ProjectionElem::Field(ref field) => {
let base_ty = tr_base.ty.to_ty(tcx);
@ -90,44 +115,44 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v,
};
let discr = discr as u64;
adt::trans_field_ptr(bcx, &base_repr, tr_base.llval, discr, field.index())
(adt::trans_field_ptr(bcx, &base_repr, tr_base.llval, discr, field.index()),
if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
ptr::null_mut()
} else {
tr_base.llextra
})
}
mir::ProjectionElem::Index(ref index) => {
let base_ty = tr_base.ty.to_ty(tcx);
let index = self.trans_operand(bcx, index);
let llindex = self.prepare_index(bcx, index.llval);
let (llbase, _) = tvec::get_base_and_len(bcx, tr_base.llval, base_ty);
build::InBoundsGEP(bcx, llbase, &[llindex])
let llindex = self.prepare_index(bcx, index.immediate());
(build::InBoundsGEP(bcx, tr_base.llval, &[llindex]),
ptr::null_mut())
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: false,
min_length: _ } => {
let base_ty = tr_base.ty.to_ty(tcx);
let lloffset = common::C_u32(bcx.ccx(), offset);
let llindex = self.prepare_index(bcx, lloffset);
let (llbase, _) = tvec::get_base_and_len(bcx,
tr_base.llval,
base_ty);
build::InBoundsGEP(bcx, llbase, &[llindex])
(build::InBoundsGEP(bcx, tr_base.llval, &[llindex]),
ptr::null_mut())
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: true,
min_length: _ } => {
let lloffset = common::C_u32(bcx.ccx(), offset);
let base_ty = tr_base.ty.to_ty(tcx);
let (llbase, lllen) = tvec::get_base_and_len(bcx,
tr_base.llval,
base_ty);
let lllen = self.lvalue_len(bcx, tr_base);
let llindex = build::Sub(bcx, lllen, lloffset, DebugLoc::None);
let llindex = self.prepare_index(bcx, llindex);
build::InBoundsGEP(bcx, llbase, &[llindex])
(build::InBoundsGEP(bcx, tr_base.llval, &[llindex]),
ptr::null_mut())
}
mir::ProjectionElem::Downcast(..) => {
tr_base.llval
(tr_base.llval, tr_base.llextra)
}
};
LvalueRef {
llval: llprojected,
llextra: llextra,
ty: projected_ty,
}
}


@ -180,7 +180,7 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>,
base::store_ty(bcx, llarg, lltemp, arg_ty);
lltemp
};
LvalueRef::new(llval, LvalueTy::from_ty(arg_ty))
LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))
})
.collect()
}
@ -192,4 +192,3 @@ mod lvalue;
mod rvalue;
mod operand;
mod statement;


@ -12,22 +12,72 @@ use llvm::ValueRef;
use rustc::middle::ty::Ty;
use rustc_mir::repr as mir;
use trans::base;
use trans::build;
use trans::common::Block;
use trans::common::{self, Block};
use trans::datum;
use super::{MirContext, TempRef};
/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
/// safety check.
#[derive(Copy, Clone)]
pub enum OperandValue {
/// A reference to the actual operand. The data is guaranteed
/// to be valid for the operand's lifetime.
Ref(ValueRef),
/// A single LLVM value.
Immediate(ValueRef),
/// A fat pointer. The first ValueRef is the data and the second
/// is the extra.
FatPtr(ValueRef, ValueRef)
}
/// An `OperandRef` is an "SSA" reference to a Rust value, along with
/// its type.
///
/// NOTE: unless you know a value's type exactly, you should not
/// generate LLVM opcodes acting on it; instead, act via methods
/// to avoid nasty edge cases. In particular, using `build::Store`
/// directly is sure to cause problems - use `store_operand` instead.
#[derive(Copy, Clone)]
pub struct OperandRef<'tcx> {
// This will be "indirect" if `appropriate_rvalue_mode` returns
// ByRef, and otherwise ByValue.
pub llval: ValueRef,
// The value.
pub val: OperandValue,
// The type of value being returned.
pub ty: Ty<'tcx>
}
impl<'tcx> OperandRef<'tcx> {
/// Asserts that this operand refers to a scalar and returns
/// a reference to its value.
pub fn immediate(self) -> ValueRef {
match self.val {
OperandValue::Immediate(s) => s,
_ => unreachable!()
}
}
pub fn repr<'bcx>(self, bcx: Block<'bcx, 'tcx>) -> String {
match self.val {
OperandValue::Ref(r) => {
format!("OperandRef(Ref({}) @ {:?})",
bcx.val_to_string(r), self.ty)
}
OperandValue::Immediate(i) => {
format!("OperandRef(Immediate({}) @ {:?})",
bcx.val_to_string(i), self.ty)
}
OperandValue::FatPtr(a, d) => {
format!("OperandRef(FatPtr({}, {}) @ {:?})",
bcx.val_to_string(a),
bcx.val_to_string(d),
self.ty)
}
}
}
}
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_operand(&mut self,
bcx: Block<'bcx, 'tcx>,
@ -62,23 +112,24 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
debug!("trans_operand: tr_lvalue={} @ {:?}",
bcx.val_to_string(tr_lvalue.llval),
ty);
let llval = match datum::appropriate_rvalue_mode(bcx.ccx(), ty) {
datum::ByValue => build::Load(bcx, tr_lvalue.llval),
datum::ByRef => tr_lvalue.llval,
let val = match datum::appropriate_rvalue_mode(bcx.ccx(), ty) {
datum::ByValue => {
OperandValue::Immediate(base::load_ty(bcx, tr_lvalue.llval, ty))
}
datum::ByRef if common::type_is_fat_ptr(bcx.tcx(), ty) => {
let (lldata, llextra) = base::load_fat_ptr(bcx, tr_lvalue.llval, ty);
OperandValue::FatPtr(lldata, llextra)
}
datum::ByRef => OperandValue::Ref(tr_lvalue.llval)
};
OperandRef {
llval: llval,
val: val,
ty: ty
}
}
mir::Operand::Constant(ref constant) => {
let llval = self.trans_constant(bcx, constant);
let ty = bcx.monomorphize(&constant.ty);
OperandRef {
llval: llval,
ty: ty,
}
self.trans_constant(bcx, constant)
}
}
}
@ -92,10 +143,25 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
bcx.val_to_string(lldest),
operand);
// FIXME: consider not copying constants through the
// stack.
let o = self.trans_operand(bcx, operand);
match datum::appropriate_rvalue_mode(bcx.ccx(), o.ty) {
datum::ByValue => base::store_ty(bcx, o.llval, lldest, o.ty),
datum::ByRef => base::memcpy_ty(bcx, lldest, o.llval, o.ty),
};
self.store_operand(bcx, lldest, o);
}
pub fn store_operand(&mut self,
bcx: Block<'bcx, 'tcx>,
lldest: ValueRef,
operand: OperandRef<'tcx>)
{
debug!("store_operand: operand={}", operand.repr(bcx));
match operand.val {
OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty),
OperandValue::Immediate(s) => base::store_ty(bcx, s, lldest, operand.ty),
OperandValue::FatPtr(data, extra) => {
base::store_fat_ptr(bcx, data, extra, lldest, operand.ty);
}
}
}
}


@ -9,8 +9,7 @@
// except according to those terms.
use llvm::ValueRef;
use rustc::middle::ty::Ty;
use rustc_front::hir;
use rustc::middle::ty::{self, Ty};
use rustc_mir::repr as mir;
use trans::asm;
@ -26,7 +25,7 @@ use trans::type_of;
use trans::tvec;
use super::MirContext;
use super::operand::OperandRef;
use super::operand::{OperandRef, OperandValue};
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_rvalue(&mut self,
@ -45,17 +44,52 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
bcx
}
mir::Rvalue::Cast(..) => {
unimplemented!()
mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, cast_ty) => {
if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(bcx, lldest, temp);
return bcx;
}
// Unsize of a nontrivial struct. I would prefer
// this to be eliminated by MIR translation, but
// `CoerceUnsized` can be passed by a where-clause,
// so the (generic) MIR may not be able to expand it.
let operand = self.trans_operand(bcx, operand);
match operand.val {
OperandValue::FatPtr(..) => unreachable!(),
OperandValue::Immediate(llval) => {
// unsize from an immediate structure. We don't
// really need a temporary alloca here, but
// avoiding it would require us to have
// `coerce_unsized_into` use extractvalue to
// index into the struct, and this case isn't
// important enough for it.
debug!("trans_rvalue: creating ugly alloca");
let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
base::store_ty(bcx, llval, lltemp, operand.ty);
base::coerce_unsized_into(bcx,
lltemp, operand.ty,
lldest, cast_ty);
}
OperandValue::Ref(llref) => {
base::coerce_unsized_into(bcx,
llref, operand.ty,
lldest, cast_ty);
}
}
bcx
}
mir::Rvalue::Repeat(ref elem, ref count) => {
let elem = self.trans_operand(bcx, elem);
let size = self.trans_constant(bcx, count);
let size = self.trans_constant(bcx, count).immediate();
let base = expr::get_dataptr(bcx, lldest);
tvec::iter_vec_raw(bcx, base, elem.ty, size, |b, vref, _| {
build::Store(b, elem.llval, vref);
b
tvec::iter_vec_raw(bcx, base, elem.ty, size, |bcx, llslot, _| {
self.store_operand(bcx, llslot, elem);
bcx
})
}
@ -93,7 +127,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
_ => {
assert!(rvalue_creates_operand(rvalue));
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
build::Store(bcx, temp.llval, lldest);
self.store_operand(bcx, lldest, temp);
bcx
}
}
@ -112,28 +146,82 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
(bcx, operand)
}
mir::Rvalue::Cast(..) => {
unimplemented!()
mir::Rvalue::Cast(ref kind, ref operand, cast_ty) => {
let operand = self.trans_operand(bcx, operand);
debug!("cast operand is {}", operand.repr(bcx));
let cast_ty = bcx.monomorphize(&cast_ty);
let val = match *kind {
mir::CastKind::ReifyFnPointer |
mir::CastKind::UnsafeFnPointer => {
// these are no-ops at the LLVM level
operand.val
}
mir::CastKind::Unsize => {
// unsize targets other than a fat pointer currently
// can't be operands.
assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));
match operand.val {
OperandValue::FatPtr(..) => {
// unsize from a fat pointer - this is a
// "trait-object-to-supertrait" coercion, for
// example,
// &'a fmt::Debug+Send => &'a fmt::Debug,
// and is a no-op at the LLVM level
operand.val
}
OperandValue::Immediate(lldata) => {
// "standard" unsize
let (lldata, llextra) =
base::unsize_thin_ptr(bcx, lldata,
operand.ty, cast_ty);
OperandValue::FatPtr(lldata, llextra)
}
OperandValue::Ref(_) => {
bcx.sess().bug(
&format!("by-ref operand {} in trans_rvalue_operand",
operand.repr(bcx)));
}
}
}
mir::CastKind::Misc => unimplemented!()
};
(bcx, OperandRef {
val: val,
ty: cast_ty
})
}
mir::Rvalue::Ref(_, _, ref lvalue) => {
mir::Rvalue::Ref(_, bk, ref lvalue) => {
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
let ref_ty = bcx.tcx().mk_ref(
bcx.tcx().mk_region(ty::ReStatic),
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
);
// Note: lvalues are indirect, so storing the `llval` into the
// destination effectively creates a reference.
(bcx, OperandRef {
llval: tr_lvalue.llval,
ty: tr_lvalue.ty.to_ty(bcx.tcx()),
})
if common::type_is_sized(bcx.tcx(), ty) {
(bcx, OperandRef {
val: OperandValue::Immediate(tr_lvalue.llval),
ty: ref_ty,
})
} else {
(bcx, OperandRef {
val: OperandValue::FatPtr(tr_lvalue.llval,
tr_lvalue.llextra),
ty: ref_ty,
})
}
}
mir::Rvalue::Len(ref lvalue) => {
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let (_, lllen) = tvec::get_base_and_len(bcx,
tr_lvalue.llval,
tr_lvalue.ty.to_ty(bcx.tcx()));
(bcx, OperandRef {
llval: lllen,
val: OperandValue::Immediate(self.lvalue_len(bcx, tr_lvalue)),
ty: bcx.tcx().types.usize,
})
}
@ -141,123 +229,45 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
let lhs = self.trans_operand(bcx, lhs);
let rhs = self.trans_operand(bcx, rhs);
let is_float = lhs.ty.is_fp();
let is_signed = lhs.ty.is_signed();
let binop_debug_loc = DebugLoc::None;
let llval = match op {
mir::BinOp::Add => if is_float {
build::FAdd(bcx, lhs.llval, rhs.llval, binop_debug_loc)
} else {
build::Add(bcx, lhs.llval, rhs.llval, binop_debug_loc)
},
mir::BinOp::Sub => if is_float {
build::FSub(bcx, lhs.llval, rhs.llval, binop_debug_loc)
} else {
build::Sub(bcx, lhs.llval, rhs.llval, binop_debug_loc)
},
mir::BinOp::Mul => if is_float {
build::FMul(bcx, lhs.llval, rhs.llval, binop_debug_loc)
} else {
build::Mul(bcx, lhs.llval, rhs.llval, binop_debug_loc)
},
mir::BinOp::Div => if is_float {
build::FDiv(bcx, lhs.llval, rhs.llval, binop_debug_loc)
} else if is_signed {
build::SDiv(bcx, lhs.llval, rhs.llval, binop_debug_loc)
} else {
build::UDiv(bcx, lhs.llval, rhs.llval, binop_debug_loc)
},
mir::BinOp::Rem => if is_float {
// LLVM currently always lowers `frem` instructions to the appropriate
// library calls typically found in libm. Notably f64 gets wired up
// to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
// us, 32-bit MSVC does not actually have a `fmodf` symbol, it's
// instead just an inline function in a header that goes up to a
// f64, uses `fmod`, and then comes back down to a f32.
//
// Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
// still unconditionally lower frem instructions over 32-bit floats
// to a call to `fmodf`. To work around this we special case MSVC
// 32-bit float rem instructions and instead do the call out to
// `fmod` ourselves.
//
// Note that this is currently duplicated with src/libcore/ops.rs
// which does the same thing, and it would be nice to perhaps unify
// these two implementations one day! Also note that we call `fmod`
// for both 32 and 64-bit floats because if we emit any FRem
// instruction at all then LLVM is capable of optimizing it into a
// 32-bit FRem (which we're trying to avoid).
let tcx = bcx.tcx();
let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
tcx.sess.target.target.arch == "x86";
if use_fmod {
let f64t = Type::f64(bcx.ccx());
let fty = Type::func(&[f64t, f64t], &f64t);
let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
tcx.types.f64);
if lhs.ty == tcx.types.f32 {
let lllhs = build::FPExt(bcx, lhs.llval, f64t);
let llrhs = build::FPExt(bcx, rhs.llval, f64t);
let llres = build::Call(bcx, llfn, &[lllhs, llrhs],
None, binop_debug_loc);
build::FPTrunc(bcx, llres, Type::f32(bcx.ccx()))
} else {
build::Call(bcx, llfn, &[lhs.llval, rhs.llval],
None, binop_debug_loc)
}
} else {
build::FRem(bcx, lhs.llval, rhs.llval, binop_debug_loc)
let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
match (lhs.val, rhs.val) {
(OperandValue::FatPtr(lhs_addr, lhs_extra),
OperandValue::FatPtr(rhs_addr, rhs_extra)) => {
base::compare_fat_ptrs(bcx,
lhs_addr, lhs_extra,
rhs_addr, rhs_extra,
lhs.ty, op.to_hir_binop(),
DebugLoc::None)
}
} else if is_signed {
build::SRem(bcx, lhs.llval, rhs.llval, binop_debug_loc)
} else {
build::URem(bcx, lhs.llval, rhs.llval, binop_debug_loc)
},
mir::BinOp::BitOr => build::Or(bcx, lhs.llval, rhs.llval, binop_debug_loc),
mir::BinOp::BitAnd => build::And(bcx, lhs.llval, rhs.llval, binop_debug_loc),
mir::BinOp::BitXor => build::Xor(bcx, lhs.llval, rhs.llval, binop_debug_loc),
mir::BinOp::Shl => common::build_unchecked_lshift(bcx,
lhs.llval,
rhs.llval,
binop_debug_loc),
mir::BinOp::Shr => common::build_unchecked_rshift(bcx,
lhs.ty,
lhs.llval,
rhs.llval,
binop_debug_loc),
mir::BinOp::Eq => base::compare_scalar_types(bcx, lhs.llval, rhs.llval, lhs.ty,
hir::BiEq, binop_debug_loc),
mir::BinOp::Lt => base::compare_scalar_types(bcx, lhs.llval, rhs.llval, lhs.ty,
hir::BiLt, binop_debug_loc),
mir::BinOp::Le => base::compare_scalar_types(bcx, lhs.llval, rhs.llval, lhs.ty,
hir::BiLe, binop_debug_loc),
mir::BinOp::Ne => base::compare_scalar_types(bcx, lhs.llval, rhs.llval, lhs.ty,
hir::BiNe, binop_debug_loc),
mir::BinOp::Ge => base::compare_scalar_types(bcx, lhs.llval, rhs.llval, lhs.ty,
hir::BiGe, binop_debug_loc),
mir::BinOp::Gt => base::compare_scalar_types(bcx, lhs.llval, rhs.llval, lhs.ty,
hir::BiGt, binop_debug_loc),
_ => unreachable!()
}
} else {
self.trans_scalar_binop(bcx, op,
lhs.immediate(), rhs.immediate(),
lhs.ty, DebugLoc::None)
};
(bcx, OperandRef {
llval: llval,
ty: lhs.ty,
val: OperandValue::Immediate(llresult),
ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
})
}
mir::Rvalue::UnaryOp(op, ref operand) => {
let operand = self.trans_operand(bcx, operand);
let lloperand = operand.immediate();
let is_float = operand.ty.is_fp();
let debug_loc = DebugLoc::None;
let llval = match op {
mir::UnOp::Not => build::Not(bcx, operand.llval, debug_loc),
mir::UnOp::Not => build::Not(bcx, lloperand, debug_loc),
mir::UnOp::Neg => if is_float {
build::FNeg(bcx, operand.llval, debug_loc)
build::FNeg(bcx, lloperand, debug_loc)
} else {
build::Neg(bcx, operand.llval, debug_loc)
build::Neg(bcx, lloperand, debug_loc)
}
};
(bcx, OperandRef {
llval: llval,
val: OperandValue::Immediate(llval),
ty: operand.ty,
})
}
@ -277,7 +287,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
llalign,
DebugLoc::None);
(bcx, OperandRef {
llval: llval,
val: OperandValue::Immediate(llval),
ty: box_ty,
})
}
@ -290,6 +300,104 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}
}
pub fn trans_scalar_binop(&mut self,
bcx: Block<'bcx, 'tcx>,
op: mir::BinOp,
lhs: ValueRef,
rhs: ValueRef,
input_ty: Ty<'tcx>,
debug_loc: DebugLoc) -> ValueRef {
let is_float = input_ty.is_fp();
let is_signed = input_ty.is_signed();
match op {
mir::BinOp::Add => if is_float {
build::FAdd(bcx, lhs, rhs, debug_loc)
} else {
build::Add(bcx, lhs, rhs, debug_loc)
},
mir::BinOp::Sub => if is_float {
build::FSub(bcx, lhs, rhs, debug_loc)
} else {
build::Sub(bcx, lhs, rhs, debug_loc)
},
mir::BinOp::Mul => if is_float {
build::FMul(bcx, lhs, rhs, debug_loc)
} else {
build::Mul(bcx, lhs, rhs, debug_loc)
},
mir::BinOp::Div => if is_float {
build::FDiv(bcx, lhs, rhs, debug_loc)
} else if is_signed {
build::SDiv(bcx, lhs, rhs, debug_loc)
} else {
build::UDiv(bcx, lhs, rhs, debug_loc)
},
mir::BinOp::Rem => if is_float {
// LLVM currently always lowers `frem` instructions to the appropriate
// library calls typically found in libm. Notably f64 gets wired up
// to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
// us, 32-bit MSVC does not actually have a `fmodf` symbol, it's
// instead just an inline function in a header that goes up to a
// f64, uses `fmod`, and then comes back down to a f32.
//
// Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
// still unconditionally lower frem instructions over 32-bit floats
// to a call to `fmodf`. To work around this we special case MSVC
// 32-bit float rem instructions and instead do the call out to
// `fmod` ourselves.
//
// Note that this is currently duplicated with src/libcore/ops.rs
// which does the same thing, and it would be nice to perhaps unify
// these two implementations one day! Also note that we call `fmod`
// for both 32 and 64-bit floats because if we emit any FRem
// instruction at all then LLVM is capable of optimizing it into a
// 32-bit FRem (which we're trying to avoid).
let tcx = bcx.tcx();
let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
tcx.sess.target.target.arch == "x86";
if use_fmod {
let f64t = Type::f64(bcx.ccx());
let fty = Type::func(&[f64t, f64t], &f64t);
let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
tcx.types.f64);
if input_ty == tcx.types.f32 {
let lllhs = build::FPExt(bcx, lhs, f64t);
let llrhs = build::FPExt(bcx, rhs, f64t);
let llres = build::Call(bcx, llfn, &[lllhs, llrhs],
None, debug_loc);
build::FPTrunc(bcx, llres, Type::f32(bcx.ccx()))
} else {
build::Call(bcx, llfn, &[lhs, rhs],
None, debug_loc)
}
} else {
build::FRem(bcx, lhs, rhs, debug_loc)
}
} else if is_signed {
build::SRem(bcx, lhs, rhs, debug_loc)
} else {
build::URem(bcx, lhs, rhs, debug_loc)
},
mir::BinOp::BitOr => build::Or(bcx, lhs, rhs, debug_loc),
mir::BinOp::BitAnd => build::And(bcx, lhs, rhs, debug_loc),
mir::BinOp::BitXor => build::Xor(bcx, lhs, rhs, debug_loc),
mir::BinOp::Shl => common::build_unchecked_lshift(bcx,
lhs,
rhs,
debug_loc),
mir::BinOp::Shr => common::build_unchecked_rshift(bcx,
input_ty,
lhs,
rhs,
debug_loc),
mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
base::compare_scalar_types(bcx, lhs, rhs, input_ty,
op.to_hir_binop(), debug_loc)
}
}
}
}
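
The `Rem` arm above is the lowering of `%`; the long comment documents the 32-bit MSVC detour through a widened `fmod` call. The operation being lowered is simply:

fn frem(a: f32, b: f32) -> f32 {
    // On x86 MSVC this becomes a call to `fmod` via f64 rather than an
    // LLVM `frem` over f32, per the workaround described above.
    a % b
}

fn main() {
    assert_eq!(frem(5.0, 2.0), 1.0);
}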
pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {


@ -0,0 +1,76 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs, coerce_unsized, unsize)]
use std::ops::CoerceUnsized;
use std::marker::Unsize;
#[rustc_mir]
fn identity_coercion(x: &(Fn(u32)->u32 + Send)) -> &Fn(u32)->u32 {
x
}
#[rustc_mir]
fn fn_coercions(f: &fn(u32) -> u32) ->
(unsafe fn(u32) -> u32,
&(Fn(u32) -> u32+Send))
{
(*f, f)
}
#[rustc_mir]
fn simple_array_coercion(x: &[u8; 3]) -> &[u8] { x }
fn square(a: u32) -> u32 { a * a }
#[derive(PartialEq,Eq)]
struct PtrWrapper<'a, T: 'a+?Sized>(u32, u32, (), &'a T);
impl<'a, T: ?Sized+Unsize<U>, U: ?Sized>
CoerceUnsized<PtrWrapper<'a, U>> for PtrWrapper<'a, T> {}
struct TrivPtrWrapper<'a, T: 'a+?Sized>(&'a T);
impl<'a, T: ?Sized+Unsize<U>, U: ?Sized>
CoerceUnsized<TrivPtrWrapper<'a, U>> for TrivPtrWrapper<'a, T> {}
#[rustc_mir]
fn coerce_ptr_wrapper(p: PtrWrapper<[u8; 3]>) -> PtrWrapper<[u8]> {
p
}
#[rustc_mir]
fn coerce_triv_ptr_wrapper(p: TrivPtrWrapper<[u8; 3]>) -> TrivPtrWrapper<[u8]> {
p
}
#[rustc_mir]
fn coerce_fat_ptr_wrapper(p: PtrWrapper<Fn(u32) -> u32+Send>)
-> PtrWrapper<Fn(u32) -> u32> {
p
}
fn main() {
let a = [0,1,2];
let square_local : fn(u32) -> u32 = square;
let (f,g) = fn_coercions(&square_local);
assert_eq!(f as usize, square as usize);
assert_eq!(g(4), 16);
assert_eq!(identity_coercion(g)(5), 25);
assert_eq!(simple_array_coercion(&a), &a);
let w = coerce_ptr_wrapper(PtrWrapper(2,3,(),&a));
assert!(w == PtrWrapper(2,3,(),&a) as PtrWrapper<[u8]>);
let w = coerce_triv_ptr_wrapper(TrivPtrWrapper(&a));
assert_eq!(&w.0, &a);
let z = coerce_fat_ptr_wrapper(PtrWrapper(2,3,(),&square_local));
assert_eq!((z.3)(6), 36);
}


@ -0,0 +1,63 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// test that ordinary fat pointer operations work.
#![feature(rustc_attrs)]
struct Wrapper<T: ?Sized>(u32, T);
struct FatPtrContainer<'a> {
ptr: &'a [u8]
}
#[rustc_mir]
fn fat_ptr_project(a: &Wrapper<[u8]>) -> &[u8] {
&a.1
}
#[rustc_mir]
fn fat_ptr_simple(a: &[u8]) -> &[u8] {
a
}
#[rustc_mir]
fn fat_ptr_via_local(a: &[u8]) -> &[u8] {
let x = a;
x
}
#[rustc_mir]
fn fat_ptr_from_struct(s: FatPtrContainer) -> &[u8] {
s.ptr
}
#[rustc_mir]
fn fat_ptr_to_struct(a: &[u8]) -> FatPtrContainer {
FatPtrContainer { ptr: a }
}
#[rustc_mir]
fn fat_ptr_store_to<'a>(a: &'a [u8], b: &mut &'a [u8]) {
*b = a;
}
fn main() {
let a = Wrapper(4, [7,6,5]);
let p = fat_ptr_project(&a);
let p = fat_ptr_simple(p);
let p = fat_ptr_via_local(p);
let p = fat_ptr_from_struct(fat_ptr_to_struct(p));
let mut target : &[u8] = &[42];
fat_ptr_store_to(p, &mut target);
assert_eq!(target, &a.1);
}


@ -0,0 +1,173 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs)]
// check raw fat pointer ops in mir
// FIXME: please improve this when we get monomorphization support
use std::mem;
#[derive(Debug, PartialEq, Eq)]
struct ComparisonResults {
lt: bool,
le: bool,
gt: bool,
ge: bool,
eq: bool,
ne: bool
}
const LT: ComparisonResults = ComparisonResults {
lt: true,
le: true,
gt: false,
ge: false,
eq: false,
ne: true
};
const EQ: ComparisonResults = ComparisonResults {
lt: false,
le: true,
gt: false,
ge: true,
eq: true,
ne: false
};
const GT: ComparisonResults = ComparisonResults {
lt: false,
le: false,
gt: true,
ge: true,
eq: false,
ne: true
};
#[rustc_mir]
fn compare_su8(a: *const S<[u8]>, b: *const S<[u8]>) -> ComparisonResults {
ComparisonResults {
lt: a < b,
le: a <= b,
gt: a > b,
ge: a >= b,
eq: a == b,
ne: a != b
}
}
#[rustc_mir]
fn compare_au8(a: *const [u8], b: *const [u8]) -> ComparisonResults {
ComparisonResults {
lt: a < b,
le: a <= b,
gt: a > b,
ge: a >= b,
eq: a == b,
ne: a != b
}
}
#[rustc_mir(graphviz="comparefoo.gv")]
fn compare_foo<'a>(a: *const (Foo+'a), b: *const (Foo+'a)) -> ComparisonResults {
ComparisonResults {
lt: a < b,
le: a <= b,
gt: a > b,
ge: a >= b,
eq: a == b,
ne: a != b
}
}
#[rustc_mir(graphviz="simpleeq.gv")]
fn simple_eq<'a>(a: *const (Foo+'a), b: *const (Foo+'a)) -> bool {
let result = a == b;
result
}
fn assert_inorder<T: Copy>(a: &[T],
compare: fn(T, T) -> ComparisonResults) {
for i in 0..a.len() {
for j in 0..a.len() {
let cres = compare(a[i], a[j]);
if i < j {
assert_eq!(cres, LT);
} else if i == j {
assert_eq!(cres, EQ);
} else {
assert_eq!(cres, GT);
}
}
}
}
trait Foo { fn foo(&self) -> usize; }
impl<T> Foo for T {
fn foo(&self) -> usize {
mem::size_of::<T>()
}
}
struct S<T:?Sized>(u32, T);
fn main() {
let array = [0,1,2,3,4];
let array2 = [5,6,7,8,9];
// fat ptr comparison: addr then extra
// check ordering for arrays
let mut ptrs: Vec<*const [u8]> = vec![
&array[0..0], &array[0..1], &array, &array[1..]
];
let array_addr = &array as *const [u8] as *const u8 as usize;
let array2_addr = &array2 as *const [u8] as *const u8 as usize;
if array2_addr < array_addr {
ptrs.insert(0, &array2);
} else {
ptrs.push(&array2);
}
assert_inorder(&ptrs, compare_au8);
let u8_ = (0u8, 1u8);
let u32_ = (4u32, 5u32);
// check ordering for ptrs
let buf: &mut [*const Foo] = &mut [
&u8_, &u8_.0,
&u32_, &u32_.0,
];
buf.sort_by(|u,v| {
let u : [*const (); 2] = unsafe { mem::transmute(*u) };
let v : [*const (); 2] = unsafe { mem::transmute(*v) };
u.cmp(&v)
});
assert_inorder(buf, compare_foo);
// check ordering for structs containing arrays
let ss: (S<[u8; 2]>,
S<[u8; 3]>,
S<[u8; 2]>) = (
S(7, [8, 9]),
S(10, [11, 12, 13]),
S(4, [5, 6])
);
assert_inorder(&[
&ss.0 as *const S<[u8]>,
&ss.1 as *const S<[u8]>,
&ss.2 as *const S<[u8]>
], compare_su8);
assert!(simple_eq(&0u8 as *const _, &0u8 as *const _));
assert!(!simple_eq(&0u8 as *const _, &1u8 as *const _));
}