Auto merge of #32980 - Aatch:better-mir-building, r=nagisa

Various improvements to MIR and LLVM IR Construction

This primarily affects MIR construction, which indirectly improves the
generated LLVM IR, but some changes to LLVM IR generation have been made as well.

* Handle "statement expressions" more intelligently. These are
  expressions that always evaluate to `()`. Previously a temporary would
  be generated as a destination to translate into, which is unnecessary.

  This affects assignment, augmented assignment, `return`, `break` and
  `continue`.
* Avoid inserting drops for non-drop types in more places. Scheduled
  drops were already skipped at construction time for types that we
  knew wouldn't need dropping. However, manually-inserted drops, such
  as the drop of the old value of `x` in `x = y;`, were still
  generated. `build_drop` now takes a type parameter, like its
  `schedule_drop` counterpart, and skips the drop if the type doesn't
  need dropping.

* Avoid generating an extra temporary for an assignment where the types
  involved don't need dropping. Previously, an expression like
  `a = b + 1;` would result in a temporary for `b + 1`, so that the RHS
  could be evaluated first, then the LHS evaluated and its old value
  dropped, with everything still working correctly. However, this isn't
  necessary when the LHS doesn't need a drop, as we can just overwrite
  the existing value (see the sketch after this list).

* Improve lvalue analysis to allow treating an `Rvalue::Use` as an
  operand under certain conditions. The reason it was never treated as
  an operand before is that the source may need to be zeroed or
  drop-filled after the move, but that is only necessary for types that
  need dropping (a second sketch below illustrates the needs-drop
  check).

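As a rough source-level illustration (ordinary Rust rather than the MIR
it lowers to; the function below is a made-up example, not code from
this PR), these are the statement kinds discussed above:

```rust
// Hypothetical example: every statement in the body evaluates to `()`.
fn example(mut a: i32, b: i32) -> i32 {
    a = a + b; // assignment: no separate temporary for the RHS and no
               // drop of the old `a`, because `i32` needs no dropping
    a += 2;    // augmented assignment: built-in `AssignOp` only applies
               // to Copy operands (overloaded ops desugar to calls), so
               // the prior contents never need a drop either
    loop {
        if a > 10 {
            break; // `break`, `continue` and `return` always evaluate to
        }          // `()`, so no `()` destination temporary, no drop of
        a += 1;    // it, and no extra basic block for that drop
    }
    a
}

fn main() {
    assert_eq!(example(1, 2), 11);
}
```
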
The first two changes result in significantly fewer MIR blocks being
generated; previously, almost every statement ended up creating a new
block just to drop its `()` temporary.
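
To make the gating concrete, here is a minimal sketch of the needs-drop
check that `build_drop` and the operand analysis rely on. It uses
today's `std::mem::needs_drop` purely as a stand-in for the
compiler-internal `needs_drop(ty)` query; the PR itself does not call
this function, and `describe` is a made-up helper.

```rust
use std::mem::needs_drop;

// Stand-in for the compiler's internal query; analogy only.
fn describe<T>(name: &str) {
    if needs_drop::<T>() {
        // Overwriting a value of this type must drop the old contents
        // first, so the explicit drop (and its extra block) stays.
        println!("{}: drop required before overwriting", name);
    } else {
        // No drop glue: MIR can overwrite in place, `build_drop` is a
        // no-op, and an `Rvalue::Use` can be a plain operand.
        println!("{}: plain overwrite, no drop, no extra block", name);
    }
}

fn main() {
    describe::<i32>("i32");       // no drop needed
    describe::<String>("String"); // drop needed
}
```
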
bors 2016-04-28 01:26:45 -07:00
commit 009a64916e
18 changed files with 299 additions and 183 deletions

View File

@ -44,11 +44,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
StmtKind::Expr { scope, expr } => {
unpack!(block = this.in_scope(scope, block, |this, _| {
let expr = this.hir.mirror(expr);
let expr_span = expr.span;
let temp = this.temp(expr.ty.clone());
unpack!(block = this.into(&temp, block, expr));
unpack!(block = this.build_drop(block, expr_span, temp));
block.unit()
this.stmt_expr(block, expr)
}));
}
StmtKind::Let { remainder_scope, init_scope, pattern, initializer } => {

View File

@ -189,6 +189,11 @@ impl<'a,'tcx> Builder<'a,'tcx> {
block.and(Rvalue::Aggregate(AggregateKind::Adt(adt_def, variant_index, substs),
fields))
}
ExprKind::Assign { .. } |
ExprKind::AssignOp { .. } => {
block = unpack!(this.stmt_expr(block, expr));
block.and(this.unit_rvalue())
}
ExprKind::Literal { .. } |
ExprKind::Block { .. } |
ExprKind::Match { .. } |
@ -201,8 +206,6 @@ impl<'a,'tcx> Builder<'a,'tcx> {
ExprKind::Index { .. } |
ExprKind::VarRef { .. } |
ExprKind::SelfRef |
ExprKind::Assign { .. } |
ExprKind::AssignOp { .. } |
ExprKind::Break { .. } |
ExprKind::Continue { .. } |
ExprKind::Return { .. } |

View File

@ -12,12 +12,9 @@
use build::{BlockAnd, BlockAndExtension, Builder};
use build::expr::category::{Category, RvalueFunc};
use build::scope::LoopScope;
use hair::*;
use rustc::middle::region::CodeExtent;
use rustc::ty;
use rustc::mir::repr::*;
use syntax::codemap::Span;
impl<'a,'tcx> Builder<'a,'tcx> {
/// Compile `expr`, storing the result into `destination`, which
@ -207,65 +204,6 @@ impl<'a,'tcx> Builder<'a,'tcx> {
}
exit_block.unit()
}
ExprKind::Assign { lhs, rhs } => {
// Note: we evaluate assignments right-to-left. This
// is better for borrowck interaction with overloaded
// operators like x[j] = x[i].
let lhs = this.hir.mirror(lhs);
let lhs_span = lhs.span;
let rhs = unpack!(block = this.as_operand(block, rhs));
let lhs = unpack!(block = this.as_lvalue(block, lhs));
unpack!(block = this.build_drop(block, lhs_span, lhs.clone()));
this.cfg.push_assign(block, scope_id, expr_span, &lhs, Rvalue::Use(rhs));
block.unit()
}
ExprKind::AssignOp { op, lhs, rhs } => {
// FIXME(#28160) there is an interesting semantics
// question raised here -- should we "freeze" the
// value of the lhs here? I'm inclined to think not,
// since it seems closer to the semantics of the
// overloaded version, which takes `&mut self`. This
// only affects weird things like `x += {x += 1; x}`
// -- is that equal to `x + (x + 1)` or `2*(x+1)`?
// As above, RTL.
let rhs = unpack!(block = this.as_operand(block, rhs));
let lhs = unpack!(block = this.as_lvalue(block, lhs));
// we don't have to drop prior contents or anything
// because AssignOp is only legal for Copy types
// (overloaded ops should be desugared into a call).
this.cfg.push_assign(block, scope_id, expr_span, &lhs,
Rvalue::BinaryOp(op,
Operand::Consume(lhs.clone()),
rhs));
block.unit()
}
ExprKind::Continue { label } => {
this.break_or_continue(expr_span, label, block,
|loop_scope| loop_scope.continue_block)
}
ExprKind::Break { label } => {
this.break_or_continue(expr_span, label, block, |loop_scope| {
loop_scope.might_break = true;
loop_scope.break_block
})
}
ExprKind::Return { value } => {
block = match value {
Some(value) => unpack!(this.into(&Lvalue::ReturnPointer, block, value)),
None => {
this.cfg.push_assign_unit(block, scope_id,
expr_span, &Lvalue::ReturnPointer);
block
}
};
let extent = this.extent_of_return_scope();
let return_block = this.return_block();
this.exit_scope(expr_span, extent, block, return_block);
this.cfg.start_new_block().unit()
}
ExprKind::Call { ty, fun, args } => {
let diverges = match ty.sty {
ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => {
@ -294,6 +232,15 @@ impl<'a,'tcx> Builder<'a,'tcx> {
success.unit()
}
// These cases don't actually need a destination
ExprKind::Assign { .. } |
ExprKind::AssignOp { .. } |
ExprKind::Continue { .. } |
ExprKind::Break { .. } |
ExprKind::Return {.. } => {
this.stmt_expr(block, expr)
}
// these are the cases that are more naturally handled by some other mode
ExprKind::Unary { .. } |
ExprKind::Binary { .. } |
@ -327,20 +274,4 @@ impl<'a,'tcx> Builder<'a,'tcx> {
}
}
}
fn break_or_continue<F>(&mut self,
span: Span,
label: Option<CodeExtent>,
block: BasicBlock,
exit_selector: F)
-> BlockAnd<()>
where F: FnOnce(&mut LoopScope) -> BasicBlock
{
let (exit_block, extent) = {
let loop_scope = self.find_loop_scope(span, label);
(exit_selector(loop_scope), loop_scope.extent)
};
self.exit_scope(span, extent, block, exit_block);
self.cfg.start_new_block().unit()
}
}

View File

@ -77,3 +77,4 @@ mod as_operand;
mod as_temp;
mod category;
mod into;
mod stmt;

View File

@ -0,0 +1,135 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use build::{BlockAnd, BlockAndExtension, Builder};
use build::scope::LoopScope;
use hair::*;
use rustc::middle::region::CodeExtent;
use rustc::mir::repr::*;
use syntax::codemap::Span;
impl<'a,'tcx> Builder<'a,'tcx> {
pub fn stmt_expr(&mut self, mut block: BasicBlock, expr: Expr<'tcx>) -> BlockAnd<()> {
let this = self;
let expr_span = expr.span;
let scope_id = this.innermost_scope_id();
// Handle a number of expressions that don't need a destination at all. This
// avoids needing a mountain of temporary `()` variables.
match expr.kind {
ExprKind::Scope { extent, value } => {
let value = this.hir.mirror(value);
this.in_scope(extent, block, |this, _| this.stmt_expr(block, value))
}
ExprKind::Assign { lhs, rhs } => {
let lhs = this.hir.mirror(lhs);
let rhs = this.hir.mirror(rhs);
let scope_id = this.innermost_scope_id();
let lhs_span = lhs.span;
let lhs_ty = lhs.ty;
let rhs_ty = rhs.ty;
let lhs_needs_drop = this.hir.needs_drop(lhs_ty);
let rhs_needs_drop = this.hir.needs_drop(rhs_ty);
// Note: we evaluate assignments right-to-left. This
// is better for borrowck interaction with overloaded
// operators like x[j] = x[i].
// Generate better code for things that don't need to be
// dropped.
let rhs = if lhs_needs_drop || rhs_needs_drop {
let op = unpack!(block = this.as_operand(block, rhs));
Rvalue::Use(op)
} else {
unpack!(block = this.as_rvalue(block, rhs))
};
let lhs = unpack!(block = this.as_lvalue(block, lhs));
unpack!(block = this.build_drop(block, lhs_span, lhs.clone(), lhs_ty));
this.cfg.push_assign(block, scope_id, expr_span, &lhs, rhs);
block.unit()
}
ExprKind::AssignOp { op, lhs, rhs } => {
// FIXME(#28160) there is an interesting semantics
// question raised here -- should we "freeze" the
// value of the lhs here? I'm inclined to think not,
// since it seems closer to the semantics of the
// overloaded version, which takes `&mut self`. This
// only affects weird things like `x += {x += 1; x}`
// -- is that equal to `x + (x + 1)` or `2*(x+1)`?
// As above, RTL.
let rhs = unpack!(block = this.as_operand(block, rhs));
let lhs = unpack!(block = this.as_lvalue(block, lhs));
// we don't have to drop prior contents or anything
// because AssignOp is only legal for Copy types
// (overloaded ops should be desugared into a call).
this.cfg.push_assign(block, scope_id, expr_span, &lhs,
Rvalue::BinaryOp(op,
Operand::Consume(lhs.clone()),
rhs));
block.unit()
}
ExprKind::Continue { label } => {
this.break_or_continue(expr_span, label, block,
|loop_scope| loop_scope.continue_block)
}
ExprKind::Break { label } => {
this.break_or_continue(expr_span, label, block, |loop_scope| {
loop_scope.might_break = true;
loop_scope.break_block
})
}
ExprKind::Return { value } => {
block = match value {
Some(value) => unpack!(this.into(&Lvalue::ReturnPointer, block, value)),
None => {
this.cfg.push_assign_unit(block, scope_id,
expr_span, &Lvalue::ReturnPointer);
block
}
};
let extent = this.extent_of_return_scope();
let return_block = this.return_block();
this.exit_scope(expr_span, extent, block, return_block);
this.cfg.start_new_block().unit()
}
_ => {
let expr_span = expr.span;
let expr_ty = expr.ty;
let temp = this.temp(expr.ty.clone());
unpack!(block = this.into(&temp, block, expr));
unpack!(block = this.build_drop(block, expr_span, temp, expr_ty));
block.unit()
}
}
}
fn break_or_continue<F>(&mut self,
span: Span,
label: Option<CodeExtent>,
block: BasicBlock,
exit_selector: F)
-> BlockAnd<()>
where F: FnOnce(&mut LoopScope) -> BasicBlock
{
let (exit_block, extent) = {
let loop_scope = self.find_loop_scope(span, label);
(exit_selector(loop_scope), loop_scope.extent)
};
self.exit_scope(span, extent, block, exit_block);
self.cfg.start_new_block().unit()
}
}

View File

@ -46,6 +46,10 @@ impl<'a,'tcx> Builder<'a,'tcx> {
Operand::Constant(constant)
}
pub fn unit_rvalue(&mut self) -> Rvalue<'tcx> {
Rvalue::Aggregate(AggregateKind::Tuple, vec![])
}
pub fn push_usize(&mut self,
block: BasicBlock,
scope_id: ScopeId,

View File

@ -497,8 +497,11 @@ impl<'a,'tcx> Builder<'a,'tcx> {
pub fn build_drop(&mut self,
block: BasicBlock,
span: Span,
value: Lvalue<'tcx>)
-> BlockAnd<()> {
value: Lvalue<'tcx>,
ty: Ty<'tcx>) -> BlockAnd<()> {
if !self.hir.needs_drop(ty) {
return block.unit();
}
let scope_id = self.innermost_scope_id();
let next_target = self.cfg.start_new_block();
let diverge_target = self.diverge_cleanup();

View File

@ -165,8 +165,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
args: &[ValueRef],
then: BasicBlockRef,
catch: BasicBlockRef,
bundle: Option<&OperandBundleDef>)
-> ValueRef {
bundle: Option<&OperandBundleDef>) -> ValueRef {
self.count_insn("invoke");
debug!("Invoke {:?} with args ({})",
@ -176,6 +175,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
.collect::<Vec<String>>()
.join(", "));
check_call("invoke", llfn, args);
let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _);
unsafe {
@ -856,28 +857,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
.collect::<Vec<String>>()
.join(", "));
let mut fn_ty = val_ty(llfn);
// Strip off pointers
while fn_ty.kind() == llvm::TypeKind::Pointer {
fn_ty = fn_ty.element_type();
}
assert!(fn_ty.kind() == llvm::TypeKind::Function,
"builder::call not passed a function");
let param_tys = fn_ty.func_params();
let iter = param_tys.into_iter()
.zip(args.iter().map(|&v| val_ty(v)));
for (i, (expected_ty, actual_ty)) in iter.enumerate() {
if expected_ty != actual_ty {
bug!("Type mismatch in function call of {:?}. \
Expected {:?} for param {}, got {:?}",
Value(llfn),
expected_ty, i, actual_ty);
}
}
check_call("call", llfn, args);
let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _);
@ -1121,3 +1101,30 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
}
fn check_call(typ: &str, llfn: ValueRef, args: &[ValueRef]) {
if cfg!(debug_assertions) {
let mut fn_ty = val_ty(llfn);
// Strip off pointers
while fn_ty.kind() == llvm::TypeKind::Pointer {
fn_ty = fn_ty.element_type();
}
assert!(fn_ty.kind() == llvm::TypeKind::Function,
"builder::{} not passed a function", typ);
let param_tys = fn_ty.func_params();
let iter = param_tys.into_iter()
.zip(args.iter().map(|&v| val_ty(v)));
for (i, (expected_ty, actual_ty)) in iter.enumerate() {
if expected_ty != actual_ty {
bug!("Type mismatch in function call of {:?}. \
Expected {:?} for param {}, got {:?}",
Value(llfn),
expected_ty, i, actual_ty);
}
}
}
}

View File

@ -14,13 +14,13 @@
use rustc_data_structures::bitvec::BitVector;
use rustc::mir::repr as mir;
use rustc::mir::visit::{Visitor, LvalueContext};
use common::{self, Block};
use common::{self, Block, BlockAndBuilder};
use super::rvalue;
pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>,
mir: &mir::Mir<'tcx>)
-> BitVector {
let mut analyzer = TempAnalyzer::new(mir.temp_decls.len());
mir: &mir::Mir<'tcx>) -> BitVector {
let bcx = bcx.build();
let mut analyzer = TempAnalyzer::new(mir, &bcx, mir.temp_decls.len());
analyzer.visit_mir(mir);
@ -30,7 +30,8 @@ pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>,
if ty.is_scalar() ||
ty.is_unique() ||
ty.is_region_ptr() ||
ty.is_simd()
ty.is_simd() ||
common::type_is_zero_size(bcx.ccx(), ty)
{
// These sorts of types are immediates that we can store
// in an ValueRef without an alloca.
@ -50,14 +51,20 @@ pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>,
analyzer.lvalue_temps
}
struct TempAnalyzer {
struct TempAnalyzer<'mir, 'bcx: 'mir, 'tcx: 'bcx> {
mir: &'mir mir::Mir<'tcx>,
bcx: &'mir BlockAndBuilder<'bcx, 'tcx>,
lvalue_temps: BitVector,
seen_assigned: BitVector
}
impl TempAnalyzer {
fn new(temp_count: usize) -> TempAnalyzer {
impl<'mir, 'bcx, 'tcx> TempAnalyzer<'mir, 'bcx, 'tcx> {
fn new(mir: &'mir mir::Mir<'tcx>,
bcx: &'mir BlockAndBuilder<'bcx, 'tcx>,
temp_count: usize) -> TempAnalyzer<'mir, 'bcx, 'tcx> {
TempAnalyzer {
mir: mir,
bcx: bcx,
lvalue_temps: BitVector::new(temp_count),
seen_assigned: BitVector::new(temp_count)
}
@ -75,7 +82,7 @@ impl TempAnalyzer {
}
}
impl<'tcx> Visitor<'tcx> for TempAnalyzer {
impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for TempAnalyzer<'mir, 'bcx, 'tcx> {
fn visit_assign(&mut self,
block: mir::BasicBlock,
lvalue: &mir::Lvalue<'tcx>,
@ -85,7 +92,7 @@ impl<'tcx> Visitor<'tcx> for TempAnalyzer {
match *lvalue {
mir::Lvalue::Temp(index) => {
self.mark_assigned(index as usize);
if !rvalue::rvalue_creates_operand(rvalue) {
if !rvalue::rvalue_creates_operand(self.mir, self.bcx, rvalue) {
self.mark_as_lvalue(index as usize);
}
}

View File

@ -436,47 +436,47 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
fn_ty: &FnType,
next_idx: &mut usize,
callee: &mut CalleeData) {
// FIXME: consider having some optimization to avoid tupling/untupling
// (and storing/loading in the case of immediates)
let tuple = self.trans_operand(bcx, operand);
// avoid trans_operand for pointless copying
let lv = match *operand {
mir::Operand::Consume(ref lvalue) => self.trans_lvalue(bcx, lvalue),
mir::Operand::Constant(ref constant) => {
// FIXME: consider being less pessimized
if constant.ty.is_nil() {
return;
let arg_types = match tuple.ty.sty {
ty::TyTuple(ref tys) => tys,
_ => span_bug!(self.mir.span,
"bad final argument to \"rust-call\" fn {:?}", tuple.ty)
};
// Handle both by-ref and immediate tuples.
match tuple.val {
Ref(llval) => {
let base_repr = adt::represent_type(bcx.ccx(), tuple.ty);
let base = adt::MaybeSizedValue::sized(llval);
for (n, &ty) in arg_types.iter().enumerate() {
let ptr = adt::trans_field_ptr_builder(bcx, &base_repr, base, Disr(0), n);
let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
let (lldata, llextra) = load_fat_ptr(bcx, ptr);
FatPtr(lldata, llextra)
} else {
// trans_argument will load this if it needs to
Ref(ptr)
};
self.trans_argument(bcx, val, llargs, fn_ty, next_idx, callee);
}
let ty = bcx.monomorphize(&constant.ty);
let lv = LvalueRef::alloca(bcx, ty, "__untuple_alloca");
let constant = self.trans_constant(bcx, constant);
self.store_operand(bcx, lv.llval, constant);
lv
}
};
let lv_ty = lv.ty.to_ty(bcx.tcx());
let result_types = match lv_ty.sty {
ty::TyTuple(ref tys) => tys,
_ => span_bug!(
self.mir.span,
"bad final argument to \"rust-call\" fn {:?}", lv_ty)
};
let base_repr = adt::represent_type(bcx.ccx(), lv_ty);
let base = adt::MaybeSizedValue::sized(lv.llval);
for (n, &ty) in result_types.iter().enumerate() {
let ptr = adt::trans_field_ptr_builder(bcx, &base_repr, base, Disr(0), n);
let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
let (lldata, llextra) = load_fat_ptr(bcx, ptr);
FatPtr(lldata, llextra)
} else {
// Don't bother loading the value, trans_argument will.
Ref(ptr)
};
self.trans_argument(bcx, val, llargs, fn_ty, next_idx, callee);
}
Immediate(llval) => {
for (n, &ty) in arg_types.iter().enumerate() {
let mut elem = bcx.extract_value(llval, n);
// Truncate bools to i1, if needed
if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx()) {
elem = bcx.trunc(elem, Type::i1(bcx.ccx()));
}
// If the tuple is immediate, the elements are as well
let val = Immediate(elem);
self.trans_argument(bcx, val, llargs, fn_ty, next_idx, callee);
}
}
FatPtr(_, _) => bug!("tuple is a fat pointer?!")
}
}
fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef {

View File

@ -16,7 +16,7 @@ use rustc::mir::repr as mir;
use rustc::mir::tcx::LvalueTy;
use session::config::FullDebugInfo;
use base;
use common::{self, Block, BlockAndBuilder, FunctionContext};
use common::{self, Block, BlockAndBuilder, CrateContext, FunctionContext};
use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind};
use machine;
use type_of;
@ -34,7 +34,7 @@ use rustc_data_structures::bitvec::BitVector;
use self::lvalue::{LvalueRef, get_dataptr, get_meta};
use rustc_mir::traversal;
use self::operand::OperandRef;
use self::operand::{OperandRef, OperandValue};
#[derive(Clone)]
pub enum CachedMir<'mir, 'tcx: 'mir> {
@ -108,6 +108,25 @@ enum TempRef<'tcx> {
Operand(Option<OperandRef<'tcx>>),
}
impl<'tcx> TempRef<'tcx> {
fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>,
ty: ty::Ty<'tcx>) -> TempRef<'tcx> {
if common::type_is_zero_size(ccx, ty) {
// Zero-size temporaries aren't always initialized, which
// doesn't matter because they don't contain data, but
// we need something in the operand.
let val = OperandValue::Immediate(common::C_nil(ccx));
let op = OperandRef {
val: val,
ty: ty
};
TempRef::Operand(Some(op))
} else {
TempRef::Operand(None)
}
}
}
///////////////////////////////////////////////////////////////////////////
pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
@ -154,7 +173,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
// If this is an immediate temp, we do not create an
// alloca in advance. Instead we wait until we see the
// definition and update the operand there.
TempRef::Operand(None)
TempRef::new_operand(bcx.ccx(), mty)
})
.collect();

View File

@ -29,6 +29,7 @@ use type_of;
use tvec;
use value::Value;
use Disr;
use glue;
use super::MirContext;
use super::operand::{OperandRef, OperandValue};
@ -217,7 +218,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
_ => {
assert!(rvalue_creates_operand(rvalue));
assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue));
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
self.store_operand(&bcx, dest.llval, temp);
bcx
@ -231,7 +232,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
debug_loc: DebugLoc)
-> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
{
assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue),
"cannot trans {:?} to operand", rvalue);
match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
@ -483,7 +485,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
(bcx, operand)
}
mir::Rvalue::Use(..) |
mir::Rvalue::Use(ref operand) => {
let operand = self.trans_operand(&bcx, operand);
(bcx, operand)
}
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) |
mir::Rvalue::Slice { .. } |
@ -599,7 +604,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}
}
pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
pub fn rvalue_creates_operand<'bcx, 'tcx>(mir: &mir::Mir<'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
rvalue: &mir::Rvalue<'tcx>) -> bool {
match *rvalue {
mir::Rvalue::Ref(..) |
mir::Rvalue::Len(..) |
@ -608,16 +615,20 @@ pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
mir::Rvalue::UnaryOp(..) |
mir::Rvalue::Box(..) =>
true,
mir::Rvalue::Use(..) | // (**)
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) |
mir::Rvalue::Slice { .. } |
mir::Rvalue::InlineAsm { .. } =>
false,
mir::Rvalue::Use(ref operand) => {
let ty = mir.operand_ty(bcx.tcx(), operand);
let ty = bcx.monomorphize(&ty);
// Types that don't need dropping can just be an operand,
// this allows temporary lvalues, used as rvalues, to
// avoid a stack slot when it's unnecessary
!glue::type_needs_drop(bcx.tcx(), ty)
}
}
// (*) this is only true if the type is suitable
// (**) we need to zero-out the source operand after moving, so we are restricted to either
// ensuring all users of `Use` zero it out themselves or not allowing to “create” operand for
// it.
}

View File

@ -9,7 +9,7 @@
// except according to those terms.
use rustc::mir::repr as mir;
use common::BlockAndBuilder;
use common::{self, BlockAndBuilder};
use debuginfo::DebugLoc;
use super::MirContext;
@ -42,9 +42,18 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
bcx
}
TempRef::Operand(Some(_)) => {
span_bug!(statement.span,
"operand {:?} already assigned",
rvalue);
let ty = self.mir.lvalue_ty(bcx.tcx(), lvalue);
let ty = bcx.monomorphize(&ty.to_ty(bcx.tcx()));
if !common::type_is_zero_size(bcx.ccx(), ty) {
span_bug!(statement.span,
"operand {:?} already assigned",
rvalue);
} else {
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we translate the operand
self.trans_rvalue_operand(bcx, rvalue, debug_loc).0
}
}
}
}

View File

@ -32,5 +32,3 @@ pub fn generic_function<T>(x: T) -> (T, i32) {
fn main() {
0i64.foo();
}
//~ TRANS_ITEM drop-glue i8

View File

@ -59,5 +59,3 @@ fn main() {
fn run_closure(f: &Fn(i32)) {
f(3);
}
//~ TRANS_ITEM drop-glue i8

View File

@ -57,5 +57,3 @@ mod mod2 {
cgu_explicit_inlining::never_inlined();
}
}
//~ TRANS_ITEM drop-glue i8

View File

@ -50,5 +50,3 @@ mod non_user {
}
}
//~ TRANS_ITEM drop-glue i8

View File

@ -50,5 +50,3 @@ mod non_user {
}
}
//~ TRANS_ITEM drop-glue i8