Rename load_value to load_scalar and make it support all Abi::Scalar types

bjorn3 2019-01-02 13:37:56 +01:00
parent c7101bbfd4
commit a15af1ccba
5 changed files with 57 additions and 42 deletions
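In short: CValue::load_value becomes CValue::load_scalar, and the ByRef case stops guessing the Cranelift type via fx.clif_type() (which only special-cased sized Box pointers and panicked otherwise); it now reads the type straight off the layout's Abi::Scalar through the new scalar_to_clif_type helper added in the first file below. As a rough, self-contained sketch of that mapping — mock enums and a pointer_bits parameter of my own, not the real rustc Scalar/Primitive types or Cranelift's types module — the snippet below mirrors how each scalar primitive lowers to a Cranelift value type:

// Mock sketch only: stand-ins for rustc's Primitive/Integer/FloatTy and for
// Cranelift's value types, illustrating the mapping scalar_to_clif_type performs.
#[derive(Debug, Clone, Copy, PartialEq)]
enum ClifType { I8, I16, I32, I64, F32, F64 }

#[derive(Debug, Clone, Copy)]
enum Primitive {
    Int(u16),   // integer width in bits (signedness does not affect the type)
    Float(u16), // float width in bits
    Pointer,    // lowered to the target's pointer-sized integer type
}

fn scalar_to_clif_type(pointer_bits: u16, prim: Primitive) -> ClifType {
    match prim {
        Primitive::Int(8) => ClifType::I8,
        Primitive::Int(16) => ClifType::I16,
        Primitive::Int(32) => ClifType::I32,
        Primitive::Int(64) => ClifType::I64,
        Primitive::Int(bits) => unimplemented!("u/i{}", bits), // e.g. 128-bit ints
        Primitive::Float(32) => ClifType::F32,
        Primitive::Float(64) => ClifType::F64,
        Primitive::Float(bits) => unimplemented!("f{}", bits),
        // A sized pointer is just a pointer-width integer to Cranelift.
        Primitive::Pointer => scalar_to_clif_type(pointer_bits, Primitive::Int(pointer_bits)),
    }
}

fn main() {
    // On a 64-bit target a pointer scalar loads as an i64-class value.
    assert_eq!(scalar_to_clif_type(64, Primitive::Pointer), ClifType::I64);
    assert_eq!(scalar_to_clif_type(64, Primitive::Float(32)), ClifType::F32);
}

The real helper returns types::I8..I64 / F32 / F64 and pointer_ty(tcx), and bails out with unimpl!("u/i128") for 128-bit integers, exactly as in the hunk below.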

View File

@@ -2,6 +2,7 @@ use std::borrow::Cow;
use std::iter;
use rustc::hir;
use rustc::ty::layout::{Scalar, Primitive, Integer, FloatTy};
use rustc_target::spec::abi::Abi;
use crate::prelude::*;
@@ -23,6 +24,23 @@ impl PassMode {
}
}
pub fn scalar_to_clif_type(tcx: TyCtxt, scalar: Scalar) -> Type {
match scalar.value {
Primitive::Int(int, _sign) => match int {
Integer::I8 => types::I8,
Integer::I16 => types::I16,
Integer::I32 => types::I32,
Integer::I64 => types::I64,
Integer::I128 => unimpl!("u/i128"),
}
Primitive::Float(flt) => match flt {
FloatTy::F32 => types::F32,
FloatTy::F64 => types::F64,
}
Primitive::Pointer => pointer_ty(tcx),
}
}
fn get_pass_mode<'a, 'tcx: 'a>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
abi: Abi,
@@ -66,7 +84,7 @@ fn adjust_arg_for_abi<'a, 'tcx: 'a>(
) -> Value {
match get_pass_mode(fx.tcx, sig.abi, arg.layout().ty, false) {
PassMode::NoPass => unimplemented!("pass mode nopass"),
PassMode::ByVal(_) => arg.load_value(fx),
PassMode::ByVal(_) => arg.load_scalar(fx),
PassMode::ByRef => arg.force_stack(fx),
}
}
@@ -251,7 +269,7 @@ impl<'a, 'tcx: 'a, B: Backend + 'a> FunctionCx<'a, 'tcx, B> {
.map(|arg| {
(
self.clif_type(arg.layout().ty).unwrap(),
arg.load_value(self),
arg.load_scalar(self),
)
})
.unzip();
@@ -441,13 +459,16 @@ pub fn codegen_fn_prelude<'a, 'tcx: 'a>(
let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
fx.local_map.insert(
RETURN_PLACE,
CPlace::Addr(null, None, fx.layout_of(fx.return_type())),
CPlace::Addr(null, None, ret_layout),
);
}
PassMode::ByVal(ret_ty) => {
fx.bcx.declare_var(mir_var(RETURN_PLACE), ret_ty);
fx.local_map
.insert(RETURN_PLACE, CPlace::Var(RETURN_PLACE, ret_layout));
PassMode::ByVal(_) => {
let is_ssa = !ssa_analyzed
.get(&RETURN_PLACE)
.unwrap()
.contains(crate::analyze::Flags::NOT_SSA);
local_place(fx, RETURN_PLACE, ret_layout, is_ssa);
}
PassMode::ByRef => {
fx.local_map.insert(
@@ -613,7 +634,7 @@ pub fn codegen_call_inner<'a, 'tcx: 'a>(
} else {
func_ref = if instance.is_none() {
let func = trans_operand(fx, func.expect("indirect call without func Operand"));
Some(func.load_value(fx))
Some(func.load_scalar(fx))
} else {
None
};
@@ -660,7 +681,7 @@ pub fn codegen_return(fx: &mut FunctionCx<impl Backend>) {
}
PassMode::ByVal(_) => {
let place = fx.get_local_place(RETURN_PLACE);
let ret_val = place.to_cvalue(fx).load_value(fx);
let ret_val = place.to_cvalue(fx).load_scalar(fx);
fx.bcx.ins().return_(&[ret_val]);
}
}

View File

@@ -184,7 +184,7 @@ fn codegen_fn_content<'a, 'tcx: 'a>(fx: &mut FunctionCx<'a, 'tcx, impl Backend>)
target,
cleanup: _,
} => {
let cond = trans_operand(fx, cond).load_value(fx);
let cond = trans_operand(fx, cond).load_scalar(fx);
// TODO HACK brz/brnz for i8/i16 is not yet implemented
let cond = fx.bcx.ins().uextend(types::I32, cond);
let target = fx.get_ebb(*target);
@@ -202,7 +202,7 @@ fn codegen_fn_content<'a, 'tcx: 'a>(fx: &mut FunctionCx<'a, 'tcx, impl Backend>)
values,
targets,
} => {
let discr = trans_operand(fx, discr).load_value(fx);
let discr = trans_operand(fx, discr).load_scalar(fx);
let mut switch = ::cranelift::frontend::Switch::new();
for (i, value) in values.iter().enumerate() {
let ebb = fx.get_ebb(targets[i]);
@@ -430,7 +430,7 @@ fn trans_stmt<'a, 'tcx: 'a>(
Rvalue::UnaryOp(un_op, operand) => {
let operand = trans_operand(fx, operand);
let layout = operand.layout();
let val = operand.load_value(fx);
let val = operand.load_scalar(fx);
let res = match un_op {
UnOp::Not => {
match layout.ty.sty {
@@ -492,7 +492,7 @@ fn trans_stmt<'a, 'tcx: 'a>(
| (ty::Uint(_), ty::Char)
| (ty::Uint(_), ty::Int(_))
| (ty::Uint(_), ty::Uint(_)) => {
let from = operand.load_value(fx);
let from = operand.load_scalar(fx);
let res = crate::common::clif_intcast(
fx,
from,
@@ -502,7 +502,7 @@
lval.write_cvalue(fx, CValue::ByVal(res, dest_layout));
}
(ty::Int(_), ty::Int(_)) | (ty::Int(_), ty::Uint(_)) => {
let from = operand.load_value(fx);
let from = operand.load_scalar(fx);
let res = crate::common::clif_intcast(
fx,
from,
@@ -512,7 +512,7 @@
lval.write_cvalue(fx, CValue::ByVal(res, dest_layout));
}
(ty::Float(from_flt), ty::Float(to_flt)) => {
let from = operand.load_value(fx);
let from = operand.load_scalar(fx);
let res = match (from_flt, to_flt) {
(FloatTy::F32, FloatTy::F64) => {
fx.bcx.ins().fpromote(types::F64, from)
@@ -526,7 +526,7 @@
}
(ty::Int(_), ty::Float(_)) => {
let from_ty = fx.clif_type(from_ty).unwrap();
let from = operand.load_value(fx);
let from = operand.load_scalar(fx);
// FIXME missing encoding for fcvt_from_sint.f32.i8
let from = if from_ty == types::I8 || from_ty == types::I16 {
fx.bcx.ins().sextend(types::I32, from)
@@ -539,7 +539,7 @@
}
(ty::Uint(_), ty::Float(_)) => {
let from_ty = fx.clif_type(from_ty).unwrap();
let from = operand.load_value(fx);
let from = operand.load_scalar(fx);
// FIXME missing encoding for fcvt_from_uint.f32.i8
let from = if from_ty == types::I8 || from_ty == types::I16 {
fx.bcx.ins().uextend(types::I32, from)
@@ -552,7 +552,7 @@
}
(ty::Bool, ty::Uint(_)) | (ty::Bool, ty::Int(_)) => {
let to_ty = fx.clif_type(to_ty).unwrap();
let from = operand.load_value(fx);
let from = operand.load_scalar(fx);
let res = if to_ty != types::I8 {
fx.bcx.ins().uextend(to_ty, from)
} else {
@@ -695,7 +695,7 @@ pub fn trans_get_discriminant<'a, 'tcx: 'a>(
let discr = value.value_field(fx, mir::Field::new(0));
let discr_ty = discr.layout().ty;
let lldiscr = discr.load_value(fx);
let lldiscr = discr.load_scalar(fx);
match layout.variants {
layout::Variants::Single { .. } => bug!(),
layout::Variants::Tagged { ref tag, .. } => {
@@ -782,8 +782,8 @@ macro_rules! binop_match {
$var:ident ($sign:pat) $name:tt $( ( $($next:tt)* ) )? ;
)*
) => {{
let lhs = $lhs.load_value($fx);
let rhs = $rhs.load_value($fx);
let lhs = $lhs.load_scalar($fx);
let rhs = $rhs.load_scalar($fx);
match ($bin_op, $signed) {
$(
(BinOp::$var, $sign) => binop_match!(@single $fx, $bug_fmt, $var, $signed, lhs, rhs, $ret_ty, $name $( ( $($next)* ) )?),
@@ -887,8 +887,8 @@ pub fn trans_checked_int_binop<'a, 'tcx: 'a>(
);
}
let lhs = in_lhs.load_value(fx);
let rhs = in_rhs.load_value(fx);
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
let res = match bin_op {
BinOp::Add => fx.bcx.ins().iadd(lhs, rhs),
BinOp::Sub => fx.bcx.ins().isub(lhs, rhs),
@@ -1060,7 +1060,7 @@ pub fn trans_place<'a, 'tcx: 'a>(
ProjectionElem::Deref => base.place_deref(fx),
ProjectionElem::Field(field, _ty) => base.place_field(fx, field),
ProjectionElem::Index(local) => {
let index = fx.get_local_place(local).to_cvalue(fx).load_value(fx);
let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
base.place_index(fx, index)
}
ProjectionElem::ConstantIndex {

View File

@@ -151,24 +151,18 @@ impl<'tcx> CValue<'tcx> {
}
}
pub fn load_value<'a>(self, fx: &mut FunctionCx<'a, 'tcx, impl Backend>) -> Value
/// Load a value with layout.abi of scalar
pub fn load_scalar<'a>(self, fx: &mut FunctionCx<'a, 'tcx, impl Backend>) -> Value
where
'tcx: 'a,
{
match self {
CValue::ByRef(addr, layout) => {
let clif_ty = fx.clif_type(layout.ty).unwrap_or_else(|| {
if layout.ty.is_box()
&& !fx
.layout_of(layout.ty.builtin_deref(true).unwrap().ty)
.is_unsized()
{
// Consider sized box to be a ptr
pointer_ty(fx.tcx)
} else {
panic!("load_value of type {:?}", layout.ty);
}
});
let scalar = match layout.abi {
layout::Abi::Scalar(ref scalar) => scalar.clone(),
_ => unreachable!(),
};
let clif_ty = crate::abi::scalar_to_clif_type(fx.tcx, scalar);
fx.bcx.ins().load(clif_ty, MemFlags::new(), addr, 0)
}
CValue::ByVal(value, _layout) => value,
@@ -355,7 +349,7 @@ impl<'a, 'tcx: 'a> CPlace<'tcx> {
match self {
CPlace::Var(var, _) => {
let data = from.load_value(fx);
let data = from.load_scalar(fx);
fx.bcx.def_var(mir_var(var), data)
}
CPlace::Addr(addr, None, dst_layout) => {
@@ -454,7 +448,7 @@ impl<'a, 'tcx: 'a> CPlace<'tcx> {
pub fn place_deref(self, fx: &mut FunctionCx<'a, 'tcx, impl Backend>) -> CPlace<'tcx> {
let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
if !inner_layout.is_unsized() {
CPlace::Addr(self.to_cvalue(fx).load_value(fx), None, inner_layout)
CPlace::Addr(self.to_cvalue(fx).load_scalar(fx), None, inner_layout)
} else {
match self.layout().abi {
Abi::ScalarPair(ref a, ref b) => {

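For the value/place change just above: load_scalar only makes sense for a layout whose ABI is a single scalar, which is why the ByRef arm can now match layout.abi against Abi::Scalar unconditionally; two-scalar layouts such as fat pointers keep using load_value_pair (still visible, unrenamed, in the last file). A minimal mock sketch of that contract, with hypothetical MockCValue/Abi types rather than the crate's real API:

// Mock sketch: a value can be loaded as one SSA value only when its layout
// ABI is a single scalar; pair layouts (fat pointers, etc.) need two values.
enum Abi { Scalar, ScalarPair }

struct MockCValue { abi: Abi, parts: [u64; 2] }

impl MockCValue {
    // Analogue of CValue::load_scalar: only valid for Abi::Scalar layouts.
    fn load_scalar(&self) -> u64 {
        match self.abi {
            Abi::Scalar => self.parts[0],
            _ => unreachable!("load_scalar on a non-scalar layout"),
        }
    }

    // Analogue of the existing CValue::load_value_pair for Abi::ScalarPair.
    fn load_value_pair(&self) -> (u64, u64) {
        match self.abi {
            Abi::ScalarPair => (self.parts[0], self.parts[1]),
            _ => unreachable!("load_value_pair on a non-pair layout"),
        }
    }
}

fn main() {
    let thin_ptr = MockCValue { abi: Abi::Scalar, parts: [0x1000, 0] };
    let fat_ptr = MockCValue { abi: Abi::ScalarPair, parts: [0x1000, 16] };
    assert_eq!(thin_ptr.load_scalar(), 0x1000);
    assert_eq!(fat_ptr.load_value_pair(), (0x1000, 16));
}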
View File

@@ -14,7 +14,7 @@ macro_rules! intrinsic_arg {
$arg
};
(v $fx:expr, $arg:ident) => {
$arg.load_value($fx)
$arg.load_scalar($fx)
};
}
@@ -358,7 +358,7 @@ pub fn codegen_intrinsic_call<'a, 'tcx: 'a>(
_ if intrinsic.starts_with("atomic_load"), (c ptr) {
let inner_layout =
fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
let val = CValue::ByRef(ptr.load_value(fx), inner_layout);
let val = CValue::ByRef(ptr.load_scalar(fx), inner_layout);
ret.write_cvalue(fx, val);
};
_ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {

View File

@@ -97,7 +97,7 @@ pub fn coerce_unsized_into<'a, 'tcx: 'a>(
// i.e., &'a fmt::Debug+Send => &'a fmt::Debug
src.load_value_pair(fx)
} else {
let base = src.load_value(fx);
let base = src.load_scalar(fx);
unsize_thin_ptr(fx, base, src_ty, dst_ty)
};
dst.write_cvalue(fx, CValue::ByValPair(base, info, dst.layout()));