FunctionCx: Rename codegen_cx -> cx

CohenArthur 2020-08-22 16:17:58 +02:00
parent 4cb2a2b793
commit 7b534d653d
20 changed files with 255 additions and 255 deletions
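
The rename is purely mechanical: the codegen_cx field of FunctionCx becomes cx, and every access site is updated to match. A minimal sketch of the pattern (the field signature is copied from this diff; everything else is elided):

    pub(crate) struct FunctionCx<'clif, 'tcx, B: Backend + 'static> {
        pub(crate) cx: &'clif mut crate::CodegenCx<'tcx, B>, // was `codegen_cx`
        // ... remaining fields unchanged ...
    }

    // Access sites change accordingly, e.g.:
    //   fx.codegen_cx.tcx     ->  fx.cx.tcx
    //   fx.codegen_cx.module  ->  fx.cx.module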

View File

@@ -226,9 +226,9 @@ pub(crate) fn import_function<'tcx>(
impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
/// Instance must be monomorphized
pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
let func_id = import_function(self.codegen_cx.tcx, &mut self.codegen_cx.module, inst);
let func_id = import_function(self.cx.tcx, &mut self.cx.module, inst);
let func_ref = self
.codegen_cx.module
.cx.module
.declare_func_in_func(func_id, &mut self.bcx.func);
#[cfg(debug_assertions)]
@@ -250,11 +250,11 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
call_conv: CallConv::triple_default(self.triple()),
};
let func_id = self
.codegen_cx.module
.cx.module
.declare_function(&name, Linkage::Import, &sig)
.unwrap();
let func_ref = self
.codegen_cx.module
.cx.module
.declare_func_in_func(func_id, &mut self.bcx.func);
let call_inst = self.bcx.ins().call(func_ref, args);
#[cfg(debug_assertions)]
@@ -374,9 +374,9 @@ pub(crate) fn codegen_fn_prelude<'tcx>(
.collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
assert!(fx.caller_location.is_none());
if fx.instance.def.requires_caller_location(fx.codegen_cx.tcx) {
if fx.instance.def.requires_caller_location(fx.cx.tcx) {
// Store caller location for `#[track_caller]`.
fx.caller_location = Some(cvalue_for_param(fx, start_block, None, None, fx.codegen_cx.tcx.caller_location_ty()).unwrap());
fx.caller_location = Some(cvalue_for_param(fx, start_block, None, None, fx.cx.tcx.caller_location_ty()).unwrap());
}
fx.bcx.switch_to_block(start_block);
@@ -398,7 +398,7 @@ pub(crate) fn codegen_fn_prelude<'tcx>(
let local_decl = &fx.mir.local_decls[local];
// v this ! is important
let internally_mutable = !val.layout().ty.is_freeze(
fx.codegen_cx.tcx.at(local_decl.source_info.span),
fx.cx.tcx.at(local_decl.source_info.span),
ParamEnv::reveal_all(),
);
if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
@@ -465,24 +465,24 @@ pub(crate) fn codegen_terminator_call<'tcx>(
args: &[Operand<'tcx>],
destination: Option<(Place<'tcx>, BasicBlock)>,
) {
let fn_ty = fx.monomorphize(&func.ty(fx.mir, fx.codegen_cx.tcx));
let fn_ty = fx.monomorphize(&func.ty(fx.mir, fx.cx.tcx));
let fn_sig = fx
.codegen_cx.tcx
.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &fn_ty.fn_sig(fx.codegen_cx.tcx));
.cx.tcx
.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &fn_ty.fn_sig(fx.cx.tcx));
let destination = destination.map(|(place, bb)| (trans_place(fx, place), bb));
// Handle special calls like intrinsics and empty drop glue.
let instance = if let ty::FnDef(def_id, substs) = fn_ty.kind {
let instance = ty::Instance::resolve(fx.codegen_cx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
let instance = ty::Instance::resolve(fx.cx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
.unwrap()
.unwrap()
.polymorphize(fx.codegen_cx.tcx);
.polymorphize(fx.cx.tcx);
if fx.codegen_cx.tcx.symbol_name(instance).name.starts_with("llvm.") {
if fx.cx.tcx.symbol_name(instance).name.starts_with("llvm.") {
crate::intrinsics::codegen_llvm_intrinsic_call(
fx,
&fx.codegen_cx.tcx.symbol_name(instance).name,
&fx.cx.tcx.symbol_name(instance).name,
substs,
args,
destination,
@@ -510,7 +510,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
let is_cold =
instance.map(|inst|
fx.codegen_cx.tcx.codegen_fn_attrs(inst.def_id())
fx.cx.tcx.codegen_fn_attrs(inst.def_id())
.flags.contains(CodegenFnAttrFlags::COLD))
.unwrap_or(false);
if is_cold {
@@ -558,7 +558,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
nop_inst,
format!(
"virtual call; self arg pass mode: {:?}",
get_pass_mode(fx.codegen_cx.tcx, args[0].layout())
get_pass_mode(fx.cx.tcx, args[0].layout())
),
);
}
@@ -608,7 +608,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
)
.collect::<Vec<_>>();
if instance.map(|inst| inst.def.requires_caller_location(fx.codegen_cx.tcx)).unwrap_or(false) {
if instance.map(|inst| inst.def.requires_caller_location(fx.cx.tcx)).unwrap_or(false) {
// Pass the caller location for `#[track_caller]`.
let caller_location = fx.get_caller_location(span);
call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
@@ -616,7 +616,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
let call_inst = if let Some(func_ref) = func_ref {
let sig = clif_sig_from_fn_sig(
fx.codegen_cx.tcx,
fx.cx.tcx,
fx.triple(),
fn_sig,
span,
@@ -637,7 +637,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
// FIXME find a cleaner way to support varargs
if fn_sig.c_variadic {
if fn_sig.abi != Abi::C {
fx.codegen_cx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
fx.cx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
}
let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
let abi_params = call_args
@@ -646,7 +646,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
let ty = fx.bcx.func.dfg.value_type(arg);
if !ty.is_int() {
// FIXME set %al to upperbound on float args once floats are supported
fx.codegen_cx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
fx.cx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
}
AbiParam::new(ty)
})
@@ -668,17 +668,17 @@ pub(crate) fn codegen_drop<'tcx>(
drop_place: CPlace<'tcx>,
) {
let ty = drop_place.layout().ty;
let drop_fn = Instance::resolve_drop_in_place(fx.codegen_cx.tcx, ty).polymorphize(fx.codegen_cx.tcx);
let drop_fn = Instance::resolve_drop_in_place(fx.cx.tcx, ty).polymorphize(fx.cx.tcx);
if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
// we don't actually need to drop anything
} else {
let drop_fn_ty = drop_fn.ty(fx.codegen_cx.tcx, ParamEnv::reveal_all());
let fn_sig = fx.codegen_cx.tcx.normalize_erasing_late_bound_regions(
let drop_fn_ty = drop_fn.ty(fx.cx.tcx, ParamEnv::reveal_all());
let fn_sig = fx.cx.tcx.normalize_erasing_late_bound_regions(
ParamEnv::reveal_all(),
&drop_fn_ty.fn_sig(fx.codegen_cx.tcx),
&drop_fn_ty.fn_sig(fx.cx.tcx),
);
assert_eq!(fn_sig.output(), fx.codegen_cx.tcx.mk_unit());
assert_eq!(fn_sig.output(), fx.cx.tcx.mk_unit());
match ty.kind {
ty::Dynamic(..) => {
@@ -687,7 +687,7 @@ pub(crate) fn codegen_drop<'tcx>(
let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
let sig = clif_sig_from_fn_sig(
fx.codegen_cx.tcx,
fx.cx.tcx,
fx.triple(),
fn_sig,
span,
@@ -702,7 +702,7 @@ pub(crate) fn codegen_drop<'tcx>(
let arg_place = CPlace::new_stack_slot(
fx,
fx.layout_of(fx.codegen_cx.tcx.mk_ref(
fx.layout_of(fx.cx.tcx.mk_ref(
&ty::RegionKind::ReErased,
TypeAndMut {
ty,
@@ -716,7 +716,7 @@ pub(crate) fn codegen_drop<'tcx>(
let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
if drop_fn.def.requires_caller_location(fx.codegen_cx.tcx) {
if drop_fn.def.requires_caller_location(fx.cx.tcx) {
// Pass the caller location for `#[track_caller]`.
let caller_location = fx.get_caller_location(span);
call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());

View File

@@ -118,7 +118,7 @@ pub(super) fn adjust_arg_for_abi<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
arg: CValue<'tcx>,
) -> EmptySinglePair<Value> {
match get_pass_mode(fx.codegen_cx.tcx, arg.layout()) {
match get_pass_mode(fx.cx.tcx, arg.layout()) {
PassMode::NoPass => Empty,
PassMode::ByVal(_) => Single(arg.load_scalar(fx)),
PassMode::ByValPair(_, _) => {
@@ -144,13 +144,13 @@ pub(super) fn cvalue_for_param<'tcx>(
arg_ty: Ty<'tcx>,
) -> Option<CValue<'tcx>> {
let layout = fx.layout_of(arg_ty);
let pass_mode = get_pass_mode(fx.codegen_cx.tcx, layout);
let pass_mode = get_pass_mode(fx.cx.tcx, layout);
if let PassMode::NoPass = pass_mode {
return None;
}
let clif_types = pass_mode.get_param_ty(fx.codegen_cx.tcx);
let clif_types = pass_mode.get_param_ty(fx.cx.tcx);
let block_params = clif_types.map(|t| fx.bcx.append_block_param(start_block, t));
#[cfg(debug_assertions)]

View File

@@ -19,7 +19,7 @@ pub(super) fn codegen_return_param(
start_block: Block,
) {
let ret_layout = return_layout(fx);
let ret_pass_mode = get_pass_mode(fx.codegen_cx.tcx, ret_layout);
let ret_pass_mode = get_pass_mode(fx.cx.tcx, ret_layout);
let ret_param = match ret_pass_mode {
PassMode::NoPass => {
fx.local_map
@@ -66,7 +66,7 @@ pub(super) fn codegen_with_call_return_arg<'tcx, B: Backend, T>(
) -> (Inst, T) {
let ret_layout = fx.layout_of(fn_sig.output());
let output_pass_mode = get_pass_mode(fx.codegen_cx.tcx, ret_layout);
let output_pass_mode = get_pass_mode(fx.cx.tcx, ret_layout);
let return_ptr = match output_pass_mode {
PassMode::NoPass => None,
PassMode::ByRef { size: Some(_)} => match ret_place {
@@ -102,7 +102,7 @@ pub(super) fn codegen_with_call_return_arg<'tcx, B: Backend, T>(
}
pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, impl Backend>) {
match get_pass_mode(fx.codegen_cx.tcx, return_layout(fx)) {
match get_pass_mode(fx.cx.tcx, return_layout(fx)) {
PassMode::NoPass | PassMode::ByRef { size: Some(_) } => {
fx.bcx.ins().return_(&[]);
}

View File

@@ -35,8 +35,8 @@ pub(crate) fn analyze(fx: &FunctionCx<'_, '_, impl Backend>) -> IndexVec<Local,
match &bb.terminator().kind {
TerminatorKind::Call { destination, .. } => {
if let Some((dest_place, _dest_bb)) = destination {
let dest_layout = fx.layout_of(fx.monomorphize(&dest_place.ty(&fx.mir.local_decls, fx.codegen_cx.tcx).ty));
if !crate::abi::can_return_to_ssa_var(fx.codegen_cx.tcx, dest_layout) {
let dest_layout = fx.layout_of(fx.monomorphize(&dest_place.ty(&fx.mir.local_decls, fx.cx.tcx).ty));
if !crate::abi::can_return_to_ssa_var(fx.cx.tcx, dest_layout) {
not_ssa(&mut flag_map, dest_place.local)
}
}

View File

@@ -81,7 +81,7 @@ pub(crate) fn init_global_lock_constructor(
}
pub(crate) fn lock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
let atomic_mutex = fx.codegen_cx.module.declare_data(
let atomic_mutex = fx.cx.module.declare_data(
"__cg_clif_global_atomic_mutex",
Linkage::Import,
true,
@@ -89,24 +89,24 @@ pub(crate) fn lock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
None,
).unwrap();
let pthread_mutex_lock = fx.codegen_cx.module.declare_function("pthread_mutex_lock", Linkage::Import, &cranelift_codegen::ir::Signature {
call_conv: fx.codegen_cx.module.target_config().default_call_conv,
let pthread_mutex_lock = fx.cx.module.declare_function("pthread_mutex_lock", Linkage::Import, &cranelift_codegen::ir::Signature {
call_conv: fx.cx.module.target_config().default_call_conv,
params: vec![
AbiParam::new(fx.codegen_cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
AbiParam::new(fx.cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
],
returns: vec![AbiParam::new(types::I32 /* c_int */)],
}).unwrap();
let pthread_mutex_lock = fx.codegen_cx.module.declare_func_in_func(pthread_mutex_lock, fx.bcx.func);
let pthread_mutex_lock = fx.cx.module.declare_func_in_func(pthread_mutex_lock, fx.bcx.func);
let atomic_mutex = fx.codegen_cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
let atomic_mutex = fx.bcx.ins().global_value(fx.codegen_cx.module.target_config().pointer_type(), atomic_mutex);
let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
let atomic_mutex = fx.bcx.ins().global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
fx.bcx.ins().call(pthread_mutex_lock, &[atomic_mutex]);
}
pub(crate) fn unlock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
let atomic_mutex = fx.codegen_cx.module.declare_data(
let atomic_mutex = fx.cx.module.declare_data(
"__cg_clif_global_atomic_mutex",
Linkage::Import,
true,
@@ -114,18 +114,18 @@ pub(crate) fn unlock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
None,
).unwrap();
let pthread_mutex_unlock = fx.codegen_cx.module.declare_function("pthread_mutex_unlock", Linkage::Import, &cranelift_codegen::ir::Signature {
call_conv: fx.codegen_cx.module.target_config().default_call_conv,
let pthread_mutex_unlock = fx.cx.module.declare_function("pthread_mutex_unlock", Linkage::Import, &cranelift_codegen::ir::Signature {
call_conv: fx.cx.module.target_config().default_call_conv,
params: vec![
AbiParam::new(fx.codegen_cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
AbiParam::new(fx.cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
],
returns: vec![AbiParam::new(types::I32 /* c_int */)],
}).unwrap();
let pthread_mutex_unlock = fx.codegen_cx.module.declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);
let pthread_mutex_unlock = fx.cx.module.declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);
let atomic_mutex = fx.codegen_cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
let atomic_mutex = fx.bcx.ins().global_value(fx.codegen_cx.module.target_config().pointer_type(), atomic_mutex);
let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
let atomic_mutex = fx.bcx.ins().global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
fx.bcx.ins().call(pthread_mutex_unlock, &[atomic_mutex]);
}

View File

@@ -34,7 +34,7 @@ pub(crate) fn trans_fn<'tcx, B: Backend + 'static>(
let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
let mut fx = FunctionCx {
codegen_cx: cx,
cx,
pointer_type,
instance,
@@ -220,7 +220,7 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Backend>) {
target,
cleanup: _,
} => {
if !fx.codegen_cx.tcx.sess.overflow_checks() {
if !fx.cx.tcx.sess.overflow_checks() {
if let mir::AssertKind::OverflowNeg(_) = *msg {
let target = fx.get_block(*target);
fx.bcx.ins().jump(target, &[]);
@@ -261,12 +261,12 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Backend>) {
}
};
let def_id = fx.codegen_cx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| {
fx.codegen_cx.tcx.sess.span_fatal(bb_data.terminator().source_info.span, &s)
let def_id = fx.cx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| {
fx.cx.tcx.sess.span_fatal(bb_data.terminator().source_info.span, &s)
});
let instance = Instance::mono(fx.codegen_cx.tcx, def_id).polymorphize(fx.codegen_cx.tcx);
let symbol_name = fx.codegen_cx.tcx.symbol_name(instance).name;
let instance = Instance::mono(fx.cx.tcx, def_id).polymorphize(fx.cx.tcx);
let symbol_name = fx.cx.tcx.symbol_name(instance).name;
fx.lib_call(&*symbol_name, vec![fx.pointer_type, fx.pointer_type, fx.pointer_type], vec![], &args);
@@ -296,7 +296,7 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Backend>) {
cleanup: _,
from_hir_call: _,
} => {
fx.codegen_cx.tcx.sess.time("codegen call", || crate::abi::codegen_terminator_call(
fx.cx.tcx.sess.time("codegen call", || crate::abi::codegen_terminator_call(
fx,
*fn_span,
block,
@@ -415,7 +415,7 @@ fn trans_stmt<'tcx>(
let lhs = trans_operand(fx, lhs);
let rhs = trans_operand(fx, rhs);
let res = if !fx.codegen_cx.tcx.sess.overflow_checks() {
let res = if !fx.cx.tcx.sess.overflow_checks() {
let val =
crate::num::trans_int_binop(fx, *bin_op, lhs, rhs).load_scalar(fx);
let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
@@ -461,14 +461,14 @@ fn trans_stmt<'tcx>(
lval.write_cvalue(fx, res);
}
Rvalue::Cast(CastKind::Pointer(PointerCast::ReifyFnPointer), operand, to_ty) => {
let from_ty = fx.monomorphize(&operand.ty(&fx.mir.local_decls, fx.codegen_cx.tcx));
let from_ty = fx.monomorphize(&operand.ty(&fx.mir.local_decls, fx.cx.tcx));
let to_layout = fx.layout_of(fx.monomorphize(to_ty));
match from_ty.kind {
ty::FnDef(def_id, substs) => {
let func_ref = fx.get_function_ref(
Instance::resolve_for_fn_ptr(fx.codegen_cx.tcx, ParamEnv::reveal_all(), def_id, substs)
Instance::resolve_for_fn_ptr(fx.cx.tcx, ParamEnv::reveal_all(), def_id, substs)
.unwrap()
.polymorphize(fx.codegen_cx.tcx),
.polymorphize(fx.cx.tcx),
);
let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
@@ -497,7 +497,7 @@ fn trans_stmt<'tcx>(
|ty::TypeAndMut {
ty: pointee_ty,
mutbl: _,
}| has_ptr_meta(fx.codegen_cx.tcx, pointee_ty),
}| has_ptr_meta(fx.cx.tcx, pointee_ty),
)
.unwrap_or(false)
}
@@ -523,7 +523,7 @@ fn trans_stmt<'tcx>(
match &operand.layout().variants {
Variants::Single { index } => {
let discr = operand.layout().ty.discriminant_for_variant(fx.codegen_cx.tcx, *index).unwrap();
let discr = operand.layout().ty.discriminant_for_variant(fx.cx.tcx, *index).unwrap();
let discr = if discr.ty.is_signed() {
rustc_middle::mir::interpret::sign_extend(discr.val, fx.layout_of(discr.ty).size)
} else {
@@ -575,11 +575,11 @@ fn trans_stmt<'tcx>(
match operand.layout().ty.kind {
ty::Closure(def_id, substs) => {
let instance = Instance::resolve_closure(
fx.codegen_cx.tcx,
fx.cx.tcx,
def_id,
substs,
ty::ClosureKind::FnOnce,
).polymorphize(fx.codegen_cx.tcx);
).polymorphize(fx.cx.tcx);
let func_ref = fx.get_function_ref(instance);
let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
@@ -602,9 +602,9 @@ fn trans_stmt<'tcx>(
let operand = trans_operand(fx, operand);
let times = fx
.monomorphize(times)
.eval(fx.codegen_cx.tcx, ParamEnv::reveal_all())
.eval(fx.cx.tcx, ParamEnv::reveal_all())
.val
.try_to_bits(fx.codegen_cx.tcx.data_layout.pointer_size)
.try_to_bits(fx.cx.tcx.data_layout.pointer_size)
.unwrap();
for i in 0..times {
let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
@@ -614,14 +614,14 @@ fn trans_stmt<'tcx>(
}
Rvalue::Len(place) => {
let place = trans_place(fx, *place);
let usize_layout = fx.layout_of(fx.codegen_cx.tcx.types.usize);
let usize_layout = fx.layout_of(fx.cx.tcx.types.usize);
let len = codegen_array_len(fx, place);
lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
}
Rvalue::NullaryOp(NullOp::Box, content_ty) => {
use rustc_hir::lang_items::ExchangeMallocFnLangItem;
let usize_type = fx.clif_type(fx.codegen_cx.tcx.types.usize).unwrap();
let usize_type = fx.clif_type(fx.cx.tcx.types.usize).unwrap();
let content_ty = fx.monomorphize(content_ty);
let layout = fx.layout_of(content_ty);
let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
@@ -629,18 +629,18 @@ fn trans_stmt<'tcx>(
.bcx
.ins()
.iconst(usize_type, layout.align.abi.bytes() as i64);
let box_layout = fx.layout_of(fx.codegen_cx.tcx.mk_box(content_ty));
let box_layout = fx.layout_of(fx.cx.tcx.mk_box(content_ty));
// Allocate space:
let def_id = match fx.codegen_cx.tcx.lang_items().require(ExchangeMallocFnLangItem) {
let def_id = match fx.cx.tcx.lang_items().require(ExchangeMallocFnLangItem) {
Ok(id) => id,
Err(s) => {
fx.codegen_cx.tcx
fx.cx.tcx
.sess
.fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
}
};
let instance = ty::Instance::mono(fx.codegen_cx.tcx, def_id).polymorphize(fx.codegen_cx.tcx);
let instance = ty::Instance::mono(fx.cx.tcx, def_id).polymorphize(fx.cx.tcx);
let func_ref = fx.get_function_ref(instance);
let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
let ptr = fx.bcx.inst_results(call)[0];
@@ -650,9 +650,9 @@ fn trans_stmt<'tcx>(
assert!(lval
.layout()
.ty
.is_sized(fx.codegen_cx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all()));
.is_sized(fx.cx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all()));
let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
let val = CValue::const_val(fx, fx.layout_of(fx.codegen_cx.tcx.types.usize), ty_size.into());
let val = CValue::const_val(fx, fx.layout_of(fx.cx.tcx.types.usize), ty_size.into());
lval.write_cvalue(fx, val);
}
Rvalue::Aggregate(kind, operands) => match **kind {
@@ -717,10 +717,10 @@ fn trans_stmt<'tcx>(
let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, subleaf);
assert_eq!(outputs.len(), 4);
trans_place(fx, outputs[0]).write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.codegen_cx.tcx.types.u32)));
trans_place(fx, outputs[1]).write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.codegen_cx.tcx.types.u32)));
trans_place(fx, outputs[2]).write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.codegen_cx.tcx.types.u32)));
trans_place(fx, outputs[3]).write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.codegen_cx.tcx.types.u32)));
trans_place(fx, outputs[0]).write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.cx.tcx.types.u32)));
trans_place(fx, outputs[1]).write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.cx.tcx.types.u32)));
trans_place(fx, outputs[2]).write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.cx.tcx.types.u32)));
trans_place(fx, outputs[3]).write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.cx.tcx.types.u32)));
}
"xgetbv" => {
assert_eq!(input_names, &[Symbol::intern("{ecx}")]);
@@ -740,17 +740,17 @@ fn trans_stmt<'tcx>(
crate::trap::trap_unimplemented(fx, "_xgetbv arch intrinsic is not supported");
}
// ___chkstk, ___chkstk_ms and __alloca are only used on Windows
_ if fx.codegen_cx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") => {
_ if fx.cx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") => {
crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
}
_ if fx.codegen_cx.tcx.symbol_name(fx.instance).name == "__alloca" => {
_ if fx.cx.tcx.symbol_name(fx.instance).name == "__alloca" => {
crate::trap::trap_unimplemented(fx, "Alloca is not supported");
}
// Used in sys::windows::abort_internal
"int $$0x29" => {
crate::trap::trap_unimplemented(fx, "Windows abort");
}
_ => fx.codegen_cx.tcx.sess.span_fatal(stmt.source_info.span, "Inline assembly is not supported"),
_ => fx.cx.tcx.sess.span_fatal(stmt.source_info.span, "Inline assembly is not supported"),
}
}
}
@@ -763,8 +763,8 @@ fn codegen_array_len<'tcx>(
match place.layout().ty.kind {
ty::Array(_elem_ty, len) => {
let len = fx.monomorphize(&len)
.eval(fx.codegen_cx.tcx, ParamEnv::reveal_all())
.eval_usize(fx.codegen_cx.tcx, ParamEnv::reveal_all()) as i64;
.eval(fx.cx.tcx, ParamEnv::reveal_all())
.eval_usize(fx.cx.tcx, ParamEnv::reveal_all()) as i64;
fx.bcx.ins().iconst(fx.pointer_type, len)
}
ty::Slice(_elem_ty) => place
@@ -817,7 +817,7 @@ pub(crate) fn trans_place<'tcx>(
let ptr = cplace.to_ptr();
cplace = CPlace::for_ptr(
ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * i64::from(from)),
fx.layout_of(fx.codegen_cx.tcx.mk_array(elem_ty, u64::from(to) - u64::from(from))),
fx.layout_of(fx.cx.tcx.mk_array(elem_ty, u64::from(to) - u64::from(from))),
);
}
ty::Slice(elem_ty) => {

View File

@@ -86,14 +86,14 @@ pub(crate) fn clif_int_or_float_cast(
);
let from_rust_ty = if from_signed {
fx.codegen_cx.tcx.types.i128
fx.cx.tcx.types.i128
} else {
fx.codegen_cx.tcx.types.u128
fx.cx.tcx.types.u128
};
let to_rust_ty = match to_ty {
types::F32 => fx.codegen_cx.tcx.types.f32,
types::F64 => fx.codegen_cx.tcx.types.f64,
types::F32 => fx.cx.tcx.types.f32,
types::F64 => fx.cx.tcx.types.f64,
_ => unreachable!(),
};
@@ -131,15 +131,15 @@ pub(crate) fn clif_int_or_float_cast(
);
let from_rust_ty = match from_ty {
types::F32 => fx.codegen_cx.tcx.types.f32,
types::F64 => fx.codegen_cx.tcx.types.f64,
types::F32 => fx.cx.tcx.types.f32,
types::F64 => fx.cx.tcx.types.f64,
_ => unreachable!(),
};
let to_rust_ty = if to_signed {
fx.codegen_cx.tcx.types.i128
fx.cx.tcx.types.i128
} else {
fx.codegen_cx.tcx.types.u128
fx.cx.tcx.types.u128
};
return fx

View File

@@ -9,7 +9,7 @@ pub(crate) fn maybe_codegen<'tcx>(
lhs: CValue<'tcx>,
rhs: CValue<'tcx>,
) -> Option<CValue<'tcx>> {
if lhs.layout().ty != fx.codegen_cx.tcx.types.u128 && lhs.layout().ty != fx.codegen_cx.tcx.types.i128 {
if lhs.layout().ty != fx.cx.tcx.types.u128 && lhs.layout().ty != fx.cx.tcx.types.i128 {
return None;
}
@@ -25,7 +25,7 @@ pub(crate) fn maybe_codegen<'tcx>(
}
BinOp::Add | BinOp::Sub if !checked => return None,
BinOp::Add => {
let out_ty = fx.codegen_cx.tcx.mk_tup([lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter());
let out_ty = fx.cx.tcx.mk_tup([lhs.layout().ty, fx.cx.tcx.types.bool].iter());
return Some(if is_signed {
fx.easy_call("__rust_i128_addo", &[lhs, rhs], out_ty)
} else {
@@ -33,7 +33,7 @@ pub(crate) fn maybe_codegen<'tcx>(
});
}
BinOp::Sub => {
let out_ty = fx.codegen_cx.tcx.mk_tup([lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter());
let out_ty = fx.cx.tcx.mk_tup([lhs.layout().ty, fx.cx.tcx.types.bool].iter());
return Some(if is_signed {
fx.easy_call("__rust_i128_subo", &[lhs, rhs], out_ty)
} else {
@@ -43,7 +43,7 @@ pub(crate) fn maybe_codegen<'tcx>(
BinOp::Offset => unreachable!("offset should only be used on pointers, not 128bit ints"),
BinOp::Mul => {
let res = if checked {
let out_ty = fx.codegen_cx.tcx.mk_tup([lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter());
let out_ty = fx.cx.tcx.mk_tup([lhs.layout().ty, fx.cx.tcx.types.bool].iter());
if is_signed {
fx.easy_call("__rust_i128_mulo", &[lhs, rhs], out_ty)
} else {
@@ -51,9 +51,9 @@ pub(crate) fn maybe_codegen<'tcx>(
}
} else {
let val_ty = if is_signed {
fx.codegen_cx.tcx.types.i128
fx.cx.tcx.types.i128
} else {
fx.codegen_cx.tcx.types.u128
fx.cx.tcx.types.u128
};
fx.easy_call("__multi3", &[lhs, rhs], val_ty)
};
@@ -62,17 +62,17 @@ pub(crate) fn maybe_codegen<'tcx>(
BinOp::Div => {
assert!(!checked);
if is_signed {
Some(fx.easy_call("__divti3", &[lhs, rhs], fx.codegen_cx.tcx.types.i128))
Some(fx.easy_call("__divti3", &[lhs, rhs], fx.cx.tcx.types.i128))
} else {
Some(fx.easy_call("__udivti3", &[lhs, rhs], fx.codegen_cx.tcx.types.u128))
Some(fx.easy_call("__udivti3", &[lhs, rhs], fx.cx.tcx.types.u128))
}
}
BinOp::Rem => {
assert!(!checked);
if is_signed {
Some(fx.easy_call("__modti3", &[lhs, rhs], fx.codegen_cx.tcx.types.i128))
Some(fx.easy_call("__modti3", &[lhs, rhs], fx.cx.tcx.types.i128))
} else {
Some(fx.easy_call("__umodti3", &[lhs, rhs], fx.codegen_cx.tcx.types.u128))
Some(fx.easy_call("__umodti3", &[lhs, rhs], fx.cx.tcx.types.u128))
}
}
BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => {
@@ -104,7 +104,7 @@ pub(crate) fn maybe_codegen<'tcx>(
let val = match (bin_op, is_signed) {
(BinOp::Shr, false) => {
let val = fx.bcx.ins().iconcat(lhs_msb, all_zeros);
Some(CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.u128)))
Some(CValue::by_val(val, fx.layout_of(fx.cx.tcx.types.u128)))
}
(BinOp::Shr, true) => {
let sign = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, lhs_msb, 0);
@@ -112,13 +112,13 @@ pub(crate) fn maybe_codegen<'tcx>(
let all_sign_bits = fx.bcx.ins().select(sign, all_zeros, all_ones);
let val = fx.bcx.ins().iconcat(lhs_msb, all_sign_bits);
Some(CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.i128)))
Some(CValue::by_val(val, fx.layout_of(fx.cx.tcx.types.i128)))
}
(BinOp::Shl, _) => {
let val_ty = if is_signed {
fx.codegen_cx.tcx.types.i128
fx.cx.tcx.types.i128
} else {
fx.codegen_cx.tcx.types.u128
fx.cx.tcx.types.u128
};
let val = fx.bcx.ins().iconcat(all_zeros, lhs_lsb);
Some(CValue::by_val(val, fx.layout_of(val_ty)))
@@ -127,7 +127,7 @@ pub(crate) fn maybe_codegen<'tcx>(
};
if let Some(val) = val {
if let Some(is_overflow) = is_overflow {
let out_ty = fx.codegen_cx.tcx.mk_tup([lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter());
let out_ty = fx.cx.tcx.mk_tup([lhs.layout().ty, fx.cx.tcx.types.bool].iter());
let val = val.load_scalar(fx);
return Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)));
} else {
@@ -137,24 +137,24 @@ pub(crate) fn maybe_codegen<'tcx>(
}
let truncated_rhs = clif_intcast(fx, rhs_val, types::I32, false);
let truncated_rhs = CValue::by_val(truncated_rhs, fx.layout_of(fx.codegen_cx.tcx.types.u32));
let truncated_rhs = CValue::by_val(truncated_rhs, fx.layout_of(fx.cx.tcx.types.u32));
let val = match (bin_op, is_signed) {
(BinOp::Shl, false) => {
fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.codegen_cx.tcx.types.u128)
fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.cx.tcx.types.u128)
}
(BinOp::Shl, true) => {
fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.codegen_cx.tcx.types.i128)
fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.cx.tcx.types.i128)
}
(BinOp::Shr, false) => {
fx.easy_call("__lshrti3", &[lhs, truncated_rhs], fx.codegen_cx.tcx.types.u128)
fx.easy_call("__lshrti3", &[lhs, truncated_rhs], fx.cx.tcx.types.u128)
}
(BinOp::Shr, true) => {
fx.easy_call("__ashrti3", &[lhs, truncated_rhs], fx.codegen_cx.tcx.types.i128)
fx.easy_call("__ashrti3", &[lhs, truncated_rhs], fx.cx.tcx.types.i128)
}
(_, _) => unreachable!(),
};
if let Some(is_overflow) = is_overflow {
let out_ty = fx.codegen_cx.tcx.mk_tup([lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter());
let out_ty = fx.cx.tcx.mk_tup([lhs.layout().ty, fx.cx.tcx.types.bool].iter());
let val = val.load_scalar(fx);
Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)))
} else {

View File

@@ -265,7 +265,7 @@ pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
}
pub(crate) struct FunctionCx<'clif, 'tcx, B: Backend + 'static> {
pub(crate) codegen_cx: &'clif mut crate::CodegenCx<'tcx, B>,
pub(crate) cx: &'clif mut crate::CodegenCx<'tcx, B>,
pub(crate) pointer_type: Type, // Cached from module
pub(crate) instance: Instance<'tcx>,
@@ -296,11 +296,11 @@ impl<'tcx, B: Backend> LayoutOf for FunctionCx<'_, 'tcx, B> {
fn layout_of(&self, ty: Ty<'tcx>) -> TyAndLayout<'tcx> {
assert!(!ty.still_further_specializable());
self.codegen_cx.tcx
self.cx.tcx
.layout_of(ParamEnv::reveal_all().and(&ty))
.unwrap_or_else(|e| {
if let layout::LayoutError::SizeOverflow(_) = e {
self.codegen_cx.tcx.sess.fatal(&e.to_string())
self.cx.tcx.sess.fatal(&e.to_string())
} else {
bug!("failed to get layout for `{}`: {}", ty, e)
}
@@ -310,13 +310,13 @@ impl<'tcx, B: Backend> LayoutOf for FunctionCx<'_, 'tcx, B> {
impl<'tcx, B: Backend + 'static> layout::HasTyCtxt<'tcx> for FunctionCx<'_, 'tcx, B> {
fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
self.codegen_cx.tcx
self.cx.tcx
}
}
impl<'tcx, B: Backend + 'static> rustc_target::abi::HasDataLayout for FunctionCx<'_, 'tcx, B> {
fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
&self.codegen_cx.tcx.data_layout
&self.cx.tcx.data_layout
}
}
@@ -328,7 +328,7 @@ impl<'tcx, B: Backend + 'static> layout::HasParamEnv<'tcx> for FunctionCx<'_, 't
impl<'tcx, B: Backend + 'static> HasTargetSpec for FunctionCx<'_, 'tcx, B> {
fn target_spec(&self) -> &Target {
&self.codegen_cx.tcx.sess.target.target
&self.cx.tcx.sess.target.target
}
}
@@ -338,22 +338,22 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
T: TypeFoldable<'tcx> + Copy,
{
if let Some(substs) = self.instance.substs_for_mir_body() {
self.codegen_cx.tcx.subst_and_normalize_erasing_regions(
self.cx.tcx.subst_and_normalize_erasing_regions(
substs,
ty::ParamEnv::reveal_all(),
value,
)
} else {
self.codegen_cx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
self.cx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
}
}
pub(crate) fn clif_type(&self, ty: Ty<'tcx>) -> Option<Type> {
clif_type_from_ty(self.codegen_cx.tcx, ty)
clif_type_from_ty(self.cx.tcx, ty)
}
pub(crate) fn clif_pair_type(&self, ty: Ty<'tcx>) -> Option<(Type, Type)> {
clif_pair_type_from_ty(self.codegen_cx.tcx, ty)
clif_pair_type_from_ty(self.cx.tcx, ty)
}
pub(crate) fn get_block(&self, bb: BasicBlock) -> Block {
@@ -378,8 +378,8 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
}
let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
let caller = self.codegen_cx.tcx.sess.source_map().lookup_char_pos(topmost.lo());
let const_loc = self.codegen_cx.tcx.const_caller_location((
let caller = self.cx.tcx.sess.source_map().lookup_char_pos(topmost.lo());
let const_loc = self.cx.tcx.const_caller_location((
rustc_span::symbol::Symbol::intern(&caller.file.name.to_string()),
caller.line as u32,
caller.col_display as u32 + 1,
@@ -387,12 +387,12 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
crate::constant::trans_const_value(
self,
const_loc,
self.codegen_cx.tcx.caller_location_ty(),
self.cx.tcx.caller_location_ty(),
)
}
pub(crate) fn triple(&self) -> &target_lexicon::Triple {
self.codegen_cx.module.isa().triple()
self.cx.module.isa().triple()
}
pub(crate) fn anonymous_str(&mut self, prefix: &str, msg: &str) -> Value {
@@ -405,7 +405,7 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
let mut data_ctx = DataContext::new();
data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
let msg_id = self
.codegen_cx.module
.cx.module
.declare_data(
&format!("__{}_{:08x}", prefix, msg_hash),
Linkage::Local,
@@ -416,9 +416,9 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
.unwrap();
// Ignore DuplicateDefinition error, as the data will be the same
let _ = self.codegen_cx.module.define_data(msg_id, &data_ctx);
let _ = self.cx.module.define_data(msg_id, &data_ctx);
let local_msg_id = self.codegen_cx.module.declare_data_in_func(msg_id, self.bcx.func);
let local_msg_id = self.cx.module.declare_data_in_func(msg_id, self.bcx.func);
#[cfg(debug_assertions)]
{
self.add_comment(local_msg_id, msg);

View File

@@ -41,10 +41,10 @@ pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, impl Backend>) {
match const_.val {
ConstKind::Value(_) => {}
ConstKind::Unevaluated(def, ref substs, promoted) => {
if let Err(err) = fx.codegen_cx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
if let Err(err) = fx.cx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
match err {
ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
fx.codegen_cx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
fx.cx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
}
ErrorHandled::TooGeneric => {
span_bug!(constant.span, "codegen encountered polymorphic constant: {:?}", err);
@@ -67,8 +67,8 @@ pub(crate) fn codegen_tls_ref<'tcx>(
def_id: DefId,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
let data_id = data_id_for_static(fx.codegen_cx.tcx, &mut fx.codegen_cx.module, def_id, false);
let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
let data_id = data_id_for_static(fx.cx.tcx, &mut fx.cx.module, def_id, false);
let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("tls {:?}", def_id));
let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
@@ -80,8 +80,8 @@ fn codegen_static_ref<'tcx>(
def_id: DefId,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
let data_id = data_id_for_static(fx.codegen_cx.tcx, &mut fx.codegen_cx.module, def_id, false);
let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
let data_id = data_id_for_static(fx.cx.tcx, &mut fx.cx.module, def_id, false);
let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", def_id));
let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
@@ -97,7 +97,7 @@ pub(crate) fn trans_constant<'tcx>(
let const_ = fx.monomorphize(&constant.literal);
let const_val = match const_.val {
ConstKind::Value(const_val) => const_val,
ConstKind::Unevaluated(def, ref substs, promoted) if fx.codegen_cx.tcx.is_static(def.did) => {
ConstKind::Unevaluated(def, ref substs, promoted) if fx.cx.tcx.is_static(def.did) => {
assert!(substs.is_empty());
assert!(promoted.is_none());
@@ -108,11 +108,11 @@ pub(crate) fn trans_constant<'tcx>(
).to_cvalue(fx);
}
ConstKind::Unevaluated(def, ref substs, promoted) => {
match fx.codegen_cx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
match fx.cx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
Ok(const_val) => const_val,
Err(_) => {
if promoted.is_none() {
fx.codegen_cx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
fx.cx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
}
return crate::trap::trap_unreachable_ret_value(
fx,
@@ -154,7 +154,7 @@ pub(crate) fn trans_const_value<'tcx>(
);
let ptr = Pointer::new(AllocId(!0), Size::ZERO); // The alloc id is never used
alloc.write_scalar(fx, ptr, x.into(), size).unwrap();
let alloc = fx.codegen_cx.tcx.intern_const_alloc(alloc);
let alloc = fx.cx.tcx.intern_const_alloc(alloc);
return CValue::by_ref(pointer_for_allocation(fx, alloc), layout);
}
@@ -164,25 +164,25 @@ pub(crate) fn trans_const_value<'tcx>(
return CValue::const_val(fx, layout, data);
}
Scalar::Ptr(ptr) => {
let alloc_kind = fx.codegen_cx.tcx.get_global_alloc(ptr.alloc_id);
let alloc_kind = fx.cx.tcx.get_global_alloc(ptr.alloc_id);
let base_addr = match alloc_kind {
Some(GlobalAlloc::Memory(alloc)) => {
fx.codegen_cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
let data_id = data_id_for_alloc_id(&mut fx.codegen_cx.module, ptr.alloc_id, alloc.align, alloc.mutability);
let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
fx.cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
let data_id = data_id_for_alloc_id(&mut fx.cx.module, ptr.alloc_id, alloc.align, alloc.mutability);
let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", ptr.alloc_id));
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
Some(GlobalAlloc::Function(instance)) => {
let func_id = crate::abi::import_function(fx.codegen_cx.tcx, &mut fx.codegen_cx.module, instance);
let local_func_id = fx.codegen_cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
let func_id = crate::abi::import_function(fx.cx.tcx, &mut fx.cx.module, instance);
let local_func_id = fx.cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
}
Some(GlobalAlloc::Static(def_id)) => {
assert!(fx.codegen_cx.tcx.is_static(def_id));
let data_id = data_id_for_static(fx.codegen_cx.tcx, &mut fx.codegen_cx.module, def_id, false);
let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
assert!(fx.cx.tcx.is_static(def_id));
let data_id = data_id_for_static(fx.cx.tcx, &mut fx.cx.module, def_id, false);
let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", def_id));
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
@@ -215,11 +215,11 @@ fn pointer_for_allocation<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
alloc: &'tcx Allocation,
) -> crate::pointer::Pointer {
let alloc_id = fx.codegen_cx.tcx.create_memory_alloc(alloc);
fx.codegen_cx.constants_cx.todo.push(TodoItem::Alloc(alloc_id));
let data_id = data_id_for_alloc_id(&mut fx.codegen_cx.module, alloc_id, alloc.align, alloc.mutability);
let alloc_id = fx.cx.tcx.create_memory_alloc(alloc);
fx.cx.constants_cx.todo.push(TodoItem::Alloc(alloc_id));
let data_id = data_id_for_alloc_id(&mut fx.cx.module, alloc_id, alloc.align, alloc.mutability);
let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", alloc_id));
let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
@@ -419,7 +419,7 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
match operand {
Operand::Copy(_) | Operand::Move(_) => None,
Operand::Constant(const_) => {
Some(fx.monomorphize(&const_.literal).eval(fx.codegen_cx.tcx, ParamEnv::reveal_all()))
Some(fx.monomorphize(&const_.literal).eval(fx.cx.tcx, ParamEnv::reveal_all()))
}
}
}

View File

@@ -26,7 +26,7 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
let ptr = place.place_field(fx, mir::Field::new(tag_field));
let to = layout
.ty
.discriminant_for_variant(fx.codegen_cx.tcx, variant_index)
.discriminant_for_variant(fx.cx.tcx, variant_index)
.unwrap()
.val;
let discr = CValue::const_val(fx, ptr.layout(), to);
@@ -73,7 +73,7 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
Variants::Single { index } => {
let discr_val = layout
.ty
.discriminant_for_variant(fx.codegen_cx.tcx, *index)
.discriminant_for_variant(fx.cx.tcx, *index)
.map_or(u128::from(index.as_u32()), |discr| discr.val);
return CValue::const_val(fx, dest_layout, discr_val);
}

View File

@@ -70,10 +70,10 @@ pub(crate) fn codegen_inline_asm<'tcx>(
let inline_asm_index = fx.inline_asm_index;
fx.inline_asm_index += 1;
let asm_name = format!("{}__inline_asm_{}", fx.codegen_cx.tcx.symbol_name(fx.instance).name, inline_asm_index);
let asm_name = format!("{}__inline_asm_{}", fx.cx.tcx.symbol_name(fx.instance).name, inline_asm_index);
let generated_asm = generate_asm_wrapper(&asm_name, InlineAsmArch::X86_64, options, template, clobbered_regs, &inputs, &outputs);
fx.codegen_cx.global_asm.push_str(&generated_asm);
fx.cx.global_asm.push_str(&generated_asm);
call_inline_asm(fx, &asm_name, slot_size, inputs, outputs);
}
@@ -169,12 +169,12 @@ fn call_inline_asm<'tcx>(
#[cfg(debug_assertions)]
fx.add_comment(stack_slot, "inline asm scratch slot");
let inline_asm_func = fx.codegen_cx.module.declare_function(asm_name, Linkage::Import, &Signature {
let inline_asm_func = fx.cx.module.declare_function(asm_name, Linkage::Import, &Signature {
call_conv: CallConv::SystemV,
params: vec![AbiParam::new(fx.pointer_type)],
returns: vec![],
}).unwrap();
let inline_asm_func = fx.codegen_cx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
let inline_asm_func = fx.cx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(inline_asm_func, asm_name);

View File

@@ -30,13 +30,13 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
intrinsic_match! {
fx, intrinsic, substs, args,
_ => {
fx.codegen_cx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
fx.cx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
crate::trap::trap_unimplemented(fx, intrinsic);
};
// Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
llvm.x86.sse2.pmovmskb.128 | llvm.x86.avx2.pmovmskb | llvm.x86.sse2.movmsk.pd, (c a) {
let (lane_layout, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, a.layout());
let (lane_layout, lane_count) = lane_type_and_count(fx.cx.tcx, a.layout());
let lane_ty = fx.clif_type(lane_layout.ty).unwrap();
assert!(lane_count <= 32);
@@ -61,7 +61,7 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
res = fx.bcx.ins().bor(res, a_lane_sign);
}
let res = CValue::by_val(res, fx.layout_of(fx.codegen_cx.tcx.types.i32));
let res = CValue::by_val(res, fx.layout_of(fx.cx.tcx.types.i32));
ret.write_cvalue(fx, res);
};
llvm.x86.sse2.cmp.ps | llvm.x86.sse2.cmp.pd, (c x, c y, o kind) {

View File

@@ -88,7 +88,7 @@ macro call_intrinsic_match {
let ($($arg,)*) = (
$(trans_operand($fx, $arg),)*
);
let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.codegen_cx.tcx.types.$ty);
let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.cx.tcx.types.$ty);
$ret.write_cvalue($fx, res);
if let Some((_, dest)) = $destination {
@@ -144,7 +144,7 @@ macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
match $ty.kind {
ty::Uint(_) | ty::Int(_) => {}
_ => {
$fx.codegen_cx.tcx.sess.span_err($span, &format!("`{}` intrinsic: expected basic integer type, found `{:?}`", $intrinsic, $ty));
$fx.cx.tcx.sess.span_err($span, &format!("`{}` intrinsic: expected basic integer type, found `{:?}`", $intrinsic, $ty));
// Prevent verifier error
crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
return;
@@ -154,7 +154,7 @@ macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
if !$ty.is_simd() {
$fx.codegen_cx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
$fx.cx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
// Prevent verifier error
crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
return;
@@ -203,8 +203,8 @@ fn simd_for_each_lane<'tcx, B: Backend>(
) {
let layout = val.layout();
let (lane_layout, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, layout);
let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.codegen_cx.tcx, ret.layout());
let (lane_layout, lane_count) = lane_type_and_count(fx.cx.tcx, layout);
let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.cx.tcx, ret.layout());
assert_eq!(lane_count, ret_lane_count);
for lane_idx in 0..lane_count {
@@ -233,8 +233,8 @@ fn simd_pair_for_each_lane<'tcx, B: Backend>(
assert_eq!(x.layout(), y.layout());
let layout = x.layout();
let (lane_layout, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, layout);
let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.codegen_cx.tcx, ret.layout());
let (lane_layout, lane_count) = lane_type_and_count(fx.cx.tcx, layout);
let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.cx.tcx, ret.layout());
assert_eq!(lane_count, ret_lane_count);
for lane in 0..lane_count {
@@ -273,7 +273,7 @@ fn bool_to_zero_or_max_uint<'tcx>(
macro simd_cmp {
($fx:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
let vector_ty = clif_vector_type($fx.codegen_cx.tcx, $x.layout());
let vector_ty = clif_vector_type($fx.cx.tcx, $x.layout());
if let Some(vector_ty) = vector_ty {
let x = $x.load_scalar($fx);
@@ -390,7 +390,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
let def_id = instance.def_id();
let substs = instance.substs;
let intrinsic = fx.codegen_cx.tcx.item_name(def_id).as_str();
let intrinsic = fx.cx.tcx.item_name(def_id).as_str();
let intrinsic = &intrinsic[..];
let ret = match destination {
@@ -423,7 +423,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
return;
}
let usize_layout = fx.layout_of(fx.codegen_cx.tcx.types.usize);
let usize_layout = fx.layout_of(fx.cx.tcx.types.usize);
call_intrinsic_match! {
fx, intrinsic, substs, ret, destination, args,
@@ -473,7 +473,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
intrinsic_match! {
fx, intrinsic, substs, args,
_ => {
fx.codegen_cx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
fx.cx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
};
assume, (c _a) {};
@@ -494,10 +494,10 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
if intrinsic.contains("nonoverlapping") {
// FIXME emit_small_memcpy
fx.bcx.call_memcpy(fx.codegen_cx.module.target_config(), dst, src, byte_amount);
fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, byte_amount);
} else {
// FIXME emit_small_memmove
fx.bcx.call_memmove(fx.codegen_cx.module.target_config(), dst, src, byte_amount);
fx.bcx.call_memmove(fx.cx.module.target_config(), dst, src, byte_amount);
}
};
// NOTE: the volatile variants have src and dst swapped
@@ -513,10 +513,10 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
// FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
if intrinsic.contains("nonoverlapping") {
// FIXME emit_small_memcpy
fx.bcx.call_memcpy(fx.codegen_cx.module.target_config(), dst, src, byte_amount);
fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, byte_amount);
} else {
// FIXME emit_small_memmove
fx.bcx.call_memmove(fx.codegen_cx.module.target_config(), dst, src, byte_amount);
fx.bcx.call_memmove(fx.cx.module.target_config(), dst, src, byte_amount);
}
};
discriminant_value, (c ptr) {
@@ -680,11 +680,11 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
let dst_ptr = dst.load_scalar(fx);
// FIXME make the memset actually volatile when switching to emit_small_memset
// FIXME use emit_small_memset
fx.bcx.call_memset(fx.codegen_cx.module.target_config(), dst_ptr, val, count);
fx.bcx.call_memset(fx.cx.module.target_config(), dst_ptr, val, count);
};
ctlz | ctlz_nonzero, <T> (v arg) {
// FIXME trap on `ctlz_nonzero` with zero arg.
let res = if T == fx.codegen_cx.tcx.types.u128 || T == fx.codegen_cx.tcx.types.i128 {
let res = if T == fx.cx.tcx.types.u128 || T == fx.cx.tcx.types.i128 {
// FIXME verify this algorithm is correct
let (lsb, msb) = fx.bcx.ins().isplit(arg);
let lsb_lz = fx.bcx.ins().clz(lsb);
@@ -701,7 +701,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
};
cttz | cttz_nonzero, <T> (v arg) {
// FIXME trap on `cttz_nonzero` with zero arg.
let res = if T == fx.codegen_cx.tcx.types.u128 || T == fx.codegen_cx.tcx.types.i128 {
let res = if T == fx.cx.tcx.types.u128 || T == fx.cx.tcx.types.i128 {
// FIXME verify this algorithm is correct
let (lsb, msb) = fx.bcx.ins().isplit(arg);
let lsb_tz = fx.bcx.ins().ctz(lsb);
@@ -842,7 +842,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
let const_val =
fx.codegen_cx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
fx.cx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
let val = crate::constant::trans_const_value(
fx,
const_val,
@@ -852,7 +852,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
};
ptr_offset_from, <T> (v ptr, v base) {
let isize_layout = fx.layout_of(fx.codegen_cx.tcx.types.isize);
let isize_layout = fx.layout_of(fx.cx.tcx.types.isize);
let pointee_size: u64 = fx.layout_of(T).size.bytes();
let diff = fx.bcx.ins().isub(ptr, base);
@@ -1011,22 +1011,22 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
minnumf32, (v a, v b) {
let val = fx.bcx.ins().fmin(a, b);
let val = CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.f32));
let val = CValue::by_val(val, fx.layout_of(fx.cx.tcx.types.f32));
ret.write_cvalue(fx, val);
};
minnumf64, (v a, v b) {
let val = fx.bcx.ins().fmin(a, b);
let val = CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.f64));
let val = CValue::by_val(val, fx.layout_of(fx.cx.tcx.types.f64));
ret.write_cvalue(fx, val);
};
maxnumf32, (v a, v b) {
let val = fx.bcx.ins().fmax(a, b);
let val = CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.f32));
let val = CValue::by_val(val, fx.layout_of(fx.cx.tcx.types.f32));
ret.write_cvalue(fx, val);
};
maxnumf64, (v a, v b) {
let val = fx.bcx.ins().fmax(a, b);
let val = CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.f64));
let val = CValue::by_val(val, fx.layout_of(fx.cx.tcx.types.f64));
ret.write_cvalue(fx, val);
};

View File

@@ -11,13 +11,13 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let def_id = instance.def_id();
let substs = instance.substs;
let intrinsic = fx.codegen_cx.tcx.item_name(def_id).as_str();
let intrinsic = fx.cx.tcx.item_name(def_id).as_str();
let intrinsic = &intrinsic[..];
intrinsic_match! {
fx, intrinsic, substs, args,
_ => {
fx.codegen_cx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
fx.cx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
};
simd_cast, (c a) {
@@ -68,8 +68,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
assert_eq!(x.layout(), y.layout());
let layout = x.layout();
let (lane_type, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, layout);
let (ret_lane_type, ret_lane_count) = lane_type_and_count(fx.codegen_cx.tcx, ret.layout());
let (lane_type, lane_count) = lane_type_and_count(fx.cx.tcx, layout);
let (ret_lane_type, ret_lane_count) = lane_type_and_count(fx.cx.tcx, ret.layout());
assert_eq!(lane_type, ret_lane_type);
assert_eq!(n, ret_lane_count);
@@ -92,7 +92,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
(0..ret_lane_count).map(|i| {
let i = usize::try_from(i).unwrap();
let idx = rustc_middle::mir::interpret::read_target_uint(
fx.codegen_cx.tcx.data_layout.endian,
fx.cx.tcx.data_layout.endian,
&idx_bytes[4*i.. 4*i + 4],
).expect("read_target_uint");
u16::try_from(idx).expect("try_from u32")
@@ -119,7 +119,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
idx_const
} else {
fx.codegen_cx.tcx.sess.span_warn(
fx.cx.tcx.sess.span_warn(
fx.mir.span,
"`#[rustc_arg_required_const(..)]` is not yet supported. Calling this function will panic.",
);
@@ -128,9 +128,9 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
};
let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).expect(&format!("kind not scalar: {:?}", idx_const));
let (_lane_type, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, base.layout());
let (_lane_type, lane_count) = lane_type_and_count(fx.cx.tcx, base.layout());
if idx >= lane_count.into() {
fx.codegen_cx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
fx.cx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
}
ret.write_cvalue(fx, base);
@@ -143,7 +143,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
idx_const
} else {
fx.codegen_cx.tcx.sess.span_warn(
fx.cx.tcx.sess.span_warn(
fx.mir.span,
"`#[rustc_arg_required_const(..)]` is not yet supported. Calling this function will panic.",
);
@@ -153,9 +153,9 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
};
let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).expect(&format!("kind not scalar: {:?}", idx_const));
let (_lane_type, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, v.layout());
let (_lane_type, lane_count) = lane_type_and_count(fx.cx.tcx, v.layout());
if idx >= lane_count.into() {
fx.codegen_cx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
fx.cx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
}
let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
@@ -205,8 +205,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
assert_eq!(a.layout(), c.layout());
let layout = a.layout();
let (_lane_layout, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, layout);
let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.codegen_cx.tcx, ret.layout());
let (_lane_layout, lane_count) = lane_type_and_count(fx.cx.tcx, layout);
let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.cx.tcx, ret.layout());
assert_eq!(lane_count, ret_lane_count);
for lane in 0..lane_count {

View File

@@ -48,7 +48,7 @@ fn codegen_compare_bin_op<'tcx>(
let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
let val = fx.bcx.ins().bint(types::I8, val);
CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.bool))
CValue::by_val(val, fx.layout_of(fx.cx.tcx.types.bool))
}
pub(crate) fn codegen_binop<'tcx>(
@@ -66,8 +66,8 @@ pub(crate) fn codegen_binop<'tcx>(
let rhs = in_rhs.load_scalar(fx);
let (lhs, rhs) = if (bin_op == BinOp::Eq || bin_op == BinOp::Ne)
&& (in_lhs.layout().ty.kind == fx.codegen_cx.tcx.types.i8.kind
|| in_lhs.layout().ty.kind == fx.codegen_cx.tcx.types.i16.kind)
&& (in_lhs.layout().ty.kind == fx.cx.tcx.types.i8.kind
|| in_lhs.layout().ty.kind == fx.cx.tcx.types.i16.kind)
{
// FIXME(CraneStation/cranelift#896) icmp_imm.i8/i16 with eq/ne for signed ints is implemented wrong.
(
@@ -118,7 +118,7 @@ pub(crate) fn trans_bool_binop<'tcx>(
_ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
};
CValue::by_val(res, fx.layout_of(fx.codegen_cx.tcx.types.bool))
CValue::by_val(res, fx.layout_of(fx.cx.tcx.types.bool))
}
pub(crate) fn trans_int_binop<'tcx>(
@@ -323,7 +323,7 @@ pub(crate) fn trans_checked_int_binop<'tcx>(
// FIXME directly write to result place instead
let out_place = CPlace::new_stack_slot(
fx,
fx.layout_of(fx.codegen_cx.tcx.mk_tup([in_lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter())),
fx.layout_of(fx.cx.tcx.mk_tup([in_lhs.layout().ty, fx.cx.tcx.types.bool].iter())),
);
let out_layout = out_place.layout();
out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout));
@@ -368,7 +368,7 @@ pub(crate) fn trans_float_binop<'tcx>(
};
let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
let val = fx.bcx.ins().bint(types::I8, val);
return CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.bool));
return CValue::by_val(val, fx.layout_of(fx.cx.tcx.types.bool));
}
_ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
};
@@ -383,7 +383,7 @@ pub(crate) fn trans_ptr_binop<'tcx>(
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
let is_thin_ptr = in_lhs.layout().ty.builtin_deref(true).map(|TypeAndMut { ty, mutbl: _}| {
!has_ptr_meta(fx.codegen_cx.tcx, ty)
!has_ptr_meta(fx.cx.tcx, ty)
}).unwrap_or(true);
if is_thin_ptr {
@@ -440,7 +440,7 @@ pub(crate) fn trans_ptr_binop<'tcx>(
CValue::by_val(
fx.bcx.ins().bint(types::I8, res),
fx.layout_of(fx.codegen_cx.tcx.types.bool),
fx.layout_of(fx.cx.tcx.types.bool),
)
}
}

View File

@@ -2,24 +2,24 @@ use crate::prelude::*;
fn codegen_print(fx: &mut FunctionCx<'_, '_, impl cranelift_module::Backend>, msg: &str) {
let puts = fx
.codegen_cx.module
.cx.module
.declare_function(
"puts",
Linkage::Import,
&Signature {
call_conv: CallConv::triple_default(fx.triple()),
params: vec![AbiParam::new(pointer_ty(fx.codegen_cx.tcx))],
params: vec![AbiParam::new(pointer_ty(fx.cx.tcx))],
returns: vec![AbiParam::new(types::I32)],
},
)
.unwrap();
let puts = fx.codegen_cx.module.declare_func_in_func(puts, &mut fx.bcx.func);
let puts = fx.cx.module.declare_func_in_func(puts, &mut fx.bcx.func);
#[cfg(debug_assertions)]
{
fx.add_comment(puts, "puts");
}
let symbol_name = fx.codegen_cx.tcx.symbol_name(fx.instance);
let symbol_name = fx.cx.tcx.symbol_name(fx.instance);
let real_msg = format!("trap at {:?} ({}): {}\0", fx.instance, symbol_name, msg);
let msg_ptr = fx.anonymous_str("trap", &real_msg);
fx.bcx.ins().call(puts, &[msg_ptr]);
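codegen_print imports libc's puts (pointer parameter, i32 return), declares it in the current function, and calls it with a NUL-terminated message interned via anonymous_str. A sketch of the import half, assuming the pre-0.68 cranelift-module API used by this tree, where Module is generic over a Backend (declare_puts is an illustrative name):

use cranelift_codegen::ir::{types, AbiParam, Signature, Type};
use cranelift_codegen::isa::CallConv;
use cranelift_module::{Backend, FuncId, Linkage, Module};

// Declare `int puts(const char *)` as an import into the module;
// `pointer_type` is the target pointer type, e.g. types::I64 on x86_64.
fn declare_puts<B: Backend>(
    module: &mut Module<B>,
    pointer_type: Type,
    call_conv: CallConv,
) -> FuncId {
    let mut sig = Signature::new(call_conv);
    sig.params.push(AbiParam::new(pointer_type));
    sig.returns.push(AbiParam::new(types::I32));
    module
        .declare_function("puts", Linkage::Import, &sig)
        .unwrap()
}

The FuncId still has to be made visible to the current function with declare_func_in_func, as the code above does, before ins().call can reference it.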


@ -15,12 +15,12 @@ pub(crate) fn unsized_info<'tcx>(
old_info: Option<Value>,
) -> Value {
let (source, target) =
fx.codegen_cx.tcx
fx.cx.tcx
.struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
match (&source.kind, &target.kind) {
(&ty::Array(_, len), &ty::Slice(_)) => fx.bcx.ins().iconst(
fx.pointer_type,
len.eval_usize(fx.codegen_cx.tcx, ParamEnv::reveal_all()) as i64,
len.eval_usize(fx.cx.tcx, ParamEnv::reveal_all()) as i64,
),
(&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
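unsized_info computes the metadata word for an unsizing coercion: array-to-slice emits the statically known length as an iconst, while dyn-to-dyn reuses the old vtable because only marker traits may differ. At the surface level:

fn main() {
    let arr = [1u8, 2, 3];
    // Array-to-slice unsizing: the metadata is the array length,
    // known at compile time (the iconst case above).
    let slice: &[u8] = &arr;
    assert_eq!(slice.len(), 3);
}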


@ -35,10 +35,10 @@ fn codegen_field<'tcx>(
let unaligned_offset = field_offset.bytes();
let (_, unsized_align) = crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
let one = fx.bcx.ins().iconst(pointer_ty(fx.codegen_cx.tcx), 1);
let one = fx.bcx.ins().iconst(pointer_ty(fx.cx.tcx), 1);
let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
let zero = fx.bcx.ins().iconst(pointer_ty(fx.codegen_cx.tcx), 0);
let zero = fx.bcx.ins().iconst(pointer_ty(fx.cx.tcx), 0);
let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
let offset = fx.bcx.ins().band(and_lhs, and_rhs);
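When a field follows a dynamically sized tail, its offset is only known at runtime: the iconst/isub/iadd_imm/band sequence rounds the static unaligned offset up to the tail's computed alignment using the (offset + align - 1) & -align identity. The same computation in plain Rust:

// Round `offset` up to a power-of-two `align`; wrapping_neg gives
// the two's-complement -align used as the mask.
fn align_up(offset: u64, align: u64) -> u64 {
    (offset + align - 1) & align.wrapping_neg()
}

fn main() {
    assert_eq!(align_up(5, 4), 8);
    assert_eq!(align_up(8, 4), 8);
}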
@ -119,9 +119,9 @@ impl<'tcx> CValue<'tcx> {
match self.0 {
CValueInner::ByRef(ptr, None) => {
let clif_ty = match layout.abi {
Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.codegen_cx.tcx, scalar.clone()),
Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.cx.tcx, scalar.clone()),
Abi::Vector { ref element, count } => {
scalar_to_clif_type(fx.codegen_cx.tcx, element.clone())
scalar_to_clif_type(fx.cx.tcx, element.clone())
.by(u16::try_from(count).unwrap()).unwrap()
}
_ => unreachable!("{:?}", layout.ty),
@ -146,9 +146,9 @@ impl<'tcx> CValue<'tcx> {
Abi::ScalarPair(a, b) => (a, b),
_ => unreachable!("load_scalar_pair({:?})", self),
};
let b_offset = scalar_pair_calculate_b_offset(fx.codegen_cx.tcx, a_scalar, b_scalar);
let clif_ty1 = scalar_to_clif_type(fx.codegen_cx.tcx, a_scalar.clone());
let clif_ty2 = scalar_to_clif_type(fx.codegen_cx.tcx, b_scalar.clone());
let b_offset = scalar_pair_calculate_b_offset(fx.cx.tcx, a_scalar, b_scalar);
let clif_ty1 = scalar_to_clif_type(fx.cx.tcx, a_scalar.clone());
let clif_ty2 = scalar_to_clif_type(fx.cx.tcx, b_scalar.clone());
let val1 = ptr.load(fx, clif_ty1, MemFlags::new());
let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, MemFlags::new());
(val1, val2)
@ -419,13 +419,13 @@ impl<'tcx> CPlace<'tcx> {
assert_assignable(fx, a, b);
}
(ty::FnPtr(_), ty::FnPtr(_)) => {
let from_sig = fx.codegen_cx.tcx.normalize_erasing_late_bound_regions(
let from_sig = fx.cx.tcx.normalize_erasing_late_bound_regions(
ParamEnv::reveal_all(),
&from_ty.fn_sig(fx.codegen_cx.tcx),
&from_ty.fn_sig(fx.cx.tcx),
);
let to_sig = fx.codegen_cx.tcx.normalize_erasing_late_bound_regions(
let to_sig = fx.cx.tcx.normalize_erasing_late_bound_regions(
ParamEnv::reveal_all(),
&to_ty.fn_sig(fx.codegen_cx.tcx),
&to_ty.fn_sig(fx.cx.tcx),
);
assert_eq!(
from_sig, to_sig,
@ -436,10 +436,10 @@ impl<'tcx> CPlace<'tcx> {
}
(ty::Dynamic(from_traits, _), ty::Dynamic(to_traits, _)) => {
let from_traits = fx
.codegen_cx.tcx
.cx.tcx
.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from_traits);
let to_traits = fx
.codegen_cx.tcx
.cx.tcx
.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_traits);
assert_eq!(
from_traits, to_traits,
@ -569,7 +569,7 @@ impl<'tcx> CPlace<'tcx> {
}
Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
let (value, extra) = from.load_scalar_pair(fx);
let b_offset = scalar_pair_calculate_b_offset(fx.codegen_cx.tcx, a_scalar, b_scalar);
let b_offset = scalar_pair_calculate_b_offset(fx.cx.tcx, a_scalar, b_scalar);
to_ptr.store(fx, value, MemFlags::new());
to_ptr.offset(fx, b_offset).store(fx, extra, MemFlags::new());
return;
@ -595,7 +595,7 @@ impl<'tcx> CPlace<'tcx> {
let src_align = src_layout.align.abi.bytes() as u8;
let dst_align = dst_layout.align.abi.bytes() as u8;
fx.bcx.emit_small_memory_copy(
fx.codegen_cx.module.target_config(),
fx.cx.module.target_config(),
to_addr,
from_addr,
size,
@ -673,7 +673,7 @@ impl<'tcx> CPlace<'tcx> {
pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> CPlace<'tcx> {
let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
if has_ptr_meta(fx.codegen_cx.tcx, inner_layout.ty) {
if has_ptr_meta(fx.cx.tcx, inner_layout.ty) {
let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
} else {
@ -682,7 +682,7 @@ impl<'tcx> CPlace<'tcx> {
}
pub(crate) fn write_place_ref(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>, dest: CPlace<'tcx>) {
if has_ptr_meta(fx.codegen_cx.tcx, self.layout().ty) {
if has_ptr_meta(fx.cx.tcx, self.layout().ty) {
let (ptr, extra) = self.to_ptr_maybe_unsized();
let ptr = CValue::by_val_pair(
ptr.get_addr(fx),


@ -13,9 +13,9 @@ fn vtable_memflags() -> MemFlags {
}
pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, impl Backend>, vtable: Value) -> Value {
let usize_size = fx.layout_of(fx.codegen_cx.tcx.types.usize).size.bytes() as usize;
let usize_size = fx.layout_of(fx.cx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
pointer_ty(fx.codegen_cx.tcx),
pointer_ty(fx.cx.tcx),
vtable_memflags(),
vtable,
(DROP_FN_INDEX * usize_size) as i32,
@ -23,9 +23,9 @@ pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, impl Backend>, vtable:
}
pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, impl Backend>, vtable: Value) -> Value {
let usize_size = fx.layout_of(fx.codegen_cx.tcx.types.usize).size.bytes() as usize;
let usize_size = fx.layout_of(fx.cx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
pointer_ty(fx.codegen_cx.tcx),
pointer_ty(fx.cx.tcx),
vtable_memflags(),
vtable,
(SIZE_INDEX * usize_size) as i32,
@ -33,9 +33,9 @@ pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, impl Backend>, vtable: Val
}
pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, impl Backend>, vtable: Value) -> Value {
let usize_size = fx.layout_of(fx.codegen_cx.tcx.types.usize).size.bytes() as usize;
let usize_size = fx.layout_of(fx.cx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
pointer_ty(fx.codegen_cx.tcx),
pointer_ty(fx.cx.tcx),
vtable_memflags(),
vtable,
(ALIGN_INDEX * usize_size) as i32,
@ -57,9 +57,9 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
)
};
let usize_size = fx.layout_of(fx.codegen_cx.tcx.types.usize).size.bytes();
let usize_size = fx.layout_of(fx.cx.tcx.types.usize).size.bytes();
let func_ref = fx.bcx.ins().load(
pointer_ty(fx.codegen_cx.tcx),
pointer_ty(fx.cx.tcx),
vtable_memflags(),
vtable,
((idx + 3) * usize_size as usize) as i32,
@ -72,15 +72,15 @@ pub(crate) fn get_vtable<'tcx>(
layout: TyAndLayout<'tcx>,
trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
) -> Value {
let data_id = if let Some(data_id) = fx.codegen_cx.vtables.get(&(layout.ty, trait_ref)) {
let data_id = if let Some(data_id) = fx.cx.vtables.get(&(layout.ty, trait_ref)) {
*data_id
} else {
let data_id = build_vtable(fx, layout, trait_ref);
fx.codegen_cx.vtables.insert((layout.ty, trait_ref), data_id);
fx.cx.vtables.insert((layout.ty, trait_ref), data_id);
data_id
};
let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
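get_vtable builds each vtable at most once per (type, trait) pair, caching the DataId in cx.vtables and then declaring the data in the current function to take its address. The caching shape, abstracted into a generic helper (illustrative, not the backend's API):

use std::collections::HashMap;
use std::hash::Hash;

// Get-or-build memoization keyed by a (type, trait)-style key.
fn get_or_build<K: Hash + Eq, V: Copy>(
    cache: &mut HashMap<K, V>,
    key: K,
    build: impl FnOnce() -> V,
) -> V {
    if let Some(&v) = cache.get(&key) {
        v
    } else {
        let v = build();
        cache.insert(key, v);
        v
    }
}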
@ -89,11 +89,11 @@ fn build_vtable<'tcx>(
layout: TyAndLayout<'tcx>,
trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
) -> DataId {
let tcx = fx.codegen_cx.tcx;
let usize_size = fx.layout_of(fx.codegen_cx.tcx.types.usize).size.bytes() as usize;
let tcx = fx.cx.tcx;
let usize_size = fx.layout_of(fx.cx.tcx.types.usize).size.bytes() as usize;
let drop_in_place_fn =
import_function(tcx, &mut fx.codegen_cx.module, Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.codegen_cx.tcx));
import_function(tcx, &mut fx.cx.module, Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.cx.tcx));
let mut components: Vec<_> = vec![Some(drop_in_place_fn), None, None];
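The components vector fixes the slot order the loads above rely on: slot 0 is drop_in_place, and the two None placeholders are filled by the write_usize calls for size and align below, with trait methods starting at slot 3, hence the (idx + 3) * usize_size offset in get_ptr_and_method_ref. A sketch of the layout, with slot values inferred from this file (one usize per slot):

const DROP_FN_INDEX: usize = 0; // drop_in_place
const SIZE_INDEX: usize = 1; // object size
const ALIGN_INDEX: usize = 2; // object alignment

fn method_slot_offset(idx: usize, usize_size: usize) -> usize {
    (idx + 3) * usize_size // methods follow the three header slots
}

fn main() {
    assert_eq!(DROP_FN_INDEX * 8, 0);
    assert_eq!(SIZE_INDEX * 8, 8);
    assert_eq!(ALIGN_INDEX * 8, 16);
    // First trait method on a 64-bit target: byte offset 24.
    assert_eq!(method_slot_offset(0, 8), 24);
}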
@ -108,8 +108,8 @@ fn build_vtable<'tcx>(
opt_mth.map_or(None, |(def_id, substs)| {
Some(import_function(
tcx,
&mut fx.codegen_cx.module,
Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs).unwrap().polymorphize(fx.codegen_cx.tcx),
&mut fx.cx.module,
Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs).unwrap().polymorphize(fx.cx.tcx),
))
})
});
@ -121,19 +121,19 @@ fn build_vtable<'tcx>(
.collect::<Vec<u8>>()
.into_boxed_slice();
write_usize(fx.codegen_cx.tcx, &mut data, SIZE_INDEX, layout.size.bytes());
write_usize(fx.codegen_cx.tcx, &mut data, ALIGN_INDEX, layout.align.abi.bytes());
write_usize(fx.cx.tcx, &mut data, SIZE_INDEX, layout.size.bytes());
write_usize(fx.cx.tcx, &mut data, ALIGN_INDEX, layout.align.abi.bytes());
data_ctx.define(data);
for (i, component) in components.into_iter().enumerate() {
if let Some(func_id) = component {
let func_ref = fx.codegen_cx.module.declare_func_in_data(func_id, &mut data_ctx);
let func_ref = fx.cx.module.declare_func_in_data(func_id, &mut data_ctx);
data_ctx.write_function_addr((i * usize_size) as u32, func_ref);
}
}
let data_id = fx
.codegen_cx.module
.cx.module
.declare_data(
&format!(
"__vtable.{}.for.{:?}.{}",
@ -142,13 +142,13 @@ fn build_vtable<'tcx>(
.map(|trait_ref| format!("{:?}", trait_ref.skip_binder()).into())
.unwrap_or(std::borrow::Cow::Borrowed("???")),
layout.ty,
fx.codegen_cx.vtables.len(),
fx.cx.vtables.len(),
),
Linkage::Local,
false,
false,
Some(
fx.codegen_cx.tcx
fx.cx.tcx
.data_layout
.pointer_align
.pref
@ -159,7 +159,7 @@ fn build_vtable<'tcx>(
)
.unwrap();
fx.codegen_cx.module.define_data(data_id, &data_ctx).unwrap();
fx.cx.module.define_data(data_id, &data_ctx).unwrap();
data_id
}