// rust/src/librustc_codegen_llvm/intrinsic.rs

// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_upper_case_globals)]
use intrinsics::{self, Intrinsic};
use llvm;
use llvm::{ValueRef};
use abi::{Abi, FnType, LlvmType, PassMode};
use mir::place::PlaceRef;
use mir::operand::{OperandRef, OperandValue};
use base::*;
use common::*;
use declare;
use glue;
use type_::Type;
use type_of::LayoutLlvmExt;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{HasDataLayout, LayoutOf};
use rustc::hir;
use syntax::ast;
use syntax::symbol::Symbol;
use builder::Builder;
use rustc::session::Session;
use syntax_pos::Span;
use std::cmp::Ordering;
use std::iter;
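// Maps a "simple" intrinsic (one that lowers to a single LLVM intrinsic call
// taking the Rust arguments unchanged) to the LLVM intrinsic's name; returns
// `None` for intrinsics that need the custom lowering below.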
fn get_simple_intrinsic(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
let llvm_name = match name {
"sqrtf32" => "llvm.sqrt.f32",
"sqrtf64" => "llvm.sqrt.f64",
"powif32" => "llvm.powi.f32",
"powif64" => "llvm.powi.f64",
"sinf32" => "llvm.sin.f32",
"sinf64" => "llvm.sin.f64",
"cosf32" => "llvm.cos.f32",
"cosf64" => "llvm.cos.f64",
"powf32" => "llvm.pow.f32",
"powf64" => "llvm.pow.f64",
"expf32" => "llvm.exp.f32",
"expf64" => "llvm.exp.f64",
"exp2f32" => "llvm.exp2.f32",
"exp2f64" => "llvm.exp2.f64",
"logf32" => "llvm.log.f32",
"logf64" => "llvm.log.f64",
"log10f32" => "llvm.log10.f32",
"log10f64" => "llvm.log10.f64",
"log2f32" => "llvm.log2.f32",
"log2f64" => "llvm.log2.f64",
"fmaf32" => "llvm.fma.f32",
"fmaf64" => "llvm.fma.f64",
"fabsf32" => "llvm.fabs.f32",
"fabsf64" => "llvm.fabs.f64",
"copysignf32" => "llvm.copysign.f32",
"copysignf64" => "llvm.copysign.f64",
"floorf32" => "llvm.floor.f32",
"floorf64" => "llvm.floor.f64",
"ceilf32" => "llvm.ceil.f32",
"ceilf64" => "llvm.ceil.f64",
"truncf32" => "llvm.trunc.f32",
"truncf64" => "llvm.trunc.f64",
"rintf32" => "llvm.rint.f32",
"rintf64" => "llvm.rint.f64",
"nearbyintf32" => "llvm.nearbyint.f32",
"nearbyintf64" => "llvm.nearbyint.f64",
"roundf32" => "llvm.round.f32",
"roundf64" => "llvm.round.f64",
"assume" => "llvm.assume",
"abort" => "llvm.trap",
_ => return None
};
Some(cx.get_intrinsic(&llvm_name))
}
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_codegen_llvm/context.rs
pub fn codegen_intrinsic_call<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
callee_ty: Ty<'tcx>,
fn_ty: &FnType<'tcx, Ty<'tcx>>,
args: &[OperandRef<'tcx>],
llresult: ValueRef,
span: Span) {
let cx = bx.cx;
let tcx = cx.tcx;
let (def_id, substs) = match callee_ty.sty {
ty::TyFnDef(def_id, substs) => (def_id, substs),
_ => bug!("expected fn item type, found {}", callee_ty)
};
let sig = callee_ty.fn_sig(tcx);
let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
let arg_tys = sig.inputs();
let ret_ty = sig.output();
let name = &*tcx.item_name(def_id).as_str();
let llret_ty = cx.layout_of(ret_ty).llvm_type(cx);
let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align);
let simple = get_simple_intrinsic(cx, name);
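// Most arms below produce a single LLVM value that is stored into the return
// place at the end of this function; arms that write through `llresult`
// themselves (or produce nothing) `return` early instead.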
let llval = match name {
_ if simple.is_some() => {
bx.call(simple.unwrap(),
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None)
}
"unreachable" => {
return;
},
"likely" => {
let expect = cx.get_intrinsic(&("llvm.expect.i1"));
bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None)
}
"unlikely" => {
let expect = cx.get_intrinsic(&("llvm.expect.i1"));
bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None)
}
"try" => {
try_intrinsic(bx, cx,
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
llresult);
return;
}
"breakpoint" => {
let llfn = cx.get_intrinsic(&("llvm.debugtrap"));
bx.call(llfn, &[], None)
}
"size_of" => {
let tp_ty = substs.type_at(0);
C_usize(cx, cx.size_of(tp_ty).bytes())
}
"size_of_val" => {
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (llsize, _) =
glue::size_and_align_of_dst(bx, tp_ty, meta);
llsize
} else {
C_usize(cx, cx.size_of(tp_ty).bytes())
}
}
"min_align_of" => {
let tp_ty = substs.type_at(0);
C_usize(cx, cx.align_of(tp_ty).abi())
}
"min_align_of_val" => {
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (_, llalign) =
glue::size_and_align_of_dst(bx, tp_ty, meta);
llalign
} else {
C_usize(cx, cx.align_of(tp_ty).abi())
}
}
"pref_align_of" => {
let tp_ty = substs.type_at(0);
C_usize(cx, cx.align_of(tp_ty).pref())
}
"type_name" => {
let tp_ty = substs.type_at(0);
let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
C_str_slice(cx, ty_name)
}
"type_id" => {
C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0)))
}
"init" => {
let ty = substs.type_at(0);
if !cx.layout_of(ty).is_zst() {
// Just zero out the stack slot.
// If we store a zero constant, LLVM will drown in vreg allocation for large data
// structures, and the generated code will be awful. (A telltale sign of this is
// large quantities of `mov [byte ptr foo],0` in the generated code.)
memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1));
}
return;
}
// Effectively no-ops
"uninit" => {
return;
}
"needs_drop" => {
let tp_ty = substs.type_at(0);
C_bool(cx, bx.cx.type_needs_drop(tp_ty))
}
"offset" => {
let ptr = args[0].immediate();
let offset = args[1].immediate();
bx.inbounds_gep(ptr, &[offset])
}
"arith_offset" => {
let ptr = args[0].immediate();
let offset = args[1].immediate();
bx.gep(ptr, &[offset])
}
"copy_nonoverlapping" => {
copy_intrinsic(bx, false, false, substs.type_at(0),
args[1].immediate(), args[0].immediate(), args[2].immediate())
}
"copy" => {
copy_intrinsic(bx, true, false, substs.type_at(0),
args[1].immediate(), args[0].immediate(), args[2].immediate())
}
"write_bytes" => {
memset_intrinsic(bx, false, substs.type_at(0),
args[0].immediate(), args[1].immediate(), args[2].immediate())
}
"volatile_copy_nonoverlapping_memory" => {
copy_intrinsic(bx, false, true, substs.type_at(0),
args[0].immediate(), args[1].immediate(), args[2].immediate())
}
"volatile_copy_memory" => {
copy_intrinsic(bx, true, true, substs.type_at(0),
args[0].immediate(), args[1].immediate(), args[2].immediate())
}
"volatile_set_memory" => {
memset_intrinsic(bx, true, substs.type_at(0),
args[0].immediate(), args[1].immediate(), args[2].immediate())
}
"volatile_load" => {
let tp_ty = substs.type_at(0);
let mut ptr = args[0].immediate();
if let PassMode::Cast(ty) = fn_ty.ret.mode {
ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to());
}
let load = bx.volatile_load(ptr);
unsafe {
llvm::LLVMSetAlignment(load, cx.align_of(tp_ty).abi() as u32);
}
to_immediate(bx, load, cx.layout_of(tp_ty))
},
"volatile_store" => {
let dst = args[0].deref(bx.cx);
args[1].val.volatile_store(bx, dst);
return;
},
"prefetch_read_data" | "prefetch_write_data" |
"prefetch_read_instruction" | "prefetch_write_instruction" => {
let expect = cx.get_intrinsic(&("llvm.prefetch"));
let (rw, cache_type) = match name {
"prefetch_read_data" => (0, 1),
"prefetch_write_data" => (1, 1),
"prefetch_read_instruction" => (0, 0),
"prefetch_write_instruction" => (1, 0),
_ => bug!()
};
bx.call(expect, &[
args[0].immediate(),
C_i32(cx, rw),
args[1].immediate(),
C_i32(cx, cache_type)
], None)
},
"ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
"bitreverse" | "add_with_overflow" | "sub_with_overflow" |
"mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
"unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" => {
let ty = arg_tys[0];
match int_type_width_signed(ty, cx) {
Some((width, signed)) =>
match name {
"ctlz" | "cttz" => {
let y = C_bool(bx.cx, false);
let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
bx.call(llfn, &[args[0].immediate(), y], None)
}
"ctlz_nonzero" | "cttz_nonzero" => {
let y = C_bool(bx.cx, true);
let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
let llfn = cx.get_intrinsic(llvm_name);
bx.call(llfn, &[args[0].immediate(), y], None)
}
"ctpop" => bx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
&[args[0].immediate()], None),
"bswap" => {
if width == 8 {
args[0].immediate() // byte-swapping a u8/i8 is a no-op
} else {
bx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
&[args[0].immediate()], None)
}
}
"bitreverse" => {
bx.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
&[args[0].immediate()], None)
}
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
if signed { 's' } else { 'u' },
&name[..3], width);
let llfn = bx.cx.get_intrinsic(&intrinsic);
// Convert `i1` to a `bool`, and write it to the out parameter
let pair = bx.call(llfn, &[
args[0].immediate(),
args[1].immediate()
], None);
let val = bx.extract_value(pair, 0);
let overflow = bx.zext(bx.extract_value(pair, 1), Type::bool(cx));
let dest = result.project_field(bx, 0);
bx.store(val, dest.llval, dest.align);
let dest = result.project_field(bx, 1);
bx.store(overflow, dest.llval, dest.align);
return;
},
"overflowing_add" => bx.add(args[0].immediate(), args[1].immediate()),
"overflowing_sub" => bx.sub(args[0].immediate(), args[1].immediate()),
"overflowing_mul" => bx.mul(args[0].immediate(), args[1].immediate()),
"exact_div" =>
if signed {
bx.exactsdiv(args[0].immediate(), args[1].immediate())
} else {
bx.exactudiv(args[0].immediate(), args[1].immediate())
},
"unchecked_div" =>
if signed {
bx.sdiv(args[0].immediate(), args[1].immediate())
} else {
bx.udiv(args[0].immediate(), args[1].immediate())
},
"unchecked_rem" =>
if signed {
bx.srem(args[0].immediate(), args[1].immediate())
} else {
bx.urem(args[0].immediate(), args[1].immediate())
},
"unchecked_shl" => bx.shl(args[0].immediate(), args[1].immediate()),
"unchecked_shr" =>
if signed {
bx.ashr(args[0].immediate(), args[1].immediate())
} else {
bx.lshr(args[0].immediate(), args[1].immediate())
},
_ => bug!(),
},
None => {
span_invalid_monomorphization_error(
tcx.sess, span,
&format!("invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`", name, ty));
return;
}
}
},
"fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
let sty = &arg_tys[0].sty;
match float_type_width(sty) {
Some(_width) =>
match name {
"fadd_fast" => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
"fsub_fast" => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
"fmul_fast" => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
"fdiv_fast" => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
"frem_fast" => bx.frem_fast(args[0].immediate(), args[1].immediate()),
_ => bug!(),
},
None => {
span_invalid_monomorphization_error(
tcx.sess, span,
&format!("invalid monomorphization of `{}` intrinsic: \
expected basic float type, found `{}`", name, sty));
return;
}
}
},
"discriminant_value" => {
args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty)
}
"align_offset" => {
// `ptr as usize`
let ptr_val = bx.ptrtoint(args[0].immediate(), bx.cx.isize_ty);
// `ptr_val % align`
let align = args[1].immediate();
let offset = bx.urem(ptr_val, align);
let zero = C_null(bx.cx.isize_ty);
// `offset == 0`
let is_zero = bx.icmp(llvm::IntPredicate::IntEQ, offset, zero);
// `if offset == 0 { 0 } else { align - offset }`
bx.select(is_zero, zero, bx.sub(align, offset))
}
name if name.starts_with("simd_") => {
match generic_simd_intrinsic(bx, name,
callee_ty,
args,
ret_ty, llret_ty,
span) {
Ok(llval) => llval,
Err(()) => return
}
}
// This requires that atomic intrinsics follow a specific naming pattern:
// "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
name if name.starts_with("atomic_") => {
use llvm::AtomicOrdering::*;
let split: Vec<&str> = name.split('_').collect();
let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
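// Map the ordering suffix of the intrinsic name to LLVM's orderings; the
// cmpxchg intrinsics additionally carry a (possibly weaker) failure ordering.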
let (order, failorder) = match split.len() {
2 => (SequentiallyConsistent, SequentiallyConsistent),
3 => match split[2] {
"unordered" => (Unordered, Unordered),
"relaxed" => (Monotonic, Monotonic),
"acq" => (Acquire, Acquire),
"rel" => (Release, Monotonic),
"acqrel" => (AcquireRelease, Acquire),
"failrelaxed" if is_cxchg =>
(SequentiallyConsistent, Monotonic),
"failacq" if is_cxchg =>
(SequentiallyConsistent, Acquire),
_ => cx.sess().fatal("unknown ordering in atomic intrinsic")
},
4 => match (split[2], split[3]) {
("acq", "failrelaxed") if is_cxchg =>
(Acquire, Monotonic),
("acqrel", "failrelaxed") if is_cxchg =>
(AcquireRelease, Monotonic),
_ => cx.sess().fatal("unknown ordering in atomic intrinsic")
},
_ => cx.sess().fatal("Atomic intrinsic not in correct format"),
};
let invalid_monomorphization = |ty| {
span_invalid_monomorphization_error(tcx.sess, span,
&format!("invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`", name, ty));
};
match split[1] {
"cxchg" | "cxchgweak" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, cx).is_some() {
let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
let pair = bx.atomic_cmpxchg(
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
order,
failorder,
weak);
let val = bx.extract_value(pair, 0);
let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx));
let dest = result.project_field(bx, 0);
bx.store(val, dest.llval, dest.align);
let dest = result.project_field(bx, 1);
bx.store(success, dest.llval, dest.align);
return;
} else {
return invalid_monomorphization(ty);
}
}
"load" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, cx).is_some() {
let align = cx.align_of(ty);
bx.atomic_load(args[0].immediate(), order, align)
} else {
return invalid_monomorphization(ty);
}
}
"store" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, cx).is_some() {
let align = cx.align_of(ty);
bx.atomic_store(args[1].immediate(), args[0].immediate(), order, align);
return;
} else {
return invalid_monomorphization(ty);
}
}
"fence" => {
bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
return;
}
"singlethreadfence" => {
bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
return;
}
// These are all AtomicRMW ops
op => {
let atom_op = match op {
"xchg" => llvm::AtomicXchg,
"xadd" => llvm::AtomicAdd,
"xsub" => llvm::AtomicSub,
"and" => llvm::AtomicAnd,
"nand" => llvm::AtomicNand,
"or" => llvm::AtomicOr,
"xor" => llvm::AtomicXor,
"max" => llvm::AtomicMax,
"min" => llvm::AtomicMin,
"umax" => llvm::AtomicUMax,
"umin" => llvm::AtomicUMin,
_ => cx.sess().fatal("unknown atomic operation")
};
let ty = substs.type_at(0);
if int_type_width_signed(ty, cx).is_some() {
bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
} else {
return invalid_monomorphization(ty);
}
}
}
}
"nontemporal_store" => {
let dst = args[0].deref(bx.cx);
args[1].val.nontemporal_store(bx, dst);
return;
}
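// Everything else should be a platform-specific intrinsic with an entry in
// the `Intrinsic` table; an unknown name at this point is a compiler bug.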
_ => {
let intr = match Intrinsic::find(&name) {
Some(intr) => intr,
None => bug!("unknown intrinsic '{}'", name),
};
fn one<T>(x: Vec<T>) -> T {
assert_eq!(x.len(), 1);
x.into_iter().next().unwrap()
}
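// Translate a platform-intrinsic type description into the corresponding
// LLVM type(s); a flattened aggregate expands into one type per field.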
fn ty_to_type(cx: &CodegenCx, t: &intrinsics::Type) -> Vec<Type> {
use intrinsics::Type::*;
match *t {
Void => vec![Type::void(cx)],
Integer(_signed, _width, llvm_width) => {
vec![Type::ix(cx, llvm_width as u64)]
}
Float(x) => {
match x {
32 => vec![Type::f32(cx)],
64 => vec![Type::f64(cx)],
_ => bug!()
}
}
Pointer(ref t, ref llvm_elem, _const) => {
let t = llvm_elem.as_ref().unwrap_or(t);
let elem = one(ty_to_type(cx, t));
vec![elem.ptr_to()]
}
Vector(ref t, ref llvm_elem, length) => {
let t = llvm_elem.as_ref().unwrap_or(t);
let elem = one(ty_to_type(cx, t));
vec![Type::vector(&elem, length as u64)]
}
Aggregate(false, ref contents) => {
let elems = contents.iter()
.map(|t| one(ty_to_type(cx, t)))
.collect::<Vec<_>>();
vec![Type::struct_(cx, &elems, false)]
}
Aggregate(true, ref contents) => {
contents.iter()
.flat_map(|t| ty_to_type(cx, t))
.collect()
}
}
}
// This allows an argument list like `foo, (bar, baz),
// qux` to be converted into `foo, bar, baz, qux`, integer
// arguments to be truncated as needed and pointers to be
// cast.
fn modify_as_needed<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
t: &intrinsics::Type,
arg: &OperandRef<'tcx>)
-> Vec<ValueRef>
{
match *t {
intrinsics::Type::Aggregate(true, ref contents) => {
// We found a tuple that needs squishing! So
// run over the tuple and load each field.
//
// This assumes the type is "simple", i.e. no
// destructors, and the contents are SIMD
// etc.
assert!(!bx.cx.type_needs_drop(arg.layout.ty));
let (ptr, align) = match arg.val {
OperandValue::Ref(ptr, align) => (ptr, align),
_ => bug!()
};
let arg = PlaceRef::new_sized(ptr, arg.layout, align);
(0..contents.len()).map(|i| {
arg.project_field(bx, i).load(bx).immediate()
}).collect()
}
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
}
intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
vec![bx.bitcast(arg.immediate(), Type::vector(&llvm_elem, length as u64))]
}
intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
// the LLVM intrinsic uses a smaller integer
// size than the C intrinsic's signature, so
// we have to trim it down here.
vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))]
}
_ => vec![arg.immediate()],
}
}
let inputs = intr.inputs.iter()
.flat_map(|t| ty_to_type(cx, t))
.collect::<Vec<_>>();
let outputs = one(ty_to_type(cx, &intr.output));
let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
modify_as_needed(bx, t, arg)
}).collect();
assert_eq!(inputs.len(), llargs.len());
let val = match intr.definition {
2015-08-14 01:00:44 +02:00
intrinsics::IntrinsicDef::Named(name) => {
2018-01-05 06:04:08 +01:00
let f = declare::declare_cfn(cx,
name,
Type::func(&inputs, &outputs));
bx.call(f, &llargs, None)
}
};
match *intr.output {
intrinsics::Type::Aggregate(flatten, ref elems) => {
// the output is a tuple so we need to munge it properly
assert!(!flatten);
for i in 0..elems.len() {
let dest = result.project_field(bx, i);
let val = bx.extract_value(val, i as u64);
bx.store(val, dest.llval, dest.align);
}
return;
}
_ => val,
}
}
};
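// Store the computed value into the return place, going through a pointer
// cast if the return ABI requires a cast.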
if !fn_ty.ret.is_ignore() {
if let PassMode::Cast(ty) = fn_ty.ret.mode {
let ptr = bx.pointercast(result.llval, ty.llvm_type(cx).ptr_to());
bx.store(llval, ptr, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
.val.store(bx, result);
}
}
}
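// Emits `llvm.memcpy` (or `llvm.memmove` when `allow_overlap` is true) for
// `count` values of type `ty` copied from `src` to `dst`.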
fn copy_intrinsic<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
allow_overlap: bool,
volatile: bool,
ty: Ty<'tcx>,
dst: ValueRef,
src: ValueRef,
count: ValueRef)
-> ValueRef {
let cx = bx.cx;
let (size, align) = cx.size_and_align_of(ty);
let size = C_usize(cx, size.bytes());
let align = C_i32(cx, align.abi() as i32);
let operation = if allow_overlap {
"memmove"
} else {
"memcpy"
};
let name = format!("llvm.{}.p0i8.p0i8.i{}", operation,
cx.data_layout().pointer_size.bits());
let dst_ptr = bx.pointercast(dst, Type::i8p(cx));
let src_ptr = bx.pointercast(src, Type::i8p(cx));
let llfn = cx.get_intrinsic(&name);
bx.call(llfn,
&[dst_ptr,
src_ptr,
bx.mul(size, count),
align,
C_bool(cx, volatile)],
None)
}
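// Emits `llvm.memset`, filling `count * size_of::<ty>()` bytes at `dst` with `val`.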
fn memset_intrinsic<'a, 'tcx>(
bx: &Builder<'a, 'tcx>,
volatile: bool,
ty: Ty<'tcx>,
dst: ValueRef,
val: ValueRef,
count: ValueRef
) -> ValueRef {
let cx = bx.cx;
let (size, align) = cx.size_and_align_of(ty);
let size = C_usize(cx, size.bytes());
let align = C_i32(cx, align.abi() as i32);
let dst = bx.pointercast(dst, Type::i8p(cx));
call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
}
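// Lowers the `try` intrinsic: a plain call when landing pads are disabled,
// SEH-based unwinding on MSVC targets, and the GNU landing-pad model otherwise.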
fn try_intrinsic<'a, 'tcx>(
bx: &Builder<'a, 'tcx>,
cx: &CodegenCx,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
) {
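// With landing pads disabled there is nothing to catch: just call the
// function and store a null pointer to signal that no panic occurred.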
if bx.sess().no_landing_pads() {
bx.call(func, &[data], None);
let ptr_align = bx.tcx().data_layout.pointer_align;
bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align);
} else if wants_msvc_seh(bx.sess()) {
codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
} else {
codegen_gnu_try(bx, cx, func, data, local_ptr, dest);
}
}
// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM,
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of this writing LLVM
// still does not recommend using them, as the older instructions remain
// better optimized.
fn codegen_msvc_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
cx: &CodegenCx,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef) {
let llfn = get_rust_try_fn(cx, &mut |bx| {
let cx = bx.cx;
bx.set_personality_fn(bx.cx.eh_personality());
let normal = bx.build_sibling_block("normal");
let catchswitch = bx.build_sibling_block("catchswitch");
let catchpad = bx.build_sibling_block("catchpad");
let caught = bx.build_sibling_block("caught");
let func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let local_ptr = llvm::get_param(bx.llfn(), 2);
// We're generating an IR snippet that looks like:
//
// declare i32 @rust_try(%func, %data, %ptr) {
// %slot = alloca i64*
// invoke %func(%data) to label %normal unwind label %catchswitch
//
// normal:
// ret i32 0
//
// catchswitch:
// %cs = catchswitch within none [%catchpad] unwind to caller
//
// catchpad:
// %tok = catchpad within %cs [%type_descriptor, 0, %slot]
// %ptr[0] = %slot[0]
// %ptr[1] = %slot[1]
// catchret from %tok to label %caught
//
// caught:
// ret i32 1
// }
//
// This structure follows the basic usage of throw/try/catch in LLVM.
// For example, compile this C++ snippet to see what LLVM generates:
//
// #include <stdint.h>
//
// int bar(void (*foo)(void), uint64_t *ret) {
// try {
// foo();
// return 0;
// } catch(uint64_t a[2]) {
// ret[0] = a[0];
// ret[1] = a[1];
// return 1;
// }
// }
//
// More information can be found in libstd's seh.rs implementation.
let i64p = Type::i64(cx).ptr_to();
let ptr_align = bx.tcx().data_layout.pointer_align;
let slot = bx.alloca(i64p, "slot", ptr_align);
bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
None);
normal.ret(C_i32(cx, 0));
let cs = catchswitch.catch_switch(None, None, 1);
catchswitch.add_handler(cs, catchpad.llbb());
let tcx = cx.tcx;
let tydesc = match tcx.lang_items().msvc_try_filter() {
Some(did) => ::consts::get_static(cx, did),
None => bug!("msvc_try_filter not defined"),
};
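// The `catchpad` below pairs with the catchswitch token `cs` created above and
// follows the MSVC C++ EH convention for its arguments: a type descriptor to
// match against (the `msvc_try_filter` lang item resolved just above), a flags
// word of 0, and `slot`, which receives a pointer to the caught exception
// object. The two pointer-sized words of that object (the panic payload) are
// then copied out into the caller-provided `local_ptr` buffer.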
let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]);
let addr = catchpad.load(slot, ptr_align);
let i64_align = bx.tcx().data_layout.i64_align;
let arg1 = catchpad.load(addr, i64_align);
let val1 = C_i32(cx, 1);
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
let local_ptr = catchpad.bitcast(local_ptr, i64p);
catchpad.store(arg1, local_ptr, i64_align);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
catchpad.catch_ret(tok, caught.llbb());
caught.ret(C_i32(cx, 1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[func, data, local_ptr], None);
let i32_align = bx.tcx().data_layout.i32_align;
bx.store(ret, dest, i32_align);
}
// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (i.e. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
cx: &CodegenCx,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef) {
let llfn = get_rust_try_fn(cx, &mut |bx| {
let cx = bx.cx;
// Codegens the shims described above:
//
// bx:
// invoke %func(%args...) normal %normal unwind %catch
//
// normal:
// ret 0
//
// catch:
// (ptr, _) = landingpad
// store ptr, %local_ptr
// ret 1
//
// Note that the `local_ptr` data passed into the `try` intrinsic is
// expected to be `*mut *mut u8` for this to actually work, but that's
// managed by the standard library.
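//
// Sketch of the caller's side, assuming the libstd wrapper looks roughly
// like this (the exact code lives in libstd's panicking implementation and
// the names below are illustrative only):
//
//     let mut payload: *mut u8 = ptr::null_mut();
//     let r = intrinsics::try(do_call::<F>, data_ptr,
//                             &mut payload as *mut _ as *mut u8);
//     if r != 0 {
//         // `payload` now points at the exception object that was caught.
//     }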
let then = bx.build_sibling_block("then");
let catch = bx.build_sibling_block("catch");
let func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let local_ptr = llvm::get_param(bx.llfn(), 2);
bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
then.ret(C_i32(cx, 0));
// Type indicator for the exception being thrown.
//
// The first value in this tuple is a pointer to the exception object
// being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector.
let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)],
false);
let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1);
catch.add_clause(vals, C_null(Type::i8p(cx)));
let ptr = catch.extract_value(vals, 0);
let ptr_align = bx.tcx().data_layout.pointer_align;
catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align);
catch.ret(C_i32(cx, 1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[func, data, local_ptr], None);
let i32_align = bx.tcx().data_layout.i32_align;
bx.store(ret, dest, i32_align);
}
// Helper function to give a Builder to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
name: &str,
inputs: Vec<Ty<'tcx>>,
output: Ty<'tcx>,
codegen: &mut for<'b> FnMut(Builder<'b, 'tcx>))
-> ValueRef {
let rust_fn_ty = cx.tcx.mk_fn_ptr(ty::Binder::bind(cx.tcx.mk_fn_sig(
inputs.into_iter(),
output,
false,
hir::Unsafety::Unsafe,
Abi::Rust
)));
let llfn = declare::define_internal_fn(cx, name, rust_fn_ty);
let bx = Builder::new_block(cx, llfn, "entry-block");
codegen(bx);
llfn
}
// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
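//
// At the Rust type level the generated function looks roughly like
//
//     unsafe fn __rust_try(f: unsafe fn(*mut u8),
//                          data: *mut u8,
//                          local_ptr: *mut u8) -> i32
//
// (a sketch only; the actual signature is assembled below with `mk_fn_sig`).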
fn get_rust_try_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
codegen: &mut for<'b> FnMut(Builder<'b, 'tcx>))
-> ValueRef {
if let Some(llfn) = cx.rust_try_fn.get() {
return llfn;
}
// Define the type up front for the signature of the rust_try function.
let tcx = cx.tcx;
let i8p = tcx.mk_mut_ptr(tcx.types.i8);
let fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
iter::once(i8p),
tcx.mk_nil(),
false,
hir::Unsafety::Unsafe,
Abi::Rust
)));
let output = tcx.types.i32;
let rust_try = gen_fn(cx, "__rust_try", vec![fn_ty, i8p, i8p], output, codegen);
cx.rust_try_fn.set(Some(rust_try));
return rust_try
}
fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
span_err!(a, b, E0511, "{}", c);
}
fn generic_simd_intrinsic<'a, 'tcx>(
bx: &Builder<'a, 'tcx>,
name: &str,
callee_ty: Ty<'tcx>,
args: &[OperandRef<'tcx>],
ret_ty: Ty<'tcx>,
llret_ty: Type,
span: Span
) -> Result<ValueRef, ()> {
// macros for error handling:
macro_rules! emit_error {
($msg: tt) => {
emit_error!($msg, )
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bx.sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ",
$msg),
name, $($fmt)*));
}
}
macro_rules! return_error {
($($fmt: tt)*) => {
{
emit_error!($($fmt)*);
return Err(());
}
}
}
macro_rules! require {
($cond: expr, $($fmt: tt)*) => {
if !$cond {
return_error!($($fmt)*);
}
};
}
macro_rules! require_simd {
($ty: expr, $position: expr) => {
require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
}
}
let tcx = bx.tcx();
let sig = tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
&callee_ty.fn_sig(tcx),
);
let arg_tys = sig.inputs();
// every intrinsic takes a SIMD vector as its first argument
require_simd!(arg_tys[0], "input");
let in_ty = arg_tys[0];
let in_elem = arg_tys[0].simd_type(tcx);
let in_len = arg_tys[0].simd_size(tcx);
let comparison = match name {
"simd_eq" => Some(hir::BiEq),
"simd_ne" => Some(hir::BiNe),
"simd_lt" => Some(hir::BiLt),
"simd_le" => Some(hir::BiLe),
"simd_gt" => Some(hir::BiGt),
"simd_ge" => Some(hir::BiGe),
_ => None
};
if let Some(cmp_op) = comparison {
require_simd!(ret_ty, "return");
let out_len = ret_ty.simd_size(tcx);
require!(in_len == out_len,
"expected return type with length {} (same as input type `{}`), \
found `{}` with length {}",
in_len, in_ty,
ret_ty, out_len);
require!(llret_ty.element_type().kind() == llvm::Integer,
"expected return type with integer elements, found `{}` with non-integer `{}`",
ret_ty,
ret_ty.simd_type(tcx));
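// compare_simd_types emits the lane-wise comparison and then widens the i1
// results into the integer element type of the return vector (all-ones for
// true), which is why an integer return type is required above.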
return Ok(compare_simd_types(bx,
args[0].immediate(),
args[1].immediate(),
in_elem,
llret_ty,
cmp_op))
}
if name.starts_with("simd_shuffle") {
let n: usize = match name["simd_shuffle".len()..].parse() {
Ok(n) => n,
Err(_) => span_bug!(span,
"bad `simd_shuffle` instruction only caught in codegen?")
};
require_simd!(ret_ty, "return");
let out_len = ret_ty.simd_size(tcx);
require!(out_len == n,
"expected return type of length {}, found `{}` with length {}",
n, ret_ty, out_len);
require!(in_elem == ret_ty.simd_type(tcx),
"expected return element type `{}` (element of input `{}`), \
found `{}` with element type `{}`",
in_elem, in_ty,
ret_ty, ret_ty.simd_type(tcx));
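// Shuffle indices may pick lanes from either input vector, so the valid
// index range is twice the input length.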
let total_len = in_len as u128 * 2;
let vector = args[2].immediate();
let indices: Option<Vec<_>> = (0..n)
.map(|i| {
let arg_idx = i;
let val = const_get_elt(vector, i as u64);
match const_to_opt_u128(val, true) {
None => {
emit_error!("shuffle index #{} is not a constant", arg_idx);
None
}
Some(idx) if idx >= total_len => {
emit_error!("shuffle index #{} is out of bounds (limit {})",
arg_idx, total_len);
None
}
Some(idx) => Some(C_i32(bx.cx, idx as i32)),
}
})
.collect();
let indices = match indices {
Some(i) => i,
None => return Ok(C_null(llret_ty))
};
return Ok(bx.shuffle_vector(args[0].immediate(),
args[1].immediate(),
C_vector(&indices)))
}
if name == "simd_insert" {
require!(in_elem == arg_tys[2],
"expected inserted type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, arg_tys[2]);
return Ok(bx.insert_element(args[0].immediate(),
args[2].immediate(),
args[1].immediate()))
}
if name == "simd_extract" {
require!(ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, ret_ty);
return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()))
}
if name == "simd_select" {
let m_elem_ty = in_elem;
let m_len = in_len;
let v_len = arg_tys[1].simd_size(tcx);
require!(m_len == v_len,
"mismatched lengths: mask length `{}` != other vector length `{}`",
m_len, v_len
);
match m_elem_ty.sty {
ty::TyInt(_) => {},
_ => {
return_error!("mask element type is `{}`, expected `i_`", m_elem_ty);
}
}
// truncate the mask to a vector of i1s
let i1 = Type::i1(bx.cx);
let i1xn = Type::vector(&i1, m_len as u64);
let m_i1s = bx.trunc(args[0].immediate(), i1xn);
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
}
macro_rules! arith_red {
($name:tt : $integer_reduce:ident, $float_reduce:ident, $ordered:expr) => {
if name == $name {
require!(ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, ret_ty);
return match in_elem.sty {
ty::TyInt(_) | ty::TyUint(_) => {
let r = bx.$integer_reduce(args[0].immediate());
if $ordered {
// if overflow occurs, the result is the
// mathematical result modulo 2^n:
if name.contains("mul") {
Ok(bx.mul(args[1].immediate(), r))
} else {
Ok(bx.add(args[1].immediate(), r))
}
} else {
Ok(bx.$integer_reduce(args[0].immediate()))
}
},
ty::TyFloat(f) => {
// ordered arithmetic reductions take an accumulator
let acc = if $ordered {
let acc = args[1].immediate();
// FIXME: https://bugs.llvm.org/show_bug.cgi?id=36734
// * if the accumulator of the fadd isn't 0, incorrect
// code is generated
// * if the accumulator of the fmul isn't 1, incorrect
// code is generated
match const_get_real(acc) {
None => return_error!("accumulator of {} is not a constant", $name),
Some((v, loses_info)) => {
if $name.contains("mul") && v != 1.0_f64 {
return_error!("accumulator of {} is not 1.0", $name);
} else if $name.contains("add") && v != 0.0_f64 {
return_error!("accumulator of {} is not 0.0", $name);
} else if loses_info {
return_error!("accumulator of {} loses information", $name);
}
}
}
acc
} else {
// unordered arithmetic reductions do not:
match f.bit_width() {
32 => C_undef(Type::f32(bx.cx)),
64 => C_undef(Type::f64(bx.cx)),
v => {
return_error!(r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
$name, in_ty, in_elem, v, ret_ty
)
}
}
};
Ok(bx.$float_reduce(acc, args[0].immediate()))
}
_ => {
return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
$name, in_ty, in_elem, ret_ty
)
},
}
}
}
}
arith_red!("simd_reduce_add_ordered": vector_reduce_add, vector_reduce_fadd_fast, true);
arith_red!("simd_reduce_mul_ordered": vector_reduce_mul, vector_reduce_fmul_fast, true);
arith_red!("simd_reduce_add_unordered": vector_reduce_add, vector_reduce_fadd_fast, false);
arith_red!("simd_reduce_mul_unordered": vector_reduce_mul, vector_reduce_fmul_fast, false);
macro_rules! minmax_red {
($name:tt: $int_red:ident, $float_red:ident) => {
if name == $name {
require!(ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, ret_ty);
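// The boolean passed to the integer reductions selects the signed
// (`true`, for `TyInt`) versus unsigned (`false`, for `TyUint`) form.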
return match in_elem.sty {
ty::TyInt(_i) => {
Ok(bx.$int_red(args[0].immediate(), true))
},
ty::TyUint(_u) => {
Ok(bx.$int_red(args[0].immediate(), false))
},
ty::TyFloat(_f) => {
Ok(bx.$float_red(args[0].immediate()))
}
_ => {
return_error!("unsupported {} from `{}` with element `{}` to `{}`",
$name, in_ty, in_elem, ret_ty)
},
}
}
}
}
minmax_red!("simd_reduce_min": vector_reduce_min, vector_reduce_fmin);
minmax_red!("simd_reduce_max": vector_reduce_max, vector_reduce_fmax);
minmax_red!("simd_reduce_min_nanless": vector_reduce_min, vector_reduce_fmin_fast);
minmax_red!("simd_reduce_max_nanless": vector_reduce_max, vector_reduce_fmax_fast);
macro_rules! bitwise_red {
($name:tt : $red:ident, $boolean:expr) => {
if name == $name {
let input = if !$boolean {
require!(ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, ret_ty);
args[0].immediate()
} else {
match in_elem.sty {
ty::TyInt(_) | ty::TyUint(_) => {},
_ => {
return_error!("unsupported {} from `{}` with element `{}` to `{}`",
$name, in_ty, in_elem, ret_ty)
}
}
// boolean reductions operate on vectors of i1s:
let i1 = Type::i1(bx.cx);
let i1xn = Type::vector(&i1, in_len as u64);
bx.trunc(args[0].immediate(), i1xn)
};
return match in_elem.sty {
ty::TyInt(_) | ty::TyUint(_) => {
let r = bx.$red(input);
Ok(
if !$boolean {
r
} else {
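// widen the single i1 result back to the boolean return type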
bx.zext(r, Type::bool(bx.cx))
}
)
},
_ => {
return_error!("unsupported {} from `{}` with element `{}` to `{}`",
$name, in_ty, in_elem, ret_ty)
},
}
}
}
}
bitwise_red!("simd_reduce_and": vector_reduce_and, false);
bitwise_red!("simd_reduce_or": vector_reduce_or, false);
bitwise_red!("simd_reduce_xor": vector_reduce_xor, false);
bitwise_red!("simd_reduce_all": vector_reduce_and, true);
bitwise_red!("simd_reduce_any": vector_reduce_or, true);
if name == "simd_cast" {
require_simd!(ret_ty, "return");
let out_len = ret_ty.simd_size(tcx);
require!(in_len == out_len,
"expected return type with length {} (same as input type `{}`), \
found `{}` with length {}",
in_len, in_ty,
ret_ty, out_len);
// casting cares about nominal type, not just structural type
let out_elem = ret_ty.simd_type(tcx);
if in_elem == out_elem { return Ok(args[0].immediate()); }
enum Style { Float, Int(/* is signed? */ bool), Unsupported }
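// Decompose both element types into a (kind, bit width) pair so the cast
// can be dispatched on the int/float combinations below.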
let (in_style, in_width) = match in_elem.sty {
// vectors of pointer-sized integers should've been
// disallowed before here, so this unwrap is safe.
ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
ty::TyFloat(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0)
};
let (out_style, out_width) = match out_elem.sty {
ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
ty::TyFloat(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0)
};
match (in_style, out_style) {
(Style::Int(in_is_signed), Style::Int(_)) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => if in_is_signed {
bx.sext(args[0].immediate(), llret_ty)
} else {
bx.zext(args[0].immediate(), llret_ty)
}
})
}
(Style::Int(in_is_signed), Style::Float) => {
return Ok(if in_is_signed {
bx.sitofp(args[0].immediate(), llret_ty)
} else {
bx.uitofp(args[0].immediate(), llret_ty)
})
}
(Style::Float, Style::Int(out_is_signed)) => {
return Ok(if out_is_signed {
bx.fptosi(args[0].immediate(), llret_ty)
} else {
bx.fptoui(args[0].immediate(), llret_ty)
})
}
(Style::Float, Style::Float) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty)
})
}
_ => {/* Unsupported. Fallthrough. */}
}
require!(false,
"unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
in_ty, in_elem,
ret_ty, out_elem);
}
macro_rules! arith {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == stringify!($name) {
match in_elem.sty {
$($(ty::$p(_))|* => {
return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
})*
_ => {},
}
require!(false,
"unsupported operation on `{}` with element `{}`",
in_ty,
in_elem)
})*
}
}
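// Each line below maps a `simd_*` intrinsic to the builder methods used per
// element kind, e.g. `simd_add` lowers to `add` for integer vectors and to
// `fadd` for float vectors.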
arith! {
simd_add: TyUint, TyInt => add, TyFloat => fadd;
simd_sub: TyUint, TyInt => sub, TyFloat => fsub;
simd_mul: TyUint, TyInt => mul, TyFloat => fmul;
simd_div: TyUint => udiv, TyInt => sdiv, TyFloat => fdiv;
simd_rem: TyUint => urem, TyInt => srem, TyFloat => frem;
simd_shl: TyUint, TyInt => shl;
simd_shr: TyUint => lshr, TyInt => ashr;
simd_and: TyUint, TyInt => and;
simd_or: TyUint, TyInt => or;
simd_xor: TyUint, TyInt => xor;
simd_fmax: TyFloat => maxnum;
simd_fmin: TyFloat => minnum;
}
span_bug!(span, "unknown SIMD intrinsic");
}
// Returns the width of an int Ty, and if it's signed or not
// Returns None if the type is not an integer
// FIXME: there are multiple versions of this function; investigate consolidating
// them with the already existing code
fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> {
match ty.sty {
ty::TyInt(t) => Some((match t {
ast::IntTy::Isize => {
match &cx.tcx.sess.target.target.target_pointer_width[..] {
"16" => 16,
"32" => 32,
"64" => 64,
tws => bug!("Unsupported target word size for isize: {}", tws),
}
},
ast::IntTy::I8 => 8,
ast::IntTy::I16 => 16,
ast::IntTy::I32 => 32,
ast::IntTy::I64 => 64,
ast::IntTy::I128 => 128,
}, true)),
ty::TyUint(t) => Some((match t {
ast::UintTy::Usize => {
match &cx.tcx.sess.target.target.target_pointer_width[..] {
"16" => 16,
"32" => 32,
"64" => 64,
tws => bug!("Unsupported target word size for usize: {}", tws),
}
},
ast::UintTy::U8 => 8,
ast::UintTy::U16 => 16,
ast::UintTy::U32 => 32,
ast::UintTy::U64 => 64,
ast::UintTy::U128 => 128,
}, false)),
_ => None,
}
}
// Returns the width of a float TypeVariant
// Returns None if the type is not a float
fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
-> Option<u64> {
use rustc::ty::TyFloat;
match *sty {
TyFloat(t) => Some(match t {
ast::FloatTy::F32 => 32,
ast::FloatTy::F64 => 64,
}),
_ => None,
}
}