// rust/src/librustc_codegen_llvm/builder.rs

use crate::llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope};
use crate::llvm::{self, False, BasicBlock};
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate};
use rustc_codegen_ssa::MemFlags;
use libc::{c_uint, c_char};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::hir::def_id::DefId;
use rustc::session::config;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::base::to_immediate;
use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow;
use std::ffi::CStr;
use std::ops::{Deref, Range};
use std::ptr;
use std::iter::TrustedLen;
use rustc_data_structures::const_cstr;
use log::debug;
// All Builders must have an llfn associated with them
#[must_use]
pub struct Builder<'a, 'll, 'tcx> {
pub llbuilder: &'ll mut llvm::Builder<'ll>,
pub cx: &'a CodegenCx<'ll, 'tcx>,
}
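// The wrapper owns the raw LLVM builder, so it must be disposed of explicitly
// when the wrapper is dropped.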
impl Drop for Builder<'a, 'll, 'tcx> {
fn drop(&mut self) {
unsafe {
llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
}
}
}
// FIXME(eddyb) use a checked constructor when they become `const fn`.
const EMPTY_C_STR: &CStr = unsafe {
CStr::from_bytes_with_nul_unchecked(b"\0")
};
/// Empty string, to be used where LLVM expects an instruction name, indicating
/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();
impl BackendTypes for Builder<'_, 'll, 'tcx> {
type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;
type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
}
impl ty::layout::HasDataLayout for Builder<'_, '_, '_> {
fn data_layout(&self) -> &ty::layout::TargetDataLayout {
self.cx.data_layout()
}
}
impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.cx.tcx
}
}
impl ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
fn param_env(&self) -> ty::ParamEnv<'tcx> {
self.cx.param_env()
}
}
impl HasTargetSpec for Builder<'_, '_, 'tcx> {
fn target_spec(&self) -> &Target {
&self.cx.target_spec()
}
}
impl ty::layout::LayoutOf for Builder<'_, '_, 'tcx> {
type Ty = Ty<'tcx>;
type TyLayout = TyLayout<'tcx>;
fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
self.cx.layout_of(ty)
}
}
impl Deref for Builder<'_, 'll, 'tcx> {
type Target = CodegenCx<'ll, 'tcx>;
fn deref(&self) -> &Self::Target {
self.cx
}
}
impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
type CodegenCx = CodegenCx<'ll, 'tcx>;
}
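// Generates thin wrapper methods that forward straight to the corresponding
// LLVM-C instruction builders, e.g. `add(a, b)` becomes a call to
// `LLVMBuildAdd(self.llbuilder, a, b, UNNAMED)`.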
macro_rules! builder_methods_for_value_instructions {
($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
$(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
unsafe {
llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
}
})+
}
}
impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn new_block<'b>(
cx: &'a CodegenCx<'ll, 'tcx>,
llfn: &'ll Value,
name: &'b str
) -> Self {
let mut bx = Builder::with_cx(cx);
let llbb = unsafe {
let name = SmallCStr::new(name);
llvm::LLVMAppendBasicBlockInContext(
cx.llcx,
llfn,
name.as_ptr()
)
};
bx.position_at_end(llbb);
bx
}
fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
// Create a fresh builder from the crate context.
let llbuilder = unsafe {
llvm::LLVMCreateBuilderInContext(cx.llcx)
};
Builder {
llbuilder,
cx,
}
}
fn build_sibling_block(&self, name: &str) -> Self {
Builder::new_block(self.cx, self.llfn(), name)
}
fn llbb(&self) -> &'ll BasicBlock {
unsafe {
llvm::LLVMGetInsertBlock(self.llbuilder)
}
}
fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
unsafe {
llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
}
}
fn ret_void(&mut self) {
unsafe {
llvm::LLVMBuildRetVoid(self.llbuilder);
}
}
fn ret(&mut self, v: &'ll Value) {
unsafe {
llvm::LLVMBuildRet(self.llbuilder, v);
}
}
fn br(&mut self, dest: &'ll BasicBlock) {
unsafe {
llvm::LLVMBuildBr(self.llbuilder, dest);
}
}
fn cond_br(
&mut self,
cond: &'ll Value,
then_llbb: &'ll BasicBlock,
else_llbb: &'ll BasicBlock,
) {
unsafe {
llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
}
}
fn switch(
&mut self,
v: &'ll Value,
else_llbb: &'ll BasicBlock,
cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)> + TrustedLen,
) {
let switch = unsafe {
llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint)
};
for (on_val, dest) in cases {
let on_val = self.const_uint_big(self.val_ty(v), on_val);
unsafe {
llvm::LLVMAddCase(switch, on_val, dest)
}
}
}
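// `invoke` is the unwinding-aware counterpart of `call`: control flows to `then`
// on a normal return and to `catch` if the callee unwinds. When a funclet is
// supplied, its operand bundle is attached so the call is tagged with the
// enclosing (MSVC-style) exception-handling pad.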
fn invoke(
&mut self,
llfn: &'ll Value,
args: &[&'ll Value],
then: &'ll BasicBlock,
catch: &'ll BasicBlock,
funclet: Option<&Funclet<'ll>>,
) -> &'ll Value {
debug!("invoke {:?} with args ({:?})",
llfn,
args);
let args = self.check_call("invoke", llfn, args);
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw);
unsafe {
llvm::LLVMRustBuildInvoke(self.llbuilder,
llfn,
args.as_ptr(),
args.len() as c_uint,
then,
catch,
bundle,
UNNAMED)
}
}
fn unreachable(&mut self) {
unsafe {
llvm::LLVMBuildUnreachable(self.llbuilder);
}
}
builder_methods_for_value_instructions! {
add(a, b) => LLVMBuildAdd,
fadd(a, b) => LLVMBuildFAdd,
sub(a, b) => LLVMBuildSub,
fsub(a, b) => LLVMBuildFSub,
mul(a, b) => LLVMBuildMul,
fmul(a, b) => LLVMBuildFMul,
udiv(a, b) => LLVMBuildUDiv,
exactudiv(a, b) => LLVMBuildExactUDiv,
sdiv(a, b) => LLVMBuildSDiv,
exactsdiv(a, b) => LLVMBuildExactSDiv,
fdiv(a, b) => LLVMBuildFDiv,
urem(a, b) => LLVMBuildURem,
srem(a, b) => LLVMBuildSRem,
frem(a, b) => LLVMBuildFRem,
shl(a, b) => LLVMBuildShl,
lshr(a, b) => LLVMBuildLShr,
ashr(a, b) => LLVMBuildAShr,
and(a, b) => LLVMBuildAnd,
or(a, b) => LLVMBuildOr,
xor(a, b) => LLVMBuildXor,
neg(x) => LLVMBuildNeg,
fneg(x) => LLVMBuildFNeg,
not(x) => LLVMBuildNot,
unchecked_sadd(x, y) => LLVMBuildNSWAdd,
unchecked_uadd(x, y) => LLVMBuildNUWAdd,
unchecked_ssub(x, y) => LLVMBuildNSWSub,
unchecked_usub(x, y) => LLVMBuildNUWSub,
unchecked_smul(x, y) => LLVMBuildNSWMul,
unchecked_umul(x, y) => LLVMBuildNUWMul,
}
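// The `*_fast` variants emit the same floating-point instructions as above but
// additionally set LLVM's fast-math ("unsafe algebra") flags on the result,
// allowing reassociation and other aggressive float optimizations.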
fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
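// Overflow-checked arithmetic lowers to the `llvm.{s,u}{add,sub,mul}.with.overflow.*`
// intrinsics; each returns a pair of (result, overflow flag) that is unpacked
// below with `extract_value`.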
fn checked_binop(
&mut self,
oop: OverflowOp,
ty: Ty<'_>,
lhs: Self::Value,
rhs: Self::Value,
) -> (Self::Value, Self::Value) {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
use rustc::ty::{Int, Uint};
let new_kind = match ty.kind {
Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.ptr_width)),
Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.ptr_width)),
ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
_ => panic!("tried to get overflow intrinsic for op applied to non-int type")
};
let name = match oop {
OverflowOp::Add => match new_kind {
Int(I8) => "llvm.sadd.with.overflow.i8",
Int(I16) => "llvm.sadd.with.overflow.i16",
Int(I32) => "llvm.sadd.with.overflow.i32",
Int(I64) => "llvm.sadd.with.overflow.i64",
Int(I128) => "llvm.sadd.with.overflow.i128",
Uint(U8) => "llvm.uadd.with.overflow.i8",
Uint(U16) => "llvm.uadd.with.overflow.i16",
Uint(U32) => "llvm.uadd.with.overflow.i32",
Uint(U64) => "llvm.uadd.with.overflow.i64",
Uint(U128) => "llvm.uadd.with.overflow.i128",
_ => unreachable!(),
},
OverflowOp::Sub => match new_kind {
Int(I8) => "llvm.ssub.with.overflow.i8",
Int(I16) => "llvm.ssub.with.overflow.i16",
Int(I32) => "llvm.ssub.with.overflow.i32",
Int(I64) => "llvm.ssub.with.overflow.i64",
Int(I128) => "llvm.ssub.with.overflow.i128",
Uint(U8) => "llvm.usub.with.overflow.i8",
Uint(U16) => "llvm.usub.with.overflow.i16",
Uint(U32) => "llvm.usub.with.overflow.i32",
Uint(U64) => "llvm.usub.with.overflow.i64",
Uint(U128) => "llvm.usub.with.overflow.i128",
_ => unreachable!(),
},
OverflowOp::Mul => match new_kind {
Int(I8) => "llvm.smul.with.overflow.i8",
Int(I16) => "llvm.smul.with.overflow.i16",
Int(I32) => "llvm.smul.with.overflow.i32",
Int(I64) => "llvm.smul.with.overflow.i64",
Int(I128) => "llvm.smul.with.overflow.i128",
Uint(U8) => "llvm.umul.with.overflow.i8",
Uint(U16) => "llvm.umul.with.overflow.i16",
Uint(U32) => "llvm.umul.with.overflow.i32",
Uint(U64) => "llvm.umul.with.overflow.i64",
Uint(U128) => "llvm.umul.with.overflow.i128",
_ => unreachable!(),
},
};
let intrinsic = self.get_intrinsic(&name);
let res = self.call(intrinsic, &[lhs, rhs], None);
(
self.extract_value(res, 0),
self.extract_value(res, 1),
)
}
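// Fixed-size allocas are emitted at the start of the function's first basic
// block, which is where LLVM's mem2reg pass looks for promotable stack slots.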
fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
let mut bx = Builder::with_cx(self.cx);
bx.position_at_start(unsafe {
llvm::LLVMGetFirstBasicBlock(self.llfn())
});
bx.dynamic_alloca(ty, align)
}
fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
unsafe {
let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}
}
fn array_alloca(&mut self,
ty: &'ll Type,
len: &'ll Value,
align: Align) -> &'ll Value {
unsafe {
let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}
}
fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
unsafe {
let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
load
}
}
fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
unsafe {
let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
llvm::LLVMSetVolatile(load, llvm::True);
load
}
}
fn atomic_load(
&mut self,
ptr: &'ll Value,
order: rustc_codegen_ssa::common::AtomicOrdering,
size: Size,
) -> &'ll Value {
unsafe {
let load = llvm::LLVMRustBuildAtomicLoad(
self.llbuilder,
ptr,
UNNAMED,
AtomicOrdering::from_generic(order),
);
// LLVM requires the alignment of atomic loads to be at least the size of the type.
llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
load
}
}
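// Loads a place into an operand: ZSTs become an immediate zero-sized operand,
// immediates are loaded directly (reusing a constant global's initializer when
// one is available), scalar pairs are split into their two components, and
// anything else is passed around by reference.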
fn load_operand(
&mut self,
place: PlaceRef<'tcx, &'ll Value>
) -> OperandRef<'tcx, &'ll Value> {
debug!("PlaceRef::load: {:?}", place);
assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
if place.layout.is_zst() {
return OperandRef::new_zst(self, place.layout);
}
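// Attaches value-range information to a freshly built load: `!range` metadata
// for integers whose layout restricts the valid range, and `!nonnull` for
// pointers that the layout guarantees are never null.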
fn scalar_load_metadata<'a, 'll, 'tcx>(
bx: &mut Builder<'a, 'll, 'tcx>,
load: &'ll Value,
scalar: &layout::Scalar
) {
let vr = scalar.valid_range.clone();
match scalar.value {
layout::Int(..) => {
let range = scalar.valid_range_exclusive(bx);
if range.start != range.end {
bx.range_metadata(load, range);
}
}
layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
bx.nonnull_metadata(load);
}
_ => {}
}
}
let val = if let Some(llextra) = place.llextra {
OperandValue::Ref(place.llval, Some(llextra), place.align)
} else if place.layout.is_llvm_immediate() {
let mut const_llval = None;
unsafe {
if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
if llvm::LLVMIsGlobalConstant(global) == llvm::True {
const_llval = llvm::LLVMGetInitializer(global);
}
}
}
let llval = const_llval.unwrap_or_else(|| {
let load = self.load(place.llval, place.align);
if let layout::Abi::Scalar(ref scalar) = place.layout.abi {
scalar_load_metadata(self, load, scalar);
}
load
});
OperandValue::Immediate(to_immediate(self, llval, place.layout))
} else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
let mut load = |i, scalar: &layout::Scalar, align| {
let llptr = self.struct_gep(place.llval, i as u64);
let load = self.load(llptr, align);
scalar_load_metadata(self, load, scalar);
if scalar.is_bool() {
self.trunc(load, self.type_i1())
} else {
load
}
};
OperandValue::Pair(
load(0, a, place.align),
load(1, b, place.align.restrict_for_offset(b_offset)),
)
} else {
OperandValue::Ref(place.llval, None, place.align)
};
OperandRef { val, layout: place.layout }
}
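// Emits a small loop that stores `cg_elem` into each of the `count` elements of
// `dest`: a header block carrying a phi of the current element pointer, a body
// block that performs the store and advances the pointer, and a `next` block
// that the caller continues building in.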
fn write_operand_repeatedly(
mut self,
cg_elem: OperandRef<'tcx, &'ll Value>,
count: u64,
dest: PlaceRef<'tcx, &'ll Value>,
) -> Self {
let zero = self.const_usize(0);
let count = self.const_usize(count);
let start = dest.project_index(&mut self, zero).llval;
let end = dest.project_index(&mut self, count).llval;
let mut header_bx = self.build_sibling_block("repeat_loop_header");
let mut body_bx = self.build_sibling_block("repeat_loop_body");
let next_bx = self.build_sibling_block("repeat_loop_next");
self.br(header_bx.llbb());
let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);
let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
cg_elem.val.store(&mut body_bx,
PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
body_bx.br(header_bx.llbb());
header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
next_bx
}
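// Attaches `!range` metadata to a load, describing the half-open interval
// `[range.start, range.end)` of values it may produce.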
fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
if self.sess().target.target.arch == "amdgpu" {
// amdgpu/LLVM does something weird and thinks an i64 value is
// split into a v2i32, halving the bitwidth LLVM expects,
// tripping an assertion. So, for now, just disable this
// optimization.
return;
}
unsafe {
let llty = self.cx.val_ty(load);
let v = [
self.cx.const_uint_big(llty, range.start),
self.cx.const_uint_big(llty, range.end)
];
llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
llvm::LLVMMDNodeInContext(self.cx.llcx,
v.as_ptr(),
v.len() as c_uint));
}
}
fn nonnull_metadata(&mut self, load: &'ll Value) {
unsafe {
llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
}
}
fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
self.store_with_flags(val, ptr, align, MemFlags::empty())
}
fn store_with_flags(
&mut self,
val: &'ll Value,
ptr: &'ll Value,
align: Align,
flags: MemFlags,
) -> &'ll Value {
debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
let ptr = self.check_store(val, ptr);
unsafe {
let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
let align = if flags.contains(MemFlags::UNALIGNED) {
1
} else {
align.bytes() as c_uint
};
llvm::LLVMSetAlignment(store, align);
if flags.contains(MemFlags::VOLATILE) {
llvm::LLVMSetVolatile(store, llvm::True);
}
if flags.contains(MemFlags::NONTEMPORAL) {
// According to LLVM [1], the `!nontemporal` metadata attached to a store
// must *always* point to a metadata node holding the integer 1.
//
// [1]: http://llvm.org/docs/LangRef.html#store-instruction
let one = self.cx.const_i32(1);
let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
}
store
}
}
fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value,
order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) {
debug!("Store {:?} -> {:?}", val, ptr);
let ptr = self.check_store(val, ptr);
unsafe {
let store = llvm::LLVMRustBuildAtomicStore(
self.llbuilder,
val,
ptr,
AtomicOrdering::from_generic(order),
);
// LLVM requires the alignment of atomic stores to be at least the size of the type.
llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
}
}
fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
unsafe {
llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(),
indices.len() as c_uint, UNNAMED)
}
}
fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
unsafe {
llvm::LLVMBuildInBoundsGEP(
self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, UNNAMED)
}
}
fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
assert_eq!(idx as c_uint as u64, idx);
unsafe {
llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED)
}
}
/* Casts */
fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
unsafe {
llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed)
}
}
fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED)
}
}
/* Comparisons */
fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
let op = llvm::IntPredicate::from_generic(op);
unsafe {
llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED)
}
}
fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED)
}
}
/* Miscellaneous instructions */
fn memcpy(&mut self, dst: &'ll Value, dst_align: Align,
src: &'ll Value, src_align: Align,
size: &'ll Value, flags: MemFlags) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
let val = self.load(src, src_align);
let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
self.store_with_flags(val, ptr, dst_align, flags);
return;
}
let size = self.intcast(size, self.type_isize(), false);
let is_volatile = flags.contains(MemFlags::VOLATILE);
let dst = self.pointercast(dst, self.type_i8p());
let src = self.pointercast(src, self.type_i8p());
unsafe {
llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint,
src, src_align.bytes() as c_uint, size, is_volatile);
}
}
fn memmove(&mut self, dst: &'ll Value, dst_align: Align,
src: &'ll Value, src_align: Align,
size: &'ll Value, flags: MemFlags) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memmove.
let val = self.load(src, src_align);
let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
self.store_with_flags(val, ptr, dst_align, flags);
return;
}
let size = self.intcast(size, self.type_isize(), false);
let is_volatile = flags.contains(MemFlags::VOLATILE);
let dst = self.pointercast(dst, self.type_i8p());
let src = self.pointercast(src, self.type_i8p());
unsafe {
llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint,
src, src_align.bytes() as c_uint, size, is_volatile);
}
}
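// `memset` lowers to the `llvm.memset.p0i8.iN` intrinsic, where `N` is the
// target's pointer width; the fill byte, size, alignment, and volatility are
// passed as explicit call arguments.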
fn memset(
&mut self,
ptr: &'ll Value,
fill_byte: &'ll Value,
size: &'ll Value,
align: Align,
flags: MemFlags,
) {
let ptr_width = &self.sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = self.get_intrinsic(&intrinsic_key);
let ptr = self.pointercast(ptr, self.type_i8p());
let align = self.const_u32(align.bytes() as u32);
let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE));
self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
}
fn select(
&mut self, cond: &'ll Value,
then_val: &'ll Value,
else_val: &'ll Value,
) -> &'ll Value {
unsafe {
llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED)
}
}
#[allow(dead_code)]
fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED)
}
}
fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
unsafe {
llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED)
}
}
fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
unsafe {
let elt_ty = self.cx.val_ty(elt);
let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
}
}
fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
assert_eq!(idx as c_uint as u64, idx);
unsafe {
llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED)
}
}
fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value,
idx: u64) -> &'ll Value {
assert_eq!(idx as c_uint as u64, idx);
unsafe {
llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
UNNAMED)
}
}
fn landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value,
num_clauses: usize) -> &'ll Value {
unsafe {
llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn,
num_clauses as c_uint, UNNAMED)
}
}
fn set_cleanup(&mut self, landing_pad: &'ll Value) {
unsafe {
llvm::LLVMSetCleanup(landing_pad, llvm::True);
}
}
fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
unsafe {
llvm::LLVMBuildResume(self.llbuilder, exn)
}
}
fn cleanup_pad(&mut self,
parent: Option<&'ll Value>,
args: &[&'ll Value]) -> Funclet<'ll> {
let name = const_cstr!("cleanuppad");
let ret = unsafe {
llvm::LLVMRustBuildCleanupPad(self.llbuilder,
parent,
args.len() as c_uint,
args.as_ptr(),
name.as_ptr())
};
Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
}
fn cleanup_ret(
&mut self, funclet: &Funclet<'ll>,
unwind: Option<&'ll BasicBlock>,
) -> &'ll Value {
let ret = unsafe {
llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
};
ret.expect("LLVM does not have support for cleanupret")
}
fn catch_pad(&mut self,
parent: &'ll Value,
args: &[&'ll Value]) -> Funclet<'ll> {
let name = const_cstr!("catchpad");
let ret = unsafe {
llvm::LLVMRustBuildCatchPad(self.llbuilder, parent,
args.len() as c_uint, args.as_ptr(),
name.as_ptr())
};
Funclet::new(ret.expect("LLVM does not have support for catchpad"))
}
fn catch_switch(
&mut self,
parent: Option<&'ll Value>,
unwind: Option<&'ll BasicBlock>,
num_handlers: usize,
) -> &'ll Value {
let name = const_cstr!("catchswitch");
let ret = unsafe {
llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind,
num_handlers as c_uint,
name.as_ptr())
};
ret.expect("LLVM does not have support for catchswitch")
}
fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
unsafe {
llvm::LLVMRustAddHandler(catch_switch, handler);
}
}
fn set_personality_fn(&mut self, personality: &'ll Value) {
unsafe {
llvm::LLVMSetPersonalityFn(self.llfn(), personality);
}
}
// Atomic Operations
fn atomic_cmpxchg(
&mut self,
dst: &'ll Value,
cmp: &'ll Value,
src: &'ll Value,
order: rustc_codegen_ssa::common::AtomicOrdering,
failure_order: rustc_codegen_ssa::common::AtomicOrdering,
weak: bool,
) -> &'ll Value {
let weak = if weak { llvm::True } else { llvm::False };
unsafe {
llvm::LLVMRustBuildAtomicCmpXchg(
self.llbuilder,
dst,
cmp,
src,
AtomicOrdering::from_generic(order),
AtomicOrdering::from_generic(failure_order),
weak
)
}
}
fn atomic_rmw(
&mut self,
op: rustc_codegen_ssa::common::AtomicRmwBinOp,
dst: &'ll Value,
src: &'ll Value,
order: rustc_codegen_ssa::common::AtomicOrdering,
) -> &'ll Value {
unsafe {
llvm::LLVMBuildAtomicRMW(
self.llbuilder,
AtomicRmwBinOp::from_generic(op),
dst,
src,
AtomicOrdering::from_generic(order),
False)
}
}
fn atomic_fence(
&mut self,
order: rustc_codegen_ssa::common::AtomicOrdering,
scope: rustc_codegen_ssa::common::SynchronizationScope
) {
unsafe {
llvm::LLVMRustBuildAtomicFence(
self.llbuilder,
AtomicOrdering::from_generic(order),
SynchronizationScope::from_generic(scope)
);
}
}
fn set_invariant_load(&mut self, load: &'ll Value) {
unsafe {
llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
}
}
fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
}
fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
}
fn call(
&mut self,
llfn: &'ll Value,
args: &[&'ll Value],
funclet: Option<&Funclet<'ll>>,
) -> &'ll Value {
debug!("call {:?} with args ({:?})",
llfn,
args);
let args = self.check_call("call", llfn, args);
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw);
unsafe {
llvm::LLVMRustBuildCall(
self.llbuilder,
llfn,
args.as_ptr() as *const &llvm::Value,
args.len() as c_uint,
bundle, UNNAMED
)
}
}
fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED)
}
}
fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
self.cx
}
unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) {
llvm::LLVMDeleteBasicBlock(bb);
}
fn do_not_inline(&mut self, llret: &'ll Value) {
llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
}
}
impl StaticBuilderMethods for Builder<'a, 'll, 'tcx> {
fn get_static(&mut self, def_id: DefId) -> &'ll Value {
// Forward to the `get_static` method of `CodegenCx`
self.cx().get_static(def_id)
}
}
impl Builder<'a, 'll, 'tcx> {
pub fn llfn(&self) -> &'ll Value {
unsafe {
llvm::LLVMGetBasicBlockParent(self.llbb())
}
}
fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
unsafe {
llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
}
}
pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
}
pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
}
pub fn insert_element(
&mut self, vec: &'ll Value,
elt: &'ll Value,
idx: &'ll Value,
) -> &'ll Value {
unsafe {
llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED)
}
}
pub fn shuffle_vector(
&mut self,
v1: &'ll Value,
v2: &'ll Value,
mask: &'ll Value,
) -> &'ll Value {
unsafe {
llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED)
}
}
pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
}
pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
}
pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
}
pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
}
pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
}
pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
}
pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
}
pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
}
pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
}
pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
}
pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
}
pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
unsafe {
llvm::LLVMAddClause(landing_pad, clause);
}
}
pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
let ret = unsafe {
llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind)
};
ret.expect("LLVM does not have support for catchret")
}
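// Ensures the destination pointer of a store has the right pointee type for the
// value being stored, bitcasting the pointer when the two disagree.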
fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
let dest_ptr_ty = self.cx.val_ty(ptr);
let stored_ty = self.cx.val_ty(val);
let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
if dest_ptr_ty == stored_ptr_ty {
ptr
} else {
debug!("type mismatch in store. \
Expected {:?}, got {:?}; inserting bitcast",
dest_ptr_ty, stored_ptr_ty);
self.bitcast(ptr, stored_ptr_ty)
}
}
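// Compares the call's argument types against the callee's parameter types and
// injects bitcasts for any mismatches, returning either the borrowed original
// argument slice or a freshly casted copy.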
fn check_call<'b>(&mut self,
typ: &str,
llfn: &'ll Value,
args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
let mut fn_ty = self.cx.val_ty(llfn);
// Strip off pointers
while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
fn_ty = self.cx.element_type(fn_ty);
}
assert!(self.cx.type_kind(fn_ty) == TypeKind::Function,
"builder::{} not passed a function, but {:?}", typ, fn_ty);
let param_tys = self.cx.func_params_types(fn_ty);
let all_args_match = param_tys.iter()
.zip(args.iter().map(|&v| self.val_ty(v)))
.all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
if all_args_match {
return Cow::Borrowed(args);
}
let casted_args: Vec<_> = param_tys.into_iter()
.zip(args.iter())
.enumerate()
.map(|(i, (expected_ty, &actual_val))| {
let actual_ty = self.val_ty(actual_val);
if expected_ty != actual_ty {
debug!("type mismatch in function call of {:?}. \
Expected {:?} for param {}, got {:?}; injecting bitcast",
llfn, expected_ty, i, actual_ty);
self.bitcast(actual_val, expected_ty)
} else {
actual_val
}
})
.collect();
Cow::Owned(casted_args)
}
pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED)
}
}
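// Lifetime markers are only emitted when optimizations are enabled and the
// object is non-zero-sized; otherwise the intrinsic call is skipped entirely.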
fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
if self.cx.sess().opts.optimize == config::OptLevel::No {
return;
}
let size = size.bytes();
if size == 0 {
return;
}
let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
let ptr = self.pointercast(ptr, self.cx.type_i8p());
self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
}
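// Builds a phi node seeded with the given incoming values and predecessor
// blocks; additional edges can be added later via `add_incoming_to_phi`.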
fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
assert_eq!(vals.len(), bbs.len());
let phi = unsafe {
llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED)
};
unsafe {
llvm::LLVMAddIncoming(phi, vals.as_ptr(),
bbs.as_ptr(),
vals.len() as c_uint);
phi
}
}
fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
unsafe {
llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
}
}
}