rust/src/librustc_codegen_llvm/llvm/ffi.rs


// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: Rename 'DIGlobalVariable' to 'DIGlobalVariableExpression'
// once support for LLVM 3.9 is dropped.
//
// This method was changed in this LLVM patch:
// https://reviews.llvm.org/D26769
use super::debuginfo::{
DIBuilder, DIDescriptor, DIFile, DILexicalBlock, DISubprogram, DIType,
DIBasicType, DIDerivedType, DICompositeType, DIScope, DIVariable,
DIGlobalVariable, DIArray, DISubrange, DITemplateTypeParameter, DIEnumerator,
DINameSpace, DIFlags,
};
use libc::{c_uint, c_int, size_t, c_char};
use libc::{c_longlong, c_ulonglong, c_void};
use std::ptr::NonNull;
use super::RustStringRef;
pub type Opcode = u32;
pub type Bool = c_uint;
pub const True: Bool = 1 as Bool;
pub const False: Bool = 0 as Bool;
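// Illustrative sketch (not part of the original bindings): since `Bool` is a
// C-level integer rather than Rust's `bool`, callers convert explicitly:
//
//     let flag = true;
//     let llvm_flag: Bool = if flag { True } else { False };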
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum LLVMRustResult {
Success,
Failure,
}
// Consts for the LLVM CallConv type, pre-cast to usize.
/// LLVM CallingConv::ID. Should we wrap this?
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum CallConv {
CCallConv = 0,
FastCallConv = 8,
ColdCallConv = 9,
X86StdcallCallConv = 64,
X86FastcallCallConv = 65,
ArmAapcsCallConv = 67,
Msp430Intr = 69,
X86_ThisCall = 70,
PtxKernel = 71,
X86_64_SysV = 78,
X86_64_Win64 = 79,
X86_VectorCall = 80,
X86_Intr = 83,
AmdGpuKernel = 91,
}
/// LLVMRustLinkage
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[repr(C)]
pub enum Linkage {
ExternalLinkage = 0,
AvailableExternallyLinkage = 1,
LinkOnceAnyLinkage = 2,
LinkOnceODRLinkage = 3,
WeakAnyLinkage = 4,
WeakODRLinkage = 5,
AppendingLinkage = 6,
InternalLinkage = 7,
PrivateLinkage = 8,
ExternalWeakLinkage = 9,
CommonLinkage = 10,
}
/// LLVMRustVisibility
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[repr(C)]
pub enum Visibility {
Default = 0,
Hidden = 1,
Protected = 2,
}
/// LLVMDiagnosticSeverity
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub enum DiagnosticSeverity {
Error = 0,
Warning = 1,
Remark = 2,
Note = 3,
}
/// LLVMDLLStorageClass
#[derive(Copy, Clone)]
#[repr(C)]
pub enum DLLStorageClass {
Default = 0,
DllImport = 1, // Function to be imported from DLL.
DllExport = 2, // Function to be accessible from DLL.
}
/// Matches LLVMRustAttribute in rustllvm.h
/// Semantically a subset of the C++ enum llvm::Attribute::AttrKind,
/// though it is not ABI compatible (since it's a C++ enum)
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub enum Attribute {
AlwaysInline = 0,
ByVal = 1,
Cold = 2,
InlineHint = 3,
MinSize = 4,
Naked = 5,
NoAlias = 6,
NoCapture = 7,
NoInline = 8,
NonNull = 9,
NoRedZone = 10,
NoReturn = 11,
NoUnwind = 12,
OptimizeForSize = 13,
ReadOnly = 14,
SExt = 15,
StructRet = 16,
UWTable = 17,
ZExt = 18,
InReg = 19,
SanitizeThread = 20,
SanitizeAddress = 21,
SanitizeMemory = 22,
}
/// LLVMIntPredicate
#[derive(Copy, Clone)]
#[repr(C)]
pub enum IntPredicate {
IntEQ = 32,
IntNE = 33,
IntUGT = 34,
IntUGE = 35,
IntULT = 36,
IntULE = 37,
IntSGT = 38,
IntSGE = 39,
IntSLT = 40,
IntSLE = 41,
}
/// LLVMRealPredicate
#[derive(Copy, Clone)]
#[repr(C)]
pub enum RealPredicate {
RealPredicateFalse = 0,
RealOEQ = 1,
RealOGT = 2,
RealOGE = 3,
RealOLT = 4,
RealOLE = 5,
RealONE = 6,
RealORD = 7,
RealUNO = 8,
RealUEQ = 9,
RealUGT = 10,
RealUGE = 11,
RealULT = 12,
RealULE = 13,
RealUNE = 14,
RealPredicateTrue = 15,
}
/// LLVMTypeKind
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum TypeKind {
Void = 0,
Half = 1,
Float = 2,
Double = 3,
X86_FP80 = 4,
FP128 = 5,
PPC_FP128 = 6,
Label = 7,
Integer = 8,
Function = 9,
Struct = 10,
Array = 11,
Pointer = 12,
Vector = 13,
Metadata = 14,
X86_MMX = 15,
Token = 16,
}
/// LLVMAtomicRmwBinOp
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AtomicRmwBinOp {
AtomicXchg = 0,
AtomicAdd = 1,
AtomicSub = 2,
AtomicAnd = 3,
AtomicNand = 4,
AtomicOr = 5,
AtomicXor = 6,
AtomicMax = 7,
AtomicMin = 8,
AtomicUMax = 9,
AtomicUMin = 10,
}
/// LLVMAtomicOrdering
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AtomicOrdering {
NotAtomic = 0,
Unordered = 1,
Monotonic = 2,
// Consume = 3, // Not specified yet.
Acquire = 4,
Release = 5,
AcquireRelease = 6,
SequentiallyConsistent = 7,
}
/// LLVMRustSynchronizationScope
#[derive(Copy, Clone)]
#[repr(C)]
pub enum SynchronizationScope {
Other,
SingleThread,
CrossThread,
}
/// LLVMRustFileType
#[derive(Copy, Clone)]
#[repr(C)]
pub enum FileType {
Other,
AssemblyFile,
ObjectFile,
}
/// LLVMMetadataType
#[derive(Copy, Clone)]
#[repr(C)]
pub enum MetadataType {
MD_dbg = 0,
MD_tbaa = 1,
MD_prof = 2,
MD_fpmath = 3,
MD_range = 4,
MD_tbaa_struct = 5,
MD_invariant_load = 6,
MD_alias_scope = 7,
MD_noalias = 8,
MD_nontemporal = 9,
MD_mem_parallel_loop_access = 10,
MD_nonnull = 11,
}
/// LLVMRustAsmDialect
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AsmDialect {
Other,
Att,
Intel,
}
/// LLVMRustCodeGenOptLevel
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum CodeGenOptLevel {
Other,
None,
Less,
Default,
Aggressive,
}
/// LLVMRelocMode
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum RelocMode {
Default,
Static,
PIC,
DynamicNoPic,
ROPI,
RWPI,
ROPI_RWPI,
}
/// LLVMRustCodeModel
#[derive(Copy, Clone)]
#[repr(C)]
pub enum CodeModel {
Other,
Small,
Kernel,
Medium,
Large,
None,
}
/// LLVMRustDiagnosticKind
#[derive(Copy, Clone)]
#[repr(C)]
pub enum DiagnosticKind {
Other,
InlineAsm,
StackSize,
DebugMetadataVersion,
SampleProfile,
OptimizationRemark,
OptimizationRemarkMissed,
OptimizationRemarkAnalysis,
OptimizationRemarkAnalysisFPCommute,
OptimizationRemarkAnalysisAliasing,
OptimizationRemarkOther,
OptimizationFailure,
PGOProfile,
}
/// LLVMRustArchiveKind
#[derive(Copy, Clone)]
#[repr(C)]
pub enum ArchiveKind {
Other,
K_GNU,
K_BSD,
K_COFF,
}
/// LLVMRustPassKind
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum PassKind {
Other,
Function,
Module,
}
/// LLVMRustThinLTOData
extern { pub type ThinLTOData; }
/// LLVMRustThinLTOBuffer
extern { pub type ThinLTOBuffer; }
/// LLVMRustThinLTOModule
#[repr(C)]
pub struct ThinLTOModule {
pub identifier: *const c_char,
pub data: *const u8,
pub len: usize,
}
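// Illustrative sketch (assuming `data`/`len` describe a buffer valid for the
// module's lifetime): viewing the serialized module bytes from Rust.
//
//     unsafe fn thin_lto_module_bytes(m: &ThinLTOModule) -> &[u8] {
//         std::slice::from_raw_parts(m.data, m.len)
//     }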
/// LLVMThreadLocalMode
#[derive(Copy, Clone)]
#[repr(C)]
pub enum ThreadLocalMode {
NotThreadLocal,
GeneralDynamic,
LocalDynamic,
InitialExec,
LocalExec
}
// Opaque pointer types
extern { pub type Module; }
extern { pub type Context; }
extern { pub type Type; }
extern { pub type Value; }
extern { pub type Metadata; }
extern { pub type BasicBlock; }
extern { pub type Builder; }
extern { pub type MemoryBuffer; }
pub type MemoryBufferRef = *mut MemoryBuffer;
extern { pub type PassManager; }
pub type PassManagerRef = *mut PassManager;
extern { pub type PassManagerBuilder; }
pub type PassManagerBuilderRef = *mut PassManagerBuilder;
extern { pub type ObjectFile; }
pub type ObjectFileRef = *mut ObjectFile;
extern { pub type SectionIterator; }
pub type SectionIteratorRef = *mut SectionIterator;
extern { pub type Pass; }
pub type PassRef = *mut Pass;
extern { pub type TargetMachine; }
pub type TargetMachineRef = *const TargetMachine;
extern { pub type Archive; }
pub type ArchiveRef = *mut Archive;
extern { pub type ArchiveIterator; }
pub type ArchiveIteratorRef = *mut ArchiveIterator;
extern { pub type ArchiveChild; }
pub type ArchiveChildRef = *mut ArchiveChild;
extern { pub type Twine; }
pub type TwineRef = *mut Twine;
extern { pub type DiagnosticInfo; }
pub type DiagnosticInfoRef = *mut DiagnosticInfo;
extern { pub type DebugLoc; }
pub type DebugLocRef = *mut DebugLoc;
extern { pub type SMDiagnostic; }
pub type SMDiagnosticRef = *mut SMDiagnostic;
extern { pub type RustArchiveMember; }
pub type RustArchiveMemberRef = *mut RustArchiveMember;
extern { pub type OperandBundleDef; }
pub type OperandBundleDefRef = *mut OperandBundleDef;
extern { pub type Linker; }
pub type LinkerRef = *mut Linker;
pub type DiagnosticHandler = unsafe extern "C" fn(DiagnosticInfoRef, *mut c_void);
pub type InlineAsmDiagHandler = unsafe extern "C" fn(SMDiagnosticRef, *const c_void, c_uint);
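// Illustrative sketch: a callback matching the `DiagnosticHandler` signature,
// suitable for registration via `LLVMContextSetDiagnosticHandler` below. The
// handler name and body are hypothetical.
//
//     unsafe extern "C" fn my_diag_handler(di: DiagnosticInfoRef, _cookie: *mut c_void) {
//         let kind = LLVMRustGetDiagInfoKind(di);
//         // ... dispatch on `kind` ...
//     }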
pub mod debuginfo {
use super::Metadata;
extern { pub type DIBuilder; }
pub type DIDescriptor = Metadata;
pub type DIScope = DIDescriptor;
pub type DIFile = DIScope;
pub type DILexicalBlock = DIScope;
pub type DISubprogram = DIScope;
pub type DINameSpace = DIScope;
pub type DIType = DIDescriptor;
pub type DIBasicType = DIType;
pub type DIDerivedType = DIType;
pub type DICompositeType = DIDerivedType;
pub type DIVariable = DIDescriptor;
pub type DIGlobalVariable = DIDescriptor;
pub type DIArray = DIDescriptor;
pub type DISubrange = DIDescriptor;
pub type DIEnumerator = DIDescriptor;
pub type DITemplateTypeParameter = DIDescriptor;
// These values **must** match LLVMRustDIFlags!
bitflags! {
#[repr(C)]
#[derive(Default)]
pub struct DIFlags: ::libc::uint32_t {
const FlagZero = 0;
const FlagPrivate = 1;
const FlagProtected = 2;
const FlagPublic = 3;
const FlagFwdDecl = (1 << 2);
const FlagAppleBlock = (1 << 3);
const FlagBlockByrefStruct = (1 << 4);
const FlagVirtual = (1 << 5);
const FlagArtificial = (1 << 6);
const FlagExplicit = (1 << 7);
const FlagPrototyped = (1 << 8);
const FlagObjcClassComplete = (1 << 9);
const FlagObjectPointer = (1 << 10);
const FlagVector = (1 << 11);
const FlagStaticMember = (1 << 12);
const FlagLValueReference = (1 << 13);
const FlagRValueReference = (1 << 14);
const FlagExternalTypeRef = (1 << 15);
const FlagIntroducedVirtual = (1 << 18);
const FlagBitField = (1 << 19);
const FlagNoReturn = (1 << 20);
const FlagMainSubprogram = (1 << 21);
}
}
}
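// Illustrative sketch: `DIFlags` is a bitflags set, so individual flags are
// combined with `|` and tested with `contains`:
//
//     let flags = DIFlags::FlagArtificial | DIFlags::FlagPrototyped;
//     assert!(flags.contains(DIFlags::FlagPrototyped));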
extern { pub type ModuleBuffer; }
#[allow(improper_ctypes)] // TODO: remove this (needed for NonNull)
extern "C" {
// Create and destroy contexts.
pub fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context;
pub fn LLVMContextDispose(C: &'static mut Context);
pub fn LLVMGetMDKindIDInContext(C: &Context, Name: *const c_char, SLen: c_uint) -> c_uint;
// Create modules.
pub fn LLVMModuleCreateWithNameInContext(ModuleID: *const c_char, C: &Context) -> &Module;
pub fn LLVMGetModuleContext(M: &Module) -> &Context;
pub fn LLVMCloneModule(M: &Module) -> &Module;
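// Illustrative sketch (not part of the original bindings): creating a context
// and a module inside it; module names must be NUL-terminated C strings.
//
//     let cx = unsafe { LLVMRustContextCreate(false) };
//     let name = std::ffi::CString::new("example").unwrap();
//     let m = unsafe { LLVMModuleCreateWithNameInContext(name.as_ptr(), cx) };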
/// Data layout. See Module::getDataLayout.
pub fn LLVMGetDataLayout(M: &Module) -> *const c_char;
pub fn LLVMSetDataLayout(M: &Module, Triple: *const c_char);
/// See Module::dump.
pub fn LLVMDumpModule(M: &Module);
/// See Module::setModuleInlineAsm.
pub fn LLVMSetModuleInlineAsm(M: &Module, Asm: *const c_char);
pub fn LLVMRustAppendModuleInlineAsm(M: &Module, Asm: *const c_char);
/// See llvm::LLVMTypeKind::getTypeID.
pub fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind;
// Operations on integer types
pub fn LLVMInt1TypeInContext(C: &Context) -> &Type;
pub fn LLVMInt8TypeInContext(C: &Context) -> &Type;
pub fn LLVMInt16TypeInContext(C: &Context) -> &Type;
pub fn LLVMInt32TypeInContext(C: &Context) -> &Type;
pub fn LLVMInt64TypeInContext(C: &Context) -> &Type;
pub fn LLVMIntTypeInContext(C: &Context, NumBits: c_uint) -> &Type;
pub fn LLVMGetIntTypeWidth(IntegerTy: &Type) -> c_uint;
// Operations on real types
pub fn LLVMFloatTypeInContext(C: &Context) -> &Type;
pub fn LLVMDoubleTypeInContext(C: &Context) -> &Type;
// Operations on function types
pub fn LLVMFunctionType(ReturnType: &'a Type,
ParamTypes: *const &'a Type,
ParamCount: c_uint,
IsVarArg: Bool)
-> &'a Type;
pub fn LLVMGetReturnType(FunctionTy: &Type) -> &Type;
pub fn LLVMCountParamTypes(FunctionTy: &Type) -> c_uint;
pub fn LLVMGetParamTypes(FunctionTy: &'a Type, Dest: *mut &'a Type);
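// Illustrative sketch (assuming `cx: &Context` is in scope): constructing an
// `i32(i32, i32)` function type from these bindings.
//
//     let i32_ty = unsafe { LLVMInt32TypeInContext(cx) };
//     let params = [i32_ty, i32_ty];
//     let fn_ty = unsafe {
//         LLVMFunctionType(i32_ty, params.as_ptr(), params.len() as c_uint, False)
//     };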
// Operations on struct types
pub fn LLVMStructTypeInContext(C: &'a Context,
ElementTypes: *const &'a Type,
ElementCount: c_uint,
Packed: Bool)
-> &'a Type;
pub fn LLVMIsPackedStruct(StructTy: &Type) -> Bool;
// Operations on array, pointer, and vector types (sequence types)
pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type;
pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type;
pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;
pub fn LLVMGetElementType(Ty: &Type) -> &Type;
pub fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint;
// Operations on other types
pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
pub fn LLVMX86MMXTypeInContext(C: &Context) -> &Type;
pub fn LLVMRustMetadataTypeInContext(C: &Context) -> &Type;
// Operations on all values
pub fn LLVMTypeOf(Val: &Value) -> &Type;
pub fn LLVMGetValueName(Val: &Value) -> *const c_char;
pub fn LLVMSetValueName(Val: &Value, Name: *const c_char);
pub fn LLVMReplaceAllUsesWith(OldVal: &'a Value, NewVal: &'a Value);
pub fn LLVMSetMetadata(Val: &'a Value, KindID: c_uint, Node: &'a Value);
// Operations on Users
pub fn LLVMGetOperand(Val: &Value, Index: c_uint) -> &Value;
// Operations on constants of any type
pub fn LLVMConstNull(Ty: &Type) -> &Value;
pub fn LLVMConstICmp(Pred: IntPredicate, V1: &'a Value, V2: &'a Value) -> &'a Value;
pub fn LLVMConstFCmp(Pred: RealPredicate, V1: &'a Value, V2: &'a Value) -> &'a Value;
pub fn LLVMGetUndef(Ty: &Type) -> &Value;
// Operations on metadata
pub fn LLVMMDStringInContext(C: &Context, Str: *const c_char, SLen: c_uint) -> &Value;
pub fn LLVMMDNodeInContext(C: &'a Context, Vals: *const &'a Value, Count: c_uint) -> &'a Value;
pub fn LLVMAddNamedMetadataOperand(M: &'a Module, Name: *const c_char, Val: &'a Value);
// Operations on scalar constants
pub fn LLVMConstInt(IntTy: &Type, N: c_ulonglong, SignExtend: Bool) -> &Value;
pub fn LLVMConstIntOfArbitraryPrecision(IntTy: &Type, Wn: c_uint, Ws: *const u64) -> &Value;
pub fn LLVMConstIntGetZExtValue(ConstantVal: &Value) -> c_ulonglong;
pub fn LLVMConstIntGetSExtValue(ConstantVal: &Value) -> c_longlong;
pub fn LLVMRustConstInt128Get(ConstantVal: &Value, SExt: bool,
high: *mut u64, low: *mut u64) -> bool;
pub fn LLVMConstRealGetDouble(ConstantVal: &Value, losesInfo: *mut Bool) -> f64;
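// Illustrative sketch (assuming `cx: &Context` is in scope): building the
// 32-bit integer constant `42` without sign extension.
//
//     let i32_ty = unsafe { LLVMInt32TypeInContext(cx) };
//     let forty_two = unsafe { LLVMConstInt(i32_ty, 42, False) };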
// Operations on composite constants
pub fn LLVMConstStringInContext(C: &Context,
Str: *const c_char,
Length: c_uint,
DontNullTerminate: Bool)
-> &Value;
pub fn LLVMConstStructInContext(C: &'a Context,
ConstantVals: *const &'a Value,
Count: c_uint,
Packed: Bool)
-> &'a Value;
pub fn LLVMConstArray(ElementTy: &'a Type,
ConstantVals: *const &'a Value,
Length: c_uint)
-> &'a Value;
pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
// Constant expressions
pub fn LLVMSizeOf(Ty: &Type) -> &Value;
pub fn LLVMConstNeg(ConstantVal: &Value) -> &Value;
pub fn LLVMConstFNeg(ConstantVal: &Value) -> &Value;
pub fn LLVMConstNot(ConstantVal: &Value) -> &Value;
pub fn LLVMConstAdd(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstFAdd(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstSub(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstFSub(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstMul(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstFMul(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstUDiv(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstSDiv(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstFDiv(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstURem(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstSRem(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstFRem(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstAnd(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstOr(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstXor(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstShl(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstLShr(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstAShr(LHSConstant: &'a Value, RHSConstant: &'a Value) -> &'a Value;
pub fn LLVMConstGEP(
ConstantVal: &'a Value,
ConstantIndices: *const &'a Value,
NumIndices: c_uint,
) -> &'a Value;
pub fn LLVMConstInBoundsGEP(
ConstantVal: &'a Value,
ConstantIndices: *const &'a Value,
NumIndices: c_uint,
) -> &'a Value;
pub fn LLVMConstTrunc(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstZExt(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstUIToFP(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstSIToFP(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstFPToUI(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstFPToSI(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstPtrToInt(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstIntToPtr(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstBitCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstPointerCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstIntCast(ConstantVal: &'a Value, ToType: &'a Type, isSigned: Bool) -> &'a Value;
pub fn LLVMConstFPCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstExtractValue(AggConstant: &Value,
IdxList: *const c_uint,
NumIdx: c_uint)
-> &Value;
pub fn LLVMConstInlineAsm(Ty: &Type,
AsmString: *const c_char,
Constraints: *const c_char,
HasSideEffects: Bool,
IsAlignStack: Bool)
-> &Value;
// Operations on global variables, functions, and aliases (globals)
pub fn LLVMIsDeclaration(Global: &Value) -> Bool;
pub fn LLVMRustGetLinkage(Global: &Value) -> Linkage;
pub fn LLVMRustSetLinkage(Global: &Value, RustLinkage: Linkage);
pub fn LLVMGetSection(Global: &Value) -> *const c_char;
pub fn LLVMSetSection(Global: &Value, Section: *const c_char);
pub fn LLVMRustGetVisibility(Global: &Value) -> Visibility;
pub fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility);
pub fn LLVMGetAlignment(Global: &Value) -> c_uint;
pub fn LLVMSetAlignment(Global: &Value, Bytes: c_uint);
pub fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass);
// Operations on global variables
pub fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>;
pub fn LLVMAddGlobal(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value;
pub fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>;
pub fn LLVMRustGetOrInsertGlobal(M: &'a Module, Name: *const c_char, T: &'a Type) -> &'a Value;
pub fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>;
pub fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>;
pub fn LLVMDeleteGlobal(GlobalVar: &Value);
pub fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>;
pub fn LLVMSetInitializer(GlobalVar: &'a Value, ConstantVal: &'a Value);
pub fn LLVMSetThreadLocal(GlobalVar: &Value, IsThreadLocal: Bool);
pub fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode);
pub fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool;
pub fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool);
pub fn LLVMRustGetNamedValue(M: &Module, Name: *const c_char) -> Option<&Value>;
pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool);
// Operations on functions
pub fn LLVMAddFunction(M: &'a Module, Name: *const c_char, FunctionTy: &'a Type) -> &'a Value;
pub fn LLVMGetNamedFunction(M: &Module, Name: *const c_char) -> &Value;
pub fn LLVMGetFirstFunction(M: &Module) -> &Value;
pub fn LLVMGetNextFunction(Fn: &Value) -> &Value;
pub fn LLVMRustGetOrInsertFunction(M: &'a Module,
Name: *const c_char,
FunctionTy: &'a Type)
-> &'a Value;
pub fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint);
pub fn LLVMRustAddAlignmentAttr(Fn: &Value, index: c_uint, bytes: u32);
pub fn LLVMRustAddDereferenceableAttr(Fn: &Value, index: c_uint, bytes: u64);
pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: &Value, index: c_uint, bytes: u64);
pub fn LLVMRustAddFunctionAttribute(Fn: &Value, index: c_uint, attr: Attribute);
pub fn LLVMRustAddFunctionAttrStringValue(Fn: &Value,
index: c_uint,
Name: *const c_char,
Value: *const c_char);
pub fn LLVMRustRemoveFunctionAttributes(Fn: &Value, index: c_uint, attr: Attribute);
// Operations on parameters
pub fn LLVMCountParams(Fn: &Value) -> c_uint;
pub fn LLVMGetParam(Fn: &Value, Index: c_uint) -> &Value;
// Operations on basic blocks
pub fn LLVMBasicBlockAsValue(BB: &BasicBlock) -> &Value;
pub fn LLVMGetBasicBlockParent(BB: &BasicBlock) -> &Value;
pub fn LLVMAppendBasicBlockInContext(C: &'a Context,
Fn: &'a Value,
Name: *const c_char)
-> &'a BasicBlock;
pub fn LLVMDeleteBasicBlock(BB: &BasicBlock);
// Operations on instructions
pub fn LLVMGetInstructionParent(Inst: &Value) -> &BasicBlock;
pub fn LLVMGetFirstBasicBlock(Fn: &Value) -> &BasicBlock;
pub fn LLVMGetFirstInstruction(BB: &BasicBlock) -> &'a Value;
pub fn LLVMInstructionEraseFromParent(Inst: &Value);
// Operations on call sites
pub fn LLVMSetInstructionCallConv(Instr: &Value, CC: c_uint);
pub fn LLVMRustAddCallSiteAttribute(Instr: &Value, index: c_uint, attr: Attribute);
pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: &Value, index: c_uint, bytes: u32);
pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64);
pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: &Value,
index: c_uint,
bytes: u64);
// Operations on load/store instructions (only)
pub fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool);
// Operations on phi nodes
pub fn LLVMAddIncoming(PhiNode: &'a Value,
IncomingValues: *const &'a Value,
IncomingBlocks: *const &'a BasicBlock,
Count: c_uint);
// Instruction builders
pub fn LLVMCreateBuilderInContext(C: &Context) -> &Builder;
pub fn LLVMPositionBuilder(Builder: &'a Builder, Block: &'a BasicBlock, Instr: &'a Value);
pub fn LLVMPositionBuilderBefore(Builder: &'a Builder, Instr: &'a Value);
pub fn LLVMPositionBuilderAtEnd(Builder: &'a Builder, Block: &'a BasicBlock);
pub fn LLVMGetInsertBlock(Builder: &Builder) -> &BasicBlock;
pub fn LLVMDisposeBuilder(Builder: &Builder);
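// Illustrative sketch (assuming `cx: &Context` and a block `bb: &BasicBlock`
// from the same context): positioning a fresh builder and emitting `ret void`.
//
//     let builder = unsafe { LLVMCreateBuilderInContext(cx) };
//     unsafe {
//         LLVMPositionBuilderAtEnd(builder, bb);
//         LLVMBuildRetVoid(builder);
//         LLVMDisposeBuilder(builder);
//     }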
// Metadata
pub fn LLVMSetCurrentDebugLocation(Builder: &'a Builder, L: Option<&'a Value>);
pub fn LLVMGetCurrentDebugLocation(Builder: &Builder) -> &Value;
pub fn LLVMSetInstDebugLocation(Builder: &'a Builder, Inst: &'a Value);
// Terminators
pub fn LLVMBuildRetVoid(B: &Builder) -> &Value;
pub fn LLVMBuildRet(B: &'a Builder, V: &'a Value) -> &'a Value;
pub fn LLVMBuildAggregateRet(B: &'a Builder, RetVals: *const &'a Value, N: c_uint) -> &'a Value;
pub fn LLVMBuildBr(B: &'a Builder, Dest: &'a BasicBlock) -> &'a Value;
pub fn LLVMBuildCondBr(B: &'a Builder,
If: &'a Value,
Then: &'a BasicBlock,
Else: &'a BasicBlock)
-> &'a Value;
pub fn LLVMBuildSwitch(B: &'a Builder,
V: &'a Value,
Else: &'a BasicBlock,
NumCases: c_uint)
-> &'a Value;
pub fn LLVMBuildIndirectBr(B: &'a Builder, Addr: &'a Value, NumDests: c_uint) -> &'a Value;
pub fn LLVMRustBuildInvoke(B: &'a Builder,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
Then: &'a BasicBlock,
Catch: &'a BasicBlock,
Bundle: Option<NonNull<OperandBundleDef>>,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildLandingPad(B: &'a Builder,
Ty: &'a Type,
PersFn: &'a Value,
NumClauses: c_uint,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildResume(B: &'a Builder, Exn: &'a Value) -> &'a Value;
pub fn LLVMBuildUnreachable(B: &Builder) -> &Value;
pub fn LLVMRustBuildCleanupPad(B: &'a Builder,
ParentPad: Option<&'a Value>,
ArgCnt: c_uint,
Args: *const &'a Value,
Name: *const c_char)
-> Option<&'a Value>;
pub fn LLVMRustBuildCleanupRet(B: &'a Builder,
CleanupPad: &'a Value,
UnwindBB: Option<&'a BasicBlock>)
-> Option<&'a Value>;
pub fn LLVMRustBuildCatchPad(B: &'a Builder,
ParentPad: &'a Value,
ArgCnt: c_uint,
Args: *const &'a Value,
Name: *const c_char)
-> Option<&'a Value>;
pub fn LLVMRustBuildCatchRet(B: &'a Builder,
Pad: &'a Value,
BB: &'a BasicBlock)
-> Option<&'a Value>;
pub fn LLVMRustBuildCatchSwitch(Builder: &'a Builder,
ParentPad: Option<&'a Value>,
BB: Option<&'a BasicBlock>,
NumHandlers: c_uint,
Name: *const c_char)
-> Option<&'a Value>;
pub fn LLVMRustAddHandler(CatchSwitch: &'a Value, Handler: &'a BasicBlock);
pub fn LLVMSetPersonalityFn(Func: &'a Value, Pers: &'a Value);
// Add a case to the switch instruction
pub fn LLVMAddCase(Switch: &'a Value, OnVal: &'a Value, Dest: &'a BasicBlock);
// Add a clause to the landing pad instruction
pub fn LLVMAddClause(LandingPad: &'a Value, ClauseVal: &'a Value);
// Set the cleanup on a landing pad instruction
pub fn LLVMSetCleanup(LandingPad: &Value, Val: Bool);
// Arithmetic
pub fn LLVMBuildAdd(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNSWAdd(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNUWAdd(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFAdd(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSub(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNSWSub(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNUWSub(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFSub(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildMul(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNSWMul(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNUWMul(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFMul(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildUDiv(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildExactUDiv(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSDiv(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildExactSDiv(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFDiv(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildURem(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSRem(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFRem(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildShl(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildLShr(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildAShr(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildAnd(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildOr(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildXor(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildBinOp(B: &'a Builder,
Op: Opcode,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildNeg(B: &'a Builder, V: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildNSWNeg(B: &'a Builder, V: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildNUWNeg(B: &'a Builder, V: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildFNeg(B: &'a Builder, V: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildNot(B: &'a Builder, V: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMRustSetHasUnsafeAlgebra(Instr: &Value);
// Memory
pub fn LLVMBuildAlloca(B: &'a Builder, Ty: &'a Type, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildFree(B: &'a Builder, PointerVal: &'a Value) -> &'a Value;
pub fn LLVMBuildLoad(B: &'a Builder, PointerVal: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildStore(B: &'a Builder, Val: &'a Value, Ptr: &'a Value) -> &'a Value;
pub fn LLVMBuildGEP(B: &'a Builder,
Pointer: &'a Value,
Indices: *const &'a Value,
NumIndices: c_uint,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildInBoundsGEP(B: &'a Builder,
Pointer: &'a Value,
Indices: *const &'a Value,
NumIndices: c_uint,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildStructGEP(B: &'a Builder,
Pointer: &'a Value,
Idx: c_uint,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildGlobalString(B: &Builder,
Str: *const c_char,
Name: *const c_char)
-> &Value;
pub fn LLVMBuildGlobalStringPtr(B: &Builder,
Str: *const c_char,
Name: *const c_char)
-> &Value;
// Casts
pub fn LLVMBuildTrunc(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildZExt(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSExt(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFPToUI(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFPToSI(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildUIToFP(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSIToFP(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFPTrunc(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFPExt(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildPtrToInt(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildIntToPtr(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildBitCast(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildZExtOrBitCast(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSExtOrBitCast(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildTruncOrBitCast(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildCast(B: &'a Builder,
Op: Opcode,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildPointerCast(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMRustBuildIntCast(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
IsSigned: bool)
-> &'a Value;
pub fn LLVMBuildFPCast(B: &'a Builder,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
// Comparisons
pub fn LLVMBuildICmp(B: &'a Builder,
Op: c_uint,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildFCmp(B: &'a Builder,
Op: c_uint,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
// Miscellaneous instructions
pub fn LLVMBuildPhi(B: &'a Builder, Ty: &'a Type, Name: *const c_char) -> &'a Value;
pub fn LLVMRustBuildCall(B: &'a Builder,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
Bundle: Option<NonNull<OperandBundleDef>>,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildSelect(B: &'a Builder,
If: &'a Value,
Then: &'a Value,
Else: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildVAArg(B: &'a Builder,
list: &'a Value,
Ty: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildExtractElement(B: &'a Builder,
VecVal: &'a Value,
Index: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildInsertElement(B: &'a Builder,
VecVal: &'a Value,
EltVal: &'a Value,
Index: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildShuffleVector(B: &'a Builder,
V1: &'a Value,
V2: &'a Value,
Mask: &'a Value,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildExtractValue(B: &'a Builder,
AggVal: &'a Value,
Index: c_uint,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildInsertValue(B: &'a Builder,
AggVal: &'a Value,
EltVal: &'a Value,
Index: c_uint,
Name: *const c_char)
-> &'a Value;
pub fn LLVMRustBuildVectorReduceFAdd(B: &'a Builder,
Acc: &'a Value,
Src: &'a Value)
-> Option<&'a Value>;
pub fn LLVMRustBuildVectorReduceFMul(B: &'a Builder,
Acc: &'a Value,
Src: &'a Value)
-> Option<&'a Value>;
pub fn LLVMRustBuildVectorReduceAdd(B: &'a Builder,
Src: &'a Value)
-> Option<&'a Value>;
pub fn LLVMRustBuildVectorReduceMul(B: &'a Builder,
Src: &'a Value)
-> Option<&'a Value>;
pub fn LLVMRustBuildVectorReduceAnd(B: &'a Builder,
Src: &'a Value)
-> Option<&'a Value>;
pub fn LLVMRustBuildVectorReduceOr(B: &'a Builder,
Src: &'a Value)
-> Option<&'a Value>;
pub fn LLVMRustBuildVectorReduceXor(B: &'a Builder,
Src: &'a Value)
-> Option<&'a Value>;
pub fn LLVMRustBuildVectorReduceMin(B: &'a Builder,
Src: &'a Value,
IsSigned: bool)
-> Option<&'a Value>;
pub fn LLVMRustBuildVectorReduceMax(B: &'a Builder,
Src: &'a Value,
IsSigned: bool)
-> Option<&'a Value>;
pub fn LLVMRustBuildVectorReduceFMin(B: &'a Builder,
Src: &'a Value,
IsNaN: bool)
-> Option<&'a Value>;
pub fn LLVMRustBuildVectorReduceFMax(B: &'a Builder,
Src: &'a Value,
IsNaN: bool)
-> Option<&'a Value>;
pub fn LLVMRustBuildMinNum(B: &'a Builder, LHS: &'a Value, RHS: &'a Value) -> Option<&'a Value>;
pub fn LLVMRustBuildMaxNum(B: &'a Builder, LHS: &'a Value, RHS: &'a Value) -> Option<&'a Value>;
pub fn LLVMBuildIsNull(B: &'a Builder, Val: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildIsNotNull(B: &'a Builder, Val: &'a Value, Name: *const c_char) -> &'a Value;
pub fn LLVMBuildPtrDiff(B: &'a Builder,
LHS: &'a Value,
RHS: &'a Value,
Name: *const c_char)
-> &'a Value;
// Atomic Operations
pub fn LLVMRustBuildAtomicLoad(B: &'a Builder,
PointerVal: &'a Value,
Name: *const c_char,
Order: AtomicOrdering)
-> &'a Value;
pub fn LLVMRustBuildAtomicStore(B: &'a Builder,
Val: &'a Value,
Ptr: &'a Value,
Order: AtomicOrdering)
-> &'a Value;
pub fn LLVMRustBuildAtomicCmpXchg(B: &'a Builder,
LHS: &'a Value,
CMP: &'a Value,
RHS: &'a Value,
Order: AtomicOrdering,
FailureOrder: AtomicOrdering,
Weak: Bool)
-> &'a Value;
pub fn LLVMBuildAtomicRMW(B: &'a Builder,
Op: AtomicRmwBinOp,
LHS: &'a Value,
RHS: &'a Value,
Order: AtomicOrdering,
SingleThreaded: Bool)
-> &'a Value;
pub fn LLVMRustBuildAtomicFence(B: &Builder,
Order: AtomicOrdering,
Scope: SynchronizationScope);
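// Illustrative sketch (assuming `b: &Builder` and a pointer value `ptr` in
// scope): an atomic load with sequentially consistent ordering.
//
//     let name = std::ffi::CString::new("loaded").unwrap();
//     let val = unsafe {
//         LLVMRustBuildAtomicLoad(b, ptr, name.as_ptr(),
//                                 AtomicOrdering::SequentiallyConsistent)
//     };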
// Selected entries from the downcasts.
pub fn LLVMIsATerminatorInst(Inst: &Value) -> &Value;
pub fn LLVMIsAStoreInst(Inst: &Value) -> &Value;
/// Writes a module to the specified path. Returns 0 on success.
pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int;
/// Creates a pass manager.
pub fn LLVMCreatePassManager() -> PassManagerRef;
/// Creates a function-by-function pass manager
pub fn LLVMCreateFunctionPassManagerForModule(M: &Module) -> PassManagerRef;
/// Disposes a pass manager.
pub fn LLVMDisposePassManager(PM: PassManagerRef);
/// Runs a pass manager on a module.
pub fn LLVMRunPassManager(PM: PassManagerRef, M: &Module) -> Bool;
pub fn LLVMInitializePasses();
pub fn LLVMPassManagerBuilderCreate() -> PassManagerBuilderRef;
pub fn LLVMPassManagerBuilderDispose(PMB: PassManagerBuilderRef);
pub fn LLVMPassManagerBuilderSetSizeLevel(PMB: PassManagerBuilderRef, Value: Bool);
pub fn LLVMPassManagerBuilderSetDisableUnrollLoops(PMB: PassManagerBuilderRef, Value: Bool);
pub fn LLVMPassManagerBuilderUseInlinerWithThreshold(PMB: PassManagerBuilderRef,
threshold: c_uint);
pub fn LLVMPassManagerBuilderPopulateModulePassManager(PMB: PassManagerBuilderRef,
PM: PassManagerRef);
pub fn LLVMPassManagerBuilderPopulateFunctionPassManager(PMB: PassManagerBuilderRef,
PM: PassManagerRef);
pub fn LLVMPassManagerBuilderPopulateLTOPassManager(PMB: PassManagerBuilderRef,
PM: PassManagerRef,
Internalize: Bool,
RunInliner: Bool);
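// Illustrative sketch (assuming `m: &Module`): the usual create/populate/run
// sequence for a module pass manager.
//
//     let pm = unsafe { LLVMCreatePassManager() };
//     let pmb = unsafe { LLVMPassManagerBuilderCreate() };
//     unsafe {
//         LLVMPassManagerBuilderPopulateModulePassManager(pmb, pm);
//         LLVMRunPassManager(pm, m);
//         LLVMPassManagerBuilderDispose(pmb);
//         LLVMDisposePassManager(pm);
//     }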
pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
PMB: PassManagerBuilderRef,
PM: PassManagerRef) -> bool;
// Stuff that's in rustllvm/ because it's not upstream yet.
/// Opens an object file.
pub fn LLVMCreateObjectFile(MemBuf: MemoryBufferRef) -> ObjectFileRef;
/// Closes an object file.
pub fn LLVMDisposeObjectFile(ObjFile: ObjectFileRef);
/// Enumerates the sections in an object file.
pub fn LLVMGetSections(ObjFile: ObjectFileRef) -> SectionIteratorRef;
/// Destroys a section iterator.
pub fn LLVMDisposeSectionIterator(SI: SectionIteratorRef);
/// Returns true if the section iterator is at the end of the section
/// list.
pub fn LLVMIsSectionIteratorAtEnd(ObjFile: ObjectFileRef, SI: SectionIteratorRef) -> Bool;
/// Moves the section iterator to point to the next section.
pub fn LLVMMoveToNextSection(SI: SectionIteratorRef);
/// Returns the current section size.
pub fn LLVMGetSectionSize(SI: SectionIteratorRef) -> c_ulonglong;
/// Returns the current section contents as a string buffer.
pub fn LLVMGetSectionContents(SI: SectionIteratorRef) -> *const c_char;
/// Reads the given file and returns it as a memory buffer. Use
/// LLVMDisposeMemoryBuffer() to get rid of it.
pub fn LLVMRustCreateMemoryBufferWithContentsOfFile(Path: *const c_char) -> MemoryBufferRef;
pub fn LLVMStartMultithreaded() -> Bool;
/// Returns a string describing the last error caused by an LLVMRust* call.
pub fn LLVMRustGetLastError() -> *const c_char;
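// Illustrative sketch: converting the last LLVMRust* error, if any, into an
// owned Rust string.
//
//     let err = unsafe { LLVMRustGetLastError() };
//     let msg = if err.is_null() {
//         None
//     } else {
//         Some(unsafe { std::ffi::CStr::from_ptr(err) }.to_string_lossy().into_owned())
//     };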
/// Print the pass timings since static dtors aren't picking them up.
pub fn LLVMRustPrintPassTimings();
pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type;
pub fn LLVMStructSetBody(StructTy: &'a Type,
ElementTypes: *const &'a Type,
ElementCount: c_uint,
Packed: Bool);
/// Prepares inline assembly.
pub fn LLVMRustInlineAsm(Ty: &Type,
AsmString: *const c_char,
Constraints: *const c_char,
SideEffects: Bool,
AlignStack: Bool,
Dialect: AsmDialect)
-> &Value;
pub fn LLVMRustDebugMetadataVersion() -> u32;
pub fn LLVMRustVersionMajor() -> u32;
pub fn LLVMRustVersionMinor() -> u32;
pub fn LLVMRustAddModuleFlag(M: &Module, name: *const c_char, value: u32);
pub fn LLVMRustMetadataAsValue(C: &'a Context, MD: &'a Metadata) -> &'a Value;
pub fn LLVMRustDIBuilderCreate(M: &Module) -> &DIBuilder;
pub fn LLVMRustDIBuilderDispose(Builder: &DIBuilder);
pub fn LLVMRustDIBuilderFinalize(Builder: &DIBuilder);
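// Illustrative sketch (assuming `m: &Module`): the DIBuilder lifecycle is
// create, emit debug info through the functions below, finalize, dispose.
//
//     let dib = unsafe { LLVMRustDIBuilderCreate(m) };
//     // ... LLVMRustDIBuilderCreateFile, ...CreateFunction, etc. ...
//     unsafe {
//         LLVMRustDIBuilderFinalize(dib);
//         LLVMRustDIBuilderDispose(dib);
//     }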
pub fn LLVMRustDIBuilderCreateCompileUnit(Builder: &'a DIBuilder,
Lang: c_uint,
File: &'a DIFile,
Producer: *const c_char,
isOptimized: bool,
Flags: *const c_char,
RuntimeVer: c_uint,
SplitName: *const c_char)
-> &'a DIDescriptor;
pub fn LLVMRustDIBuilderCreateFile(Builder: &DIBuilder,
Filename: *const c_char,
Directory: *const c_char)
-> &DIFile;
pub fn LLVMRustDIBuilderCreateSubroutineType(Builder: &'a DIBuilder,
File: &'a DIFile,
ParameterTypes: &'a DIArray)
-> &'a DICompositeType;
pub fn LLVMRustDIBuilderCreateFunction(Builder: &'a DIBuilder,
Scope: &'a DIDescriptor,
Name: *const c_char,
LinkageName: *const c_char,
File: &'a DIFile,
LineNo: c_uint,
Ty: &'a DIType,
isLocalToUnit: bool,
isDefinition: bool,
ScopeLine: c_uint,
Flags: DIFlags,
isOptimized: bool,
Fn: &'a Value,
TParam: &'a DIArray,
Decl: Option<&'a DIDescriptor>)
-> &'a DISubprogram;
pub fn LLVMRustDIBuilderCreateBasicType(Builder: &DIBuilder,
Name: *const c_char,
SizeInBits: u64,
AlignInBits: u32,
Encoding: c_uint)
-> &DIBasicType;
pub fn LLVMRustDIBuilderCreatePointerType(Builder: &'a DIBuilder,
PointeeTy: &'a DIType,
SizeInBits: u64,
AlignInBits: u32,
Name: *const c_char)
-> &'a DIDerivedType;
pub fn LLVMRustDIBuilderCreateStructType(Builder: &'a DIBuilder,
Scope: Option<&'a DIDescriptor>,
Name: *const c_char,
File: &'a DIFile,
LineNumber: c_uint,
SizeInBits: u64,
AlignInBits: u32,
Flags: DIFlags,
DerivedFrom: Option<&'a DIType>,
Elements: &'a DIArray,
RunTimeLang: c_uint,
VTableHolder: Option<&'a DIType>,
UniqueId: *const c_char)
-> &'a DICompositeType;
pub fn LLVMRustDIBuilderCreateMemberType(Builder: &'a DIBuilder,
Scope: &'a DIDescriptor,
Name: *const c_char,
File: &'a DIFile,
LineNo: c_uint,
SizeInBits: u64,
AlignInBits: u32,
OffsetInBits: u64,
Flags: DIFlags,
Ty: &'a DIType)
-> &'a DIDerivedType;
pub fn LLVMRustDIBuilderCreateLexicalBlock(Builder: &'a DIBuilder,
Scope: &'a DIScope,
File: &'a DIFile,
Line: c_uint,
Col: c_uint)
-> &'a DILexicalBlock;
pub fn LLVMRustDIBuilderCreateLexicalBlockFile(Builder: &'a DIBuilder,
Scope: &'a DIScope,
File: &'a DIFile)
-> &'a DILexicalBlock;
pub fn LLVMRustDIBuilderCreateStaticVariable(Builder: &'a DIBuilder,
Context: Option<&'a DIScope>,
Name: *const c_char,
LinkageName: *const c_char,
File: &'a DIFile,
LineNo: c_uint,
Ty: &'a DIType,
isLocalToUnit: bool,
Val: &'a Value,
Decl: Option<&'a DIDescriptor>,
AlignInBits: u32)
-> &'a DIGlobalVariable;
pub fn LLVMRustDIBuilderCreateVariable(Builder: &'a DIBuilder,
Tag: c_uint,
Scope: &'a DIDescriptor,
Name: *const c_char,
File: &'a DIFile,
LineNo: c_uint,
Ty: &'a DIType,
AlwaysPreserve: bool,
Flags: DIFlags,
ArgNo: c_uint,
AlignInBits: u32)
-> &'a DIVariable;
pub fn LLVMRustDIBuilderCreateArrayType(Builder: &'a DIBuilder,
Size: u64,
AlignInBits: u32,
Ty: &'a DIType,
Subscripts: &'a DIArray)
-> &'a DIType;
pub fn LLVMRustDIBuilderCreateVectorType(Builder: &'a DIBuilder,
Size: u64,
AlignInBits: u32,
Ty: &'a DIType,
Subscripts: &'a DIArray)
-> &'a DIType;
pub fn LLVMRustDIBuilderGetOrCreateSubrange(Builder: &DIBuilder,
Lo: i64,
Count: i64)
-> &DISubrange;
pub fn LLVMRustDIBuilderGetOrCreateArray(Builder: &'a DIBuilder,
Ptr: *const Option<&'a DIDescriptor>,
Count: c_uint)
-> &'a DIArray;
pub fn LLVMRustDIBuilderInsertDeclareAtEnd(Builder: &'a DIBuilder,
Val: &'a Value,
VarInfo: &'a DIVariable,
AddrOps: *const i64,
AddrOpsCount: c_uint,
DL: &'a Value,
InsertAtEnd: &'a BasicBlock)
-> &'a Value;
pub fn LLVMRustDIBuilderCreateEnumerator(Builder: &DIBuilder,
Name: *const c_char,
Val: u64)
-> &DIEnumerator;
pub fn LLVMRustDIBuilderCreateEnumerationType(Builder: &'a DIBuilder,
Scope: &'a DIScope,
Name: *const c_char,
File: &'a DIFile,
LineNumber: c_uint,
SizeInBits: u64,
AlignInBits: u32,
Elements: &'a DIArray,
ClassType: &'a DIType)
-> &'a DIType;
pub fn LLVMRustDIBuilderCreateUnionType(Builder: &'a DIBuilder,
Scope: &'a DIScope,
Name: *const c_char,
File: &'a DIFile,
LineNumber: c_uint,
SizeInBits: u64,
AlignInBits: u32,
Flags: DIFlags,
Elements: Option<&'a DIArray>,
RunTimeLang: c_uint,
UniqueId: *const c_char)
-> &'a DIType;
pub fn LLVMSetUnnamedAddr(GlobalVar: &Value, UnnamedAddr: Bool);
pub fn LLVMRustDIBuilderCreateTemplateTypeParameter(Builder: &'a DIBuilder,
Scope: Option<&'a DIScope>,
Name: *const c_char,
Ty: &'a DIType,
File: &'a DIFile,
LineNo: c_uint,
ColumnNo: c_uint)
-> &'a DITemplateTypeParameter;
pub fn LLVMRustDIBuilderCreateNameSpace(Builder: &'a DIBuilder,
Scope: Option<&'a DIScope>,
Name: *const c_char,
File: &'a DIFile,
LineNo: c_uint)
-> &'a DINameSpace;
pub fn LLVMRustDICompositeTypeSetTypeArray(Builder: &'a DIBuilder,
CompositeType: &'a DIType,
TypeArray: &'a DIArray);
pub fn LLVMRustDIBuilderCreateDebugLocation(Context: &'a Context,
Line: c_uint,
Column: c_uint,
Scope: &'a DIScope,
InlinedAt: Option<&'a Metadata>)
-> &'a Value;
pub fn LLVMRustDIBuilderCreateOpDeref() -> i64;
pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> i64;
pub fn LLVMRustWriteTypeToString(Type: &Type, s: RustStringRef);
pub fn LLVMRustWriteValueToString(value_ref: &Value, s: RustStringRef);
pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&Value>;
pub fn LLVMIsAConstantFP(value_ref: &Value) -> Option<&Value>;
pub fn LLVMRustPassKind(Pass: PassRef) -> PassKind;
pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> PassRef;
pub fn LLVMRustAddPass(PM: PassManagerRef, Pass: PassRef);
pub fn LLVMRustHasFeature(T: TargetMachineRef, s: *const c_char) -> bool;
pub fn LLVMRustPrintTargetCPUs(T: TargetMachineRef);
pub fn LLVMRustPrintTargetFeatures(T: TargetMachineRef);
pub fn LLVMRustCreateTargetMachine(Triple: *const c_char,
CPU: *const c_char,
Features: *const c_char,
Model: CodeModel,
Reloc: RelocMode,
Level: CodeGenOptLevel,
UseSoftFP: bool,
PositionIndependentExecutable: bool,
FunctionSections: bool,
DataSections: bool,
TrapUnreachable: bool,
Singlethread: bool)
-> Option<&'static mut TargetMachine>;
pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
pub fn LLVMRustAddAnalysisPasses(T: TargetMachineRef, PM: PassManagerRef, M: &Module);
pub fn LLVMRustAddBuilderLibraryInfo(PMB: PassManagerBuilderRef,
M: &Module,
DisableSimplifyLibCalls: bool);
pub fn LLVMRustConfigurePassManagerBuilder(PMB: PassManagerBuilderRef,
OptLevel: CodeGenOptLevel,
MergeFunctions: bool,
SLPVectorize: bool,
LoopVectorize: bool,
PrepareForThinLTO: bool,
PGOGenPath: *const c_char,
PGOUsePath: *const c_char);
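// In LLVMRustConfigurePassManagerBuilder above, PGOGenPath and PGOUsePath
// are nullable C strings; passing a null pointer appears to be how the
// caller disables profile generation or profile use, respectively.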
pub fn LLVMRustAddLibraryInfo(PM: PassManagerRef,
M: &Module,
DisableSimplifyLibCalls: bool);
pub fn LLVMRustRunFunctionPassManager(PM: PassManagerRef, M: &Module);
pub fn LLVMRustWriteOutputFile(T: TargetMachineRef,
PM: PassManagerRef,
M: &Module,
Output: *const c_char,
FileType: FileType)
-> LLVMRustResult;
pub fn LLVMRustPrintModule(PM: PassManagerRef,
M: &Module,
Output: *const c_char,
Demangle: extern fn(*const c_char,
size_t,
*mut c_char,
size_t) -> size_t);
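// The Demangle callback above plausibly follows a (input, input_len,
// output_buf, output_len) -> written_len contract; the exact convention is
// defined by the C++ caller in rustllvm, so treat this reading as an
// assumption.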
pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char);
pub fn LLVMRustPrintPasses();
pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char);
pub fn LLVMRustAddAlwaysInlinePass(P: PassManagerBuilderRef, AddLifetimes: bool);
pub fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t);
pub fn LLVMRustMarkAllFunctionsNounwind(M: &Module);
pub fn LLVMRustOpenArchive(path: *const c_char) -> ArchiveRef;
pub fn LLVMRustArchiveIteratorNew(AR: ArchiveRef) -> ArchiveIteratorRef;
pub fn LLVMRustArchiveIteratorNext(AIR: ArchiveIteratorRef) -> ArchiveChildRef;
pub fn LLVMRustArchiveChildName(ACR: ArchiveChildRef, size: *mut size_t) -> *const c_char;
pub fn LLVMRustArchiveChildData(ACR: ArchiveChildRef, size: *mut size_t) -> *const c_char;
pub fn LLVMRustArchiveChildFree(ACR: ArchiveChildRef);
pub fn LLVMRustArchiveIteratorFree(AIR: ArchiveIteratorRef);
pub fn LLVMRustDestroyArchive(AR: ArchiveRef);
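// The archive-reading entry points above compose into the usual open /
// iterate / free pattern; a hedged sketch (error handling elided, `path`
// illustrative):
//
//     let ar = LLVMRustOpenArchive(path.as_ptr());
//     let iter = LLVMRustArchiveIteratorNew(ar);
//     loop {
//         let child = LLVMRustArchiveIteratorNext(iter);
//         if child.is_null() { break; }
//         let mut len = 0;
//         let name = LLVMRustArchiveChildName(child, &mut len);
//         // ... inspect `name`/`len` ...
//         LLVMRustArchiveChildFree(child);
//     }
//     LLVMRustArchiveIteratorFree(iter);
//     LLVMRustDestroyArchive(ar);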
pub fn LLVMRustGetSectionName(SI: SectionIteratorRef, data: *mut *const c_char) -> size_t;
pub fn LLVMRustWriteTwineToString(T: TwineRef, s: RustStringRef);
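// Diagnostic plumbing: a handler registered below receives each
// DiagnosticInfo from LLVM, and the Unpack/Write helpers pull out the
// fields that rustc reports back to the user.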
pub fn LLVMContextSetDiagnosticHandler(C: &Context,
Handler: DiagnosticHandler,
DiagnosticContext: *mut c_void);
pub fn LLVMRustUnpackOptimizationDiagnostic(DI: DiagnosticInfoRef,
pass_name_out: RustStringRef,
function_out: *mut Option<&Value>,
loc_line_out: *mut c_uint,
loc_column_out: *mut c_uint,
loc_filename_out: RustStringRef,
message_out: RustStringRef);
pub fn LLVMRustUnpackInlineAsmDiagnostic(DI: DiagnosticInfoRef,
cookie_out: *mut c_uint,
message_out: *mut TwineRef,
instruction_out: *mut Option<&Value>);
pub fn LLVMRustWriteDiagnosticInfoToString(DI: DiagnosticInfoRef, s: RustStringRef);
pub fn LLVMRustGetDiagInfoKind(DI: DiagnosticInfoRef) -> DiagnosticKind;
pub fn LLVMRustSetInlineAsmDiagnosticHandler(C: &Context,
H: InlineAsmDiagHandler,
CX: *mut c_void);
pub fn LLVMRustWriteSMDiagnosticToString(d: SMDiagnosticRef, s: RustStringRef);
pub fn LLVMRustWriteArchive(Dst: *const c_char,
NumMembers: size_t,
Members: *const RustArchiveMemberRef,
WriteSymtab: bool,
Kind: ArchiveKind)
-> LLVMRustResult;
pub fn LLVMRustArchiveMemberNew(Filename: *const c_char,
Name: *const c_char,
Child: Option<NonNull<ArchiveChild>>)
-> RustArchiveMemberRef;
pub fn LLVMRustArchiveMemberFree(Member: RustArchiveMemberRef);
pub fn LLVMRustSetDataLayoutFromTargetMachine(M: &Module, TM: TargetMachineRef);
pub fn LLVMRustBuildOperandBundleDef(Name: *const c_char,
Inputs: *const &Value,
NumInputs: c_uint)
-> OperandBundleDefRef;
pub fn LLVMRustFreeOperandBundleDef(Bundle: OperandBundleDefRef);
pub fn LLVMRustPositionBuilderAtStart(B: &'a Builder, BB: &'a BasicBlock);
pub fn LLVMRustSetComdat(M: &'a Module, V: &'a Value, Name: *const c_char);
pub fn LLVMRustUnsetComdat(V: &Value);
pub fn LLVMRustSetModulePIELevel(M: &Module);
pub fn LLVMRustModuleBufferCreate(M: &Module) -> *mut ModuleBuffer;
pub fn LLVMRustModuleBufferPtr(p: *const ModuleBuffer) -> *const u8;
pub fn LLVMRustModuleBufferLen(p: *const ModuleBuffer) -> usize;
pub fn LLVMRustModuleBufferFree(p: *mut ModuleBuffer);
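// ModuleBuffer round-trip: create a serialized copy of a module, view it as
// raw bytes, then free it. A hedged sketch (the `slice` import is assumed):
//
//     let buf = LLVMRustModuleBufferCreate(m);
//     let bytes = slice::from_raw_parts(LLVMRustModuleBufferPtr(buf),
//                                       LLVMRustModuleBufferLen(buf));
//     // ... persist `bytes` ...
//     LLVMRustModuleBufferFree(buf);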
pub fn LLVMRustModuleCost(M: &Module) -> u64;
pub fn LLVMRustThinLTOAvailable() -> bool;
pub fn LLVMRustPGOAvailable() -> bool;
pub fn LLVMRustWriteThinBitcodeToFile(PMR: PassManagerRef,
M: &Module,
BC: *const c_char) -> bool;
pub fn LLVMRustThinLTOBufferCreate(M: &Module) -> *mut ThinLTOBuffer;
pub fn LLVMRustThinLTOBufferFree(M: *mut ThinLTOBuffer);
pub fn LLVMRustThinLTOBufferPtr(M: *const ThinLTOBuffer) -> *const c_char;
pub fn LLVMRustThinLTOBufferLen(M: *const ThinLTOBuffer) -> size_t;
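// ThinLTOBuffer follows the same create / view / free shape as ModuleBuffer,
// except the data pointer is typed *const c_char; a hedged sketch:
//
//     let buf = LLVMRustThinLTOBufferCreate(m);
//     let bytes = slice::from_raw_parts(LLVMRustThinLTOBufferPtr(buf) as *const u8,
//                                       LLVMRustThinLTOBufferLen(buf));
//     LLVMRustThinLTOBufferFree(buf);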
pub fn LLVMRustCreateThinLTOData(
Modules: *const ThinLTOModule,
NumModules: c_uint,
PreservedSymbols: *const *const c_char,
PreservedSymbolsLen: c_uint,
) -> *mut ThinLTOData;
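// The four Prepare* entry points below return false on failure and, judging
// from their names, are meant to run in sequence over each module: rename,
// resolve weak symbols, internalize, then import.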
pub fn LLVMRustPrepareThinLTORename(
Data: *const ThinLTOData,
Module: &Module,
) -> bool;
pub fn LLVMRustPrepareThinLTOResolveWeak(
Data: *const ThinLTOData,
Module: &Module,
) -> bool;
pub fn LLVMRustPrepareThinLTOInternalize(
Data: *const ThinLTOData,
Module: &Module,
) -> bool;
pub fn LLVMRustPrepareThinLTOImport(
Data: *const ThinLTOData,
Module: &Module,
) -> bool;
pub fn LLVMRustFreeThinLTOData(Data: *mut ThinLTOData);
pub fn LLVMRustParseBitcodeForThinLTO(
Context: &Context,
Data: *const u8,
len: usize,
Identifier: *const c_char,
) -> Option<&Module>;
pub fn LLVMGetModuleIdentifier(M: &Module, size: *mut usize) -> *const c_char;
pub fn LLVMRustThinLTOGetDICompileUnit(M: &Module,
CU1: *mut *mut c_void,
CU2: *mut *mut c_void);
pub fn LLVMRustThinLTOPatchDICompileUnit(M: &Module, CU: *mut c_void);
pub fn LLVMRustLinkerNew(M: &Module) -> LinkerRef;
pub fn LLVMRustLinkerAdd(linker: LinkerRef,
bytecode: *const c_char,
bytecode_len: usize) -> bool;
pub fn LLVMRustLinkerFree(linker: LinkerRef);
}
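
// A minimal sketch, not part of the upstream bindings, of driving the linker
// entry points above from one helper; the function name and the reduction of
// errors to a bool are illustrative assumptions.
pub unsafe fn link_in_bitcode(module: &Module, bytecode: &[u8]) -> bool {
    // Create a linker over the destination module ...
    let linker = LLVMRustLinkerNew(module);
    // ... splice the serialized bitcode into it ...
    let ok = LLVMRustLinkerAdd(linker,
                               bytecode.as_ptr() as *const c_char,
                               bytecode.len());
    // ... and release the linker regardless of the outcome.
    LLVMRustLinkerFree(linker);
    ok
}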