Update LLVM and jettison JIT support

LLVM's JIT has been updated numerous times, and we haven't been tracking it at
all. The existing LLVM glue code no longer compiles, and the JIT isn't used for
anything currently.

This also rebases out the FixedStackSegment support which we had added to LLVM.
None of it is still in use by the compiler, and there's no need to keep the
functionality around inside of LLVM.

This is needed to unblock #10708 (where we're tripping an LLVM assertion).
Alex Crichton committed 2013-12-01 14:37:15 -08:00
parent b5bab85c1a
commit 6b34ba242d
10 changed files with 42 additions and 462 deletions

@@ -82,111 +82,8 @@ pub fn WriteOutputFile(
}
}
pub mod jit {
use back::link::llvm_err;
use driver::session::Session;
use lib::llvm::llvm;
use lib::llvm::{ModuleRef, ContextRef, ExecutionEngineRef};
use std::c_str::ToCStr;
use std::cast;
use std::local_data;
use std::unstable::intrinsics;
struct LLVMJITData {
ee: ExecutionEngineRef,
llcx: ContextRef
}
pub trait Engine {}
impl Engine for LLVMJITData {}
impl Drop for LLVMJITData {
fn drop(&mut self) {
unsafe {
llvm::LLVMDisposeExecutionEngine(self.ee);
llvm::LLVMContextDispose(self.llcx);
}
}
}
pub fn exec(sess: Session,
c: ContextRef,
m: ModuleRef,
stacks: bool) {
unsafe {
let manager = llvm::LLVMRustPrepareJIT(intrinsics::morestack_addr());
// We need to tell the JIT where to resolve all linked
// symbols from. The equivalent of -lstd, -lcore, etc.
// By default the JIT will resolve symbols from the extra and
// core libraries linked into rustc. We don't want that,
// in case the user wants to use an older extra library.
// We custom-build a JIT execution engine via some rust wrappers
// first. These wrappers take ownership of the module passed in.
let ee = llvm::LLVMRustBuildJIT(manager, m, stacks);
if ee.is_null() {
llvm::LLVMContextDispose(c);
llvm_err(sess, ~"Could not create the JIT");
}
// Next, we need to get a handle on the _rust_main function by
// looking up its corresponding ValueRef and then requesting that
// the execution engine compiles the function.
let fun = "_rust_main".with_c_str(|entry| {
llvm::LLVMGetNamedFunction(m, entry)
});
if fun.is_null() {
llvm::LLVMDisposeExecutionEngine(ee);
llvm::LLVMContextDispose(c);
llvm_err(sess, ~"Could not find _rust_main in the JIT");
}
// Finally, once we have the pointer to the code, we can do some
// closure magic here to turn it straight into a callable rust
// closure
let code = llvm::LLVMGetPointerToGlobal(ee, fun);
assert!(!code.is_null());
let func: extern "Rust" fn() = cast::transmute(code);
func();
// Currently there is no method of re-using the executing engine
// from LLVM in another call to the JIT. While this kinda defeats
// the purpose of having a JIT in the first place, there isn't
// actually much code currently which would re-use data between
// different invocations of this. Additionally, the compilation
// model currently isn't designed to support this scenario.
//
// We can't destroy the engine/context immediately here, however,
// because of annihilation. The JIT code contains drop glue for any
// types defined in the crate we just ran, and if any of those boxes
// are going to be dropped during annihilation, the drop glue must
// be run. Hence, we need to transfer ownership of this jit engine
// to the caller of this function. To be convenient for now, we
// shove it into TLS and have someone else remove it later on.
let data = ~LLVMJITData { ee: ee, llcx: c };
set_engine(data as ~Engine);
}
}
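
For reference, a minimal standalone sketch (modern Rust, not the 2013 dialect above) of the cast `exec` performs: the raw code address handed back by the JIT is transmuted into a callable `extern "Rust" fn()`. There is no JIT here, so the address is faked with a local function; only the cast pattern is the point.

```rust
// Standalone sketch of the pointer-to-function cast used by the removed
// jit::exec. The "jitted" address below is really just a local function.
fn fake_jitted_entry() {
    println!("hello from the pretend _rust_main");
}

fn main() {
    // Stand-in for the pointer LLVMGetPointerToGlobal handed back.
    let code = fake_jitted_entry as fn() as *const u8;
    assert!(!code.is_null());
    // Safety: the address really is a function with this exact signature.
    let func: extern "Rust" fn() = unsafe { std::mem::transmute(code) };
    func();
}
```
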
// The stage1 compiler won't work, but that doesn't really matter. TLS
// changed only very recently to allow storage of owned values.
local_data_key!(engine_key: ~Engine)
fn set_engine(engine: ~Engine) {
local_data::set(engine_key, engine)
}
pub fn consume_engine() -> Option<~Engine> {
local_data::pop(engine_key)
}
}
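
The TLS handoff described in the comments above, sketched standalone in modern Rust (the 2013 `local_data` API no longer exists); `Box<dyn Any>` stands in for the type-erased `~Engine`.

```rust
use std::any::Any;
use std::cell::RefCell;

// Park an owned, type-erased engine in thread-local storage so it outlives
// the jitted code's drop glue, and let the caller pop it later.
thread_local! {
    static ENGINE: RefCell<Option<Box<dyn Any>>> = RefCell::new(None);
}

fn set_engine(engine: Box<dyn Any>) {
    ENGINE.with(|slot| *slot.borrow_mut() = Some(engine));
}

fn consume_engine() -> Option<Box<dyn Any>> {
    ENGINE.with(|slot| slot.borrow_mut().take())
}

fn main() {
    set_engine(Box::new("pretend ExecutionEngine"));
    assert!(consume_engine().is_some());
    assert!(consume_engine().is_none());
}
```
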
pub mod write {
use back::link::jit;
use back::link::{WriteOutputFile, output_type};
use back::link::{output_type_assembly, output_type_bitcode};
use back::link::{output_type_exe, output_type_llvm_assembly};
@@ -307,48 +204,38 @@ pub mod write {
})
}
if sess.opts.jit {
// If we are using JIT, go ahead and create and execute the
// engine now. JIT execution takes ownership of the module and
// context, so don't dispose them here
jit::exec(sess, llcx, llmod, true);
} else {
// Create a codegen-specific pass manager to emit the actual
// assembly or object files. This may not end up getting used,
// but we make it anyway for good measure.
let cpm = llvm::LLVMCreatePassManager();
llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
llvm::LLVMRustAddLibraryInfo(cpm, llmod);
// Create a codegen-specific pass manager to emit the actual
// assembly or object files. This may not end up getting used,
// but we make it anyway for good measure.
let cpm = llvm::LLVMCreatePassManager();
llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
llvm::LLVMRustAddLibraryInfo(cpm, llmod);
match output_type {
output_type_none => {}
output_type_bitcode => {
output.with_c_str(|buf| {
llvm::LLVMWriteBitcodeToFile(llmod, buf);
})
}
output_type_llvm_assembly => {
output.with_c_str(|output| {
llvm::LLVMRustPrintModule(cpm, llmod, output)
})
}
output_type_assembly => {
WriteOutputFile(sess, tm, cpm, llmod, output, lib::llvm::AssemblyFile);
}
output_type_exe | output_type_object => {
WriteOutputFile(sess, tm, cpm, llmod, output, lib::llvm::ObjectFile);
}
match output_type {
output_type_none => {}
output_type_bitcode => {
output.with_c_str(|buf| {
llvm::LLVMWriteBitcodeToFile(llmod, buf);
})
}
output_type_llvm_assembly => {
output.with_c_str(|output| {
llvm::LLVMRustPrintModule(cpm, llmod, output)
})
}
output_type_assembly => {
WriteOutputFile(sess, tm, cpm, llmod, output, lib::llvm::AssemblyFile);
}
output_type_exe | output_type_object => {
WriteOutputFile(sess, tm, cpm, llmod, output, lib::llvm::ObjectFile);
}
llvm::LLVMDisposePassManager(cpm);
}
llvm::LLVMDisposePassManager(cpm);
llvm::LLVMRustDisposeTargetMachine(tm);
// the jit takes ownership of these two items
if !sess.opts.jit {
llvm::LLVMDisposeModule(llmod);
llvm::LLVMContextDispose(llcx);
}
llvm::LLVMDisposeModule(llmod);
llvm::LLVMContextDispose(llcx);
if sess.time_llvm_passes() { llvm::LLVMRustPrintPassTimings(); }
}
}
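
A hedged sketch of the dispatch that remains once the JIT branch is gone: every path goes through the codegen pass manager and picks an emission mode from the requested output type. The enum and function names are illustrative, not the compiler's.

```rust
// Standalone sketch of the output-type dispatch in the post-JIT write path.
#[derive(Debug)]
enum OutputType { None, Bitcode, LlvmAssembly, Assembly, Object, Exe }

fn emission_step(output_type: &OutputType) -> &'static str {
    match output_type {
        OutputType::None => "emit nothing",
        OutputType::Bitcode => "LLVMWriteBitcodeToFile",
        OutputType::LlvmAssembly => "LLVMRustPrintModule",
        OutputType::Assembly => "WriteOutputFile(AssemblyFile)",
        OutputType::Exe | OutputType::Object => "WriteOutputFile(ObjectFile)",
    }
}

fn main() {
    for ty in [OutputType::Bitcode, OutputType::Object] {
        println!("{ty:?} -> {}", emission_step(&ty));
    }
}
```
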

View File

@@ -419,11 +419,6 @@ pub fn stop_after_phase_5(sess: Session) -> bool {
debug!("not building executable, returning early from compile_input");
return true;
}
if sess.opts.jit {
debug!("running JIT, returning early from compile_input");
return true;
}
return false;
}
@@ -751,7 +746,6 @@ pub fn build_session_options(binary: @str,
} else { No }
};
let gc = debugging_opts & session::gc != 0;
let jit = debugging_opts & session::jit != 0;
let extra_debuginfo = debugging_opts & session::extra_debug_info != 0;
let debuginfo = debugging_opts & session::debug_info != 0 ||
extra_debuginfo;
@@ -802,7 +796,6 @@ pub fn build_session_options(binary: @str,
extra_debuginfo: extra_debuginfo,
lint_opts: lint_opts,
save_temps: save_temps,
jit: jit,
output_type: output_type,
addl_lib_search_paths: @mut addl_lib_search_paths,
ar: ar,
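
A standalone sketch of how the driver decodes `-Z` debugging options from the bitmask, as in the `debugging_opts & session::gc != 0` tests above. The two flag values mirror the renumbered constants in session.rs after this commit; modern Rust, names illustrative.

```rust
// Only two of the -Z debugging flags are shown.
const GC: usize = 1 << 17;
const DEBUG_INFO: usize = 1 << 18;

fn main() {
    // Pretend -Z gc -Z debug-info was passed on the command line.
    let debugging_opts = GC | DEBUG_INFO;
    let gc = debugging_opts & GC != 0;
    let debuginfo = debugging_opts & DEBUG_INFO != 0;
    println!("gc: {gc}, debug-info: {debuginfo}");
}
```
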

@@ -55,19 +55,18 @@ pub static count_type_sizes: uint = 1 << 14;
pub static meta_stats: uint = 1 << 15;
pub static no_opt: uint = 1 << 16;
pub static gc: uint = 1 << 17;
pub static jit: uint = 1 << 18;
pub static debug_info: uint = 1 << 19;
pub static extra_debug_info: uint = 1 << 20;
pub static print_link_args: uint = 1 << 21;
pub static no_debug_borrows: uint = 1 << 22;
pub static lint_llvm: uint = 1 << 23;
pub static print_llvm_passes: uint = 1 << 24;
pub static no_vectorize_loops: uint = 1 << 25;
pub static no_vectorize_slp: uint = 1 << 26;
pub static no_prepopulate_passes: uint = 1 << 27;
pub static use_softfp: uint = 1 << 28;
pub static gen_crate_map: uint = 1 << 29;
pub static prefer_dynamic: uint = 1 << 30;
pub static debug_info: uint = 1 << 18;
pub static extra_debug_info: uint = 1 << 19;
pub static print_link_args: uint = 1 << 20;
pub static no_debug_borrows: uint = 1 << 21;
pub static lint_llvm: uint = 1 << 22;
pub static print_llvm_passes: uint = 1 << 23;
pub static no_vectorize_loops: uint = 1 << 24;
pub static no_vectorize_slp: uint = 1 << 25;
pub static no_prepopulate_passes: uint = 1 << 26;
pub static use_softfp: uint = 1 << 27;
pub static gen_crate_map: uint = 1 << 28;
pub static prefer_dynamic: uint = 1 << 29;
pub fn debugging_opts_map() -> ~[(&'static str, &'static str, uint)] {
~[("verbose", "in general, enable more debug printouts", verbose),
@@ -95,7 +94,6 @@ pub fn debugging_opts_map() -> ~[(&'static str, &'static str, uint)] {
("no-opt", "do not optimize, even if -O is passed", no_opt),
("print-link-args", "Print the arguments passed to the linker", print_link_args),
("gc", "Garbage collect shared data (experimental)", gc),
("jit", "Execute using JIT (experimental)", jit),
("extra-debug-info", "Extra debugging info (experimental)",
extra_debug_info),
("debug-info", "Produce debug info (experimental)", debug_info),
@@ -146,7 +144,6 @@ pub struct options {
extra_debuginfo: bool,
lint_opts: ~[(lint::lint, lint::level)],
save_temps: bool,
jit: bool,
output_type: back::link::output_type,
addl_lib_search_paths: @mut HashSet<Path>, // This is mutable for rustpkg, which
// updates search paths based on the
@@ -370,7 +367,6 @@ pub fn basic_options() -> @options {
extra_debuginfo: false,
lint_opts: ~[],
save_temps: false,
jit: false,
output_type: link::output_type_exe,
addl_lib_search_paths: @mut HashSet::new(),
ar: None,

@@ -363,10 +363,6 @@ pub fn monitor(f: proc(@diagnostic::Emitter)) {
let _finally = finally { ch: ch };
f(demitter);
// Due to reasons explained in #7732, if there was a JIT execution context it
// must be consumed and passed along to our parent task.
back::link::jit::consume_engine()
}) {
result::Ok(_) => { /* fallthrough */ }
result::Err(_) => {

@@ -1441,18 +1441,6 @@ pub mod llvm {
call. */
pub fn LLVMRustGetLastError() -> *c_char;
/** Prepare the JIT. Returns a memory manager that can load crates. */
pub fn LLVMRustPrepareJIT(__morestack: *()) -> *();
/** Load a crate into the memory manager. */
pub fn LLVMRustLoadCrate(MM: *(), Filename: *c_char) -> bool;
/** Execute the JIT engine. */
pub fn LLVMRustBuildJIT(MM: *(),
M: ModuleRef,
EnableSegmentedStacks: bool)
-> ExecutionEngineRef;
/// Print the pass timings since static dtors aren't picking them up.
pub fn LLVMRustPrintPassTimings();

@@ -1 +1 @@
Subproject commit c9ffab392a39eb85f2f15ffc8d41e8c4f4397b8e
Subproject commit 9e85884132d277efeb507d0aeaa160ba201d054f

@@ -81,7 +81,6 @@ LLVMRustCreateTargetMachine(const char *triple,
TargetOptions Options;
Options.NoFramePointerElim = true;
Options.EnableSegmentedStacks = EnableSegmentedStacks;
Options.FixedStackSegmentSize = 2 * 1024 * 1024; // XXX: This is too big.
Options.FloatABIType =
(Trip.getEnvironment() == Triple::GNUEABIHF) ? FloatABI::Hard :
FloatABI::Default;

@@ -34,282 +34,6 @@ extern "C" const char *LLVMRustGetLastError(void) {
return LLVMRustError;
}
// Custom memory manager for MCJITting. It needs special features
// that the generic JIT memory manager doesn't provide. Based on
// code from LLI, changed where needed for Rust.
class RustMCJITMemoryManager : public JITMemoryManager {
public:
SmallVector<sys::MemoryBlock, 16> AllocatedDataMem;
SmallVector<sys::MemoryBlock, 16> AllocatedCodeMem;
SmallVector<sys::MemoryBlock, 16> FreeCodeMem;
void* __morestack;
DenseSet<DynamicLibrary*> crates;
RustMCJITMemoryManager(void* sym) : __morestack(sym) { }
~RustMCJITMemoryManager();
bool loadCrate(const char*, std::string*);
virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID);
virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID, bool isReadOnly);
bool finalizeMemory(std::string *ErrMsg) { return false; }
virtual bool applyPermissions(std::string *Str);
virtual void *getPointerToNamedFunction(const std::string &Name,
bool AbortOnFailure = true);
// Invalidate instruction cache for code sections. Some platforms with
// separate data cache and instruction cache require explicit cache flush,
// otherwise JIT code manipulations (like resolved relocations) will get to
// the data cache but not to the instruction cache.
virtual void invalidateInstructionCache();
// The MCJITMemoryManager doesn't use the following functions, so we don't
// need to implement them.
virtual void setMemoryWritable() {
llvm_unreachable("Unimplemented call");
}
virtual void setMemoryExecutable() {
llvm_unreachable("Unimplemented call");
}
virtual void setPoisonMemory(bool poison) {
llvm_unreachable("Unimplemented call");
}
virtual void AllocateGOT() {
llvm_unreachable("Unimplemented call");
}
virtual uint8_t *getGOTBase() const {
llvm_unreachable("Unimplemented call");
return 0;
}
virtual uint8_t *startFunctionBody(const Function *F,
uintptr_t &ActualSize){
llvm_unreachable("Unimplemented call");
return 0;
}
virtual uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
unsigned Alignment) {
llvm_unreachable("Unimplemented call");
return 0;
}
virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart,
uint8_t *FunctionEnd) {
llvm_unreachable("Unimplemented call");
}
virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) {
llvm_unreachable("Unimplemented call");
return 0;
}
virtual uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) {
llvm_unreachable("Unimplemented call");
return 0;
}
virtual void deallocateFunctionBody(void *Body) {
llvm_unreachable("Unimplemented call");
}
virtual uint8_t* startExceptionTable(const Function* F,
uintptr_t &ActualSize) {
llvm_unreachable("Unimplemented call");
return 0;
}
virtual void endExceptionTable(const Function *F, uint8_t *TableStart,
uint8_t *TableEnd, uint8_t* FrameRegister) {
llvm_unreachable("Unimplemented call");
}
virtual void deallocateExceptionTable(void *ET) {
llvm_unreachable("Unimplemented call");
}
};
bool RustMCJITMemoryManager::loadCrate(const char* file, std::string* err) {
DynamicLibrary crate = DynamicLibrary::getPermanentLibrary(file,
err);
if(crate.isValid()) {
crates.insert(&crate);
return true;
}
return false;
}
uint8_t *RustMCJITMemoryManager::allocateDataSection(uintptr_t Size,
unsigned Alignment,
unsigned SectionID,
bool isReadOnly) {
if (!Alignment)
Alignment = 16;
uint8_t *Addr = (uint8_t*)calloc((Size + Alignment - 1)/Alignment, Alignment);
AllocatedDataMem.push_back(sys::MemoryBlock(Addr, Size));
return Addr;
}
bool RustMCJITMemoryManager::applyPermissions(std::string *Str) {
// Empty.
return true;
}
uint8_t *RustMCJITMemoryManager::allocateCodeSection(uintptr_t Size,
unsigned Alignment,
unsigned SectionID) {
if (!Alignment)
Alignment = 16;
unsigned NeedAllocate = Alignment * ((Size + Alignment - 1)/Alignment + 1);
uintptr_t Addr = 0;
// Look in the list of free code memory regions and use a block there if one
// is available.
for (int i = 0, e = FreeCodeMem.size(); i != e; ++i) {
sys::MemoryBlock &MB = FreeCodeMem[i];
if (MB.size() >= NeedAllocate) {
Addr = (uintptr_t)MB.base();
uintptr_t EndOfBlock = Addr + MB.size();
// Align the address.
Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
// Store the cut-down free memory block.
FreeCodeMem[i] = sys::MemoryBlock((void*)(Addr + Size),
EndOfBlock - Addr - Size);
return (uint8_t*)Addr;
}
}
// No pre-allocated free block was large enough. Allocate a new memory region.
sys::MemoryBlock MB = sys::Memory::AllocateRWX(NeedAllocate, 0, 0);
AllocatedCodeMem.push_back(MB);
Addr = (uintptr_t)MB.base();
uintptr_t EndOfBlock = Addr + MB.size();
// Align the address.
Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
// The AllocateRWX call may allocate much more memory than we need. In this case,
// we store the unused memory as a free memory block.
unsigned FreeSize = EndOfBlock-Addr-Size;
if (FreeSize > 16)
FreeCodeMem.push_back(sys::MemoryBlock((void*)(Addr + Size), FreeSize));
// Return aligned address
return (uint8_t*)Addr;
}
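
A standalone sketch (plain Rust, no LLVM) of the arithmetic `allocateCodeSection` relies on: round the start address up to the alignment, carve the request out of the block, and keep the unused tail around as a free block for later requests.

```rust
// Round an address up to an alignment boundary (alignment must be a power of two).
fn align_up(addr: usize, alignment: usize) -> usize {
    debug_assert!(alignment.is_power_of_two());
    (addr + alignment - 1) & !(alignment - 1)
}

fn main() {
    let block_base = 0x7f00_0000_1003usize; // pretend base of an RWX mapping
    let block_size = 64 * 1024;
    let request = 4096;
    let alignment = 16;

    let start = align_up(block_base, alignment);
    let end_of_block = block_base + block_size;
    // Whatever is left past the served request becomes a free block.
    let leftover = end_of_block - start - request;
    println!("serve {request} bytes at {start:#x}, keep {leftover} bytes free");
}
```
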
void RustMCJITMemoryManager::invalidateInstructionCache() {
for (int i = 0, e = AllocatedCodeMem.size(); i != e; ++i)
sys::Memory::InvalidateInstructionCache(AllocatedCodeMem[i].base(),
AllocatedCodeMem[i].size());
}
void *RustMCJITMemoryManager::getPointerToNamedFunction(const std::string &Name,
bool AbortOnFailure) {
#ifdef __linux__
// Force the following functions to be linked in to anything that uses the
// JIT. This is a hack designed to work around the all-too-clever Glibc
// strategy of making these functions work differently when inlined vs. when
// not inlined, and hiding their real definitions in a separate archive file
// that the dynamic linker can't see. For more info, search for
// 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
if (Name == "stat") return (void*)(intptr_t)&stat;
if (Name == "fstat") return (void*)(intptr_t)&fstat;
if (Name == "lstat") return (void*)(intptr_t)&lstat;
if (Name == "stat64") return (void*)(intptr_t)&stat64;
if (Name == "fstat64") return (void*)(intptr_t)&fstat64;
if (Name == "lstat64") return (void*)(intptr_t)&lstat64;
if (Name == "atexit") return (void*)(intptr_t)&atexit;
if (Name == "mknod") return (void*)(intptr_t)&mknod;
#endif
if (Name == "__morestack" || Name == "___morestack") return &__morestack;
const char *NameStr = Name.c_str();
// Look through loaded crates and main for symbols.
void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
if (Ptr) return Ptr;
// If it wasn't found and if it starts with an underscore ('_') character,
// try again without the underscore.
if (NameStr[0] == '_') {
Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr+1);
if (Ptr) return Ptr;
}
if (AbortOnFailure)
report_fatal_error("Program used external function '" + Name +
"' which could not be resolved!");
return 0;
}
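
A standalone sketch of the lookup order `getPointerToNamedFunction` uses: try the name as given, then, if it starts with an underscore, retry without it. A `HashMap` stands in for the dynamic-library symbol search; the addresses are made up.

```rust
use std::collections::HashMap;

// Resolve a symbol name, falling back to the underscore-stripped spelling
// (Mach-O-style mangling) when the exact name is not found.
fn resolve(symbols: &HashMap<&str, usize>, name: &str) -> Option<usize> {
    if let Some(&addr) = symbols.get(name) {
        return Some(addr);
    }
    if let Some(stripped) = name.strip_prefix('_') {
        return symbols.get(stripped).copied();
    }
    None
}

fn main() {
    let mut symbols = HashMap::new();
    symbols.insert("rust_main", 0x1000usize); // pretend address
    assert_eq!(resolve(&symbols, "_rust_main"), Some(0x1000));
    assert_eq!(resolve(&symbols, "missing"), None);
}
```
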
RustMCJITMemoryManager::~RustMCJITMemoryManager() {
for (unsigned i = 0, e = AllocatedCodeMem.size(); i != e; ++i)
sys::Memory::ReleaseRWX(AllocatedCodeMem[i]);
for (unsigned i = 0, e = AllocatedDataMem.size(); i != e; ++i)
free(AllocatedDataMem[i].base());
}
extern "C" void*
LLVMRustPrepareJIT(void* __morestack) {
// An execution engine will take ownership of this later
// and clean it up for us.
return (void*) new RustMCJITMemoryManager(__morestack);
}
extern "C" bool
LLVMRustLoadCrate(void* mem, const char* crate) {
RustMCJITMemoryManager* manager = (RustMCJITMemoryManager*) mem;
std::string Err;
assert(manager);
if(!manager->loadCrate(crate, &Err)) {
LLVMRustError = Err.c_str();
return false;
}
return true;
}
extern "C" LLVMExecutionEngineRef
LLVMRustBuildJIT(void* mem,
LLVMModuleRef M,
bool EnableSegmentedStacks) {
InitializeNativeTarget();
InitializeNativeTargetAsmPrinter();
InitializeNativeTargetAsmParser();
std::string Err;
TargetOptions Options;
Options.JITEmitDebugInfo = true;
Options.NoFramePointerElim = true;
Options.EnableSegmentedStacks = EnableSegmentedStacks;
RustMCJITMemoryManager* MM = (RustMCJITMemoryManager*) mem;
assert(MM);
ExecutionEngine* EE = EngineBuilder(unwrap(M))
.setErrorStr(&Err)
.setTargetOptions(Options)
.setJITMemoryManager(MM)
.setUseMCJIT(true)
.setAllocateGVsWithCode(false)
.create();
if(!EE || Err != "") {
LLVMRustError = Err.c_str();
// The EngineBuilder only takes ownership of these two structures if the
// create() call is successful, but here it wasn't successful.
LLVMDisposeModule(M);
delete MM;
return NULL;
}
MM->invalidateInstructionCache();
return wrap(EE);
}
extern "C" void
LLVMRustSetNormalizedTarget(LLVMModuleRef M, const char *triple) {
unwrap(M)->setTargetTriple(Triple::normalize(triple));
@@ -566,7 +290,7 @@ extern "C" LLVMValueRef LLVMDIBuilderCreateStructType(
unwrapDI<DIType>(DerivedFrom),
unwrapDI<DIArray>(Elements),
RunTimeLang,
unwrapDI<MDNode*>(VTableHolder),
unwrapDI<DIType>(VTableHolder),
UniqueId));
}

@@ -1,4 +1,4 @@
# If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
# The actual contents of this file do not matter; to trigger a change on the
# build bots, the contents should be changed so git updates the mtime.
2013-09-23
2013-11-19

@@ -3,9 +3,6 @@ LLVMRustWriteOutputFile
LLVMRustGetLastError
LLVMRustConstSmallInt
LLVMRustConstInt
LLVMRustLoadCrate
LLVMRustPrepareJIT
LLVMRustBuildJIT
LLVMRustPrintPassTimings
LLVMRustStartMultithreading
LLVMCreateObjectFile