// rust/src/librustc_codegen_llvm/consts.rs
use crate::llvm::{self, SetUnnamedAddr, True};
use crate::debuginfo;
use crate::common::CodegenCx;
use crate::base;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use libc::c_uint;
use rustc::hir::def_id::DefId;
use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint,
Pointer, ErrorHandled, GlobalId};
use rustc::mir::mono::MonoItem;
use rustc::hir::Node;
use rustc_target::abi::HasDataLayout;
use rustc::ty::{self, Ty, Instance};
use rustc_codegen_ssa::traits::*;
use syntax::symbol::{Symbol, sym};
use syntax_pos::Span;
use rustc::{bug, span_bug};
use rustc_data_structures::const_cstr;
use log::debug;
use rustc::ty::layout::{self, Size, Align, LayoutOf};
use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags};
use std::ffi::CStr;
/// Lowers an interpreter `Allocation` to an LLVM constant.
///
/// The allocation is emitted as a packed struct of alternating pieces: plain
/// byte runs (`const_bytes`) and, at every relocation site, the relocated
/// pointer itself (`scalar_to_backend` with a `Pointer` scalar).
pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
    let mut chunks = Vec::with_capacity(alloc.relocations().len() + 1);
    let data_layout = cx.data_layout();
    let ptr_size = data_layout.pointer_size.bytes() as usize;
    let mut cursor = 0;

    for &(reloc_offset, ((), alloc_id)) in alloc.relocations().iter() {
        let reloc_offset = reloc_offset.bytes();
        assert_eq!(reloc_offset as usize as u64, reloc_offset);
        let reloc_offset = reloc_offset as usize;

        // Flush the plain bytes that precede this relocation, if any.
        if reloc_offset > cursor {
            // This `inspect` is okay since we have checked that it is not within a relocation,
            // it is within the bounds of the allocation, and it doesn't affect interpreter
            // execution (we inspect the result after interpreter execution). Any undef byte is
            // replaced with some arbitrary byte value.
            //
            // FIXME: relay undef bytes to codegen as undef const bytes
            chunks.push(cx.const_bytes(
                alloc.inspect_with_undef_and_ptr_outside_interpreter(cursor..reloc_offset),
            ));
        }

        // Decode the pointer-sized offset stored at the relocation site.
        let ptr_offset = read_target_uint(
            data_layout.endian,
            // This `inspect` is okay since it is within the bounds of the allocation, it
            // doesn't affect interpreter execution (we inspect the result after interpreter
            // execution), and we properly interpret the relocation as a relocation pointer
            // offset.
            alloc.inspect_with_undef_and_ptr_outside_interpreter(
                reloc_offset..(reloc_offset + ptr_size),
            ),
        ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;

        chunks.push(cx.scalar_to_backend(
            Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
            &layout::Scalar {
                value: layout::Primitive::Pointer,
                valid_range: 0..=!0
            },
            cx.type_i8p()
        ));
        cursor = reloc_offset + ptr_size;
    }

    // Flush whatever bytes remain after the last relocation.
    if alloc.len() >= cursor {
        // This `inspect` is okay since we have checked that it is after all relocations, it is
        // within the bounds of the allocation, and it doesn't affect interpreter execution (we
        // inspect the result after interpreter execution). Any undef byte is replaced with some
        // arbitrary byte value.
        //
        // FIXME: relay undef bytes to codegen as undef const bytes
        chunks.push(cx.const_bytes(
            alloc.inspect_with_undef_and_ptr_outside_interpreter(cursor..alloc.len()),
        ));
    }

    cx.const_struct(&chunks, true)
}
pub fn codegen_static_initializer(
cx: &CodegenCx<'ll, 'tcx>,
def_id: DefId,
) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> {
let instance = ty::Instance::mono(cx.tcx, def_id);
let cid = GlobalId {
instance,
promoted: None,
};
let param_env = ty::ParamEnv::reveal_all();
let static_ = cx.tcx.const_eval(param_env.and(cid))?;
let alloc = match static_.val {
2019-11-08 23:19:46 +01:00
ty::ConstKind::Value(ConstValue::ByRef {
alloc, offset,
2019-11-08 23:19:46 +01:00
}) if offset.bytes() == 0 => {
2019-06-14 11:29:52 +02:00
alloc
},
_ => bug!("static const eval returned {:#?}", static_),
};
Ok((const_alloc_to_llvm(cx, alloc), alloc))
}
fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
gv: &'ll Value,
mut align: Align) {
// The target may require greater alignment for globals than the type does.
// Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
// which can force it to be smaller. Rust doesn't support this yet.
2018-01-05 06:04:08 +01:00
if let Some(min) = cx.sess().target.target.options.min_global_align {
match Align::from_bits(min) {
Ok(min) => align = align.max(min),
Err(err) => {
2018-01-05 06:04:08 +01:00
cx.sess().err(&format!("invalid minimum global alignment: {}", err));
}
}
}
unsafe {
llvm::LLVMSetAlignment(gv, align.bytes() as u32);
}
}
fn check_and_apply_linkage(
cx: &CodegenCx<'ll, 'tcx>,
attrs: &CodegenFnAttrs,
ty: Ty<'tcx>,
sym: Symbol,
span: Span
) -> &'ll Value {
let llty = cx.layout_of(ty).llvm_type(cx);
let sym = sym.as_str();
if let Some(linkage) = attrs.linkage {
debug!("get_static: sym={} linkage={:?}", sym, linkage);
// If this is a static with a linkage specified, then we need to handle
// it a little specially. The typesystem prevents things like &T and
// extern "C" fn() from being non-null, so we can't just declare a
// static and call it a day. Some linkages (like weak) will make it such
// that the static actually has a null value.
2019-09-16 20:08:35 +02:00
let llty2 = if let ty::RawPtr(ref mt) = ty.kind {
2018-10-08 16:58:26 +02:00
cx.layout_of(mt.ty).llvm_type(cx)
} else {
cx.sess().span_fatal(
span, "must have type `*const T` or `*mut T` due to `#[linkage]` attribute")
};
unsafe {
// Declare a symbol `foo` with the desired linkage.
let g1 = cx.declare_global(&sym, llty2);
llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
// Declare an internal global `extern_with_linkage_foo` which
// is initialized with the address of `foo`. If `foo` is
// discarded during linking (for example, if `foo` has weak
// linkage and there are no definitions), then
// `extern_with_linkage_foo` will instead be initialized to
// zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(&sym);
let g2 = cx.define_global(&real_name, llty).unwrap_or_else(||{
cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
});
llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
llvm::LLVMSetInitializer(g2, g1);
g2
}
} else {
// Generate an external declaration.
// FIXME(nagisa): investigate whether it can be changed into define_global
cx.declare_global(&sym, llty)
}
}
pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe {
2018-09-10 16:28:47 +02:00
llvm::LLVMConstPointerCast(val, ty)
}
}
impl CodegenCx<'ll, 'tcx> {
2018-11-24 17:45:05 +01:00
crate fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
2018-09-10 16:28:47 +02:00
unsafe {
llvm::LLVMConstBitCast(val, ty)
}
}
2018-11-24 17:11:59 +01:00
2018-11-24 17:30:48 +01:00
crate fn static_addr_of_mut(
2018-09-10 16:28:47 +02:00
&self,
cv: &'ll Value,
align: Align,
2018-09-10 16:28:47 +02:00
kind: Option<&str>,
) -> &'ll Value {
unsafe {
let gv = match kind {
2018-11-07 11:08:41 +01:00
Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind);
let gv = self.define_global(&name[..],
2018-11-07 11:08:41 +01:00
self.val_ty(cv)).unwrap_or_else(||{
2018-09-10 16:28:47 +02:00
bug!("symbol `{}` is already defined", name);
});
llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
gv
},
_ => self.define_private_global(self.val_ty(cv)),
2018-09-10 16:28:47 +02:00
};
llvm::LLVMSetInitializer(gv, cv);
set_global_alignment(&self, gv, align);
SetUnnamedAddr(gv, true);
gv
}
}
crate fn get_static(&self, def_id: DefId) -> &'ll Value {
2018-09-10 16:28:47 +02:00
let instance = Instance::mono(self.tcx, def_id);
2018-11-07 11:08:41 +01:00
if let Some(&g) = self.instances.borrow().get(&instance) {
2018-09-10 16:28:47 +02:00
return g;
}
2018-11-07 11:08:41 +01:00
let defined_in_current_codegen_unit = self.codegen_unit
2018-09-10 16:28:47 +02:00
.items()
.contains_key(&MonoItem::Static(def_id));
assert!(!defined_in_current_codegen_unit,
"consts::get_static() should always hit the cache for \
statics defined in the same CGU, but did not for `{:?}`",
def_id);
let ty = instance.ty(self.tcx);
let sym = self.tcx.symbol_name(instance).name;
2018-09-10 16:28:47 +02:00
debug!("get_static: sym={} instance={:?}", sym, instance);
let g = if let Some(id) = self.tcx.hir().as_local_hir_id(def_id) {
2018-09-10 16:28:47 +02:00
2018-09-13 14:58:19 +02:00
let llty = self.layout_of(ty).llvm_type(self);
2019-06-20 10:39:19 +02:00
let (g, attrs) = match self.tcx.hir().get(id) {
2018-09-10 16:28:47 +02:00
Node::Item(&hir::Item {
2019-09-26 18:51:36 +02:00
ref attrs, span, kind: hir::ItemKind::Static(..), ..
2018-09-10 16:28:47 +02:00
}) => {
let sym_str = sym.as_str();
if let Some(g) = self.get_declared_value(&sym_str) {
if self.val_ty(g) != self.type_ptr_to(llty) {
span_bug!(span, "Conflicting types for static");
}
2018-09-10 16:28:47 +02:00
}
let g = self.declare_global(&sym_str, llty);
2018-09-10 16:28:47 +02:00
2018-11-07 11:08:41 +01:00
if !self.tcx.is_reachable_non_generic(def_id) {
2018-09-10 16:28:47 +02:00
unsafe {
llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
}
}
(g, attrs)
}
Node::ForeignItem(&hir::ForeignItem {
ref attrs, span, kind: hir::ForeignItemKind::Static(..), ..
2018-09-10 16:28:47 +02:00
}) => {
2018-11-07 11:08:41 +01:00
let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
(check_and_apply_linkage(&self, &fn_attrs, ty, sym, span), attrs)
2018-09-10 16:28:47 +02:00
}
item => bug!("get_static: expected static, found {:?}", item)
};
debug!("get_static: sym={} attrs={:?}", sym, attrs);
for attr in attrs {
if attr.check_name(sym::thread_local) {
2018-09-10 16:28:47 +02:00
llvm::set_thread_local_mode(g, self.tls_model);
}
}
g
} else {
2018-09-10 16:28:47 +02:00
// FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
2018-11-07 11:08:41 +01:00
debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));
2018-09-10 16:28:47 +02:00
2018-11-07 11:08:41 +01:00
let attrs = self.tcx.codegen_fn_attrs(def_id);
let span = self.tcx.def_span(def_id);
let g = check_and_apply_linkage(&self, &attrs, ty, sym, span);
2018-09-10 16:28:47 +02:00
// Thread-local statics in some other crate need to *always* be linked
// against in a thread-local fashion, so we need to be sure to apply the
// thread-local attribute locally if it was present remotely. If we
// don't do this then linker errors can be generated where the linker
// complains that one object files has a thread local version of the
// symbol and another one doesn't.
if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
llvm::set_thread_local_mode(g, self.tls_model);
}
let needs_dll_storage_attr =
2018-11-07 11:08:41 +01:00
self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
2018-09-10 16:28:47 +02:00
// ThinLTO can't handle this workaround in all cases, so we don't
// emit the attrs. Instead we make them unnecessary by disallowing
2019-02-01 15:15:43 +01:00
// dynamic linking when linker plugin based LTO is enabled.
!self.tcx.sess.opts.cg.linker_plugin_lto.enabled();
2018-09-10 16:28:47 +02:00
// If this assertion triggers, there's something wrong with commandline
// argument validation.
2019-02-01 15:15:43 +01:00
debug_assert!(!(self.tcx.sess.opts.cg.linker_plugin_lto.enabled() &&
2018-09-10 16:28:47 +02:00
self.tcx.sess.target.target.options.is_like_msvc &&
self.tcx.sess.opts.cg.prefer_dynamic));
if needs_dll_storage_attr {
// This item is external but not foreign, i.e., it originates from an external Rust
2018-09-10 16:28:47 +02:00
// crate. Since we don't know whether this crate will be linked dynamically or
// statically in the final application, we always mark such symbols as 'dllimport'.
// If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
// to make things work.
//
// However, in some scenarios we defer emission of statics to downstream
// crates, so there are cases where a static with an upstream DefId
// is actually present in the current crate. We can find out via the
// is_codegened_item query.
2018-11-07 11:08:41 +01:00
if !self.tcx.is_codegened_item(def_id) {
2018-09-10 16:28:47 +02:00
unsafe {
llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
}
}
}
g
};
2018-09-10 16:28:47 +02:00
if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
// For foreign (native) libs we know the exact storage type to use.
unsafe {
llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
rustc: Add `const` globals to the language This change is an implementation of [RFC 69][rfc] which adds a third kind of global to the language, `const`. This global is most similar to what the old `static` was, and if you're unsure about what to use then you should use a `const`. The semantics of these three kinds of globals are: * A `const` does not represent a memory location, but only a value. Constants are translated as rvalues, which means that their values are directly inlined at usage location (similar to a #define in C/C++). Constant values are, well, constant, and can not be modified. Any "modification" is actually a modification to a local value on the stack rather than the actual constant itself. Almost all values are allowed inside constants, whether they have interior mutability or not. There are a few minor restrictions listed in the RFC, but they should in general not come up too often. * A `static` now always represents a memory location (unconditionally). Any references to the same `static` are actually a reference to the same memory location. Only values whose types ascribe to `Sync` are allowed in a `static`. This restriction is in place because many threads may access a `static` concurrently. Lifting this restriction (and allowing unsafe access) is a future extension not implemented at this time. * A `static mut` continues to always represent a memory location. All references to a `static mut` continue to be `unsafe`. This is a large breaking change, and many programs will need to be updated accordingly. A summary of the breaking changes is: * Statics may no longer be used in patterns. Statics now always represent a memory location, which can sometimes be modified. To fix code, repurpose the matched-on-`static` to a `const`. static FOO: uint = 4; match n { FOO => { /* ... */ } _ => { /* ... */ } } change this code to: const FOO: uint = 4; match n { FOO => { /* ... */ } _ => { /* ... */ } } * Statics may no longer refer to other statics by value. 
Due to statics being able to change at runtime, allowing them to reference one another could possibly lead to confusing semantics. If you are in this situation, use a constant initializer instead. Note, however, that statics may reference other statics by address, however. * Statics may no longer be used in constant expressions, such as array lengths. This is due to the same restrictions as listed above. Use a `const` instead. [breaking-change] [rfc]: https://github.com/rust-lang/rfcs/pull/246
2014-10-06 17:17:01 +02:00
}
2013-06-22 03:46:34 +02:00
}
2018-11-07 11:08:41 +01:00
self.instances.borrow_mut().insert(instance, g);
2018-09-10 16:28:47 +02:00
g
}
}
impl StaticMethods for CodegenCx<'ll, 'tcx> {
/// Returns a read-only global holding the constant `cv`, reusing a cached
/// global when the same constant was requested before.
fn static_addr_of(
    &self,
    cv: &'ll Value,
    align: Align,
    kind: Option<&str>,
) -> &'ll Value {
    // Fast path: this constant already lives in a global.
    if let Some(&cached) = self.const_globals.borrow().get(&cv) {
        // The same constant can be requested under several alignment
        // requirements; keep the strictest one seen so far.
        let requested = align.bytes() as u32;
        unsafe {
            if requested > llvm::LLVMGetAlignment(cached) {
                llvm::LLVMSetAlignment(cached, requested);
            }
        }
        return cached;
    }

    // Otherwise materialize a fresh private global, mark it immutable,
    // and remember it for future requests.
    let global = self.static_addr_of_mut(cv, align, kind);
    unsafe {
        llvm::LLVMSetGlobalConstant(global, True);
    }
    self.const_globals.borrow_mut().insert(cv, global);
    global
}
fn codegen_static(
&self,
def_id: DefId,
is_mutable: bool,
) {
unsafe {
2018-11-07 11:08:41 +01:00
let attrs = self.tcx.codegen_fn_attrs(def_id);
2018-09-10 16:28:47 +02:00
let (v, alloc) = match codegen_static_initializer(&self, def_id) {
2018-09-10 16:28:47 +02:00
Ok(v) => v,
// Error has already been reported
Err(_) => return,
};
2018-11-07 11:08:41 +01:00
let g = self.get_static(def_id);
2018-09-10 16:28:47 +02:00
// boolean SSA values are i1, but they have to be stored in i8 slots,
// otherwise some LLVM optimization passes don't work as expected
let mut val_llty = self.val_ty(v);
let v = if val_llty == self.type_i1() {
val_llty = self.type_i8();
llvm::LLVMConstZExt(v, val_llty)
} else {
v
};
let instance = Instance::mono(self.tcx, def_id);
let ty = instance.ty(self.tcx);
2018-09-13 14:58:19 +02:00
let llty = self.layout_of(ty).llvm_type(self);
2018-09-10 16:28:47 +02:00
let g = if val_llty == llty {
g
} else {
// If we created the global with the wrong type,
// correct the type.
let name = llvm::get_value_name(g).to_vec();
llvm::set_value_name(g, b"");
2018-09-10 16:28:47 +02:00
let linkage = llvm::LLVMRustGetLinkage(g);
let visibility = llvm::LLVMRustGetVisibility(g);
let new_g = llvm::LLVMRustGetOrInsertGlobal(
self.llmod, name.as_ptr().cast(), name.len(), val_llty);
2018-09-10 16:28:47 +02:00
llvm::LLVMRustSetLinkage(new_g, linkage);
llvm::LLVMRustSetVisibility(new_g, visibility);
// To avoid breaking any invariants, we leave around the old
// global for the moment; we'll replace all references to it
// with the new global later. (See base::codegen_backend.)
2018-11-07 11:08:41 +01:00
self.statics_to_rauw.borrow_mut().push((g, new_g));
2018-09-10 16:28:47 +02:00
new_g
};
set_global_alignment(&self, g, self.align_of(ty));
llvm::LLVMSetInitializer(g, v);
// As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory.
if !is_mutable {
if self.type_is_freeze(ty) {
llvm::LLVMSetGlobalConstant(g, llvm::True);
}
}
2018-09-10 16:28:47 +02:00
debuginfo::create_global_var_metadata(&self, def_id, g);
if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
llvm::set_thread_local_mode(g, self.tls_model);
// Do not allow LLVM to change the alignment of a TLS on macOS.
//
// By default a global's alignment can be freely increased.
// This allows LLVM to generate more performant instructions
// e.g., using load-aligned into a SIMD register.
2018-09-10 16:28:47 +02:00
//
// However, on macOS 10.10 or below, the dynamic linker does not
// respect any alignment given on the TLS (radar 24221680).
// This will violate the alignment assumption, and causing segfault at runtime.
//
// This bug is very easy to trigger. In `println!` and `panic!`,
// the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
// which the values would be `mem::replace`d on initialization.
// The implementation of `mem::replace` will use SIMD
// whenever the size is 32 bytes or higher. LLVM notices SIMD is used
// and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
// which macOS's dyld disregarded and causing crashes
// (see issues #51794, #51758, #50867, #48866 and #44056).
//
// To workaround the bug, we trick LLVM into not increasing
// the global's alignment by explicitly assigning a section to it
// (equivalent to automatically generating a `#[link_section]` attribute).
// See the comment in the `GlobalValue::canIncreaseAlignment()` function
// of `lib/IR/Globals.cpp` for why this works.
//
// When the alignment is not increased, the optimized `mem::replace`
// will use load-unaligned instructions instead, and thus avoiding the crash.
//
// We could remove this hack whenever we decide to drop macOS 10.10 support.
if self.tcx.sess.target.target.options.is_like_osx {
assert_eq!(alloc.relocations().len(), 0);
let is_zeroed = {
// Treats undefined bytes as if they were defined with the byte value that
// happens to be currently assigned in mir. This is valid since reading
// undef bytes may yield arbitrary values.
//
// FIXME: ignore undef bytes even with representation `!= 0`.
//
// The `inspect` method is okay here because we checked relocations, and
// because we are doing this access to inspect the final interpreter state
// (not as part of the interpreter execution).
alloc.inspect_with_undef_and_ptr_outside_interpreter(0..alloc.len())
.iter()
.all(|b| *b == 0)
};
let sect_name = if is_zeroed {
2018-09-10 16:28:47 +02:00
CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0")
} else {
CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0")
};
llvm::LLVMSetSection(g, sect_name.as_ptr());
}
}
2018-09-10 16:28:47 +02:00
// Wasm statics with custom link sections get special treatment as they
// go into custom sections of the wasm executable.
if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
if let Some(section) = attrs.link_section {
let section = llvm::LLVMMDStringInContext(
2018-11-07 11:08:41 +01:00
self.llcx,
section.as_str().as_ptr().cast(),
2018-09-10 16:28:47 +02:00
section.as_str().len() as c_uint,
);
assert!(alloc.relocations().is_empty());
// The `inspect` method is okay here because we checked relocations, and
// because we are doing this access to inspect the final interpreter state (not
// as part of the interpreter execution).
let bytes = alloc.inspect_with_undef_and_ptr_outside_interpreter(
0..alloc.len());
2018-09-10 16:28:47 +02:00
let alloc = llvm::LLVMMDStringInContext(
2018-11-07 11:08:41 +01:00
self.llcx,
bytes.as_ptr().cast(),
bytes.len() as c_uint,
2018-09-10 16:28:47 +02:00
);
let data = [section, alloc];
2018-11-07 11:08:41 +01:00
let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
2018-09-10 16:28:47 +02:00
llvm::LLVMAddNamedMetadataOperand(
2018-11-07 11:08:41 +01:00
self.llmod,
"wasm.custom_sections\0".as_ptr().cast(),
2018-09-10 16:28:47 +02:00
meta,
);
}
} else {
base::set_link_section(g, &attrs);
}
2018-09-10 16:28:47 +02:00
if attrs.flags.contains(CodegenFnAttrFlags::USED) {
// This static will be stored in the llvm.used variable which is an array of i8*
2018-11-07 11:08:41 +01:00
let cast = llvm::LLVMConstPointerCast(g, self.type_i8p());
self.used_statics.borrow_mut().push(cast);
2018-09-10 16:28:47 +02:00
}
add an #[used] attribute similar to GCC's __attribute((used))__. This attribute prevents LLVM from optimizing away a non-exported symbol, within a compilation unit (object file), when there are no references to it. This is better explained with an example: ``` #[used] static LIVE: i32 = 0; static REFERENCED: i32 = 0; static DEAD: i32 = 0; fn internal() {} pub fn exported() -> &'static i32 { &REFERENCED } ``` Without optimizations, LLVM pretty much preserves all the static variables and functions within the compilation unit. ``` $ rustc --crate-type=lib --emit=obj symbols.rs && nm -C symbols.o 0000000000000000 t drop::h1be0f8f27a2ba94a 0000000000000000 r symbols::REFERENCED::hb3bdfd46050bc84c 0000000000000000 r symbols::DEAD::hc2ea8f9bd06f380b 0000000000000000 r symbols::LIVE::h0970cf9889edb56e 0000000000000000 T symbols::exported::h6f096c2b1fc292b2 0000000000000000 t symbols::internal::h0ac1aadbc1e3a494 ``` With optimizations, LLVM will drop dead code. Here `internal` is dropped because it's not a exported function/symbol (i.e. not `pub`lic). `DEAD` is dropped for the same reason. `REFERENCED` is preserved, even though it's not exported, because it's referenced by the `exported` function. Finally, `LIVE` survives because of the `#[used]` attribute even though it's not exported or referenced. ``` $ rustc --crate-type=lib -C opt-level=3 --emit=obj symbols.rs && nm -C symbols.o 0000000000000000 r symbols::REFERENCED::hb3bdfd46050bc84c 0000000000000000 r symbols::LIVE::h0970cf9889edb56e 0000000000000000 T symbols::exported::h6f096c2b1fc292b2 ``` Note that the linker knows nothing about `#[used]` and will drop `LIVE` because no other object references to it. ``` $ echo 'fn main() {}' >> symbols.rs $ rustc symbols.rs && nm -C symbols | grep LIVE ``` At this time, `#[used]` only works on `static` variables.
2017-02-20 20:42:47 +01:00
}
}
}